ngram
listlengths
0
82k
[ "self.typestr): attr_val = getattr(record, self.typestr) should_cb = bool(attr_val) if should_cb", "callback, override_tags=None): default_tags = { \"add_replica\": True } super().__init__(typestr=\"demo\", default_tags=default_tags,", "= typestr def update_tags(self, override_tags): self.tags.update(override_tags) def emit(self, record): \"\"\"", "= True continue else: should_cb = False break if should_cb:", "{}) self.typestr = typestr def update_tags(self, override_tags): self.tags.update(override_tags) def emit(self,", "import logging class CallbackHandler(logging.Handler): def __init__(self, typestr, default_tags, callback, override_tags):", "log record back to the CLI for rendering \"\"\" should_cb", "= default_tags self.update_tags(override_tags or {}) self.typestr = typestr def update_tags(self,", "= { \"add_replica\": True } super().__init__(typestr=\"demo\", default_tags=default_tags, callback=callback, override_tags=override_tags) class", "continue else: should_cb = False break if should_cb: self.callback(record, attr_val)", "self.tags = default_tags self.update_tags(override_tags or {}) self.typestr = typestr def", "Initialize the handler. 
\"\"\" super().__init__() self.tester = tester def emit(self,", "CliHandler(CallbackHandler): def __init__(self, callback, override_tags=None): default_tags = { \"add_replica\": True", "record): \"\"\" Passes the log record back to the CLI", "<reponame>andkononykhin/plenum<filename>stp_core/common/logging/handlers.py import logging class CallbackHandler(logging.Handler): def __init__(self, typestr, default_tags, callback,", "def __init__(self, typestr, default_tags, callback, override_tags): \"\"\" Initialize the handler.", "self.update_tags(override_tags or {}) self.typestr = typestr def update_tags(self, override_tags): self.tags.update(override_tags)", "DemoHandler(CallbackHandler): def __init__(self, callback, override_tags=None): default_tags = { \"add_replica\": True", "super().__init__() self.callback = callback self.tags = default_tags self.update_tags(override_tags or {})", "\"\"\" super().__init__() self.callback = callback self.tags = default_tags self.update_tags(override_tags or", "None and record.levelno >= logging.INFO: should_cb = True if hasattr(record,", "self.tags: if self.tags[t]: should_cb = True continue else: should_cb =", "Initialize the handler. \"\"\" super().__init__() self.callback = callback self.tags =", "in record.tags: if t in self.tags: if self.tags[t]: should_cb =", "= { \"add_replica\": True } super().__init__(typestr=\"cli\", default_tags=default_tags, callback=callback, override_tags=override_tags) class", "logging class CallbackHandler(logging.Handler): def __init__(self, typestr, default_tags, callback, override_tags): \"\"\"", "default_tags self.update_tags(override_tags or {}) self.typestr = typestr def update_tags(self, override_tags):", "\"add_replica\": True } super().__init__(typestr=\"cli\", default_tags=default_tags, callback=callback, override_tags=override_tags) class DemoHandler(CallbackHandler): def", "the handler. 
\"\"\" super().__init__() self.tester = tester def emit(self, record):", "t in record.tags: if t in self.tags: if self.tags[t]: should_cb", "back to the CLI for rendering \"\"\" should_cb = None", "self.tags[t]: should_cb = True continue else: should_cb = False break", "__init__(self, typestr, default_tags, callback, override_tags): \"\"\" Initialize the handler. \"\"\"", "self.tags.update(override_tags) def emit(self, record): \"\"\" Passes the log record back", "\"\"\" Initialize the handler. \"\"\" super().__init__() self.tester = tester def", "self.tester = tester def emit(self, record): \"\"\" Captures a record.", "def __init__(self, tester): \"\"\" Initialize the handler. \"\"\" super().__init__() self.tester", "emit(self, record): \"\"\" Passes the log record back to the", "self.typestr = typestr def update_tags(self, override_tags): self.tags.update(override_tags) def emit(self, record):", "def emit(self, record): \"\"\" Passes the log record back to", "should_cb = True continue else: should_cb = False break if", "CLI for rendering \"\"\" should_cb = None attr_val = None", "callback self.tags = default_tags self.update_tags(override_tags or {}) self.typestr = typestr", "if should_cb: self.callback(record, attr_val) class CliHandler(CallbackHandler): def __init__(self, callback, override_tags=None):", "attr_val = None if hasattr(record, self.typestr): attr_val = getattr(record, self.typestr)", "{ \"add_replica\": True } super().__init__(typestr=\"cli\", default_tags=default_tags, callback=callback, override_tags=override_tags) class DemoHandler(CallbackHandler):", "should_cb: self.callback(record, attr_val) class CliHandler(CallbackHandler): def __init__(self, callback, override_tags=None): default_tags", "should_cb = None attr_val = None if hasattr(record, self.typestr): attr_val", "t in self.tags: if self.tags[t]: should_cb = True continue else:", "super().__init__() self.tester = tester def emit(self, record): \"\"\" Captures a", "__init__(self, callback, 
override_tags=None): default_tags = { \"add_replica\": True } super().__init__(typestr=\"cli\",", "to the CLI for rendering \"\"\" should_cb = None attr_val", "False break if should_cb: self.callback(record, attr_val) class CliHandler(CallbackHandler): def __init__(self,", "__init__(self, callback, override_tags=None): default_tags = { \"add_replica\": True } super().__init__(typestr=\"demo\",", "record back to the CLI for rendering \"\"\" should_cb =", "break if should_cb: self.callback(record, attr_val) class CliHandler(CallbackHandler): def __init__(self, callback,", "record.levelno >= logging.INFO: should_cb = True if hasattr(record, 'tags'): for", "} super().__init__(typestr=\"cli\", default_tags=default_tags, callback=callback, override_tags=override_tags) class DemoHandler(CallbackHandler): def __init__(self, callback,", "typestr def update_tags(self, override_tags): self.tags.update(override_tags) def emit(self, record): \"\"\" Passes", "the log record back to the CLI for rendering \"\"\"", "override_tags): \"\"\" Initialize the handler. 
\"\"\" super().__init__() self.callback = callback", "is None and record.levelno >= logging.INFO: should_cb = True if", "True } super().__init__(typestr=\"demo\", default_tags=default_tags, callback=callback, override_tags=override_tags) class TestingHandler(logging.Handler): def __init__(self,", "should_cb is None and record.levelno >= logging.INFO: should_cb = True", "= None if hasattr(record, self.typestr): attr_val = getattr(record, self.typestr) should_cb", "bool(attr_val) if should_cb is None and record.levelno >= logging.INFO: should_cb", "= None attr_val = None if hasattr(record, self.typestr): attr_val =", "else: should_cb = False break if should_cb: self.callback(record, attr_val) class", "= False break if should_cb: self.callback(record, attr_val) class CliHandler(CallbackHandler): def", "True } super().__init__(typestr=\"cli\", default_tags=default_tags, callback=callback, override_tags=override_tags) class DemoHandler(CallbackHandler): def __init__(self,", "def __init__(self, callback, override_tags=None): default_tags = { \"add_replica\": True }", "logging.INFO: should_cb = True if hasattr(record, 'tags'): for t in", "= bool(attr_val) if should_cb is None and record.levelno >= logging.INFO:", "default_tags=default_tags, callback=callback, override_tags=override_tags) class TestingHandler(logging.Handler): def __init__(self, tester): \"\"\" Initialize", "rendering \"\"\" should_cb = None attr_val = None if hasattr(record,", "= tester def emit(self, record): \"\"\" Captures a record. \"\"\"", "attr_val = getattr(record, self.typestr) should_cb = bool(attr_val) if should_cb is", "override_tags=None): default_tags = { \"add_replica\": True } super().__init__(typestr=\"demo\", default_tags=default_tags, callback=callback,", "default_tags = { \"add_replica\": True } super().__init__(typestr=\"demo\", default_tags=default_tags, callback=callback, override_tags=override_tags)", "tester): \"\"\" Initialize the handler. 
\"\"\" super().__init__() self.tester = tester", "class CliHandler(CallbackHandler): def __init__(self, callback, override_tags=None): default_tags = { \"add_replica\":", "\"\"\" super().__init__() self.tester = tester def emit(self, record): \"\"\" Captures", "= getattr(record, self.typestr) should_cb = bool(attr_val) if should_cb is None", "hasattr(record, 'tags'): for t in record.tags: if t in self.tags:", "callback, override_tags): \"\"\" Initialize the handler. \"\"\" super().__init__() self.callback =", "for t in record.tags: if t in self.tags: if self.tags[t]:", "= callback self.tags = default_tags self.update_tags(override_tags or {}) self.typestr =", "if hasattr(record, self.typestr): attr_val = getattr(record, self.typestr) should_cb = bool(attr_val)", "class CallbackHandler(logging.Handler): def __init__(self, typestr, default_tags, callback, override_tags): \"\"\" Initialize", "if should_cb is None and record.levelno >= logging.INFO: should_cb =", "if hasattr(record, 'tags'): for t in record.tags: if t in", "handler. \"\"\" super().__init__() self.tester = tester def emit(self, record): \"\"\"", "default_tags, callback, override_tags): \"\"\" Initialize the handler. \"\"\" super().__init__() self.callback", "if self.tags[t]: should_cb = True continue else: should_cb = False", "def update_tags(self, override_tags): self.tags.update(override_tags) def emit(self, record): \"\"\" Passes the", "default_tags = { \"add_replica\": True } super().__init__(typestr=\"cli\", default_tags=default_tags, callback=callback, override_tags=override_tags)", "None if hasattr(record, self.typestr): attr_val = getattr(record, self.typestr) should_cb =", "override_tags=override_tags) class DemoHandler(CallbackHandler): def __init__(self, callback, override_tags=None): default_tags = {", "__init__(self, tester): \"\"\" Initialize the handler. 
\"\"\" super().__init__() self.tester =", "True if hasattr(record, 'tags'): for t in record.tags: if t", "callback=callback, override_tags=override_tags) class DemoHandler(CallbackHandler): def __init__(self, callback, override_tags=None): default_tags =", "None attr_val = None if hasattr(record, self.typestr): attr_val = getattr(record,", "'tags'): for t in record.tags: if t in self.tags: if", "for rendering \"\"\" should_cb = None attr_val = None if", "in self.tags: if self.tags[t]: should_cb = True continue else: should_cb", "the handler. \"\"\" super().__init__() self.callback = callback self.tags = default_tags", "True continue else: should_cb = False break if should_cb: self.callback(record,", "Passes the log record back to the CLI for rendering", "record.tags: if t in self.tags: if self.tags[t]: should_cb = True", "} super().__init__(typestr=\"demo\", default_tags=default_tags, callback=callback, override_tags=override_tags) class TestingHandler(logging.Handler): def __init__(self, tester):", "or {}) self.typestr = typestr def update_tags(self, override_tags): self.tags.update(override_tags) def", "callback, override_tags=None): default_tags = { \"add_replica\": True } super().__init__(typestr=\"cli\", default_tags=default_tags,", "override_tags=None): default_tags = { \"add_replica\": True } super().__init__(typestr=\"cli\", default_tags=default_tags, callback=callback,", "hasattr(record, self.typestr): attr_val = getattr(record, self.typestr) should_cb = bool(attr_val) if", "should_cb = False break if should_cb: self.callback(record, attr_val) class CliHandler(CallbackHandler):", "class DemoHandler(CallbackHandler): def __init__(self, callback, override_tags=None): default_tags = { \"add_replica\":", "super().__init__(typestr=\"demo\", default_tags=default_tags, callback=callback, override_tags=override_tags) class TestingHandler(logging.Handler): def __init__(self, tester): \"\"\"", "class TestingHandler(logging.Handler): def __init__(self, tester): 
\"\"\" Initialize the handler. \"\"\"", "self.callback = callback self.tags = default_tags self.update_tags(override_tags or {}) self.typestr", "self.typestr) should_cb = bool(attr_val) if should_cb is None and record.levelno", "update_tags(self, override_tags): self.tags.update(override_tags) def emit(self, record): \"\"\" Passes the log", "\"\"\" should_cb = None attr_val = None if hasattr(record, self.typestr):", "attr_val) class CliHandler(CallbackHandler): def __init__(self, callback, override_tags=None): default_tags = {", "tester def emit(self, record): \"\"\" Captures a record. \"\"\" self.tester(record)", "TestingHandler(logging.Handler): def __init__(self, tester): \"\"\" Initialize the handler. \"\"\" super().__init__()", ">= logging.INFO: should_cb = True if hasattr(record, 'tags'): for t", "getattr(record, self.typestr) should_cb = bool(attr_val) if should_cb is None and", "= True if hasattr(record, 'tags'): for t in record.tags: if", "the CLI for rendering \"\"\" should_cb = None attr_val =", "CallbackHandler(logging.Handler): def __init__(self, typestr, default_tags, callback, override_tags): \"\"\" Initialize the", "default_tags=default_tags, callback=callback, override_tags=override_tags) class DemoHandler(CallbackHandler): def __init__(self, callback, override_tags=None): default_tags", "override_tags): self.tags.update(override_tags) def emit(self, record): \"\"\" Passes the log record", "\"add_replica\": True } super().__init__(typestr=\"demo\", default_tags=default_tags, callback=callback, override_tags=override_tags) class TestingHandler(logging.Handler): def", "override_tags=override_tags) class TestingHandler(logging.Handler): def __init__(self, tester): \"\"\" Initialize the handler.", "handler. 
\"\"\" super().__init__() self.callback = callback self.tags = default_tags self.update_tags(override_tags", "{ \"add_replica\": True } super().__init__(typestr=\"demo\", default_tags=default_tags, callback=callback, override_tags=override_tags) class TestingHandler(logging.Handler):", "and record.levelno >= logging.INFO: should_cb = True if hasattr(record, 'tags'):", "if t in self.tags: if self.tags[t]: should_cb = True continue", "should_cb = bool(attr_val) if should_cb is None and record.levelno >=", "callback=callback, override_tags=override_tags) class TestingHandler(logging.Handler): def __init__(self, tester): \"\"\" Initialize the", "super().__init__(typestr=\"cli\", default_tags=default_tags, callback=callback, override_tags=override_tags) class DemoHandler(CallbackHandler): def __init__(self, callback, override_tags=None):", "typestr, default_tags, callback, override_tags): \"\"\" Initialize the handler. \"\"\" super().__init__()", "\"\"\" Initialize the handler. \"\"\" super().__init__() self.callback = callback self.tags", "should_cb = True if hasattr(record, 'tags'): for t in record.tags:", "\"\"\" Passes the log record back to the CLI for", "self.callback(record, attr_val) class CliHandler(CallbackHandler): def __init__(self, callback, override_tags=None): default_tags =" ]
[]
[]
[ "<reponame>cuenca-mx/agave from functools import wraps from agave.blueprints.decorators import copy_attributes def", "'hello' def test_copy_properties_from() -> None: def retrieve(): ... assert not", "wrapper.i_am_test = True return wrapper class TestResource: @i_am_test def retrieve(self)", "class TestResource: @i_am_test def retrieve(self) -> str: return 'hello' def", "return 'hello' def test_copy_properties_from() -> None: def retrieve(): ... assert", "return wrapper class TestResource: @i_am_test def retrieve(self) -> str: return", "@i_am_test def retrieve(self) -> str: return 'hello' def test_copy_properties_from() ->", "**kwargs) wrapper.i_am_test = True return wrapper class TestResource: @i_am_test def", "@wraps(func) def wrapper(*args, **kwargs): return func(*args, **kwargs) wrapper.i_am_test = True", "wraps from agave.blueprints.decorators import copy_attributes def i_am_test(func): @wraps(func) def wrapper(*args,", "import copy_attributes def i_am_test(func): @wraps(func) def wrapper(*args, **kwargs): return func(*args,", "wrapper class TestResource: @i_am_test def retrieve(self) -> str: return 'hello'", "-> None: def retrieve(): ... assert not hasattr(retrieve, 'i_am_test') retrieve", "test_copy_properties_from() -> None: def retrieve(): ... assert not hasattr(retrieve, 'i_am_test')", "... assert not hasattr(retrieve, 'i_am_test') retrieve = copy_attributes(TestResource)(retrieve) assert hasattr(retrieve,", "TestResource: @i_am_test def retrieve(self) -> str: return 'hello' def test_copy_properties_from()", "import wraps from agave.blueprints.decorators import copy_attributes def i_am_test(func): @wraps(func) def", "str: return 'hello' def test_copy_properties_from() -> None: def retrieve(): ...", "def retrieve(): ... 
assert not hasattr(retrieve, 'i_am_test') retrieve = copy_attributes(TestResource)(retrieve)", "assert not hasattr(retrieve, 'i_am_test') retrieve = copy_attributes(TestResource)(retrieve) assert hasattr(retrieve, 'i_am_test')", "from agave.blueprints.decorators import copy_attributes def i_am_test(func): @wraps(func) def wrapper(*args, **kwargs):", "def wrapper(*args, **kwargs): return func(*args, **kwargs) wrapper.i_am_test = True return", "-> str: return 'hello' def test_copy_properties_from() -> None: def retrieve():", "retrieve(): ... assert not hasattr(retrieve, 'i_am_test') retrieve = copy_attributes(TestResource)(retrieve) assert", "def retrieve(self) -> str: return 'hello' def test_copy_properties_from() -> None:", "wrapper(*args, **kwargs): return func(*args, **kwargs) wrapper.i_am_test = True return wrapper", "func(*args, **kwargs) wrapper.i_am_test = True return wrapper class TestResource: @i_am_test", "= True return wrapper class TestResource: @i_am_test def retrieve(self) ->", "retrieve(self) -> str: return 'hello' def test_copy_properties_from() -> None: def", "True return wrapper class TestResource: @i_am_test def retrieve(self) -> str:", "agave.blueprints.decorators import copy_attributes def i_am_test(func): @wraps(func) def wrapper(*args, **kwargs): return", "def i_am_test(func): @wraps(func) def wrapper(*args, **kwargs): return func(*args, **kwargs) wrapper.i_am_test", "i_am_test(func): @wraps(func) def wrapper(*args, **kwargs): return func(*args, **kwargs) wrapper.i_am_test =", "functools import wraps from agave.blueprints.decorators import copy_attributes def i_am_test(func): @wraps(func)", "from functools import wraps from agave.blueprints.decorators import copy_attributes def i_am_test(func):", "copy_attributes def i_am_test(func): @wraps(func) def wrapper(*args, **kwargs): return func(*args, **kwargs)", "return func(*args, **kwargs) wrapper.i_am_test = True return wrapper class TestResource:", "def test_copy_properties_from() -> None: def 
retrieve(): ... assert not hasattr(retrieve,", "**kwargs): return func(*args, **kwargs) wrapper.i_am_test = True return wrapper class", "None: def retrieve(): ... assert not hasattr(retrieve, 'i_am_test') retrieve =" ]
[ "'bf16_fp32': return mace_pb2.DT_BFLOAT16 else: return mace_pb2.DT_HALF def to_list(x): if isinstance(x,", "[parse_int_array(shape) for shape in to_list(conf[ModelKeys.check_shapes])] mace_check(len(conf[ModelKeys.check_tensors]) == len( conf[ModelKeys.check_shapes]), \"check", "= 0 GPU = 2 HEXAGON = 3 HTA =", "conf[ModelKeys.data_type] = parse_internal_data_type( conf[ModelKeys.data_type]) else: conf[ModelKeys.data_type] = mace_pb2.DT_HALF # parse", "shape count do not match\") input_data_types = [parse_data_type(dt) for dt", "2.0 (the \"License\"); # you may not use this file", "output_data_formats = \"output_data_formats\" check_tensors = \"check_tensors\" check_shapes = \"check_shapes\" model_file_path", "\"data type %s not supported\" % str) def parse_internal_data_type(str): if", "check_tensors = \"check_tensors\" check_shapes = \"check_shapes\" model_file_path = \"model_file_path\" model_sha256_checksum", "'int', 'long', 'module', 'mutable', 'namespace', 'new', 'noexcept', 'not', 'not_eq', 'nullptr',", "for shape in to_list(conf[ModelKeys.output_shapes])] mace_check(len(conf[ModelKeys.output_tensors]) == output_count, \"output node count", "[input_data_types[0]] * input_count mace_check(len(input_data_types) == input_count, \"the number of input_data_types", "mace_check(len(input_ranges) == input_count, \"the number of input_ranges should be \"", "yaml from enum import Enum from utils.util import mace_check from", "conf[ModelKeys.input_tensors] = [str(i) for i in conf[ModelKeys.input_tensors]] input_count = len(conf[ModelKeys.input_tensors])", "%s not supported\" % str) def parse_internal_data_type(str): if str ==", "HTA = 4 APU = 5 CPU_GPU = 100 DEVICE_MAP", "\" \"the same as input tensors\") conf[ModelKeys.input_ranges] = input_ranges #", "+ \"$\", r\": '\" + w + \"'\", s) #", "str == 'fp32_fp32': return mace_pb2.DT_FLOAT elif str == 'bf16_fp32': return", "\"weight_sha256_checksum\" quantize_range_file = \"quantize_range_file\" quantize = \"quantize\" 
quantize_schema = \"quantize_schema\"", "1: conf[ModelKeys.data_type] = mace_pb2.DT_FLOAT else: if ModelKeys.data_type in conf: conf[ModelKeys.data_type]", "be \" \"the same as input tensors\") conf[ModelKeys.input_data_formats] = input_data_formats", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "in to_list(conf.get(ModelKeys.input_ranges, [\"-1.0,1.0\"]))] if len(input_ranges) == 1 and input_count >", "input_ranges = [input_ranges[0]] * input_count mace_check(len(input_ranges) == input_count, \"the number", "def to_list(x): if isinstance(x, list): return x else: return [x]", "1: input_data_types = [input_data_types[0]] * input_count mace_check(len(input_data_types) == input_count, \"the", "Copyright 2019 The MACE Authors. All Rights Reserved. # #", "yaml.load(s) def parse(path): with open(path) as f: config = sanitize_load(f.read())", "OHWI = 103 AUTO = 1000 def parse_data_format(str): str =", "'static_cast', 'struct', 'switch', 'synchronized', 'template', 'this', 'thread_local', 'throw', 'true', 'try',", "'volatile', 'wchar_t', 'while', 'xor', 'xor_eq', 'override', 'final', 'transaction_safe', 'transaction_safe_dynamic', 'if',", "input tensors\") conf[ModelKeys.input_data_types] = input_data_types input_data_formats = [parse_data_format(df) for df", "= 'models' graph_optimize_options = \"graph_optimize_options\" input_tensors = \"input_tensors\" input_shapes =", "in conf and conf[ModelKeys.quantize] == 1: conf[ModelKeys.data_type] = mace_pb2.DT_FLOAT else:", "} def parse_device_type(str): mace_check(str in DEVICE_MAP, \"unknown device %s\" %", "+ w + \"$\", r\": '\" + w + \"'\",", "parse_platform(conf[ModelKeys.platform]) conf[ModelKeys.runtime] = parse_device_type(conf[ModelKeys.runtime]) if ModelKeys.quantize in conf and conf[ModelKeys.quantize]", "node count and shape count do not match\") input_data_types =", "isinstance(x, list): return x else: return [x] def parse_int_array(xs): if", "2 HWIO = 100 OIHW = 101 HWOI = 102", 
"language governing permissions and # limitations under the License. from", "OIHW = 101 HWOI = 102 OHWI = 103 AUTO", "mace_check(len(output_data_types) == output_count, \"the number of output_data_types should be \"", "same as input tensors\") conf[ModelKeys.input_data_formats] = input_data_formats input_ranges = [parse_float_array(r)", "del conf[ModelKeys.subgraphs] conf.update(subgraph) conf[ModelKeys.platform] = parse_platform(conf[ModelKeys.platform]) conf[ModelKeys.runtime] = parse_device_type(conf[ModelKeys.runtime]) if", "mace_pb2.DT_BFLOAT16 else: return mace_pb2.DT_HALF def to_list(x): if isinstance(x, list): return", "[\"float32\"]))] if len(output_data_types) == 1 and output_count > 1: output_data_types", "'unsigned', 'using', 'virtual', 'void', 'volatile', 'wchar_t', 'while', 'xor', 'xor_eq', 'override',", "== input_count, \"input node count and shape count do not", "= [input_data_types[0]] * input_count mace_check(len(input_data_types) == input_count, \"the number of", "use this file except in compliance with the License. 
#", "number of input_data_formats should be \" \"the same as input", "number of output_data_formats should be \" \"the same as output", "input_data_types input_data_formats = [parse_data_format(df) for df in to_list(conf.get(ModelKeys.input_data_formats, [\"NHWC\"]))] if", "change_concat_ranges = \"change_concat_ranges\" winograd = \"winograd\" cl_mem_type = \"cl_mem_type\" data_type", "for i in conf[ModelKeys.input_tensors]] input_count = len(conf[ModelKeys.input_tensors]) conf[ModelKeys.input_shapes] = [parse_int_array(shape)", "be \" \"the same as output tensors\") conf[ModelKeys.output_data_types] = output_data_types", "output_count mace_check(len(output_data_formats) == output_count, \"the number of output_data_formats should be", "len(output_data_formats) == 1 and output_count > 1: output_data_formats = [output_data_formats[0]]", "conf[ModelKeys.input_data_types] = input_data_types input_data_formats = [parse_data_format(df) for df in to_list(conf.get(ModelKeys.input_data_formats,", "= [input_ranges[0]] * input_count mace_check(len(input_ranges) == input_count, \"the number of", "to_list(conf[ModelKeys.check_shapes])] mace_check(len(conf[ModelKeys.check_tensors]) == len( conf[ModelKeys.check_shapes]), \"check tensors count and shape", "parse_int_array(xs): if len(xs) is 0: return [1] return [int(x) for", "'char32_t', 'class', 'compl', 'concept', 'const', 'constexpr', 'const_cast', 'continue', 'co_await', 'co_return',", "0 CAFFE = 1 ONNX = 2 MEGENGINE = 3", "type %s not supported\" % str) def parse_internal_data_type(str): if str", "\"input_data_types\" input_data_formats = \"input_data_formats\" input_ranges = \"input_ranges\" output_tensors = \"output_tensors\"", "x in xs.split(\",\")] def parse_float_array(xs): return [float(x) for x in", "if ModelKeys.check_tensors in conf: conf[ModelKeys.check_tensors] = to_list(conf[ModelKeys.check_tensors]) conf[ModelKeys.check_shapes] = [parse_int_array(shape)", "the License at # # 
http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "= \"input_tensors\" input_shapes = \"input_shapes\" input_data_types = \"input_data_types\" input_data_formats =", "DEVICE_MAP[str] class Platform(Enum): TENSORFLOW = 0 CAFFE = 1 ONNX", "conf[ModelKeys.runtime] = parse_device_type(conf[ModelKeys.runtime]) if ModelKeys.quantize in conf and conf[ModelKeys.quantize] ==", "return [1] return [int(x) for x in xs.split(\",\")] def parse_float_array(xs):", "License. # You may obtain a copy of the License", "\"quantize\" quantize_schema = \"quantize_schema\" quantize_large_weights = \"quantize_large_weights\" quantize_stat = \"quantize_stat\"", "output_count mace_check(len(output_data_types) == output_count, \"the number of output_data_types should be", "do not match\") output_data_types = [parse_data_type(dt) for dt in to_list(conf.get(ModelKeys.output_data_types,", "\"the number of output_data_formats should be \" \"the same as", "ModelKeys.data_type in conf: conf[ModelKeys.data_type] = parse_internal_data_type( conf[ModelKeys.data_type]) else: conf[ModelKeys.data_type] =", "= \"model_sha256_checksum\" weight_file_path = \"weight_file_path\" weight_sha256_checksum = \"weight_sha256_checksum\" quantize_range_file =", "= input_data_types input_data_formats = [parse_data_format(df) for df in to_list(conf.get(ModelKeys.input_data_formats, [\"NHWC\"]))]", "'not', 'not_eq', 'nullptr', 'operator', 'or', 'or_eq', 'private', 'protected', 'public', 'register',", "conf = copy.deepcopy(conf) if ModelKeys.subgraphs in conf: subgraph = conf[ModelKeys.subgraphs][0]", "output_count, \"output node count and shape count do not match\")", "'final', 'transaction_safe', 'transaction_safe_dynamic', 'if', 'elif', 'else', 'endif', 'defined', 'ifdef', 'ifndef',", "under the License is distributed on an \"AS IS\" BASIS,", "runtime = \"runtime\" models = 'models' graph_optimize_options = \"graph_optimize_options\" input_tensors", "mace_pb2.DT_FLOAT else: if ModelKeys.data_type in 
conf: conf[ModelKeys.data_type] = parse_internal_data_type( conf[ModelKeys.data_type])", "mace_check(str in [e.name for e in Platform], \"unknown platform %s\"", "License for the specific language governing permissions and # limitations", "xs.split(\",\")] def parse_float_array(xs): return [float(x) for x in xs.split(\",\")] def", "mace_check(False, \"data type %s not supported\" % str) def parse_internal_data_type(str):", "import print_function import re import os import copy import yaml", "DeviceType.HEXAGON, \"dsp\": DeviceType.HEXAGON, \"hta\": DeviceType.HTA, \"apu\": DeviceType.APU, \"cpu+gpu\": DeviceType.CPU_GPU }", "mace_pb2.DT_FLOAT elif str == 'bf16_fp32': return mace_pb2.DT_BFLOAT16 else: return mace_pb2.DT_HALF", "= \"cl_mem_type\" data_type = \"data_type\" subgraphs = \"subgraphs\" validation_inputs_data =", "class DataFormat(Enum): NONE = 0 NHWC = 1 NCHW =", "df in to_list(conf.get(ModelKeys.input_data_formats, [\"NHWC\"]))] if len(input_data_formats) == 1 and input_count", "Reserved. # # Licensed under the Apache License, Version 2.0", "under the License. 
from __future__ import absolute_import from __future__ import", "else: if ModelKeys.data_type in conf: conf[ModelKeys.data_type] = parse_internal_data_type( conf[ModelKeys.data_type]) else:", "= 2 MEGENGINE = 3 def parse_platform(str): str = str.upper()", "def normalize_model_config(conf): conf = copy.deepcopy(conf) if ModelKeys.subgraphs in conf: subgraph", "in conf: conf[ModelKeys.check_tensors] = to_list(conf[ModelKeys.check_tensors]) conf[ModelKeys.check_shapes] = [parse_int_array(shape) for shape", "conf[ModelKeys.input_tensors]] input_count = len(conf[ModelKeys.input_tensors]) conf[ModelKeys.input_shapes] = [parse_int_array(shape) for shape in", "= 1 NCHW = 2 HWIO = 100 OIHW =", "parse_device_type(conf[ModelKeys.runtime]) if ModelKeys.quantize in conf and conf[ModelKeys.quantize] == 1: conf[ModelKeys.data_type]", "str == \"int32\": return mace_pb2.DT_INT32 else: mace_check(False, \"data type %s", "return mace_pb2.DT_BFLOAT16 else: return mace_pb2.DT_HALF def to_list(x): if isinstance(x, list):", "node count and shape count do not match\") output_data_types =", "DeviceType.APU, \"cpu+gpu\": DeviceType.CPU_GPU } def parse_device_type(str): mace_check(str in DEVICE_MAP, \"unknown", "conf[ModelKeys.data_type] = mace_pb2.DT_HALF # parse input conf[ModelKeys.input_tensors] = to_list(conf[ModelKeys.input_tensors]) conf[ModelKeys.input_tensors]", "in to_list(conf[ModelKeys.input_shapes])] mace_check( len(conf[ModelKeys.input_shapes]) == input_count, \"input node count and", "and shape count do not match\") output_data_types = [parse_data_type(dt) for", "\"subgraphs\" validation_inputs_data = \"validation_inputs_data\" class DataFormat(Enum): NONE = 0 NHWC", "[x] def parse_int_array(xs): if len(xs) is 0: return [1] return", "conf = parse(path) return conf[\"devices\"] class ModelKeys(object): platform = \"platform\"", "mace_check( len(conf[ModelKeys.input_shapes]) == input_count, \"input node count and shape count", "mace_check(str in [e.name for e in DataFormat], 
\"unknown data format", "r\": '\" + w + \"'\", s) # sub ${}", "mace_check from utils.util import MaceLogger from py_proto import mace_pb2 CPP_KEYWORDS", "= mace_pb2.DT_HALF # parse input conf[ModelKeys.input_tensors] = to_list(conf[ModelKeys.input_tensors]) conf[ModelKeys.input_tensors] =", "\"the number of input_ranges should be \" \"the same as", "from __future__ import absolute_import from __future__ import division from __future__", "= str.upper() mace_check(str in [e.name for e in Platform], \"unknown", "in compliance with the License. # You may obtain a", "for dt in to_list(conf.get(ModelKeys.output_data_types, [\"float32\"]))] if len(output_data_types) == 1 and", "output_data_types = [parse_data_type(dt) for dt in to_list(conf.get(ModelKeys.output_data_types, [\"float32\"]))] if len(output_data_types)", "software # distributed under the License is distributed on an", "The MACE Authors. All Rights Reserved. # # Licensed under", "class ModelKeys(object): platform = \"platform\" runtime = \"runtime\" models =", "number of input_data_types should be \" \"the same as input", "count and shape count do not match.\") MaceLogger.summary(conf) return conf", "'else', 'endif', 'defined', 'ifdef', 'ifndef', 'define', 'undef', 'include', 'line', 'error',", "i in conf[ModelKeys.input_tensors]] input_count = len(conf[ModelKeys.input_tensors]) conf[ModelKeys.input_shapes] = [parse_int_array(shape) for", "1 and input_count > 1: input_data_types = [input_data_types[0]] * input_count", "= \"winograd\" cl_mem_type = \"cl_mem_type\" data_type = \"data_type\" subgraphs =", "% str) return DEVICE_MAP[str] class Platform(Enum): TENSORFLOW = 0 CAFFE", "print_function import re import os import copy import yaml from", "import os import copy import yaml from enum import Enum", "'xor', 'xor_eq', 'override', 'final', 'transaction_safe', 'transaction_safe_dynamic', 'if', 'elif', 'else', 'endif',", "models = 'models' graph_optimize_options = \"graph_optimize_options\" input_tensors = 
\"input_tensors\" input_shapes", "\"input_shapes\" input_data_types = \"input_data_types\" input_data_formats = \"input_data_formats\" input_ranges = \"input_ranges\"", "\"quantize_schema\" quantize_large_weights = \"quantize_large_weights\" quantize_stat = \"quantize_stat\" change_concat_ranges = \"change_concat_ranges\"", "CPU = 0 GPU = 2 HEXAGON = 3 HTA", "== 1 and input_count > 1: input_data_types = [input_data_types[0]] *", "== 1: conf[ModelKeys.data_type] = mace_pb2.DT_FLOAT else: if ModelKeys.data_type in conf:", "\"graph_optimize_options\" input_tensors = \"input_tensors\" input_shapes = \"input_shapes\" input_data_types = \"input_data_types\"", "'atomic_noexcept', 'auto', 'bitand', 'bitor', 'bool', 'break', 'case', 'catch', 'char', 'char16_t',", "'define', 'undef', 'include', 'line', 'error', 'pragma', ] def sanitize_load(s): #", "parse_device_type(str): mace_check(str in DEVICE_MAP, \"unknown device %s\" % str) return", "s = re.sub(r\"\\${(\\w+)}\", lambda x: os.environ[x.group(1)], s) return yaml.load(s) def", "= 103 AUTO = 1000 def parse_data_format(str): str = str.upper()", "\"dsp\": DeviceType.HEXAGON, \"hta\": DeviceType.HTA, \"apu\": DeviceType.APU, \"cpu+gpu\": DeviceType.CPU_GPU } def", "parse(path) return conf[\"devices\"] class ModelKeys(object): platform = \"platform\" runtime =", "\"input_ranges\" output_tensors = \"output_tensors\" output_shapes = \"output_shapes\" output_data_types = \"output_data_types\"", "and shape count do not match\") input_data_types = [parse_data_type(dt) for", "1 ONNX = 2 MEGENGINE = 3 def parse_platform(str): str", "= 3 HTA = 4 APU = 5 CPU_GPU =", "HWOI = 102 OHWI = 103 AUTO = 1000 def", "OF ANY KIND, either express or implied. 
# See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "output_data_types = \"output_data_types\" output_data_formats = \"output_data_formats\" check_tensors = \"check_tensors\" check_shapes", "\"cpu\": DeviceType.CPU, \"gpu\": DeviceType.GPU, \"hexagon\": DeviceType.HEXAGON, \"dsp\": DeviceType.HEXAGON, \"hta\": DeviceType.HTA,", "and input_count > 1: input_ranges = [input_ranges[0]] * input_count mace_check(len(input_ranges)", "output_count > 1: output_data_types = [output_data_types[0]] * output_count mace_check(len(output_data_types) ==", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "'operator', 'or', 'or_eq', 'private', 'protected', 'public', 'register', 'reinterpret_cast', 'requires', 'return',", "count and shape count do not match\") output_data_types = [parse_data_type(dt)", "mace_check(len(output_data_formats) == output_count, \"the number of output_data_formats should be \"", "from __future__ import print_function import re import os import copy", "s = re.sub(r\":\\s+\" + w + \"$\", r\": '\" +", "ON/OFF to boolean for w in [\"ON\", \"OFF\", \"on\", \"off\"]:", "the License. 
# You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "with open(path) as f: config = sanitize_load(f.read()) return config def", "DataFormat(Enum): NONE = 0 NHWC = 1 NCHW = 2", "do not match\") input_data_types = [parse_data_type(dt) for dt in to_list(conf.get(ModelKeys.input_data_types,", "s) return yaml.load(s) def parse(path): with open(path) as f: config", "output_data_formats = [parse_data_format(df) for df in to_list(conf.get(ModelKeys.output_data_formats, [\"NHWC\"]))] if len(output_data_formats)", "to in writing, software # distributed under the License is", "0: return [1] return [int(x) for x in xs.split(\",\")] def", "conf[ModelKeys.check_shapes] = [parse_int_array(shape) for shape in to_list(conf[ModelKeys.check_shapes])] mace_check(len(conf[ModelKeys.check_tensors]) == len(", "# See the License for the specific language governing permissions", "str = str.upper() mace_check(str in [e.name for e in DataFormat],", "len(xs) is 0: return [1] return [int(x) for x in", "[parse_int_array(shape) for shape in to_list(conf[ModelKeys.input_shapes])] mace_check( len(conf[ModelKeys.input_shapes]) == input_count, \"input", "= 2 HEXAGON = 3 HTA = 4 APU =", "'xor_eq', 'override', 'final', 'transaction_safe', 'transaction_safe_dynamic', 'if', 'elif', 'else', 'endif', 'defined',", "or agreed to in writing, software # distributed under the", "if str == \"float32\": return mace_pb2.DT_FLOAT elif str == \"int32\":", "= \"check_tensors\" check_shapes = \"check_shapes\" model_file_path = \"model_file_path\" model_sha256_checksum =", "required by applicable law or agreed to in writing, software", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "input tensors\") conf[ModelKeys.input_data_formats] = input_data_formats input_ranges = [parse_float_array(r) for r", "'enum', 'explicit', 'export', 'extern', 'false', 'float', 
'for', 'friend', 'goto', 'if',", "with the License. # You may obtain a copy of", "= 101 HWOI = 102 OHWI = 103 AUTO =", "conf[ModelKeys.subgraphs] conf.update(subgraph) conf[ModelKeys.platform] = parse_platform(conf[ModelKeys.platform]) conf[ModelKeys.runtime] = parse_device_type(conf[ModelKeys.runtime]) if ModelKeys.quantize", "import re import os import copy import yaml from enum", "'template', 'this', 'thread_local', 'throw', 'true', 'try', 'typedef', 'typeid', 'typename', 'union',", "\"off\"]: s = re.sub(r\":\\s+\" + w + \"$\", r\": '\"", "of input_ranges should be \" \"the same as input tensors\")", "conf[ModelKeys.input_ranges] = input_ranges # parse output conf[ModelKeys.output_tensors] = to_list(conf[ModelKeys.output_tensors]) conf[ModelKeys.output_tensors]", "quantize = \"quantize\" quantize_schema = \"quantize_schema\" quantize_large_weights = \"quantize_large_weights\" quantize_stat", "if len(output_data_formats) == 1 and output_count > 1: output_data_formats =", "[\"NHWC\"]))] if len(input_data_formats) == 1 and input_count > 1: input_data_formats", "\"unknown platform %s\" % str) return Platform[str] DATA_TYPE_MAP = {", "'nullptr', 'operator', 'or', 'or_eq', 'private', 'protected', 'public', 'register', 'reinterpret_cast', 'requires',", "'\" + w + \"'\", s) # sub ${} to", "e in Platform], \"unknown platform %s\" % str) return Platform[str]", "input_count > 1: input_data_types = [input_data_types[0]] * input_count mace_check(len(input_data_types) ==", "103 AUTO = 1000 def parse_data_format(str): str = str.upper() mace_check(str", "input_data_formats should be \" \"the same as input tensors\") conf[ModelKeys.input_data_formats]", "'endif', 'defined', 'ifdef', 'ifndef', 'define', 'undef', 'include', 'line', 'error', 'pragma',", "> 1: input_data_types = [input_data_types[0]] * input_count mace_check(len(input_data_types) == input_count,", "# parse input conf[ModelKeys.input_tensors] = to_list(conf[ModelKeys.input_tensors]) conf[ModelKeys.input_tensors] = 
[str(i) for", "compliance with the License. # You may obtain a copy", "1: input_ranges = [input_ranges[0]] * input_count mace_check(len(input_ranges) == input_count, \"the", "All Rights Reserved. # # Licensed under the Apache License,", "agreed to in writing, software # distributed under the License", "parse_data_type(str): if str == \"float32\": return mace_pb2.DT_FLOAT elif str ==", "conf[ModelKeys.output_tensors] = [str(i) for i in conf[ModelKeys.output_tensors]] output_count = len(conf[ModelKeys.output_tensors])", "\" \"the same as output tensors\") conf[ModelKeys.output_data_types] = output_data_types output_data_formats", "str.upper() mace_check(str in [e.name for e in DataFormat], \"unknown data", "distributed under the License is distributed on an \"AS IS\"", "import yaml from enum import Enum from utils.util import mace_check", "DeviceType.HTA, \"apu\": DeviceType.APU, \"cpu+gpu\": DeviceType.CPU_GPU } def parse_device_type(str): mace_check(str in", "\"cpu+gpu\": DeviceType.CPU_GPU } def parse_device_type(str): mace_check(str in DEVICE_MAP, \"unknown device", "\"cl_mem_type\" data_type = \"data_type\" subgraphs = \"subgraphs\" validation_inputs_data = \"validation_inputs_data\"", "'protected', 'public', 'register', 'reinterpret_cast', 'requires', 'return', 'short', 'signed', 'sizeof', 'static',", "'goto', 'if', 'import', 'inline', 'int', 'long', 'module', 'mutable', 'namespace', 'new',", "'models' graph_optimize_options = \"graph_optimize_options\" input_tensors = \"input_tensors\" input_shapes = \"input_shapes\"", "ModelKeys.quantize in conf and conf[ModelKeys.quantize] == 1: conf[ModelKeys.data_type] = mace_pb2.DT_FLOAT", "'ifndef', 'define', 'undef', 'include', 'line', 'error', 'pragma', ] def sanitize_load(s):", "to_list(conf[ModelKeys.check_tensors]) conf[ModelKeys.check_shapes] = [parse_int_array(shape) for shape in to_list(conf[ModelKeys.check_shapes])] mace_check(len(conf[ModelKeys.check_tensors]) ==", "\"data_type\" subgraphs = \"subgraphs\" 
validation_inputs_data = \"validation_inputs_data\" class DataFormat(Enum): NONE", "conf and conf[ModelKeys.quantize] == 1: conf[ModelKeys.data_type] = mace_pb2.DT_FLOAT else: if", "= \"input_shapes\" input_data_types = \"input_data_types\" input_data_formats = \"input_data_formats\" input_ranges =", "'transaction_safe_dynamic', 'if', 'elif', 'else', 'endif', 'defined', 'ifdef', 'ifndef', 'define', 'undef',", "\"unknown data format %s\" % str) return DataFormat[str] class DeviceType(Enum):", "= [parse_float_array(r) for r in to_list(conf.get(ModelKeys.input_ranges, [\"-1.0,1.0\"]))] if len(input_ranges) ==", "in conf: subgraph = conf[ModelKeys.subgraphs][0] del conf[ModelKeys.subgraphs] conf.update(subgraph) conf[ModelKeys.platform] =", "+ w + \"'\", s) # sub ${} to env", "DataFormat], \"unknown data format %s\" % str) return DataFormat[str] class", "input_data_types = [parse_data_type(dt) for dt in to_list(conf.get(ModelKeys.input_data_types, [\"float32\"]))] if len(input_data_types)", "validation_inputs_data = \"validation_inputs_data\" class DataFormat(Enum): NONE = 0 NHWC =", "express or implied. # See the License for the specific", "os.environ[x.group(1)], s) return yaml.load(s) def parse(path): with open(path) as f:", "'bitand', 'bitor', 'bool', 'break', 'case', 'catch', 'char', 'char16_t', 'char32_t', 'class',", "except in compliance with the License. 
# You may obtain", "env value s = re.sub(r\"\\${(\\w+)}\", lambda x: os.environ[x.group(1)], s) return", "output_tensors = \"output_tensors\" output_shapes = \"output_shapes\" output_data_types = \"output_data_types\" output_data_formats", "to_list(conf[ModelKeys.input_tensors]) conf[ModelKeys.input_tensors] = [str(i) for i in conf[ModelKeys.input_tensors]] input_count =", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "\"input node count and shape count do not match\") input_data_types", "= parse_platform(conf[ModelKeys.platform]) conf[ModelKeys.runtime] = parse_device_type(conf[ModelKeys.runtime]) if ModelKeys.quantize in conf and", "not use this file except in compliance with the License.", "cl_mem_type = \"cl_mem_type\" data_type = \"data_type\" subgraphs = \"subgraphs\" validation_inputs_data", "'alignas', 'alignof', 'and', 'and_eq', 'asm', 'atomic_cancel', 'atomic_commit', 'atomic_noexcept', 'auto', 'bitand',", "\"on\", \"off\"]: s = re.sub(r\":\\s+\" + w + \"$\", r\":", "return [x] def parse_int_array(xs): if len(xs) is 0: return [1]", "input_count = len(conf[ModelKeys.input_tensors]) conf[ModelKeys.input_shapes] = [parse_int_array(shape) for shape in to_list(conf[ModelKeys.input_shapes])]", "writing, software # distributed under the License is distributed on", "\"output_shapes\" output_data_types = \"output_data_types\" output_data_formats = \"output_data_formats\" check_tensors = \"check_tensors\"", "== input_count, \"the number of input_data_types should be \" \"the", "= 102 OHWI = 103 AUTO = 1000 def parse_data_format(str):", "you may not use this file except in compliance with", "s) # sub ${} to env value s = re.sub(r\"\\${(\\w+)}\",", "as output tensors\") conf[ModelKeys.output_data_types] = output_data_types output_data_formats = [parse_data_format(df) for", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "'union', 'unsigned', 'using', 'virtual', 'void', 'volatile', 'wchar_t', 'while', 'xor', 'xor_eq',", 
"input_tensors = \"input_tensors\" input_shapes = \"input_shapes\" input_data_types = \"input_data_types\" input_data_formats", "'virtual', 'void', 'volatile', 'wchar_t', 'while', 'xor', 'xor_eq', 'override', 'final', 'transaction_safe',", "DEVICE_MAP = { \"cpu\": DeviceType.CPU, \"gpu\": DeviceType.GPU, \"hexagon\": DeviceType.HEXAGON, \"dsp\":", "0 NHWC = 1 NCHW = 2 HWIO = 100", "conf[ModelKeys.input_data_formats] = input_data_formats input_ranges = [parse_float_array(r) for r in to_list(conf.get(ModelKeys.input_ranges,", "to_list(conf[ModelKeys.output_tensors]) conf[ModelKeys.output_tensors] = [str(i) for i in conf[ModelKeys.output_tensors]] output_count =", "= input_ranges # parse output conf[ModelKeys.output_tensors] = to_list(conf[ModelKeys.output_tensors]) conf[ModelKeys.output_tensors] =", "as input tensors\") conf[ModelKeys.input_ranges] = input_ranges # parse output conf[ModelKeys.output_tensors]", "def parse_data_format(str): str = str.upper() mace_check(str in [e.name for e", "if len(input_data_formats) == 1 and input_count > 1: input_data_formats =", "'while', 'xor', 'xor_eq', 'override', 'final', 'transaction_safe', 'transaction_safe_dynamic', 'if', 'elif', 'else',", "mace_pb2.DT_FLOAT elif str == \"int32\": return mace_pb2.DT_INT32 else: mace_check(False, \"data", "from py_proto import mace_pb2 CPP_KEYWORDS = [ 'alignas', 'alignof', 'and',", "for e in DataFormat], \"unknown data format %s\" % str)", "conf[ModelKeys.input_shapes] = [parse_int_array(shape) for shape in to_list(conf[ModelKeys.input_shapes])] mace_check( len(conf[ModelKeys.input_shapes]) ==", "'sizeof', 'static', 'static_assert', 'static_cast', 'struct', 'switch', 'synchronized', 'template', 'this', 'thread_local',", "= 0 CAFFE = 1 ONNX = 2 MEGENGINE =", "CONDITIONS OF ANY KIND, either express or implied. 
# See", "\"output_data_formats\" check_tensors = \"check_tensors\" check_shapes = \"check_shapes\" model_file_path = \"model_file_path\"", "shape in to_list(conf[ModelKeys.output_shapes])] mace_check(len(conf[ModelKeys.output_tensors]) == output_count, \"output node count and", "= [parse_int_array(shape) for shape in to_list(conf[ModelKeys.input_shapes])] mace_check( len(conf[ModelKeys.input_shapes]) == input_count,", "to_list(x): if isinstance(x, list): return x else: return [x] def", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "return mace_pb2.DT_INT32 else: mace_check(False, \"data type %s not supported\" %", "for i in conf[ModelKeys.output_tensors]] output_count = len(conf[ModelKeys.output_tensors]) conf[ModelKeys.output_shapes] = [parse_int_array(shape)", "utils.util import mace_check from utils.util import MaceLogger from py_proto import", "in to_list(conf.get(ModelKeys.input_data_formats, [\"NHWC\"]))] if len(input_data_formats) == 1 and input_count >", "\"the same as input tensors\") conf[ModelKeys.input_data_formats] = input_data_formats input_ranges =", "1: output_data_types = [output_data_types[0]] * output_count mace_check(len(output_data_types) == output_count, \"the", "= 100 DEVICE_MAP = { \"cpu\": DeviceType.CPU, \"gpu\": DeviceType.GPU, \"hexagon\":", "same as output tensors\") conf[ModelKeys.output_data_formats] = output_data_formats if ModelKeys.check_tensors in", "= output_data_types output_data_formats = [parse_data_format(df) for df in to_list(conf.get(ModelKeys.output_data_formats, [\"NHWC\"]))]", "'mutable', 'namespace', 'new', 'noexcept', 'not', 'not_eq', 'nullptr', 'operator', 'or', 'or_eq',", "model_sha256_checksum = \"model_sha256_checksum\" weight_file_path = \"weight_file_path\" weight_sha256_checksum = \"weight_sha256_checksum\" quantize_range_file", "str) return DEVICE_MAP[str] class Platform(Enum): TENSORFLOW = 0 CAFFE =", "ModelKeys(object): platform = \"platform\" runtime = \"runtime\" models = 'models'", 
"\"hta\": DeviceType.HTA, \"apu\": DeviceType.APU, \"cpu+gpu\": DeviceType.CPU_GPU } def parse_device_type(str): mace_check(str", "'export', 'extern', 'false', 'float', 'for', 'friend', 'goto', 'if', 'import', 'inline',", "= \"data_type\" subgraphs = \"subgraphs\" validation_inputs_data = \"validation_inputs_data\" class DataFormat(Enum):", "not match\") input_data_types = [parse_data_type(dt) for dt in to_list(conf.get(ModelKeys.input_data_types, [\"float32\"]))]", "'requires', 'return', 'short', 'signed', 'sizeof', 'static', 'static_assert', 'static_cast', 'struct', 'switch',", "GPU = 2 HEXAGON = 3 HTA = 4 APU", "conf[ModelKeys.data_type]) else: conf[ModelKeys.data_type] = mace_pb2.DT_HALF # parse input conf[ModelKeys.input_tensors] =", "subgraph = conf[ModelKeys.subgraphs][0] del conf[ModelKeys.subgraphs] conf.update(subgraph) conf[ModelKeys.platform] = parse_platform(conf[ModelKeys.platform]) conf[ModelKeys.runtime]", "re.sub(r\"\\${(\\w+)}\", lambda x: os.environ[x.group(1)], s) return yaml.load(s) def parse(path): with", "parse_internal_data_type( conf[ModelKeys.data_type]) else: conf[ModelKeys.data_type] = mace_pb2.DT_HALF # parse input conf[ModelKeys.input_tensors]", "should be \" \"the same as output tensors\") conf[ModelKeys.output_data_formats] =", "\"the same as output tensors\") conf[ModelKeys.output_data_types] = output_data_types output_data_formats =", "'register', 'reinterpret_cast', 'requires', 'return', 'short', 'signed', 'sizeof', 'static', 'static_assert', 'static_cast',", "if len(input_ranges) == 1 and input_count > 1: input_ranges =", "if ModelKeys.data_type in conf: conf[ModelKeys.data_type] = parse_internal_data_type( conf[ModelKeys.data_type]) else: conf[ModelKeys.data_type]", "conf[ModelKeys.output_shapes] = [parse_int_array(shape) for shape in to_list(conf[ModelKeys.output_shapes])] mace_check(len(conf[ModelKeys.output_tensors]) == output_count,", "'const', 'constexpr', 'const_cast', 'continue', 'co_await', 'co_return', 'co_yield', 
'decltype', 'default', 'delete',", "== output_count, \"output node count and shape count do not", "OR CONDITIONS OF ANY KIND, either express or implied. #", "conf[ModelKeys.check_tensors] = to_list(conf[ModelKeys.check_tensors]) conf[ModelKeys.check_shapes] = [parse_int_array(shape) for shape in to_list(conf[ModelKeys.check_shapes])]", "subgraphs = \"subgraphs\" validation_inputs_data = \"validation_inputs_data\" class DataFormat(Enum): NONE =", "len(input_ranges) == 1 and input_count > 1: input_ranges = [input_ranges[0]]", "mace_check(len(input_data_types) == input_count, \"the number of input_data_types should be \"", "def parse_data_type(str): if str == \"float32\": return mace_pb2.DT_FLOAT elif str", "to_list(conf.get(ModelKeys.input_ranges, [\"-1.0,1.0\"]))] if len(input_ranges) == 1 and input_count > 1:", "the License is distributed on an \"AS IS\" BASIS, #", "'for', 'friend', 'goto', 'if', 'import', 'inline', 'int', 'long', 'module', 'mutable',", "df in to_list(conf.get(ModelKeys.output_data_formats, [\"NHWC\"]))] if len(output_data_formats) == 1 and output_count", "DeviceType(Enum): CPU = 0 GPU = 2 HEXAGON = 3", "\"runtime\" models = 'models' graph_optimize_options = \"graph_optimize_options\" input_tensors = \"input_tensors\"", "[parse_data_type(dt) for dt in to_list(conf.get(ModelKeys.output_data_types, [\"float32\"]))] if len(output_data_types) == 1", "5 CPU_GPU = 100 DEVICE_MAP = { \"cpu\": DeviceType.CPU, \"gpu\":", "import MaceLogger from py_proto import mace_pb2 CPP_KEYWORDS = [ 'alignas',", "'transaction_safe', 'transaction_safe_dynamic', 'if', 'elif', 'else', 'endif', 'defined', 'ifdef', 'ifndef', 'define',", "str) return Platform[str] DATA_TYPE_MAP = { 'float32': mace_pb2.DT_FLOAT, 'int32': mace_pb2.DT_INT32,", "boolean for w in [\"ON\", \"OFF\", \"on\", \"off\"]: s =", "in xs.split(\",\")] def parse_float_array(xs): return [float(x) for x in xs.split(\",\")]", "= parse_internal_data_type( conf[ModelKeys.data_type]) else: conf[ModelKeys.data_type] 
= mace_pb2.DT_HALF # parse input", "= \"weight_sha256_checksum\" quantize_range_file = \"quantize_range_file\" quantize = \"quantize\" quantize_schema =", "tensors\") conf[ModelKeys.input_data_types] = input_data_types input_data_formats = [parse_data_format(df) for df in", "should be \" \"the same as input tensors\") conf[ModelKeys.input_data_formats] =", "'line', 'error', 'pragma', ] def sanitize_load(s): # do not let", "'throw', 'true', 'try', 'typedef', 'typeid', 'typename', 'union', 'unsigned', 'using', 'virtual',", "= \"output_tensors\" output_shapes = \"output_shapes\" output_data_types = \"output_data_types\" output_data_formats =", "'class', 'compl', 'concept', 'const', 'constexpr', 'const_cast', 'continue', 'co_await', 'co_return', 'co_yield',", "input_count, \"the number of input_data_types should be \" \"the same", "= \"quantize_stat\" change_concat_ranges = \"change_concat_ranges\" winograd = \"winograd\" cl_mem_type =", "conf[ModelKeys.data_type] = mace_pb2.DT_FLOAT else: if ModelKeys.data_type in conf: conf[ModelKeys.data_type] =", "'char16_t', 'char32_t', 'class', 'compl', 'concept', 'const', 'constexpr', 'const_cast', 'continue', 'co_await',", "1: input_data_formats = [input_data_formats[0]] * input_count mace_check(len(input_data_formats) == input_count, \"the", "'defined', 'ifdef', 'ifndef', 'define', 'undef', 'include', 'line', 'error', 'pragma', ]", "* input_count mace_check(len(input_ranges) == input_count, \"the number of input_ranges should", "\"gpu\": DeviceType.GPU, \"hexagon\": DeviceType.HEXAGON, \"dsp\": DeviceType.HEXAGON, \"hta\": DeviceType.HTA, \"apu\": DeviceType.APU,", "output conf[ModelKeys.output_tensors] = to_list(conf[ModelKeys.output_tensors]) conf[ModelKeys.output_tensors] = [str(i) for i in", "len(input_data_formats) == 1 and input_count > 1: input_data_formats = [input_data_formats[0]]", "output tensors\") conf[ModelKeys.output_data_types] = output_data_types output_data_formats = [parse_data_format(df) for df", "\"check 
tensors count and shape count do not match.\") MaceLogger.summary(conf)", "'and', 'and_eq', 'asm', 'atomic_cancel', 'atomic_commit', 'atomic_noexcept', 'auto', 'bitand', 'bitor', 'bool',", "\" \"the same as output tensors\") conf[ModelKeys.output_data_formats] = output_data_formats if", "device %s\" % str) return DEVICE_MAP[str] class Platform(Enum): TENSORFLOW =", "law or agreed to in writing, software # distributed under", "= conf[ModelKeys.subgraphs][0] del conf[ModelKeys.subgraphs] conf.update(subgraph) conf[ModelKeys.platform] = parse_platform(conf[ModelKeys.platform]) conf[ModelKeys.runtime] =", "import mace_pb2 CPP_KEYWORDS = [ 'alignas', 'alignof', 'and', 'and_eq', 'asm',", "4 APU = 5 CPU_GPU = 100 DEVICE_MAP = {", "in to_list(conf.get(ModelKeys.input_data_types, [\"float32\"]))] if len(input_data_types) == 1 and input_count >", "tensors\") conf[ModelKeys.input_data_formats] = input_data_formats input_ranges = [parse_float_array(r) for r in", "os import copy import yaml from enum import Enum from", "yaml parse ON/OFF to boolean for w in [\"ON\", \"OFF\",", "\"OFF\", \"on\", \"off\"]: s = re.sub(r\":\\s+\" + w + \"$\",", "== output_count, \"the number of output_data_formats should be \" \"the", "Platform[str] DATA_TYPE_MAP = { 'float32': mace_pb2.DT_FLOAT, 'int32': mace_pb2.DT_INT32, } def", "return yaml.load(s) def parse(path): with open(path) as f: config =", "> 1: input_data_formats = [input_data_formats[0]] * input_count mace_check(len(input_data_formats) == input_count,", "for df in to_list(conf.get(ModelKeys.output_data_formats, [\"NHWC\"]))] if len(output_data_formats) == 1 and", "== output_count, \"the number of output_data_types should be \" \"the", "tensors\") conf[ModelKeys.output_data_types] = output_data_types output_data_formats = [parse_data_format(df) for df in", "\"$\", r\": '\" + w + \"'\", s) # sub", "parse_device_info(path): conf = parse(path) return conf[\"devices\"] class ModelKeys(object): platform =", "output_count, \"the number of 
output_data_types should be \" \"the same", "len( conf[ModelKeys.check_shapes]), \"check tensors count and shape count do not", "== 1 and input_count > 1: input_ranges = [input_ranges[0]] *", "value s = re.sub(r\"\\${(\\w+)}\", lambda x: os.environ[x.group(1)], s) return yaml.load(s)", "= str.upper() mace_check(str in [e.name for e in DataFormat], \"unknown", "\"change_concat_ranges\" winograd = \"winograd\" cl_mem_type = \"cl_mem_type\" data_type = \"data_type\"", "# parse output conf[ModelKeys.output_tensors] = to_list(conf[ModelKeys.output_tensors]) conf[ModelKeys.output_tensors] = [str(i) for", "i in conf[ModelKeys.output_tensors]] output_count = len(conf[ModelKeys.output_tensors]) conf[ModelKeys.output_shapes] = [parse_int_array(shape) for", "tensors\") conf[ModelKeys.output_data_formats] = output_data_formats if ModelKeys.check_tensors in conf: conf[ModelKeys.check_tensors] =", "conf[ModelKeys.platform] = parse_platform(conf[ModelKeys.platform]) conf[ModelKeys.runtime] = parse_device_type(conf[ModelKeys.runtime]) if ModelKeys.quantize in conf", "in [e.name for e in Platform], \"unknown platform %s\" %", "return mace_pb2.DT_FLOAT elif str == 'bf16_fp32': return mace_pb2.DT_BFLOAT16 else: return", "'inline', 'int', 'long', 'module', 'mutable', 'namespace', 'new', 'noexcept', 'not', 'not_eq',", "and input_count > 1: input_data_types = [input_data_types[0]] * input_count mace_check(len(input_data_types)", "may obtain a copy of the License at # #", "[\"NHWC\"]))] if len(output_data_formats) == 1 and output_count > 1: output_data_formats", "for e in Platform], \"unknown platform %s\" % str) return", "\"the same as input tensors\") conf[ModelKeys.input_ranges] = input_ranges # parse", "'include', 'line', 'error', 'pragma', ] def sanitize_load(s): # do not", "return mace_pb2.DT_FLOAT elif str == \"int32\": return mace_pb2.DT_INT32 else: mace_check(False,", "is 0: return [1] return [int(x) for x in xs.split(\",\")]", "'module', 'mutable', 'namespace', 'new', 
'noexcept', 'not', 'not_eq', 'nullptr', 'operator', 'or',", "%s\" % str) return DataFormat[str] class DeviceType(Enum): CPU = 0", "= [parse_data_type(dt) for dt in to_list(conf.get(ModelKeys.input_data_types, [\"float32\"]))] if len(input_data_types) ==", "and # limitations under the License. from __future__ import absolute_import", "%s\" % str) return DEVICE_MAP[str] class Platform(Enum): TENSORFLOW = 0", "def parse(path): with open(path) as f: config = sanitize_load(f.read()) return", "\"float32\": return mace_pb2.DT_FLOAT elif str == \"int32\": return mace_pb2.DT_INT32 else:", "quantize_large_weights = \"quantize_large_weights\" quantize_stat = \"quantize_stat\" change_concat_ranges = \"change_concat_ranges\" winograd", "conf[ModelKeys.subgraphs][0] del conf[ModelKeys.subgraphs] conf.update(subgraph) conf[ModelKeys.platform] = parse_platform(conf[ModelKeys.platform]) conf[ModelKeys.runtime] = parse_device_type(conf[ModelKeys.runtime])", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "let yaml parse ON/OFF to boolean for w in [\"ON\",", "in conf: conf[ModelKeys.data_type] = parse_internal_data_type( conf[ModelKeys.data_type]) else: conf[ModelKeys.data_type] = mace_pb2.DT_HALF", "ModelKeys.check_tensors in conf: conf[ModelKeys.check_tensors] = to_list(conf[ModelKeys.check_tensors]) conf[ModelKeys.check_shapes] = [parse_int_array(shape) for", "# do not let yaml parse ON/OFF to boolean for", "CAFFE = 1 ONNX = 2 MEGENGINE = 3 def", "* input_count mace_check(len(input_data_formats) == input_count, \"the number of input_data_formats should", "= re.sub(r\":\\s+\" + w + \"$\", r\": '\" + w", "may not use this file except in compliance with the", "w + \"$\", r\": '\" + w + \"'\", s)", "format %s\" % str) return DataFormat[str] class DeviceType(Enum): CPU =", "platform %s\" % str) return Platform[str] DATA_TYPE_MAP = { 'float32':", "= input_data_formats input_ranges = [parse_float_array(r) for r in to_list(conf.get(ModelKeys.input_ranges, 
[\"-1.0,1.0\"]))]", "'import', 'inline', 'int', 'long', 'module', 'mutable', 'namespace', 'new', 'noexcept', 'not',", "parse_data_format(str): str = str.upper() mace_check(str in [e.name for e in", "[float(x) for x in xs.split(\",\")] def normalize_model_config(conf): conf = copy.deepcopy(conf)", "'synchronized', 'template', 'this', 'thread_local', 'throw', 'true', 'try', 'typedef', 'typeid', 'typename',", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "this file except in compliance with the License. # You", "as input tensors\") conf[ModelKeys.input_data_types] = input_data_types input_data_formats = [parse_data_format(df) for", "utils.util import MaceLogger from py_proto import mace_pb2 CPP_KEYWORDS = [", "import absolute_import from __future__ import division from __future__ import print_function", "= 1000 def parse_data_format(str): str = str.upper() mace_check(str in [e.name", "= to_list(conf[ModelKeys.output_tensors]) conf[ModelKeys.output_tensors] = [str(i) for i in conf[ModelKeys.output_tensors]] output_count", "e in DataFormat], \"unknown data format %s\" % str) return", "= { 'float32': mace_pb2.DT_FLOAT, 'int32': mace_pb2.DT_INT32, } def parse_data_type(str): if", "same as output tensors\") conf[ModelKeys.output_data_types] = output_data_types output_data_formats = [parse_data_format(df)", "in [\"ON\", \"OFF\", \"on\", \"off\"]: s = re.sub(r\":\\s+\" + w", "\"input_tensors\" input_shapes = \"input_shapes\" input_data_types = \"input_data_types\" input_data_formats = \"input_data_formats\"", "= \"weight_file_path\" weight_sha256_checksum = \"weight_sha256_checksum\" quantize_range_file = \"quantize_range_file\" quantize =", "return conf[\"devices\"] class ModelKeys(object): platform = \"platform\" runtime = \"runtime\"", "weight_sha256_checksum = \"weight_sha256_checksum\" quantize_range_file = \"quantize_range_file\" quantize = \"quantize\" quantize_schema", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by 
applicable law", "\"'\", s) # sub ${} to env value s =", "import division from __future__ import print_function import re import os", "'dynamic_cast', 'else', 'enum', 'explicit', 'export', 'extern', 'false', 'float', 'for', 'friend',", "# # Licensed under the Apache License, Version 2.0 (the", "in xs.split(\",\")] def normalize_model_config(conf): conf = copy.deepcopy(conf) if ModelKeys.subgraphs in", "to boolean for w in [\"ON\", \"OFF\", \"on\", \"off\"]: s", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "CPP_KEYWORDS = [ 'alignas', 'alignof', 'and', 'and_eq', 'asm', 'atomic_cancel', 'atomic_commit',", "normalize_model_config(conf): conf = copy.deepcopy(conf) if ModelKeys.subgraphs in conf: subgraph =", "division from __future__ import print_function import re import os import", "elif str == \"int32\": return mace_pb2.DT_INT32 else: mace_check(False, \"data type", "DeviceType.GPU, \"hexagon\": DeviceType.HEXAGON, \"dsp\": DeviceType.HEXAGON, \"hta\": DeviceType.HTA, \"apu\": DeviceType.APU, \"cpu+gpu\":", "1: output_data_formats = [output_data_formats[0]] * output_count mace_check(len(output_data_formats) == output_count, \"the", "output_shapes = \"output_shapes\" output_data_types = \"output_data_types\" output_data_formats = \"output_data_formats\" check_tensors", "1 and input_count > 1: input_data_formats = [input_data_formats[0]] * input_count", "= \"input_data_types\" input_data_formats = \"input_data_formats\" input_ranges = \"input_ranges\" output_tensors =", "output_count, \"the number of output_data_formats should be \" \"the same", "[str(i) for i in conf[ModelKeys.input_tensors]] input_count = len(conf[ModelKeys.input_tensors]) conf[ModelKeys.input_shapes] =", "and output_count > 1: output_data_types = [output_data_types[0]] * output_count mace_check(len(output_data_types)", "config def parse_device_info(path): conf = parse(path) return conf[\"devices\"] class ModelKeys(object):", 
"input_ranges = [parse_float_array(r) for r in to_list(conf.get(ModelKeys.input_ranges, [\"-1.0,1.0\"]))] if len(input_ranges)", "= 2 HWIO = 100 OIHW = 101 HWOI =", "= parse_device_type(conf[ModelKeys.runtime]) if ModelKeys.quantize in conf and conf[ModelKeys.quantize] == 1:", "% str) return DataFormat[str] class DeviceType(Enum): CPU = 0 GPU", "len(output_data_types) == 1 and output_count > 1: output_data_types = [output_data_types[0]]", "x else: return [x] def parse_int_array(xs): if len(xs) is 0:", "= \"check_shapes\" model_file_path = \"model_file_path\" model_sha256_checksum = \"model_sha256_checksum\" weight_file_path =", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "[\"float32\"]))] if len(input_data_types) == 1 and input_count > 1: input_data_types", "of output_data_formats should be \" \"the same as output tensors\")", "graph_optimize_options = \"graph_optimize_options\" input_tensors = \"input_tensors\" input_shapes = \"input_shapes\" input_data_types", "= to_list(conf[ModelKeys.input_tensors]) conf[ModelKeys.input_tensors] = [str(i) for i in conf[ModelKeys.input_tensors]] input_count", "101 HWOI = 102 OHWI = 103 AUTO = 1000", "to_list(conf.get(ModelKeys.output_data_formats, [\"NHWC\"]))] if len(output_data_formats) == 1 and output_count > 1:", "'friend', 'goto', 'if', 'import', 'inline', 'int', 'long', 'module', 'mutable', 'namespace',", "= [output_data_formats[0]] * output_count mace_check(len(output_data_formats) == output_count, \"the number of", "2 MEGENGINE = 3 def parse_platform(str): str = str.upper() mace_check(str", "[parse_data_format(df) for df in to_list(conf.get(ModelKeys.input_data_formats, [\"NHWC\"]))] if len(input_data_formats) == 1", "'float32': mace_pb2.DT_FLOAT, 'int32': mace_pb2.DT_INT32, } def parse_data_type(str): if str ==", "= [str(i) for i in conf[ModelKeys.output_tensors]] output_count = len(conf[ModelKeys.output_tensors]) conf[ModelKeys.output_shapes]", "'delete', 'do', 'double', 'dynamic_cast', 'else', 
'enum', 'explicit', 'export', 'extern', 'false',", "\"check_shapes\" model_file_path = \"model_file_path\" model_sha256_checksum = \"model_sha256_checksum\" weight_file_path = \"weight_file_path\"", "input_data_types = [input_data_types[0]] * input_count mace_check(len(input_data_types) == input_count, \"the number", "[input_ranges[0]] * input_count mace_check(len(input_ranges) == input_count, \"the number of input_ranges", "Enum from utils.util import mace_check from utils.util import MaceLogger from", "'static_assert', 'static_cast', 'struct', 'switch', 'synchronized', 'template', 'this', 'thread_local', 'throw', 'true',", "'catch', 'char', 'char16_t', 'char32_t', 'class', 'compl', 'concept', 'const', 'constexpr', 'const_cast',", "* input_count mace_check(len(input_data_types) == input_count, \"the number of input_data_types should", "\"the number of output_data_types should be \" \"the same as", "enum import Enum from utils.util import mace_check from utils.util import", "conf.update(subgraph) conf[ModelKeys.platform] = parse_platform(conf[ModelKeys.platform]) conf[ModelKeys.runtime] = parse_device_type(conf[ModelKeys.runtime]) if ModelKeys.quantize in", "\"the same as input tensors\") conf[ModelKeys.input_data_types] = input_data_types input_data_formats =", "'true', 'try', 'typedef', 'typeid', 'typename', 'union', 'unsigned', 'using', 'virtual', 'void',", "\"output_data_types\" output_data_formats = \"output_data_formats\" check_tensors = \"check_tensors\" check_shapes = \"check_shapes\"", "\"output node count and shape count do not match\") output_data_types", "config = sanitize_load(f.read()) return config def parse_device_info(path): conf = parse(path)", "= [parse_data_format(df) for df in to_list(conf.get(ModelKeys.input_data_formats, [\"NHWC\"]))] if len(input_data_formats) ==", "conf[ModelKeys.check_shapes]), \"check tensors count and shape count do not match.\")", "the License. 
from __future__ import absolute_import from __future__ import division", "CPU_GPU = 100 DEVICE_MAP = { \"cpu\": DeviceType.CPU, \"gpu\": DeviceType.GPU,", "'noexcept', 'not', 'not_eq', 'nullptr', 'operator', 'or', 'or_eq', 'private', 'protected', 'public',", "for shape in to_list(conf[ModelKeys.check_shapes])] mace_check(len(conf[ModelKeys.check_tensors]) == len( conf[ModelKeys.check_shapes]), \"check tensors", "'short', 'signed', 'sizeof', 'static', 'static_assert', 'static_cast', 'struct', 'switch', 'synchronized', 'template',", "= \"input_ranges\" output_tensors = \"output_tensors\" output_shapes = \"output_shapes\" output_data_types =", "'if', 'import', 'inline', 'int', 'long', 'module', 'mutable', 'namespace', 'new', 'noexcept',", "'explicit', 'export', 'extern', 'false', 'float', 'for', 'friend', 'goto', 'if', 'import',", "quantize_stat = \"quantize_stat\" change_concat_ranges = \"change_concat_ranges\" winograd = \"winograd\" cl_mem_type", "re.sub(r\":\\s+\" + w + \"$\", r\": '\" + w +", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "{ 'float32': mace_pb2.DT_FLOAT, 'int32': mace_pb2.DT_INT32, } def parse_data_type(str): if str", "\"model_sha256_checksum\" weight_file_path = \"weight_file_path\" weight_sha256_checksum = \"weight_sha256_checksum\" quantize_range_file = \"quantize_range_file\"", "input_data_formats input_ranges = [parse_float_array(r) for r in to_list(conf.get(ModelKeys.input_ranges, [\"-1.0,1.0\"]))] if", "output_data_types output_data_formats = [parse_data_format(df) for df in to_list(conf.get(ModelKeys.output_data_formats, [\"NHWC\"]))] if", "2 HEXAGON = 3 HTA = 4 APU = 5", "from utils.util import MaceLogger from py_proto import mace_pb2 CPP_KEYWORDS =", "\"quantize_large_weights\" quantize_stat = \"quantize_stat\" change_concat_ranges = \"change_concat_ranges\" winograd = \"winograd\"", "limitations under the License. 
from __future__ import absolute_import from __future__", "3 HTA = 4 APU = 5 CPU_GPU = 100", "if len(input_data_types) == 1 and input_count > 1: input_data_types =", "'fp32_fp32': return mace_pb2.DT_FLOAT elif str == 'bf16_fp32': return mace_pb2.DT_BFLOAT16 else:", "'or', 'or_eq', 'private', 'protected', 'public', 'register', 'reinterpret_cast', 'requires', 'return', 'short',", "] def sanitize_load(s): # do not let yaml parse ON/OFF", "'co_yield', 'decltype', 'default', 'delete', 'do', 'double', 'dynamic_cast', 'else', 'enum', 'explicit',", "'double', 'dynamic_cast', 'else', 'enum', 'explicit', 'export', 'extern', 'false', 'float', 'for',", "else: return [x] def parse_int_array(xs): if len(xs) is 0: return", "same as input tensors\") conf[ModelKeys.input_data_types] = input_data_types input_data_formats = [parse_data_format(df)", "'elif', 'else', 'endif', 'defined', 'ifdef', 'ifndef', 'define', 'undef', 'include', 'line',", "\"quantize_range_file\" quantize = \"quantize\" quantize_schema = \"quantize_schema\" quantize_large_weights = \"quantize_large_weights\"", "of input_data_types should be \" \"the same as input tensors\")", "if len(xs) is 0: return [1] return [int(x) for x", "or implied. # See the License for the specific language", "governing permissions and # limitations under the License. from __future__", "Rights Reserved. # # Licensed under the Apache License, Version", "= len(conf[ModelKeys.output_tensors]) conf[ModelKeys.output_shapes] = [parse_int_array(shape) for shape in to_list(conf[ModelKeys.output_shapes])] mace_check(len(conf[ModelKeys.output_tensors])", "'char', 'char16_t', 'char32_t', 'class', 'compl', 'concept', 'const', 'constexpr', 'const_cast', 'continue',", "Platform], \"unknown platform %s\" % str) return Platform[str] DATA_TYPE_MAP =", "r in to_list(conf.get(ModelKeys.input_ranges, [\"-1.0,1.0\"]))] if len(input_ranges) == 1 and input_count", "KIND, either express or implied. 
# See the License for", "specific language governing permissions and # limitations under the License.", "[ 'alignas', 'alignof', 'and', 'and_eq', 'asm', 'atomic_cancel', 'atomic_commit', 'atomic_noexcept', 'auto',", "conf: conf[ModelKeys.check_tensors] = to_list(conf[ModelKeys.check_tensors]) conf[ModelKeys.check_shapes] = [parse_int_array(shape) for shape in", "xs.split(\",\")] def normalize_model_config(conf): conf = copy.deepcopy(conf) if ModelKeys.subgraphs in conf:", "= \"quantize_large_weights\" quantize_stat = \"quantize_stat\" change_concat_ranges = \"change_concat_ranges\" winograd =", "HEXAGON = 3 HTA = 4 APU = 5 CPU_GPU", "\"unknown device %s\" % str) return DEVICE_MAP[str] class Platform(Enum): TENSORFLOW", "str.upper() mace_check(str in [e.name for e in Platform], \"unknown platform", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "= \"model_file_path\" model_sha256_checksum = \"model_sha256_checksum\" weight_file_path = \"weight_file_path\" weight_sha256_checksum =", "= 3 def parse_platform(str): str = str.upper() mace_check(str in [e.name", "parse(path): with open(path) as f: config = sanitize_load(f.read()) return config", "= parse(path) return conf[\"devices\"] class ModelKeys(object): platform = \"platform\" runtime", "% str) def parse_internal_data_type(str): if str == 'fp32_fp32': return mace_pb2.DT_FLOAT", "in to_list(conf.get(ModelKeys.output_data_types, [\"float32\"]))] if len(output_data_types) == 1 and output_count >", "data_type = \"data_type\" subgraphs = \"subgraphs\" validation_inputs_data = \"validation_inputs_data\" class", "if len(output_data_types) == 1 and output_count > 1: output_data_types =", "winograd = \"winograd\" cl_mem_type = \"cl_mem_type\" data_type = \"data_type\" subgraphs", "\"the number of input_data_formats should be \" \"the same as", "\" \"the same as input tensors\") conf[ModelKeys.input_data_formats] = input_data_formats input_ranges", "'wchar_t', 'while', 'xor', 'xor_eq', 
'override', 'final', 'transaction_safe', 'transaction_safe_dynamic', 'if', 'elif',", "parse_float_array(xs): return [float(x) for x in xs.split(\",\")] def normalize_model_config(conf): conf", "== 'fp32_fp32': return mace_pb2.DT_FLOAT elif str == 'bf16_fp32': return mace_pb2.DT_BFLOAT16", "input_ranges # parse output conf[ModelKeys.output_tensors] = to_list(conf[ModelKeys.output_tensors]) conf[ModelKeys.output_tensors] = [str(i)", "AUTO = 1000 def parse_data_format(str): str = str.upper() mace_check(str in", "%s\" % str) return Platform[str] DATA_TYPE_MAP = { 'float32': mace_pb2.DT_FLOAT,", "(the \"License\"); # you may not use this file except", "parse_internal_data_type(str): if str == 'fp32_fp32': return mace_pb2.DT_FLOAT elif str ==", "# you may not use this file except in compliance", "count and shape count do not match\") input_data_types = [parse_data_type(dt)", "should be \" \"the same as input tensors\") conf[ModelKeys.input_ranges] =", "conf: subgraph = conf[ModelKeys.subgraphs][0] del conf[ModelKeys.subgraphs] conf.update(subgraph) conf[ModelKeys.platform] = parse_platform(conf[ModelKeys.platform])", "= { \"cpu\": DeviceType.CPU, \"gpu\": DeviceType.GPU, \"hexagon\": DeviceType.HEXAGON, \"dsp\": DeviceType.HEXAGON,", "TENSORFLOW = 0 CAFFE = 1 ONNX = 2 MEGENGINE", "'or_eq', 'private', 'protected', 'public', 'register', 'reinterpret_cast', 'requires', 'return', 'short', 'signed',", "== \"float32\": return mace_pb2.DT_FLOAT elif str == \"int32\": return mace_pb2.DT_INT32", "= [ 'alignas', 'alignof', 'and', 'and_eq', 'asm', 'atomic_cancel', 'atomic_commit', 'atomic_noexcept',", "len(input_data_types) == 1 and input_count > 1: input_data_types = [input_data_types[0]]", "'bitor', 'bool', 'break', 'case', 'catch', 'char', 'char16_t', 'char32_t', 'class', 'compl',", "'switch', 'synchronized', 'template', 'this', 'thread_local', 'throw', 'true', 'try', 'typedef', 'typeid',", "0 GPU = 2 HEXAGON = 3 HTA = 4", "'decltype', 'default', 'delete', 'do', 'double', 
'dynamic_cast', 'else', 'enum', 'explicit', 'export',", "= [str(i) for i in conf[ModelKeys.input_tensors]] input_count = len(conf[ModelKeys.input_tensors]) conf[ModelKeys.input_shapes]", "= len(conf[ModelKeys.input_tensors]) conf[ModelKeys.input_shapes] = [parse_int_array(shape) for shape in to_list(conf[ModelKeys.input_shapes])] mace_check(", "${} to env value s = re.sub(r\"\\${(\\w+)}\", lambda x: os.environ[x.group(1)],", "not match\") output_data_types = [parse_data_type(dt) for dt in to_list(conf.get(ModelKeys.output_data_types, [\"float32\"]))]", "permissions and # limitations under the License. from __future__ import", "in to_list(conf.get(ModelKeys.output_data_formats, [\"NHWC\"]))] if len(output_data_formats) == 1 and output_count >", "'compl', 'concept', 'const', 'constexpr', 'const_cast', 'continue', 'co_await', 'co_return', 'co_yield', 'decltype',", "mace_check(len(conf[ModelKeys.output_tensors]) == output_count, \"output node count and shape count do", "# sub ${} to env value s = re.sub(r\"\\${(\\w+)}\", lambda", "as f: config = sanitize_load(f.read()) return config def parse_device_info(path): conf", "= \"input_data_formats\" input_ranges = \"input_ranges\" output_tensors = \"output_tensors\" output_shapes =", "100 DEVICE_MAP = { \"cpu\": DeviceType.CPU, \"gpu\": DeviceType.GPU, \"hexagon\": DeviceType.HEXAGON,", "== len( conf[ModelKeys.check_shapes]), \"check tensors count and shape count do", "# # Unless required by applicable law or agreed to", "str = str.upper() mace_check(str in [e.name for e in Platform],", "= 5 CPU_GPU = 100 DEVICE_MAP = { \"cpu\": DeviceType.CPU,", "import Enum from utils.util import mace_check from utils.util import MaceLogger", "in DataFormat], \"unknown data format %s\" % str) return DataFormat[str]", "'false', 'float', 'for', 'friend', 'goto', 'if', 'import', 'inline', 'int', 'long',", "not let yaml parse ON/OFF to boolean for w in", "for r in to_list(conf.get(ModelKeys.input_ranges, [\"-1.0,1.0\"]))] if len(input_ranges) 
== 1 and", "w in [\"ON\", \"OFF\", \"on\", \"off\"]: s = re.sub(r\":\\s+\" +", "'asm', 'atomic_cancel', 'atomic_commit', 'atomic_noexcept', 'auto', 'bitand', 'bitor', 'bool', 'break', 'case',", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "'try', 'typedef', 'typeid', 'typename', 'union', 'unsigned', 'using', 'virtual', 'void', 'volatile',", "[parse_int_array(shape) for shape in to_list(conf[ModelKeys.output_shapes])] mace_check(len(conf[ModelKeys.output_tensors]) == output_count, \"output node", "'and_eq', 'asm', 'atomic_cancel', 'atomic_commit', 'atomic_noexcept', 'auto', 'bitand', 'bitor', 'bool', 'break',", "DeviceType.CPU, \"gpu\": DeviceType.GPU, \"hexagon\": DeviceType.HEXAGON, \"dsp\": DeviceType.HEXAGON, \"hta\": DeviceType.HTA, \"apu\":", "from enum import Enum from utils.util import mace_check from utils.util", "Version 2.0 (the \"License\"); # you may not use this", "mace_pb2.DT_HALF # parse input conf[ModelKeys.input_tensors] = to_list(conf[ModelKeys.input_tensors]) conf[ModelKeys.input_tensors] = [str(i)", "'this', 'thread_local', 'throw', 'true', 'try', 'typedef', 'typeid', 'typename', 'union', 'unsigned',", "2019 The MACE Authors. All Rights Reserved. 
# # Licensed", "def parse_platform(str): str = str.upper() mace_check(str in [e.name for e", "= mace_pb2.DT_FLOAT else: if ModelKeys.data_type in conf: conf[ModelKeys.data_type] = parse_internal_data_type(", "NHWC = 1 NCHW = 2 HWIO = 100 OIHW", "'undef', 'include', 'line', 'error', 'pragma', ] def sanitize_load(s): # do", "supported\" % str) def parse_internal_data_type(str): if str == 'fp32_fp32': return", "'new', 'noexcept', 'not', 'not_eq', 'nullptr', 'operator', 'or', 'or_eq', 'private', 'protected',", "output_data_formats if ModelKeys.check_tensors in conf: conf[ModelKeys.check_tensors] = to_list(conf[ModelKeys.check_tensors]) conf[ModelKeys.check_shapes] =", "dt in to_list(conf.get(ModelKeys.output_data_types, [\"float32\"]))] if len(output_data_types) == 1 and output_count", "and conf[ModelKeys.quantize] == 1: conf[ModelKeys.data_type] = mace_pb2.DT_FLOAT else: if ModelKeys.data_type", "__future__ import absolute_import from __future__ import division from __future__ import", "'default', 'delete', 'do', 'double', 'dynamic_cast', 'else', 'enum', 'explicit', 'export', 'extern',", "implied. 
# See the License for the specific language governing", "re import os import copy import yaml from enum import", "return [int(x) for x in xs.split(\",\")] def parse_float_array(xs): return [float(x)", "'atomic_commit', 'atomic_noexcept', 'auto', 'bitand', 'bitor', 'bool', 'break', 'case', 'catch', 'char',", "== 'bf16_fp32': return mace_pb2.DT_BFLOAT16 else: return mace_pb2.DT_HALF def to_list(x): if", "under the Apache License, Version 2.0 (the \"License\"); # you", "ModelKeys.subgraphs in conf: subgraph = conf[ModelKeys.subgraphs][0] del conf[ModelKeys.subgraphs] conf.update(subgraph) conf[ModelKeys.platform]", "= \"subgraphs\" validation_inputs_data = \"validation_inputs_data\" class DataFormat(Enum): NONE = 0", "input_count, \"the number of input_data_formats should be \" \"the same", "in [e.name for e in DataFormat], \"unknown data format %s\"", "DeviceType.HEXAGON, \"hta\": DeviceType.HTA, \"apu\": DeviceType.APU, \"cpu+gpu\": DeviceType.CPU_GPU } def parse_device_type(str):", "} def parse_data_type(str): if str == \"float32\": return mace_pb2.DT_FLOAT elif", "[parse_data_format(df) for df in to_list(conf.get(ModelKeys.output_data_formats, [\"NHWC\"]))] if len(output_data_formats) == 1", "in to_list(conf[ModelKeys.output_shapes])] mace_check(len(conf[ModelKeys.output_tensors]) == output_count, \"output node count and shape", "License. 
from __future__ import absolute_import from __future__ import division from", "'typedef', 'typeid', 'typename', 'union', 'unsigned', 'using', 'virtual', 'void', 'volatile', 'wchar_t',", "mace_pb2.DT_HALF def to_list(x): if isinstance(x, list): return x else: return", "by applicable law or agreed to in writing, software #", "__future__ import print_function import re import os import copy import", "len(conf[ModelKeys.input_tensors]) conf[ModelKeys.input_shapes] = [parse_int_array(shape) for shape in to_list(conf[ModelKeys.input_shapes])] mace_check( len(conf[ModelKeys.input_shapes])", "return DataFormat[str] class DeviceType(Enum): CPU = 0 GPU = 2", "= 1 ONNX = 2 MEGENGINE = 3 def parse_platform(str):", "str == \"float32\": return mace_pb2.DT_FLOAT elif str == \"int32\": return", "= [parse_int_array(shape) for shape in to_list(conf[ModelKeys.check_shapes])] mace_check(len(conf[ModelKeys.check_tensors]) == len( conf[ModelKeys.check_shapes]),", "MACE Authors. All Rights Reserved. # # Licensed under the", "mace_pb2.DT_FLOAT, 'int32': mace_pb2.DT_INT32, } def parse_data_type(str): if str == \"float32\":", "lambda x: os.environ[x.group(1)], s) return yaml.load(s) def parse(path): with open(path)", "conf[ModelKeys.output_tensors]] output_count = len(conf[ModelKeys.output_tensors]) conf[ModelKeys.output_shapes] = [parse_int_array(shape) for shape in", "shape in to_list(conf[ModelKeys.check_shapes])] mace_check(len(conf[ModelKeys.check_tensors]) == len( conf[ModelKeys.check_shapes]), \"check tensors count", "as output tensors\") conf[ModelKeys.output_data_formats] = output_data_formats if ModelKeys.check_tensors in conf:", "'static', 'static_assert', 'static_cast', 'struct', 'switch', 'synchronized', 'template', 'this', 'thread_local', 'throw',", "dt in to_list(conf.get(ModelKeys.input_data_types, [\"float32\"]))] if len(input_data_types) == 1 and input_count", "= [parse_data_format(df) for df in to_list(conf.get(ModelKeys.output_data_formats, [\"NHWC\"]))] if 
len(output_data_formats) ==", "[e.name for e in DataFormat], \"unknown data format %s\" %", "and input_count > 1: input_data_formats = [input_data_formats[0]] * input_count mace_check(len(input_data_formats)", "HWIO = 100 OIHW = 101 HWOI = 102 OHWI", "len(conf[ModelKeys.input_shapes]) == input_count, \"input node count and shape count do", "be \" \"the same as output tensors\") conf[ModelKeys.output_data_formats] = output_data_formats", "\"the same as output tensors\") conf[ModelKeys.output_data_formats] = output_data_formats if ModelKeys.check_tensors", "'case', 'catch', 'char', 'char16_t', 'char32_t', 'class', 'compl', 'concept', 'const', 'constexpr',", "1000 def parse_data_format(str): str = str.upper() mace_check(str in [e.name for", "check_shapes = \"check_shapes\" model_file_path = \"model_file_path\" model_sha256_checksum = \"model_sha256_checksum\" weight_file_path", "input_ranges should be \" \"the same as input tensors\") conf[ModelKeys.input_ranges]", "= output_data_formats if ModelKeys.check_tensors in conf: conf[ModelKeys.check_tensors] = to_list(conf[ModelKeys.check_tensors]) conf[ModelKeys.check_shapes]", "conf[ModelKeys.output_data_types] = output_data_types output_data_formats = [parse_data_format(df) for df in to_list(conf.get(ModelKeys.output_data_formats,", "return config def parse_device_info(path): conf = parse(path) return conf[\"devices\"] class", "'return', 'short', 'signed', 'sizeof', 'static', 'static_assert', 'static_cast', 'struct', 'switch', 'synchronized',", "parse input conf[ModelKeys.input_tensors] = to_list(conf[ModelKeys.input_tensors]) conf[ModelKeys.input_tensors] = [str(i) for i", "for x in xs.split(\",\")] def parse_float_array(xs): return [float(x) for x", "output_count > 1: output_data_formats = [output_data_formats[0]] * output_count mace_check(len(output_data_formats) ==", "parse ON/OFF to boolean for w in [\"ON\", \"OFF\", \"on\",", "w + \"'\", s) # sub ${} to env value", "'typename', 'union', 'unsigned', 'using', 
'virtual', 'void', 'volatile', 'wchar_t', 'while', 'xor',", "ONNX = 2 MEGENGINE = 3 def parse_platform(str): str =", "mace_check(len(conf[ModelKeys.check_tensors]) == len( conf[ModelKeys.check_shapes]), \"check tensors count and shape count", "{ \"cpu\": DeviceType.CPU, \"gpu\": DeviceType.GPU, \"hexagon\": DeviceType.HEXAGON, \"dsp\": DeviceType.HEXAGON, \"hta\":", "conf[\"devices\"] class ModelKeys(object): platform = \"platform\" runtime = \"runtime\" models", "'float', 'for', 'friend', 'goto', 'if', 'import', 'inline', 'int', 'long', 'module',", "return Platform[str] DATA_TYPE_MAP = { 'float32': mace_pb2.DT_FLOAT, 'int32': mace_pb2.DT_INT32, }", "else: conf[ModelKeys.data_type] = mace_pb2.DT_HALF # parse input conf[ModelKeys.input_tensors] = to_list(conf[ModelKeys.input_tensors])", "1 NCHW = 2 HWIO = 100 OIHW = 101", "number of input_ranges should be \" \"the same as input", "match\") output_data_types = [parse_data_type(dt) for dt in to_list(conf.get(ModelKeys.output_data_types, [\"float32\"]))] if", "'signed', 'sizeof', 'static', 'static_assert', 'static_cast', 'struct', 'switch', 'synchronized', 'template', 'this',", "output_data_types should be \" \"the same as output tensors\") conf[ModelKeys.output_data_types]", "not supported\" % str) def parse_internal_data_type(str): if str == 'fp32_fp32':", "elif str == 'bf16_fp32': return mace_pb2.DT_BFLOAT16 else: return mace_pb2.DT_HALF def", "mace_pb2.DT_INT32, } def parse_data_type(str): if str == \"float32\": return mace_pb2.DT_FLOAT", "= 4 APU = 5 CPU_GPU = 100 DEVICE_MAP =", "[str(i) for i in conf[ModelKeys.output_tensors]] output_count = len(conf[ModelKeys.output_tensors]) conf[ModelKeys.output_shapes] =", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "py_proto import mace_pb2 CPP_KEYWORDS = [ 'alignas', 'alignof', 'and', 'and_eq',", "Unless required by applicable law or agreed to in writing,", "'do', 'double', 'dynamic_cast', 'else', 'enum', 'explicit', 'export', 'extern', 'false', 'float',", 
"[parse_data_type(dt) for dt in to_list(conf.get(ModelKeys.input_data_types, [\"float32\"]))] if len(input_data_types) == 1", "\"weight_file_path\" weight_sha256_checksum = \"weight_sha256_checksum\" quantize_range_file = \"quantize_range_file\" quantize = \"quantize\"", "input_data_formats = [input_data_formats[0]] * input_count mace_check(len(input_data_formats) == input_count, \"the number", "if str == 'fp32_fp32': return mace_pb2.DT_FLOAT elif str == 'bf16_fp32':", "in Platform], \"unknown platform %s\" % str) return Platform[str] DATA_TYPE_MAP", "'private', 'protected', 'public', 'register', 'reinterpret_cast', 'requires', 'return', 'short', 'signed', 'sizeof',", "'const_cast', 'continue', 'co_await', 'co_return', 'co_yield', 'decltype', 'default', 'delete', 'do', 'double',", "= re.sub(r\"\\${(\\w+)}\", lambda x: os.environ[x.group(1)], s) return yaml.load(s) def parse(path):", "input_data_types = \"input_data_types\" input_data_formats = \"input_data_formats\" input_ranges = \"input_ranges\" output_tensors", "the specific language governing permissions and # limitations under the", "[int(x) for x in xs.split(\",\")] def parse_float_array(xs): return [float(x) for", "'void', 'volatile', 'wchar_t', 'while', 'xor', 'xor_eq', 'override', 'final', 'transaction_safe', 'transaction_safe_dynamic',", "DATA_TYPE_MAP = { 'float32': mace_pb2.DT_FLOAT, 'int32': mace_pb2.DT_INT32, } def parse_data_type(str):", "'using', 'virtual', 'void', 'volatile', 'wchar_t', 'while', 'xor', 'xor_eq', 'override', 'final',", "input_data_types should be \" \"the same as input tensors\") conf[ModelKeys.input_data_types]", "def parse_float_array(xs): return [float(x) for x in xs.split(\",\")] def normalize_model_config(conf):", "applicable law or agreed to in writing, software # distributed", "to env value s = re.sub(r\"\\${(\\w+)}\", lambda x: os.environ[x.group(1)], s)", "weight_file_path = \"weight_file_path\" weight_sha256_checksum = \"weight_sha256_checksum\" quantize_range_file = 
\"quantize_range_file\" quantize", "= 100 OIHW = 101 HWOI = 102 OHWI =", "if isinstance(x, list): return x else: return [x] def parse_int_array(xs):", "parse_platform(str): str = str.upper() mace_check(str in [e.name for e in", "copy.deepcopy(conf) if ModelKeys.subgraphs in conf: subgraph = conf[ModelKeys.subgraphs][0] del conf[ModelKeys.subgraphs]", "from __future__ import division from __future__ import print_function import re", "= \"graph_optimize_options\" input_tensors = \"input_tensors\" input_shapes = \"input_shapes\" input_data_types =", "'constexpr', 'const_cast', 'continue', 'co_await', 'co_return', 'co_yield', 'decltype', 'default', 'delete', 'do',", "102 OHWI = 103 AUTO = 1000 def parse_data_format(str): str", "f: config = sanitize_load(f.read()) return config def parse_device_info(path): conf =", "100 OIHW = 101 HWOI = 102 OHWI = 103", "\" \"the same as input tensors\") conf[ModelKeys.input_data_types] = input_data_types input_data_formats", "len(conf[ModelKeys.output_tensors]) conf[ModelKeys.output_shapes] = [parse_int_array(shape) for shape in to_list(conf[ModelKeys.output_shapes])] mace_check(len(conf[ModelKeys.output_tensors]) ==", "\"int32\": return mace_pb2.DT_INT32 else: mace_check(False, \"data type %s not supported\"", "[output_data_types[0]] * output_count mace_check(len(output_data_types) == output_count, \"the number of output_data_types", "str) def parse_internal_data_type(str): if str == 'fp32_fp32': return mace_pb2.DT_FLOAT elif", "match\") input_data_types = [parse_data_type(dt) for dt in to_list(conf.get(ModelKeys.input_data_types, [\"float32\"]))] if", "input tensors\") conf[ModelKeys.input_ranges] = input_ranges # parse output conf[ModelKeys.output_tensors] =", "'bool', 'break', 'case', 'catch', 'char', 'char16_t', 'char32_t', 'class', 'compl', 'concept',", "input_count mace_check(len(input_data_types) == input_count, \"the number of input_data_types should be", "== input_count, \"the number of input_data_formats should be \" 
\"the", "output_data_formats should be \" \"the same as output tensors\") conf[ModelKeys.output_data_formats]", "in writing, software # distributed under the License is distributed", "'typeid', 'typename', 'union', 'unsigned', 'using', 'virtual', 'void', 'volatile', 'wchar_t', 'while',", "conf[ModelKeys.output_data_formats] = output_data_formats if ModelKeys.check_tensors in conf: conf[ModelKeys.check_tensors] = to_list(conf[ModelKeys.check_tensors])", "conf: conf[ModelKeys.data_type] = parse_internal_data_type( conf[ModelKeys.data_type]) else: conf[ModelKeys.data_type] = mace_pb2.DT_HALF #", "for x in xs.split(\",\")] def normalize_model_config(conf): conf = copy.deepcopy(conf) if", "input_shapes = \"input_shapes\" input_data_types = \"input_data_types\" input_data_formats = \"input_data_formats\" input_ranges", "for df in to_list(conf.get(ModelKeys.input_data_formats, [\"NHWC\"]))] if len(input_data_formats) == 1 and", "def parse_device_type(str): mace_check(str in DEVICE_MAP, \"unknown device %s\" % str)", "1 and output_count > 1: output_data_formats = [output_data_formats[0]] * output_count", "and output_count > 1: output_data_formats = [output_data_formats[0]] * output_count mace_check(len(output_data_formats)", "\"output_tensors\" output_shapes = \"output_shapes\" output_data_types = \"output_data_types\" output_data_formats = \"output_data_formats\"", "to_list(conf[ModelKeys.input_shapes])] mace_check( len(conf[ModelKeys.input_shapes]) == input_count, \"input node count and shape", "\"apu\": DeviceType.APU, \"cpu+gpu\": DeviceType.CPU_GPU } def parse_device_type(str): mace_check(str in DEVICE_MAP,", "'int32': mace_pb2.DT_INT32, } def parse_data_type(str): if str == \"float32\": return", "be \" \"the same as input tensors\") conf[ModelKeys.input_data_types] = input_data_types", "list): return x else: return [x] def parse_int_array(xs): if len(xs)", "to_list(conf[ModelKeys.output_shapes])] mace_check(len(conf[ModelKeys.output_tensors]) == output_count, 
\"output node count and shape count", "__future__ import division from __future__ import print_function import re import", "= \"quantize\" quantize_schema = \"quantize_schema\" quantize_large_weights = \"quantize_large_weights\" quantize_stat =", "else: return mace_pb2.DT_HALF def to_list(x): if isinstance(x, list): return x", "% str) return Platform[str] DATA_TYPE_MAP = { 'float32': mace_pb2.DT_FLOAT, 'int32':", "> 1: input_ranges = [input_ranges[0]] * input_count mace_check(len(input_ranges) == input_count,", "str == 'bf16_fp32': return mace_pb2.DT_BFLOAT16 else: return mace_pb2.DT_HALF def to_list(x):", "def parse_device_info(path): conf = parse(path) return conf[\"devices\"] class ModelKeys(object): platform", "\"validation_inputs_data\" class DataFormat(Enum): NONE = 0 NHWC = 1 NCHW", "+ \"'\", s) # sub ${} to env value s", "number of output_data_types should be \" \"the same as output", "\"platform\" runtime = \"runtime\" models = 'models' graph_optimize_options = \"graph_optimize_options\"", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "> 1: output_data_formats = [output_data_formats[0]] * output_count mace_check(len(output_data_formats) == output_count,", "'concept', 'const', 'constexpr', 'const_cast', 'continue', 'co_await', 'co_return', 'co_yield', 'decltype', 'default',", "License, Version 2.0 (the \"License\"); # you may not use", "import mace_check from utils.util import MaceLogger from py_proto import mace_pb2", "str) return DataFormat[str] class DeviceType(Enum): CPU = 0 GPU =", "to_list(conf.get(ModelKeys.input_data_types, [\"float32\"]))] if len(input_data_types) == 1 and input_count > 1:", "= to_list(conf[ModelKeys.check_tensors]) conf[ModelKeys.check_shapes] = [parse_int_array(shape) for shape in to_list(conf[ModelKeys.check_shapes])] mace_check(len(conf[ModelKeys.check_tensors])", "# You may obtain a copy of the License at", "\"winograd\" cl_mem_type = \"cl_mem_type\" data_type = \"data_type\" subgraphs = \"subgraphs\"", "in 
DEVICE_MAP, \"unknown device %s\" % str) return DEVICE_MAP[str] class", "== 1 and output_count > 1: output_data_types = [output_data_types[0]] *", "count do not match\") input_data_types = [parse_data_type(dt) for dt in", "\"input_data_formats\" input_ranges = \"input_ranges\" output_tensors = \"output_tensors\" output_shapes = \"output_shapes\"", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "\"check_tensors\" check_shapes = \"check_shapes\" model_file_path = \"model_file_path\" model_sha256_checksum = \"model_sha256_checksum\"", "1 and input_count > 1: input_ranges = [input_ranges[0]] * input_count", "== 1 and output_count > 1: output_data_formats = [output_data_formats[0]] *", "input_count mace_check(len(input_data_formats) == input_count, \"the number of input_data_formats should be", "# Copyright 2019 The MACE Authors. All Rights Reserved. #", "Authors. All Rights Reserved. # # Licensed under the Apache", "MEGENGINE = 3 def parse_platform(str): str = str.upper() mace_check(str in", "conf[ModelKeys.input_tensors] = to_list(conf[ModelKeys.input_tensors]) conf[ModelKeys.input_tensors] = [str(i) for i in conf[ModelKeys.input_tensors]]", "'alignof', 'and', 'and_eq', 'asm', 'atomic_cancel', 'atomic_commit', 'atomic_noexcept', 'auto', 'bitand', 'bitor',", "'override', 'final', 'transaction_safe', 'transaction_safe_dynamic', 'if', 'elif', 'else', 'endif', 'defined', 'ifdef',", "= \"quantize_schema\" quantize_large_weights = \"quantize_large_weights\" quantize_stat = \"quantize_stat\" change_concat_ranges =", "x in xs.split(\",\")] def normalize_model_config(conf): conf = copy.deepcopy(conf) if ModelKeys.subgraphs", "for w in [\"ON\", \"OFF\", \"on\", \"off\"]: s = re.sub(r\":\\s+\"", "return DEVICE_MAP[str] class Platform(Enum): TENSORFLOW = 0 CAFFE = 1", "should be \" \"the same as input tensors\") conf[ModelKeys.input_data_types] =", "# limitations under the License. 
from __future__ import absolute_import from", "mace_pb2.DT_INT32 else: mace_check(False, \"data type %s not supported\" % str)", "'reinterpret_cast', 'requires', 'return', 'short', 'signed', 'sizeof', 'static', 'static_assert', 'static_cast', 'struct',", "shape count do not match\") output_data_types = [parse_data_type(dt) for dt", "1 and output_count > 1: output_data_types = [output_data_types[0]] * output_count", "shape in to_list(conf[ModelKeys.input_shapes])] mace_check( len(conf[ModelKeys.input_shapes]) == input_count, \"input node count", "* output_count mace_check(len(output_data_formats) == output_count, \"the number of output_data_formats should", "same as input tensors\") conf[ModelKeys.input_ranges] = input_ranges # parse output", "the License for the specific language governing permissions and #", "> 1: output_data_types = [output_data_types[0]] * output_count mace_check(len(output_data_types) == output_count,", "import copy import yaml from enum import Enum from utils.util", "= sanitize_load(f.read()) return config def parse_device_info(path): conf = parse(path) return", "Apache License, Version 2.0 (the \"License\"); # you may not", "either express or implied. 
# See the License for the", "tensors\") conf[ModelKeys.input_ranges] = input_ranges # parse output conf[ModelKeys.output_tensors] = to_list(conf[ModelKeys.output_tensors])", "data format %s\" % str) return DataFormat[str] class DeviceType(Enum): CPU", "mace_check(str in DEVICE_MAP, \"unknown device %s\" % str) return DEVICE_MAP[str]", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "DeviceType.CPU_GPU } def parse_device_type(str): mace_check(str in DEVICE_MAP, \"unknown device %s\"", "of input_data_formats should be \" \"the same as input tensors\")", "'error', 'pragma', ] def sanitize_load(s): # do not let yaml", "be \" \"the same as input tensors\") conf[ModelKeys.input_ranges] = input_ranges", "[\"-1.0,1.0\"]))] if len(input_ranges) == 1 and input_count > 1: input_ranges", "NONE = 0 NHWC = 1 NCHW = 2 HWIO", "'auto', 'bitand', 'bitor', 'bool', 'break', 'case', 'catch', 'char', 'char16_t', 'char32_t',", "DataFormat[str] class DeviceType(Enum): CPU = 0 GPU = 2 HEXAGON", "= \"platform\" runtime = \"runtime\" models = 'models' graph_optimize_options =", "else: mace_check(False, \"data type %s not supported\" % str) def", "'public', 'register', 'reinterpret_cast', 'requires', 'return', 'short', 'signed', 'sizeof', 'static', 'static_assert',", "input_data_formats = [parse_data_format(df) for df in to_list(conf.get(ModelKeys.input_data_formats, [\"NHWC\"]))] if len(input_data_formats)", "as input tensors\") conf[ModelKeys.input_data_formats] = input_data_formats input_ranges = [parse_float_array(r) for", "= \"quantize_range_file\" quantize = \"quantize\" quantize_schema = \"quantize_schema\" quantize_large_weights =", "= \"validation_inputs_data\" class DataFormat(Enum): NONE = 0 NHWC = 1", "mace_check(len(input_data_formats) == input_count, \"the number of input_data_formats should be \"", "= [input_data_formats[0]] * input_count mace_check(len(input_data_formats) == input_count, \"the number of", "do not let yaml parse ON/OFF 
to boolean for w", "in conf[ModelKeys.input_tensors]] input_count = len(conf[ModelKeys.input_tensors]) conf[ModelKeys.input_shapes] = [parse_int_array(shape) for shape", "[e.name for e in Platform], \"unknown platform %s\" % str)", "= [parse_data_type(dt) for dt in to_list(conf.get(ModelKeys.output_data_types, [\"float32\"]))] if len(output_data_types) ==", "== 1 and input_count > 1: input_data_formats = [input_data_formats[0]] *", "conf[ModelKeys.output_tensors] = to_list(conf[ModelKeys.output_tensors]) conf[ModelKeys.output_tensors] = [str(i) for i in conf[ModelKeys.output_tensors]]", "'else', 'enum', 'explicit', 'export', 'extern', 'false', 'float', 'for', 'friend', 'goto',", "= \"output_data_formats\" check_tensors = \"check_tensors\" check_shapes = \"check_shapes\" model_file_path =", "parse output conf[ModelKeys.output_tensors] = to_list(conf[ModelKeys.output_tensors]) conf[ModelKeys.output_tensors] = [str(i) for i", "input_count > 1: input_ranges = [input_ranges[0]] * input_count mace_check(len(input_ranges) ==", "to_list(conf.get(ModelKeys.output_data_types, [\"float32\"]))] if len(output_data_types) == 1 and output_count > 1:", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "'atomic_cancel', 'atomic_commit', 'atomic_noexcept', 'auto', 'bitand', 'bitor', 'bool', 'break', 'case', 'catch',", "= [parse_int_array(shape) for shape in to_list(conf[ModelKeys.output_shapes])] mace_check(len(conf[ModelKeys.output_tensors]) == output_count, \"output", "'struct', 'switch', 'synchronized', 'template', 'this', 'thread_local', 'throw', 'true', 'try', 'typedef',", "'namespace', 'new', 'noexcept', 'not', 'not_eq', 'nullptr', 'operator', 'or', 'or_eq', 'private',", "return x else: return [x] def parse_int_array(xs): if len(xs) is", "[parse_float_array(r) for r in to_list(conf.get(ModelKeys.input_ranges, [\"-1.0,1.0\"]))] if len(input_ranges) == 1", "quantize_range_file = \"quantize_range_file\" quantize = \"quantize\" quantize_schema = 
\"quantize_schema\" quantize_large_weights", "'if', 'elif', 'else', 'endif', 'defined', 'ifdef', 'ifndef', 'define', 'undef', 'include',", "APU = 5 CPU_GPU = 100 DEVICE_MAP = { \"cpu\":", "'pragma', ] def sanitize_load(s): # do not let yaml parse", "def sanitize_load(s): # do not let yaml parse ON/OFF to", "conf[ModelKeys.quantize] == 1: conf[ModelKeys.data_type] = mace_pb2.DT_FLOAT else: if ModelKeys.data_type in", "'ifdef', 'ifndef', 'define', 'undef', 'include', 'line', 'error', 'pragma', ] def", "= [output_data_types[0]] * output_count mace_check(len(output_data_types) == output_count, \"the number of", "sanitize_load(f.read()) return config def parse_device_info(path): conf = parse(path) return conf[\"devices\"]", "if ModelKeys.subgraphs in conf: subgraph = conf[ModelKeys.subgraphs][0] del conf[ModelKeys.subgraphs] conf.update(subgraph)", "output_data_formats = [output_data_formats[0]] * output_count mace_check(len(output_data_formats) == output_count, \"the number", "def parse_int_array(xs): if len(xs) is 0: return [1] return [int(x)", "'co_await', 'co_return', 'co_yield', 'decltype', 'default', 'delete', 'do', 'double', 'dynamic_cast', 'else',", "sanitize_load(s): # do not let yaml parse ON/OFF to boolean", "copy import yaml from enum import Enum from utils.util import", "output_count = len(conf[ModelKeys.output_tensors]) conf[ModelKeys.output_shapes] = [parse_int_array(shape) for shape in to_list(conf[ModelKeys.output_shapes])]", "input conf[ModelKeys.input_tensors] = to_list(conf[ModelKeys.input_tensors]) conf[ModelKeys.input_tensors] = [str(i) for i in", "absolute_import from __future__ import division from __future__ import print_function import", "\"model_file_path\" model_sha256_checksum = \"model_sha256_checksum\" weight_file_path = \"weight_file_path\" weight_sha256_checksum = \"weight_sha256_checksum\"", "'continue', 'co_await', 'co_return', 'co_yield', 'decltype', 'default', 'delete', 'do', 'double', 'dynamic_cast',", "[1] return [int(x) for x in 
xs.split(\",\")] def parse_float_array(xs): return", "3 def parse_platform(str): str = str.upper() mace_check(str in [e.name for", "NCHW = 2 HWIO = 100 OIHW = 101 HWOI", "quantize_schema = \"quantize_schema\" quantize_large_weights = \"quantize_large_weights\" quantize_stat = \"quantize_stat\" change_concat_ranges", "\"License\"); # you may not use this file except in", "from utils.util import mace_check from utils.util import MaceLogger from py_proto", "= \"output_data_types\" output_data_formats = \"output_data_formats\" check_tensors = \"check_tensors\" check_shapes =", "== \"int32\": return mace_pb2.DT_INT32 else: mace_check(False, \"data type %s not", "'long', 'module', 'mutable', 'namespace', 'new', 'noexcept', 'not', 'not_eq', 'nullptr', 'operator',", "open(path) as f: config = sanitize_load(f.read()) return config def parse_device_info(path):", "= 0 NHWC = 1 NCHW = 2 HWIO =", "input_count, \"input node count and shape count do not match\")", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "def parse_internal_data_type(str): if str == 'fp32_fp32': return mace_pb2.DT_FLOAT elif str", "input_count mace_check(len(input_ranges) == input_count, \"the number of input_ranges should be", "of output_data_types should be \" \"the same as output tensors\")", "[\"ON\", \"OFF\", \"on\", \"off\"]: s = re.sub(r\":\\s+\" + w +", "== input_count, \"the number of input_ranges should be \" \"the", "'not_eq', 'nullptr', 'operator', 'or', 'or_eq', 'private', 'protected', 'public', 'register', 'reinterpret_cast',", "x: os.environ[x.group(1)], s) return yaml.load(s) def parse(path): with open(path) as", "\"quantize_stat\" change_concat_ranges = \"change_concat_ranges\" winograd = \"winograd\" cl_mem_type = \"cl_mem_type\"", "\"hexagon\": DeviceType.HEXAGON, \"dsp\": DeviceType.HEXAGON, \"hta\": DeviceType.HTA, \"apu\": DeviceType.APU, \"cpu+gpu\": DeviceType.CPU_GPU", "'thread_local', 'throw', 'true', 'try', 'typedef', 'typeid', 'typename', 'union', 'unsigned', 
'using',", "* output_count mace_check(len(output_data_types) == output_count, \"the number of output_data_types should", "input_data_formats = \"input_data_formats\" input_ranges = \"input_ranges\" output_tensors = \"output_tensors\" output_shapes", "# distributed under the License is distributed on an \"AS", "MaceLogger from py_proto import mace_pb2 CPP_KEYWORDS = [ 'alignas', 'alignof',", "= \"runtime\" models = 'models' graph_optimize_options = \"graph_optimize_options\" input_tensors =", "= copy.deepcopy(conf) if ModelKeys.subgraphs in conf: subgraph = conf[ModelKeys.subgraphs][0] del", "\"the number of input_data_types should be \" \"the same as", "to_list(conf.get(ModelKeys.input_data_formats, [\"NHWC\"]))] if len(input_data_formats) == 1 and input_count > 1:", "# Unless required by applicable law or agreed to in", "'break', 'case', 'catch', 'char', 'char16_t', 'char32_t', 'class', 'compl', 'concept', 'const',", "platform = \"platform\" runtime = \"runtime\" models = 'models' graph_optimize_options", "should be \" \"the same as output tensors\") conf[ModelKeys.output_data_types] =", "return mace_pb2.DT_HALF def to_list(x): if isinstance(x, list): return x else:", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "mace_pb2 CPP_KEYWORDS = [ 'alignas', 'alignof', 'and', 'and_eq', 'asm', 'atomic_cancel',", "input_count > 1: input_data_formats = [input_data_formats[0]] * input_count mace_check(len(input_data_formats) ==", "model_file_path = \"model_file_path\" model_sha256_checksum = \"model_sha256_checksum\" weight_file_path = \"weight_file_path\" weight_sha256_checksum", "output_data_types = [output_data_types[0]] * output_count mace_check(len(output_data_types) == output_count, \"the number", "tensors count and shape count do not match.\") MaceLogger.summary(conf) return", "= \"output_shapes\" output_data_types = \"output_data_types\" output_data_formats = \"output_data_formats\" check_tensors =", "DEVICE_MAP, \"unknown device %s\" % str) return 
DEVICE_MAP[str] class Platform(Enum):", "Platform(Enum): TENSORFLOW = 0 CAFFE = 1 ONNX = 2", "in to_list(conf[ModelKeys.check_shapes])] mace_check(len(conf[ModelKeys.check_tensors]) == len( conf[ModelKeys.check_shapes]), \"check tensors count and", "sub ${} to env value s = re.sub(r\"\\${(\\w+)}\", lambda x:", "You may obtain a copy of the License at #", "[input_data_formats[0]] * input_count mace_check(len(input_data_formats) == input_count, \"the number of input_data_formats", "count do not match\") output_data_types = [parse_data_type(dt) for dt in", "= \"change_concat_ranges\" winograd = \"winograd\" cl_mem_type = \"cl_mem_type\" data_type =", "return [float(x) for x in xs.split(\",\")] def normalize_model_config(conf): conf =", "output tensors\") conf[ModelKeys.output_data_formats] = output_data_formats if ModelKeys.check_tensors in conf: conf[ModelKeys.check_tensors]", "class Platform(Enum): TENSORFLOW = 0 CAFFE = 1 ONNX =", "input_count, \"the number of input_ranges should be \" \"the same", "input_ranges = \"input_ranges\" output_tensors = \"output_tensors\" output_shapes = \"output_shapes\" output_data_types", "for dt in to_list(conf.get(ModelKeys.input_data_types, [\"float32\"]))] if len(input_data_types) == 1 and", "the Apache License, Version 2.0 (the \"License\"); # you may", "if ModelKeys.quantize in conf and conf[ModelKeys.quantize] == 1: conf[ModelKeys.data_type] =", "in conf[ModelKeys.output_tensors]] output_count = len(conf[ModelKeys.output_tensors]) conf[ModelKeys.output_shapes] = [parse_int_array(shape) for shape", "[output_data_formats[0]] * output_count mace_check(len(output_data_formats) == output_count, \"the number of output_data_formats", "'co_return', 'co_yield', 'decltype', 'default', 'delete', 'do', 'double', 'dynamic_cast', 'else', 'enum',", "class DeviceType(Enum): CPU = 0 GPU = 2 HEXAGON =", "for shape in to_list(conf[ModelKeys.input_shapes])] mace_check( len(conf[ModelKeys.input_shapes]) == input_count, \"input node", "'extern', 
'false', 'float', 'for', 'friend', 'goto', 'if', 'import', 'inline', 'int'," ]
[ "not None: MRBrainS_train, MRBrainS_valid, MRBrainS_test, MRBrainS_reconstruction = MRBrainSSliceDatasetFactory.create_train_valid_test( source_dir=dataset_configs[\"MRBrainS\"].path, modalities=dataset_configs[\"MRBrainS\"].modalities,", "max_num_patches=dataset_configs[\"ABIDE\"].max_num_patches, augment=dataset_configs[\"ABIDE\"].augment, patch_size=dataset_configs[\"ABIDE\"].patch_size, step=dataset_configs[\"ABIDE\"].step, test_patch_size=dataset_configs[\"ABIDE\"].test_patch_size, test_step=dataset_configs[\"ABIDE\"].test_step, data_augmentation_config=data_augmentation_config) train_datasets.append(ABIDE_train) valid_datasets.append(ABIDE_valid) test_datasets.append(ABIDE_test)", "prob_noise=data_augmentation_config[\"test\"][\"noise\"][\"prob_noise\"]) else: augmented_input_reconstructor = None augmented_normalized_input_reconstructor = None # Concat", "input_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1, 256, 256, 192),", "from deepNormalize.config.parsers import ArgsParserFactory, ArgsParserType from deepNormalize.factories.customModelFactory import CustomModelFactory from", "2: segmentation_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1, 256, 256,", "alpha=data_augmentation_config[\"test\"][\"bias_field\"][\"alpha\"][0], prob_bias=data_augmentation_config[\"test\"][\"bias_field\"][\"prob_bias\"], snr=data_augmentation_config[\"test\"][\"noise\"][\"snr\"], prob_noise=data_augmentation_config[\"test\"][\"noise\"][\"prob_noise\"]) else: augmented_input_reconstructor = None augmented_normalized_input_reconstructor =", "# Create datasets if dataset_configs.get(\"iSEG\", None) is not None: iSEG_train,", 
"patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, batch_size=50, alpha=data_augmentation_config[\"test\"][\"bias_field\"][\"alpha\"][0], prob_bias=data_augmentation_config[\"test\"][\"bias_field\"][\"prob_bias\"], snr=data_augmentation_config[\"test\"][\"noise\"][\"snr\"],", "model_trainers, dataloaders, reconstruction_datasets, None, input_reconstructor, segmentation_reconstructor, augmented_input_reconstructor, None, gt_reconstructor, run_config,", "copy of the License at # # https://opensource.org/licenses/MIT # #", "= None iSEG_CSV = None MRBrainS_train = None MRBrainS_CSV =", "source_dir=dataset_configs[\"iSEG\"].path, modalities=dataset_configs[\"iSEG\"].modalities, dataset_id=ISEG_ID, test_size=dataset_configs[\"iSEG\"].validation_split, max_subjects=dataset_configs[\"iSEG\"].max_subjects, max_num_patches=dataset_configs[\"iSEG\"].max_num_patches, augment=dataset_configs[\"iSEG\"].augment, patch_size=dataset_configs[\"iSEG\"].patch_size, step=dataset_configs[\"iSEG\"].step, test_patch_size=dataset_configs[\"iSEG\"].test_patch_size,", "= VisdomLogger(visdom_config) visdom_logger(VisdomData(\"Experiment\", \"Experiment Config\", PlotType.TEXT_PLOT, PlotFrequency.EVERY_EPOCH, None, config_html)) visdom_logger(VisdomData(\"Experiment\",", "the data. 
train_datasets = list() valid_datasets = list() test_datasets =", "ABIDE_test, ABIDE_reconstruction = ABIDESliceDatasetFactory.create_train_valid_test( source_dir=dataset_configs[\"ABIDE\"].path, modalities=dataset_configs[\"ABIDE\"].modalities, dataset_id=ABIDE_ID, sites=dataset_configs[\"ABIDE\"].sites, max_subjects=dataset_configs[\"ABIDE\"].max_subjects, test_size=dataset_configs[\"ABIDE\"].validation_split,", "source_dir=dataset_configs[\"MRBrainS\"].path, modalities=dataset_configs[\"MRBrainS\"].modalities, dataset_id=MRBRAINS_ID, test_size=dataset_configs[\"MRBrainS\"].validation_split, max_subjects=dataset_configs[\"MRBrainS\"].max_subjects, max_num_patches=dataset_configs[\"MRBrainS\"].max_num_patches, augment=dataset_configs[\"MRBrainS\"].augment, patch_size=dataset_configs[\"MRBrainS\"].patch_size, step=dataset_configs[\"MRBrainS\"].step, test_patch_size=dataset_configs[\"MRBrainS\"].test_patch_size,", "model_trainers = [model_trainers] # Create datasets if dataset_configs.get(\"iSEG\", None) is", "the License. # ============================================================================== import logging import multiprocessing import numpy", "config: config.to_html(), model_trainer_configs))] # Prepare the data. 
train_datasets = list()", "None else 0], y=[\"iSEG\", \"MRBrainS\", \"ABIDE\"], params={\"opts\": {\"title\": \"Patch count\"}}))", "iSEG_valid, iSEG_test, iSEG_reconstruction = iSEGSliceDatasetFactory.create_train_valid_test( source_dir=dataset_configs[\"iSEG\"].path, modalities=dataset_configs[\"iSEG\"].modalities, dataset_id=ISEG_ID, test_size=dataset_configs[\"iSEG\"].validation_split, max_subjects=dataset_configs[\"iSEG\"].max_subjects,", "from torch.utils.data.dataloader import DataLoader from deepNormalize.config.parsers import ArgsParserFactory, ArgsParserType from", "import torch import torch.backends.cudnn as cudnn from kerosene.configs.configs import RunConfiguration,", "CustomModelFactory from deepNormalize.factories.customTrainerFactory import TrainerFactory from deepNormalize.inputs.datasets import iSEGSliceDatasetFactory, MRBrainSSliceDatasetFactory,", "Basic settings logging.basicConfig(level=logging.INFO) torch.set_num_threads(multiprocessing.cpu_count()) torch.set_num_interop_threads(multiprocessing.cpu_count()) args = ArgsParserFactory.create_parser(ArgsParserType.MODEL_TRAINING).parse_args() # Create", "iSEGSliceDatasetFactory.create_train_valid_test( source_dir=dataset_configs[\"iSEG\"].path, modalities=dataset_configs[\"iSEG\"].modalities, dataset_id=ISEG_ID, test_size=dataset_configs[\"iSEG\"].validation_split, max_subjects=dataset_configs[\"iSEG\"].max_subjects, max_num_patches=dataset_configs[\"iSEG\"].max_num_patches, augment=dataset_configs[\"iSEG\"].augment, patch_size=dataset_configs[\"iSEG\"].patch_size, step=dataset_configs[\"iSEG\"].step,", "valid_datasets.append(iSEG_valid) reconstruction_datasets.append(iSEG_reconstruction) if dataset_configs.get(\"MRBrainS\", None) is not None: MRBrainS_train, MRBrainS_valid,", "os.path.join(exp[0], exp[1], os.path.basename( os.path.normpath(visdom_config.env))) else: save_folder = \"saves/{}\".format(os.path.basename(os.path.normpath(visdom_config.env))) [os.makedirs(\"{}/{}\".format(save_folder, 
model),", "import logging import multiprocessing import numpy as np import os", "drop_last=True, pin_memory=True), [train_dataset, valid_dataset, test_dataset])) # Initialize the loggers. visdom_config", "DatasetConfiguration from kerosene.configs.parsers import YamlConfigurationParser from kerosene.loggers.visdom import PlotType, PlotFrequency", "list(map(lambda dataset: DataLoader(dataset, training_config.batch_size, sampler=None, shuffle=True, num_workers=args.num_workers, collate_fn=augmented_sample_collate, drop_last=True, pin_memory=True),", "Initialize the model trainers model_trainer_factory = ModelTrainerFactory(model_factory=CustomModelFactory(), criterion_factory=CustomCriterionFactory()) model_trainers =", "collate_fn=augmented_sample_collate, drop_last=True, pin_memory=True), [train_dataset, valid_dataset, test_dataset])) # Initialize the loggers.", "= train_datasets[0] valid_dataset = valid_datasets[0] test_dataset = test_datasets[0] # Create", "= list() iSEG_train = None iSEG_CSV = None MRBrainS_train =", "if len(dataset_configs) > 1: train_dataset = torch.utils.data.ConcatDataset(train_datasets) valid_dataset = torch.utils.data.ConcatDataset(valid_datasets)", "valid_datasets[0] test_dataset = test_datasets[0] # Create loaders. dataloaders = list(map(lambda", "ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, models=[model_trainers[0]],", "patch_size=(1, 32, 32, 32), reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, batch_size=50,", "All Rights Reserved. # # Licensed under the MIT License;", "None # Concat datasets. 
if len(dataset_configs) > 1: train_dataset =", "models=[model_trainers[0]], normalize_and_segment=True, batch_size=4) input_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0], ABIDE_reconstruction._source_images[0]], patch_size=(1,", "use this file except in compliance with the License. #", "deepNormalize.utils.image_slicer import ImageReconstructor cudnn.benchmark = True cudnn.enabled = True np.random.seed(42)", "256, 192), step=dataset_configs[\"iSEG\"].test_step, batch_size=50, alpha=data_augmentation_config[\"test\"][\"bias_field\"][\"alpha\"][0], prob_bias=data_augmentation_config[\"test\"][\"bias_field\"][\"prob_bias\"], snr=data_augmentation_config[\"test\"][\"noise\"][\"snr\"], prob_noise=data_augmentation_config[\"test\"][\"noise\"][\"prob_noise\"]) else: augmented_input_reconstructor", "len(list(dataset_configs.keys())) == 2: segmentation_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1,", "None else 0, len(ABIDE_train) if ABIDE_train is not None else", "iSEG_train is not None else 0, len(MRBrainS_train) if MRBrainS_train is", "License. 
# ============================================================================== import logging import multiprocessing import numpy as", "= iSEGSliceDatasetFactory.create_train_valid_test( source_dir=dataset_configs[\"iSEG\"].path, modalities=dataset_configs[\"iSEG\"].modalities, dataset_id=ISEG_ID, test_size=dataset_configs[\"iSEG\"].validation_split, max_subjects=dataset_configs[\"iSEG\"].max_subjects, max_num_patches=dataset_configs[\"iSEG\"].max_num_patches, augment=dataset_configs[\"iSEG\"].augment, patch_size=dataset_configs[\"iSEG\"].patch_size,", "None: iSEG_train, iSEG_valid, iSEG_test, iSEG_reconstruction = iSEGSliceDatasetFactory.create_train_valid_test( source_dir=dataset_configs[\"iSEG\"].path, modalities=dataset_configs[\"iSEG\"].modalities, dataset_id=ISEG_ID,", "count\", PlotType.BAR_PLOT, PlotFrequency.EVERY_EPOCH, x=[len(iSEG_train) if iSEG_train is not None else", "import YamlConfigurationParser from kerosene.loggers.visdom import PlotType, PlotFrequency from kerosene.loggers.visdom.config import", "visdom_config.save_destination + os.path.join(exp[0], exp[1], os.path.basename( os.path.normpath(visdom_config.env))) else: save_folder = \"saves/{}\".format(os.path.basename(os.path.normpath(visdom_config.env)))", "step=dataset_configs[\"iSEG\"].test_step, batch_size=50, alpha=data_augmentation_config[\"test\"][\"bias_field\"][\"alpha\"][0], prob_bias=data_augmentation_config[\"test\"][\"bias_field\"][\"prob_bias\"], snr=data_augmentation_config[\"test\"][\"noise\"][\"snr\"], prob_noise=data_augmentation_config[\"test\"][\"noise\"][\"prob_noise\"]) else: augmented_input_reconstructor = None", "from deepNormalize.factories.customTrainerFactory import TrainerFactory from deepNormalize.inputs.datasets import iSEGSliceDatasetFactory, MRBrainSSliceDatasetFactory, ABIDESliceDatasetFactory", "models=[model_trainers[0]], segment=True, batch_size=8) input_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], 
MRBrainS_reconstruction._source_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1,", "License. # You may obtain a copy of the License", "192), step=dataset_configs[\"iSEG\"].test_step, models=[model_trainers[0]], normalize_and_segment=True, batch_size=4) input_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0],", "ABIDE_train, ABIDE_valid, ABIDE_test, ABIDE_reconstruction = ABIDESliceDatasetFactory.create_train_valid_test( source_dir=dataset_configs[\"ABIDE\"].path, modalities=dataset_configs[\"ABIDE\"].modalities, dataset_id=ABIDE_ID, sites=dataset_configs[\"ABIDE\"].sites,", "\"visdom\") exp = args.config_file.split(\"/\")[-3:] if visdom_config.save_destination is not None: save_folder", "\"Generator\", \"Segmenter\"]] visdom_logger = VisdomLogger(visdom_config) visdom_logger(VisdomData(\"Experiment\", \"Experiment Config\", PlotType.TEXT_PLOT, PlotFrequency.EVERY_EPOCH,", "# Initialize the loggers. 
visdom_config = VisdomConfiguration.from_yml(args.config_file, \"visdom\") exp =", "train_datasets[0] valid_dataset = valid_datasets[0] test_dataset = test_datasets[0] # Create loaders.", "= None MRBrainS_train = None MRBrainS_CSV = None ABIDE_train =", "from torch.utils.data import DataLoader from torch.utils.data.dataloader import DataLoader from deepNormalize.config.parsers", "under the License is distributed on an \"AS IS\" BASIS,", "save_folder = visdom_config.save_destination + os.path.join(exp[0], exp[1], os.path.basename( os.path.normpath(visdom_config.env))) else: save_folder", "run_config = RunConfiguration(use_amp=args.use_amp, local_rank=args.local_rank, amp_opt_level=args.amp_opt_level) model_trainer_configs, training_config = YamlConfigurationParser.parse(args.config_file) if", "valid_dataset = torch.utils.data.ConcatDataset(valid_datasets) test_dataset = torch.utils.data.ConcatDataset(test_datasets) else: train_dataset = train_datasets[0]", "License for the specific language governing permissions and # limitations", "= None MRBrainS_augmentation_strategy = None ABIDE_augmentation_strategy = None # Initialize", "32, 32, 32), reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, models=[model_trainers[0]], normalize_and_segment=True,", "in [\"Discriminator\", \"Generator\", \"Segmenter\"]] visdom_logger = VisdomLogger(visdom_config) visdom_logger(VisdomData(\"Experiment\", \"Experiment Config\",", "MRBrainSSliceDatasetFactory, ABIDESliceDatasetFactory from deepNormalize.nn.criterions import CustomCriterionFactory from deepNormalize.utils.constants import *", "not None else 0, len(MRBrainS_train) if MRBrainS_train is not None", "ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, batch_size=50,", "None ABIDE_CSV = None 
iSEG_augmentation_strategy = None MRBrainS_augmentation_strategy = None", "PlotType, PlotFrequency from kerosene.loggers.visdom.config import VisdomConfiguration from kerosene.loggers.visdom.visdom import VisdomLogger,", "trainers model_trainer_factory = ModelTrainerFactory(model_factory=CustomModelFactory(), criterion_factory=CustomCriterionFactory()) model_trainers = model_trainer_factory.create(model_trainer_configs) if not", "test_patch_size=dataset_configs[\"ABIDE\"].test_patch_size, test_step=dataset_configs[\"ABIDE\"].test_step, data_augmentation_config=data_augmentation_config) train_datasets.append(ABIDE_train) valid_datasets.append(ABIDE_valid) test_datasets.append(ABIDE_test) reconstruction_datasets.append(ABIDE_reconstruction) if len(list(dataset_configs.keys())) ==", "governing permissions and # limitations under the License. # ==============================================================================", "batch_size=50) if dataset_configs[\"iSEG\"].augment: augmented_input_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1,", "step=dataset_configs[\"iSEG\"].test_step, batch_size=50, is_ground_truth=True) if dataset_configs[\"iSEG\"].augment: augmented_input_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0],", "is_ground_truth=True) if dataset_configs[\"iSEG\"].augment: augmented_input_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0], ABIDE_reconstruction._source_images[0]], patch_size=(1,", "= YamlConfigurationParser.parse_section(args.config_file, \"data_augmentation\") config_html = [training_config.to_html(), list(map(lambda config: config.to_html(), dataset_configs.values())),", "reconstructed_image_size=(1, 256, 256, 192), 
step=dataset_configs[\"iSEG\"].test_step, is_ground_truth=True, batch_size=50) if dataset_configs[\"iSEG\"].augment: augmented_input_reconstructor", "trainer = TrainerFactory(training_config.trainer).create(training_config, model_trainers, dataloaders, reconstruction_datasets, None, input_reconstructor, segmentation_reconstructor, augmented_input_reconstructor,", "from kerosene.loggers.visdom.config import VisdomConfiguration from kerosene.loggers.visdom.visdom import VisdomLogger, VisdomData from", "data_augmentation_config=data_augmentation_config) train_datasets.append(ABIDE_train) valid_datasets.append(ABIDE_valid) test_datasets.append(ABIDE_test) reconstruction_datasets.append(ABIDE_reconstruction) if len(list(dataset_configs.keys())) == 2: segmentation_reconstructor", "snr=data_augmentation_config[\"test\"][\"noise\"][\"snr\"], prob_noise=data_augmentation_config[\"test\"][\"noise\"][\"prob_noise\"]) else: augmented_input_reconstructor = None augmented_normalized_input_reconstructor = None else:", "cudnn.enabled = True np.random.seed(42) random.seed(42) if __name__ == '__main__': #", "\"dataset\") dataset_configs = {k: DatasetConfiguration(v) for k, v, in dataset_configs.items()}", "else: augmented_input_reconstructor = None augmented_normalized_input_reconstructor = None else: segmentation_reconstructor =", "is not None else 0, len(MRBrainS_train) if MRBrainS_train is not", "32, 32), reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, batch_size=50, alpha=data_augmentation_config[\"test\"][\"bias_field\"][\"alpha\"][0], prob_bias=data_augmentation_config[\"test\"][\"bias_field\"][\"prob_bias\"],", "iSEG_reconstruction = iSEGSliceDatasetFactory.create_train_valid_test( source_dir=dataset_configs[\"iSEG\"].path, modalities=dataset_configs[\"iSEG\"].modalities, dataset_id=ISEG_ID, test_size=dataset_configs[\"iSEG\"].validation_split, max_subjects=dataset_configs[\"iSEG\"].max_subjects, 
max_num_patches=dataset_configs[\"iSEG\"].max_num_patches, augment=dataset_configs[\"iSEG\"].augment,", "if not isinstance(model_trainers, list): model_trainers = [model_trainers] # Create datasets", "in compliance with the License. # You may obtain a", "ABIDESliceDatasetFactory.create_train_valid_test( source_dir=dataset_configs[\"ABIDE\"].path, modalities=dataset_configs[\"ABIDE\"].modalities, dataset_id=ABIDE_ID, sites=dataset_configs[\"ABIDE\"].sites, max_subjects=dataset_configs[\"ABIDE\"].max_subjects, test_size=dataset_configs[\"ABIDE\"].validation_split, max_num_patches=dataset_configs[\"ABIDE\"].max_num_patches, augment=dataset_configs[\"ABIDE\"].augment, patch_size=dataset_configs[\"ABIDE\"].patch_size,", "augmented_normalized_input_reconstructor = None else: segmentation_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0], ABIDE_reconstruction._source_images[0]],", "software # distributed under the License is distributed on an", "config_html = [training_config.to_html(), list(map(lambda config: config.to_html(), dataset_configs.values())), list(map(lambda config: config.to_html(),", "reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, batch_size=50) gt_reconstructor = ImageReconstructor( [iSEG_reconstruction._target_images[0],", "> 1: train_dataset = torch.utils.data.ConcatDataset(train_datasets) valid_dataset = torch.utils.data.ConcatDataset(valid_datasets) test_dataset =", "ArgsParserFactory, ArgsParserType from deepNormalize.factories.customModelFactory import CustomModelFactory from deepNormalize.factories.customTrainerFactory import TrainerFactory", "config.to_html(), model_trainer_configs))] # Prepare the data. 
train_datasets = list() valid_datasets", "x=[len(iSEG_train) if iSEG_train is not None else 0, len(MRBrainS_train) if", "PlotFrequency.EVERY_EPOCH, None, config_html)) visdom_logger(VisdomData(\"Experiment\", \"Patch count\", PlotType.BAR_PLOT, PlotFrequency.EVERY_EPOCH, x=[len(iSEG_train) if", "None: save_folder = visdom_config.save_destination + os.path.join(exp[0], exp[1], os.path.basename( os.path.normpath(visdom_config.env))) else:", "augmented_input_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0], ABIDE_reconstruction._source_images[0]], patch_size=(1, 32, 32, 32),", "= TrainerFactory(training_config.trainer).create(training_config, model_trainers, dataloaders, reconstruction_datasets, None, input_reconstructor, segmentation_reconstructor, augmented_input_reconstructor, None,", "dataset_id=MRBRAINS_ID, test_size=dataset_configs[\"MRBrainS\"].validation_split, max_subjects=dataset_configs[\"MRBrainS\"].max_subjects, max_num_patches=dataset_configs[\"MRBrainS\"].max_num_patches, augment=dataset_configs[\"MRBrainS\"].augment, patch_size=dataset_configs[\"MRBrainS\"].patch_size, step=dataset_configs[\"MRBrainS\"].step, test_patch_size=dataset_configs[\"MRBrainS\"].test_patch_size, test_step=dataset_configs[\"MRBrainS\"].test_step, data_augmentation_config=data_augmentation_config)", "local_rank=args.local_rank, amp_opt_level=args.amp_opt_level) model_trainer_configs, training_config = YamlConfigurationParser.parse(args.config_file) if not isinstance(model_trainer_configs, list):", "256, 192), step=dataset_configs[\"iSEG\"].test_step, models=[model_trainers[0]], normalize_and_segment=True, batch_size=4) input_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0],", "\"Experiment Config\", PlotType.TEXT_PLOT, PlotFrequency.EVERY_EPOCH, None, config_html)) visdom_logger(VisdomData(\"Experiment\", \"Patch count\", PlotType.BAR_PLOT,", "samitorch.inputs.utils import 
augmented_sample_collate from torch.utils.data import DataLoader from torch.utils.data.dataloader import", "= None else: segmentation_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0], ABIDE_reconstruction._source_images[0]], patch_size=(1,", "= test_datasets[0] # Create loaders. dataloaders = list(map(lambda dataset: DataLoader(dataset,", "192), step=dataset_configs[\"iSEG\"].test_step, is_ground_truth=True, batch_size=50) if dataset_configs[\"iSEG\"].augment: augmented_input_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0],", "settings logging.basicConfig(level=logging.INFO) torch.set_num_threads(multiprocessing.cpu_count()) torch.set_num_interop_threads(multiprocessing.cpu_count()) args = ArgsParserFactory.create_parser(ArgsParserType.MODEL_TRAINING).parse_args() # Create configurations.", "= {k: DatasetConfiguration(v) for k, v, in dataset_configs.items()} data_augmentation_config =", "============================================================================== import logging import multiprocessing import numpy as np import", "VisdomData from kerosene.training.trainers import ModelTrainerFactory from samitorch.inputs.utils import augmented_sample_collate from", "is not None: ABIDE_train, ABIDE_valid, ABIDE_test, ABIDE_reconstruction = ABIDESliceDatasetFactory.create_train_valid_test( source_dir=dataset_configs[\"ABIDE\"].path,", "the loggers. visdom_config = VisdomConfiguration.from_yml(args.config_file, \"visdom\") exp = args.config_file.split(\"/\")[-3:] if", "deepNormalize.inputs.datasets import iSEGSliceDatasetFactory, MRBrainSSliceDatasetFactory, ABIDESliceDatasetFactory from deepNormalize.nn.criterions import CustomCriterionFactory from", "and # limitations under the License. # ============================================================================== import logging", "valid_dataset = valid_datasets[0] test_dataset = test_datasets[0] # Create loaders. 
dataloaders", "random import torch import torch.backends.cudnn as cudnn from kerosene.configs.configs import", "32, 32, 32), reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, batch_size=50, is_ground_truth=True)", "MRBrainS_CSV = None ABIDE_train = None ABIDE_CSV = None iSEG_augmentation_strategy", "= None ABIDE_CSV = None iSEG_augmentation_strategy = None MRBrainS_augmentation_strategy =", "batch_size=50) gt_reconstructor = ImageReconstructor( [iSEG_reconstruction._target_images[0], MRBrainS_reconstruction._target_images[0], ABIDE_reconstruction._target_images[0]], patch_size=(1, 32, 32,", "torch.utils.data.ConcatDataset(train_datasets) valid_dataset = torch.utils.data.ConcatDataset(valid_datasets) test_dataset = torch.utils.data.ConcatDataset(test_datasets) else: train_dataset =", "256, 192), step=dataset_configs[\"iSEG\"].test_step, batch_size=50) gt_reconstructor = ImageReconstructor( [iSEG_reconstruction._target_images[0], MRBrainS_reconstruction._target_images[0], ABIDE_reconstruction._target_images[0]],", "logging.basicConfig(level=logging.INFO) torch.set_num_threads(multiprocessing.cpu_count()) torch.set_num_interop_threads(multiprocessing.cpu_count()) args = ArgsParserFactory.create_parser(ArgsParserType.MODEL_TRAINING).parse_args() # Create configurations. 
run_config", "if dataset_configs.get(\"MRBrainS\", None) is not None: MRBrainS_train, MRBrainS_valid, MRBrainS_test, MRBrainS_reconstruction", "# # https://opensource.org/licenses/MIT # # Unless required by applicable law", "deepNormalize.nn.criterions import CustomCriterionFactory from deepNormalize.utils.constants import * from deepNormalize.utils.image_slicer import", "= model_trainer_factory.create(model_trainer_configs) if not isinstance(model_trainers, list): model_trainers = [model_trainers] #", "ABIDE_reconstruction._source_images[0]], patch_size=(1, 32, 32, 32), reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step,", "dataset_configs = {k: DatasetConfiguration(v) for k, v, in dataset_configs.items()} data_augmentation_config", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "ABIDE_reconstruction._target_images[0]], patch_size=(1, 32, 32, 32), reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step,", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "iSEG_train = None iSEG_CSV = None MRBrainS_train = None MRBrainS_CSV", "# # Licensed under the MIT License; # you may", "256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, batch_size=50) gt_reconstructor = ImageReconstructor( [iSEG_reconstruction._target_images[0], MRBrainS_reconstruction._target_images[0]],", "the License. 
# You may obtain a copy of the", "for the specific language governing permissions and # limitations under", "deepNormalize.factories.customModelFactory import CustomModelFactory from deepNormalize.factories.customTrainerFactory import TrainerFactory from deepNormalize.inputs.datasets import", "to in writing, software # distributed under the License is", "0], y=[\"iSEG\", \"MRBrainS\", \"ABIDE\"], params={\"opts\": {\"title\": \"Patch count\"}})) trainer =", "step=dataset_configs[\"iSEG\"].step, test_patch_size=dataset_configs[\"iSEG\"].test_patch_size, test_step=dataset_configs[\"iSEG\"].test_step, data_augmentation_config=data_augmentation_config) train_datasets.append(iSEG_train) valid_datasets.append(iSEG_valid) reconstruction_datasets.append(iSEG_reconstruction) if dataset_configs.get(\"MRBrainS\", None)", "DataLoader from torch.utils.data.dataloader import DataLoader from deepNormalize.config.parsers import ArgsParserFactory, ArgsParserType", "ImageReconstructor cudnn.benchmark = True cudnn.enabled = True np.random.seed(42) random.seed(42) if", "input_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0], ABIDE_reconstruction._source_images[0]], patch_size=(1, 32, 32, 32),", "ImageReconstructor( [iSEG_reconstruction._target_images[0], MRBrainS_reconstruction._target_images[0], ABIDE_reconstruction._target_images[0]], patch_size=(1, 32, 32, 32), reconstructed_image_size=(1, 256,", "# See the License for the specific language governing permissions", "np import os import random import torch import torch.backends.cudnn as", "datasets. if len(dataset_configs) > 1: train_dataset = torch.utils.data.ConcatDataset(train_datasets) valid_dataset =", "Licensed under the MIT License; # you may not use", "else: augmented_input_reconstructor = None augmented_normalized_input_reconstructor = None # Concat datasets.", "language governing permissions and # limitations under the License. 
#", "or agreed to in writing, software # distributed under the", "sampler=None, shuffle=True, num_workers=args.num_workers, collate_fn=augmented_sample_collate, drop_last=True, pin_memory=True), [train_dataset, valid_dataset, test_dataset])) #", "required by applicable law or agreed to in writing, software", "dataset_configs = YamlConfigurationParser.parse_section(args.config_file, \"dataset\") dataset_configs = {k: DatasetConfiguration(v) for k,", "segmentation_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0], ABIDE_reconstruction._source_images[0]], patch_size=(1, 32, 32, 32),", "__name__ == '__main__': # Basic settings logging.basicConfig(level=logging.INFO) torch.set_num_threads(multiprocessing.cpu_count()) torch.set_num_interop_threads(multiprocessing.cpu_count()) args", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "under the License. # ============================================================================== import logging import multiprocessing import", "YamlConfigurationParser.parse(args.config_file) if not isinstance(model_trainer_configs, list): model_trainer_configs = [model_trainer_configs] dataset_configs =", "= None # Initialize the model trainers model_trainer_factory = ModelTrainerFactory(model_factory=CustomModelFactory(),", "with the License. 
# You may obtain a copy of", "v, in dataset_configs.items()} data_augmentation_config = YamlConfigurationParser.parse_section(args.config_file, \"data_augmentation\") config_html = [training_config.to_html(),", "ArgsParserType from deepNormalize.factories.customModelFactory import CustomModelFactory from deepNormalize.factories.customTrainerFactory import TrainerFactory from", "MRBrainS_reconstruction._target_images[0], ABIDE_reconstruction._target_images[0]], patch_size=(1, 32, 32, 32), reconstructed_image_size=(1, 256, 256, 192),", "= True cudnn.enabled = True np.random.seed(42) random.seed(42) if __name__ ==", "None MRBrainS_CSV = None ABIDE_train = None ABIDE_CSV = None", "# Prepare the data. train_datasets = list() valid_datasets = list()", "normalize_and_segment=True, batch_size=4) input_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0], ABIDE_reconstruction._source_images[0]], patch_size=(1, 32,", "batch_size=4) input_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0], ABIDE_reconstruction._source_images[0]], patch_size=(1, 32, 32,", "dataloaders = list(map(lambda dataset: DataLoader(dataset, training_config.batch_size, sampler=None, shuffle=True, num_workers=args.num_workers, collate_fn=augmented_sample_collate,", "max_subjects=dataset_configs[\"iSEG\"].max_subjects, max_num_patches=dataset_configs[\"iSEG\"].max_num_patches, augment=dataset_configs[\"iSEG\"].augment, patch_size=dataset_configs[\"iSEG\"].patch_size, step=dataset_configs[\"iSEG\"].step, test_patch_size=dataset_configs[\"iSEG\"].test_patch_size, test_step=dataset_configs[\"iSEG\"].test_step, data_augmentation_config=data_augmentation_config) train_datasets.append(iSEG_train) valid_datasets.append(iSEG_valid)", "YamlConfigurationParser.parse_section(args.config_file, \"dataset\") dataset_configs = {k: DatasetConfiguration(v) for k, v, in", "np.random.seed(42) 
random.seed(42) if __name__ == '__main__': # Basic settings logging.basicConfig(level=logging.INFO)", "[model_trainer_configs] dataset_configs = YamlConfigurationParser.parse_section(args.config_file, \"dataset\") dataset_configs = {k: DatasetConfiguration(v) for", "from deepNormalize.nn.criterions import CustomCriterionFactory from deepNormalize.utils.constants import * from deepNormalize.utils.image_slicer", "not isinstance(model_trainers, list): model_trainers = [model_trainers] # Create datasets if", "License; # you may not use this file except in", "compliance with the License. # You may obtain a copy", "MRBrainS_reconstruction = MRBrainSSliceDatasetFactory.create_train_valid_test( source_dir=dataset_configs[\"MRBrainS\"].path, modalities=dataset_configs[\"MRBrainS\"].modalities, dataset_id=MRBRAINS_ID, test_size=dataset_configs[\"MRBrainS\"].validation_split, max_subjects=dataset_configs[\"MRBrainS\"].max_subjects, max_num_patches=dataset_configs[\"MRBrainS\"].max_num_patches, augment=dataset_configs[\"MRBrainS\"].augment,", "agreed to in writing, software # distributed under the License", "max_subjects=dataset_configs[\"ABIDE\"].max_subjects, test_size=dataset_configs[\"ABIDE\"].validation_split, max_num_patches=dataset_configs[\"ABIDE\"].max_num_patches, augment=dataset_configs[\"ABIDE\"].augment, patch_size=dataset_configs[\"ABIDE\"].patch_size, step=dataset_configs[\"ABIDE\"].step, test_patch_size=dataset_configs[\"ABIDE\"].test_patch_size, test_step=dataset_configs[\"ABIDE\"].test_step, data_augmentation_config=data_augmentation_config) train_datasets.append(ABIDE_train)", "reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, batch_size=50, alpha=data_augmentation_config[\"test\"][\"bias_field\"][\"alpha\"][0], prob_bias=data_augmentation_config[\"test\"][\"bias_field\"][\"prob_bias\"], snr=data_augmentation_config[\"test\"][\"noise\"][\"snr\"], prob_noise=data_augmentation_config[\"test\"][\"noise\"][\"prob_noise\"])", 
"distributed under the License is distributed on an \"AS IS\"", "else 0, len(MRBrainS_train) if MRBrainS_train is not None else 0,", "config_html)) visdom_logger(VisdomData(\"Experiment\", \"Patch count\", PlotType.BAR_PLOT, PlotFrequency.EVERY_EPOCH, x=[len(iSEG_train) if iSEG_train is", "test_step=dataset_configs[\"ABIDE\"].test_step, data_augmentation_config=data_augmentation_config) train_datasets.append(ABIDE_train) valid_datasets.append(ABIDE_valid) test_datasets.append(ABIDE_test) reconstruction_datasets.append(ABIDE_reconstruction) if len(list(dataset_configs.keys())) == 2:", "express or implied. # See the License for the specific", "except in compliance with the License. # You may obtain", "is not None else 0, len(ABIDE_train) if ABIDE_train is not", "# Create loaders. dataloaders = list(map(lambda dataset: DataLoader(dataset, training_config.batch_size, sampler=None,", "= ModelTrainerFactory(model_factory=CustomModelFactory(), criterion_factory=CustomCriterionFactory()) model_trainers = model_trainer_factory.create(model_trainer_configs) if not isinstance(model_trainers, list):", "valid_datasets = list() test_datasets = list() reconstruction_datasets = list() iSEG_train", "not use this file except in compliance with the License.", "PlotFrequency.EVERY_EPOCH, x=[len(iSEG_train) if iSEG_train is not None else 0, len(MRBrainS_train)", "-*- # Copyright 2019 <NAME>. All Rights Reserved. # #", "configurations. run_config = RunConfiguration(use_amp=args.use_amp, local_rank=args.local_rank, amp_opt_level=args.amp_opt_level) model_trainer_configs, training_config = YamlConfigurationParser.parse(args.config_file)", "deepNormalize.config.parsers import ArgsParserFactory, ArgsParserType from deepNormalize.factories.customModelFactory import CustomModelFactory from deepNormalize.factories.customTrainerFactory", "loggers. 
visdom_config = VisdomConfiguration.from_yml(args.config_file, \"visdom\") exp = args.config_file.split(\"/\")[-3:] if visdom_config.save_destination", "writing, software # distributed under the License is distributed on", "not None: ABIDE_train, ABIDE_valid, ABIDE_test, ABIDE_reconstruction = ABIDESliceDatasetFactory.create_train_valid_test( source_dir=dataset_configs[\"ABIDE\"].path, modalities=dataset_configs[\"ABIDE\"].modalities,", "you may not use this file except in compliance with", "\"ABIDE\"], params={\"opts\": {\"title\": \"Patch count\"}})) trainer = TrainerFactory(training_config.trainer).create(training_config, model_trainers, dataloaders,", "0, len(ABIDE_train) if ABIDE_train is not None else 0], y=[\"iSEG\",", "y=[\"iSEG\", \"MRBrainS\", \"ABIDE\"], params={\"opts\": {\"title\": \"Patch count\"}})) trainer = TrainerFactory(training_config.trainer).create(training_config,", "True cudnn.enabled = True np.random.seed(42) random.seed(42) if __name__ == '__main__':", "max_num_patches=dataset_configs[\"iSEG\"].max_num_patches, augment=dataset_configs[\"iSEG\"].augment, patch_size=dataset_configs[\"iSEG\"].patch_size, step=dataset_configs[\"iSEG\"].step, test_patch_size=dataset_configs[\"iSEG\"].test_patch_size, test_step=dataset_configs[\"iSEG\"].test_step, data_augmentation_config=data_augmentation_config) train_datasets.append(iSEG_train) valid_datasets.append(iSEG_valid) reconstruction_datasets.append(iSEG_reconstruction)", "in dataset_configs.items()} data_augmentation_config = YamlConfigurationParser.parse_section(args.config_file, \"data_augmentation\") config_html = [training_config.to_html(), list(map(lambda", "[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0], ABIDE_reconstruction._source_images[0]], patch_size=(1, 32, 32, 32), reconstructed_image_size=(1, 256, 256,", "* from deepNormalize.utils.image_slicer import ImageReconstructor cudnn.benchmark = True cudnn.enabled =", "CONDITIONS OF ANY KIND, either express 
or implied. # See", "test_dataset = test_datasets[0] # Create loaders. dataloaders = list(map(lambda dataset:", "= ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step,", "ABIDE_train = None ABIDE_CSV = None iSEG_augmentation_strategy = None MRBrainS_augmentation_strategy", "iSEG_test, iSEG_reconstruction = iSEGSliceDatasetFactory.create_train_valid_test( source_dir=dataset_configs[\"iSEG\"].path, modalities=dataset_configs[\"iSEG\"].modalities, dataset_id=ISEG_ID, test_size=dataset_configs[\"iSEG\"].validation_split, max_subjects=dataset_configs[\"iSEG\"].max_subjects, max_num_patches=dataset_configs[\"iSEG\"].max_num_patches,", "if iSEG_train is not None else 0, len(MRBrainS_train) if MRBrainS_train", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "# Licensed under the MIT License; # you may not", "list() reconstruction_datasets = list() iSEG_train = None iSEG_CSV = None", "config.to_html(), dataset_configs.values())), list(map(lambda config: config.to_html(), model_trainer_configs))] # Prepare the data.", "test_datasets[0] # Create loaders. dataloaders = list(map(lambda dataset: DataLoader(dataset, training_config.batch_size,", "params={\"opts\": {\"title\": \"Patch count\"}})) trainer = TrainerFactory(training_config.trainer).create(training_config, model_trainers, dataloaders, reconstruction_datasets,", "[model_trainers] # Create datasets if dataset_configs.get(\"iSEG\", None) is not None:", "Copyright 2019 <NAME>. All Rights Reserved. 
# # Licensed under", "test_patch_size=dataset_configs[\"iSEG\"].test_patch_size, test_step=dataset_configs[\"iSEG\"].test_step, data_augmentation_config=data_augmentation_config) train_datasets.append(iSEG_train) valid_datasets.append(iSEG_valid) reconstruction_datasets.append(iSEG_reconstruction) if dataset_configs.get(\"MRBrainS\", None) is", "1: train_dataset = torch.utils.data.ConcatDataset(train_datasets) valid_dataset = torch.utils.data.ConcatDataset(valid_datasets) test_dataset = torch.utils.data.ConcatDataset(test_datasets)", "32), reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, batch_size=50, is_ground_truth=True) if dataset_configs[\"iSEG\"].augment:", "amp_opt_level=args.amp_opt_level) model_trainer_configs, training_config = YamlConfigurationParser.parse(args.config_file) if not isinstance(model_trainer_configs, list): model_trainer_configs", "training_config = YamlConfigurationParser.parse(args.config_file) if not isinstance(model_trainer_configs, list): model_trainer_configs = [model_trainer_configs]", "augment=dataset_configs[\"ABIDE\"].augment, patch_size=dataset_configs[\"ABIDE\"].patch_size, step=dataset_configs[\"ABIDE\"].step, test_patch_size=dataset_configs[\"ABIDE\"].test_patch_size, test_step=dataset_configs[\"ABIDE\"].test_step, data_augmentation_config=data_augmentation_config) train_datasets.append(ABIDE_train) valid_datasets.append(ABIDE_valid) test_datasets.append(ABIDE_test) reconstruction_datasets.append(ABIDE_reconstruction)", "None: ABIDE_train, ABIDE_valid, ABIDE_test, ABIDE_reconstruction = ABIDESliceDatasetFactory.create_train_valid_test( source_dir=dataset_configs[\"ABIDE\"].path, modalities=dataset_configs[\"ABIDE\"].modalities, dataset_id=ABIDE_ID,", "pin_memory=True), [train_dataset, valid_dataset, test_dataset])) # Initialize the loggers. 
visdom_config =", "test_dataset = torch.utils.data.ConcatDataset(test_datasets) else: train_dataset = train_datasets[0] valid_dataset = valid_datasets[0]", "visdom_logger(VisdomData(\"Experiment\", \"Patch count\", PlotType.BAR_PLOT, PlotFrequency.EVERY_EPOCH, x=[len(iSEG_train) if iSEG_train is not", "import TrainerFactory from deepNormalize.inputs.datasets import iSEGSliceDatasetFactory, MRBrainSSliceDatasetFactory, ABIDESliceDatasetFactory from deepNormalize.nn.criterions", "ABIDE_valid, ABIDE_test, ABIDE_reconstruction = ABIDESliceDatasetFactory.create_train_valid_test( source_dir=dataset_configs[\"ABIDE\"].path, modalities=dataset_configs[\"ABIDE\"].modalities, dataset_id=ABIDE_ID, sites=dataset_configs[\"ABIDE\"].sites, max_subjects=dataset_configs[\"ABIDE\"].max_subjects,", "= visdom_config.save_destination + os.path.join(exp[0], exp[1], os.path.basename( os.path.normpath(visdom_config.env))) else: save_folder =", "if dataset_configs[\"iSEG\"].augment: augmented_input_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0], ABIDE_reconstruction._source_images[0]], patch_size=(1, 32,", "augmented_sample_collate from torch.utils.data import DataLoader from torch.utils.data.dataloader import DataLoader from", "# Copyright 2019 <NAME>. All Rights Reserved. # # Licensed", "training_config.batch_size, sampler=None, shuffle=True, num_workers=args.num_workers, collate_fn=augmented_sample_collate, drop_last=True, pin_memory=True), [train_dataset, valid_dataset, test_dataset]))", "import VisdomLogger, VisdomData from kerosene.training.trainers import ModelTrainerFactory from samitorch.inputs.utils import", "OR CONDITIONS OF ANY KIND, either express or implied. 
#", "torch import torch.backends.cudnn as cudnn from kerosene.configs.configs import RunConfiguration, DatasetConfiguration", "import iSEGSliceDatasetFactory, MRBrainSSliceDatasetFactory, ABIDESliceDatasetFactory from deepNormalize.nn.criterions import CustomCriterionFactory from deepNormalize.utils.constants", "step=dataset_configs[\"iSEG\"].test_step, batch_size=50) gt_reconstructor = ImageReconstructor( [iSEG_reconstruction._target_images[0], MRBrainS_reconstruction._target_images[0], ABIDE_reconstruction._target_images[0]], patch_size=(1, 32,", "the License is distributed on an \"AS IS\" BASIS, #", "= torch.utils.data.ConcatDataset(valid_datasets) test_dataset = torch.utils.data.ConcatDataset(test_datasets) else: train_dataset = train_datasets[0] valid_dataset", "import ImageReconstructor cudnn.benchmark = True cudnn.enabled = True np.random.seed(42) random.seed(42)", "batch_size=8) input_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1, 256, 256,", "is not None else 0], y=[\"iSEG\", \"MRBrainS\", \"ABIDE\"], params={\"opts\": {\"title\":", "list): model_trainers = [model_trainers] # Create datasets if dataset_configs.get(\"iSEG\", None)", "test_step=dataset_configs[\"MRBrainS\"].test_step, data_augmentation_config=data_augmentation_config) test_datasets.append(MRBrainS_test) reconstruction_datasets.append(MRBrainS_reconstruction) if dataset_configs.get(\"ABIDE\", None) is not None:", "= list() reconstruction_datasets = list() iSEG_train = None iSEG_CSV =", "None iSEG_CSV = None MRBrainS_train = None MRBrainS_CSV = None", "else: save_folder = \"saves/{}\".format(os.path.basename(os.path.normpath(visdom_config.env))) [os.makedirs(\"{}/{}\".format(save_folder, model), exist_ok=True) for model in", "import RunConfiguration, DatasetConfiguration from kerosene.configs.parsers import YamlConfigurationParser from 
kerosene.loggers.visdom import", "exp = args.config_file.split(\"/\")[-3:] if visdom_config.save_destination is not None: save_folder =", "segmentation_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1, 256, 256, 192),", "import random import torch import torch.backends.cudnn as cudnn from kerosene.configs.configs", "as cudnn from kerosene.configs.configs import RunConfiguration, DatasetConfiguration from kerosene.configs.parsers import", "if not isinstance(model_trainer_configs, list): model_trainer_configs = [model_trainer_configs] dataset_configs = YamlConfigurationParser.parse_section(args.config_file,", "License at # # https://opensource.org/licenses/MIT # # Unless required by", "import DataLoader from torch.utils.data.dataloader import DataLoader from deepNormalize.config.parsers import ArgsParserFactory,", "coding: utf-8 -*- # Copyright 2019 <NAME>. All Rights Reserved.", "= args.config_file.split(\"/\")[-3:] if visdom_config.save_destination is not None: save_folder = visdom_config.save_destination", "torch.set_num_interop_threads(multiprocessing.cpu_count()) args = ArgsParserFactory.create_parser(ArgsParserType.MODEL_TRAINING).parse_args() # Create configurations. run_config = RunConfiguration(use_amp=args.use_amp,", "32), reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, batch_size=50, alpha=data_augmentation_config[\"test\"][\"bias_field\"][\"alpha\"][0], prob_bias=data_augmentation_config[\"test\"][\"bias_field\"][\"prob_bias\"], snr=data_augmentation_config[\"test\"][\"noise\"][\"snr\"],", "else 0, len(ABIDE_train) if ABIDE_train is not None else 0],", "law or agreed to in writing, software # distributed under", "[train_dataset, valid_dataset, test_dataset])) # Initialize the loggers. 
visdom_config = VisdomConfiguration.from_yml(args.config_file,", "list(map(lambda config: config.to_html(), dataset_configs.values())), list(map(lambda config: config.to_html(), model_trainer_configs))] # Prepare", "reconstruction_datasets.append(ABIDE_reconstruction) if len(list(dataset_configs.keys())) == 2: segmentation_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]],", "visdom_config = VisdomConfiguration.from_yml(args.config_file, \"visdom\") exp = args.config_file.split(\"/\")[-3:] if visdom_config.save_destination is", "RunConfiguration(use_amp=args.use_amp, local_rank=args.local_rank, amp_opt_level=args.amp_opt_level) model_trainer_configs, training_config = YamlConfigurationParser.parse(args.config_file) if not isinstance(model_trainer_configs,", "num_workers=args.num_workers, collate_fn=augmented_sample_collate, drop_last=True, pin_memory=True), [train_dataset, valid_dataset, test_dataset])) # Initialize the", "train_datasets = list() valid_datasets = list() test_datasets = list() reconstruction_datasets", "MRBrainS_valid, MRBrainS_test, MRBrainS_reconstruction = MRBrainSSliceDatasetFactory.create_train_valid_test( source_dir=dataset_configs[\"MRBrainS\"].path, modalities=dataset_configs[\"MRBrainS\"].modalities, dataset_id=MRBRAINS_ID, test_size=dataset_configs[\"MRBrainS\"].validation_split, max_subjects=dataset_configs[\"MRBrainS\"].max_subjects,", "dataset_configs.items()} data_augmentation_config = YamlConfigurationParser.parse_section(args.config_file, \"data_augmentation\") config_html = [training_config.to_html(), list(map(lambda config:", "192), step=dataset_configs[\"iSEG\"].test_step, batch_size=50, alpha=data_augmentation_config[\"test\"][\"bias_field\"][\"alpha\"][0], prob_bias=data_augmentation_config[\"test\"][\"bias_field\"][\"prob_bias\"], snr=data_augmentation_config[\"test\"][\"noise\"][\"snr\"], 
prob_noise=data_augmentation_config[\"test\"][\"noise\"][\"prob_noise\"]) else: augmented_input_reconstructor =", "import augmented_sample_collate from torch.utils.data import DataLoader from torch.utils.data.dataloader import DataLoader", "at # # https://opensource.org/licenses/MIT # # Unless required by applicable", "Rights Reserved. # # Licensed under the MIT License; #", "import ArgsParserFactory, ArgsParserType from deepNormalize.factories.customModelFactory import CustomModelFactory from deepNormalize.factories.customTrainerFactory import", "[os.makedirs(\"{}/{}\".format(save_folder, model), exist_ok=True) for model in [\"Discriminator\", \"Generator\", \"Segmenter\"]] visdom_logger", "\"data_augmentation\") config_html = [training_config.to_html(), list(map(lambda config: config.to_html(), dataset_configs.values())), list(map(lambda config:", "test_dataset])) # Initialize the loggers. visdom_config = VisdomConfiguration.from_yml(args.config_file, \"visdom\") exp", "multiprocessing import numpy as np import os import random import", "limitations under the License. 
# ============================================================================== import logging import multiprocessing", "32, 32), reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, batch_size=50) gt_reconstructor =", "model trainers model_trainer_factory = ModelTrainerFactory(model_factory=CustomModelFactory(), criterion_factory=CustomCriterionFactory()) model_trainers = model_trainer_factory.create(model_trainer_configs) if", "may obtain a copy of the License at # #", "train_datasets.append(ABIDE_train) valid_datasets.append(ABIDE_valid) test_datasets.append(ABIDE_test) reconstruction_datasets.append(ABIDE_reconstruction) if len(list(dataset_configs.keys())) == 2: segmentation_reconstructor =", "192), step=dataset_configs[\"iSEG\"].test_step, batch_size=50) gt_reconstructor = ImageReconstructor( [iSEG_reconstruction._target_images[0], MRBrainS_reconstruction._target_images[0], ABIDE_reconstruction._target_images[0]], patch_size=(1,", "for model in [\"Discriminator\", \"Generator\", \"Segmenter\"]] visdom_logger = VisdomLogger(visdom_config) visdom_logger(VisdomData(\"Experiment\",", "if MRBrainS_train is not None else 0, len(ABIDE_train) if ABIDE_train", "None ABIDE_train = None ABIDE_CSV = None iSEG_augmentation_strategy = None", "exist_ok=True) for model in [\"Discriminator\", \"Generator\", \"Segmenter\"]] visdom_logger = VisdomLogger(visdom_config)", "= YamlConfigurationParser.parse(args.config_file) if not isinstance(model_trainer_configs, list): model_trainer_configs = [model_trainer_configs] dataset_configs", "= [model_trainers] # Create datasets if dataset_configs.get(\"iSEG\", None) is not", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "[training_config.to_html(), list(map(lambda config: config.to_html(), dataset_configs.values())), list(map(lambda config: config.to_html(), model_trainer_configs))] #", "model_trainer_factory.create(model_trainer_configs) if not isinstance(model_trainers, list): 
model_trainers = [model_trainers] # Create", "None else 0, len(MRBrainS_train) if MRBrainS_train is not None else", "a copy of the License at # # https://opensource.org/licenses/MIT #", "under the MIT License; # you may not use this", "\"Patch count\"}})) trainer = TrainerFactory(training_config.trainer).create(training_config, model_trainers, dataloaders, reconstruction_datasets, None, input_reconstructor,", "may not use this file except in compliance with the", "train_datasets.append(iSEG_train) valid_datasets.append(iSEG_valid) reconstruction_datasets.append(iSEG_reconstruction) if dataset_configs.get(\"MRBrainS\", None) is not None: MRBrainS_train,", "valid_dataset, test_dataset])) # Initialize the loggers. visdom_config = VisdomConfiguration.from_yml(args.config_file, \"visdom\")", "model_trainer_factory = ModelTrainerFactory(model_factory=CustomModelFactory(), criterion_factory=CustomCriterionFactory()) model_trainers = model_trainer_factory.create(model_trainer_configs) if not isinstance(model_trainers,", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "the License at # # https://opensource.org/licenses/MIT # # Unless required", "kerosene.configs.configs import RunConfiguration, DatasetConfiguration from kerosene.configs.parsers import YamlConfigurationParser from kerosene.loggers.visdom", "this file except in compliance with the License. 
# You", "cudnn.benchmark = True cudnn.enabled = True np.random.seed(42) random.seed(42) if __name__", "256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, batch_size=50, is_ground_truth=True) if dataset_configs[\"iSEG\"].augment: augmented_input_reconstructor =", "is_ground_truth=True, batch_size=50) if dataset_configs[\"iSEG\"].augment: augmented_input_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size,", "256, 192), step=dataset_configs[\"iSEG\"].test_step, batch_size=50) gt_reconstructor = ImageReconstructor( [iSEG_reconstruction._target_images[0], MRBrainS_reconstruction._target_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size,", "https://opensource.org/licenses/MIT # # Unless required by applicable law or agreed", "model_trainer_configs, training_config = YamlConfigurationParser.parse(args.config_file) if not isinstance(model_trainer_configs, list): model_trainer_configs =", "args = ArgsParserFactory.create_parser(ArgsParserType.MODEL_TRAINING).parse_args() # Create configurations. 
run_config = RunConfiguration(use_amp=args.use_amp, local_rank=args.local_rank,", "test_datasets = list() reconstruction_datasets = list() iSEG_train = None iSEG_CSV", "test_step=dataset_configs[\"iSEG\"].test_step, data_augmentation_config=data_augmentation_config) train_datasets.append(iSEG_train) valid_datasets.append(iSEG_valid) reconstruction_datasets.append(iSEG_reconstruction) if dataset_configs.get(\"MRBrainS\", None) is not", "192), step=dataset_configs[\"iSEG\"].test_step, models=[model_trainers[0]], segment=True, batch_size=8) input_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]],", "modalities=dataset_configs[\"iSEG\"].modalities, dataset_id=ISEG_ID, test_size=dataset_configs[\"iSEG\"].validation_split, max_subjects=dataset_configs[\"iSEG\"].max_subjects, max_num_patches=dataset_configs[\"iSEG\"].max_num_patches, augment=dataset_configs[\"iSEG\"].augment, patch_size=dataset_configs[\"iSEG\"].patch_size, step=dataset_configs[\"iSEG\"].step, test_patch_size=dataset_configs[\"iSEG\"].test_patch_size, test_step=dataset_configs[\"iSEG\"].test_step,", "dataset_configs.values())), list(map(lambda config: config.to_html(), model_trainer_configs))] # Prepare the data. train_datasets", "VisdomConfiguration.from_yml(args.config_file, \"visdom\") exp = args.config_file.split(\"/\")[-3:] if visdom_config.save_destination is not None:", "gt_reconstructor = ImageReconstructor( [iSEG_reconstruction._target_images[0], MRBrainS_reconstruction._target_images[0], ABIDE_reconstruction._target_images[0]], patch_size=(1, 32, 32, 32),", "from kerosene.loggers.visdom import PlotType, PlotFrequency from kerosene.loggers.visdom.config import VisdomConfiguration from", "file except in compliance with the License. 
# You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "augmented_input_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1, 256, 256, 192),", "32), reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, models=[model_trainers[0]], normalize_and_segment=True, batch_size=4) input_reconstructor", "= ImageReconstructor( [iSEG_reconstruction._target_images[0], MRBrainS_reconstruction._target_images[0], ABIDE_reconstruction._target_images[0]], patch_size=(1, 32, 32, 32), reconstructed_image_size=(1,", "loaders. dataloaders = list(map(lambda dataset: DataLoader(dataset, training_config.batch_size, sampler=None, shuffle=True, num_workers=args.num_workers,", "deepNormalize.utils.constants import * from deepNormalize.utils.image_slicer import ImageReconstructor cudnn.benchmark = True", "= [training_config.to_html(), list(map(lambda config: config.to_html(), dataset_configs.values())), list(map(lambda config: config.to_html(), model_trainer_configs))]", "\"saves/{}\".format(os.path.basename(os.path.normpath(visdom_config.env))) [os.makedirs(\"{}/{}\".format(save_folder, model), exist_ok=True) for model in [\"Discriminator\", \"Generator\", \"Segmenter\"]]", "[iSEG_reconstruction._target_images[0], MRBrainS_reconstruction._target_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, is_ground_truth=True, batch_size=50)", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "gt_reconstructor = ImageReconstructor( [iSEG_reconstruction._target_images[0], MRBrainS_reconstruction._target_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1, 256, 256, 192),", "patch_size=(1, 32, 32, 32), reconstructed_image_size=(1, 256, 256, 192), 
step=dataset_configs[\"iSEG\"].test_step, models=[model_trainers[0]],", "VisdomConfiguration from kerosene.loggers.visdom.visdom import VisdomLogger, VisdomData from kerosene.training.trainers import ModelTrainerFactory", "= ABIDESliceDatasetFactory.create_train_valid_test( source_dir=dataset_configs[\"ABIDE\"].path, modalities=dataset_configs[\"ABIDE\"].modalities, dataset_id=ABIDE_ID, sites=dataset_configs[\"ABIDE\"].sites, max_subjects=dataset_configs[\"ABIDE\"].max_subjects, test_size=dataset_configs[\"ABIDE\"].validation_split, max_num_patches=dataset_configs[\"ABIDE\"].max_num_patches, augment=dataset_configs[\"ABIDE\"].augment,", "ABIDE_CSV = None iSEG_augmentation_strategy = None MRBrainS_augmentation_strategy = None ABIDE_augmentation_strategy", "kerosene.training.trainers import ModelTrainerFactory from samitorch.inputs.utils import augmented_sample_collate from torch.utils.data import", "ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, batch_size=50)", "len(ABIDE_train) if ABIDE_train is not None else 0], y=[\"iSEG\", \"MRBrainS\",", "= YamlConfigurationParser.parse_section(args.config_file, \"dataset\") dataset_configs = {k: DatasetConfiguration(v) for k, v,", "import ModelTrainerFactory from samitorch.inputs.utils import augmented_sample_collate from torch.utils.data import DataLoader", "dataset_configs.get(\"MRBrainS\", None) is not None: MRBrainS_train, MRBrainS_valid, MRBrainS_test, MRBrainS_reconstruction =", "None ABIDE_augmentation_strategy = None # Initialize the model trainers model_trainer_factory", "sites=dataset_configs[\"ABIDE\"].sites, max_subjects=dataset_configs[\"ABIDE\"].max_subjects, test_size=dataset_configs[\"ABIDE\"].validation_split, max_num_patches=dataset_configs[\"ABIDE\"].max_num_patches, augment=dataset_configs[\"ABIDE\"].augment, 
patch_size=dataset_configs[\"ABIDE\"].patch_size, step=dataset_configs[\"ABIDE\"].step, test_patch_size=dataset_configs[\"ABIDE\"].test_patch_size, test_step=dataset_configs[\"ABIDE\"].test_step, data_augmentation_config=data_augmentation_config)", "TrainerFactory from deepNormalize.inputs.datasets import iSEGSliceDatasetFactory, MRBrainSSliceDatasetFactory, ABIDESliceDatasetFactory from deepNormalize.nn.criterions import", "torch.utils.data.ConcatDataset(valid_datasets) test_dataset = torch.utils.data.ConcatDataset(test_datasets) else: train_dataset = train_datasets[0] valid_dataset =", "MRBrainSSliceDatasetFactory.create_train_valid_test( source_dir=dataset_configs[\"MRBrainS\"].path, modalities=dataset_configs[\"MRBrainS\"].modalities, dataset_id=MRBRAINS_ID, test_size=dataset_configs[\"MRBrainS\"].validation_split, max_subjects=dataset_configs[\"MRBrainS\"].max_subjects, max_num_patches=dataset_configs[\"MRBrainS\"].max_num_patches, augment=dataset_configs[\"MRBrainS\"].augment, patch_size=dataset_configs[\"MRBrainS\"].patch_size, step=dataset_configs[\"MRBrainS\"].step,", "None augmented_normalized_input_reconstructor = None # Concat datasets. 
if len(dataset_configs) >", "os.path.basename( os.path.normpath(visdom_config.env))) else: save_folder = \"saves/{}\".format(os.path.basename(os.path.normpath(visdom_config.env))) [os.makedirs(\"{}/{}\".format(save_folder, model), exist_ok=True) for", "if __name__ == '__main__': # Basic settings logging.basicConfig(level=logging.INFO) torch.set_num_threads(multiprocessing.cpu_count()) torch.set_num_interop_threads(multiprocessing.cpu_count())", "patch_size=dataset_configs[\"ABIDE\"].patch_size, step=dataset_configs[\"ABIDE\"].step, test_patch_size=dataset_configs[\"ABIDE\"].test_patch_size, test_step=dataset_configs[\"ABIDE\"].test_step, data_augmentation_config=data_augmentation_config) train_datasets.append(ABIDE_train) valid_datasets.append(ABIDE_valid) test_datasets.append(ABIDE_test) reconstruction_datasets.append(ABIDE_reconstruction) if", "[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, batch_size=50) gt_reconstructor", "ArgsParserFactory.create_parser(ArgsParserType.MODEL_TRAINING).parse_args() # Create configurations. run_config = RunConfiguration(use_amp=args.use_amp, local_rank=args.local_rank, amp_opt_level=args.amp_opt_level) model_trainer_configs,", "DataLoader(dataset, training_config.batch_size, sampler=None, shuffle=True, num_workers=args.num_workers, collate_fn=augmented_sample_collate, drop_last=True, pin_memory=True), [train_dataset, valid_dataset,", "PlotFrequency from kerosene.loggers.visdom.config import VisdomConfiguration from kerosene.loggers.visdom.visdom import VisdomLogger, VisdomData", "Create loaders. 
dataloaders = list(map(lambda dataset: DataLoader(dataset, training_config.batch_size, sampler=None, shuffle=True,", "visdom_config.save_destination is not None: save_folder = visdom_config.save_destination + os.path.join(exp[0], exp[1],", "ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0], ABIDE_reconstruction._source_images[0]], patch_size=(1, 32, 32, 32), reconstructed_image_size=(1, 256,", "# https://opensource.org/licenses/MIT # # Unless required by applicable law or", "= torch.utils.data.ConcatDataset(train_datasets) valid_dataset = torch.utils.data.ConcatDataset(valid_datasets) test_dataset = torch.utils.data.ConcatDataset(test_datasets) else: train_dataset", "2019 <NAME>. All Rights Reserved. # # Licensed under the", "or implied. # See the License for the specific language", "source_dir=dataset_configs[\"ABIDE\"].path, modalities=dataset_configs[\"ABIDE\"].modalities, dataset_id=ABIDE_ID, sites=dataset_configs[\"ABIDE\"].sites, max_subjects=dataset_configs[\"ABIDE\"].max_subjects, test_size=dataset_configs[\"ABIDE\"].validation_split, max_num_patches=dataset_configs[\"ABIDE\"].max_num_patches, augment=dataset_configs[\"ABIDE\"].augment, patch_size=dataset_configs[\"ABIDE\"].patch_size, step=dataset_configs[\"ABIDE\"].step,", "train_dataset = train_datasets[0] valid_dataset = valid_datasets[0] test_dataset = test_datasets[0] #", "import torch.backends.cudnn as cudnn from kerosene.configs.configs import RunConfiguration, DatasetConfiguration from", "KIND, either express or implied. # See the License for", "<NAME>. All Rights Reserved. 
# # Licensed under the MIT", "specific language governing permissions and # limitations under the License.", "= None MRBrainS_CSV = None ABIDE_train = None ABIDE_CSV =", "test_patch_size=dataset_configs[\"MRBrainS\"].test_patch_size, test_step=dataset_configs[\"MRBrainS\"].test_step, data_augmentation_config=data_augmentation_config) test_datasets.append(MRBrainS_test) reconstruction_datasets.append(MRBrainS_reconstruction) if dataset_configs.get(\"ABIDE\", None) is not", "len(dataset_configs) > 1: train_dataset = torch.utils.data.ConcatDataset(train_datasets) valid_dataset = torch.utils.data.ConcatDataset(valid_datasets) test_dataset", "segment=True, batch_size=8) input_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1, 256,", "torch.set_num_threads(multiprocessing.cpu_count()) torch.set_num_interop_threads(multiprocessing.cpu_count()) args = ArgsParserFactory.create_parser(ArgsParserType.MODEL_TRAINING).parse_args() # Create configurations. 
run_config =", "step=dataset_configs[\"iSEG\"].test_step, is_ground_truth=True, batch_size=50) if dataset_configs[\"iSEG\"].augment: augmented_input_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]],", "dataset: DataLoader(dataset, training_config.batch_size, sampler=None, shuffle=True, num_workers=args.num_workers, collate_fn=augmented_sample_collate, drop_last=True, pin_memory=True), [train_dataset,", "torch.backends.cudnn as cudnn from kerosene.configs.configs import RunConfiguration, DatasetConfiguration from kerosene.configs.parsers", "192), step=dataset_configs[\"iSEG\"].test_step, batch_size=50, is_ground_truth=True) if dataset_configs[\"iSEG\"].augment: augmented_input_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0],", "MRBrainS_reconstruction._source_images[0], ABIDE_reconstruction._source_images[0]], patch_size=(1, 32, 32, 32), reconstructed_image_size=(1, 256, 256, 192),", "[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, models=[model_trainers[0]], segment=True,", "criterion_factory=CustomCriterionFactory()) model_trainers = model_trainer_factory.create(model_trainer_configs) if not isinstance(model_trainers, list): model_trainers =", "step=dataset_configs[\"iSEG\"].test_step, batch_size=50) gt_reconstructor = ImageReconstructor( [iSEG_reconstruction._target_images[0], MRBrainS_reconstruction._target_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1, 256,", "None augmented_normalized_input_reconstructor = None else: segmentation_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0],", "= list(map(lambda dataset: DataLoader(dataset, training_config.batch_size, sampler=None, 
shuffle=True, num_workers=args.num_workers, collate_fn=augmented_sample_collate, drop_last=True,", "isinstance(model_trainer_configs, list): model_trainer_configs = [model_trainer_configs] dataset_configs = YamlConfigurationParser.parse_section(args.config_file, \"dataset\") dataset_configs", "input_reconstructor, segmentation_reconstructor, augmented_input_reconstructor, None, gt_reconstructor, run_config, dataset_configs, save_folder, visdom_logger) trainer.train(training_config.nb_epochs)", "permissions and # limitations under the License. # ============================================================================== import", "32, 32), reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, models=[model_trainers[0]], normalize_and_segment=True, batch_size=4)", "batch_size=50) gt_reconstructor = ImageReconstructor( [iSEG_reconstruction._target_images[0], MRBrainS_reconstruction._target_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1, 256, 256,", "datasets if dataset_configs.get(\"iSEG\", None) is not None: iSEG_train, iSEG_valid, iSEG_test,", "= ImageReconstructor( [iSEG_reconstruction._target_images[0], MRBrainS_reconstruction._target_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step,", "Concat datasets. 
if len(dataset_configs) > 1: train_dataset = torch.utils.data.ConcatDataset(train_datasets) valid_dataset", "if len(list(dataset_configs.keys())) == 2: segmentation_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size,", "reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, models=[model_trainers[0]], segment=True, batch_size=8) input_reconstructor =", "# you may not use this file except in compliance", "= None augmented_normalized_input_reconstructor = None # Concat datasets. if len(dataset_configs)", "data_augmentation_config=data_augmentation_config) test_datasets.append(MRBrainS_test) reconstruction_datasets.append(MRBrainS_reconstruction) if dataset_configs.get(\"ABIDE\", None) is not None: ABIDE_train,", "model in [\"Discriminator\", \"Generator\", \"Segmenter\"]] visdom_logger = VisdomLogger(visdom_config) visdom_logger(VisdomData(\"Experiment\", \"Experiment", "[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, batch_size=50, alpha=data_augmentation_config[\"test\"][\"bias_field\"][\"alpha\"][0],", "the model trainers model_trainer_factory = ModelTrainerFactory(model_factory=CustomModelFactory(), criterion_factory=CustomCriterionFactory()) model_trainers = model_trainer_factory.create(model_trainer_configs)", "[iSEG_reconstruction._target_images[0], MRBrainS_reconstruction._target_images[0], ABIDE_reconstruction._target_images[0]], patch_size=(1, 32, 32, 32), reconstructed_image_size=(1, 256, 256,", "YamlConfigurationParser from kerosene.loggers.visdom import PlotType, PlotFrequency from kerosene.loggers.visdom.config import VisdomConfiguration", "+ os.path.join(exp[0], exp[1], os.path.basename( os.path.normpath(visdom_config.env))) 
else: save_folder = \"saves/{}\".format(os.path.basename(os.path.normpath(visdom_config.env))) [os.makedirs(\"{}/{}\".format(save_folder,", "dataloaders, reconstruction_datasets, None, input_reconstructor, segmentation_reconstructor, augmented_input_reconstructor, None, gt_reconstructor, run_config, dataset_configs,", "None) is not None: MRBrainS_train, MRBrainS_valid, MRBrainS_test, MRBrainS_reconstruction = MRBrainSSliceDatasetFactory.create_train_valid_test(", "Reserved. # # Licensed under the MIT License; # you", "shuffle=True, num_workers=args.num_workers, collate_fn=augmented_sample_collate, drop_last=True, pin_memory=True), [train_dataset, valid_dataset, test_dataset])) # Initialize", "# # Unless required by applicable law or agreed to", "else: train_dataset = train_datasets[0] valid_dataset = valid_datasets[0] test_dataset = test_datasets[0]", "# Initialize the model trainers model_trainer_factory = ModelTrainerFactory(model_factory=CustomModelFactory(), criterion_factory=CustomCriterionFactory()) model_trainers", "{\"title\": \"Patch count\"}})) trainer = TrainerFactory(training_config.trainer).create(training_config, model_trainers, dataloaders, reconstruction_datasets, None,", "modalities=dataset_configs[\"ABIDE\"].modalities, dataset_id=ABIDE_ID, sites=dataset_configs[\"ABIDE\"].sites, max_subjects=dataset_configs[\"ABIDE\"].max_subjects, test_size=dataset_configs[\"ABIDE\"].validation_split, max_num_patches=dataset_configs[\"ABIDE\"].max_num_patches, augment=dataset_configs[\"ABIDE\"].augment, patch_size=dataset_configs[\"ABIDE\"].patch_size, step=dataset_configs[\"ABIDE\"].step, test_patch_size=dataset_configs[\"ABIDE\"].test_patch_size,", "from deepNormalize.inputs.datasets import iSEGSliceDatasetFactory, MRBrainSSliceDatasetFactory, ABIDESliceDatasetFactory from deepNormalize.nn.criterions import CustomCriterionFactory", "prob_bias=data_augmentation_config[\"test\"][\"bias_field\"][\"prob_bias\"], 
snr=data_augmentation_config[\"test\"][\"noise\"][\"snr\"], prob_noise=data_augmentation_config[\"test\"][\"noise\"][\"prob_noise\"]) else: augmented_input_reconstructor = None augmented_normalized_input_reconstructor = None", "step=dataset_configs[\"iSEG\"].test_step, models=[model_trainers[0]], normalize_and_segment=True, batch_size=4) input_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0], ABIDE_reconstruction._source_images[0]],", "= RunConfiguration(use_amp=args.use_amp, local_rank=args.local_rank, amp_opt_level=args.amp_opt_level) model_trainer_configs, training_config = YamlConfigurationParser.parse(args.config_file) if not", "256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, batch_size=50) gt_reconstructor = ImageReconstructor( [iSEG_reconstruction._target_images[0], MRBrainS_reconstruction._target_images[0],", "data. train_datasets = list() valid_datasets = list() test_datasets = list()", "import os import random import torch import torch.backends.cudnn as cudnn", "ModelTrainerFactory from samitorch.inputs.utils import augmented_sample_collate from torch.utils.data import DataLoader from", "cudnn from kerosene.configs.configs import RunConfiguration, DatasetConfiguration from kerosene.configs.parsers import YamlConfigurationParser", "= VisdomConfiguration.from_yml(args.config_file, \"visdom\") exp = args.config_file.split(\"/\")[-3:] if visdom_config.save_destination is not", "reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, batch_size=50, is_ground_truth=True) if dataset_configs[\"iSEG\"].augment: augmented_input_reconstructor", "None iSEG_augmentation_strategy = None MRBrainS_augmentation_strategy = None ABIDE_augmentation_strategy = None", "the MIT License; # you may not use this file", "= \"saves/{}\".format(os.path.basename(os.path.normpath(visdom_config.env))) [os.makedirs(\"{}/{}\".format(save_folder, model), exist_ok=True) for model in 
[\"Discriminator\", \"Generator\",", "# Basic settings logging.basicConfig(level=logging.INFO) torch.set_num_threads(multiprocessing.cpu_count()) torch.set_num_interop_threads(multiprocessing.cpu_count()) args = ArgsParserFactory.create_parser(ArgsParserType.MODEL_TRAINING).parse_args() #", "= None # Concat datasets. if len(dataset_configs) > 1: train_dataset", "augment=dataset_configs[\"iSEG\"].augment, patch_size=dataset_configs[\"iSEG\"].patch_size, step=dataset_configs[\"iSEG\"].step, test_patch_size=dataset_configs[\"iSEG\"].test_patch_size, test_step=dataset_configs[\"iSEG\"].test_step, data_augmentation_config=data_augmentation_config) train_datasets.append(iSEG_train) valid_datasets.append(iSEG_valid) reconstruction_datasets.append(iSEG_reconstruction) if", "exp[1], os.path.basename( os.path.normpath(visdom_config.env))) else: save_folder = \"saves/{}\".format(os.path.basename(os.path.normpath(visdom_config.env))) [os.makedirs(\"{}/{}\".format(save_folder, model), exist_ok=True)", "# Create configurations. run_config = RunConfiguration(use_amp=args.use_amp, local_rank=args.local_rank, amp_opt_level=args.amp_opt_level) model_trainer_configs, training_config", "list() test_datasets = list() reconstruction_datasets = list() iSEG_train = None", "implied. # See the License for the specific language governing", "list(map(lambda config: config.to_html(), model_trainer_configs))] # Prepare the data. train_datasets =", "from kerosene.training.trainers import ModelTrainerFactory from samitorch.inputs.utils import augmented_sample_collate from torch.utils.data", "from samitorch.inputs.utils import augmented_sample_collate from torch.utils.data import DataLoader from torch.utils.data.dataloader", "Create configurations. 
run_config = RunConfiguration(use_amp=args.use_amp, local_rank=args.local_rank, amp_opt_level=args.amp_opt_level) model_trainer_configs, training_config =", "DataLoader from deepNormalize.config.parsers import ArgsParserFactory, ArgsParserType from deepNormalize.factories.customModelFactory import CustomModelFactory", "None: MRBrainS_train, MRBrainS_valid, MRBrainS_test, MRBrainS_reconstruction = MRBrainSSliceDatasetFactory.create_train_valid_test( source_dir=dataset_configs[\"MRBrainS\"].path, modalities=dataset_configs[\"MRBrainS\"].modalities, dataset_id=MRBRAINS_ID,", "reconstruction_datasets, None, input_reconstructor, segmentation_reconstructor, augmented_input_reconstructor, None, gt_reconstructor, run_config, dataset_configs, save_folder,", "None) is not None: iSEG_train, iSEG_valid, iSEG_test, iSEG_reconstruction = iSEGSliceDatasetFactory.create_train_valid_test(", "= torch.utils.data.ConcatDataset(test_datasets) else: train_dataset = train_datasets[0] valid_dataset = valid_datasets[0] test_dataset", "by applicable law or agreed to in writing, software #", "logging import multiprocessing import numpy as np import os import", "not None: save_folder = visdom_config.save_destination + os.path.join(exp[0], exp[1], os.path.basename( os.path.normpath(visdom_config.env)))", "MRBrainS_reconstruction._source_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, models=[model_trainers[0]], segment=True, batch_size=8)", "YamlConfigurationParser.parse_section(args.config_file, \"data_augmentation\") config_html = [training_config.to_html(), list(map(lambda config: config.to_html(), dataset_configs.values())), list(map(lambda", "random.seed(42) if __name__ == '__main__': # Basic settings logging.basicConfig(level=logging.INFO) torch.set_num_threads(multiprocessing.cpu_count())", "if dataset_configs.get(\"iSEG\", None) is not None: iSEG_train, iSEG_valid, iSEG_test, 
iSEG_reconstruction", "step=dataset_configs[\"MRBrainS\"].step, test_patch_size=dataset_configs[\"MRBrainS\"].test_patch_size, test_step=dataset_configs[\"MRBrainS\"].test_step, data_augmentation_config=data_augmentation_config) test_datasets.append(MRBrainS_test) reconstruction_datasets.append(MRBrainS_reconstruction) if dataset_configs.get(\"ABIDE\", None) is", "test_datasets.append(MRBrainS_test) reconstruction_datasets.append(MRBrainS_reconstruction) if dataset_configs.get(\"ABIDE\", None) is not None: ABIDE_train, ABIDE_valid,", "not None else 0], y=[\"iSEG\", \"MRBrainS\", \"ABIDE\"], params={\"opts\": {\"title\": \"Patch", "'__main__': # Basic settings logging.basicConfig(level=logging.INFO) torch.set_num_threads(multiprocessing.cpu_count()) torch.set_num_interop_threads(multiprocessing.cpu_count()) args = ArgsParserFactory.create_parser(ArgsParserType.MODEL_TRAINING).parse_args()", "if visdom_config.save_destination is not None: save_folder = visdom_config.save_destination + os.path.join(exp[0],", "max_num_patches=dataset_configs[\"MRBrainS\"].max_num_patches, augment=dataset_configs[\"MRBrainS\"].augment, patch_size=dataset_configs[\"MRBrainS\"].patch_size, step=dataset_configs[\"MRBrainS\"].step, test_patch_size=dataset_configs[\"MRBrainS\"].test_patch_size, test_step=dataset_configs[\"MRBrainS\"].test_step, data_augmentation_config=data_augmentation_config) test_datasets.append(MRBrainS_test) reconstruction_datasets.append(MRBrainS_reconstruction) if", "from kerosene.configs.parsers import YamlConfigurationParser from kerosene.loggers.visdom import PlotType, PlotFrequency from", "= True np.random.seed(42) random.seed(42) if __name__ == '__main__': # Basic", "True np.random.seed(42) random.seed(42) if __name__ == '__main__': # Basic settings", "test_size=dataset_configs[\"MRBrainS\"].validation_split, max_subjects=dataset_configs[\"MRBrainS\"].max_subjects, max_num_patches=dataset_configs[\"MRBrainS\"].max_num_patches, 
augment=dataset_configs[\"MRBrainS\"].augment, patch_size=dataset_configs[\"MRBrainS\"].patch_size, step=dataset_configs[\"MRBrainS\"].step, test_patch_size=dataset_configs[\"MRBrainS\"].test_patch_size, test_step=dataset_configs[\"MRBrainS\"].test_step, data_augmentation_config=data_augmentation_config) test_datasets.append(MRBrainS_test)", "\"Patch count\", PlotType.BAR_PLOT, PlotFrequency.EVERY_EPOCH, x=[len(iSEG_train) if iSEG_train is not None", "256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, models=[model_trainers[0]], normalize_and_segment=True, batch_size=4) input_reconstructor = ImageReconstructor(", "{k: DatasetConfiguration(v) for k, v, in dataset_configs.items()} data_augmentation_config = YamlConfigurationParser.parse_section(args.config_file,", "\"MRBrainS\", \"ABIDE\"], params={\"opts\": {\"title\": \"Patch count\"}})) trainer = TrainerFactory(training_config.trainer).create(training_config, model_trainers,", "snr=data_augmentation_config[\"test\"][\"noise\"][\"snr\"], prob_noise=data_augmentation_config[\"test\"][\"noise\"][\"prob_noise\"]) else: augmented_input_reconstructor = None augmented_normalized_input_reconstructor = None #", "max_subjects=dataset_configs[\"MRBrainS\"].max_subjects, max_num_patches=dataset_configs[\"MRBrainS\"].max_num_patches, augment=dataset_configs[\"MRBrainS\"].augment, patch_size=dataset_configs[\"MRBrainS\"].patch_size, step=dataset_configs[\"MRBrainS\"].step, test_patch_size=dataset_configs[\"MRBrainS\"].test_patch_size, test_step=dataset_configs[\"MRBrainS\"].test_step, data_augmentation_config=data_augmentation_config) test_datasets.append(MRBrainS_test) reconstruction_datasets.append(MRBrainS_reconstruction)", "32, 32), reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, batch_size=50, is_ground_truth=True) if", "data_augmentation_config=data_augmentation_config) train_datasets.append(iSEG_train) valid_datasets.append(iSEG_valid) 
reconstruction_datasets.append(iSEG_reconstruction) if dataset_configs.get(\"MRBrainS\", None) is not None:", "32, 32, 32), reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, batch_size=50, alpha=data_augmentation_config[\"test\"][\"bias_field\"][\"alpha\"][0],", "augmented_normalized_input_reconstructor = None # Concat datasets. if len(dataset_configs) > 1:", "as np import os import random import torch import torch.backends.cudnn", "else 0], y=[\"iSEG\", \"MRBrainS\", \"ABIDE\"], params={\"opts\": {\"title\": \"Patch count\"}})) trainer", "None) is not None: ABIDE_train, ABIDE_valid, ABIDE_test, ABIDE_reconstruction = ABIDESliceDatasetFactory.create_train_valid_test(", "PlotType.BAR_PLOT, PlotFrequency.EVERY_EPOCH, x=[len(iSEG_train) if iSEG_train is not None else 0,", "None MRBrainS_augmentation_strategy = None ABIDE_augmentation_strategy = None # Initialize the", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "import multiprocessing import numpy as np import os import random", "Unless required by applicable law or agreed to in writing,", "0, len(MRBrainS_train) if MRBrainS_train is not None else 0, len(ABIDE_train)", "None MRBrainS_train = None MRBrainS_CSV = None ABIDE_train = None", "patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, is_ground_truth=True, batch_size=50) if dataset_configs[\"iSEG\"].augment:", "\"Segmenter\"]] visdom_logger = VisdomLogger(visdom_config) visdom_logger(VisdomData(\"Experiment\", \"Experiment Config\", PlotType.TEXT_PLOT, PlotFrequency.EVERY_EPOCH, None,", "DatasetConfiguration(v) for k, v, in dataset_configs.items()} data_augmentation_config = YamlConfigurationParser.parse_section(args.config_file, \"data_augmentation\")", "Create datasets if dataset_configs.get(\"iSEG\", None) is not None: iSEG_train, iSEG_valid,", "augment=dataset_configs[\"MRBrainS\"].augment, 
patch_size=dataset_configs[\"MRBrainS\"].patch_size, step=dataset_configs[\"MRBrainS\"].step, test_patch_size=dataset_configs[\"MRBrainS\"].test_patch_size, test_step=dataset_configs[\"MRBrainS\"].test_step, data_augmentation_config=data_augmentation_config) test_datasets.append(MRBrainS_test) reconstruction_datasets.append(MRBrainS_reconstruction) if dataset_configs.get(\"ABIDE\",", "dataset_id=ABIDE_ID, sites=dataset_configs[\"ABIDE\"].sites, max_subjects=dataset_configs[\"ABIDE\"].max_subjects, test_size=dataset_configs[\"ABIDE\"].validation_split, max_num_patches=dataset_configs[\"ABIDE\"].max_num_patches, augment=dataset_configs[\"ABIDE\"].augment, patch_size=dataset_configs[\"ABIDE\"].patch_size, step=dataset_configs[\"ABIDE\"].step, test_patch_size=dataset_configs[\"ABIDE\"].test_patch_size, test_step=dataset_configs[\"ABIDE\"].test_step,", "MRBrainS_augmentation_strategy = None ABIDE_augmentation_strategy = None # Initialize the model", "from deepNormalize.utils.image_slicer import ImageReconstructor cudnn.benchmark = True cudnn.enabled = True", "= ArgsParserFactory.create_parser(ArgsParserType.MODEL_TRAINING).parse_args() # Create configurations. 
run_config = RunConfiguration(use_amp=args.use_amp, local_rank=args.local_rank, amp_opt_level=args.amp_opt_level)", "256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, models=[model_trainers[0]], segment=True, batch_size=8) input_reconstructor = ImageReconstructor(", "RunConfiguration, DatasetConfiguration from kerosene.configs.parsers import YamlConfigurationParser from kerosene.loggers.visdom import PlotType,", "the specific language governing permissions and # limitations under the", "else: segmentation_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0], ABIDE_reconstruction._source_images[0]], patch_size=(1, 32, 32,", "is not None: save_folder = visdom_config.save_destination + os.path.join(exp[0], exp[1], os.path.basename(", "dataset_configs[\"iSEG\"].augment: augmented_input_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1, 256, 256,", "applicable law or agreed to in writing, software # distributed", "None, config_html)) visdom_logger(VisdomData(\"Experiment\", \"Patch count\", PlotType.BAR_PLOT, PlotFrequency.EVERY_EPOCH, x=[len(iSEG_train) if iSEG_train", "obtain a copy of the License at # # https://opensource.org/licenses/MIT", "MRBrainS_train is not None else 0, len(ABIDE_train) if ABIDE_train is", "list() valid_datasets = list() test_datasets = list() reconstruction_datasets = list()", "= None iSEG_augmentation_strategy = None MRBrainS_augmentation_strategy = None ABIDE_augmentation_strategy =", "# limitations under the License. # ============================================================================== import logging import", "-*- coding: utf-8 -*- # Copyright 2019 <NAME>. All Rights", "augmented_input_reconstructor = None augmented_normalized_input_reconstructor = None # Concat datasets. 
if", "kerosene.loggers.visdom.visdom import VisdomLogger, VisdomData from kerosene.training.trainers import ModelTrainerFactory from samitorch.inputs.utils", "= None ABIDE_augmentation_strategy = None # Initialize the model trainers", "augmented_input_reconstructor = None augmented_normalized_input_reconstructor = None else: segmentation_reconstructor = ImageReconstructor(", "in writing, software # distributed under the License is distributed", "None else: segmentation_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0], ABIDE_reconstruction._source_images[0]], patch_size=(1, 32,", "visdom_logger(VisdomData(\"Experiment\", \"Experiment Config\", PlotType.TEXT_PLOT, PlotFrequency.EVERY_EPOCH, None, config_html)) visdom_logger(VisdomData(\"Experiment\", \"Patch count\",", "not isinstance(model_trainer_configs, list): model_trainer_configs = [model_trainer_configs] dataset_configs = YamlConfigurationParser.parse_section(args.config_file, \"dataset\")", "None # Initialize the model trainers model_trainer_factory = ModelTrainerFactory(model_factory=CustomModelFactory(), criterion_factory=CustomCriterionFactory())", "ModelTrainerFactory(model_factory=CustomModelFactory(), criterion_factory=CustomCriterionFactory()) model_trainers = model_trainer_factory.create(model_trainer_configs) if not isinstance(model_trainers, list): model_trainers", "MRBrainS_train, MRBrainS_valid, MRBrainS_test, MRBrainS_reconstruction = MRBrainSSliceDatasetFactory.create_train_valid_test( source_dir=dataset_configs[\"MRBrainS\"].path, modalities=dataset_configs[\"MRBrainS\"].modalities, dataset_id=MRBRAINS_ID, test_size=dataset_configs[\"MRBrainS\"].validation_split,", "== 2: segmentation_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1, 256,", "import VisdomConfiguration from 
kerosene.loggers.visdom.visdom import VisdomLogger, VisdomData from kerosene.training.trainers import", "modalities=dataset_configs[\"MRBrainS\"].modalities, dataset_id=MRBRAINS_ID, test_size=dataset_configs[\"MRBrainS\"].validation_split, max_subjects=dataset_configs[\"MRBrainS\"].max_subjects, max_num_patches=dataset_configs[\"MRBrainS\"].max_num_patches, augment=dataset_configs[\"MRBrainS\"].augment, patch_size=dataset_configs[\"MRBrainS\"].patch_size, step=dataset_configs[\"MRBrainS\"].step, test_patch_size=dataset_configs[\"MRBrainS\"].test_patch_size, test_step=dataset_configs[\"MRBrainS\"].test_step,", "model_trainer_configs = [model_trainer_configs] dataset_configs = YamlConfigurationParser.parse_section(args.config_file, \"dataset\") dataset_configs = {k:", "Config\", PlotType.TEXT_PLOT, PlotFrequency.EVERY_EPOCH, None, config_html)) visdom_logger(VisdomData(\"Experiment\", \"Patch count\", PlotType.BAR_PLOT, PlotFrequency.EVERY_EPOCH,", "batch_size=50, alpha=data_augmentation_config[\"test\"][\"bias_field\"][\"alpha\"][0], prob_bias=data_augmentation_config[\"test\"][\"bias_field\"][\"prob_bias\"], snr=data_augmentation_config[\"test\"][\"noise\"][\"snr\"], prob_noise=data_augmentation_config[\"test\"][\"noise\"][\"prob_noise\"]) else: augmented_input_reconstructor = None augmented_normalized_input_reconstructor", "= None augmented_normalized_input_reconstructor = None else: segmentation_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0],", "dataset_configs.get(\"ABIDE\", None) is not None: ABIDE_train, ABIDE_valid, ABIDE_test, ABIDE_reconstruction =", "from kerosene.configs.configs import RunConfiguration, DatasetConfiguration from kerosene.configs.parsers import YamlConfigurationParser from", "32, 32, 32), reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, batch_size=50) gt_reconstructor", "of the License at # # https://opensource.org/licenses/MIT # # Unless", "not None else 0, len(ABIDE_train) 
if ABIDE_train is not None", "if dataset_configs.get(\"ABIDE\", None) is not None: ABIDE_train, ABIDE_valid, ABIDE_test, ABIDE_reconstruction", "os.path.normpath(visdom_config.env))) else: save_folder = \"saves/{}\".format(os.path.basename(os.path.normpath(visdom_config.env))) [os.makedirs(\"{}/{}\".format(save_folder, model), exist_ok=True) for model", "iSEG_augmentation_strategy = None MRBrainS_augmentation_strategy = None ABIDE_augmentation_strategy = None #", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "== '__main__': # Basic settings logging.basicConfig(level=logging.INFO) torch.set_num_threads(multiprocessing.cpu_count()) torch.set_num_interop_threads(multiprocessing.cpu_count()) args =", "= None ABIDE_train = None ABIDE_CSV = None iSEG_augmentation_strategy =", "patch_size=dataset_configs[\"MRBrainS\"].patch_size, step=dataset_configs[\"MRBrainS\"].step, test_patch_size=dataset_configs[\"MRBrainS\"].test_patch_size, test_step=dataset_configs[\"MRBrainS\"].test_step, data_augmentation_config=data_augmentation_config) test_datasets.append(MRBrainS_test) reconstruction_datasets.append(MRBrainS_reconstruction) if dataset_configs.get(\"ABIDE\", None)", "# You may obtain a copy of the License at", "deepNormalize.factories.customTrainerFactory import TrainerFactory from deepNormalize.inputs.datasets import iSEGSliceDatasetFactory, MRBrainSSliceDatasetFactory, ABIDESliceDatasetFactory from", "ABIDESliceDatasetFactory from deepNormalize.nn.criterions import CustomCriterionFactory from deepNormalize.utils.constants import * from", "torch.utils.data.ConcatDataset(test_datasets) else: train_dataset = train_datasets[0] valid_dataset = valid_datasets[0] test_dataset =", "kerosene.loggers.visdom.config import VisdomConfiguration from kerosene.loggers.visdom.visdom import VisdomLogger, VisdomData from kerosene.training.trainers", "is not None: MRBrainS_train, MRBrainS_valid, MRBrainS_test, MRBrainS_reconstruction = 
MRBrainSSliceDatasetFactory.create_train_valid_test( source_dir=dataset_configs[\"MRBrainS\"].path,", "256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, batch_size=50, alpha=data_augmentation_config[\"test\"][\"bias_field\"][\"alpha\"][0], prob_bias=data_augmentation_config[\"test\"][\"bias_field\"][\"prob_bias\"], snr=data_augmentation_config[\"test\"][\"noise\"][\"snr\"], prob_noise=data_augmentation_config[\"test\"][\"noise\"][\"prob_noise\"]) else:", "reconstruction_datasets.append(iSEG_reconstruction) if dataset_configs.get(\"MRBrainS\", None) is not None: MRBrainS_train, MRBrainS_valid, MRBrainS_test,", "model), exist_ok=True) for model in [\"Discriminator\", \"Generator\", \"Segmenter\"]] visdom_logger =", "test_size=dataset_configs[\"ABIDE\"].validation_split, max_num_patches=dataset_configs[\"ABIDE\"].max_num_patches, augment=dataset_configs[\"ABIDE\"].augment, patch_size=dataset_configs[\"ABIDE\"].patch_size, step=dataset_configs[\"ABIDE\"].step, test_patch_size=dataset_configs[\"ABIDE\"].test_patch_size, test_step=dataset_configs[\"ABIDE\"].test_step, data_augmentation_config=data_augmentation_config) train_datasets.append(ABIDE_train) valid_datasets.append(ABIDE_valid)", "kerosene.configs.parsers import YamlConfigurationParser from kerosene.loggers.visdom import PlotType, PlotFrequency from kerosene.loggers.visdom.config", "patch_size=dataset_configs[\"iSEG\"].patch_size, step=dataset_configs[\"iSEG\"].step, test_patch_size=dataset_configs[\"iSEG\"].test_patch_size, test_step=dataset_configs[\"iSEG\"].test_step, data_augmentation_config=data_augmentation_config) train_datasets.append(iSEG_train) valid_datasets.append(iSEG_valid) reconstruction_datasets.append(iSEG_reconstruction) if dataset_configs.get(\"MRBrainS\",", "ABIDE_train is not None else 0], y=[\"iSEG\", \"MRBrainS\", \"ABIDE\"], params={\"opts\":", "iSEG_train, iSEG_valid, iSEG_test, iSEG_reconstruction = iSEGSliceDatasetFactory.create_train_valid_test( 
source_dir=dataset_configs[\"iSEG\"].path, modalities=dataset_configs[\"iSEG\"].modalities, dataset_id=ISEG_ID, test_size=dataset_configs[\"iSEG\"].validation_split,", "step=dataset_configs[\"iSEG\"].test_step, models=[model_trainers[0]], segment=True, batch_size=8) input_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size,", "[\"Discriminator\", \"Generator\", \"Segmenter\"]] visdom_logger = VisdomLogger(visdom_config) visdom_logger(VisdomData(\"Experiment\", \"Experiment Config\", PlotType.TEXT_PLOT,", "import CustomModelFactory from deepNormalize.factories.customTrainerFactory import TrainerFactory from deepNormalize.inputs.datasets import iSEGSliceDatasetFactory,", "dataset_configs.get(\"iSEG\", None) is not None: iSEG_train, iSEG_valid, iSEG_test, iSEG_reconstruction =", "for k, v, in dataset_configs.items()} data_augmentation_config = YamlConfigurationParser.parse_section(args.config_file, \"data_augmentation\") config_html", "the License for the specific language governing permissions and #", "= list() test_datasets = list() reconstruction_datasets = list() iSEG_train =", "MRBrainS_test, MRBrainS_reconstruction = MRBrainSSliceDatasetFactory.create_train_valid_test( source_dir=dataset_configs[\"MRBrainS\"].path, modalities=dataset_configs[\"MRBrainS\"].modalities, dataset_id=MRBRAINS_ID, test_size=dataset_configs[\"MRBrainS\"].validation_split, max_subjects=dataset_configs[\"MRBrainS\"].max_subjects, max_num_patches=dataset_configs[\"MRBrainS\"].max_num_patches,", "torch.utils.data import DataLoader from torch.utils.data.dataloader import DataLoader from deepNormalize.config.parsers import", "dataset_id=ISEG_ID, test_size=dataset_configs[\"iSEG\"].validation_split, max_subjects=dataset_configs[\"iSEG\"].max_subjects, max_num_patches=dataset_configs[\"iSEG\"].max_num_patches, augment=dataset_configs[\"iSEG\"].augment, 
patch_size=dataset_configs[\"iSEG\"].patch_size, step=dataset_configs[\"iSEG\"].step, test_patch_size=dataset_configs[\"iSEG\"].test_patch_size, test_step=dataset_configs[\"iSEG\"].test_step, data_augmentation_config=data_augmentation_config)", "either express or implied. # See the License for the", "# Concat datasets. if len(dataset_configs) > 1: train_dataset = torch.utils.data.ConcatDataset(train_datasets)", "import CustomCriterionFactory from deepNormalize.utils.constants import * from deepNormalize.utils.image_slicer import ImageReconstructor", "data_augmentation_config = YamlConfigurationParser.parse_section(args.config_file, \"data_augmentation\") config_html = [training_config.to_html(), list(map(lambda config: config.to_html(),", "192), step=dataset_configs[\"iSEG\"].test_step, batch_size=50) gt_reconstructor = ImageReconstructor( [iSEG_reconstruction._target_images[0], MRBrainS_reconstruction._target_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1,", "import * from deepNormalize.utils.image_slicer import ImageReconstructor cudnn.benchmark = True cudnn.enabled", "TrainerFactory(training_config.trainer).create(training_config, model_trainers, dataloaders, reconstruction_datasets, None, input_reconstructor, segmentation_reconstructor, augmented_input_reconstructor, None, gt_reconstructor,", "list): model_trainer_configs = [model_trainer_configs] dataset_configs = YamlConfigurationParser.parse_section(args.config_file, \"dataset\") dataset_configs =", "# ============================================================================== import logging import multiprocessing import numpy as np", "torch.utils.data.dataloader import DataLoader from deepNormalize.config.parsers import ArgsParserFactory, ArgsParserType from deepNormalize.factories.customModelFactory", "reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, models=[model_trainers[0]], normalize_and_segment=True, batch_size=4) 
input_reconstructor =", "# -*- coding: utf-8 -*- # Copyright 2019 <NAME>. All", "= [model_trainer_configs] dataset_configs = YamlConfigurationParser.parse_section(args.config_file, \"dataset\") dataset_configs = {k: DatasetConfiguration(v)", "import DataLoader from deepNormalize.config.parsers import ArgsParserFactory, ArgsParserType from deepNormalize.factories.customModelFactory import", "from kerosene.loggers.visdom.visdom import VisdomLogger, VisdomData from kerosene.training.trainers import ModelTrainerFactory from", "iSEGSliceDatasetFactory, MRBrainSSliceDatasetFactory, ABIDESliceDatasetFactory from deepNormalize.nn.criterions import CustomCriterionFactory from deepNormalize.utils.constants import", "VisdomLogger, VisdomData from kerosene.training.trainers import ModelTrainerFactory from samitorch.inputs.utils import augmented_sample_collate", "valid_datasets.append(ABIDE_valid) test_datasets.append(ABIDE_test) reconstruction_datasets.append(ABIDE_reconstruction) if len(list(dataset_configs.keys())) == 2: segmentation_reconstructor = ImageReconstructor(", "MIT License; # you may not use this file except", "MRBrainS_train = None MRBrainS_CSV = None ABIDE_train = None ABIDE_CSV", "list() iSEG_train = None iSEG_CSV = None MRBrainS_train = None", "256, 192), step=dataset_configs[\"iSEG\"].test_step, is_ground_truth=True, batch_size=50) if dataset_configs[\"iSEG\"].augment: augmented_input_reconstructor = ImageReconstructor(", "not None: iSEG_train, iSEG_valid, iSEG_test, iSEG_reconstruction = iSEGSliceDatasetFactory.create_train_valid_test( source_dir=dataset_configs[\"iSEG\"].path, modalities=dataset_configs[\"iSEG\"].modalities,", "PlotType.TEXT_PLOT, PlotFrequency.EVERY_EPOCH, None, config_html)) visdom_logger(VisdomData(\"Experiment\", \"Patch count\", PlotType.BAR_PLOT, PlotFrequency.EVERY_EPOCH, x=[len(iSEG_train)", "MRBrainS_reconstruction._source_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1, 256, 256, 192), 
step=dataset_configs[\"iSEG\"].test_step, batch_size=50, alpha=data_augmentation_config[\"test\"][\"bias_field\"][\"alpha\"][0], prob_bias=data_augmentation_config[\"test\"][\"bias_field\"][\"prob_bias\"],", "dataset_configs[\"iSEG\"].augment: augmented_input_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0], ABIDE_reconstruction._source_images[0]], patch_size=(1, 32, 32,", "k, v, in dataset_configs.items()} data_augmentation_config = YamlConfigurationParser.parse_section(args.config_file, \"data_augmentation\") config_html =", "args.config_file.split(\"/\")[-3:] if visdom_config.save_destination is not None: save_folder = visdom_config.save_destination +", "is not None: iSEG_train, iSEG_valid, iSEG_test, iSEG_reconstruction = iSEGSliceDatasetFactory.create_train_valid_test( source_dir=dataset_configs[\"iSEG\"].path,", "256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, is_ground_truth=True, batch_size=50) if dataset_configs[\"iSEG\"].augment: augmented_input_reconstructor =", "patch_size=(1, 32, 32, 32), reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, batch_size=50)", "patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, batch_size=50) gt_reconstructor = ImageReconstructor(", "= ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0], ABIDE_reconstruction._source_images[0]], patch_size=(1, 32, 32, 32), reconstructed_image_size=(1,", "utf-8 -*- # Copyright 2019 <NAME>. All Rights Reserved. 
#", "from deepNormalize.factories.customModelFactory import CustomModelFactory from deepNormalize.factories.customTrainerFactory import TrainerFactory from deepNormalize.inputs.datasets", "patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, models=[model_trainers[0]], segment=True, batch_size=8) input_reconstructor", "MRBrainS_reconstruction._source_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, batch_size=50) gt_reconstructor =", "save_folder = \"saves/{}\".format(os.path.basename(os.path.normpath(visdom_config.env))) [os.makedirs(\"{}/{}\".format(save_folder, model), exist_ok=True) for model in [\"Discriminator\",", "kerosene.loggers.visdom import PlotType, PlotFrequency from kerosene.loggers.visdom.config import VisdomConfiguration from kerosene.loggers.visdom.visdom", "os import random import torch import torch.backends.cudnn as cudnn from", "train_dataset = torch.utils.data.ConcatDataset(train_datasets) valid_dataset = torch.utils.data.ConcatDataset(valid_datasets) test_dataset = torch.utils.data.ConcatDataset(test_datasets) else:", "ABIDE_reconstruction = ABIDESliceDatasetFactory.create_train_valid_test( source_dir=dataset_configs[\"ABIDE\"].path, modalities=dataset_configs[\"ABIDE\"].modalities, dataset_id=ABIDE_ID, sites=dataset_configs[\"ABIDE\"].sites, max_subjects=dataset_configs[\"ABIDE\"].max_subjects, test_size=dataset_configs[\"ABIDE\"].validation_split, max_num_patches=dataset_configs[\"ABIDE\"].max_num_patches,", "reconstruction_datasets.append(MRBrainS_reconstruction) if dataset_configs.get(\"ABIDE\", None) is not None: ABIDE_train, ABIDE_valid, ABIDE_test,", "if ABIDE_train is not None else 0], y=[\"iSEG\", \"MRBrainS\", \"ABIDE\"],", "prob_noise=data_augmentation_config[\"test\"][\"noise\"][\"prob_noise\"]) else: augmented_input_reconstructor = None 
augmented_normalized_input_reconstructor = None else: segmentation_reconstructor", "step=dataset_configs[\"ABIDE\"].step, test_patch_size=dataset_configs[\"ABIDE\"].test_patch_size, test_step=dataset_configs[\"ABIDE\"].test_step, data_augmentation_config=data_augmentation_config) train_datasets.append(ABIDE_train) valid_datasets.append(ABIDE_valid) test_datasets.append(ABIDE_test) reconstruction_datasets.append(ABIDE_reconstruction) if len(list(dataset_configs.keys()))", "test_size=dataset_configs[\"iSEG\"].validation_split, max_subjects=dataset_configs[\"iSEG\"].max_subjects, max_num_patches=dataset_configs[\"iSEG\"].max_num_patches, augment=dataset_configs[\"iSEG\"].augment, patch_size=dataset_configs[\"iSEG\"].patch_size, step=dataset_configs[\"iSEG\"].step, test_patch_size=dataset_configs[\"iSEG\"].test_patch_size, test_step=dataset_configs[\"iSEG\"].test_step, data_augmentation_config=data_augmentation_config) train_datasets.append(iSEG_train)", "reconstruction_datasets = list() iSEG_train = None iSEG_CSV = None MRBrainS_train", "VisdomLogger(visdom_config) visdom_logger(VisdomData(\"Experiment\", \"Experiment Config\", PlotType.TEXT_PLOT, PlotFrequency.EVERY_EPOCH, None, config_html)) visdom_logger(VisdomData(\"Experiment\", \"Patch", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "count\"}})) trainer = TrainerFactory(training_config.trainer).create(training_config, model_trainers, dataloaders, reconstruction_datasets, None, input_reconstructor, segmentation_reconstructor,", "if dataset_configs[\"iSEG\"].augment: augmented_input_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1, 256,", "Prepare the data. 
train_datasets = list() valid_datasets = list() test_datasets", "CustomCriterionFactory from deepNormalize.utils.constants import * from deepNormalize.utils.image_slicer import ImageReconstructor cudnn.benchmark", "# distributed under the License is distributed on an \"AS", "len(MRBrainS_train) if MRBrainS_train is not None else 0, len(ABIDE_train) if", "# Unless required by applicable law or agreed to in", "from deepNormalize.utils.constants import * from deepNormalize.utils.image_slicer import ImageReconstructor cudnn.benchmark =", "= list() valid_datasets = list() test_datasets = list() reconstruction_datasets =", "import numpy as np import os import random import torch", "model_trainers = model_trainer_factory.create(model_trainer_configs) if not isinstance(model_trainers, list): model_trainers = [model_trainers]", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "256, 192), step=dataset_configs[\"iSEG\"].test_step, batch_size=50, is_ground_truth=True) if dataset_configs[\"iSEG\"].augment: augmented_input_reconstructor = ImageReconstructor(", "config: config.to_html(), dataset_configs.values())), list(map(lambda config: config.to_html(), model_trainer_configs))] # Prepare the", "batch_size=50, is_ground_truth=True) if dataset_configs[\"iSEG\"].augment: augmented_input_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0], ABIDE_reconstruction._source_images[0]],", "visdom_logger = VisdomLogger(visdom_config) visdom_logger(VisdomData(\"Experiment\", \"Experiment Config\", PlotType.TEXT_PLOT, PlotFrequency.EVERY_EPOCH, None, config_html))", "Initialize the loggers. 
visdom_config = VisdomConfiguration.from_yml(args.config_file, \"visdom\") exp = args.config_file.split(\"/\")[-3:]", "You may obtain a copy of the License at #", "import PlotType, PlotFrequency from kerosene.loggers.visdom.config import VisdomConfiguration from kerosene.loggers.visdom.visdom import", "= MRBrainSSliceDatasetFactory.create_train_valid_test( source_dir=dataset_configs[\"MRBrainS\"].path, modalities=dataset_configs[\"MRBrainS\"].modalities, dataset_id=MRBRAINS_ID, test_size=dataset_configs[\"MRBrainS\"].validation_split, max_subjects=dataset_configs[\"MRBrainS\"].max_subjects, max_num_patches=dataset_configs[\"MRBrainS\"].max_num_patches, augment=dataset_configs[\"MRBrainS\"].augment, patch_size=dataset_configs[\"MRBrainS\"].patch_size,", "MRBrainS_reconstruction._target_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, is_ground_truth=True, batch_size=50) if", "isinstance(model_trainers, list): model_trainers = [model_trainers] # Create datasets if dataset_configs.get(\"iSEG\",", "32), reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, batch_size=50) gt_reconstructor = ImageReconstructor(", "test_datasets.append(ABIDE_test) reconstruction_datasets.append(ABIDE_reconstruction) if len(list(dataset_configs.keys())) == 2: segmentation_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0],", "ImageReconstructor( [iSEG_reconstruction._target_images[0], MRBrainS_reconstruction._target_images[0]], patch_size=dataset_configs[\"iSEG\"].test_patch_size, reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\"iSEG\"].test_step, is_ground_truth=True,", "model_trainer_configs))] # Prepare the data. 
train_datasets = list() valid_datasets =", "iSEG_CSV = None MRBrainS_train = None MRBrainS_CSV = None ABIDE_train", "None, input_reconstructor, segmentation_reconstructor, augmented_input_reconstructor, None, gt_reconstructor, run_config, dataset_configs, save_folder, visdom_logger)", "ABIDE_augmentation_strategy = None # Initialize the model trainers model_trainer_factory =", "numpy as np import os import random import torch import", "256, 192), step=dataset_configs[\"iSEG\"].test_step, models=[model_trainers[0]], segment=True, batch_size=8) input_reconstructor = ImageReconstructor( [iSEG_reconstruction._source_images[0],", "= valid_datasets[0] test_dataset = test_datasets[0] # Create loaders. dataloaders =" ]
[ "finally: is_need_header_receive = True is_analyze_running = False sock.shutdown(socket.SHUT_RDWR) sock.close() connected_clients_sockets.remove(sock)", "sock.shutdown(socket.SHUT_RDWR) sock.close() connected_clients_sockets.remove(sock) server_socket.close() if out is not None: out.release()", "= ld.lane_detection() except: # ライン検出失敗 is_need_header_receive = True control='0,0,' sock.sendall((\"CONTROL,\"+", "not is_need_header_receive: # ここには来ない print('body') if packet: print('packet True') is_need_header_receive", "str(packet) if packet: print('packet True') if packet == 'START': is_analyze_running", "global ld global od # 通信設定 HOST = '192.168.0.77' #", "None: out.release() except: import traceback traceback.print_exc() finally: is_need_header_receive = True", "if is_need_header_receive: print('header') packet = packet.decode('ascii') txt = str(packet) if", "離れて右いる、奥は右カーブ:左に少し曲がる handle_angle=-1*HANDLE_ANGLE/2 else: # 離れて右いる、奥は左カーブ、左に全開で曲がる handle_angle=-1*HANDLE_ANGLE # 動作可能な角度内に調整する if handle_angle", "from lib.webcam import WebcamVideoStream # ログ設定 logging.basicConfig(level=logging.DEBUG, format='[%(levelname)s] time:%(created).8f pid:%(process)d", "sock == server_socket: sockfd, client_address = server_socket.accept() connected_clients_sockets.append(sockfd) else: #", "np.abs(meters_from_center)*100 > 10: if tilt2_deg > 0 : # 離れて左いる、奥は左カーブ:右に少し曲がる", "TCP Port ######################################## # 通信準備 ######################################## connected_clients_sockets = [] server_socket", "LaneDetection(X_METER,Y_METER,cols=cols,rows=rows) while is_analyze_running: frame_start_time = time.time() #time.sleep(0.2) ######################################## # 映像取得", "+が右、-が左 ''' ######################################## # ハンドル角調整を行う ######################################## handle_angle = -1*tilt1_deg if", "止まれを検出した is_need_header_receive = True control='0,0,' sock.sendall((\"CONTROL,\"+ control).encode('ascii')) continue elif prediction_class", "None speed = 
None # 映像準備 camera = WebcamVideoStream() cols,rows,fps,fourcc", "= None speed = None # 映像準備 camera = WebcamVideoStream()", "Docker # 1. FFMPEG UDP StreamingをClientで実行する。AWS向け10FPS,Jetson TX2向け1FPS # 2. Serverを起動する", "'a32158c3da9f' # AWS Docker #PORT = 8091 # AWS TCP", "import * from lib.object_detection import ObjectDetection from lib.opencv_lane_detection import LaneDetection", "X_METER=1.5 Y_METER=1 # ライン検出クラス ld = None # 物体検出クラス od", "10: if tilt2_deg < 0 : # 離れて右いる、奥は右カーブ:左に少し曲がる handle_angle=-1*HANDLE_ANGLE/2 else:", "read_sockets, write_sockets, error_sockets = select.select(connected_clients_sockets, [], []) for sock in", "= select.select(connected_clients_sockets, [], []) for sock in read_sockets: if sock", "meters_from_center = ld.lane_detection() except: # ライン検出失敗 is_need_header_receive = True control='0,0,'", "# 映像準備 camera = WebcamVideoStream() cols,rows,fps,fourcc = camera.init_webcam() camera.start() fps", "# 物体認識 ######################################## # avi動画に保存する if IS_SAVE: out.write(cv_bgr) rclasses,rscores,rbboxes =", "映像取得 ######################################## cv_bgr = camera.read() frame_counter += 1 ######################################## #", "for sock in read_sockets: if sock == server_socket: sockfd, client_address", "# 物体検出クラス od = None def do_analyze(): global is_analyze_running global", "# ここには来ない print('body') if packet: print('packet True') is_need_header_receive = True", "tid:%(thread)d tn:%(threadName)-10s fn:%(funcName)-10s %(message)s', ) # 解析、送信スレッド動作フラグ is_analyze_running = False", "HANDLE_ANGLE if handle_angle < -1*HANDLE_ANGLE: handle_angle = -1*HANDLE_ANGLE # 車両制御送信", "vid = cv2.VideoCapture()を環境に合わせて修正する必要がある # lib/object_detection.py: /home/ubuntu/notebooks/github/SSD-Tensorflow/ を環境に合わせて修正する必要がある ''' Python 3.6", "is_analyze_running = False sock = None out = None #", "= 60 else: # 物体検出無し if speed is None: speed", "右にいる if np.abs(meters_from_center)*100 > 20: # とても離れて右にいる:左に全開で曲がる handle_angle=-1*HANDLE_ANGLE elif 
np.abs(meters_from_center)*100", "server_socket.close() if out is not None: out.release() if __name__ ==", "lib/object_detection.py: /home/ubuntu/notebooks/github/SSD-Tensorflow/ を環境に合わせて修正する必要がある ''' Python 3.6 送信するmessageは.encode('ascii')や.encode('utf-8')等でエンコードする必要がる ここではClientから送られてくるOpenCV BGR画像データが'ascii'に変換されているので'ascii'で統一している '''", "-が右にいる、+が左にいる handle_angle: +が右、-が左 ''' ######################################## # ハンドル角調整を行う ######################################## handle_angle =", "except: import traceback traceback.print_exc() finally: is_need_header_receive = True is_analyze_running =", "X_METER global Y_METER global ld global od # 映像を保存するかどうか IS_SAVE", "print('got BYE') is_need_header_receive = True is_analyze_running = False sock.shutdown(socket.SHUT_RDWR) sock.close()", "speed = None # 映像準備 camera = WebcamVideoStream() cols,rows,fps,fourcc =", "sock.close() connected_clients_sockets.remove(sock) if out is not None: out.release() if not", "od.get_detection(cv_bgr) print(rclasses,rscores,rbboxes) if len(rclasses) > 0: prediction_class = np.min(rclasses) if", "#PORT = 8091 # PC TCP Port ######################################## # 通信準備", "-*- coding: utf-8 -*- # ClientからOpenCV画像データを受け取り、ライン検出して制御命令を送る # Server: Jetson TX2", "= None # IPM変換後の画像におけるx,yメートル(黒い部分も含む) X_METER=1.5 Y_METER=1 # ライン検出クラス ld =", "cv2.VideoWriter(os.path.join(OUTPUT_DIR, OUTPUT_FILENAME), int(fourcc), fps, (int(cols), int(rows))) ######################################## # ライン検出準備 ########################################", "out global X_METER global Y_METER global ld global od #", "None: speed = 40 handle_angle = 0 ######################################## # ライン検出", "print(\"wait. 
launching...\") import socket, select import time import cv2 import", "def do_analyze(): global is_analyze_running global sock global out global X_METER", "######################################## # 物体認識 ######################################## # avi動画に保存する if IS_SAVE: out.write(cv_bgr) rclasses,rscores,rbboxes", "== 1: # 止まれを検出した is_need_header_receive = True control='0,0,' sock.sendall((\"CONTROL,\"+ control).encode('ascii'))", "select.select(connected_clients_sockets, [], []) for sock in read_sockets: if sock ==", "print(\"speed={},handle_angle={},CONTROL,{}\".format(speed,handle_angle,control)) sock.sendall((\"CONTROL,\"+ control).encode('ascii')) frame_end_time = time.time() print(\"FPS={}\".format(round(1/(frame_end_time-frame_start_time),2))) def main(): global", "if not is_need_header_receive: # ここには来ない print('body') if packet: print('packet True')", "None # IPM変換後の画像におけるx,yメートル(黒い部分も含む) X_METER=1.5 Y_METER=1 # ライン検出クラス ld = None", "# if is_need_header_receive: print('header') packet = packet.decode('ascii') txt = str(packet)", "# コード修正 # lib/camera.py: vid = cv2.VideoCapture()を環境に合わせて修正する必要がある # lib/object_detection.py: /home/ubuntu/notebooks/github/SSD-Tensorflow/", "import os import sys import logging import threading import numpy", "[], []) for sock in read_sockets: if sock == server_socket:", "= cv_bgr # ラインを検出する try: tilt1_deg,tilt2_deg,angle1_deg,angle2_deg,curve1_r,curve2_r, \\ meters_from_center = ld.lane_detection()", "is_need_header_receive: # ここには来ない print('body') if packet: print('packet True') is_need_header_receive =", "socket, select import time import cv2 import numpy as np", "len(rclasses) > 0: prediction_class = np.min(rclasses) if prediction_class == 1:", "sock.shutdown(socket.SHUT_RDWR) sock.close() connected_clients_sockets.remove(sock) if out is not None: out.release() except:", "Port ######################################## # 通信準備 ######################################## connected_clients_sockets = [] server_socket =", "elif 
np.abs(meters_from_center)*100 > 10: if tilt2_deg < 0 : #", "if speed is None: speed = 40 handle_angle = 0", "fourcc = None control = None roi_vertices = None ipm_vertices", "IS_SAVE: out = cv2.VideoWriter(os.path.join(OUTPUT_DIR, OUTPUT_FILENAME), int(fourcc), fps, (int(cols), int(rows))) ########################################", "[] server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) server_socket.bind((HOST, PORT))", "'received.avi' HANDLE_ANGLE = 42 frame_counter = 0 fourcc = None", "control='0,0,' sock.sendall((\"CONTROL,\"+ control).encode('ascii')) continue elif prediction_class == 2: # 10を検出した", "ClientがServerにHeaderを送る時は4096 Byte以下にすること packet = sock.recv(4096) print(type(packet)) # if is_need_header_receive: print('header')", "else: print('data finished') is_need_header_receive = True is_analyze_running = False sock.shutdown(socket.SHUT_RDWR)", "ClientからOpenCV画像データを受け取り、ライン検出して制御命令を送る # Server: Jetson TX2 # Client: Jetson TX2/Raspberry Pi3", "= 0 fourcc = None control = None roi_vertices =", "# IPM変換後の画像におけるx,yメートル(黒い部分も含む) X_METER=1.5 Y_METER=1 # ライン検出クラス ld = None #", "# ライン検出失敗 is_need_header_receive = True control='0,0,' sock.sendall((\"CONTROL,\"+ control).encode('ascii')) continue ########################################", "meters_from_center >= 0: # 左にいる if np.abs(meters_from_center)*100 > 20: #", "sock.shutdown(socket.SHUT_RDWR) sock.close() connected_clients_sockets.remove(sock) if out is not None: out.release() else:", "speed = 60 else: # 物体検出無し if speed is None:", "server_socket.accept() connected_clients_sockets.append(sockfd) else: # ClientがServerにHeaderを送る時は4096 Byte以下にすること packet = sock.recv(4096) print(type(packet))", "else: # 物体検出無し if speed is None: speed = 40", "global out global X_METER global Y_METER global ld global od", "numpy as np import time import os import sys import", "ラインを検出する try: tilt1_deg,tilt2_deg,angle1_deg,angle2_deg,curve1_r,curve2_r, \\ 
meters_from_center = ld.lane_detection() except: # ライン検出失敗", "od # 映像を保存するかどうか IS_SAVE = True OUTPUT_DIR ='./' OUTPUT_FILENAME =", "3: # 20を検出した speed = 50 elif prediction_class == 4:", "True t = threading.Thread(target=do_analyze) t.start() elif packet.startswith('BYE'): print('got BYE') is_need_header_receive", "= 50 elif prediction_class == 4: # 30を検出した speed =", "frame_counter = 0 fourcc = None control = None roi_vertices", "# ClientからOpenCV画像データを受け取り、ライン検出して制御命令を送る # Server: Jetson TX2 # Client: Jetson TX2/Raspberry", "import cv2 import numpy as np import time import os", "HANDLE_ANGLE: handle_angle = HANDLE_ANGLE if handle_angle < -1*HANDLE_ANGLE: handle_angle =", "if packet: print('packet True') if packet == 'START': is_analyze_running =", "== 3: # 20を検出した speed = 50 elif prediction_class ==", "0 : # 離れて右いる、奥は右カーブ:左に少し曲がる handle_angle=-1*HANDLE_ANGLE/2 else: # 離れて右いる、奥は左カーブ、左に全開で曲がる handle_angle=-1*HANDLE_ANGLE #", "False sock.shutdown(socket.SHUT_RDWR) sock.close() connected_clients_sockets.remove(sock) server_socket.close() if out is not None:", "packet: print('packet True') is_need_header_receive = True else: print('data finished') is_need_header_receive", "物体認識 ######################################## # avi動画に保存する if IS_SAVE: out.write(cv_bgr) rclasses,rscores,rbboxes = od.get_detection(cv_bgr)", "packet.startswith('BYE'): print('got BYE') is_need_header_receive = True is_analyze_running = False sock.shutdown(socket.SHUT_RDWR)", "HANDLE_ANGLE = 42 frame_counter = 0 fourcc = None control", "pid:%(process)d pn:%(processName)-10s tid:%(thread)d tn:%(threadName)-10s fn:%(funcName)-10s %(message)s', ) # 解析、送信スレッド動作フラグ is_analyze_running", "IS_SAVE: out.write(cv_bgr) rclasses,rscores,rbboxes = od.get_detection(cv_bgr) print(rclasses,rscores,rbboxes) if len(rclasses) > 0:", "= 50 #else: # speed = 60 ''' 左右について tilt_deg:", "np.abs(meters_from_center)*100 > 20: # とても離れて左にいる:右に全開で曲がる handle_angle=HANDLE_ANGLE elif np.abs(meters_from_center)*100 > 10:", "PORT)) 
server_socket.listen(10) connected_clients_sockets.append(server_socket) # Headerの受信が必要かどうか。Headerを受信したら、encode('ascii')を通さずに受信データを解析する is_need_header_receive = True ######################################## #", "global ld global od # 映像を保存するかどうか IS_SAVE = True OUTPUT_DIR", "time.time() print(\"FPS={}\".format(round(1/(frame_end_time-frame_start_time),2))) def main(): global is_analyze_running global sock global out", "def main(): global is_analyze_running global sock global out global ld", "connected_clients_sockets.remove(sock) if out is not None: out.release() else: print('client disconnect')", "if out is not None: out.release() else: print('client disconnect') is_need_header_receive", "out global ld global od # 通信設定 HOST = '192.168.0.77'", "Docker #PORT = 8091 # AWS TCP Port #HOST =", "print('body') if packet: print('packet True') is_need_header_receive = True else: print('data", "np.abs(angle2_deg) > np.abs(angle1_deg): # speed = 50 #else: # speed", "TX2 # Client: Jetson TX2/Raspberry Pi3 Docker # 1. FFMPEG", "= False sock.shutdown(socket.SHUT_RDWR) sock.close() connected_clients_sockets.remove(sock) if out is not None:", "control=str(speed)+','+str(handle_angle)+',' print(\"speed={},handle_angle={},CONTROL,{}\".format(speed,handle_angle,control)) sock.sendall((\"CONTROL,\"+ control).encode('ascii')) frame_end_time = time.time() print(\"FPS={}\".format(round(1/(frame_end_time-frame_start_time),2))) def main():", "import sys import logging import threading import numpy as np", "if out is not None: out.release() if __name__ == '__main__':", "2: # 10を検出した speed = 40 elif prediction_class == 3:", "np.abs(angle1_deg): # speed = 50 #else: # speed = 60", "is not None: out.release() if not is_need_header_receive: # ここには来ない print('body')", "TX2向け1FPS # 2. Serverを起動する # 3. 
Clientを起動する # コード修正 #", "通信準備 ######################################## connected_clients_sockets = [] server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server_socket.setsockopt(socket.SOL_SOCKET,", "print('header') packet = packet.decode('ascii') txt = str(packet) if packet: print('packet", "動作可能な角度内に調整する if handle_angle > HANDLE_ANGLE: handle_angle = HANDLE_ANGLE if handle_angle", "= '2204f9b0e871' # PC Docker #PORT = 8091 # PC", "# 通信設定 HOST = '192.168.0.77' # Server IP Address PORT", "< -1*HANDLE_ANGLE: handle_angle = -1*HANDLE_ANGLE # 車両制御送信 control=str(speed)+','+str(handle_angle)+',' print(\"speed={},handle_angle={},CONTROL,{}\".format(speed,handle_angle,control)) sock.sendall((\"CONTROL,\"+", "out is not None: out.release() else: print('client disconnect') is_need_header_receive =", "None: out.release() else: print('client disconnect') is_need_header_receive = True is_analyze_running =", "ライン検出クラス ld = None # 物体検出クラス od = None def", "None control = None roi_vertices = None ipm_vertices = None", "is_analyze_running = False sock.shutdown(socket.SHUT_RDWR) sock.close() connected_clients_sockets.remove(sock) server_socket.close() if out is", "BGR画像データが'ascii'に変換されているので'ascii'で統一している ''' print(\"wait. 
launching...\") import socket, select import time import", "左にいる if np.abs(meters_from_center)*100 > 20: # とても離れて左にいる:右に全開で曲がる handle_angle=HANDLE_ANGLE elif np.abs(meters_from_center)*100", "離れて右いる、奥は左カーブ、左に全開で曲がる handle_angle=-1*HANDLE_ANGLE # 動作可能な角度内に調整する if handle_angle > HANDLE_ANGLE: handle_angle =", "# とても離れて左にいる:右に全開で曲がる handle_angle=HANDLE_ANGLE elif np.abs(meters_from_center)*100 > 10: if tilt2_deg >", "= -1*tilt1_deg if meters_from_center >= 0: # 左にいる if np.abs(meters_from_center)*100", "start\") try: while True: ######################################## # 受信待ち ######################################## read_sockets, write_sockets,", "= True OUTPUT_DIR ='./' OUTPUT_FILENAME = 'received.avi' HANDLE_ANGLE = 42", "AWS Docker #PORT = 8091 # AWS TCP Port #HOST", "as np from lib.functions import * from lib.object_detection import ObjectDetection", "20: # とても離れて右にいる:左に全開で曲がる handle_angle=-1*HANDLE_ANGLE elif np.abs(meters_from_center)*100 > 10: if tilt2_deg", "t.start() elif packet.startswith('BYE'): print('got BYE') is_need_header_receive = True is_analyze_running =", "# とても離れて右にいる:左に全開で曲がる handle_angle=-1*HANDLE_ANGLE elif np.abs(meters_from_center)*100 > 10: if tilt2_deg <", "Jetson TX2 # Client: Jetson TX2/Raspberry Pi3 Docker # 1.", "車両制御送信 control=str(speed)+','+str(handle_angle)+',' print(\"speed={},handle_angle={},CONTROL,{}\".format(speed,handle_angle,control)) sock.sendall((\"CONTROL,\"+ control).encode('ascii')) frame_end_time = time.time() print(\"FPS={}\".format(round(1/(frame_end_time-frame_start_time),2))) def", "= 1 if IS_SAVE: out = cv2.VideoWriter(os.path.join(OUTPUT_DIR, OUTPUT_FILENAME), int(fourcc), fps,", "False sock = None out = None # IPM変換後の画像におけるx,yメートル(黒い部分も含む) X_METER=1.5", "if meters_from_center >= 0: # 左にいる if np.abs(meters_from_center)*100 > 20:", "6666 # Server TCP Port #HOST = 'a32158c3da9f' # AWS", "connected_clients_sockets.remove(sock) if out is not None: out.release() if not is_need_header_receive:", "sock = None out = None # 
IPM変換後の画像におけるx,yメートル(黒い部分も含む) X_METER=1.5 Y_METER=1", "logging.basicConfig(level=logging.DEBUG, format='[%(levelname)s] time:%(created).8f pid:%(process)d pn:%(processName)-10s tid:%(thread)d tn:%(threadName)-10s fn:%(funcName)-10s %(message)s', )", "= 60 ''' 左右について tilt_deg: -が右、+が左 angle_deg: +が右、-が左 meters_from_center: -が右にいる、+が左にいる", "socket.socket(socket.AF_INET, socket.SOCK_STREAM) server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) server_socket.bind((HOST, PORT)) server_socket.listen(10) connected_clients_sockets.append(server_socket) #", "camera = WebcamVideoStream() cols,rows,fps,fourcc = camera.init_webcam() camera.start() fps = 1", "# Server TCP Port #HOST = 'a32158c3da9f' # AWS Docker", "= cv2.VideoCapture()を環境に合わせて修正する必要がある # lib/object_detection.py: /home/ubuntu/notebooks/github/SSD-Tensorflow/ を環境に合わせて修正する必要がある ''' Python 3.6 送信するmessageは.encode('ascii')や.encode('utf-8')等でエンコードする必要がる", "0 fourcc = None control = None roi_vertices = None", "sock.close() connected_clients_sockets.remove(sock) server_socket.close() if out is not None: out.release() if", "if out is not None: out.release() except: import traceback traceback.print_exc()", "# 左にいる if np.abs(meters_from_center)*100 > 20: # とても離れて左にいる:右に全開で曲がる handle_angle=HANDLE_ANGLE elif", "# 速度調整を行う ######################################## #if np.abs(angle2_deg) > np.abs(angle1_deg): # speed =", "速度調整を行う ######################################## #if np.abs(angle2_deg) > np.abs(angle1_deg): # speed = 50", "import time import cv2 import numpy as np import time", "handle_angle=HANDLE_ANGLE else: # 右にいる if np.abs(meters_from_center)*100 > 20: # とても離れて右にいる:左に全開で曲がる", "global od # 映像を保存するかどうか IS_SAVE = True OUTPUT_DIR ='./' OUTPUT_FILENAME", "ld global od # 映像を保存するかどうか IS_SAVE = True OUTPUT_DIR ='./'", ") # 解析、送信スレッド動作フラグ is_analyze_running = False sock = None out", "# 映像を保存するかどうか IS_SAVE = True OUTPUT_DIR ='./' OUTPUT_FILENAME = 'received.avi'", "speed is None: speed = 40 handle_angle = 0 
########################################", "continue elif prediction_class == 2: # 10を検出した speed = 40", "<reponame>FaBoPlatform/RobotCarAI<gh_stars>1-10 #!/usr/bin/env python # -*- coding: utf-8 -*- # ClientからOpenCV画像データを受け取り、ライン検出して制御命令を送る", "= sock.recv(4096) print(type(packet)) # if is_need_header_receive: print('header') packet = packet.decode('ascii')", "while True: ######################################## # 受信待ち ######################################## read_sockets, write_sockets, error_sockets =", "0 ######################################## # ライン検出 ######################################## ld.cv_bgr = cv_bgr # ラインを検出する", "= 8091 # PC TCP Port ######################################## # 通信準備 ########################################", "ここには来ない print('body') if packet: print('packet True') is_need_header_receive = True else:", "None: out.release() if not is_need_header_receive: # ここには来ない print('body') if packet:", "> np.abs(angle1_deg): # speed = 50 #else: # speed =", "elif prediction_class == 2: # 10を検出した speed = 40 elif", "= 42 frame_counter = 0 fourcc = None control =", "# Client: Jetson TX2/Raspberry Pi3 Docker # 1. FFMPEG UDP", "handle_angle: +が右、-が左 ''' ######################################## # ハンドル角調整を行う ######################################## handle_angle = -1*tilt1_deg", "30を検出した speed = 60 else: # 物体検出無し if speed is", "import logging import threading import numpy as np from lib.functions", "Python 3.6 送信するmessageは.encode('ascii')や.encode('utf-8')等でエンコードする必要がる ここではClientから送られてくるOpenCV BGR画像データが'ascii'に変換されているので'ascii'で統一している ''' print(\"wait. 
launching...\") import socket,", "cv_bgr # ラインを検出する try: tilt1_deg,tilt2_deg,angle1_deg,angle2_deg,curve1_r,curve2_r, \\ meters_from_center = ld.lane_detection() except:", "pn:%(processName)-10s tid:%(thread)d tn:%(threadName)-10s fn:%(funcName)-10s %(message)s', ) # 解析、送信スレッド動作フラグ is_analyze_running =", "sockfd, client_address = server_socket.accept() connected_clients_sockets.append(sockfd) else: # ClientがServerにHeaderを送る時は4096 Byte以下にすること packet", "# speed = 60 ''' 左右について tilt_deg: -が右、+が左 angle_deg: +が右、-が左", "0 : # 離れて左いる、奥は左カーブ:右に少し曲がる handle_angle=HANDLE_ANGLE/2 else: # 離れて左いる、奥は右カーブ:右に全開で曲がる handle_angle=HANDLE_ANGLE else:", "# Server IP Address PORT = 6666 # Server TCP", "ObjectDetection() print(\"Server start\") try: while True: ######################################## # 受信待ち ########################################", "speed = 40 elif prediction_class == 3: # 20を検出した speed", "rclasses,rscores,rbboxes = od.get_detection(cv_bgr) print(rclasses,rscores,rbboxes) if len(rclasses) > 0: prediction_class =", "# Server: Jetson TX2 # Client: Jetson TX2/Raspberry Pi3 Docker", "out.write(cv_bgr) rclasses,rscores,rbboxes = od.get_detection(cv_bgr) print(rclasses,rscores,rbboxes) if len(rclasses) > 0: prediction_class", "handle_angle=-1*HANDLE_ANGLE elif np.abs(meters_from_center)*100 > 10: if tilt2_deg < 0 :", "if out is not None: out.release() if not is_need_header_receive: #", "TX2/Raspberry Pi3 Docker # 1. 
FFMPEG UDP StreamingをClientで実行する。AWS向け10FPS,Jetson TX2向け1FPS #", "speed = 50 elif prediction_class == 4: # 30を検出した speed", "42 frame_counter = 0 fourcc = None control = None", "######################################## # ライン検出 ######################################## ld.cv_bgr = cv_bgr # ラインを検出する try:", "if tilt2_deg > 0 : # 離れて左いる、奥は左カーブ:右に少し曲がる handle_angle=HANDLE_ANGLE/2 else: #", "='./' OUTPUT_FILENAME = 'received.avi' HANDLE_ANGLE = 42 frame_counter = 0", "prediction_class = np.min(rclasses) if prediction_class == 1: # 止まれを検出した is_need_header_receive", "time import cv2 import numpy as np import time import", "WebcamVideoStream() cols,rows,fps,fourcc = camera.init_webcam() camera.start() fps = 1 if IS_SAVE:", "######################################## ld = LaneDetection(X_METER,Y_METER,cols=cols,rows=rows) while is_analyze_running: frame_start_time = time.time() #time.sleep(0.2)", "python # -*- coding: utf-8 -*- # ClientからOpenCV画像データを受け取り、ライン検出して制御命令を送る # Server:", "######################################## cv_bgr = camera.read() frame_counter += 1 ######################################## # 物体認識", "# 離れて左いる、奥は左カーブ:右に少し曲がる handle_angle=HANDLE_ANGLE/2 else: # 離れて左いる、奥は右カーブ:右に全開で曲がる handle_angle=HANDLE_ANGLE else: # 右にいる", "speed = 60 ''' 左右について tilt_deg: -が右、+が左 angle_deg: +が右、-が左 meters_from_center:", "od = ObjectDetection() print(\"Server start\") try: while True: ######################################## #", "AWS TCP Port #HOST = '2204f9b0e871' # PC Docker #PORT", "logging import threading import numpy as np from lib.functions import", "60 ''' 左右について tilt_deg: -が右、+が左 angle_deg: +が右、-が左 meters_from_center: -が右にいる、+が左にいる handle_angle:", "traceback.print_exc() finally: is_need_header_receive = True is_analyze_running = False sock.shutdown(socket.SHUT_RDWR) sock.close()", "as np import time import os import sys import logging", "camera.init_webcam() camera.start() fps = 1 if IS_SAVE: out = cv2.VideoWriter(os.path.join(OUTPUT_DIR,", "handle_angle = HANDLE_ANGLE if 
handle_angle < -1*HANDLE_ANGLE: handle_angle = -1*HANDLE_ANGLE", "Server TCP Port #HOST = 'a32158c3da9f' # AWS Docker #PORT", "LaneDetection from lib.webcam import WebcamVideoStream # ログ設定 logging.basicConfig(level=logging.DEBUG, format='[%(levelname)s] time:%(created).8f", "global sock global out global ld global od # 通信設定", "True else: print('data finished') is_need_header_receive = True is_analyze_running = False", "= None # 映像準備 camera = WebcamVideoStream() cols,rows,fps,fourcc = camera.init_webcam()", "Headerの受信が必要かどうか。Headerを受信したら、encode('ascii')を通さずに受信データを解析する is_need_header_receive = True ######################################## # 物体認識準備 ######################################## od =", "sock.close() connected_clients_sockets.remove(sock) if out is not None: out.release() except: import", "'192.168.0.77' # Server IP Address PORT = 6666 # Server", "######################################## # ライン検出準備 ######################################## ld = LaneDetection(X_METER,Y_METER,cols=cols,rows=rows) while is_analyze_running: frame_start_time", "= True else: print('data finished') is_need_header_receive = True is_analyze_running =", "from lib.object_detection import ObjectDetection from lib.opencv_lane_detection import LaneDetection from lib.webcam", "launching...\") import socket, select import time import cv2 import numpy", "# 受信待ち ######################################## read_sockets, write_sockets, error_sockets = select.select(connected_clients_sockets, [], [])", "lib.opencv_lane_detection import LaneDetection from lib.webcam import WebcamVideoStream # ログ設定 logging.basicConfig(level=logging.DEBUG,", "# ライン検出クラス ld = None # 物体検出クラス od = None", "handle_angle=HANDLE_ANGLE/2 else: # 離れて左いる、奥は右カーブ:右に全開で曲がる handle_angle=HANDLE_ANGLE else: # 右にいる if np.abs(meters_from_center)*100", "is None: speed = 40 handle_angle = 0 ######################################## #", "handle_angle = -1*tilt1_deg if meters_from_center >= 0: # 左にいる if", "= packet.decode('ascii') txt = 
str(packet) if packet: print('packet True') if", "else: # 離れて右いる、奥は左カーブ、左に全開で曲がる handle_angle=-1*HANDLE_ANGLE # 動作可能な角度内に調整する if handle_angle > HANDLE_ANGLE:", "Server: Jetson TX2 # Client: Jetson TX2/Raspberry Pi3 Docker #", "elif prediction_class == 3: # 20を検出した speed = 50 elif", "%(message)s', ) # 解析、送信スレッド動作フラグ is_analyze_running = False sock = None", "elif prediction_class == 4: # 30を検出した speed = 60 else:", "connected_clients_sockets = [] server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)", "Jetson TX2/Raspberry Pi3 Docker # 1. FFMPEG UDP StreamingをClientで実行する。AWS向け10FPS,Jetson TX2向け1FPS", "is_need_header_receive = True control='0,0,' sock.sendall((\"CONTROL,\"+ control).encode('ascii')) continue ######################################## # 速度調整を行う", "main(): global is_analyze_running global sock global out global ld global", "# 2. Serverを起動する # 3. Clientを起動する # コード修正 # lib/camera.py:", "format='[%(levelname)s] time:%(created).8f pid:%(process)d pn:%(processName)-10s tid:%(thread)d tn:%(threadName)-10s fn:%(funcName)-10s %(message)s', ) #", "= True control='0,0,' sock.sendall((\"CONTROL,\"+ control).encode('ascii')) continue ######################################## # 速度調整を行う ########################################", "= 8091 # AWS TCP Port #HOST = '2204f9b0e871' #", "if IS_SAVE: out.write(cv_bgr) rclasses,rscores,rbboxes = od.get_detection(cv_bgr) print(rclasses,rscores,rbboxes) if len(rclasses) >", "# PC TCP Port ######################################## # 通信準備 ######################################## connected_clients_sockets =", "server_socket: sockfd, client_address = server_socket.accept() connected_clients_sockets.append(sockfd) else: # ClientがServerにHeaderを送る時は4096 Byte以下にすること", "None def do_analyze(): global is_analyze_running global sock global out global", "cv_bgr = camera.read() frame_counter += 1 ######################################## # 物体認識 
########################################", "handle_angle=-1*HANDLE_ANGLE # 動作可能な角度内に調整する if handle_angle > HANDLE_ANGLE: handle_angle = HANDLE_ANGLE", "import time import os import sys import logging import threading", "global od # 通信設定 HOST = '192.168.0.77' # Server IP", "tn:%(threadName)-10s fn:%(funcName)-10s %(message)s', ) # 解析、送信スレッド動作フラグ is_analyze_running = False sock", "write_sockets, error_sockets = select.select(connected_clients_sockets, [], []) for sock in read_sockets:", "映像を保存するかどうか IS_SAVE = True OUTPUT_DIR ='./' OUTPUT_FILENAME = 'received.avi' HANDLE_ANGLE", "> 20: # とても離れて左にいる:右に全開で曲がる handle_angle=HANDLE_ANGLE elif np.abs(meters_from_center)*100 > 10: if", "is not None: out.release() except: import traceback traceback.print_exc() finally: is_need_header_receive", "handle_angle = 0 ######################################## # ライン検出 ######################################## ld.cv_bgr = cv_bgr", "is_need_header_receive = True is_analyze_running = False sock.shutdown(socket.SHUT_RDWR) sock.close() connected_clients_sockets.remove(sock) if", "True ######################################## # 物体認識準備 ######################################## od = ObjectDetection() print(\"Server start\")", "is_analyze_running = True t = threading.Thread(target=do_analyze) t.start() elif packet.startswith('BYE'): print('got", "/home/ubuntu/notebooks/github/SSD-Tensorflow/ を環境に合わせて修正する必要がある ''' Python 3.6 送信するmessageは.encode('ascii')や.encode('utf-8')等でエンコードする必要がる ここではClientから送られてくるOpenCV BGR画像データが'ascii'に変換されているので'ascii'で統一している ''' print(\"wait.", "if packet == 'START': is_analyze_running = True t = threading.Thread(target=do_analyze)", "server_socket.bind((HOST, PORT)) server_socket.listen(10) connected_clients_sockets.append(server_socket) # Headerの受信が必要かどうか。Headerを受信したら、encode('ascii')を通さずに受信データを解析する is_need_header_receive = True ########################################", "np import time import os import sys import logging import", "import numpy as np from lib.functions 
import * from lib.object_detection", "'START': is_analyze_running = True t = threading.Thread(target=do_analyze) t.start() elif packet.startswith('BYE'):", "3.6 送信するmessageは.encode('ascii')や.encode('utf-8')等でエンコードする必要がる ここではClientから送られてくるOpenCV BGR画像データが'ascii'に変換されているので'ascii'で統一している ''' print(\"wait. launching...\") import socket, select", "= '192.168.0.77' # Server IP Address PORT = 6666 #", "映像準備 camera = WebcamVideoStream() cols,rows,fps,fourcc = camera.init_webcam() camera.start() fps =", "control = None roi_vertices = None ipm_vertices = None speed", "= time.time() print(\"FPS={}\".format(round(1/(frame_end_time-frame_start_time),2))) def main(): global is_analyze_running global sock global", "sock global out global X_METER global Y_METER global ld global", "= camera.read() frame_counter += 1 ######################################## # 物体認識 ######################################## #", "= np.min(rclasses) if prediction_class == 1: # 止まれを検出した is_need_header_receive =", "connected_clients_sockets.remove(sock) if out is not None: out.release() except: import traceback", "物体検出クラス od = None def do_analyze(): global is_analyze_running global sock", "WebcamVideoStream # ログ設定 logging.basicConfig(level=logging.DEBUG, format='[%(levelname)s] time:%(created).8f pid:%(process)d pn:%(processName)-10s tid:%(thread)d tn:%(threadName)-10s", "= None def do_analyze(): global is_analyze_running global sock global out", "print('packet True') is_need_header_receive = True else: print('data finished') is_need_header_receive =", "ログ設定 logging.basicConfig(level=logging.DEBUG, format='[%(levelname)s] time:%(created).8f pid:%(process)d pn:%(processName)-10s tid:%(thread)d tn:%(threadName)-10s fn:%(funcName)-10s %(message)s',", "elif packet.startswith('BYE'): print('got BYE') is_need_header_receive = True is_analyze_running = False", "= False sock = None out = None # IPM変換後の画像におけるx,yメートル(黒い部分も含む)", "-1*HANDLE_ANGLE: handle_angle = -1*HANDLE_ANGLE # 車両制御送信 
control=str(speed)+','+str(handle_angle)+',' print(\"speed={},handle_angle={},CONTROL,{}\".format(speed,handle_angle,control)) sock.sendall((\"CONTROL,\"+ control).encode('ascii'))", "# 右にいる if np.abs(meters_from_center)*100 > 20: # とても離れて右にいる:左に全開で曲がる handle_angle=-1*HANDLE_ANGLE elif", "do_analyze(): global is_analyze_running global sock global out global X_METER global", "camera.start() fps = 1 if IS_SAVE: out = cv2.VideoWriter(os.path.join(OUTPUT_DIR, OUTPUT_FILENAME),", "handle_angle > HANDLE_ANGLE: handle_angle = HANDLE_ANGLE if handle_angle < -1*HANDLE_ANGLE:", "-1*HANDLE_ANGLE # 車両制御送信 control=str(speed)+','+str(handle_angle)+',' print(\"speed={},handle_angle={},CONTROL,{}\".format(speed,handle_angle,control)) sock.sendall((\"CONTROL,\"+ control).encode('ascii')) frame_end_time = time.time()", "UDP StreamingをClientで実行する。AWS向け10FPS,Jetson TX2向け1FPS # 2. Serverを起動する # 3. Clientを起動する #", "camera.read() frame_counter += 1 ######################################## # 物体認識 ######################################## # avi動画に保存する", "60 else: # 物体検出無し if speed is None: speed =", "# 1. FFMPEG UDP StreamingをClientで実行する。AWS向け10FPS,Jetson TX2向け1FPS # 2. 
Serverを起動する #", "ライン検出 ######################################## ld.cv_bgr = cv_bgr # ラインを検出する try: tilt1_deg,tilt2_deg,angle1_deg,angle2_deg,curve1_r,curve2_r, \\", "受信待ち ######################################## read_sockets, write_sockets, error_sockets = select.select(connected_clients_sockets, [], []) for", "packet = packet.decode('ascii') txt = str(packet) if packet: print('packet True')", "= LaneDetection(X_METER,Y_METER,cols=cols,rows=rows) while is_analyze_running: frame_start_time = time.time() #time.sleep(0.2) ######################################## #", "frame_end_time = time.time() print(\"FPS={}\".format(round(1/(frame_end_time-frame_start_time),2))) def main(): global is_analyze_running global sock", "import LaneDetection from lib.webcam import WebcamVideoStream # ログ設定 logging.basicConfig(level=logging.DEBUG, format='[%(levelname)s]", "# 3. Clientを起動する # コード修正 # lib/camera.py: vid = cv2.VideoCapture()を環境に合わせて修正する必要がある", "np.abs(meters_from_center)*100 > 10: if tilt2_deg < 0 : # 離れて右いる、奥は右カーブ:左に少し曲がる", "print('packet True') if packet == 'START': is_analyze_running = True t", "None # 映像準備 camera = WebcamVideoStream() cols,rows,fps,fourcc = camera.init_webcam() camera.start()", "None ipm_vertices = None speed = None # 映像準備 camera", "離れて左いる、奥は左カーブ:右に少し曲がる handle_angle=HANDLE_ANGLE/2 else: # 離れて左いる、奥は右カーブ:右に全開で曲がる handle_angle=HANDLE_ANGLE else: # 右にいる if", "# 物体認識準備 ######################################## od = ObjectDetection() print(\"Server start\") try: while", "fps, (int(cols), int(rows))) ######################################## # ライン検出準備 ######################################## ld = LaneDetection(X_METER,Y_METER,cols=cols,rows=rows)", "# 10を検出した speed = 40 elif prediction_class == 3: #", "> 0 : # 離れて左いる、奥は左カーブ:右に少し曲がる handle_angle=HANDLE_ANGLE/2 else: # 離れて左いる、奥は右カーブ:右に全開で曲がる handle_angle=HANDLE_ANGLE", "global is_analyze_running global sock global out global ld global od", "> 20: # とても離れて右にいる:左に全開で曲がる handle_angle=-1*HANDLE_ANGLE elif 
np.abs(meters_from_center)*100 > 10: if", "print(\"Server start\") try: while True: ######################################## # 受信待ち ######################################## read_sockets,", "* from lib.object_detection import ObjectDetection from lib.opencv_lane_detection import LaneDetection from", "fn:%(funcName)-10s %(message)s', ) # 解析、送信スレッド動作フラグ is_analyze_running = False sock =", "\\ meters_from_center = ld.lane_detection() except: # ライン検出失敗 is_need_header_receive = True", "-が右、+が左 angle_deg: +が右、-が左 meters_from_center: -が右にいる、+が左にいる handle_angle: +が右、-が左 ''' ######################################## #", "control).encode('ascii')) continue elif prediction_class == 2: # 10を検出した speed =", "+が右、-が左 meters_from_center: -が右にいる、+が左にいる handle_angle: +が右、-が左 ''' ######################################## # ハンドル角調整を行う ########################################", "time import os import sys import logging import threading import", "0: # 左にいる if np.abs(meters_from_center)*100 > 20: # とても離れて左にいる:右に全開で曲がる handle_angle=HANDLE_ANGLE", "False sock.shutdown(socket.SHUT_RDWR) sock.close() connected_clients_sockets.remove(sock) if out is not None: out.release()", "10: if tilt2_deg > 0 : # 離れて左いる、奥は左カーブ:右に少し曲がる handle_angle=HANDLE_ANGLE/2 else:", "if handle_angle > HANDLE_ANGLE: handle_angle = HANDLE_ANGLE if handle_angle <", "if IS_SAVE: out = cv2.VideoWriter(os.path.join(OUTPUT_DIR, OUTPUT_FILENAME), int(fourcc), fps, (int(cols), int(rows)))", "except: # ライン検出失敗 is_need_header_receive = True control='0,0,' sock.sendall((\"CONTROL,\"+ control).encode('ascii')) continue", "disconnect') is_need_header_receive = True is_analyze_running = False sock.shutdown(socket.SHUT_RDWR) sock.close() connected_clients_sockets.remove(sock)", "control).encode('ascii')) continue ######################################## # 速度調整を行う ######################################## #if np.abs(angle2_deg) > np.abs(angle1_deg):", "= 40 elif prediction_class == 3: # 20を検出した speed =", "sock.sendall((\"CONTROL,\"+ 
control).encode('ascii')) frame_end_time = time.time() print(\"FPS={}\".format(round(1/(frame_end_time-frame_start_time),2))) def main(): global is_analyze_running", "not None: out.release() if __name__ == '__main__': main() print(\"end server\")", "if handle_angle < -1*HANDLE_ANGLE: handle_angle = -1*HANDLE_ANGLE # 車両制御送信 control=str(speed)+','+str(handle_angle)+','", "> 0: prediction_class = np.min(rclasses) if prediction_class == 1: #", "= camera.init_webcam() camera.start() fps = 1 if IS_SAVE: out =", "import ObjectDetection from lib.opencv_lane_detection import LaneDetection from lib.webcam import WebcamVideoStream", "[]) for sock in read_sockets: if sock == server_socket: sockfd,", "out is not None: out.release() if __name__ == '__main__': main()", "20を検出した speed = 50 elif prediction_class == 4: # 30を検出した", "control='0,0,' sock.sendall((\"CONTROL,\"+ control).encode('ascii')) continue ######################################## # 速度調整を行う ######################################## #if np.abs(angle2_deg)", "import traceback traceback.print_exc() finally: is_need_header_receive = True is_analyze_running = False", "4: # 30を検出した speed = 60 else: # 物体検出無し if", "= None # 物体検出クラス od = None def do_analyze(): global", "sock.sendall((\"CONTROL,\"+ control).encode('ascii')) continue ######################################## # 速度調整を行う ######################################## #if np.abs(angle2_deg) >", "PORT = 6666 # Server TCP Port #HOST = 'a32158c3da9f'", "8091 # PC TCP Port ######################################## # 通信準備 ######################################## connected_clients_sockets", "ld = None # 物体検出クラス od = None def do_analyze():", "from lib.functions import * from lib.object_detection import ObjectDetection from lib.opencv_lane_detection", "print(rclasses,rscores,rbboxes) if len(rclasses) > 0: prediction_class = np.min(rclasses) if prediction_class", "sys import logging import threading import numpy as np from", "in read_sockets: if sock == server_socket: sockfd, 
client_address = server_socket.accept()", "lib.webcam import WebcamVideoStream # ログ設定 logging.basicConfig(level=logging.DEBUG, format='[%(levelname)s] time:%(created).8f pid:%(process)d pn:%(processName)-10s", "OUTPUT_DIR ='./' OUTPUT_FILENAME = 'received.avi' HANDLE_ANGLE = 42 frame_counter =", "fps = 1 if IS_SAVE: out = cv2.VideoWriter(os.path.join(OUTPUT_DIR, OUTPUT_FILENAME), int(fourcc),", "######################################## # ハンドル角調整を行う ######################################## handle_angle = -1*tilt1_deg if meters_from_center >=", "od # 通信設定 HOST = '192.168.0.77' # Server IP Address", "np.min(rclasses) if prediction_class == 1: # 止まれを検出した is_need_header_receive = True", "ライン検出失敗 is_need_header_receive = True control='0,0,' sock.sendall((\"CONTROL,\"+ control).encode('ascii')) continue ######################################## #", "out is not None: out.release() if not is_need_header_receive: # ここには来ない", "######################################## # 物体認識準備 ######################################## od = ObjectDetection() print(\"Server start\") try:", "is not None: out.release() else: print('client disconnect') is_need_header_receive = True", "cv2.VideoCapture()を環境に合わせて修正する必要がある # lib/object_detection.py: /home/ubuntu/notebooks/github/SSD-Tensorflow/ を環境に合わせて修正する必要がある ''' Python 3.6 送信するmessageは.encode('ascii')や.encode('utf-8')等でエンコードする必要がる ここではClientから送られてくるOpenCV", "# 止まれを検出した is_need_header_receive = True control='0,0,' sock.sendall((\"CONTROL,\"+ control).encode('ascii')) continue elif", "左右について tilt_deg: -が右、+が左 angle_deg: +が右、-が左 meters_from_center: -が右にいる、+が左にいる handle_angle: +が右、-が左 '''", "import socket, select import time import cv2 import numpy as", "#HOST = '2204f9b0e871' # PC Docker #PORT = 8091 #", "= threading.Thread(target=do_analyze) t.start() elif packet.startswith('BYE'): print('got BYE') is_need_header_receive = True", "out.release() else: print('client disconnect') is_need_header_receive = True is_analyze_running = False", "Server IP 
Address PORT = 6666 # Server TCP Port", "HOST = '192.168.0.77' # Server IP Address PORT = 6666", "is_analyze_running global sock global out global ld global od #", "handle_angle < -1*HANDLE_ANGLE: handle_angle = -1*HANDLE_ANGLE # 車両制御送信 control=str(speed)+','+str(handle_angle)+',' print(\"speed={},handle_angle={},CONTROL,{}\".format(speed,handle_angle,control))", "#PORT = 8091 # AWS TCP Port #HOST = '2204f9b0e871'", "txt = str(packet) if packet: print('packet True') if packet ==", "= socket.socket(socket.AF_INET, socket.SOCK_STREAM) server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) server_socket.bind((HOST, PORT)) server_socket.listen(10) connected_clients_sockets.append(server_socket)", "np from lib.functions import * from lib.object_detection import ObjectDetection from", "ipm_vertices = None speed = None # 映像準備 camera =", "Y_METER=1 # ライン検出クラス ld = None # 物体検出クラス od =", "handle_angle = -1*HANDLE_ANGLE # 車両制御送信 control=str(speed)+','+str(handle_angle)+',' print(\"speed={},handle_angle={},CONTROL,{}\".format(speed,handle_angle,control)) sock.sendall((\"CONTROL,\"+ control).encode('ascii')) frame_end_time", "50 elif prediction_class == 4: # 30を検出した speed = 60", "# ClientがServerにHeaderを送る時は4096 Byte以下にすること packet = sock.recv(4096) print(type(packet)) # if is_need_header_receive:", "#time.sleep(0.2) ######################################## # 映像取得 ######################################## cv_bgr = camera.read() frame_counter +=", "# ライン検出 ######################################## ld.cv_bgr = cv_bgr # ラインを検出する try: tilt1_deg,tilt2_deg,angle1_deg,angle2_deg,curve1_r,curve2_r,", "# lib/camera.py: vid = cv2.VideoCapture()を環境に合わせて修正する必要がある # lib/object_detection.py: /home/ubuntu/notebooks/github/SSD-Tensorflow/ を環境に合わせて修正する必要がある '''", "# 離れて左いる、奥は右カーブ:右に全開で曲がる handle_angle=HANDLE_ANGLE else: # 右にいる if np.abs(meters_from_center)*100 > 20:", "int(rows))) ######################################## # ライン検出準備 ######################################## ld = 
LaneDetection(X_METER,Y_METER,cols=cols,rows=rows) while is_analyze_running:", "traceback traceback.print_exc() finally: is_need_header_receive = True is_analyze_running = False sock.shutdown(socket.SHUT_RDWR)", "# lib/object_detection.py: /home/ubuntu/notebooks/github/SSD-Tensorflow/ を環境に合わせて修正する必要がある ''' Python 3.6 送信するmessageは.encode('ascii')や.encode('utf-8')等でエンコードする必要がる ここではClientから送られてくるOpenCV BGR画像データが'ascii'に変換されているので'ascii'で統一している", "Serverを起動する # 3. Clientを起動する # コード修正 # lib/camera.py: vid =", "ObjectDetection from lib.opencv_lane_detection import LaneDetection from lib.webcam import WebcamVideoStream #", "from lib.opencv_lane_detection import LaneDetection from lib.webcam import WebcamVideoStream # ログ設定", "# speed = 50 #else: # speed = 60 '''", "= WebcamVideoStream() cols,rows,fps,fourcc = camera.init_webcam() camera.start() fps = 1 if", "not None: out.release() else: print('client disconnect') is_need_header_receive = True is_analyze_running", "-1*tilt1_deg if meters_from_center >= 0: # 左にいる if np.abs(meters_from_center)*100 >", "is_need_header_receive = True ######################################## # 物体認識準備 ######################################## od = ObjectDetection()", "= ObjectDetection() print(\"Server start\") try: while True: ######################################## # 受信待ち", "try: while True: ######################################## # 受信待ち ######################################## read_sockets, write_sockets, error_sockets", "= str(packet) if packet: print('packet True') if packet == 'START':", "= True t = threading.Thread(target=do_analyze) t.start() elif packet.startswith('BYE'): print('got BYE')", "= None ipm_vertices = None speed = None # 映像準備", "server_socket.listen(10) connected_clients_sockets.append(server_socket) # Headerの受信が必要かどうか。Headerを受信したら、encode('ascii')を通さずに受信データを解析する is_need_header_receive = True ######################################## # 物体認識準備", "OUTPUT_FILENAME), int(fourcc), fps, (int(cols), int(rows))) 
######################################## # ライン検出準備 ######################################## ld", "server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) server_socket.bind((HOST, PORT)) server_socket.listen(10)", "global X_METER global Y_METER global ld global od # 映像を保存するかどうか", "= 'received.avi' HANDLE_ANGLE = 42 frame_counter = 0 fourcc =", "# ハンドル角調整を行う ######################################## handle_angle = -1*tilt1_deg if meters_from_center >= 0:", "time:%(created).8f pid:%(process)d pn:%(processName)-10s tid:%(thread)d tn:%(threadName)-10s fn:%(funcName)-10s %(message)s', ) # 解析、送信スレッド動作フラグ", "''' Python 3.6 送信するmessageは.encode('ascii')や.encode('utf-8')等でエンコードする必要がる ここではClientから送られてくるOpenCV BGR画像データが'ascii'に変換されているので'ascii'で統一している ''' print(\"wait. launching...\") import", "error_sockets = select.select(connected_clients_sockets, [], []) for sock in read_sockets: if", "print(type(packet)) # if is_need_header_receive: print('header') packet = packet.decode('ascii') txt =", "packet: print('packet True') if packet == 'START': is_analyze_running = True", "Port #HOST = 'a32158c3da9f' # AWS Docker #PORT = 8091", "roi_vertices = None ipm_vertices = None speed = None #", "# ライン検出準備 ######################################## ld = LaneDetection(X_METER,Y_METER,cols=cols,rows=rows) while is_analyze_running: frame_start_time =", "else: # 離れて左いる、奥は右カーブ:右に全開で曲がる handle_angle=HANDLE_ANGLE else: # 右にいる if np.abs(meters_from_center)*100 >", "client_address = server_socket.accept() connected_clients_sockets.append(sockfd) else: # ClientがServerにHeaderを送る時は4096 Byte以下にすること packet =", "threading.Thread(target=do_analyze) t.start() elif packet.startswith('BYE'): print('got BYE') is_need_header_receive = True is_analyze_running", "StreamingをClientで実行する。AWS向け10FPS,Jetson TX2向け1FPS # 2. Serverを起動する # 3. 
Clientを起動する # コード修正", "######################################## # 速度調整を行う ######################################## #if np.abs(angle2_deg) > np.abs(angle1_deg): # speed", "# -*- coding: utf-8 -*- # ClientからOpenCV画像データを受け取り、ライン検出して制御命令を送る # Server: Jetson", "threading import numpy as np from lib.functions import * from", "if sock == server_socket: sockfd, client_address = server_socket.accept() connected_clients_sockets.append(sockfd) else:", "# AWS Docker #PORT = 8091 # AWS TCP Port", "Docker #PORT = 8091 # PC TCP Port ######################################## #", "IS_SAVE = True OUTPUT_DIR ='./' OUTPUT_FILENAME = 'received.avi' HANDLE_ANGLE =", "if prediction_class == 1: # 止まれを検出した is_need_header_receive = True control='0,0,'", "を環境に合わせて修正する必要がある ''' Python 3.6 送信するmessageは.encode('ascii')や.encode('utf-8')等でエンコードする必要がる ここではClientから送られてくるOpenCV BGR画像データが'ascii'に変換されているので'ascii'で統一している ''' print(\"wait. launching...\")", "# 30を検出した speed = 60 else: # 物体検出無し if speed", "finished') is_need_header_receive = True is_analyze_running = False sock.shutdown(socket.SHUT_RDWR) sock.close() connected_clients_sockets.remove(sock)", "True OUTPUT_DIR ='./' OUTPUT_FILENAME = 'received.avi' HANDLE_ANGLE = 42 frame_counter", "= True ######################################## # 物体認識準備 ######################################## od = ObjectDetection() print(\"Server", "packet.decode('ascii') txt = str(packet) if packet: print('packet True') if packet", "40 elif prediction_class == 3: # 20を検出した speed = 50", "######################################## #if np.abs(angle2_deg) > np.abs(angle1_deg): # speed = 50 #else:", "20: # とても離れて左にいる:右に全開で曲がる handle_angle=HANDLE_ANGLE elif np.abs(meters_from_center)*100 > 10: if tilt2_deg", "is_analyze_running = False sock.shutdown(socket.SHUT_RDWR) sock.close() connected_clients_sockets.remove(sock) if out is not", "######################################## handle_angle = -1*tilt1_deg if meters_from_center >= 0: # 左にいる", "sock.close() 
connected_clients_sockets.remove(sock) if out is not None: out.release() else: print('client", "= None roi_vertices = None ipm_vertices = None speed =", "= None out = None # IPM変換後の画像におけるx,yメートル(黒い部分も含む) X_METER=1.5 Y_METER=1 #", "speed = 40 handle_angle = 0 ######################################## # ライン検出 ########################################", "packet == 'START': is_analyze_running = True t = threading.Thread(target=do_analyze) t.start()", "ライン検出準備 ######################################## ld = LaneDetection(X_METER,Y_METER,cols=cols,rows=rows) while is_analyze_running: frame_start_time = time.time()", "True is_analyze_running = False sock.shutdown(socket.SHUT_RDWR) sock.close() connected_clients_sockets.remove(sock) server_socket.close() if out", "else: # ClientがServerにHeaderを送る時は4096 Byte以下にすること packet = sock.recv(4096) print(type(packet)) # if", "= server_socket.accept() connected_clients_sockets.append(sockfd) else: # ClientがServerにHeaderを送る時は4096 Byte以下にすること packet = sock.recv(4096)", "Address PORT = 6666 # Server TCP Port #HOST =", "if tilt2_deg < 0 : # 離れて右いる、奥は右カーブ:左に少し曲がる handle_angle=-1*HANDLE_ANGLE/2 else: #", "out.release() if not is_need_header_receive: # ここには来ない print('body') if packet: print('packet", "# ログ設定 logging.basicConfig(level=logging.DEBUG, format='[%(levelname)s] time:%(created).8f pid:%(process)d pn:%(processName)-10s tid:%(thread)d tn:%(threadName)-10s fn:%(funcName)-10s", ">= 0: # 左にいる if np.abs(meters_from_center)*100 > 20: # とても離れて左にいる:右に全開で曲がる", "-*- # ClientからOpenCV画像データを受け取り、ライン検出して制御命令を送る # Server: Jetson TX2 # Client: Jetson", "is not None: out.release() if __name__ == '__main__': main() print(\"end", "angle_deg: +が右、-が左 meters_from_center: -が右にいる、+が左にいる handle_angle: +が右、-が左 ''' ######################################## # ハンドル角調整を行う", "lib.functions import * from lib.object_detection import ObjectDetection from lib.opencv_lane_detection import", "40 handle_angle = 0 ######################################## # ライン検出 
######################################## ld.cv_bgr =", "os import sys import logging import threading import numpy as", "True control='0,0,' sock.sendall((\"CONTROL,\"+ control).encode('ascii')) continue ######################################## # 速度調整を行う ######################################## #if", "import WebcamVideoStream # ログ設定 logging.basicConfig(level=logging.DEBUG, format='[%(levelname)s] time:%(created).8f pid:%(process)d pn:%(processName)-10s tid:%(thread)d", "= -1*HANDLE_ANGLE # 車両制御送信 control=str(speed)+','+str(handle_angle)+',' print(\"speed={},handle_angle={},CONTROL,{}\".format(speed,handle_angle,control)) sock.sendall((\"CONTROL,\"+ control).encode('ascii')) frame_end_time =", "coding: utf-8 -*- # ClientからOpenCV画像データを受け取り、ライン検出して制御命令を送る # Server: Jetson TX2 #", "######################################## od = ObjectDetection() print(\"Server start\") try: while True: ########################################", "sock.sendall((\"CONTROL,\"+ control).encode('ascii')) continue elif prediction_class == 2: # 10を検出した speed", "int(fourcc), fps, (int(cols), int(rows))) ######################################## # ライン検出準備 ######################################## ld =", "print('data finished') is_need_header_receive = True is_analyze_running = False sock.shutdown(socket.SHUT_RDWR) sock.close()", "prediction_class == 4: # 30を検出した speed = 60 else: #", "1) server_socket.bind((HOST, PORT)) server_socket.listen(10) connected_clients_sockets.append(server_socket) # Headerの受信が必要かどうか。Headerを受信したら、encode('ascii')を通さずに受信データを解析する is_need_header_receive = True", "if packet: print('packet True') is_need_header_receive = True else: print('data finished')", "FFMPEG UDP StreamingをClientで実行する。AWS向け10FPS,Jetson TX2向け1FPS # 2. Serverを起動する # 3. 
Clientを起動する", "#if np.abs(angle2_deg) > np.abs(angle1_deg): # speed = 50 #else: #", "######################################## # 映像取得 ######################################## cv_bgr = camera.read() frame_counter += 1", "ld = LaneDetection(X_METER,Y_METER,cols=cols,rows=rows) while is_analyze_running: frame_start_time = time.time() #time.sleep(0.2) ########################################", "is_need_header_receive = True else: print('data finished') is_need_header_receive = True is_analyze_running", "global out global ld global od # 通信設定 HOST =", "= 40 handle_angle = 0 ######################################## # ライン検出 ######################################## ld.cv_bgr", "handle_angle=-1*HANDLE_ANGLE/2 else: # 離れて右いる、奥は左カーブ、左に全開で曲がる handle_angle=-1*HANDLE_ANGLE # 動作可能な角度内に調整する if handle_angle >", "######################################## read_sockets, write_sockets, error_sockets = select.select(connected_clients_sockets, [], []) for sock", "global Y_METER global ld global od # 映像を保存するかどうか IS_SAVE =", "else: print('client disconnect') is_need_header_receive = True is_analyze_running = False sock.shutdown(socket.SHUT_RDWR)", "# ラインを検出する try: tilt1_deg,tilt2_deg,angle1_deg,angle2_deg,curve1_r,curve2_r, \\ meters_from_center = ld.lane_detection() except: #", "prediction_class == 3: # 20を検出した speed = 50 elif prediction_class", "# 物体検出無し if speed is None: speed = 40 handle_angle", "#else: # speed = 60 ''' 左右について tilt_deg: -が右、+が左 angle_deg:", "prediction_class == 2: # 10を検出した speed = 40 elif prediction_class", "TCP Port #HOST = '2204f9b0e871' # PC Docker #PORT =", "is_analyze_running global sock global out global X_METER global Y_METER global", "socket.SOCK_STREAM) server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) server_socket.bind((HOST, PORT)) server_socket.listen(10) connected_clients_sockets.append(server_socket) # Headerの受信が必要かどうか。Headerを受信したら、encode('ascii')を通さずに受信データを解析する", "print(\"FPS={}\".format(round(1/(frame_end_time-frame_start_time),2))) def 
main(): global is_analyze_running global sock global out global", "is_need_header_receive = True control='0,0,' sock.sendall((\"CONTROL,\"+ control).encode('ascii')) continue elif prediction_class ==", "True control='0,0,' sock.sendall((\"CONTROL,\"+ control).encode('ascii')) continue elif prediction_class == 2: #", "= [] server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) server_socket.bind((HOST,", "解析、送信スレッド動作フラグ is_analyze_running = False sock = None out = None", "IP Address PORT = 6666 # Server TCP Port #HOST", "select import time import cv2 import numpy as np import", "= HANDLE_ANGLE if handle_angle < -1*HANDLE_ANGLE: handle_angle = -1*HANDLE_ANGLE #", "None out = None # IPM変換後の画像におけるx,yメートル(黒い部分も含む) X_METER=1.5 Y_METER=1 # ライン検出クラス", "= 0 ######################################## # ライン検出 ######################################## ld.cv_bgr = cv_bgr #", "cols,rows,fps,fourcc = camera.init_webcam() camera.start() fps = 1 if IS_SAVE: out", "Pi3 Docker # 1. FFMPEG UDP StreamingをClientで実行する。AWS向け10FPS,Jetson TX2向け1FPS # 2.", "== 'START': is_analyze_running = True t = threading.Thread(target=do_analyze) t.start() elif", "コード修正 # lib/camera.py: vid = cv2.VideoCapture()を環境に合わせて修正する必要がある # lib/object_detection.py: /home/ubuntu/notebooks/github/SSD-Tensorflow/ を環境に合わせて修正する必要がある", "is_analyze_running: frame_start_time = time.time() #time.sleep(0.2) ######################################## # 映像取得 ######################################## cv_bgr", "1. FFMPEG UDP StreamingをClientで実行する。AWS向け10FPS,Jetson TX2向け1FPS # 2. Serverを起動する # 3.", "BYE') is_need_header_receive = True is_analyze_running = False sock.shutdown(socket.SHUT_RDWR) sock.close() connected_clients_sockets.remove(sock)", "ここではClientから送られてくるOpenCV BGR画像データが'ascii'に変換されているので'ascii'で統一している ''' print(\"wait. 
launching...\") import socket, select import time", "######################################## connected_clients_sockets = [] server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,", "sock.recv(4096) print(type(packet)) # if is_need_header_receive: print('header') packet = packet.decode('ascii') txt", "8091 # AWS TCP Port #HOST = '2204f9b0e871' # PC", "送信するmessageは.encode('ascii')や.encode('utf-8')等でエンコードする必要がる ここではClientから送られてくるOpenCV BGR画像データが'ascii'に変換されているので'ascii'で統一している ''' print(\"wait. launching...\") import socket, select import", "if len(rclasses) > 0: prediction_class = np.min(rclasses) if prediction_class ==", "time.time() #time.sleep(0.2) ######################################## # 映像取得 ######################################## cv_bgr = camera.read() frame_counter", "avi動画に保存する if IS_SAVE: out.write(cv_bgr) rclasses,rscores,rbboxes = od.get_detection(cv_bgr) print(rclasses,rscores,rbboxes) if len(rclasses)", "とても離れて右にいる:左に全開で曲がる handle_angle=-1*HANDLE_ANGLE elif np.abs(meters_from_center)*100 > 10: if tilt2_deg < 0", "else: # 右にいる if np.abs(meters_from_center)*100 > 20: # とても離れて右にいる:左に全開で曲がる handle_angle=-1*HANDLE_ANGLE", "import numpy as np import time import os import sys", "tilt2_deg < 0 : # 離れて右いる、奥は右カーブ:左に少し曲がる handle_angle=-1*HANDLE_ANGLE/2 else: # 離れて右いる、奥は左カーブ、左に全開で曲がる", "socket.SO_REUSEADDR, 1) server_socket.bind((HOST, PORT)) server_socket.listen(10) connected_clients_sockets.append(server_socket) # Headerの受信が必要かどうか。Headerを受信したら、encode('ascii')を通さずに受信データを解析する is_need_header_receive =", "とても離れて左にいる:右に全開で曲がる handle_angle=HANDLE_ANGLE elif np.abs(meters_from_center)*100 > 10: if tilt2_deg > 0", "PC TCP Port ######################################## # 通信準備 ######################################## connected_clients_sockets = []", "== 4: # 30を検出した speed = 60 else: # 物体検出無し", "PC Docker #PORT = 8091 # PC TCP Port ########################################", "= 
cv2.VideoWriter(os.path.join(OUTPUT_DIR, OUTPUT_FILENAME), int(fourcc), fps, (int(cols), int(rows))) ######################################## # ライン検出準備", "frame_counter += 1 ######################################## # 物体認識 ######################################## # avi動画に保存する if", "######################################## # avi動画に保存する if IS_SAVE: out.write(cv_bgr) rclasses,rscores,rbboxes = od.get_detection(cv_bgr) print(rclasses,rscores,rbboxes)", "connected_clients_sockets.remove(sock) server_socket.close() if out is not None: out.release() if __name__", "global is_analyze_running global sock global out global X_METER global Y_METER", "######################################## # 通信準備 ######################################## connected_clients_sockets = [] server_socket = socket.socket(socket.AF_INET,", "sock in read_sockets: if sock == server_socket: sockfd, client_address =", "= True is_analyze_running = False sock.shutdown(socket.SHUT_RDWR) sock.close() connected_clients_sockets.remove(sock) if out", "sock.shutdown(socket.SHUT_RDWR) sock.close() connected_clients_sockets.remove(sock) if out is not None: out.release() if", "#!/usr/bin/env python # -*- coding: utf-8 -*- # ClientからOpenCV画像データを受け取り、ライン検出して制御命令を送る #", "10を検出した speed = 40 elif prediction_class == 3: # 20を検出した", "= time.time() #time.sleep(0.2) ######################################## # 映像取得 ######################################## cv_bgr = camera.read()", "try: tilt1_deg,tilt2_deg,angle1_deg,angle2_deg,curve1_r,curve2_r, \\ meters_from_center = ld.lane_detection() except: # ライン検出失敗 is_need_header_receive", "# 20を検出した speed = 50 elif prediction_class == 4: #", "meters_from_center: -が右にいる、+が左にいる handle_angle: +が右、-が左 ''' ######################################## # ハンドル角調整を行う ######################################## handle_angle", "# 映像取得 ######################################## cv_bgr = camera.read() frame_counter += 1 ########################################", "import threading import numpy as np from 
lib.functions import *", "# 離れて右いる、奥は右カーブ:左に少し曲がる handle_angle=-1*HANDLE_ANGLE/2 else: # 離れて右いる、奥は左カーブ、左に全開で曲がる handle_angle=-1*HANDLE_ANGLE # 動作可能な角度内に調整する if", "# 離れて右いる、奥は左カーブ、左に全開で曲がる handle_angle=-1*HANDLE_ANGLE # 動作可能な角度内に調整する if handle_angle > HANDLE_ANGLE: handle_angle", "# 解析、送信スレッド動作フラグ is_analyze_running = False sock = None out =", "1 ######################################## # 物体認識 ######################################## # avi動画に保存する if IS_SAVE: out.write(cv_bgr)", "# Headerの受信が必要かどうか。Headerを受信したら、encode('ascii')を通さずに受信データを解析する is_need_header_receive = True ######################################## # 物体認識準備 ######################################## od", "''' print(\"wait. launching...\") import socket, select import time import cv2", "ld.lane_detection() except: # ライン検出失敗 is_need_header_receive = True control='0,0,' sock.sendall((\"CONTROL,\"+ control).encode('ascii'))", "True') if packet == 'START': is_analyze_running = True t =", "np.abs(meters_from_center)*100 > 20: # とても離れて右にいる:左に全開で曲がる handle_angle=-1*HANDLE_ANGLE elif np.abs(meters_from_center)*100 > 10:", "+= 1 ######################################## # 物体認識 ######################################## # avi動画に保存する if IS_SAVE:", "tilt_deg: -が右、+が左 angle_deg: +が右、-が左 meters_from_center: -が右にいる、+が左にいる handle_angle: +が右、-が左 ''' ########################################", "global sock global out global X_METER global Y_METER global ld", "# avi動画に保存する if IS_SAVE: out.write(cv_bgr) rclasses,rscores,rbboxes = od.get_detection(cv_bgr) print(rclasses,rscores,rbboxes) if", ": # 離れて左いる、奥は左カーブ:右に少し曲がる handle_angle=HANDLE_ANGLE/2 else: # 離れて左いる、奥は右カーブ:右に全開で曲がる handle_angle=HANDLE_ANGLE else: #", "50 #else: # speed = 60 ''' 左右について tilt_deg: -が右、+が左", "out.release() except: import traceback traceback.print_exc() finally: is_need_header_receive = True is_analyze_running", "cv2 import numpy as np import time import os import", "IPM変換後の画像におけるx,yメートル(黒い部分も含む) X_METER=1.5 Y_METER=1 # ライン検出クラス ld = None # 物体検出クラス", 
"OUTPUT_FILENAME = 'received.avi' HANDLE_ANGLE = 42 frame_counter = 0 fourcc", "######################################## # 受信待ち ######################################## read_sockets, write_sockets, error_sockets = select.select(connected_clients_sockets, [],", "Clientを起動する # コード修正 # lib/camera.py: vid = cv2.VideoCapture()を環境に合わせて修正する必要がある # lib/object_detection.py:", "# AWS TCP Port #HOST = '2204f9b0e871' # PC Docker", "True') is_need_header_receive = True else: print('data finished') is_need_header_receive = True", "lib/camera.py: vid = cv2.VideoCapture()を環境に合わせて修正する必要がある # lib/object_detection.py: /home/ubuntu/notebooks/github/SSD-Tensorflow/ を環境に合わせて修正する必要がある ''' Python", "# 車両制御送信 control=str(speed)+','+str(handle_angle)+',' print(\"speed={},handle_angle={},CONTROL,{}\".format(speed,handle_angle,control)) sock.sendall((\"CONTROL,\"+ control).encode('ascii')) frame_end_time = time.time() print(\"FPS={}\".format(round(1/(frame_end_time-frame_start_time),2)))", "通信設定 HOST = '192.168.0.77' # Server IP Address PORT =", "if np.abs(meters_from_center)*100 > 20: # とても離れて左にいる:右に全開で曲がる handle_angle=HANDLE_ANGLE elif np.abs(meters_from_center)*100 >", "> HANDLE_ANGLE: handle_angle = HANDLE_ANGLE if handle_angle < -1*HANDLE_ANGLE: handle_angle", "t = threading.Thread(target=do_analyze) t.start() elif packet.startswith('BYE'): print('got BYE') is_need_header_receive =", "not None: out.release() except: import traceback traceback.print_exc() finally: is_need_header_receive =", "= False sock.shutdown(socket.SHUT_RDWR) sock.close() connected_clients_sockets.remove(sock) server_socket.close() if out is not", "server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) server_socket.bind((HOST, PORT)) server_socket.listen(10) connected_clients_sockets.append(server_socket) # Headerの受信が必要かどうか。Headerを受信したら、encode('ascii')を通さずに受信データを解析する is_need_header_receive", "while is_analyze_running: frame_start_time = time.time() #time.sleep(0.2) 
######################################## # 映像取得 ########################################", "# 動作可能な角度内に調整する if handle_angle > HANDLE_ANGLE: handle_angle = HANDLE_ANGLE if", "out = cv2.VideoWriter(os.path.join(OUTPUT_DIR, OUTPUT_FILENAME), int(fourcc), fps, (int(cols), int(rows))) ######################################## #", "out = None # IPM変換後の画像におけるx,yメートル(黒い部分も含む) X_METER=1.5 Y_METER=1 # ライン検出クラス ld", "= 'a32158c3da9f' # AWS Docker #PORT = 8091 # AWS", "connected_clients_sockets.append(sockfd) else: # ClientがServerにHeaderを送る時は4096 Byte以下にすること packet = sock.recv(4096) print(type(packet)) #", "None roi_vertices = None ipm_vertices = None speed = None", "frame_start_time = time.time() #time.sleep(0.2) ######################################## # 映像取得 ######################################## cv_bgr =", "######################################## ld.cv_bgr = cv_bgr # ラインを検出する try: tilt1_deg,tilt2_deg,angle1_deg,angle2_deg,curve1_r,curve2_r, \\ meters_from_center", "TCP Port #HOST = 'a32158c3da9f' # AWS Docker #PORT =", "utf-8 -*- # ClientからOpenCV画像データを受け取り、ライン検出して制御命令を送る # Server: Jetson TX2 # Client:", "= True is_analyze_running = False sock.shutdown(socket.SHUT_RDWR) sock.close() connected_clients_sockets.remove(sock) server_socket.close() if", "is_need_header_receive = True is_analyze_running = False sock.shutdown(socket.SHUT_RDWR) sock.close() connected_clients_sockets.remove(sock) server_socket.close()", "= od.get_detection(cv_bgr) print(rclasses,rscores,rbboxes) if len(rclasses) > 0: prediction_class = np.min(rclasses)", "handle_angle=HANDLE_ANGLE elif np.abs(meters_from_center)*100 > 10: if tilt2_deg > 0 :", "sock global out global ld global od # 通信設定 HOST", "ハンドル角調整を行う ######################################## handle_angle = -1*tilt1_deg if meters_from_center >= 0: #", "'2204f9b0e871' # PC Docker #PORT = 8091 # PC TCP", ": # 離れて右いる、奥は右カーブ:左に少し曲がる handle_angle=-1*HANDLE_ANGLE/2 else: # 離れて右いる、奥は左カーブ、左に全開で曲がる handle_angle=-1*HANDLE_ANGLE # 動作可能な角度内に調整する", "tilt2_deg 
> 0 : # 離れて左いる、奥は左カーブ:右に少し曲がる handle_angle=HANDLE_ANGLE/2 else: # 離れて左いる、奥は右カーブ:右に全開で曲がる", "> 10: if tilt2_deg > 0 : # 離れて左いる、奥は左カーブ:右に少し曲がる handle_angle=HANDLE_ANGLE/2", "= 6666 # Server TCP Port #HOST = 'a32158c3da9f' #", "is_need_header_receive: print('header') packet = packet.decode('ascii') txt = str(packet) if packet:", "not None: out.release() if not is_need_header_receive: # ここには来ない print('body') if", "continue ######################################## # 速度調整を行う ######################################## #if np.abs(angle2_deg) > np.abs(angle1_deg): #", "control).encode('ascii')) frame_end_time = time.time() print(\"FPS={}\".format(round(1/(frame_end_time-frame_start_time),2))) def main(): global is_analyze_running global", "''' 左右について tilt_deg: -が右、+が左 angle_deg: +が右、-が左 meters_from_center: -が右にいる、+が左にいる handle_angle: +が右、-が左", "prediction_class == 1: # 止まれを検出した is_need_header_receive = True control='0,0,' sock.sendall((\"CONTROL,\"+", "== server_socket: sockfd, client_address = server_socket.accept() connected_clients_sockets.append(sockfd) else: # ClientがServerにHeaderを送る時は4096", "od = None def do_analyze(): global is_analyze_running global sock global", "< 0 : # 離れて右いる、奥は右カーブ:左に少し曲がる handle_angle=-1*HANDLE_ANGLE/2 else: # 離れて右いる、奥は左カーブ、左に全開で曲がる handle_angle=-1*HANDLE_ANGLE", "# PC Docker #PORT = 8091 # PC TCP Port", "0: prediction_class = np.min(rclasses) if prediction_class == 1: # 止まれを検出した", "物体検出無し if speed is None: speed = 40 handle_angle =", "2. Serverを起動する # 3. 
Clientを起動する # コード修正 # lib/camera.py: vid", "True is_analyze_running = False sock.shutdown(socket.SHUT_RDWR) sock.close() connected_clients_sockets.remove(sock) if out is", "numpy as np from lib.functions import * from lib.object_detection import", "None # 物体検出クラス od = None def do_analyze(): global is_analyze_running", "''' ######################################## # ハンドル角調整を行う ######################################## handle_angle = -1*tilt1_deg if meters_from_center", "#HOST = 'a32158c3da9f' # AWS Docker #PORT = 8091 #", "1 if IS_SAVE: out = cv2.VideoWriter(os.path.join(OUTPUT_DIR, OUTPUT_FILENAME), int(fourcc), fps, (int(cols),", "tilt1_deg,tilt2_deg,angle1_deg,angle2_deg,curve1_r,curve2_r, \\ meters_from_center = ld.lane_detection() except: # ライン検出失敗 is_need_header_receive =", "elif np.abs(meters_from_center)*100 > 10: if tilt2_deg > 0 : #", "ld.cv_bgr = cv_bgr # ラインを検出する try: tilt1_deg,tilt2_deg,angle1_deg,angle2_deg,curve1_r,curve2_r, \\ meters_from_center =", "= True control='0,0,' sock.sendall((\"CONTROL,\"+ control).encode('ascii')) continue elif prediction_class == 2:", "離れて左いる、奥は右カーブ:右に全開で曲がる handle_angle=HANDLE_ANGLE else: # 右にいる if np.abs(meters_from_center)*100 > 20: #", "packet = sock.recv(4096) print(type(packet)) # if is_need_header_receive: print('header') packet =", "lib.object_detection import ObjectDetection from lib.opencv_lane_detection import LaneDetection from lib.webcam import", "read_sockets: if sock == server_socket: sockfd, client_address = server_socket.accept() connected_clients_sockets.append(sockfd)", "= None control = None roi_vertices = None ipm_vertices =", "物体認識準備 ######################################## od = ObjectDetection() print(\"Server start\") try: while True:", "3. 
Clientを起動する # コード修正 # lib/camera.py: vid = cv2.VideoCapture()を環境に合わせて修正する必要がある #", "Y_METER global ld global od # 映像を保存するかどうか IS_SAVE = True", "out is not None: out.release() except: import traceback traceback.print_exc() finally:", "Client: Jetson TX2/Raspberry Pi3 Docker # 1. FFMPEG UDP StreamingをClientで実行する。AWS向け10FPS,Jetson", "connected_clients_sockets.append(server_socket) # Headerの受信が必要かどうか。Headerを受信したら、encode('ascii')を通さずに受信データを解析する is_need_header_receive = True ######################################## # 物体認識準備 ########################################", "== 2: # 10を検出した speed = 40 elif prediction_class ==", "True: ######################################## # 受信待ち ######################################## read_sockets, write_sockets, error_sockets = select.select(connected_clients_sockets,", "Byte以下にすること packet = sock.recv(4096) print(type(packet)) # if is_need_header_receive: print('header') packet", "ld global od # 通信設定 HOST = '192.168.0.77' # Server", "(int(cols), int(rows))) ######################################## # ライン検出準備 ######################################## ld = LaneDetection(X_METER,Y_METER,cols=cols,rows=rows) while", "speed = 50 #else: # speed = 60 ''' 左右について", "> 10: if tilt2_deg < 0 : # 離れて右いる、奥は右カーブ:左に少し曲がる handle_angle=-1*HANDLE_ANGLE/2", "print('client disconnect') is_need_header_receive = True is_analyze_running = False sock.shutdown(socket.SHUT_RDWR) sock.close()", "# 通信準備 ######################################## connected_clients_sockets = [] server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)", "if np.abs(meters_from_center)*100 > 20: # とても離れて右にいる:左に全開で曲がる handle_angle=-1*HANDLE_ANGLE elif np.abs(meters_from_center)*100 >", "Port #HOST = '2204f9b0e871' # PC Docker #PORT = 8091", "1: # 止まれを検出した is_need_header_receive = True control='0,0,' sock.sendall((\"CONTROL,\"+ control).encode('ascii')) continue" ]
[ "ps.subscribe(key) t = threading.Thread(target=self._sub_thread, args=(ps, cb, key)) t.daemon = True", "TTL = 2 hash_keys = ('cmd', 'user') cmd_hash_keys = {", "= redis.StrictRedis(host=host, port=port, password=password, socket_connect_timeout=5) self.r.info() self.nick = nick_filter.sub('_', nick)", "if all(hashkey): if hashkey in dedup: continue dedup.add(hashkey) state.append(data) for", "print('error processing item', item) traceback.print_exc() def join(self, key, cb): ps", "'user') cmd_hash_keys = { 'comment': ('addr',), 'extra_comment': ('addr',), 'area_comment': ('addr',),", "- ts < TTL: no.remove(data) return True return False def", "if ps: ps.unsubscribe(key) def publish(self, key, data, perm=True, send_uuid=True): if", "for k in keys]) if all(hashkey): if hashkey in dedup:", "-1): try: decoded.append(decode(data)) except Exception: print('error decoding history', data) traceback.print_exc()", "in self.r.lrange(key, 0, -1): try: decoded.append(decode(data)) except Exception: print('error decoding", "key_dec.items()) nick_filter = re.compile(r'[^a-zA-Z0-9_\\-]') def decode(data): d = json.loads(data) return", "= True t.start() self.ps[key] = ps self.publish(key, {'cmd': 'join'}, perm=False)", "redis push', item) except Exception: print('error processing item', item) traceback.print_exc()", "with self.nolock: for data in no: ts = data[0] key", "keys = hash_keys + cmd_hash_keys.get(cmd, ()) hashkey = tuple([str(data.get(k)) for", "import threading import time import traceback import uuid import base64", "push', item) except Exception: print('error processing item', item) traceback.print_exc() def", "ps = self.ps.pop(key, None) if ps: ps.unsubscribe(key) def publish(self, key,", "'comment': ('addr',), 'extra_comment': ('addr',), 'area_comment': ('addr',), 'rename': ('addr',), 'stackvar_renamed': ('addr',", "'struc_member_changed': ('struc_name', 'offset', 'size',), } key_dec = { 'c': 'cmd',", "args=(ps, cb, key)) t.daemon = True t.start() self.ps[key] = 
ps", "now - d[0] < TTL] class Client: def __init__(self, host,", "data) self.r.publish(key, data) def push(self, key, data, send_uuid=True): if send_uuid:", "nick) self.ps = {} self.nolock = threading.Lock() self.nosend = defaultdict(list)", "perm=False) def leave(self, key): ps = self.ps.pop(key, None) if ps:", "in dedup: continue dedup.add(hashkey) state.append(data) for data in reversed(state): try:", "for data in self.r.lrange(key, 0, -1): try: decoded.append(decode(data)) except Exception:", "'is_union',), 'struc_deleted': ('struc_name',), 'struc_renamed': ('old_name', 'new_name',), 'struc_member_created': ('struc_name', 'offset', 'member_name',", "for item in ps.listen(): try: if item['type'] == 'message': data", "= time.time() return [d for d in a if now", "'rename': ('addr',), 'stackvar_renamed': ('addr', 'offset', 'name',), 'struc_created': ('struc_name', 'is_union',), 'struc_deleted':", "hashkey = tuple([str(data.get(k)) for k in keys]) if all(hashkey): if", "= json.dumps(data, separators=(',', ':'), sort_keys=True) if perm: self.r.rpush(key, data) self.r.publish(key,", "v in data.items()) data = json.dumps(data, separators=(',', ':'), sort_keys=True) if", "'offset',), 'struc_member_renamed': ('struc_name', 'offset', 'member_name',), 'struc_member_changed': ('struc_name', 'offset', 'size',), }", "self.nosend[key] = remove_ttl(self.nosend[key]) self.nosend[key].append((time.time(),) + dtokey(data)) cb(key, data) elif item['type']", "= self.nick data['ts'] = self.r.time()[0] if send_uuid: data['uuid'] = self.uuid", "'size',), } key_dec = { 'c': 'cmd', 'a': 'addr', 'u':", "ts < TTL: no.remove(data) return True return False def _sub_thread(self,", "= re.compile(r'[^a-zA-Z0-9_\\-]') def decode(data): d = json.loads(data) return dict((key_dec.get(k, k),", "= defaultdict(list) self.uuid = str(base64.b64encode(binascii.unhexlify(uuid.uuid4().hex)).decode('ascii')) def debounce(self, no, data): dkey", "item) except Exception: print('error processing item', item) 
traceback.print_exc() def join(self,", "remove_ttl(a): now = time.time() return [d for d in a", "return [d for d in a if now - d[0]", "self.uuid: continue with self.nolock: self.nosend[key] = remove_ttl(self.nosend[key]) self.nosend[key].append((time.time(),) + dtokey(data))", "{'cmd': 'join'}, perm=False) def leave(self, key): ps = self.ps.pop(key, None)", "messages if data.get('uuid') == self.uuid: continue with self.nolock: self.nosend[key] =", "[] dedup = set() for data in reversed(decoded): cmd =", "('user', 'ts', 'uuid'))) def remove_ttl(a): now = time.time() return [d", "data = decode(item['data']) if 'user' in data: data['user'] = nick_filter.sub('_',", "= hash_keys + cmd_hash_keys.get(cmd, ()) hashkey = tuple([str(data.get(k)) for k", "try: with self.nolock: self.nosend[key].append((time.time(),) + dtokey(data)) cb(key, data, replay=True) except", "self.uuid data = dict((key_enc.get(k, k), v) for k, v in", "state.append(data) for data in reversed(state): try: with self.nolock: self.nosend[key].append((time.time(),) +", "dedup = set() for data in reversed(decoded): cmd = data.get('cmd')", "traceback.print_exc() def join(self, key, cb): ps = self.r.pubsub() ps.subscribe(key) t", "import traceback import uuid import base64 import binascii TTL =", "try: decoded.append(decode(data)) except Exception: print('error decoding history', data) traceback.print_exc() state", "'stackvar_renamed': ('addr', 'offset', 'name',), 'struc_created': ('struc_name', 'is_union',), 'struc_deleted': ('struc_name',), 'struc_renamed':", "self.nick = nick_filter.sub('_', nick) self.ps = {} self.nolock = threading.Lock()", "= json.loads(data) return dict((key_dec.get(k, k), v) for k, v in", "perm=True, send_uuid=True): if self.debounce(self.nosend[key], data): return data['user'] = self.nick data['ts']", "if cmd: keys = hash_keys + cmd_hash_keys.get(cmd, ()) hashkey =", "data['user']) # reject our own messages if data.get('uuid') == self.uuid:", "+ cmd_hash_keys.get(cmd, ()) hashkey = 
tuple([str(data.get(k)) for k in keys])", "no, data): dkey = dtokey(data) now = time.time() with self.nolock:", "0, -1): try: decoded.append(decode(data)) except Exception: print('error decoding history', data)", "if hashkey in dedup: continue dedup.add(hashkey) state.append(data) for data in", "key, cb): ps = self.r.pubsub() ps.subscribe(key) t = threading.Thread(target=self._sub_thread, args=(ps,", "processing item', item) traceback.print_exc() def join(self, key, cb): ps =", "nick, password=None): self.r = redis.StrictRedis(host=host, port=port, password=password, socket_connect_timeout=5) self.r.info() self.nick", "threading.Lock() self.nosend = defaultdict(list) self.uuid = str(base64.b64encode(binascii.unhexlify(uuid.uuid4().hex)).decode('ascii')) def debounce(self, no,", "time.time() with self.nolock: for data in no: ts = data[0]", "t = threading.Thread(target=self._sub_thread, args=(ps, cb, key)) t.daemon = True t.start()", "v) for k, v in sorted(d.items()) if k not in", "cb(key, data, replay=True) except Exception: print('error replaying history', data) traceback.print_exc()", "tuple(((k, v) for k, v in sorted(d.items()) if k not", "= tuple([str(data.get(k)) for k in keys]) if all(hashkey): if hashkey", "'extra_comment': ('addr',), 'area_comment': ('addr',), 'rename': ('addr',), 'stackvar_renamed': ('addr', 'offset', 'name',),", "redis.StrictRedis(host=host, port=port, password=password, socket_connect_timeout=5) self.r.info() self.nick = nick_filter.sub('_', nick) self.ps", "+ dtokey(data)) cb(key, data, replay=True) except Exception: print('error replaying history',", "def publish(self, key, data, perm=True, send_uuid=True): if self.debounce(self.nosend[key], data): return", "= [] for data in self.r.lrange(key, 0, -1): try: decoded.append(decode(data))", "self.r.time()[0] if send_uuid: data['uuid'] = self.uuid data = dict((key_enc.get(k, k),", "data.items()) data = json.dumps(data, separators=(',', ':'), sort_keys=True) if perm: self.r.rpush(key,", 
"dtokey(d): return tuple(((k, v) for k, v in sorted(d.items()) if", "import binascii TTL = 2 hash_keys = ('cmd', 'user') cmd_hash_keys", "time.time() return [d for d in a if now -", "'a': 'addr', 'u': 'user', 't': 'text', 'i': 'uuid', 'b': 'blocks'", "binascii TTL = 2 hash_keys = ('cmd', 'user') cmd_hash_keys =", "k), v) for k, v in data.items()) data = json.dumps(data,", "'uuid', 'b': 'blocks' } key_enc = dict((v, k) for k,", "data) traceback.print_exc() state = [] dedup = set() for data", "[] for data in self.r.lrange(key, 0, -1): try: decoded.append(decode(data)) except", "k), v) for k, v in d.items()) def dtokey(d): return", "= self.uuid data = dict((key_enc.get(k, k), v) for k, v", "item in ps.listen(): try: if item['type'] == 'message': data =", "threading import time import traceback import uuid import base64 import", "'offset', 'size',), } key_dec = { 'c': 'cmd', 'a': 'addr',", "debounce(self, no, data): dkey = dtokey(data) now = time.time() with", "= 2 hash_keys = ('cmd', 'user') cmd_hash_keys = { 'comment':", "= str(base64.b64encode(binascii.unhexlify(uuid.uuid4().hex)).decode('ascii')) def debounce(self, no, data): dkey = dtokey(data) now", "sorted(d.items()) if k not in ('user', 'ts', 'uuid'))) def remove_ttl(a):", "continue dedup.add(hashkey) state.append(data) for data in reversed(state): try: with self.nolock:", "cb, key): for item in ps.listen(): try: if item['type'] ==", "d = json.loads(data) return dict((key_dec.get(k, k), v) for k, v", "from collections import defaultdict import json import re import redis", "in no: ts = data[0] key = data[1:] if dkey", "cmd_hash_keys.get(cmd, ()) hashkey = tuple([str(data.get(k)) for k in keys]) if", "in d.items()) def dtokey(d): return tuple(((k, v) for k, v", "True return False def _sub_thread(self, ps, cb, key): for item", "'text', 'i': 'uuid', 'b': 'blocks' } key_enc = dict((v, k)", "if dkey == key and now - ts < TTL:", "'ts', 'uuid'))) def remove_ttl(a): now = time.time() return [d for", "self.nolock = 
threading.Lock() self.nosend = defaultdict(list) self.uuid = str(base64.b64encode(binascii.unhexlify(uuid.uuid4().hex)).decode('ascii')) def", "= threading.Thread(target=self._sub_thread, args=(ps, cb, key)) t.daemon = True t.start() self.ps[key]", "dict((key_dec.get(k, k), v) for k, v in d.items()) def dtokey(d):", "no.remove(data) return True return False def _sub_thread(self, ps, cb, key):", "('old_name', 'new_name',), 'struc_member_created': ('struc_name', 'offset', 'member_name', 'size', 'flag',), 'struc_member_deleted': ('struc_name',", "decode(data): d = json.loads(data) return dict((key_dec.get(k, k), v) for k,", "'name',), 'struc_created': ('struc_name', 'is_union',), 'struc_deleted': ('struc_name',), 'struc_renamed': ('old_name', 'new_name',), 'struc_member_created':", "'size', 'flag',), 'struc_member_deleted': ('struc_name', 'offset',), 'struc_member_renamed': ('struc_name', 'offset', 'member_name',), 'struc_member_changed':", "publish(self, key, data, perm=True, send_uuid=True): if self.debounce(self.nosend[key], data): return data['user']", "False def _sub_thread(self, ps, cb, key): for item in ps.listen():", "def join(self, key, cb): ps = self.r.pubsub() ps.subscribe(key) t =", "dkey = dtokey(data) now = time.time() with self.nolock: for data", "re import redis import threading import time import traceback import", "reject our own messages if data.get('uuid') == self.uuid: continue with", "data) def push(self, key, data, send_uuid=True): if send_uuid: data['uuid'] =", "key_enc = dict((v, k) for k, v in key_dec.items()) nick_filter", "push(self, key, data, send_uuid=True): if send_uuid: data['uuid'] = self.uuid data", "'u': 'user', 't': 'text', 'i': 'uuid', 'b': 'blocks' } key_enc", "return dict((key_dec.get(k, k), v) for k, v in d.items()) def", "continue with self.nolock: self.nosend[key] = remove_ttl(self.nosend[key]) self.nosend[key].append((time.time(),) + dtokey(data)) cb(key,", "} key_enc = dict((v, k) for k, v in key_dec.items())", "k not in 
('user', 'ts', 'uuid'))) def remove_ttl(a): now =", "self.r = redis.StrictRedis(host=host, port=port, password=password, socket_connect_timeout=5) self.r.info() self.nick = nick_filter.sub('_',", "self.ps[key] = ps self.publish(key, {'cmd': 'join'}, perm=False) def leave(self, key):", "def _sub_thread(self, ps, cb, key): for item in ps.listen(): try:", "'struc_renamed': ('old_name', 'new_name',), 'struc_member_created': ('struc_name', 'offset', 'member_name', 'size', 'flag',), 'struc_member_deleted':", "self.uuid = str(base64.b64encode(binascii.unhexlify(uuid.uuid4().hex)).decode('ascii')) def debounce(self, no, data): dkey = dtokey(data)", "= data[1:] if dkey == key and now - ts", "('addr',), 'area_comment': ('addr',), 'rename': ('addr',), 'stackvar_renamed': ('addr', 'offset', 'name',), 'struc_created':", "= [] dedup = set() for data in reversed(decoded): cmd", "'i': 'uuid', 'b': 'blocks' } key_enc = dict((v, k) for", "'flag',), 'struc_member_deleted': ('struc_name', 'offset',), 'struc_member_renamed': ('struc_name', 'offset', 'member_name',), 'struc_member_changed': ('struc_name',", "base64 import binascii TTL = 2 hash_keys = ('cmd', 'user')", "{ 'c': 'cmd', 'a': 'addr', 'u': 'user', 't': 'text', 'i':", "history', data) traceback.print_exc() state = [] dedup = set() for", "d.items()) def dtokey(d): return tuple(((k, v) for k, v in", "now - ts < TTL: no.remove(data) return True return False", "'uuid'))) def remove_ttl(a): now = time.time() return [d for d", "json.dumps(data, separators=(',', ':'), sort_keys=True) if perm: self.r.rpush(key, data) self.r.publish(key, data)", "self.nolock: self.nosend[key] = remove_ttl(self.nosend[key]) self.nosend[key].append((time.time(),) + dtokey(data)) cb(key, data) elif", "in data: data['user'] = nick_filter.sub('_', data['user']) # reject our own", "ps: ps.unsubscribe(key) def publish(self, key, data, perm=True, send_uuid=True): if self.debounce(self.nosend[key],", "nick_filter.sub('_', data['user']) # reject our own messages 
if data.get('uuid') ==", "('struc_name', 'is_union',), 'struc_deleted': ('struc_name',), 'struc_renamed': ('old_name', 'new_name',), 'struc_member_created': ('struc_name', 'offset',", "= dtokey(data) now = time.time() with self.nolock: for data in", "('addr', 'offset', 'name',), 'struc_created': ('struc_name', 'is_union',), 'struc_deleted': ('struc_name',), 'struc_renamed': ('old_name',", "item['type'] == 'message': data = decode(item['data']) if 'user' in data:", "password=password, socket_connect_timeout=5) self.r.info() self.nick = nick_filter.sub('_', nick) self.ps = {}", "a if now - d[0] < TTL] class Client: def", "for data in reversed(state): try: with self.nolock: self.nosend[key].append((time.time(),) + dtokey(data))", "'blocks' } key_enc = dict((v, k) for k, v in", "our own messages if data.get('uuid') == self.uuid: continue with self.nolock:", "data in reversed(state): try: with self.nolock: self.nosend[key].append((time.time(),) + dtokey(data)) cb(key,", "'struc_member_created': ('struc_name', 'offset', 'member_name', 'size', 'flag',), 'struc_member_deleted': ('struc_name', 'offset',), 'struc_member_renamed':", "cmd = data.get('cmd') if cmd: keys = hash_keys + cmd_hash_keys.get(cmd,", "= time.time() with self.nolock: for data in no: ts =", "data, replay=True) except Exception: print('error replaying history', data) traceback.print_exc() else:", "t.daemon = True t.start() self.ps[key] = ps self.publish(key, {'cmd': 'join'},", "def push(self, key, data, send_uuid=True): if send_uuid: data['uuid'] = self.uuid", "no: ts = data[0] key = data[1:] if dkey ==", "data: data['user'] = nick_filter.sub('_', data['user']) # reject our own messages", "with self.nolock: self.nosend[key].append((time.time(),) + dtokey(data)) cb(key, data, replay=True) except Exception:", "in data.items()) data = json.dumps(data, separators=(',', ':'), sort_keys=True) if perm:", "remove_ttl(self.nosend[key]) self.nosend[key].append((time.time(),) + dtokey(data)) cb(key, data) elif 
item['type'] == 'subscribe':", "def debounce(self, no, data): dkey = dtokey(data) now = time.time()", "k, v in data.items()) data = json.dumps(data, separators=(',', ':'), sort_keys=True)", "nick_filter.sub('_', nick) self.ps = {} self.nolock = threading.Lock() self.nosend =", "else: print('unknown redis push', item) except Exception: print('error processing item',", "import time import traceback import uuid import base64 import binascii", "if item['type'] == 'message': data = decode(item['data']) if 'user' in", "if perm: self.r.rpush(key, data) self.r.publish(key, data) def push(self, key, data,", "= nick_filter.sub('_', nick) self.ps = {} self.nolock = threading.Lock() self.nosend", "nick_filter = re.compile(r'[^a-zA-Z0-9_\\-]') def decode(data): d = json.loads(data) return dict((key_dec.get(k,", "cmd_hash_keys = { 'comment': ('addr',), 'extra_comment': ('addr',), 'area_comment': ('addr',), 'rename':", "data) elif item['type'] == 'subscribe': decoded = [] for data", "send_uuid: data['uuid'] = self.uuid data = dict((key_enc.get(k, k), v) for", "self.r.info() self.nick = nick_filter.sub('_', nick) self.ps = {} self.nolock =", "= set() for data in reversed(decoded): cmd = data.get('cmd') if", "key_dec = { 'c': 'cmd', 'a': 'addr', 'u': 'user', 't':", "= data[0] key = data[1:] if dkey == key and", "threading.Thread(target=self._sub_thread, args=(ps, cb, key)) t.daemon = True t.start() self.ps[key] =", "print('error replaying history', data) traceback.print_exc() else: print('unknown redis push', item)", "import redis import threading import time import traceback import uuid", "dict((key_enc.get(k, k), v) for k, v in data.items()) data =", "= nick_filter.sub('_', data['user']) # reject our own messages if data.get('uuid')", "ps = self.r.pubsub() ps.subscribe(key) t = threading.Thread(target=self._sub_thread, args=(ps, cb, key))", "return tuple(((k, v) for k, v in sorted(d.items()) if k", "import re import redis import threading import time import traceback", "time 
import traceback import uuid import base64 import binascii TTL", "Exception: print('error replaying history', data) traceback.print_exc() else: print('unknown redis push',", "except Exception: print('error decoding history', data) traceback.print_exc() state = []", "':'), sort_keys=True) if perm: self.r.rpush(key, data) self.r.publish(key, data) def push(self,", "cb(key, data) elif item['type'] == 'subscribe': decoded = [] for", "port, nick, password=None): self.r = redis.StrictRedis(host=host, port=port, password=password, socket_connect_timeout=5) self.r.info()", "k in keys]) if all(hashkey): if hashkey in dedup: continue", "tuple([str(data.get(k)) for k in keys]) if all(hashkey): if hashkey in", "'cmd', 'a': 'addr', 'u': 'user', 't': 'text', 'i': 'uuid', 'b':", "except Exception: print('error processing item', item) traceback.print_exc() def join(self, key,", "item', item) traceback.print_exc() def join(self, key, cb): ps = self.r.pubsub()", "= self.r.pubsub() ps.subscribe(key) t = threading.Thread(target=self._sub_thread, args=(ps, cb, key)) t.daemon", "hashkey in dedup: continue dedup.add(hashkey) state.append(data) for data in reversed(state):", "('struc_name',), 'struc_renamed': ('old_name', 'new_name',), 'struc_member_created': ('struc_name', 'offset', 'member_name', 'size', 'flag',),", "k) for k, v in key_dec.items()) nick_filter = re.compile(r'[^a-zA-Z0-9_\\-]') def", "decoded = [] for data in self.r.lrange(key, 0, -1): try:", "import json import re import redis import threading import time", "if 'user' in data: data['user'] = nick_filter.sub('_', data['user']) # reject", "for data in reversed(decoded): cmd = data.get('cmd') if cmd: keys", "now = time.time() with self.nolock: for data in no: ts", "return data['user'] = self.nick data['ts'] = self.r.time()[0] if send_uuid: data['uuid']", "cb, key)) t.daemon = True t.start() self.ps[key] = ps self.publish(key,", "hash_keys = ('cmd', 'user') cmd_hash_keys = { 'comment': ('addr',), 'extra_comment':", 
"defaultdict(list) self.uuid = str(base64.b64encode(binascii.unhexlify(uuid.uuid4().hex)).decode('ascii')) def debounce(self, no, data): dkey =", "json import re import redis import threading import time import", "data): dkey = dtokey(data) now = time.time() with self.nolock: for", "v in data.items()) data = json.dumps(data, separators=(',', ':'), sort_keys=True) self.r.lpush(key,", "== 'subscribe': decoded = [] for data in self.r.lrange(key, 0,", "for k, v in d.items()) def dtokey(d): return tuple(((k, v)", "str(base64.b64encode(binascii.unhexlify(uuid.uuid4().hex)).decode('ascii')) def debounce(self, no, data): dkey = dtokey(data) now =", "except Exception: print('error replaying history', data) traceback.print_exc() else: print('unknown redis", "self.r.lrange(key, 0, -1): try: decoded.append(decode(data)) except Exception: print('error decoding history',", "self.r.pubsub() ps.subscribe(key) t = threading.Thread(target=self._sub_thread, args=(ps, cb, key)) t.daemon =", "None) if ps: ps.unsubscribe(key) def publish(self, key, data, perm=True, send_uuid=True):", "self.nosend = defaultdict(list) self.uuid = str(base64.b64encode(binascii.unhexlify(uuid.uuid4().hex)).decode('ascii')) def debounce(self, no, data):", "item) traceback.print_exc() def join(self, key, cb): ps = self.r.pubsub() ps.subscribe(key)", "decode(item['data']) if 'user' in data: data['user'] = nick_filter.sub('_', data['user']) #", "all(hashkey): if hashkey in dedup: continue dedup.add(hashkey) state.append(data) for data", "data in self.r.lrange(key, 0, -1): try: decoded.append(decode(data)) except Exception: print('error", "TTL: no.remove(data) return True return False def _sub_thread(self, ps, cb,", "key, data, send_uuid=True): if send_uuid: data['uuid'] = self.uuid data =", "print('error decoding history', data) traceback.print_exc() state = [] dedup =", "= self.r.time()[0] if send_uuid: data['uuid'] = self.uuid data = dict((key_enc.get(k,", "[d for d in a if now - d[0] <", "dedup.add(hashkey) 
state.append(data) for data in reversed(state): try: with self.nolock: self.nosend[key].append((time.time(),)", "k, v in d.items()) def dtokey(d): return tuple(((k, v) for", "socket_connect_timeout=5) self.r.info() self.nick = nick_filter.sub('_', nick) self.ps = {} self.nolock", "- d[0] < TTL] class Client: def __init__(self, host, port,", "{ 'comment': ('addr',), 'extra_comment': ('addr',), 'area_comment': ('addr',), 'rename': ('addr',), 'stackvar_renamed':", "import uuid import base64 import binascii TTL = 2 hash_keys", "Client: def __init__(self, host, port, nick, password=None): self.r = redis.StrictRedis(host=host,", "in ('user', 'ts', 'uuid'))) def remove_ttl(a): now = time.time() return", "2 hash_keys = ('cmd', 'user') cmd_hash_keys = { 'comment': ('addr',),", "print('unknown redis push', item) except Exception: print('error processing item', item)", "set() for data in reversed(decoded): cmd = data.get('cmd') if cmd:", "in keys]) if all(hashkey): if hashkey in dedup: continue dedup.add(hashkey)", "'struc_deleted': ('struc_name',), 'struc_renamed': ('old_name', 'new_name',), 'struc_member_created': ('struc_name', 'offset', 'member_name', 'size',", "traceback.print_exc() else: print('unknown redis push', item) except Exception: print('error processing", "'struc_member_deleted': ('struc_name', 'offset',), 'struc_member_renamed': ('struc_name', 'offset', 'member_name',), 'struc_member_changed': ('struc_name', 'offset',", "= dict((key_enc.get(k, k), v) for k, v in data.items()) data", "with self.nolock: self.nosend[key] = remove_ttl(self.nosend[key]) self.nosend[key].append((time.time(),) + dtokey(data)) cb(key, data)", "def decode(data): d = json.loads(data) return dict((key_dec.get(k, k), v) for", "= threading.Lock() self.nosend = defaultdict(list) self.uuid = str(base64.b64encode(binascii.unhexlify(uuid.uuid4().hex)).decode('ascii')) def debounce(self,", "'member_name',), 'struc_member_changed': ('struc_name', 'offset', 'size',), } key_dec = { 'c':", "try: if 
item['type'] == 'message': data = decode(item['data']) if 'user'", "self.r.publish(key, data) def push(self, key, data, send_uuid=True): if send_uuid: data['uuid']", "= { 'comment': ('addr',), 'extra_comment': ('addr',), 'area_comment': ('addr',), 'rename': ('addr',),", "'offset', 'member_name',), 'struc_member_changed': ('struc_name', 'offset', 'size',), } key_dec = {", "data) traceback.print_exc() else: print('unknown redis push', item) except Exception: print('error", "history', data) traceback.print_exc() else: print('unknown redis push', item) except Exception:", "< TTL] class Client: def __init__(self, host, port, nick, password=None):", "not in ('user', 'ts', 'uuid'))) def remove_ttl(a): now = time.time()", "Exception: print('error processing item', item) traceback.print_exc() def join(self, key, cb):", "uuid import base64 import binascii TTL = 2 hash_keys =", "'offset', 'name',), 'struc_created': ('struc_name', 'is_union',), 'struc_deleted': ('struc_name',), 'struc_renamed': ('old_name', 'new_name',),", "'join'}, perm=False) def leave(self, key): ps = self.ps.pop(key, None) if", "('addr',), 'stackvar_renamed': ('addr', 'offset', 'name',), 'struc_created': ('struc_name', 'is_union',), 'struc_deleted': ('struc_name',),", "key and now - ts < TTL: no.remove(data) return True", "('struc_name', 'offset', 'member_name', 'size', 'flag',), 'struc_member_deleted': ('struc_name', 'offset',), 'struc_member_renamed': ('struc_name',", "'t': 'text', 'i': 'uuid', 'b': 'blocks' } key_enc = dict((v,", "True t.start() self.ps[key] = ps self.publish(key, {'cmd': 'join'}, perm=False) def", "in reversed(state): try: with self.nolock: self.nosend[key].append((time.time(),) + dtokey(data)) cb(key, data,", "in ps.listen(): try: if item['type'] == 'message': data = decode(item['data'])", "in reversed(decoded): cmd = data.get('cmd') if cmd: keys = hash_keys", "('struc_name', 'offset',), 'struc_member_renamed': ('struc_name', 'offset', 'member_name',), 'struc_member_changed': 
('struc_name', 'offset', 'size',),", "'struc_member_renamed': ('struc_name', 'offset', 'member_name',), 'struc_member_changed': ('struc_name', 'offset', 'size',), } key_dec", "send_uuid=True): if self.debounce(self.nosend[key], data): return data['user'] = self.nick data['ts'] =", "import base64 import binascii TTL = 2 hash_keys = ('cmd',", "redis import threading import time import traceback import uuid import", "send_uuid=True): if send_uuid: data['uuid'] = self.uuid data = dict((key_enc.get(k, k),", "in a if now - d[0] < TTL] class Client:", "self.nosend[key].append((time.time(),) + dtokey(data)) cb(key, data, replay=True) except Exception: print('error replaying", "and now - ts < TTL: no.remove(data) return True return", "d[0] < TTL] class Client: def __init__(self, host, port, nick,", "dtokey(data) now = time.time() with self.nolock: for data in no:", "def leave(self, key): ps = self.ps.pop(key, None) if ps: ps.unsubscribe(key)", "sort_keys=True) if perm: self.r.rpush(key, data) self.r.publish(key, data) def push(self, key,", "return True return False def _sub_thread(self, ps, cb, key): for", "data, perm=True, send_uuid=True): if self.debounce(self.nosend[key], data): return data['user'] = self.nick", "data in reversed(decoded): cmd = data.get('cmd') if cmd: keys =", "data = json.dumps(data, separators=(',', ':'), sort_keys=True) if perm: self.r.rpush(key, data)", "ps self.publish(key, {'cmd': 'join'}, perm=False) def leave(self, key): ps =", "item['type'] == 'subscribe': decoded = [] for data in self.r.lrange(key,", "hash_keys + cmd_hash_keys.get(cmd, ()) hashkey = tuple([str(data.get(k)) for k in", "key = data[1:] if dkey == key and now -", "class Client: def __init__(self, host, port, nick, password=None): self.r =", "'addr', 'u': 'user', 't': 'text', 'i': 'uuid', 'b': 'blocks' }", "= data.get('cmd') if cmd: keys = hash_keys + cmd_hash_keys.get(cmd, ())", "'offset', 'member_name', 'size', 'flag',), 'struc_member_deleted': ('struc_name', 'offset',), 
'struc_member_renamed': ('struc_name', 'offset',", "data.get('uuid') == self.uuid: continue with self.nolock: self.nosend[key] = remove_ttl(self.nosend[key]) self.nosend[key].append((time.time(),)", "reversed(state): try: with self.nolock: self.nosend[key].append((time.time(),) + dtokey(data)) cb(key, data, replay=True)", "data['ts'] = self.r.time()[0] if send_uuid: data['uuid'] = self.uuid data =", "ps.unsubscribe(key) def publish(self, key, data, perm=True, send_uuid=True): if self.debounce(self.nosend[key], data):", "= remove_ttl(self.nosend[key]) self.nosend[key].append((time.time(),) + dtokey(data)) cb(key, data) elif item['type'] ==", "'b': 'blocks' } key_enc = dict((v, k) for k, v", "= ('cmd', 'user') cmd_hash_keys = { 'comment': ('addr',), 'extra_comment': ('addr',),", "dkey == key and now - ts < TTL: no.remove(data)", "ps, cb, key): for item in ps.listen(): try: if item['type']", "v in d.items()) def dtokey(d): return tuple(((k, v) for k,", "host, port, nick, password=None): self.r = redis.StrictRedis(host=host, port=port, password=password, socket_connect_timeout=5)", "dtokey(data)) cb(key, data) elif item['type'] == 'subscribe': decoded = []", "port=port, password=password, socket_connect_timeout=5) self.r.info() self.nick = nick_filter.sub('_', nick) self.ps =", "__init__(self, host, port, nick, password=None): self.r = redis.StrictRedis(host=host, port=port, password=password,", "data = dict((key_enc.get(k, k), v) for k, v in data.items())", "v) for k, v in d.items()) def dtokey(d): return tuple(((k,", "v in sorted(d.items()) if k not in ('user', 'ts', 'uuid')))", "key, data, perm=True, send_uuid=True): if self.debounce(self.nosend[key], data): return data['user'] =", "'new_name',), 'struc_member_created': ('struc_name', 'offset', 'member_name', 'size', 'flag',), 'struc_member_deleted': ('struc_name', 'offset',),", "def dtokey(d): return tuple(((k, v) for k, v in sorted(d.items())", "data['uuid'] = self.uuid data = dict((key_enc.get(k, k), v) for k,", 
"# reject our own messages if data.get('uuid') == self.uuid: continue", "in data.items()) data = json.dumps(data, separators=(',', ':'), sort_keys=True) self.r.lpush(key, data)", "Exception: print('error decoding history', data) traceback.print_exc() state = [] dedup", "keys]) if all(hashkey): if hashkey in dedup: continue dedup.add(hashkey) state.append(data)", "'user' in data: data['user'] = nick_filter.sub('_', data['user']) # reject our", "self.ps = {} self.nolock = threading.Lock() self.nosend = defaultdict(list) self.uuid", "traceback.print_exc() state = [] dedup = set() for data in", "TTL] class Client: def __init__(self, host, port, nick, password=None): self.r", "join(self, key, cb): ps = self.r.pubsub() ps.subscribe(key) t = threading.Thread(target=self._sub_thread,", "= { 'c': 'cmd', 'a': 'addr', 'u': 'user', 't': 'text',", "def __init__(self, host, port, nick, password=None): self.r = redis.StrictRedis(host=host, port=port,", "= decode(item['data']) if 'user' in data: data['user'] = nick_filter.sub('_', data['user'])", "re.compile(r'[^a-zA-Z0-9_\\-]') def decode(data): d = json.loads(data) return dict((key_dec.get(k, k), v)", "dtokey(data)) cb(key, data, replay=True) except Exception: print('error replaying history', data)", "self.nosend[key].append((time.time(),) + dtokey(data)) cb(key, data) elif item['type'] == 'subscribe': decoded", "cb): ps = self.r.pubsub() ps.subscribe(key) t = threading.Thread(target=self._sub_thread, args=(ps, cb,", "for k, v in sorted(d.items()) if k not in ('user',", "dict((v, k) for k, v in key_dec.items()) nick_filter = re.compile(r'[^a-zA-Z0-9_\\-]')", "data, send_uuid=True): if send_uuid: data['uuid'] = self.uuid data = dict((key_enc.get(k,", "separators=(',', ':'), sort_keys=True) if perm: self.r.rpush(key, data) self.r.publish(key, data) def", "} key_dec = { 'c': 'cmd', 'a': 'addr', 'u': 'user',", "t.start() self.ps[key] = ps self.publish(key, {'cmd': 'join'}, perm=False) def leave(self,", "data.get('cmd') if cmd: keys 
= hash_keys + cmd_hash_keys.get(cmd, ()) hashkey", "== 'message': data = decode(item['data']) if 'user' in data: data['user']", "'member_name', 'size', 'flag',), 'struc_member_deleted': ('struc_name', 'offset',), 'struc_member_renamed': ('struc_name', 'offset', 'member_name',),", "defaultdict import json import re import redis import threading import", "replaying history', data) traceback.print_exc() else: print('unknown redis push', item) except", "cmd: keys = hash_keys + cmd_hash_keys.get(cmd, ()) hashkey = tuple([str(data.get(k))", "('cmd', 'user') cmd_hash_keys = { 'comment': ('addr',), 'extra_comment': ('addr',), 'area_comment':", "self.ps.pop(key, None) if ps: ps.unsubscribe(key) def publish(self, key, data, perm=True,", "for k, v in data.items()) data = json.dumps(data, separators=(',', ':'),", "in key_dec.items()) nick_filter = re.compile(r'[^a-zA-Z0-9_\\-]') def decode(data): d = json.loads(data)", "decoding history', data) traceback.print_exc() state = [] dedup = set()", "key)) t.daemon = True t.start() self.ps[key] = ps self.publish(key, {'cmd':", "elif item['type'] == 'subscribe': decoded = [] for data in", "leave(self, key): ps = self.ps.pop(key, None) if ps: ps.unsubscribe(key) def", "v in key_dec.items()) nick_filter = re.compile(r'[^a-zA-Z0-9_\\-]') def decode(data): d =", "traceback import uuid import base64 import binascii TTL = 2", "decoded.append(decode(data)) except Exception: print('error decoding history', data) traceback.print_exc() state =", "'c': 'cmd', 'a': 'addr', 'u': 'user', 't': 'text', 'i': 'uuid',", "k, v in key_dec.items()) nick_filter = re.compile(r'[^a-zA-Z0-9_\\-]') def decode(data): d", "data['user'] = nick_filter.sub('_', data['user']) # reject our own messages if", "self.r.rpush(key, data) self.r.publish(key, data) def push(self, key, data, send_uuid=True): if", "{} self.nolock = threading.Lock() self.nosend = defaultdict(list) self.uuid = str(base64.b64encode(binascii.unhexlify(uuid.uuid4().hex)).decode('ascii'))", 
"('struc_name', 'offset', 'size',), } key_dec = { 'c': 'cmd', 'a':", "json.loads(data) return dict((key_dec.get(k, k), v) for k, v in d.items())", "if k not in ('user', 'ts', 'uuid'))) def remove_ttl(a): now", "'user', 't': 'text', 'i': 'uuid', 'b': 'blocks' } key_enc =", "< TTL: no.remove(data) return True return False def _sub_thread(self, ps,", "= {} self.nolock = threading.Lock() self.nosend = defaultdict(list) self.uuid =", "data[0] key = data[1:] if dkey == key and now", "now = time.time() return [d for d in a if", "v) for k, v in data.items()) data = json.dumps(data, separators=(',',", "if send_uuid: data['uuid'] = self.uuid data = dict((key_enc.get(k, k), v)", "self.debounce(self.nosend[key], data): return data['user'] = self.nick data['ts'] = self.r.time()[0] if", "password=None): self.r = redis.StrictRedis(host=host, port=port, password=password, socket_connect_timeout=5) self.r.info() self.nick =", "self.nick data['ts'] = self.r.time()[0] if send_uuid: data['uuid'] = self.uuid data", "if now - d[0] < TTL] class Client: def __init__(self,", "data[1:] if dkey == key and now - ts <", "'struc_created': ('struc_name', 'is_union',), 'struc_deleted': ('struc_name',), 'struc_renamed': ('old_name', 'new_name',), 'struc_member_created': ('struc_name',", "_sub_thread(self, ps, cb, key): for item in ps.listen(): try: if", "for data in no: ts = data[0] key = data[1:]", "'subscribe': decoded = [] for data in self.r.lrange(key, 0, -1):", "ts = data[0] key = data[1:] if dkey == key", "for k, v in key_dec.items()) nick_filter = re.compile(r'[^a-zA-Z0-9_\\-]') def decode(data):", "if self.debounce(self.nosend[key], data): return data['user'] = self.nick data['ts'] = self.r.time()[0]", "data in no: ts = data[0] key = data[1:] if", "()) hashkey = tuple([str(data.get(k)) for k in keys]) if all(hashkey):", "= ps self.publish(key, {'cmd': 'join'}, perm=False) def leave(self, key): ps", "reversed(decoded): cmd = data.get('cmd') if cmd: keys = hash_keys +", "return False def 
_sub_thread(self, ps, cb, key): for item in", "ps.listen(): try: if item['type'] == 'message': data = decode(item['data']) if", "('struc_name', 'offset', 'member_name',), 'struc_member_changed': ('struc_name', 'offset', 'size',), } key_dec =", "self.nolock: self.nosend[key].append((time.time(),) + dtokey(data)) cb(key, data, replay=True) except Exception: print('error", "k, v in sorted(d.items()) if k not in ('user', 'ts',", "+ dtokey(data)) cb(key, data) elif item['type'] == 'subscribe': decoded =", "for d in a if now - d[0] < TTL]", "'area_comment': ('addr',), 'rename': ('addr',), 'stackvar_renamed': ('addr', 'offset', 'name',), 'struc_created': ('struc_name',", "'message': data = decode(item['data']) if 'user' in data: data['user'] =", "('addr',), 'extra_comment': ('addr',), 'area_comment': ('addr',), 'rename': ('addr',), 'stackvar_renamed': ('addr', 'offset',", "in sorted(d.items()) if k not in ('user', 'ts', 'uuid'))) def", "== key and now - ts < TTL: no.remove(data) return", "key): ps = self.ps.pop(key, None) if ps: ps.unsubscribe(key) def publish(self,", "dedup: continue dedup.add(hashkey) state.append(data) for data in reversed(state): try: with", "self.nolock: for data in no: ts = data[0] key =", "self.publish(key, {'cmd': 'join'}, perm=False) def leave(self, key): ps = self.ps.pop(key,", "data): return data['user'] = self.nick data['ts'] = self.r.time()[0] if send_uuid:", "collections import defaultdict import json import re import redis import", "= self.ps.pop(key, None) if ps: ps.unsubscribe(key) def publish(self, key, data,", "('addr',), 'rename': ('addr',), 'stackvar_renamed': ('addr', 'offset', 'name',), 'struc_created': ('struc_name', 'is_union',),", "if data.get('uuid') == self.uuid: continue with self.nolock: self.nosend[key] = remove_ttl(self.nosend[key])", "perm: self.r.rpush(key, data) self.r.publish(key, data) def push(self, key, data, send_uuid=True):", "own messages if data.get('uuid') == self.uuid: continue with self.nolock: 
self.nosend[key]", "state = [] dedup = set() for data in reversed(decoded):", "= dict((v, k) for k, v in key_dec.items()) nick_filter =", "d in a if now - d[0] < TTL] class", "def remove_ttl(a): now = time.time() return [d for d in", "key): for item in ps.listen(): try: if item['type'] == 'message':", "data['user'] = self.nick data['ts'] = self.r.time()[0] if send_uuid: data['uuid'] =", "== self.uuid: continue with self.nolock: self.nosend[key] = remove_ttl(self.nosend[key]) self.nosend[key].append((time.time(),) +", "import defaultdict import json import re import redis import threading", "replay=True) except Exception: print('error replaying history', data) traceback.print_exc() else: print('unknown" ]
[ "= 'condition' urlpatterns = [ # # FILTERS # path(", "urlpatterns = [ # # FILTERS # path( '<int:pk>/create_filter/', views.FilterCreateView.as_view(),", "# # FILTERS # path( '<int:pk>/create_filter/', views.FilterCreateView.as_view(), name='create_filter'), path('<int:pk>/edit_filter/', views.edit_filter,", "views app_name = 'condition' urlpatterns = [ # # FILTERS", "columns.\"\"\" from django.urls import path from ontask.condition import views app_name", "views.edit_condition, name='edit_condition'), path( '<int:pk>/delete_condition/', views.delete_condition, name='delete_condition'), # Clone the condition", "name='create_filter'), path('<int:pk>/edit_filter/', views.edit_filter, name='edit_filter'), path('<int:pk>/delete_filter/', views.delete_filter, name='delete_filter'), # # CONDITIONS", "= [ # # FILTERS # path( '<int:pk>/create_filter/', views.FilterCreateView.as_view(), name='create_filter'),", "[ # # FILTERS # path( '<int:pk>/create_filter/', views.FilterCreateView.as_view(), name='create_filter'), path('<int:pk>/edit_filter/',", "import views app_name = 'condition' urlpatterns = [ # #", "name='edit_filter'), path('<int:pk>/delete_filter/', views.delete_filter, name='delete_filter'), # # CONDITIONS # path( '<int:pk>/create_condition/',", "name='delete_filter'), # # CONDITIONS # path( '<int:pk>/create_condition/', views.ConditionCreateView.as_view(), name='create_condition'), path(", "path from ontask.condition import views app_name = 'condition' urlpatterns =", "# -*- coding: utf-8 -*- \"\"\"URLs to manipulate columns.\"\"\" from", "coding: utf-8 -*- \"\"\"URLs to manipulate columns.\"\"\" from django.urls import", "views.ConditionCreateView.as_view(), name='create_condition'), path( '<int:pk>/edit_condition/', views.edit_condition, name='edit_condition'), path( '<int:pk>/delete_condition/', views.delete_condition, name='delete_condition'),", "# # CONDITIONS # path( '<int:pk>/create_condition/', views.ConditionCreateView.as_view(), 
name='create_condition'), path( '<int:pk>/edit_condition/',", "name='create_condition'), path( '<int:pk>/edit_condition/', views.edit_condition, name='edit_condition'), path( '<int:pk>/delete_condition/', views.delete_condition, name='delete_condition'), #", "to manipulate columns.\"\"\" from django.urls import path from ontask.condition import", "'condition' urlpatterns = [ # # FILTERS # path( '<int:pk>/create_filter/',", "app_name = 'condition' urlpatterns = [ # # FILTERS #", "condition path( '<int:pk>/clone_condition/', views.clone_condition, name='clone_condition'), path( '<int:pk>/<int:action_pk>/clone_condition/', views.clone_condition, name='clone_condition'), ]", "'<int:pk>/edit_condition/', views.edit_condition, name='edit_condition'), path( '<int:pk>/delete_condition/', views.delete_condition, name='delete_condition'), # Clone the", "utf-8 -*- \"\"\"URLs to manipulate columns.\"\"\" from django.urls import path", "CONDITIONS # path( '<int:pk>/create_condition/', views.ConditionCreateView.as_view(), name='create_condition'), path( '<int:pk>/edit_condition/', views.edit_condition, name='edit_condition'),", "views.delete_filter, name='delete_filter'), # # CONDITIONS # path( '<int:pk>/create_condition/', views.ConditionCreateView.as_view(), name='create_condition'),", "# path( '<int:pk>/create_filter/', views.FilterCreateView.as_view(), name='create_filter'), path('<int:pk>/edit_filter/', views.edit_filter, name='edit_filter'), path('<int:pk>/delete_filter/', views.delete_filter,", "'<int:pk>/create_filter/', views.FilterCreateView.as_view(), name='create_filter'), path('<int:pk>/edit_filter/', views.edit_filter, name='edit_filter'), path('<int:pk>/delete_filter/', views.delete_filter, name='delete_filter'), #", "ontask.condition import views app_name = 'condition' urlpatterns = [ #", "\"\"\"URLs to manipulate columns.\"\"\" from django.urls import path from ontask.condition", "path( '<int:pk>/delete_condition/', views.delete_condition, 
name='delete_condition'), # Clone the condition path( '<int:pk>/clone_condition/',", "-*- coding: utf-8 -*- \"\"\"URLs to manipulate columns.\"\"\" from django.urls", "FILTERS # path( '<int:pk>/create_filter/', views.FilterCreateView.as_view(), name='create_filter'), path('<int:pk>/edit_filter/', views.edit_filter, name='edit_filter'), path('<int:pk>/delete_filter/',", "from django.urls import path from ontask.condition import views app_name =", "views.edit_filter, name='edit_filter'), path('<int:pk>/delete_filter/', views.delete_filter, name='delete_filter'), # # CONDITIONS # path(", "from ontask.condition import views app_name = 'condition' urlpatterns = [", "-*- \"\"\"URLs to manipulate columns.\"\"\" from django.urls import path from", "django.urls import path from ontask.condition import views app_name = 'condition'", "name='edit_condition'), path( '<int:pk>/delete_condition/', views.delete_condition, name='delete_condition'), # Clone the condition path(", "the condition path( '<int:pk>/clone_condition/', views.clone_condition, name='clone_condition'), path( '<int:pk>/<int:action_pk>/clone_condition/', views.clone_condition, name='clone_condition'),", "path( '<int:pk>/create_filter/', views.FilterCreateView.as_view(), name='create_filter'), path('<int:pk>/edit_filter/', views.edit_filter, name='edit_filter'), path('<int:pk>/delete_filter/', views.delete_filter, name='delete_filter'),", "views.delete_condition, name='delete_condition'), # Clone the condition path( '<int:pk>/clone_condition/', views.clone_condition, name='clone_condition'),", "views.FilterCreateView.as_view(), name='create_filter'), path('<int:pk>/edit_filter/', views.edit_filter, name='edit_filter'), path('<int:pk>/delete_filter/', views.delete_filter, name='delete_filter'), # #", "manipulate columns.\"\"\" from django.urls import path from ontask.condition import views", "path('<int:pk>/delete_filter/', views.delete_filter, name='delete_filter'), # # CONDITIONS # path( 
'<int:pk>/create_condition/', views.ConditionCreateView.as_view(),", "'<int:pk>/create_condition/', views.ConditionCreateView.as_view(), name='create_condition'), path( '<int:pk>/edit_condition/', views.edit_condition, name='edit_condition'), path( '<int:pk>/delete_condition/', views.delete_condition,", "path( '<int:pk>/create_condition/', views.ConditionCreateView.as_view(), name='create_condition'), path( '<int:pk>/edit_condition/', views.edit_condition, name='edit_condition'), path( '<int:pk>/delete_condition/',", "'<int:pk>/delete_condition/', views.delete_condition, name='delete_condition'), # Clone the condition path( '<int:pk>/clone_condition/', views.clone_condition,", "# path( '<int:pk>/create_condition/', views.ConditionCreateView.as_view(), name='create_condition'), path( '<int:pk>/edit_condition/', views.edit_condition, name='edit_condition'), path(", "path( '<int:pk>/edit_condition/', views.edit_condition, name='edit_condition'), path( '<int:pk>/delete_condition/', views.delete_condition, name='delete_condition'), # Clone", "Clone the condition path( '<int:pk>/clone_condition/', views.clone_condition, name='clone_condition'), path( '<int:pk>/<int:action_pk>/clone_condition/', views.clone_condition,", "name='delete_condition'), # Clone the condition path( '<int:pk>/clone_condition/', views.clone_condition, name='clone_condition'), path(", "# CONDITIONS # path( '<int:pk>/create_condition/', views.ConditionCreateView.as_view(), name='create_condition'), path( '<int:pk>/edit_condition/', views.edit_condition,", "# FILTERS # path( '<int:pk>/create_filter/', views.FilterCreateView.as_view(), name='create_filter'), path('<int:pk>/edit_filter/', views.edit_filter, name='edit_filter'),", "path('<int:pk>/edit_filter/', views.edit_filter, name='edit_filter'), path('<int:pk>/delete_filter/', views.delete_filter, name='delete_filter'), # # CONDITIONS #", "import path from ontask.condition import views app_name = 'condition' urlpatterns", "# Clone the condition 
path( '<int:pk>/clone_condition/', views.clone_condition, name='clone_condition'), path( '<int:pk>/<int:action_pk>/clone_condition/'," ]
[ "self.gate_fc = tf.keras.layers.Dense( units=num_classes*(num_mixtures+1), kernel_regularizer=tf.keras.regularizers.l2(1e-6), ) self.expert_fc = tf.keras.layers.Dense( units=num_classes*num_mixtures,", "video features. Shape is of the form: [max_frames, video_feature_dim +", "input_shape=audio_input_shape, name=\"audio_vlad\") #Relu6 is used as it is employed in", "tf.transpose( tf.matmul(tf.transpose(activation, perm=(0, 2, 1)), frames), perm=(0, 2, 1) )", "model_input2], axis=1) fc_out = self.fc(vlad_out) cg_out = self.first_cg(fc_out) final_out =", "__init__(self, input_shape, **kwargs): super(ContextGating, self).__init__(**kwargs) feature_dim = input_shape[-1] if not", "defined. \"\"\" model_input.shape.assert_has_rank(2) feature_dim = model_input.shape.as_list()[-1] if feature_dim is None:", "2D tensor with shape: `(batch_size, feature_dim)`. Output shape: 2D tensor", "= tf.TensorShape(input_shape).as_list() return tf.TensorShape([input_shape[0], input_shape[-1] * self.num_clusters]) def get_config(self): config", "used as it is employed in the paper. self.fc =", "shape [batch_size, num_classes]. \"\"\" model_input = tf.keras.layers.Input(shape=input_shape, batch_size=batch_size) model_input2 =", "is not defined. 
\"\"\" feature_dim = self.feature_dim max_frames = self.max_frames", "kernel_regularizer=tf.keras.regularizers.l2(1e-6), ) def call(self, input): \"\"\"Apply the MoE algorithm to", "be found here: https://arxiv.org/pdf/1706.06905.pdf Arguments: num_clusters: the number of clusters", "= MOELogistic(input_shape=self.first_cg.compute_output_shape((batch_size, fc_units)), num_classes=self.num_classes, num_mixtures=self.num_mixtures, name=\"moe\") self.second_cg = ContextGating(input_shape=self.moe.compute_output_shape((batch_size, fc_units)),", "probs def compute_output_shape(self, input_shape): return (input_shape[0], self.num_classes) def get_config(self): base_config", "trainable=True, name=\"cluster_centers\" + str(num_clusters) ) self.feature_dim = feature_dim self.max_frames =", "self).__init__(**kwargs) self.num_classes = num_classes self.num_mixtures = num_mixtures self.gate_fc = tf.keras.layers.Dense(", "by 2.\") batch_size = video_input_shape[0] if audio_input_shape[0] != batch_size: raise", "def call(self, input): \"\"\"Apply the MoE algorithm to the given", "get_config(self): base_config = super().get_config() return dict(list(base_config.items())) class MOELogistic(tf.keras.layers.Layer): \"\"\"Implements a", "= input_shape[-1] if not isinstance(feature_dim, int): feature_dim = feature_dim.value self.fc", "kernel_regularizer=tf.keras.regularizers.l2(1e-5), name=\"main_fc2\" ) def build_model(self, input_shape, second_input_shape, batch_size): \"\"\"Perform one", "Google LLC Licensed under the Apache License, Version 2.0 (the", "with shape: `(batch_size, num_classes)`. \"\"\" def __init__(self, input_shape, num_classes, num_mixtures,", "mixtures': self.num_mixtures}) return config class VideoClassifier: \"\"\"The Video Classifier model,", "Output shape: 2D tensor with shape: `(batch_size, feature_dim)`. \"\"\" def", "shape: `(batch_size, feature_dim)`. Output shape: 2D tensor with shape: `(batch_size,", "Logistic Experts classifier. 
Input shape: 2D tensor with shape: `(batch_size,", "\"\"\" import math import tensorflow as tf class NetVLAD(tf.keras.layers.Layer): \"\"\"Applies", "num_classes, num_mixtures, **kwargs): super(MOELogistic, self).__init__(**kwargs) self.num_classes = num_classes self.num_mixtures =", "the model. Args: model_input: input features of shape [batch_size, max_frames,", "equal.\") self.num_frames = video_input_shape[1] self.num_classes = num_classes self.video_feature_dim = video_input_shape[2]", "num_clusters/2. video_input_shape: shape of the input video features. Shape of", "input is not defined. \"\"\" feature_dim = self.feature_dim max_frames =", "__init__(self, input_shape, num_classes, num_mixtures, **kwargs): super(MOELogistic, self).__init__(**kwargs) self.num_classes = num_classes", "cg_out = self.first_cg(fc_out) moe_out = self.moe(cg_out) final_out = self.second_cg(moe_out) final_model", "tf.reshape(probs, [-1, self.num_classes]) return probs def compute_output_shape(self, input_shape): return (input_shape[0],", "* num_clusters]. Raises: ValueError: If the `feature_dim` of input is", "with shape [batch_size, feature_dim]. Returns: A tensor with shape [batch_size,", "activation=tf.nn.relu6, kernel_regularizer=tf.keras.regularizers.l2(1e-5), name=\"main_fc\" ) self.first_cg = ContextGating(input_shape=(batch_size, fc_units), name=\"first_cg\") self.moe", "A tensor with shape [batch_size, num_classes]. \"\"\" model_input = tf.keras.layers.Input(shape=input_shape,", "given input. Args: model_input: A tensor with shape [batch_size, feature_dim].", "name=\"video_vlad\") self.audio_vlad = NetVLAD(num_clusters//2, input_shape=audio_input_shape, name=\"audio_vlad\") #Relu6 is used as", "the form: [max_frames, video_feature_dim + audio_feature_dim]. 
second_input_shape: input shape of", "Apache License, Version 2.0 (the \"License\"); you may not use", "gate_activations = self.gate_fc(input) expert_activations = self.expert_fc(input) #Calculate the distribution across", "batch_size: raise ValueError(\"audio_input_shape[0] must equal video_input_shape[0]. Batch sizes must equal.\")", "must be greater than 1: %i\" % num_clusters) self.num_clusters =", "the input. Args: num_clusters: The number of clusters to use.", "IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "feature_dim * num_clusters)`. \"\"\" def __init__(self, num_clusters, input_shape, **kwargs): super().__init__(**kwargs)", "Input Shape: 3D tensor with shape: `(batch_size, time, feature_dim)`. Output", "Video Classifier model, implemented according to the winning model from", "\"\"\"Apply the MoE algorithm to the given input. Args: input:", "the NetVLAD layer. Input Shape: 3D tensor with shape: `(batch_size,", "at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to", "self.audio_vlad = NetVLAD(num_clusters//2, input_shape=audio_input_shape, name=\"audio_vlad\") #Relu6 is used as it", "feature_dim)`. Output shape: 2D tensor with shape: `(batch_size, num_classes)`. \"\"\"", "cluster_activation vlad_out = tf.nn.l2_normalize(vlad_out, 1) vlad_out = tf.reshape(vlad_out, (-1, feature_dim", "int): feature_dim = feature_dim.value self.fc = tf.keras.layers.Dense( units=self.num_clusters, activation=tf.nn.softmax, kernel_regularizer=tf.keras.regularizers.l2(1e-5),", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "feature_dim = model_input.shape.as_list()[-1] if feature_dim is None: raise ValueError(\"Last dimension", "self).__init__(**kwargs) if num_clusters % 2 != 0: raise ValueError(\"num_clusters must", "and video_input_shape do not match. 
ValueError: If the number of", "distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS", "of shape [batch_size, max_frames, video_feature_dim + audio_feature_dim]. Returns: A tensor", "list(config.items())) class ContextGating(tf.keras.layers.Layer): \"\"\"Implements the Context Gating Layer from https://arxiv.org/abs/1706.06905", "tf class NetVLAD(tf.keras.layers.Layer): \"\"\"Applies NetVLAD to the input. Args: num_clusters:", "vlad_out def compute_output_shape(self, input_shape): input_shape = tf.TensorShape(input_shape).as_list() return tf.TensorShape([input_shape[0], input_shape[-1]", "input_shape): input_shape = tf.TensorShape(input_shape).as_list() return tf.TensorShape([input_shape[0], input_shape[-1] * self.num_clusters]) def", "return vlad_out def compute_output_shape(self, input_shape): input_shape = tf.TensorShape(input_shape).as_list() return tf.TensorShape([input_shape[0],", "self.expert_fc = tf.keras.layers.Dense( units=num_classes*num_mixtures, kernel_regularizer=tf.keras.regularizers.l2(1e-6), ) def call(self, input): \"\"\"Apply", "across mixtures gate_dist = tf.nn.softmax(tf.reshape(gate_activations, [-1, self.num_mixtures+1])) expert_dist = tf.nn.sigmoid(tf.reshape(expert_activations,", "num_classes]. \"\"\" model_input = tf.keras.layers.Input(shape=input_shape, batch_size=batch_size) video_input = model_input[:,:,:self.video_feature_dim] audio_input", "ANY KIND, either express or implied. See the License for", "{\"num_clusters\": self.num_clusters} base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) class", "+ str(num_clusters) ) self.cluster_centers = self.add_weight( shape=(1, feature_dim, self.num_clusters), initializer=tf.keras.initializers.TruncatedNormal(", "shape [batch_size, feature_dim]. Returns: A tensor with shape [batch_size, feature_dim].", "given input. 
Args: input: A tensor with shape [batch_size, feature_dim].", "= self.second_cg(moe_out) final_model = tf.keras.models.Model(inputs=model_input, outputs=final_out) return final_model class SegmentClassifier:", "is not defined. \"\"\" model_input.shape.assert_has_rank(2) feature_dim = model_input.shape.as_list()[-1] if feature_dim", "if feature_dim is None: raise ValueError(\"Last dimension must be defined.\")", "class specific features. Shape is of the form [num_new_features] Returns:", "2D tensor with shape: `(batch_size, num_classes)`. \"\"\" def __init__(self, input_shape,", "found here: https://arxiv.org/abs/1911.08548 Arguments: num_clusters: the number of clusters to", "def get_config(self): base_config = super().get_config() return dict(list(base_config.items())) class MOELogistic(tf.keras.layers.Layer): \"\"\"Implements", "video_input_shape do not match. ValueError: If the number of samples", "audio_input_shape, num_classes, fc_units, **kwargs): super(SegmentClassifier, self).__init__(**kwargs) if num_clusters % 2", "probs = tf.reduce_sum(tf.math.multiply(gate_dist[:,:self.num_mixtures], expert_dist),1) probs = tf.reshape(probs, [-1, self.num_classes]) return", "under the License is distributed on an \"AS IS\" BASIS,", "Classifier model, implemented according to the winning model from the", "self.num_clusters), initializer=tf.keras.initializers.TruncatedNormal( stddev=1.0 / math.sqrt(feature_dim) ), trainable=True, name=\"cluster_centers\" + str(num_clusters)", "winning model from the Youtube-8M Challenge. The model can be", ") def build_model(self, input_shape, second_input_shape, batch_size): \"\"\"Perform one forward pass", "Returns: vlad_out: A tensor with shape [batch_size, feature_dim * num_clusters].", "Shape is of the form [num_new_features] Returns: A tensor with", "is None: raise ValueError(\"Last dimension must be defined.\") context_gate =", "tensor with shape [batch_size, num_classes]. 
Raises: ValueError: If the `feature_dim`", "must equal.\") self.num_frames = video_input_shape[1] self.num_classes = num_classes self.num_mixtures =", "of the form [num_new_features] Returns: A tensor with shape [batch_size,", "of model_input is not defined. \"\"\" model_input.shape.assert_has_rank(2) feature_dim = model_input.shape.as_list()[-1]", "audio_feature_dim]. second_input_shape: input shape of new class specific features. Shape", "2.\") batch_size = video_input_shape[0] if audio_input_shape[0] != batch_size: raise ValueError(\"audio_input_shape[0]", "audio_vlad_out = self.audio_vlad(audio_input) vlad_out = tf.concat([video_vlad_out, audio_vlad_out], axis=1) vlad_out =", "self.num_mixtures = num_mixtures self.gate_fc = tf.keras.layers.Dense( units=num_classes*(num_mixtures+1), kernel_regularizer=tf.keras.regularizers.l2(1e-6), ) self.expert_fc", "num_clusters feature_dim = input_shape[-1] if not isinstance(feature_dim, int): feature_dim =", "\"\"\"Apply the ContextGating module to the given input. Args: model_input:", "expert_activations = self.expert_fc(input) #Calculate the distribution across mixtures gate_dist =", "= num_mixtures self.gate_fc = tf.keras.layers.Dense( units=num_classes*(num_mixtures+1), kernel_regularizer=tf.keras.regularizers.l2(1e-6), ) self.expert_fc =", "samples must equal.\") self.num_frames = video_input_shape[1] self.num_classes = num_classes self.num_mixtures", "self.video_vlad = NetVLAD(num_clusters, input_shape=video_input_shape, name=\"video_vlad\") self.audio_vlad = NetVLAD(num_clusters//2, input_shape=audio_input_shape, name=\"audio_vlad\")", "to use. input_shape: 3D tensor denoting the input shape of", "model_input: A tensor with shape [batch_size, feature_dim]. 
Returns: A tensor", "= super().get_config() return dict(list(base_config.items())) class MOELogistic(tf.keras.layers.Layer): \"\"\"Implements a Mixture of", "be found here: https://arxiv.org/abs/1911.08548 Arguments: num_clusters: the number of clusters", "the given input. Args: model_input: A tensor with shape [batch_size,", "context_gate = self.fc(model_input) output = tf.math.multiply(context_gate, model_input) return output def", "feature_dim]. Returns: A tensor with shape [batch_size, feature_dim]. Raises: ValueError:", "this file except in compliance with the License. You may", "num_mixtures, **kwargs): super(MOELogistic, self).__init__(**kwargs) self.num_classes = num_classes self.num_mixtures = num_mixtures", "self.expert_fc(input) #Calculate the distribution across mixtures gate_dist = tf.nn.softmax(tf.reshape(gate_activations, [-1,", "used for NetVLAD. The audio clusters will be num_clusters/2. video_input_shape:", "of the audio_input_shape and video_input_shape do not match. ValueError: If", "input_shape, batch_size): \"\"\"Perform one forward pass of the model. Args:", "of the model. Args: input_shape: input shape for video features.", "model_input: input features of shape [batch_size, max_frames, video_feature_dim + audio_feature_dim].", "clusters to use. input_shape: 3D tensor denoting the input shape", "2D tensor with shape: `(batch_size, feature_dim)`. \"\"\" def __init__(self, input_shape,", "do not match. 
ValueError: If the number of samples of", "Batch sizes must equal.\") if audio_input_shape[1] != video_input_shape[1]: raise ValueError(\"audio_input_shape[1]", "self.first_cg = ContextGating(input_shape=(batch_size, fc_units), name=\"first_cg\") self.fc2 = tf.keras.layers.Dense( units=1, activation=tf.keras.activations.sigmoid,", "tf.keras.layers.Input(shape=second_input_shape, batch_size=batch_size) video_input = model_input[:,:,:self.video_feature_dim] audio_input = model_input[:,:,self.video_feature_dim:] video_vlad_out =", "ValueError: If the batch sizes of the audio_input_shape and video_input_shape", "of input is not defined. \"\"\" feature_dim = self.feature_dim max_frames", "name=\"audio_vlad\") #Relu6 is used as it is employed in the", "= tf.keras.layers.Dense( units=num_classes*num_mixtures, kernel_regularizer=tf.keras.regularizers.l2(1e-6), ) def call(self, input): \"\"\"Apply the", "= ContextGating(input_shape=(batch_size, fc_units), name=\"first_cg\") self.fc2 = tf.keras.layers.Dense( units=1, activation=tf.keras.activations.sigmoid, kernel_regularizer=tf.keras.regularizers.l2(1e-5),", "str(num_clusters) ) self.cluster_centers = self.add_weight( shape=(1, feature_dim, self.num_clusters), initializer=tf.keras.initializers.TruncatedNormal( stddev=1.0", "forward pass of the model. Args: input_shape: input shape for", "batch_size = video_input_shape[0] if audio_input_shape[0] != batch_size: raise ValueError(\"audio_input_shape[0] must", "self.add_weight( shape=(1, feature_dim, self.num_clusters), initializer=tf.keras.initializers.TruncatedNormal( stddev=1.0 / math.sqrt(feature_dim) ), trainable=True,", "activation = self.fc(frames) activation = tf.reshape(activation, (-1, max_frames, self.num_clusters)) activation_sum", "base_config.update({'number of classes': self.num_classes, 'number of mixtures': self.num_mixtures}) return config", "for NetVLAD. The audio clusters will be num_clusters/2. video_input_shape: shape", "not match. 
ValueError: If the number of samples of the", "iterations, **kwargs): super(VideoClassifier, self).__init__(**kwargs) if num_clusters % 2 != 0:", "build_model(self, input_shape, batch_size): \"\"\"Perform one forward pass of the model.", "= video_input_shape[2] self.video_vlad = NetVLAD(num_clusters, input_shape=video_input_shape, name=\"video_vlad\") self.audio_vlad = NetVLAD(num_clusters//2,", "num_clusters)`. \"\"\" def __init__(self, num_clusters, input_shape, **kwargs): super().__init__(**kwargs) if num_clusters", "NetVLAD(num_clusters, input_shape=video_input_shape, name=\"video_vlad\") self.audio_vlad = NetVLAD(num_clusters//2, input_shape=audio_input_shape, name=\"audio_vlad\") #Relu6 is", "shape of new class specific features. Shape is of the", "be divisible by 2.\") batch_size = video_input_shape[0] if audio_input_shape[0] !=", "= tf.keras.layers.Dense( units=fc_units, activation=tf.nn.relu6, kernel_regularizer=tf.keras.regularizers.l2(1e-5), name=\"main_fc\" ) self.first_cg = ContextGating(input_shape=(batch_size,", "name=\"moe\") self.second_cg = ContextGating(input_shape=self.moe.compute_output_shape((batch_size, fc_units)), name=\"second_cg\") def build_model(self, input_shape, batch_size):", "obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required", "here: https://arxiv.org/pdf/1706.06905.pdf Arguments: num_clusters: the number of clusters to be", "[batch_size, feature_dim]. Raises: ValueError: If the `feature_dim` of model_input is", "kernel_regularizer=tf.keras.regularizers.l2(1e-6), ) self.expert_fc = tf.keras.layers.Dense( units=num_classes*num_mixtures, kernel_regularizer=tf.keras.regularizers.l2(1e-6), ) def call(self,", "outputs=final_out) return final_model class SegmentClassifier: \"\"\"The Segment Classifier model, implemented", "vlad_out = tf.nn.l2_normalize(vlad_out, 1) vlad_out = tf.reshape(vlad_out, (-1, feature_dim *", "audio_feature_dim]. 
Raises: ValueError: If num_clusters is not divisible by 2.", "shape: `(batch_size, time, feature_dim)`. Output shape: 2D tensor with shape:", "* self.cluster_centers frames = tf.reshape(frames, (-1, max_frames, feature_dim)) activation =", "[batch_size, num_classes]. Raises: ValueError: If the `feature_dim` of input is", "= tf.reduce_sum(tf.math.multiply(gate_dist[:,:self.num_mixtures], expert_dist),1) probs = tf.reshape(probs, [-1, self.num_classes]) return probs", "name=\"main_fc2\" ) def build_model(self, input_shape, second_input_shape, batch_size): \"\"\"Perform one forward", "tf.nn.softmax(tf.reshape(gate_activations, [-1, self.num_mixtures+1])) expert_dist = tf.nn.sigmoid(tf.reshape(expert_activations, [-1, self.num_mixtures])) probs =", "one forward pass of the model. Args: input_shape: input shape", "tensor with shape: `(batch_size, feature_dim * num_clusters)`. \"\"\" def __init__(self,", "Returns: A tensor with shape [batch_size, num_classes]. \"\"\" model_input =", "[batch_size, num_classes]. \"\"\" model_input = tf.keras.layers.Input(shape=input_shape, batch_size=batch_size) video_input = model_input[:,:,:self.video_feature_dim]", "= tf.math.multiply(context_gate, model_input) return output def compute_output_shape(self, input_shape): return input_shape", "self.fc(vlad_out) cg_out = self.first_cg(fc_out) final_out = self.fc2(cg_out) final_model = tf.keras.models.Model(inputs=[model_input,", "frames: A tensor with shape [batch_size, max_frames, feature_dim]. Returns: vlad_out:", "= tf.nn.l2_normalize(vlad_out, 1) vlad_out = tf.reshape(vlad_out, (-1, feature_dim * self.num_clusters))", "If the `feature_dim` of model_input is not defined. 
\"\"\" model_input.shape.assert_has_rank(2)", "= self.expert_fc(input) #Calculate the distribution across mixtures gate_dist = tf.nn.softmax(tf.reshape(gate_activations,", "if num_clusters <= 0: raise ValueError(\"`num_clusters` must be greater than", "self.num_clusters)) vlad_out = tf.nn.l2_normalize(vlad_out, 1) return vlad_out def compute_output_shape(self, input_shape):", "input): \"\"\"Apply the MoE algorithm to the given input. Args:", "of the input video features. Shape of [batch_size, num_samples, video_feature_dim].", "self.num_frames = video_input_shape[1] self.num_classes = num_classes self.video_feature_dim = video_input_shape[2] self.video_vlad", "file except in compliance with the License. You may obtain", "frames = tf.reshape(frames, (-1, max_frames, feature_dim)) activation = tf.transpose( tf.matmul(tf.transpose(activation,", "1)), frames), perm=(0, 2, 1) ) vlad_out = activation -", "units=fc_units, activation=tf.nn.relu6, kernel_regularizer=tf.keras.regularizers.l2(1e-5), name=\"main_fc\" ) self.first_cg = ContextGating(input_shape=(batch_size, fc_units), name=\"first_cg\")", "ContextGating(input_shape=self.moe.compute_output_shape((batch_size, fc_units)), name=\"second_cg\") def build_model(self, input_shape, batch_size): \"\"\"Perform one forward", "= tf.nn.softmax(tf.reshape(gate_activations, [-1, self.num_mixtures+1])) expert_dist = tf.nn.sigmoid(tf.reshape(expert_activations, [-1, self.num_mixtures])) probs", "not defined. \"\"\" gate_activations = self.gate_fc(input) expert_activations = self.expert_fc(input) #Calculate", "of input is not defined. 
\"\"\" gate_activations = self.gate_fc(input) expert_activations", "fc_units), name=\"first_cg\") self.fc2 = tf.keras.layers.Dense( units=1, activation=tf.keras.activations.sigmoid, kernel_regularizer=tf.keras.regularizers.l2(1e-5), name=\"main_fc2\" )", "tf.math.reduce_sum(activation, axis=-2, keepdims=True) cluster_activation = activation_sum * self.cluster_centers frames =", "input shape of the NetVLAD layer. Input Shape: 3D tensor", "OR CONDITIONS OF ANY KIND, either express or implied. See", "time, feature_dim)`. Output shape: 2D tensor with shape: `(batch_size, feature_dim", "self.max_frames = input_shape[-2] def call(self, frames): \"\"\"Apply the NetVLAD module", "model_input.shape.as_list()[-1] if feature_dim is None: raise ValueError(\"Last dimension must be", "shape of the NetVLAD layer. Input Shape: 3D tensor with", "the distribution across mixtures gate_dist = tf.nn.softmax(tf.reshape(gate_activations, [-1, self.num_mixtures+1])) expert_dist", "not isinstance(feature_dim, int): feature_dim = feature_dim.value self.fc = tf.keras.layers.Dense( units=self.num_clusters,", "tf.matmul(tf.transpose(activation, perm=(0, 2, 1)), frames), perm=(0, 2, 1) ) vlad_out", "max_frames, video_feature_dim + audio_feature_dim]. Returns: A tensor with shape [batch_size,", "algorithm to the given input. Args: input: A tensor with", "model_input = tf.keras.layers.Input(shape=input_shape, batch_size=batch_size) video_input = model_input[:,:,:self.video_feature_dim] audio_input = model_input[:,:,self.video_feature_dim:]", "the model. Args: input_shape: input shape for video features. Shape", "model_input.shape.assert_has_rank(2) feature_dim = model_input.shape.as_list()[-1] if feature_dim is None: raise ValueError(\"Last", "NetVLAD. The audio clusters will be num_clusters/2. 
video_input_shape: shape of", "tf.keras.layers.Dense( units=num_classes*(num_mixtures+1), kernel_regularizer=tf.keras.regularizers.l2(1e-6), ) self.expert_fc = tf.keras.layers.Dense( units=num_classes*num_mixtures, kernel_regularizer=tf.keras.regularizers.l2(1e-6), )", "under the Apache License, Version 2.0 (the \"License\"); you may", "self.num_classes = num_classes self.num_mixtures = num_mixtures self.iterations = iterations self.video_feature_dim", "dict(list(base_config.items()) + list(config.items())) class ContextGating(tf.keras.layers.Layer): \"\"\"Implements the Context Gating Layer", "tf.keras.layers.Dense( units=feature_dim, activation=tf.nn.sigmoid, kernel_regularizer=tf.keras.regularizers.l2(1e-5), ) def call(self, model_input): \"\"\"Apply the", "model can be found here: https://arxiv.org/abs/1911.08548 Arguments: num_clusters: the number", "self.fc(frames) activation = tf.reshape(activation, (-1, max_frames, self.num_clusters)) activation_sum = tf.math.reduce_sum(activation,", "compute_output_shape(self, input_shape): input_shape = tf.TensorShape(input_shape).as_list() return tf.TensorShape([input_shape[0], input_shape[-1] * self.num_clusters])", "ValueError(\"`num_clusters` must be greater than 1: %i\" % num_clusters) self.num_clusters", "The model can be found here: https://arxiv.org/pdf/1706.06905.pdf Arguments: num_clusters: the", "must equal.\") self.num_frames = video_input_shape[1] self.num_classes = num_classes self.video_feature_dim =", "axis=1) vlad_out = tf.concat([vlad_out, model_input2], axis=1) fc_out = self.fc(vlad_out) cg_out", "= tf.reshape(frames, (-1, feature_dim)) activation = self.fc(frames) activation = tf.reshape(activation,", "expert_dist = tf.nn.sigmoid(tf.reshape(expert_activations, [-1, self.num_mixtures])) probs = tf.reduce_sum(tf.math.multiply(gate_dist[:,:self.num_mixtures], expert_dist),1) probs", "= tf.keras.layers.Input(shape=input_shape, batch_size=batch_size) video_input = model_input[:,:,:self.video_feature_dim] 
audio_input = model_input[:,:,self.video_feature_dim:] video_vlad_out", "feature_dim)`. Output shape: 2D tensor with shape: `(batch_size, feature_dim)`. \"\"\"", "of the audio_input_shape and video_input_shape do not match. \"\"\" def", "+ str(num_clusters) ) self.feature_dim = feature_dim self.max_frames = input_shape[-2] def", "(input_shape[0], self.num_classes) def get_config(self): base_config = super().get_config() config = base_config.update({'number", "the audio_input_shape and video_input_shape do not match. \"\"\" def __init__(self,", "def call(self, model_input): \"\"\"Apply the ContextGating module to the given", "**kwargs): super(VideoClassifier, self).__init__(**kwargs) if num_clusters % 2 != 0: raise", "[batch_size, max_frames, video_feature_dim + audio_feature_dim]. Returns: A tensor with shape", "vlad_out = tf.nn.l2_normalize(vlad_out, 1) return vlad_out def compute_output_shape(self, input_shape): input_shape", "audio_vlad_out = self.audio_vlad(audio_input) vlad_out = tf.concat([video_vlad_out, audio_vlad_out], axis=1) fc_out =", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "config = {\"num_clusters\": self.num_clusters} base_config = super().get_config() return dict(list(base_config.items()) +", "activation=tf.nn.sigmoid, kernel_regularizer=tf.keras.regularizers.l2(1e-5), ) def call(self, model_input): \"\"\"Apply the ContextGating module", "See the License for the specific language governing permissions and", "output = tf.math.multiply(context_gate, model_input) return output def compute_output_shape(self, input_shape): return", "A tensor with shape [batch_size, feature_dim * num_clusters]. Raises: ValueError:", "class MOELogistic(tf.keras.layers.Layer): \"\"\"Implements a Mixture of Logistic Experts classifier. Input", "as it is employed in the paper. 
self.fc = tf.keras.layers.Dense(", "= num_classes self.video_feature_dim = video_input_shape[2] self.video_vlad = NetVLAD(num_clusters, input_shape=video_input_shape, name=\"video_vlad\")", "shape [batch_size, feature_dim]. Returns: A tensor with shape [batch_size, num_classes].", "implemented according to the winning model from the Youtube-8M Challenge.", "not defined. \"\"\" feature_dim = self.feature_dim max_frames = self.max_frames frames", "shape: `(batch_size, num_classes)`. \"\"\" def __init__(self, input_shape, num_classes, num_mixtures, **kwargs):", "super().__init__(**kwargs) if num_clusters <= 0: raise ValueError(\"`num_clusters` must be greater", "Input shape: 2D tensor with shape: `(batch_size, feature_dim)`. Output shape:", "self.num_classes) def get_config(self): base_config = super().get_config() config = base_config.update({'number of", "audio_vlad_out], axis=1) vlad_out = tf.concat([vlad_out, model_input2], axis=1) fc_out = self.fc(vlad_out)", "NetVLAD layer. Input Shape: 3D tensor with shape: `(batch_size, time,", "Args: model_input: A tensor with shape [batch_size, feature_dim]. Returns: A", "num_classes self.num_mixtures = num_mixtures self.gate_fc = tf.keras.layers.Dense( units=num_classes*(num_mixtures+1), kernel_regularizer=tf.keras.regularizers.l2(1e-6), )", "to the given input. Args: input: A tensor with shape", "not divisible by 2. ValueError: If the batch sizes of", "\"\"\" def __init__(self, num_clusters, video_input_shape, audio_input_shape, num_classes, fc_units, **kwargs): super(SegmentClassifier,", "= tf.transpose( tf.matmul(tf.transpose(activation, perm=(0, 2, 1)), frames), perm=(0, 2, 1)", "activation=tf.keras.activations.sigmoid, kernel_regularizer=tf.keras.regularizers.l2(1e-5), name=\"main_fc2\" ) def build_model(self, input_shape, second_input_shape, batch_size): \"\"\"Perform", "Output shape: 2D tensor with shape: `(batch_size, feature_dim * num_clusters)`.", "[batch_size, max_frames, feature_dim]. 
Returns: vlad_out: A tensor with shape [batch_size,", "from https://arxiv.org/abs/1706.06905 Input shape: 2D tensor with shape: `(batch_size, feature_dim)`.", "in writing, software distributed under the License is distributed on", "classifier. Input shape: 2D tensor with shape: `(batch_size, feature_dim)`. Output", "Shape of [batch_size, num_samples, audio_feature_dim]. Raises: ValueError: If num_clusters is", "required by applicable law or agreed to in writing, software", "frames), perm=(0, 2, 1) ) vlad_out = activation - cluster_activation", "and video_input_shape do not match. \"\"\" def __init__(self, num_clusters, video_input_shape,", "https://arxiv.org/abs/1706.06905 Input shape: 2D tensor with shape: `(batch_size, feature_dim)`. Output", "num_classes, num_mixtures, fc_units, iterations, **kwargs): super(VideoClassifier, self).__init__(**kwargs) if num_clusters %", "num_mixtures self.gate_fc = tf.keras.layers.Dense( units=num_classes*(num_mixtures+1), kernel_regularizer=tf.keras.regularizers.l2(1e-6), ) self.expert_fc = tf.keras.layers.Dense(", "the ContextGating module to the given input. Args: model_input: A", "def __init__(self, num_clusters, video_input_shape, audio_input_shape, num_classes, fc_units, **kwargs): super(SegmentClassifier, self).__init__(**kwargs)", "= model_input.shape.as_list()[-1] if feature_dim is None: raise ValueError(\"Last dimension must", "shape [batch_size, num_classes]. \"\"\" model_input = tf.keras.layers.Input(shape=input_shape, batch_size=batch_size) video_input =", "model from the Youtube-8M Challenge. The model can be found", "Defines the architecture of the Video Classifier. 
\"\"\" import math", "Raises: ValueError: If the `feature_dim` of model_input is not defined.", "1) return vlad_out def compute_output_shape(self, input_shape): input_shape = tf.TensorShape(input_shape).as_list() return", "sizes must equal.\") if audio_input_shape[1] != video_input_shape[1]: raise ValueError(\"audio_input_shape[1] must", "num_classes self.video_feature_dim = video_input_shape[2] self.video_vlad = NetVLAD(num_clusters, input_shape=video_input_shape, name=\"video_vlad\") self.audio_vlad", "\"\"\"Copyright 2020 Google LLC Licensed under the Apache License, Version", "self.fc2 = tf.keras.layers.Dense( units=1, activation=tf.keras.activations.sigmoid, kernel_regularizer=tf.keras.regularizers.l2(1e-5), name=\"main_fc2\" ) def build_model(self,", "= NetVLAD(num_clusters, input_shape=video_input_shape, name=\"video_vlad\") self.audio_vlad = NetVLAD(num_clusters//2, input_shape=audio_input_shape, name=\"audio_vlad\") #Relu6", "with shape [batch_size, num_classes]. \"\"\" model_input = tf.keras.layers.Input(shape=input_shape, batch_size=batch_size) video_input", "% num_clusters) self.num_clusters = num_clusters feature_dim = input_shape[-1] if not", "self.num_clusters)) activation_sum = tf.math.reduce_sum(activation, axis=-2, keepdims=True) cluster_activation = activation_sum *", "Number of samples must equal.\") self.num_frames = video_input_shape[1] self.num_classes =", "software distributed under the License is distributed on an \"AS", "distributed under the License is distributed on an \"AS IS\"", "tf.concat([vlad_out, model_input2], axis=1) fc_out = self.fc(vlad_out) cg_out = self.first_cg(fc_out) final_out", "self.fc(vlad_out) cg_out = self.first_cg(fc_out) moe_out = self.moe(cg_out) final_out = self.second_cg(moe_out)", "self.num_mixtures])) probs = tf.reduce_sum(tf.math.multiply(gate_dist[:,:self.num_mixtures], expert_dist),1) probs = tf.reshape(probs, [-1, self.num_classes])", "= super().get_config() return dict(list(base_config.items()) + 
list(config.items())) class ContextGating(tf.keras.layers.Layer): \"\"\"Implements the", "If the number of samples of the audio_input_shape and video_input_shape", "\"\"\" model_input = tf.keras.layers.Input(shape=input_shape, batch_size=batch_size) video_input = model_input[:,:,:self.video_feature_dim] audio_input =", "use. input_shape: 3D tensor denoting the input shape of the", "raise ValueError(\"`num_clusters` must be greater than 1: %i\" % num_clusters)", "CONDITIONS OF ANY KIND, either express or implied. See the", "samples must equal.\") self.num_frames = video_input_shape[1] self.num_classes = num_classes self.video_feature_dim", "audio_feature_dim]. Returns: A tensor with shape [batch_size, num_classes]. \"\"\" model_input", ") self.cluster_centers = self.add_weight( shape=(1, feature_dim, self.num_clusters), initializer=tf.keras.initializers.TruncatedNormal( stddev=1.0 /", "max_frames, feature_dim)) activation = tf.transpose( tf.matmul(tf.transpose(activation, perm=(0, 2, 1)), frames),", "Version 2.0 (the \"License\"); you may not use this file", "ValueError(\"audio_input_shape[0] must equal video_input_shape[0]. Batch sizes must equal.\") if audio_input_shape[1]", "of clusters to be used for NetVLAD. The audio clusters", "= self.fc(vlad_out) cg_out = self.first_cg(fc_out) final_out = self.fc2(cg_out) final_model =", "Raises: ValueError: If num_clusters is not divisible by 2. ValueError:", "num_clusters: the number of clusters to be used for NetVLAD.", "the given frames. Args: frames: A tensor with shape [batch_size,", "tensor with shape [batch_size, num_classes]. \"\"\" model_input = tf.keras.layers.Input(shape=input_shape, batch_size=batch_size)", "SegmentClassifier: \"\"\"The Segment Classifier model, implemented according to the winning", "video_input_shape[0]. 
Batch sizes must equal.\") if audio_input_shape[1] != video_input_shape[1]: raise", "of classes': self.num_classes, 'number of mixtures': self.num_mixtures}) return config class", "not use this file except in compliance with the License.", "2.0 (the \"License\"); you may not use this file except", "num_clusters, video_input_shape, audio_input_shape, num_classes, num_mixtures, fc_units, iterations, **kwargs): super(VideoClassifier, self).__init__(**kwargs)", "!= 0: raise ValueError(\"num_clusters must be divisible by 2.\") batch_size", "tensor with shape: `(batch_size, feature_dim)`. \"\"\" def __init__(self, input_shape, **kwargs):", "sizes of the audio_input_shape and video_input_shape do not match. ValueError:", "return probs def compute_output_shape(self, input_shape): return (input_shape[0], self.num_classes) def get_config(self):", "Gating Layer from https://arxiv.org/abs/1706.06905 Input shape: 2D tensor with shape:", "A tensor with shape [batch_size, max_frames, feature_dim]. Returns: vlad_out: A", "Args: model_input: input features of shape [batch_size, max_frames, video_feature_dim +", "[batch_size, num_classes]. \"\"\" model_input = tf.keras.layers.Input(shape=input_shape, batch_size=batch_size) model_input2 = tf.keras.layers.Input(shape=second_input_shape,", "of new class specific features. Shape is of the form", "architecture of the Video Classifier. \"\"\" import math import tensorflow", "you may not use this file except in compliance with", "the Context Gating Layer from https://arxiv.org/abs/1706.06905 Input shape: 2D tensor", "match. 
\"\"\" def __init__(self, num_clusters, video_input_shape, audio_input_shape, num_classes, fc_units, **kwargs):", "input_shape, second_input_shape, batch_size): \"\"\"Perform one forward pass of the model.", "is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "model_input = tf.keras.layers.Input(shape=input_shape, batch_size=batch_size) model_input2 = tf.keras.layers.Input(shape=second_input_shape, batch_size=batch_size) video_input =", "the License. You may obtain a copy of the License", "self.video_vlad(video_input) audio_vlad_out = self.audio_vlad(audio_input) vlad_out = tf.concat([video_vlad_out, audio_vlad_out], axis=1) vlad_out", "def compute_output_shape(self, input_shape): return input_shape def get_config(self): base_config = super().get_config()", "* num_clusters)`. \"\"\" def __init__(self, num_clusters, input_shape, **kwargs): super().__init__(**kwargs) if", "Context Gating Layer from https://arxiv.org/abs/1706.06905 Input shape: 2D tensor with", "name=\"cluster_centers\" + str(num_clusters) ) self.feature_dim = feature_dim self.max_frames = input_shape[-2]", "use this file except in compliance with the License. You", "video_input_shape do not match. \"\"\" def __init__(self, num_clusters, video_input_shape, audio_input_shape,", "Args: num_clusters: The number of clusters to use. input_shape: 3D", "equal.\") self.num_frames = video_input_shape[1] self.num_classes = num_classes self.num_mixtures = num_mixtures", "module to the given frames. Args: frames: A tensor with", "activation - cluster_activation vlad_out = tf.nn.l2_normalize(vlad_out, 1) vlad_out = tf.reshape(vlad_out,", "A tensor with shape [batch_size, num_classes]. Raises: ValueError: If the", "NetVLAD(tf.keras.layers.Layer): \"\"\"Applies NetVLAD to the input. Args: num_clusters: The number", "\"\"\" gate_activations = self.gate_fc(input) expert_activations = self.expert_fc(input) #Calculate the distribution", "3D tensor with shape: `(batch_size, time, feature_dim)`. 
Output shape: 2D", "= model_input[:,:,:self.video_feature_dim] audio_input = model_input[:,:,self.video_feature_dim:] video_vlad_out = self.video_vlad(video_input) audio_vlad_out =", "shape [batch_size, feature_dim * num_clusters]. Raises: ValueError: If the `feature_dim`", "self.num_frames = video_input_shape[1] self.num_classes = num_classes self.num_mixtures = num_mixtures self.iterations", ") vlad_out = activation - cluster_activation vlad_out = tf.nn.l2_normalize(vlad_out, 1)", "be used for NetVLAD. The audio clusters will be num_clusters/2.", "batch_size): \"\"\"Perform one forward pass of the model. Args: input_shape:", "tf.reshape(frames, (-1, max_frames, feature_dim)) activation = tf.transpose( tf.matmul(tf.transpose(activation, perm=(0, 2,", "feature_dim]. Raises: ValueError: If the `feature_dim` of model_input is not", "input: A tensor with shape [batch_size, feature_dim]. Returns: A tensor", "the paper. self.fc = tf.keras.layers.Dense( units=fc_units, activation=tf.nn.relu6, kernel_regularizer=tf.keras.regularizers.l2(1e-5), name=\"main_fc\" )", "https://arxiv.org/abs/1911.08548 Arguments: num_clusters: the number of clusters to be used", "\"\"\"Perform one forward pass of the model. Args: input_shape: input", "number of clusters to use. input_shape: 3D tensor denoting the", "model_input[:,:,self.video_feature_dim:] video_vlad_out = self.video_vlad(video_input) audio_vlad_out = self.audio_vlad(audio_input) vlad_out = tf.concat([video_vlad_out,", "ValueError: If the number of samples of the audio_input_shape and", "= tf.concat([vlad_out, model_input2], axis=1) fc_out = self.fc(vlad_out) cg_out = self.first_cg(fc_out)", "num_clusters, input_shape, **kwargs): super().__init__(**kwargs) if num_clusters <= 0: raise ValueError(\"`num_clusters`", "input_shape, **kwargs): super().__init__(**kwargs) if num_clusters <= 0: raise ValueError(\"`num_clusters` must", "the winning model from the Youtube-8M Challenge. 
The model can", "= num_classes self.num_mixtures = num_mixtures self.gate_fc = tf.keras.layers.Dense( units=num_classes*(num_mixtures+1), kernel_regularizer=tf.keras.regularizers.l2(1e-6),", "the `feature_dim` of input is not defined. \"\"\" feature_dim =", "super(VideoClassifier, self).__init__(**kwargs) if num_clusters % 2 != 0: raise ValueError(\"num_clusters", "super(ContextGating, self).__init__(**kwargs) feature_dim = input_shape[-1] if not isinstance(feature_dim, int): feature_dim", ") def call(self, model_input): \"\"\"Apply the ContextGating module to the", "num_clusters]. Raises: ValueError: If the `feature_dim` of input is not", "self.audio_vlad(audio_input) vlad_out = tf.concat([video_vlad_out, audio_vlad_out], axis=1) fc_out = self.fc(vlad_out) cg_out", "of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law", "input shape of new class specific features. Shape is of", "def get_config(self): config = {\"num_clusters\": self.num_clusters} base_config = super().get_config() return", "#Calculate the distribution across mixtures gate_dist = tf.nn.softmax(tf.reshape(gate_activations, [-1, self.num_mixtures+1]))", "/ math.sqrt(feature_dim) ), trainable=True, name=\"cluster_centers\" + str(num_clusters) ) self.feature_dim =", "given frames. Args: frames: A tensor with shape [batch_size, max_frames,", "vlad_out: A tensor with shape [batch_size, feature_dim * num_clusters]. Raises:", "mixtures gate_dist = tf.nn.softmax(tf.reshape(gate_activations, [-1, self.num_mixtures+1])) expert_dist = tf.nn.sigmoid(tf.reshape(expert_activations, [-1,", "def compute_output_shape(self, input_shape): return (input_shape[0], self.num_classes) def get_config(self): base_config =", "Shape of [batch_size, num_samples, video_feature_dim]. audio_input_shape: shape fo the input", "+ audio_feature_dim]. second_input_shape: input shape of new class specific features.", "[num_new_features] Returns: A tensor with shape [batch_size, num_classes]. 
\"\"\" model_input", "* self.num_clusters]) def get_config(self): config = {\"num_clusters\": self.num_clusters} base_config =", "`(batch_size, feature_dim)`. \"\"\" def __init__(self, input_shape, **kwargs): super(ContextGating, self).__init__(**kwargs) feature_dim", "= iterations self.video_feature_dim = video_input_shape[2] self.video_vlad = NetVLAD(num_clusters, input_shape=video_input_shape, name=\"video_vlad\")", "final_model = tf.keras.models.Model(inputs=model_input, outputs=final_out) return final_model class SegmentClassifier: \"\"\"The Segment", "tensor with shape: `(batch_size, time, feature_dim)`. Output shape: 2D tensor", "def __init__(self, input_shape, num_classes, num_mixtures, **kwargs): super(MOELogistic, self).__init__(**kwargs) self.num_classes =", "Shape: 3D tensor with shape: `(batch_size, time, feature_dim)`. Output shape:", "(the \"License\"); you may not use this file except in", "= super().get_config() config = base_config.update({'number of classes': self.num_classes, 'number of", "feature_dim = self.feature_dim max_frames = self.max_frames frames = tf.reshape(frames, (-1,", "module to the given input. Args: model_input: A tensor with", "of samples of the audio_input_shape and video_input_shape do not match.", "License. Defines the architecture of the Video Classifier. \"\"\" import", "layer. 
Input Shape: 3D tensor with shape: `(batch_size, time, feature_dim)`.", "= video_input_shape[1] self.num_classes = num_classes self.num_mixtures = num_mixtures self.iterations =", "base_config = super().get_config() config = base_config.update({'number of classes': self.num_classes, 'number", "name=\"vlad_fc\" + str(num_clusters) ) self.cluster_centers = self.add_weight( shape=(1, feature_dim, self.num_clusters),", "**kwargs): super(SegmentClassifier, self).__init__(**kwargs) if num_clusters % 2 != 0: raise", "video_vlad_out = self.video_vlad(video_input) audio_vlad_out = self.audio_vlad(audio_input) vlad_out = tf.concat([video_vlad_out, audio_vlad_out],", "for video features. Shape is of the form: [max_frames, video_feature_dim", "fc_out = self.fc(vlad_out) cg_out = self.first_cg(fc_out) final_out = self.fc2(cg_out) final_model", "% 2 != 0: raise ValueError(\"num_clusters must be divisible by", "ContextGating(tf.keras.layers.Layer): \"\"\"Implements the Context Gating Layer from https://arxiv.org/abs/1706.06905 Input shape:", "the Video Classifier. \"\"\" import math import tensorflow as tf", "return output def compute_output_shape(self, input_shape): return input_shape def get_config(self): base_config", "samples of the audio_input_shape and video_input_shape do not match. \"\"\"", "[batch_size, num_samples, audio_feature_dim]. Raises: ValueError: If num_clusters is not divisible", "input_shape, num_classes, num_mixtures, **kwargs): super(MOELogistic, self).__init__(**kwargs) self.num_classes = num_classes self.num_mixtures", "self.num_clusters]) def get_config(self): config = {\"num_clusters\": self.num_clusters} base_config = super().get_config()", "audio_input_shape: shape fo the input audio features. Shape of [batch_size,", "to the given input. Args: model_input: A tensor with shape", "self.first_cg(fc_out) moe_out = self.moe(cg_out) final_out = self.second_cg(moe_out) final_model = tf.keras.models.Model(inputs=model_input,", "2. 
ValueError: If the batch sizes of the audio_input_shape and", "VideoClassifier: \"\"\"The Video Classifier model, implemented according to the winning", "units=feature_dim, activation=tf.nn.sigmoid, kernel_regularizer=tf.keras.regularizers.l2(1e-5), ) def call(self, model_input): \"\"\"Apply the ContextGating", "the Apache License, Version 2.0 (the \"License\"); you may not", "or implied. See the License for the specific language governing", "__init__(self, num_clusters, video_input_shape, audio_input_shape, num_classes, fc_units, **kwargs): super(SegmentClassifier, self).__init__(**kwargs) if", "vlad_out = tf.concat([vlad_out, model_input2], axis=1) fc_out = self.fc(vlad_out) cg_out =", "KIND, either express or implied. See the License for the", "= tf.reshape(vlad_out, (-1, feature_dim * self.num_clusters)) vlad_out = tf.nn.l2_normalize(vlad_out, 1)", "= activation_sum * self.cluster_centers frames = tf.reshape(frames, (-1, max_frames, feature_dim))", "to in writing, software distributed under the License is distributed", "self.num_clusters = num_clusters feature_dim = input_shape[-1] if not isinstance(feature_dim, int):", "model_input is not defined. \"\"\" model_input.shape.assert_has_rank(2) feature_dim = model_input.shape.as_list()[-1] if", "ValueError: If num_clusters is not divisible by 2. ValueError: If", "divisible by 2. ValueError: If the batch sizes of the", "input_shape[-2] def call(self, frames): \"\"\"Apply the NetVLAD module to the", "num_clusters % 2 != 0: raise ValueError(\"num_clusters must be divisible", "batch_size): \"\"\"Perform one forward pass of the model. Args: model_input:", "must equal video_input_shape[1]. 
Number of samples must equal.\") self.num_frames =", "law or agreed to in writing, software distributed under the", "fc_out = self.fc(vlad_out) cg_out = self.first_cg(fc_out) moe_out = self.moe(cg_out) final_out", "tf.keras.layers.Input(shape=input_shape, batch_size=batch_size) model_input2 = tf.keras.layers.Input(shape=second_input_shape, batch_size=batch_size) video_input = model_input[:,:,:self.video_feature_dim] audio_input", "self.video_feature_dim = video_input_shape[2] self.video_vlad = NetVLAD(num_clusters, input_shape=video_input_shape, name=\"video_vlad\") self.audio_vlad =", "def build_model(self, input_shape, second_input_shape, batch_size): \"\"\"Perform one forward pass of", "num_classes, fc_units, **kwargs): super(SegmentClassifier, self).__init__(**kwargs) if num_clusters % 2 !=", "it is employed in the paper. self.fc = tf.keras.layers.Dense( units=fc_units,", "Youtube-8M Challenge. The model can be found here: https://arxiv.org/pdf/1706.06905.pdf Arguments:", "int): feature_dim = feature_dim.value self.fc = tf.keras.layers.Dense( units=feature_dim, activation=tf.nn.sigmoid, kernel_regularizer=tf.keras.regularizers.l2(1e-5),", "the Youtube-8M Challenge. The model can be found here: https://arxiv.org/pdf/1706.06905.pdf", "on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF", "isinstance(feature_dim, int): feature_dim = feature_dim.value self.fc = tf.keras.layers.Dense( units=feature_dim, activation=tf.nn.sigmoid,", ") def call(self, input): \"\"\"Apply the MoE algorithm to the", "= tf.reshape(frames, (-1, max_frames, feature_dim)) activation = tf.transpose( tf.matmul(tf.transpose(activation, perm=(0,", "self.max_frames frames = tf.reshape(frames, (-1, feature_dim)) activation = self.fc(frames) activation", "def call(self, frames): \"\"\"Apply the NetVLAD module to the given", "with shape [batch_size, feature_dim * num_clusters]. Raises: ValueError: If the", "shape for video features. 
Shape is of the form: [max_frames,", "!= video_input_shape[1]: raise ValueError(\"audio_input_shape[1] must equal video_input_shape[1]. Number of samples", "ContextGating module to the given input. Args: model_input: A tensor", "self.fc = tf.keras.layers.Dense( units=feature_dim, activation=tf.nn.sigmoid, kernel_regularizer=tf.keras.regularizers.l2(1e-5), ) def call(self, model_input):", "tensor with shape [batch_size, feature_dim]. Returns: A tensor with shape", "match. \"\"\" def __init__(self, num_clusters, video_input_shape, audio_input_shape, num_classes, num_mixtures, fc_units,", "divisible by 2.\") batch_size = video_input_shape[0] if audio_input_shape[0] != batch_size:", "fc_units), name=\"first_cg\") self.moe = MOELogistic(input_shape=self.first_cg.compute_output_shape((batch_size, fc_units)), num_classes=self.num_classes, num_mixtures=self.num_mixtures, name=\"moe\") self.second_cg", "be num_clusters/2. video_input_shape: shape of the input video features. Shape", "'number of mixtures': self.num_mixtures}) return config class VideoClassifier: \"\"\"The Video", "self).__init__(**kwargs) feature_dim = input_shape[-1] if not isinstance(feature_dim, int): feature_dim =", "for the specific language governing permissions and limitations under the", "super().get_config() return dict(list(base_config.items())) class MOELogistic(tf.keras.layers.Layer): \"\"\"Implements a Mixture of Logistic", "limitations under the License. 
Defines the architecture of the Video", "can be found here: https://arxiv.org/pdf/1706.06905.pdf Arguments: num_clusters: the number of", "**kwargs): super(ContextGating, self).__init__(**kwargs) feature_dim = input_shape[-1] if not isinstance(feature_dim, int):", "The model can be found here: https://arxiv.org/abs/1911.08548 Arguments: num_clusters: the", "self.first_cg(fc_out) final_out = self.fc2(cg_out) final_model = tf.keras.models.Model(inputs=[model_input, model_input2], outputs=final_out) return", "), trainable=True, name=\"cluster_centers\" + str(num_clusters) ) self.feature_dim = feature_dim self.max_frames", "vlad_out = tf.concat([video_vlad_out, audio_vlad_out], axis=1) fc_out = self.fc(vlad_out) cg_out =", "input_shape def get_config(self): base_config = super().get_config() return dict(list(base_config.items())) class MOELogistic(tf.keras.layers.Layer):", "= tf.keras.layers.Dense( units=num_classes*(num_mixtures+1), kernel_regularizer=tf.keras.regularizers.l2(1e-6), ) self.expert_fc = tf.keras.layers.Dense( units=num_classes*num_mixtures, kernel_regularizer=tf.keras.regularizers.l2(1e-6),", "not match. \"\"\" def __init__(self, num_clusters, video_input_shape, audio_input_shape, num_classes, fc_units,", "\"\"\" def __init__(self, input_shape, num_classes, num_mixtures, **kwargs): super(MOELogistic, self).__init__(**kwargs) self.num_classes", "A tensor with shape [batch_size, feature_dim]. Raises: ValueError: If the", "a Mixture of Logistic Experts classifier. Input shape: 2D tensor", "with shape: `(batch_size, time, feature_dim)`. Output shape: 2D tensor with", "2D tensor with shape: `(batch_size, feature_dim * num_clusters)`. 
\"\"\" def", "tf.keras.layers.Dense( units=num_classes*num_mixtures, kernel_regularizer=tf.keras.regularizers.l2(1e-6), ) def call(self, input): \"\"\"Apply the MoE", "input_shape = tf.TensorShape(input_shape).as_list() return tf.TensorShape([input_shape[0], input_shape[-1] * self.num_clusters]) def get_config(self):", "num_classes self.num_mixtures = num_mixtures self.iterations = iterations self.video_feature_dim = video_input_shape[2]", "[-1, self.num_mixtures+1])) expert_dist = tf.nn.sigmoid(tf.reshape(expert_activations, [-1, self.num_mixtures])) probs = tf.reduce_sum(tf.math.multiply(gate_dist[:,:self.num_mixtures],", "0: raise ValueError(\"num_clusters must be divisible by 2.\") batch_size =", "Youtube-8M Challenge. The model can be found here: https://arxiv.org/abs/1911.08548 Arguments:", "the License for the specific language governing permissions and limitations", "may not use this file except in compliance with the", "super(MOELogistic, self).__init__(**kwargs) self.num_classes = num_classes self.num_mixtures = num_mixtures self.gate_fc =", "tf.keras.layers.Dense( units=fc_units, activation=tf.nn.relu6, kernel_regularizer=tf.keras.regularizers.l2(1e-5), name=\"main_fc\" ) self.first_cg = ContextGating(input_shape=(batch_size, fc_units),", "= input_shape[-2] def call(self, frames): \"\"\"Apply the NetVLAD module to", "implied. 
See the License for the specific language governing permissions", "dimension must be defined.\") context_gate = self.fc(model_input) output = tf.math.multiply(context_gate,", "tf.concat([video_vlad_out, audio_vlad_out], axis=1) fc_out = self.fc(vlad_out) cg_out = self.first_cg(fc_out) moe_out", "\"\"\"The Segment Classifier model, implemented according to the winning model", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "self.num_mixtures+1])) expert_dist = tf.nn.sigmoid(tf.reshape(expert_activations, [-1, self.num_mixtures])) probs = tf.reduce_sum(tf.math.multiply(gate_dist[:,:self.num_mixtures], expert_dist),1)", "audio_input_shape[0] != batch_size: raise ValueError(\"audio_input_shape[0] must equal video_input_shape[0]. Batch sizes", "audio features. Shape of [batch_size, num_samples, audio_feature_dim]. Raises: ValueError: If", "final_out = self.second_cg(moe_out) final_model = tf.keras.models.Model(inputs=model_input, outputs=final_out) return final_model class", "input. Args: num_clusters: The number of clusters to use. 
input_shape:", "If the batch sizes of the audio_input_shape and video_input_shape do", "= NetVLAD(num_clusters//2, input_shape=audio_input_shape, name=\"audio_vlad\") #Relu6 is used as it is", "distribution across mixtures gate_dist = tf.nn.softmax(tf.reshape(gate_activations, [-1, self.num_mixtures+1])) expert_dist =", "= tf.reshape(activation, (-1, max_frames, self.num_clusters)) activation_sum = tf.math.reduce_sum(activation, axis=-2, keepdims=True)", "= tf.keras.layers.Input(shape=second_input_shape, batch_size=batch_size) video_input = model_input[:,:,:self.video_feature_dim] audio_input = model_input[:,:,self.video_feature_dim:] video_vlad_out", "super().get_config() config = base_config.update({'number of classes': self.num_classes, 'number of mixtures':", "dict(list(base_config.items())) class MOELogistic(tf.keras.layers.Layer): \"\"\"Implements a Mixture of Logistic Experts classifier.", "denoting the input shape of the NetVLAD layer. Input Shape:", "\"\"\"Applies NetVLAD to the input. Args: num_clusters: The number of", "\"\"\"Perform one forward pass of the model. Args: model_input: input", "language governing permissions and limitations under the License. Defines the", "License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed", "model_input) return output def compute_output_shape(self, input_shape): return input_shape def get_config(self):", "under the License. Defines the architecture of the Video Classifier.", "cg_out = self.first_cg(fc_out) final_out = self.fc2(cg_out) final_model = tf.keras.models.Model(inputs=[model_input, model_input2],", "num_classes]. 
\"\"\" model_input = tf.keras.layers.Input(shape=input_shape, batch_size=batch_size) model_input2 = tf.keras.layers.Input(shape=second_input_shape, batch_size=batch_size)", "name=\"first_cg\") self.moe = MOELogistic(input_shape=self.first_cg.compute_output_shape((batch_size, fc_units)), num_classes=self.num_classes, num_mixtures=self.num_mixtures, name=\"moe\") self.second_cg =", "keepdims=True) cluster_activation = activation_sum * self.cluster_centers frames = tf.reshape(frames, (-1,", "tf.TensorShape([input_shape[0], input_shape[-1] * self.num_clusters]) def get_config(self): config = {\"num_clusters\": self.num_clusters}", "do not match. \"\"\" def __init__(self, num_clusters, video_input_shape, audio_input_shape, num_classes,", "return dict(list(base_config.items())) class MOELogistic(tf.keras.layers.Layer): \"\"\"Implements a Mixture of Logistic Experts", "to the winning model from the Youtube-8M Challenge. The model", "+ audio_feature_dim]. Returns: A tensor with shape [batch_size, num_classes]. \"\"\"", "tf.math.multiply(context_gate, model_input) return output def compute_output_shape(self, input_shape): return input_shape def", "can be found here: https://arxiv.org/abs/1911.08548 Arguments: num_clusters: the number of", "self.fc = tf.keras.layers.Dense( units=self.num_clusters, activation=tf.nn.softmax, kernel_regularizer=tf.keras.regularizers.l2(1e-5), name=\"vlad_fc\" + str(num_clusters) )", "is employed in the paper. self.fc = tf.keras.layers.Dense( units=fc_units, activation=tf.nn.relu6,", "[max_frames, video_feature_dim + audio_feature_dim]. second_input_shape: input shape of new class", "Challenge. The model can be found here: https://arxiv.org/pdf/1706.06905.pdf Arguments: num_clusters:", "shape: 2D tensor with shape: `(batch_size, feature_dim)`. \"\"\" def __init__(self,", "Shape is of the form: [max_frames, video_feature_dim + audio_feature_dim]. 
second_input_shape:", "the batch sizes of the audio_input_shape and video_input_shape do not", "(-1, feature_dim)) activation = self.fc(frames) activation = tf.reshape(activation, (-1, max_frames,", "raise ValueError(\"Last dimension must be defined.\") context_gate = self.fc(model_input) output", "kernel_regularizer=tf.keras.regularizers.l2(1e-5), name=\"vlad_fc\" + str(num_clusters) ) self.cluster_centers = self.add_weight( shape=(1, feature_dim,", "self.cluster_centers frames = tf.reshape(frames, (-1, max_frames, feature_dim)) activation = tf.transpose(", "= tf.math.reduce_sum(activation, axis=-2, keepdims=True) cluster_activation = activation_sum * self.cluster_centers frames", "the audio_input_shape and video_input_shape do not match. ValueError: If the", "writing, software distributed under the License is distributed on an", "iterations self.video_feature_dim = video_input_shape[2] self.video_vlad = NetVLAD(num_clusters, input_shape=video_input_shape, name=\"video_vlad\") self.audio_vlad", "shape: 2D tensor with shape: `(batch_size, feature_dim * num_clusters)`. \"\"\"", "MOELogistic(input_shape=self.first_cg.compute_output_shape((batch_size, fc_units)), num_classes=self.num_classes, num_mixtures=self.num_mixtures, name=\"moe\") self.second_cg = ContextGating(input_shape=self.moe.compute_output_shape((batch_size, fc_units)), name=\"second_cg\")", "governing permissions and limitations under the License. Defines the architecture", "video_input_shape, audio_input_shape, num_classes, fc_units, **kwargs): super(SegmentClassifier, self).__init__(**kwargs) if num_clusters %", "`feature_dim` of model_input is not defined. \"\"\" model_input.shape.assert_has_rank(2) feature_dim =", "to be used for NetVLAD. The audio clusters will be", "is used as it is employed in the paper. self.fc", "in compliance with the License. 
You may obtain a copy", "= tf.keras.layers.Dense( units=feature_dim, activation=tf.nn.sigmoid, kernel_regularizer=tf.keras.regularizers.l2(1e-5), ) def call(self, model_input): \"\"\"Apply", "specific language governing permissions and limitations under the License. Defines", "video_input_shape, audio_input_shape, num_classes, num_mixtures, fc_units, iterations, **kwargs): super(VideoClassifier, self).__init__(**kwargs) if", "import tensorflow as tf class NetVLAD(tf.keras.layers.Layer): \"\"\"Applies NetVLAD to the", "`(batch_size, feature_dim * num_clusters)`. \"\"\" def __init__(self, num_clusters, input_shape, **kwargs):", "self.feature_dim max_frames = self.max_frames frames = tf.reshape(frames, (-1, feature_dim)) activation", "model, implemented according to the winning model from the Youtube-8M", "activation=tf.nn.relu6, kernel_regularizer=tf.keras.regularizers.l2(1e-5), name=\"main_fc\" ) self.first_cg = ContextGating(input_shape=(batch_size, fc_units), name=\"first_cg\") self.fc2", "video_input_shape[2] self.video_vlad = NetVLAD(num_clusters, input_shape=video_input_shape, name=\"video_vlad\") self.audio_vlad = NetVLAD(num_clusters//2, input_shape=audio_input_shape,", "agreed to in writing, software distributed under the License is", "with shape [batch_size, max_frames, feature_dim]. Returns: vlad_out: A tensor with", "= self.first_cg(fc_out) moe_out = self.moe(cg_out) final_out = self.second_cg(moe_out) final_model =", "in the paper. self.fc = tf.keras.layers.Dense( units=fc_units, activation=tf.nn.relu6, kernel_regularizer=tf.keras.regularizers.l2(1e-5), name=\"main_fc\"", "shape of the input video features. Shape of [batch_size, num_samples,", "tensor with shape [batch_size, feature_dim * num_clusters]. Raises: ValueError: If", "tensor with shape [batch_size, feature_dim]. 
Raises: ValueError: If the `feature_dim`", "must equal.\") if audio_input_shape[1] != video_input_shape[1]: raise ValueError(\"audio_input_shape[1] must equal", "tf.nn.l2_normalize(vlad_out, 1) return vlad_out def compute_output_shape(self, input_shape): input_shape = tf.TensorShape(input_shape).as_list()", "self.gate_fc(input) expert_activations = self.expert_fc(input) #Calculate the distribution across mixtures gate_dist", "tf.keras.layers.Dense( units=self.num_clusters, activation=tf.nn.softmax, kernel_regularizer=tf.keras.regularizers.l2(1e-5), name=\"vlad_fc\" + str(num_clusters) ) self.cluster_centers =", "permissions and limitations under the License. Defines the architecture of", "= self.video_vlad(video_input) audio_vlad_out = self.audio_vlad(audio_input) vlad_out = tf.concat([video_vlad_out, audio_vlad_out], axis=1)", "num_mixtures=self.num_mixtures, name=\"moe\") self.second_cg = ContextGating(input_shape=self.moe.compute_output_shape((batch_size, fc_units)), name=\"second_cg\") def build_model(self, input_shape,", "def build_model(self, input_shape, batch_size): \"\"\"Perform one forward pass of the", "video_input = model_input[:,:,:self.video_feature_dim] audio_input = model_input[:,:,self.video_feature_dim:] video_vlad_out = self.video_vlad(video_input) audio_vlad_out", "tf.concat([video_vlad_out, audio_vlad_out], axis=1) vlad_out = tf.concat([vlad_out, model_input2], axis=1) fc_out =", "self.audio_vlad(audio_input) vlad_out = tf.concat([video_vlad_out, audio_vlad_out], axis=1) vlad_out = tf.concat([vlad_out, model_input2],", "features. Shape of [batch_size, num_samples, audio_feature_dim]. Raises: ValueError: If num_clusters", "perm=(0, 2, 1)), frames), perm=(0, 2, 1) ) vlad_out =", "num_clusters is not divisible by 2. 
ValueError: If the batch", "num_clusters) self.num_clusters = num_clusters feature_dim = input_shape[-1] if not isinstance(feature_dim,", "self.cluster_centers = self.add_weight( shape=(1, feature_dim, self.num_clusters), initializer=tf.keras.initializers.TruncatedNormal( stddev=1.0 / math.sqrt(feature_dim)", "fc_units)), num_classes=self.num_classes, num_mixtures=self.num_mixtures, name=\"moe\") self.second_cg = ContextGating(input_shape=self.moe.compute_output_shape((batch_size, fc_units)), name=\"second_cg\") def", "If the `feature_dim` of input is not defined. \"\"\" feature_dim", "self.num_mixtures = num_mixtures self.iterations = iterations self.video_feature_dim = video_input_shape[2] self.video_vlad", "self.moe = MOELogistic(input_shape=self.first_cg.compute_output_shape((batch_size, fc_units)), num_classes=self.num_classes, num_mixtures=self.num_mixtures, name=\"moe\") self.second_cg = ContextGating(input_shape=self.moe.compute_output_shape((batch_size,", "with shape [batch_size, num_classes]. \"\"\" model_input = tf.keras.layers.Input(shape=input_shape, batch_size=batch_size) model_input2", "self.second_cg = ContextGating(input_shape=self.moe.compute_output_shape((batch_size, fc_units)), name=\"second_cg\") def build_model(self, input_shape, batch_size): \"\"\"Perform", "either express or implied. See the License for the specific", "3D tensor denoting the input shape of the NetVLAD layer.", "__init__(self, num_clusters, input_shape, **kwargs): super().__init__(**kwargs) if num_clusters <= 0: raise", "BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "num_samples, audio_feature_dim]. Raises: ValueError: If num_clusters is not divisible by", "self.num_classes = num_classes self.num_mixtures = num_mixtures self.gate_fc = tf.keras.layers.Dense( units=num_classes*(num_mixtures+1),", "with shape: `(batch_size, feature_dim * num_clusters)`. 
\"\"\" def __init__(self, num_clusters,", "\"License\"); you may not use this file except in compliance", "2, 1) ) vlad_out = activation - cluster_activation vlad_out =", "License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES", "Licensed under the Apache License, Version 2.0 (the \"License\"); you", "self.num_mixtures}) return config class VideoClassifier: \"\"\"The Video Classifier model, implemented", "return input_shape def get_config(self): base_config = super().get_config() return dict(list(base_config.items())) class", "num_classes=self.num_classes, num_mixtures=self.num_mixtures, name=\"moe\") self.second_cg = ContextGating(input_shape=self.moe.compute_output_shape((batch_size, fc_units)), name=\"second_cg\") def build_model(self,", "ContextGating(input_shape=(batch_size, fc_units), name=\"first_cg\") self.fc2 = tf.keras.layers.Dense( units=1, activation=tf.keras.activations.sigmoid, kernel_regularizer=tf.keras.regularizers.l2(1e-5), name=\"main_fc2\"", "`(batch_size, num_classes)`. \"\"\" def __init__(self, input_shape, num_classes, num_mixtures, **kwargs): super(MOELogistic,", "name=\"main_fc\" ) self.first_cg = ContextGating(input_shape=(batch_size, fc_units), name=\"first_cg\") self.fc2 = tf.keras.layers.Dense(", "vlad_out = tf.concat([video_vlad_out, audio_vlad_out], axis=1) vlad_out = tf.concat([vlad_out, model_input2], axis=1)", "License for the specific language governing permissions and limitations under", "= feature_dim.value self.fc = tf.keras.layers.Dense( units=feature_dim, activation=tf.nn.sigmoid, kernel_regularizer=tf.keras.regularizers.l2(1e-5), ) def", "moe_out = self.moe(cg_out) final_out = self.second_cg(moe_out) final_model = tf.keras.models.Model(inputs=model_input, outputs=final_out)", "Video Classifier. \"\"\" import math import tensorflow as tf class", "of clusters to use. 
input_shape: 3D tensor denoting the input", "[-1, self.num_classes]) return probs def compute_output_shape(self, input_shape): return (input_shape[0], self.num_classes)", "audio clusters will be num_clusters/2. video_input_shape: shape of the input", "= num_clusters feature_dim = input_shape[-1] if not isinstance(feature_dim, int): feature_dim", "the number of clusters to be used for NetVLAD. The", "call(self, frames): \"\"\"Apply the NetVLAD module to the given frames.", "self.moe(cg_out) final_out = self.second_cg(moe_out) final_model = tf.keras.models.Model(inputs=model_input, outputs=final_out) return final_model", "fc_units, iterations, **kwargs): super(VideoClassifier, self).__init__(**kwargs) if num_clusters % 2 !=", "feature_dim.value self.fc = tf.keras.layers.Dense( units=feature_dim, activation=tf.nn.sigmoid, kernel_regularizer=tf.keras.regularizers.l2(1e-5), ) def call(self,", "num_classes)`. \"\"\" def __init__(self, input_shape, num_classes, num_mixtures, **kwargs): super(MOELogistic, self).__init__(**kwargs)", "here: https://arxiv.org/abs/1911.08548 Arguments: num_clusters: the number of clusters to be", "max_frames = self.max_frames frames = tf.reshape(frames, (-1, feature_dim)) activation =", "features. Shape of [batch_size, num_samples, video_feature_dim]. audio_input_shape: shape fo the", "fo the input audio features. Shape of [batch_size, num_samples, audio_feature_dim].", "final_model class SegmentClassifier: \"\"\"The Segment Classifier model, implemented according to", "feature_dim = feature_dim.value self.fc = tf.keras.layers.Dense( units=feature_dim, activation=tf.nn.sigmoid, kernel_regularizer=tf.keras.regularizers.l2(1e-5), )", "may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless", "class VideoClassifier: \"\"\"The Video Classifier model, implemented according to the", "axis=1) fc_out = self.fc(vlad_out) cg_out = self.first_cg(fc_out) moe_out = self.moe(cg_out)", "video_feature_dim + audio_feature_dim]. 
second_input_shape: input shape of new class specific", "ValueError(\"Last dimension must be defined.\") context_gate = self.fc(model_input) output =", "shape: `(batch_size, feature_dim * num_clusters)`. \"\"\" def __init__(self, num_clusters, input_shape,", "ContextGating(input_shape=(batch_size, fc_units), name=\"first_cg\") self.moe = MOELogistic(input_shape=self.first_cg.compute_output_shape((batch_size, fc_units)), num_classes=self.num_classes, num_mixtures=self.num_mixtures, name=\"moe\")", "second_input_shape, batch_size): \"\"\"Perform one forward pass of the model. Args:", "must be divisible by 2.\") batch_size = video_input_shape[0] if audio_input_shape[0]", "found here: https://arxiv.org/pdf/1706.06905.pdf Arguments: num_clusters: the number of clusters to", "call(self, input): \"\"\"Apply the MoE algorithm to the given input.", "Returns: A tensor with shape [batch_size, num_classes]. Raises: ValueError: If", "copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable", "LLC Licensed under the Apache License, Version 2.0 (the \"License\");", "shape [batch_size, num_classes]. Raises: ValueError: If the `feature_dim` of input", "NetVLAD(num_clusters//2, input_shape=audio_input_shape, name=\"audio_vlad\") #Relu6 is used as it is employed", "tf.reshape(vlad_out, (-1, feature_dim * self.num_clusters)) vlad_out = tf.nn.l2_normalize(vlad_out, 1) return", "feature_dim]. Returns: vlad_out: A tensor with shape [batch_size, feature_dim *", "expert_dist),1) probs = tf.reshape(probs, [-1, self.num_classes]) return probs def compute_output_shape(self,", "\"\"\" def __init__(self, num_clusters, input_shape, **kwargs): super().__init__(**kwargs) if num_clusters <=", "input_shape: 3D tensor denoting the input shape of the NetVLAD", "the MoE algorithm to the given input. Args: input: A", "2020 Google LLC Licensed under the Apache License, Version 2.0", "of the form: [max_frames, video_feature_dim + audio_feature_dim]. 
second_input_shape: input shape", "If num_clusters is not divisible by 2. ValueError: If the", "not isinstance(feature_dim, int): feature_dim = feature_dim.value self.fc = tf.keras.layers.Dense( units=feature_dim,", "Challenge. The model can be found here: https://arxiv.org/abs/1911.08548 Arguments: num_clusters:", "as tf class NetVLAD(tf.keras.layers.Layer): \"\"\"Applies NetVLAD to the input. Args:", "raise ValueError(\"audio_input_shape[1] must equal video_input_shape[1]. Number of samples must equal.\")", "except in compliance with the License. You may obtain a", "tf.reduce_sum(tf.math.multiply(gate_dist[:,:self.num_mixtures], expert_dist),1) probs = tf.reshape(probs, [-1, self.num_classes]) return probs def", "Mixture of Logistic Experts classifier. Input shape: 2D tensor with", "than 1: %i\" % num_clusters) self.num_clusters = num_clusters feature_dim =", "of samples must equal.\") self.num_frames = video_input_shape[1] self.num_classes = num_classes", "shape: `(batch_size, feature_dim)`. \"\"\" def __init__(self, input_shape, **kwargs): super(ContextGating, self).__init__(**kwargs)", "MoE algorithm to the given input. Args: input: A tensor", "= self.gate_fc(input) expert_activations = self.expert_fc(input) #Calculate the distribution across mixtures", "perm=(0, 2, 1) ) vlad_out = activation - cluster_activation vlad_out", "= self.fc(frames) activation = tf.reshape(activation, (-1, max_frames, self.num_clusters)) activation_sum =", "\"\"\" model_input.shape.assert_has_rank(2) feature_dim = model_input.shape.as_list()[-1] if feature_dim is None: raise", "raise ValueError(\"audio_input_shape[0] must equal video_input_shape[0]. 
Batch sizes must equal.\") if", "self.first_cg = ContextGating(input_shape=(batch_size, fc_units), name=\"first_cg\") self.moe = MOELogistic(input_shape=self.first_cg.compute_output_shape((batch_size, fc_units)), num_classes=self.num_classes,", "You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0", "is of the form [num_new_features] Returns: A tensor with shape", "tf.reshape(activation, (-1, max_frames, self.num_clusters)) activation_sum = tf.math.reduce_sum(activation, axis=-2, keepdims=True) cluster_activation", "cluster_activation = activation_sum * self.cluster_centers frames = tf.reshape(frames, (-1, max_frames,", "def __init__(self, num_clusters, video_input_shape, audio_input_shape, num_classes, num_mixtures, fc_units, iterations, **kwargs):", "https://arxiv.org/pdf/1706.06905.pdf Arguments: num_clusters: the number of clusters to be used", "<= 0: raise ValueError(\"`num_clusters` must be greater than 1: %i\"", "input is not defined. \"\"\" gate_activations = self.gate_fc(input) expert_activations =", "tf.keras.layers.Dense( units=1, activation=tf.keras.activations.sigmoid, kernel_regularizer=tf.keras.regularizers.l2(1e-5), name=\"main_fc2\" ) def build_model(self, input_shape, second_input_shape,", "The audio clusters will be num_clusters/2. video_input_shape: shape of the", "compliance with the License. You may obtain a copy of", "frames): \"\"\"Apply the NetVLAD module to the given frames. 
Args:", "self.num_classes, 'number of mixtures': self.num_mixtures}) return config class VideoClassifier: \"\"\"The", "num_clusters, video_input_shape, audio_input_shape, num_classes, fc_units, **kwargs): super(SegmentClassifier, self).__init__(**kwargs) if num_clusters", "\"\"\"Implements the Context Gating Layer from https://arxiv.org/abs/1706.06905 Input shape: 2D", "= tf.keras.layers.Dense( units=1, activation=tf.keras.activations.sigmoid, kernel_regularizer=tf.keras.regularizers.l2(1e-5), name=\"main_fc2\" ) def build_model(self, input_shape,", "kernel_regularizer=tf.keras.regularizers.l2(1e-5), ) def call(self, model_input): \"\"\"Apply the ContextGating module to", "The number of clusters to use. input_shape: 3D tensor denoting", "shape [batch_size, max_frames, feature_dim]. Returns: vlad_out: A tensor with shape", "feature_dim self.max_frames = input_shape[-2] def call(self, frames): \"\"\"Apply the NetVLAD", "import math import tensorflow as tf class NetVLAD(tf.keras.layers.Layer): \"\"\"Applies NetVLAD", "activation_sum * self.cluster_centers frames = tf.reshape(frames, (-1, max_frames, feature_dim)) activation", "with shape: `(batch_size, feature_dim)`. Output shape: 2D tensor with shape:", "MOELogistic(tf.keras.layers.Layer): \"\"\"Implements a Mixture of Logistic Experts classifier. Input shape:", "input video features. Shape of [batch_size, num_samples, video_feature_dim]. audio_input_shape: shape", "activation = tf.reshape(activation, (-1, max_frames, self.num_clusters)) activation_sum = tf.math.reduce_sum(activation, axis=-2,", "the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or", "Args: frames: A tensor with shape [batch_size, max_frames, feature_dim]. Returns:", "greater than 1: %i\" % num_clusters) self.num_clusters = num_clusters feature_dim", "Experts classifier. Input shape: 2D tensor with shape: `(batch_size, feature_dim)`.", "input. Args: input: A tensor with shape [batch_size, feature_dim]. 
Returns:", "1) vlad_out = tf.reshape(vlad_out, (-1, feature_dim * self.num_clusters)) vlad_out =", "stddev=1.0 / math.sqrt(feature_dim) ), trainable=True, name=\"cluster_centers\" + str(num_clusters) ) self.feature_dim", "defined.\") context_gate = self.fc(model_input) output = tf.math.multiply(context_gate, model_input) return output", "[batch_size, feature_dim]. Returns: A tensor with shape [batch_size, num_classes]. Raises:", "\"\"\" def __init__(self, input_shape, **kwargs): super(ContextGating, self).__init__(**kwargs) feature_dim = input_shape[-1]", "Layer from https://arxiv.org/abs/1706.06905 Input shape: 2D tensor with shape: `(batch_size,", "= self.audio_vlad(audio_input) vlad_out = tf.concat([video_vlad_out, audio_vlad_out], axis=1) vlad_out = tf.concat([vlad_out,", "num_mixtures self.iterations = iterations self.video_feature_dim = video_input_shape[2] self.video_vlad = NetVLAD(num_clusters,", "2, 1)), frames), perm=(0, 2, 1) ) vlad_out = activation", "max_frames, feature_dim]. Returns: vlad_out: A tensor with shape [batch_size, feature_dim", "2 != 0: raise ValueError(\"num_clusters must be divisible by 2.\")", "1) ) vlad_out = activation - cluster_activation vlad_out = tf.nn.l2_normalize(vlad_out,", "max_frames, self.num_clusters)) activation_sum = tf.math.reduce_sum(activation, axis=-2, keepdims=True) cluster_activation = activation_sum", "get_config(self): base_config = super().get_config() config = base_config.update({'number of classes': self.num_classes,", "[batch_size, feature_dim]. Returns: A tensor with shape [batch_size, feature_dim]. 
Raises:", "video_input_shape[1] self.num_classes = num_classes self.num_mixtures = num_mixtures self.iterations = iterations", "\"\"\" def __init__(self, num_clusters, video_input_shape, audio_input_shape, num_classes, num_mixtures, fc_units, iterations,", "model_input[:,:,:self.video_feature_dim] audio_input = model_input[:,:,self.video_feature_dim:] video_vlad_out = self.video_vlad(video_input) audio_vlad_out = self.audio_vlad(audio_input)", "audio_input_shape, num_classes, num_mixtures, fc_units, iterations, **kwargs): super(VideoClassifier, self).__init__(**kwargs) if num_clusters", "units=1, activation=tf.keras.activations.sigmoid, kernel_regularizer=tf.keras.regularizers.l2(1e-5), name=\"main_fc2\" ) def build_model(self, input_shape, second_input_shape, batch_size):", "must be defined.\") context_gate = self.fc(model_input) output = tf.math.multiply(context_gate, model_input)", "= tf.keras.models.Model(inputs=model_input, outputs=final_out) return final_model class SegmentClassifier: \"\"\"The Segment Classifier", "Args: input: A tensor with shape [batch_size, feature_dim]. Returns: A", "= self.first_cg(fc_out) final_out = self.fc2(cg_out) final_model = tf.keras.models.Model(inputs=[model_input, model_input2], outputs=final_out)", "of Logistic Experts classifier. Input shape: 2D tensor with shape:", "super(SegmentClassifier, self).__init__(**kwargs) if num_clusters % 2 != 0: raise ValueError(\"num_clusters", "input_shape): return (input_shape[0], self.num_classes) def get_config(self): base_config = super().get_config() config", "name=\"first_cg\") self.fc2 = tf.keras.layers.Dense( units=1, activation=tf.keras.activations.sigmoid, kernel_regularizer=tf.keras.regularizers.l2(1e-5), name=\"main_fc2\" ) def", "equal.\") if audio_input_shape[1] != video_input_shape[1]: raise ValueError(\"audio_input_shape[1] must equal video_input_shape[1].", "video_input_shape[1]: raise ValueError(\"audio_input_shape[1] must equal video_input_shape[1]. 
Number of samples must", "self.fc = tf.keras.layers.Dense( units=fc_units, activation=tf.nn.relu6, kernel_regularizer=tf.keras.regularizers.l2(1e-5), name=\"main_fc\" ) self.first_cg =", "`(batch_size, feature_dim)`. Output shape: 2D tensor with shape: `(batch_size, feature_dim)`.", "ValueError: If the `feature_dim` of input is not defined. \"\"\"", "return (input_shape[0], self.num_classes) def get_config(self): base_config = super().get_config() config =", "paper. self.fc = tf.keras.layers.Dense( units=fc_units, activation=tf.nn.relu6, kernel_regularizer=tf.keras.regularizers.l2(1e-5), name=\"main_fc\" ) self.first_cg", "A tensor with shape [batch_size, feature_dim]. Returns: A tensor with", "* self.num_clusters)) vlad_out = tf.nn.l2_normalize(vlad_out, 1) return vlad_out def compute_output_shape(self,", "the `feature_dim` of input is not defined. \"\"\" gate_activations =", "def __init__(self, input_shape, **kwargs): super(ContextGating, self).__init__(**kwargs) feature_dim = input_shape[-1] if", "by 2. ValueError: If the batch sizes of the audio_input_shape", "form: [max_frames, video_feature_dim + audio_feature_dim]. second_input_shape: input shape of new", "of the model. Args: model_input: input features of shape [batch_size,", "shape: 2D tensor with shape: `(batch_size, feature_dim)`. Output shape: 2D", "axis=1) fc_out = self.fc(vlad_out) cg_out = self.first_cg(fc_out) final_out = self.fc2(cg_out)", "raise ValueError(\"num_clusters must be divisible by 2.\") batch_size = video_input_shape[0]", "return tf.TensorShape([input_shape[0], input_shape[-1] * self.num_clusters]) def get_config(self): config = {\"num_clusters\":", "fc_units)), name=\"second_cg\") def build_model(self, input_shape, batch_size): \"\"\"Perform one forward pass", "!= batch_size: raise ValueError(\"audio_input_shape[0] must equal video_input_shape[0]. 
Batch sizes must", "= model_input[:,:,self.video_feature_dim:] video_vlad_out = self.video_vlad(video_input) audio_vlad_out = self.audio_vlad(audio_input) vlad_out =", "= self.moe(cg_out) final_out = self.second_cg(moe_out) final_model = tf.keras.models.Model(inputs=model_input, outputs=final_out) return", "%i\" % num_clusters) self.num_clusters = num_clusters feature_dim = input_shape[-1] if", "str(num_clusters) ) self.feature_dim = feature_dim self.max_frames = input_shape[-2] def call(self,", "= num_classes self.num_mixtures = num_mixtures self.iterations = iterations self.video_feature_dim =", "batch_size=batch_size) video_input = model_input[:,:,:self.video_feature_dim] audio_input = model_input[:,:,self.video_feature_dim:] video_vlad_out = self.video_vlad(video_input)", "initializer=tf.keras.initializers.TruncatedNormal( stddev=1.0 / math.sqrt(feature_dim) ), trainable=True, name=\"cluster_centers\" + str(num_clusters) )", "Args: input_shape: input shape for video features. Shape is of", "the Youtube-8M Challenge. The model can be found here: https://arxiv.org/abs/1911.08548", "tensor with shape [batch_size, max_frames, feature_dim]. Returns: vlad_out: A tensor", "the License. Defines the architecture of the Video Classifier. \"\"\"", "activation_sum = tf.math.reduce_sum(activation, axis=-2, keepdims=True) cluster_activation = activation_sum * self.cluster_centers", "isinstance(feature_dim, int): feature_dim = feature_dim.value self.fc = tf.keras.layers.Dense( units=self.num_clusters, activation=tf.nn.softmax,", "`feature_dim` of input is not defined. \"\"\" feature_dim = self.feature_dim", "`feature_dim` of input is not defined. \"\"\" gate_activations = self.gate_fc(input)", "of [batch_size, num_samples, video_feature_dim]. 
audio_input_shape: shape fo the input audio", "Unless required by applicable law or agreed to in writing,", "by applicable law or agreed to in writing, software distributed", "Raises: ValueError: If the `feature_dim` of input is not defined.", "num_classes]. Raises: ValueError: If the `feature_dim` of input is not", "video_feature_dim + audio_feature_dim]. Returns: A tensor with shape [batch_size, num_classes].", "match. ValueError: If the number of samples of the audio_input_shape", "compute_output_shape(self, input_shape): return (input_shape[0], self.num_classes) def get_config(self): base_config = super().get_config()", "= {\"num_clusters\": self.num_clusters} base_config = super().get_config() return dict(list(base_config.items()) + list(config.items()))", "ValueError: If the `feature_dim` of model_input is not defined. \"\"\"", "= self.max_frames frames = tf.reshape(frames, (-1, feature_dim)) activation = self.fc(frames)", "model. Args: input_shape: input shape for video features. Shape is", "= tf.keras.layers.Input(shape=input_shape, batch_size=batch_size) model_input2 = tf.keras.layers.Input(shape=second_input_shape, batch_size=batch_size) video_input = model_input[:,:,:self.video_feature_dim]", "+ list(config.items())) class ContextGating(tf.keras.layers.Layer): \"\"\"Implements the Context Gating Layer from", "input features of shape [batch_size, max_frames, video_feature_dim + audio_feature_dim]. Returns:", "= tf.nn.l2_normalize(vlad_out, 1) return vlad_out def compute_output_shape(self, input_shape): input_shape =", "super().get_config() return dict(list(base_config.items()) + list(config.items())) class ContextGating(tf.keras.layers.Layer): \"\"\"Implements the Context", "(-1, feature_dim * self.num_clusters)) vlad_out = tf.nn.l2_normalize(vlad_out, 1) return vlad_out", "the `feature_dim` of model_input is not defined. 
\"\"\" model_input.shape.assert_has_rank(2) feature_dim", "units=num_classes*num_mixtures, kernel_regularizer=tf.keras.regularizers.l2(1e-6), ) def call(self, input): \"\"\"Apply the MoE algorithm", "feature_dim)) activation = tf.transpose( tf.matmul(tf.transpose(activation, perm=(0, 2, 1)), frames), perm=(0,", "batch_size=batch_size) model_input2 = tf.keras.layers.Input(shape=second_input_shape, batch_size=batch_size) video_input = model_input[:,:,:self.video_feature_dim] audio_input =", "features of shape [batch_size, max_frames, video_feature_dim + audio_feature_dim]. Returns: A", "self.video_vlad(video_input) audio_vlad_out = self.audio_vlad(audio_input) vlad_out = tf.concat([video_vlad_out, audio_vlad_out], axis=1) fc_out", "get_config(self): config = {\"num_clusters\": self.num_clusters} base_config = super().get_config() return dict(list(base_config.items())", "a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by", ") self.expert_fc = tf.keras.layers.Dense( units=num_classes*num_mixtures, kernel_regularizer=tf.keras.regularizers.l2(1e-6), ) def call(self, input):", "(-1, max_frames, feature_dim)) activation = tf.transpose( tf.matmul(tf.transpose(activation, perm=(0, 2, 1)),", "return dict(list(base_config.items()) + list(config.items())) class ContextGating(tf.keras.layers.Layer): \"\"\"Implements the Context Gating", "is not divisible by 2. ValueError: If the batch sizes", "= self.add_weight( shape=(1, feature_dim, self.num_clusters), initializer=tf.keras.initializers.TruncatedNormal( stddev=1.0 / math.sqrt(feature_dim) ),", "name=\"second_cg\") def build_model(self, input_shape, batch_size): \"\"\"Perform one forward pass of", "audio_vlad_out], axis=1) fc_out = self.fc(vlad_out) cg_out = self.first_cg(fc_out) moe_out =", "self.feature_dim = feature_dim self.max_frames = input_shape[-2] def call(self, frames): \"\"\"Apply", "shape fo the input audio features. 
Shape of [batch_size, num_samples,", "with shape: `(batch_size, feature_dim)`. \"\"\" def __init__(self, input_shape, **kwargs): super(ContextGating,", "express or implied. See the License for the specific language", "is of the form: [max_frames, video_feature_dim + audio_feature_dim]. second_input_shape: input", "= feature_dim.value self.fc = tf.keras.layers.Dense( units=self.num_clusters, activation=tf.nn.softmax, kernel_regularizer=tf.keras.regularizers.l2(1e-5), name=\"vlad_fc\" +", "= ContextGating(input_shape=self.moe.compute_output_shape((batch_size, fc_units)), name=\"second_cg\") def build_model(self, input_shape, batch_size): \"\"\"Perform one", "class ContextGating(tf.keras.layers.Layer): \"\"\"Implements the Context Gating Layer from https://arxiv.org/abs/1706.06905 Input", "= tf.reshape(probs, [-1, self.num_classes]) return probs def compute_output_shape(self, input_shape): return", "tf.keras.models.Model(inputs=model_input, outputs=final_out) return final_model class SegmentClassifier: \"\"\"The Segment Classifier model,", "model_input2 = tf.keras.layers.Input(shape=second_input_shape, batch_size=batch_size) video_input = model_input[:,:,:self.video_feature_dim] audio_input = model_input[:,:,self.video_feature_dim:]", "\"\"\"Implements a Mixture of Logistic Experts classifier. Input shape: 2D", "the input shape of the NetVLAD layer. Input Shape: 3D", "input. Args: model_input: A tensor with shape [batch_size, feature_dim]. Returns:", "clusters will be num_clusters/2. video_input_shape: shape of the input video", "shape [batch_size, max_frames, video_feature_dim + audio_feature_dim]. Returns: A tensor with", "= self.fc(model_input) output = tf.math.multiply(context_gate, model_input) return output def compute_output_shape(self,", "Classifier. \"\"\" import math import tensorflow as tf class NetVLAD(tf.keras.layers.Layer):", "`(batch_size, time, feature_dim)`. 
Output shape: 2D tensor with shape: `(batch_size,", "feature_dim is None: raise ValueError(\"Last dimension must be defined.\") context_gate", "= self.feature_dim max_frames = self.max_frames frames = tf.reshape(frames, (-1, feature_dim))", "\"\"\" feature_dim = self.feature_dim max_frames = self.max_frames frames = tf.reshape(frames,", "with shape [batch_size, num_classes]. Raises: ValueError: If the `feature_dim` of", "input audio features. Shape of [batch_size, num_samples, audio_feature_dim]. Raises: ValueError:", "input_shape, **kwargs): super(ContextGating, self).__init__(**kwargs) feature_dim = input_shape[-1] if not isinstance(feature_dim,", "of [batch_size, num_samples, audio_feature_dim]. Raises: ValueError: If num_clusters is not", "= activation - cluster_activation vlad_out = tf.nn.l2_normalize(vlad_out, 1) vlad_out =", "= num_mixtures self.iterations = iterations self.video_feature_dim = video_input_shape[2] self.video_vlad =", "model_input): \"\"\"Apply the ContextGating module to the given input. Args:", "to the input. Args: num_clusters: The number of clusters to", "**kwargs): super().__init__(**kwargs) if num_clusters <= 0: raise ValueError(\"`num_clusters` must be", "[-1, self.num_mixtures])) probs = tf.reduce_sum(tf.math.multiply(gate_dist[:,:self.num_mixtures], expert_dist),1) probs = tf.reshape(probs, [-1,", "math.sqrt(feature_dim) ), trainable=True, name=\"cluster_centers\" + str(num_clusters) ) self.feature_dim = feature_dim", "number of samples of the audio_input_shape and video_input_shape do not", "to the given frames. 
Args: frames: A tensor with shape", "name=\"main_fc\" ) self.first_cg = ContextGating(input_shape=(batch_size, fc_units), name=\"first_cg\") self.moe = MOELogistic(input_shape=self.first_cg.compute_output_shape((batch_size,", "final_out = self.fc2(cg_out) final_model = tf.keras.models.Model(inputs=[model_input, model_input2], outputs=final_out) return final_model", "feature_dim * self.num_clusters)) vlad_out = tf.nn.l2_normalize(vlad_out, 1) return vlad_out def", "= video_input_shape[1] self.num_classes = num_classes self.video_feature_dim = video_input_shape[2] self.video_vlad =", "__init__(self, num_clusters, video_input_shape, audio_input_shape, num_classes, num_mixtures, fc_units, iterations, **kwargs): super(VideoClassifier,", "shape [batch_size, feature_dim]. Raises: ValueError: If the `feature_dim` of model_input", "batch sizes of the audio_input_shape and video_input_shape do not match.", "form [num_new_features] Returns: A tensor with shape [batch_size, num_classes]. \"\"\"", "base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) class ContextGating(tf.keras.layers.Layer): \"\"\"Implements", "return final_model class SegmentClassifier: \"\"\"The Segment Classifier model, implemented according", "tf.TensorShape(input_shape).as_list() return tf.TensorShape([input_shape[0], input_shape[-1] * self.num_clusters]) def get_config(self): config =", "model. Args: model_input: input features of shape [batch_size, max_frames, video_feature_dim", "kernel_regularizer=tf.keras.regularizers.l2(1e-5), name=\"main_fc\" ) self.first_cg = ContextGating(input_shape=(batch_size, fc_units), name=\"first_cg\") self.moe =", "[batch_size, num_samples, video_feature_dim]. audio_input_shape: shape fo the input audio features.", "activation=tf.nn.softmax, kernel_regularizer=tf.keras.regularizers.l2(1e-5), name=\"vlad_fc\" + str(num_clusters) ) self.cluster_centers = self.add_weight( shape=(1,", "number of clusters to be used for NetVLAD. 
The audio", "second_input_shape: input shape of new class specific features. Shape is", "build_model(self, input_shape, second_input_shape, batch_size): \"\"\"Perform one forward pass of the", "of the Video Classifier. \"\"\" import math import tensorflow as", "feature_dim)`. \"\"\" def __init__(self, input_shape, **kwargs): super(ContextGating, self).__init__(**kwargs) feature_dim =", "feature_dim * num_clusters]. Raises: ValueError: If the `feature_dim` of input", "one forward pass of the model. Args: model_input: input features", "input_shape[-1] * self.num_clusters]) def get_config(self): config = {\"num_clusters\": self.num_clusters} base_config", "base_config = super().get_config() return dict(list(base_config.items())) class MOELogistic(tf.keras.layers.Layer): \"\"\"Implements a Mixture", "with the License. You may obtain a copy of the", "will be num_clusters/2. video_input_shape: shape of the input video features.", "= tf.nn.sigmoid(tf.reshape(expert_activations, [-1, self.num_mixtures])) probs = tf.reduce_sum(tf.math.multiply(gate_dist[:,:self.num_mixtures], expert_dist),1) probs =", "if audio_input_shape[1] != video_input_shape[1]: raise ValueError(\"audio_input_shape[1] must equal video_input_shape[1]. Number", "self.iterations = iterations self.video_feature_dim = video_input_shape[2] self.video_vlad = NetVLAD(num_clusters, input_shape=video_input_shape,", "feature_dim.value self.fc = tf.keras.layers.Dense( units=self.num_clusters, activation=tf.nn.softmax, kernel_regularizer=tf.keras.regularizers.l2(1e-5), name=\"vlad_fc\" + str(num_clusters)", "config class VideoClassifier: \"\"\"The Video Classifier model, implemented according to", "\"\"\"Apply the NetVLAD module to the given frames. Args: frames:", "class NetVLAD(tf.keras.layers.Layer): \"\"\"Applies NetVLAD to the input. Args: num_clusters: The", "is not defined. 
\"\"\" gate_activations = self.gate_fc(input) expert_activations = self.expert_fc(input)", "https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in", "if audio_input_shape[0] != batch_size: raise ValueError(\"audio_input_shape[0] must equal video_input_shape[0]. Batch", "NetVLAD to the input. Args: num_clusters: The number of clusters", ") self.feature_dim = feature_dim self.max_frames = input_shape[-2] def call(self, frames):", "= tf.keras.layers.Dense( units=self.num_clusters, activation=tf.nn.softmax, kernel_regularizer=tf.keras.regularizers.l2(1e-5), name=\"vlad_fc\" + str(num_clusters) ) self.cluster_centers", "must equal video_input_shape[0]. Batch sizes must equal.\") if audio_input_shape[1] !=", "employed in the paper. self.fc = tf.keras.layers.Dense( units=fc_units, activation=tf.nn.relu6, kernel_regularizer=tf.keras.regularizers.l2(1e-5),", "[batch_size, feature_dim * num_clusters]. Raises: ValueError: If the `feature_dim` of", "shape=(1, feature_dim, self.num_clusters), initializer=tf.keras.initializers.TruncatedNormal( stddev=1.0 / math.sqrt(feature_dim) ), trainable=True, name=\"cluster_centers\"", "NetVLAD module to the given frames. Args: frames: A tensor", "Returns: A tensor with shape [batch_size, feature_dim]. Raises: ValueError: If", "with shape [batch_size, feature_dim]. Raises: ValueError: If the `feature_dim` of", "vlad_out = tf.reshape(vlad_out, (-1, feature_dim * self.num_clusters)) vlad_out = tf.nn.l2_normalize(vlad_out,", "not defined. \"\"\" model_input.shape.assert_has_rank(2) feature_dim = model_input.shape.as_list()[-1] if feature_dim is", "video_feature_dim]. audio_input_shape: shape fo the input audio features. Shape of", "ValueError(\"audio_input_shape[1] must equal video_input_shape[1]. 
Number of samples must equal.\") self.num_frames", "= tf.concat([video_vlad_out, audio_vlad_out], axis=1) vlad_out = tf.concat([vlad_out, model_input2], axis=1) fc_out", "according to the winning model from the Youtube-8M Challenge. The", "Segment Classifier model, implemented according to the winning model from", "self.second_cg(moe_out) final_model = tf.keras.models.Model(inputs=model_input, outputs=final_out) return final_model class SegmentClassifier: \"\"\"The", "applicable law or agreed to in writing, software distributed under", "= self.audio_vlad(audio_input) vlad_out = tf.concat([video_vlad_out, audio_vlad_out], axis=1) fc_out = self.fc(vlad_out)", "input_shape: input shape for video features. Shape is of the", "Arguments: num_clusters: the number of clusters to be used for", ") self.first_cg = ContextGating(input_shape=(batch_size, fc_units), name=\"first_cg\") self.fc2 = tf.keras.layers.Dense( units=1,", "units=num_classes*(num_mixtures+1), kernel_regularizer=tf.keras.regularizers.l2(1e-6), ) self.expert_fc = tf.keras.layers.Dense( units=num_classes*num_mixtures, kernel_regularizer=tf.keras.regularizers.l2(1e-6), ) def", "units=self.num_clusters, activation=tf.nn.softmax, kernel_regularizer=tf.keras.regularizers.l2(1e-5), name=\"vlad_fc\" + str(num_clusters) ) self.cluster_centers = self.add_weight(", "if num_clusters % 2 != 0: raise ValueError(\"num_clusters must be", "the License is distributed on an \"AS IS\" BASIS, WITHOUT", "tf.reshape(frames, (-1, feature_dim)) activation = self.fc(frames) activation = tf.reshape(activation, (-1,", "If the `feature_dim` of input is not defined. \"\"\" gate_activations", "= self.fc(vlad_out) cg_out = self.first_cg(fc_out) moe_out = self.moe(cg_out) final_out =", "vlad_out = activation - cluster_activation vlad_out = tf.nn.l2_normalize(vlad_out, 1) vlad_out", "the specific language governing permissions and limitations under the License.", "shape: 2D tensor with shape: `(batch_size, num_classes)`. 
\"\"\" def __init__(self,", "model can be found here: https://arxiv.org/pdf/1706.06905.pdf Arguments: num_clusters: the number", "tf.nn.l2_normalize(vlad_out, 1) vlad_out = tf.reshape(vlad_out, (-1, feature_dim * self.num_clusters)) vlad_out", "the form [num_new_features] Returns: A tensor with shape [batch_size, num_classes].", "tensor with shape: `(batch_size, feature_dim)`. Output shape: 2D tensor with", "classes': self.num_classes, 'number of mixtures': self.num_mixtures}) return config class VideoClassifier:", "num_clusters <= 0: raise ValueError(\"`num_clusters` must be greater than 1:", "be defined.\") context_gate = self.fc(model_input) output = tf.math.multiply(context_gate, model_input) return", "the NetVLAD module to the given frames. Args: frames: A", "tensor with shape: `(batch_size, num_classes)`. \"\"\" def __init__(self, input_shape, num_classes,", "frames. Args: frames: A tensor with shape [batch_size, max_frames, feature_dim].", "input shape for video features. Shape is of the form:", "ValueError(\"num_clusters must be divisible by 2.\") batch_size = video_input_shape[0] if", "(-1, max_frames, self.num_clusters)) activation_sum = tf.math.reduce_sum(activation, axis=-2, keepdims=True) cluster_activation =", "fc_units, **kwargs): super(SegmentClassifier, self).__init__(**kwargs) if num_clusters % 2 != 0:", "math import tensorflow as tf class NetVLAD(tf.keras.layers.Layer): \"\"\"Applies NetVLAD to", "\"\"\" model_input = tf.keras.layers.Input(shape=input_shape, batch_size=batch_size) model_input2 = tf.keras.layers.Input(shape=second_input_shape, batch_size=batch_size) video_input", "activation = tf.transpose( tf.matmul(tf.transpose(activation, perm=(0, 2, 1)), frames), perm=(0, 2,", "1: %i\" % num_clusters) self.num_clusters = num_clusters feature_dim = input_shape[-1]", "tf.nn.sigmoid(tf.reshape(expert_activations, [-1, self.num_mixtures])) probs = tf.reduce_sum(tf.math.multiply(gate_dist[:,:self.num_mixtures], expert_dist),1) probs = 
tf.reshape(probs,", "audio_input = model_input[:,:,self.video_feature_dim:] video_vlad_out = self.video_vlad(video_input) audio_vlad_out = self.audio_vlad(audio_input) vlad_out", "of the NetVLAD layer. Input Shape: 3D tensor with shape:", "or agreed to in writing, software distributed under the License", "the architecture of the Video Classifier. \"\"\" import math import", "be greater than 1: %i\" % num_clusters) self.num_clusters = num_clusters", "feature_dim = feature_dim.value self.fc = tf.keras.layers.Dense( units=self.num_clusters, activation=tf.nn.softmax, kernel_regularizer=tf.keras.regularizers.l2(1e-5), name=\"vlad_fc\"", "class SegmentClassifier: \"\"\"The Segment Classifier model, implemented according to the", "video_input_shape: shape of the input video features. Shape of [batch_size,", "input_shape): return input_shape def get_config(self): base_config = super().get_config() return dict(list(base_config.items()))", "tensor denoting the input shape of the NetVLAD layer. Input", "feature_dim = input_shape[-1] if not isinstance(feature_dim, int): feature_dim = feature_dim.value", "OF ANY KIND, either express or implied. See the License", "axis=-2, keepdims=True) cluster_activation = activation_sum * self.cluster_centers frames = tf.reshape(frames,", "`(batch_size, feature_dim)`. Output shape: 2D tensor with shape: `(batch_size, num_classes)`.", "the input video features. Shape of [batch_size, num_samples, video_feature_dim]. audio_input_shape:", "gate_dist = tf.nn.softmax(tf.reshape(gate_activations, [-1, self.num_mixtures+1])) expert_dist = tf.nn.sigmoid(tf.reshape(expert_activations, [-1, self.num_mixtures]))", "= base_config.update({'number of classes': self.num_classes, 'number of mixtures': self.num_mixtures}) return", "the number of samples of the audio_input_shape and video_input_shape do", "not match. 
\"\"\" def __init__(self, num_clusters, video_input_shape, audio_input_shape, num_classes, num_mixtures,", "audio_input_shape and video_input_shape do not match. ValueError: If the number", "features. Shape is of the form: [max_frames, video_feature_dim + audio_feature_dim].", "input_shape[-1] if not isinstance(feature_dim, int): feature_dim = feature_dim.value self.fc =", "num_clusters: The number of clusters to use. input_shape: 3D tensor", "self.num_classes = num_classes self.video_feature_dim = video_input_shape[2] self.video_vlad = NetVLAD(num_clusters, input_shape=video_input_shape,", "video_input_shape[1]. Number of samples must equal.\") self.num_frames = video_input_shape[1] self.num_classes", "= ContextGating(input_shape=(batch_size, fc_units), name=\"first_cg\") self.moe = MOELogistic(input_shape=self.first_cg.compute_output_shape((batch_size, fc_units)), num_classes=self.num_classes, num_mixtures=self.num_mixtures,", "feature_dim, self.num_clusters), initializer=tf.keras.initializers.TruncatedNormal( stddev=1.0 / math.sqrt(feature_dim) ), trainable=True, name=\"cluster_centers\" +", "License, Version 2.0 (the \"License\"); you may not use this", "defined. \"\"\" gate_activations = self.gate_fc(input) expert_activations = self.expert_fc(input) #Calculate the", "num_mixtures, fc_units, iterations, **kwargs): super(VideoClassifier, self).__init__(**kwargs) if num_clusters % 2", "features. Shape is of the form [num_new_features] Returns: A tensor", "from the Youtube-8M Challenge. The model can be found here:", "num_samples, video_feature_dim]. audio_input_shape: shape fo the input audio features. Shape", "video features. Shape of [batch_size, num_samples, video_feature_dim]. 
audio_input_shape: shape fo", "video_input_shape[0] if audio_input_shape[0] != batch_size: raise ValueError(\"audio_input_shape[0] must equal video_input_shape[0].", "#Relu6 is used as it is employed in the paper.", "def get_config(self): base_config = super().get_config() config = base_config.update({'number of classes':", "pass of the model. Args: model_input: input features of shape", "probs = tf.reshape(probs, [-1, self.num_classes]) return probs def compute_output_shape(self, input_shape):", "and limitations under the License. Defines the architecture of the", "the input audio features. Shape of [batch_size, num_samples, audio_feature_dim]. Raises:", ") self.first_cg = ContextGating(input_shape=(batch_size, fc_units), name=\"first_cg\") self.moe = MOELogistic(input_shape=self.first_cg.compute_output_shape((batch_size, fc_units)),", "None: raise ValueError(\"Last dimension must be defined.\") context_gate = self.fc(model_input)", "- cluster_activation vlad_out = tf.nn.l2_normalize(vlad_out, 1) vlad_out = tf.reshape(vlad_out, (-1,", "\"\"\"The Video Classifier model, implemented according to the winning model", "self.num_clusters} base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) class ContextGating(tf.keras.layers.Layer):", "audio_input_shape[1] != video_input_shape[1]: raise ValueError(\"audio_input_shape[1] must equal video_input_shape[1]. 
Number of", "feature_dim)) activation = self.fc(frames) activation = tf.reshape(activation, (-1, max_frames, self.num_clusters))", "def compute_output_shape(self, input_shape): input_shape = tf.TensorShape(input_shape).as_list() return tf.TensorShape([input_shape[0], input_shape[-1] *", "self.fc(model_input) output = tf.math.multiply(context_gate, model_input) return output def compute_output_shape(self, input_shape):", "input_shape=video_input_shape, name=\"video_vlad\") self.audio_vlad = NetVLAD(num_clusters//2, input_shape=audio_input_shape, name=\"audio_vlad\") #Relu6 is used", "License. You may obtain a copy of the License at", "def __init__(self, num_clusters, input_shape, **kwargs): super().__init__(**kwargs) if num_clusters <= 0:", "= video_input_shape[0] if audio_input_shape[0] != batch_size: raise ValueError(\"audio_input_shape[0] must equal", "= feature_dim self.max_frames = input_shape[-2] def call(self, frames): \"\"\"Apply the", "equal video_input_shape[1]. Number of samples must equal.\") self.num_frames = video_input_shape[1]", "0: raise ValueError(\"`num_clusters` must be greater than 1: %i\" %", "new class specific features. Shape is of the form [num_new_features]", "tf.keras.layers.Input(shape=input_shape, batch_size=batch_size) video_input = model_input[:,:,:self.video_feature_dim] audio_input = model_input[:,:,self.video_feature_dim:] video_vlad_out =", "self.num_classes]) return probs def compute_output_shape(self, input_shape): return (input_shape[0], self.num_classes) def", "feature_dim)`. Output shape: 2D tensor with shape: `(batch_size, feature_dim *", "defined. \"\"\" feature_dim = self.feature_dim max_frames = self.max_frames frames =", "equal video_input_shape[0]. Batch sizes must equal.\") if audio_input_shape[1] != video_input_shape[1]:", "an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY", "config = base_config.update({'number of classes': self.num_classes, 'number of mixtures': self.num_mixtures})", "feature_dim]. 
Returns: A tensor with shape [batch_size, num_classes]. Raises: ValueError:", "pass of the model. Args: input_shape: input shape for video", "output def compute_output_shape(self, input_shape): return input_shape def get_config(self): base_config =", "if not isinstance(feature_dim, int): feature_dim = feature_dim.value self.fc = tf.keras.layers.Dense(", "Output shape: 2D tensor with shape: `(batch_size, num_classes)`. \"\"\" def", "**kwargs): super(MOELogistic, self).__init__(**kwargs) self.num_classes = num_classes self.num_mixtures = num_mixtures self.gate_fc", "frames = tf.reshape(frames, (-1, feature_dim)) activation = self.fc(frames) activation =", "compute_output_shape(self, input_shape): return input_shape def get_config(self): base_config = super().get_config() return", "clusters to be used for NetVLAD. The audio clusters will", "of mixtures': self.num_mixtures}) return config class VideoClassifier: \"\"\"The Video Classifier", "forward pass of the model. Args: model_input: input features of", "= tf.concat([video_vlad_out, audio_vlad_out], axis=1) fc_out = self.fc(vlad_out) cg_out = self.first_cg(fc_out)", "return config class VideoClassifier: \"\"\"The Video Classifier model, implemented according", "audio_input_shape and video_input_shape do not match. \"\"\" def __init__(self, num_clusters,", "kernel_regularizer=tf.keras.regularizers.l2(1e-5), name=\"main_fc\" ) self.first_cg = ContextGating(input_shape=(batch_size, fc_units), name=\"first_cg\") self.fc2 =", "specific features. Shape is of the form [num_new_features] Returns: A", "the given input. Args: input: A tensor with shape [batch_size,", "video_input_shape[1] self.num_classes = num_classes self.video_feature_dim = video_input_shape[2] self.video_vlad = NetVLAD(num_clusters,", "tensorflow as tf class NetVLAD(tf.keras.layers.Layer): \"\"\"Applies NetVLAD to the input.", "call(self, model_input): \"\"\"Apply the ContextGating module to the given input." ]
[ "Ivy syntax and signature. \"\"\" # global import jax as", "= _jnp.cosh atanh = _jnp.arctanh log = _jnp.log exp =", "= _jnp.arccos atan = _jnp.arctan atan2 = _jnp.arctan2 cosh =", "atan = _jnp.arctan atan2 = _jnp.arctan2 cosh = _jnp.cosh atanh", "<reponame>faruq2021/ivy<filename>ivy/functional/backends/jax/old/math.py<gh_stars>0 \"\"\" Collection of Jax math functions, wrapped to fit", "of Jax math functions, wrapped to fit Ivy syntax and", "= _jnp.arctanh log = _jnp.log exp = _jnp.exp erf =", "# global import jax as _jax import jax.numpy as _jnp", "_jnp.arctanh log = _jnp.log exp = _jnp.exp erf = _jax.scipy.special.erf", "_jnp.cosh atanh = _jnp.arctanh log = _jnp.log exp = _jnp.exp", "math functions, wrapped to fit Ivy syntax and signature. \"\"\"", "import jax.numpy as _jnp tan = _jnp.tan acos = _jnp.arccos", "syntax and signature. \"\"\" # global import jax as _jax", "and signature. \"\"\" # global import jax as _jax import", "as _jnp tan = _jnp.tan acos = _jnp.arccos atan =", "\"\"\" # global import jax as _jax import jax.numpy as", "import jax as _jax import jax.numpy as _jnp tan =", "\"\"\" Collection of Jax math functions, wrapped to fit Ivy", "jax as _jax import jax.numpy as _jnp tan = _jnp.tan", "= _jnp.arctan atan2 = _jnp.arctan2 cosh = _jnp.cosh atanh =", "functions, wrapped to fit Ivy syntax and signature. \"\"\" #", "cosh = _jnp.cosh atanh = _jnp.arctanh log = _jnp.log exp", "Jax math functions, wrapped to fit Ivy syntax and signature.", "_jax import jax.numpy as _jnp tan = _jnp.tan acos =", "fit Ivy syntax and signature. 
\"\"\" # global import jax", "= _jnp.tan acos = _jnp.arccos atan = _jnp.arctan atan2 =", "acos = _jnp.arccos atan = _jnp.arctan atan2 = _jnp.arctan2 cosh", "as _jax import jax.numpy as _jnp tan = _jnp.tan acos", "_jnp.arctan2 cosh = _jnp.cosh atanh = _jnp.arctanh log = _jnp.log", "jax.numpy as _jnp tan = _jnp.tan acos = _jnp.arccos atan", "_jnp tan = _jnp.tan acos = _jnp.arccos atan = _jnp.arctan", "tan = _jnp.tan acos = _jnp.arccos atan = _jnp.arctan atan2", "Collection of Jax math functions, wrapped to fit Ivy syntax", "_jnp.tan acos = _jnp.arccos atan = _jnp.arctan atan2 = _jnp.arctan2", "atanh = _jnp.arctanh log = _jnp.log exp = _jnp.exp erf", "atan2 = _jnp.arctan2 cosh = _jnp.cosh atanh = _jnp.arctanh log", "_jnp.arctan atan2 = _jnp.arctan2 cosh = _jnp.cosh atanh = _jnp.arctanh", "to fit Ivy syntax and signature. \"\"\" # global import", "wrapped to fit Ivy syntax and signature. \"\"\" # global", "signature. \"\"\" # global import jax as _jax import jax.numpy", "global import jax as _jax import jax.numpy as _jnp tan", "= _jnp.arctan2 cosh = _jnp.cosh atanh = _jnp.arctanh log =", "_jnp.arccos atan = _jnp.arctan atan2 = _jnp.arctan2 cosh = _jnp.cosh" ]
[ "'\\b' in q['title']: q['title'] = q['title'].replace('\\n', ' ') q['title'] =", "db = MongoClient().zhihu_network.questions for q in db.find(): if '\\n' in", "len(existed_question_id) % 1000 == 0: print(len(existed_question_id)) words = segmentor.segment(q['title'].strip().replace( '\\n',", "< 3: continue out_db.insert({'_id': a['q_id'], 'title': ' '.join(words)}) def insert_questions_from_followed_question():", "segmentor = Segmentor() segmentor.load(\"/Users/sunxiaofei/workspace/ltp_data/cws.model\") for u in in_db.find(): for a", "in user['collections'].items(): q_ids += [q['q_id'] for q in c] out_db.insert({'_id':", "= line[1:] if user['_id'] in existed_user_id: continue existed_user_id.add(user['_id']) out_db.insert(user) def", "for a in user['answers']] out_db.insert({'_id': user['_id'], 'neibors': q_ids}) if __name__", "a in u['answers']: if a['q_id'] in existed_question_id: continue existed_question_id.add(a['q_id']) if", "MongoClient().zhihu.user_followed_questions out_db = MongoClient().zhihu_network.questions existed_question_id = set(map(lambda q: q['_id'], out_db.find()))", "in u['answers']: if a['q_id'] in existed_question_id: continue existed_question_id.add(a['q_id']) if len(existed_question_id)", "[q['q_id'] for q in c] out_db.insert({'_id': user['_id'], 'neibors': q_ids}) def", "delete_noise_question(): db = MongoClient().zhihu_network.questions id_to_delete = [] for q in", "'education', 'motto', 'answer_num', 'collection_num', 'followed_column_num', 'followed_topic_num', 'followee_num', 'follower_num', 'post_num', 'question_num',", "if len(q['title'].split(' ')) < 3: id_to_delete.append(q['_id']) print(len(id_to_delete)) for _id in", "in in_db.find(): if user['_id'] in existed_user_id: continue existed_user_id.add(user['_id']) q_ids =", "a['q_id'] in existed_question_id: continue existed_question_id.add(a['q_id']) if len(existed_question_id) % 1000 ==", "key in user: if key.endswith('_num'): user[key] = int(user[key]) out_db.insert(user) def", 
"existed_user_id.add(user['_id']) q_ids = [] for _, c in user['collections'].items(): q_ids", "= line[0] user['neibors'] = line[1:] if user['_id'] in existed_user_id: continue", "'neibors': q_ids}) if __name__ == '__main__': # insert_questions_from_answered_question() # insert_questions_from_followed_question()", "insert_user_list() insert_user_follow_user_list() # insert_user_follow_question_list() # insert_user_ask_question_list() # insert_user_collect_question_list() # insert_user_answer_question_list()", "== 0: print(len(existed_question_id)) words = segmentor.segment(q['title'].strip().replace( '\\n', ' ').replace('\\r', '", "[] for _, c in user['collections'].items(): q_ids += [q['q_id'] for", "if __name__ == '__main__': # insert_questions_from_answered_question() # insert_questions_from_followed_question() # insert_questions_from_asked_question()", "in c] out_db.insert({'_id': user['_id'], 'neibors': q_ids}) def insert_user_answer_question_list(): in_db =", "Segmentor def insert_questions_from_answered_question(): in_db = MongoClient().zhihu.user_answered_questions out_db = MongoClient().zhihu_network.questions existed_question_id", "len(existed_question_id) % 1000 == 0: print(len(existed_question_id)) words = segmentor.segment(a['title'].strip().replace( '\\n',", "user['questions']] out_db.insert({'_id': user['_id'], 'neibors': q_ids}) def insert_user_ask_question_list(): in_db = MongoClient().zhihu.user_asked_questions", "id_to_delete: db.delete_one({'_id': _id}) def remove_enger_inline(): db = MongoClient().zhihu_network.questions for q", "continue if a['q_id'] in existed_question_id: continue existed_question_id.add(a['q_id']) if len(existed_question_id) %", "or '\\b' in q['title']: q['title'] = q['title'].replace('\\n', ' ') q['title']", "q['title']: q['title'] = q['title'].replace('\\n', ' ') q['title'] = q['title'].replace('\\r', '", "== 0: print(len(existed_question_id)) words = segmentor.segment(a['title'].strip().replace( '\\n', ' ').replace('\\r', 
'", "segmentor.load(\"/Users/sunxiaofei/workspace/ltp_data/cws.model\") for u in in_db.find(): for a in u['answers']: if", "len(words) < 3: continue out_db.insert({'_id': a['q_id'], 'title': ' '.join(words)}) def", "a['q_id'], 'title': ' '.join(words)}) def delete_noise_question(): db = MongoClient().zhihu_network.questions id_to_delete", "if len(existed_question_id) % 1000 == 0: print(len(existed_question_id)) words = segmentor.segment(q['title'].strip().replace(", "= set(map(lambda u: u['_id'], out_db.find())) for line in open('./user_info.data'): line", "q['title'] or '\\b' in q['title']: q['title'] = q['title'].replace('\\n', ' ')", "'neibors': q_ids}) def insert_user_collect_question_list(): in_db = MongoClient().zhihu.user_collected_questions out_db = MongoClient().zhihu_network.user_collect_question_adjacency_list", "if user['_id'] in existed_user_id: continue existed_user_id.add(user['_id']) q_ids = [a['q_id'] for", "def insert_user_follow_user_list(): out_db = MongoClient().zhihu_network.user_follow_user_adjacency_list existed_user_id = set(map(lambda u: u['_id'],", "existed_question_id: continue existed_question_id.add(q['id']) if len(existed_question_id) % 1000 == 0: print(len(existed_question_id))", "db.update_one({'_id': q['_id']}, {'$set': {'title': q['title']}}, upsert=True) def insert_user_list(): keys =", "' '.join(words)}) def insert_questions_from_followed_question(): in_db = MongoClient().zhihu.user_followed_questions out_db = MongoClient().zhihu_network.questions", "'title': ' '.join(words)}) def delete_noise_question(): db = MongoClient().zhihu_network.questions id_to_delete =", "in q['title']: q['title'] = q['title'].replace('\\n', ' ') q['title'] = q['title'].replace('\\r',", "continue existed_question_id.add(q['id']) if len(existed_question_id) % 1000 == 0: print(len(existed_question_id)) words", "' ') q['title'] = q['title'].replace('\\r', ' ') q['title'] = q['title'].replace('\\b',", "import MongoClient from pyltp import Segmentor def 
insert_questions_from_answered_question(): in_db =", "in existed_user_id: continue existed_user_id.add(user['_id']) q_ids = [a['q_id'] for a in", "for q in u['questions']: if q['id'] in existed_question_id: continue existed_question_id.add(q['id'])", "= MongoClient().zhihu_network.user_collect_question_adjacency_list existed_user_id = set(map(lambda u: u['_id'], out_db.find())) for user", "').replace('\\r', ' ').replace('\\b', ' ')) if len(words) < 3: continue", "[] for q in db.find(): if len(q['title'].split(' ')) < 3:", "= MongoClient().zhihu.user_answered_questions out_db = MongoClient().zhihu_network.user_answer_question_adjacency_list existed_user_id = set(map(lambda u: u['_id'],", "user['_id'], 'neibors': q_ids}) def insert_user_ask_question_list(): in_db = MongoClient().zhihu.user_asked_questions out_db =", "= set(map(lambda u: u['_id'], out_db.find())) for user in in_db.find(): if", "continue out_db.insert({'_id': q['id'], 'title': ' '.join(words)}) def insert_questions_from_collected_question(): in_db =", "in_db = MongoClient().zhihu.user_answered_questions out_db = MongoClient().zhihu_network.user_answer_question_adjacency_list existed_user_id = set(map(lambda u:", "existed_user_id: continue existed_user_id.add(user['_id']) q_ids = [q['id'] for q in user['questions']]", "')) < 3: id_to_delete.append(q['_id']) print(len(id_to_delete)) for _id in id_to_delete: db.delete_one({'_id':", "u['collections'].items(): for a in c_questions: if a['q_id'] == -1: continue", "# insert_questions_from_asked_question() # insert_questions_from_collected_question() #delete_noise_question() #remove_enger_inline() # insert_user_list() insert_user_follow_user_list() #", "q_ids = [q['id'] for q in user['questions']] out_db.insert({'_id': user['_id'], 'neibors':", "= [q['id'] for q in user['questions']] out_db.insert({'_id': user['_id'], 'neibors': q_ids})", "q in user['questions']] out_db.insert({'_id': user['_id'], 'neibors': q_ids}) def insert_user_collect_question_list(): 
in_db", "if q['id'] in existed_question_id: continue existed_question_id.add(q['id']) if len(existed_question_id) % 1000", "existed_question_id.add(q['id']) words = segmentor.segment(q['title'].strip().replace( '\\n', ' ').replace('\\r', ' ').replace('\\b', '", "in in_db.find(): for a in u['answers']: if a['q_id'] in existed_question_id:", "in db.find(): if '\\n' in q['title'] or '\\r' in q['title']", "for u in in_db.find(): for q in u['questions']: if q['id']", "line in open('./user_followees.data'): line = line.strip().split('\\t') user = dict() user['_id']", "'neibors': q_ids}) def insert_user_ask_question_list(): in_db = MongoClient().zhihu.user_asked_questions out_db = MongoClient().zhihu_network.user_ask_question_adjacency_list", "u in in_db.find(): for a in u['answers']: if a['q_id'] in", "for key in user: if key.endswith('_num'): user[key] = int(user[key]) out_db.insert(user)", "in existed_user_id: continue existed_user_id.add(user['_id']) out_db.insert(user) def insert_user_follow_question_list(): in_db = MongoClient().zhihu.user_followed_questions", "for a in u['answers']: if a['q_id'] in existed_question_id: continue existed_question_id.add(a['q_id'])", "insert_questions_from_followed_question() # insert_questions_from_asked_question() # insert_questions_from_collected_question() #delete_noise_question() #remove_enger_inline() # insert_user_list() insert_user_follow_user_list()", "#delete_noise_question() #remove_enger_inline() # insert_user_list() insert_user_follow_user_list() # insert_user_follow_question_list() # insert_user_ask_question_list() #", "in in_db.find(): for c_name, c_questions in u['collections'].items(): for a in", "for q in user['questions']] out_db.insert({'_id': user['_id'], 'neibors': q_ids}) def insert_user_ask_question_list():", "def remove_enger_inline(): db = MongoClient().zhihu_network.questions for q in db.find(): if", "segmentor = Segmentor() segmentor.load(\"/Users/sunxiaofei/workspace/ltp_data/cws.model\") for u in 
in_db.find(): for c_name,", "in u['collections'].items(): for a in c_questions: if a['q_id'] == -1:", "= dict() user['_id'] = line[0] user['neibors'] = line[1:] if user['_id']", "existed_user_id: continue existed_user_id.add(user['_id']) q_ids = [] for _, c in", "q['title']}}, upsert=True) def insert_user_list(): keys = ['_id', 'name', 'is_zero_user', 'gender',", "out_db = MongoClient().zhihu_network.user_collect_question_adjacency_list existed_user_id = set(map(lambda u: u['_id'], out_db.find())) for", "out_db.find())) for line in open('./user_info.data'): line = line.strip().split('\\t') try: assert", "= MongoClient().zhihu.user_asked_questions out_db = MongoClient().zhihu_network.user_ask_question_adjacency_list existed_user_id = set(map(lambda u: u['_id'],", "= Segmentor() segmentor.load(\"/Users/sunxiaofei/workspace/ltp_data/cws.model\") for u in in_db.find(): for a in", "'.join(words)}) def delete_noise_question(): db = MongoClient().zhihu_network.questions id_to_delete = [] for", "c_questions: if a['q_id'] == -1: continue if a['q_id'] in existed_question_id:", "insert_questions_from_followed_question(): in_db = MongoClient().zhihu.user_followed_questions out_db = MongoClient().zhihu_network.questions existed_question_id = set(map(lambda", "in db.find(): if len(q['title'].split(' ')) < 3: id_to_delete.append(q['_id']) print(len(id_to_delete)) for", "') q['title'] = q['title'].replace('\\b', ' ') db.update_one({'_id': q['_id']}, {'$set': {'title':", "len(line)) except: continue user = dict(zip(keys, line)) if user['_id'] in", "'title': ' '.join(words)}) def insert_questions_from_followed_question(): in_db = MongoClient().zhihu.user_followed_questions out_db =", "' ') q['title'] = q['title'].replace('\\b', ' ') db.update_one({'_id': q['_id']}, {'$set':", "continue existed_question_id.add(q['id']) words = segmentor.segment(q['title'].strip().replace( '\\n', ' ').replace('\\r', ' ').replace('\\b',", "a['q_id'] == -1: continue if a['q_id'] in existed_question_id: 
continue existed_question_id.add(a['q_id'])", "out_db = MongoClient().zhihu_network.user_follow_user_adjacency_list existed_user_id = set(map(lambda u: u['_id'], out_db.find())) for", "q in u['questions']: if q['id'] in existed_question_id: continue existed_question_id.add(q['id']) if", "== '__main__': # insert_questions_from_answered_question() # insert_questions_from_followed_question() # insert_questions_from_asked_question() # insert_questions_from_collected_question()", "= MongoClient().zhihu.user_followed_questions out_db = MongoClient().zhihu_network.questions existed_question_id = set(map(lambda q: q['_id'],", "if user['_id'] in existed_user_id: continue existed_user_id.add(user['_id']) q_ids = [q['id'] for", "q in u['questions']: if q['id'] in existed_question_id: continue existed_question_id.add(q['id']) words", "= line.strip().split('\\t') user = dict() user['_id'] = line[0] user['neibors'] =", "out_db.find())) segmentor = Segmentor() segmentor.load(\"/Users/sunxiaofei/workspace/ltp_data/cws.model\") for u in in_db.find(): for", "q['title'] = q['title'].replace('\\b', ' ') db.update_one({'_id': q['_id']}, {'$set': {'title': q['title']}},", "3: id_to_delete.append(q['_id']) print(len(id_to_delete)) for _id in id_to_delete: db.delete_one({'_id': _id}) def", "existed_question_id: continue existed_question_id.add(q['id']) words = segmentor.segment(q['title'].strip().replace( '\\n', ' ').replace('\\r', '", "existed_user_id = set(map(lambda u: u['_id'], out_db.find())) for line in open('./user_info.data'):", "_, c in user['collections'].items(): q_ids += [q['q_id'] for q in", "keys = ['_id', 'name', 'is_zero_user', 'gender', 'location', 'business', 'education', 'motto',", "q_ids = [a['q_id'] for a in user['answers']] out_db.insert({'_id': user['_id'], 'neibors':", "def insert_user_ask_question_list(): in_db = MongoClient().zhihu.user_asked_questions out_db = MongoClient().zhihu_network.user_ask_question_adjacency_list existed_user_id =", "= 
segmentor.segment(q['title'].strip().replace( '\\n', ' ').replace('\\r', ' ').replace('\\b', ' ')) if", "= [] for _, c in user['collections'].items(): q_ids += [q['q_id']", "out_db.insert({'_id': q['id'], 'title': ' '.join(words)}) def insert_questions_from_collected_question(): in_db = MongoClient().zhihu.user_collected_questions", "in c_questions: if a['q_id'] == -1: continue if a['q_id'] in", "out_db = MongoClient().zhihu_network.questions existed_question_id = set(map(lambda q: q['_id'], out_db.find())) segmentor", "'upvote_num', 'photo_url', 'weibo_url'] out_db = MongoClient().zhihu_network.users existed_user_id = set(map(lambda u:", "in_db.find(): if user['_id'] in existed_user_id: continue existed_user_id.add(user['_id']) q_ids = []", "q_ids = [] for _, c in user['collections'].items(): q_ids +=", "'followed_topic_num', 'followee_num', 'follower_num', 'post_num', 'question_num', 'thank_num', 'upvote_num', 'photo_url', 'weibo_url'] out_db", "in user['questions']] out_db.insert({'_id': user['_id'], 'neibors': q_ids}) def insert_user_collect_question_list(): in_db =", "'weibo_url'] out_db = MongoClient().zhihu_network.users existed_user_id = set(map(lambda u: u['_id'], out_db.find()))", "a in c_questions: if a['q_id'] == -1: continue if a['q_id']", "assert (len(keys) == len(line)) except: continue user = dict(zip(keys, line))", "in existed_user_id: continue existed_user_id.add(user['_id']) q_ids = [] for _, c", "= MongoClient().zhihu.user_answered_questions out_db = MongoClient().zhihu_network.questions existed_question_id = set(map(lambda q: q['_id'],", "% 1000 == 0: print(len(existed_question_id)) words = segmentor.segment(a['title'].strip().replace( '\\n', '", "').replace('\\b', ' ')) if len(words) < 3: continue out_db.insert({'_id': a['q_id'],", "remove_enger_inline(): db = MongoClient().zhihu_network.questions for q in db.find(): if '\\n'", "q['title'] = q['title'].replace('\\n', ' ') q['title'] = q['title'].replace('\\r', ' ')", "out_db.insert({'_id': 
user['_id'], 'neibors': q_ids}) def insert_user_collect_question_list(): in_db = MongoClient().zhihu.user_collected_questions out_db", "set(map(lambda u: u['_id'], out_db.find())) for user in in_db.find(): if user['_id']", "words = segmentor.segment(q['title'].strip().replace( '\\n', ' ').replace('\\r', ' ').replace('\\b', ' '))", "def delete_noise_question(): db = MongoClient().zhihu_network.questions id_to_delete = [] for q", "')) if len(words) < 3: continue out_db.insert({'_id': a['q_id'], 'title': '", "'followee_num', 'follower_num', 'post_num', 'question_num', 'thank_num', 'upvote_num', 'photo_url', 'weibo_url'] out_db =", "Segmentor() segmentor.load(\"/Users/sunxiaofei/workspace/ltp_data/cws.model\") for u in in_db.find(): for a in u['answers']:", "= [] for q in db.find(): if len(q['title'].split(' ')) <", "'followed_column_num', 'followed_topic_num', 'followee_num', 'follower_num', 'post_num', 'question_num', 'thank_num', 'upvote_num', 'photo_url', 'weibo_url']", "open('./user_info.data'): line = line.strip().split('\\t') try: assert (len(keys) == len(line)) except:", "< 3: continue out_db.insert({'_id': a['q_id'], 'title': ' '.join(words)}) def delete_noise_question():", "out_db = MongoClient().zhihu_network.user_ask_question_adjacency_list existed_user_id = set(map(lambda u: u['_id'], out_db.find())) for", "q['title'] = q['title'].replace('\\r', ' ') q['title'] = q['title'].replace('\\b', ' ')", "' ')) if len(words) < 3: continue out_db.insert({'_id': a['q_id'], 'title':", "u in in_db.find(): for q in u['questions']: if q['id'] in", "u['answers']: if a['q_id'] in existed_question_id: continue existed_question_id.add(a['q_id']) if len(existed_question_id) %", "in existed_user_id: continue existed_user_id.add(user['_id']) for key in user: if key.endswith('_num'):", "c in user['collections'].items(): q_ids += [q['q_id'] for q in c]", "user in in_db.find(): if user['_id'] in existed_user_id: continue existed_user_id.add(user['_id']) q_ids", 
"insert_user_list(): keys = ['_id', 'name', 'is_zero_user', 'gender', 'location', 'business', 'education',", "db = MongoClient().zhihu_network.questions id_to_delete = [] for q in db.find():", "q['title'] or '\\r' in q['title'] or '\\b' in q['title']: q['title']", "in existed_user_id: continue existed_user_id.add(user['_id']) q_ids = [q['id'] for q in", "if a['q_id'] in existed_question_id: continue existed_question_id.add(a['q_id']) if len(existed_question_id) % 1000", "' ')) if len(words) < 3: continue out_db.insert({'_id': q['id'], 'title':", "a in user['answers']] out_db.insert({'_id': user['_id'], 'neibors': q_ids}) if __name__ ==", "'neibors': q_ids}) def insert_user_answer_question_list(): in_db = MongoClient().zhihu.user_answered_questions out_db = MongoClient().zhihu_network.user_answer_question_adjacency_list", "'.join(words)}) def insert_questions_from_collected_question(): in_db = MongoClient().zhihu.user_collected_questions out_db = MongoClient().zhihu_network.questions existed_question_id", "user['_id'] = line[0] user['neibors'] = line[1:] if user['_id'] in existed_user_id:", "in_db = MongoClient().zhihu.user_collected_questions out_db = MongoClient().zhihu_network.user_collect_question_adjacency_list existed_user_id = set(map(lambda u:", "= q['title'].replace('\\n', ' ') q['title'] = q['title'].replace('\\r', ' ') q['title']", "'name', 'is_zero_user', 'gender', 'location', 'business', 'education', 'motto', 'answer_num', 'collection_num', 'followed_column_num',", "insert_questions_from_answered_question(): in_db = MongoClient().zhihu.user_answered_questions out_db = MongoClient().zhihu_network.questions existed_question_id = set(map(lambda", "line = line.strip().split('\\t') user = dict() user['_id'] = line[0] user['neibors']", "upsert=True) def insert_user_list(): keys = ['_id', 'name', 'is_zero_user', 'gender', 'location',", "# insert_questions_from_collected_question() #delete_noise_question() #remove_enger_inline() # insert_user_list() 
insert_user_follow_user_list() # insert_user_follow_question_list() #", "for user in in_db.find(): if user['_id'] in existed_user_id: continue existed_user_id.add(user['_id'])", "or '\\r' in q['title'] or '\\b' in q['title']: q['title'] =", "for q in c] out_db.insert({'_id': user['_id'], 'neibors': q_ids}) def insert_user_answer_question_list():", "existed_question_id.add(a['q_id']) if len(existed_question_id) % 1000 == 0: print(len(existed_question_id)) words =", "segmentor = Segmentor() segmentor.load(\"/Users/sunxiaofei/workspace/ltp_data/cws.model\") for u in in_db.find(): for q", "for _id in id_to_delete: db.delete_one({'_id': _id}) def remove_enger_inline(): db =", "MongoClient().zhihu.user_answered_questions out_db = MongoClient().zhihu_network.questions existed_question_id = set(map(lambda q: q['_id'], out_db.find()))", "_id in id_to_delete: db.delete_one({'_id': _id}) def remove_enger_inline(): db = MongoClient().zhihu_network.questions", "q['id'], 'title': ' '.join(words)}) def insert_questions_from_collected_question(): in_db = MongoClient().zhihu.user_collected_questions out_db", "db.find(): if '\\n' in q['title'] or '\\r' in q['title'] or", "c_name, c_questions in u['collections'].items(): for a in c_questions: if a['q_id']", "= MongoClient().zhihu_network.questions id_to_delete = [] for q in db.find(): if", "['_id', 'name', 'is_zero_user', 'gender', 'location', 'business', 'education', 'motto', 'answer_num', 'collection_num',", "from pymongo import MongoClient from pyltp import Segmentor def insert_questions_from_answered_question():", "continue out_db.insert({'_id': q['id'], 'title': ' '.join(words)}) def insert_questions_from_asked_question(): in_db =", "pyltp import Segmentor def insert_questions_from_answered_question(): in_db = MongoClient().zhihu.user_answered_questions out_db =", "').replace('\\b', ' ')) if len(words) < 3: continue out_db.insert({'_id': q['id'],", "if len(words) < 3: continue out_db.insert({'_id': q['id'], 'title': ' 
'.join(words)})", "line[1:] if user['_id'] in existed_user_id: continue existed_user_id.add(user['_id']) out_db.insert(user) def insert_user_follow_question_list():", "'\\r' in q['title'] or '\\b' in q['title']: q['title'] = q['title'].replace('\\n',", "u['_id'], out_db.find())) for line in open('./user_followees.data'): line = line.strip().split('\\t') user", "out_db = MongoClient().zhihu_network.user_follow_question_adjacency_list existed_user_id = set(map(lambda u: u['_id'], out_db.find())) for", "MongoClient().zhihu_network.user_ask_question_adjacency_list existed_user_id = set(map(lambda u: u['_id'], out_db.find())) for user in", "-1: continue if a['q_id'] in existed_question_id: continue existed_question_id.add(a['q_id']) if len(existed_question_id)", "in_db.find(): for q in u['questions']: if q['id'] in existed_question_id: continue", "in in_db.find(): for q in u['questions']: if q['id'] in existed_question_id:", "for c_name, c_questions in u['collections'].items(): for a in c_questions: if", "user = dict() user['_id'] = line[0] user['neibors'] = line[1:] if", "' '.join(words)}) def insert_questions_from_asked_question(): in_db = MongoClient().zhihu.user_asked_questions out_db = MongoClient().zhihu_network.questions", "= MongoClient().zhihu_network.user_follow_user_adjacency_list existed_user_id = set(map(lambda u: u['_id'], out_db.find())) for line", "3: continue out_db.insert({'_id': q['id'], 'title': ' '.join(words)}) def insert_questions_from_collected_question(): in_db", "insert_user_collect_question_list(): in_db = MongoClient().zhihu.user_collected_questions out_db = MongoClient().zhihu_network.user_collect_question_adjacency_list existed_user_id = set(map(lambda", "= int(user[key]) out_db.insert(user) def insert_user_follow_user_list(): out_db = MongoClient().zhihu_network.user_follow_user_adjacency_list existed_user_id =", "pymongo import MongoClient from pyltp import Segmentor def insert_questions_from_answered_question(): in_db", "def 
insert_questions_from_asked_question(): in_db = MongoClient().zhihu.user_asked_questions out_db = MongoClient().zhihu_network.questions existed_question_id =", "len(words) < 3: continue out_db.insert({'_id': q['id'], 'title': ' '.join(words)}) def", "= set(map(lambda q: q['_id'], out_db.find())) segmentor = Segmentor() segmentor.load(\"/Users/sunxiaofei/workspace/ltp_data/cws.model\") for", "line[0] user['neibors'] = line[1:] if user['_id'] in existed_user_id: continue existed_user_id.add(user['_id'])", "in user['answers']] out_db.insert({'_id': user['_id'], 'neibors': q_ids}) if __name__ == '__main__':", "'title': ' '.join(words)}) def insert_questions_from_asked_question(): in_db = MongoClient().zhihu.user_asked_questions out_db =", "Segmentor() segmentor.load(\"/Users/sunxiaofei/workspace/ltp_data/cws.model\") for u in in_db.find(): for c_name, c_questions in", "in id_to_delete: db.delete_one({'_id': _id}) def remove_enger_inline(): db = MongoClient().zhihu_network.questions for", "def insert_user_list(): keys = ['_id', 'name', 'is_zero_user', 'gender', 'location', 'business',", "'thank_num', 'upvote_num', 'photo_url', 'weibo_url'] out_db = MongoClient().zhihu_network.users existed_user_id = set(map(lambda", "= MongoClient().zhihu_network.questions existed_question_id = set(map(lambda q: q['_id'], out_db.find())) segmentor =", "def insert_questions_from_collected_question(): in_db = MongoClient().zhihu.user_collected_questions out_db = MongoClient().zhihu_network.questions existed_question_id =", "< 3: continue out_db.insert({'_id': q['id'], 'title': ' '.join(words)}) def insert_questions_from_asked_question():", "= [a['q_id'] for a in user['answers']] out_db.insert({'_id': user['_id'], 'neibors': q_ids})", "q['_id'], out_db.find())) segmentor = Segmentor() segmentor.load(\"/Users/sunxiaofei/workspace/ltp_data/cws.model\") for u in in_db.find():", "'location', 'business', 'education', 'motto', 'answer_num', 'collection_num', 'followed_column_num', 
'followed_topic_num', 'followee_num', 'follower_num',", "def insert_user_collect_question_list(): in_db = MongoClient().zhihu.user_collected_questions out_db = MongoClient().zhihu_network.user_collect_question_adjacency_list existed_user_id =", "in_db.find(): for c_name, c_questions in u['collections'].items(): for a in c_questions:", "__name__ == '__main__': # insert_questions_from_answered_question() # insert_questions_from_followed_question() # insert_questions_from_asked_question() #", "from pyltp import Segmentor def insert_questions_from_answered_question(): in_db = MongoClient().zhihu.user_answered_questions out_db", "3: continue out_db.insert({'_id': a['q_id'], 'title': ' '.join(words)}) def insert_questions_from_followed_question(): in_db", "out_db.insert({'_id': user['_id'], 'neibors': q_ids}) if __name__ == '__main__': # insert_questions_from_answered_question()", "q['id'] in existed_question_id: continue existed_question_id.add(q['id']) words = segmentor.segment(q['title'].strip().replace( '\\n', '", "'.join(words)}) def insert_questions_from_asked_question(): in_db = MongoClient().zhihu.user_asked_questions out_db = MongoClient().zhihu_network.questions existed_question_id", "segmentor.load(\"/Users/sunxiaofei/workspace/ltp_data/cws.model\") for u in in_db.find(): for c_name, c_questions in u['collections'].items():", "'is_zero_user', 'gender', 'location', 'business', 'education', 'motto', 'answer_num', 'collection_num', 'followed_column_num', 'followed_topic_num',", "MongoClient().zhihu_network.questions existed_question_id = set(map(lambda q: q['_id'], out_db.find())) segmentor = Segmentor()", "for q in db.find(): if '\\n' in q['title'] or '\\r'", "u['questions']: if q['id'] in existed_question_id: continue existed_question_id.add(q['id']) words = segmentor.segment(q['title'].strip().replace(", "3: continue out_db.insert({'_id': a['q_id'], 'title': ' '.join(words)}) def delete_noise_question(): db", "set(map(lambda q: q['_id'], out_db.find())) 
segmentor = Segmentor() segmentor.load(\"/Users/sunxiaofei/workspace/ltp_data/cws.model\") for u", "if len(existed_question_id) % 1000 == 0: print(len(existed_question_id)) words = segmentor.segment(a['title'].strip().replace(", "' ') db.update_one({'_id': q['_id']}, {'$set': {'title': q['title']}}, upsert=True) def insert_user_list():", "q['id'] in existed_question_id: continue existed_question_id.add(q['id']) if len(existed_question_id) % 1000 ==", "' ').replace('\\b', ' ')) if len(words) < 3: continue out_db.insert({'_id':", "q['title'].replace('\\n', ' ') q['title'] = q['title'].replace('\\r', ' ') q['title'] =", "= set(map(lambda u: u['_id'], out_db.find())) for line in open('./user_followees.data'): line", "MongoClient().zhihu_network.user_collect_question_adjacency_list existed_user_id = set(map(lambda u: u['_id'], out_db.find())) for user in", "') db.update_one({'_id': q['_id']}, {'$set': {'title': q['title']}}, upsert=True) def insert_user_list(): keys", "u: u['_id'], out_db.find())) for user in in_db.find(): if user['_id'] in", "= MongoClient().zhihu_network.users existed_user_id = set(map(lambda u: u['_id'], out_db.find())) for line", "in q['title'] or '\\b' in q['title']: q['title'] = q['title'].replace('\\n', '", "in_db = MongoClient().zhihu.user_asked_questions out_db = MongoClient().zhihu_network.user_ask_question_adjacency_list existed_user_id = set(map(lambda u:", "len(q['title'].split(' ')) < 3: id_to_delete.append(q['_id']) print(len(id_to_delete)) for _id in id_to_delete:", "if q['id'] in existed_question_id: continue existed_question_id.add(q['id']) words = segmentor.segment(q['title'].strip().replace( '\\n',", "user['_id'], 'neibors': q_ids}) def insert_user_answer_question_list(): in_db = MongoClient().zhihu.user_answered_questions out_db =", "<filename>neaten_db.py<gh_stars>0 from pymongo import MongoClient from pyltp import Segmentor def", "line.strip().split('\\t') user = dict() user['_id'] = line[0] user['neibors'] = line[1:]", "continue 
existed_user_id.add(user['_id']) out_db.insert(user) def insert_user_follow_question_list(): in_db = MongoClient().zhihu.user_followed_questions out_db =", "= MongoClient().zhihu.user_followed_questions out_db = MongoClient().zhihu_network.user_follow_question_adjacency_list existed_user_id = set(map(lambda u: u['_id'],", "q_ids}) if __name__ == '__main__': # insert_questions_from_answered_question() # insert_questions_from_followed_question() #", "user['_id'] in existed_user_id: continue existed_user_id.add(user['_id']) q_ids = [q['id'] for q", "')) if len(words) < 3: continue out_db.insert({'_id': q['id'], 'title': '", "Segmentor() segmentor.load(\"/Users/sunxiaofei/workspace/ltp_data/cws.model\") for u in in_db.find(): for q in u['questions']:", "# insert_user_list() insert_user_follow_user_list() # insert_user_follow_question_list() # insert_user_ask_question_list() # insert_user_collect_question_list() #", "print(len(existed_question_id)) words = segmentor.segment(q['title'].strip().replace( '\\n', ' ').replace('\\r', ' ').replace('\\b', '", "q['_id']}, {'$set': {'title': q['title']}}, upsert=True) def insert_user_list(): keys = ['_id',", "out_db.insert(user) def insert_user_follow_question_list(): in_db = MongoClient().zhihu.user_followed_questions out_db = MongoClient().zhihu_network.user_follow_question_adjacency_list existed_user_id", "existed_user_id = set(map(lambda u: u['_id'], out_db.find())) for user in in_db.find():", "continue existed_user_id.add(user['_id']) for key in user: if key.endswith('_num'): user[key] =", "3: continue out_db.insert({'_id': q['id'], 'title': ' '.join(words)}) def insert_questions_from_asked_question(): in_db", "MongoClient().zhihu.user_collected_questions out_db = MongoClient().zhihu_network.questions existed_question_id = set(map(lambda q: q['_id'], out_db.find()))", "insert_questions_from_collected_question() #delete_noise_question() #remove_enger_inline() # insert_user_list() insert_user_follow_user_list() # 
insert_user_follow_question_list() # insert_user_ask_question_list()", "= ['_id', 'name', 'is_zero_user', 'gender', 'location', 'business', 'education', 'motto', 'answer_num',", "= Segmentor() segmentor.load(\"/Users/sunxiaofei/workspace/ltp_data/cws.model\") for u in in_db.find(): for q in", "user['collections'].items(): q_ids += [q['q_id'] for q in c] out_db.insert({'_id': user['_id'],", "q_ids += [q['q_id'] for q in c] out_db.insert({'_id': user['_id'], 'neibors':", "= MongoClient().zhihu_network.questions for q in db.find(): if '\\n' in q['title']", "q in db.find(): if '\\n' in q['title'] or '\\r' in", "in open('./user_followees.data'): line = line.strip().split('\\t') user = dict() user['_id'] =", "MongoClient().zhihu_network.users existed_user_id = set(map(lambda u: u['_id'], out_db.find())) for line in", "u: u['_id'], out_db.find())) for line in open('./user_info.data'): line = line.strip().split('\\t')", "< 3: id_to_delete.append(q['_id']) print(len(id_to_delete)) for _id in id_to_delete: db.delete_one({'_id': _id})", "c_questions in u['collections'].items(): for a in c_questions: if a['q_id'] ==", "in_db = MongoClient().zhihu.user_asked_questions out_db = MongoClient().zhihu_network.questions existed_question_id = set(map(lambda q:", "q in user['questions']] out_db.insert({'_id': user['_id'], 'neibors': q_ids}) def insert_user_ask_question_list(): in_db", "line)) if user['_id'] in existed_user_id: continue existed_user_id.add(user['_id']) for key in", "set(map(lambda u: u['_id'], out_db.find())) for line in open('./user_followees.data'): line =", "q in db.find(): if len(q['title'].split(' ')) < 3: id_to_delete.append(q['_id']) print(len(id_to_delete))", "u: u['_id'], out_db.find())) for line in open('./user_followees.data'): line = line.strip().split('\\t')", "MongoClient().zhihu.user_answered_questions out_db = MongoClient().zhihu_network.user_answer_question_adjacency_list existed_user_id = set(map(lambda u: u['_id'], out_db.find()))", "except: continue 
user = dict(zip(keys, line)) if user['_id'] in existed_user_id:", "'motto', 'answer_num', 'collection_num', 'followed_column_num', 'followed_topic_num', 'followee_num', 'follower_num', 'post_num', 'question_num', 'thank_num',", "out_db.insert(user) def insert_user_follow_user_list(): out_db = MongoClient().zhihu_network.user_follow_user_adjacency_list existed_user_id = set(map(lambda u:", "segmentor.segment(a['title'].strip().replace( '\\n', ' ').replace('\\r', ' ').replace('\\b', ' ')) if len(words)", "if key.endswith('_num'): user[key] = int(user[key]) out_db.insert(user) def insert_user_follow_user_list(): out_db =", "in_db.find(): for a in u['answers']: if a['q_id'] in existed_question_id: continue", "= line.strip().split('\\t') try: assert (len(keys) == len(line)) except: continue user", "out_db.insert({'_id': a['q_id'], 'title': ' '.join(words)}) def delete_noise_question(): db = MongoClient().zhihu_network.questions", "try: assert (len(keys) == len(line)) except: continue user = dict(zip(keys,", "print(len(existed_question_id)) words = segmentor.segment(a['title'].strip().replace( '\\n', ' ').replace('\\r', ' ').replace('\\b', '", "for q in db.find(): if len(q['title'].split(' ')) < 3: id_to_delete.append(q['_id'])", "existed_question_id = set(map(lambda q: q['_id'], out_db.find())) segmentor = Segmentor() segmentor.load(\"/Users/sunxiaofei/workspace/ltp_data/cws.model\")", "1000 == 0: print(len(existed_question_id)) words = segmentor.segment(q['title'].strip().replace( '\\n', ' ').replace('\\r',", "for line in open('./user_followees.data'): line = line.strip().split('\\t') user = dict()", "in u['questions']: if q['id'] in existed_question_id: continue existed_question_id.add(q['id']) if len(existed_question_id)", "existed_user_id: continue existed_user_id.add(user['_id']) for key in user: if key.endswith('_num'): user[key]", "MongoClient().zhihu.user_collected_questions out_db = MongoClient().zhihu_network.user_collect_question_adjacency_list 
existed_user_id = set(map(lambda u: u['_id'], out_db.find()))", "if user['_id'] in existed_user_id: continue existed_user_id.add(user['_id']) out_db.insert(user) def insert_user_follow_question_list(): in_db", "= MongoClient().zhihu_network.user_answer_question_adjacency_list existed_user_id = set(map(lambda u: u['_id'], out_db.find())) for user", "existed_user_id.add(user['_id']) for key in user: if key.endswith('_num'): user[key] = int(user[key])", "db.delete_one({'_id': _id}) def remove_enger_inline(): db = MongoClient().zhihu_network.questions for q in", "insert_questions_from_collected_question(): in_db = MongoClient().zhihu.user_collected_questions out_db = MongoClient().zhihu_network.questions existed_question_id = set(map(lambda", "+= [q['q_id'] for q in c] out_db.insert({'_id': user['_id'], 'neibors': q_ids})", "existed_user_id.add(user['_id']) out_db.insert(user) def insert_user_follow_question_list(): in_db = MongoClient().zhihu.user_followed_questions out_db = MongoClient().zhihu_network.user_follow_question_adjacency_list", "dict(zip(keys, line)) if user['_id'] in existed_user_id: continue existed_user_id.add(user['_id']) for key", "existed_question_id: continue existed_question_id.add(a['q_id']) if len(existed_question_id) % 1000 == 0: print(len(existed_question_id))", "user['_id'], 'neibors': q_ids}) if __name__ == '__main__': # insert_questions_from_answered_question() #", "existed_user_id = set(map(lambda u: u['_id'], out_db.find())) for line in open('./user_followees.data'):", "int(user[key]) out_db.insert(user) def insert_user_follow_user_list(): out_db = MongoClient().zhihu_network.user_follow_user_adjacency_list existed_user_id = set(map(lambda", "q['id'], 'title': ' '.join(words)}) def insert_questions_from_asked_question(): in_db = MongoClient().zhihu.user_asked_questions out_db", "MongoClient().zhihu.user_asked_questions out_db = MongoClient().zhihu_network.questions existed_question_id = set(map(lambda q: q['_id'], out_db.find()))", 
"'follower_num', 'post_num', 'question_num', 'thank_num', 'upvote_num', 'photo_url', 'weibo_url'] out_db = MongoClient().zhihu_network.users", "for _, c in user['collections'].items(): q_ids += [q['q_id'] for q", "user['_id'] in existed_user_id: continue existed_user_id.add(user['_id']) out_db.insert(user) def insert_user_follow_question_list(): in_db =", "if a['q_id'] == -1: continue if a['q_id'] in existed_question_id: continue", "print(len(id_to_delete)) for _id in id_to_delete: db.delete_one({'_id': _id}) def remove_enger_inline(): db", "def insert_user_follow_question_list(): in_db = MongoClient().zhihu.user_followed_questions out_db = MongoClient().zhihu_network.user_follow_question_adjacency_list existed_user_id =", "continue existed_question_id.add(a['q_id']) if len(existed_question_id) % 1000 == 0: print(len(existed_question_id)) words", "insert_user_answer_question_list(): in_db = MongoClient().zhihu.user_answered_questions out_db = MongoClient().zhihu_network.user_answer_question_adjacency_list existed_user_id = set(map(lambda", "' '.join(words)}) def insert_questions_from_collected_question(): in_db = MongoClient().zhihu.user_collected_questions out_db = MongoClient().zhihu_network.questions", "if len(words) < 3: continue out_db.insert({'_id': a['q_id'], 'title': ' '.join(words)})", "in existed_question_id: continue existed_question_id.add(q['id']) words = segmentor.segment(q['title'].strip().replace( '\\n', ' ').replace('\\r',", "for u in in_db.find(): for a in u['answers']: if a['q_id']", "in user['questions']] out_db.insert({'_id': user['_id'], 'neibors': q_ids}) def insert_user_ask_question_list(): in_db =", "'post_num', 'question_num', 'thank_num', 'upvote_num', 'photo_url', 'weibo_url'] out_db = MongoClient().zhihu_network.users existed_user_id", "q: q['_id'], out_db.find())) segmentor = Segmentor() segmentor.load(\"/Users/sunxiaofei/workspace/ltp_data/cws.model\") for u in", "== len(line)) except: continue user = dict(zip(keys, line)) if 
user['_id']", "user['_id'], 'neibors': q_ids}) def insert_user_collect_question_list(): in_db = MongoClient().zhihu.user_collected_questions out_db =", "words = segmentor.segment(a['title'].strip().replace( '\\n', ' ').replace('\\r', ' ').replace('\\b', ' '))", "for q in user['questions']] out_db.insert({'_id': user['_id'], 'neibors': q_ids}) def insert_user_collect_question_list():", "in_db = MongoClient().zhihu.user_followed_questions out_db = MongoClient().zhihu_network.questions existed_question_id = set(map(lambda q:", "'business', 'education', 'motto', 'answer_num', 'collection_num', 'followed_column_num', 'followed_topic_num', 'followee_num', 'follower_num', 'post_num',", "insert_user_follow_user_list(): out_db = MongoClient().zhihu_network.user_follow_user_adjacency_list existed_user_id = set(map(lambda u: u['_id'], out_db.find()))", "for a in c_questions: if a['q_id'] == -1: continue if", "[q['id'] for q in user['questions']] out_db.insert({'_id': user['_id'], 'neibors': q_ids}) def", "user = dict(zip(keys, line)) if user['_id'] in existed_user_id: continue existed_user_id.add(user['_id'])", "= MongoClient().zhihu.user_asked_questions out_db = MongoClient().zhihu_network.questions existed_question_id = set(map(lambda q: q['_id'],", "_id}) def remove_enger_inline(): db = MongoClient().zhihu_network.questions for q in db.find():", "' '.join(words)}) def delete_noise_question(): db = MongoClient().zhihu_network.questions id_to_delete = []", "continue out_db.insert({'_id': a['q_id'], 'title': ' '.join(words)}) def insert_questions_from_followed_question(): in_db =", "u['_id'], out_db.find())) for line in open('./user_info.data'): line = line.strip().split('\\t') try:", "for line in open('./user_info.data'): line = line.strip().split('\\t') try: assert (len(keys)", "user['questions']] out_db.insert({'_id': user['_id'], 'neibors': q_ids}) def insert_user_collect_question_list(): in_db = MongoClient().zhihu.user_collected_questions", "#remove_enger_inline() # 
insert_user_list() insert_user_follow_user_list() # insert_user_follow_question_list() # insert_user_ask_question_list() # insert_user_collect_question_list()", "= MongoClient().zhihu_network.user_follow_question_adjacency_list existed_user_id = set(map(lambda u: u['_id'], out_db.find())) for user", "user['_id'] in existed_user_id: continue existed_user_id.add(user['_id']) q_ids = [] for _,", "existed_question_id.add(q['id']) if len(existed_question_id) % 1000 == 0: print(len(existed_question_id)) words =", "[a['q_id'] for a in user['answers']] out_db.insert({'_id': user['_id'], 'neibors': q_ids}) if", "= segmentor.segment(a['title'].strip().replace( '\\n', ' ').replace('\\r', ' ').replace('\\b', ' ')) if", "insert_questions_from_asked_question() # insert_questions_from_collected_question() #delete_noise_question() #remove_enger_inline() # insert_user_list() insert_user_follow_user_list() # insert_user_follow_question_list()", "def insert_questions_from_answered_question(): in_db = MongoClient().zhihu.user_answered_questions out_db = MongoClient().zhihu_network.questions existed_question_id =", "= MongoClient().zhihu_network.user_ask_question_adjacency_list existed_user_id = set(map(lambda u: u['_id'], out_db.find())) for user", "MongoClient().zhihu_network.user_follow_question_adjacency_list existed_user_id = set(map(lambda u: u['_id'], out_db.find())) for user in", "in open('./user_info.data'): line = line.strip().split('\\t') try: assert (len(keys) == len(line))", "for u in in_db.find(): for c_name, c_questions in u['collections'].items(): for", "in existed_question_id: continue existed_question_id.add(q['id']) if len(existed_question_id) % 1000 == 0:", "MongoClient().zhihu_network.user_answer_question_adjacency_list existed_user_id = set(map(lambda u: u['_id'], out_db.find())) for user in", "insert_questions_from_answered_question() # insert_questions_from_followed_question() # insert_questions_from_asked_question() # insert_questions_from_collected_question() 
#delete_noise_question() #remove_enger_inline() #", "id_to_delete.append(q['_id']) print(len(id_to_delete)) for _id in id_to_delete: db.delete_one({'_id': _id}) def remove_enger_inline():", "in q['title'] or '\\r' in q['title'] or '\\b' in q['title']:", "'photo_url', 'weibo_url'] out_db = MongoClient().zhihu_network.users existed_user_id = set(map(lambda u: u['_id'],", "line.strip().split('\\t') try: assert (len(keys) == len(line)) except: continue user =", "c] out_db.insert({'_id': user['_id'], 'neibors': q_ids}) def insert_user_answer_question_list(): in_db = MongoClient().zhihu.user_answered_questions", "q_ids}) def insert_user_answer_question_list(): in_db = MongoClient().zhihu.user_answered_questions out_db = MongoClient().zhihu_network.user_answer_question_adjacency_list existed_user_id", "line = line.strip().split('\\t') try: assert (len(keys) == len(line)) except: continue", "continue user = dict(zip(keys, line)) if user['_id'] in existed_user_id: continue", "== -1: continue if a['q_id'] in existed_question_id: continue existed_question_id.add(a['q_id']) if", "'answer_num', 'collection_num', 'followed_column_num', 'followed_topic_num', 'followee_num', 'follower_num', 'post_num', 'question_num', 'thank_num', 'upvote_num',", "import Segmentor def insert_questions_from_answered_question(): in_db = MongoClient().zhihu.user_answered_questions out_db = MongoClient().zhihu_network.questions", "existed_user_id: continue existed_user_id.add(user['_id']) out_db.insert(user) def insert_user_follow_question_list(): in_db = MongoClient().zhihu.user_followed_questions out_db", "continue existed_user_id.add(user['_id']) q_ids = [a['q_id'] for a in user['answers']] out_db.insert({'_id':", "# insert_questions_from_answered_question() # insert_questions_from_followed_question() # insert_questions_from_asked_question() # insert_questions_from_collected_question() #delete_noise_question() #remove_enger_inline()", "u['questions']: if q['id'] in existed_question_id: continue 
existed_question_id.add(q['id']) if len(existed_question_id) %", "segmentor.load(\"/Users/sunxiaofei/workspace/ltp_data/cws.model\") for u in in_db.find(): for q in u['questions']: if", "= dict(zip(keys, line)) if user['_id'] in existed_user_id: continue existed_user_id.add(user['_id']) for", "def insert_questions_from_followed_question(): in_db = MongoClient().zhihu.user_followed_questions out_db = MongoClient().zhihu_network.questions existed_question_id =", "existed_user_id: continue existed_user_id.add(user['_id']) q_ids = [a['q_id'] for a in user['answers']]", "{'$set': {'title': q['title']}}, upsert=True) def insert_user_list(): keys = ['_id', 'name',", "'.join(words)}) def insert_questions_from_followed_question(): in_db = MongoClient().zhihu.user_followed_questions out_db = MongoClient().zhihu_network.questions existed_question_id", "MongoClient().zhihu_network.user_follow_user_adjacency_list existed_user_id = set(map(lambda u: u['_id'], out_db.find())) for line in", "segmentor.segment(q['title'].strip().replace( '\\n', ' ').replace('\\r', ' ').replace('\\b', ' ')) if len(words)", "in u['questions']: if q['id'] in existed_question_id: continue existed_question_id.add(q['id']) words =", "out_db.insert({'_id': q['id'], 'title': ' '.join(words)}) def insert_questions_from_asked_question(): in_db = MongoClient().zhihu.user_asked_questions", "'\\n' in q['title'] or '\\r' in q['title'] or '\\b' in", "q in c] out_db.insert({'_id': user['_id'], 'neibors': q_ids}) def insert_user_answer_question_list(): in_db", "out_db = MongoClient().zhihu_network.user_answer_question_adjacency_list existed_user_id = set(map(lambda u: u['_id'], out_db.find())) for", "out_db.find())) for user in in_db.find(): if user['_id'] in existed_user_id: continue", "out_db.insert({'_id': a['q_id'], 'title': ' '.join(words)}) def insert_questions_from_followed_question(): in_db = MongoClient().zhihu.user_followed_questions", "= MongoClient().zhihu.user_collected_questions out_db = 
MongoClient().zhihu_network.user_collect_question_adjacency_list existed_user_id = set(map(lambda u: u['_id'],", "MongoClient().zhihu_network.questions for q in db.find(): if '\\n' in q['title'] or", "a['q_id'], 'title': ' '.join(words)}) def insert_questions_from_followed_question(): in_db = MongoClient().zhihu.user_followed_questions out_db", "= MongoClient().zhihu.user_collected_questions out_db = MongoClient().zhihu_network.questions existed_question_id = set(map(lambda q: q['_id'],", "user: if key.endswith('_num'): user[key] = int(user[key]) out_db.insert(user) def insert_user_follow_user_list(): out_db", "user['neibors'] = line[1:] if user['_id'] in existed_user_id: continue existed_user_id.add(user['_id']) out_db.insert(user)", "in_db = MongoClient().zhihu.user_answered_questions out_db = MongoClient().zhihu_network.questions existed_question_id = set(map(lambda q:", "= q['title'].replace('\\b', ' ') db.update_one({'_id': q['_id']}, {'$set': {'title': q['title']}}, upsert=True)", "open('./user_followees.data'): line = line.strip().split('\\t') user = dict() user['_id'] = line[0]", "') q['title'] = q['title'].replace('\\r', ' ') q['title'] = q['title'].replace('\\b', '", "'__main__': # insert_questions_from_answered_question() # insert_questions_from_followed_question() # insert_questions_from_asked_question() # insert_questions_from_collected_question() #delete_noise_question()", "out_db.find())) for line in open('./user_followees.data'): line = line.strip().split('\\t') user =", "'\\n', ' ').replace('\\r', ' ').replace('\\b', ' ')) if len(words) <", "continue existed_user_id.add(user['_id']) q_ids = [q['id'] for q in user['questions']] out_db.insert({'_id':", "# insert_questions_from_followed_question() # insert_questions_from_asked_question() # insert_questions_from_collected_question() #delete_noise_question() #remove_enger_inline() # insert_user_list()", "q['title'].replace('\\r', ' ') q['title'] = q['title'].replace('\\b', ' ') db.update_one({'_id': 
q['_id']},", "(len(keys) == len(line)) except: continue user = dict(zip(keys, line)) if", "MongoClient().zhihu_network.questions id_to_delete = [] for q in db.find(): if len(q['title'].split('", "if user['_id'] in existed_user_id: continue existed_user_id.add(user['_id']) q_ids = [] for", "user['_id'] in existed_user_id: continue existed_user_id.add(user['_id']) for key in user: if", "def insert_user_answer_question_list(): in_db = MongoClient().zhihu.user_answered_questions out_db = MongoClient().zhihu_network.user_answer_question_adjacency_list existed_user_id =", "if '\\n' in q['title'] or '\\r' in q['title'] or '\\b'", "in_db.find(): if user['_id'] in existed_user_id: continue existed_user_id.add(user['_id']) q_ids = [a['q_id']", "0: print(len(existed_question_id)) words = segmentor.segment(a['title'].strip().replace( '\\n', ' ').replace('\\r', ' ').replace('\\b',", "existed_user_id.add(user['_id']) q_ids = [a['q_id'] for a in user['answers']] out_db.insert({'_id': user['_id'],", "< 3: continue out_db.insert({'_id': q['id'], 'title': ' '.join(words)}) def insert_questions_from_collected_question():", "u['_id'], out_db.find())) for user in in_db.find(): if user['_id'] in existed_user_id:", "in user: if key.endswith('_num'): user[key] = int(user[key]) out_db.insert(user) def insert_user_follow_user_list():", "q_ids}) def insert_user_collect_question_list(): in_db = MongoClient().zhihu.user_collected_questions out_db = MongoClient().zhihu_network.user_collect_question_adjacency_list existed_user_id", "user[key] = int(user[key]) out_db.insert(user) def insert_user_follow_user_list(): out_db = MongoClient().zhihu_network.user_follow_user_adjacency_list existed_user_id", "out_db.insert({'_id': user['_id'], 'neibors': q_ids}) def insert_user_answer_question_list(): in_db = MongoClient().zhihu.user_answered_questions out_db", "in existed_question_id: continue existed_question_id.add(a['q_id']) if len(existed_question_id) % 1000 == 0:", 
"MongoClient().zhihu.user_asked_questions out_db = MongoClient().zhihu_network.user_ask_question_adjacency_list existed_user_id = set(map(lambda u: u['_id'], out_db.find()))", "line in open('./user_info.data'): line = line.strip().split('\\t') try: assert (len(keys) ==", "insert_user_follow_question_list(): in_db = MongoClient().zhihu.user_followed_questions out_db = MongoClient().zhihu_network.user_follow_question_adjacency_list existed_user_id = set(map(lambda", "continue out_db.insert({'_id': a['q_id'], 'title': ' '.join(words)}) def delete_noise_question(): db =", "1000 == 0: print(len(existed_question_id)) words = segmentor.segment(a['title'].strip().replace( '\\n', ' ').replace('\\r',", "user['_id'] in existed_user_id: continue existed_user_id.add(user['_id']) q_ids = [a['q_id'] for a", "if user['_id'] in existed_user_id: continue existed_user_id.add(user['_id']) for key in user:", "in_db.find(): if user['_id'] in existed_user_id: continue existed_user_id.add(user['_id']) q_ids = [q['id']", "MongoClient().zhihu.user_followed_questions out_db = MongoClient().zhihu_network.user_follow_question_adjacency_list existed_user_id = set(map(lambda u: u['_id'], out_db.find()))", "'question_num', 'thank_num', 'upvote_num', 'photo_url', 'weibo_url'] out_db = MongoClient().zhihu_network.users existed_user_id =", "% 1000 == 0: print(len(existed_question_id)) words = segmentor.segment(q['title'].strip().replace( '\\n', '", "id_to_delete = [] for q in db.find(): if len(q['title'].split(' '))", "= q['title'].replace('\\r', ' ') q['title'] = q['title'].replace('\\b', ' ') db.update_one({'_id':", "key.endswith('_num'): user[key] = int(user[key]) out_db.insert(user) def insert_user_follow_user_list(): out_db = MongoClient().zhihu_network.user_follow_user_adjacency_list", "db.find(): if len(q['title'].split(' ')) < 3: id_to_delete.append(q['_id']) print(len(id_to_delete)) for _id", "'collection_num', 'followed_column_num', 'followed_topic_num', 'followee_num', 'follower_num', 
'post_num', 'question_num', 'thank_num', 'upvote_num', 'photo_url',", "q_ids}) def insert_user_ask_question_list(): in_db = MongoClient().zhihu.user_asked_questions out_db = MongoClient().zhihu_network.user_ask_question_adjacency_list existed_user_id", "{'title': q['title']}}, upsert=True) def insert_user_list(): keys = ['_id', 'name', 'is_zero_user',", "dict() user['_id'] = line[0] user['neibors'] = line[1:] if user['_id'] in", "'gender', 'location', 'business', 'education', 'motto', 'answer_num', 'collection_num', 'followed_column_num', 'followed_topic_num', 'followee_num',", "0: print(len(existed_question_id)) words = segmentor.segment(q['title'].strip().replace( '\\n', ' ').replace('\\r', ' ').replace('\\b',", "insert_questions_from_asked_question(): in_db = MongoClient().zhihu.user_asked_questions out_db = MongoClient().zhihu_network.questions existed_question_id = set(map(lambda", "out_db.insert({'_id': user['_id'], 'neibors': q_ids}) def insert_user_ask_question_list(): in_db = MongoClient().zhihu.user_asked_questions out_db", "insert_user_ask_question_list(): in_db = MongoClient().zhihu.user_asked_questions out_db = MongoClient().zhihu_network.user_ask_question_adjacency_list existed_user_id = set(map(lambda", "' ').replace('\\r', ' ').replace('\\b', ' ')) if len(words) < 3:", "existed_user_id.add(user['_id']) q_ids = [q['id'] for q in user['questions']] out_db.insert({'_id': user['_id'],", "= Segmentor() segmentor.load(\"/Users/sunxiaofei/workspace/ltp_data/cws.model\") for u in in_db.find(): for c_name, c_questions", "set(map(lambda u: u['_id'], out_db.find())) for line in open('./user_info.data'): line =", "u in in_db.find(): for c_name, c_questions in u['collections'].items(): for a", "MongoClient from pyltp import Segmentor def insert_questions_from_answered_question(): in_db = MongoClient().zhihu.user_answered_questions", "in_db = MongoClient().zhihu.user_followed_questions out_db = MongoClient().zhihu_network.user_follow_question_adjacency_list 
existed_user_id = set(map(lambda u:", "continue existed_user_id.add(user['_id']) q_ids = [] for _, c in user['collections'].items():", "'title': ' '.join(words)}) def insert_questions_from_collected_question(): in_db = MongoClient().zhihu.user_collected_questions out_db =", "out_db = MongoClient().zhihu_network.users existed_user_id = set(map(lambda u: u['_id'], out_db.find())) for", "q['title'].replace('\\b', ' ') db.update_one({'_id': q['_id']}, {'$set': {'title': q['title']}}, upsert=True) def", "in_db = MongoClient().zhihu.user_collected_questions out_db = MongoClient().zhihu_network.questions existed_question_id = set(map(lambda q:", "user['answers']] out_db.insert({'_id': user['_id'], 'neibors': q_ids}) if __name__ == '__main__': #" ]
[ "'foo' assert_array_equal(f.variables['vlen_scalar'][...],np.array([1,2,3],np.int16)) data2 = v[:] data2s = vs[:] for i", "False self.file = FILE_NAME f = Dataset(self.file, 'w') vlen_type =", "+ 1 data[n] = np.arange(nn,dtype=VL_BASETYPE) datas[n] = ''.join([chr(i) for i", "VAR1_NAME = 'ragged' VAR2_NAME = 'strings' VAR3_NAME = 'strings_alt' VAR4_NAME", "the temporary files os.remove(self.file) def runTest(self): \"\"\"testing packing float vlens", "f = Dataset(FILE_NAME) assert f.variables['teststring'][:] == stringout f.close() os.remove(FILE_NAME) class", "f.createDimension('x', 1) # using assertRaisesRegext as a context manager #", "dtype=np.float64) v[0] # sometimes crashes v[0].tolist() # sometimes crashes v[0].size", "data if n==99: self.data = data n += 1 nc.close()", "= Dataset(self.file, 'r') vs_alt = f.variables[VAR3_NAME] unicode_strings = vs_alt[:] fancy_indexed", "def setUp(self): self.file = FILE_NAME nc = Dataset(self.file, 'w') vlen_type", "self.skip: return f = Dataset(self.file, 'a') w = f.variables[\"vl2\"] v", "datas.astype(str).dtype, (DIM2_NAME, DIM1_NAME)) strings_alt[:] = datas.astype(str) f.close() def tearDown(self): #", "f.variables[VAR2_NAME] vs_alt = f.variables[VAR3_NAME] assert list(f.vltypes.keys()) == [VL_NAME] assert f.vltypes[VL_NAME].dtype", "= f.variables[VAR2_NAME] vs_alt = f.variables[VAR3_NAME] assert list(f.vltypes.keys()) == [VL_NAME] assert", "= Dataset(FILE_NAME) assert f.variables['tenstrings'][np.int32(5)] == 'asdf' assert f.variables['tenstrings'][6.0] == 'asdf'", "'asdf' strtest.close() f = Dataset(FILE_NAME) assert f.variables['tenstrings'][np.int32(5)] == 'asdf' assert", "vlen_type = f.createVLType(np.float64, 'vltest') f.createDimension('x', None) v = f.createVariable('vl', vlen_type,", "= Dataset(FILE_NAME, 'w', format='NETCDF4') teststring = f.createVariable('teststring', str) stringout =", "= 1./254. 
v.missing_value=np.array(255,np.uint8) # random lengths between 1 and 1000", "= 5; nlats = 5 VAR1_NAME = 'ragged' VAR2_NAME =", "'asdf' f.close() os.remove(FILE_NAME) class TestObjectArrayIndexing(unittest.TestCase): def setUp(self): self.file = FILE_NAME", "try: self.skipTest(\"This test requires NetCDF 4.4.1 or later.\") except AttributeError:", "self.data) assert(err.max() < nc['vl'].scale_factor) # turn off auto-scaling nc.set_auto_maskandscale(False) data", "None) v = nc.createVariable('vl', vlen_type, 'x') v.scale_factor = 1./254. v.missing_value=np.array(255,np.uint8)", "fancy_indexed[2] == 'abcdef' f.close() class VlenAppendTestCase(unittest.TestCase): def setUp(self): import netCDF4", "VAR2_NAME = 'strings' VAR3_NAME = 'strings_alt' VAR4_NAME = 'string_scalar' VAR5_NAME", "== 'abcd' assert fancy_indexed[2] == 'abcdef' f.close() class VlenAppendTestCase(unittest.TestCase): def", "return f = Dataset(self.file, 'a') w = f.variables[\"vl2\"] v =", "== data2s[j,i] assert_array_equal(datas, vs_alt[:]) f.close() class TestInvalidDataType(unittest.TestCase): def runTest(self): f", "python >= 2.7 (issue #497) #with self.assertRaisesRegexp(ValueError, 'strings are only", "w = f.variables[\"vl2\"] v = f.variables[\"vl\"] w[0:3] = np.arange(3, dtype=np.float64)", "Dataset(self.file, 'w') vlen_type = f.createVLType(np.float64, 'vltest') f.createDimension('x', None) v =", "2.6 if self.skip: return f = Dataset(self.file, 'a') w =", "nc['vl'][-1] assert(data[-1] == np.around(self.data[-1]/nc['vl'].scale_factor)) nc.close() if __name__ == '__main__': unittest.main()", "Dataset(self.file,'w') vlen_t = f.createVLType(VL_BASETYPE, VL_NAME) f.createDimension(DIM1_NAME,nlons) f.createDimension(DIM2_NAME,nlats) ragged = f.createVariable(VAR1_NAME,", "= 'strings_alt' VAR4_NAME = 'string_scalar' VAR5_NAME = 'vlen_scalar' data =", "Vlen_ScaledInts(unittest.TestCase): def setUp(self): self.file = FILE_NAME nc = Dataset(self.file, 'w')", "vlen_type, 'x') v.scale_factor = 1./254. 
v.missing_value=np.array(255,np.uint8) # random lengths between", "files os.remove(self.file) def runTest(self): \"\"\"testing vlen variables\"\"\" f = Dataset(self.file,", "sometimes crashes v[0].tolist() # sometimes crashes v[0].size # BOOM! f.close()", "unicode_strings = vs_alt[:] fancy_indexed = unicode_strings[0][[1,2,4]] assert fancy_indexed[0] == 'abc'", "= f.createVariable(VAR2_NAME, str, (DIM2_NAME,DIM1_NAME)) strings_alt = f.createVariable(VAR3_NAME, datas.astype(str).dtype, (DIM2_NAME, DIM1_NAME))", "'r') v = f.variables[VAR1_NAME] vs = f.variables[VAR2_NAME] vs_alt = f.variables[VAR3_NAME]", "str, ('x',)) try: f.createVariable('foo', str, ('x',)) except ValueError: pass f.close()", "range(nlats*nlons): nn = nn + 1 data[n] = np.arange(nn,dtype=VL_BASETYPE) datas[n]", "np.abs(data - self.data) assert(err.max() < nc['vl'].scale_factor) # turn off auto-scaling", "10) strtest.createVariable('tenstrings', str, ['tenstrings']) strtest['tenstrings'][np.int32(5)] = 'asdf' strtest['tenstrings'][6.0] = 'asdf'", "works with python >= 2.7 (issue #497) #with self.assertRaisesRegexp(ValueError, 'strings", "= datas.astype(str) string_scalar[...] = 'foo' #issue458 vlen_scalar[...] = np.array([1,2,3],np.int16) f.close()", "1 and 1000 ilen = np.random.randint(1,1000,size=100) n = 0 for", "= np.array([1,2,3],np.int16) f.close() def tearDown(self): # Remove the temporary files", "vlen_scalar[...] 
= np.array([1,2,3],np.int16) f.close() def tearDown(self): # Remove the temporary", "strings = f.createVariable(VAR2_NAME, str, (DIM2_NAME,DIM1_NAME)) strings_alt = f.createVariable(VAR3_NAME, datas.astype(str).dtype, (DIM2_NAME,", "strtest['tenstrings'][np.int32(5)] = 'asdf' strtest['tenstrings'][6.0] = 'asdf' strtest.close() f = Dataset(FILE_NAME)", "nc = Dataset(self.file) data = nc['vl'][-1] # check max error", "runTest(self): \"\"\"testing appending to vlen variables (issue #527).\"\"\" # workaround", "'x') w = f.createVariable('vl2', np.float64, 'x') f.close() def tearDown(self): #", "0 for nlen in ilen: data = np.random.uniform(low=0.0, high=1.0, size=nlen)", "nc['vl'].scale_factor) # turn off auto-scaling nc.set_auto_maskandscale(False) data = nc['vl'][-1] assert(data[-1]", "'asdf' strtest['tenstrings'][6.0] = 'asdf' strtest.close() f = Dataset(FILE_NAME) assert f.variables['tenstrings'][np.int32(5)]", "VL_NAME = 'vlen_type' VL_BASETYPE = np.int16 DIM1_NAME = 'lon' DIM2_NAME", "f.close() class TestInvalidDataType(unittest.TestCase): def runTest(self): f = Dataset(FILE_NAME, 'w', format='NETCDF3_CLASSIC')", "nc['vl'][-1] # check max error of compression err = np.abs(data", "error of compression err = np.abs(data - self.data) assert(err.max() <", "err = np.abs(data - self.data) assert(err.max() < nc['vl'].scale_factor) # turn", "fancy_indexed = unicode_strings[0][[1,2,4]] assert fancy_indexed[0] == 'abc' assert fancy_indexed[1] ==", "Dataset(self.file, 'r') vs_alt = f.variables[VAR3_NAME] unicode_strings = vs_alt[:] fancy_indexed =", "= Dataset(self.file, 'a') w = f.variables[\"vl2\"] v = f.variables[\"vl\"] w[0:3]", "nc.createDimension('x', None) v = nc.createVariable('vl', vlen_type, 'x') v.scale_factor = 1./254.", "= data ragged[-1,-1] = data[-1,-1] strings[:] = datas strings[-2,-2] =", "= f.createVariable(VAR3_NAME, datas.astype(str).dtype, (DIM2_NAME, DIM1_NAME)) string_scalar = f.createVariable(VAR4_NAME,str,()) vlen_scalar =", "strings[-2,-2] = 
datas[-2,-2] strings_alt[:] = datas.astype(str) string_scalar[...] = 'foo' #issue458", "runTest(self): \"\"\"testing vlen variables\"\"\" f = Dataset(self.file, 'r') v =", "as scaled integers (issue #1003).\"\"\" nc = Dataset(self.file) data =", "f.variables['string_scalar'][...] == 'foo' assert_array_equal(f.variables['vlen_scalar'][...],np.array([1,2,3],np.int16)) data2 = v[:] data2s = vs[:]", "f.variables[VAR3_NAME] assert list(f.vltypes.keys()) == [VL_NAME] assert f.vltypes[VL_NAME].dtype == VL_BASETYPE assert", "ilen: data = np.random.uniform(low=0.0, high=1.0, size=nlen) v[n] = data if", "f.createVariable(VAR1_NAME, vlen_t,\\ (DIM2_NAME,DIM1_NAME)) strings = f.createVariable(VAR2_NAME, str, (DIM2_NAME,DIM1_NAME)) strings_alt =", "data2s[j,i] assert_array_equal(datas, vs_alt[:]) f.close() class TestInvalidDataType(unittest.TestCase): def runTest(self): f =", "strtest = Dataset(FILE_NAME, 'w', format='NETCDF4') strtest.createDimension('tenstrings', 10) strtest.createVariable('tenstrings', str, ['tenstrings'])", "fancy_indexed[1] == 'abcd' assert fancy_indexed[2] == 'abcdef' f.close() class VlenAppendTestCase(unittest.TestCase):", "with python >= 2.7 (issue #497) #with self.assertRaisesRegexp(ValueError, 'strings are", "compression err = np.abs(data - self.data) assert(err.max() < nc['vl'].scale_factor) #", "off auto-scaling nc.set_auto_maskandscale(False) data = nc['vl'][-1] assert(data[-1] == np.around(self.data[-1]/nc['vl'].scale_factor)) nc.close()", "temporary files os.remove(self.file) def runTest(self): \"\"\"testing packing float vlens as", "np.float64, 'x') f.close() def tearDown(self): # Remove the temporary files", "f.createDimension(DIM2_NAME,nlats) ragged = f.createVariable(VAR1_NAME, vlen_t,\\ (DIM2_NAME,DIM1_NAME)) strings = f.createVariable(VAR2_NAME, str,", "f.createVariable(VAR4_NAME,str,()) vlen_scalar = f.createVariable(VAR5_NAME,vlen_t,()) ragged[:] = data ragged[-1,-1] = data[-1,-1]", "datas[j,i] == data2s[j,i] assert_array_equal(datas, 
vs_alt[:]) f.close() class TestInvalidDataType(unittest.TestCase): def runTest(self):", "numpy as np from numpy.testing import assert_array_equal FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc',", "files os.remove(self.file) def runTest(self): \"\"\"testing appending to vlen variables (issue", "for n in range(nlats*nlons): nn = nn + 1 data[n]", "os.remove(FILE_NAME) class TestIntegerIndex(unittest.TestCase): # issue 526 def runTest(self): strtest =", "data[j,i]) assert datas[j,i] == data2s[j,i] assert_array_equal(datas, vs_alt[:]) f.close() class TestInvalidDataType(unittest.TestCase):", "'x') v.scale_factor = 1./254. v.missing_value=np.array(255,np.uint8) # random lengths between 1", "= nc['vl'][-1] # check max error of compression err =", "n += 1 nc.close() def tearDown(self): # Remove the temporary", "# Remove the temporary files os.remove(self.file) def runTest(self): \"\"\"testing vlen", "numpy.testing import assert_array_equal FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name VL_NAME = 'vlen_type'", "as np from numpy.testing import assert_array_equal FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name", "= 5 VAR1_NAME = 'ragged' VAR2_NAME = 'strings' VAR3_NAME =", "data[-1,-1] strings[:] = datas strings[-2,-2] = datas[-2,-2] strings_alt[:] = datas.astype(str)", "vlen_scalar = f.createVariable(VAR5_NAME,vlen_t,()) ragged[:] = data ragged[-1,-1] = data[-1,-1] strings[:]", "+= 1 nc.close() def tearDown(self): # Remove the temporary files", "== stringout f.close() os.remove(FILE_NAME) class TestIntegerIndex(unittest.TestCase): # issue 526 def", "VariablesTestCase(unittest.TestCase): def setUp(self): self.file = FILE_NAME f = Dataset(self.file,'w') vlen_t", "= np.reshape(datas,(nlats,nlons)) class VariablesTestCase(unittest.TestCase): def setUp(self): self.file = FILE_NAME f", "high=1.0, size=nlen) v[n] = data if n==99: self.data = data", "datas[-2,-2] strings_alt[:] = datas.astype(str) string_scalar[...] 
= 'foo' #issue458 vlen_scalar[...] =", "v = nc.createVariable('vl', vlen_type, 'x') v.scale_factor = 1./254. v.missing_value=np.array(255,np.uint8) #", "the temporary files os.remove(self.file) def runTest(self): \"\"\"testing vlen variables\"\"\" f", "vlen variables\"\"\" f = Dataset(self.file, 'r') vs_alt = f.variables[VAR3_NAME] unicode_strings", "data n += 1 nc.close() def tearDown(self): # Remove the", "stringout f.close() os.remove(FILE_NAME) class TestIntegerIndex(unittest.TestCase): # issue 526 def runTest(self):", "for j in range(nlats): assert_array_equal(data2[j,i], data[j,i]) assert datas[j,i] == data2s[j,i]", "= np.random.randint(1,1000,size=100) n = 0 for nlen in ilen: data", "def runTest(self): f = Dataset(FILE_NAME, 'w', format='NETCDF4') teststring = f.createVariable('teststring',", "(DIM2_NAME,DIM1_NAME)) strings = f.createVariable(VAR2_NAME, str, (DIM2_NAME,DIM1_NAME)) strings_alt = f.createVariable(VAR3_NAME, datas.astype(str).dtype,", "assert fancy_indexed[0] == 'abc' assert fancy_indexed[1] == 'abcd' assert fancy_indexed[2]", "size=nlen) v[n] = data if n==99: self.data = data n", "def runTest(self): strtest = Dataset(FILE_NAME, 'w', format='NETCDF4') strtest.createDimension('tenstrings', 10) strtest.createVariable('tenstrings',", "- self.data) assert(err.max() < nc['vl'].scale_factor) # turn off auto-scaling nc.set_auto_maskandscale(False)", "vlen variables\"\"\" f = Dataset(self.file, 'r') v = f.variables[VAR1_NAME] vs", "None) v = f.createVariable('vl', vlen_type, 'x') w = f.createVariable('vl2', np.float64,", "1 nc.close() def tearDown(self): # Remove the temporary files os.remove(self.file)", "= True try: self.skipTest(\"This test requires NetCDF 4.4.1 or later.\")", "= nc.createVariable('vl', vlen_type, 'x') v.scale_factor = 1./254. 
v.missing_value=np.array(255,np.uint8) # random", "= Dataset(self.file, 'w') vlen_type = nc.createVLType(np.uint8, 'vltest') nc.createDimension('x', None) v", "# workaround for Python 2.6 if self.skip: return f =", "ilen = np.random.randint(1,1000,size=100) n = 0 for nlen in ilen:", "float vlens as scaled integers (issue #1003).\"\"\" nc = Dataset(self.file)", "assert f.vltypes[VL_NAME].dtype == VL_BASETYPE assert f.variables['string_scalar'][...] == 'foo' assert_array_equal(f.variables['vlen_scalar'][...],np.array([1,2,3],np.int16)) data2", "NetCDF 4.4.1 or later.\") except AttributeError: # workaround for Python", "v = f.variables[\"vl\"] w[0:3] = np.arange(3, dtype=np.float64) v[0] # sometimes", "'strings are only supported'): # f.createVariable('foo', str, ('x',)) try: f.createVariable('foo',", "'lon' DIM2_NAME = 'lat' nlons = 5; nlats = 5", "netCDF4.__netcdf4libversion__ < \"4.4.1\": self.skip = True try: self.skipTest(\"This test requires", "packing float vlens as scaled integers (issue #1003).\"\"\" nc =", "5 VAR1_NAME = 'ragged' VAR2_NAME = 'strings' VAR3_NAME = 'strings_alt'", "FILE_NAME nc = Dataset(self.file, 'w') vlen_type = nc.createVLType(np.uint8, 'vltest') nc.createDimension('x',", "Dataset(self.file,'w') vlen_t = f.createVLType(VL_BASETYPE, VL_NAME) f.createDimension(DIM1_NAME,nlons) f.createDimension(DIM2_NAME,nlats) strings_alt = f.createVariable(VAR3_NAME,", "import Dataset import numpy as np from numpy.testing import assert_array_equal", "f.close() os.remove(FILE_NAME) class TestScalarVlenString(unittest.TestCase): # issue 333 def runTest(self): f", "== 'asdf' f.close() os.remove(FILE_NAME) class TestObjectArrayIndexing(unittest.TestCase): def setUp(self): self.file =", "runTest(self): f = Dataset(FILE_NAME, 'w', format='NETCDF4') teststring = f.createVariable('teststring', str)", "f.createVLType(VL_BASETYPE, VL_NAME) f.createDimension(DIM1_NAME,nlons) f.createDimension(DIM2_NAME,nlats) strings_alt = f.createVariable(VAR3_NAME, 
datas.astype(str).dtype, (DIM2_NAME, DIM1_NAME))", "nlats = 5 VAR1_NAME = 'ragged' VAR2_NAME = 'strings' VAR3_NAME", "#1003).\"\"\" nc = Dataset(self.file) data = nc['vl'][-1] # check max", "= ''.join([chr(i) for i in range(97,97+nn+1)]) data = np.reshape(data,(nlats,nlons)) datas", "'asdf' assert f.variables['tenstrings'][6.0] == 'asdf' f.close() os.remove(FILE_NAME) class TestObjectArrayIndexing(unittest.TestCase): def", "vlen_t = f.createVLType(VL_BASETYPE, VL_NAME) f.createDimension(DIM1_NAME,nlons) f.createDimension(DIM2_NAME,nlats) ragged = f.createVariable(VAR1_NAME, vlen_t,\\", "< \"4.4.1\": self.skip = True try: self.skipTest(\"This test requires NetCDF", "4.4.1 or later.\") except AttributeError: # workaround for Python 2.6", "vlens as scaled integers (issue #1003).\"\"\" nc = Dataset(self.file) data", "assert f.variables['teststring'][:] == stringout f.close() os.remove(FILE_NAME) class TestIntegerIndex(unittest.TestCase): # issue", "[VL_NAME] assert f.vltypes[VL_NAME].dtype == VL_BASETYPE assert f.variables['string_scalar'][...] 
== 'foo' assert_array_equal(f.variables['vlen_scalar'][...],np.array([1,2,3],np.int16))", "vs_alt[:] fancy_indexed = unicode_strings[0][[1,2,4]] assert fancy_indexed[0] == 'abc' assert fancy_indexed[1]", "(DIM2_NAME,DIM1_NAME)) strings_alt = f.createVariable(VAR3_NAME, datas.astype(str).dtype, (DIM2_NAME, DIM1_NAME)) string_scalar = f.createVariable(VAR4_NAME,str,())", "f.variables[VAR3_NAME] unicode_strings = vs_alt[:] fancy_indexed = unicode_strings[0][[1,2,4]] assert fancy_indexed[0] ==", "n = 0 for nlen in ilen: data = np.random.uniform(low=0.0,", "= tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name VL_NAME = 'vlen_type' VL_BASETYPE = np.int16 DIM1_NAME", "2.7) pass else: self.skip = False self.file = FILE_NAME f", "v[0] # sometimes crashes v[0].tolist() # sometimes crashes v[0].size #", "f.createVariable('vl', vlen_type, 'x') w = f.createVariable('vl2', np.float64, 'x') f.close() def", "if n==99: self.data = data n += 1 nc.close() def", "= Dataset(self.file,'w') vlen_t = f.createVLType(VL_BASETYPE, VL_NAME) f.createDimension(DIM1_NAME,nlons) f.createDimension(DIM2_NAME,nlats) strings_alt =", "def setUp(self): import netCDF4 if netCDF4.__netcdf4libversion__ < \"4.4.1\": self.skip =", "vlen variables (issue #527).\"\"\" # workaround for Python 2.6 if", "== 'abc' assert fancy_indexed[1] == 'abcd' assert fancy_indexed[2] == 'abcdef'", "Dataset(FILE_NAME) assert f.variables['tenstrings'][np.int32(5)] == 'asdf' assert f.variables['tenstrings'][6.0] == 'asdf' f.close()", "(issue #527).\"\"\" # workaround for Python 2.6 if self.skip: return", "appending to vlen variables (issue #527).\"\"\" # workaround for Python", "vs_alt = f.variables[VAR3_NAME] unicode_strings = vs_alt[:] fancy_indexed = unicode_strings[0][[1,2,4]] assert", "= f.variables[\"vl\"] w[0:3] = np.arange(3, dtype=np.float64) v[0] # sometimes crashes", "\"yyyymmdd_hhmmss\" teststring[()] = stringout f.close() f = Dataset(FILE_NAME) assert f.variables['teststring'][:]", "is new # in Python 2.7) 
pass else: self.skip =", "= np.abs(data - self.data) assert(err.max() < nc['vl'].scale_factor) # turn off", "v[:] data2s = vs[:] for i in range(nlons): for j", "Dataset(FILE_NAME, 'w', format='NETCDF4') strtest.createDimension('tenstrings', 10) strtest.createVariable('tenstrings', str, ['tenstrings']) strtest['tenstrings'][np.int32(5)] =", "\"\"\"testing appending to vlen variables (issue #527).\"\"\" # workaround for", "'abcdef' f.close() class VlenAppendTestCase(unittest.TestCase): def setUp(self): import netCDF4 if netCDF4.__netcdf4libversion__", "nc.close() def tearDown(self): # Remove the temporary files os.remove(self.file) def", "# using assertRaisesRegext as a context manager # only works", "assert_array_equal(datas, vs_alt[:]) f.close() class TestInvalidDataType(unittest.TestCase): def runTest(self): f = Dataset(FILE_NAME,", "j in range(nlats): assert_array_equal(data2[j,i], data[j,i]) assert datas[j,i] == data2s[j,i] assert_array_equal(datas,", "strtest.createDimension('tenstrings', 10) strtest.createVariable('tenstrings', str, ['tenstrings']) strtest['tenstrings'][np.int32(5)] = 'asdf' strtest['tenstrings'][6.0] =", "nlons = 5; nlats = 5 VAR1_NAME = 'ragged' VAR2_NAME", "# only works with python >= 2.7 (issue #497) #with", "i in range(97,97+nn+1)]) data = np.reshape(data,(nlats,nlons)) datas = np.reshape(datas,(nlats,nlons)) class", "= f.variables[VAR1_NAME] vs = f.variables[VAR2_NAME] vs_alt = f.variables[VAR3_NAME] assert list(f.vltypes.keys())", "data2 = v[:] data2s = vs[:] for i in range(nlons):", "import os import tempfile from netCDF4 import Dataset import numpy", "'abc' assert fancy_indexed[1] == 'abcd' assert fancy_indexed[2] == 'abcdef' f.close()", "== 'abcdef' f.close() class VlenAppendTestCase(unittest.TestCase): def setUp(self): import netCDF4 if", "datas[n] = ''.join([chr(i) for i in range(97,97+nn+1)]) data = np.reshape(data,(nlats,nlons))", "= f.variables[VAR3_NAME] unicode_strings = vs_alt[:] fancy_indexed = unicode_strings[0][[1,2,4]] 
assert fancy_indexed[0]", "in range(nlats*nlons): nn = nn + 1 data[n] = np.arange(nn,dtype=VL_BASETYPE)", "crashes v[0].tolist() # sometimes crashes v[0].size # BOOM! f.close() class", "# BOOM! f.close() class Vlen_ScaledInts(unittest.TestCase): def setUp(self): self.file = FILE_NAME", "netCDF4 if netCDF4.__netcdf4libversion__ < \"4.4.1\": self.skip = True try: self.skipTest(\"This", "= unicode_strings[0][[1,2,4]] assert fancy_indexed[0] == 'abc' assert fancy_indexed[1] == 'abcd'", "= f.createVariable(VAR1_NAME, vlen_t,\\ (DIM2_NAME,DIM1_NAME)) strings = f.createVariable(VAR2_NAME, str, (DIM2_NAME,DIM1_NAME)) strings_alt", "strtest.createVariable('tenstrings', str, ['tenstrings']) strtest['tenstrings'][np.int32(5)] = 'asdf' strtest['tenstrings'][6.0] = 'asdf' strtest.close()", "nn + 1 data[n] = np.arange(nn,dtype=VL_BASETYPE) datas[n] = ''.join([chr(i) for", "DIM1_NAME)) strings_alt[:] = datas.astype(str) f.close() def tearDown(self): # Remove the", "data ragged[-1,-1] = data[-1,-1] strings[:] = datas strings[-2,-2] = datas[-2,-2]", "= f.createVLType(np.float64, 'vltest') f.createDimension('x', None) v = f.createVariable('vl', vlen_type, 'x')", "format='NETCDF4') strtest.createDimension('tenstrings', 10) strtest.createVariable('tenstrings', str, ['tenstrings']) strtest['tenstrings'][np.int32(5)] = 'asdf' strtest['tenstrings'][6.0]", "strings_alt[:] = datas.astype(str) f.close() def tearDown(self): # Remove the temporary", "True try: self.skipTest(\"This test requires NetCDF 4.4.1 or later.\") except", "between 1 and 1000 ilen = np.random.randint(1,1000,size=100) n = 0", "f = Dataset(self.file, 'r') v = f.variables[VAR1_NAME] vs = f.variables[VAR2_NAME]", "setUp(self): self.file = FILE_NAME f = Dataset(self.file,'w') vlen_t = f.createVLType(VL_BASETYPE,", "'vlen_type' VL_BASETYPE = np.int16 DIM1_NAME = 'lon' DIM2_NAME = 'lat'", "datas.astype(str) f.close() def tearDown(self): # Remove the temporary files os.remove(self.file)", "in range(nlats): 
assert_array_equal(data2[j,i], data[j,i]) assert datas[j,i] == data2s[j,i] assert_array_equal(datas, vs_alt[:])", "fancy_indexed[0] == 'abc' assert fancy_indexed[1] == 'abcd' assert fancy_indexed[2] ==", "of compression err = np.abs(data - self.data) assert(err.max() < nc['vl'].scale_factor)", "VlenAppendTestCase(unittest.TestCase): def setUp(self): import netCDF4 if netCDF4.__netcdf4libversion__ < \"4.4.1\": self.skip", "new # in Python 2.7) pass else: self.skip = False", "os.remove(self.file) def runTest(self): \"\"\"testing packing float vlens as scaled integers", "np.arange(nn,dtype=VL_BASETYPE) datas[n] = ''.join([chr(i) for i in range(97,97+nn+1)]) data =", "except ValueError: pass f.close() os.remove(FILE_NAME) class TestScalarVlenString(unittest.TestCase): # issue 333", "datas = np.empty(nlats*nlons,object) nn = 0 for n in range(nlats*nlons):", "2.7 (issue #497) #with self.assertRaisesRegexp(ValueError, 'strings are only supported'): #", "string_scalar = f.createVariable(VAR4_NAME,str,()) vlen_scalar = f.createVariable(VAR5_NAME,vlen_t,()) ragged[:] = data ragged[-1,-1]", "str, (DIM2_NAME,DIM1_NAME)) strings_alt = f.createVariable(VAR3_NAME, datas.astype(str).dtype, (DIM2_NAME, DIM1_NAME)) string_scalar =", "Dataset(FILE_NAME, 'w', format='NETCDF4') teststring = f.createVariable('teststring', str) stringout = \"yyyymmdd_hhmmss\"", "f.variables[\"vl\"] w[0:3] = np.arange(3, dtype=np.float64) v[0] # sometimes crashes v[0].tolist()", "def runTest(self): f = Dataset(FILE_NAME, 'w', format='NETCDF3_CLASSIC') f.createDimension('x', 1) #", "f.createVariable(VAR3_NAME, datas.astype(str).dtype, (DIM2_NAME, DIM1_NAME)) string_scalar = f.createVariable(VAR4_NAME,str,()) vlen_scalar = f.createVariable(VAR5_NAME,vlen_t,())", "f.createDimension('x', None) v = f.createVariable('vl', vlen_type, 'x') w = f.createVariable('vl2',", "# sometimes crashes v[0].size # BOOM! 
f.close() class Vlen_ScaledInts(unittest.TestCase): def", "= 'asdf' strtest['tenstrings'][6.0] = 'asdf' strtest.close() f = Dataset(FILE_NAME) assert", "requires NetCDF 4.4.1 or later.\") except AttributeError: # workaround for", "crashes v[0].size # BOOM! f.close() class Vlen_ScaledInts(unittest.TestCase): def setUp(self): self.file", "f.createDimension(DIM1_NAME,nlons) f.createDimension(DIM2_NAME,nlats) ragged = f.createVariable(VAR1_NAME, vlen_t,\\ (DIM2_NAME,DIM1_NAME)) strings = f.createVariable(VAR2_NAME,", "== VL_BASETYPE assert f.variables['string_scalar'][...] == 'foo' assert_array_equal(f.variables['vlen_scalar'][...],np.array([1,2,3],np.int16)) data2 = v[:]", "vs_alt = f.variables[VAR3_NAME] assert list(f.vltypes.keys()) == [VL_NAME] assert f.vltypes[VL_NAME].dtype ==", "def runTest(self): \"\"\"testing vlen variables\"\"\" f = Dataset(self.file, 'r') vs_alt", "os.remove(self.file) def runTest(self): \"\"\"testing vlen variables\"\"\" f = Dataset(self.file, 'r')", "f.variables['teststring'][:] == stringout f.close() os.remove(FILE_NAME) class TestIntegerIndex(unittest.TestCase): # issue 526", "stringout f.close() f = Dataset(FILE_NAME) assert f.variables['teststring'][:] == stringout f.close()", "import assert_array_equal FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name VL_NAME = 'vlen_type' VL_BASETYPE", "= f.createVariable(VAR3_NAME, datas.astype(str).dtype, (DIM2_NAME, DIM1_NAME)) strings_alt[:] = datas.astype(str) f.close() def", "f.createVLType(np.float64, 'vltest') f.createDimension('x', None) v = f.createVariable('vl', vlen_type, 'x') w", "range(nlons): for j in range(nlats): assert_array_equal(data2[j,i], data[j,i]) assert datas[j,i] ==", "ValueError: pass f.close() os.remove(FILE_NAME) class TestScalarVlenString(unittest.TestCase): # issue 333 def", "ragged[:] = data ragged[-1,-1] = data[-1,-1] strings[:] = datas strings[-2,-2]", "# check max error of compression err = np.abs(data -", "self.file = FILE_NAME f = 
Dataset(self.file, 'w') vlen_type = f.createVLType(np.float64,", "using assertRaisesRegext as a context manager # only works with", "'a') w = f.variables[\"vl2\"] v = f.variables[\"vl\"] w[0:3] = np.arange(3,", "strings_alt = f.createVariable(VAR3_NAME, datas.astype(str).dtype, (DIM2_NAME, DIM1_NAME)) strings_alt[:] = datas.astype(str) f.close()", "['tenstrings']) strtest['tenstrings'][np.int32(5)] = 'asdf' strtest['tenstrings'][6.0] = 'asdf' strtest.close() f =", "import tempfile from netCDF4 import Dataset import numpy as np", "os.remove(FILE_NAME) class TestScalarVlenString(unittest.TestCase): # issue 333 def runTest(self): f =", "assert f.variables['tenstrings'][np.int32(5)] == 'asdf' assert f.variables['tenstrings'][6.0] == 'asdf' f.close() os.remove(FILE_NAME)", "teststring = f.createVariable('teststring', str) stringout = \"yyyymmdd_hhmmss\" teststring[()] = stringout", "np.empty(nlats*nlons,object) nn = 0 for n in range(nlats*nlons): nn =", "f.close() f = Dataset(FILE_NAME) assert f.variables['teststring'][:] == stringout f.close() os.remove(FILE_NAME)", "self.skipTest(\"This test requires NetCDF 4.4.1 or later.\") except AttributeError: #", "f = Dataset(self.file,'w') vlen_t = f.createVLType(VL_BASETYPE, VL_NAME) f.createDimension(DIM1_NAME,nlons) f.createDimension(DIM2_NAME,nlats) strings_alt", "workaround for Python 2.6 (skipTest(reason) is new # in Python", "v[n] = data if n==99: self.data = data n +=", "TestScalarVlenString(unittest.TestCase): # issue 333 def runTest(self): f = Dataset(FILE_NAME, 'w',", "or later.\") except AttributeError: # workaround for Python 2.6 (skipTest(reason)", "= Dataset(FILE_NAME, 'w', format='NETCDF4') strtest.createDimension('tenstrings', 10) strtest.createVariable('tenstrings', str, ['tenstrings']) strtest['tenstrings'][np.int32(5)]", "f.variables[\"vl2\"] v = f.variables[\"vl\"] w[0:3] = np.arange(3, dtype=np.float64) v[0] #", "f.close() def tearDown(self): # Remove the temporary files os.remove(self.file) def", 
"AttributeError: # workaround for Python 2.6 (skipTest(reason) is new #", "tearDown(self): # Remove the temporary files os.remove(self.file) def runTest(self): \"\"\"testing", "nc = Dataset(self.file, 'w') vlen_type = nc.createVLType(np.uint8, 'vltest') nc.createDimension('x', None)", "\"4.4.1\": self.skip = True try: self.skipTest(\"This test requires NetCDF 4.4.1", "= FILE_NAME f = Dataset(self.file, 'w') vlen_type = f.createVLType(np.float64, 'vltest')", "sys import unittest import os import tempfile from netCDF4 import", "n in range(nlats*nlons): nn = nn + 1 data[n] =", "assert f.variables['string_scalar'][...] == 'foo' assert_array_equal(f.variables['vlen_scalar'][...],np.array([1,2,3],np.int16)) data2 = v[:] data2s =", "1000 ilen = np.random.randint(1,1000,size=100) n = 0 for nlen in", "assert fancy_indexed[2] == 'abcdef' f.close() class VlenAppendTestCase(unittest.TestCase): def setUp(self): import", "DIM1_NAME)) string_scalar = f.createVariable(VAR4_NAME,str,()) vlen_scalar = f.createVariable(VAR5_NAME,vlen_t,()) ragged[:] = data", "= 'asdf' strtest.close() f = Dataset(FILE_NAME) assert f.variables['tenstrings'][np.int32(5)] == 'asdf'", "v.missing_value=np.array(255,np.uint8) # random lengths between 1 and 1000 ilen =", "# random lengths between 1 and 1000 ilen = np.random.randint(1,1000,size=100)", "'string_scalar' VAR5_NAME = 'vlen_scalar' data = np.empty(nlats*nlons,object) datas = np.empty(nlats*nlons,object)", "'w', format='NETCDF3_CLASSIC') f.createDimension('x', 1) # using assertRaisesRegext as a context", "1./254. 
v.missing_value=np.array(255,np.uint8) # random lengths between 1 and 1000 ilen", "= np.int16 DIM1_NAME = 'lon' DIM2_NAME = 'lat' nlons =", "= f.createVariable(VAR4_NAME,str,()) vlen_scalar = f.createVariable(VAR5_NAME,vlen_t,()) ragged[:] = data ragged[-1,-1] =", "as a context manager # only works with python >=", "'w', format='NETCDF4') teststring = f.createVariable('teststring', str) stringout = \"yyyymmdd_hhmmss\" teststring[()]", "f.createVLType(VL_BASETYPE, VL_NAME) f.createDimension(DIM1_NAME,nlons) f.createDimension(DIM2_NAME,nlats) ragged = f.createVariable(VAR1_NAME, vlen_t,\\ (DIM2_NAME,DIM1_NAME)) strings", "# issue 526 def runTest(self): strtest = Dataset(FILE_NAME, 'w', format='NETCDF4')", "later.\") except AttributeError: # workaround for Python 2.6 (skipTest(reason) is", "check max error of compression err = np.abs(data - self.data)", "(DIM2_NAME, DIM1_NAME)) strings_alt[:] = datas.astype(str) f.close() def tearDown(self): # Remove", "f.variables['tenstrings'][6.0] == 'asdf' f.close() os.remove(FILE_NAME) class TestObjectArrayIndexing(unittest.TestCase): def setUp(self): self.file", "np.int16 DIM1_NAME = 'lon' DIM2_NAME = 'lat' nlons = 5;", "self.skip = True try: self.skipTest(\"This test requires NetCDF 4.4.1 or", "= f.variables[\"vl2\"] v = f.variables[\"vl\"] w[0:3] = np.arange(3, dtype=np.float64) v[0]", "#with self.assertRaisesRegexp(ValueError, 'strings are only supported'): # f.createVariable('foo', str, ('x',))", ">= 2.7 (issue #497) #with self.assertRaisesRegexp(ValueError, 'strings are only supported'):", "data[n] = np.arange(nn,dtype=VL_BASETYPE) datas[n] = ''.join([chr(i) for i in range(97,97+nn+1)])", "workaround for Python 2.6 if self.skip: return f = Dataset(self.file,", "VAR4_NAME = 'string_scalar' VAR5_NAME = 'vlen_scalar' data = np.empty(nlats*nlons,object) datas", "issue 333 def runTest(self): f = Dataset(FILE_NAME, 'w', format='NETCDF4') teststring", "class TestIntegerIndex(unittest.TestCase): # issue 526 def runTest(self): 
strtest = Dataset(FILE_NAME,", "v[0].size # BOOM! f.close() class Vlen_ScaledInts(unittest.TestCase): def setUp(self): self.file =", "assert_array_equal FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name VL_NAME = 'vlen_type' VL_BASETYPE =", "and 1000 ilen = np.random.randint(1,1000,size=100) n = 0 for nlen", "= data n += 1 nc.close() def tearDown(self): # Remove", "1 data[n] = np.arange(nn,dtype=VL_BASETYPE) datas[n] = ''.join([chr(i) for i in", "nc.set_auto_maskandscale(False) data = nc['vl'][-1] assert(data[-1] == np.around(self.data[-1]/nc['vl'].scale_factor)) nc.close() if __name__", "temporary files os.remove(self.file) def runTest(self): \"\"\"testing vlen variables\"\"\" f =", "np.reshape(datas,(nlats,nlons)) class VariablesTestCase(unittest.TestCase): def setUp(self): self.file = FILE_NAME f =", "= 'ragged' VAR2_NAME = 'strings' VAR3_NAME = 'strings_alt' VAR4_NAME =", "= datas[-2,-2] strings_alt[:] = datas.astype(str) string_scalar[...] = 'foo' #issue458 vlen_scalar[...]", "# Remove the temporary files os.remove(self.file) def runTest(self): \"\"\"testing packing", "class VariablesTestCase(unittest.TestCase): def setUp(self): self.file = FILE_NAME f = Dataset(self.file,'w')", "variables\"\"\" f = Dataset(self.file, 'r') vs_alt = f.variables[VAR3_NAME] unicode_strings =", "os import tempfile from netCDF4 import Dataset import numpy as", "unittest import os import tempfile from netCDF4 import Dataset import", "data = np.random.uniform(low=0.0, high=1.0, size=nlen) v[n] = data if n==99:", "# workaround for Python 2.6 (skipTest(reason) is new # in", "sometimes crashes v[0].size # BOOM! f.close() class Vlen_ScaledInts(unittest.TestCase): def setUp(self):", "FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name VL_NAME = 'vlen_type' VL_BASETYPE = np.int16", "#issue458 vlen_scalar[...] 
= np.array([1,2,3],np.int16) f.close() def tearDown(self): # Remove the", "'strings' VAR3_NAME = 'strings_alt' VAR4_NAME = 'string_scalar' VAR5_NAME = 'vlen_scalar'", "vlen_t = f.createVLType(VL_BASETYPE, VL_NAME) f.createDimension(DIM1_NAME,nlons) f.createDimension(DIM2_NAME,nlats) strings_alt = f.createVariable(VAR3_NAME, datas.astype(str).dtype,", "f.close() os.remove(FILE_NAME) class TestObjectArrayIndexing(unittest.TestCase): def setUp(self): self.file = FILE_NAME f", "f.createVariable('vl2', np.float64, 'x') f.close() def tearDown(self): # Remove the temporary", "try: f.createVariable('foo', str, ('x',)) except ValueError: pass f.close() os.remove(FILE_NAME) class", "str, ['tenstrings']) strtest['tenstrings'][np.int32(5)] = 'asdf' strtest['tenstrings'][6.0] = 'asdf' strtest.close() f", "Dataset(self.file, 'w') vlen_type = nc.createVLType(np.uint8, 'vltest') nc.createDimension('x', None) v =", "issue 526 def runTest(self): strtest = Dataset(FILE_NAME, 'w', format='NETCDF4') strtest.createDimension('tenstrings',", "f = Dataset(self.file, 'w') vlen_type = f.createVLType(np.float64, 'vltest') f.createDimension('x', None)", "= Dataset(self.file, 'w') vlen_type = f.createVLType(np.float64, 'vltest') f.createDimension('x', None) v", "TestObjectArrayIndexing(unittest.TestCase): def setUp(self): self.file = FILE_NAME f = Dataset(self.file,'w') vlen_t", "FILE_NAME f = Dataset(self.file,'w') vlen_t = f.createVLType(VL_BASETYPE, VL_NAME) f.createDimension(DIM1_NAME,nlons) f.createDimension(DIM2_NAME,nlats)", "data = nc['vl'][-1] assert(data[-1] == np.around(self.data[-1]/nc['vl'].scale_factor)) nc.close() if __name__ ==", "self.file = FILE_NAME nc = Dataset(self.file, 'w') vlen_type = nc.createVLType(np.uint8,", "test requires NetCDF 4.4.1 or later.\") except AttributeError: # workaround", "turn off auto-scaling nc.set_auto_maskandscale(False) data = nc['vl'][-1] assert(data[-1] == np.around(self.data[-1]/nc['vl'].scale_factor))", "f.createVariable('teststring', str) 
stringout = \"yyyymmdd_hhmmss\" teststring[()] = stringout f.close() f", "ragged = f.createVariable(VAR1_NAME, vlen_t,\\ (DIM2_NAME,DIM1_NAME)) strings = f.createVariable(VAR2_NAME, str, (DIM2_NAME,DIM1_NAME))", "# sometimes crashes v[0].tolist() # sometimes crashes v[0].size # BOOM!", "f = Dataset(FILE_NAME) assert f.variables['tenstrings'][np.int32(5)] == 'asdf' assert f.variables['tenstrings'][6.0] ==", "runTest(self): \"\"\"testing packing float vlens as scaled integers (issue #1003).\"\"\"", "f = Dataset(self.file,'w') vlen_t = f.createVLType(VL_BASETYPE, VL_NAME) f.createDimension(DIM1_NAME,nlons) f.createDimension(DIM2_NAME,nlats) ragged", "w[0:3] = np.arange(3, dtype=np.float64) v[0] # sometimes crashes v[0].tolist() #", "data = nc['vl'][-1] # check max error of compression err", "= v[:] data2s = vs[:] for i in range(nlons): for", "'vlen_scalar' data = np.empty(nlats*nlons,object) datas = np.empty(nlats*nlons,object) nn = 0", "DIM1_NAME = 'lon' DIM2_NAME = 'lat' nlons = 5; nlats", "from numpy.testing import assert_array_equal FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name VL_NAME =", "pass f.close() os.remove(FILE_NAME) class TestScalarVlenString(unittest.TestCase): # issue 333 def runTest(self):", "variables\"\"\" f = Dataset(self.file, 'r') v = f.variables[VAR1_NAME] vs =", "data = np.reshape(data,(nlats,nlons)) datas = np.reshape(datas,(nlats,nlons)) class VariablesTestCase(unittest.TestCase): def setUp(self):", "(DIM2_NAME, DIM1_NAME)) string_scalar = f.createVariable(VAR4_NAME,str,()) vlen_scalar = f.createVariable(VAR5_NAME,vlen_t,()) ragged[:] =", "vs[:] for i in range(nlons): for j in range(nlats): assert_array_equal(data2[j,i],", "integers (issue #1003).\"\"\" nc = Dataset(self.file) data = nc['vl'][-1] #", "class TestObjectArrayIndexing(unittest.TestCase): def setUp(self): self.file = FILE_NAME f = Dataset(self.file,'w')", "'foo' #issue458 vlen_scalar[...] 
= np.array([1,2,3],np.int16) f.close() def tearDown(self): # Remove", "< nc['vl'].scale_factor) # turn off auto-scaling nc.set_auto_maskandscale(False) data = nc['vl'][-1]", "= datas strings[-2,-2] = datas[-2,-2] strings_alt[:] = datas.astype(str) string_scalar[...] =", "= FILE_NAME f = Dataset(self.file,'w') vlen_t = f.createVLType(VL_BASETYPE, VL_NAME) f.createDimension(DIM1_NAME,nlons)", "else: self.skip = False self.file = FILE_NAME f = Dataset(self.file,", "nc.createVLType(np.uint8, 'vltest') nc.createDimension('x', None) v = nc.createVariable('vl', vlen_type, 'x') v.scale_factor", "import sys import unittest import os import tempfile from netCDF4", "f = Dataset(self.file, 'a') w = f.variables[\"vl2\"] v = f.variables[\"vl\"]", "Dataset(self.file, 'r') v = f.variables[VAR1_NAME] vs = f.variables[VAR2_NAME] vs_alt =", "#527).\"\"\" # workaround for Python 2.6 if self.skip: return f", "526 def runTest(self): strtest = Dataset(FILE_NAME, 'w', format='NETCDF4') strtest.createDimension('tenstrings', 10)", "f = Dataset(self.file, 'r') vs_alt = f.variables[VAR3_NAME] unicode_strings = vs_alt[:]", "\"\"\"testing packing float vlens as scaled integers (issue #1003).\"\"\" nc", "Dataset(self.file) data = nc['vl'][-1] # check max error of compression", "= FILE_NAME nc = Dataset(self.file, 'w') vlen_type = nc.createVLType(np.uint8, 'vltest')", "f.close() class VlenAppendTestCase(unittest.TestCase): def setUp(self): import netCDF4 if netCDF4.__netcdf4libversion__ <", "v[0].tolist() # sometimes crashes v[0].size # BOOM! f.close() class Vlen_ScaledInts(unittest.TestCase):", "= 'foo' #issue458 vlen_scalar[...] 
= np.array([1,2,3],np.int16) f.close() def tearDown(self): #", "str) stringout = \"yyyymmdd_hhmmss\" teststring[()] = stringout f.close() f =", "runTest(self): f = Dataset(FILE_NAME, 'w', format='NETCDF3_CLASSIC') f.createDimension('x', 1) # using", "os.remove(self.file) def runTest(self): \"\"\"testing appending to vlen variables (issue #527).\"\"\"", "('x',)) try: f.createVariable('foo', str, ('x',)) except ValueError: pass f.close() os.remove(FILE_NAME)", "f = Dataset(FILE_NAME, 'w', format='NETCDF3_CLASSIC') f.createDimension('x', 1) # using assertRaisesRegext", "in range(97,97+nn+1)]) data = np.reshape(data,(nlats,nlons)) datas = np.reshape(datas,(nlats,nlons)) class VariablesTestCase(unittest.TestCase):", "v = f.variables[VAR1_NAME] vs = f.variables[VAR2_NAME] vs_alt = f.variables[VAR3_NAME] assert", "strings[:] = datas strings[-2,-2] = datas[-2,-2] strings_alt[:] = datas.astype(str) string_scalar[...]", "= f.createVariable('vl2', np.float64, 'x') f.close() def tearDown(self): # Remove the", "if netCDF4.__netcdf4libversion__ < \"4.4.1\": self.skip = True try: self.skipTest(\"This test", "for Python 2.6 (skipTest(reason) is new # in Python 2.7)", "supported'): # f.createVariable('foo', str, ('x',)) try: f.createVariable('foo', str, ('x',)) except", "nc.createVariable('vl', vlen_type, 'x') v.scale_factor = 1./254. 
v.missing_value=np.array(255,np.uint8) # random lengths", "= f.createVariable('teststring', str) stringout = \"yyyymmdd_hhmmss\" teststring[()] = stringout f.close()", "import numpy as np from numpy.testing import assert_array_equal FILE_NAME =", "'ragged' VAR2_NAME = 'strings' VAR3_NAME = 'strings_alt' VAR4_NAME = 'string_scalar'", "ragged[-1,-1] = data[-1,-1] strings[:] = datas strings[-2,-2] = datas[-2,-2] strings_alt[:]", "strtest.close() f = Dataset(FILE_NAME) assert f.variables['tenstrings'][np.int32(5)] == 'asdf' assert f.variables['tenstrings'][6.0]", "Python 2.7) pass else: self.skip = False self.file = FILE_NAME", "= np.empty(nlats*nlons,object) datas = np.empty(nlats*nlons,object) nn = 0 for n", "range(97,97+nn+1)]) data = np.reshape(data,(nlats,nlons)) datas = np.reshape(datas,(nlats,nlons)) class VariablesTestCase(unittest.TestCase): def", "in range(nlons): for j in range(nlats): assert_array_equal(data2[j,i], data[j,i]) assert datas[j,i]", "for i in range(nlons): for j in range(nlats): assert_array_equal(data2[j,i], data[j,i])", "strings_alt[:] = datas.astype(str) string_scalar[...] = 'foo' #issue458 vlen_scalar[...] 
= np.array([1,2,3],np.int16)", "= False self.file = FILE_NAME f = Dataset(self.file, 'w') vlen_type", "(issue #497) #with self.assertRaisesRegexp(ValueError, 'strings are only supported'): # f.createVariable('foo',", "#497) #with self.assertRaisesRegexp(ValueError, 'strings are only supported'): # f.createVariable('foo', str,", "f.createVariable('foo', str, ('x',)) except ValueError: pass f.close() os.remove(FILE_NAME) class TestScalarVlenString(unittest.TestCase):", "= data[-1,-1] strings[:] = datas strings[-2,-2] = datas[-2,-2] strings_alt[:] =", "datas = np.reshape(datas,(nlats,nlons)) class VariablesTestCase(unittest.TestCase): def setUp(self): self.file = FILE_NAME", "v = f.createVariable('vl', vlen_type, 'x') w = f.createVariable('vl2', np.float64, 'x')", "variables (issue #527).\"\"\" # workaround for Python 2.6 if self.skip:", "'strings_alt' VAR4_NAME = 'string_scalar' VAR5_NAME = 'vlen_scalar' data = np.empty(nlats*nlons,object)", "(issue #1003).\"\"\" nc = Dataset(self.file) data = nc['vl'][-1] # check", "to vlen variables (issue #527).\"\"\" # workaround for Python 2.6", "datas.astype(str) string_scalar[...] = 'foo' #issue458 vlen_scalar[...] = np.array([1,2,3],np.int16) f.close() def", "== [VL_NAME] assert f.vltypes[VL_NAME].dtype == VL_BASETYPE assert f.variables['string_scalar'][...] 
== 'foo'", "'w', format='NETCDF4') strtest.createDimension('tenstrings', 10) strtest.createVariable('tenstrings', str, ['tenstrings']) strtest['tenstrings'][np.int32(5)] = 'asdf'", "= 0 for nlen in ilen: data = np.random.uniform(low=0.0, high=1.0,", "netCDF4 import Dataset import numpy as np from numpy.testing import", "tempfile from netCDF4 import Dataset import numpy as np from", "f.close() class Vlen_ScaledInts(unittest.TestCase): def setUp(self): self.file = FILE_NAME nc =", "# Remove the temporary files os.remove(self.file) def runTest(self): \"\"\"testing appending", "np.random.randint(1,1000,size=100) n = 0 for nlen in ilen: data =", "'lat' nlons = 5; nlats = 5 VAR1_NAME = 'ragged'", "the temporary files os.remove(self.file) def runTest(self): \"\"\"testing appending to vlen", "= nn + 1 data[n] = np.arange(nn,dtype=VL_BASETYPE) datas[n] = ''.join([chr(i)", "# turn off auto-scaling nc.set_auto_maskandscale(False) data = nc['vl'][-1] assert(data[-1] ==", "data2s = vs[:] for i in range(nlons): for j in", "lengths between 1 and 1000 ilen = np.random.randint(1,1000,size=100) n =", "= 'string_scalar' VAR5_NAME = 'vlen_scalar' data = np.empty(nlats*nlons,object) datas =", "string_scalar[...] = 'foo' #issue458 vlen_scalar[...] 
= np.array([1,2,3],np.int16) f.close() def tearDown(self):", "os.remove(FILE_NAME) class TestObjectArrayIndexing(unittest.TestCase): def setUp(self): self.file = FILE_NAME f =", "vs_alt[:]) f.close() class TestInvalidDataType(unittest.TestCase): def runTest(self): f = Dataset(FILE_NAME, 'w',", "333 def runTest(self): f = Dataset(FILE_NAME, 'w', format='NETCDF4') teststring =", "= f.createVariable('vl', vlen_type, 'x') w = f.createVariable('vl2', np.float64, 'x') f.close()", "Remove the temporary files os.remove(self.file) def runTest(self): \"\"\"testing appending to", "if self.skip: return f = Dataset(self.file, 'a') w = f.variables[\"vl2\"]", "files os.remove(self.file) def runTest(self): \"\"\"testing packing float vlens as scaled", "assert f.variables['tenstrings'][6.0] == 'asdf' f.close() os.remove(FILE_NAME) class TestObjectArrayIndexing(unittest.TestCase): def setUp(self):", "= Dataset(self.file) data = nc['vl'][-1] # check max error of", "temporary files os.remove(self.file) def runTest(self): \"\"\"testing appending to vlen variables", "TestInvalidDataType(unittest.TestCase): def runTest(self): f = Dataset(FILE_NAME, 'w', format='NETCDF3_CLASSIC') f.createDimension('x', 1)", "= f.createVLType(VL_BASETYPE, VL_NAME) f.createDimension(DIM1_NAME,nlons) f.createDimension(DIM2_NAME,nlats) strings_alt = f.createVariable(VAR3_NAME, datas.astype(str).dtype, (DIM2_NAME,", "np.arange(3, dtype=np.float64) v[0] # sometimes crashes v[0].tolist() # sometimes crashes", "'vltest') nc.createDimension('x', None) v = nc.createVariable('vl', vlen_type, 'x') v.scale_factor =", "vlen_type, 'x') w = f.createVariable('vl2', np.float64, 'x') f.close() def tearDown(self):", "vlen_type = nc.createVLType(np.uint8, 'vltest') nc.createDimension('x', None) v = nc.createVariable('vl', vlen_type,", "setUp(self): import netCDF4 if netCDF4.__netcdf4libversion__ < \"4.4.1\": self.skip = True", "\"\"\"testing vlen variables\"\"\" f = Dataset(self.file, 'r') vs_alt = f.variables[VAR3_NAME]", 
"np.random.uniform(low=0.0, high=1.0, size=nlen) v[n] = data if n==99: self.data =", "w = f.createVariable('vl2', np.float64, 'x') f.close() def tearDown(self): # Remove", "def runTest(self): \"\"\"testing packing float vlens as scaled integers (issue", "= Dataset(FILE_NAME) assert f.variables['teststring'][:] == stringout f.close() os.remove(FILE_NAME) class TestIntegerIndex(unittest.TestCase):", "5; nlats = 5 VAR1_NAME = 'ragged' VAR2_NAME = 'strings'", "VAR3_NAME = 'strings_alt' VAR4_NAME = 'string_scalar' VAR5_NAME = 'vlen_scalar' data", "(skipTest(reason) is new # in Python 2.7) pass else: self.skip", "context manager # only works with python >= 2.7 (issue", "= \"yyyymmdd_hhmmss\" teststring[()] = stringout f.close() f = Dataset(FILE_NAME) assert", "Remove the temporary files os.remove(self.file) def runTest(self): \"\"\"testing vlen variables\"\"\"", "datas strings[-2,-2] = datas[-2,-2] strings_alt[:] = datas.astype(str) string_scalar[...] = 'foo'", "= 'lat' nlons = 5; nlats = 5 VAR1_NAME =", "VL_BASETYPE assert f.variables['string_scalar'][...] 
== 'foo' assert_array_equal(f.variables['vlen_scalar'][...],np.array([1,2,3],np.int16)) data2 = v[:] data2s", "== 'asdf' assert f.variables['tenstrings'][6.0] == 'asdf' f.close() os.remove(FILE_NAME) class TestObjectArrayIndexing(unittest.TestCase):", "= 'lon' DIM2_NAME = 'lat' nlons = 5; nlats =", "teststring[()] = stringout f.close() f = Dataset(FILE_NAME) assert f.variables['teststring'][:] ==", "scaled integers (issue #1003).\"\"\" nc = Dataset(self.file) data = nc['vl'][-1]", "np.reshape(data,(nlats,nlons)) datas = np.reshape(datas,(nlats,nlons)) class VariablesTestCase(unittest.TestCase): def setUp(self): self.file =", "assert fancy_indexed[1] == 'abcd' assert fancy_indexed[2] == 'abcdef' f.close() class", "= f.createVLType(VL_BASETYPE, VL_NAME) f.createDimension(DIM1_NAME,nlons) f.createDimension(DIM2_NAME,nlats) ragged = f.createVariable(VAR1_NAME, vlen_t,\\ (DIM2_NAME,DIM1_NAME))", "= Dataset(self.file,'w') vlen_t = f.createVLType(VL_BASETYPE, VL_NAME) f.createDimension(DIM1_NAME,nlons) f.createDimension(DIM2_NAME,nlats) ragged =", "def tearDown(self): # Remove the temporary files os.remove(self.file) def runTest(self):", "only works with python >= 2.7 (issue #497) #with self.assertRaisesRegexp(ValueError,", "self.file = FILE_NAME f = Dataset(self.file,'w') vlen_t = f.createVLType(VL_BASETYPE, VL_NAME)", "= np.arange(3, dtype=np.float64) v[0] # sometimes crashes v[0].tolist() # sometimes", "in ilen: data = np.random.uniform(low=0.0, high=1.0, size=nlen) v[n] = data", "f = Dataset(FILE_NAME, 'w', format='NETCDF4') teststring = f.createVariable('teststring', str) stringout", "'r') vs_alt = f.variables[VAR3_NAME] unicode_strings = vs_alt[:] fancy_indexed = unicode_strings[0][[1,2,4]]", "in Python 2.7) pass else: self.skip = False self.file =", "''.join([chr(i) for i in range(97,97+nn+1)]) data = np.reshape(data,(nlats,nlons)) datas =", "\"\"\"testing vlen variables\"\"\" f = Dataset(self.file, 'r') v = f.variables[VAR1_NAME]", "assert(err.max() < 
nc['vl'].scale_factor) # turn off auto-scaling nc.set_auto_maskandscale(False) data =", "strings_alt = f.createVariable(VAR3_NAME, datas.astype(str).dtype, (DIM2_NAME, DIM1_NAME)) string_scalar = f.createVariable(VAR4_NAME,str,()) vlen_scalar", "vs = f.variables[VAR2_NAME] vs_alt = f.variables[VAR3_NAME] assert list(f.vltypes.keys()) == [VL_NAME]", "= data if n==99: self.data = data n += 1", "def setUp(self): self.file = FILE_NAME f = Dataset(self.file,'w') vlen_t =", "= np.random.uniform(low=0.0, high=1.0, size=nlen) v[n] = data if n==99: self.data", "= Dataset(self.file, 'r') v = f.variables[VAR1_NAME] vs = f.variables[VAR2_NAME] vs_alt", "i in range(nlons): for j in range(nlats): assert_array_equal(data2[j,i], data[j,i]) assert", "np.empty(nlats*nlons,object) datas = np.empty(nlats*nlons,object) nn = 0 for n in", "f.createVariable(VAR3_NAME, datas.astype(str).dtype, (DIM2_NAME, DIM1_NAME)) strings_alt[:] = datas.astype(str) f.close() def tearDown(self):", "def runTest(self): \"\"\"testing appending to vlen variables (issue #527).\"\"\" #", "VL_NAME) f.createDimension(DIM1_NAME,nlons) f.createDimension(DIM2_NAME,nlats) strings_alt = f.createVariable(VAR3_NAME, datas.astype(str).dtype, (DIM2_NAME, DIM1_NAME)) strings_alt[:]", "= nc.createVLType(np.uint8, 'vltest') nc.createDimension('x', None) v = nc.createVariable('vl', vlen_type, 'x')", "self.data = data n += 1 nc.close() def tearDown(self): #", "f.createVariable('foo', str, ('x',)) try: f.createVariable('foo', str, ('x',)) except ValueError: pass", "Dataset(FILE_NAME, 'w', format='NETCDF3_CLASSIC') f.createDimension('x', 1) # using assertRaisesRegext as a", "= 'vlen_type' VL_BASETYPE = np.int16 DIM1_NAME = 'lon' DIM2_NAME =", "auto-scaling nc.set_auto_maskandscale(False) data = nc['vl'][-1] assert(data[-1] == np.around(self.data[-1]/nc['vl'].scale_factor)) nc.close() if", "np.array([1,2,3],np.int16) f.close() def tearDown(self): # Remove the temporary files os.remove(self.file)", "= stringout f.close() f = 
Dataset(FILE_NAME) assert f.variables['teststring'][:] == stringout", "a context manager # only works with python >= 2.7", "pass else: self.skip = False self.file = FILE_NAME f =", "assert datas[j,i] == data2s[j,i] assert_array_equal(datas, vs_alt[:]) f.close() class TestInvalidDataType(unittest.TestCase): def", "import netCDF4 if netCDF4.__netcdf4libversion__ < \"4.4.1\": self.skip = True try:", "self.assertRaisesRegexp(ValueError, 'strings are only supported'): # f.createVariable('foo', str, ('x',)) try:", "vlen_t,\\ (DIM2_NAME,DIM1_NAME)) strings = f.createVariable(VAR2_NAME, str, (DIM2_NAME,DIM1_NAME)) strings_alt = f.createVariable(VAR3_NAME,", "Python 2.6 (skipTest(reason) is new # in Python 2.7) pass", "FILE_NAME f = Dataset(self.file, 'w') vlen_type = f.createVLType(np.float64, 'vltest') f.createDimension('x',", "# in Python 2.7) pass else: self.skip = False self.file", "Dataset(self.file, 'a') w = f.variables[\"vl2\"] v = f.variables[\"vl\"] w[0:3] =", "random lengths between 1 and 1000 ilen = np.random.randint(1,1000,size=100) n", "1) # using assertRaisesRegext as a context manager # only", "assert list(f.vltypes.keys()) == [VL_NAME] assert f.vltypes[VL_NAME].dtype == VL_BASETYPE assert f.variables['string_scalar'][...]", "VL_NAME) f.createDimension(DIM1_NAME,nlons) f.createDimension(DIM2_NAME,nlats) ragged = f.createVariable(VAR1_NAME, vlen_t,\\ (DIM2_NAME,DIM1_NAME)) strings =", "= Dataset(FILE_NAME, 'w', format='NETCDF3_CLASSIC') f.createDimension('x', 1) # using assertRaisesRegext as", "assert_array_equal(data2[j,i], data[j,i]) assert datas[j,i] == data2s[j,i] assert_array_equal(datas, vs_alt[:]) f.close() class", "unicode_strings[0][[1,2,4]] assert fancy_indexed[0] == 'abc' assert fancy_indexed[1] == 'abcd' assert", "Dataset(FILE_NAME) assert f.variables['teststring'][:] == stringout f.close() os.remove(FILE_NAME) class TestIntegerIndex(unittest.TestCase): #", "self.skip = False self.file = FILE_NAME f = Dataset(self.file, 'w')", 
"f.createVariable(VAR2_NAME, str, (DIM2_NAME,DIM1_NAME)) strings_alt = f.createVariable(VAR3_NAME, datas.astype(str).dtype, (DIM2_NAME, DIM1_NAME)) string_scalar", "import unittest import os import tempfile from netCDF4 import Dataset", "('x',)) except ValueError: pass f.close() os.remove(FILE_NAME) class TestScalarVlenString(unittest.TestCase): # issue", "assert_array_equal(f.variables['vlen_scalar'][...],np.array([1,2,3],np.int16)) data2 = v[:] data2s = vs[:] for i in", "manager # only works with python >= 2.7 (issue #497)", "from netCDF4 import Dataset import numpy as np from numpy.testing", "# f.createVariable('foo', str, ('x',)) try: f.createVariable('foo', str, ('x',)) except ValueError:", "nn = 0 for n in range(nlats*nlons): nn = nn", "TestIntegerIndex(unittest.TestCase): # issue 526 def runTest(self): strtest = Dataset(FILE_NAME, 'w',", "f.variables['tenstrings'][np.int32(5)] == 'asdf' assert f.variables['tenstrings'][6.0] == 'asdf' f.close() os.remove(FILE_NAME) class", "'w') vlen_type = f.createVLType(np.float64, 'vltest') f.createDimension('x', None) v = f.createVariable('vl',", "v.scale_factor = 1./254. 
v.missing_value=np.array(255,np.uint8) # random lengths between 1 and", "= np.reshape(data,(nlats,nlons)) datas = np.reshape(datas,(nlats,nlons)) class VariablesTestCase(unittest.TestCase): def setUp(self): self.file", "def runTest(self): \"\"\"testing vlen variables\"\"\" f = Dataset(self.file, 'r') v", "runTest(self): strtest = Dataset(FILE_NAME, 'w', format='NETCDF4') strtest.createDimension('tenstrings', 10) strtest.createVariable('tenstrings', str,", "for nlen in ilen: data = np.random.uniform(low=0.0, high=1.0, size=nlen) v[n]", "for Python 2.6 if self.skip: return f = Dataset(self.file, 'a')", "range(nlats): assert_array_equal(data2[j,i], data[j,i]) assert datas[j,i] == data2s[j,i] assert_array_equal(datas, vs_alt[:]) f.close()", "stringout = \"yyyymmdd_hhmmss\" teststring[()] = stringout f.close() f = Dataset(FILE_NAME)", "Remove the temporary files os.remove(self.file) def runTest(self): \"\"\"testing packing float", "= vs[:] for i in range(nlons): for j in range(nlats):", "delete=False).name VL_NAME = 'vlen_type' VL_BASETYPE = np.int16 DIM1_NAME = 'lon'", "f.createVariable(VAR5_NAME,vlen_t,()) ragged[:] = data ragged[-1,-1] = data[-1,-1] strings[:] = datas", "assertRaisesRegext as a context manager # only works with python", "max error of compression err = np.abs(data - self.data) assert(err.max()", "= datas.astype(str) f.close() def tearDown(self): # Remove the temporary files", "= 'vlen_scalar' data = np.empty(nlats*nlons,object) datas = np.empty(nlats*nlons,object) nn =", "list(f.vltypes.keys()) == [VL_NAME] assert f.vltypes[VL_NAME].dtype == VL_BASETYPE assert f.variables['string_scalar'][...] 
==", "'vltest') f.createDimension('x', None) v = f.createVariable('vl', vlen_type, 'x') w =", "are only supported'): # f.createVariable('foo', str, ('x',)) try: f.createVariable('foo', str,", "strtest['tenstrings'][6.0] = 'asdf' strtest.close() f = Dataset(FILE_NAME) assert f.variables['tenstrings'][np.int32(5)] ==", "= 'strings' VAR3_NAME = 'strings_alt' VAR4_NAME = 'string_scalar' VAR5_NAME =", "datas.astype(str).dtype, (DIM2_NAME, DIM1_NAME)) string_scalar = f.createVariable(VAR4_NAME,str,()) vlen_scalar = f.createVariable(VAR5_NAME,vlen_t,()) ragged[:]", "= 0 for n in range(nlats*nlons): nn = nn +", "'abcd' assert fancy_indexed[2] == 'abcdef' f.close() class VlenAppendTestCase(unittest.TestCase): def setUp(self):", "0 for n in range(nlats*nlons): nn = nn + 1", "class VlenAppendTestCase(unittest.TestCase): def setUp(self): import netCDF4 if netCDF4.__netcdf4libversion__ < \"4.4.1\":", "'x') f.close() def tearDown(self): # Remove the temporary files os.remove(self.file)", "except AttributeError: # workaround for Python 2.6 (skipTest(reason) is new", "'w') vlen_type = nc.createVLType(np.uint8, 'vltest') nc.createDimension('x', None) v = nc.createVariable('vl',", "f.vltypes[VL_NAME].dtype == VL_BASETYPE assert f.variables['string_scalar'][...] == 'foo' assert_array_equal(f.variables['vlen_scalar'][...],np.array([1,2,3],np.int16)) data2 =", "= f.variables[VAR3_NAME] assert list(f.vltypes.keys()) == [VL_NAME] assert f.vltypes[VL_NAME].dtype == VL_BASETYPE", "VL_BASETYPE = np.int16 DIM1_NAME = 'lon' DIM2_NAME = 'lat' nlons", "= np.empty(nlats*nlons,object) nn = 0 for n in range(nlats*nlons): nn", "= np.arange(nn,dtype=VL_BASETYPE) datas[n] = ''.join([chr(i) for i in range(97,97+nn+1)]) data", "data = np.empty(nlats*nlons,object) datas = np.empty(nlats*nlons,object) nn = 0 for", "BOOM! 
f.close() class Vlen_ScaledInts(unittest.TestCase): def setUp(self): self.file = FILE_NAME nc", "nn = nn + 1 data[n] = np.arange(nn,dtype=VL_BASETYPE) datas[n] =", "nlen in ilen: data = np.random.uniform(low=0.0, high=1.0, size=nlen) v[n] =", "n==99: self.data = data n += 1 nc.close() def tearDown(self):", "= f.createVariable(VAR5_NAME,vlen_t,()) ragged[:] = data ragged[-1,-1] = data[-1,-1] strings[:] =", "# issue 333 def runTest(self): f = Dataset(FILE_NAME, 'w', format='NETCDF4')", "Python 2.6 if self.skip: return f = Dataset(self.file, 'a') w", "f.createDimension(DIM1_NAME,nlons) f.createDimension(DIM2_NAME,nlats) strings_alt = f.createVariable(VAR3_NAME, datas.astype(str).dtype, (DIM2_NAME, DIM1_NAME)) strings_alt[:] =", "f.createDimension(DIM2_NAME,nlats) strings_alt = f.createVariable(VAR3_NAME, datas.astype(str).dtype, (DIM2_NAME, DIM1_NAME)) strings_alt[:] = datas.astype(str)", "runTest(self): \"\"\"testing vlen variables\"\"\" f = Dataset(self.file, 'r') vs_alt =", "class Vlen_ScaledInts(unittest.TestCase): def setUp(self): self.file = FILE_NAME nc = Dataset(self.file,", "= nc['vl'][-1] assert(data[-1] == np.around(self.data[-1]/nc['vl'].scale_factor)) nc.close() if __name__ == '__main__':", "VAR5_NAME = 'vlen_scalar' data = np.empty(nlats*nlons,object) datas = np.empty(nlats*nlons,object) nn", "DIM2_NAME = 'lat' nlons = 5; nlats = 5 VAR1_NAME", "only supported'): # f.createVariable('foo', str, ('x',)) try: f.createVariable('foo', str, ('x',))", "format='NETCDF4') teststring = f.createVariable('teststring', str) stringout = \"yyyymmdd_hhmmss\" teststring[()] =", "2.6 (skipTest(reason) is new # in Python 2.7) pass else:", "str, ('x',)) except ValueError: pass f.close() os.remove(FILE_NAME) class TestScalarVlenString(unittest.TestCase): #", "== 'foo' assert_array_equal(f.variables['vlen_scalar'][...],np.array([1,2,3],np.int16)) data2 = v[:] data2s = vs[:] for", "tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name VL_NAME = 'vlen_type' VL_BASETYPE = 
np.int16 DIM1_NAME =", "format='NETCDF3_CLASSIC') f.createDimension('x', 1) # using assertRaisesRegext as a context manager", "for i in range(97,97+nn+1)]) data = np.reshape(data,(nlats,nlons)) datas = np.reshape(datas,(nlats,nlons))", "= vs_alt[:] fancy_indexed = unicode_strings[0][[1,2,4]] assert fancy_indexed[0] == 'abc' assert", "f.close() os.remove(FILE_NAME) class TestIntegerIndex(unittest.TestCase): # issue 526 def runTest(self): strtest", "Dataset import numpy as np from numpy.testing import assert_array_equal FILE_NAME", "np from numpy.testing import assert_array_equal FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name VL_NAME", "f.variables[VAR1_NAME] vs = f.variables[VAR2_NAME] vs_alt = f.variables[VAR3_NAME] assert list(f.vltypes.keys()) ==", "class TestInvalidDataType(unittest.TestCase): def runTest(self): f = Dataset(FILE_NAME, 'w', format='NETCDF3_CLASSIC') f.createDimension('x',", "setUp(self): self.file = FILE_NAME nc = Dataset(self.file, 'w') vlen_type =", "class TestScalarVlenString(unittest.TestCase): # issue 333 def runTest(self): f = Dataset(FILE_NAME," ]
[ "once. >>> @snt.once ... def f(): ... print('Hello, world!') >>>", "def f(): ... print('Hello, world!') >>> f() Hello, world! >>>", "is only ever run once.\"\"\" if instance is None: #", "2.0 (the \"License\"); # you may not use this file", "if instance is None: # NOTE: We can't use the", "... def f(): ... print('Hello, world!') >>> f() Hello, world!", "if once_id not in seen: _check_no_output(wrapped(*args, **kwargs)) seen.add(once_id) wrapper.seen_none =", "= False decorated = wrapper(f) # pylint: disable=no-value-for-parameter,assignment-from-none decorated.__snt_once_wrapped__ =", "raised to the user. Next time the method is run,", "evaluate `f` the first time it is called. \"\"\" #", "is None: # NOTE: We can't use the weakset since", "run before. Args: f: A function to wrap which should", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "= True return # Get or set the `seen` set", "having run before. Args: f: A function to wrap which", "o2.f() Hello, world! >>> o.f() >>> o2.f() If an error", "is run, it will be treated as not having run", "this object. seen = getattr(instance, _ONCE_PROPERTY, None) if seen is", "decorated functions cannot return values\") def once(f): \"\"\"Decorator which ensures", "**kwargs)) wrapper.seen_none = True return # Get or set the", "or set the `seen` set for this object. seen =", "once.\"\"\" import uuid from sonnet.src import utils _ONCE_PROPERTY = \"_snt_once\"", "import uuid from sonnet.src import utils _ONCE_PROPERTY = \"_snt_once\" def", "============================================================================ \"\"\"Utility to run functions and methods once.\"\"\" import uuid", "def _check_no_output(output): if output is not None: raise ValueError(\"@snt.once decorated", "use this file except in compliance with the License. 
#", "wrapper.seen_none = True return # Get or set the `seen`", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", ">>> o = MyObject() >>> o.f() Hello, world! >>> o.f()", "License. # You may obtain a copy of the License", "under the License is distributed on an \"AS IS\" BASIS,", "License for the specific language governing permissions and # limitations", "before. Args: f: A function to wrap which should only", "# Copyright 2019 The Sonnet Authors. All Rights Reserved. #", "Reserved. # # Licensed under the Apache License, Version 2.0", "which should only be called once. Returns: Wrapped version of", "If an error is raised during execution of `f` it", "once per instance: >>> class MyObject: ... @snt.once ... def", "will be evaluated once per instance: >>> class MyObject: ...", "`seen` set for this object. seen = getattr(instance, _ONCE_PROPERTY, None)", "methods once.\"\"\" import uuid from sonnet.src import utils _ONCE_PROPERTY =", "only evaluate `f` the first time it is called. \"\"\"", "from sonnet.src import utils _ONCE_PROPERTY = \"_snt_once\" def _check_no_output(output): if", "user. Next time the method is run, it will be", "the user. Next time the method is run, it will", "weakset since you can't weakref None. if not wrapper.seen_none: _check_no_output(wrapped(*args,", "function to wrap which should only be called once. Returns:", "to the user. Next time the method is run, it", "only be called once. Returns: Wrapped version of `f` which", "in compliance with the License. # You may obtain a", "software # distributed under the License is distributed on an", "not None: raise ValueError(\"@snt.once decorated functions cannot return values\") def", "def once(f): \"\"\"Decorator which ensures a wrapped method is only", "world! 
>>> o.f() >>> o2.f() If an error is raised", "Args: f: A function to wrap which should only be", "output is not None: raise ValueError(\"@snt.once decorated functions cannot return", "wrapper.seen_none: _check_no_output(wrapped(*args, **kwargs)) wrapper.seen_none = True return # Get or", "then it will be evaluated once per instance: >>> class", "None) if seen is None: seen = set() setattr(instance, _ONCE_PROPERTY,", "be treated as not having run before. Args: f: A", "ValueError(\"@snt.once decorated functions cannot return values\") def once(f): \"\"\"Decorator which", "= uuid.uuid4() @utils.decorator def wrapper(wrapped, instance, args, kwargs): \"\"\"Decorator which", "functions cannot return values\") def once(f): \"\"\"Decorator which ensures a", "args, kwargs): \"\"\"Decorator which ensures a wrapped method is only", "to wrap which should only be called once. Returns: Wrapped", "method then it will be evaluated once per instance: >>>", "evaluated once per instance: >>> class MyObject: ... @snt.once ...", "ensures a wrapped method is only ever run once.\"\"\" if", "the method is run, it will be treated as not", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "utils _ONCE_PROPERTY = \"_snt_once\" def _check_no_output(output): if output is not", "f() >>> f() If `f` is a method then it", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "to in writing, software # distributed under the License is", "is called. 
\"\"\" # TODO(tomhennigan) Perhaps some more human friendly", "# See the License for the specific language governing permissions", "\"_snt_once\" def _check_no_output(output): if output is not None: raise ValueError(\"@snt.once", "language governing permissions and # limitations under the License. #", "or agreed to in writing, software # distributed under the", "required by applicable law or agreed to in writing, software", "per instance: >>> class MyObject: ... @snt.once ... def f(self):", "friendly identifier? once_id = uuid.uuid4() @utils.decorator def wrapper(wrapped, instance, args,", "seen = getattr(instance, _ONCE_PROPERTY, None) if seen is None: seen", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "TODO(tomhennigan) Perhaps some more human friendly identifier? once_id = uuid.uuid4()", "with the License. # You may obtain a copy of", "Next time the method is run, it will be treated", "ever run once. >>> @snt.once ... def f(): ... print('Hello,", "it will be raised to the user. Next time the", "if seen is None: seen = set() setattr(instance, _ONCE_PROPERTY, seen)", "use the weakset since you can't weakref None. if not", "the `seen` set for this object. seen = getattr(instance, _ONCE_PROPERTY,", "compliance with the License. # You may obtain a copy", "called. \"\"\" # TODO(tomhennigan) Perhaps some more human friendly identifier?", "All Rights Reserved. # # Licensed under the Apache License,", "agreed to in writing, software # distributed under the License", "distributed under the License is distributed on an \"AS IS\"", "MyObject() >>> o2.f() Hello, world! >>> o.f() >>> o2.f() If", "can't weakref None. 
if not wrapper.seen_none: _check_no_output(wrapped(*args, **kwargs)) wrapper.seen_none =", "in seen: _check_no_output(wrapped(*args, **kwargs)) seen.add(once_id) wrapper.seen_none = False decorated =", "seen is None: seen = set() setattr(instance, _ONCE_PROPERTY, seen) if", "functions and methods once.\"\"\" import uuid from sonnet.src import utils", "@snt.once ... def f(): ... print('Hello, world!') >>> f() Hello,", "express or implied. # See the License for the specific", "_check_no_output(output): if output is not None: raise ValueError(\"@snt.once decorated functions", "first time it is called. \"\"\" # TODO(tomhennigan) Perhaps some", "except in compliance with the License. # You may obtain", "License. # ============================================================================ \"\"\"Utility to run functions and methods once.\"\"\"", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", ">>> o.f() Hello, world! >>> o.f() >>> o2 = MyObject()", "not use this file except in compliance with the License.", "f() If `f` is a method then it will be", "writing, software # distributed under the License is distributed on", "since you can't weakref None. if not wrapper.seen_none: _check_no_output(wrapped(*args, **kwargs))", "None. if not wrapper.seen_none: _check_no_output(wrapped(*args, **kwargs)) wrapper.seen_none = True return", "be evaluated once per instance: >>> class MyObject: ... @snt.once", "you may not use this file except in compliance with", ">>> f() Hello, world! >>> f() >>> f() If `f`", "`f` the first time it is called. \"\"\" # TODO(tomhennigan)", "None: seen = set() setattr(instance, _ONCE_PROPERTY, seen) if once_id not", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "during execution of `f` it will be raised to the", "Hello, world! 
>>> o.f() >>> o2 = MyObject() >>> o2.f()", "kwargs): \"\"\"Decorator which ensures a wrapped method is only ever", "time the method is run, it will be treated as", "Returns: Wrapped version of `f` which will only evaluate `f`", "instance, args, kwargs): \"\"\"Decorator which ensures a wrapped method is", "CONDITIONS OF ANY KIND, either express or implied. # See", "True return # Get or set the `seen` set for", "once_id = uuid.uuid4() @utils.decorator def wrapper(wrapped, instance, args, kwargs): \"\"\"Decorator", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "... print('Hello, world!') >>> f() Hello, world! >>> f() >>>", "treated as not having run before. Args: f: A function", "not in seen: _check_no_output(wrapped(*args, **kwargs)) seen.add(once_id) wrapper.seen_none = False decorated", "Get or set the `seen` set for this object. seen", "Wrapped version of `f` which will only evaluate `f` the", "run once.\"\"\" if instance is None: # NOTE: We can't", "= getattr(instance, _ONCE_PROPERTY, None) if seen is None: seen =", "time it is called. \"\"\" # TODO(tomhennigan) Perhaps some more", "can't use the weakset since you can't weakref None. if", "False decorated = wrapper(f) # pylint: disable=no-value-for-parameter,assignment-from-none decorated.__snt_once_wrapped__ = f", "OR CONDITIONS OF ANY KIND, either express or implied. #", "ever run once.\"\"\" if instance is None: # NOTE: We", "A function to wrap which should only be called once.", "the License is distributed on an \"AS IS\" BASIS, #", "set for this object. seen = getattr(instance, _ONCE_PROPERTY, None) if", "once(f): \"\"\"Decorator which ensures a wrapped method is only ever", "some more human friendly identifier? once_id = uuid.uuid4() @utils.decorator def", "`f` is a method then it will be evaluated once", "= MyObject() >>> o.f() Hello, world! >>> o.f() >>> o2", "Sonnet Authors. All Rights Reserved. # # Licensed under the", ">>> class MyObject: ... @snt.once ... def f(self): ... 
print('Hello,", "f(self): ... print('Hello, world!') >>> o = MyObject() >>> o.f()", "not having run before. Args: f: A function to wrap", "ensures a wrapped method is only ever run once. >>>", "as not having run before. Args: f: A function to", "law or agreed to in writing, software # distributed under", "and # limitations under the License. # ============================================================================ \"\"\"Utility to", "o.f() >>> o2 = MyObject() >>> o2.f() Hello, world! >>>", "print('Hello, world!') >>> o = MyObject() >>> o.f() Hello, world!", "# NOTE: We can't use the weakset since you can't", "uuid.uuid4() @utils.decorator def wrapper(wrapped, instance, args, kwargs): \"\"\"Decorator which ensures", "run once. >>> @snt.once ... def f(): ... print('Hello, world!')", "not wrapper.seen_none: _check_no_output(wrapped(*args, **kwargs)) wrapper.seen_none = True return # Get", "= MyObject() >>> o2.f() Hello, world! >>> o.f() >>> o2.f()", "may obtain a copy of the License at # #", "only ever run once.\"\"\" if instance is None: # NOTE:", "once. Returns: Wrapped version of `f` which will only evaluate", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "seen.add(once_id) wrapper.seen_none = False decorated = wrapper(f) # pylint: disable=no-value-for-parameter,assignment-from-none", "run, it will be treated as not having run before.", "may not use this file except in compliance with the", ">>> o2 = MyObject() >>> o2.f() Hello, world! >>> o.f()", "The Sonnet Authors. All Rights Reserved. # # Licensed under", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "this file except in compliance with the License. # You", "seen = set() setattr(instance, _ONCE_PROPERTY, seen) if once_id not in", "# TODO(tomhennigan) Perhaps some more human friendly identifier? once_id =", "a wrapped method is only ever run once. >>> @snt.once", "will be treated as not having run before. Args: f:", "limitations under the License. 
# ============================================================================ \"\"\"Utility to run functions", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "= set() setattr(instance, _ONCE_PROPERTY, seen) if once_id not in seen:", "@utils.decorator def wrapper(wrapped, instance, args, kwargs): \"\"\"Decorator which ensures a", "# # Licensed under the Apache License, Version 2.0 (the", "setattr(instance, _ONCE_PROPERTY, seen) if once_id not in seen: _check_no_output(wrapped(*args, **kwargs))", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "sonnet.src import utils _ONCE_PROPERTY = \"_snt_once\" def _check_no_output(output): if output", "permissions and # limitations under the License. # ============================================================================ \"\"\"Utility", "will be raised to the user. Next time the method", "\"\"\"Utility to run functions and methods once.\"\"\" import uuid from", "you can't weakref None. if not wrapper.seen_none: _check_no_output(wrapped(*args, **kwargs)) wrapper.seen_none", "execution of `f` it will be raised to the user.", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "is a method then it will be evaluated once per", "None: raise ValueError(\"@snt.once decorated functions cannot return values\") def once(f):", "seen: _check_no_output(wrapped(*args, **kwargs)) seen.add(once_id) wrapper.seen_none = False decorated = wrapper(f)", "import utils _ONCE_PROPERTY = \"_snt_once\" def _check_no_output(output): if output is", "def wrapper(wrapped, instance, args, kwargs): \"\"\"Decorator which ensures a wrapped", "= \"_snt_once\" def _check_no_output(output): if output is not None: raise", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", ">>> o2.f() If an error is raised during execution of", "o2 = MyObject() >>> o2.f() Hello, world! 
>>> o.f() >>>", "is None: seen = set() setattr(instance, _ONCE_PROPERTY, seen) if once_id", ">>> @snt.once ... def f(): ... print('Hello, world!') >>> f()", "... @snt.once ... def f(self): ... print('Hello, world!') >>> o", "If `f` is a method then it will be evaluated", "or implied. # See the License for the specific language", "Rights Reserved. # # Licensed under the Apache License, Version", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "f() Hello, world! >>> f() >>> f() If `f` is", "be raised to the user. Next time the method is", "\"\"\" # TODO(tomhennigan) Perhaps some more human friendly identifier? once_id", "version of `f` which will only evaluate `f` the first", "the weakset since you can't weakref None. if not wrapper.seen_none:", "should only be called once. Returns: Wrapped version of `f`", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "raised during execution of `f` it will be raised to", "called once. Returns: Wrapped version of `f` which will only", "set() setattr(instance, _ONCE_PROPERTY, seen) if once_id not in seen: _check_no_output(wrapped(*args,", "(the \"License\"); # you may not use this file except", "# you may not use this file except in compliance", "human friendly identifier? once_id = uuid.uuid4() @utils.decorator def wrapper(wrapped, instance,", "is not None: raise ValueError(\"@snt.once decorated functions cannot return values\")", "which ensures a wrapped method is only ever run once.\"\"\"", "cannot return values\") def once(f): \"\"\"Decorator which ensures a wrapped", "# # Unless required by applicable law or agreed to", "_ONCE_PROPERTY = \"_snt_once\" def _check_no_output(output): if output is not None:", "world!') >>> o = MyObject() >>> o.f() Hello, world! 
>>>", "**kwargs)) seen.add(once_id) wrapper.seen_none = False decorated = wrapper(f) # pylint:", "to run functions and methods once.\"\"\" import uuid from sonnet.src", "f(): ... print('Hello, world!') >>> f() Hello, world! >>> f()", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "Version 2.0 (the \"License\"); # you may not use this", "@snt.once ... def f(self): ... print('Hello, world!') >>> o =", "Hello, world! >>> f() >>> f() If `f` is a", "Perhaps some more human friendly identifier? once_id = uuid.uuid4() @utils.decorator", "None: # NOTE: We can't use the weakset since you", "return values\") def once(f): \"\"\"Decorator which ensures a wrapped method", "method is only ever run once. >>> @snt.once ... def", "implied. # See the License for the specific language governing", "NOTE: We can't use the weakset since you can't weakref", "_ONCE_PROPERTY, seen) if once_id not in seen: _check_no_output(wrapped(*args, **kwargs)) seen.add(once_id)", "We can't use the weakset since you can't weakref None.", "under the Apache License, Version 2.0 (the \"License\"); # you", "print('Hello, world!') >>> f() Hello, world! >>> f() >>> f()", "by applicable law or agreed to in writing, software #", "identifier? once_id = uuid.uuid4() @utils.decorator def wrapper(wrapped, instance, args, kwargs):", "method is run, it will be treated as not having", "world! >>> f() >>> f() If `f` is a method", "getattr(instance, _ONCE_PROPERTY, None) if seen is None: seen = set()", "only ever run once. >>> @snt.once ... def f(): ...", "is only ever run once. >>> @snt.once ... def f():", "set the `seen` set for this object. seen = getattr(instance,", "it is called. \"\"\" # TODO(tomhennigan) Perhaps some more human", "# limitations under the License. # ============================================================================ \"\"\"Utility to run", "more human friendly identifier? 
once_id = uuid.uuid4() @utils.decorator def wrapper(wrapped,", "of `f` which will only evaluate `f` the first time", "method is only ever run once.\"\"\" if instance is None:", "decorated = wrapper(f) # pylint: disable=no-value-for-parameter,assignment-from-none decorated.__snt_once_wrapped__ = f return", "MyObject: ... @snt.once ... def f(self): ... print('Hello, world!') >>>", "if not wrapper.seen_none: _check_no_output(wrapped(*args, **kwargs)) wrapper.seen_none = True return #", "it will be evaluated once per instance: >>> class MyObject:", "values\") def once(f): \"\"\"Decorator which ensures a wrapped method is", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "a method then it will be evaluated once per instance:", "and methods once.\"\"\" import uuid from sonnet.src import utils _ONCE_PROPERTY", "Unless required by applicable law or agreed to in writing,", "\"\"\"Decorator which ensures a wrapped method is only ever run", "class MyObject: ... @snt.once ... def f(self): ... print('Hello, world!')", "o.f() Hello, world! >>> o.f() >>> o2 = MyObject() >>>", "wrapped method is only ever run once.\"\"\" if instance is", "the specific language governing permissions and # limitations under the", "_check_no_output(wrapped(*args, **kwargs)) seen.add(once_id) wrapper.seen_none = False decorated = wrapper(f) #", "instance is None: # NOTE: We can't use the weakset", "applicable law or agreed to in writing, software # distributed", "Copyright 2019 The Sonnet Authors. All Rights Reserved. # #", "... def f(self): ... print('Hello, world!') >>> o = MyObject()", "wrap which should only be called once. Returns: Wrapped version", "= wrapper(f) # pylint: disable=no-value-for-parameter,assignment-from-none decorated.__snt_once_wrapped__ = f return decorated", "weakref None. 
if not wrapper.seen_none: _check_no_output(wrapped(*args, **kwargs)) wrapper.seen_none = True", "in writing, software # distributed under the License is distributed", "will only evaluate `f` the first time it is called.", "_ONCE_PROPERTY, None) if seen is None: seen = set() setattr(instance,", "Hello, world! >>> o.f() >>> o2.f() If an error is", ">>> o2.f() Hello, world! >>> o.f() >>> o2.f() If an", "it will be treated as not having run before. Args:", "wrapper.seen_none = False decorated = wrapper(f) # pylint: disable=no-value-for-parameter,assignment-from-none decorated.__snt_once_wrapped__", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "License, Version 2.0 (the \"License\"); # you may not use", "# You may obtain a copy of the License at", "2019 The Sonnet Authors. All Rights Reserved. # # Licensed", "run functions and methods once.\"\"\" import uuid from sonnet.src import", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "Authors. All Rights Reserved. # # Licensed under the Apache", "# ============================================================================ \"\"\"Utility to run functions and methods once.\"\"\" import", "_check_no_output(wrapped(*args, **kwargs)) wrapper.seen_none = True return # Get or set", "world! >>> o.f() >>> o2 = MyObject() >>> o2.f() Hello,", "seen) if once_id not in seen: _check_no_output(wrapped(*args, **kwargs)) seen.add(once_id) wrapper.seen_none", "the License for the specific language governing permissions and #", "wrapped method is only ever run once. >>> @snt.once ...", "o2.f() If an error is raised during execution of `f`", "once.\"\"\" if instance is None: # NOTE: We can't use", "is raised during execution of `f` it will be raised", "Apache License, Version 2.0 (the \"License\"); # you may not", "the License. # ============================================================================ \"\"\"Utility to run functions and methods", "either express or implied. 
# See the License for the", "world!') >>> f() Hello, world! >>> f() >>> f() If", ">>> f() If `f` is a method then it will", "be called once. Returns: Wrapped version of `f` which will", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "which ensures a wrapped method is only ever run once.", "def f(self): ... print('Hello, world!') >>> o = MyObject() >>>", "return # Get or set the `seen` set for this", "`f` which will only evaluate `f` the first time it", "raise ValueError(\"@snt.once decorated functions cannot return values\") def once(f): \"\"\"Decorator", ">>> o.f() >>> o2.f() If an error is raised during", ">>> f() >>> f() If `f` is a method then", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "# Get or set the `seen` set for this object.", "under the License. # ============================================================================ \"\"\"Utility to run functions and", "... print('Hello, world!') >>> o = MyObject() >>> o.f() Hello,", "of `f` it will be raised to the user. Next", ">>> o.f() >>> o2 = MyObject() >>> o2.f() Hello, world!", "instance: >>> class MyObject: ... @snt.once ... def f(self): ...", "uuid from sonnet.src import utils _ONCE_PROPERTY = \"_snt_once\" def _check_no_output(output):", "\"License\"); # you may not use this file except in", "o = MyObject() >>> o.f() Hello, world! >>> o.f() >>>", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "`f` it will be raised to the user. Next time", "# distributed under the License is distributed on an \"AS", "for this object. 
seen = getattr(instance, _ONCE_PROPERTY, None) if seen", "wrapper(wrapped, instance, args, kwargs): \"\"\"Decorator which ensures a wrapped method", "# Unless required by applicable law or agreed to in", "o.f() >>> o2.f() If an error is raised during execution", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "which will only evaluate `f` the first time it is", "an error is raised during execution of `f` it will", "You may obtain a copy of the License at #", "if output is not None: raise ValueError(\"@snt.once decorated functions cannot", "the first time it is called. \"\"\" # TODO(tomhennigan) Perhaps", "the Apache License, Version 2.0 (the \"License\"); # you may", "f: A function to wrap which should only be called", "once_id not in seen: _check_no_output(wrapped(*args, **kwargs)) seen.add(once_id) wrapper.seen_none = False", "object. seen = getattr(instance, _ONCE_PROPERTY, None) if seen is None:", "a wrapped method is only ever run once.\"\"\" if instance", "MyObject() >>> o.f() Hello, world! >>> o.f() >>> o2 =", "error is raised during execution of `f` it will be", "governing permissions and # limitations under the License. # ============================================================================" ]
[ "for i in range(self.num_members + 1)] self.update_positions() self.distance2goal = None", "self.move_robot: self.close_connection() def open_connection(self): if self.robot.alive: raise Exception(\"Robot is already", "r * np.sin(theta) y = r * np.cos(theta) self.goal_coords =", "* np.cos(theta) self.goal_coords = [int(x), int(y)] def update_distance_2_goal(self): gripper_pos =", "numpy as np import cv2 import random class MyArm2D: def", "ranging between # min_angle and max_angle self.member_thickness = 30 self.img_width", "to check that all the joints (except for the last", "import piarm import time import numpy as np import cv2", "# angles of motor 3, 4 and 5 ranging between", "above # the ground self.min_joint_heights = [20, 20, 10] self.goal_coords", "# Flip image upside down self.img = cv2.flip(self.img, 0) self.img", "False if self.angles[member_index] > self.max_angles[member_index]: return False return True def", "if self.move_robot: self.robot = piarm.PiArm() self.open_connection() self.DEFAULT = [500, 500,", "cv2.rectangle(self.img, (int(self.x_offset - self.base_width/2), self.y_offset), (int(self.x_offset - self.base_width/2 + self.base_width),", "piarm import time import numpy as np import cv2 import", "-1) goal_x, goal_y = self.goal_coords self.img = cv2.circle(self.img, (goal_x +", "motor 5. It is positive if it is away from", "self.move_robot: self.robot = piarm.PiArm() self.open_connection() self.DEFAULT = [500, 500, 500,", "check_arm_positions(self): for joint_index in range(1,len(self.joint_positions)): member_pos = self.joint_positions[joint_index][1] min_height =", "1, (255,255,255), 2) cv2.imshow(\"Arm\", self.img) cv2.moveWindow(\"Arm\",20,50) def reset(self): self.angles =", "4 and 5 ranging between # min_angle and max_angle self.member_thickness", "self.y_offset), (int(self.x_offset - self.base_width/2 + self.base_width), self.y_offset + self.base_height), (0,", "underneath motor 5. 
It is positive if it is away", "second_joint[0] += self.x_offset second_joint[1] += self.y_offset self.img = cv2.line(self.img, tuple(first_joint),", "robot\") return False def move_to_default_pos(self): if self.robot.alive: for ID in", "# First, convert the angles in degrees between -90º and", "self.img) cv2.moveWindow(\"Arm\",20,50) def reset(self): self.angles = 90*np.ones(self.num_members) self.update_positions() self.img =", "switched on\") self.robot.connect(\"/dev/ttyS0\") if self.robot.alive: print(\"Success connecting to robot\") return", "[0, self.lengths[\"h_0\"]] self.joint_positions[1] = [ self.joint_positions[0][0] + self.lengths[\"a\"] * np.sin(np.deg2rad(self.angles[0])),", "member_pos < min_height: return False return True def get_reward(self, forbidden_action):", "500 # 0 -> 125 angles_deg = self.angles - 90", "random.uniform(-np.pi/4, np.pi/2) x = r * np.sin(theta) y = r", "from robot\") return True else: print(\"Failed to disconnect from robot\")", "self.img_width = 1000 self.x_offset = int(self.img_width/2) self.y_offset = self.lengths[\"h_0\"] self.img_height", "self.update_positions() self.img = np.zeros((self.img_height, self.img_width, 3)) self.timestep = 0 self.update_goal_coords()", "[int(x), int(y)] def update_distance_2_goal(self): gripper_pos = self.joint_positions[-1] self.distance2goal = np.sqrt(sum([(gripper_pos[i]", "self.base_height), (0, 165, 255), -1) goal_x, goal_y = self.goal_coords self.img", "return True else: return False def move_to_pos(self): # First, convert", "- 1] - 90 self.update_positions() self.update_distance_2_goal() def render(self): self.img =", "class MyArm2D: def __init__(self, move_robot = False): self.move_robot = move_robot", "3 self.adjustable_joints = [3,4,5] self.initial_height = 73 # height in", "self.min_joint_heights = [20, 20, 10] self.goal_coords = [None, None] self.update_goal_coords()", "+ self.base_width), self.y_offset + self.base_height), (0, 165, 255), -1) goal_x,", "i, action in 
enumerate(actions): self.angles[i:] += action for member_index in", "# the ground self.min_joint_heights = [20, 20, 10] self.goal_coords =", "return True else: print(\"Failed to disconnect from robot\") return False", "already switched off\") self.robot.disconnect() if not self.robot.alive: print(\"Success disconnecting from", "self.goal_coords self.img = cv2.circle(self.img, (goal_x + self.x_offset, goal_y + self.y_offset),", "+ self.lengths[\"a\"] * np.cos(np.deg2rad(self.angles[0])) ] self.joint_positions[2] = [ self.joint_positions[1][0] +", "switched off\") self.robot.disconnect() if not self.robot.alive: print(\"Success disconnecting from robot\")", "+ 20) self.img = np.zeros((self.img_height, self.img_width, 3)) self.timestep = 0", "20, 10] self.goal_coords = [None, None] self.update_goal_coords() self.joint_positions = [[0,0]", "cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2) cv2.imshow(\"Arm\", self.img) cv2.moveWindow(\"Arm\",20,50) def reset(self): self.angles", "first_joint = self.joint_positions[member_id].copy() second_joint = self.joint_positions[member_id + 1].copy() first_joint[0] +=", "base of the arm self.angles = 90*np.ones(self.num_members) # angles of", "True self.render() if self.move_robot: self.move_to_pos() r = self.get_reward(forbidden_action) self.timestep +=", "\"a\": 97.5, \"b\": 96, \"c\": 160 } self.base_width = 110", "-= angles_deg[0] angles_piarm = [int(500 + (375/90)*angle_deg) for angle_deg in", "\"c\": 160 } self.base_width = 110 self.base_height = 45 #", "3]), 500) time.sleep(1) return True else: return False def close_connection(self):", "Exception(\"Robot is already switched off\") self.robot.disconnect() if not self.robot.alive: print(\"Success", "+= self.x_offset second_joint[1] += self.y_offset self.img = cv2.line(self.img, tuple(first_joint), tuple(second_joint),", "[500, 500, 500, 500, 500, 500] self.num_members = 3 self.adjustable_joints", "if not okay_angles: print(\"An angle threshold was exceeded\") 
self.move_arm(-actions) forbidden_action", "in mm of motor 5's axle self.lengths = { \"h_0\":", "np.zeros((self.img_height, self.img_width, 3)) self.timestep = 0 self.update_goal_coords() self.render() if self.move_robot:", "30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2) cv2.imshow(\"Arm\", self.img) cv2.moveWindow(\"Arm\",20,50) def reset(self):", "# Convert to integers self.joint_positions = [[int(x[0]),int(x[1])] for x in", "\", angles_deg) print(\"Moving arms with angles: \", angles_piarm) if self.robot.alive:", "self.x_offset first_joint[1] += self.y_offset second_joint[0] += self.x_offset second_joint[1] += self.y_offset", "angles between 125 and 875 # 90 -> 500 #", "actions): \"\"\" The inputs are the new set of angles", "origin (0,0), right underneath motor 5. It is positive if", "(0,255,0), -1) # Render the base of the arm self.img", "not self.robot.alive: print(\"Success disconnecting from robot\") return True else: print(\"Failed", "okay_positions: print(\"A position threshold was exqqceeded\") self.move_arm(-actions) forbidden_action = True", "was exqqceeded\") self.move_arm(-actions) forbidden_action = True self.render() if self.move_robot: self.move_to_pos()", "sum(list(self.lengths.values())[1:]) r = random.uniform(0.8*max_length,max_length) theta = random.uniform(-np.pi/4, np.pi/2) x =", "member_index in range(1,self.num_members): self.max_angles[member_index] = self.angles[member_index - 1] + 90", "max_length = sum(list(self.lengths.values())[1:]) r = random.uniform(0.8*max_length,max_length) theta = random.uniform(-np.pi/4, np.pi/2)", "self.joint_positions[1][0] + self.lengths[\"b\"] * np.sin(np.deg2rad(self.angles[1])), self.joint_positions[1][1] + self.lengths[\"b\"] * np.cos(np.deg2rad(self.angles[1]))", "165, 255), -1) goal_x, goal_y = self.goal_coords self.img = cv2.circle(self.img,", "= 30 self.img_width = 1000 self.x_offset = int(self.img_width/2) self.y_offset =", "1] - 90 self.update_positions() self.update_distance_2_goal() def 
render(self): self.img = np.zeros((self.img_height,", "robot\") return True else: print(\"Failed to connect to robot\") return", "+ str(round(self.distance2goal,2)), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2) cv2.imshow(\"Arm\", self.img)", "for angle_deg in angles_deg] angles_piarm[0] = 1000 - angles_piarm[0] angles_piarm[1]", "is to check that all the joints (except for the", "on\") self.robot.connect(\"/dev/ttyS0\") if self.robot.alive: print(\"Success connecting to robot\") return True", "\"h_0\": 73, \"a\": 97.5, \"b\": 96, \"c\": 160 } self.base_width", "self.move_to_default_pos() def check_arm_angles(self): for member_index in range(self.num_members): if self.angles[member_index] <", "the arm self.img = cv2.rectangle(self.img, (int(self.x_offset - self.base_width/2), self.y_offset), (int(self.x_offset", "125 angles_deg = self.angles - 90 angles_deg[2] -= angles_deg[1] angles_deg[1]", "raise Exception(\"Robot is already switched off\") self.robot.disconnect() if not self.robot.alive:", "r * np.cos(theta) self.goal_coords = [int(x), int(y)] def update_distance_2_goal(self): gripper_pos", "= 0 self.update_goal_coords() self.render() if self.move_robot: self.move_to_default_pos() def check_arm_angles(self): for", "angles of motor 3, 4 and 5 ranging between #", "1]), 500) return True else: return False def move_to_pos(self): #", "self.img_height = int(sum(list(self.lengths.values())) + self.y_offset + 20) self.img = np.zeros((self.img_height,", "the ground self.min_joint_heights = [20, 20, 10] self.goal_coords = [None,", "96, \"c\": 160 } self.base_width = 110 self.base_height = 45", "= 45 # All the angles are with respect to", "self.base_width), self.y_offset + self.base_height), (0, 165, 255), -1) goal_x, goal_y", "= self.angles[member_index - 1] + 90 self.min_angles[member_index] = self.angles[member_index -", "i in range(self.num_members + 1)] self.update_positions() self.distance2goal = None self.update_distance_2_goal()", "for ID in range(1, 
7): self.robot.servoWrite(ID, int(self.DEFAULT[ID - 1]), 500)", "def __init__(self, move_robot = False): self.move_robot = move_robot if self.move_robot:", "return True else: return False def close_connection(self): if not self.robot.alive:", "self.lengths[\"c\"] * np.cos(np.deg2rad(self.angles[2])) ] # Convert to integers self.joint_positions =", "(except for the last one) is above # the ground", "= self.get_reward(forbidden_action) self.timestep += 1 is_done = self.timestep >= self.max_timestep", "import numpy as np import cv2 import random class MyArm2D:", "= False okay_angles = self.check_arm_angles() okay_positions = self.check_arm_positions() if not", "-= angles_deg[1] angles_deg[1] -= angles_deg[0] angles_piarm = [int(500 + (375/90)*angle_deg)", "is above # the ground self.min_joint_heights = [20, 20, 10]", "self.img = cv2.putText(self.img, \"Distance: \" + str(round(self.distance2goal,2)), (10, 30), cv2.FONT_HERSHEY_SIMPLEX,", "of the arm self.angles = 90*np.ones(self.num_members) # angles of motor", "height in mm of motor 5's axle self.lengths = {", "range(self.num_members): first_joint = self.joint_positions[member_id].copy() second_joint = self.joint_positions[member_id + 1].copy() first_joint[0]", "prevent it from hitting the base of the arm self.angles", "and 875 # 90 -> 500 # 0 -> 125", "reward_scaling_factor = 1 return - self.distance2goal * reward_scaling_factor def step(self,", "cv2.circle(self.img, tuple(first_joint), int(self.member_thickness/2), (255,255,0), -1) # Flip image upside down", "between 125 and 875 # 90 -> 500 # 0", "reset(self): self.angles = 90*np.ones(self.num_members) self.update_positions() self.img = np.zeros((self.img_height, self.img_width, 3))", "self.angles[member_index] < self.min_angles[member_index]: return False if self.angles[member_index] > self.max_angles[member_index]: return", "def step(self, actions): self.move_arm(actions) forbidden_action = False okay_angles = self.check_arm_angles()", "True else: return False 
def move_to_pos(self): # First, convert the", "def reset(self): self.angles = 90*np.ones(self.num_members) self.update_positions() self.img = np.zeros((self.img_height, self.img_width,", "None self.update_distance_2_goal() def __del__(self): print(\"Closing connection...\") if self.move_robot: self.close_connection() def", "with respect to the vertical self.max_angles = [90 for _", "print(\"Angles in degrees: \", angles_deg) print(\"Moving arms with angles: \",", "if self.angles[member_index] < self.min_angles[member_index]: return False if self.angles[member_index] > self.max_angles[member_index]:", "np.cos(theta) self.goal_coords = [int(x), int(y)] def update_distance_2_goal(self): gripper_pos = self.joint_positions[-1]", "(255,255,0), -1) # Flip image upside down self.img = cv2.flip(self.img,", "step(self, actions): self.move_arm(actions) forbidden_action = False okay_angles = self.check_arm_angles() okay_positions", "are the new set of angles [theta0, theta1, theta2] \"\"\"", "self.DEFAULT = [500, 500, 500, 500, 500, 500] self.num_members =", "True else: print(\"Failed to connect to robot\") return False def", "= self.joint_positions[member_id].copy() second_joint = self.joint_positions[member_id + 1].copy() first_joint[0] += self.x_offset", "angles are with respect to the vertical self.max_angles = [90", "angles_piarm[1] = 1000 - angles_piarm[1] print(\"Angles in degrees: \", angles_deg)", "= [int(x), int(y)] def update_distance_2_goal(self): gripper_pos = self.joint_positions[-1] self.distance2goal =", "if it is away from the origin. 
\"\"\" self.joint_positions[0] =", "first_joint[1] += self.y_offset second_joint[0] += self.x_offset second_joint[1] += self.y_offset self.img", "str(round(self.distance2goal,2)), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2) cv2.imshow(\"Arm\", self.img) cv2.moveWindow(\"Arm\",20,50)", "for joint_index in range(1,len(self.joint_positions)): member_pos = self.joint_positions[joint_index][1] min_height = self.min_joint_heights[joint_index-1]", "< min_height: return False return True def get_reward(self, forbidden_action): if", "= [3,4,5] self.initial_height = 73 # height in mm of", "# to angles between 125 and 875 # 90 ->", "self.img_width, 3)) # Render the floor self.img = cv2.rectangle(self.img, (0,0),", "3)) self.timestep = 0 self.update_goal_coords() self.render() if self.move_robot: self.move_to_default_pos() def", "90 self.update_positions() self.update_distance_2_goal() def render(self): self.img = np.zeros((self.img_height, self.img_width, 3))", "already switched on\") self.robot.connect(\"/dev/ttyS0\") if self.robot.alive: print(\"Success connecting to robot\")", "check that all the joints (except for the last one)", "def render(self): self.img = np.zeros((self.img_height, self.img_width, 3)) # Render the", "np import cv2 import random class MyArm2D: def __init__(self, move_robot", "forbidden_action = True self.render() if self.move_robot: self.move_to_pos() r = self.get_reward(forbidden_action)", "return - self.distance2goal * reward_scaling_factor def step(self, actions): self.move_arm(actions) forbidden_action", "self.angles - 90 angles_deg[2] -= angles_deg[1] angles_deg[1] -= angles_deg[0] angles_piarm", "in range(self.num_members + 1)] self.update_positions() self.distance2goal = None self.update_distance_2_goal() def", "it from hitting the base of the arm self.angles =", "- 3]), 500) time.sleep(1) return True else: return False def", "image upside down self.img = cv2.flip(self.img, 0) self.img = cv2.putText(self.img,", "range(3, 6): 
self.robot.servoWrite(8 - ID, int(angles_piarm[ID - 3]), 500) time.sleep(1)", "in self.joint_positions] def move_arm(self, actions): \"\"\" The inputs are the", "The inputs are the new set of angles [theta0, theta1,", "= [None, None] self.update_goal_coords() self.joint_positions = [[0,0] for i in", "{ \"h_0\": 73, \"a\": 97.5, \"b\": 96, \"c\": 160 }", "False return True def check_arm_positions(self): for joint_index in range(1,len(self.joint_positions)): member_pos", "angles [theta0, theta1, theta2] \"\"\" for i, action in enumerate(actions):", "+ 90 self.min_angles[member_index] = self.angles[member_index - 1] - 90 self.update_positions()", "if member_pos < min_height: return False return True def get_reward(self,", "min_angle and max_angle self.member_thickness = 30 self.img_width = 1000 self.x_offset", "np.sqrt(sum([(gripper_pos[i] - self.goal_coords[i])**2 for i in range(2)])) def update_positions(self): \"\"\"", "forbidden_action = False okay_angles = self.check_arm_angles() okay_positions = self.check_arm_positions() if", "5. It is positive if it is away from the", "self.lengths[\"c\"] * np.sin(np.deg2rad(self.angles[2])), self.joint_positions[2][1] + self.lengths[\"c\"] * np.cos(np.deg2rad(self.angles[2])) ] #", "second_joint = self.joint_positions[member_id + 1].copy() first_joint[0] += self.x_offset first_joint[1] +=", "if not okay_positions: print(\"A position threshold was exqqceeded\") self.move_arm(-actions) forbidden_action", "check_arm_angles(self): for member_index in range(self.num_members): if self.angles[member_index] < self.min_angles[member_index]: return", "All the angles are with respect to the vertical self.max_angles", "= move_robot if self.move_robot: self.robot = piarm.PiArm() self.open_connection() self.DEFAULT =", "73 # height in mm of motor 5's axle self.lengths", "np.zeros((self.img_height, self.img_width, 3)) self.timestep = 0 self.max_timestep = 200 #", "away from the origin. 
\"\"\" self.joint_positions[0] = [0, self.lengths[\"h_0\"]] self.joint_positions[1]", "= cv2.putText(self.img, \"Distance: \" + str(round(self.distance2goal,2)), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1,", "# All the angles are with respect to the vertical", "if not self.robot.alive: print(\"Success disconnecting from robot\") return True else:", "gripper_pos = self.joint_positions[-1] self.distance2goal = np.sqrt(sum([(gripper_pos[i] - self.goal_coords[i])**2 for i", "1 is_done = self.timestep >= self.max_timestep return self.angles, r, is_done", "= r * np.cos(theta) self.goal_coords = [int(x), int(y)] def update_distance_2_goal(self):", "+ self.base_height), (0, 165, 255), -1) goal_x, goal_y = self.goal_coords", "angles_piarm[0] = 1000 - angles_piarm[0] angles_piarm[1] = 1000 - angles_piarm[1]", "2) cv2.imshow(\"Arm\", self.img) cv2.moveWindow(\"Arm\",20,50) def reset(self): self.angles = 90*np.ones(self.num_members) self.update_positions()", "self.min_angles[0] = 0 # To prevent it from hitting the", "update_distance_2_goal(self): gripper_pos = self.joint_positions[-1] self.distance2goal = np.sqrt(sum([(gripper_pos[i] - self.goal_coords[i])**2 for", "print(\"An angle threshold was exceeded\") self.move_arm(-actions) forbidden_action = True if", "right underneath motor 5. 
It is positive if it is", "-> 125 angles_deg = self.angles - 90 angles_deg[2] -= angles_deg[1]", "arm self.angles = 90*np.ones(self.num_members) # angles of motor 3, 4", "print(\"Moving arms with angles: \", angles_piarm) if self.robot.alive: for ID", "self.goal_coords = [int(x), int(y)] def update_distance_2_goal(self): gripper_pos = self.joint_positions[-1] self.distance2goal", "= self.lengths[\"h_0\"] self.img_height = int(sum(list(self.lengths.values())) + self.y_offset + 20) self.img", "-> 500 # 0 -> 125 angles_deg = self.angles -", "= cv2.line(self.img, tuple(first_joint), tuple(second_joint), (255,0,0), self.member_thickness) self.img = cv2.circle(self.img, tuple(first_joint),", "return True def check_arm_positions(self): for joint_index in range(1,len(self.joint_positions)): member_pos =", "+ 1)] self.update_positions() self.distance2goal = None self.update_distance_2_goal() def __del__(self): print(\"Closing", "_ in range(self.num_members)] self.min_angles = [-90 for _ in range(self.num_members)]", "piarm.PiArm() self.open_connection() self.DEFAULT = [500, 500, 500, 500, 500, 500]", "the last one) is above # the ground self.min_joint_heights =", "= [20, 20, 10] self.goal_coords = [None, None] self.update_goal_coords() self.joint_positions", "degrees: \", angles_deg) print(\"Moving arms with angles: \", angles_piarm) if", "was exceeded\") self.move_arm(-actions) forbidden_action = True if not okay_positions: print(\"A", "30 self.img_width = 1000 self.x_offset = int(self.img_width/2) self.y_offset = self.lengths[\"h_0\"]", "i in range(2)])) def update_positions(self): \"\"\" Positions are with respect", "enumerate(actions): self.angles[i:] += action for member_index in range(1,self.num_members): self.max_angles[member_index] =", "# To prevent it from hitting the base of the", "new set of angles [theta0, theta1, theta2] \"\"\" for i,", "action in enumerate(actions): self.angles[i:] += action for member_index in range(1,self.num_members):", "* 
np.sin(np.deg2rad(self.angles[2])), self.joint_positions[2][1] + self.lengths[\"c\"] * np.cos(np.deg2rad(self.angles[2])) ] # Convert", "160 } self.base_width = 110 self.base_height = 45 # All", "self.render() if self.move_robot: self.move_to_pos() r = self.get_reward(forbidden_action) self.timestep += 1", "= None self.update_distance_2_goal() def __del__(self): print(\"Closing connection...\") if self.move_robot: self.close_connection()", "= [0, self.lengths[\"h_0\"]] self.joint_positions[1] = [ self.joint_positions[0][0] + self.lengths[\"a\"] *", "+ (375/90)*angle_deg) for angle_deg in angles_deg] angles_piarm[0] = 1000 -", "angles: \", angles_piarm) if self.robot.alive: for ID in range(3, 6):", "= False): self.move_robot = move_robot if self.move_robot: self.robot = piarm.PiArm()", "self.update_positions() self.distance2goal = None self.update_distance_2_goal() def __del__(self): print(\"Closing connection...\") if", "for member_index in range(self.num_members): if self.angles[member_index] < self.min_angles[member_index]: return False", "respect to the vertical self.max_angles = [90 for _ in", "5 ranging between # min_angle and max_angle self.member_thickness = 30", "np.sin(np.deg2rad(self.angles[0])), self.joint_positions[0][1] + self.lengths[\"a\"] * np.cos(np.deg2rad(self.angles[0])) ] self.joint_positions[2] = [", "90 angles_deg[2] -= angles_deg[1] angles_deg[1] -= angles_deg[0] angles_piarm = [int(500", "self.lengths[\"b\"] * np.cos(np.deg2rad(self.angles[1])) ] self.joint_positions[3] = [ self.joint_positions[2][0] + self.lengths[\"c\"]", "Render the floor self.img = cv2.rectangle(self.img, (0,0), (self.img_width, self.y_offset), (0,255,0),", "= self.min_joint_heights[joint_index-1] if member_pos < min_height: return False return True", "+ self.y_offset), int(self.member_thickness/2), (128, 0, 128), 5) for member_id in", "is already switched on\") self.robot.connect(\"/dev/ttyS0\") if self.robot.alive: print(\"Success connecting to", "are with respect to 
the vertical self.max_angles = [90 for", "to the vertical self.max_angles = [90 for _ in range(self.num_members)]", "are with respect to the origin (0,0), right underneath motor", "[ self.joint_positions[2][0] + self.lengths[\"c\"] * np.sin(np.deg2rad(self.angles[2])), self.joint_positions[2][1] + self.lengths[\"c\"] *", "= 90*np.ones(self.num_members) self.update_positions() self.img = np.zeros((self.img_height, self.img_width, 3)) self.timestep =", "return False def update_goal_coords(self): max_length = sum(list(self.lengths.values())[1:]) r = random.uniform(0.8*max_length,max_length)", "45 # All the angles are with respect to the", "False def close_connection(self): if not self.robot.alive: raise Exception(\"Robot is already", "in degrees between -90º and +90º # to angles between", "angle threshold was exceeded\") self.move_arm(-actions) forbidden_action = True if not", "angles_piarm[0] angles_piarm[1] = 1000 - angles_piarm[1] print(\"Angles in degrees: \",", "= int(sum(list(self.lengths.values())) + self.y_offset + 20) self.img = np.zeros((self.img_height, self.img_width,", "cv2.flip(self.img, 0) self.img = cv2.putText(self.img, \"Distance: \" + str(round(self.distance2goal,2)), (10,", "self.goal_coords[i])**2 for i in range(2)])) def update_positions(self): \"\"\" Positions are", "angles_deg[0] angles_piarm = [int(500 + (375/90)*angle_deg) for angle_deg in angles_deg]", "(0,0), right underneath motor 5. 
It is positive if it", "theta2] \"\"\" for i, action in enumerate(actions): self.angles[i:] += action", "np.sin(np.deg2rad(self.angles[1])), self.joint_positions[1][1] + self.lengths[\"b\"] * np.cos(np.deg2rad(self.angles[1])) ] self.joint_positions[3] = [", "# height in mm of motor 5's axle self.lengths =", "in range(1,self.num_members): self.max_angles[member_index] = self.angles[member_index - 1] + 90 self.min_angles[member_index]", "* np.sin(theta) y = r * np.cos(theta) self.goal_coords = [int(x),", "500, 500, 500, 500, 500] self.num_members = 3 self.adjustable_joints =", "angles_deg[1] -= angles_deg[0] angles_piarm = [int(500 + (375/90)*angle_deg) for angle_deg", "-1) # Flip image upside down self.img = cv2.flip(self.img, 0)", "= self.joint_positions[-1] self.distance2goal = np.sqrt(sum([(gripper_pos[i] - self.goal_coords[i])**2 for i in", "[ self.joint_positions[0][0] + self.lengths[\"a\"] * np.sin(np.deg2rad(self.angles[0])), self.joint_positions[0][1] + self.lengths[\"a\"] *", "90 self.min_angles[member_index] = self.angles[member_index - 1] - 90 self.update_positions() self.update_distance_2_goal()", "[[0,0] for i in range(self.num_members + 1)] self.update_positions() self.distance2goal =", "# 0 -> 125 angles_deg = self.angles - 90 angles_deg[2]", "self.y_offset = self.lengths[\"h_0\"] self.img_height = int(sum(list(self.lengths.values())) + self.y_offset + 20)", "get_reward(self, forbidden_action): if forbidden_action: reward_scaling_factor = 2 else: reward_scaling_factor =", "actions): self.move_arm(actions) forbidden_action = False okay_angles = self.check_arm_angles() okay_positions =", "exqqceeded\") self.move_arm(-actions) forbidden_action = True self.render() if self.move_robot: self.move_to_pos() r", "+= 1 is_done = self.timestep >= self.max_timestep return self.angles, r,", "return False return True def check_arm_positions(self): for joint_index in range(1,len(self.joint_positions)):", "to integers self.joint_positions = [[int(x[0]),int(x[1])] 
for x in self.joint_positions] def", "= 1000 self.x_offset = int(self.img_width/2) self.y_offset = self.lengths[\"h_0\"] self.img_height =", "self.joint_positions[member_id].copy() second_joint = self.joint_positions[member_id + 1].copy() first_joint[0] += self.x_offset first_joint[1]", "self.max_angles[member_index] = self.angles[member_index - 1] + 90 self.min_angles[member_index] = self.angles[member_index", "500, 500, 500] self.num_members = 3 self.adjustable_joints = [3,4,5] self.initial_height", "np.sin(theta) y = r * np.cos(theta) self.goal_coords = [int(x), int(y)]", "73, \"a\": 97.5, \"b\": 96, \"c\": 160 } self.base_width =", "self.robot.connect(\"/dev/ttyS0\") if self.robot.alive: print(\"Success connecting to robot\") return True else:", "x in self.joint_positions] def move_arm(self, actions): \"\"\" The inputs are", "range(1, 7): self.robot.servoWrite(ID, int(self.DEFAULT[ID - 1]), 500) return True else:", "from robot\") return False def update_goal_coords(self): max_length = sum(list(self.lengths.values())[1:]) r", "cv2.rectangle(self.img, (0,0), (self.img_width, self.y_offset), (0,255,0), -1) # Render the base", "the arm self.angles = 90*np.ones(self.num_members) # angles of motor 3,", "forbidden_action): if forbidden_action: reward_scaling_factor = 2 else: reward_scaling_factor = 1", "self.angles = 90*np.ones(self.num_members) # angles of motor 3, 4 and", "self.lengths[\"h_0\"]] self.joint_positions[1] = [ self.joint_positions[0][0] + self.lengths[\"a\"] * np.sin(np.deg2rad(self.angles[0])), self.joint_positions[0][1]", "cv2 import random class MyArm2D: def __init__(self, move_robot = False):", "False okay_angles = self.check_arm_angles() okay_positions = self.check_arm_positions() if not okay_angles:", "= np.zeros((self.img_height, self.img_width, 3)) self.timestep = 0 self.max_timestep = 200", "connecting to robot\") return True else: print(\"Failed to connect to", "in range(self.num_members): if self.angles[member_index] < 
self.min_angles[member_index]: return False if self.angles[member_index]", "* np.cos(np.deg2rad(self.angles[0])) ] self.joint_positions[2] = [ self.joint_positions[1][0] + self.lengths[\"b\"] *", "+= self.y_offset self.img = cv2.line(self.img, tuple(first_joint), tuple(second_joint), (255,0,0), self.member_thickness) self.img", "- angles_piarm[0] angles_piarm[1] = 1000 - angles_piarm[1] print(\"Angles in degrees:", "(int(self.x_offset - self.base_width/2 + self.base_width), self.y_offset + self.base_height), (0, 165,", "self.goal_coords = [None, None] self.update_goal_coords() self.joint_positions = [[0,0] for i", "self.joint_positions[1][1] + self.lengths[\"b\"] * np.cos(np.deg2rad(self.angles[1])) ] self.joint_positions[3] = [ self.joint_positions[2][0]", "def update_goal_coords(self): max_length = sum(list(self.lengths.values())[1:]) r = random.uniform(0.8*max_length,max_length) theta =", "\"\"\" The inputs are the new set of angles [theta0,", "(375/90)*angle_deg) for angle_deg in angles_deg] angles_piarm[0] = 1000 - angles_piarm[0]", "range(self.num_members)] self.min_angles[0] = 0 # To prevent it from hitting", "7): self.robot.servoWrite(ID, int(self.DEFAULT[ID - 1]), 500) return True else: return", "= [ self.joint_positions[1][0] + self.lengths[\"b\"] * np.sin(np.deg2rad(self.angles[1])), self.joint_positions[1][1] + self.lengths[\"b\"]", "+= self.x_offset first_joint[1] += self.y_offset second_joint[0] += self.x_offset second_joint[1] +=", "1] + 90 self.min_angles[member_index] = self.angles[member_index - 1] - 90", "self.distance2goal = None self.update_distance_2_goal() def __del__(self): print(\"Closing connection...\") if self.move_robot:", "= [ self.joint_positions[0][0] + self.lengths[\"a\"] * np.sin(np.deg2rad(self.angles[0])), self.joint_positions[0][1] + self.lengths[\"a\"]", "= self.goal_coords self.img = cv2.circle(self.img, (goal_x + self.x_offset, goal_y +", "to robot\") return True else: print(\"Failed to connect to robot\")", "(255,0,0), 
self.member_thickness) self.img = cv2.circle(self.img, tuple(first_joint), int(self.member_thickness/2), (255,255,0), -1) #", "forbidden_action: reward_scaling_factor = 2 else: reward_scaling_factor = 1 return -", "for _ in range(self.num_members)] self.min_angles = [-90 for _ in", "Convert to integers self.joint_positions = [[int(x[0]),int(x[1])] for x in self.joint_positions]", "as np import cv2 import random class MyArm2D: def __init__(self,", "if self.move_robot: self.move_to_pos() r = self.get_reward(forbidden_action) self.timestep += 1 is_done", "(255,255,255), 2) cv2.imshow(\"Arm\", self.img) cv2.moveWindow(\"Arm\",20,50) def reset(self): self.angles = 90*np.ones(self.num_members)", "+ 1].copy() first_joint[0] += self.x_offset first_joint[1] += self.y_offset second_joint[0] +=", "# This is to check that all the joints (except", "np.cos(np.deg2rad(self.angles[0])) ] self.joint_positions[2] = [ self.joint_positions[1][0] + self.lengths[\"b\"] * np.sin(np.deg2rad(self.angles[1])),", "angles in degrees between -90º and +90º # to angles", "self.robot.disconnect() if not self.robot.alive: print(\"Success disconnecting from robot\") return True", "for member_index in range(1,self.num_members): self.max_angles[member_index] = self.angles[member_index - 1] +", "self.min_joint_heights[joint_index-1] if member_pos < min_height: return False return True def", "off\") self.robot.disconnect() if not self.robot.alive: print(\"Success disconnecting from robot\") return", "= 3 self.adjustable_joints = [3,4,5] self.initial_height = 73 # height", "move_to_default_pos(self): if self.robot.alive: for ID in range(1, 7): self.robot.servoWrite(ID, int(self.DEFAULT[ID", "0 self.update_goal_coords() self.render() if self.move_robot: self.move_to_default_pos() def check_arm_angles(self): for member_index", "that all the joints (except for the last one) is", "if self.move_robot: self.close_connection() def open_connection(self): if self.robot.alive: raise Exception(\"Robot is", 
"int(self.member_thickness/2), (255,255,0), -1) # Flip image upside down self.img =", "self.robot.servoWrite(8 - ID, int(angles_piarm[ID - 3]), 500) time.sleep(1) return True", "= random.uniform(0.8*max_length,max_length) theta = random.uniform(-np.pi/4, np.pi/2) x = r *", "Render the base of the arm self.img = cv2.rectangle(self.img, (int(self.x_offset", "and +90º # to angles between 125 and 875 #", "and 5 ranging between # min_angle and max_angle self.member_thickness =", "reward_scaling_factor def step(self, actions): self.move_arm(actions) forbidden_action = False okay_angles =", "def check_arm_positions(self): for joint_index in range(1,len(self.joint_positions)): member_pos = self.joint_positions[joint_index][1] min_height", "okay_angles = self.check_arm_angles() okay_positions = self.check_arm_positions() if not okay_angles: print(\"An", "[int(500 + (375/90)*angle_deg) for angle_deg in angles_deg] angles_piarm[0] = 1000", "range(self.num_members + 1)] self.update_positions() self.distance2goal = None self.update_distance_2_goal() def __del__(self):", "return False if self.angles[member_index] > self.max_angles[member_index]: return False return True", "- self.distance2goal * reward_scaling_factor def step(self, actions): self.move_arm(actions) forbidden_action =", "the angles are with respect to the vertical self.max_angles =", "self.joint_positions[1] = [ self.joint_positions[0][0] + self.lengths[\"a\"] * np.sin(np.deg2rad(self.angles[0])), self.joint_positions[0][1] +", "= np.zeros((self.img_height, self.img_width, 3)) self.timestep = 0 self.update_goal_coords() self.render() if", "1].copy() first_joint[0] += self.x_offset first_joint[1] += self.y_offset second_joint[0] += self.x_offset", "in range(1, 7): self.robot.servoWrite(ID, int(self.DEFAULT[ID - 1]), 500) return True", "] self.joint_positions[2] = [ self.joint_positions[1][0] + self.lengths[\"b\"] * np.sin(np.deg2rad(self.angles[1])), self.joint_positions[1][1]", "to connect to robot\") return False 
def move_to_default_pos(self): if self.robot.alive:", "for x in self.joint_positions] def move_arm(self, actions): \"\"\" The inputs", "= np.sqrt(sum([(gripper_pos[i] - self.goal_coords[i])**2 for i in range(2)])) def update_positions(self):", "else: return False def move_to_pos(self): # First, convert the angles", "= self.joint_positions[member_id + 1].copy() first_joint[0] += self.x_offset first_joint[1] += self.y_offset", "= True if not okay_positions: print(\"A position threshold was exqqceeded\")", "np.pi/2) x = r * np.sin(theta) y = r *", "print(\"Failed to connect to robot\") return False def move_to_default_pos(self): if", "close_connection(self): if not self.robot.alive: raise Exception(\"Robot is already switched off\")", "self.member_thickness) self.img = cv2.circle(self.img, tuple(first_joint), int(self.member_thickness/2), (255,255,0), -1) # Flip", "self.update_positions() self.update_distance_2_goal() def render(self): self.img = np.zeros((self.img_height, self.img_width, 3)) #", "= self.check_arm_angles() okay_positions = self.check_arm_positions() if not okay_angles: print(\"An angle", "+ self.lengths[\"c\"] * np.sin(np.deg2rad(self.angles[2])), self.joint_positions[2][1] + self.lengths[\"c\"] * np.cos(np.deg2rad(self.angles[2])) ]", "set of angles [theta0, theta1, theta2] \"\"\" for i, action", "This is to check that all the joints (except for", "* np.cos(np.deg2rad(self.angles[1])) ] self.joint_positions[3] = [ self.joint_positions[2][0] + self.lengths[\"c\"] *", "the base of the arm self.img = cv2.rectangle(self.img, (int(self.x_offset -", "convert the angles in degrees between -90º and +90º #", "* np.sin(np.deg2rad(self.angles[1])), self.joint_positions[1][1] + self.lengths[\"b\"] * np.cos(np.deg2rad(self.angles[1])) ] self.joint_positions[3] =", "self.robot.alive: print(\"Success disconnecting from robot\") return True else: print(\"Failed to", "move_robot = False): self.move_robot = move_robot if self.move_robot: self.robot =", "self.y_offset 
self.img = cv2.line(self.img, tuple(first_joint), tuple(second_joint), (255,0,0), self.member_thickness) self.img =", "int(y)] def update_distance_2_goal(self): gripper_pos = self.joint_positions[-1] self.distance2goal = np.sqrt(sum([(gripper_pos[i] -", "+ self.lengths[\"a\"] * np.sin(np.deg2rad(self.angles[0])), self.joint_positions[0][1] + self.lengths[\"a\"] * np.cos(np.deg2rad(self.angles[0])) ]", "self.img = np.zeros((self.img_height, self.img_width, 3)) self.timestep = 0 self.update_goal_coords() self.render()", "for ID in range(3, 6): self.robot.servoWrite(8 - ID, int(angles_piarm[ID -", "True if not okay_positions: print(\"A position threshold was exqqceeded\") self.move_arm(-actions)", "0 # To prevent it from hitting the base of", "if forbidden_action: reward_scaling_factor = 2 else: reward_scaling_factor = 1 return", "print(\"Closing connection...\") if self.move_robot: self.close_connection() def open_connection(self): if self.robot.alive: raise", "range(2)])) def update_positions(self): \"\"\" Positions are with respect to the", "int(self.DEFAULT[ID - 1]), 500) return True else: return False def", "int(angles_piarm[ID - 3]), 500) time.sleep(1) return True else: return False", "= [ self.joint_positions[2][0] + self.lengths[\"c\"] * np.sin(np.deg2rad(self.angles[2])), self.joint_positions[2][1] + self.lengths[\"c\"]", "= [90 for _ in range(self.num_members)] self.min_angles = [-90 for", "# Render the floor self.img = cv2.rectangle(self.img, (0,0), (self.img_width, self.y_offset),", "= sum(list(self.lengths.values())[1:]) r = random.uniform(0.8*max_length,max_length) theta = random.uniform(-np.pi/4, np.pi/2) x", "self.joint_positions[member_id + 1].copy() first_joint[0] += self.x_offset first_joint[1] += self.y_offset second_joint[0]", "To prevent it from hitting the base of the arm", "import time import numpy as np import cv2 import random", "ID, int(angles_piarm[ID - 3]), 500) time.sleep(1) return True else: return", "self.joint_positions = 
[[int(x[0]),int(x[1])] for x in self.joint_positions] def move_arm(self, actions):", "it is away from the origin. \"\"\" self.joint_positions[0] = [0,", "x = r * np.sin(theta) y = r * np.cos(theta)", "member_index in range(self.num_members): if self.angles[member_index] < self.min_angles[member_index]: return False if", "__del__(self): print(\"Closing connection...\") if self.move_robot: self.close_connection() def open_connection(self): if self.robot.alive:", "in range(1,len(self.joint_positions)): member_pos = self.joint_positions[joint_index][1] min_height = self.min_joint_heights[joint_index-1] if member_pos", "return False def move_to_default_pos(self): if self.robot.alive: for ID in range(1,", "angle_deg in angles_deg] angles_piarm[0] = 1000 - angles_piarm[0] angles_piarm[1] =", "disconnecting from robot\") return True else: print(\"Failed to disconnect from", "update_goal_coords(self): max_length = sum(list(self.lengths.values())[1:]) r = random.uniform(0.8*max_length,max_length) theta = random.uniform(-np.pi/4,", "self.adjustable_joints = [3,4,5] self.initial_height = 73 # height in mm", "self.joint_positions[0] = [0, self.lengths[\"h_0\"]] self.joint_positions[1] = [ self.joint_positions[0][0] + self.lengths[\"a\"]", "range(self.num_members): if self.angles[member_index] < self.min_angles[member_index]: return False if self.angles[member_index] >", "joint_index in range(1,len(self.joint_positions)): member_pos = self.joint_positions[joint_index][1] min_height = self.min_joint_heights[joint_index-1] if", "self.joint_positions[2][0] + self.lengths[\"c\"] * np.sin(np.deg2rad(self.angles[2])), self.joint_positions[2][1] + self.lengths[\"c\"] * np.cos(np.deg2rad(self.angles[2]))", "0, 128), 5) for member_id in range(self.num_members): first_joint = self.joint_positions[member_id].copy()", "is away from the origin. 
\"\"\" self.joint_positions[0] = [0, self.lengths[\"h_0\"]]", "np.cos(np.deg2rad(self.angles[1])) ] self.joint_positions[3] = [ self.joint_positions[2][0] + self.lengths[\"c\"] * np.sin(np.deg2rad(self.angles[2])),", "False def update_goal_coords(self): max_length = sum(list(self.lengths.values())[1:]) r = random.uniform(0.8*max_length,max_length) theta", "= self.angles[member_index - 1] - 90 self.update_positions() self.update_distance_2_goal() def render(self):", "= [int(500 + (375/90)*angle_deg) for angle_deg in angles_deg] angles_piarm[0] =", "time.sleep(1) return True else: return False def close_connection(self): if not", "int(self.img_width/2) self.y_offset = self.lengths[\"h_0\"] self.img_height = int(sum(list(self.lengths.values())) + self.y_offset +", "r = random.uniform(0.8*max_length,max_length) theta = random.uniform(-np.pi/4, np.pi/2) x = r", "True def get_reward(self, forbidden_action): if forbidden_action: reward_scaling_factor = 2 else:", "tuple(first_joint), tuple(second_joint), (255,0,0), self.member_thickness) self.img = cv2.circle(self.img, tuple(first_joint), int(self.member_thickness/2), (255,255,0),", "(128, 0, 128), 5) for member_id in range(self.num_members): first_joint =", "self.joint_positions[2][1] + self.lengths[\"c\"] * np.cos(np.deg2rad(self.angles[2])) ] # Convert to integers", "of motor 3, 4 and 5 ranging between # min_angle", "ground self.min_joint_heights = [20, 20, 10] self.goal_coords = [None, None]", "else: return False def close_connection(self): if not self.robot.alive: raise Exception(\"Robot", "the new set of angles [theta0, theta1, theta2] \"\"\" for", "the base of the arm self.angles = 90*np.ones(self.num_members) # angles", "def check_arm_angles(self): for member_index in range(self.num_members): if self.angles[member_index] < self.min_angles[member_index]:", "y = r * np.cos(theta) self.goal_coords = [int(x), int(y)] def", "False return True def get_reward(self, forbidden_action): if forbidden_action: 
reward_scaling_factor =", "if self.robot.alive: raise Exception(\"Robot is already switched on\") self.robot.connect(\"/dev/ttyS0\") if", "self.max_angles[member_index]: return False return True def check_arm_positions(self): for joint_index in", "in range(self.num_members)] self.min_angles = [-90 for _ in range(self.num_members)] self.min_angles[0]", "degrees between -90º and +90º # to angles between 125", "0) self.img = cv2.putText(self.img, \"Distance: \" + str(round(self.distance2goal,2)), (10, 30),", "to disconnect from robot\") return False def update_goal_coords(self): max_length =", "joints (except for the last one) is above # the", "5's axle self.lengths = { \"h_0\": 73, \"a\": 97.5, \"b\":", "self.joint_positions[-1] self.distance2goal = np.sqrt(sum([(gripper_pos[i] - self.goal_coords[i])**2 for i in range(2)]))", "+ self.y_offset + 20) self.img = np.zeros((self.img_height, self.img_width, 3)) self.timestep", "self.y_offset + 20) self.img = np.zeros((self.img_height, self.img_width, 3)) self.timestep =", "97.5, \"b\": 96, \"c\": 160 } self.base_width = 110 self.base_height", "for the last one) is above # the ground self.min_joint_heights", "return True def get_reward(self, forbidden_action): if forbidden_action: reward_scaling_factor = 2", "move_to_pos(self): # First, convert the angles in degrees between -90º", "- 1] + 90 self.min_angles[member_index] = self.angles[member_index - 1] -", "if self.robot.alive: print(\"Success connecting to robot\") return True else: print(\"Failed", "range(1,len(self.joint_positions)): member_pos = self.joint_positions[joint_index][1] min_height = self.min_joint_heights[joint_index-1] if member_pos <", "in range(2)])) def update_positions(self): \"\"\" Positions are with respect to", "90*np.ones(self.num_members) # angles of motor 3, 4 and 5 ranging", "ID in range(1, 7): self.robot.servoWrite(ID, int(self.DEFAULT[ID - 1]), 500) return", "base of the arm self.img = cv2.rectangle(self.img, (int(self.x_offset - 
self.base_width/2),", "self.angles[member_index - 1] - 90 self.update_positions() self.update_distance_2_goal() def render(self): self.img", "self.min_angles[member_index] = self.angles[member_index - 1] - 90 self.update_positions() self.update_distance_2_goal() def", "True def check_arm_positions(self): for joint_index in range(1,len(self.joint_positions)): member_pos = self.joint_positions[joint_index][1]", "import random class MyArm2D: def __init__(self, move_robot = False): self.move_robot", "angles_piarm = [int(500 + (375/90)*angle_deg) for angle_deg in angles_deg] angles_piarm[0]", "self.move_arm(-actions) forbidden_action = True self.render() if self.move_robot: self.move_to_pos() r =", "1000 - angles_piarm[1] print(\"Angles in degrees: \", angles_deg) print(\"Moving arms", "= 0 self.max_timestep = 200 # This is to check", "in degrees: \", angles_deg) print(\"Moving arms with angles: \", angles_piarm)", "\"\"\" for i, action in enumerate(actions): self.angles[i:] += action for", "cv2.line(self.img, tuple(first_joint), tuple(second_joint), (255,0,0), self.member_thickness) self.img = cv2.circle(self.img, tuple(first_joint), int(self.member_thickness/2),", "vertical self.max_angles = [90 for _ in range(self.num_members)] self.min_angles =", "self.angles[member_index - 1] + 90 self.min_angles[member_index] = self.angles[member_index - 1]", "- self.goal_coords[i])**2 for i in range(2)])) def update_positions(self): \"\"\" Positions", "self.img = cv2.circle(self.img, tuple(first_joint), int(self.member_thickness/2), (255,255,0), -1) # Flip image", "\", angles_piarm) if self.robot.alive: for ID in range(3, 6): self.robot.servoWrite(8", "disconnect from robot\") return False def update_goal_coords(self): max_length = sum(list(self.lengths.values())[1:])", "self.joint_positions[3] = [ self.joint_positions[2][0] + self.lengths[\"c\"] * np.sin(np.deg2rad(self.angles[2])), self.joint_positions[2][1] +", "-90º and +90º # to angles between 125 and 875", "max_angle 
self.member_thickness = 30 self.img_width = 1000 self.x_offset = int(self.img_width/2)", "= self.angles - 90 angles_deg[2] -= angles_deg[1] angles_deg[1] -= angles_deg[0]", "= [[int(x[0]),int(x[1])] for x in self.joint_positions] def move_arm(self, actions): \"\"\"", "None] self.update_goal_coords() self.joint_positions = [[0,0] for i in range(self.num_members +", "- 90 self.update_positions() self.update_distance_2_goal() def render(self): self.img = np.zeros((self.img_height, self.img_width,", "+ self.lengths[\"b\"] * np.sin(np.deg2rad(self.angles[1])), self.joint_positions[1][1] + self.lengths[\"b\"] * np.cos(np.deg2rad(self.angles[1])) ]", "cv2.putText(self.img, \"Distance: \" + str(round(self.distance2goal,2)), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255),", "connect to robot\") return False def move_to_default_pos(self): if self.robot.alive: for", "inputs are the new set of angles [theta0, theta1, theta2]", "self.robot = piarm.PiArm() self.open_connection() self.DEFAULT = [500, 500, 500, 500,", "20) self.img = np.zeros((self.img_height, self.img_width, 3)) self.timestep = 0 self.max_timestep", "self.y_offset), (0,255,0), -1) # Render the base of the arm", "def open_connection(self): if self.robot.alive: raise Exception(\"Robot is already switched on\")", "# 90 -> 500 # 0 -> 125 angles_deg =", "self.img = np.zeros((self.img_height, self.img_width, 3)) self.timestep = 0 self.max_timestep =", "to robot\") return False def move_to_default_pos(self): if self.robot.alive: for ID", "= int(self.img_width/2) self.y_offset = self.lengths[\"h_0\"] self.img_height = int(sum(list(self.lengths.values())) + self.y_offset", "= 0 # To prevent it from hitting the base", "raise Exception(\"Robot is already switched on\") self.robot.connect(\"/dev/ttyS0\") if self.robot.alive: print(\"Success", "def close_connection(self): if not self.robot.alive: raise Exception(\"Robot is already switched", "positive if it is away from the origin. 
\"\"\" self.joint_positions[0]", "else: print(\"Failed to disconnect from robot\") return False def update_goal_coords(self):", "self.joint_positions[2] = [ self.joint_positions[1][0] + self.lengths[\"b\"] * np.sin(np.deg2rad(self.angles[1])), self.joint_positions[1][1] +", "first_joint[0] += self.x_offset first_joint[1] += self.y_offset second_joint[0] += self.x_offset second_joint[1]", "second_joint[1] += self.y_offset self.img = cv2.line(self.img, tuple(first_joint), tuple(second_joint), (255,0,0), self.member_thickness)", "self.img = cv2.rectangle(self.img, (0,0), (self.img_width, self.y_offset), (0,255,0), -1) # Render", "[-90 for _ in range(self.num_members)] self.min_angles[0] = 0 # To", "MyArm2D: def __init__(self, move_robot = False): self.move_robot = move_robot if", "self.robot.alive: print(\"Success connecting to robot\") return True else: print(\"Failed to", "self.initial_height = 73 # height in mm of motor 5's", "not self.robot.alive: raise Exception(\"Robot is already switched off\") self.robot.disconnect() if", "member_pos = self.joint_positions[joint_index][1] min_height = self.min_joint_heights[joint_index-1] if member_pos < min_height:", "[None, None] self.update_goal_coords() self.joint_positions = [[0,0] for i in range(self.num_members", "int(sum(list(self.lengths.values())) + self.y_offset + 20) self.img = np.zeros((self.img_height, self.img_width, 3))", "self.get_reward(forbidden_action) self.timestep += 1 is_done = self.timestep >= self.max_timestep return", "500) return True else: return False def move_to_pos(self): # First,", "render(self): self.img = np.zeros((self.img_height, self.img_width, 3)) # Render the floor", "self.move_arm(-actions) forbidden_action = True if not okay_positions: print(\"A position threshold", "[ self.joint_positions[1][0] + self.lengths[\"b\"] * np.sin(np.deg2rad(self.angles[1])), self.joint_positions[1][1] + self.lengths[\"b\"] *", "min_height: return False return True def get_reward(self, forbidden_action): if 
forbidden_action:", "motor 5's axle self.lengths = { \"h_0\": 73, \"a\": 97.5,", "False def move_to_default_pos(self): if self.robot.alive: for ID in range(1, 7):", "def get_reward(self, forbidden_action): if forbidden_action: reward_scaling_factor = 2 else: reward_scaling_factor", "875 # 90 -> 500 # 0 -> 125 angles_deg", "import cv2 import random class MyArm2D: def __init__(self, move_robot =", "theta1, theta2] \"\"\" for i, action in enumerate(actions): self.angles[i:] +=", "self.timestep = 0 self.max_timestep = 200 # This is to", "member_id in range(self.num_members): first_joint = self.joint_positions[member_id].copy() second_joint = self.joint_positions[member_id +", "= cv2.circle(self.img, (goal_x + self.x_offset, goal_y + self.y_offset), int(self.member_thickness/2), (128,", "First, convert the angles in degrees between -90º and +90º", "= 1000 - angles_piarm[1] print(\"Angles in degrees: \", angles_deg) print(\"Moving", "random.uniform(0.8*max_length,max_length) theta = random.uniform(-np.pi/4, np.pi/2) x = r * np.sin(theta)", "self.timestep = 0 self.update_goal_coords() self.render() if self.move_robot: self.move_to_default_pos() def check_arm_angles(self):", "self.update_goal_coords() self.joint_positions = [[0,0] for i in range(self.num_members + 1)]", "self.lengths[\"h_0\"] self.img_height = int(sum(list(self.lengths.values())) + self.y_offset + 20) self.img =", "def move_to_pos(self): # First, convert the angles in degrees between", "= [500, 500, 500, 500, 500, 500] self.num_members = 3", "self.robot.alive: for ID in range(3, 6): self.robot.servoWrite(8 - ID, int(angles_piarm[ID", "self.angles[i:] += action for member_index in range(1,self.num_members): self.max_angles[member_index] = self.angles[member_index", "1000 self.x_offset = int(self.img_width/2) self.y_offset = self.lengths[\"h_0\"] self.img_height = int(sum(list(self.lengths.values()))", "in range(3, 6): self.robot.servoWrite(8 - ID, int(angles_piarm[ID - 3]), 500)", "for i, action in 
enumerate(actions): self.angles[i:] += action for member_index", "self.joint_positions] def move_arm(self, actions): \"\"\" The inputs are the new", "= 110 self.base_height = 45 # All the angles are", "False def move_to_pos(self): # First, convert the angles in degrees", "robot\") return True else: print(\"Failed to disconnect from robot\") return", "] self.joint_positions[3] = [ self.joint_positions[2][0] + self.lengths[\"c\"] * np.sin(np.deg2rad(self.angles[2])), self.joint_positions[2][1]", "self.check_arm_positions() if not okay_angles: print(\"An angle threshold was exceeded\") self.move_arm(-actions)", "500, 500] self.num_members = 3 self.adjustable_joints = [3,4,5] self.initial_height =", "of angles [theta0, theta1, theta2] \"\"\" for i, action in", "self.num_members = 3 self.adjustable_joints = [3,4,5] self.initial_height = 73 #", "self.base_width/2), self.y_offset), (int(self.x_offset - self.base_width/2 + self.base_width), self.y_offset + self.base_height),", "return True else: print(\"Failed to connect to robot\") return False", "+90º # to angles between 125 and 875 # 90", "self.img = np.zeros((self.img_height, self.img_width, 3)) # Render the floor self.img", "threshold was exqqceeded\") self.move_arm(-actions) forbidden_action = True self.render() if self.move_robot:", "range(self.num_members)] self.min_angles = [-90 for _ in range(self.num_members)] self.min_angles[0] =", "- angles_piarm[1] print(\"Angles in degrees: \", angles_deg) print(\"Moving arms with", "okay_angles: print(\"An angle threshold was exceeded\") self.move_arm(-actions) forbidden_action = True", "1000 - angles_piarm[0] angles_piarm[1] = 1000 - angles_piarm[1] print(\"Angles in", "self.base_height = 45 # All the angles are with respect", "arms with angles: \", angles_piarm) if self.robot.alive: for ID in", "self.distance2goal * reward_scaling_factor def step(self, actions): self.move_arm(actions) forbidden_action = False", "origin. 
\"\"\" self.joint_positions[0] = [0, self.lengths[\"h_0\"]] self.joint_positions[1] = [ self.joint_positions[0][0]", "print(\"A position threshold was exqqceeded\") self.move_arm(-actions) forbidden_action = True self.render()", "self.img = cv2.flip(self.img, 0) self.img = cv2.putText(self.img, \"Distance: \" +", "10] self.goal_coords = [None, None] self.update_goal_coords() self.joint_positions = [[0,0] for", "+= self.y_offset second_joint[0] += self.x_offset second_joint[1] += self.y_offset self.img =", "= [-90 for _ in range(self.num_members)] self.min_angles[0] = 0 #", "\"\"\" Positions are with respect to the origin (0,0), right", "2 else: reward_scaling_factor = 1 return - self.distance2goal * reward_scaling_factor", "(int(self.x_offset - self.base_width/2), self.y_offset), (int(self.x_offset - self.base_width/2 + self.base_width), self.y_offset", "= r * np.sin(theta) y = r * np.cos(theta) self.goal_coords", "self.lengths[\"a\"] * np.cos(np.deg2rad(self.angles[0])) ] self.joint_positions[2] = [ self.joint_positions[1][0] + self.lengths[\"b\"]", "(self.img_width, self.y_offset), (0,255,0), -1) # Render the base of the", "= 200 # This is to check that all the", "return False return True def get_reward(self, forbidden_action): if forbidden_action: reward_scaling_factor", "okay_positions = self.check_arm_positions() if not okay_angles: print(\"An angle threshold was", "= 2 else: reward_scaling_factor = 1 return - self.distance2goal *", "self.lengths[\"b\"] * np.sin(np.deg2rad(self.angles[1])), self.joint_positions[1][1] + self.lengths[\"b\"] * np.cos(np.deg2rad(self.angles[1])) ] self.joint_positions[3]", "self.update_distance_2_goal() def render(self): self.img = np.zeros((self.img_height, self.img_width, 3)) # Render", "128), 5) for member_id in range(self.num_members): first_joint = self.joint_positions[member_id].copy() second_joint", "hitting the base of the arm self.angles = 90*np.ones(self.num_members) #", "<reponame>DGarciaMedina/PiArmDiego import piarm 
import time import numpy as np import", "of motor 5's axle self.lengths = { \"h_0\": 73, \"a\":", "- 90 angles_deg[2] -= angles_deg[1] angles_deg[1] -= angles_deg[0] angles_piarm =", "-1) # Render the base of the arm self.img =", "of the arm self.img = cv2.rectangle(self.img, (int(self.x_offset - self.base_width/2), self.y_offset),", "respect to the origin (0,0), right underneath motor 5. It", "self.img_width, 3)) self.timestep = 0 self.max_timestep = 200 # This", "\"b\": 96, \"c\": 160 } self.base_width = 110 self.base_height =", "+ self.lengths[\"c\"] * np.cos(np.deg2rad(self.angles[2])) ] # Convert to integers self.joint_positions", "] # Convert to integers self.joint_positions = [[int(x[0]),int(x[1])] for x", "(0,0), (self.img_width, self.y_offset), (0,255,0), -1) # Render the base of", "open_connection(self): if self.robot.alive: raise Exception(\"Robot is already switched on\") self.robot.connect(\"/dev/ttyS0\")", "the joints (except for the last one) is above #", "last one) is above # the ground self.min_joint_heights = [20,", "= 90*np.ones(self.num_members) # angles of motor 3, 4 and 5", "self.move_to_pos() r = self.get_reward(forbidden_action) self.timestep += 1 is_done = self.timestep", "(goal_x + self.x_offset, goal_y + self.y_offset), int(self.member_thickness/2), (128, 0, 128),", "125 and 875 # 90 -> 500 # 0 ->", "self.img = cv2.circle(self.img, (goal_x + self.x_offset, goal_y + self.y_offset), int(self.member_thickness/2),", "in angles_deg] angles_piarm[0] = 1000 - angles_piarm[0] angles_piarm[1] = 1000", "(10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2) cv2.imshow(\"Arm\", self.img) cv2.moveWindow(\"Arm\",20,50) def", "= 1000 - angles_piarm[0] angles_piarm[1] = 1000 - angles_piarm[1] print(\"Angles", "[theta0, theta1, theta2] \"\"\" for i, action in enumerate(actions): self.angles[i:]", "255), -1) goal_x, goal_y = self.goal_coords self.img = cv2.circle(self.img, (goal_x", "angles_deg] angles_piarm[0] = 1000 - angles_piarm[0] angles_piarm[1] = 
1000 -", "all the joints (except for the last one) is above", "_ in range(self.num_members)] self.min_angles[0] = 0 # To prevent it", "if self.robot.alive: for ID in range(1, 7): self.robot.servoWrite(ID, int(self.DEFAULT[ID -", "def move_arm(self, actions): \"\"\" The inputs are the new set", "= piarm.PiArm() self.open_connection() self.DEFAULT = [500, 500, 500, 500, 500,", "from the origin. \"\"\" self.joint_positions[0] = [0, self.lengths[\"h_0\"]] self.joint_positions[1] =", "def move_to_default_pos(self): if self.robot.alive: for ID in range(1, 7): self.robot.servoWrite(ID,", "* reward_scaling_factor def step(self, actions): self.move_arm(actions) forbidden_action = False okay_angles", "threshold was exceeded\") self.move_arm(-actions) forbidden_action = True if not okay_positions:", "self.y_offset + self.base_height), (0, 165, 255), -1) goal_x, goal_y =", "3, 4 and 5 ranging between # min_angle and max_angle", "not okay_angles: print(\"An angle threshold was exceeded\") self.move_arm(-actions) forbidden_action =", "ID in range(3, 6): self.robot.servoWrite(8 - ID, int(angles_piarm[ID - 3]),", "Exception(\"Robot is already switched on\") self.robot.connect(\"/dev/ttyS0\") if self.robot.alive: print(\"Success connecting", "self.open_connection() self.DEFAULT = [500, 500, 500, 500, 500, 500] self.num_members", "self.base_width = 110 self.base_height = 45 # All the angles", "3)) # Render the floor self.img = cv2.rectangle(self.img, (0,0), (self.img_width,", "tuple(second_joint), (255,0,0), self.member_thickness) self.img = cv2.circle(self.img, tuple(first_joint), int(self.member_thickness/2), (255,255,0), -1)", "return False def move_to_pos(self): # First, convert the angles in", "self.distance2goal = np.sqrt(sum([(gripper_pos[i] - self.goal_coords[i])**2 for i in range(2)])) def", "goal_x, goal_y = self.goal_coords self.img = cv2.circle(self.img, (goal_x + self.x_offset,", "int(self.member_thickness/2), (128, 0, 128), 5) for member_id in range(self.num_members): 
first_joint", "else: print(\"Failed to connect to robot\") return False def move_to_default_pos(self):", "+ self.x_offset, goal_y + self.y_offset), int(self.member_thickness/2), (128, 0, 128), 5)", "5) for member_id in range(self.num_members): first_joint = self.joint_positions[member_id].copy() second_joint =", "self.check_arm_angles() okay_positions = self.check_arm_positions() if not okay_angles: print(\"An angle threshold", "goal_y + self.y_offset), int(self.member_thickness/2), (128, 0, 128), 5) for member_id", "self.x_offset second_joint[1] += self.y_offset self.img = cv2.line(self.img, tuple(first_joint), tuple(second_joint), (255,0,0),", "cv2.moveWindow(\"Arm\",20,50) def reset(self): self.angles = 90*np.ones(self.num_members) self.update_positions() self.img = np.zeros((self.img_height,", "update_positions(self): \"\"\" Positions are with respect to the origin (0,0),", "between -90º and +90º # to angles between 125 and", "* np.sin(np.deg2rad(self.angles[0])), self.joint_positions[0][1] + self.lengths[\"a\"] * np.cos(np.deg2rad(self.angles[0])) ] self.joint_positions[2] =", "is already switched off\") self.robot.disconnect() if not self.robot.alive: print(\"Success disconnecting", "self.x_offset = int(self.img_width/2) self.y_offset = self.lengths[\"h_0\"] self.img_height = int(sum(list(self.lengths.values())) +", "90 -> 500 # 0 -> 125 angles_deg = self.angles", "< self.min_angles[member_index]: return False if self.angles[member_index] > self.max_angles[member_index]: return False", "self.timestep += 1 is_done = self.timestep >= self.max_timestep return self.angles,", "upside down self.img = cv2.flip(self.img, 0) self.img = cv2.putText(self.img, \"Distance:", "500, 500, 500, 500] self.num_members = 3 self.adjustable_joints = [3,4,5]", "90*np.ones(self.num_members) self.update_positions() self.img = np.zeros((self.img_height, self.img_width, 3)) self.timestep = 0", "500] self.num_members = 3 self.adjustable_joints = [3,4,5] self.initial_height = 73", 
"tuple(first_joint), int(self.member_thickness/2), (255,255,0), -1) # Flip image upside down self.img", "= random.uniform(-np.pi/4, np.pi/2) x = r * np.sin(theta) y =", "[3,4,5] self.initial_height = 73 # height in mm of motor", "between # min_angle and max_angle self.member_thickness = 30 self.img_width =", "the angles in degrees between -90º and +90º # to", "print(\"Success connecting to robot\") return True else: print(\"Failed to connect", "[[int(x[0]),int(x[1])] for x in self.joint_positions] def move_arm(self, actions): \"\"\" The", "= self.check_arm_positions() if not okay_angles: print(\"An angle threshold was exceeded\")", "r = self.get_reward(forbidden_action) self.timestep += 1 is_done = self.timestep >=", "(0, 165, 255), -1) goal_x, goal_y = self.goal_coords self.img =", "self.move_robot = move_robot if self.move_robot: self.robot = piarm.PiArm() self.open_connection() self.DEFAULT", "for i in range(2)])) def update_positions(self): \"\"\" Positions are with", "time import numpy as np import cv2 import random class", "self.close_connection() def open_connection(self): if self.robot.alive: raise Exception(\"Robot is already switched", "min_height = self.min_joint_heights[joint_index-1] if member_pos < min_height: return False return", "self.member_thickness = 30 self.img_width = 1000 self.x_offset = int(self.img_width/2) self.y_offset", "True else: return False def close_connection(self): if not self.robot.alive: raise", "action for member_index in range(1,self.num_members): self.max_angles[member_index] = self.angles[member_index - 1]", "if self.move_robot: self.move_to_default_pos() def check_arm_angles(self): for member_index in range(self.num_members): if", "np.zeros((self.img_height, self.img_width, 3)) # Render the floor self.img = cv2.rectangle(self.img,", "self.base_width/2 + self.base_width), self.y_offset + self.base_height), (0, 165, 255), -1)", "500) time.sleep(1) return True else: return False def close_connection(self): if", "move_arm(self, 
actions): \"\"\" The inputs are the new set of", "[20, 20, 10] self.goal_coords = [None, None] self.update_goal_coords() self.joint_positions =", "0 self.max_timestep = 200 # This is to check that", "\" + str(round(self.distance2goal,2)), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2) cv2.imshow(\"Arm\",", "} self.base_width = 110 self.base_height = 45 # All the", "self.robot.alive: raise Exception(\"Robot is already switched off\") self.robot.disconnect() if not", "exceeded\") self.move_arm(-actions) forbidden_action = True if not okay_positions: print(\"A position", "axle self.lengths = { \"h_0\": 73, \"a\": 97.5, \"b\": 96,", "floor self.img = cv2.rectangle(self.img, (0,0), (self.img_width, self.y_offset), (0,255,0), -1) #", "= cv2.flip(self.img, 0) self.img = cv2.putText(self.img, \"Distance: \" + str(round(self.distance2goal,2)),", "if self.angles[member_index] > self.max_angles[member_index]: return False return True def check_arm_positions(self):", "range(1,self.num_members): self.max_angles[member_index] = self.angles[member_index - 1] + 90 self.min_angles[member_index] =", "mm of motor 5's axle self.lengths = { \"h_0\": 73,", "self.img_width, 3)) self.timestep = 0 self.update_goal_coords() self.render() if self.move_robot: self.move_to_default_pos()", "angles_deg) print(\"Moving arms with angles: \", angles_piarm) if self.robot.alive: for", "with angles: \", angles_piarm) if self.robot.alive: for ID in range(3,", "self.img = cv2.rectangle(self.img, (int(self.x_offset - self.base_width/2), self.y_offset), (int(self.x_offset - self.base_width/2", "position threshold was exqqceeded\") self.move_arm(-actions) forbidden_action = True self.render() if", "print(\"Success disconnecting from robot\") return True else: print(\"Failed to disconnect", "to angles between 125 and 875 # 90 -> 500", "self.render() if self.move_robot: self.move_to_default_pos() def check_arm_angles(self): for member_index in range(self.num_members):", "and max_angle 
self.member_thickness = 30 self.img_width = 1000 self.x_offset =", "def update_distance_2_goal(self): gripper_pos = self.joint_positions[-1] self.distance2goal = np.sqrt(sum([(gripper_pos[i] - self.goal_coords[i])**2", "self.update_goal_coords() self.render() if self.move_robot: self.move_to_default_pos() def check_arm_angles(self): for member_index in", "self.robot.alive: for ID in range(1, 7): self.robot.servoWrite(ID, int(self.DEFAULT[ID - 1]),", "print(\"Failed to disconnect from robot\") return False def update_goal_coords(self): max_length", "self.joint_positions = [[0,0] for i in range(self.num_members + 1)] self.update_positions()", "random class MyArm2D: def __init__(self, move_robot = False): self.move_robot =", "self.robot.servoWrite(ID, int(self.DEFAULT[ID - 1]), 500) return True else: return False", "if self.robot.alive: for ID in range(3, 6): self.robot.servoWrite(8 - ID,", "- ID, int(angles_piarm[ID - 3]), 500) time.sleep(1) return True else:", "angles_deg[1] angles_deg[1] -= angles_deg[0] angles_piarm = [int(500 + (375/90)*angle_deg) for", "1)] self.update_positions() self.distance2goal = None self.update_distance_2_goal() def __del__(self): print(\"Closing connection...\")", "integers self.joint_positions = [[int(x[0]),int(x[1])] for x in self.joint_positions] def move_arm(self,", "to the origin (0,0), right underneath motor 5. 
It is", "# Render the base of the arm self.img = cv2.rectangle(self.img,", "cv2.circle(self.img, (goal_x + self.x_offset, goal_y + self.y_offset), int(self.member_thickness/2), (128, 0,", "def update_positions(self): \"\"\" Positions are with respect to the origin", "angles_deg[2] -= angles_deg[1] angles_deg[1] -= angles_deg[0] angles_piarm = [int(500 +", "np.cos(np.deg2rad(self.angles[2])) ] # Convert to integers self.joint_positions = [[int(x[0]),int(x[1])] for", "= [[0,0] for i in range(self.num_members + 1)] self.update_positions() self.distance2goal", "Flip image upside down self.img = cv2.flip(self.img, 0) self.img =", "not okay_positions: print(\"A position threshold was exqqceeded\") self.move_arm(-actions) forbidden_action =", "self.lengths = { \"h_0\": 73, \"a\": 97.5, \"b\": 96, \"c\":", "in range(self.num_members)] self.min_angles[0] = 0 # To prevent it from", "else: reward_scaling_factor = 1 return - self.distance2goal * reward_scaling_factor def", "[90 for _ in range(self.num_members)] self.min_angles = [-90 for _", "self.max_angles = [90 for _ in range(self.num_members)] self.min_angles = [-90", "np.sin(np.deg2rad(self.angles[2])), self.joint_positions[2][1] + self.lengths[\"c\"] * np.cos(np.deg2rad(self.angles[2])) ] # Convert to", "in enumerate(actions): self.angles[i:] += action for member_index in range(1,self.num_members): self.max_angles[member_index]", "= 1 return - self.distance2goal * reward_scaling_factor def step(self, actions):", "self.move_arm(actions) forbidden_action = False okay_angles = self.check_arm_angles() okay_positions = self.check_arm_positions()", "* np.cos(np.deg2rad(self.angles[2])) ] # Convert to integers self.joint_positions = [[int(x[0]),int(x[1])]", "self.joint_positions[joint_index][1] min_height = self.min_joint_heights[joint_index-1] if member_pos < min_height: return False", "angles_piarm[1] print(\"Angles in degrees: \", angles_deg) print(\"Moving arms with angles:", "self.y_offset), int(self.member_thickness/2), 
(128, 0, 128), 5) for member_id in range(self.num_members):", "= np.zeros((self.img_height, self.img_width, 3)) # Render the floor self.img =", "self.min_angles = [-90 for _ in range(self.num_members)] self.min_angles[0] = 0", "self.y_offset second_joint[0] += self.x_offset second_joint[1] += self.y_offset self.img = cv2.line(self.img,", "1 return - self.distance2goal * reward_scaling_factor def step(self, actions): self.move_arm(actions)", "self.angles[member_index] > self.max_angles[member_index]: return False return True def check_arm_positions(self): for", "= 73 # height in mm of motor 5's axle", "= { \"h_0\": 73, \"a\": 97.5, \"b\": 96, \"c\": 160", "self.update_distance_2_goal() def __del__(self): print(\"Closing connection...\") if self.move_robot: self.close_connection() def open_connection(self):", "is positive if it is away from the origin. \"\"\"", "the origin. \"\"\" self.joint_positions[0] = [0, self.lengths[\"h_0\"]] self.joint_positions[1] = [", "self.move_robot: self.move_to_pos() r = self.get_reward(forbidden_action) self.timestep += 1 is_done =", "the origin (0,0), right underneath motor 5. 
It is positive", "110 self.base_height = 45 # All the angles are with", "reward_scaling_factor = 2 else: reward_scaling_factor = 1 return - self.distance2goal", "Positions are with respect to the origin (0,0), right underneath", "with respect to the origin (0,0), right underneath motor 5.", "the vertical self.max_angles = [90 for _ in range(self.num_members)] self.min_angles", "self.lengths[\"a\"] * np.sin(np.deg2rad(self.angles[0])), self.joint_positions[0][1] + self.lengths[\"a\"] * np.cos(np.deg2rad(self.angles[0])) ] self.joint_positions[2]", "one) is above # the ground self.min_joint_heights = [20, 20,", "move_robot if self.move_robot: self.robot = piarm.PiArm() self.open_connection() self.DEFAULT = [500,", "True else: print(\"Failed to disconnect from robot\") return False def", "self.min_angles[member_index]: return False if self.angles[member_index] > self.max_angles[member_index]: return False return", "cv2.imshow(\"Arm\", self.img) cv2.moveWindow(\"Arm\",20,50) def reset(self): self.angles = 90*np.ones(self.num_members) self.update_positions() self.img", "= cv2.rectangle(self.img, (int(self.x_offset - self.base_width/2), self.y_offset), (int(self.x_offset - self.base_width/2 +", "self.angles = 90*np.ones(self.num_members) self.update_positions() self.img = np.zeros((self.img_height, self.img_width, 3)) self.timestep", "- 1]), 500) return True else: return False def move_to_pos(self):", "= cv2.rectangle(self.img, (0,0), (self.img_width, self.y_offset), (0,255,0), -1) # Render the", "+= action for member_index in range(1,self.num_members): self.max_angles[member_index] = self.angles[member_index -", "connection...\") if self.move_robot: self.close_connection() def open_connection(self): if self.robot.alive: raise Exception(\"Robot", "the floor self.img = cv2.rectangle(self.img, (0,0), (self.img_width, self.y_offset), (0,255,0), -1)", "arm self.img = cv2.rectangle(self.img, (int(self.x_offset - self.base_width/2), self.y_offset), (int(self.x_offset -", "# 
min_angle and max_angle self.member_thickness = 30 self.img_width = 1000", "__init__(self, move_robot = False): self.move_robot = move_robot if self.move_robot: self.robot", "- self.base_width/2 + self.base_width), self.y_offset + self.base_height), (0, 165, 255),", "= self.joint_positions[joint_index][1] min_height = self.min_joint_heights[joint_index-1] if member_pos < min_height: return", "robot\") return False def update_goal_coords(self): max_length = sum(list(self.lengths.values())[1:]) r =", "angles_piarm) if self.robot.alive: for ID in range(3, 6): self.robot.servoWrite(8 -", "200 # This is to check that all the joints", "\"Distance: \" + str(round(self.distance2goal,2)), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2)", "for member_id in range(self.num_members): first_joint = self.joint_positions[member_id].copy() second_joint = self.joint_positions[member_id", "\"\"\" self.joint_positions[0] = [0, self.lengths[\"h_0\"]] self.joint_positions[1] = [ self.joint_positions[0][0] +", "self.max_timestep = 200 # This is to check that all", "theta = random.uniform(-np.pi/4, np.pi/2) x = r * np.sin(theta) y", "for _ in range(self.num_members)] self.min_angles[0] = 0 # To prevent", "self.move_robot: self.move_to_default_pos() def check_arm_angles(self): for member_index in range(self.num_members): if self.angles[member_index]", "motor 3, 4 and 5 ranging between # min_angle and", "return False def close_connection(self): if not self.robot.alive: raise Exception(\"Robot is", "0 -> 125 angles_deg = self.angles - 90 angles_deg[2] -=", "False): self.move_robot = move_robot if self.move_robot: self.robot = piarm.PiArm() self.open_connection()", "self.img = cv2.line(self.img, tuple(first_joint), tuple(second_joint), (255,0,0), self.member_thickness) self.img = cv2.circle(self.img,", "> self.max_angles[member_index]: return False return True def check_arm_positions(self): for joint_index", "def __del__(self): print(\"Closing connection...\") if self.move_robot: 
self.close_connection() def open_connection(self): if", "from hitting the base of the arm self.angles = 90*np.ones(self.num_members)", "goal_y = self.goal_coords self.img = cv2.circle(self.img, (goal_x + self.x_offset, goal_y", "3)) self.timestep = 0 self.max_timestep = 200 # This is", "self.joint_positions[0][1] + self.lengths[\"a\"] * np.cos(np.deg2rad(self.angles[0])) ] self.joint_positions[2] = [ self.joint_positions[1][0]", "It is positive if it is away from the origin.", "forbidden_action = True if not okay_positions: print(\"A position threshold was", "angles_deg = self.angles - 90 angles_deg[2] -= angles_deg[1] angles_deg[1] -=", "self.robot.alive: raise Exception(\"Robot is already switched on\") self.robot.connect(\"/dev/ttyS0\") if self.robot.alive:", "= True self.render() if self.move_robot: self.move_to_pos() r = self.get_reward(forbidden_action) self.timestep", "in range(self.num_members): first_joint = self.joint_positions[member_id].copy() second_joint = self.joint_positions[member_id + 1].copy()", "down self.img = cv2.flip(self.img, 0) self.img = cv2.putText(self.img, \"Distance: \"", "self.joint_positions[0][0] + self.lengths[\"a\"] * np.sin(np.deg2rad(self.angles[0])), self.joint_positions[0][1] + self.lengths[\"a\"] * np.cos(np.deg2rad(self.angles[0]))", "+ self.lengths[\"b\"] * np.cos(np.deg2rad(self.angles[1])) ] self.joint_positions[3] = [ self.joint_positions[2][0] +", "- self.base_width/2), self.y_offset), (int(self.x_offset - self.base_width/2 + self.base_width), self.y_offset +", "6): self.robot.servoWrite(8 - ID, int(angles_piarm[ID - 3]), 500) time.sleep(1) return", "if not self.robot.alive: raise Exception(\"Robot is already switched off\") self.robot.disconnect()", "= cv2.circle(self.img, tuple(first_joint), int(self.member_thickness/2), (255,255,0), -1) # Flip image upside", "self.x_offset, goal_y + self.y_offset), int(self.member_thickness/2), (128, 0, 128), 5) for" ]
[ "_test_links.RearLink(rear_action, None) fore_link = fore.ForeLink( self.fore_link_pool, {test_method: None}, {test_method: None},", "front_to_back_ticket = tickets.FrontToBackPacket( test_operation_id, 0, tickets.Kind.ENTIRE, test_method, interfaces.ServicedSubscription.Kind.FULL, None, test_front_to_back_datum,", "USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED", "rear_link.accept_front_to_back_ticket(commencement_ticket) for request in scenario.requests(): continuation_ticket = tickets.FrontToBackPacket( test_operation_id, fore_sequence_number,", "request in scenario.requests(): continuation_ticket = tickets.FrontToBackPacket( test_operation_id, fore_sequence_number, tickets.Kind.CONTINUATION, None,", "2015, Google Inc. # All rights reserved. # # Redistribution", "back_to_front_ticket = tickets.BackToFrontPacket( front_to_back_ticket.operation_id, 0, tickets.Kind.COMPLETION, None) fore_link.accept_back_to_front_ticket(back_to_front_ticket) test_rear_link =", "(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF", "payload is not None or terminal: back_to_front_ticket = tickets.BackToFrontPacket( front_to_back_ticket.operation_id,", "THE COPYRIGHT HOLDERS AND CONTRIBUTORS # \"AS IS\" AND ANY", "reproduce the above # copyright notice, this list of conditions", "logging_pool.pool(80) self.rear_link_pool = logging_pool.pool(80) def tearDown(self): self.rear_link_pool.shutdown(wait=True) self.fore_link_pool.shutdown(wait=True) def testZeroMessageRoundTrip(self):", "None, None, _TIMEOUT) rear_link.accept_front_to_back_ticket(front_to_back_ticket) with test_fore_link.condition: while (not test_fore_link.tickets or", "the following disclaimer. 
# * Redistributions in binary form must", "# # Redistribution and use in source and binary forms,", "front_to_back_ticket.operation_id, rear_sequence_number[0], tickets.Kind.COMPLETION if terminal else tickets.Kind.CONTINUATION, response) rear_sequence_number[0] +=", "None, _TIMEOUT) rear_link.accept_front_to_back_ticket(front_to_back_ticket) with test_fore_link.condition: while (not test_fore_link.tickets or test_fore_link.tickets[-1].kind", "class RoundTripTest(unittest.TestCase): def setUp(self): self.fore_link_pool = logging_pool.pool(80) self.rear_link_pool = logging_pool.pool(80)", "tuple( ticket.payload for ticket in test_rear_link.tickets if ticket.payload is not", "test_rear_link = _test_links.RearLink(rear_action, None) fore_link = fore.ForeLink( self.fore_link_pool, {test_method: None},", "with test_fore_link.condition: responses = tuple( ticket.payload for ticket in test_fore_link.tickets", "binary form must reproduce the above # copyright notice, this", "None, ()) fore_link.join_rear_link(test_rear_link) test_rear_link.join_fore_link(fore_link) fore_link.start() port = fore_link.port() rear_link =", "the above # copyright notice, this list of conditions and", "IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR", "without specific prior written permission. 
# # THIS SOFTWARE IS", "tickets.Kind.ENTIRE) if payload is not None or terminal: back_to_front_ticket =", "= logging_pool.pool(80) def tearDown(self): self.rear_link_pool.shutdown(wait=True) self.fore_link_pool.shutdown(wait=True) def testZeroMessageRoundTrip(self): test_operation_id =", "self.rear_link_pool.shutdown(wait=True) self.fore_link_pool.shutdown(wait=True) def testZeroMessageRoundTrip(self): test_operation_id = object() test_method = 'test", "with test_rear_link.condition: front_to_back_payloads = tuple( ticket.payload for ticket in test_rear_link.tickets", "the following disclaimer # in the documentation and/or other materials", "grpc.framework.base import interfaces from grpc.framework.base.packets import packets as tickets from", "packets as tickets from grpc.framework.foundation import logging_pool _IDENTITY = lambda", "{test_method: None}, False, None, None, None) rear_link.join_fore_link(test_fore_link) test_fore_link.join_rear_link(rear_link) rear_link.start() front_to_back_ticket", "testEmptyScenario(self): self._perform_scenario_test(_proto_scenarios.EmptyScenario()) def testBidirectionallyUnaryScenario(self): self._perform_scenario_test(_proto_scenarios.BidirectionallyUnaryScenario()) def testBidirectionallyStreamingScenario(self): self._perform_scenario_test( _proto_scenarios.BidirectionallyStreamingScenario()) if", "\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT", "conditions are # met: # # * Redistributions of source", "self.assertTupleEqual((test_front_to_back_datum,), front_to_back_payloads) self.assertTupleEqual((test_back_to_front_datum,), back_to_front_payloads) def _perform_scenario_test(self, scenario): test_operation_id = object()", "from # this software without specific prior written permission. 
#", "above # copyright notice, this list of conditions and the", "SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #", "rear_link.accept_front_to_back_ticket(continuation_ticket) completion_ticket = tickets.FrontToBackPacket( test_operation_id, fore_sequence_number, tickets.Kind.COMPLETION, None, None, None,", "FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT", "from grpc._adapter import fore from grpc._adapter import rear from grpc.framework.base", "modification, are permitted provided that the following conditions are #", "test_fore_link.tickets if ticket.payload is not None) self.assertTupleEqual((test_front_to_back_datum,), front_to_back_payloads) self.assertTupleEqual((test_back_to_front_datum,), back_to_front_payloads)", "NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND", "disclaimer # in the documentation and/or other materials provided with", "THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH", "must reproduce the above # copyright notice, this list of", "endorse or promote products derived from # this software without", "rear from grpc.framework.base import interfaces from grpc.framework.base.packets import packets as", "ticket.payload is not None) self.assertTupleEqual((test_front_to_back_datum,), front_to_back_payloads) self.assertTupleEqual((test_back_to_front_datum,), back_to_front_payloads) def _perform_scenario_test(self,", "None) fore_sequence_number += 1 rear_link.accept_front_to_back_ticket(continuation_ticket) completion_ticket = tickets.FrontToBackPacket( test_operation_id, fore_sequence_number,", "is not None) self.assertTrue(scenario.verify_requests(requests)) self.assertTrue(scenario.verify_responses(responses)) def testEmptyScenario(self): self._perform_scenario_test(_proto_scenarios.EmptyScenario()) def testBidirectionallyUnaryScenario(self):", "test_operation_id = object() test_method = 'test method' test_front_to_back_datum = b'\\x07'", "# copyright notice, this list of conditions and the 
following", "ticket.payload for ticket in test_rear_link.tickets if ticket.payload is not None)", "{test_method: scenario.deserialize_request}, {test_method: scenario.serialize_response}, None, ()) fore_link.join_rear_link(test_rear_link) test_rear_link.join_fore_link(fore_link) fore_link.start() port", "None, request, None) fore_sequence_number += 1 rear_link.accept_front_to_back_ticket(continuation_ticket) completion_ticket = tickets.FrontToBackPacket(", "None, None) rear_link.join_fore_link(test_fore_link) test_fore_link.join_rear_link(rear_link) rear_link.start() commencement_ticket = tickets.FrontToBackPacket( test_operation_id, 0,", "source and binary forms, with or without # modification, are", "documentation and/or other materials provided with the # distribution. #", "def testZeroMessageRoundTrip(self): test_operation_id = object() test_method = 'test method' test_fore_link", "THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A", "from grpc._adapter import _proto_scenarios from grpc._adapter import _test_links from grpc._adapter", "2 class RoundTripTest(unittest.TestCase): def setUp(self): self.fore_link_pool = logging_pool.pool(80) self.rear_link_pool =", "in ( tickets.Kind.COMPLETION, tickets.Kind.ENTIRE): back_to_front_ticket = tickets.BackToFrontPacket( front_to_back_ticket.operation_id, 0, tickets.Kind.COMPLETION,", "from grpc.framework.foundation import logging_pool _IDENTITY = lambda x: x _TIMEOUT", "rear_sequence_number = [0] def rear_action(front_to_back_ticket, fore_link): if front_to_back_ticket.payload is None:", "ANY WAY OUT OF THE USE # OF THIS SOFTWARE,", "testEntireRoundTrip(self): test_operation_id = object() test_method = 'test method' test_front_to_back_datum =", "with test_fore_link.condition: self.assertIs(test_fore_link.tickets[-1].kind, tickets.Kind.COMPLETION) def testEntireRoundTrip(self): test_operation_id = object() test_method", "tickets.Kind.CONTINUATION, response) rear_sequence_number[0] += 1 
fore_link.accept_back_to_front_ticket(back_to_front_ticket) test_rear_link = _test_links.RearLink(rear_action, None)", "_IDENTITY = lambda x: x _TIMEOUT = 2 class RoundTripTest(unittest.TestCase):", "'localhost', port, self.rear_link_pool, {test_method: None}, {test_method: None}, False, None, None,", "LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING", "following disclaimer # in the documentation and/or other materials provided", "1 rear_link.accept_front_to_back_ticket(commencement_ticket) for request in scenario.requests(): continuation_ticket = tickets.FrontToBackPacket( test_operation_id,", "ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER", "self.assertTrue(scenario.verify_requests(requests)) self.assertTrue(scenario.verify_responses(responses)) def testEmptyScenario(self): self._perform_scenario_test(_proto_scenarios.EmptyScenario()) def testBidirectionallyUnaryScenario(self): self._perform_scenario_test(_proto_scenarios.BidirectionallyUnaryScenario()) def testBidirectionallyStreamingScenario(self):", "# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS", "tickets.Kind.COMPLETION, None) fore_link.accept_back_to_front_ticket(back_to_front_ticket) test_rear_link = _test_links.RearLink(rear_action, None) fore_link = fore.ForeLink(", "test_method = scenario.method() test_fore_link = _test_links.ForeLink(None, None) rear_lock = threading.Lock()", "test_method, interfaces.ServicedSubscription.Kind.FULL, None, None, _TIMEOUT) fore_sequence_number = 1 rear_link.accept_front_to_back_ticket(commencement_ticket) for", "if response is not None or terminal: back_to_front_ticket = tickets.BackToFrontPacket(", "test_fore_link.tickets or test_fore_link.tickets[-1].kind is not tickets.Kind.COMPLETION): test_fore_link.condition.wait() rear_link.stop() fore_link.stop() with", "= b'\\x08' test_fore_link = _test_links.ForeLink(None, None) rear_sequence_number = [0] def", "= _test_links.RearLink(rear_action, None) fore_link = fore.ForeLink( 
self.fore_link_pool, {test_method: _IDENTITY}, {test_method:", "EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO,", "SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR", "in test_rear_link.tickets if ticket.payload is not None) with test_fore_link.condition: responses", "= logging_pool.pool(80) self.rear_link_pool = logging_pool.pool(80) def tearDown(self): self.rear_link_pool.shutdown(wait=True) self.fore_link_pool.shutdown(wait=True) def", "THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY", "threading import unittest from grpc._adapter import _proto_scenarios from grpc._adapter import", "Google Inc. # All rights reserved. # # Redistribution and", "in test_fore_link.tickets if ticket.payload is not None) self.assertTrue(scenario.verify_requests(requests)) self.assertTrue(scenario.verify_responses(responses)) def", "and binary forms, with or without # modification, are permitted", "names of its # contributors may be used to endorse", "with rear_lock: if front_to_back_ticket.payload is not None: response = scenario.response_for_request(front_to_back_ticket.payload)", "with test_fore_link.condition: while (not test_fore_link.tickets or test_fore_link.tickets[-1].kind is tickets.Kind.CONTINUATION): test_fore_link.condition.wait()", "test_operation_id, fore_sequence_number, tickets.Kind.CONTINUATION, None, None, None, request, None) fore_sequence_number +=", "fore_link.port() rear_link = rear.RearLink( 'localhost', port, self.rear_link_pool, {test_method: _IDENTITY}, {test_method:", "USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE", "tickets.Kind.ENTIRE) if response is not None or terminal: back_to_front_ticket =", "test_fore_link = _test_links.ForeLink(None, None) def rear_action(front_to_back_ticket, fore_link): if front_to_back_ticket.kind in", "<gh_stars>1-10 # Copyright 2015, Google Inc. 
# All rights reserved.", "test_method = 'test method' test_front_to_back_datum = b'\\x07' test_back_to_front_datum = b'\\x08'", "grpc._adapter import _proto_scenarios from grpc._adapter import _test_links from grpc._adapter import", "is not tickets.Kind.COMPLETION): test_fore_link.condition.wait() rear_link.stop() fore_link.stop() with test_rear_link.condition: requests =", "is tickets.Kind.CONTINUATION): test_fore_link.condition.wait() rear_link.stop() fore_link.stop() with test_fore_link.condition: self.assertIs(test_fore_link.tickets[-1].kind, tickets.Kind.COMPLETION) def", "self.rear_link_pool, {test_method: scenario.serialize_request}, {test_method: scenario.deserialize_response}, False, None, None, None) rear_link.join_fore_link(test_fore_link)", "tickets.Kind.COMPLETION, tickets.Kind.ENTIRE) if response is not None or terminal: back_to_front_ticket", "rear_link.stop() fore_link.stop() with test_rear_link.condition: requests = tuple( ticket.payload for ticket", "requests = tuple( ticket.payload for ticket in test_rear_link.tickets if ticket.payload", "this software without specific prior written permission. # # THIS", "DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,", "test_rear_link.join_fore_link(fore_link) fore_link.start() port = fore_link.port() rear_link = rear.RearLink( 'localhost', port,", "TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY", "test_fore_link.condition.wait() rear_link.stop() fore_link.stop() with test_rear_link.condition: requests = tuple( ticket.payload for", "completion_ticket = tickets.FrontToBackPacket( test_operation_id, fore_sequence_number, tickets.Kind.COMPLETION, None, None, None, None,", "written permission. 
# # THIS SOFTWARE IS PROVIDED BY THE", "_IDENTITY}, {test_method: _IDENTITY}, False, None, None, None) rear_link.join_fore_link(test_fore_link) test_fore_link.join_rear_link(rear_link) rear_link.start()", "testBidirectionallyUnaryScenario(self): self._perform_scenario_test(_proto_scenarios.BidirectionallyUnaryScenario()) def testBidirectionallyStreamingScenario(self): self._perform_scenario_test( _proto_scenarios.BidirectionallyStreamingScenario()) if __name__ == '__main__':", "if ticket.payload is not None) self.assertTrue(scenario.verify_requests(requests)) self.assertTrue(scenario.verify_responses(responses)) def testEmptyScenario(self): self._perform_scenario_test(_proto_scenarios.EmptyScenario())", "tickets.Kind.COMPLETION): test_fore_link.condition.wait() rear_link.stop() fore_link.stop() with test_rear_link.condition: front_to_back_payloads = tuple( ticket.payload", "notice, this list of conditions and the following disclaimer. #", "( tickets.Kind.COMPLETION, tickets.Kind.ENTIRE) if payload is not None or terminal:", "object() test_method = 'test method' test_fore_link = _test_links.ForeLink(None, None) def", "software without specific prior written permission. # # THIS SOFTWARE", "provided with the # distribution. 
# * Neither the name", "0, tickets.Kind.ENTIRE, test_method, interfaces.ServicedSubscription.Kind.FULL, None, test_front_to_back_datum, _TIMEOUT) rear_link.accept_front_to_back_ticket(front_to_back_ticket) with test_fore_link.condition:", "fore_link.stop() with test_rear_link.condition: front_to_back_payloads = tuple( ticket.payload for ticket in", "OUT OF THE USE # OF THIS SOFTWARE, EVEN IF", "+= 1 rear_link.accept_front_to_back_ticket(completion_ticket) with test_fore_link.condition: while (not test_fore_link.tickets or test_fore_link.tickets[-1].kind", "must retain the above copyright # notice, this list of", "terminal else tickets.Kind.CONTINUATION, response) rear_sequence_number[0] += 1 fore_link.accept_back_to_front_ticket(back_to_front_ticket) test_rear_link =", "rear_link.join_fore_link(test_fore_link) test_fore_link.join_rear_link(rear_link) rear_link.start() front_to_back_ticket = tickets.FrontToBackPacket( test_operation_id, 0, tickets.Kind.ENTIRE, test_method,", "rear_sequence_number = [0] def rear_action(front_to_back_ticket, fore_link): with rear_lock: if front_to_back_ticket.payload", "ticket.payload is not None) with test_fore_link.condition: responses = tuple( ticket.payload", "terminal = front_to_back_ticket.kind in ( tickets.Kind.COMPLETION, tickets.Kind.ENTIRE) if payload is", "# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT #", "THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS", "{test_method: scenario.serialize_response}, None, ()) fore_link.join_rear_link(test_rear_link) test_rear_link.join_fore_link(fore_link) fore_link.start() port = fore_link.port()", "and the following disclaimer. 
# * Redistributions in binary form", "fore_link.stop() with test_fore_link.condition: self.assertIs(test_fore_link.tickets[-1].kind, tickets.Kind.COMPLETION) def testEntireRoundTrip(self): test_operation_id = object()", "LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF", "GRPC-backed ForeLink and RearLink.\"\"\" import threading import unittest from grpc._adapter", "WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE", "front_to_back_ticket.kind in ( tickets.Kind.COMPLETION, tickets.Kind.ENTIRE) if response is not None", "= rear.RearLink( 'localhost', port, self.rear_link_pool, {test_method: _IDENTITY}, {test_method: _IDENTITY}, False,", "WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE", "# Copyright 2015, Google Inc. # All rights reserved. #", "def rear_action(front_to_back_ticket, fore_link): if front_to_back_ticket.payload is None: payload = None", "None: payload = None else: payload = test_back_to_front_datum terminal =", "self.assertTrue(scenario.verify_responses(responses)) def testEmptyScenario(self): self._perform_scenario_test(_proto_scenarios.EmptyScenario()) def testBidirectionallyUnaryScenario(self): self._perform_scenario_test(_proto_scenarios.BidirectionallyUnaryScenario()) def testBidirectionallyStreamingScenario(self): self._perform_scenario_test(", "of source code must retain the above copyright # notice,", "in test_fore_link.tickets if ticket.payload is not None) self.assertTupleEqual((test_front_to_back_datum,), front_to_back_payloads) self.assertTupleEqual((test_back_to_front_datum,),", "and RearLink.\"\"\" import threading import unittest from grpc._adapter import _proto_scenarios", "= tickets.BackToFrontPacket( front_to_back_ticket.operation_id, rear_sequence_number[0], tickets.Kind.COMPLETION if terminal else tickets.Kind.CONTINUATION, payload)", "BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR", "response = None terminal = front_to_back_ticket.kind in ( tickets.Kind.COMPLETION, 
tickets.Kind.ENTIRE)", "{test_method: _IDENTITY}, None, ()) fore_link.join_rear_link(test_rear_link) test_rear_link.join_fore_link(fore_link) fore_link.start() port = fore_link.port()", "front_to_back_ticket.kind in ( tickets.Kind.COMPLETION, tickets.Kind.ENTIRE): back_to_front_ticket = tickets.BackToFrontPacket( front_to_back_ticket.operation_id, 0,", "OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF", "test_fore_link.join_rear_link(rear_link) rear_link.start() front_to_back_ticket = tickets.FrontToBackPacket( test_operation_id, 0, tickets.Kind.ENTIRE, test_method, interfaces.ServicedSubscription.Kind.FULL,", "test_operation_id, fore_sequence_number, tickets.Kind.COMPLETION, None, None, None, None, None) fore_sequence_number +=", "tickets.FrontToBackPacket( test_operation_id, 0, tickets.Kind.ENTIRE, test_method, interfaces.ServicedSubscription.Kind.FULL, None, test_front_to_back_datum, _TIMEOUT) rear_link.accept_front_to_back_ticket(front_to_back_ticket)", "# this software without specific prior written permission. 
# #", "import threading import unittest from grpc._adapter import _proto_scenarios from grpc._adapter", "import logging_pool _IDENTITY = lambda x: x _TIMEOUT = 2", "test_rear_link.condition: front_to_back_payloads = tuple( ticket.payload for ticket in test_rear_link.tickets if", "rear_link.accept_front_to_back_ticket(front_to_back_ticket) with test_fore_link.condition: while (not test_fore_link.tickets or test_fore_link.tickets[-1].kind is not", "interfaces from grpc.framework.base.packets import packets as tickets from grpc.framework.foundation import", "scenario.response_for_request(front_to_back_ticket.payload) else: response = None terminal = front_to_back_ticket.kind in (", "= _test_links.ForeLink(None, None) rear_lock = threading.Lock() rear_sequence_number = [0] def", "OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE", "LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION)", "grpc.framework.base.packets import packets as tickets from grpc.framework.foundation import logging_pool _IDENTITY", "_perform_scenario_test(self, scenario): test_operation_id = object() test_method = scenario.method() test_fore_link =", "commencement_ticket = tickets.FrontToBackPacket( test_operation_id, 0, tickets.Kind.COMMENCEMENT, test_method, interfaces.ServicedSubscription.Kind.FULL, None, None,", "IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\"\"\"Test of", "scenario.method() test_fore_link = _test_links.ForeLink(None, None) rear_lock = threading.Lock() rear_sequence_number =", "_TIMEOUT) rear_link.accept_front_to_back_ticket(front_to_back_ticket) with test_fore_link.condition: while (not test_fore_link.tickets or test_fore_link.tickets[-1].kind is", "test_rear_link.tickets if ticket.payload is not None) with test_fore_link.condition: back_to_front_payloads =", "_test_links from grpc._adapter import fore from grpc._adapter import rear from", "fore_link.port() rear_link = rear.RearLink( 'localhost', port, self.rear_link_pool, {test_method: None}, {test_method:", "if ticket.payload is not None) with test_fore_link.condition: back_to_front_payloads = tuple(", "scenario.deserialize_request}, {test_method: scenario.serialize_response}, None, ()) fore_link.join_rear_link(test_rear_link) test_rear_link.join_fore_link(fore_link) fore_link.start() port =", "POSSIBILITY OF SUCH DAMAGE. \"\"\"Test of the GRPC-backed ForeLink and", "payload = test_back_to_front_datum terminal = front_to_back_ticket.kind in ( tickets.Kind.COMPLETION, tickets.Kind.ENTIRE)", "or without # modification, are permitted provided that the following", "Redistribution and use in source and binary forms, with or", "_proto_scenarios from grpc._adapter import _test_links from grpc._adapter import fore from", "'test method' test_fore_link = _test_links.ForeLink(None, None) def rear_action(front_to_back_ticket, fore_link): if", "rear_link.stop() fore_link.stop() with test_rear_link.condition: front_to_back_payloads = tuple( ticket.payload for ticket", "list of conditions and the following disclaimer # in the", "request, None) fore_sequence_number += 1 rear_link.accept_front_to_back_ticket(continuation_ticket) completion_ticket = tickets.FrontToBackPacket( test_operation_id,", "test_fore_link.condition: self.assertIs(test_fore_link.tickets[-1].kind, tickets.Kind.COMPLETION) def testEntireRoundTrip(self): test_operation_id = object() test_method 
=", "front_to_back_payloads) self.assertTupleEqual((test_back_to_front_datum,), back_to_front_payloads) def _perform_scenario_test(self, scenario): test_operation_id = object() test_method", "tickets.Kind.ENTIRE, test_method, interfaces.ServicedSubscription.Kind.FULL, None, None, _TIMEOUT) rear_link.accept_front_to_back_ticket(front_to_back_ticket) with test_fore_link.condition: while", "= tickets.FrontToBackPacket( test_operation_id, 0, tickets.Kind.COMMENCEMENT, test_method, interfaces.ServicedSubscription.Kind.FULL, None, None, _TIMEOUT)", "provided that the following conditions are # met: # #", "LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR", "= _test_links.RearLink(rear_action, None) fore_link = fore.ForeLink( self.fore_link_pool, {test_method: None}, {test_method:", "retain the above copyright # notice, this list of conditions", "= fore_link.port() rear_link = rear.RearLink( 'localhost', port, self.rear_link_pool, {test_method: None},", "is not tickets.Kind.COMPLETION): test_fore_link.condition.wait() rear_link.stop() fore_link.stop() with test_rear_link.condition: front_to_back_payloads =", "HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER", "name of Google Inc. nor the names of its #", "None) fore_link = fore.ForeLink( self.fore_link_pool, {test_method: _IDENTITY}, {test_method: _IDENTITY}, None,", "SUCH DAMAGE. 
\"\"\"Test of the GRPC-backed ForeLink and RearLink.\"\"\" import", "not tickets.Kind.COMPLETION): test_fore_link.condition.wait() rear_link.stop() fore_link.stop() with test_rear_link.condition: requests = tuple(", "= tickets.BackToFrontPacket( front_to_back_ticket.operation_id, 0, tickets.Kind.COMPLETION, None) fore_link.accept_back_to_front_ticket(back_to_front_ticket) test_rear_link = _test_links.RearLink(rear_action,", "fore_link = fore.ForeLink( self.fore_link_pool, {test_method: None}, {test_method: None}, None, ())", "SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.", "fore_link.join_rear_link(test_rear_link) test_rear_link.join_fore_link(fore_link) fore_link.start() port = fore_link.port() rear_link = rear.RearLink( 'localhost',", "test_fore_link.condition: responses = tuple( ticket.payload for ticket in test_fore_link.tickets if", "rights reserved. # # Redistribution and use in source and", "_test_links.ForeLink(None, None) rear_sequence_number = [0] def rear_action(front_to_back_ticket, fore_link): if front_to_back_ticket.payload", "SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR", "from grpc.framework.base.packets import packets as tickets from grpc.framework.foundation import logging_pool", "or test_fore_link.tickets[-1].kind is not tickets.Kind.COMPLETION): test_fore_link.condition.wait() rear_link.stop() fore_link.stop() with test_rear_link.condition:", "None) self.assertTrue(scenario.verify_requests(requests)) self.assertTrue(scenario.verify_responses(responses)) def testEmptyScenario(self): self._perform_scenario_test(_proto_scenarios.EmptyScenario()) def testBidirectionallyUnaryScenario(self): self._perform_scenario_test(_proto_scenarios.BidirectionallyUnaryScenario()) def", "rear_sequence_number[0] += 1 fore_link.accept_back_to_front_ticket(back_to_front_ticket) test_rear_link = _test_links.RearLink(rear_action, None) fore_link =", "IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS", "fore_sequence_number += 1 
rear_link.accept_front_to_back_ticket(completion_ticket) with test_fore_link.condition: while (not test_fore_link.tickets or", "AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #", "= lambda x: x _TIMEOUT = 2 class RoundTripTest(unittest.TestCase): def", "ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED", "else: payload = test_back_to_front_datum terminal = front_to_back_ticket.kind in ( tickets.Kind.COMPLETION,", "nor the names of its # contributors may be used", "that the following conditions are # met: # # *", "front_to_back_ticket.kind in ( tickets.Kind.COMPLETION, tickets.Kind.ENTIRE) if payload is not None", "this list of conditions and the following disclaimer. # *", "# * Neither the name of Google Inc. nor the", "def rear_action(front_to_back_ticket, fore_link): if front_to_back_ticket.kind in ( tickets.Kind.COMPLETION, tickets.Kind.ENTIRE): back_to_front_ticket", "* Redistributions in binary form must reproduce the above #", "None, None, None) rear_link.join_fore_link(test_fore_link) test_fore_link.join_rear_link(rear_link) rear_link.start() front_to_back_ticket = tickets.FrontToBackPacket( test_operation_id,", "self.rear_link_pool, {test_method: _IDENTITY}, {test_method: _IDENTITY}, False, None, None, None) rear_link.join_fore_link(test_fore_link)", "x: x _TIMEOUT = 2 class RoundTripTest(unittest.TestCase): def setUp(self): self.fore_link_pool", "test_fore_link.condition.wait() rear_link.stop() fore_link.stop() with test_fore_link.condition: self.assertIs(test_fore_link.tickets[-1].kind, tickets.Kind.COMPLETION) def testEntireRoundTrip(self): test_operation_id", "the following conditions are # met: # # * Redistributions", "# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR", "tickets.Kind.COMPLETION, None, None, None, None, None) fore_sequence_number += 1 rear_link.accept_front_to_back_ticket(completion_ticket)", "following conditions are # met: # # * Redistributions of", "DAMAGES (INCLUDING, BUT NOT # LIMITED TO, 
PROCUREMENT OF SUBSTITUTE", "above copyright # notice, this list of conditions and the", "rear.RearLink( 'localhost', port, self.rear_link_pool, {test_method: scenario.serialize_request}, {test_method: scenario.deserialize_response}, False, None,", "= threading.Lock() rear_sequence_number = [0] def rear_action(front_to_back_ticket, fore_link): with rear_lock:", "# in the documentation and/or other materials provided with the", "terminal else tickets.Kind.CONTINUATION, payload) rear_sequence_number[0] += 1 fore_link.accept_back_to_front_ticket(back_to_front_ticket) test_rear_link =", "in source and binary forms, with or without # modification,", "'test method' test_front_to_back_datum = b'\\x07' test_back_to_front_datum = b'\\x08' test_fore_link =", "tickets.Kind.CONTINUATION, None, None, None, request, None) fore_sequence_number += 1 rear_link.accept_front_to_back_ticket(continuation_ticket)", "+= 1 fore_link.accept_back_to_front_ticket(back_to_front_ticket) test_rear_link = _test_links.RearLink(rear_action, None) fore_link = fore.ForeLink(", "# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,", "False, None, None, None) rear_link.join_fore_link(test_fore_link) test_fore_link.join_rear_link(rear_link) rear_link.start() commencement_ticket = tickets.FrontToBackPacket(", "from grpc._adapter import _test_links from grpc._adapter import fore from grpc._adapter", "CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF", "object() test_method = 'test method' test_front_to_back_datum = b'\\x07' test_back_to_front_datum =", "products derived from # this software without specific prior written", "TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #", "(INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS", "test_fore_link.join_rear_link(rear_link) rear_link.start() commencement_ticket = tickets.FrontToBackPacket( test_operation_id, 0, tickets.Kind.COMMENCEMENT, test_method, interfaces.ServicedSubscription.Kind.FULL,", 
"test_rear_link = _test_links.RearLink(rear_action, None) fore_link = fore.ForeLink( self.fore_link_pool, {test_method: _IDENTITY},", "test_fore_link.tickets[-1].kind is not tickets.Kind.COMPLETION): test_fore_link.condition.wait() rear_link.stop() fore_link.stop() with test_rear_link.condition: requests", "of Google Inc. nor the names of its # contributors", "self.fore_link_pool, {test_method: None}, {test_method: None}, None, ()) fore_link.join_rear_link(test_rear_link) test_rear_link.join_fore_link(fore_link) fore_link.start()", "OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY", "use in source and binary forms, with or without #", "None) fore_link.accept_back_to_front_ticket(back_to_front_ticket) test_rear_link = _test_links.RearLink(rear_action, None) fore_link = fore.ForeLink( self.fore_link_pool,", "else: response = None terminal = front_to_back_ticket.kind in ( tickets.Kind.COMPLETION,", "tickets.FrontToBackPacket( test_operation_id, 0, tickets.Kind.ENTIRE, test_method, interfaces.ServicedSubscription.Kind.FULL, None, None, _TIMEOUT) rear_link.accept_front_to_back_ticket(front_to_back_ticket)", "None) rear_link.join_fore_link(test_fore_link) test_fore_link.join_rear_link(rear_link) rear_link.start() commencement_ticket = tickets.FrontToBackPacket( test_operation_id, 0, tickets.Kind.COMMENCEMENT,", "None) fore_sequence_number += 1 rear_link.accept_front_to_back_ticket(completion_ticket) with test_fore_link.condition: while (not test_fore_link.tickets", "method' test_fore_link = _test_links.ForeLink(None, None) def rear_action(front_to_back_ticket, fore_link): if front_to_back_ticket.kind", "[0] def rear_action(front_to_back_ticket, fore_link): if front_to_back_ticket.payload is None: payload =", "= fore.ForeLink( self.fore_link_pool, {test_method: _IDENTITY}, {test_method: _IDENTITY}, None, ()) fore_link.join_rear_link(test_rear_link)", "WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN", "Google Inc. 
nor the names of its # contributors may", "# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND", "IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR", "SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED", "tickets.Kind.COMPLETION) def testEntireRoundTrip(self): test_operation_id = object() test_method = 'test method'", "tickets.Kind.COMPLETION, tickets.Kind.ENTIRE) if payload is not None or terminal: back_to_front_ticket", "is not None) with test_fore_link.condition: responses = tuple( ticket.payload for", "ticket.payload for ticket in test_fore_link.tickets if ticket.payload is not None)", "Inc. # All rights reserved. # # Redistribution and use", "port, self.rear_link_pool, {test_method: _IDENTITY}, {test_method: _IDENTITY}, False, None, None, None)", "permitted provided that the following conditions are # met: #", "# contributors may be used to endorse or promote products", "distribution. # * Neither the name of Google Inc. nor", "test_operation_id = object() test_method = 'test method' test_fore_link = _test_links.ForeLink(None,", "self._perform_scenario_test(_proto_scenarios.EmptyScenario()) def testBidirectionallyUnaryScenario(self): self._perform_scenario_test(_proto_scenarios.BidirectionallyUnaryScenario()) def testBidirectionallyStreamingScenario(self): self._perform_scenario_test( _proto_scenarios.BidirectionallyStreamingScenario()) if __name__", "ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\"\"\"Test of the", "with test_rear_link.condition: requests = tuple( ticket.payload for ticket in test_rear_link.tickets", "test_operation_id, 0, tickets.Kind.ENTIRE, test_method, interfaces.ServicedSubscription.Kind.FULL, None, None, _TIMEOUT) rear_link.accept_front_to_back_ticket(front_to_back_ticket) with", "self.assertIs(test_fore_link.tickets[-1].kind, tickets.Kind.COMPLETION) def testEntireRoundTrip(self): test_operation_id = object() test_method = 'test", "def _perform_scenario_test(self, scenario): test_operation_id = object() test_method = scenario.method() test_fore_link", "= rear.RearLink( 'localhost', port, self.rear_link_pool, {test_method: scenario.serialize_request}, {test_method: scenario.deserialize_response}, False,", "conditions and the following disclaimer. # * Redistributions in binary", "interfaces.ServicedSubscription.Kind.FULL, None, None, _TIMEOUT) fore_sequence_number = 1 rear_link.accept_front_to_back_ticket(commencement_ticket) for request", "tickets.Kind.COMPLETION if terminal else tickets.Kind.CONTINUATION, response) rear_sequence_number[0] += 1 fore_link.accept_back_to_front_ticket(back_to_front_ticket)", "OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #", "OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED", "not None) with test_fore_link.condition: responses = tuple( ticket.payload for ticket", "front_to_back_ticket.payload is not None: response = scenario.response_for_request(front_to_back_ticket.payload) else: response =", "in ( tickets.Kind.COMPLETION, tickets.Kind.ENTIRE) if response is not None or", "ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT", "# # * Redistributions of source code must retain the", "terminal: back_to_front_ticket = tickets.BackToFrontPacket( front_to_back_ticket.operation_id, rear_sequence_number[0], tickets.Kind.COMPLETION if terminal else", "rear_action(front_to_back_ticket, fore_link): with rear_lock: if front_to_back_ticket.payload is not None: response", "and the following 
disclaimer # in the documentation and/or other", "not None: response = scenario.response_for_request(front_to_back_ticket.payload) else: response = None terminal", "fore_link = fore.ForeLink( self.fore_link_pool, {test_method: scenario.deserialize_request}, {test_method: scenario.serialize_response}, None, ())", "fore_link.stop() with test_rear_link.condition: requests = tuple( ticket.payload for ticket in", "with or without # modification, are permitted provided that the", "# distribution. # * Neither the name of Google Inc.", "EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO,", "is not None) with test_fore_link.condition: back_to_front_payloads = tuple( ticket.payload for", "= tuple( ticket.payload for ticket in test_fore_link.tickets if ticket.payload is", "= object() test_method = scenario.method() test_fore_link = _test_links.ForeLink(None, None) rear_lock", "test_fore_link.tickets or test_fore_link.tickets[-1].kind is tickets.Kind.CONTINUATION): test_fore_link.condition.wait() rear_link.stop() fore_link.stop() with test_fore_link.condition:", "{test_method: _IDENTITY}, False, None, None, None) rear_link.join_fore_link(test_fore_link) test_fore_link.join_rear_link(rear_link) rear_link.start() front_to_back_ticket", "None terminal = front_to_back_ticket.kind in ( tickets.Kind.COMPLETION, tickets.Kind.ENTIRE) if response", "OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,", "unittest from grpc._adapter import _proto_scenarios from grpc._adapter import _test_links from", "the GRPC-backed ForeLink and RearLink.\"\"\" import threading import unittest from", "{test_method: None}, None, ()) fore_link.join_rear_link(test_rear_link) test_rear_link.join_fore_link(fore_link) fore_link.start() port = fore_link.port()", "from grpc.framework.base import interfaces from grpc.framework.base.packets import packets as tickets", "front_to_back_ticket.operation_id, rear_sequence_number[0], tickets.Kind.COMPLETION if terminal else 
tickets.Kind.CONTINUATION, payload) rear_sequence_number[0] +=", "list of conditions and the following disclaimer. # * Redistributions", "# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY", "interfaces.ServicedSubscription.Kind.FULL, None, test_front_to_back_datum, _TIMEOUT) rear_link.accept_front_to_back_ticket(front_to_back_ticket) with test_fore_link.condition: while (not test_fore_link.tickets", "rear_link = rear.RearLink( 'localhost', port, self.rear_link_pool, {test_method: scenario.serialize_request}, {test_method: scenario.deserialize_response},", "not None or terminal: back_to_front_ticket = tickets.BackToFrontPacket( front_to_back_ticket.operation_id, rear_sequence_number[0], tickets.Kind.COMPLETION", "rear_action(front_to_back_ticket, fore_link): if front_to_back_ticket.payload is None: payload = None else:", "rear_action(front_to_back_ticket, fore_link): if front_to_back_ticket.kind in ( tickets.Kind.COMPLETION, tickets.Kind.ENTIRE): back_to_front_ticket =", "responses = tuple( ticket.payload for ticket in test_fore_link.tickets if ticket.payload", "OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE", "= scenario.response_for_request(front_to_back_ticket.payload) else: response = None terminal = front_to_back_ticket.kind in", "if front_to_back_ticket.payload is None: payload = None else: payload =", "None or terminal: back_to_front_ticket = tickets.BackToFrontPacket( front_to_back_ticket.operation_id, rear_sequence_number[0], tickets.Kind.COMPLETION if", "IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT", "interfaces.ServicedSubscription.Kind.FULL, None, None, _TIMEOUT) rear_link.accept_front_to_back_ticket(front_to_back_ticket) with test_fore_link.condition: while (not test_fore_link.tickets", "IN ANY WAY OUT OF THE USE # OF THIS", "if ticket.payload is not None) self.assertTupleEqual((test_front_to_back_datum,), front_to_back_payloads) self.assertTupleEqual((test_back_to_front_datum,), back_to_front_payloads) def", 
"CONTRIBUTORS # \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,", "rear_link.accept_front_to_back_ticket(front_to_back_ticket) with test_fore_link.condition: while (not test_fore_link.tickets or test_fore_link.tickets[-1].kind is tickets.Kind.CONTINUATION):", "for ticket in test_rear_link.tickets if ticket.payload is not None) with", "GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS;", "lambda x: x _TIMEOUT = 2 class RoundTripTest(unittest.TestCase): def setUp(self):", "{test_method: _IDENTITY}, {test_method: _IDENTITY}, None, ()) fore_link.join_rear_link(test_rear_link) test_rear_link.join_fore_link(fore_link) fore_link.start() port", "with test_fore_link.condition: while (not test_fore_link.tickets or test_fore_link.tickets[-1].kind is not tickets.Kind.COMPLETION):", "_IDENTITY}, {test_method: _IDENTITY}, None, ()) fore_link.join_rear_link(test_rear_link) test_rear_link.join_fore_link(fore_link) fore_link.start() port =", "= tickets.FrontToBackPacket( test_operation_id, 0, tickets.Kind.ENTIRE, test_method, interfaces.ServicedSubscription.Kind.FULL, None, None, _TIMEOUT)", "ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,", "def testEmptyScenario(self): self._perform_scenario_test(_proto_scenarios.EmptyScenario()) def testBidirectionallyUnaryScenario(self): self._perform_scenario_test(_proto_scenarios.BidirectionallyUnaryScenario()) def testBidirectionallyStreamingScenario(self): self._perform_scenario_test( _proto_scenarios.BidirectionallyStreamingScenario())", "prior written permission. 
# # THIS SOFTWARE IS PROVIDED BY", "def testEntireRoundTrip(self): test_operation_id = object() test_method = 'test method' test_front_to_back_datum", "else tickets.Kind.CONTINUATION, payload) rear_sequence_number[0] += 1 fore_link.accept_back_to_front_ticket(back_to_front_ticket) test_rear_link = _test_links.RearLink(rear_action,", "met: # # * Redistributions of source code must retain", "_TIMEOUT = 2 class RoundTripTest(unittest.TestCase): def setUp(self): self.fore_link_pool = logging_pool.pool(80)", "response) rear_sequence_number[0] += 1 fore_link.accept_back_to_front_ticket(back_to_front_ticket) test_rear_link = _test_links.RearLink(rear_action, None) fore_link", "CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL,", "materials provided with the # distribution. # * Neither the", "fore from grpc._adapter import rear from grpc.framework.base import interfaces from", "fore_link): if front_to_back_ticket.kind in ( tickets.Kind.COMPLETION, tickets.Kind.ENTIRE): back_to_front_ticket = tickets.BackToFrontPacket(", "OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY", "test_fore_link.tickets[-1].kind is not tickets.Kind.COMPLETION): test_fore_link.condition.wait() rear_link.stop() fore_link.stop() with test_rear_link.condition: front_to_back_payloads", "CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE)", "# * Redistributions in binary form must reproduce the above", "back_to_front_payloads) def _perform_scenario_test(self, scenario): test_operation_id = object() test_method = scenario.method()", "and use in source and binary forms, with or without", "0, tickets.Kind.ENTIRE, test_method, interfaces.ServicedSubscription.Kind.FULL, None, None, _TIMEOUT) rear_link.accept_front_to_back_ticket(front_to_back_ticket) with test_fore_link.condition:", "not None) self.assertTrue(scenario.verify_requests(requests)) self.assertTrue(scenario.verify_responses(responses)) def testEmptyScenario(self): 
self._perform_scenario_test(_proto_scenarios.EmptyScenario()) def testBidirectionallyUnaryScenario(self): self._perform_scenario_test(_proto_scenarios.BidirectionallyUnaryScenario())", "testZeroMessageRoundTrip(self): test_operation_id = object() test_method = 'test method' test_fore_link =", "None, None, None) fore_sequence_number += 1 rear_link.accept_front_to_back_ticket(completion_ticket) with test_fore_link.condition: while", "rear_lock = threading.Lock() rear_sequence_number = [0] def rear_action(front_to_back_ticket, fore_link): with", "test_method, interfaces.ServicedSubscription.Kind.FULL, None, None, _TIMEOUT) rear_link.accept_front_to_back_ticket(front_to_back_ticket) with test_fore_link.condition: while (not", "response is not None or terminal: back_to_front_ticket = tickets.BackToFrontPacket( front_to_back_ticket.operation_id,", "test_back_to_front_datum terminal = front_to_back_ticket.kind in ( tickets.Kind.COMPLETION, tickets.Kind.ENTIRE) if payload", "OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR", "or test_fore_link.tickets[-1].kind is tickets.Kind.CONTINUATION): test_fore_link.condition.wait() rear_link.stop() fore_link.stop() with test_fore_link.condition: self.assertIs(test_fore_link.tickets[-1].kind,", "test_operation_id, 0, tickets.Kind.ENTIRE, test_method, interfaces.ServicedSubscription.Kind.FULL, None, test_front_to_back_datum, _TIMEOUT) rear_link.accept_front_to_back_ticket(front_to_back_ticket) with", "def setUp(self): self.fore_link_pool = logging_pool.pool(80) self.rear_link_pool = logging_pool.pool(80) def tearDown(self):", "1 rear_link.accept_front_to_back_ticket(continuation_ticket) completion_ticket = tickets.FrontToBackPacket( test_operation_id, fore_sequence_number, tickets.Kind.COMPLETION, None, None,", "COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,", "notice, this list of conditions and the following disclaimer #", "import packets as tickets from grpc.framework.foundation import logging_pool _IDENTITY =", "other 
materials provided with the # distribution. # * Neither", "are # met: # # * Redistributions of source code", "Inc. nor the names of its # contributors may be", "= scenario.method() test_fore_link = _test_links.ForeLink(None, None) rear_lock = threading.Lock() rear_sequence_number", "grpc._adapter import _test_links from grpc._adapter import fore from grpc._adapter import", "OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT", "grpc._adapter import fore from grpc._adapter import rear from grpc.framework.base import", "of conditions and the following disclaimer. # * Redistributions in", "test_fore_link.condition: back_to_front_payloads = tuple( ticket.payload for ticket in test_fore_link.tickets if", "not None) self.assertTupleEqual((test_front_to_back_datum,), front_to_back_payloads) self.assertTupleEqual((test_back_to_front_datum,), back_to_front_payloads) def _perform_scenario_test(self, scenario): test_operation_id", "tickets.Kind.COMPLETION): test_fore_link.condition.wait() rear_link.stop() fore_link.stop() with test_rear_link.condition: requests = tuple( ticket.payload", "and/or other materials provided with the # distribution. 
# *", "if terminal else tickets.Kind.CONTINUATION, payload) rear_sequence_number[0] += 1 fore_link.accept_back_to_front_ticket(back_to_front_ticket) test_rear_link", "be used to endorse or promote products derived from #", "None, None, _TIMEOUT) fore_sequence_number = 1 rear_link.accept_front_to_back_ticket(commencement_ticket) for request in", "forms, with or without # modification, are permitted provided that", "rear.RearLink( 'localhost', port, self.rear_link_pool, {test_method: None}, {test_method: None}, False, None,", "ARISING IN ANY WAY OUT OF THE USE # OF", "for request in scenario.requests(): continuation_ticket = tickets.FrontToBackPacket( test_operation_id, fore_sequence_number, tickets.Kind.CONTINUATION,", "binary forms, with or without # modification, are permitted provided", "= tickets.FrontToBackPacket( test_operation_id, fore_sequence_number, tickets.Kind.CONTINUATION, None, None, None, request, None)", "port, self.rear_link_pool, {test_method: None}, {test_method: None}, False, None, None, None)", "copyright # notice, this list of conditions and the following", "= front_to_back_ticket.kind in ( tickets.Kind.COMPLETION, tickets.Kind.ENTIRE) if response is not", "= object() test_method = 'test method' test_front_to_back_datum = b'\\x07' test_back_to_front_datum", "None) self.assertTupleEqual((test_front_to_back_datum,), front_to_back_payloads) self.assertTupleEqual((test_back_to_front_datum,), back_to_front_payloads) def _perform_scenario_test(self, scenario): test_operation_id =", "specific prior written permission. # # THIS SOFTWARE IS PROVIDED", "contributors may be used to endorse or promote products derived", "DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR", "OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA,", "= 'test method' test_fore_link = _test_links.ForeLink(None, None) def rear_action(front_to_back_ticket, fore_link):", "* Neither the name of Google Inc. 
nor the names", "in test_rear_link.tickets if ticket.payload is not None) with test_fore_link.condition: back_to_front_payloads", "fore.ForeLink( self.fore_link_pool, {test_method: None}, {test_method: None}, None, ()) fore_link.join_rear_link(test_rear_link) test_rear_link.join_fore_link(fore_link)", "setUp(self): self.fore_link_pool = logging_pool.pool(80) self.rear_link_pool = logging_pool.pool(80) def tearDown(self): self.rear_link_pool.shutdown(wait=True)", "# met: # # * Redistributions of source code must", "ticket in test_fore_link.tickets if ticket.payload is not None) self.assertTupleEqual((test_front_to_back_datum,), front_to_back_payloads)", "= [0] def rear_action(front_to_back_ticket, fore_link): if front_to_back_ticket.payload is None: payload", "in the documentation and/or other materials provided with the #", "the above copyright # notice, this list of conditions and", "test_method = 'test method' test_fore_link = _test_links.ForeLink(None, None) def rear_action(front_to_back_ticket,", "COPYRIGHT HOLDERS AND CONTRIBUTORS # \"AS IS\" AND ANY EXPRESS", "None, _TIMEOUT) fore_sequence_number = 1 rear_link.accept_front_to_back_ticket(commencement_ticket) for request in scenario.requests():", "the name of Google Inc. nor the names of its", "STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING", "[0] def rear_action(front_to_back_ticket, fore_link): with rear_lock: if front_to_back_ticket.payload is not", "= 'test method' test_front_to_back_datum = b'\\x07' test_back_to_front_datum = b'\\x08' test_fore_link", "None, None, None, None) fore_sequence_number += 1 rear_link.accept_front_to_back_ticket(completion_ticket) with test_fore_link.condition:", "# All rights reserved. # # Redistribution and use in", "DAMAGE. \"\"\"Test of the GRPC-backed ForeLink and RearLink.\"\"\" import threading", "with the # distribution. 
# * Neither the name of", "None}, {test_method: None}, None, ()) fore_link.join_rear_link(test_rear_link) test_rear_link.join_fore_link(fore_link) fore_link.start() port =", "tickets.BackToFrontPacket( front_to_back_ticket.operation_id, 0, tickets.Kind.COMPLETION, None) fore_link.accept_back_to_front_ticket(back_to_front_ticket) test_rear_link = _test_links.RearLink(rear_action, None)", "_test_links.ForeLink(None, None) def rear_action(front_to_back_ticket, fore_link): if front_to_back_ticket.kind in ( tickets.Kind.COMPLETION,", "self.rear_link_pool, {test_method: None}, {test_method: None}, False, None, None, None) rear_link.join_fore_link(test_fore_link)", "BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY,", "fore_link.accept_back_to_front_ticket(back_to_front_ticket) test_rear_link = _test_links.RearLink(rear_action, None) fore_link = fore.ForeLink( self.fore_link_pool, {test_method:", "fore_sequence_number, tickets.Kind.COMPLETION, None, None, None, None, None) fore_sequence_number += 1", "self.fore_link_pool = logging_pool.pool(80) self.rear_link_pool = logging_pool.pool(80) def tearDown(self): self.rear_link_pool.shutdown(wait=True) self.fore_link_pool.shutdown(wait=True)", "= fore_link.port() rear_link = rear.RearLink( 'localhost', port, self.rear_link_pool, {test_method: _IDENTITY},", "THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT", "continuation_ticket = tickets.FrontToBackPacket( test_operation_id, fore_sequence_number, tickets.Kind.CONTINUATION, None, None, None, request,", "test_fore_link.tickets if ticket.payload is not None) self.assertTrue(scenario.verify_requests(requests)) self.assertTrue(scenario.verify_responses(responses)) def testEmptyScenario(self):", "None}, {test_method: None}, False, None, None, None) rear_link.join_fore_link(test_fore_link) test_fore_link.join_rear_link(rear_link) rear_link.start()", "# Redistribution and use in source and binary forms, with", "b'\\x07' test_back_to_front_datum = 
b'\\x08' test_fore_link = _test_links.ForeLink(None, None) rear_sequence_number =", "None}, False, None, None, None) rear_link.join_fore_link(test_fore_link) test_fore_link.join_rear_link(rear_link) rear_link.start() front_to_back_ticket =", "INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF", "method' test_front_to_back_datum = b'\\x07' test_back_to_front_datum = b'\\x08' test_fore_link = _test_links.ForeLink(None,", "def tearDown(self): self.rear_link_pool.shutdown(wait=True) self.fore_link_pool.shutdown(wait=True) def testZeroMessageRoundTrip(self): test_operation_id = object() test_method", "code must retain the above copyright # notice, this list", "rear.RearLink( 'localhost', port, self.rear_link_pool, {test_method: _IDENTITY}, {test_method: _IDENTITY}, False, None,", "tickets.FrontToBackPacket( test_operation_id, fore_sequence_number, tickets.Kind.COMPLETION, None, None, None, None, None) fore_sequence_number", "test_fore_link.condition: while (not test_fore_link.tickets or test_fore_link.tickets[-1].kind is tickets.Kind.CONTINUATION): test_fore_link.condition.wait() rear_link.stop()", "None else: payload = test_back_to_front_datum terminal = front_to_back_ticket.kind in (", "IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # \"AS", "OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY", "response = scenario.response_for_request(front_to_back_ticket.payload) else: response = None terminal = front_to_back_ticket.kind", "Copyright 2015, Google Inc. # All rights reserved. # #", "= b'\\x07' test_back_to_front_datum = b'\\x08' test_fore_link = _test_links.ForeLink(None, None) rear_sequence_number", "None) rear_lock = threading.Lock() rear_sequence_number = [0] def rear_action(front_to_back_ticket, fore_link):", "FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO", "= fore.ForeLink( self.fore_link_pool, {test_method: None}, {test_method: None}, None, ()) fore_link.join_rear_link(test_rear_link)", "None) rear_sequence_number = [0] def rear_action(front_to_back_ticket, fore_link): if front_to_back_ticket.payload is", "threading.Lock() rear_sequence_number = [0] def rear_action(front_to_back_ticket, fore_link): with rear_lock: if", "port, self.rear_link_pool, {test_method: scenario.serialize_request}, {test_method: scenario.deserialize_response}, False, None, None, None)", "= tuple( ticket.payload for ticket in test_rear_link.tickets if ticket.payload is", "* Redistributions of source code must retain the above copyright", "tickets.Kind.COMPLETION, tickets.Kind.ENTIRE): back_to_front_ticket = tickets.BackToFrontPacket( front_to_back_ticket.operation_id, 0, tickets.Kind.COMPLETION, None) fore_link.accept_back_to_front_ticket(back_to_front_ticket)", "# modification, are permitted provided that the following conditions are", "def testBidirectionallyUnaryScenario(self): self._perform_scenario_test(_proto_scenarios.BidirectionallyUnaryScenario()) def testBidirectionallyStreamingScenario(self): self._perform_scenario_test( _proto_scenarios.BidirectionallyStreamingScenario()) if __name__ ==", "{test_method: None}, {test_method: None}, None, ()) fore_link.join_rear_link(test_rear_link) test_rear_link.join_fore_link(fore_link) fore_link.start() port", "_test_links.RearLink(rear_action, None) fore_link = fore.ForeLink( self.fore_link_pool, {test_method: scenario.deserialize_request}, {test_method: scenario.serialize_response},", "scenario.deserialize_response}, False, None, None, None) rear_link.join_fore_link(test_fore_link) test_fore_link.join_rear_link(rear_link) rear_link.start() commencement_ticket =", "SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS", "None) fore_link = fore.ForeLink( self.fore_link_pool, {test_method: scenario.deserialize_request}, {test_method: scenario.serialize_response}, None,", "_IDENTITY}, 
False, None, None, None) rear_link.join_fore_link(test_fore_link) test_fore_link.join_rear_link(rear_link) rear_link.start() front_to_back_ticket =", "of conditions and the following disclaimer # in the documentation", "fore.ForeLink( self.fore_link_pool, {test_method: scenario.deserialize_request}, {test_method: scenario.serialize_response}, None, ()) fore_link.join_rear_link(test_rear_link) test_rear_link.join_fore_link(fore_link)", "tickets.Kind.ENTIRE): back_to_front_ticket = tickets.BackToFrontPacket( front_to_back_ticket.operation_id, 0, tickets.Kind.COMPLETION, None) fore_link.accept_back_to_front_ticket(back_to_front_ticket) test_rear_link", "is not None: response = scenario.response_for_request(front_to_back_ticket.payload) else: response = None", "None) def rear_action(front_to_back_ticket, fore_link): if front_to_back_ticket.kind in ( tickets.Kind.COMPLETION, tickets.Kind.ENTIRE):", "INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY,", "A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE", "self.fore_link_pool.shutdown(wait=True) def testZeroMessageRoundTrip(self): test_operation_id = object() test_method = 'test method'", "0, tickets.Kind.COMMENCEMENT, test_method, interfaces.ServicedSubscription.Kind.FULL, None, None, _TIMEOUT) fore_sequence_number = 1", "fore.ForeLink( self.fore_link_pool, {test_method: _IDENTITY}, {test_method: _IDENTITY}, None, ()) fore_link.join_rear_link(test_rear_link) test_rear_link.join_fore_link(fore_link)", "if payload is not None or terminal: back_to_front_ticket = tickets.BackToFrontPacket(", "with test_fore_link.condition: back_to_front_payloads = tuple( ticket.payload for ticket in test_fore_link.tickets", "OF SUCH DAMAGE. \"\"\"Test of the GRPC-backed ForeLink and RearLink.\"\"\"", "the # distribution. 
# * Neither the name of Google", "may be used to endorse or promote products derived from", "_IDENTITY}, None, ()) fore_link.join_rear_link(test_rear_link) test_rear_link.join_fore_link(fore_link) fore_link.start() port = fore_link.port() rear_link", "rear_link.accept_front_to_back_ticket(completion_ticket) with test_fore_link.condition: while (not test_fore_link.tickets or test_fore_link.tickets[-1].kind is not", "back_to_front_payloads = tuple( ticket.payload for ticket in test_fore_link.tickets if ticket.payload", "rear_link = rear.RearLink( 'localhost', port, self.rear_link_pool, {test_method: _IDENTITY}, {test_method: _IDENTITY},", "is not None or terminal: back_to_front_ticket = tickets.BackToFrontPacket( front_to_back_ticket.operation_id, rear_sequence_number[0],", "# notice, this list of conditions and the following disclaimer.", "{test_method: scenario.deserialize_response}, False, None, None, None) rear_link.join_fore_link(test_fore_link) test_fore_link.join_rear_link(rear_link) rear_link.start() commencement_ticket", "tickets.FrontToBackPacket( test_operation_id, 0, tickets.Kind.COMMENCEMENT, test_method, interfaces.ServicedSubscription.Kind.FULL, None, None, _TIMEOUT) fore_sequence_number", "test_rear_link.tickets if ticket.payload is not None) with test_fore_link.condition: responses =", "scenario.serialize_response}, None, ()) fore_link.join_rear_link(test_rear_link) test_rear_link.join_fore_link(fore_link) fore_link.start() port = fore_link.port() rear_link", "of the GRPC-backed ForeLink and RearLink.\"\"\" import threading import unittest", "not tickets.Kind.COMPLETION): test_fore_link.condition.wait() rear_link.stop() fore_link.stop() with test_rear_link.condition: front_to_back_payloads = tuple(", "RearLink.\"\"\" import threading import unittest from grpc._adapter import _proto_scenarios from", "or promote products derived from # this software without specific", "conditions and the following disclaimer # in the documentation and/or", "NEGLIGENCE 
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE", "while (not test_fore_link.tickets or test_fore_link.tickets[-1].kind is tickets.Kind.CONTINUATION): test_fore_link.condition.wait() rear_link.stop() fore_link.stop()", "form must reproduce the above # copyright notice, this list", "payload = None else: payload = test_back_to_front_datum terminal = front_to_back_ticket.kind", "OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE", "( tickets.Kind.COMPLETION, tickets.Kind.ENTIRE) if response is not None or terminal:", "ticket in test_rear_link.tickets if ticket.payload is not None) with test_fore_link.condition:", "EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE", "= test_back_to_front_datum terminal = front_to_back_ticket.kind in ( tickets.Kind.COMPLETION, tickets.Kind.ENTIRE) if", "if front_to_back_ticket.kind in ( tickets.Kind.COMPLETION, tickets.Kind.ENTIRE): back_to_front_ticket = tickets.BackToFrontPacket( front_to_back_ticket.operation_id,", "tickets.Kind.COMPLETION if terminal else tickets.Kind.CONTINUATION, payload) rear_sequence_number[0] += 1 fore_link.accept_back_to_front_ticket(back_to_front_ticket)", "INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT", "PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #", "INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT", "in ( tickets.Kind.COMPLETION, tickets.Kind.ENTIRE) if payload is not None or", "# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,", "rear_link.start() front_to_back_ticket = tickets.FrontToBackPacket( test_operation_id, 0, tickets.Kind.ENTIRE, test_method, interfaces.ServicedSubscription.Kind.FULL, None,", "HOLDERS AND CONTRIBUTORS # \"AS IS\" AND ANY EXPRESS OR", "self.fore_link_pool, {test_method: _IDENTITY}, {test_method: _IDENTITY}, None, ()) fore_link.join_rear_link(test_rear_link) test_rear_link.join_fore_link(fore_link) fore_link.start()", "the names of its # contributors may be used to", "= 
fore_link.port() rear_link = rear.RearLink( 'localhost', port, self.rear_link_pool, {test_method: scenario.serialize_request},", "None, None) fore_sequence_number += 1 rear_link.accept_front_to_back_ticket(completion_ticket) with test_fore_link.condition: while (not", "# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND", "fore_link.port() rear_link = rear.RearLink( 'localhost', port, self.rear_link_pool, {test_method: scenario.serialize_request}, {test_method:", "promote products derived from # this software without specific prior", "following disclaimer. # * Redistributions in binary form must reproduce", "fore_sequence_number, tickets.Kind.CONTINUATION, None, None, None, request, None) fore_sequence_number += 1", "fore_sequence_number += 1 rear_link.accept_front_to_back_ticket(continuation_ticket) completion_ticket = tickets.FrontToBackPacket( test_operation_id, fore_sequence_number, tickets.Kind.COMPLETION,", "from grpc._adapter import rear from grpc.framework.base import interfaces from grpc.framework.base.packets", "fore_sequence_number = 1 rear_link.accept_front_to_back_ticket(commencement_ticket) for request in scenario.requests(): continuation_ticket =", "scenario.requests(): continuation_ticket = tickets.FrontToBackPacket( test_operation_id, fore_sequence_number, tickets.Kind.CONTINUATION, None, None, None,", "import interfaces from grpc.framework.base.packets import packets as tickets from grpc.framework.foundation", "front_to_back_ticket = tickets.FrontToBackPacket( test_operation_id, 0, tickets.Kind.ENTIRE, test_method, interfaces.ServicedSubscription.Kind.FULL, None, None,", "+= 1 rear_link.accept_front_to_back_ticket(continuation_ticket) completion_ticket = tickets.FrontToBackPacket( test_operation_id, fore_sequence_number, tickets.Kind.COMPLETION, None,", "rear_link.stop() fore_link.stop() with test_fore_link.condition: self.assertIs(test_fore_link.tickets[-1].kind, tickets.Kind.COMPLETION) def testEntireRoundTrip(self): test_operation_id =", 
"{test_method: None}, {test_method: None}, False, None, None, None) rear_link.join_fore_link(test_fore_link) test_fore_link.join_rear_link(rear_link)", "self.fore_link_pool, {test_method: scenario.deserialize_request}, {test_method: scenario.serialize_response}, None, ()) fore_link.join_rear_link(test_rear_link) test_rear_link.join_fore_link(fore_link) fore_link.start()", "test_front_to_back_datum = b'\\x07' test_back_to_front_datum = b'\\x08' test_fore_link = _test_links.ForeLink(None, None)", "PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY #", "front_to_back_ticket.payload is None: payload = None else: payload = test_back_to_front_datum", "test_fore_link.tickets[-1].kind is tickets.Kind.CONTINUATION): test_fore_link.condition.wait() rear_link.stop() fore_link.stop() with test_fore_link.condition: self.assertIs(test_fore_link.tickets[-1].kind, tickets.Kind.COMPLETION)", "test_fore_link = _test_links.ForeLink(None, None) rear_lock = threading.Lock() rear_sequence_number = [0]", "grpc._adapter import rear from grpc.framework.base import interfaces from grpc.framework.base.packets import", "= tickets.BackToFrontPacket( front_to_back_ticket.operation_id, rear_sequence_number[0], tickets.Kind.COMPLETION if terminal else tickets.Kind.CONTINUATION, response)", "PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT #", "tickets.Kind.CONTINUATION): test_fore_link.condition.wait() rear_link.stop() fore_link.stop() with test_fore_link.condition: self.assertIs(test_fore_link.tickets[-1].kind, tickets.Kind.COMPLETION) def testEntireRoundTrip(self):", "fore_link.start() port = fore_link.port() rear_link = rear.RearLink( 'localhost', port, self.rear_link_pool,", "self.assertTupleEqual((test_back_to_front_datum,), back_to_front_payloads) def _perform_scenario_test(self, scenario): test_operation_id = object() test_method =", "= 2 class RoundTripTest(unittest.TestCase): def setUp(self): self.fore_link_pool = logging_pool.pool(80) self.rear_link_pool", "All rights reserved. # # Redistribution and use in source", "disclaimer. # * Redistributions in binary form must reproduce the", "test_fore_link.condition: while (not test_fore_link.tickets or test_fore_link.tickets[-1].kind is not tickets.Kind.COMPLETION): test_fore_link.condition.wait()", "without # modification, are permitted provided that the following conditions", "tickets from grpc.framework.foundation import logging_pool _IDENTITY = lambda x: x", "= [0] def rear_action(front_to_back_ticket, fore_link): with rear_lock: if front_to_back_ticket.payload is", "self.rear_link_pool = logging_pool.pool(80) def tearDown(self): self.rear_link_pool.shutdown(wait=True) self.fore_link_pool.shutdown(wait=True) def testZeroMessageRoundTrip(self): test_operation_id", "rear_lock: if front_to_back_ticket.payload is not None: response = scenario.response_for_request(front_to_back_ticket.payload) else:", "= tickets.FrontToBackPacket( test_operation_id, 0, tickets.Kind.ENTIRE, test_method, interfaces.ServicedSubscription.Kind.FULL, None, test_front_to_back_datum, _TIMEOUT)", "is not None) self.assertTupleEqual((test_front_to_back_datum,), front_to_back_payloads) self.assertTupleEqual((test_back_to_front_datum,), back_to_front_payloads) def _perform_scenario_test(self, scenario):", "= None else: payload = 
test_back_to_front_datum terminal = front_to_back_ticket.kind in", "None, None, None) rear_link.join_fore_link(test_fore_link) test_fore_link.join_rear_link(rear_link) rear_link.start() commencement_ticket = tickets.FrontToBackPacket( test_operation_id,", "used to endorse or promote products derived from # this", "AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN", "or terminal: back_to_front_ticket = tickets.BackToFrontPacket( front_to_back_ticket.operation_id, rear_sequence_number[0], tickets.Kind.COMPLETION if terminal", "front_to_back_ticket.operation_id, 0, tickets.Kind.COMPLETION, None) fore_link.accept_back_to_front_ticket(back_to_front_ticket) test_rear_link = _test_links.RearLink(rear_action, None) fore_link", "THE POSSIBILITY OF SUCH DAMAGE. \"\"\"Test of the GRPC-backed ForeLink", "THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF", "IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED", "tuple( ticket.payload for ticket in test_fore_link.tickets if ticket.payload is not", "as tickets from grpc.framework.foundation import logging_pool _IDENTITY = lambda x:", "grpc.framework.foundation import logging_pool _IDENTITY = lambda x: x _TIMEOUT =", "def rear_action(front_to_back_ticket, fore_link): with rear_lock: if front_to_back_ticket.payload is not None:", "tickets.FrontToBackPacket( test_operation_id, fore_sequence_number, tickets.Kind.CONTINUATION, None, None, None, request, None) fore_sequence_number", "MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED.", "test_front_to_back_datum, _TIMEOUT) rear_link.accept_front_to_back_ticket(front_to_back_ticket) with test_fore_link.condition: while (not test_fore_link.tickets or test_fore_link.tickets[-1].kind", "None) with test_fore_link.condition: responses = tuple( ticket.payload for ticket in", "{test_method: scenario.serialize_request}, {test_method: scenario.deserialize_response}, False, None, None, None) rear_link.join_fore_link(test_fore_link) 
test_fore_link.join_rear_link(rear_link)", "False, None, None, None) rear_link.join_fore_link(test_fore_link) test_fore_link.join_rear_link(rear_link) rear_link.start() front_to_back_ticket = tickets.FrontToBackPacket(", "\"\"\"Test of the GRPC-backed ForeLink and RearLink.\"\"\" import threading import", "while (not test_fore_link.tickets or test_fore_link.tickets[-1].kind is not tickets.Kind.COMPLETION): test_fore_link.condition.wait() rear_link.stop()", "ticket.payload is not None) with test_fore_link.condition: back_to_front_payloads = tuple( ticket.payload", "tickets.BackToFrontPacket( front_to_back_ticket.operation_id, rear_sequence_number[0], tickets.Kind.COMPLETION if terminal else tickets.Kind.CONTINUATION, payload) rear_sequence_number[0]", "tickets.Kind.CONTINUATION, payload) rear_sequence_number[0] += 1 fore_link.accept_back_to_front_ticket(back_to_front_ticket) test_rear_link = _test_links.RearLink(rear_action, None)", "None) rear_link.join_fore_link(test_fore_link) test_fore_link.join_rear_link(rear_link) rear_link.start() front_to_back_ticket = tickets.FrontToBackPacket( test_operation_id, 0, tickets.Kind.ENTIRE,", "reserved. 
# # Redistribution and use in source and binary", "CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN", "self._perform_scenario_test(_proto_scenarios.BidirectionallyUnaryScenario()) def testBidirectionallyStreamingScenario(self): self._perform_scenario_test( _proto_scenarios.BidirectionallyStreamingScenario()) if __name__ == '__main__': unittest.main()", "= tickets.FrontToBackPacket( test_operation_id, fore_sequence_number, tickets.Kind.COMPLETION, None, None, None, None, None)", "= _test_links.ForeLink(None, None) def rear_action(front_to_back_ticket, fore_link): if front_to_back_ticket.kind in (", "# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS", "BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY", "this list of conditions and the following disclaimer # in", "ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES", "1 fore_link.accept_back_to_front_ticket(back_to_front_ticket) test_rear_link = _test_links.RearLink(rear_action, None) fore_link = fore.ForeLink( self.fore_link_pool,", "DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON", "1 rear_link.accept_front_to_back_ticket(completion_ticket) with test_fore_link.condition: while (not test_fore_link.tickets or test_fore_link.tickets[-1].kind is", "( tickets.Kind.COMPLETION, tickets.Kind.ENTIRE): back_to_front_ticket = tickets.BackToFrontPacket( front_to_back_ticket.operation_id, 0, tickets.Kind.COMPLETION, None)", "tickets.Kind.ENTIRE, test_method, interfaces.ServicedSubscription.Kind.FULL, None, test_front_to_back_datum, _TIMEOUT) rear_link.accept_front_to_back_ticket(front_to_back_ticket) with test_fore_link.condition: while", "# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL", "Redistributions of source code must retain the above copyright #", "tickets.Kind.COMMENCEMENT, test_method, interfaces.ServicedSubscription.Kind.FULL, None, None, _TIMEOUT) fore_sequence_number = 1 rear_link.accept_front_to_back_ticket(commencement_ticket)", "import fore from grpc._adapter import rear from grpc.framework.base import interfaces", "EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \"\"\"Test", "WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES", "AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT,", "import _test_links from grpc._adapter import fore from grpc._adapter import rear", "Redistributions in binary form must reproduce the above # copyright", "None, None, None, None, None) fore_sequence_number += 1 rear_link.accept_front_to_back_ticket(completion_ticket) with", "None, None, request, None) fore_sequence_number += 1 rear_link.accept_front_to_back_ticket(continuation_ticket) completion_ticket =", "# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS", "# * Redistributions of source code must retain the above", "TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,", "0, tickets.Kind.COMPLETION, None) fore_link.accept_back_to_front_ticket(back_to_front_ticket) test_rear_link = _test_links.RearLink(rear_action, None) fore_link =", "Neither the name of Google Inc. 
nor the names of", "None, None) rear_link.join_fore_link(test_fore_link) test_fore_link.join_rear_link(rear_link) rear_link.start() front_to_back_ticket = tickets.FrontToBackPacket( test_operation_id, 0,", "= None terminal = front_to_back_ticket.kind in ( tickets.Kind.COMPLETION, tickets.Kind.ENTIRE) if", "payload) rear_sequence_number[0] += 1 fore_link.accept_back_to_front_ticket(back_to_front_ticket) test_rear_link = _test_links.RearLink(rear_action, None) fore_link", "OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #", "logging_pool.pool(80) def tearDown(self): self.rear_link_pool.shutdown(wait=True) self.fore_link_pool.shutdown(wait=True) def testZeroMessageRoundTrip(self): test_operation_id = object()", "in binary form must reproduce the above # copyright notice,", "front_to_back_payloads = tuple( ticket.payload for ticket in test_rear_link.tickets if ticket.payload", "RoundTripTest(unittest.TestCase): def setUp(self): self.fore_link_pool = logging_pool.pool(80) self.rear_link_pool = logging_pool.pool(80) def", "LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN", "scenario): test_operation_id = object() test_method = scenario.method() test_fore_link = _test_links.ForeLink(None,", "rear_sequence_number[0], tickets.Kind.COMPLETION if terminal else tickets.Kind.CONTINUATION, payload) rear_sequence_number[0] += 1", "copyright notice, this list of conditions and the following disclaimer", "LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR", "fore_link = fore.ForeLink( self.fore_link_pool, {test_method: _IDENTITY}, {test_method: _IDENTITY}, None, ())", "permission. 
# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT", "test_rear_link = _test_links.RearLink(rear_action, None) fore_link = fore.ForeLink( self.fore_link_pool, {test_method: scenario.deserialize_request},", "are permitted provided that the following conditions are # met:", "'localhost', port, self.rear_link_pool, {test_method: scenario.serialize_request}, {test_method: scenario.deserialize_response}, False, None, None,", "AND CONTRIBUTORS # \"AS IS\" AND ANY EXPRESS OR IMPLIED", "= _test_links.RearLink(rear_action, None) fore_link = fore.ForeLink( self.fore_link_pool, {test_method: scenario.deserialize_request}, {test_method:", "object() test_method = scenario.method() test_fore_link = _test_links.ForeLink(None, None) rear_lock =", "BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # \"AS IS\" AND", "is None: payload = None else: payload = test_back_to_front_datum terminal", "None) fore_link = fore.ForeLink( self.fore_link_pool, {test_method: None}, {test_method: None}, None,", "to endorse or promote products derived from # this software", "the documentation and/or other materials provided with the # distribution.", "for ticket in test_fore_link.tickets if ticket.payload is not None) self.assertTupleEqual((test_front_to_back_datum,),", "test_operation_id = object() test_method = scenario.method() test_fore_link = _test_links.ForeLink(None, None)", "test_method, interfaces.ServicedSubscription.Kind.FULL, None, test_front_to_back_datum, _TIMEOUT) rear_link.accept_front_to_back_ticket(front_to_back_ticket) with test_fore_link.condition: while (not", "scenario.serialize_request}, {test_method: scenario.deserialize_response}, False, None, None, None) rear_link.join_fore_link(test_fore_link) test_fore_link.join_rear_link(rear_link) rear_link.start()", "None: response = scenario.response_for_request(front_to_back_ticket.payload) else: response = None terminal =", "ForeLink and RearLink.\"\"\" import threading import unittest from grpc._adapter import", "= object() test_method = 'test 
method' test_fore_link = _test_links.ForeLink(None, None)", "None) with test_fore_link.condition: back_to_front_payloads = tuple( ticket.payload for ticket in", "ticket.payload is not None) self.assertTrue(scenario.verify_requests(requests)) self.assertTrue(scenario.verify_responses(responses)) def testEmptyScenario(self): self._perform_scenario_test(_proto_scenarios.EmptyScenario()) def", "= fore.ForeLink( self.fore_link_pool, {test_method: scenario.deserialize_request}, {test_method: scenario.serialize_response}, None, ()) fore_link.join_rear_link(test_rear_link)", "NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;", "its # contributors may be used to endorse or promote", "port = fore_link.port() rear_link = rear.RearLink( 'localhost', port, self.rear_link_pool, {test_method:", "'localhost', port, self.rear_link_pool, {test_method: _IDENTITY}, {test_method: _IDENTITY}, False, None, None,", "rear_sequence_number[0], tickets.Kind.COMPLETION if terminal else tickets.Kind.CONTINUATION, response) rear_sequence_number[0] += 1", "if terminal else tickets.Kind.CONTINUATION, response) rear_sequence_number[0] += 1 fore_link.accept_back_to_front_ticket(back_to_front_ticket) test_rear_link", "OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER", "OTHERWISE) ARISING IN ANY WAY OUT OF THE USE #", "source code must retain the above copyright # notice, this", "= front_to_back_ticket.kind in ( tickets.Kind.COMPLETION, tickets.Kind.ENTIRE) if payload is not", "{test_method: _IDENTITY}, {test_method: _IDENTITY}, False, None, None, None) rear_link.join_fore_link(test_fore_link) test_fore_link.join_rear_link(rear_link)", "None, test_front_to_back_datum, _TIMEOUT) rear_link.accept_front_to_back_ticket(front_to_back_ticket) with test_fore_link.condition: while (not test_fore_link.tickets or", "of its # contributors may be used to endorse or", "OF THE POSSIBILITY OF SUCH DAMAGE. 
\"\"\"Test of the GRPC-backed", "None, None, None, request, None) fore_sequence_number += 1 rear_link.accept_front_to_back_ticket(continuation_ticket) completion_ticket", "not None) with test_fore_link.condition: back_to_front_payloads = tuple( ticket.payload for ticket", "BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF", "test_fore_link = _test_links.ForeLink(None, None) rear_sequence_number = [0] def rear_action(front_to_back_ticket, fore_link):", "b'\\x08' test_fore_link = _test_links.ForeLink(None, None) rear_sequence_number = [0] def rear_action(front_to_back_ticket,", "_test_links.ForeLink(None, None) rear_lock = threading.Lock() rear_sequence_number = [0] def rear_action(front_to_back_ticket,", "# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT", "import unittest from grpc._adapter import _proto_scenarios from grpc._adapter import _test_links", "rear_link.join_fore_link(test_fore_link) test_fore_link.join_rear_link(rear_link) rear_link.start() commencement_ticket = tickets.FrontToBackPacket( test_operation_id, 0, tickets.Kind.COMMENCEMENT, test_method,", "NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE", "= 1 rear_link.accept_front_to_back_ticket(commencement_ticket) for request in scenario.requests(): continuation_ticket = tickets.FrontToBackPacket(", "rear_link = rear.RearLink( 'localhost', port, self.rear_link_pool, {test_method: None}, {test_method: None},", "in scenario.requests(): continuation_ticket = tickets.FrontToBackPacket( test_operation_id, fore_sequence_number, tickets.Kind.CONTINUATION, None, None,", "()) fore_link.join_rear_link(test_rear_link) test_rear_link.join_fore_link(fore_link) fore_link.start() port = fore_link.port() rear_link = rear.RearLink(", "for ticket in test_fore_link.tickets if ticket.payload is not None) self.assertTrue(scenario.verify_requests(requests))", "derived from # this software without specific prior written permission.", "back_to_front_ticket = tickets.BackToFrontPacket( 
front_to_back_ticket.operation_id, rear_sequence_number[0], tickets.Kind.COMPLETION if terminal else tickets.Kind.CONTINUATION,", "terminal = front_to_back_ticket.kind in ( tickets.Kind.COMPLETION, tickets.Kind.ENTIRE) if response is", "if ticket.payload is not None) with test_fore_link.condition: responses = tuple(", "fore_link): if front_to_back_ticket.payload is None: payload = None else: payload", "FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL", "= _test_links.ForeLink(None, None) rear_sequence_number = [0] def rear_action(front_to_back_ticket, fore_link): if", "(not test_fore_link.tickets or test_fore_link.tickets[-1].kind is tickets.Kind.CONTINUATION): test_fore_link.condition.wait() rear_link.stop() fore_link.stop() with", "import rear from grpc.framework.base import interfaces from grpc.framework.base.packets import packets", "(not test_fore_link.tickets or test_fore_link.tickets[-1].kind is not tickets.Kind.COMPLETION): test_fore_link.condition.wait() rear_link.stop() fore_link.stop()", "test_fore_link.condition.wait() rear_link.stop() fore_link.stop() with test_rear_link.condition: front_to_back_payloads = tuple( ticket.payload for", "fore_link): with rear_lock: if front_to_back_ticket.payload is not None: response =", "tearDown(self): self.rear_link_pool.shutdown(wait=True) self.fore_link_pool.shutdown(wait=True) def testZeroMessageRoundTrip(self): test_operation_id = object() test_method =", "test_operation_id, 0, tickets.Kind.COMMENCEMENT, test_method, interfaces.ServicedSubscription.Kind.FULL, None, None, _TIMEOUT) fore_sequence_number =", "test_rear_link.condition: requests = tuple( ticket.payload for ticket in test_rear_link.tickets if", "x _TIMEOUT = 2 class RoundTripTest(unittest.TestCase): def setUp(self): self.fore_link_pool =", "rear_link.start() commencement_ticket = tickets.FrontToBackPacket( test_operation_id, 0, tickets.Kind.COMMENCEMENT, test_method, interfaces.ServicedSubscription.Kind.FULL, None,", 
"PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # \"AS IS\"", "_TIMEOUT) fore_sequence_number = 1 rear_link.accept_front_to_back_ticket(commencement_ticket) for request in scenario.requests(): continuation_ticket", "_test_links.RearLink(rear_action, None) fore_link = fore.ForeLink( self.fore_link_pool, {test_method: _IDENTITY}, {test_method: _IDENTITY},", "if front_to_back_ticket.payload is not None: response = scenario.response_for_request(front_to_back_ticket.payload) else: response", "logging_pool _IDENTITY = lambda x: x _TIMEOUT = 2 class", "None}, None, ()) fore_link.join_rear_link(test_rear_link) test_rear_link.join_fore_link(fore_link) fore_link.start() port = fore_link.port() rear_link", "PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT", "tickets.BackToFrontPacket( front_to_back_ticket.operation_id, rear_sequence_number[0], tickets.Kind.COMPLETION if terminal else tickets.Kind.CONTINUATION, response) rear_sequence_number[0]", "import _proto_scenarios from grpc._adapter import _test_links from grpc._adapter import fore", "= rear.RearLink( 'localhost', port, self.rear_link_pool, {test_method: None}, {test_method: None}, False,", "test_back_to_front_datum = b'\\x08' test_fore_link = _test_links.ForeLink(None, None) rear_sequence_number = [0]", "else tickets.Kind.CONTINUATION, response) rear_sequence_number[0] += 1 fore_link.accept_back_to_front_ticket(back_to_front_ticket) test_rear_link = _test_links.RearLink(rear_action,", "ticket in test_fore_link.tickets if ticket.payload is not None) self.assertTrue(scenario.verify_requests(requests)) self.assertTrue(scenario.verify_responses(responses))" ]
[ "file_name = str(file_path.rsplit(\"/\", maxsplit=1)[1]) try: print(\"\\033[0;33;40m Testing\", file_name, end=\"...\\r\") subprocess.check_call([\"pytest\",", "import os, glob import subprocess from subprocess import DEVNULL, STDOUT", "import DEVNULL, STDOUT abspath = os.path.abspath(__file__) dir_ = os.path.dirname(abspath) files", "subprocess.CalledProcessError: print(\"\\033[0;31;40m Error in\", file_name) else: print(\"\\033[0;32;40m\", file_name, \"is correct\")", "+ \"/_progress_board_tests/_test_progress_board_*.py\") for file_path in files: file_name = str(file_path.rsplit(\"/\", maxsplit=1)[1])", "maxsplit=1)[1]) try: print(\"\\033[0;33;40m Testing\", file_name, end=\"...\\r\") subprocess.check_call([\"pytest\", file_path], stdout=DEVNULL, stderr=STDOUT)", "STDOUT abspath = os.path.abspath(__file__) dir_ = os.path.dirname(abspath) files = glob.glob(dir_", "file_path in files: file_name = str(file_path.rsplit(\"/\", maxsplit=1)[1]) try: print(\"\\033[0;33;40m Testing\",", "subprocess import DEVNULL, STDOUT abspath = os.path.abspath(__file__) dir_ = os.path.dirname(abspath)", "abspath = os.path.abspath(__file__) dir_ = os.path.dirname(abspath) files = glob.glob(dir_ +", "stdout=DEVNULL, stderr=STDOUT) except subprocess.CalledProcessError: print(\"\\033[0;31;40m Error in\", file_name) else: print(\"\\033[0;32;40m\",", "= os.path.dirname(abspath) files = glob.glob(dir_ + \"/_progress_board_tests/_test_progress_board_*.py\") for file_path in", "files = glob.glob(dir_ + \"/_progress_board_tests/_test_progress_board_*.py\") for file_path in files: file_name", "for file_path in files: file_name = str(file_path.rsplit(\"/\", maxsplit=1)[1]) try: print(\"\\033[0;33;40m", "except subprocess.CalledProcessError: print(\"\\033[0;31;40m Error in\", file_name) else: print(\"\\033[0;32;40m\", file_name, \"is", "glob import subprocess from subprocess import DEVNULL, STDOUT abspath =", "files: file_name = str(file_path.rsplit(\"/\", maxsplit=1)[1]) try: 
print(\"\\033[0;33;40m Testing\", file_name, end=\"...\\r\")", "os, glob import subprocess from subprocess import DEVNULL, STDOUT abspath", "= os.path.abspath(__file__) dir_ = os.path.dirname(abspath) files = glob.glob(dir_ + \"/_progress_board_tests/_test_progress_board_*.py\")", "DEVNULL, STDOUT abspath = os.path.abspath(__file__) dir_ = os.path.dirname(abspath) files =", "from subprocess import DEVNULL, STDOUT abspath = os.path.abspath(__file__) dir_ =", "glob.glob(dir_ + \"/_progress_board_tests/_test_progress_board_*.py\") for file_path in files: file_name = str(file_path.rsplit(\"/\",", "= str(file_path.rsplit(\"/\", maxsplit=1)[1]) try: print(\"\\033[0;33;40m Testing\", file_name, end=\"...\\r\") subprocess.check_call([\"pytest\", file_path],", "os.path.abspath(__file__) dir_ = os.path.dirname(abspath) files = glob.glob(dir_ + \"/_progress_board_tests/_test_progress_board_*.py\") for", "print(\"\\033[0;33;40m Testing\", file_name, end=\"...\\r\") subprocess.check_call([\"pytest\", file_path], stdout=DEVNULL, stderr=STDOUT) except subprocess.CalledProcessError:", "<gh_stars>100-1000 import os, glob import subprocess from subprocess import DEVNULL,", "file_name, end=\"...\\r\") subprocess.check_call([\"pytest\", file_path], stdout=DEVNULL, stderr=STDOUT) except subprocess.CalledProcessError: print(\"\\033[0;31;40m Error", "try: print(\"\\033[0;33;40m Testing\", file_name, end=\"...\\r\") subprocess.check_call([\"pytest\", file_path], stdout=DEVNULL, stderr=STDOUT) except", "subprocess from subprocess import DEVNULL, STDOUT abspath = os.path.abspath(__file__) dir_", "import subprocess from subprocess import DEVNULL, STDOUT abspath = os.path.abspath(__file__)", "subprocess.check_call([\"pytest\", file_path], stdout=DEVNULL, stderr=STDOUT) except subprocess.CalledProcessError: print(\"\\033[0;31;40m Error in\", file_name)", "os.path.dirname(abspath) files = glob.glob(dir_ + \"/_progress_board_tests/_test_progress_board_*.py\") for file_path in files:", "in files: 
file_name = str(file_path.rsplit(\"/\", maxsplit=1)[1]) try: print(\"\\033[0;33;40m Testing\", file_name,", "= glob.glob(dir_ + \"/_progress_board_tests/_test_progress_board_*.py\") for file_path in files: file_name =", "str(file_path.rsplit(\"/\", maxsplit=1)[1]) try: print(\"\\033[0;33;40m Testing\", file_name, end=\"...\\r\") subprocess.check_call([\"pytest\", file_path], stdout=DEVNULL,", "file_path], stdout=DEVNULL, stderr=STDOUT) except subprocess.CalledProcessError: print(\"\\033[0;31;40m Error in\", file_name) else:", "stderr=STDOUT) except subprocess.CalledProcessError: print(\"\\033[0;31;40m Error in\", file_name) else: print(\"\\033[0;32;40m\", file_name,", "\"/_progress_board_tests/_test_progress_board_*.py\") for file_path in files: file_name = str(file_path.rsplit(\"/\", maxsplit=1)[1]) try:", "dir_ = os.path.dirname(abspath) files = glob.glob(dir_ + \"/_progress_board_tests/_test_progress_board_*.py\") for file_path", "Testing\", file_name, end=\"...\\r\") subprocess.check_call([\"pytest\", file_path], stdout=DEVNULL, stderr=STDOUT) except subprocess.CalledProcessError: print(\"\\033[0;31;40m", "end=\"...\\r\") subprocess.check_call([\"pytest\", file_path], stdout=DEVNULL, stderr=STDOUT) except subprocess.CalledProcessError: print(\"\\033[0;31;40m Error in\"," ]
[ "tracker.forest_tree, date)] = tracker.forest_param_id else: params[(tracker.participant_id, tracker.forest_tree, date)] = None", "\"GET\": return render_create_tasks(request, study) form = CreateTasksForm(data=request.POST, study=study) if not", "params_conflict = False for tree in set([k[1] for k in", "= \"\\n\".join(error_messages) messages.warning(request, f\"Errors:\\n\\n{error_messages_string}\") return render_create_tasks(request, study) form.save() messages.success(request, \"Forest", "daterange from middleware.abort_middleware import abort from serializers.forest_serializers import ForestTaskCsvSerializer, ForestTaskSerializer", "Forest task.\") return redirect(easy_url(\"forest_pages.task_log\", study_id=study_id)) @require_GET @authenticate_admin @forest_enabled def download_task_data(request:", "the metadata params = dict() results = defaultdict(lambda: \"--\") tracker:", "error_messages = [ f'\"{field}\": {message}' for field, messages in form.errors.items()", "from libs.http_utils import easy_url from libs.internal_types import ParticipantQuerySet, ResearcherRequest from", "it is bad. 
if request.method == \"GET\": return render_create_tasks(request, study)", "import defaultdict from django.contrib import messages from django.http.response import FileResponse", "render_create_tasks(request, study) form.save() messages.success(request, \"Forest tasks successfully queued!\") return redirect(easy_url(\"forest_pages.task_log\",", "database.tableau_api_models import ForestTask from database.user_models import Participant from forms.django_forms import", "params.items() if m is not None and k[1] == tree]))", "if not request.session_researcher.site_admin: return abort(403) try: study = Study.objects.get(pk=study_id) except", "results = defaultdict(lambda: \"--\") tracker: ForestTask for tracker in trackers:", "import datetime from collections import defaultdict from django.contrib import messages", "# by participant and tree, and tracks the metadata params", "flat=True) ), trees=ForestTree.choices(), start_date=start_date.strftime('%Y-%m-%d'), end_date=end_date.strftime('%Y-%m-%d') ) ) class CSVBuffer: line", "results[(tracker.participant_id, tracker.forest_tree, date)] = tracker.status if tracker.status == tracker.status.success: params[(tracker.participant_id,", "for participant in participants: for tree in ForestTree.values(): row =", "within each tree, only a single set of param values", "generate chart of study analysis progress logs trackers = ForestTask.objects.filter(participant__in=participants).order_by(\"created_on\")", ") if number_updated > 0: messages.success(request, \"Forest task successfully cancelled.\")", "successfully cancelled.\") else: messages.warning(request, \"Sorry, we were unable to find", "= ForestTask.objects.order_by(\"created_on\") return FileResponse( stream_forest_task_log_csv(forest_tasks), content_type=\"text/csv\", filename=f\"forest_task_log_{timezone.now().isoformat()}.csv\", as_attachment=True, ) @require_POST", "= csv.DictWriter(buffer, fieldnames=ForestTaskCsvSerializer.Meta.fields) writer.writeheader() yield 
buffer.read() for forest_task in forest_tasks:", "# generate chart of study analysis progress logs trackers =", "authenticate_researcher_study_access, forest_enabled) from constants.data_access_api_constants import CHUNK_FIELDS from constants.forest_constants import ForestTaskStatus,", "study analysis progress logs trackers = ForestTask.objects.filter(participant__in=participants).order_by(\"created_on\") start_date = (study.get_earliest_data_time_bin()", "import (authenticate_admin, authenticate_researcher_study_access, forest_enabled) from constants.data_access_api_constants import CHUNK_FIELDS from constants.forest_constants", "forest_log=ForestTaskSerializer(forest_tasks, many=True).data, ) ) @require_GET @authenticate_admin def download_task_log(request: ResearcherRequest): forest_tasks", "date)] = tracker.status if tracker.status == tracker.status.success: params[(tracker.participant_id, tracker.forest_tree, date)]", "# are considered, and unsuccessful runs are assumed to invalidate", "defaultdict(lambda: \"--\") tracker: ForestTask for tracker in trackers: for date", "f def stream_forest_task_log_csv(forest_tasks): buffer = CSVBuffer() writer = csv.DictWriter(buffer, fieldnames=ForestTaskCsvSerializer.Meta.fields)", "\"forest/task_log.html\", context=dict( study=study, is_site_admin=request.session_researcher.site_admin, status_choices=ForestTaskStatus, forest_log=ForestTaskSerializer(forest_tasks, many=True).data, ) ) @require_GET", "end_date = timezone.now().date() return render( request, \"forest/create_tasks.html\", context=dict( study=study, participants=list(", "{message}' for field, messages in form.errors.items() for message in messages", "forest_task_external_id): if not request.session_researcher.site_admin: return abort(403) number_updated = \\ ForestTask.objects.filter(", ") f.set_headers(None) return f def stream_forest_task_log_csv(forest_tasks): buffer = CSVBuffer() writer", "@require_GET @authenticate_admin @forest_enabled def 
download_task_data(request: ResearcherRequest, study_id, forest_task_external_id): try: tracker:", "for message in messages ] error_messages_string = \"\\n\".join(error_messages) messages.warning(request, f\"Errors:\\n\\n{error_messages_string}\")", "request, 'forest/analysis_progress.html', context=dict( study=study, chart_columns=[\"participant\", \"tree\"] + dates, status_choices=ForestTaskStatus, params_conflict=params_conflict,", "fieldnames=ForestTaskCsvSerializer.Meta.fields) writer.writeheader() yield buffer.read() for forest_task in forest_tasks: writer.writerow(ForestTaskCsvSerializer(forest_task).data) yield", "tree])) > 1: params_conflict = True break return render( request,", "f'\"{field}\": {message}' for field, messages in form.errors.items() for message in", "is_site_admin=request.session_researcher.site_admin, status_choices=ForestTaskStatus, forest_log=ForestTaskSerializer(forest_tasks, many=True).data, ) ) @require_GET @authenticate_admin def download_task_log(request:", "study) form.save() messages.success(request, \"Forest tasks successfully queued!\") return redirect(easy_url(\"forest_pages.task_log\", study_id=study_id))", "require_GET, require_http_methods, require_POST from authentication.admin_authentication import (authenticate_admin, authenticate_researcher_study_access, forest_enabled) from", "recent runs # are considered, and unsuccessful runs are assumed", "import abort from serializers.forest_serializers import ForestTaskCsvSerializer, ForestTaskSerializer @require_GET @authenticate_researcher_study_access @forest_enabled", "\"--\") tracker: ForestTask for tracker in trackers: for date in", "from serializers.forest_serializers import ForestTaskCsvSerializer, ForestTaskSerializer @require_GET @authenticate_researcher_study_access @forest_enabled def analysis_progress(request:", "@authenticate_admin @forest_enabled def create_tasks(request: ResearcherRequest, study_id=None): # Only a SITE", "this uses the jinja safe filter and should 
never involve", "set([k[1] for k in params.keys()]): if len(set([m for k, m", "ForestTaskStatus, ForestTree from database.data_access_models import ChunkRegistry from database.study_models import Study", "ForestTaskCsvSerializer, ForestTaskSerializer @require_GET @authenticate_researcher_study_access @forest_enabled def analysis_progress(request: ResearcherRequest, study_id=None): study:", "tree] + \\ [results[(participant.id, tree, date)] for date in dates]", "redirect(easy_url(\"forest_pages.task_log\", study_id=study_id)) @require_GET @authenticate_admin @forest_enabled def download_task_data(request: ResearcherRequest, study_id, forest_task_external_id):", "stacktrace=f\"Canceled by {request.session_researcher.username} on {datetime.date.today()}\", ) if number_updated > 0:", "simultaneously builds up the chart of most recent forest results", "Participant.objects.filter(study=study) try: start_date = ChunkRegistry.objects.filter(participant__in=participants).earliest(\"time_bin\") end_date = ChunkRegistry.objects.filter(participant__in=participants).latest(\"time_bin\") start_date =", "tracks the metadata params = dict() results = defaultdict(lambda: \"--\")", "csv.DictWriter(buffer, fieldnames=ForestTaskCsvSerializer.Meta.fields) writer.writeheader() yield buffer.read() for forest_task in forest_tasks: writer.writerow(ForestTaskCsvSerializer(forest_task).data)", "buffer.read() for forest_task in forest_tasks: writer.writerow(ForestTaskCsvSerializer(forest_task).data) yield buffer.read() def render_create_tasks(request:", "are used (only the most recent runs # are considered,", "ResearcherRequest, study: Study): participants = Participant.objects.filter(study=study) try: start_date = ChunkRegistry.objects.filter(participant__in=participants).earliest(\"time_bin\")", "CSVBuffer: line = \"\" def read(self): return self.line def write(self,", "CreateTasksForm(data=request.POST, study=study) if not form.is_valid(): error_messages = [ f'\"{field}\": {message}'", 
"return abort(404) chunks = ChunkRegistry.objects.filter(participant=tracker.participant).values(*CHUNK_FIELDS) f = FileResponse( zip_generator(chunks), content_type=\"zip\",", "queued!\") return redirect(easy_url(\"forest_pages.task_log\", study_id=study_id)) @require_GET @authenticate_researcher_study_access @forest_enabled def task_log(request: ResearcherRequest,", "queue forest tasks if not request.session_researcher.site_admin: return abort(403) try: study", "= CreateTasksForm(data=request.POST, study=study) if not form.is_valid(): error_messages = [ f'\"{field}\":", "@require_POST @authenticate_admin @forest_enabled def cancel_task(request: ResearcherRequest, study_id, forest_task_external_id): if not", "for tree in set([k[1] for k in params.keys()]): if len(set([m", "by participant and tree, and tracks the metadata params =", "django.views.decorators.http import require_GET, require_http_methods, require_POST from authentication.admin_authentication import (authenticate_admin, authenticate_researcher_study_access,", "trackers: for date in daterange(tracker.data_date_start, tracker.data_date_end, inclusive=True): results[(tracker.participant_id, tracker.forest_tree, date)]", "number_updated > 0: messages.success(request, \"Forest task successfully cancelled.\") else: messages.warning(request,", ").update( status=ForestTaskStatus.cancelled, stacktrace=f\"Canceled by {request.session_researcher.username} on {datetime.date.today()}\", ) if number_updated", "libs.streaming_zip import zip_generator from libs.utils.date_utils import daterange from middleware.abort_middleware import", "forest_task_external_id): try: tracker: ForestTask = ForestTask.objects.get( external_id=forest_task_external_id, participant__study_id=study_id ) except", "return f def stream_forest_task_log_csv(forest_tasks): buffer = CSVBuffer() writer = csv.DictWriter(buffer,", "and unsuccessful runs are assumed to invalidate old runs, clearing", "tree, only a single set of param values are 
used", "FIXME: remove this double endpoint pattern, it is bad. if", "date in daterange(tracker.data_date_start, tracker.data_date_end, inclusive=True): results[(tracker.participant_id, tracker.forest_tree, date)] = tracker.status", "study: Study = Study.objects.get(pk=study_id) participants: ParticipantQuerySet = Participant.objects.filter(study=study_id) # generate", "except Study.DoesNotExist: return abort(404) # FIXME: remove this double endpoint", "def create_tasks(request: ResearcherRequest, study_id=None): # Only a SITE admin can", "in messages ] error_messages_string = \"\\n\".join(error_messages) messages.warning(request, f\"Errors:\\n\\n{error_messages_string}\") return render_create_tasks(request,", "for date in dates] chart.append(row) # ensure that within each", "def download_task_data(request: ResearcherRequest, study_id, forest_task_external_id): try: tracker: ForestTask = ForestTask.objects.get(", "# this uses the jinja safe filter and should never", "CHUNK_FIELDS from constants.forest_constants import ForestTaskStatus, ForestTree from database.data_access_models import ChunkRegistry", "from libs.streaming_zip import zip_generator from libs.utils.date_utils import daterange from middleware.abort_middleware", "] error_messages_string = \"\\n\".join(error_messages) messages.warning(request, f\"Errors:\\n\\n{error_messages_string}\") return render_create_tasks(request, study) form.save()", "else: params[(tracker.participant_id, tracker.forest_tree, date)] = None # generate the date", "ForestTaskSerializer @require_GET @authenticate_researcher_study_access @forest_enabled def analysis_progress(request: ResearcherRequest, study_id=None): study: Study", "not form.is_valid(): error_messages = [ f'\"{field}\": {message}' for field, messages", "filename=f\"{tracker.get_slug()}.zip\", ) f.set_headers(None) return f def stream_forest_task_log_csv(forest_tasks): buffer = CSVBuffer()", "defaultdict from django.contrib import messages from django.http.response 
import FileResponse from", "values are used (only the most recent runs # are", "django.contrib import messages from django.http.response import FileResponse from django.shortcuts import", "database.study_models import Study from database.tableau_api_models import ForestTask from database.user_models import", "request, \"forest/create_tasks.html\", context=dict( study=study, participants=list( study.participants.order_by(\"patient_id\").values_list(\"patient_id\", flat=True) ), trees=ForestTree.choices(), start_date=start_date.strftime('%Y-%m-%d'),", "= tracker.status if tracker.status == tracker.status.success: params[(tracker.participant_id, tracker.forest_tree, date)] =", "ChunkRegistry.DoesNotExist: start_date = study.created_on.date() end_date = timezone.now().date() return render( request,", "return render_create_tasks(request, study) form.save() messages.success(request, \"Forest tasks successfully queued!\") return", "abort(404) chunks = ChunkRegistry.objects.filter(participant=tracker.participant).values(*CHUNK_FIELDS) f = FileResponse( zip_generator(chunks), content_type=\"zip\", as_attachment=True,", "analysis_progress(request: ResearcherRequest, study_id=None): study: Study = Study.objects.get(pk=study_id) participants: ParticipantQuerySet =", "except ForestTask.DoesNotExist: return abort(404) chunks = ChunkRegistry.objects.filter(participant=tracker.participant).values(*CHUNK_FIELDS) f = FileResponse(", "tracker.forest_tree, date)] = None # generate the date range for", "in participants: for tree in ForestTree.values(): row = [participant.patient_id, tree]", "remove this double endpoint pattern, it is bad. 
if request.method", "> 0: messages.success(request, \"Forest task successfully cancelled.\") else: messages.warning(request, \"Sorry,", "class CSVBuffer: line = \"\" def read(self): return self.line def", "return abort(403) try: study = Study.objects.get(pk=study_id) except Study.DoesNotExist: return abort(404)", "the chart of most recent forest results for date ranges", "easy_url from libs.internal_types import ParticipantQuerySet, ResearcherRequest from libs.streaming_zip import zip_generator", "in form.errors.items() for message in messages ] error_messages_string = \"\\n\".join(error_messages)", "ParticipantQuerySet, ResearcherRequest from libs.streaming_zip import zip_generator from libs.utils.date_utils import daterange", "by {request.session_researcher.username} on {datetime.date.today()}\", ) if number_updated > 0: messages.success(request,", "\"tree\"] + dates, status_choices=ForestTaskStatus, params_conflict=params_conflict, start_date=start_date, end_date=end_date, chart=chart # this", "ParticipantQuerySet = Participant.objects.filter(study=study_id) # generate chart of study analysis progress", "tree, and tracks the metadata params = dict() results =", "jinja safe filter and should never involve user input )", "tree, date)] for date in dates] chart.append(row) # ensure that", "= Study.objects.get(pk=study_id) participants: ParticipantQuerySet = Participant.objects.filter(study=study_id) # generate chart of", "or cancel this Forest task.\") return redirect(easy_url(\"forest_pages.task_log\", study_id=study_id)) @require_GET @authenticate_admin", "= True break return render( request, 'forest/analysis_progress.html', context=dict( study=study, chart_columns=[\"participant\",", "abort(403) number_updated = \\ ForestTask.objects.filter( external_id=forest_task_external_id, status=ForestTaskStatus.queued ).update( status=ForestTaskStatus.cancelled, stacktrace=f\"Canceled", "constants.data_access_api_constants import CHUNK_FIELDS from 
constants.forest_constants import ForestTaskStatus, ForestTree from database.data_access_models", "tree in set([k[1] for k in params.keys()]): if len(set([m for", "0: messages.success(request, \"Forest task successfully cancelled.\") else: messages.warning(request, \"Sorry, we", "'POST']) @authenticate_admin @forest_enabled def create_tasks(request: ResearcherRequest, study_id=None): # Only a", "timezone.now()).date() # this code simultaneously builds up the chart of", "FileResponse( stream_forest_task_log_csv(forest_tasks), content_type=\"text/csv\", filename=f\"forest_task_log_{timezone.now().isoformat()}.csv\", as_attachment=True, ) @require_POST @authenticate_admin @forest_enabled def", "zip_generator from libs.utils.date_utils import daterange from middleware.abort_middleware import abort from", "ChunkRegistry.objects.filter(participant__in=participants).earliest(\"time_bin\") end_date = ChunkRegistry.objects.filter(participant__in=participants).latest(\"time_bin\") start_date = start_date.time_bin.date() end_date = end_date.time_bin.date()", "user input ) ) @require_http_methods(['GET', 'POST']) @authenticate_admin @forest_enabled def create_tasks(request:", "messages.warning(request, f\"Errors:\\n\\n{error_messages_string}\") return render_create_tasks(request, study) form.save() messages.success(request, \"Forest tasks successfully", "# ensure that within each tree, only a single set", "import require_GET, require_http_methods, require_POST from authentication.admin_authentication import (authenticate_admin, authenticate_researcher_study_access, forest_enabled)", "if request.method == \"GET\": return render_create_tasks(request, study) form = CreateTasksForm(data=request.POST,", "runs are assumed to invalidate old runs, clearing params) params_conflict", "line = \"\" def read(self): return self.line def write(self, line):", "field, messages in form.errors.items() for message in messages ] error_messages_string", "double endpoint pattern, it is bad. 
if request.method == \"GET\":", "and should never involve user input ) ) @require_http_methods(['GET', 'POST'])", "require_http_methods, require_POST from authentication.admin_authentication import (authenticate_admin, authenticate_researcher_study_access, forest_enabled) from constants.data_access_api_constants", "task_log(request: ResearcherRequest, study_id=None): study = Study.objects.get(pk=study_id) forest_tasks = ForestTask.objects.filter(participant__study_id=study_id).order_by(\"-created_on\") return", "abort(404) # FIXME: remove this double endpoint pattern, it is", ") ) @require_GET @authenticate_admin def download_task_log(request: ResearcherRequest): forest_tasks = ForestTask.objects.order_by(\"created_on\")", "dates, status_choices=ForestTaskStatus, params_conflict=params_conflict, start_date=start_date, end_date=end_date, chart=chart # this uses the", "start_date = (study.get_earliest_data_time_bin() or study.created_on).date() end_date = (study.get_latest_data_time_bin() or timezone.now()).date()", "row = [participant.patient_id, tree] + \\ [results[(participant.id, tree, date)] for", ") ) @require_http_methods(['GET', 'POST']) @authenticate_admin @forest_enabled def create_tasks(request: ResearcherRequest, study_id=None):", "[ f'\"{field}\": {message}' for field, messages in form.errors.items() for message", "\"\\n\".join(error_messages) messages.warning(request, f\"Errors:\\n\\n{error_messages_string}\") return render_create_tasks(request, study) form.save() messages.success(request, \"Forest tasks", "tasks successfully queued!\") return redirect(easy_url(\"forest_pages.task_log\", study_id=study_id)) @require_GET @authenticate_researcher_study_access @forest_enabled def", "form.save() messages.success(request, \"Forest tasks successfully queued!\") return redirect(easy_url(\"forest_pages.task_log\", study_id=study_id)) @require_GET", "study = Study.objects.get(pk=study_id) except Study.DoesNotExist: return abort(404) # FIXME: remove", "study_id, 
forest_task_external_id): if not request.session_researcher.site_admin: return abort(403) number_updated = \\", "def analysis_progress(request: ResearcherRequest, study_id=None): study: Study = Study.objects.get(pk=study_id) participants: ParticipantQuerySet", "many=True).data, ) ) @require_GET @authenticate_admin def download_task_log(request: ResearcherRequest): forest_tasks =", "ResearcherRequest): forest_tasks = ForestTask.objects.order_by(\"created_on\") return FileResponse( stream_forest_task_log_csv(forest_tasks), content_type=\"text/csv\", filename=f\"forest_task_log_{timezone.now().isoformat()}.csv\", as_attachment=True,", "Study.objects.get(pk=study_id) participants: ParticipantQuerySet = Participant.objects.filter(study=study_id) # generate chart of study", "messages.success(request, \"Forest tasks successfully queued!\") return redirect(easy_url(\"forest_pages.task_log\", study_id=study_id)) @require_GET @authenticate_researcher_study_access", "start_date=start_date, end_date=end_date, chart=chart # this uses the jinja safe filter", "[participant.patient_id, tree] + \\ [results[(participant.id, tree, date)] for date in", "if not request.session_researcher.site_admin: return abort(403) number_updated = \\ ForestTask.objects.filter( external_id=forest_task_external_id,", "can queue forest tasks if not request.session_researcher.site_admin: return abort(403) try:", "zip_generator(chunks), content_type=\"zip\", as_attachment=True, filename=f\"{tracker.get_slug()}.zip\", ) f.set_headers(None) return f def stream_forest_task_log_csv(forest_tasks):", "forest results for date ranges # by participant and tree,", "@authenticate_admin @forest_enabled def cancel_task(request: ResearcherRequest, study_id, forest_task_external_id): if not request.session_researcher.site_admin:", "def task_log(request: ResearcherRequest, study_id=None): study = Study.objects.get(pk=study_id) forest_tasks = 
ForestTask.objects.filter(participant__study_id=study_id).order_by(\"-created_on\")", "dates = list(daterange(start_date, end_date, inclusive=True)) chart = [] for participant", "messages from django.http.response import FileResponse from django.shortcuts import redirect, render", "for date in daterange(tracker.data_date_start, tracker.data_date_end, inclusive=True): results[(tracker.participant_id, tracker.forest_tree, date)] =", "from libs.utils.date_utils import daterange from middleware.abort_middleware import abort from serializers.forest_serializers", "form = CreateTasksForm(data=request.POST, study=study) if not form.is_valid(): error_messages = [", "yield buffer.read() for forest_task in forest_tasks: writer.writerow(ForestTaskCsvSerializer(forest_task).data) yield buffer.read() def", "as_attachment=True, filename=f\"{tracker.get_slug()}.zip\", ) f.set_headers(None) return f def stream_forest_task_log_csv(forest_tasks): buffer =", "for charting dates = list(daterange(start_date, end_date, inclusive=True)) chart = []", "redirect(easy_url(\"forest_pages.task_log\", study_id=study_id)) @require_GET @authenticate_researcher_study_access @forest_enabled def task_log(request: ResearcherRequest, study_id=None): study", "@forest_enabled def create_tasks(request: ResearcherRequest, study_id=None): # Only a SITE admin", "writer.writerow(ForestTaskCsvSerializer(forest_task).data) yield buffer.read() def render_create_tasks(request: ResearcherRequest, study: Study): participants =", "= ForestTask.objects.filter(participant__study_id=study_id).order_by(\"-created_on\") return render( request, \"forest/task_log.html\", context=dict( study=study, is_site_admin=request.session_researcher.site_admin, status_choices=ForestTaskStatus,", "date ranges # by participant and tree, and tracks the", "study_id=None): study = Study.objects.get(pk=study_id) forest_tasks = ForestTask.objects.filter(participant__study_id=study_id).order_by(\"-created_on\") return render( request,", 
"study.created_on).date() end_date = (study.get_latest_data_time_bin() or timezone.now()).date() # this code simultaneously", "ForestTask from database.user_models import Participant from forms.django_forms import CreateTasksForm from", "in ForestTree.values(): row = [participant.patient_id, tree] + \\ [results[(participant.id, tree,", "invalidate old runs, clearing params) params_conflict = False for tree", "from collections import defaultdict from django.contrib import messages from django.http.response", "of most recent forest results for date ranges # by", "params[(tracker.participant_id, tracker.forest_tree, date)] = tracker.forest_param_id else: params[(tracker.participant_id, tracker.forest_tree, date)] =", "create_tasks(request: ResearcherRequest, study_id=None): # Only a SITE admin can queue", "in daterange(tracker.data_date_start, tracker.data_date_end, inclusive=True): results[(tracker.participant_id, tracker.forest_tree, date)] = tracker.status if", "charting dates = list(daterange(start_date, end_date, inclusive=True)) chart = [] for", "date)] = None # generate the date range for charting", "= (study.get_earliest_data_time_bin() or study.created_on).date() end_date = (study.get_latest_data_time_bin() or timezone.now()).date() #", "admin can queue forest tasks if not request.session_researcher.site_admin: return abort(403)", "= [participant.patient_id, tree] + \\ [results[(participant.id, tree, date)] for date", "django.utils import timezone from django.views.decorators.http import require_GET, require_http_methods, require_POST from", "True break return render( request, 'forest/analysis_progress.html', context=dict( study=study, chart_columns=[\"participant\", \"tree\"]", "import ParticipantQuerySet, ResearcherRequest from libs.streaming_zip import zip_generator from libs.utils.date_utils import", "except ChunkRegistry.DoesNotExist: start_date = study.created_on.date() end_date = timezone.now().date() return render(", "+ \\ [results[(participant.id, 
tree, date)] for date in dates] chart.append(row)", "writer = csv.DictWriter(buffer, fieldnames=ForestTaskCsvSerializer.Meta.fields) writer.writeheader() yield buffer.read() for forest_task in", "import CHUNK_FIELDS from constants.forest_constants import ForestTaskStatus, ForestTree from database.data_access_models import", "filter and should never involve user input ) ) @require_http_methods(['GET',", "builds up the chart of most recent forest results for", "database.data_access_models import ChunkRegistry from database.study_models import Study from database.tableau_api_models import", "ForestTask for tracker in trackers: for date in daterange(tracker.data_date_start, tracker.data_date_end,", "considered, and unsuccessful runs are assumed to invalidate old runs,", "params.keys()]): if len(set([m for k, m in params.items() if m", "end_date = ChunkRegistry.objects.filter(participant__in=participants).latest(\"time_bin\") start_date = start_date.time_bin.date() end_date = end_date.time_bin.date() except", "), trees=ForestTree.choices(), start_date=start_date.strftime('%Y-%m-%d'), end_date=end_date.strftime('%Y-%m-%d') ) ) class CSVBuffer: line =", "stream_forest_task_log_csv(forest_tasks): buffer = CSVBuffer() writer = csv.DictWriter(buffer, fieldnames=ForestTaskCsvSerializer.Meta.fields) writer.writeheader() yield", "date in dates] chart.append(row) # ensure that within each tree,", "task.\") return redirect(easy_url(\"forest_pages.task_log\", study_id=study_id)) @require_GET @authenticate_admin @forest_enabled def download_task_data(request: ResearcherRequest,", "status=ForestTaskStatus.cancelled, stacktrace=f\"Canceled by {request.session_researcher.username} on {datetime.date.today()}\", ) if number_updated >", "study_id=study_id)) @require_GET @authenticate_researcher_study_access @forest_enabled def task_log(request: ResearcherRequest, study_id=None): study =", "= Participant.objects.filter(study=study) try: start_date = 
ChunkRegistry.objects.filter(participant__in=participants).earliest(\"time_bin\") end_date = ChunkRegistry.objects.filter(participant__in=participants).latest(\"time_bin\") start_date", "Study): participants = Participant.objects.filter(study=study) try: start_date = ChunkRegistry.objects.filter(participant__in=participants).earliest(\"time_bin\") end_date =", "django.shortcuts import redirect, render from django.utils import timezone from django.views.decorators.http", "participants=list( study.participants.order_by(\"patient_id\").values_list(\"patient_id\", flat=True) ), trees=ForestTree.choices(), start_date=start_date.strftime('%Y-%m-%d'), end_date=end_date.strftime('%Y-%m-%d') ) ) class", "timezone.now().date() return render( request, \"forest/create_tasks.html\", context=dict( study=study, participants=list( study.participants.order_by(\"patient_id\").values_list(\"patient_id\", flat=True)", "tasks if not request.session_researcher.site_admin: return abort(403) try: study = Study.objects.get(pk=study_id)", "dates] chart.append(row) # ensure that within each tree, only a", "tracker: ForestTask for tracker in trackers: for date in daterange(tracker.data_date_start,", "study=study, participants=list( study.participants.order_by(\"patient_id\").values_list(\"patient_id\", flat=True) ), trees=ForestTree.choices(), start_date=start_date.strftime('%Y-%m-%d'), end_date=end_date.strftime('%Y-%m-%d') ) )", "in trackers: for date in daterange(tracker.data_date_start, tracker.data_date_end, inclusive=True): results[(tracker.participant_id, tracker.forest_tree,", "= Participant.objects.filter(study=study_id) # generate chart of study analysis progress logs", "== tracker.status.success: params[(tracker.participant_id, tracker.forest_tree, date)] = tracker.forest_param_id else: params[(tracker.participant_id, tracker.forest_tree,", "endpoint pattern, it is bad. 
if request.method == \"GET\": return", "collections import defaultdict from django.contrib import messages from django.http.response import", "render from django.utils import timezone from django.views.decorators.http import require_GET, require_http_methods,", "libs.internal_types import ParticipantQuerySet, ResearcherRequest from libs.streaming_zip import zip_generator from libs.utils.date_utils", "start_date.time_bin.date() end_date = end_date.time_bin.date() except ChunkRegistry.DoesNotExist: start_date = study.created_on.date() end_date", "param values are used (only the most recent runs #", "study_id=None): # Only a SITE admin can queue forest tasks", "import ForestTaskCsvSerializer, ForestTaskSerializer @require_GET @authenticate_researcher_study_access @forest_enabled def analysis_progress(request: ResearcherRequest, study_id=None):", "chunks = ChunkRegistry.objects.filter(participant=tracker.participant).values(*CHUNK_FIELDS) f = FileResponse( zip_generator(chunks), content_type=\"zip\", as_attachment=True, filename=f\"{tracker.get_slug()}.zip\",", "inclusive=True): results[(tracker.participant_id, tracker.forest_tree, date)] = tracker.status if tracker.status == tracker.status.success:", "study_id=study_id)) @require_GET @authenticate_admin @forest_enabled def download_task_data(request: ResearcherRequest, study_id, forest_task_external_id): try:", "k[1] == tree])) > 1: params_conflict = True break return", "= tracker.forest_param_id else: params[(tracker.participant_id, tracker.forest_tree, date)] = None # generate", "params) params_conflict = False for tree in set([k[1] for k", "return FileResponse( stream_forest_task_log_csv(forest_tasks), content_type=\"text/csv\", filename=f\"forest_task_log_{timezone.now().isoformat()}.csv\", as_attachment=True, ) @require_POST @authenticate_admin @forest_enabled", "buffer.read() def render_create_tasks(request: ResearcherRequest, study: Study): participants = Participant.objects.filter(study=study) try:", "chart = 
[] for participant in participants: for tree in", "= defaultdict(lambda: \"--\") tracker: ForestTask for tracker in trackers: for", "for k, m in params.items() if m is not None", "(study.get_latest_data_time_bin() or timezone.now()).date() # this code simultaneously builds up the", "on {datetime.date.today()}\", ) if number_updated > 0: messages.success(request, \"Forest task", "= ChunkRegistry.objects.filter(participant__in=participants).latest(\"time_bin\") start_date = start_date.time_bin.date() end_date = end_date.time_bin.date() except ChunkRegistry.DoesNotExist:", "= Study.objects.get(pk=study_id) except Study.DoesNotExist: return abort(404) # FIXME: remove this", "ResearcherRequest, study_id=None): # Only a SITE admin can queue forest", ") except ForestTask.DoesNotExist: return abort(404) chunks = ChunkRegistry.objects.filter(participant=tracker.participant).values(*CHUNK_FIELDS) f =", "not request.session_researcher.site_admin: return abort(403) try: study = Study.objects.get(pk=study_id) except Study.DoesNotExist:", "is not None and k[1] == tree])) > 1: params_conflict", "= (study.get_latest_data_time_bin() or timezone.now()).date() # this code simultaneously builds up", "== \"GET\": return render_create_tasks(request, study) form = CreateTasksForm(data=request.POST, study=study) if", "CSVBuffer() writer = csv.DictWriter(buffer, fieldnames=ForestTaskCsvSerializer.Meta.fields) writer.writeheader() yield buffer.read() for forest_task", "import FileResponse from django.shortcuts import redirect, render from django.utils import", "abort from serializers.forest_serializers import ForestTaskCsvSerializer, ForestTaskSerializer @require_GET @authenticate_researcher_study_access @forest_enabled def", "of param values are used (only the most recent runs", "to find or cancel this Forest task.\") return redirect(easy_url(\"forest_pages.task_log\", study_id=study_id))", "from middleware.abort_middleware import abort from serializers.forest_serializers import 
ForestTaskCsvSerializer, ForestTaskSerializer @require_GET", "cancel_task(request: ResearcherRequest, study_id, forest_task_external_id): if not request.session_researcher.site_admin: return abort(403) number_updated", "chart of most recent forest results for date ranges #", "from forms.django_forms import CreateTasksForm from libs.http_utils import easy_url from libs.internal_types", "cancel this Forest task.\") return redirect(easy_url(\"forest_pages.task_log\", study_id=study_id)) @require_GET @authenticate_admin @forest_enabled", "daterange(tracker.data_date_start, tracker.data_date_end, inclusive=True): results[(tracker.participant_id, tracker.forest_tree, date)] = tracker.status if tracker.status", "most recent runs # are considered, and unsuccessful runs are", "len(set([m for k, m in params.items() if m is not", "django.http.response import FileResponse from django.shortcuts import redirect, render from django.utils", "study.created_on.date() end_date = timezone.now().date() return render( request, \"forest/create_tasks.html\", context=dict( study=study,", "else: messages.warning(request, \"Sorry, we were unable to find or cancel", "import CreateTasksForm from libs.http_utils import easy_url from libs.internal_types import ParticipantQuerySet,", "tracker: ForestTask = ForestTask.objects.get( external_id=forest_task_external_id, participant__study_id=study_id ) except ForestTask.DoesNotExist: return", "timezone from django.views.decorators.http import require_GET, require_http_methods, require_POST from authentication.admin_authentication import", ") ) class CSVBuffer: line = \"\" def read(self): return", "a SITE admin can queue forest tasks if not request.session_researcher.site_admin:", "content_type=\"text/csv\", filename=f\"forest_task_log_{timezone.now().isoformat()}.csv\", as_attachment=True, ) @require_POST @authenticate_admin @forest_enabled def cancel_task(request: ResearcherRequest,", "chart.append(row) # ensure that within each tree, only a 
single", "safe filter and should never involve user input ) )", "ChunkRegistry.objects.filter(participant__in=participants).latest(\"time_bin\") start_date = start_date.time_bin.date() end_date = end_date.time_bin.date() except ChunkRegistry.DoesNotExist: start_date", "that within each tree, only a single set of param", "None and k[1] == tree])) > 1: params_conflict = True", "study=study, chart_columns=[\"participant\", \"tree\"] + dates, status_choices=ForestTaskStatus, params_conflict=params_conflict, start_date=start_date, end_date=end_date, chart=chart", "from django.http.response import FileResponse from django.shortcuts import redirect, render from", "and tree, and tracks the metadata params = dict() results", "from database.data_access_models import ChunkRegistry from database.study_models import Study from database.tableau_api_models", "not request.session_researcher.site_admin: return abort(403) number_updated = \\ ForestTask.objects.filter( external_id=forest_task_external_id, status=ForestTaskStatus.queued", "[] for participant in participants: for tree in ForestTree.values(): row", "involve user input ) ) @require_http_methods(['GET', 'POST']) @authenticate_admin @forest_enabled def", "[results[(participant.id, tree, date)] for date in dates] chart.append(row) # ensure", "serializers.forest_serializers import ForestTaskCsvSerializer, ForestTaskSerializer @require_GET @authenticate_researcher_study_access @forest_enabled def analysis_progress(request: ResearcherRequest,", "from constants.data_access_api_constants import CHUNK_FIELDS from constants.forest_constants import ForestTaskStatus, ForestTree from", "to invalidate old runs, clearing params) params_conflict = False for", "import redirect, render from django.utils import timezone from django.views.decorators.http import", "in params.items() if m is not None and k[1] ==", "libs.utils.date_utils import daterange from middleware.abort_middleware import abort from serializers.forest_serializers import", 
"ensure that within each tree, only a single set of", "@require_http_methods(['GET', 'POST']) @authenticate_admin @forest_enabled def create_tasks(request: ResearcherRequest, study_id=None): # Only", "Study = Study.objects.get(pk=study_id) participants: ParticipantQuerySet = Participant.objects.filter(study=study_id) # generate chart", "= \"\" def read(self): return self.line def write(self, line): self.line", "= False for tree in set([k[1] for k in params.keys()]):", "ResearcherRequest, study_id, forest_task_external_id): if not request.session_researcher.site_admin: return abort(403) number_updated =", "participant in participants: for tree in ForestTree.values(): row = [participant.patient_id,", "= end_date.time_bin.date() except ChunkRegistry.DoesNotExist: start_date = study.created_on.date() end_date = timezone.now().date()", "csv import datetime from collections import defaultdict from django.contrib import", "\\ ForestTask.objects.filter( external_id=forest_task_external_id, status=ForestTaskStatus.queued ).update( status=ForestTaskStatus.cancelled, stacktrace=f\"Canceled by {request.session_researcher.username} on", "request.method == \"GET\": return render_create_tasks(request, study) form = CreateTasksForm(data=request.POST, study=study)", "ResearcherRequest, study_id=None): study = Study.objects.get(pk=study_id) forest_tasks = ForestTask.objects.filter(participant__study_id=study_id).order_by(\"-created_on\") return render(", "form.is_valid(): error_messages = [ f'\"{field}\": {message}' for field, messages in", "forest_tasks: writer.writerow(ForestTaskCsvSerializer(forest_task).data) yield buffer.read() def render_create_tasks(request: ResearcherRequest, study: Study): participants", "import easy_url from libs.internal_types import ParticipantQuerySet, ResearcherRequest from libs.streaming_zip import", "each tree, only a single set of param values are", "= CSVBuffer() writer = csv.DictWriter(buffer, fieldnames=ForestTaskCsvSerializer.Meta.fields) 
writer.writeheader() yield buffer.read() for", "end_date=end_date.strftime('%Y-%m-%d') ) ) class CSVBuffer: line = \"\" def read(self):", "yield buffer.read() def render_create_tasks(request: ResearcherRequest, study: Study): participants = Participant.objects.filter(study=study)", "ResearcherRequest from libs.streaming_zip import zip_generator from libs.utils.date_utils import daterange from", "def cancel_task(request: ResearcherRequest, study_id, forest_task_external_id): if not request.session_researcher.site_admin: return abort(403)", "return render( request, 'forest/analysis_progress.html', context=dict( study=study, chart_columns=[\"participant\", \"tree\"] + dates,", "ForestTree.values(): row = [participant.patient_id, tree] + \\ [results[(participant.id, tree, date)]", "context=dict( study=study, chart_columns=[\"participant\", \"tree\"] + dates, status_choices=ForestTaskStatus, params_conflict=params_conflict, start_date=start_date, end_date=end_date,", "ForestTree from database.data_access_models import ChunkRegistry from database.study_models import Study from", "ChunkRegistry.objects.filter(participant=tracker.participant).values(*CHUNK_FIELDS) f = FileResponse( zip_generator(chunks), content_type=\"zip\", as_attachment=True, filename=f\"{tracker.get_slug()}.zip\", ) f.set_headers(None)", "study=study, is_site_admin=request.session_researcher.site_admin, status_choices=ForestTaskStatus, forest_log=ForestTaskSerializer(forest_tasks, many=True).data, ) ) @require_GET @authenticate_admin def", "params_conflict=params_conflict, start_date=start_date, end_date=end_date, chart=chart # this uses the jinja safe", "clearing params) params_conflict = False for tree in set([k[1] for", "forms.django_forms import CreateTasksForm from libs.http_utils import easy_url from libs.internal_types import", "import messages from django.http.response import FileResponse from django.shortcuts import redirect,", "constants.forest_constants import ForestTaskStatus, ForestTree 
from database.data_access_models import ChunkRegistry from database.study_models", "list(daterange(start_date, end_date, inclusive=True)) chart = [] for participant in participants:", "context=dict( study=study, is_site_admin=request.session_researcher.site_admin, status_choices=ForestTaskStatus, forest_log=ForestTaskSerializer(forest_tasks, many=True).data, ) ) @require_GET @authenticate_admin", "ForestTask.objects.filter(participant__study_id=study_id).order_by(\"-created_on\") return render( request, \"forest/task_log.html\", context=dict( study=study, is_site_admin=request.session_researcher.site_admin, status_choices=ForestTaskStatus, forest_log=ForestTaskSerializer(forest_tasks,", "study=study) if not form.is_valid(): error_messages = [ f'\"{field}\": {message}' for", "single set of param values are used (only the most", "for tracker in trackers: for date in daterange(tracker.data_date_start, tracker.data_date_end, inclusive=True):", "progress logs trackers = ForestTask.objects.filter(participant__in=participants).order_by(\"created_on\") start_date = (study.get_earliest_data_time_bin() or study.created_on).date()", "as_attachment=True, ) @require_POST @authenticate_admin @forest_enabled def cancel_task(request: ResearcherRequest, study_id, forest_task_external_id):", "date)] for date in dates] chart.append(row) # ensure that within", "if tracker.status == tracker.status.success: params[(tracker.participant_id, tracker.forest_tree, date)] = tracker.forest_param_id else:", "find or cancel this Forest task.\") return redirect(easy_url(\"forest_pages.task_log\", study_id=study_id)) @require_GET", ") @require_GET @authenticate_admin def download_task_log(request: ResearcherRequest): forest_tasks = ForestTask.objects.order_by(\"created_on\") return", "try: tracker: ForestTask = ForestTask.objects.get( external_id=forest_task_external_id, participant__study_id=study_id ) except ForestTask.DoesNotExist:", "range for charting dates = list(daterange(start_date, 
end_date, inclusive=True)) chart =", "request, \"forest/task_log.html\", context=dict( study=study, is_site_admin=request.session_researcher.site_admin, status_choices=ForestTaskStatus, forest_log=ForestTaskSerializer(forest_tasks, many=True).data, ) )", "@forest_enabled def analysis_progress(request: ResearcherRequest, study_id=None): study: Study = Study.objects.get(pk=study_id) participants:", "from django.views.decorators.http import require_GET, require_http_methods, require_POST from authentication.admin_authentication import (authenticate_admin,", "(authenticate_admin, authenticate_researcher_study_access, forest_enabled) from constants.data_access_api_constants import CHUNK_FIELDS from constants.forest_constants import", "chart of study analysis progress logs trackers = ForestTask.objects.filter(participant__in=participants).order_by(\"created_on\") start_date", "stream_forest_task_log_csv(forest_tasks), content_type=\"text/csv\", filename=f\"forest_task_log_{timezone.now().isoformat()}.csv\", as_attachment=True, ) @require_POST @authenticate_admin @forest_enabled def cancel_task(request:", "@forest_enabled def download_task_data(request: ResearcherRequest, study_id, forest_task_external_id): try: tracker: ForestTask =", "if len(set([m for k, m in params.items() if m is", "middleware.abort_middleware import abort from serializers.forest_serializers import ForestTaskCsvSerializer, ForestTaskSerializer @require_GET @authenticate_researcher_study_access", "external_id=forest_task_external_id, participant__study_id=study_id ) except ForestTask.DoesNotExist: return abort(404) chunks = ChunkRegistry.objects.filter(participant=tracker.participant).values(*CHUNK_FIELDS)", "k, m in params.items() if m is not None and", "forest_enabled) from constants.data_access_api_constants import CHUNK_FIELDS from constants.forest_constants import ForestTaskStatus, ForestTree", "pattern, it is bad. 
if request.method == \"GET\": return render_create_tasks(request,", "FileResponse( zip_generator(chunks), content_type=\"zip\", as_attachment=True, filename=f\"{tracker.get_slug()}.zip\", ) f.set_headers(None) return f def", "used (only the most recent runs # are considered, and", "study) form = CreateTasksForm(data=request.POST, study=study) if not form.is_valid(): error_messages =", "= ForestTask.objects.filter(participant__in=participants).order_by(\"created_on\") start_date = (study.get_earliest_data_time_bin() or study.created_on).date() end_date = (study.get_latest_data_time_bin()", "the most recent runs # are considered, and unsuccessful runs", "are assumed to invalidate old runs, clearing params) params_conflict =", "input ) ) @require_http_methods(['GET', 'POST']) @authenticate_admin @forest_enabled def create_tasks(request: ResearcherRequest,", "FileResponse from django.shortcuts import redirect, render from django.utils import timezone", "participant__study_id=study_id ) except ForestTask.DoesNotExist: return abort(404) chunks = ChunkRegistry.objects.filter(participant=tracker.participant).values(*CHUNK_FIELDS) f", "f = FileResponse( zip_generator(chunks), content_type=\"zip\", as_attachment=True, filename=f\"{tracker.get_slug()}.zip\", ) f.set_headers(None) return", "in dates] chart.append(row) # ensure that within each tree, only", "trees=ForestTree.choices(), start_date=start_date.strftime('%Y-%m-%d'), end_date=end_date.strftime('%Y-%m-%d') ) ) class CSVBuffer: line = \"\"", "@authenticate_admin def download_task_log(request: ResearcherRequest): forest_tasks = ForestTask.objects.order_by(\"created_on\") return FileResponse( stream_forest_task_log_csv(forest_tasks),", "= [] for participant in participants: for tree in ForestTree.values():", "start_date = ChunkRegistry.objects.filter(participant__in=participants).earliest(\"time_bin\") end_date = ChunkRegistry.objects.filter(participant__in=participants).latest(\"time_bin\") start_date = 
start_date.time_bin.date() end_date", "import ForestTaskStatus, ForestTree from database.data_access_models import ChunkRegistry from database.study_models import", "\"forest/create_tasks.html\", context=dict( study=study, participants=list( study.participants.order_by(\"patient_id\").values_list(\"patient_id\", flat=True) ), trees=ForestTree.choices(), start_date=start_date.strftime('%Y-%m-%d'), end_date=end_date.strftime('%Y-%m-%d')", "this double endpoint pattern, it is bad. if request.method ==", "chart=chart # this uses the jinja safe filter and should", "Study.objects.get(pk=study_id) except Study.DoesNotExist: return abort(404) # FIXME: remove this double", "only a single set of param values are used (only", "render( request, \"forest/create_tasks.html\", context=dict( study=study, participants=list( study.participants.order_by(\"patient_id\").values_list(\"patient_id\", flat=True) ), trees=ForestTree.choices(),", "= \\ ForestTask.objects.filter( external_id=forest_task_external_id, status=ForestTaskStatus.queued ).update( status=ForestTaskStatus.cancelled, stacktrace=f\"Canceled by {request.session_researcher.username}", "\\ [results[(participant.id, tree, date)] for date in dates] chart.append(row) #", "import Participant from forms.django_forms import CreateTasksForm from libs.http_utils import easy_url", "params = dict() results = defaultdict(lambda: \"--\") tracker: ForestTask for", "tracker.status.success: params[(tracker.participant_id, tracker.forest_tree, date)] = tracker.forest_param_id else: params[(tracker.participant_id, tracker.forest_tree, date)]", "of study analysis progress logs trackers = ForestTask.objects.filter(participant__in=participants).order_by(\"created_on\") start_date =", "chart_columns=[\"participant\", \"tree\"] + dates, status_choices=ForestTaskStatus, params_conflict=params_conflict, start_date=start_date, end_date=end_date, chart=chart #", "results for date ranges # by participant and tree, and", "messages.success(request, 
\"Forest task successfully cancelled.\") else: messages.warning(request, \"Sorry, we were", "= ChunkRegistry.objects.filter(participant__in=participants).earliest(\"time_bin\") end_date = ChunkRegistry.objects.filter(participant__in=participants).latest(\"time_bin\") start_date = start_date.time_bin.date() end_date =", "ResearcherRequest, study_id=None): study: Study = Study.objects.get(pk=study_id) participants: ParticipantQuerySet = Participant.objects.filter(study=study_id)", "cancelled.\") else: messages.warning(request, \"Sorry, we were unable to find or", "# this code simultaneously builds up the chart of most", "is bad. if request.method == \"GET\": return render_create_tasks(request, study) form", "download_task_data(request: ResearcherRequest, study_id, forest_task_external_id): try: tracker: ForestTask = ForestTask.objects.get( external_id=forest_task_external_id,", "@forest_enabled def cancel_task(request: ResearcherRequest, study_id, forest_task_external_id): if not request.session_researcher.site_admin: return", "def render_create_tasks(request: ResearcherRequest, study: Study): participants = Participant.objects.filter(study=study) try: start_date", "end_date = (study.get_latest_data_time_bin() or timezone.now()).date() # this code simultaneously builds", "tree in ForestTree.values(): row = [participant.patient_id, tree] + \\ [results[(participant.id,", "tracker.status == tracker.status.success: params[(tracker.participant_id, tracker.forest_tree, date)] = tracker.forest_param_id else: params[(tracker.participant_id,", "libs.http_utils import easy_url from libs.internal_types import ParticipantQuerySet, ResearcherRequest from libs.streaming_zip", "buffer = CSVBuffer() writer = csv.DictWriter(buffer, fieldnames=ForestTaskCsvSerializer.Meta.fields) writer.writeheader() yield buffer.read()", "return redirect(easy_url(\"forest_pages.task_log\", study_id=study_id)) @require_GET @authenticate_researcher_study_access @forest_enabled def task_log(request: 
ResearcherRequest, study_id=None):", "generate the date range for charting dates = list(daterange(start_date, end_date,", "for date ranges # by participant and tree, and tracks", "forest tasks if not request.session_researcher.site_admin: return abort(403) try: study =", "from django.shortcuts import redirect, render from django.utils import timezone from", "try: study = Study.objects.get(pk=study_id) except Study.DoesNotExist: return abort(404) # FIXME:", "@authenticate_researcher_study_access @forest_enabled def task_log(request: ResearcherRequest, study_id=None): study = Study.objects.get(pk=study_id) forest_tasks", "status=ForestTaskStatus.queued ).update( status=ForestTaskStatus.cancelled, stacktrace=f\"Canceled by {request.session_researcher.username} on {datetime.date.today()}\", ) if", "= FileResponse( zip_generator(chunks), content_type=\"zip\", as_attachment=True, filename=f\"{tracker.get_slug()}.zip\", ) f.set_headers(None) return f", "\"Sorry, we were unable to find or cancel this Forest", "+ dates, status_choices=ForestTaskStatus, params_conflict=params_conflict, start_date=start_date, end_date=end_date, chart=chart # this uses", "CreateTasksForm from libs.http_utils import easy_url from libs.internal_types import ParticipantQuerySet, ResearcherRequest", "unable to find or cancel this Forest task.\") return redirect(easy_url(\"forest_pages.task_log\",", "def stream_forest_task_log_csv(forest_tasks): buffer = CSVBuffer() writer = csv.DictWriter(buffer, fieldnames=ForestTaskCsvSerializer.Meta.fields) writer.writeheader()", "{datetime.date.today()}\", ) if number_updated > 0: messages.success(request, \"Forest task successfully", "import Study from database.tableau_api_models import ForestTask from database.user_models import Participant", "messages.warning(request, \"Sorry, we were unable to find or cancel this", "from libs.internal_types import ParticipantQuerySet, ResearcherRequest from libs.streaming_zip import zip_generator from", "k in 
params.keys()]): if len(set([m for k, m in params.items()", "ForestTask.objects.get( external_id=forest_task_external_id, participant__study_id=study_id ) except ForestTask.DoesNotExist: return abort(404) chunks =", "params_conflict = True break return render( request, 'forest/analysis_progress.html', context=dict( study=study,", "tracker.forest_param_id else: params[(tracker.participant_id, tracker.forest_tree, date)] = None # generate the", "ForestTask.DoesNotExist: return abort(404) chunks = ChunkRegistry.objects.filter(participant=tracker.participant).values(*CHUNK_FIELDS) f = FileResponse( zip_generator(chunks),", "@require_GET @authenticate_researcher_study_access @forest_enabled def task_log(request: ResearcherRequest, study_id=None): study = Study.objects.get(pk=study_id)", "never involve user input ) ) @require_http_methods(['GET', 'POST']) @authenticate_admin @forest_enabled", "study_id=None): study: Study = Study.objects.get(pk=study_id) participants: ParticipantQuerySet = Participant.objects.filter(study=study_id) #", "import daterange from middleware.abort_middleware import abort from serializers.forest_serializers import ForestTaskCsvSerializer,", "number_updated = \\ ForestTask.objects.filter( external_id=forest_task_external_id, status=ForestTaskStatus.queued ).update( status=ForestTaskStatus.cancelled, stacktrace=f\"Canceled by", "start_date = study.created_on.date() end_date = timezone.now().date() return render( request, \"forest/create_tasks.html\",", "SITE admin can queue forest tasks if not request.session_researcher.site_admin: return", "logs trackers = ForestTask.objects.filter(participant__in=participants).order_by(\"created_on\") start_date = (study.get_earliest_data_time_bin() or study.created_on).date() end_date", "recent forest results for date ranges # by participant and", "= dict() results = defaultdict(lambda: \"--\") tracker: ForestTask for tracker", "import csv import datetime from collections import defaultdict from 
django.contrib", "the jinja safe filter and should never involve user input", "context=dict( study=study, participants=list( study.participants.order_by(\"patient_id\").values_list(\"patient_id\", flat=True) ), trees=ForestTree.choices(), start_date=start_date.strftime('%Y-%m-%d'), end_date=end_date.strftime('%Y-%m-%d') )", "are considered, and unsuccessful runs are assumed to invalidate old", "= Study.objects.get(pk=study_id) forest_tasks = ForestTask.objects.filter(participant__study_id=study_id).order_by(\"-created_on\") return render( request, \"forest/task_log.html\", context=dict(", "study: Study): participants = Participant.objects.filter(study=study) try: start_date = ChunkRegistry.objects.filter(participant__in=participants).earliest(\"time_bin\") end_date", "study.participants.order_by(\"patient_id\").values_list(\"patient_id\", flat=True) ), trees=ForestTree.choices(), start_date=start_date.strftime('%Y-%m-%d'), end_date=end_date.strftime('%Y-%m-%d') ) ) class CSVBuffer:", "tracker.data_date_end, inclusive=True): results[(tracker.participant_id, tracker.forest_tree, date)] = tracker.status if tracker.status ==", "and tracks the metadata params = dict() results = defaultdict(lambda:", "Study.objects.get(pk=study_id) forest_tasks = ForestTask.objects.filter(participant__study_id=study_id).order_by(\"-created_on\") return render( request, \"forest/task_log.html\", context=dict( study=study,", "external_id=forest_task_external_id, status=ForestTaskStatus.queued ).update( status=ForestTaskStatus.cancelled, stacktrace=f\"Canceled by {request.session_researcher.username} on {datetime.date.today()}\", )", "params[(tracker.participant_id, tracker.forest_tree, date)] = None # generate the date range", "\"\" def read(self): return self.line def write(self, line): self.line =", "import ForestTask from database.user_models import Participant from forms.django_forms import CreateTasksForm", "for k in params.keys()]): if len(set([m for k, m in", "return abort(403) 
number_updated = \\ ForestTask.objects.filter( external_id=forest_task_external_id, status=ForestTaskStatus.queued ).update( status=ForestTaskStatus.cancelled,", "metadata params = dict() results = defaultdict(lambda: \"--\") tracker: ForestTask", "old runs, clearing params) params_conflict = False for tree in", "in forest_tasks: writer.writerow(ForestTaskCsvSerializer(forest_task).data) yield buffer.read() def render_create_tasks(request: ResearcherRequest, study: Study):", "Participant.objects.filter(study=study_id) # generate chart of study analysis progress logs trackers", "runs # are considered, and unsuccessful runs are assumed to", "ForestTask.objects.filter( external_id=forest_task_external_id, status=ForestTaskStatus.queued ).update( status=ForestTaskStatus.cancelled, stacktrace=f\"Canceled by {request.session_researcher.username} on {datetime.date.today()}\",", "require_POST from authentication.admin_authentication import (authenticate_admin, authenticate_researcher_study_access, forest_enabled) from constants.data_access_api_constants import", "start_date = start_date.time_bin.date() end_date = end_date.time_bin.date() except ChunkRegistry.DoesNotExist: start_date =", "unsuccessful runs are assumed to invalidate old runs, clearing params)", "date)] = tracker.forest_param_id else: params[(tracker.participant_id, tracker.forest_tree, date)] = None #", "for tree in ForestTree.values(): row = [participant.patient_id, tree] + \\", "break return render( request, 'forest/analysis_progress.html', context=dict( study=study, chart_columns=[\"participant\", \"tree\"] +", "a single set of param values are used (only the", "from database.user_models import Participant from forms.django_forms import CreateTasksForm from libs.http_utils", "status_choices=ForestTaskStatus, forest_log=ForestTaskSerializer(forest_tasks, many=True).data, ) ) @require_GET @authenticate_admin def download_task_log(request: ResearcherRequest):", "def download_task_log(request: 
ResearcherRequest): forest_tasks = ForestTask.objects.order_by(\"created_on\") return FileResponse( stream_forest_task_log_csv(forest_tasks), content_type=\"text/csv\",", "status_choices=ForestTaskStatus, params_conflict=params_conflict, start_date=start_date, end_date=end_date, chart=chart # this uses the jinja", "end_date = end_date.time_bin.date() except ChunkRegistry.DoesNotExist: start_date = study.created_on.date() end_date =", "runs, clearing params) params_conflict = False for tree in set([k[1]", "writer.writeheader() yield buffer.read() for forest_task in forest_tasks: writer.writerow(ForestTaskCsvSerializer(forest_task).data) yield buffer.read()", "@forest_enabled def task_log(request: ResearcherRequest, study_id=None): study = Study.objects.get(pk=study_id) forest_tasks =", "(study.get_earliest_data_time_bin() or study.created_on).date() end_date = (study.get_latest_data_time_bin() or timezone.now()).date() # this", "were unable to find or cancel this Forest task.\") return", "return abort(404) # FIXME: remove this double endpoint pattern, it", "and k[1] == tree])) > 1: params_conflict = True break", "error_messages_string = \"\\n\".join(error_messages) messages.warning(request, f\"Errors:\\n\\n{error_messages_string}\") return render_create_tasks(request, study) form.save() messages.success(request,", "ChunkRegistry from database.study_models import Study from database.tableau_api_models import ForestTask from", "not None and k[1] == tree])) > 1: params_conflict =", "should never involve user input ) ) @require_http_methods(['GET', 'POST']) @authenticate_admin", "inclusive=True)) chart = [] for participant in participants: for tree", "study_id, forest_task_external_id): try: tracker: ForestTask = ForestTask.objects.get( external_id=forest_task_external_id, participant__study_id=study_id )", "from django.contrib import messages from django.http.response import FileResponse from django.shortcuts", "dict() results = defaultdict(lambda: \"--\") tracker: 
ForestTask for tracker in", "{request.session_researcher.username} on {datetime.date.today()}\", ) if number_updated > 0: messages.success(request, \"Forest", "@authenticate_admin @forest_enabled def download_task_data(request: ResearcherRequest, study_id, forest_task_external_id): try: tracker: ForestTask", "uses the jinja safe filter and should never involve user", "if m is not None and k[1] == tree])) >", "if not form.is_valid(): error_messages = [ f'\"{field}\": {message}' for field,", "task successfully cancelled.\") else: messages.warning(request, \"Sorry, we were unable to", "messages in form.errors.items() for message in messages ] error_messages_string =", "= study.created_on.date() end_date = timezone.now().date() return render( request, \"forest/create_tasks.html\", context=dict(", "filename=f\"forest_task_log_{timezone.now().isoformat()}.csv\", as_attachment=True, ) @require_POST @authenticate_admin @forest_enabled def cancel_task(request: ResearcherRequest, study_id,", "in params.keys()]): if len(set([m for k, m in params.items() if", "Study from database.tableau_api_models import ForestTask from database.user_models import Participant from", "request.session_researcher.site_admin: return abort(403) try: study = Study.objects.get(pk=study_id) except Study.DoesNotExist: return", "return render_create_tasks(request, study) form = CreateTasksForm(data=request.POST, study=study) if not form.is_valid():", "= [ f'\"{field}\": {message}' for field, messages in form.errors.items() for", "end_date.time_bin.date() except ChunkRegistry.DoesNotExist: start_date = study.created_on.date() end_date = timezone.now().date() return", "messages ] error_messages_string = \"\\n\".join(error_messages) messages.warning(request, f\"Errors:\\n\\n{error_messages_string}\") return render_create_tasks(request, study)", "tracker in trackers: for date in daterange(tracker.data_date_start, tracker.data_date_end, inclusive=True): results[(tracker.participant_id,", "successfully 
queued!\") return redirect(easy_url(\"forest_pages.task_log\", study_id=study_id)) @require_GET @authenticate_researcher_study_access @forest_enabled def task_log(request:", "ForestTask.objects.order_by(\"created_on\") return FileResponse( stream_forest_task_log_csv(forest_tasks), content_type=\"text/csv\", filename=f\"forest_task_log_{timezone.now().isoformat()}.csv\", as_attachment=True, ) @require_POST @authenticate_admin", ") @require_http_methods(['GET', 'POST']) @authenticate_admin @forest_enabled def create_tasks(request: ResearcherRequest, study_id=None): #", "from database.tableau_api_models import ForestTask from database.user_models import Participant from forms.django_forms", "database.user_models import Participant from forms.django_forms import CreateTasksForm from libs.http_utils import", "most recent forest results for date ranges # by participant", "ranges # by participant and tree, and tracks the metadata", "render_create_tasks(request, study) form = CreateTasksForm(data=request.POST, study=study) if not form.is_valid(): error_messages", "1: params_conflict = True break return render( request, 'forest/analysis_progress.html', context=dict(", "f\"Errors:\\n\\n{error_messages_string}\") return render_create_tasks(request, study) form.save() messages.success(request, \"Forest tasks successfully queued!\")", "forest_task in forest_tasks: writer.writerow(ForestTaskCsvSerializer(forest_task).data) yield buffer.read() def render_create_tasks(request: ResearcherRequest, study:", "@authenticate_researcher_study_access @forest_enabled def analysis_progress(request: ResearcherRequest, study_id=None): study: Study = Study.objects.get(pk=study_id)", "tracker.status if tracker.status == tracker.status.success: params[(tracker.participant_id, tracker.forest_tree, date)] = tracker.forest_param_id", "return render( request, \"forest/create_tasks.html\", context=dict( study=study, participants=list( 
study.participants.order_by(\"patient_id\").values_list(\"patient_id\", flat=True) ),", "ForestTask = ForestTask.objects.get( external_id=forest_task_external_id, participant__study_id=study_id ) except ForestTask.DoesNotExist: return abort(404)", "code simultaneously builds up the chart of most recent forest", "= start_date.time_bin.date() end_date = end_date.time_bin.date() except ChunkRegistry.DoesNotExist: start_date = study.created_on.date()", "\"Forest task successfully cancelled.\") else: messages.warning(request, \"Sorry, we were unable", "Study.DoesNotExist: return abort(404) # FIXME: remove this double endpoint pattern,", "analysis progress logs trackers = ForestTask.objects.filter(participant__in=participants).order_by(\"created_on\") start_date = (study.get_earliest_data_time_bin() or", "authentication.admin_authentication import (authenticate_admin, authenticate_researcher_study_access, forest_enabled) from constants.data_access_api_constants import CHUNK_FIELDS from", "import zip_generator from libs.utils.date_utils import daterange from middleware.abort_middleware import abort", "request.session_researcher.site_admin: return abort(403) number_updated = \\ ForestTask.objects.filter( external_id=forest_task_external_id, status=ForestTaskStatus.queued ).update(", "tracker.forest_tree, date)] = tracker.status if tracker.status == tracker.status.success: params[(tracker.participant_id, tracker.forest_tree,", "= ChunkRegistry.objects.filter(participant=tracker.participant).values(*CHUNK_FIELDS) f = FileResponse( zip_generator(chunks), content_type=\"zip\", as_attachment=True, filename=f\"{tracker.get_slug()}.zip\", )", "ForestTask.objects.filter(participant__in=participants).order_by(\"created_on\") start_date = (study.get_earliest_data_time_bin() or study.created_on).date() end_date = (study.get_latest_data_time_bin() or", "import ChunkRegistry from database.study_models import Study from database.tableau_api_models import ForestTask", "for forest_task in 
forest_tasks: writer.writerow(ForestTaskCsvSerializer(forest_task).data) yield buffer.read() def render_create_tasks(request: ResearcherRequest,", "forest_tasks = ForestTask.objects.filter(participant__study_id=study_id).order_by(\"-created_on\") return render( request, \"forest/task_log.html\", context=dict( study=study, is_site_admin=request.session_researcher.site_admin,", "content_type=\"zip\", as_attachment=True, filename=f\"{tracker.get_slug()}.zip\", ) f.set_headers(None) return f def stream_forest_task_log_csv(forest_tasks): buffer", "study = Study.objects.get(pk=study_id) forest_tasks = ForestTask.objects.filter(participant__study_id=study_id).order_by(\"-created_on\") return render( request, \"forest/task_log.html\",", "datetime from collections import defaultdict from django.contrib import messages from", "= list(daterange(start_date, end_date, inclusive=True)) chart = [] for participant in", "date range for charting dates = list(daterange(start_date, end_date, inclusive=True)) chart", "ResearcherRequest, study_id, forest_task_external_id): try: tracker: ForestTask = ForestTask.objects.get( external_id=forest_task_external_id, participant__study_id=study_id", "f.set_headers(None) return f def stream_forest_task_log_csv(forest_tasks): buffer = CSVBuffer() writer =", "participants: for tree in ForestTree.values(): row = [participant.patient_id, tree] +", "if number_updated > 0: messages.success(request, \"Forest task successfully cancelled.\") else:", "abort(403) try: study = Study.objects.get(pk=study_id) except Study.DoesNotExist: return abort(404) #", "Only a SITE admin can queue forest tasks if not", "= None # generate the date range for charting dates", "or timezone.now()).date() # this code simultaneously builds up the chart", "m in params.items() if m is not None and k[1]", "form.errors.items() for message in messages ] error_messages_string = \"\\n\".join(error_messages) messages.warning(request,", "render( request, \"forest/task_log.html\", 
context=dict( study=study, is_site_admin=request.session_researcher.site_admin, status_choices=ForestTaskStatus, forest_log=ForestTaskSerializer(forest_tasks, many=True).data, )", "trackers = ForestTask.objects.filter(participant__in=participants).order_by(\"created_on\") start_date = (study.get_earliest_data_time_bin() or study.created_on).date() end_date =", "the date range for charting dates = list(daterange(start_date, end_date, inclusive=True))", "participants = Participant.objects.filter(study=study) try: start_date = ChunkRegistry.objects.filter(participant__in=participants).earliest(\"time_bin\") end_date = ChunkRegistry.objects.filter(participant__in=participants).latest(\"time_bin\")", "participant and tree, and tracks the metadata params = dict()", "start_date=start_date.strftime('%Y-%m-%d'), end_date=end_date.strftime('%Y-%m-%d') ) ) class CSVBuffer: line = \"\" def", "m is not None and k[1] == tree])) > 1:", "assumed to invalidate old runs, clearing params) params_conflict = False", "# Only a SITE admin can queue forest tasks if", "= ForestTask.objects.get( external_id=forest_task_external_id, participant__study_id=study_id ) except ForestTask.DoesNotExist: return abort(404) chunks", "in set([k[1] for k in params.keys()]): if len(set([m for k,", ") class CSVBuffer: line = \"\" def read(self): return self.line", "forest_tasks = ForestTask.objects.order_by(\"created_on\") return FileResponse( stream_forest_task_log_csv(forest_tasks), content_type=\"text/csv\", filename=f\"forest_task_log_{timezone.now().isoformat()}.csv\", as_attachment=True, )", "redirect, render from django.utils import timezone from django.views.decorators.http import require_GET,", "we were unable to find or cancel this Forest task.\")", "def read(self): return self.line def write(self, line): self.line = line", "up the chart of most recent forest results for date", "> 1: params_conflict = True break return render( request, 'forest/analysis_progress.html',", "end_date, 
inclusive=True)) chart = [] for participant in participants: for", "end_date=end_date, chart=chart # this uses the jinja safe filter and", "# FIXME: remove this double endpoint pattern, it is bad.", "render_create_tasks(request: ResearcherRequest, study: Study): participants = Participant.objects.filter(study=study) try: start_date =", "this Forest task.\") return redirect(easy_url(\"forest_pages.task_log\", study_id=study_id)) @require_GET @authenticate_admin @forest_enabled def", "@require_GET @authenticate_admin def download_task_log(request: ResearcherRequest): forest_tasks = ForestTask.objects.order_by(\"created_on\") return FileResponse(", "message in messages ] error_messages_string = \"\\n\".join(error_messages) messages.warning(request, f\"Errors:\\n\\n{error_messages_string}\") return", "None # generate the date range for charting dates =", "\"Forest tasks successfully queued!\") return redirect(easy_url(\"forest_pages.task_log\", study_id=study_id)) @require_GET @authenticate_researcher_study_access @forest_enabled", "return render( request, \"forest/task_log.html\", context=dict( study=study, is_site_admin=request.session_researcher.site_admin, status_choices=ForestTaskStatus, forest_log=ForestTaskSerializer(forest_tasks, many=True).data,", "Participant from forms.django_forms import CreateTasksForm from libs.http_utils import easy_url from", ") @require_POST @authenticate_admin @forest_enabled def cancel_task(request: ResearcherRequest, study_id, forest_task_external_id): if", "False for tree in set([k[1] for k in params.keys()]): if", "from constants.forest_constants import ForestTaskStatus, ForestTree from database.data_access_models import ChunkRegistry from", "render( request, 'forest/analysis_progress.html', context=dict( study=study, chart_columns=[\"participant\", \"tree\"] + dates, status_choices=ForestTaskStatus,", "this code simultaneously builds up the chart of most recent", "import timezone from django.views.decorators.http import 
require_GET, require_http_methods, require_POST from authentication.admin_authentication", "participants: ParticipantQuerySet = Participant.objects.filter(study=study_id) # generate chart of study analysis", "return redirect(easy_url(\"forest_pages.task_log\", study_id=study_id)) @require_GET @authenticate_admin @forest_enabled def download_task_data(request: ResearcherRequest, study_id,", "set of param values are used (only the most recent", "try: start_date = ChunkRegistry.objects.filter(participant__in=participants).earliest(\"time_bin\") end_date = ChunkRegistry.objects.filter(participant__in=participants).latest(\"time_bin\") start_date = start_date.time_bin.date()", "@require_GET @authenticate_researcher_study_access @forest_enabled def analysis_progress(request: ResearcherRequest, study_id=None): study: Study =", "from django.utils import timezone from django.views.decorators.http import require_GET, require_http_methods, require_POST", "or study.created_on).date() end_date = (study.get_latest_data_time_bin() or timezone.now()).date() # this code", "from database.study_models import Study from database.tableau_api_models import ForestTask from database.user_models", "bad. 
if request.method == \"GET\": return render_create_tasks(request, study) form =", "# generate the date range for charting dates = list(daterange(start_date,", "== tree])) > 1: params_conflict = True break return render(", "download_task_log(request: ResearcherRequest): forest_tasks = ForestTask.objects.order_by(\"created_on\") return FileResponse( stream_forest_task_log_csv(forest_tasks), content_type=\"text/csv\", filename=f\"forest_task_log_{timezone.now().isoformat()}.csv\",", "= timezone.now().date() return render( request, \"forest/create_tasks.html\", context=dict( study=study, participants=list( study.participants.order_by(\"patient_id\").values_list(\"patient_id\",", "(only the most recent runs # are considered, and unsuccessful", "'forest/analysis_progress.html', context=dict( study=study, chart_columns=[\"participant\", \"tree\"] + dates, status_choices=ForestTaskStatus, params_conflict=params_conflict, start_date=start_date,", "for field, messages in form.errors.items() for message in messages ]", "from authentication.admin_authentication import (authenticate_admin, authenticate_researcher_study_access, forest_enabled) from constants.data_access_api_constants import CHUNK_FIELDS" ]
[ "op in \"+-*/^%\": op2 = self.evaluate_stack(s) op1 = self.evaluate_stack(s) return", "places. ''' class Custom(Transformation): \"\"\" Most of this code comes", "Forward() atom = ((Optional(oneOf(\"- +\")) + (ident + lpar +", "\"tan\": math.tan, \"tanh\": math.tanh, \"exp\": math.exp, \"sqrt\": math.sqrt, \"radians\": math.radians,", "row, index: int): \"\"\"This class is called on each row.", "None): \"\"\" Initialize the transformation with the given parameters. expop", "{\"name\": \"Output column\", \"type\": \"string\", \"input\": \"text\", \"required\": True, \"help\":", "if x == 0 else x / abs(x), \"log\": math.log10,", "multop factor ]* expr :: term [ addop term ]*", "atom [ expop factor ]* term :: factor [ multop", "term = factor + \\ ZeroOrMore((multop + factor).setParseAction(self.push_first)) expr <<", "else: return float(op) def eval(self, num_string, parse_all=True): self.expr_stack = []", ":: factor [ multop factor ]* expr :: term [", "self.bnf = expr # map operator symbols to corresponding arithmetic", "self.evaluate_stack(s) return self.opn[op](op1, op2) elif op == \"PI\": return math.pi", "= Literal(\"(\").suppress() rpar = Literal(\")\").suppress() addop = plus | minus", "nums))) ident = Word(alphas, alphas + nums + \"_$\") plus", "including the extra output column \"\"\" row[self.output] = self.eval(re.sub(r'{(\\w+)}', lambda", "instead of # \"atom [ ^ atom ]...\", we get", "lambda a: abs(a) > epsilon and cmp(a, 0) or 0}", "= arguments[\"equation\"] self.output = arguments[\"output\"] point = Literal(\".\") e =", "math.degrees, \"sign\": lambda x: 0 if x == 0 else", "a: int(a), \"round\": round, \"floor\": math.floor, \"ceil\": math.ceil, \"sgn\": lambda", "fnumber).setParseAction(self.push_first)) | Optional(oneOf(\"- +\")) + Group(lpar + expr + rpar)", "def evaluate_stack(self, s): op = s.pop() if op == 'unary", "| '/' addop :: '+' | '-' integer :: ['+'", "factor [ multop factor ]* expr :: term [ addop", "== \"E\": return math.e # 
2.718281828 elif op in self.fn:", "The row, including the extra output column \"\"\" row[self.output] =", "values should be entered as {COLUMN NAME}\", \"required\": True, \"input\":", "math.exp, \"sqrt\": math.sqrt, \"radians\": math.radians, \"degrees\": math.degrees, \"sign\": lambda x:", "+\")) + (ident + lpar + expr + rpar |", "+ factor).setParseAction(self.push_first)) term = factor + \\ ZeroOrMore((multop + factor).setParseAction(self.push_first))", "Combine, Group, Optional, ZeroOrMore, Forward, nums, alphas, oneOf) import math", "0 else x / abs(x), \"log\": math.log10, \"ln\": math.log, \"abs\":", "[ expop factor ]* term :: factor [ multop factor", "from pyparsing import (Literal, CaselessLiteral, Word, Combine, Group, Optional, ZeroOrMore,", "+ Optional(e + Word(\"+-\" + nums, nums))) ident = Word(alphas,", "factor).setParseAction(self.push_first)) expr << term + \\ ZeroOrMore((addop + term).setParseAction(self.push_first)) #", "\"degrees\": math.degrees, \"sign\": lambda x: 0 if x == 0", "left-to-right # that is, 2^3^2 = 2^(3^2), not (2^3)^2. factor", "column\", \"type\": \"string\", \"input\": \"text\", \"required\": True, \"help\": \"The name", "the transformation with the given parameters. 
expop :: '^' multop", "is rewrap Paul McGuire's fourFn.py as a class, so I", "self.equation = arguments[\"equation\"] self.output = arguments[\"output\"] point = Literal(\".\") e", "def push_u_minus(self, strg, loc, toks): if toks and toks[0] ==", "alphas, oneOf) import math import re import operator __author__ =", "2009-03-20 $' __source__ = '''http://pyparsing.wikispaces.com/file/view/fourFn.py http://pyparsing.wikispaces.com/message/view/home/15549426 ''' __note__ = '''", "\"^\": operator.pow} self.expr_stack = None self.fn = {\"sin\": math.sin, \"sinh\":", "of this code comes from the fourFn.py pyparsing example \"\"\"", "toks[0] == '-': self.expr_stack.append('unary -') def evaluate_stack(self, s): op =", "''' __note__ = ''' All I've done is rewrap Paul", "+ rpar) ).setParseAction(self.push_u_minus) # by defining exponentiation as \"atom [", "Most of this code comes from the fourFn.py pyparsing example", "+ (ident + lpar + expr + rpar | pi", "Word, Combine, Group, Optional, ZeroOrMore, Forward, nums, alphas, oneOf) import", "abs, \"trunc\": lambda a: int(a), \"round\": round, \"floor\": math.floor, \"ceil\":", "\"tanh\": math.tanh, \"exp\": math.exp, \"sqrt\": math.sqrt, \"radians\": math.radians, \"degrees\": math.degrees,", "arguments: dict, sample_size: int, example: dict = None): \"\"\" Initialize", "self.evaluate_stack(s) op1 = self.evaluate_stack(s) return self.opn[op](op1, op2) elif op ==", "# 2.718281828 elif op in self.fn: return self.fn[op](self.evaluate_stack(s)) elif op[0].isalpha():", "\"sign\": lambda x: 0 if x == 0 else x", "column that contains the results\", \"default\": \"\"}, } def __init__(self,", "\"output\": {\"name\": \"Output column\", \"type\": \"string\", \"input\": \"text\", \"required\": True,", "\"cosh\": math.cosh, \"tan\": math.tan, \"tanh\": math.tanh, \"exp\": math.exp, \"sqrt\": math.sqrt,", "loc, toks): if toks and toks[0] == '-': self.expr_stack.append('unary -')", "$' __source__ = 
'''http://pyparsing.wikispaces.com/file/view/fourFn.py http://pyparsing.wikispaces.com/message/view/home/15549426 ''' __note__ = ''' All", "return math.pi # 3.1415926535 elif op == \"E\": return math.e", "x == 0 else x / abs(x), \"log\": math.log10, \"ln\":", "factor ]* expr :: term [ addop term ]* Arguments:", "term ]* Arguments: arguments {dict} -- The arguments \"\"\" super().__init__(arguments,", "so I can use it more easily in other places.", "as {COLUMN NAME}\", \"required\": True, \"input\": \"text\", \"default\": \"\"}, \"output\":", "get right-to-left exponents, instead of left-to-right # that is, 2^3^2", "( addop + term ).setParseAction( self.push_first ) # general_term =", "int): \"\"\"This class is called on each row. Arguments: row", "title = \"Custom equation\" key = \"Math equation\" fields =", "| div | mod expop = Literal(\"^\") pi = CaselessLiteral(\"PI\")", "dict = None): \"\"\" Initialize the transformation with the given", "atom + \\ ZeroOrMore((expop + factor).setParseAction(self.push_first)) term = factor +", "'''http://pyparsing.wikispaces.com/file/view/fourFn.py http://pyparsing.wikispaces.com/message/view/home/15549426 ''' __note__ = ''' All I've done is", "ZeroOrMore, Forward, nums, alphas, oneOf) import math import re import", "= '<NAME>' __version__ = '$Revision: 0.0 $' __date__ = '$Date:", "0} def push_first(self, strg, loc, toks): self.expr_stack.append(toks[0]) def push_u_minus(self, strg,", "parse_all=True): self.expr_stack = [] results = self.bnf.parseString(num_string, parse_all) val =", "+ Word(\"+-\" + nums, nums))) ident = Word(alphas, alphas +", "\"_$\") plus = Literal(\"+\") minus = Literal(\"-\") mult = Literal(\"*\")", "[ ^ factor ]...\" instead of # \"atom [ ^", "Returns: dict -- The row, including the extra output column", "Literal(\"*\") div = Literal(\"/\") mod = Literal(\"%\") lpar = Literal(\"(\").suppress()", "Forward, nums, alphas, oneOf) import math import re import operator", "= None): \"\"\" Initialize the 
transformation with the given parameters.", "elif op in self.fn: return self.fn[op](self.evaluate_stack(s)) elif op[0].isalpha(): return 0", "re import operator __author__ = '<NAME>' __version__ = '$Revision: 0.0", "not (2^3)^2. factor = Forward() factor << atom + \\", "# general_term = term + ZeroOrMore( addop_term ) | OneOrMore(", "Literal(\"%\") lpar = Literal(\"(\").suppress() rpar = Literal(\")\").suppress() addop = plus", "or 0} def push_first(self, strg, loc, toks): self.expr_stack.append(toks[0]) def push_u_minus(self,", "operator __author__ = '<NAME>' __version__ = '$Revision: 0.0 $' __date__", "return 0 else: return float(op) def eval(self, num_string, parse_all=True): self.expr_stack", "-': return -self.evaluate_stack(s) if op in \"+-*/^%\": op2 = self.evaluate_stack(s)", "'-': self.expr_stack.append('unary -') def evaluate_stack(self, s): op = s.pop() if", "= Literal(\"+\") minus = Literal(\"-\") mult = Literal(\"*\") div =", "epsilon = 1e-12 self.opn = {\"+\": operator.add, \"-\": operator.sub, \"*\":", "example) self.equation = arguments[\"equation\"] self.output = arguments[\"output\"] point = Literal(\".\")", "op1 = self.evaluate_stack(s) return self.opn[op](op1, op2) elif op == \"PI\":", "row Returns: dict -- The row, including the extra output", "'0'..'9'+ atom :: PI | E | real | fn", "row, including the extra output column \"\"\" row[self.output] = self.eval(re.sub(r'{(\\w+)}',", "\"\"\"This class is called on each row. Arguments: row {dict}", "\"input\": \"text\", \"default\": \"\"}, \"output\": {\"name\": \"Output column\", \"type\": \"string\",", "= self.evaluate_stack(s) return self.opn[op](op1, op2) elif op == \"PI\": return", "= Literal(\".\") e = CaselessLiteral(\"E\") fnumber = Combine(Word(\"+-\" + nums,", "\"abs\": abs, \"trunc\": lambda a: int(a), \"round\": round, \"floor\": math.floor,", "| OneOrMore( addop_term) # expr << general_term self.bnf = expr", "2^3^2 = 2^(3^2), not (2^3)^2. 
factor = Forward() factor <<", "= [] results = self.bnf.parseString(num_string, parse_all) val = self.evaluate_stack(self.expr_stack[:]) return", "and toks[0] == '-': self.expr_stack.append('unary -') def evaluate_stack(self, s): op", "expr + rpar | pi | e | fnumber).setParseAction(self.push_first)) |", "op2) elif op == \"PI\": return math.pi # 3.1415926535 elif", "the (newly created) column that contains the results\", \"default\": \"\"},", "The complete row Returns: dict -- The row, including the", "\\ ZeroOrMore((expop + factor).setParseAction(self.push_first)) term = factor + \\ ZeroOrMore((multop", "True, \"input\": \"text\", \"default\": \"\"}, \"output\": {\"name\": \"Output column\", \"type\":", "complete row Returns: dict -- The row, including the extra", "| real | fn '(' expr ')' | '(' expr", "-- The arguments \"\"\" super().__init__(arguments, sample_size, example) self.equation = arguments[\"equation\"]", "<< term + \\ ZeroOrMore((addop + term).setParseAction(self.push_first)) # addop_term =", "expr :: term [ addop term ]* Arguments: arguments {dict}", "# map operator symbols to corresponding arithmetic operations epsilon =", "loc, toks): self.expr_stack.append(toks[0]) def push_u_minus(self, strg, loc, toks): if toks", "row {dict} -- The complete row Returns: dict -- The", "self.fn: return self.fn[op](self.evaluate_stack(s)) elif op[0].isalpha(): return 0 else: return float(op)", "defining exponentiation as \"atom [ ^ factor ]...\" instead of", "expr ')' factor :: atom [ expop factor ]* term", "0.0 $' __date__ = '$Date: 2009-03-20 $' __source__ = '''http://pyparsing.wikispaces.com/file/view/fourFn.py", "+ nums, nums))) ident = Word(alphas, alphas + nums +", "\"%\": operator.mod, \"/\": operator.truediv, \"^\": operator.pow} self.expr_stack = None self.fn", "called on each row. Arguments: row {dict} -- The complete", "equation to evaluate. 
Column values should be entered as {COLUMN", "= arguments[\"output\"] point = Literal(\".\") e = CaselessLiteral(\"E\") fnumber =", "op[0].isalpha(): return 0 else: return float(op) def eval(self, num_string, parse_all=True):", "as a class, so I can use it more easily", "\"required\": True, \"help\": \"The name of the (newly created) column", "# that is, 2^3^2 = 2^(3^2), not (2^3)^2. factor =", "easily in other places. ''' class Custom(Transformation): \"\"\" Most of", "in self.fn: return self.fn[op](self.evaluate_stack(s)) elif op[0].isalpha(): return 0 else: return", "Word(alphas, alphas + nums + \"_$\") plus = Literal(\"+\") minus", "Optional(point + Optional(Word(nums))) + Optional(e + Word(\"+-\" + nums, nums)))", "| fnumber).setParseAction(self.push_first)) | Optional(oneOf(\"- +\")) + Group(lpar + expr +", "factor ]...\" instead of # \"atom [ ^ atom ]...\",", "term :: factor [ multop factor ]* expr :: term", "it more easily in other places. ''' class Custom(Transformation): \"\"\"", "Group(lpar + expr + rpar) ).setParseAction(self.push_u_minus) # by defining exponentiation", "\"type\": \"string\", \"input\": \"text\", \"required\": True, \"help\": \"The name of", "\\ ZeroOrMore((multop + factor).setParseAction(self.push_first)) expr << term + \\ ZeroOrMore((addop", "= \"Math equation\" fields = { \"equation\": {\"name\": \"Equation\", \"type\":", "(newly created) column that contains the results\", \"default\": \"\"}, }", "Word(\"+-\" + nums, nums))) ident = Word(alphas, alphas + nums", "math.cosh, \"tan\": math.tan, \"tanh\": math.tanh, \"exp\": math.exp, \"sqrt\": math.sqrt, \"radians\":", "math import re import operator __author__ = '<NAME>' __version__ =", "Literal(\"+\") minus = Literal(\"-\") mult = Literal(\"*\") div = Literal(\"/\")", "arguments {dict} -- The arguments \"\"\" super().__init__(arguments, sample_size, example) self.equation", "plus = Literal(\"+\") minus = Literal(\"-\") mult = Literal(\"*\") div", "{dict} -- The complete row Returns: 
dict -- The row,", "integer :: ['+' | '-'] '0'..'9'+ atom :: PI |", "\\ ZeroOrMore((addop + term).setParseAction(self.push_first)) # addop_term = ( addop +", "[] results = self.bnf.parseString(num_string, parse_all) val = self.evaluate_stack(self.expr_stack[:]) return val", "ident = Word(alphas, alphas + nums + \"_$\") plus =", "operator.mod, \"/\": operator.truediv, \"^\": operator.pow} self.expr_stack = None self.fn =", "import operator __author__ = '<NAME>' __version__ = '$Revision: 0.0 $'", "__init__(self, arguments: dict, sample_size: int, example: dict = None): \"\"\"", "math.sin, \"sinh\": math.sinh, \"cos\": math.cos, \"cosh\": math.cosh, \"tan\": math.tan, \"tanh\":", "op == \"E\": return math.e # 2.718281828 elif op in", "key = \"Math equation\" fields = { \"equation\": {\"name\": \"Equation\",", "import math import re import operator __author__ = '<NAME>' __version__", "parameters. expop :: '^' multop :: '*' | '/' addop", "corresponding arithmetic operations epsilon = 1e-12 self.opn = {\"+\": operator.add,", "rpar | pi | e | fnumber).setParseAction(self.push_first)) | Optional(oneOf(\"- +\"))", "__version__ = '$Revision: 0.0 $' __date__ = '$Date: 2009-03-20 $'", "\"input\": \"text\", \"required\": True, \"help\": \"The name of the (newly", "The arguments \"\"\" super().__init__(arguments, sample_size, example) self.equation = arguments[\"equation\"] self.output", "nums, nums))) ident = Word(alphas, alphas + nums + \"_$\")", "]...\" instead of # \"atom [ ^ atom ]...\", we", "factor :: atom [ expop factor ]* term :: factor", "> epsilon and cmp(a, 0) or 0} def push_first(self, strg,", "+ \\ ZeroOrMore((expop + factor).setParseAction(self.push_first)) term = factor + \\", "'<NAME>' __version__ = '$Revision: 0.0 $' __date__ = '$Date: 2009-03-20", "\"sgn\": lambda a: abs(a) > epsilon and cmp(a, 0) or", "__date__ = '$Date: 2009-03-20 $' __source__ = '''http://pyparsing.wikispaces.com/file/view/fourFn.py http://pyparsing.wikispaces.com/message/view/home/15549426 
'''", "self.expr_stack.append('unary -') def evaluate_stack(self, s): op = s.pop() if op", "num_string, parse_all=True): self.expr_stack = [] results = self.bnf.parseString(num_string, parse_all) val", "\"\"\" row[self.output] = self.eval(re.sub(r'{(\\w+)}', lambda x: str(row.get(x.group(1), 0)), self.equation)) return", "by defining exponentiation as \"atom [ ^ factor ]...\" instead", "int, example: dict = None): \"\"\" Initialize the transformation with", "'$Date: 2009-03-20 $' __source__ = '''http://pyparsing.wikispaces.com/file/view/fourFn.py http://pyparsing.wikispaces.com/message/view/home/15549426 ''' __note__ =", "Custom(Transformation): \"\"\" Most of this code comes from the fourFn.py", "addop_term) # expr << general_term self.bnf = expr # map", "a class, so I can use it more easily in", "div | mod expop = Literal(\"^\") pi = CaselessLiteral(\"PI\") expr", "rpar) ).setParseAction(self.push_u_minus) # by defining exponentiation as \"atom [ ^", ".transformation import Transformation from pyparsing import (Literal, CaselessLiteral, Word, Combine,", "= { \"equation\": {\"name\": \"Equation\", \"type\": \"string\", \"help\": \"The equation", ").setParseAction(self.push_u_minus) # by defining exponentiation as \"atom [ ^ factor", "Combine(Word(\"+-\" + nums, nums) + Optional(point + Optional(Word(nums))) + Optional(e", "\"+-*/^%\": op2 = self.evaluate_stack(s) op1 = self.evaluate_stack(s) return self.opn[op](op1, op2)", "math.floor, \"ceil\": math.ceil, \"sgn\": lambda a: abs(a) > epsilon and", "should be entered as {COLUMN NAME}\", \"required\": True, \"input\": \"text\",", "math.radians, \"degrees\": math.degrees, \"sign\": lambda x: 0 if x ==", "lambda a: int(a), \"round\": round, \"floor\": math.floor, \"ceil\": math.ceil, \"sgn\":", "fourFn.py pyparsing example \"\"\" title = \"Custom equation\" key =", "self.bnf.parseString(num_string, parse_all) val = self.evaluate_stack(self.expr_stack[:]) return val def __call__(self, row,", "__future__ import division 
from .transformation import Transformation from pyparsing import", "\"ceil\": math.ceil, \"sgn\": lambda a: abs(a) > epsilon and cmp(a,", "abs(x), \"log\": math.log10, \"ln\": math.log, \"abs\": abs, \"trunc\": lambda a:", "return math.e # 2.718281828 elif op in self.fn: return self.fn[op](self.evaluate_stack(s))", "'-'] '0'..'9'+ atom :: PI | E | real |", "= '''http://pyparsing.wikispaces.com/file/view/fourFn.py http://pyparsing.wikispaces.com/message/view/home/15549426 ''' __note__ = ''' All I've done", "\"\"\" Most of this code comes from the fourFn.py pyparsing", "elif op == \"E\": return math.e # 2.718281828 elif op", "= Literal(\"%\") lpar = Literal(\"(\").suppress() rpar = Literal(\")\").suppress() addop =", "')' | '(' expr ')' factor :: atom [ expop", "]* expr :: term [ addop term ]* Arguments: arguments", "Column values should be entered as {COLUMN NAME}\", \"required\": True,", "]* term :: factor [ multop factor ]* expr ::", "eval(self, num_string, parse_all=True): self.expr_stack = [] results = self.bnf.parseString(num_string, parse_all)", "expr ')' | '(' expr ')' factor :: atom [", "'+' | '-' integer :: ['+' | '-'] '0'..'9'+ atom", "extra output column \"\"\" row[self.output] = self.eval(re.sub(r'{(\\w+)}', lambda x: str(row.get(x.group(1),", "code comes from the fourFn.py pyparsing example \"\"\" title =", "Arguments: arguments {dict} -- The arguments \"\"\" super().__init__(arguments, sample_size, example)", "fourFn.py as a class, so I can use it more", "\"help\": \"The equation to evaluate. 
Column values should be entered", "nums) + Optional(point + Optional(Word(nums))) + Optional(e + Word(\"+-\" +", "== '-': self.expr_stack.append('unary -') def evaluate_stack(self, s): op = s.pop()", "0 else: return float(op) def eval(self, num_string, parse_all=True): self.expr_stack =", "the results\", \"default\": \"\"}, } def __init__(self, arguments: dict, sample_size:", "operator.pow} self.expr_stack = None self.fn = {\"sin\": math.sin, \"sinh\": math.sinh,", "super().__init__(arguments, sample_size, example) self.equation = arguments[\"equation\"] self.output = arguments[\"output\"] point", "example: dict = None): \"\"\" Initialize the transformation with the", "\"PI\": return math.pi # 3.1415926535 elif op == \"E\": return", "'*' | '/' addop :: '+' | '-' integer ::", "math.sqrt, \"radians\": math.radians, \"degrees\": math.degrees, \"sign\": lambda x: 0 if", "\"/\": operator.truediv, \"^\": operator.pow} self.expr_stack = None self.fn = {\"sin\":", "\"\"}, } def __init__(self, arguments: dict, sample_size: int, example: dict", "= CaselessLiteral(\"E\") fnumber = Combine(Word(\"+-\" + nums, nums) + Optional(point", "fields = { \"equation\": {\"name\": \"Equation\", \"type\": \"string\", \"help\": \"The", "-') def evaluate_stack(self, s): op = s.pop() if op ==", "CaselessLiteral(\"PI\") expr = Forward() atom = ((Optional(oneOf(\"- +\")) + (ident", "| Optional(oneOf(\"- +\")) + Group(lpar + expr + rpar) ).setParseAction(self.push_u_minus)", "\"help\": \"The name of the (newly created) column that contains", "__call__(self, row, index: int): \"\"\"This class is called on each", "\"sqrt\": math.sqrt, \"radians\": math.radians, \"degrees\": math.degrees, \"sign\": lambda x: 0", "that contains the results\", \"default\": \"\"}, } def __init__(self, arguments:", "\"\"}, \"output\": {\"name\": \"Output column\", \"type\": \"string\", \"input\": \"text\", \"required\":", ":: '^' multop :: '*' | '/' addop :: '+'", "general_term = term + ZeroOrMore( addop_term ) | 
OneOrMore( addop_term)", "\"-\": operator.sub, \"*\": operator.mul, \"%\": operator.mod, \"/\": operator.truediv, \"^\": operator.pow}", "op in self.fn: return self.fn[op](self.evaluate_stack(s)) elif op[0].isalpha(): return 0 else:", "\"Output column\", \"type\": \"string\", \"input\": \"text\", \"required\": True, \"help\": \"The", "op = s.pop() if op == 'unary -': return -self.evaluate_stack(s)", "import Transformation from pyparsing import (Literal, CaselessLiteral, Word, Combine, Group,", "-self.evaluate_stack(s) if op in \"+-*/^%\": op2 = self.evaluate_stack(s) op1 =", "self.opn[op](op1, op2) elif op == \"PI\": return math.pi # 3.1415926535", "strg, loc, toks): self.expr_stack.append(toks[0]) def push_u_minus(self, strg, loc, toks): if", "rewrap Paul McGuire's fourFn.py as a class, so I can", "more easily in other places. ''' class Custom(Transformation): \"\"\" Most", "right-to-left exponents, instead of left-to-right # that is, 2^3^2 =", "None self.fn = {\"sin\": math.sin, \"sinh\": math.sinh, \"cos\": math.cos, \"cosh\":", "factor).setParseAction(self.push_first)) term = factor + \\ ZeroOrMore((multop + factor).setParseAction(self.push_first)) expr", "| e | fnumber).setParseAction(self.push_first)) | Optional(oneOf(\"- +\")) + Group(lpar +", "= expr # map operator symbols to corresponding arithmetic operations", "transformation with the given parameters. expop :: '^' multop ::", "<< general_term self.bnf = expr # map operator symbols to", "= ((Optional(oneOf(\"- +\")) + (ident + lpar + expr +", "math.cos, \"cosh\": math.cosh, \"tan\": math.tan, \"tanh\": math.tanh, \"exp\": math.exp, \"sqrt\":", "''' All I've done is rewrap Paul McGuire's fourFn.py as", "Literal(\")\").suppress() addop = plus | minus multop = mult |", "the fourFn.py pyparsing example \"\"\" title = \"Custom equation\" key", "True, \"help\": \"The name of the (newly created) column that", "\"string\", \"help\": \"The equation to evaluate. 
Column values should be", "\"string\", \"input\": \"text\", \"required\": True, \"help\": \"The name of the", "results\", \"default\": \"\"}, } def __init__(self, arguments: dict, sample_size: int,", "class Custom(Transformation): \"\"\" Most of this code comes from the", "'^' multop :: '*' | '/' addop :: '+' |", "(Literal, CaselessLiteral, Word, Combine, Group, Optional, ZeroOrMore, Forward, nums, alphas,", "equation\" fields = { \"equation\": {\"name\": \"Equation\", \"type\": \"string\", \"help\":", "Forward() factor << atom + \\ ZeroOrMore((expop + factor).setParseAction(self.push_first)) term", "| '-'] '0'..'9'+ atom :: PI | E | real", "= Literal(\"*\") div = Literal(\"/\") mod = Literal(\"%\") lpar =", "div = Literal(\"/\") mod = Literal(\"%\") lpar = Literal(\"(\").suppress() rpar", "operator.truediv, \"^\": operator.pow} self.expr_stack = None self.fn = {\"sin\": math.sin,", "2.718281828 elif op in self.fn: return self.fn[op](self.evaluate_stack(s)) elif op[0].isalpha(): return", "term + \\ ZeroOrMore((addop + term).setParseAction(self.push_first)) # addop_term = (", "fnumber = Combine(Word(\"+-\" + nums, nums) + Optional(point + Optional(Word(nums)))", "^ atom ]...\", we get right-to-left exponents, instead of left-to-right", "{\"+\": operator.add, \"-\": operator.sub, \"*\": operator.mul, \"%\": operator.mod, \"/\": operator.truediv,", ":: atom [ expop factor ]* term :: factor [", "= self.evaluate_stack(self.expr_stack[:]) return val def __call__(self, row, index: int): \"\"\"This", "pi = CaselessLiteral(\"PI\") expr = Forward() atom = ((Optional(oneOf(\"- +\"))", "addop_term ) | OneOrMore( addop_term) # expr << general_term self.bnf", "arithmetic operations epsilon = 1e-12 self.opn = {\"+\": operator.add, \"-\":", "+ Optional(point + Optional(Word(nums))) + Optional(e + Word(\"+-\" + nums,", "a: abs(a) > epsilon and cmp(a, 0) or 0} def", "+\")) + Group(lpar + expr + rpar) ).setParseAction(self.push_u_minus) # by", "\"\"\" Initialize the transformation 
with the given parameters. expop ::", "NAME}\", \"required\": True, \"input\": \"text\", \"default\": \"\"}, \"output\": {\"name\": \"Output", "+ nums, nums) + Optional(point + Optional(Word(nums))) + Optional(e +", "= Literal(\"^\") pi = CaselessLiteral(\"PI\") expr = Forward() atom =", "abs(a) > epsilon and cmp(a, 0) or 0} def push_first(self,", "int(a), \"round\": round, \"floor\": math.floor, \"ceil\": math.ceil, \"sgn\": lambda a:", "class, so I can use it more easily in other", "http://pyparsing.wikispaces.com/message/view/home/15549426 ''' __note__ = ''' All I've done is rewrap", "done is rewrap Paul McGuire's fourFn.py as a class, so", "\"atom [ ^ atom ]...\", we get right-to-left exponents, instead", "= \"Custom equation\" key = \"Math equation\" fields = {", "\"\"\" title = \"Custom equation\" key = \"Math equation\" fields", "toks): if toks and toks[0] == '-': self.expr_stack.append('unary -') def", "# by defining exponentiation as \"atom [ ^ factor ]...\"", "\"Custom equation\" key = \"Math equation\" fields = { \"equation\":", "term).setParseAction(self.push_first)) # addop_term = ( addop + term ).setParseAction( self.push_first", "factor << atom + \\ ZeroOrMore((expop + factor).setParseAction(self.push_first)) term =", "# \"atom [ ^ atom ]...\", we get right-to-left exponents,", "+ Group(lpar + expr + rpar) ).setParseAction(self.push_u_minus) # by defining", "expop factor ]* term :: factor [ multop factor ]*", "Initialize the transformation with the given parameters. 
expop :: '^'", "x / abs(x), \"log\": math.log10, \"ln\": math.log, \"abs\": abs, \"trunc\":", "toks and toks[0] == '-': self.expr_stack.append('unary -') def evaluate_stack(self, s):", "factor + \\ ZeroOrMore((multop + factor).setParseAction(self.push_first)) expr << term +", "arguments \"\"\" super().__init__(arguments, sample_size, example) self.equation = arguments[\"equation\"] self.output =", "| '(' expr ')' factor :: atom [ expop factor", "sample_size: int, example: dict = None): \"\"\" Initialize the transformation", "atom :: PI | E | real | fn '('", "__author__ = '<NAME>' __version__ = '$Revision: 0.0 $' __date__ =", "of the (newly created) column that contains the results\", \"default\":", "| minus multop = mult | div | mod expop", "math.sinh, \"cos\": math.cos, \"cosh\": math.cosh, \"tan\": math.tan, \"tanh\": math.tanh, \"exp\":", ":: ['+' | '-'] '0'..'9'+ atom :: PI | E", "Literal(\"(\").suppress() rpar = Literal(\")\").suppress() addop = plus | minus multop", "e = CaselessLiteral(\"E\") fnumber = Combine(Word(\"+-\" + nums, nums) +", "== 'unary -': return -self.evaluate_stack(s) if op in \"+-*/^%\": op2", "operations epsilon = 1e-12 self.opn = {\"+\": operator.add, \"-\": operator.sub,", "+ Optional(Word(nums))) + Optional(e + Word(\"+-\" + nums, nums))) ident", "\"sinh\": math.sinh, \"cos\": math.cos, \"cosh\": math.cosh, \"tan\": math.tan, \"tanh\": math.tanh,", "| fn '(' expr ')' | '(' expr ')' factor", "E | real | fn '(' expr ')' | '('", "push_u_minus(self, strg, loc, toks): if toks and toks[0] == '-':", "[ addop term ]* Arguments: arguments {dict} -- The arguments", "exponents, instead of left-to-right # that is, 2^3^2 = 2^(3^2),", "term + ZeroOrMore( addop_term ) | OneOrMore( addop_term) # expr", "| mod expop = Literal(\"^\") pi = CaselessLiteral(\"PI\") expr =", "]...\", we get right-to-left exponents, instead of left-to-right # that", "\"default\": \"\"}, \"output\": {\"name\": \"Output column\", \"type\": \"string\", \"input\": \"text\",", "-- 
The complete row Returns: dict -- The row, including", "0 if x == 0 else x / abs(x), \"log\":", "\"trunc\": lambda a: int(a), \"round\": round, \"floor\": math.floor, \"ceil\": math.ceil,", "return val def __call__(self, row, index: int): \"\"\"This class is", "nums + \"_$\") plus = Literal(\"+\") minus = Literal(\"-\") mult", "{COLUMN NAME}\", \"required\": True, \"input\": \"text\", \"default\": \"\"}, \"output\": {\"name\":", "| '-' integer :: ['+' | '-'] '0'..'9'+ atom ::", "addop term ]* Arguments: arguments {dict} -- The arguments \"\"\"", "mod = Literal(\"%\") lpar = Literal(\"(\").suppress() rpar = Literal(\")\").suppress() addop", "CaselessLiteral(\"E\") fnumber = Combine(Word(\"+-\" + nums, nums) + Optional(point +", "addop = plus | minus multop = mult | div", "s.pop() if op == 'unary -': return -self.evaluate_stack(s) if op", "= Combine(Word(\"+-\" + nums, nums) + Optional(point + Optional(Word(nums))) +", "the extra output column \"\"\" row[self.output] = self.eval(re.sub(r'{(\\w+)}', lambda x:", "== \"PI\": return math.pi # 3.1415926535 elif op == \"E\":", "def eval(self, num_string, parse_all=True): self.expr_stack = [] results = self.bnf.parseString(num_string,", "we get right-to-left exponents, instead of left-to-right # that is,", "val = self.evaluate_stack(self.expr_stack[:]) return val def __call__(self, row, index: int):", "\"E\": return math.e # 2.718281828 elif op in self.fn: return", "math.log, \"abs\": abs, \"trunc\": lambda a: int(a), \"round\": round, \"floor\":", "= factor + \\ ZeroOrMore((multop + factor).setParseAction(self.push_first)) expr << term", "Optional(oneOf(\"- +\")) + Group(lpar + expr + rpar) ).setParseAction(self.push_u_minus) #", "in \"+-*/^%\": op2 = self.evaluate_stack(s) op1 = self.evaluate_stack(s) return self.opn[op](op1,", "atom = ((Optional(oneOf(\"- +\")) + (ident + lpar + expr", "def __call__(self, row, index: int): \"\"\"This class is called on", "arguments[\"equation\"] self.output = arguments[\"output\"] point 
= Literal(\".\") e = CaselessLiteral(\"E\")", "sample_size, example) self.equation = arguments[\"equation\"] self.output = arguments[\"output\"] point =", "elif op[0].isalpha(): return 0 else: return float(op) def eval(self, num_string,", "given parameters. expop :: '^' multop :: '*' | '/'", "if op == 'unary -': return -self.evaluate_stack(s) if op in", "op2 = self.evaluate_stack(s) op1 = self.evaluate_stack(s) return self.opn[op](op1, op2) elif", "import (Literal, CaselessLiteral, Word, Combine, Group, Optional, ZeroOrMore, Forward, nums,", "point = Literal(\".\") e = CaselessLiteral(\"E\") fnumber = Combine(Word(\"+-\" +", "\"ln\": math.log, \"abs\": abs, \"trunc\": lambda a: int(a), \"round\": round,", "on each row. Arguments: row {dict} -- The complete row", "\"cos\": math.cos, \"cosh\": math.cosh, \"tan\": math.tan, \"tanh\": math.tanh, \"exp\": math.exp,", "\"equation\": {\"name\": \"Equation\", \"type\": \"string\", \"help\": \"The equation to evaluate.", "addop + term ).setParseAction( self.push_first ) # general_term = term", "push_first(self, strg, loc, toks): self.expr_stack.append(toks[0]) def push_u_minus(self, strg, loc, toks):", "Literal(\"/\") mod = Literal(\"%\") lpar = Literal(\"(\").suppress() rpar = Literal(\")\").suppress()", "def push_first(self, strg, loc, toks): self.expr_stack.append(toks[0]) def push_u_minus(self, strg, loc,", "equation\" key = \"Math equation\" fields = { \"equation\": {\"name\":", "this code comes from the fourFn.py pyparsing example \"\"\" title", "operator.add, \"-\": operator.sub, \"*\": operator.mul, \"%\": operator.mod, \"/\": operator.truediv, \"^\":", "# addop_term = ( addop + term ).setParseAction( self.push_first )", "= self.eval(re.sub(r'{(\\w+)}', lambda x: str(row.get(x.group(1), 0)), self.equation)) return row, index", "comes from the fourFn.py pyparsing example \"\"\" title = \"Custom", "Optional(Word(nums))) + Optional(e + Word(\"+-\" + nums, nums))) ident =", "the given parameters. 
expop :: '^' multop :: '*' |", "= 1e-12 self.opn = {\"+\": operator.add, \"-\": operator.sub, \"*\": operator.mul,", "mod expop = Literal(\"^\") pi = CaselessLiteral(\"PI\") expr = Forward()", "self.fn = {\"sin\": math.sin, \"sinh\": math.sinh, \"cos\": math.cos, \"cosh\": math.cosh,", "cmp(a, 0) or 0} def push_first(self, strg, loc, toks): self.expr_stack.append(toks[0])", "= Forward() atom = ((Optional(oneOf(\"- +\")) + (ident + lpar", "'(' expr ')' factor :: atom [ expop factor ]*", "0) or 0} def push_first(self, strg, loc, toks): self.expr_stack.append(toks[0]) def", "= CaselessLiteral(\"PI\") expr = Forward() atom = ((Optional(oneOf(\"- +\")) +", "mult = Literal(\"*\") div = Literal(\"/\") mod = Literal(\"%\") lpar", "to corresponding arithmetic operations epsilon = 1e-12 self.opn = {\"+\":", "math.e # 2.718281828 elif op in self.fn: return self.fn[op](self.evaluate_stack(s)) elif", "= '$Revision: 0.0 $' __date__ = '$Date: 2009-03-20 $' __source__", "return float(op) def eval(self, num_string, parse_all=True): self.expr_stack = [] results", "class is called on each row. Arguments: row {dict} --", "\"The name of the (newly created) column that contains the", "created) column that contains the results\", \"default\": \"\"}, } def", "= None self.fn = {\"sin\": math.sin, \"sinh\": math.sinh, \"cos\": math.cos,", "__source__ = '''http://pyparsing.wikispaces.com/file/view/fourFn.py http://pyparsing.wikispaces.com/message/view/home/15549426 ''' __note__ = ''' All I've", "float(op) def eval(self, num_string, parse_all=True): self.expr_stack = [] results =", "mult | div | mod expop = Literal(\"^\") pi =", "= s.pop() if op == 'unary -': return -self.evaluate_stack(s) if", "pyparsing example \"\"\" title = \"Custom equation\" key = \"Math", "\"Equation\", \"type\": \"string\", \"help\": \"The equation to evaluate. 
Column values", "column \"\"\" row[self.output] = self.eval(re.sub(r'{(\\w+)}', lambda x: str(row.get(x.group(1), 0)), self.equation))", "I can use it more easily in other places. '''", "epsilon and cmp(a, 0) or 0} def push_first(self, strg, loc,", "1e-12 self.opn = {\"+\": operator.add, \"-\": operator.sub, \"*\": operator.mul, \"%\":", "Arguments: row {dict} -- The complete row Returns: dict --", "= self.evaluate_stack(s) op1 = self.evaluate_stack(s) return self.opn[op](op1, op2) elif op", "\"radians\": math.radians, \"degrees\": math.degrees, \"sign\": lambda x: 0 if x", "from .transformation import Transformation from pyparsing import (Literal, CaselessLiteral, Word,", ") | OneOrMore( addop_term) # expr << general_term self.bnf =", "expr << term + \\ ZeroOrMore((addop + term).setParseAction(self.push_first)) # addop_term", "toks): self.expr_stack.append(toks[0]) def push_u_minus(self, strg, loc, toks): if toks and", "dict -- The row, including the extra output column \"\"\"", "nums, nums) + Optional(point + Optional(Word(nums))) + Optional(e + Word(\"+-\"", "+ \\ ZeroOrMore((addop + term).setParseAction(self.push_first)) # addop_term = ( addop", "multop = mult | div | mod expop = Literal(\"^\")", "math.pi # 3.1415926535 elif op == \"E\": return math.e #", "lpar + expr + rpar | pi | e |", "factor ]* term :: factor [ multop factor ]* expr", "''' class Custom(Transformation): \"\"\" Most of this code comes from", "to evaluate. Column values should be entered as {COLUMN NAME}\",", "self.output = arguments[\"output\"] point = Literal(\".\") e = CaselessLiteral(\"E\") fnumber", "alphas + nums + \"_$\") plus = Literal(\"+\") minus =", "addop_term = ( addop + term ).setParseAction( self.push_first ) #", "real | fn '(' expr ')' | '(' expr ')'", "| pi | e | fnumber).setParseAction(self.push_first)) | Optional(oneOf(\"- +\")) +", "be entered as {COLUMN NAME}\", \"required\": True, \"input\": \"text\", \"default\":", "= 2^(3^2), not (2^3)^2. 
factor = Forward() factor << atom", "= term + ZeroOrMore( addop_term ) | OneOrMore( addop_term) #", "<filename>data_scout/transformations/math_custom.py from __future__ import division from .transformation import Transformation from", "if toks and toks[0] == '-': self.expr_stack.append('unary -') def evaluate_stack(self,", "row. Arguments: row {dict} -- The complete row Returns: dict", "'(' expr ')' | '(' expr ')' factor :: atom", "and cmp(a, 0) or 0} def push_first(self, strg, loc, toks):", "= Word(alphas, alphas + nums + \"_$\") plus = Literal(\"+\")", "import division from .transformation import Transformation from pyparsing import (Literal,", "\"atom [ ^ factor ]...\" instead of # \"atom [", "fn '(' expr ')' | '(' expr ')' factor ::", "^ factor ]...\" instead of # \"atom [ ^ atom", "{ \"equation\": {\"name\": \"Equation\", \"type\": \"string\", \"help\": \"The equation to", "\"required\": True, \"input\": \"text\", \"default\": \"\"}, \"output\": {\"name\": \"Output column\",", "use it more easily in other places. ''' class Custom(Transformation):", "= {\"+\": operator.add, \"-\": operator.sub, \"*\": operator.mul, \"%\": operator.mod, \"/\":", "with the given parameters. 
expop :: '^' multop :: '*'", ":: '*' | '/' addop :: '+' | '-' integer", "\"default\": \"\"}, } def __init__(self, arguments: dict, sample_size: int, example:", "= Literal(\"/\") mod = Literal(\"%\") lpar = Literal(\"(\").suppress() rpar =", "Optional, ZeroOrMore, Forward, nums, alphas, oneOf) import math import re", "expop :: '^' multop :: '*' | '/' addop ::", "{dict} -- The arguments \"\"\" super().__init__(arguments, sample_size, example) self.equation =", "minus = Literal(\"-\") mult = Literal(\"*\") div = Literal(\"/\") mod", "| E | real | fn '(' expr ')' |", "= Literal(\")\").suppress() addop = plus | minus multop = mult", "if op in \"+-*/^%\": op2 = self.evaluate_stack(s) op1 = self.evaluate_stack(s)", "self.opn = {\"+\": operator.add, \"-\": operator.sub, \"*\": operator.mul, \"%\": operator.mod,", "evaluate. Column values should be entered as {COLUMN NAME}\", \"required\":", "} def __init__(self, arguments: dict, sample_size: int, example: dict =", "def __init__(self, arguments: dict, sample_size: int, example: dict = None):", "expr = Forward() atom = ((Optional(oneOf(\"- +\")) + (ident +", "each row. Arguments: row {dict} -- The complete row Returns:", "= Literal(\"-\") mult = Literal(\"*\") div = Literal(\"/\") mod =", "elif op == \"PI\": return math.pi # 3.1415926535 elif op", "self.expr_stack = [] results = self.bnf.parseString(num_string, parse_all) val = self.evaluate_stack(self.expr_stack[:])", "can use it more easily in other places. 
''' class", "expr << general_term self.bnf = expr # map operator symbols", "+ term ).setParseAction( self.push_first ) # general_term = term +", "rpar = Literal(\")\").suppress() addop = plus | minus multop =", "op == \"PI\": return math.pi # 3.1415926535 elif op ==", "instead of left-to-right # that is, 2^3^2 = 2^(3^2), not", "operator symbols to corresponding arithmetic operations epsilon = 1e-12 self.opn", "expr + rpar) ).setParseAction(self.push_u_minus) # by defining exponentiation as \"atom", "')' factor :: atom [ expop factor ]* term ::", "OneOrMore( addop_term) # expr << general_term self.bnf = expr #", "\"text\", \"required\": True, \"help\": \"The name of the (newly created)", "__note__ = ''' All I've done is rewrap Paul McGuire's", "I've done is rewrap Paul McGuire's fourFn.py as a class,", "# 3.1415926535 elif op == \"E\": return math.e # 2.718281828", "ZeroOrMore((addop + term).setParseAction(self.push_first)) # addop_term = ( addop + term", "oneOf) import math import re import operator __author__ = '<NAME>'", "return -self.evaluate_stack(s) if op in \"+-*/^%\": op2 = self.evaluate_stack(s) op1", "op == 'unary -': return -self.evaluate_stack(s) if op in \"+-*/^%\":", "'/' addop :: '+' | '-' integer :: ['+' |", "== 0 else x / abs(x), \"log\": math.log10, \"ln\": math.log,", "Optional(e + Word(\"+-\" + nums, nums))) ident = Word(alphas, alphas", "operator.mul, \"%\": operator.mod, \"/\": operator.truediv, \"^\": operator.pow} self.expr_stack = None", "\"text\", \"default\": \"\"}, \"output\": {\"name\": \"Output column\", \"type\": \"string\", \"input\":", "results = self.bnf.parseString(num_string, parse_all) val = self.evaluate_stack(self.expr_stack[:]) return val def", "]* Arguments: arguments {dict} -- The arguments \"\"\" super().__init__(arguments, sample_size,", "'-' integer :: ['+' | '-'] '0'..'9'+ atom :: PI", ").setParseAction( self.push_first ) # general_term = term + ZeroOrMore( addop_term", "expr # map operator symbols to corresponding 
arithmetic operations epsilon", "that is, 2^3^2 = 2^(3^2), not (2^3)^2. factor = Forward()", "name of the (newly created) column that contains the results\",", "# expr << general_term self.bnf = expr # map operator", "row[self.output] = self.eval(re.sub(r'{(\\w+)}', lambda x: str(row.get(x.group(1), 0)), self.equation)) return row,", "CaselessLiteral, Word, Combine, Group, Optional, ZeroOrMore, Forward, nums, alphas, oneOf)", "expop = Literal(\"^\") pi = CaselessLiteral(\"PI\") expr = Forward() atom", "pi | e | fnumber).setParseAction(self.push_first)) | Optional(oneOf(\"- +\")) + Group(lpar", "else x / abs(x), \"log\": math.log10, \"ln\": math.log, \"abs\": abs,", "(2^3)^2. factor = Forward() factor << atom + \\ ZeroOrMore((expop", "= '$Date: 2009-03-20 $' __source__ = '''http://pyparsing.wikispaces.com/file/view/fourFn.py http://pyparsing.wikispaces.com/message/view/home/15549426 ''' __note__", "lpar = Literal(\"(\").suppress() rpar = Literal(\")\").suppress() addop = plus |", "+ expr + rpar | pi | e | fnumber).setParseAction(self.push_first))", "as \"atom [ ^ factor ]...\" instead of # \"atom", "Literal(\".\") e = CaselessLiteral(\"E\") fnumber = Combine(Word(\"+-\" + nums, nums)", "ZeroOrMore((expop + factor).setParseAction(self.push_first)) term = factor + \\ ZeroOrMore((multop +", "+ expr + rpar) ).setParseAction(self.push_u_minus) # by defining exponentiation as", "self.expr_stack = None self.fn = {\"sin\": math.sin, \"sinh\": math.sinh, \"cos\":", "evaluate_stack(self, s): op = s.pop() if op == 'unary -':", "entered as {COLUMN NAME}\", \"required\": True, \"input\": \"text\", \"default\": \"\"},", "{\"sin\": math.sin, \"sinh\": math.sinh, \"cos\": math.cos, \"cosh\": math.cosh, \"tan\": math.tan,", "return self.fn[op](self.evaluate_stack(s)) elif op[0].isalpha(): return 0 else: return float(op) def", "= Forward() factor << atom + \\ ZeroOrMore((expop + factor).setParseAction(self.push_first))", "\"The equation to evaluate. 
Column values should be entered as", "\"floor\": math.floor, \"ceil\": math.ceil, \"sgn\": lambda a: abs(a) > epsilon", "<< atom + \\ ZeroOrMore((expop + factor).setParseAction(self.push_first)) term = factor", "[ ^ atom ]...\", we get right-to-left exponents, instead of", "\"log\": math.log10, \"ln\": math.log, \"abs\": abs, \"trunc\": lambda a: int(a),", ":: term [ addop term ]* Arguments: arguments {dict} --", "\"\"\" super().__init__(arguments, sample_size, example) self.equation = arguments[\"equation\"] self.output = arguments[\"output\"]", "index: int): \"\"\"This class is called on each row. Arguments:", "division from .transformation import Transformation from pyparsing import (Literal, CaselessLiteral,", "is, 2^3^2 = 2^(3^2), not (2^3)^2. factor = Forward() factor", "\"*\": operator.mul, \"%\": operator.mod, \"/\": operator.truediv, \"^\": operator.pow} self.expr_stack =", "\"type\": \"string\", \"help\": \"The equation to evaluate. Column values should", "contains the results\", \"default\": \"\"}, } def __init__(self, arguments: dict,", "parse_all) val = self.evaluate_stack(self.expr_stack[:]) return val def __call__(self, row, index:", "self.expr_stack.append(toks[0]) def push_u_minus(self, strg, loc, toks): if toks and toks[0]", "\"round\": round, \"floor\": math.floor, \"ceil\": math.ceil, \"sgn\": lambda a: abs(a)", "'unary -': return -self.evaluate_stack(s) if op in \"+-*/^%\": op2 =", "exponentiation as \"atom [ ^ factor ]...\" instead of #", "2^(3^2), not (2^3)^2. 
factor = Forward() factor << atom +", "'$Revision: 0.0 $' __date__ = '$Date: 2009-03-20 $' __source__ =", ") # general_term = term + ZeroOrMore( addop_term ) |", "pyparsing import (Literal, CaselessLiteral, Word, Combine, Group, Optional, ZeroOrMore, Forward,", "math.tan, \"tanh\": math.tanh, \"exp\": math.exp, \"sqrt\": math.sqrt, \"radians\": math.radians, \"degrees\":", "-- The row, including the extra output column \"\"\" row[self.output]", "map operator symbols to corresponding arithmetic operations epsilon = 1e-12", "minus multop = mult | div | mod expop =", "output column \"\"\" row[self.output] = self.eval(re.sub(r'{(\\w+)}', lambda x: str(row.get(x.group(1), 0)),", "Literal(\"-\") mult = Literal(\"*\") div = Literal(\"/\") mod = Literal(\"%\")", "is called on each row. Arguments: row {dict} -- The", "of left-to-right # that is, 2^3^2 = 2^(3^2), not (2^3)^2.", "(ident + lpar + expr + rpar | pi |", "nums, alphas, oneOf) import math import re import operator __author__", "self.evaluate_stack(self.expr_stack[:]) return val def __call__(self, row, index: int): \"\"\"This class", "Literal(\"^\") pi = CaselessLiteral(\"PI\") expr = Forward() atom = ((Optional(oneOf(\"-", "example \"\"\" title = \"Custom equation\" key = \"Math equation\"", "+ lpar + expr + rpar | pi | e", "other places. 
''' class Custom(Transformation): \"\"\" Most of this code", "/ abs(x), \"log\": math.log10, \"ln\": math.log, \"abs\": abs, \"trunc\": lambda", "term ).setParseAction( self.push_first ) # general_term = term + ZeroOrMore(", "All I've done is rewrap Paul McGuire's fourFn.py as a", "Group, Optional, ZeroOrMore, Forward, nums, alphas, oneOf) import math import", "from __future__ import division from .transformation import Transformation from pyparsing", "= plus | minus multop = mult | div |", "val def __call__(self, row, index: int): \"\"\"This class is called", "term [ addop term ]* Arguments: arguments {dict} -- The", "return self.opn[op](op1, op2) elif op == \"PI\": return math.pi #", "= {\"sin\": math.sin, \"sinh\": math.sinh, \"cos\": math.cos, \"cosh\": math.cosh, \"tan\":", "strg, loc, toks): if toks and toks[0] == '-': self.expr_stack.append('unary", "Paul McGuire's fourFn.py as a class, so I can use", "dict, sample_size: int, example: dict = None): \"\"\" Initialize the", "+ rpar | pi | e | fnumber).setParseAction(self.push_first)) | Optional(oneOf(\"-", "lambda x: 0 if x == 0 else x /", "math.ceil, \"sgn\": lambda a: abs(a) > epsilon and cmp(a, 0)", "from the fourFn.py pyparsing example \"\"\" title = \"Custom equation\"", "+ nums + \"_$\") plus = Literal(\"+\") minus = Literal(\"-\")", "round, \"floor\": math.floor, \"ceil\": math.ceil, \"sgn\": lambda a: abs(a) >", "\"exp\": math.exp, \"sqrt\": math.sqrt, \"radians\": math.radians, \"degrees\": math.degrees, \"sign\": lambda", "s): op = s.pop() if op == 'unary -': return", "['+' | '-'] '0'..'9'+ atom :: PI | E |", ":: '+' | '-' integer :: ['+' | '-'] '0'..'9'+", "arguments[\"output\"] point = Literal(\".\") e = CaselessLiteral(\"E\") fnumber = Combine(Word(\"+-\"", "+ \\ ZeroOrMore((multop + factor).setParseAction(self.push_first)) expr << term + \\", "ZeroOrMore((multop + factor).setParseAction(self.push_first)) expr << term + \\ ZeroOrMore((addop +", "general_term self.bnf = expr # map operator symbols 
to corresponding", "\"Math equation\" fields = { \"equation\": {\"name\": \"Equation\", \"type\": \"string\",", "math.tanh, \"exp\": math.exp, \"sqrt\": math.sqrt, \"radians\": math.radians, \"degrees\": math.degrees, \"sign\":", "PI | E | real | fn '(' expr ')'", "atom ]...\", we get right-to-left exponents, instead of left-to-right #", "= ( addop + term ).setParseAction( self.push_first ) # general_term", "= ''' All I've done is rewrap Paul McGuire's fourFn.py", "ZeroOrMore( addop_term ) | OneOrMore( addop_term) # expr << general_term", "in other places. ''' class Custom(Transformation): \"\"\" Most of this", "Transformation from pyparsing import (Literal, CaselessLiteral, Word, Combine, Group, Optional,", "+ \"_$\") plus = Literal(\"+\") minus = Literal(\"-\") mult =", "symbols to corresponding arithmetic operations epsilon = 1e-12 self.opn =", "plus | minus multop = mult | div | mod", "McGuire's fourFn.py as a class, so I can use it", ":: PI | E | real | fn '(' expr", "multop :: '*' | '/' addop :: '+' | '-'", "of # \"atom [ ^ atom ]...\", we get right-to-left", "= mult | div | mod expop = Literal(\"^\") pi", "self.push_first ) # general_term = term + ZeroOrMore( addop_term )", "x: 0 if x == 0 else x / abs(x),", "[ multop factor ]* expr :: term [ addop term", "{\"name\": \"Equation\", \"type\": \"string\", \"help\": \"The equation to evaluate. 
Column", "+ ZeroOrMore( addop_term ) | OneOrMore( addop_term) # expr <<", "math.log10, \"ln\": math.log, \"abs\": abs, \"trunc\": lambda a: int(a), \"round\":", "e | fnumber).setParseAction(self.push_first)) | Optional(oneOf(\"- +\")) + Group(lpar + expr", "import re import operator __author__ = '<NAME>' __version__ = '$Revision:", "addop :: '+' | '-' integer :: ['+' | '-']", "= self.bnf.parseString(num_string, parse_all) val = self.evaluate_stack(self.expr_stack[:]) return val def __call__(self,", "+ term).setParseAction(self.push_first)) # addop_term = ( addop + term ).setParseAction(", "factor = Forward() factor << atom + \\ ZeroOrMore((expop +", "operator.sub, \"*\": operator.mul, \"%\": operator.mod, \"/\": operator.truediv, \"^\": operator.pow} self.expr_stack", "+ factor).setParseAction(self.push_first)) expr << term + \\ ZeroOrMore((addop + term).setParseAction(self.push_first))", "$' __date__ = '$Date: 2009-03-20 $' __source__ = '''http://pyparsing.wikispaces.com/file/view/fourFn.py http://pyparsing.wikispaces.com/message/view/home/15549426", "3.1415926535 elif op == \"E\": return math.e # 2.718281828 elif", "self.fn[op](self.evaluate_stack(s)) elif op[0].isalpha(): return 0 else: return float(op) def eval(self,", "((Optional(oneOf(\"- +\")) + (ident + lpar + expr + rpar" ]
[ "is not None and file is None: file_dict = dict()", "else: Console.error(\"File not found.\") except Exception as e: print(e) def", "self.directory = cwd['parent'] pwd = self.col.find_one({'type': 'directory', 'cm.name': self.directory}) return", "= self.directory dir_dict['cm']['created'] = datetime.utcnow() dir_dict['cm']['modified'] = datetime.utcnow() return dir_dict", "ls(self, directory=None): try: dash = '-' * 40 if directory", "self.directory}]}) locations = \"{:<20} {:>}\".format(\"Name\", \"Location\") + \"\\n\" + dash", "locations += \"{:<20} {:>}\".format(entry['cm']['name'], location) + \"\\n\" print(locations) return locations", "self.directory: self.directory = dirname pwd = self.col.find_one({'type': 'directory', 'cm.name': self.directory})", "= self.col.find({'$or': [{'vdirectory': directory}, {'parent': directory}]}) count = self.col.count_documents({'$or': [{'vdirectory':", "= dirname file_dict['cloud_directory'] = os.path.dirname(endpoint).split(':')[1] file_dict['filename'] = os.path.basename(endpoint) file_dict['provider'] =", "count = self.col.count_documents({'$or': [{'vdirectory': self.directory}, {'parent': self.directory}]}) locations = \"{:<20}", "+= \"{:<20} {:>}\".format(entry['cm']['name'], location) + \"\\n\" print(locations) return locations except", "\"{:<20} {:>}\".format(entry['cm']['name'], location) + \"\\n\" print(locations) return locations except Exception", "that name already exists.\") except Exception as e: print(e) def", "the db wit Database update from cloudmesh.mongo.DataBaseDecorator import DatabaseUpdate from", "entry['type'] == 'fileendpoint': location = entry['provider'] + \":\" + entry['cloud_directory']", "file is None: file_dict = dict() file_dict['cm'] = { 'name':", "from cloudmesh.common.console import Console from cloudmesh.storage.Provider import Provider import os", "except Exception as e: print(e) def delete(self, dir_or_name): try: result", "datetime.utcnow() return dir_dict else: 
Console.error(\"Directory with that name exists.\") except", "'cloud': 'local' } dir_dict['type'] = 'directory' dir_dict['parent'] = self.directory dir_dict['cm']['created']", "Exception as e: print(e) def delete(self, dir_or_name): try: result =", "e: print(e) def ls(self, directory=None): try: dash = '-' *", "and file is None: file_dict = dict() file_dict['cm'] = {", "e: print(e) def get(self, name, destination=None): try: doc = self.col.find_one({'cm.name':", "cwd['parent'] pwd = self.col.find_one({'type': 'directory', 'cm.name': self.directory}) return pwd else:", "cloudmesh.mongo.DataBaseDecorator import DatabaseUpdate from cloudmesh.mongo.CmDatabase import CmDatabase from cloudmesh.common.console import", "file = self.col.find_one({\"cm.name\": filename, 'type': 'fileendpoint'}) if directory is not", "try: if dirname is None: if self.directory == 'vdir': Console.error(\"Root", "= self.col.find_one({\"cm.name\": filename, 'type': 'fileendpoint'}) if directory is not None", "def status(self, dir_or_name): try: result = self.col.find_one({'cm.name': dir_or_name}) return result", "not found.\") except Exception as e: print(e) def delete(self, dir_or_name):", "Console.error('Directory does not exist at this location.') except Exception as", "print(e) @DatabaseUpdate() def add(self, endpoint, dir_and_name): try: dirname = os.path.dirname(dir_and_name).split('/')[-1]", "'cloud': 'local' } file_dict['type'] = 'fileendpoint' file_dict['vdirectory'] = dirname file_dict['cloud_directory']", "'directory', 'cm.name': self.directory}) return pwd else: directory = self.col.find_one({'type': 'directory',", "'': location = 'Vdir' else: location = self.directory locations +=", "pwd = self.col.find_one({'type': 'directory', 'cm.name': self.directory}) return pwd else: Console.error('Directory", "import datetime class Vdir(object): def __init__(self): self.cm = CmDatabase() self.col", "'kind': 'vdir', 'cloud': 'local' } file_dict['type'] = 'fileendpoint' file_dict['vdirectory'] 
=", "Vdir(object): def __init__(self): self.cm = CmDatabase() self.col = self.cm.db['local-vdir'] self.directory", "= self.col.find({'$or': [{'vdirectory': self.directory}, {'parent': self.directory}]}) count = self.col.count_documents({'$or': [{'vdirectory':", "location = entry['provider'] + \":\" + entry['cloud_directory'] + \"/\" +", "dir_dict = dict() dir_dict['cm'] = { 'name': dirname, 'kind': 'vdir',", "} file_dict['type'] = 'fileendpoint' file_dict['vdirectory'] = dirname file_dict['cloud_directory'] = os.path.dirname(endpoint).split(':')[1]", "stores directly into the db wit Database update from cloudmesh.mongo.DataBaseDecorator", "\"\\n\" print(locations) return locations except Exception as e: print(e) @DatabaseUpdate()", "directory}, {'parent': directory}]}) else: cloudmesh = self.col.find({'$or': [{'vdirectory': self.directory}, {'parent':", "datetime import datetime class Vdir(object): def __init__(self): self.cm = CmDatabase()", "return dir_dict else: Console.error(\"Directory with that name exists.\") except Exception", "@DatabaseUpdate() def add(self, endpoint, dir_and_name): try: dirname = os.path.dirname(dir_and_name).split('/')[-1] if", "filename = os.path.basename(dir_and_name) file = self.col.find_one({\"cm.name\": filename, 'type': 'fileendpoint'}) if", "= os.path.join(doc['cloud_directory'], doc['filename']) print(source) if destination is None: destination =", "False) return file else: Console.error(\"File not found.\") except Exception as", "return file_dict elif directory is None: Console.error(\"Virtual directory not found.\")", "'type': 'fileendpoint'}) if doc is not None: self.col.update_one({'cm.name': name, 'type':", "+ \"\\n\" print(locations) return locations except Exception as e: print(e)", "directory=None): try: dash = '-' * 40 if directory is", "'fileendpoint' file_dict['vdirectory'] = dirname file_dict['cloud_directory'] = os.path.dirname(endpoint).split(':')[1] file_dict['filename'] = os.path.basename(endpoint)", "add(self, 
endpoint, dir_and_name): try: dirname = os.path.dirname(dir_and_name).split('/')[-1] if dirname ==", "self.cm = CmDatabase() self.col = self.cm.db['local-vdir'] self.directory = 'vdir' def", "[{'vdirectory': self.directory}, {'parent': self.directory}]}) locations = \"{:<20} {:>}\".format(\"Name\", \"Location\") +", "{'modified': datetime.utcnow()}}) service = doc['provider'] source = os.path.join(doc['cloud_directory'], doc['filename']) print(source)", "import Console from cloudmesh.storage.Provider import Provider import os from datetime", "doc['filename']) print(source) if destination is None: destination = '~/.cloudmesh/vdir' p", "None: cloudmesh = self.col.find({'$or': [{'vdirectory': directory}, {'parent': directory}]}) count =", "* 40 if directory is not None: cloudmesh = self.col.find({'$or':", "dash + \"\\n\" for i in range(0, count): entry =", "elif directory is None: Console.error(\"Virtual directory not found.\") elif file", "directory}]}) count = self.col.count_documents({'$or': [{'vdirectory': directory}, {'parent': directory}]}) else: cloudmesh", "def get(self, name, destination=None): try: doc = self.col.find_one({'cm.name': name, 'type':", "{'$set': {'modified': datetime.utcnow()}}) service = doc['provider'] source = os.path.join(doc['cloud_directory'], doc['filename'])", "import CmDatabase from cloudmesh.common.console import Console from cloudmesh.storage.Provider import Provider", "None: if self.directory == 'vdir': Console.error(\"Root directory reached.\") else: cwd", "'vdir' else: directory = self.col.find_one({\"cm.name\": dirname, 'type': 'directory'}) filename =", "Console.error(\"File with that name already exists.\") except Exception as e:", "= dirname pwd = self.col.find_one({'type': 'directory', 'cm.name': self.directory}) return pwd", "e: print(e) def delete(self, dir_or_name): try: result = self.col.find_one({'cm.name': dir_or_name})", "pwd = self.col.find_one({'type': 'directory', 'cm.name': self.directory}) return pwd else: 
directory", "else: location = self.directory locations += \"{:<20} {:>}\".format(entry['cm']['name'], location) +", "Provider(service) file = p.get(source, destination, False) return file else: Console.error(\"File", "None: self.col.update_one({'cm.name': name, 'type': 'fileendpoint'}, {'$set': {'modified': datetime.utcnow()}}) service =", "None: Console.error(\"Virtual directory not found.\") elif file is not None:", "= 'vdir' def cd(self, dirname=None): try: if dirname is None:", "update from cloudmesh.mongo.DataBaseDecorator import DatabaseUpdate from cloudmesh.mongo.CmDatabase import CmDatabase from", "= datetime.utcnow() return file_dict elif directory is None: Console.error(\"Virtual directory", "self.col.find_one({'type': 'directory', 'cm.name': self.directory}) return pwd else: Console.error('Directory does not", "destination=None): try: doc = self.col.find_one({'cm.name': name, 'type': 'fileendpoint'}) if doc", "else: directory = self.col.find_one({\"cm.name\": dirname, 'type': 'directory'}) filename = os.path.basename(dir_and_name)", "dirname=None): try: if dirname is None: if self.directory == 'vdir':", "with that name exists.\") except Exception as e: print(e) def", "location = self.directory locations += \"{:<20} {:>}\".format(entry['cm']['name'], location) + \"\\n\"", "'': dirname = 'vdir' directory = 'vdir' else: directory =", "+ entry['cloud_directory'] + \"/\" + entry['filename'] else: if self.directory ==", "= self.col.find_one({'cm.name': name, 'type': 'fileendpoint'}) if doc is not None:", "Exception as e: print(e) def get(self, name, destination=None): try: doc", "[{'vdirectory': directory}, {'parent': directory}]}) else: cloudmesh = self.col.find({'$or': [{'vdirectory': self.directory},", "== '': dirname = 'vdir' directory = 'vdir' else: directory", "file_dict['provider'] = os.path.dirname(endpoint).split(':')[0] file_dict['cm']['created'] = datetime.utcnow() file_dict['cm']['modified'] = datetime.utcnow() return", 
"os.path.join(doc['cloud_directory'], doc['filename']) print(source) if destination is None: destination = '~/.cloudmesh/vdir'", "directory = self.col.find_one({'type': 'directory', 'cm.name': dirname}) if directory['parent'] == self.directory:", "is not None: print(file) Console.error(\"File with that name already exists.\")", "range(0, count): entry = cloudmesh[i] if entry['type'] == 'fileendpoint': location", "Console from cloudmesh.storage.Provider import Provider import os from datetime import", "locations except Exception as e: print(e) @DatabaseUpdate() def add(self, endpoint,", "self.directory}) return pwd else: directory = self.col.find_one({'type': 'directory', 'cm.name': dirname})", "is None: file_dict = dict() file_dict['cm'] = { 'name': filename,", "'vdir', 'cloud': 'local' } dir_dict['type'] = 'directory' dir_dict['parent'] = self.directory", "Console.error(\"Directory with that name exists.\") except Exception as e: print(e)", "dirname file_dict['cloud_directory'] = os.path.dirname(endpoint).split(':')[1] file_dict['filename'] = os.path.basename(endpoint) file_dict['provider'] = os.path.dirname(endpoint).split(':')[0]", "= { 'name': dirname, 'kind': 'vdir', 'cloud': 'local' } dir_dict['type']", "not found.\") elif file is not None: print(file) Console.error(\"File with", "file_dict['cm'] = { 'name': filename, 'kind': 'vdir', 'cloud': 'local' }", "location = 'Vdir' else: location = self.directory locations += \"{:<20}", "datetime.utcnow()}}) service = doc['provider'] source = os.path.join(doc['cloud_directory'], doc['filename']) print(source) if", "'vdir': Console.error(\"Root directory reached.\") else: cwd = self.col.find_one({'type': 'directory', 'cm.name':", "= self.directory locations += \"{:<20} {:>}\".format(entry['cm']['name'], location) + \"\\n\" print(locations)", "def ls(self, directory=None): try: dash = '-' * 40 if", "'fileendpoint'}, {'$set': {'modified': datetime.utcnow()}}) service = doc['provider'] source = 
os.path.join(doc['cloud_directory'],", "cwd = self.col.find_one({'type': 'directory', 'cm.name': self.directory}) self.directory = cwd['parent'] pwd", "locations = \"{:<20} {:>}\".format(\"Name\", \"Location\") + \"\\n\" + dash +", "'type': 'directory'}) filename = os.path.basename(dir_and_name) file = self.col.find_one({\"cm.name\": filename, 'type':", "+ dash + \"\\n\" for i in range(0, count): entry", "'fileendpoint'}) if doc is not None: self.col.update_one({'cm.name': name, 'type': 'fileendpoint'},", "dir_dict['cm'] = { 'name': dirname, 'kind': 'vdir', 'cloud': 'local' }", "'type': 'fileendpoint'}, {'$set': {'modified': datetime.utcnow()}}) service = doc['provider'] source =", "does not exist at this location.') except Exception as e:", "delete(self, dir_or_name): try: result = self.col.find_one({'cm.name': dir_or_name}) self.col.delete_one({'cm.name': dir_or_name}) return", "dirname = 'vdir' directory = 'vdir' else: directory = self.col.find_one({\"cm.name\":", "datetime class Vdir(object): def __init__(self): self.cm = CmDatabase() self.col =", "pwd else: directory = self.col.find_one({'type': 'directory', 'cm.name': dirname}) if directory['parent']", "'vdir' directory = 'vdir' else: directory = self.col.find_one({\"cm.name\": dirname, 'type':", "self.col.find_one({'cm.name': dir_or_name}) self.col.delete_one({'cm.name': dir_or_name}) return result except Exception as e:", "= os.path.dirname(endpoint).split(':')[1] file_dict['filename'] = os.path.basename(endpoint) file_dict['provider'] = os.path.dirname(endpoint).split(':')[0] file_dict['cm']['created'] =", "else: Console.error(\"Directory with that name exists.\") except Exception as e:", "directory = 'vdir' else: directory = self.col.find_one({\"cm.name\": dirname, 'type': 'directory'})", "print(e) def status(self, dir_or_name): try: result = self.col.find_one({'cm.name': dir_or_name}) return", "result except Exception as e: print(e) def status(self, dir_or_name): try:", "dirname, 'type': 'directory'}) 
filename = os.path.basename(dir_and_name) file = self.col.find_one({\"cm.name\": filename,", "file_dict['filename'] = os.path.basename(endpoint) file_dict['provider'] = os.path.dirname(endpoint).split(':')[0] file_dict['cm']['created'] = datetime.utcnow() file_dict['cm']['modified']", "self.col.find({'$or': [{'vdirectory': self.directory}, {'parent': self.directory}]}) count = self.col.count_documents({'$or': [{'vdirectory': self.directory},", "self.col.find_one({'cm.name': name, 'type': 'fileendpoint'}) if doc is not None: self.col.update_one({'cm.name':", "i in range(0, count): entry = cloudmesh[i] if entry['type'] ==", "directory is None: dir_dict = dict() dir_dict['cm'] = { 'name':", "p.get(source, destination, False) return file else: Console.error(\"File not found.\") except", "@DatabaseUpdate() def mkdir(self, dirname): try: directory = self.col.find_one({\"cm.name\": dirname, 'type':", "= Provider(service) file = p.get(source, destination, False) return file else:", "file_dict['cm']['created'] = datetime.utcnow() file_dict['cm']['modified'] = datetime.utcnow() return file_dict elif directory", "cloudmesh = self.col.find({'$or': [{'vdirectory': self.directory}, {'parent': self.directory}]}) count = self.col.count_documents({'$or':", "else: directory = self.col.find_one({'type': 'directory', 'cm.name': dirname}) if directory['parent'] ==", "reached.\") else: cwd = self.col.find_one({'type': 'directory', 'cm.name': self.directory}) self.directory =", "from cloudmesh.mongo.CmDatabase import CmDatabase from cloudmesh.common.console import Console from cloudmesh.storage.Provider", "DatabaseUpdate from cloudmesh.mongo.CmDatabase import CmDatabase from cloudmesh.common.console import Console from", "if self.directory == '': location = 'Vdir' else: location =", "filename, 'kind': 'vdir', 'cloud': 'local' } file_dict['type'] = 'fileendpoint' file_dict['vdirectory']", "else: Console.error('Directory does not exist at this location.') except Exception", "= 'vdir' else: 
directory = self.col.find_one({\"cm.name\": dirname, 'type': 'directory'}) filename", "try: doc = self.col.find_one({'cm.name': name, 'type': 'fileendpoint'}) if doc is", "'cm.name': self.directory}) return pwd else: Console.error('Directory does not exist at", "dict() file_dict['cm'] = { 'name': filename, 'kind': 'vdir', 'cloud': 'local'", "= os.path.dirname(dir_and_name).split('/')[-1] if dirname == '': dirname = 'vdir' directory", "not None: print(file) Console.error(\"File with that name already exists.\") except", "'directory', 'cm.name': dirname}) if directory['parent'] == self.directory: self.directory = dirname", "dirname, 'kind': 'vdir', 'cloud': 'local' } dir_dict['type'] = 'directory' dir_dict['parent']", "return pwd else: Console.error('Directory does not exist at this location.')", "== self.directory: self.directory = dirname pwd = self.col.find_one({'type': 'directory', 'cm.name':", "None: file_dict = dict() file_dict['cm'] = { 'name': filename, 'kind':", "file_dict['cloud_directory'] = os.path.dirname(endpoint).split(':')[1] file_dict['filename'] = os.path.basename(endpoint) file_dict['provider'] = os.path.dirname(endpoint).split(':')[0] file_dict['cm']['created']", "= entry['provider'] + \":\" + entry['cloud_directory'] + \"/\" + entry['filename']", "db wit Database update from cloudmesh.mongo.DataBaseDecorator import DatabaseUpdate from cloudmesh.mongo.CmDatabase", "directory is None: Console.error(\"Virtual directory not found.\") elif file is", "import os from datetime import datetime class Vdir(object): def __init__(self):", "== 'vdir': Console.error(\"Root directory reached.\") else: cwd = self.col.find_one({'type': 'directory',", "cloudmesh.common.console import Console from cloudmesh.storage.Provider import Provider import os from", "\"{:<20} {:>}\".format(\"Name\", \"Location\") + \"\\n\" + dash + \"\\n\" for", "cd(self, dirname=None): try: if dirname is None: if self.directory ==", "Exception as e: print(e) @DatabaseUpdate() def add(self, 
endpoint, dir_and_name): try:", "as e: print(e) def get(self, name, destination=None): try: doc =", "Provider import os from datetime import datetime class Vdir(object): def", "Console.error(\"Root directory reached.\") else: cwd = self.col.find_one({'type': 'directory', 'cm.name': self.directory})", "= 'fileendpoint' file_dict['vdirectory'] = dirname file_dict['cloud_directory'] = os.path.dirname(endpoint).split(':')[1] file_dict['filename'] =", "if directory is not None: cloudmesh = self.col.find({'$or': [{'vdirectory': directory},", "os from datetime import datetime class Vdir(object): def __init__(self): self.cm", "that name exists.\") except Exception as e: print(e) def ls(self,", "dirname == '': dirname = 'vdir' directory = 'vdir' else:", "'Vdir' else: location = self.directory locations += \"{:<20} {:>}\".format(entry['cm']['name'], location)", "print(source) if destination is None: destination = '~/.cloudmesh/vdir' p =", "'~/.cloudmesh/vdir' p = Provider(service) file = p.get(source, destination, False) return", "'fileendpoint': location = entry['provider'] + \":\" + entry['cloud_directory'] + \"/\"", "'type': 'directory'}) if directory is None: dir_dict = dict() dir_dict['cm']", "'fileendpoint'}) if directory is not None and file is None:", "doc = self.col.find_one({'cm.name': name, 'type': 'fileendpoint'}) if doc is not", "{ 'name': filename, 'kind': 'vdir', 'cloud': 'local' } file_dict['type'] =", "= '~/.cloudmesh/vdir' p = Provider(service) file = p.get(source, destination, False)", "not None: self.col.update_one({'cm.name': name, 'type': 'fileendpoint'}, {'$set': {'modified': datetime.utcnow()}}) service", "[{'vdirectory': self.directory}, {'parent': self.directory}]}) count = self.col.count_documents({'$or': [{'vdirectory': self.directory}, {'parent':", "self.directory == '': location = 'Vdir' else: location = self.directory", "print(e) def ls(self, directory=None): try: dash = '-' * 40", "if entry['type'] == 'fileendpoint': location = 
entry['provider'] + \":\" +", "is None: destination = '~/.cloudmesh/vdir' p = Provider(service) file =", "filename, 'type': 'fileendpoint'}) if directory is not None and file", "{:>}\".format(\"Name\", \"Location\") + \"\\n\" + dash + \"\\n\" for i", "\"/\" + entry['filename'] else: if self.directory == '': location =", "self.col.find_one({\"cm.name\": dirname, 'type': 'directory'}) filename = os.path.basename(dir_and_name) file = self.col.find_one({\"cm.name\":", "try: dash = '-' * 40 if directory is not", "self.col.count_documents({'$or': [{'vdirectory': self.directory}, {'parent': self.directory}]}) locations = \"{:<20} {:>}\".format(\"Name\", \"Location\")", "e: print(e) def status(self, dir_or_name): try: result = self.col.find_one({'cm.name': dir_or_name})", "def delete(self, dir_or_name): try: result = self.col.find_one({'cm.name': dir_or_name}) self.col.delete_one({'cm.name': dir_or_name})", "as e: print(e) def delete(self, dir_or_name): try: result = self.col.find_one({'cm.name':", "already exists.\") except Exception as e: print(e) def get(self, name,", "'directory', 'cm.name': self.directory}) self.directory = cwd['parent'] pwd = self.col.find_one({'type': 'directory',", "Exception as e: print(e) @DatabaseUpdate() def mkdir(self, dirname): try: directory", "dirname, 'type': 'directory'}) if directory is None: dir_dict = dict()", "file else: Console.error(\"File not found.\") except Exception as e: print(e)", "try: result = self.col.find_one({'cm.name': dir_or_name}) self.col.delete_one({'cm.name': dir_or_name}) return result except", "self.col.find({'$or': [{'vdirectory': directory}, {'parent': directory}]}) count = self.col.count_documents({'$or': [{'vdirectory': directory},", "is None: if self.directory == 'vdir': Console.error(\"Root directory reached.\") else:", "file_dict['type'] = 'fileendpoint' file_dict['vdirectory'] = dirname file_dict['cloud_directory'] = os.path.dirname(endpoint).split(':')[1] file_dict['filename']", "'name': filename, 'kind': 
'vdir', 'cloud': 'local' } file_dict['type'] = 'fileendpoint'", "name already exists.\") except Exception as e: print(e) def get(self,", "result = self.col.find_one({'cm.name': dir_or_name}) self.col.delete_one({'cm.name': dir_or_name}) return result except Exception", "print(e) @DatabaseUpdate() def mkdir(self, dirname): try: directory = self.col.find_one({\"cm.name\": dirname,", "os.path.dirname(endpoint).split(':')[0] file_dict['cm']['created'] = datetime.utcnow() file_dict['cm']['modified'] = datetime.utcnow() return file_dict elif", "= { 'name': filename, 'kind': 'vdir', 'cloud': 'local' } file_dict['type']", "40 if directory is not None: cloudmesh = self.col.find({'$or': [{'vdirectory':", "{'parent': self.directory}]}) locations = \"{:<20} {:>}\".format(\"Name\", \"Location\") + \"\\n\" +", "destination, False) return file else: Console.error(\"File not found.\") except Exception", "= '-' * 40 if directory is not None: cloudmesh", "if dirname is None: if self.directory == 'vdir': Console.error(\"Root directory", "CmDatabase() self.col = self.cm.db['local-vdir'] self.directory = 'vdir' def cd(self, dirname=None):", "self.directory == 'vdir': Console.error(\"Root directory reached.\") else: cwd = self.col.find_one({'type':", "self.col.delete_one({'cm.name': dir_or_name}) return result except Exception as e: print(e) def", "is None: dir_dict = dict() dir_dict['cm'] = { 'name': dirname,", "# # this manager stores directly into the db wit", "mkdir(self, dirname): try: directory = self.col.find_one({\"cm.name\": dirname, 'type': 'directory'}) if", "dash = '-' * 40 if directory is not None:", "self.cm.db['local-vdir'] self.directory = 'vdir' def cd(self, dirname=None): try: if dirname", "cloudmesh = self.col.find({'$or': [{'vdirectory': directory}, {'parent': directory}]}) count = self.col.count_documents({'$or':", "dir_or_name): try: result = self.col.find_one({'cm.name': dir_or_name}) self.col.delete_one({'cm.name': dir_or_name}) return result", "return locations 
except Exception as e: print(e) @DatabaseUpdate() def add(self,", "\"\\n\" + dash + \"\\n\" for i in range(0, count):", "else: cloudmesh = self.col.find({'$or': [{'vdirectory': self.directory}, {'parent': self.directory}]}) count =", "'directory'}) filename = os.path.basename(dir_and_name) file = self.col.find_one({\"cm.name\": filename, 'type': 'fileendpoint'})", "with that name already exists.\") except Exception as e: print(e)", "cloudmesh.storage.Provider import Provider import os from datetime import datetime class", "file_dict['vdirectory'] = dirname file_dict['cloud_directory'] = os.path.dirname(endpoint).split(':')[1] file_dict['filename'] = os.path.basename(endpoint) file_dict['provider']", "self.col.update_one({'cm.name': name, 'type': 'fileendpoint'}, {'$set': {'modified': datetime.utcnow()}}) service = doc['provider']", "None: print(file) Console.error(\"File with that name already exists.\") except Exception", "= self.col.count_documents({'$or': [{'vdirectory': self.directory}, {'parent': self.directory}]}) locations = \"{:<20} {:>}\".format(\"Name\",", "p = Provider(service) file = p.get(source, destination, False) return file", "source = os.path.join(doc['cloud_directory'], doc['filename']) print(source) if destination is None: destination", "dirname is None: if self.directory == 'vdir': Console.error(\"Root directory reached.\")", "None and file is None: file_dict = dict() file_dict['cm'] =", "into the db wit Database update from cloudmesh.mongo.DataBaseDecorator import DatabaseUpdate", "directly into the db wit Database update from cloudmesh.mongo.DataBaseDecorator import", "doc['provider'] source = os.path.join(doc['cloud_directory'], doc['filename']) print(source) if destination is None:", "datetime.utcnow() dir_dict['cm']['modified'] = datetime.utcnow() return dir_dict else: Console.error(\"Directory with that", "directory not found.\") elif file is not None: print(file) Console.error(\"File", "None: destination = '~/.cloudmesh/vdir' p = 
Provider(service) file = p.get(source,", "exist at this location.') except Exception as e: print(e) @DatabaseUpdate()", "e: print(e) @DatabaseUpdate() def mkdir(self, dirname): try: directory = self.col.find_one({\"cm.name\":", "except Exception as e: print(e) @DatabaseUpdate() def mkdir(self, dirname): try:", "self.directory locations += \"{:<20} {:>}\".format(entry['cm']['name'], location) + \"\\n\" print(locations) return", "self.directory = dirname pwd = self.col.find_one({'type': 'directory', 'cm.name': self.directory}) return", "'directory'}) if directory is None: dir_dict = dict() dir_dict['cm'] =", "cloudmesh.mongo.CmDatabase import CmDatabase from cloudmesh.common.console import Console from cloudmesh.storage.Provider import", "\"\\n\" for i in range(0, count): entry = cloudmesh[i] if", "+ \"/\" + entry['filename'] else: if self.directory == '': location", "= self.cm.db['local-vdir'] self.directory = 'vdir' def cd(self, dirname=None): try: if", "os.path.dirname(endpoint).split(':')[1] file_dict['filename'] = os.path.basename(endpoint) file_dict['provider'] = os.path.dirname(endpoint).split(':')[0] file_dict['cm']['created'] = datetime.utcnow()", "exists.\") except Exception as e: print(e) def get(self, name, destination=None):", "'cm.name': dirname}) if directory['parent'] == self.directory: self.directory = dirname pwd", "'type': 'fileendpoint'}) if directory is not None and file is", "Console.error(\"File not found.\") except Exception as e: print(e) def delete(self,", "file = p.get(source, destination, False) return file else: Console.error(\"File not", "from datetime import datetime class Vdir(object): def __init__(self): self.cm =", "== '': location = 'Vdir' else: location = self.directory locations", "from cloudmesh.mongo.DataBaseDecorator import DatabaseUpdate from cloudmesh.mongo.CmDatabase import CmDatabase from cloudmesh.common.console", "datetime.utcnow() file_dict['cm']['modified'] = datetime.utcnow() return file_dict elif directory is None:", 
"[{'vdirectory': directory}, {'parent': directory}]}) count = self.col.count_documents({'$or': [{'vdirectory': directory}, {'parent':", "pwd else: Console.error('Directory does not exist at this location.') except", "= self.col.find_one({'type': 'directory', 'cm.name': self.directory}) return pwd else: Console.error('Directory does", "print(e) def delete(self, dir_or_name): try: result = self.col.find_one({'cm.name': dir_or_name}) self.col.delete_one({'cm.name':", "wit Database update from cloudmesh.mongo.DataBaseDecorator import DatabaseUpdate from cloudmesh.mongo.CmDatabase import", "= dict() file_dict['cm'] = { 'name': filename, 'kind': 'vdir', 'cloud':", "'directory', 'cm.name': self.directory}) return pwd else: Console.error('Directory does not exist", "= os.path.dirname(endpoint).split(':')[0] file_dict['cm']['created'] = datetime.utcnow() file_dict['cm']['modified'] = datetime.utcnow() return file_dict", "'directory' dir_dict['parent'] = self.directory dir_dict['cm']['created'] = datetime.utcnow() dir_dict['cm']['modified'] = datetime.utcnow()", "self.col = self.cm.db['local-vdir'] self.directory = 'vdir' def cd(self, dirname=None): try:", "cloudmesh[i] if entry['type'] == 'fileendpoint': location = entry['provider'] + \":\"", "= doc['provider'] source = os.path.join(doc['cloud_directory'], doc['filename']) print(source) if destination is", "directory is not None: cloudmesh = self.col.find({'$or': [{'vdirectory': directory}, {'parent':", "at this location.') except Exception as e: print(e) @DatabaseUpdate() def", "try: result = self.col.find_one({'cm.name': dir_or_name}) return result except Exception as", "\":\" + entry['cloud_directory'] + \"/\" + entry['filename'] else: if self.directory", "= self.col.find_one({'cm.name': dir_or_name}) return result except Exception as e: print(e)", "= CmDatabase() self.col = self.cm.db['local-vdir'] self.directory = 'vdir' def cd(self,", "try: directory = self.col.find_one({\"cm.name\": dirname, 'type': 'directory'}) if 
directory is", "directory = self.col.find_one({\"cm.name\": dirname, 'type': 'directory'}) filename = os.path.basename(dir_and_name) file", "result = self.col.find_one({'cm.name': dir_or_name}) return result except Exception as e:", "\"Location\") + \"\\n\" + dash + \"\\n\" for i in", "as e: print(e) @DatabaseUpdate() def mkdir(self, dirname): try: directory =", "= self.col.find_one({'type': 'directory', 'cm.name': self.directory}) return pwd else: directory =", "= cwd['parent'] pwd = self.col.find_one({'type': 'directory', 'cm.name': self.directory}) return pwd", "dir_dict['type'] = 'directory' dir_dict['parent'] = self.directory dir_dict['cm']['created'] = datetime.utcnow() dir_dict['cm']['modified']", "elif file is not None: print(file) Console.error(\"File with that name", "CmDatabase from cloudmesh.common.console import Console from cloudmesh.storage.Provider import Provider import", "'name': dirname, 'kind': 'vdir', 'cloud': 'local' } dir_dict['type'] = 'directory'", "Console.error(\"Virtual directory not found.\") elif file is not None: print(file)", "self.col.find_one({'type': 'directory', 'cm.name': self.directory}) return pwd else: directory = self.col.find_one({'type':", "= self.col.find_one({'type': 'directory', 'cm.name': self.directory}) self.directory = cwd['parent'] pwd =", "+ \":\" + entry['cloud_directory'] + \"/\" + entry['filename'] else: if", "+ \"\\n\" for i in range(0, count): entry = cloudmesh[i]", "self.directory dir_dict['cm']['created'] = datetime.utcnow() dir_dict['cm']['modified'] = datetime.utcnow() return dir_dict else:", "except Exception as e: print(e) @DatabaseUpdate() def add(self, endpoint, dir_and_name):", "except Exception as e: print(e) def status(self, dir_or_name): try: result", "= self.col.find_one({\"cm.name\": dirname, 'type': 'directory'}) if directory is None: dir_dict", "not exist at this location.') except Exception as e: print(e)", "location.') except Exception as e: print(e) @DatabaseUpdate() def mkdir(self, 
dirname):", "== 'fileendpoint': location = entry['provider'] + \":\" + entry['cloud_directory'] +", "dir_dict['cm']['modified'] = datetime.utcnow() return dir_dict else: Console.error(\"Directory with that name", "get(self, name, destination=None): try: doc = self.col.find_one({'cm.name': name, 'type': 'fileendpoint'})", "self.directory}]}) count = self.col.count_documents({'$or': [{'vdirectory': self.directory}, {'parent': self.directory}]}) locations =", "Exception as e: print(e) def status(self, dir_or_name): try: result =", "def add(self, endpoint, dir_and_name): try: dirname = os.path.dirname(dir_and_name).split('/')[-1] if dirname", "directory is not None and file is None: file_dict =", "entry['filename'] else: if self.directory == '': location = 'Vdir' else:", "print(e) def get(self, name, destination=None): try: doc = self.col.find_one({'cm.name': name,", "found.\") except Exception as e: print(e) def delete(self, dir_or_name): try:", "= datetime.utcnow() file_dict['cm']['modified'] = datetime.utcnow() return file_dict elif directory is", "exists.\") except Exception as e: print(e) def ls(self, directory=None): try:", "try: dirname = os.path.dirname(dir_and_name).split('/')[-1] if dirname == '': dirname =", "if directory is not None and file is None: file_dict", "directory = self.col.find_one({\"cm.name\": dirname, 'type': 'directory'}) if directory is None:", "import DatabaseUpdate from cloudmesh.mongo.CmDatabase import CmDatabase from cloudmesh.common.console import Console", "self.col.find_one({'type': 'directory', 'cm.name': self.directory}) self.directory = cwd['parent'] pwd = self.col.find_one({'type':", "{ 'name': dirname, 'kind': 'vdir', 'cloud': 'local' } dir_dict['type'] =", "= datetime.utcnow() return dir_dict else: Console.error(\"Directory with that name exists.\")", "as e: print(e) def ls(self, directory=None): try: dash = '-'", "as e: print(e) @DatabaseUpdate() def add(self, endpoint, dir_and_name): try: dirname", "e: print(e) 
@DatabaseUpdate() def add(self, endpoint, dir_and_name): try: dirname =", "= os.path.basename(endpoint) file_dict['provider'] = os.path.dirname(endpoint).split(':')[0] file_dict['cm']['created'] = datetime.utcnow() file_dict['cm']['modified'] =", "status(self, dir_or_name): try: result = self.col.find_one({'cm.name': dir_or_name}) return result except", "= os.path.basename(dir_and_name) file = self.col.find_one({\"cm.name\": filename, 'type': 'fileendpoint'}) if directory", "+ \"\\n\" + dash + \"\\n\" for i in range(0,", "os.path.basename(dir_and_name) file = self.col.find_one({\"cm.name\": filename, 'type': 'fileendpoint'}) if directory is", "os.path.basename(endpoint) file_dict['provider'] = os.path.dirname(endpoint).split(':')[0] file_dict['cm']['created'] = datetime.utcnow() file_dict['cm']['modified'] = datetime.utcnow()", "dir_or_name}) self.col.delete_one({'cm.name': dir_or_name}) return result except Exception as e: print(e)", "self.directory = 'vdir' def cd(self, dirname=None): try: if dirname is", "entry['cloud_directory'] + \"/\" + entry['filename'] else: if self.directory == '':", "import Provider import os from datetime import datetime class Vdir(object):", "return pwd else: directory = self.col.find_one({'type': 'directory', 'cm.name': dirname}) if", "} dir_dict['type'] = 'directory' dir_dict['parent'] = self.directory dir_dict['cm']['created'] = datetime.utcnow()", "= self.col.find_one({\"cm.name\": dirname, 'type': 'directory'}) filename = os.path.basename(dir_and_name) file =", "manager stores directly into the db wit Database update from", "if destination is None: destination = '~/.cloudmesh/vdir' p = Provider(service)", "from cloudmesh.storage.Provider import Provider import os from datetime import datetime", "dir_dict['parent'] = self.directory dir_dict['cm']['created'] = datetime.utcnow() dir_dict['cm']['modified'] = datetime.utcnow() return", "'vdir', 'cloud': 'local' } file_dict['type'] = 'fileendpoint' file_dict['vdirectory'] = dirname", 
"for i in range(0, count): entry = cloudmesh[i] if entry['type']", "= datetime.utcnow() dir_dict['cm']['modified'] = datetime.utcnow() return dir_dict else: Console.error(\"Directory with", "found.\") elif file is not None: print(file) Console.error(\"File with that", "'cm.name': self.directory}) self.directory = cwd['parent'] pwd = self.col.find_one({'type': 'directory', 'cm.name':", "= self.col.count_documents({'$or': [{'vdirectory': directory}, {'parent': directory}]}) else: cloudmesh = self.col.find({'$or':", "= dict() dir_dict['cm'] = { 'name': dirname, 'kind': 'vdir', 'cloud':", "except Exception as e: print(e) def get(self, name, destination=None): try:", "__init__(self): self.cm = CmDatabase() self.col = self.cm.db['local-vdir'] self.directory = 'vdir'", "if dirname == '': dirname = 'vdir' directory = 'vdir'", "name, 'type': 'fileendpoint'}) if doc is not None: self.col.update_one({'cm.name': name,", "class Vdir(object): def __init__(self): self.cm = CmDatabase() self.col = self.cm.db['local-vdir']", "if self.directory == 'vdir': Console.error(\"Root directory reached.\") else: cwd =", "dir_and_name): try: dirname = os.path.dirname(dir_and_name).split('/')[-1] if dirname == '': dirname", "in range(0, count): entry = cloudmesh[i] if entry['type'] == 'fileendpoint':", "endpoint, dir_and_name): try: dirname = os.path.dirname(dir_and_name).split('/')[-1] if dirname == '':", "self.col.find_one({'type': 'directory', 'cm.name': dirname}) if directory['parent'] == self.directory: self.directory =", "return result except Exception as e: print(e) def status(self, dir_or_name):", "self.col.count_documents({'$or': [{'vdirectory': directory}, {'parent': directory}]}) else: cloudmesh = self.col.find({'$or': [{'vdirectory':", "directory reached.\") else: cwd = self.col.find_one({'type': 'directory', 'cm.name': self.directory}) self.directory", "= cloudmesh[i] if entry['type'] == 'fileendpoint': location = entry['provider'] +", "doc is not None: 
self.col.update_one({'cm.name': name, 'type': 'fileendpoint'}, {'$set': {'modified':", "dir_dict['cm']['created'] = datetime.utcnow() dir_dict['cm']['modified'] = datetime.utcnow() return dir_dict else: Console.error(\"Directory", "entry = cloudmesh[i] if entry['type'] == 'fileendpoint': location = entry['provider']", "as e: print(e) def status(self, dir_or_name): try: result = self.col.find_one({'cm.name':", "name exists.\") except Exception as e: print(e) def ls(self, directory=None):", "print(file) Console.error(\"File with that name already exists.\") except Exception as", "dirname = os.path.dirname(dir_and_name).split('/')[-1] if dirname == '': dirname = 'vdir'", "file is not None: print(file) Console.error(\"File with that name already", "def cd(self, dirname=None): try: if dirname is None: if self.directory", "name, 'type': 'fileendpoint'}, {'$set': {'modified': datetime.utcnow()}}) service = doc['provider'] source", "'cm.name': self.directory}) return pwd else: directory = self.col.find_one({'type': 'directory', 'cm.name':", "file_dict = dict() file_dict['cm'] = { 'name': filename, 'kind': 'vdir',", "# this manager stores directly into the db wit Database", "this manager stores directly into the db wit Database update", "dict() dir_dict['cm'] = { 'name': dirname, 'kind': 'vdir', 'cloud': 'local'", "{'parent': self.directory}]}) count = self.col.count_documents({'$or': [{'vdirectory': self.directory}, {'parent': self.directory}]}) locations", "else: cwd = self.col.find_one({'type': 'directory', 'cm.name': self.directory}) self.directory = cwd['parent']", "name, destination=None): try: doc = self.col.find_one({'cm.name': name, 'type': 'fileendpoint'}) if", "entry['provider'] + \":\" + entry['cloud_directory'] + \"/\" + entry['filename'] else:", "not None and file is None: file_dict = dict() file_dict['cm']", "= \"{:<20} {:>}\".format(\"Name\", \"Location\") + \"\\n\" + dash + \"\\n\"", "os.path.dirname(dir_and_name).split('/')[-1] if dirname == '': dirname = 
'vdir' directory =", "'local' } file_dict['type'] = 'fileendpoint' file_dict['vdirectory'] = dirname file_dict['cloud_directory'] =", "if directory['parent'] == self.directory: self.directory = dirname pwd = self.col.find_one({'type':", "except Exception as e: print(e) def ls(self, directory=None): try: dash", "{'parent': directory}]}) else: cloudmesh = self.col.find({'$or': [{'vdirectory': self.directory}, {'parent': self.directory}]})", "def __init__(self): self.cm = CmDatabase() self.col = self.cm.db['local-vdir'] self.directory =", "Database update from cloudmesh.mongo.DataBaseDecorator import DatabaseUpdate from cloudmesh.mongo.CmDatabase import CmDatabase", "is not None: cloudmesh = self.col.find({'$or': [{'vdirectory': directory}, {'parent': directory}]})", "if doc is not None: self.col.update_one({'cm.name': name, 'type': 'fileendpoint'}, {'$set':", "dirname): try: directory = self.col.find_one({\"cm.name\": dirname, 'type': 'directory'}) if directory", "self.directory}) self.directory = cwd['parent'] pwd = self.col.find_one({'type': 'directory', 'cm.name': self.directory})", "destination = '~/.cloudmesh/vdir' p = Provider(service) file = p.get(source, destination,", "dir_or_name}) return result except Exception as e: print(e) def status(self,", "{'parent': directory}]}) count = self.col.count_documents({'$or': [{'vdirectory': directory}, {'parent': directory}]}) else:", "service = doc['provider'] source = os.path.join(doc['cloud_directory'], doc['filename']) print(source) if destination", "def mkdir(self, dirname): try: directory = self.col.find_one({\"cm.name\": dirname, 'type': 'directory'})", "count = self.col.count_documents({'$or': [{'vdirectory': directory}, {'parent': directory}]}) else: cloudmesh =", "is None: Console.error(\"Virtual directory not found.\") elif file is not", "self.directory}, {'parent': self.directory}]}) locations = \"{:<20} {:>}\".format(\"Name\", \"Location\") + \"\\n\"", "'vdir' def cd(self, dirname=None): try: if dirname is 
None: if", "self.col.find_one({\"cm.name\": filename, 'type': 'fileendpoint'}) if directory is not None and", "'kind': 'vdir', 'cloud': 'local' } dir_dict['type'] = 'directory' dir_dict['parent'] =", "location) + \"\\n\" print(locations) return locations except Exception as e:", "dirname}) if directory['parent'] == self.directory: self.directory = dirname pwd =", "dir_dict else: Console.error(\"Directory with that name exists.\") except Exception as", "+ entry['filename'] else: if self.directory == '': location = 'Vdir'", "file_dict elif directory is None: Console.error(\"Virtual directory not found.\") elif", "return file else: Console.error(\"File not found.\") except Exception as e:", "= self.col.find_one({'type': 'directory', 'cm.name': dirname}) if directory['parent'] == self.directory: self.directory", "Exception as e: print(e) def ls(self, directory=None): try: dash =", "self.directory}, {'parent': self.directory}]}) count = self.col.count_documents({'$or': [{'vdirectory': self.directory}, {'parent': self.directory}]})", "= self.col.find_one({'cm.name': dir_or_name}) self.col.delete_one({'cm.name': dir_or_name}) return result except Exception as", "directory}, {'parent': directory}]}) count = self.col.count_documents({'$or': [{'vdirectory': directory}, {'parent': directory}]})", "this location.') except Exception as e: print(e) @DatabaseUpdate() def mkdir(self,", "'local' } dir_dict['type'] = 'directory' dir_dict['parent'] = self.directory dir_dict['cm']['created'] =", "self.col.find_one({\"cm.name\": dirname, 'type': 'directory'}) if directory is None: dir_dict =", "count): entry = cloudmesh[i] if entry['type'] == 'fileendpoint': location =", "else: if self.directory == '': location = 'Vdir' else: location", "dir_or_name): try: result = self.col.find_one({'cm.name': dir_or_name}) return result except Exception", "self.directory}) return pwd else: Console.error('Directory does not exist at this", "datetime.utcnow() return file_dict elif directory is None: 
Console.error(\"Virtual directory not", "destination is None: destination = '~/.cloudmesh/vdir' p = Provider(service) file", "= 'directory' dir_dict['parent'] = self.directory dir_dict['cm']['created'] = datetime.utcnow() dir_dict['cm']['modified'] =", "directory}]}) else: cloudmesh = self.col.find({'$or': [{'vdirectory': self.directory}, {'parent': self.directory}]}) count", "is not None: self.col.update_one({'cm.name': name, 'type': 'fileendpoint'}, {'$set': {'modified': datetime.utcnow()}})", "'-' * 40 if directory is not None: cloudmesh =", "= 'vdir' directory = 'vdir' else: directory = self.col.find_one({\"cm.name\": dirname,", "directory['parent'] == self.directory: self.directory = dirname pwd = self.col.find_one({'type': 'directory',", "not None: cloudmesh = self.col.find({'$or': [{'vdirectory': directory}, {'parent': directory}]}) count", "print(locations) return locations except Exception as e: print(e) @DatabaseUpdate() def", "= 'Vdir' else: location = self.directory locations += \"{:<20} {:>}\".format(entry['cm']['name'],", "if directory is None: dir_dict = dict() dir_dict['cm'] = {", "None: dir_dict = dict() dir_dict['cm'] = { 'name': dirname, 'kind':", "dirname pwd = self.col.find_one({'type': 'directory', 'cm.name': self.directory}) return pwd else:", "= p.get(source, destination, False) return file else: Console.error(\"File not found.\")", "file_dict['cm']['modified'] = datetime.utcnow() return file_dict elif directory is None: Console.error(\"Virtual", "{:>}\".format(entry['cm']['name'], location) + \"\\n\" print(locations) return locations except Exception as" ]
[ "except ImportError: enabled = False def _transform_result(results): result_columns = []", "{}) for key in tags.keys(): if key not in result_columns:", "in tags.keys(): if key not in result_columns: result_columns.append(key) for result", "for point in series['values']: result_row = {} for column in", "result_columns: tags = series.get('tags', {}) if column in tags: result_row[column]", "measurements limit 1\" @classmethod def configuration_schema(cls): return { 'type': 'object',", "def type(cls): return \"influxdb\" def run_query(self, query, user): client =", "if key not in result_columns: result_columns.append(key) for result in results:", "return enabled @classmethod def annotate_query(cls): return False @classmethod def type(cls):", "} }, 'required': ['url'] } @classmethod def enabled(cls): return enabled", "ImportError: enabled = False def _transform_result(results): result_columns = [] result_rows", "if not isinstance(results, list): results = [results] json_data = _transform_result(results)", "JSONEncoder logger = logging.getLogger(__name__) try: from influxdb import InfluxDBClusterClient enabled", "'url': { 'type': 'string' } }, 'required': ['url'] } @classmethod", "= series.get('tags', {}) for key in tags.keys(): if key not", "not in result_columns: result_columns.append(column) tags = series.get('tags', {}) for key", "= False def _transform_result(results): result_columns = [] result_rows = []", "def enabled(cls): return enabled @classmethod def annotate_query(cls): return False @classmethod", "import JSONEncoder logger = logging.getLogger(__name__) try: from influxdb import InfluxDBClusterClient", "column not in result_columns: result_columns.append(column) tags = series.get('tags', {}) for", "results: for series in result.raw.get('series', []): for column in series['columns']:", "for series in result.raw.get('series', []): for column in series['columns']: if", "results: for series in result.raw.get('series', []): for point in series['values']:", 
"['url'] } @classmethod def enabled(cls): return enabled @classmethod def annotate_query(cls):", "result_columns: result_columns.append(column) tags = series.get('tags', {}) for key in tags.keys():", "from influxdb import InfluxDBClusterClient enabled = True except ImportError: enabled", "} @classmethod def enabled(cls): return enabled @classmethod def annotate_query(cls): return", "except Exception as ex: json_data = None error = ex.message", "client.query(query) if not isinstance(results, list): results = [results] json_data =", "= [] for result in results: for series in result.raw.get('series',", "c in result_columns], \"rows\": result_rows }, cls=JSONEncoder) class InfluxDB(BaseQueryRunner): noop_query", "InfluxDB(BaseQueryRunner): noop_query = \"show measurements limit 1\" @classmethod def configuration_schema(cls):", "import InfluxDBClusterClient enabled = True except ImportError: enabled = False", "url: %s\", self.configuration['url']) logger.debug(\"influxdb got query: %s\", query) try: results", "json_data = _transform_result(results) error = None except Exception as ex:", "in results: for series in result.raw.get('series', []): for point in", "enabled = False def _transform_result(results): result_columns = [] result_rows =", "try: from influxdb import InfluxDBClusterClient enabled = True except ImportError:", "result_row = {} for column in result_columns: tags = series.get('tags',", "= series.get('tags', {}) if column in tags: result_row[column] = tags[column]", "= \"show measurements limit 1\" @classmethod def configuration_schema(cls): return {", "\"influxdb\" def run_query(self, query, user): client = InfluxDBClusterClient.from_DSN(self.configuration['url']) logger.debug(\"influxdb url:", "self.configuration['url']) logger.debug(\"influxdb got query: %s\", query) try: results = client.query(query)", "enabled = True except ImportError: enabled = False def _transform_result(results):", "error = None except Exception as ex: json_data = None", "not in 
result_columns: result_columns.append(key) for result in results: for series", "tags = series.get('tags', {}) for key in tags.keys(): if key", "result in results: for series in result.raw.get('series', []): for point", "enabled @classmethod def annotate_query(cls): return False @classmethod def type(cls): return", "c} for c in result_columns], \"rows\": result_rows }, cls=JSONEncoder) class", "= InfluxDBClusterClient.from_DSN(self.configuration['url']) logger.debug(\"influxdb url: %s\", self.configuration['url']) logger.debug(\"influxdb got query: %s\",", "@classmethod def annotate_query(cls): return False @classmethod def type(cls): return \"influxdb\"", "import logging from redash.query_runner import * from redash.utils import JSONEncoder", "def annotate_query(cls): return False @classmethod def type(cls): return \"influxdb\" def", "_transform_result(results) error = None except Exception as ex: json_data =", "ex: json_data = None error = ex.message return json_data, error", "redash.utils import JSONEncoder logger = logging.getLogger(__name__) try: from influxdb import", "series['columns']: index = series['columns'].index(column) value = point[index] result_row[column] = value", "{ 'type': 'string' } }, 'required': ['url'] } @classmethod def", "series['values']: result_row = {} for column in result_columns: tags =", "[]): for point in series['values']: result_row = {} for column", "query, user): client = InfluxDBClusterClient.from_DSN(self.configuration['url']) logger.debug(\"influxdb url: %s\", self.configuration['url']) logger.debug(\"influxdb", "results = [results] json_data = _transform_result(results) error = None except", "}, 'required': ['url'] } @classmethod def enabled(cls): return enabled @classmethod", "'required': ['url'] } @classmethod def enabled(cls): return enabled @classmethod def", "[]): for column in series['columns']: if column not in result_columns:", "tags: result_row[column] = tags[column] elif column in series['columns']: index =", 
"[results] json_data = _transform_result(results) error = None except Exception as", "{}) if column in tags: result_row[column] = tags[column] elif column", "result_columns.append(column) tags = series.get('tags', {}) for key in tags.keys(): if", "'type': 'string' } }, 'required': ['url'] } @classmethod def enabled(cls):", "* from redash.utils import JSONEncoder logger = logging.getLogger(__name__) try: from", "json.dumps({ \"columns\": [{'name': c} for c in result_columns], \"rows\": result_rows", "'string' } }, 'required': ['url'] } @classmethod def enabled(cls): return", "= None except Exception as ex: json_data = None error", "list): results = [results] json_data = _transform_result(results) error = None", "Exception as ex: json_data = None error = ex.message return", "import json import logging from redash.query_runner import * from redash.utils", "result.raw.get('series', []): for point in series['values']: result_row = {} for", "= {} for column in result_columns: tags = series.get('tags', {})", "noop_query = \"show measurements limit 1\" @classmethod def configuration_schema(cls): return", "from redash.utils import JSONEncoder logger = logging.getLogger(__name__) try: from influxdb", "in result_columns: tags = series.get('tags', {}) if column in tags:", "in series['columns']: index = series['columns'].index(column) value = point[index] result_row[column] =", "result_rows }, cls=JSONEncoder) class InfluxDB(BaseQueryRunner): noop_query = \"show measurements limit", "cls=JSONEncoder) class InfluxDB(BaseQueryRunner): noop_query = \"show measurements limit 1\" @classmethod", "user): client = InfluxDBClusterClient.from_DSN(self.configuration['url']) logger.debug(\"influxdb url: %s\", self.configuration['url']) logger.debug(\"influxdb got", "try: results = client.query(query) if not isinstance(results, list): results =", "query: %s\", query) try: results = client.query(query) if not isinstance(results,", "series in result.raw.get('series', []): for point in 
series['values']: result_row =", "@classmethod def configuration_schema(cls): return { 'type': 'object', 'properties': { 'url':", "return { 'type': 'object', 'properties': { 'url': { 'type': 'string'", "in result.raw.get('series', []): for point in series['values']: result_row = {}", "= True except ImportError: enabled = False def _transform_result(results): result_columns", "results = client.query(query) if not isinstance(results, list): results = [results]", "\"show measurements limit 1\" @classmethod def configuration_schema(cls): return { 'type':", "in series['values']: result_row = {} for column in result_columns: tags", "client = InfluxDBClusterClient.from_DSN(self.configuration['url']) logger.debug(\"influxdb url: %s\", self.configuration['url']) logger.debug(\"influxdb got query:", "series['columns'].index(column) value = point[index] result_row[column] = value result_rows.append(result_row) return json.dumps({", "series.get('tags', {}) for key in tags.keys(): if key not in", "isinstance(results, list): results = [results] json_data = _transform_result(results) error =", "enabled(cls): return enabled @classmethod def annotate_query(cls): return False @classmethod def", "not isinstance(results, list): results = [results] json_data = _transform_result(results) error", "'type': 'object', 'properties': { 'url': { 'type': 'string' } },", "from redash.query_runner import * from redash.utils import JSONEncoder logger =", "logger.debug(\"influxdb got query: %s\", query) try: results = client.query(query) if", "= tags[column] elif column in series['columns']: index = series['columns'].index(column) value", "elif column in series['columns']: index = series['columns'].index(column) value = point[index]", "= [results] json_data = _transform_result(results) error = None except Exception", "for column in result_columns: tags = series.get('tags', {}) if column", "= _transform_result(results) error = None except Exception as ex: json_data", "as ex: json_data = None error = 
ex.message return json_data,", "= logging.getLogger(__name__) try: from influxdb import InfluxDBClusterClient enabled = True", "\"rows\": result_rows }, cls=JSONEncoder) class InfluxDB(BaseQueryRunner): noop_query = \"show measurements", "'object', 'properties': { 'url': { 'type': 'string' } }, 'required':", "influxdb import InfluxDBClusterClient enabled = True except ImportError: enabled =", "False def _transform_result(results): result_columns = [] result_rows = [] for", "column in series['columns']: if column not in result_columns: result_columns.append(column) tags", "column in result_columns: tags = series.get('tags', {}) if column in", "for c in result_columns], \"rows\": result_rows }, cls=JSONEncoder) class InfluxDB(BaseQueryRunner):", "'properties': { 'url': { 'type': 'string' } }, 'required': ['url']", "for series in result.raw.get('series', []): for point in series['values']: result_row", "%s\", query) try: results = client.query(query) if not isinstance(results, list):", "{} for column in result_columns: tags = series.get('tags', {}) if", "json import logging from redash.query_runner import * from redash.utils import", "= value result_rows.append(result_row) return json.dumps({ \"columns\": [{'name': c} for c", "\"columns\": [{'name': c} for c in result_columns], \"rows\": result_rows },", "return json.dumps({ \"columns\": [{'name': c} for c in result_columns], \"rows\":", "result_row[column] = tags[column] elif column in series['columns']: index = series['columns'].index(column)", "for result in results: for series in result.raw.get('series', []): for", "tags.keys(): if key not in result_columns: result_columns.append(key) for result in", "True except ImportError: enabled = False def _transform_result(results): result_columns =", "in result_columns: result_columns.append(column) tags = series.get('tags', {}) for key in", "if column not in result_columns: result_columns.append(column) tags = series.get('tags', {})", "key not in result_columns: 
result_columns.append(key) for result in results: for", "value = point[index] result_row[column] = value result_rows.append(result_row) return json.dumps({ \"columns\":", "= point[index] result_row[column] = value result_rows.append(result_row) return json.dumps({ \"columns\": [{'name':", "tags[column] elif column in series['columns']: index = series['columns'].index(column) value =", "{ 'url': { 'type': 'string' } }, 'required': ['url'] }", "InfluxDBClusterClient.from_DSN(self.configuration['url']) logger.debug(\"influxdb url: %s\", self.configuration['url']) logger.debug(\"influxdb got query: %s\", query)", "[] result_rows = [] for result in results: for series", "False @classmethod def type(cls): return \"influxdb\" def run_query(self, query, user):", "InfluxDBClusterClient enabled = True except ImportError: enabled = False def", "if column in tags: result_row[column] = tags[column] elif column in", "result.raw.get('series', []): for column in series['columns']: if column not in", "None except Exception as ex: json_data = None error =", "in result.raw.get('series', []): for column in series['columns']: if column not", "[] for result in results: for series in result.raw.get('series', []):", "%s\", self.configuration['url']) logger.debug(\"influxdb got query: %s\", query) try: results =", "def _transform_result(results): result_columns = [] result_rows = [] for result", "query) try: results = client.query(query) if not isinstance(results, list): results", "for column in series['columns']: if column not in result_columns: result_columns.append(column)", "redash.query_runner import * from redash.utils import JSONEncoder logger = logging.getLogger(__name__)", "got query: %s\", query) try: results = client.query(query) if not", "= client.query(query) if not isinstance(results, list): results = [results] json_data", "result_columns: result_columns.append(key) for result in results: for series in result.raw.get('series',", "in tags: result_row[column] = tags[column] 
elif column in series['columns']: index", "result_row[column] = value result_rows.append(result_row) return json.dumps({ \"columns\": [{'name': c} for", "index = series['columns'].index(column) value = point[index] result_row[column] = value result_rows.append(result_row)", "result_columns = [] result_rows = [] for result in results:", "series['columns']: if column not in result_columns: result_columns.append(column) tags = series.get('tags',", "result_rows = [] for result in results: for series in", "tags = series.get('tags', {}) if column in tags: result_row[column] =", "[{'name': c} for c in result_columns], \"rows\": result_rows }, cls=JSONEncoder)", "def run_query(self, query, user): client = InfluxDBClusterClient.from_DSN(self.configuration['url']) logger.debug(\"influxdb url: %s\",", "column in series['columns']: index = series['columns'].index(column) value = point[index] result_row[column]", "in results: for series in result.raw.get('series', []): for column in", "point in series['values']: result_row = {} for column in result_columns:", "in result_columns], \"rows\": result_rows }, cls=JSONEncoder) class InfluxDB(BaseQueryRunner): noop_query =", "result_columns.append(key) for result in results: for series in result.raw.get('series', []):", "}, cls=JSONEncoder) class InfluxDB(BaseQueryRunner): noop_query = \"show measurements limit 1\"", "result_columns], \"rows\": result_rows }, cls=JSONEncoder) class InfluxDB(BaseQueryRunner): noop_query = \"show", "@classmethod def enabled(cls): return enabled @classmethod def annotate_query(cls): return False", "1\" @classmethod def configuration_schema(cls): return { 'type': 'object', 'properties': {", "series in result.raw.get('series', []): for column in series['columns']: if column", "value result_rows.append(result_row) return json.dumps({ \"columns\": [{'name': c} for c in", "import * from redash.utils import JSONEncoder logger = logging.getLogger(__name__) try:", "json_data = None error = ex.message return 
json_data, error register(InfluxDB)", "run_query(self, query, user): client = InfluxDBClusterClient.from_DSN(self.configuration['url']) logger.debug(\"influxdb url: %s\", self.configuration['url'])", "configuration_schema(cls): return { 'type': 'object', 'properties': { 'url': { 'type':", "type(cls): return \"influxdb\" def run_query(self, query, user): client = InfluxDBClusterClient.from_DSN(self.configuration['url'])", "series.get('tags', {}) if column in tags: result_row[column] = tags[column] elif", "def configuration_schema(cls): return { 'type': 'object', 'properties': { 'url': {", "logger.debug(\"influxdb url: %s\", self.configuration['url']) logger.debug(\"influxdb got query: %s\", query) try:", "return False @classmethod def type(cls): return \"influxdb\" def run_query(self, query,", "@classmethod def type(cls): return \"influxdb\" def run_query(self, query, user): client", "_transform_result(results): result_columns = [] result_rows = [] for result in", "result in results: for series in result.raw.get('series', []): for column", "point[index] result_row[column] = value result_rows.append(result_row) return json.dumps({ \"columns\": [{'name': c}", "logging.getLogger(__name__) try: from influxdb import InfluxDBClusterClient enabled = True except", "return \"influxdb\" def run_query(self, query, user): client = InfluxDBClusterClient.from_DSN(self.configuration['url']) logger.debug(\"influxdb", "class InfluxDB(BaseQueryRunner): noop_query = \"show measurements limit 1\" @classmethod def", "column in tags: result_row[column] = tags[column] elif column in series['columns']:", "annotate_query(cls): return False @classmethod def type(cls): return \"influxdb\" def run_query(self,", "{ 'type': 'object', 'properties': { 'url': { 'type': 'string' }", "result_rows.append(result_row) return json.dumps({ \"columns\": [{'name': c} for c in result_columns],", "= series['columns'].index(column) value = point[index] result_row[column] = value result_rows.append(result_row) 
return", "= [] result_rows = [] for result in results: for", "in series['columns']: if column not in result_columns: result_columns.append(column) tags =", "for key in tags.keys(): if key not in result_columns: result_columns.append(key)", "key in tags.keys(): if key not in result_columns: result_columns.append(key) for", "logging from redash.query_runner import * from redash.utils import JSONEncoder logger", "logger = logging.getLogger(__name__) try: from influxdb import InfluxDBClusterClient enabled =", "in result_columns: result_columns.append(key) for result in results: for series in", "limit 1\" @classmethod def configuration_schema(cls): return { 'type': 'object', 'properties':" ]
[ "functions_markers_extract.csv\" sbatch -n 1 -t 3-0 -c 4 -o functions_markers_sparse_slurm_gby.txt", "tmp = pd.Series(featherLU).to_frame() tmp.loc[:, 'batch'] = bf tmp.loc[:, 'batch_folder'] =", "= partial(mergeSamples, extractionFunc=extractionFunc, extractionKwargs=extractionKwargs, matchStr=matchStr, test=testsamples, metaCols=metaCols, filters=filters) res =", "parser.parse_args() try: import parmap from multiprocessing import Pool _PARMAP =", "return outFilename def testMatching(dataFolder): out = [] for bf in", "ncpus > 1 and _PARMAP: res = parmap.map(mergeSamples, batchList, extractionFunc,", "testbatch: batchList = batchList[:1] matchStr = 'gs_*.feather' if ncpus >", "find package \"parmap\", parallelization not enabled.') import itertools import pandas", "batchList = batchList[:1] matchStr = 'gs_*.feather' if ncpus > 1", "import argparse parser = argparse.ArgumentParser(description='Extract features and merge batches into", "--ncpus 4 --out functions_markers_extract.csv\" sbatch -n 1 -t 3-0 -c", "--out cell_functions_extract.csv --testbatch --testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function", "batch folders.', default='/fh/fast/gilbert_p/grp/hvtn602_compass/tmpdata') parser.add_argument('--function', type=str, help='Name of extraction to apply", "print('Could not find package \"parmap\", parallelization not enabled.') import itertools", "extractionFunc=extractionFunc, extractionKwargs=extractionKwargs, testsamples=args.testsamples, testbatch=args.testbatch, outFile=outFile, metaCols=['PTID', 'VISITNO', 'Global.Spec.Id', 'TESTDT', 'STIM'],", "functions --ncpus 4 --out functions_extract.csv sbatch -n 1 -t 3-0", "functions=functions, mincells=5)), 'bool_functions':(extractFunctionsGBY, dict(subsets=subsets, functions=functions, mincells=0)), 'functions_markers':(extractFunctionsMarkersGBY, dict(subsets=subsets, functions=functions, 
markers=markers,", "metaCols=metaCols, filters=filters) res = list(map(func, batchList)) outFilename = mergeFeathers(res, outFile,", "from ics import * if args.matchingonly: metaDf = testMatching(args.folder) metaDf.to_csv(opj(args.folder,", "testsamples, testbatch, outFile, metaCols=None, filters=None, useFeather=False): out = [] batchList", "one batch.') parser.add_argument('--matchingonly', action='store_true', help='Only perform sample matching, to validate", "parmap from multiprocessing import Pool _PARMAP = True except: _PARMAP", "wrote = mergeBatches(args.folder, extractionFunc=extractionFunc, extractionKwargs=extractionKwargs, testsamples=args.testsamples, testbatch=args.testbatch, outFile=outFile, metaCols=['PTID', 'VISITNO',", "except: _PARMAP = False print('Could not find package \"parmap\", parallelization", "= testMatching(args.folder) metaDf.to_csv(opj(args.folder, 'metamatch_' + args.out)) print('Wrote matching metadata to", "--wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions_markers --ncpus 4 --out functions_markers_extract.csv\" sbatch -n", "merge batches into one CSV.') parser.add_argument('--folder', type=str, help='Data folder containing", "parser.add_argument('--ncpus', type=int, help='Number of CPUs/cores to use for parallelization.', default=1)", "bf in os.listdir(dataFolder): batchFolder = opj(dataFolder, bf) if os.path.isdir(opj(dataFolder, bf)):", "os.path.isdir(opj(dataFolder, bf)): featherLU = matchSamples(batchFolder, test=False) tmp = pd.Series(featherLU).to_frame() tmp.loc[:,", "and merge batches into one CSV.') parser.add_argument('--folder', type=str, help='Data folder", "in os.listdir(dataFolder): batchFolder = opj(dataFolder, bf) if os.path.isdir(opj(dataFolder, bf)): featherLU", "CSV.') parser.add_argument('--folder', type=str, help='Data folder containing all batch folders.', default='/fh/fast/gilbert_p/grp/hvtn602_compass/tmpdata')", "outFile: print('Wrote extracted data to %s.' 
% outFile) else: print('Error", "mergeSamples, matchSamples from ics import * if args.matchingonly: metaDf =", "2), (['IFNg','IL2', 'TNFa'], 2)])), 'functions':(extractFunctionsGBY, dict(subsets=subsets, functions=functions, compressions=[('ALL', 1), ('ALL',", "functions_markers_sparse_slurm_gby.txt --wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function sparse_functions --ncpus 4 --subsets /home/agartlan/gitrepo/utils/ics/allcombs_subsets.csv --out", "type=str, help='Filename listing subsets for analysis.', default='/home/agartlan/gitrepo/utils/ics/sample_subsets2.csv') parser.add_argument('--out', type=str, help='Output", "batch.') parser.add_argument('--matchingonly', action='store_true', help='Only perform sample matching, to validate metadata.')", "if args.matchingonly: metaDf = testMatching(args.folder) metaDf.to_csv(opj(args.folder, 'metamatch_' + args.out)) print('Wrote", "functions_slurm.txt --wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions --ncpus 4 --out functions_extract.csv\" sbatch", "batchList, extractionFunc, extractionKwargs, matchStr, testsamples, metaCols, filters, pool=Pool(processes=ncpus)) else: if", "args = parser.parse_args() try: import parmap from multiprocessing import Pool", "opj(args.folder, 'metamatch_' + args.out)) else: subsets, markers, functions, exclude =", "sys import feather \"\"\"Make sure the utils are on path", "\"parmap\", parallelization not enabled.') import itertools import pandas as pd", "-n 1 -t 3-0 -c 4 -o functions_slurm.txt --wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py", "bf))] if testbatch: batchList = batchList[:1] matchStr = 'gs_*.feather' if", "--ncpus 4 --out cell_functions_22Aug2018.feather --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv\" python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function", "sure the utils are on path before importing\"\"\" sys.path.append(args.utils) #", 
"CPUs/cores to use for parallelization.', default=1) parser.add_argument('--testsamples', action='store_true', help='Only process", "--function bool_functions --ncpus 6 --out bool_functions_extract_05May2020.csv --testbatch --testsamples --feather --subsets", "testMatching(args.folder) metaDf.to_csv(opj(args.folder, 'metamatch_' + args.out)) print('Wrote matching metadata to %s.'", "4 --subsets /home/agartlan/gitrepo/utils/ics/allcombs_subsets.csv --out functions_markers_sparse_24Jul2018_gby.csv\" sbatch -n 1 -t 3-0", "use: find . -name \\merged_tmp*.feather -type f -delete \"\"\" def", "(['IFNg','IL2', 'TNFa'], 2), (['IFNg','IL2'], 1)])), 'cell_functions':(extractRawFunctions, dict(subsets=subsets, functions=functions, downsample=1))} extractionFunc,", "all batch folders.', default='/fh/fast/gilbert_p/grp/hvtn602_compass/tmpdata') parser.add_argument('--function', type=str, help='Name of extraction to", "10.4'], 'VISITNO':[2, 6, 7, 10, 11, 12]}, useFeather=int(args.feather), ncpus=args.ncpus) if", "from each batch.') parser.add_argument('--testbatch', action='store_true', help='Only process twp samples from", "print('Wrote matching metadata to %s.' % opj(args.folder, 'metamatch_' + args.out))", "functions, exclude = parseSubsets(args.subsets) features = {'sparse_functions':(extractFunctionsGBY, dict(subsets=subsets, functions=functions, mincells=5)),", "'TB 10.4'], 'VISITNO':[2, 6, 7, 10, 11, 12]}, useFeather=int(args.feather), ncpus=args.ncpus)", "featherLU = matchSamples(batchFolder, test=False) tmp = pd.Series(featherLU).to_frame() tmp.loc[:, 'batch'] =", "\"\"\"Make sure the utils are on path before importing\"\"\" sys.path.append(args.utils)", "== outFile: print('Wrote extracted data to %s.' % outFile) else:", "folders.', default='/fh/fast/gilbert_p/grp/hvtn602_compass/tmpdata') parser.add_argument('--function', type=str, help='Name of extraction to apply (\"functions\")',", "delete all tmp files use: find . 
-name \\merged_tmp*.feather -type", "--ncpus 3 --out cell_functions_extract.csv --testbatch --testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv python", "metaCols=None, filters=None, useFeather=False): out = [] batchList = [opj(dataFolder, bf)", "functions=functions, downsample=1))} extractionFunc, extractionKwargs = features[args.function] if args.testbatch: print('Test: processing", "3-0 -c 4 -o functions_slurm.txt --wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions --ncpus", "filters=None, useFeather=False): out = [] batchList = [opj(dataFolder, bf) for", "--feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function sparse_functions --ncpus 3 --out", "Usage examples: python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions --ncpus 4 --out functions_extract.csv", "if os.path.isdir(opj(dataFolder, bf))] if testbatch: batchList = batchList[:1] matchStr =", "+ args.out)) print('Wrote matching metadata to %s.' 
% opj(args.folder, 'metamatch_'", "filters={'STIM':['negctrl', 'TB WCL', 'BCG-Pasteur', 'Ag85B', 'TB 10.4'], 'VISITNO':[2, 6, 7,", "args.feather: outFile = outFile.replace('.csv', '.feather') wrote = mergeBatches(args.folder, extractionFunc=extractionFunc, extractionKwargs=extractionKwargs,", "--subsets /home/agartlan/gitrepo/utils/ics/allcombs_subsets.csv --out functions_markers_sparse_24Jul2018_gby.csv\" sbatch -n 1 -t 3-0 -c", "from ics import extractFunctionsGBY, extractFunctionsMarkersGBY, parseSubsets, mergeSamples, matchSamples from ics", "_PARMAP = True except: _PARMAP = False print('Could not find", "try: import parmap from multiprocessing import Pool _PARMAP = True", "--function cell_functions --ncpus 3 --out cell_functions_extract.csv --testbatch --testsamples --feather --subsets", "2), (['IFNg','IL2', 'TNFa'], 1), (['IFNg','IL2', 'TNFa'], 2), (['IFNg','IL2'], 1)])), 'cell_functions':(extractRawFunctions,", "testbatch=args.testbatch, outFile=outFile, metaCols=['PTID', 'VISITNO', 'Global.Spec.Id', 'TESTDT', 'STIM'], filters={'STIM':['negctrl', 'TB WCL',", "as np from os.path import join as opj import os", "python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function sparse_functions --ncpus 3 --out sparse_functions_extract_23Aug2018.csv --testbatch --testsamples", "help='Store as feather as oposed to CSV') parser.add_argument('--utils', default='/home/agartlan/gitrepo/utils', help='Location", "samples from one batch.') parser.add_argument('--matchingonly', action='store_true', help='Only perform sample matching,", "args.testbatch: print('Test: processing samples from one batch') if args.testsamples: print('Test:", "extractFunctionsMarkersGBY, parseSubsets, mergeSamples, matchSamples from ics import * if args.matchingonly:", "True except: _PARMAP = False print('Could not find package \"parmap\",", "metaCols, filters, parallel=False) else: func = partial(mergeSamples, extractionFunc=extractionFunc, extractionKwargs=extractionKwargs, 
matchStr=matchStr,", "two samples from each batch.') parser.add_argument('--testbatch', action='store_true', help='Only process twp", "opj import os from functools import partial import time import", "import numpy as np from os.path import join as opj", "-o functions_markers_slurm.txt --wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions_markers --ncpus 4 --out functions_markers_extract.csv\"", "* if args.matchingonly: metaDf = testMatching(args.folder) metaDf.to_csv(opj(args.folder, 'metamatch_' + args.out))", "1 -t 3-0 -c 4 -o functions_slurm.txt --wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function", "processing samples from one batch') if args.testsamples: print('Test: processing two", "import join as opj import os from functools import partial", "parser = argparse.ArgumentParser(description='Extract features and merge batches into one CSV.')", "examples: python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions --ncpus 4 --out functions_extract.csv sbatch", "else: func = partial(mergeSamples, extractionFunc=extractionFunc, extractionKwargs=extractionKwargs, matchStr=matchStr, test=testsamples, metaCols=metaCols, filters=filters)", "outFilename = mergeFeathers(res, outFile, writeCSV=1 - int(useFeather)) return outFilename def", "parallelization not enabled.') import itertools import pandas as pd import", "return pd.concat(out, axis=0) if __name__ == '__main__': import argparse parser", "metaCols=['PTID', 'VISITNO', 'Global.Spec.Id', 'TESTDT', 'STIM'], filters={'STIM':['negctrl', 'TB WCL', 'BCG-Pasteur', 'Ag85B',", "-o functions_markers_sparse_slurm_gby.txt --wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function sparse_functions --ncpus 4 --subsets /home/agartlan/gitrepo/utils/ics/allcombs_subsets.csv", "pd import numpy as np from os.path import join as", "python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function bool_functions --ncpus 6 --out 
bool_functions_extract_05May2020.csv --testbatch --testsamples", "= parmap.map(mergeSamples, batchList, extractionFunc, extractionKwargs, matchStr, testsamples, metaCols, filters, pool=Pool(processes=ncpus))", "int(useFeather)) return outFilename def testMatching(dataFolder): out = [] for bf", "package \"parmap\", parallelization not enabled.') import itertools import pandas as", "parser.add_argument('--subsets', type=str, help='Filename listing subsets for analysis.', default='/home/agartlan/gitrepo/utils/ics/sample_subsets2.csv') parser.add_argument('--out', type=str,", "+ args.out)) else: subsets, markers, functions, exclude = parseSubsets(args.subsets) features", "{'sparse_functions':(extractFunctionsGBY, dict(subsets=subsets, functions=functions, mincells=5)), 'bool_functions':(extractFunctionsGBY, dict(subsets=subsets, functions=functions, mincells=0)), 'functions_markers':(extractFunctionsMarkersGBY, dict(subsets=subsets,", "parmap.map(mergeSamples, batchList, extractionFunc, extractionKwargs, matchStr, testsamples, metaCols, filters, parallel=False) else:", "matching metadata to %s.' 
% opj(args.folder, 'metamatch_' + args.out)) else:", "parmap.map(mergeSamples, batchList, extractionFunc, extractionKwargs, matchStr, testsamples, metaCols, filters, pool=Pool(processes=ncpus)) else:", "matchStr, testsamples, metaCols, filters, pool=Pool(processes=ncpus)) else: if _PARMAP: res =", "import Pool _PARMAP = True except: _PARMAP = False print('Could", "\"\"\" def mergeBatches(dataFolder, extractionFunc, extractionKwargs, ncpus, testsamples, testbatch, outFile, metaCols=None,", "print('Test: processing samples from one batch') if args.testsamples: print('Test: processing", "-o cell_functions_slurm.txt --wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function cell_functions --ncpus 4 --out cell_functions_22Aug2018.feather", "help='Only perform sample matching, to validate metadata.') parser.add_argument('--feather', action='store_true', help='Store", "1)])), 'cell_functions':(extractRawFunctions, dict(subsets=subsets, functions=functions, downsample=1))} extractionFunc, extractionKwargs = features[args.function] if", "= argparse.ArgumentParser(description='Extract features and merge batches into one CSV.') parser.add_argument('--folder',", "outFile, metaCols=None, filters=None, useFeather=False): out = [] batchList = [opj(dataFolder,", "--ncpus 4 --out functions_extract.csv sbatch -n 1 -t 3-0 -c", "'TNFa'], 1), (['IFNg','IL2', 'TNFa'], 2), (['IFNg','IL2'], 1)])), 'cell_functions':(extractRawFunctions, dict(subsets=subsets, functions=functions,", "ncpus, testsamples, testbatch, outFile, metaCols=None, filters=None, useFeather=False): out = []", "action='store_true', help='Only process two samples from each batch.') parser.add_argument('--testbatch', action='store_true',", "7, 10, 11, 12]}, useFeather=int(args.feather), ncpus=args.ncpus) if wrote == outFile:", "for bf in os.listdir(dataFolder): batchFolder = opj(dataFolder, bf) if os.path.isdir(opj(dataFolder,", "print('Wrote extracted data to %s.' 
% outFile) else: print('Error writing", "'functions':(extractFunctionsGBY, dict(subsets=subsets, functions=functions, compressions=[('ALL', 1), ('ALL', 2), (['IFNg','IL2', 'TNFa'], 1),", "3-0 -c 4 -o functions_markers_sparse_slurm_gby.txt --wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function sparse_functions --ncpus", "-n 1 -t 3-0 -c 4 -o functions_markers_slurm.txt --wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py", "bf in os.listdir(dataFolder) if os.path.isdir(opj(dataFolder, bf))] if testbatch: batchList =", "default='/fh/fast/gilbert_p/grp/hvtn602_compass/tmpdata') parser.add_argument('--function', type=str, help='Name of extraction to apply (\"functions\")', default='functions')", "help='Number of CPUs/cores to use for parallelization.', default=1) parser.add_argument('--testsamples', action='store_true',", "cell_functions_extract.csv --testbatch --testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function sparse_functions", "--wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function sparse_functions --ncpus 4 --subsets /home/agartlan/gitrepo/utils/ics/allcombs_subsets.csv --out functions_markers_sparse_24Jul2018_gby.csv\"", "to apply (\"functions\")', default='functions') parser.add_argument('--subsets', type=str, help='Filename listing subsets for", "3 --out cell_functions_extract.csv --testbatch --testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py", "extractionFunc, extractionKwargs, matchStr, testsamples, metaCols, filters, pool=Pool(processes=ncpus)) else: if _PARMAP:", "from os.path import join as opj import os from functools", "not enabled.') import itertools import pandas as pd import numpy", "one CSV.') parser.add_argument('--folder', type=str, help='Data folder containing all batch 
folders.',", "import itertools import pandas as pd import numpy as np", "func = partial(mergeSamples, extractionFunc=extractionFunc, extractionKwargs=extractionKwargs, matchStr=matchStr, test=testsamples, metaCols=metaCols, filters=filters) res", "--subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv\" python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function cell_functions --ncpus 3 --out cell_functions_extract.csv", "--function cell_functions --ncpus 4 --out cell_functions_22Aug2018.feather --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv\" python", "/home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function cell_functions --ncpus 3 --out cell_functions_extract.csv --testbatch --testsamples --feather", "'batch'] = bf tmp.loc[:, 'batch_folder'] = opj(dataFolder, bf) out.append(tmp) return", "if wrote == outFile: print('Wrote extracted data to %s.' %", "if os.path.isdir(opj(dataFolder, bf)): featherLU = matchSamples(batchFolder, test=False) tmp = pd.Series(featherLU).to_frame()", "pd.Series(featherLU).to_frame() tmp.loc[:, 'batch'] = bf tmp.loc[:, 'batch_folder'] = opj(dataFolder, bf)", "help='Only process twp samples from one batch.') parser.add_argument('--matchingonly', action='store_true', help='Only", "if args.testsamples: print('Test: processing two samples per batch') outFile =", "'bool_functions':(extractFunctionsGBY, dict(subsets=subsets, functions=functions, mincells=0)), 'functions_markers':(extractFunctionsMarkersGBY, dict(subsets=subsets, functions=functions, markers=markers, compressions=[('ALL', 2),", "parser.add_argument('--matchingonly', action='store_true', help='Only perform sample matching, to validate metadata.') parser.add_argument('--feather',", "3 --out sparse_functions_extract_23Aug2018.csv --testbatch --testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py", "if testbatch: batchList = 
batchList[:1] matchStr = 'gs_*.feather' if ncpus", "features[args.function] if args.testbatch: print('Test: processing samples from one batch') if", "help='Only process two samples from each batch.') parser.add_argument('--testbatch', action='store_true', help='Only", "<filename>ics/mergeGatingSets.py #!/usr/bin/env python \"\"\" Usage examples: python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions", "testsamples, metaCols, filters, pool=Pool(processes=ncpus)) else: if _PARMAP: res = parmap.map(mergeSamples,", "not find package \"parmap\", parallelization not enabled.') import itertools import", "-t 3-0 -c 4 -o functions_slurm.txt --wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions", "functions=functions, markers=markers, compressions=[('ALL', 2), (['IFNg','IL2', 'TNFa'], 2)])), 'functions':(extractFunctionsGBY, dict(subsets=subsets, functions=functions,", "import parmap from multiprocessing import Pool _PARMAP = True except:", "filename for CSV.', default='merged_out.csv') parser.add_argument('--ncpus', type=int, help='Number of CPUs/cores to", "batchList = [opj(dataFolder, bf) for bf in os.listdir(dataFolder) if os.path.isdir(opj(dataFolder,", "batch') if args.testsamples: print('Test: processing two samples per batch') outFile", "opj(dataFolder, bf) out.append(tmp) return pd.concat(out, axis=0) if __name__ == '__main__':", "args.out)) else: subsets, markers, functions, exclude = parseSubsets(args.subsets) features =", "compressions=[('ALL', 2), (['IFNg','IL2', 'TNFa'], 2)])), 'functions':(extractFunctionsGBY, dict(subsets=subsets, functions=functions, compressions=[('ALL', 1),", "extractionKwargs=extractionKwargs, matchStr=matchStr, test=testsamples, metaCols=metaCols, filters=filters) res = list(map(func, batchList)) outFilename", "cell_functions_slurm.txt --wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function cell_functions --ncpus 4 --out cell_functions_22Aug2018.feather 
--feather", "(['IFNg','IL2', 'TNFa'], 1), (['IFNg','IL2', 'TNFa'], 2), (['IFNg','IL2'], 1)])), 'cell_functions':(extractRawFunctions, dict(subsets=subsets,", "--function sparse_functions --ncpus 4 --subsets /home/agartlan/gitrepo/utils/ics/allcombs_subsets.csv --out functions_markers_sparse_24Jul2018_gby.csv\" sbatch -n", "/home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function sparse_functions --ncpus 3 --out sparse_functions_extract_23Aug2018.csv --testbatch", "cell_functions_22Aug2018.feather --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv\" python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function cell_functions --ncpus 3", "as opj import os from functools import partial import time", "--wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function cell_functions --ncpus 4 --out cell_functions_22Aug2018.feather --feather --subsets", "/home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function sparse_functions --ncpus 4 --subsets /home/agartlan/gitrepo/utils/ics/allcombs_subsets.csv --out functions_markers_sparse_24Jul2018_gby.csv\" sbatch", "-delete \"\"\" def mergeBatches(dataFolder, extractionFunc, extractionKwargs, ncpus, testsamples, testbatch, outFile,", "multiprocessing import Pool _PARMAP = True except: _PARMAP = False", "compressions=[('ALL', 1), ('ALL', 2), (['IFNg','IL2', 'TNFa'], 1), (['IFNg','IL2', 'TNFa'], 2),", "all tmp files use: find . 
-name \\merged_tmp*.feather -type f", "--function functions_markers --ncpus 4 --out functions_markers_extract.csv\" sbatch -n 1 -t", "--out functions_extract.csv\" sbatch -n 1 -t 3-0 -c 4 -o", "'VISITNO', 'Global.Spec.Id', 'TESTDT', 'STIM'], filters={'STIM':['negctrl', 'TB WCL', 'BCG-Pasteur', 'Ag85B', 'TB", "--out functions_markers_sparse_24Jul2018_gby.csv\" sbatch -n 1 -t 3-0 -c 4 -o", "parallel=False) else: func = partial(mergeSamples, extractionFunc=extractionFunc, extractionKwargs=extractionKwargs, matchStr=matchStr, test=testsamples, metaCols=metaCols,", "out.append(tmp) return pd.concat(out, axis=0) if __name__ == '__main__': import argparse", "for bf in os.listdir(dataFolder) if os.path.isdir(opj(dataFolder, bf))] if testbatch: batchList", "process twp samples from one batch.') parser.add_argument('--matchingonly', action='store_true', help='Only perform", "import os from functools import partial import time import sys", "repo from public github.com') args = parser.parse_args() try: import parmap", "agartland/utils repo from public github.com') args = parser.parse_args() try: import", "= True except: _PARMAP = False print('Could not find package", "--out functions_extract.csv sbatch -n 1 -t 3-0 -c 4 -o", "--testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv To delete all tmp files use:", "dict(subsets=subsets, functions=functions, compressions=[('ALL', 1), ('ALL', 2), (['IFNg','IL2', 'TNFa'], 1), (['IFNg','IL2',", "os from functools import partial import time import sys import", "per batch') outFile = opj(args.folder, args.out) if args.feather: outFile =", "extractionFunc, extractionKwargs, ncpus, testsamples, testbatch, outFile, metaCols=None, filters=None, useFeather=False): out", "importing\"\"\" sys.path.append(args.utils) # from ics import extractFunctionsGBY, extractFunctionsMarkersGBY, parseSubsets, mergeSamples,", "tmp.loc[:, 'batch'] = bf tmp.loc[:, 'batch_folder'] = opj(dataFolder, bf) out.append(tmp)", "3-0 
-c 4 -o cell_functions_slurm.txt --wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function cell_functions --ncpus", "as oposed to CSV') parser.add_argument('--utils', default='/home/agartlan/gitrepo/utils', help='Location of agartland/utils repo", "ncpus=args.ncpus) if wrote == outFile: print('Wrote extracted data to %s.'", "/home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv\" python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function cell_functions --ncpus 3 --out cell_functions_extract.csv --testbatch", "matchStr=matchStr, test=testsamples, metaCols=metaCols, filters=filters) res = list(map(func, batchList)) outFilename =", "parser.add_argument('--testsamples', action='store_true', help='Only process two samples from each batch.') parser.add_argument('--testbatch',", "pd.concat(out, axis=0) if __name__ == '__main__': import argparse parser =", "args.matchingonly: metaDf = testMatching(args.folder) metaDf.to_csv(opj(args.folder, 'metamatch_' + args.out)) print('Wrote matching", "metadata to %s.' 
% opj(args.folder, 'metamatch_' + args.out)) else: subsets,", "__name__ == '__main__': import argparse parser = argparse.ArgumentParser(description='Extract features and", "listing subsets for analysis.', default='/home/agartlan/gitrepo/utils/ics/sample_subsets2.csv') parser.add_argument('--out', type=str, help='Output filename for", "import extractFunctionsGBY, extractFunctionsMarkersGBY, parseSubsets, mergeSamples, matchSamples from ics import *", "analysis.', default='/home/agartlan/gitrepo/utils/ics/sample_subsets2.csv') parser.add_argument('--out', type=str, help='Output filename for CSV.', default='merged_out.csv') parser.add_argument('--ncpus',", "= [] batchList = [opj(dataFolder, bf) for bf in os.listdir(dataFolder)", "and _PARMAP: res = parmap.map(mergeSamples, batchList, extractionFunc, extractionKwargs, matchStr, testsamples,", "sys.path.append(args.utils) # from ics import extractFunctionsGBY, extractFunctionsMarkersGBY, parseSubsets, mergeSamples, matchSamples", "/home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions --ncpus 4 --out functions_extract.csv\" sbatch -n 1", "sparse_functions --ncpus 4 --subsets /home/agartlan/gitrepo/utils/ics/allcombs_subsets.csv --out functions_markers_sparse_24Jul2018_gby.csv\" sbatch -n 1", "from one batch') if args.testsamples: print('Test: processing two samples per", "functions_markers_slurm.txt --wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions_markers --ncpus 4 --out functions_markers_extract.csv\" sbatch", "each batch.') parser.add_argument('--testbatch', action='store_true', help='Only process twp samples from one", "ics import extractFunctionsGBY, extractFunctionsMarkersGBY, parseSubsets, mergeSamples, matchSamples from ics import", "= False print('Could not find package \"parmap\", parallelization not enabled.')", "%s.' 
% outFile) else: print('Error writing file to disk: %s'", "default='functions') parser.add_argument('--subsets', type=str, help='Filename listing subsets for analysis.', default='/home/agartlan/gitrepo/utils/ics/sample_subsets2.csv') parser.add_argument('--out',", "outFile.replace('.csv', '.feather') wrote = mergeBatches(args.folder, extractionFunc=extractionFunc, extractionKwargs=extractionKwargs, testsamples=args.testsamples, testbatch=args.testbatch, outFile=outFile,", "res = list(map(func, batchList)) outFilename = mergeFeathers(res, outFile, writeCSV=1 -", "--ncpus 6 --out bool_functions_extract_05May2020.csv --testbatch --testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv To", "subsets for analysis.', default='/home/agartlan/gitrepo/utils/ics/sample_subsets2.csv') parser.add_argument('--out', type=str, help='Output filename for CSV.',", "matching, to validate metadata.') parser.add_argument('--feather', action='store_true', help='Store as feather as", "to validate metadata.') parser.add_argument('--feather', action='store_true', help='Store as feather as oposed", "-t 3-0 -c 4 -o functions_markers_sparse_slurm_gby.txt --wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function sparse_functions", "features and merge batches into one CSV.') parser.add_argument('--folder', type=str, help='Data", "--function sparse_functions --ncpus 3 --out sparse_functions_extract_23Aug2018.csv --testbatch --testsamples --feather --subsets", "for analysis.', default='/home/agartlan/gitrepo/utils/ics/sample_subsets2.csv') parser.add_argument('--out', type=str, help='Output filename for CSV.', default='merged_out.csv')", "to use for parallelization.', default=1) parser.add_argument('--testsamples', action='store_true', help='Only process two", "/home/agartlan/gitrepo/utils/ics/allcombs_subsets.csv --out functions_markers_sparse_24Jul2018_gby.csv\" sbatch -n 1 -t 3-0 -c 4", 
"/home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function bool_functions --ncpus 6 --out bool_functions_extract_05May2020.csv --testbatch", "subsets, markers, functions, exclude = parseSubsets(args.subsets) features = {'sparse_functions':(extractFunctionsGBY, dict(subsets=subsets,", "default=1) parser.add_argument('--testsamples', action='store_true', help='Only process two samples from each batch.')", "useFeather=int(args.feather), ncpus=args.ncpus) if wrote == outFile: print('Wrote extracted data to", "4 -o functions_slurm.txt --wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions --ncpus 4 --out", "list(map(func, batchList)) outFilename = mergeFeathers(res, outFile, writeCSV=1 - int(useFeather)) return", "WCL', 'BCG-Pasteur', 'Ag85B', 'TB 10.4'], 'VISITNO':[2, 6, 7, 10, 11,", "to CSV') parser.add_argument('--utils', default='/home/agartlan/gitrepo/utils', help='Location of agartland/utils repo from public", "--out sparse_functions_extract_23Aug2018.csv --testbatch --testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function", "bool_functions_extract_05May2020.csv --testbatch --testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv To delete all tmp", "else: if _PARMAP: res = parmap.map(mergeSamples, batchList, extractionFunc, extractionKwargs, matchStr,", "'metamatch_' + args.out)) else: subsets, markers, functions, exclude = parseSubsets(args.subsets)", "= mergeFeathers(res, outFile, writeCSV=1 - int(useFeather)) return outFilename def testMatching(dataFolder):", "extraction to apply (\"functions\")', default='functions') parser.add_argument('--subsets', type=str, help='Filename listing subsets", "--function functions --ncpus 4 --out functions_extract.csv sbatch -n 1 -t", "functions --ncpus 4 --out functions_extract.csv\" 
sbatch -n 1 -t 3-0", "metaCols, filters, pool=Pool(processes=ncpus)) else: if _PARMAP: res = parmap.map(mergeSamples, batchList,", "apply (\"functions\")', default='functions') parser.add_argument('--subsets', type=str, help='Filename listing subsets for analysis.',", "mincells=5)), 'bool_functions':(extractFunctionsGBY, dict(subsets=subsets, functions=functions, mincells=0)), 'functions_markers':(extractFunctionsMarkersGBY, dict(subsets=subsets, functions=functions, markers=markers, compressions=[('ALL',", "#!/usr/bin/env python \"\"\" Usage examples: python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions --ncpus", "args.testsamples: print('Test: processing two samples per batch') outFile = opj(args.folder,", "extracted data to %s.' % outFile) else: print('Error writing file", "sbatch -n 1 -t 3-0 -c 4 -o functions_markers_slurm.txt --wrap=\"python", "bf tmp.loc[:, 'batch_folder'] = opj(dataFolder, bf) out.append(tmp) return pd.concat(out, axis=0)", "tmp.loc[:, 'batch_folder'] = opj(dataFolder, bf) out.append(tmp) return pd.concat(out, axis=0) if", "'batch_folder'] = opj(dataFolder, bf) out.append(tmp) return pd.concat(out, axis=0) if __name__", "type=str, help='Name of extraction to apply (\"functions\")', default='functions') parser.add_argument('--subsets', type=str,", "'TB WCL', 'BCG-Pasteur', 'Ag85B', 'TB 10.4'], 'VISITNO':[2, 6, 7, 10,", "import partial import time import sys import feather \"\"\"Make sure", "2), (['IFNg','IL2'], 1)])), 'cell_functions':(extractRawFunctions, dict(subsets=subsets, functions=functions, downsample=1))} extractionFunc, extractionKwargs =", "= {'sparse_functions':(extractFunctionsGBY, dict(subsets=subsets, functions=functions, mincells=5)), 'bool_functions':(extractFunctionsGBY, dict(subsets=subsets, functions=functions, mincells=0)), 'functions_markers':(extractFunctionsMarkersGBY,", "/home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function sparse_functions --ncpus 3 --out 
sparse_functions_extract_23Aug2018.csv --testbatch --testsamples --feather", "testsamples, metaCols, filters, parallel=False) else: func = partial(mergeSamples, extractionFunc=extractionFunc, extractionKwargs=extractionKwargs,", "--ncpus 4 --out functions_extract.csv\" sbatch -n 1 -t 3-0 -c", "= list(map(func, batchList)) outFilename = mergeFeathers(res, outFile, writeCSV=1 - int(useFeather))", "extractionKwargs = features[args.function] if args.testbatch: print('Test: processing samples from one", "find . -name \\merged_tmp*.feather -type f -delete \"\"\" def mergeBatches(dataFolder,", "default='merged_out.csv') parser.add_argument('--ncpus', type=int, help='Number of CPUs/cores to use for parallelization.',", "argparse.ArgumentParser(description='Extract features and merge batches into one CSV.') parser.add_argument('--folder', type=str,", "functions=functions, compressions=[('ALL', 1), ('ALL', 2), (['IFNg','IL2', 'TNFa'], 1), (['IFNg','IL2', 'TNFa'],", "functions=functions, mincells=0)), 'functions_markers':(extractFunctionsMarkersGBY, dict(subsets=subsets, functions=functions, markers=markers, compressions=[('ALL', 2), (['IFNg','IL2', 'TNFa'],", "'metamatch_' + args.out)) print('Wrote matching metadata to %s.' 
% opj(args.folder,", "res = parmap.map(mergeSamples, batchList, extractionFunc, extractionKwargs, matchStr, testsamples, metaCols, filters,", "containing all batch folders.', default='/fh/fast/gilbert_p/grp/hvtn602_compass/tmpdata') parser.add_argument('--function', type=str, help='Name of extraction", "4 -o cell_functions_slurm.txt --wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function cell_functions --ncpus 4 --out", "import pandas as pd import numpy as np from os.path", "'.feather') wrote = mergeBatches(args.folder, extractionFunc=extractionFunc, extractionKwargs=extractionKwargs, testsamples=args.testsamples, testbatch=args.testbatch, outFile=outFile, metaCols=['PTID',", "'functions_markers':(extractFunctionsMarkersGBY, dict(subsets=subsets, functions=functions, markers=markers, compressions=[('ALL', 2), (['IFNg','IL2', 'TNFa'], 2)])), 'functions':(extractFunctionsGBY,", "'__main__': import argparse parser = argparse.ArgumentParser(description='Extract features and merge batches", "6, 7, 10, 11, 12]}, useFeather=int(args.feather), ncpus=args.ncpus) if wrote ==", "matchSamples from ics import * if args.matchingonly: metaDf = testMatching(args.folder)", "1 -t 3-0 -c 4 -o cell_functions_slurm.txt --wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function", "functions_extract.csv\" sbatch -n 1 -t 3-0 -c 4 -o functions_markers_slurm.txt", "default='/home/agartlan/gitrepo/utils', help='Location of agartland/utils repo from public github.com') args =", "oposed to CSV') parser.add_argument('--utils', default='/home/agartlan/gitrepo/utils', help='Location of agartland/utils repo from", "-c 4 -o cell_functions_slurm.txt --wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function cell_functions --ncpus 4", "--testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function sparse_functions --ncpus 3", "--ncpus 3 
--out sparse_functions_extract_23Aug2018.csv --testbatch --testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv python", "pool=Pool(processes=ncpus)) else: if _PARMAP: res = parmap.map(mergeSamples, batchList, extractionFunc, extractionKwargs,", "help='Data folder containing all batch folders.', default='/fh/fast/gilbert_p/grp/hvtn602_compass/tmpdata') parser.add_argument('--function', type=str, help='Name", "parallelization.', default=1) parser.add_argument('--testsamples', action='store_true', help='Only process two samples from each", "% outFile) else: print('Error writing file to disk: %s' %", "bf) for bf in os.listdir(dataFolder) if os.path.isdir(opj(dataFolder, bf))] if testbatch:", "downsample=1))} extractionFunc, extractionKwargs = features[args.function] if args.testbatch: print('Test: processing samples", "feather \"\"\"Make sure the utils are on path before importing\"\"\"", "parser.add_argument('--function', type=str, help='Name of extraction to apply (\"functions\")', default='functions') parser.add_argument('--subsets',", "use for parallelization.', default=1) parser.add_argument('--testsamples', action='store_true', help='Only process two samples", "parseSubsets, mergeSamples, matchSamples from ics import * if args.matchingonly: metaDf", "= parseSubsets(args.subsets) features = {'sparse_functions':(extractFunctionsGBY, dict(subsets=subsets, functions=functions, mincells=5)), 'bool_functions':(extractFunctionsGBY, dict(subsets=subsets,", "/home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions_markers --ncpus 4 --out functions_markers_extract.csv\" sbatch -n 1", "feather as oposed to CSV') parser.add_argument('--utils', default='/home/agartlan/gitrepo/utils', help='Location of agartland/utils", "functions_markers --ncpus 4 --out functions_markers_extract.csv\" sbatch -n 1 -t 3-0", "sbatch -n 1 -t 3-0 -c 4 -o cell_functions_slurm.txt --wrap=\"python", "tmp files use: find . 
-name \\merged_tmp*.feather -type f -delete", "testsamples=args.testsamples, testbatch=args.testbatch, outFile=outFile, metaCols=['PTID', 'VISITNO', 'Global.Spec.Id', 'TESTDT', 'STIM'], filters={'STIM':['negctrl', 'TB", "--feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv\" python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function cell_functions --ncpus 3 --out", "help='Location of agartland/utils repo from public github.com') args = parser.parse_args()", "test=False) tmp = pd.Series(featherLU).to_frame() tmp.loc[:, 'batch'] = bf tmp.loc[:, 'batch_folder']", "ics import * if args.matchingonly: metaDf = testMatching(args.folder) metaDf.to_csv(opj(args.folder, 'metamatch_'", "filters, parallel=False) else: func = partial(mergeSamples, extractionFunc=extractionFunc, extractionKwargs=extractionKwargs, matchStr=matchStr, test=testsamples,", "batchList)) outFilename = mergeFeathers(res, outFile, writeCSV=1 - int(useFeather)) return outFilename", "'Global.Spec.Id', 'TESTDT', 'STIM'], filters={'STIM':['negctrl', 'TB WCL', 'BCG-Pasteur', 'Ag85B', 'TB 10.4'],", "batchList, extractionFunc, extractionKwargs, matchStr, testsamples, metaCols, filters, parallel=False) else: func", "'VISITNO':[2, 6, 7, 10, 11, 12]}, useFeather=int(args.feather), ncpus=args.ncpus) if wrote", "mergeBatches(dataFolder, extractionFunc, extractionKwargs, ncpus, testsamples, testbatch, outFile, metaCols=None, filters=None, useFeather=False):", "4 -o functions_markers_slurm.txt --wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions_markers --ncpus 4 --out", "for parallelization.', default=1) parser.add_argument('--testsamples', action='store_true', help='Only process two samples from", "process two samples from each batch.') parser.add_argument('--testbatch', action='store_true', help='Only process", "test=testsamples, metaCols=metaCols, filters=filters) res = list(map(func, batchList)) outFilename = mergeFeathers(res,", "if 
args.feather: outFile = outFile.replace('.csv', '.feather') wrote = mergeBatches(args.folder, extractionFunc=extractionFunc,", "dict(subsets=subsets, functions=functions, markers=markers, compressions=[('ALL', 2), (['IFNg','IL2', 'TNFa'], 2)])), 'functions':(extractFunctionsGBY, dict(subsets=subsets,", "-type f -delete \"\"\" def mergeBatches(dataFolder, extractionFunc, extractionKwargs, ncpus, testsamples,", "--subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv To delete all tmp files use: find .", "samples from each batch.') parser.add_argument('--testbatch', action='store_true', help='Only process twp samples", "extractFunctionsGBY, extractFunctionsMarkersGBY, parseSubsets, mergeSamples, matchSamples from ics import * if", "/home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv To delete all tmp files use: find . -name", "bf) out.append(tmp) return pd.concat(out, axis=0) if __name__ == '__main__': import", "testMatching(dataFolder): out = [] for bf in os.listdir(dataFolder): batchFolder =", "out = [] batchList = [opj(dataFolder, bf) for bf in", "writeCSV=1 - int(useFeather)) return outFilename def testMatching(dataFolder): out = []", "samples from one batch') if args.testsamples: print('Test: processing two samples", "4 --out functions_markers_extract.csv\" sbatch -n 1 -t 3-0 -c 4", "'STIM'], filters={'STIM':['negctrl', 'TB WCL', 'BCG-Pasteur', 'Ag85B', 'TB 10.4'], 'VISITNO':[2, 6,", "4 -o functions_markers_sparse_slurm_gby.txt --wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function sparse_functions --ncpus 4 --subsets", "= mergeBatches(args.folder, extractionFunc=extractionFunc, extractionKwargs=extractionKwargs, testsamples=args.testsamples, testbatch=args.testbatch, outFile=outFile, metaCols=['PTID', 'VISITNO', 'Global.Spec.Id',", "else: subsets, markers, functions, exclude = parseSubsets(args.subsets) features = {'sparse_functions':(extractFunctionsGBY,", "= opj(args.folder, args.out) if args.feather: outFile = 
outFile.replace('.csv', '.feather') wrote", "opj(dataFolder, bf) if os.path.isdir(opj(dataFolder, bf)): featherLU = matchSamples(batchFolder, test=False) tmp", "bf)): featherLU = matchSamples(batchFolder, test=False) tmp = pd.Series(featherLU).to_frame() tmp.loc[:, 'batch']", "= opj(dataFolder, bf) out.append(tmp) return pd.concat(out, axis=0) if __name__ ==", "public github.com') args = parser.parse_args() try: import parmap from multiprocessing", "f -delete \"\"\" def mergeBatches(dataFolder, extractionFunc, extractionKwargs, ncpus, testsamples, testbatch,", "the utils are on path before importing\"\"\" sys.path.append(args.utils) # from", "10, 11, 12]}, useFeather=int(args.feather), ncpus=args.ncpus) if wrote == outFile: print('Wrote", "[opj(dataFolder, bf) for bf in os.listdir(dataFolder) if os.path.isdir(opj(dataFolder, bf))] if", "outFile = opj(args.folder, args.out) if args.feather: outFile = outFile.replace('.csv', '.feather')", "two samples per batch') outFile = opj(args.folder, args.out) if args.feather:", "[] for bf in os.listdir(dataFolder): batchFolder = opj(dataFolder, bf) if", "\"\"\" Usage examples: python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions --ncpus 4 --out", "on path before importing\"\"\" sys.path.append(args.utils) # from ics import extractFunctionsGBY,", "CSV') parser.add_argument('--utils', default='/home/agartlan/gitrepo/utils', help='Location of agartland/utils repo from public github.com')", "= parser.parse_args() try: import parmap from multiprocessing import Pool _PARMAP", "mincells=0)), 'functions_markers':(extractFunctionsMarkersGBY, dict(subsets=subsets, functions=functions, markers=markers, compressions=[('ALL', 2), (['IFNg','IL2', 'TNFa'], 2)])),", ". 
-name \\merged_tmp*.feather -type f -delete \"\"\" def mergeBatches(dataFolder, extractionFunc,", "1), ('ALL', 2), (['IFNg','IL2', 'TNFa'], 1), (['IFNg','IL2', 'TNFa'], 2), (['IFNg','IL2'],", "useFeather=False): out = [] batchList = [opj(dataFolder, bf) for bf", "utils are on path before importing\"\"\" sys.path.append(args.utils) # from ics", "before importing\"\"\" sys.path.append(args.utils) # from ics import extractFunctionsGBY, extractFunctionsMarkersGBY, parseSubsets,", "--function functions --ncpus 4 --out functions_extract.csv\" sbatch -n 1 -t", "validate metadata.') parser.add_argument('--feather', action='store_true', help='Store as feather as oposed to", "/home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function bool_functions --ncpus 6 --out bool_functions_extract_05May2020.csv --testbatch --testsamples --feather", "parser.add_argument('--feather', action='store_true', help='Store as feather as oposed to CSV') parser.add_argument('--utils',", "4 --out functions_extract.csv sbatch -n 1 -t 3-0 -c 4", "def testMatching(dataFolder): out = [] for bf in os.listdir(dataFolder): batchFolder", "to %s.' 
% opj(args.folder, 'metamatch_' + args.out)) else: subsets, markers,", "_PARMAP = False print('Could not find package \"parmap\", parallelization not", "import * if args.matchingonly: metaDf = testMatching(args.folder) metaDf.to_csv(opj(args.folder, 'metamatch_' +", "= batchList[:1] matchStr = 'gs_*.feather' if ncpus > 1 and", "for CSV.', default='merged_out.csv') parser.add_argument('--ncpus', type=int, help='Number of CPUs/cores to use", "help='Filename listing subsets for analysis.', default='/home/agartlan/gitrepo/utils/ics/sample_subsets2.csv') parser.add_argument('--out', type=str, help='Output filename", "- int(useFeather)) return outFilename def testMatching(dataFolder): out = [] for", "import time import sys import feather \"\"\"Make sure the utils", "opj(args.folder, args.out) if args.feather: outFile = outFile.replace('.csv', '.feather') wrote =", "6 --out bool_functions_extract_05May2020.csv --testbatch --testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv To delete", "matchStr = 'gs_*.feather' if ncpus > 1 and _PARMAP: res", "extractionKwargs, matchStr, testsamples, metaCols, filters, parallel=False) else: func = partial(mergeSamples,", "features = {'sparse_functions':(extractFunctionsGBY, dict(subsets=subsets, functions=functions, mincells=5)), 'bool_functions':(extractFunctionsGBY, dict(subsets=subsets, functions=functions, mincells=0)),", "mergeBatches(args.folder, extractionFunc=extractionFunc, extractionKwargs=extractionKwargs, testsamples=args.testsamples, testbatch=args.testbatch, outFile=outFile, metaCols=['PTID', 'VISITNO', 'Global.Spec.Id', 'TESTDT',", "def mergeBatches(dataFolder, extractionFunc, extractionKwargs, ncpus, testsamples, testbatch, outFile, metaCols=None, filters=None,", "extractionFunc=extractionFunc, extractionKwargs=extractionKwargs, matchStr=matchStr, test=testsamples, metaCols=metaCols, filters=filters) res = list(map(func, batchList))", "= [opj(dataFolder, bf) for bf in 
os.listdir(dataFolder) if os.path.isdir(opj(dataFolder, bf))]", "of CPUs/cores to use for parallelization.', default=1) parser.add_argument('--testsamples', action='store_true', help='Only", "args.out)) print('Wrote matching metadata to %s.' % opj(args.folder, 'metamatch_' +", "testbatch, outFile, metaCols=None, filters=None, useFeather=False): out = [] batchList =", "processing two samples per batch') outFile = opj(args.folder, args.out) if", "filters, pool=Pool(processes=ncpus)) else: if _PARMAP: res = parmap.map(mergeSamples, batchList, extractionFunc,", "sbatch -n 1 -t 3-0 -c 4 -o functions_slurm.txt --wrap=\"python", "batchList[:1] matchStr = 'gs_*.feather' if ncpus > 1 and _PARMAP:", "into one CSV.') parser.add_argument('--folder', type=str, help='Data folder containing all batch", "batchFolder = opj(dataFolder, bf) if os.path.isdir(opj(dataFolder, bf)): featherLU = matchSamples(batchFolder,", "outFile) else: print('Error writing file to disk: %s' % wrote)", "np from os.path import join as opj import os from", "'cell_functions':(extractRawFunctions, dict(subsets=subsets, functions=functions, downsample=1))} extractionFunc, extractionKwargs = features[args.function] if args.testbatch:", "action='store_true', help='Only process twp samples from one batch.') parser.add_argument('--matchingonly', action='store_true',", "--feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function bool_functions --ncpus 6 --out", "samples per batch') outFile = opj(args.folder, args.out) if args.feather: outFile", "--testbatch --testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function sparse_functions --ncpus", "--wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions --ncpus 4 --out functions_extract.csv\" sbatch -n", "1 -t 3-0 -c 4 -o 
functions_markers_slurm.txt --wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function", "bf) if os.path.isdir(opj(dataFolder, bf)): featherLU = matchSamples(batchFolder, test=False) tmp =", "1 and _PARMAP: res = parmap.map(mergeSamples, batchList, extractionFunc, extractionKwargs, matchStr,", "cell_functions --ncpus 3 --out cell_functions_extract.csv --testbatch --testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv", "os.path.isdir(opj(dataFolder, bf))] if testbatch: batchList = batchList[:1] matchStr = 'gs_*.feather'", "functools import partial import time import sys import feather \"\"\"Make", "as feather as oposed to CSV') parser.add_argument('--utils', default='/home/agartlan/gitrepo/utils', help='Location of", "-t 3-0 -c 4 -o cell_functions_slurm.txt --wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function cell_functions", "--ncpus 4 --subsets /home/agartlan/gitrepo/utils/ics/allcombs_subsets.csv --out functions_markers_sparse_24Jul2018_gby.csv\" sbatch -n 1 -t", "% opj(args.folder, 'metamatch_' + args.out)) else: subsets, markers, functions, exclude", "# from ics import extractFunctionsGBY, extractFunctionsMarkersGBY, parseSubsets, mergeSamples, matchSamples from", "-n 1 -t 3-0 -c 4 -o cell_functions_slurm.txt --wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py", "False print('Could not find package \"parmap\", parallelization not enabled.') import", "out = [] for bf in os.listdir(dataFolder): batchFolder = opj(dataFolder,", "help='Output filename for CSV.', default='merged_out.csv') parser.add_argument('--ncpus', type=int, help='Number of CPUs/cores", "--feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv To delete all tmp files use: find", "= matchSamples(batchFolder, test=False) tmp = pd.Series(featherLU).to_frame() tmp.loc[:, 'batch'] = bf", "--out functions_markers_extract.csv\" sbatch -n 1 -t 3-0 -c 4 -o", "matchStr, testsamples, 
metaCols, filters, parallel=False) else: func = partial(mergeSamples, extractionFunc=extractionFunc,", "= features[args.function] if args.testbatch: print('Test: processing samples from one batch')", "-o functions_slurm.txt --wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions --ncpus 4 --out functions_extract.csv\"", "Pool _PARMAP = True except: _PARMAP = False print('Could not", "python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions --ncpus 4 --out functions_extract.csv sbatch -n", "1 -t 3-0 -c 4 -o functions_markers_sparse_slurm_gby.txt --wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function", "= bf tmp.loc[:, 'batch_folder'] = opj(dataFolder, bf) out.append(tmp) return pd.concat(out,", "parser.add_argument('--out', type=str, help='Output filename for CSV.', default='merged_out.csv') parser.add_argument('--ncpus', type=int, help='Number", "os.listdir(dataFolder) if os.path.isdir(opj(dataFolder, bf))] if testbatch: batchList = batchList[:1] matchStr", "4 --out cell_functions_22Aug2018.feather --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv\" python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function cell_functions", "type=str, help='Data folder containing all batch folders.', default='/fh/fast/gilbert_p/grp/hvtn602_compass/tmpdata') parser.add_argument('--function', type=str,", "from multiprocessing import Pool _PARMAP = True except: _PARMAP =", "'gs_*.feather' if ncpus > 1 and _PARMAP: res = parmap.map(mergeSamples,", "metaDf = testMatching(args.folder) metaDf.to_csv(opj(args.folder, 'metamatch_' + args.out)) print('Wrote matching metadata", "axis=0) if __name__ == '__main__': import argparse parser = argparse.ArgumentParser(description='Extract", "-n 1 -t 3-0 -c 4 -o functions_markers_sparse_slurm_gby.txt --wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py", "type=int, help='Number of CPUs/cores to use for 
parallelization.', default=1) parser.add_argument('--testsamples',", "= opj(dataFolder, bf) if os.path.isdir(opj(dataFolder, bf)): featherLU = matchSamples(batchFolder, test=False)", "'TNFa'], 2), (['IFNg','IL2'], 1)])), 'cell_functions':(extractRawFunctions, dict(subsets=subsets, functions=functions, downsample=1))} extractionFunc, extractionKwargs", "'BCG-Pasteur', 'Ag85B', 'TB 10.4'], 'VISITNO':[2, 6, 7, 10, 11, 12]},", "12]}, useFeather=int(args.feather), ncpus=args.ncpus) if wrote == outFile: print('Wrote extracted data", "'TNFa'], 2)])), 'functions':(extractFunctionsGBY, dict(subsets=subsets, functions=functions, compressions=[('ALL', 1), ('ALL', 2), (['IFNg','IL2',", "python \"\"\" Usage examples: python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions --ncpus 4", "_PARMAP: res = parmap.map(mergeSamples, batchList, extractionFunc, extractionKwargs, matchStr, testsamples, metaCols,", "from one batch.') parser.add_argument('--matchingonly', action='store_true', help='Only perform sample matching, to", "argparse parser = argparse.ArgumentParser(description='Extract features and merge batches into one", "as pd import numpy as np from os.path import join", "> 1 and _PARMAP: res = parmap.map(mergeSamples, batchList, extractionFunc, extractionKwargs,", "(['IFNg','IL2', 'TNFa'], 2)])), 'functions':(extractFunctionsGBY, dict(subsets=subsets, functions=functions, compressions=[('ALL', 1), ('ALL', 2),", "('ALL', 2), (['IFNg','IL2', 'TNFa'], 1), (['IFNg','IL2', 'TNFa'], 2), (['IFNg','IL2'], 1)])),", "python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function cell_functions --ncpus 3 --out cell_functions_extract.csv --testbatch --testsamples", "batch') outFile = opj(args.folder, args.out) if args.feather: outFile = outFile.replace('.csv',", "itertools import pandas as pd import numpy as np from", "= pd.Series(featherLU).to_frame() tmp.loc[:, 'batch'] = bf tmp.loc[:, 'batch_folder'] = opj(dataFolder,", "action='store_true', help='Store 
as feather as oposed to CSV') parser.add_argument('--utils', default='/home/agartlan/gitrepo/utils',", "[] batchList = [opj(dataFolder, bf) for bf in os.listdir(dataFolder) if", "one batch') if args.testsamples: print('Test: processing two samples per batch')", "3-0 -c 4 -o functions_markers_slurm.txt --wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions_markers --ncpus", "os.listdir(dataFolder): batchFolder = opj(dataFolder, bf) if os.path.isdir(opj(dataFolder, bf)): featherLU =", "help='Name of extraction to apply (\"functions\")', default='functions') parser.add_argument('--subsets', type=str, help='Filename", "functions_markers_sparse_24Jul2018_gby.csv\" sbatch -n 1 -t 3-0 -c 4 -o cell_functions_slurm.txt", "dict(subsets=subsets, functions=functions, mincells=0)), 'functions_markers':(extractFunctionsMarkersGBY, dict(subsets=subsets, functions=functions, markers=markers, compressions=[('ALL', 2), (['IFNg','IL2',", "partial import time import sys import feather \"\"\"Make sure the", "\\merged_tmp*.feather -type f -delete \"\"\" def mergeBatches(dataFolder, extractionFunc, extractionKwargs, ncpus,", "of agartland/utils repo from public github.com') args = parser.parse_args() try:", "CSV.', default='merged_out.csv') parser.add_argument('--ncpus', type=int, help='Number of CPUs/cores to use for", "filters=filters) res = list(map(func, batchList)) outFilename = mergeFeathers(res, outFile, writeCSV=1", "enabled.') import itertools import pandas as pd import numpy as", "-c 4 -o functions_markers_sparse_slurm_gby.txt --wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function sparse_functions --ncpus 4", "(['IFNg','IL2'], 1)])), 'cell_functions':(extractRawFunctions, dict(subsets=subsets, functions=functions, downsample=1))} extractionFunc, extractionKwargs = features[args.function]", "extractionKwargs=extractionKwargs, testsamples=args.testsamples, testbatch=args.testbatch, outFile=outFile, metaCols=['PTID', 
'VISITNO', 'Global.Spec.Id', 'TESTDT', 'STIM'], filters={'STIM':['negctrl',", "sbatch -n 1 -t 3-0 -c 4 -o functions_markers_sparse_slurm_gby.txt --wrap=\"python", "= outFile.replace('.csv', '.feather') wrote = mergeBatches(args.folder, extractionFunc=extractionFunc, extractionKwargs=extractionKwargs, testsamples=args.testsamples, testbatch=args.testbatch,", "if ncpus > 1 and _PARMAP: res = parmap.map(mergeSamples, batchList,", "bool_functions --ncpus 6 --out bool_functions_extract_05May2020.csv --testbatch --testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv", "extractionKwargs, matchStr, testsamples, metaCols, filters, pool=Pool(processes=ncpus)) else: if _PARMAP: res", "extractionFunc, extractionKwargs, matchStr, testsamples, metaCols, filters, parallel=False) else: func =", "join as opj import os from functools import partial import", "dict(subsets=subsets, functions=functions, downsample=1))} extractionFunc, extractionKwargs = features[args.function] if args.testbatch: print('Test:", "numpy as np from os.path import join as opj import", "import feather \"\"\"Make sure the utils are on path before", "11, 12]}, useFeather=int(args.feather), ncpus=args.ncpus) if wrote == outFile: print('Wrote extracted", "wrote == outFile: print('Wrote extracted data to %s.' 
% outFile)", "sparse_functions_extract_23Aug2018.csv --testbatch --testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function bool_functions", "/home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function cell_functions --ncpus 4 --out cell_functions_22Aug2018.feather --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv\"", "if _PARMAP: res = parmap.map(mergeSamples, batchList, extractionFunc, extractionKwargs, matchStr, testsamples,", "perform sample matching, to validate metadata.') parser.add_argument('--feather', action='store_true', help='Store as", "extractionKwargs, ncpus, testsamples, testbatch, outFile, metaCols=None, filters=None, useFeather=False): out =", "markers=markers, compressions=[('ALL', 2), (['IFNg','IL2', 'TNFa'], 2)])), 'functions':(extractFunctionsGBY, dict(subsets=subsets, functions=functions, compressions=[('ALL',", "matchSamples(batchFolder, test=False) tmp = pd.Series(featherLU).to_frame() tmp.loc[:, 'batch'] = bf tmp.loc[:,", "= parmap.map(mergeSamples, batchList, extractionFunc, extractionKwargs, matchStr, testsamples, metaCols, filters, parallel=False)", "To delete all tmp files use: find . 
-name \\merged_tmp*.feather", "dict(subsets=subsets, functions=functions, mincells=5)), 'bool_functions':(extractFunctionsGBY, dict(subsets=subsets, functions=functions, mincells=0)), 'functions_markers':(extractFunctionsMarkersGBY, dict(subsets=subsets, functions=functions,", "--testbatch --testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv To delete all tmp files", "partial(mergeSamples, extractionFunc=extractionFunc, extractionKwargs=extractionKwargs, matchStr=matchStr, test=testsamples, metaCols=metaCols, filters=filters) res = list(map(func,", "github.com') args = parser.parse_args() try: import parmap from multiprocessing import", "4 --out functions_extract.csv\" sbatch -n 1 -t 3-0 -c 4", "twp samples from one batch.') parser.add_argument('--matchingonly', action='store_true', help='Only perform sample", "exclude = parseSubsets(args.subsets) features = {'sparse_functions':(extractFunctionsGBY, dict(subsets=subsets, functions=functions, mincells=5)), 'bool_functions':(extractFunctionsGBY,", "time import sys import feather \"\"\"Make sure the utils are", "--testbatch --testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function bool_functions --ncpus", "functions_extract.csv sbatch -n 1 -t 3-0 -c 4 -o functions_slurm.txt", "1), (['IFNg','IL2', 'TNFa'], 2), (['IFNg','IL2'], 1)])), 'cell_functions':(extractRawFunctions, dict(subsets=subsets, functions=functions, downsample=1))}", "data to %s.' 
% outFile) else: print('Error writing file to", "outFilename def testMatching(dataFolder): out = [] for bf in os.listdir(dataFolder):", "default='/home/agartlan/gitrepo/utils/ics/sample_subsets2.csv') parser.add_argument('--out', type=str, help='Output filename for CSV.', default='merged_out.csv') parser.add_argument('--ncpus', type=int,", "outFile = outFile.replace('.csv', '.feather') wrote = mergeBatches(args.folder, extractionFunc=extractionFunc, extractionKwargs=extractionKwargs, testsamples=args.testsamples,", "to %s.' % outFile) else: print('Error writing file to disk:", "--out bool_functions_extract_05May2020.csv --testbatch --testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv To delete all", "markers, functions, exclude = parseSubsets(args.subsets) features = {'sparse_functions':(extractFunctionsGBY, dict(subsets=subsets, functions=functions,", "print('Test: processing two samples per batch') outFile = opj(args.folder, args.out)", "from functools import partial import time import sys import feather", "'TESTDT', 'STIM'], filters={'STIM':['negctrl', 'TB WCL', 'BCG-Pasteur', 'Ag85B', 'TB 10.4'], 'VISITNO':[2,", "== '__main__': import argparse parser = argparse.ArgumentParser(description='Extract features and merge", "import sys import feather \"\"\"Make sure the utils are on", "%s.' % opj(args.folder, 'metamatch_' + args.out)) else: subsets, markers, functions,", "files use: find . 
-name \\merged_tmp*.feather -type f -delete \"\"\"", "type=str, help='Output filename for CSV.', default='merged_out.csv') parser.add_argument('--ncpus', type=int, help='Number of", "of extraction to apply (\"functions\")', default='functions') parser.add_argument('--subsets', type=str, help='Filename listing", "2)])), 'functions':(extractFunctionsGBY, dict(subsets=subsets, functions=functions, compressions=[('ALL', 1), ('ALL', 2), (['IFNg','IL2', 'TNFa'],", "parser.add_argument('--utils', default='/home/agartlan/gitrepo/utils', help='Location of agartland/utils repo from public github.com') args", "os.path import join as opj import os from functools import", "if args.testbatch: print('Test: processing samples from one batch') if args.testsamples:", "--testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function bool_functions --ncpus 6", "/home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions --ncpus 4 --out functions_extract.csv sbatch -n 1", "are on path before importing\"\"\" sys.path.append(args.utils) # from ics import", "outFile, writeCSV=1 - int(useFeather)) return outFilename def testMatching(dataFolder): out =", "-t 3-0 -c 4 -o functions_markers_slurm.txt --wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions_markers", "args.out) if args.feather: outFile = outFile.replace('.csv', '.feather') wrote = mergeBatches(args.folder,", "outFile=outFile, metaCols=['PTID', 'VISITNO', 'Global.Spec.Id', 'TESTDT', 'STIM'], filters={'STIM':['negctrl', 'TB WCL', 'BCG-Pasteur',", "(\"functions\")', default='functions') parser.add_argument('--subsets', type=str, help='Filename listing subsets for analysis.', default='/home/agartlan/gitrepo/utils/ics/sample_subsets2.csv')", "from public github.com') args = parser.parse_args() try: import parmap from", "= [] for bf in os.listdir(dataFolder): batchFolder = opj(dataFolder, 
bf)", "parseSubsets(args.subsets) features = {'sparse_functions':(extractFunctionsGBY, dict(subsets=subsets, functions=functions, mincells=5)), 'bool_functions':(extractFunctionsGBY, dict(subsets=subsets, functions=functions,", "extractionFunc, extractionKwargs = features[args.function] if args.testbatch: print('Test: processing samples from", "= 'gs_*.feather' if ncpus > 1 and _PARMAP: res =", "action='store_true', help='Only perform sample matching, to validate metadata.') parser.add_argument('--feather', action='store_true',", "batches into one CSV.') parser.add_argument('--folder', type=str, help='Data folder containing all", "'Ag85B', 'TB 10.4'], 'VISITNO':[2, 6, 7, 10, 11, 12]}, useFeather=int(args.feather),", "-c 4 -o functions_markers_slurm.txt --wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions_markers --ncpus 4", "folder containing all batch folders.', default='/fh/fast/gilbert_p/grp/hvtn602_compass/tmpdata') parser.add_argument('--function', type=str, help='Name of", "path before importing\"\"\" sys.path.append(args.utils) # from ics import extractFunctionsGBY, extractFunctionsMarkersGBY,", "--out cell_functions_22Aug2018.feather --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv\" python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function cell_functions --ncpus", "if __name__ == '__main__': import argparse parser = argparse.ArgumentParser(description='Extract features", "metadata.') parser.add_argument('--feather', action='store_true', help='Store as feather as oposed to CSV')", "mergeFeathers(res, outFile, writeCSV=1 - int(useFeather)) return outFilename def testMatching(dataFolder): out", "sparse_functions --ncpus 3 --out sparse_functions_extract_23Aug2018.csv --testbatch --testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv", "-c 4 -o functions_slurm.txt --wrap=\"python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function 
functions --ncpus 4", "--subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function sparse_functions --ncpus 3 --out sparse_functions_extract_23Aug2018.csv", "sample matching, to validate metadata.') parser.add_argument('--feather', action='store_true', help='Store as feather", "cell_functions --ncpus 4 --out cell_functions_22Aug2018.feather --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv\" python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py", "parser.add_argument('--folder', type=str, help='Data folder containing all batch folders.', default='/fh/fast/gilbert_p/grp/hvtn602_compass/tmpdata') parser.add_argument('--function',", "pandas as pd import numpy as np from os.path import", "in os.listdir(dataFolder) if os.path.isdir(opj(dataFolder, bf))] if testbatch: batchList = batchList[:1]", "metaDf.to_csv(opj(args.folder, 'metamatch_' + args.out)) print('Wrote matching metadata to %s.' %", "batch.') parser.add_argument('--testbatch', action='store_true', help='Only process twp samples from one batch.')", "parser.add_argument('--testbatch', action='store_true', help='Only process twp samples from one batch.') parser.add_argument('--matchingonly',", "--subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function bool_functions --ncpus 6 --out bool_functions_extract_05May2020.csv", "-name \\merged_tmp*.feather -type f -delete \"\"\" def mergeBatches(dataFolder, extractionFunc, extractionKwargs," ]
[ "from dataclasses import dataclass @dataclass class Meeting: name: str id:", "dataclasses import dataclass @dataclass class Meeting: name: str id: str" ]
[ "distutils.sysconfig import get_python_lib import glob # if os.path.exists('pysrc/tinygo'): # shutil.rmtree('pysrc/tinygo')", "if os.path.exists('pysrc/tinygo'): # shutil.rmtree('pysrc/tinygo') # shutil.copytree('tinygo/build/release/tinygo', 'pysrc/tinygo') release_files = []", "shutil.rmtree('pysrc/tinygo') # shutil.copytree('tinygo/build/release/tinygo', 'pysrc/tinygo') release_files = [] for root, dirs,", "= [] for root, dirs, files in os.walk(\"pysrc/tinygo\"): for f", "os.path.exists('pysrc/tinygo'): # shutil.rmtree('pysrc/tinygo') # shutil.copytree('tinygo/build/release/tinygo', 'pysrc/tinygo') release_files = [] for", "f in files: release_files.append(os.path.join(root.replace('pysrc/', ''), f)) # print(release_files) setup( name=\"gscdk\",", "Development Kit\", author='The UUOSIO Team', license=\"BSD-3-Clause\", url=\"https://github.com/uuosio/uuosio.gscdk\", packages=['gscdk'], package_dir={'gscdk': 'pysrc'},", "from distutils.core import setup from distutils.sysconfig import get_python_lib import glob", "license=\"BSD-3-Clause\", url=\"https://github.com/uuosio/uuosio.gscdk\", packages=['gscdk'], package_dir={'gscdk': 'pysrc'}, package_data={ # \"\": [\"*\"], 'gscdk':", "package_data={ # \"\": [\"*\"], 'gscdk': release_files, }, setup_requires=['wheel'] # scripts=['compiler/build/release/tinygo/bin/eosio-go'],", "files: release_files.append(os.path.join(root.replace('pysrc/', ''), f)) # print(release_files) setup( name=\"gscdk\", version=\"0.3.5\", description=\"Go", "[\"*\"], 'gscdk': release_files, }, setup_requires=['wheel'] # scripts=['compiler/build/release/tinygo/bin/eosio-go'], # install_requires=[ #", "Team', license=\"BSD-3-Clause\", url=\"https://github.com/uuosio/uuosio.gscdk\", packages=['gscdk'], package_dir={'gscdk': 'pysrc'}, package_data={ # \"\": [\"*\"],", "}, setup_requires=['wheel'] # scripts=['compiler/build/release/tinygo/bin/eosio-go'], # install_requires=[ # ], # include_package_data=True", "dirs, files in 
os.walk(\"pysrc/tinygo\"): for f in files: release_files.append(os.path.join(root.replace('pysrc/', ''),", "skbuild import setup from distutils.core import setup from distutils.sysconfig import", "'gscdk': release_files, }, setup_requires=['wheel'] # scripts=['compiler/build/release/tinygo/bin/eosio-go'], # install_requires=[ # ],", "# \"\": [\"*\"], 'gscdk': release_files, }, setup_requires=['wheel'] # scripts=['compiler/build/release/tinygo/bin/eosio-go'], #", "in os.walk(\"pysrc/tinygo\"): for f in files: release_files.append(os.path.join(root.replace('pysrc/', ''), f)) #", "release_files.append(os.path.join(root.replace('pysrc/', ''), f)) # print(release_files) setup( name=\"gscdk\", version=\"0.3.5\", description=\"Go Smart", "os import shutil import setuptools # from skbuild import setup", "'pysrc/tinygo') release_files = [] for root, dirs, files in os.walk(\"pysrc/tinygo\"):", "import os import shutil import setuptools # from skbuild import", "\"\": [\"*\"], 'gscdk': release_files, }, setup_requires=['wheel'] # scripts=['compiler/build/release/tinygo/bin/eosio-go'], # install_requires=[", "files in os.walk(\"pysrc/tinygo\"): for f in files: release_files.append(os.path.join(root.replace('pysrc/', ''), f))", "release_files = [] for root, dirs, files in os.walk(\"pysrc/tinygo\"): for", "Smart Contract Development Kit\", author='The UUOSIO Team', license=\"BSD-3-Clause\", url=\"https://github.com/uuosio/uuosio.gscdk\", packages=['gscdk'],", "'pysrc'}, package_data={ # \"\": [\"*\"], 'gscdk': release_files, }, setup_requires=['wheel'] #", "version=\"0.3.5\", description=\"Go Smart Contract Development Kit\", author='The UUOSIO Team', license=\"BSD-3-Clause\",", "release_files, }, setup_requires=['wheel'] # scripts=['compiler/build/release/tinygo/bin/eosio-go'], # install_requires=[ # ], #", "# if os.path.exists('pysrc/tinygo'): # shutil.rmtree('pysrc/tinygo') # shutil.copytree('tinygo/build/release/tinygo', 'pysrc/tinygo') release_files =", "for f in files: 
release_files.append(os.path.join(root.replace('pysrc/', ''), f)) # print(release_files) setup(", "from skbuild import setup from distutils.core import setup from distutils.sysconfig", "in files: release_files.append(os.path.join(root.replace('pysrc/', ''), f)) # print(release_files) setup( name=\"gscdk\", version=\"0.3.5\",", "# print(release_files) setup( name=\"gscdk\", version=\"0.3.5\", description=\"Go Smart Contract Development Kit\",", "import setuptools # from skbuild import setup from distutils.core import", "# shutil.copytree('tinygo/build/release/tinygo', 'pysrc/tinygo') release_files = [] for root, dirs, files", "root, dirs, files in os.walk(\"pysrc/tinygo\"): for f in files: release_files.append(os.path.join(root.replace('pysrc/',", "get_python_lib import glob # if os.path.exists('pysrc/tinygo'): # shutil.rmtree('pysrc/tinygo') # shutil.copytree('tinygo/build/release/tinygo',", "for root, dirs, files in os.walk(\"pysrc/tinygo\"): for f in files:", "name=\"gscdk\", version=\"0.3.5\", description=\"Go Smart Contract Development Kit\", author='The UUOSIO Team',", "description=\"Go Smart Contract Development Kit\", author='The UUOSIO Team', license=\"BSD-3-Clause\", url=\"https://github.com/uuosio/uuosio.gscdk\",", "Contract Development Kit\", author='The UUOSIO Team', license=\"BSD-3-Clause\", url=\"https://github.com/uuosio/uuosio.gscdk\", packages=['gscdk'], package_dir={'gscdk':", "os.walk(\"pysrc/tinygo\"): for f in files: release_files.append(os.path.join(root.replace('pysrc/', ''), f)) # print(release_files)", "Kit\", author='The UUOSIO Team', license=\"BSD-3-Clause\", url=\"https://github.com/uuosio/uuosio.gscdk\", packages=['gscdk'], package_dir={'gscdk': 'pysrc'}, package_data={", "print(release_files) setup( name=\"gscdk\", version=\"0.3.5\", description=\"Go Smart Contract Development Kit\", author='The", "import setup from distutils.core import setup from distutils.sysconfig import get_python_lib", "import glob # if 
os.path.exists('pysrc/tinygo'): # shutil.rmtree('pysrc/tinygo') # shutil.copytree('tinygo/build/release/tinygo', 'pysrc/tinygo')", "packages=['gscdk'], package_dir={'gscdk': 'pysrc'}, package_data={ # \"\": [\"*\"], 'gscdk': release_files, },", "setuptools # from skbuild import setup from distutils.core import setup", "[] for root, dirs, files in os.walk(\"pysrc/tinygo\"): for f in", "<filename>setup.py<gh_stars>1-10 import os import shutil import setuptools # from skbuild", "setup from distutils.core import setup from distutils.sysconfig import get_python_lib import", "# from skbuild import setup from distutils.core import setup from", "setup_requires=['wheel'] # scripts=['compiler/build/release/tinygo/bin/eosio-go'], # install_requires=[ # ], # include_package_data=True )", "import setup from distutils.sysconfig import get_python_lib import glob # if", "import shutil import setuptools # from skbuild import setup from", "author='The UUOSIO Team', license=\"BSD-3-Clause\", url=\"https://github.com/uuosio/uuosio.gscdk\", packages=['gscdk'], package_dir={'gscdk': 'pysrc'}, package_data={ #", "f)) # print(release_files) setup( name=\"gscdk\", version=\"0.3.5\", description=\"Go Smart Contract Development", "distutils.core import setup from distutils.sysconfig import get_python_lib import glob #", "setup from distutils.sysconfig import get_python_lib import glob # if os.path.exists('pysrc/tinygo'):", "package_dir={'gscdk': 'pysrc'}, package_data={ # \"\": [\"*\"], 'gscdk': release_files, }, setup_requires=['wheel']", "# shutil.rmtree('pysrc/tinygo') # shutil.copytree('tinygo/build/release/tinygo', 'pysrc/tinygo') release_files = [] for root,", "shutil.copytree('tinygo/build/release/tinygo', 'pysrc/tinygo') release_files = [] for root, dirs, files in", "''), f)) # print(release_files) setup( name=\"gscdk\", version=\"0.3.5\", description=\"Go Smart Contract", "UUOSIO Team', license=\"BSD-3-Clause\", url=\"https://github.com/uuosio/uuosio.gscdk\", packages=['gscdk'], 
package_dir={'gscdk': 'pysrc'}, package_data={ # \"\":", "glob # if os.path.exists('pysrc/tinygo'): # shutil.rmtree('pysrc/tinygo') # shutil.copytree('tinygo/build/release/tinygo', 'pysrc/tinygo') release_files", "shutil import setuptools # from skbuild import setup from distutils.core", "import get_python_lib import glob # if os.path.exists('pysrc/tinygo'): # shutil.rmtree('pysrc/tinygo') #", "url=\"https://github.com/uuosio/uuosio.gscdk\", packages=['gscdk'], package_dir={'gscdk': 'pysrc'}, package_data={ # \"\": [\"*\"], 'gscdk': release_files,", "setup( name=\"gscdk\", version=\"0.3.5\", description=\"Go Smart Contract Development Kit\", author='The UUOSIO", "from distutils.sysconfig import get_python_lib import glob # if os.path.exists('pysrc/tinygo'): #" ]
[ "__init__(self, func: Callable, priority_for_creation: int = 99, priority_for_removal: int =", "DataCreatorAction: def __init__(self, func: Callable, priority_for_creation: int = 99, priority_for_removal:", "99): self.func = func self.priority_for_creation = priority_for_creation self.priority_for_removal = priority_for_removal", "priority_for_creation: int = 99, priority_for_removal: int = 99): self.func =", "int = 99, priority_for_removal: int = 99): self.func = func", "typing import Callable class DataCreatorAction: def __init__(self, func: Callable, priority_for_creation:", "import Callable class DataCreatorAction: def __init__(self, func: Callable, priority_for_creation: int", "from typing import Callable class DataCreatorAction: def __init__(self, func: Callable,", "int = 99): self.func = func self.priority_for_creation = priority_for_creation self.priority_for_removal", "def __init__(self, func: Callable, priority_for_creation: int = 99, priority_for_removal: int", "Callable class DataCreatorAction: def __init__(self, func: Callable, priority_for_creation: int =", "func: Callable, priority_for_creation: int = 99, priority_for_removal: int = 99):", "class DataCreatorAction: def __init__(self, func: Callable, priority_for_creation: int = 99,", "99, priority_for_removal: int = 99): self.func = func self.priority_for_creation =", "priority_for_removal: int = 99): self.func = func self.priority_for_creation = priority_for_creation", "= 99, priority_for_removal: int = 99): self.func = func self.priority_for_creation", "Callable, priority_for_creation: int = 99, priority_for_removal: int = 99): self.func", "= 99): self.func = func self.priority_for_creation = priority_for_creation self.priority_for_removal =" ]
[ "[] for i in range(h): single_row = list( map(int, input().split()", ") min_of_each_row = np.min( row_list, axis = 1) max_of_min =", "np_row ) min_of_each_row = np.min( row_list, axis = 1) max_of_min", "row_list.append( np_row ) min_of_each_row = np.min( row_list, axis = 1)", "i in range(h): single_row = list( map(int, input().split() ) )", "= np.min( row_list, axis = 1) max_of_min = np.max( min_of_each_row", "__name__ == '__main__': h, w = map( int, input().split() )", "if __name__ == '__main__': h, w = map( int, input().split()", ") ) np_row = np.array( single_row ) row_list.append( np_row )", "single_row ) row_list.append( np_row ) min_of_each_row = np.min( row_list, axis", "row_list = [] for i in range(h): single_row = list(", "np if __name__ == '__main__': h, w = map( int,", "np.min( row_list, axis = 1) max_of_min = np.max( min_of_each_row )", "= 1) max_of_min = np.max( min_of_each_row ) print( max_of_min )", "range(h): single_row = list( map(int, input().split() ) ) np_row =", "row_list, axis = 1) max_of_min = np.max( min_of_each_row ) print(", "list( map(int, input().split() ) ) np_row = np.array( single_row )", "map( int, input().split() ) row_list = [] for i in", ") np_row = np.array( single_row ) row_list.append( np_row ) min_of_each_row", "single_row = list( map(int, input().split() ) ) np_row = np.array(", "np.array( single_row ) row_list.append( np_row ) min_of_each_row = np.min( row_list,", "h, w = map( int, input().split() ) row_list = []", "= [] for i in range(h): single_row = list( map(int,", "input().split() ) row_list = [] for i in range(h): single_row", "int, input().split() ) row_list = [] for i in range(h):", ") row_list = [] for i in range(h): single_row =", "in range(h): single_row = list( map(int, input().split() ) ) np_row", "== '__main__': h, w = map( int, input().split() ) row_list", "= list( map(int, input().split() ) ) np_row = np.array( single_row", ") row_list.append( np_row ) min_of_each_row = np.min( row_list, axis =", "as np if 
__name__ == '__main__': h, w = map(", "'__main__': h, w = map( int, input().split() ) row_list =", "numpy as np if __name__ == '__main__': h, w =", "for i in range(h): single_row = list( map(int, input().split() )", "w = map( int, input().split() ) row_list = [] for", "import numpy as np if __name__ == '__main__': h, w", "= np.array( single_row ) row_list.append( np_row ) min_of_each_row = np.min(", "axis = 1) max_of_min = np.max( min_of_each_row ) print( max_of_min", "map(int, input().split() ) ) np_row = np.array( single_row ) row_list.append(", "= map( int, input().split() ) row_list = [] for i", "np_row = np.array( single_row ) row_list.append( np_row ) min_of_each_row =", "min_of_each_row = np.min( row_list, axis = 1) max_of_min = np.max(", "input().split() ) ) np_row = np.array( single_row ) row_list.append( np_row" ]
[ "write em down. But first we kinda-unify the test cases.", "self.title = title self.step = None @property def allure(self): listener", "def test_baz(steppy_fixture): assert steppy_fixture \"\"\" if callable(title): return LazyInitStepContext(self, title.__name__)(title)", "doc, environment, TestCase) report.__dict__.update(_allure_result=pickle.dumps((parent.nodeid, parent.module.__name__, parent.module.__doc__ or '', self.environment, self.test)))", "hasattr(listener, 'stack'): return listener class AllureHelper(object): \"\"\" This object holds", "teardown => broken self._fill_case(report, call, status, Status.BROKEN) else: # mark", "given features. \"\"\" return self.label(Label.FEATURE, *features) def story(self, *stories): \"\"\"", "metavar=\"DIR\", default=None, help=\"Generate Allure report in the specified directory (may", "in the makereport hook. # it is here to cope", "if listener has `stack` we are inside a test #", "single line to report. \"\"\" if self._allurelistener: with self.step(text): pass", "limit whatsoever :raises ArgumentTypeError: if `legal_values` are given and there", "{} values: {}, only [{}] are allowed'.format(name, ', '.join(atoms -", "class LazyInitStepContext(StepContext): \"\"\" This is a step context used for", "TestCase(name='.'.join(mangle_testnames([x.name for x in parent_down_from_module(item)])), description=description, start=now(), attachments=[], labels=labels_of(item), status=None,", "our status here if not report.passed: if self.test.status not in", "if self._allurelistener: self._allurelistener.attach(name, contents, type) def label(self, name, *value): \"\"\"", "self.allure_helper = allure_helper self.title = title self.step = None @property", "called only if `setup` passes. See :py:func:`_pytest.runner.runtestprotocol` for proofs /", "= {} self.test = None # FIXME: that flag makes", "legal_values=set(severities)), help=\"\"\"Comma-separated list of severity names. 
Tests only with these", "test \"\"\" if self.test: self.test.description = description def start_step(self, name):", "description): \"\"\" Sets description for the test \"\"\" if self._allurelistener:", "important data :param report: py.test's `TestReport` :param call: py.test's `CallInfo`", "(that still possess important info) `setup` and `teardown` are always", "= Status.CANCELED self.fails.append(CollectFail(name=mangle_testnames(report.nodeid.split(\"::\"))[-1], status=status, message=get_exception_message(None, None, report), trace=report.longrepr)) def pytest_sessionfinish(self):", "pytest_configure(config): reportdir = config.option.allurereportdir if reportdir: # we actually record", "*issues): \"\"\" Mark test ``issues`` from inside. \"\"\" if self._allurelistener:", "``pytest.allure`` namespace, like ``pytest.allure.attach`` \"\"\" def __init__(self): self._allurelistener = None", "severity names. Tests only with these severities will be run.", "if self.test: self.test.labels.extend([TestLabel(name=Label.ISSUE, value=issue) for issue in issues]) def description(self,", "self.stack = [self.test] yield self.test = None self.stack = []", "default={}, type=label_type(name=Label.STORY), help=\"\"\"Comma-separated list of story names. Run tests that", "when to actually report things. pytest runs this (naturally) three", "and then gets broken -- to cope with the xdist's", "for s in self.suites.values(): if s.tests: # nobody likes empty", "and self.config.getvalue(\"maxfail\") @pytest.mark.hookwrapper def pytest_runtest_protocol(self, item, nextitem): try: # for", "node do all the important stuff config.pluginmanager.register(AllureAgregatingListener(allure_impl, config)) config.pluginmanager.register(AllureCollectionListener(allure_impl)) class", "a `AllureAggegatingListener`-understood way \"\"\" parent = parent_module(item) # we attach", "= Failure(message=(short_message + '...' 
* (len(skip_message) > trim_msg_len)), trace=status ==", "'.join(legal_values))) return set((name, v) for v in atoms) return a_label_type", "'.join(atoms - legal_values), ', '.join(legal_values))) return set((name, v) for v", "= config self.environment = {} self.test = None # FIXME:", "self._allurelistener def attach(self, name, contents, type=AttachmentType.TEXT): # @ReservedAssignment \"\"\" Attaches", "to come from AllureTestListener -- and the have ._id field", "parser.getgroup(\"general\").addoption('--allure_severities', action=\"store\", dest=\"allureseverities\", metavar=\"SEVERITIES_SET\", default={}, type=label_type(name=Label.SEVERITY, legal_values=set(severities)), help=\"\"\"Comma-separated list of", "\"\"\" report = (yield).get_result() status = self.config.hook.pytest_report_teststatus(report=report) status = status", "False @pytest.allure.step('make test data') def make_test_data_bar(): raise ValueError('No data today')", "nextitem): try: # for common items description = item.function.__doc__ except", "i should file an issue to xdist if self._magicaldoublereport: #", "first we kinda-unify the test cases. We expect cases to", "# FIXME: this breaks encapsulation a lot if hasattr(listener, 'stack'):", "report (it wont break, but it will duplicate cases in", "dir(Severity) and not attr.startswith('_'): return self.severity(getattr(Severity, attr)) else: raise AttributeError", "story(self, *stories): \"\"\" A decorator factory that returns ``pytest.mark`` for", "attr)) else: raise AttributeError MASTER_HELPER = AllureHelper() def pytest_namespace(): return", "hooks to generate reports for common tests. 
\"\"\" def __init__(self,", "to collect.') for fail in self.fails: self.impl.start_case(name=fail.name.split(\".\")[-1]) self.impl.stop_case(status=fail.status, message=fail.message, trace=fail.trace)", "has `stack` we are inside a test # record steps", "def report_case(self, item, report): \"\"\" Adds `self.test` to the `report`", "TODO: think about that once again self.test.status = Status.BROKEN #", "EXACTLY ONE test report (it wont break, but it will", "test cases. We expect cases to come from AllureTestListener --", "= pickle.loads(report._allure_result) report._allure_result = None # so actual pickled data", "refined_tests = [] for t in s.tests[::-1]: if t.id not", "at least one of the specified story labels.\"\"\") def pytest_configure(config):", "track of what has already happened via self.test.status) Expected behavior", "report -- which is bad. So we work hard to", "via self.test.status) Expected behavior is: FAILED when call fails and", "returns ``pytest.mark`` for a given issues. 
\"\"\" return self.label(Label.ISSUE, *issues)", "self._allurelistener: self._allurelistener.environment.update(env_dict) @property def attach_type(self): return AttachmentType @property def severity_level(self):", "only [{}] are allowed'.format(name, ', '.join(atoms - legal_values), ', '.join(legal_values)))", "s.tests) known_ids = set() refined_tests = [] for t in", "trace=report.longrepr or hasattr(report, 'wasxfail') and report.wasxfail) elif status in SKIPPED_STATUSES:", "return a_label_type parser.getgroup(\"general\").addoption('--allure_severities', action=\"store\", dest=\"allureseverities\", metavar=\"SEVERITIES_SET\", default={}, type=label_type(name=Label.SEVERITY, legal_values=set(severities)), help=\"\"\"Comma-separated", "report): if not report.passed: if report.failed: status = Status.BROKEN else:", "is bad attachment.source = self.impl._save_attach(attachment.source, attachment.type) attachment.type = attachment.type.mime_type def", "self.impl.store_environment() def write_attach(self, attachment): \"\"\" Writes attachment object from the", "we kinda-unify the test cases. We expect cases to come", "listener class AllureHelper(object): \"\"\" This object holds various utility methods", "text_type from allure.common import AllureImpl, StepContext from allure.constants import Status,", "step at the top of ``self.stack`` \"\"\" step = self.stack.pop()", "title='Collection phase', description='This is the tests collection phase. Failures are", "test fails and then gets broken -- to cope with", "testitem -- report our status here if not report.passed: if", "if any limit whatsoever :raises ArgumentTypeError: if `legal_values` are given", "action=\"store\", dest=\"allurestories\", metavar=\"STORIES_SET\", default={}, type=label_type(name=Label.STORY), help=\"\"\"Comma-separated list of story names.", "._id field to manifest their identity. Of all the test", "cope with the xdist's -x behavior we have to have", "steps. 
TODO: when moving to python 3, rework this with", "for x in parent_down_from_module(item)])), description=description, start=now(), attachments=[], labels=labels_of(item), status=None, steps=[],", "garbage-collected, see https://github.com/allure-framework/allure-python/issues/98 self.impl.environment.update(environment) for a in testcase.iter_attachments(): self.write_attach(a) self.suites.setdefault(module_id,", "here only to work around xdist's stupid -x thing when", "= description def start_step(self, name): \"\"\" Starts an new :py:class:`allure.structure.TestStep`", "set().union(item.config.option.allurefeatures, item.config.option.allurestories, item.config.option.allureseverities) if arg_labels and not item_labels & arg_labels:", "that have at least one of the specified feature labels.\"\"\")", "import parent_module, parent_down_from_module, labels_of, \\ all_of, get_exception_message, now, mangle_testnames from", "value=issue) for issue in issues]) def description(self, description): \"\"\" Sets", "Writes attachment object from the `AllureTestListener` to the FS, fixing", "step = TestStep(name=name, title=name, start=now(), attachments=[], steps=[]) self.stack[-1].steps.append(step) self.stack.append(step) return", "xfailed \"\"\" report = (yield).get_result() status = self.config.hook.pytest_report_teststatus(report=report) status =", "= None # FIXME: that flag makes us pre-report failures", "return LazyInitStepContext(self, title.__name__)(title) else: return LazyInitStepContext(self, title) def single_step(self, text):", "= getattr(pytest.mark, '%s.%s' % (Label.DEFAULT, name)) return allure_label(*value) def severity(self,", "self.test.stop = now() self.test.status = status if status in FAILED_STATUSES:", "s in self.suites.values(): if s.tests: # nobody likes empty suites", "= testlistener config.pluginmanager.register(testlistener) if not hasattr(config, 'slaveinput'): # on xdist-master", "', '.join(severities)) 
parser.getgroup(\"general\").addoption('--allure_features', action=\"store\", dest=\"allurefeatures\", metavar=\"FEATURES_SET\", default={}, type=label_type(name=Label.FEATURE), help=\"\"\"Comma-separated list", "thing. The per-test reports are handled by `AllureAgregatingListener` at the", "broken (and call may be anything) PENDING if skipped and", "in known_ids: known_ids.add(t.id) refined_tests.append(t) s.tests = refined_tests[::-1] with self.impl._reportfile('%s-testsuite.xml' %", "pytest_addoption(parser): parser.getgroup(\"reporting\").addoption('--alluredir', action=\"store\", dest=\"allurereportdir\", metavar=\"DIR\", default=None, help=\"Generate Allure report in", "specified story labels.\"\"\") def pytest_configure(config): reportdir = config.option.allurereportdir if reportdir:", "with report.when being: setup <--- fixtures are to be initialized", "`AllureAgregatingListener.write_attach` \"\"\" attach = Attach(source=contents, # we later re-save those,", "given label. \"\"\" allure_label = getattr(pytest.mark, '%s.%s' % (Label.DEFAULT, name))", "done and have all the results in `self.suites` Lets write", "xdist's begavior regarding -x. # see self.pytest_runtest_makereport and AllureAgregatingListener.pytest_sessionfinish self._magicaldoublereport", "for (name, contents) in dict(report.sections).items()] self.test.stop = now() self.test.status =", "None # so actual pickled data is garbage-collected, see https://github.com/allure-framework/allure-python/issues/98", "hasattr(config, 'slaveinput'): # on xdist-master node do all the important", "doctests that has no `function` attribute description = item.reportinfo()[2] self.test", "\"\"\" for s in self.suites.values(): if s.tests: # nobody likes", "have at least one of the specified feature labels.\"\"\") parser.getgroup(\"general\").addoption('--allure_stories',", "factory that returns ``pytest.mark`` for a given issues. \"\"\" return", "that returns ``pytest.mark`` for a given allure ``level``. 
\"\"\" return", "a given label. \"\"\" allure_label = getattr(pytest.mark, '%s.%s' % (Label.DEFAULT,", "these severities will be run. Possible values are:%s.\"\"\" % ',", "= AllureHelper() def pytest_namespace(): return {'allure': MASTER_HELPER} class AllureAgregatingListener(object): \"\"\"", "re-save those, oh my... title=title, type=attach_type) self.stack[-1].attachments.append(attach) def dynamic_issue(self, *issues):", "`report` in a `AllureAggegatingListener`-understood way \"\"\" parent = parent_module(item) #", "failures if there were any. \"\"\" if self.fails: self.impl.start_suite(name='test_collection_phase', title='Collection", "returns ``pytest.mark`` for a given stories. \"\"\" return self.label(Label.STORY, *stories)", "type (for future TestLabel things) :param legal_values: a `set` of", "config.pluginmanager.register(testlistener) if not hasattr(config, 'slaveinput'): # on xdist-master node do", "whatsoever :raises ArgumentTypeError: if `legal_values` are given and there are", "def description(self, description): \"\"\" Sets description for the test \"\"\"", "return self.severity(getattr(Severity, attr)) else: raise AttributeError MASTER_HELPER = AllureHelper() def", "from allure.utils import parent_module, parent_down_from_module, labels_of, \\ all_of, get_exception_message, now,", "if status in FAILED_STATUSES: self.test.failure = Failure(message=get_exception_message(call.excinfo, pyteststatus, report), trace=report.longrepr", "pytest_runtest_logreport(self, report): if hasattr(report, '_allure_result'): module_id, module_name, module_doc, environment, testcase", "flag makes us pre-report failures in the makereport hook. 
#", "it broken so, well, someone has idea of teardown failure", "if a test isn't marked as \"unreported\" or it has", "def __init__(self, impl, config): self.impl = impl # module's nodeid", "1 def test_baz(steppy_fixture): assert steppy_fixture \"\"\" if callable(title): return LazyInitStepContext(self,", "Possible values are:%s.\"\"\" % ', '.join(severities)) parser.getgroup(\"general\").addoption('--allure_features', action=\"store\", dest=\"allurefeatures\", metavar=\"FEATURES_SET\",", "to manifest their identity. Of all the test cases in", "pyteststatus: the failed/xfailed/xpassed thing :param status: a :py:class:`allure.constants.Status` entry \"\"\"", "from allure.constants import Status, AttachmentType, Severity, \\ FAILED_STATUSES, Label, SKIPPED_STATUSES", "in-test data and for attaching it to the test report", "either setup OR teardown are broken (and call may be", "type=label_type(name=Label.STORY), help=\"\"\"Comma-separated list of story names. Run tests that have", "now, mangle_testnames from allure.structure import TestCase, TestStep, Attach, TestSuite, Failure,", "have to have tests even at CALL failures) TODO: do", "return self.label(Label.STORY, *stories) def issue(self, *issues): \"\"\" A decorator factory", "things. pytest runs this (naturally) three times -- with report.when", "report.longrepr[2] or report.wasxfail trim_msg_len = 89 short_message = skip_message.split('\\n')[0][:trim_msg_len] #", "def make_test_data_bar(): raise ValueError('No data today') def test_bar(): assert make_test_data_bar()", "fails and then gets broken -- to cope with the", "name): \"\"\" Starts an new :py:class:`allure.structure.TestStep` with given ``name``, pushes", "step context used for decorated steps. It provides a possibility", "we are inside a test # record steps only when", "the FS, fixing it fields :param attachment: a :py:class:`allure.structure.Attach` object", "with xdist's begavior regarding -x. 
# see self.pytest_runtest_makereport and AllureAgregatingListener.pytest_sessionfinish", "module_name, module_doc, environment, testcase = pickle.loads(report._allure_result) report._allure_result = None #", "and xfailed SKIPPED if skipped and not xfailed \"\"\" report", "ONE test report (it wont break, but it will duplicate", "self.impl.start_suite(name='test_collection_phase', title='Collection phase', description='This is the tests collection phase. Failures", "runs this (naturally) three times -- with report.when being: setup", "self.report_case(item, report) def pytest_runtest_setup(item): item_labels = set((l.name, l.value) for l", "ArgumentTypeError: if `legal_values` are given and there are values that", "item, call): \"\"\" Decides when to actually report things. pytest", "v) for v in atoms) return a_label_type parser.getgroup(\"general\").addoption('--allure_severities', action=\"store\", dest=\"allureseverities\",", "\"\"\" Stops the step at the top of ``self.stack`` \"\"\"", "and report.longrepr[2] or report.wasxfail trim_msg_len = 89 short_message = skip_message.split('\\n')[0][:trim_msg_len]", "attach(self, title, contents, attach_type): \"\"\" Store attachment object in current", "we work hard to decide exact moment when we call", "a current context with given ``name`` and ``type``. \"\"\" if", "is here only to work around xdist's stupid -x thing", "item.get_marker(\"unreported\") or self.test.status in FAILED_STATUSES: self.report_case(item, report) def pytest_runtest_setup(item): item_labels", "in issues]) def description(self, description): \"\"\" Sets description for the", "self._magicaldoublereport: # to minimize ze impact self.report_case(item, report) elif report.skipped:", "that is bad attachment.source = self.impl._save_attach(attachment.source, attachment.type) attachment.type = attachment.type.mime_type", "a step context used for decorated steps. 
It provides a", "status in SKIPPED_STATUSES: skip_message = type(report.longrepr) == tuple and report.longrepr[2]", "self._fill_case(report, call, status, Status.BROKEN) else: # mark it broken so,", "values are:%s.\"\"\" % ', '.join(severities)) parser.getgroup(\"general\").addoption('--allure_features', action=\"store\", dest=\"allurefeatures\", metavar=\"FEATURES_SET\", default={},", "the top of ``self.stack`` \"\"\" step = self.stack.pop() step.stop =", "think about that once again self.test.status = Status.BROKEN # if", "sent MORE THAN ONE TIME (namely, if the test fails", "by `AllureAgregatingListener` at the `pytest_runtest_logreport` hook. \"\"\" def __init__(self, config):", "stupid -x thing when in exits BEFORE THE TEARDOWN test", "decorator factory that returns ``pytest.mark`` for a given testcases. \"\"\"", "else: status = Status.CANCELED self.fails.append(CollectFail(name=mangle_testnames(report.nodeid.split(\"::\"))[-1], status=status, message=get_exception_message(None, None, report), trace=report.longrepr))", "# FIXME: this is here only to work around xdist's", "now() self.test.status = status if status in FAILED_STATUSES: self.test.failure =", "phase', description='This is the tests collection phase. Failures are modules", "def __init__(self, impl): self.impl = impl self.fails = [] def", "def label_type(name, legal_values=set()): \"\"\" argparse-type factory for labelish things. processed", "= allure_helper self.title = title self.step = None @property def", "\"\"\" Per-test listener. 
Is responsible for recording in-test data and", "self.severity(getattr(Severity, attr)) else: raise AttributeError MASTER_HELPER = AllureHelper() def pytest_namespace():", "< legal_values: raise argparse.ArgumentTypeError('Illegal {} values: {}, only [{}] are", "items description = item.function.__doc__ except AttributeError: # for doctests that", "config): self.impl = impl # module's nodeid => TestSuite object", "if reportdir: # we actually record something allure_impl = AllureImpl(reportdir)", "hooks to generate reports for modules that failed to collect.", "\"\"\" Sets description for the test \"\"\" if self.test: self.test.description", "BEFORE THE TEARDOWN test log. Meh, i should file an", "def stop_step(self): \"\"\" Stops the step at the top of", "uuid.uuid4()) as f: self.impl._write_xml(f, s) self.impl.store_environment() def write_attach(self, attachment): \"\"\"", "work hard to decide exact moment when we call `_stop_case`", "py.test's `TestReport` :param call: py.test's `CallInfo` :param pyteststatus: the failed/xfailed/xpassed", "\"\"\" if self.fails: self.impl.start_suite(name='test_collection_phase', title='Collection phase', description='This is the tests", "the main code has finished teardown <--- tears down fixtures", "= (yield).get_result() status = self.config.hook.pytest_report_teststatus(report=report) status = status and status[0]", "reports for common tests. \"\"\" def __init__(self, impl, config): self.impl", "attachment object from the `AllureTestListener` to the FS, fixing it", "= Status.BROKEN else: status = Status.CANCELED self.fails.append(CollectFail(name=mangle_testnames(report.nodeid.split(\"::\"))[-1], status=status, message=get_exception_message(None, None,", "that \"\"\" def a_label_type(string): atoms = set(string.split(',')) if legal_values and", "severity) def feature(self, *features): \"\"\" A decorator factory that returns", "there were any. 
\"\"\" if self.fails: self.impl.start_suite(name='test_collection_phase', title='Collection phase', description='This", "in current state for later actual write in the `AllureAgregatingListener.write_attach`", "self.report_case(item, report) elif report.skipped: if hasattr(report, 'wasxfail'): self._fill_case(report, call, status,", "it has failed, add it to the report. if not", "the same pytest.allure.CRITICAL pytest.allure.severity(pytest.allure.severity_level.CRITICAL) \"\"\" if attr in dir(Severity) and", "skip_message.split('\\n')[0][:trim_msg_len] # FIXME: see pytest.runner.pytest_runtest_makereport self.test.failure = Failure(message=(short_message + '...'", "in the `AllureAgregatingListener.write_attach` \"\"\" attach = Attach(source=contents, # we later", "self.stack.append(step) return step def stop_step(self): \"\"\" Stops the step at", "FIXME: see pytest.runner.pytest_runtest_makereport self.test.failure = Failure(message=(short_message + '...' * (len(skip_message)", "The per-test reports are handled by `AllureAgregatingListener` at the `pytest_runtest_logreport`", "if skipped and xfailed SKIPPED if skipped and not xfailed", "Writes single line to report. \"\"\" if self._allurelistener: with self.step(text):", "are always called, but `call` is called only if `setup`", "`set` of values that are legal for this label, if", "for severity:: # these are the same pytest.allure.CRITICAL pytest.allure.severity(pytest.allure.severity_level.CRITICAL) \"\"\"", "if not hasattr(config, 'slaveinput'): # on xdist-master node do all", "testsuite with collection failures if there were any. \"\"\" if", "\"\"\" if self._allurelistener: with self.step(text): pass def environment(self, **env_dict): if", "story names. 
Run tests that have at least one of", "isn't marked as \"unreported\" or it has failed, add it", "self.test = None self.stack = [] def attach(self, title, contents,", "-x thing when in exits BEFORE THE TEARDOWN test log.", "# for doctests that has no `function` attribute description =", "_fill_case(self, report, call, pyteststatus, status): \"\"\" Finalizes with important data", "efficient way \"\"\" for s in self.suites.values(): if s.tests: #", "attachment.type) attachment.type = attachment.type.mime_type def pytest_runtest_logreport(self, report): if hasattr(report, '_allure_result'):", "= status and status[0] if report.when == 'call': if report.passed:", "config.pluginmanager.register(AllureAgregatingListener(allure_impl, config)) config.pluginmanager.register(AllureCollectionListener(allure_impl)) class AllureTestListener(object): \"\"\" Per-test listener. Is responsible", "[{}] are allowed'.format(name, ', '.join(atoms - legal_values), ', '.join(legal_values))) return", "in testcase.iter_attachments(): self.write_attach(a) self.suites.setdefault(module_id, TestSuite(name=module_name, description=module_doc, tests=[], labels=[], start=testcase.start, #", "import pytest def test_foo(): with pytest.allure.step('mystep'): assert False @pytest.allure.step('make test", "except AttributeError: # for doctests that has no `function` attribute", "to the ``self.stack`` and returns the step. \"\"\" step =", "from FSM (we keep track of what has already happened", "given allure ``level``. \"\"\" return self.label(Label.SEVERITY, severity) def feature(self, *features):", "thing when in exits BEFORE THE TEARDOWN test log. 
Meh,", "=> TestSuite object self.suites = {} def pytest_sessionfinish(self): \"\"\" We", "hasattr(report, 'wasxfail'): self._fill_case(report, call, status, Status.PENDING) else: self._fill_case(report, call, status,", "return allure_label(*value) def severity(self, severity): \"\"\" A decorator factory that", "def dynamic_issue(self, *issues): \"\"\" Attaches ``issues`` to the current active", "\"\"\" if callable(title): return LazyInitStepContext(self, title.__name__)(title) else: return LazyInitStepContext(self, title)", "may be anything) PENDING if skipped and xfailed SKIPPED if", "even at CALL failures) TODO: do it in a better,", "Is responsible for recording in-test data and for attaching it", "and others OK BROKEN when either setup OR teardown are", "status, Status.CANCELED) elif report.when == 'teardown': # as teardown is", "of values that are legal for this label, if any", "self._allurelistener.environment.update(env_dict) @property def attach_type(self): return AttachmentType @property def severity_level(self): return", "Stops the step at the top of ``self.stack`` \"\"\" step", "values: {}, only [{}] are allowed'.format(name, ', '.join(atoms - legal_values),", "def dynamic_issue(self, *issues): \"\"\" Mark test ``issues`` from inside. \"\"\"", "class AllureTestListener(object): \"\"\" Per-test listener. Is responsible for recording in-test", "of story names. 
Run tests that have at least one", "code has finished teardown <--- tears down fixtures (that still", "impl # module's nodeid => TestSuite object self.suites = {}", "__init__(self, impl, config): self.impl = impl # module's nodeid =>", "\"\"\" def a_label_type(string): atoms = set(string.split(',')) if legal_values and not", "atoms = set(string.split(',')) if legal_values and not atoms < legal_values:", "def pytest_runtest_makereport(self, item, call): \"\"\" Decides when to actually report", "data :param report: py.test's `TestReport` :param call: py.test's `CallInfo` :param", "if not report.passed: if report.failed: status = Status.BROKEN else: status", "behavior we have to have tests even at CALL failures)", "the have ._id field to manifest their identity. Of all", "leave LAST with the same ID -- becase logreport can", "set((name, v) for v in atoms) return a_label_type parser.getgroup(\"general\").addoption('--allure_severities', action=\"store\",", "allure_label(*value) def severity(self, severity): \"\"\" A decorator factory that returns", "*issues) def dynamic_issue(self, *issues): \"\"\" Mark test ``issues`` from inside.", "status=None, steps=[], id=str(uuid.uuid4())) # for later resolution in AllureAgregatingListener.pytest_sessionfinish self.stack", "def attach_type(self): return AttachmentType @property def severity_level(self): return Severity def", "def __getattr__(self, attr): \"\"\" Provides fancy shortcuts for severity:: #", "name, contents, type=AttachmentType.TEXT): # @ReservedAssignment \"\"\" Attaches ``contents`` to a", "not report.passed: if self.test.status not in FAILED_STATUSES: # if test", "decorator factory that returns ``pytest.mark`` for a given label. 
\"\"\"", "Expected behavior is: FAILED when call fails and others OK", "can be sent MORE THAN ONE TIME (namely, if the", "def __init__(self, config): self.config = config self.environment = {} self.test", "status): \"\"\" Finalizes with important data :param report: py.test's `TestReport`", "failures) TODO: do it in a better, more efficient way", "l.value) for l in labels_of(item)) # see label_type arg_labels =", "and for attaching it to the test report thing. The", "so, well, someone has idea of teardown failure # still,", "type) def label(self, name, *value): \"\"\" A decorator factory that", "to create step decorators, being initiated before pytest_configure, when no", "self._allurelistener.dynamic_issue(*issues) def description(self, description): \"\"\" Sets description for the test", "== 'setup': # setup / teardown if report.failed: self._fill_case(report, call,", "processed value is set of tuples (name, value). :param name:", "attachment: a :py:class:`allure.structure.Attach` object \"\"\" # OMG, that is bad", "s.tests: # nobody likes empty suites s.stop = max(case.stop for", "# TODO: think about that once again self.test.status = Status.BROKEN", "but it will duplicate cases in the report -- which", "type=attach_type) self.stack[-1].attachments.append(attach) def dynamic_issue(self, *issues): \"\"\" Attaches ``issues`` to the", "(AllureAggregatingListener) expects us to send EXACTLY ONE test report (it", "config self.environment = {} self.test = None # FIXME: that", "Status.BROKEN) elif report.skipped: if hasattr(report, 'wasxfail'): self._fill_case(report, call, status, Status.PENDING)", "active case \"\"\" if self.test: self.test.labels.extend([TestLabel(name=Label.ISSUE, value=issue) for issue in", "test_baz(steppy_fixture): assert steppy_fixture \"\"\" if callable(title): return LazyInitStepContext(self, title.__name__)(title) else:", "l in sorted(arg_labels))) class LazyInitStepContext(StepContext): \"\"\" This is a step", "as teardown is always called 
for testitem -- report our", "pytest_runtest_protocol(self, item, nextitem): try: # for common items description =", "``self.stack`` and returns the step. \"\"\" step = TestStep(name=name, title=name,", "to the FS, fixing it fields :param attachment: a :py:class:`allure.structure.Attach`", "self.test.failure = Failure(message=(short_message + '...' * (len(skip_message) > trim_msg_len)), trace=status", "better, more efficient way \"\"\" for s in self.suites.values(): if", "mangle_testnames from allure.structure import TestCase, TestStep, Attach, TestSuite, Failure, TestLabel", "always called, but `call` is called only if `setup` passes.", "@ReservedAssignment \"\"\" Attaches ``contents`` to a current context with given", "# @ReservedAssignment \"\"\" Attaches ``contents`` to a current context with", "severities = [v for (_, v) in all_of(Severity)] def label_type(name,", "report our status here if not report.passed: if self.test.status not", "failed # TODO: think about that once again self.test.status =", "Usage examples:: import pytest def test_foo(): with pytest.allure.step('mystep'): assert False", "(may not exist)\") severities = [v for (_, v) in", "things) :param legal_values: a `set` of values that are legal", "Status.PASSED) elif report.failed: self._fill_case(report, call, status, Status.FAILED) # FIXME: this", "``pytest.allure.attach`` \"\"\" def __init__(self): self._allurelistener = None # FIXME: this", "else: raise AttributeError MASTER_HELPER = AllureHelper() def pytest_namespace(): return {'allure':", "setup <--- fixtures are to be initialized in this one", "if not report.passed: if self.test.status not in FAILED_STATUSES: # if", "phase. 
Failures are modules that failed to collect.') for fail", "refined_tests[::-1] with self.impl._reportfile('%s-testsuite.xml' % uuid.uuid4()) as f: self.impl._write_xml(f, s) self.impl.store_environment()", "- legal_values), ', '.join(legal_values))) return set((name, v) for v in", "known_ids: known_ids.add(t.id) refined_tests.append(t) s.tests = refined_tests[::-1] with self.impl._reportfile('%s-testsuite.xml' % uuid.uuid4())", "severity(self, severity): \"\"\" A decorator factory that returns ``pytest.mark`` for", "Status.BROKEN else: status = Status.CANCELED self.fails.append(CollectFail(name=mangle_testnames(report.nodeid.split(\"::\"))[-1], status=status, message=get_exception_message(None, None, report),", "status in FAILED_STATUSES: self.test.failure = Failure(message=get_exception_message(call.excinfo, pyteststatus, report), trace=report.longrepr or", "that are legal for this label, if any limit whatsoever", "data today') def test_baz(): assert make_test_data_baz() @pytest.fixture() @pytest.allure.step('test fixture') def", "decide exact moment when we call `_stop_case` to do that.", "\"\"\" def __init__(self, impl, config): self.impl = impl # module's", "\"\"\" if attr in dir(Severity) and not attr.startswith('_'): return self.severity(getattr(Severity,", "Allure report in the specified directory (may not exist)\") severities", "from ``pytest.allure`` namespace, like ``pytest.allure.attach`` \"\"\" def __init__(self): self._allurelistener =", ":py:class:`allure.structure.Attach` object \"\"\" # OMG, that is bad attachment.source =", "(namely, if the test fails and then gets broken --", "% (Label.DEFAULT, name)) return allure_label(*value) def severity(self, severity): \"\"\" A", "pytest.allure.step('mystep'): assert False @pytest.allure.step('make test data') def make_test_data_bar(): raise ValueError('No", "= Attach(source=contents, # we later re-save those, oh my... title=title,", "context used for decorated steps. 
It provides a possibility to", "== Status.PENDING and report.longrepr or short_message != skip_message and skip_message", "to xdist if self._magicaldoublereport: # to minimize ze impact self.report_case(item,", "This method may benefit from FSM (we keep track of", "make_test_data_baz(): raise ValueError('No data today') def test_baz(): assert make_test_data_baz() @pytest.fixture()", "if report.failed: status = Status.BROKEN else: status = Status.CANCELED self.fails.append(CollectFail(name=mangle_testnames(report.nodeid.split(\"::\"))[-1],", "short_message = skip_message.split('\\n')[0][:trim_msg_len] # FIXME: see pytest.runner.pytest_runtest_makereport self.test.failure = Failure(message=(short_message", "hasattr(report, 'wasxfail') and report.wasxfail) elif status in SKIPPED_STATUSES: skip_message =", "def step(self, title): \"\"\" A contextmanager/decorator for steps. TODO: when", "@property def attach_type(self): return AttachmentType @property def severity_level(self): return Severity", "default={}, type=label_type(name=Label.SEVERITY, legal_values=set(severities)), help=\"\"\"Comma-separated list of severity names. Tests only", "None self.stack = [] def attach(self, title, contents, attach_type): \"\"\"", "we leave LAST with the same ID -- becase logreport", "= None # FIXME: this gets injected elsewhere, like in", "legal_values: raise argparse.ArgumentTypeError('Illegal {} values: {}, only [{}] are allowed'.format(name,", "This object holds various utility methods used from ``pytest.allure`` namespace,", "it to the report. 
if not item.get_marker(\"unreported\") or self.test.status in", "self._magicaldoublereport = hasattr(self.config, 'slaveinput') and self.config.getvalue(\"maxfail\") @pytest.mark.hookwrapper def pytest_runtest_protocol(self, item,", "pytest_namespace(): return {'allure': MASTER_HELPER} class AllureAgregatingListener(object): \"\"\" Listens to pytest", "environment, testcase = pickle.loads(report._allure_result) report._allure_result = None # so actual", "``pytest.mark`` for a given testcases. \"\"\" return self.label(Label.TESTCASE, *testcases) def", "item.reportinfo()[2] self.test = TestCase(name='.'.join(mangle_testnames([x.name for x in parent_down_from_module(item)])), description=description, start=now(),", "suites s.stop = max(case.stop for case in s.tests) known_ids =", "and not atoms < legal_values: raise argparse.ArgumentTypeError('Illegal {} values: {},", "= set() refined_tests = [] for t in s.tests[::-1]: if", "to work around xdist's stupid -x thing when in exits", "= config.option.allurereportdir if reportdir: # we actually record something allure_impl", "now() def _fill_case(self, report, call, pyteststatus, status): \"\"\" Finalizes with", "TestCase) report.__dict__.update(_allure_result=pickle.dumps((parent.nodeid, parent.module.__name__, parent.module.__doc__ or '', self.environment, self.test))) @pytest.mark.hookwrapper def", "\"\"\" Creates a testsuite with collection failures if there were", "if s.tests: # nobody likes empty suites s.stop = max(case.stop", "attr): \"\"\" Provides fancy shortcuts for severity:: # these are", "``contextlib.ContextDecorator``. 
Usage examples:: import pytest def test_foo(): with pytest.allure.step('mystep'): assert", "OR teardown are broken (and call may be anything) PENDING", "times -- with report.when being: setup <--- fixtures are to", "Status.PENDING) else: self._fill_case(report, call, status, Status.CANCELED) elif report.when == 'setup':", "title): self.allure_helper = allure_helper self.title = title self.step = None", "environment(self, **env_dict): if self._allurelistener: self._allurelistener.environment.update(env_dict) @property def attach_type(self): return AttachmentType", "see label_type arg_labels = set().union(item.config.option.allurefeatures, item.config.option.allurestories, item.config.option.allureseverities) if arg_labels and", "all the important stuff config.pluginmanager.register(AllureAgregatingListener(allure_impl, config)) config.pluginmanager.register(AllureCollectionListener(allure_impl)) class AllureTestListener(object): \"\"\"", "down. But first we kinda-unify the test cases. We expect", "when call fails and others OK BROKEN when either setup", "if hasattr(report, 'wasxfail'): self._fill_case(report, call, status, Status.PENDING) else: self._fill_case(report, call,", "set of tuples (name, value). :param name: of label type", "return self.label(Label.FEATURE, *features) def story(self, *stories): \"\"\" A decorator factory", "report.failed: status = Status.BROKEN else: status = Status.CANCELED self.fails.append(CollectFail(name=mangle_testnames(report.nodeid.split(\"::\"))[-1], status=status,", "start=testcase.start, # first case starts the suite! stop=None)).tests.append(testcase) CollectFail =", "pytest_runtest_setup(item): item_labels = set((l.name, l.value) for l in labels_of(item)) #", "the test report thing. The per-test reports are handled by", "OMG, that is bad attachment.source = self.impl._save_attach(attachment.source, attachment.type) attachment.type =", "``pytest.mark`` for a given label. 
\"\"\" allure_label = getattr(pytest.mark, '%s.%s'", "if self.fails: self.impl.start_suite(name='test_collection_phase', title='Collection phase', description='This is the tests collection", "\"\"\" Attaches ``issues`` to the current active case \"\"\" if", "-- which is bad. So we work hard to decide", "`CallInfo` :param pyteststatus: the failed/xfailed/xpassed thing :param status: a :py:class:`allure.constants.Status`", "reports for modules that failed to collect. \"\"\" def __init__(self,", "py.test's `CallInfo` :param pyteststatus: the failed/xfailed/xpassed thing :param status: a", "TestCase, TestStep, Attach, TestSuite, Failure, TestLabel def pytest_addoption(parser): parser.getgroup(\"reporting\").addoption('--alluredir', action=\"store\",", "self.impl.environment.update(environment) for a in testcase.iter_attachments(): self.write_attach(a) self.suites.setdefault(module_id, TestSuite(name=module_name, description=module_doc, tests=[],", "title=title, type=attach_type) self.stack[-1].attachments.append(attach) def dynamic_issue(self, *issues): \"\"\" Attaches ``issues`` to", "pytest.allure.severity(pytest.allure.severity_level.CRITICAL) \"\"\" if attr in dir(Severity) and not attr.startswith('_'): return", "LazyInitStepContext(self, title.__name__)(title) else: return LazyInitStepContext(self, title) def single_step(self, text): \"\"\"", "\"other side\" (AllureAggregatingListener) expects us to send EXACTLY ONE test", "impl): self.impl = impl self.fails = [] def pytest_collectreport(self, report):", "is always called for testitem -- report our status here", "Failure(message=(short_message + '...' * (len(skip_message) > trim_msg_len)), trace=status == Status.PENDING", "assert make_test_data_baz() @pytest.fixture() @pytest.allure.step('test fixture') def steppy_fixture(): return 1 def", "may benefit from FSM (we keep track of what has", "identity. 
Of all the test cases in suite.testcases we leave", "call fails and others OK BROKEN when either setup OR", "return {'allure': MASTER_HELPER} class AllureAgregatingListener(object): \"\"\" Listens to pytest hooks", "test log. Meh, i should file an issue to xdist", "for (_, v) in all_of(Severity)] def label_type(name, legal_values=set()): \"\"\" argparse-type", "pytest_configure, when no AllureListener initiated yet. \"\"\" def __init__(self, allure_helper,", "None # FIXME: that flag makes us pre-report failures in", "fixing it fields :param attachment: a :py:class:`allure.structure.Attach` object \"\"\" #", "self.label(Label.TESTCASE, *testcases) def step(self, title): \"\"\" A contextmanager/decorator for steps.", "So we work hard to decide exact moment when we", "feature labels.\"\"\") parser.getgroup(\"general\").addoption('--allure_stories', action=\"store\", dest=\"allurestories\", metavar=\"STORIES_SET\", default={}, type=label_type(name=Label.STORY), help=\"\"\"Comma-separated list", "attr in dir(Severity) and not attr.startswith('_'): return self.severity(getattr(Severity, attr)) else:", "call, status, Status.BROKEN) elif report.skipped: if hasattr(report, 'wasxfail'): self._fill_case(report, call,", "broken so, well, someone has idea of teardown failure #", "default=None, help=\"Generate Allure report in the specified directory (may not", "of teardown failure # still, that's no big deal --", "test data') def make_test_data_bar(): raise ValueError('No data today') def test_bar():", "'...' * (len(skip_message) > trim_msg_len)), trace=status == Status.PENDING and report.longrepr", "moment when we call `_stop_case` to do that. This method", "teardown <--- tears down fixtures (that still possess important info)", "set((l.name, l.value) for l in labels_of(item)) # see label_type arg_labels", "no AllureListener initiated yet. 
\"\"\" def __init__(self, allure_helper, title): self.allure_helper", "a_label_type(string): atoms = set(string.split(',')) if legal_values and not atoms <", "Meh, i should file an issue to xdist if self._magicaldoublereport:", "help=\"\"\"Comma-separated list of story names. Run tests that have at", "xfailed SKIPPED if skipped and not xfailed \"\"\" report =", "context with given ``name`` and ``type``. \"\"\" if self._allurelistener: self._allurelistener.attach(name,", "\"\"\" Listens to pytest collection-related hooks to generate reports for", "Status.CANCELED self.fails.append(CollectFail(name=mangle_testnames(report.nodeid.split(\"::\"))[-1], status=status, message=get_exception_message(None, None, report), trace=report.longrepr)) def pytest_sessionfinish(self): \"\"\"", "= [] def pytest_collectreport(self, report): if not report.passed: if report.failed:", "# we actually record something allure_impl = AllureImpl(reportdir) testlistener =", "xdist's -x behavior we have to have tests even at", "description='This is the tests collection phase. Failures are modules that", "to pytest collection-related hooks to generate reports for modules that", "self.label(Label.FEATURE, *features) def story(self, *stories): \"\"\" A decorator factory that", "still possess important info) `setup` and `teardown` are always called,", "Status, AttachmentType, Severity, \\ FAILED_STATUSES, Label, SKIPPED_STATUSES from allure.utils import", "this breaks encapsulation a lot if hasattr(listener, 'stack'): return listener", "for proofs / ideas. 
The \"other side\" (AllureAggregatingListener) expects us", "= TestStep(name=name, title=name, start=now(), attachments=[], steps=[]) self.stack[-1].steps.append(step) self.stack.append(step) return step", "AllureAgregatingListener.pytest_sessionfinish self.stack = [self.test] yield self.test = None self.stack =", "def get_listener(self): return self._allurelistener def attach(self, name, contents, type=AttachmentType.TEXT): #", "exact moment when we call `_stop_case` to do that. This", "big deal -- test has already failed # TODO: think", "'') def report_case(self, item, report): \"\"\" Adds `self.test` to the", "cases to come from AllureTestListener -- and the have ._id", "self.test.status = Status.BROKEN # if a test isn't marked as", "at least one of the specified feature labels.\"\"\") parser.getgroup(\"general\").addoption('--allure_stories', action=\"store\",", "fixture') def steppy_fixture(): return 1 def test_baz(steppy_fixture): assert steppy_fixture \"\"\"", "way \"\"\" for s in self.suites.values(): if s.tests: # nobody", "report): if hasattr(report, '_allure_result'): module_id, module_name, module_doc, environment, testcase =", "__init__(self, allure_helper, title): self.allure_helper = allure_helper self.title = title self.step", "{'allure': MASTER_HELPER} class AllureAgregatingListener(object): \"\"\" Listens to pytest hooks to", "and report.wasxfail) elif status in SKIPPED_STATUSES: skip_message = type(report.longrepr) ==", "Of all the test cases in suite.testcases we leave LAST", ":py:class:`allure.constants.Status` entry \"\"\" [self.attach(name, contents, AttachmentType.TEXT) for (name, contents) in", "'.join(severities)) parser.getgroup(\"general\").addoption('--allure_features', action=\"store\", dest=\"allurefeatures\", metavar=\"FEATURES_SET\", default={}, type=label_type(name=Label.FEATURE), help=\"\"\"Comma-separated list of", "severities will be run. 
Possible values are:%s.\"\"\" % ', '.join(severities))", "to send EXACTLY ONE test report (it wont break, but", "raise ValueError('No data today') def test_bar(): assert make_test_data_bar() @pytest.allure.step def", "self.test.status not in FAILED_STATUSES: # if test was OK but", "\"\"\" step = TestStep(name=name, title=name, start=now(), attachments=[], steps=[]) self.stack[-1].steps.append(step) self.stack.append(step)", "legal_values), ', '.join(legal_values))) return set((name, v) for v in atoms)", "and ``type``. \"\"\" if self._allurelistener: self._allurelistener.attach(name, contents, type) def label(self,", "are done and have all the results in `self.suites` Lets", "the specified story labels.\"\"\") def pytest_configure(config): reportdir = config.option.allurereportdir if", "this gets injected elsewhere, like in the pytest_configure def get_listener(self):", "have ._id field to manifest their identity. Of all the", "LAST with the same ID -- becase logreport can be", "way \"\"\" parent = parent_module(item) # we attach a four-tuple:", "description=module_doc, tests=[], labels=[], start=testcase.start, # first case starts the suite!", "at teardown => broken self._fill_case(report, call, status, Status.BROKEN) else: #", "em down. But first we kinda-unify the test cases. We", "once again self.test.status = Status.BROKEN # if a test isn't", "self.config.hook.pytest_report_teststatus(report=report) status = status and status[0] if report.when == 'call':", "only when that # FIXME: this breaks encapsulation a lot", "@pytest.allure.step('test fixture') def steppy_fixture(): return 1 def test_baz(steppy_fixture): assert steppy_fixture", "value is set of tuples (name, value). :param name: of", "that returns ``pytest.mark`` for a given stories. 
\"\"\" return self.label(Label.STORY,", "make_test_data_bar() @pytest.allure.step def make_test_data_baz(): raise ValueError('No data today') def test_baz():", "call, status, Status.BROKEN) else: # mark it broken so, well,", "import TestCase, TestStep, Attach, TestSuite, Failure, TestLabel def pytest_addoption(parser): parser.getgroup(\"reporting\").addoption('--alluredir',", "def pytest_namespace(): return {'allure': MASTER_HELPER} class AllureAgregatingListener(object): \"\"\" Listens to", "same pytest.allure.CRITICAL pytest.allure.severity(pytest.allure.severity_level.CRITICAL) \"\"\" if attr in dir(Severity) and not", "MORE THAN ONE TIME (namely, if the test fails and", "pytest.allure._allurelistener = testlistener config.pluginmanager.register(testlistener) if not hasattr(config, 'slaveinput'): # on", "A decorator factory that returns ``pytest.mark`` for a given testcases.", "allure(self): listener = self.allure_helper.get_listener() # if listener has `stack` we", "likes empty suites s.stop = max(case.stop for case in s.tests)", "names. Tests only with these severities will be run. Possible", "self.test = None # FIXME: that flag makes us pre-report", "report): \"\"\" Adds `self.test` to the `report` in a `AllureAggegatingListener`-understood", "call may be anything) PENDING if skipped and xfailed SKIPPED", "from AllureTestListener -- and the have ._id field to manifest", "from six import text_type from allure.common import AllureImpl, StepContext from", "kinda-unify the test cases. 
We expect cases to come from", "Provides fancy shortcuts for severity:: # these are the same", "steps only when that # FIXME: this breaks encapsulation a", "have tests even at CALL failures) TODO: do it in", "# see self.pytest_runtest_makereport and AllureAgregatingListener.pytest_sessionfinish self._magicaldoublereport = hasattr(self.config, 'slaveinput') and", "return step def stop_step(self): \"\"\" Stops the step at the", "@pytest.mark.hookwrapper def pytest_runtest_protocol(self, item, nextitem): try: # for common items", "modules that failed to collect.') for fail in self.fails: self.impl.start_case(name=fail.name.split(\".\")[-1])", "self.stack[-1].steps.append(step) self.stack.append(step) return step def stop_step(self): \"\"\" Stops the step", "def pytest_runtest_protocol(self, item, nextitem): try: # for common items description", "specified directory (may not exist)\") severities = [v for (_,", "when moving to python 3, rework this with ``contextlib.ContextDecorator``. Usage", "# FIXME: that flag makes us pre-report failures in the", "we later re-save those, oh my... title=title, type=attach_type) self.stack[-1].attachments.append(attach) def", "for attaching it to the test report thing. The per-test", "ID, test module name, test module doc, environment, TestCase) report.__dict__.update(_allure_result=pickle.dumps((parent.nodeid,", "pyteststatus, report), trace=report.longrepr or hasattr(report, 'wasxfail') and report.wasxfail) elif status", "not in FAILED_STATUSES: # if test was OK but failed", "\"\"\" return self.label(Label.SEVERITY, severity) def feature(self, *features): \"\"\" A decorator", "def pytest_sessionfinish(self): \"\"\" We are done and have all the", "the test \"\"\" if self.test: self.test.description = description def start_step(self,", "proofs / ideas. The \"other side\" (AllureAggregatingListener) expects us to", ":py:func:`_pytest.runner.runtestprotocol` for proofs / ideas. 
The \"other side\" (AllureAggregatingListener) expects", "AttributeError MASTER_HELPER = AllureHelper() def pytest_namespace(): return {'allure': MASTER_HELPER} class", "this finishes the main code has finished teardown <--- tears", "A decorator factory that returns ``pytest.mark`` for a given label.", "that's no big deal -- test has already failed #", "provides a possibility to create step decorators, being initiated before", "description(self, description): \"\"\" Sets description for the test \"\"\" if", "# on xdist-master node do all the important stuff config.pluginmanager.register(AllureAgregatingListener(allure_impl,", "do all the important stuff config.pluginmanager.register(AllureAgregatingListener(allure_impl, config)) config.pluginmanager.register(AllureCollectionListener(allure_impl)) class AllureTestListener(object):", "reportdir: # we actually record something allure_impl = AllureImpl(reportdir) testlistener", "in FAILED_STATUSES: self.report_case(item, report) def pytest_runtest_setup(item): item_labels = set((l.name, l.value)", "call, status, Status.FAILED) # FIXME: this is here only to", "attachment.type.mime_type def pytest_runtest_logreport(self, report): if hasattr(report, '_allure_result'): module_id, module_name, module_doc,", "help=\"\"\"Comma-separated list of severity names. Tests only with these severities", "utility methods used from ``pytest.allure`` namespace, like ``pytest.allure.attach`` \"\"\" def", "test has already failed # TODO: think about that once", "call <--- when this finishes the main code has finished", "*testcases) def step(self, title): \"\"\" A contextmanager/decorator for steps. 
TODO:", "top of ``self.stack`` \"\"\" step = self.stack.pop() step.stop = now()", "[v for (_, v) in all_of(Severity)] def label_type(name, legal_values=set()): \"\"\"", "https://github.com/allure-framework/allure-python/issues/98 self.impl.environment.update(environment) for a in testcase.iter_attachments(): self.write_attach(a) self.suites.setdefault(module_id, TestSuite(name=module_name, description=module_doc,", "AttachmentType.TEXT) for (name, contents) in dict(report.sections).items()] self.test.stop = now() self.test.status", "the `pytest_runtest_logreport` hook. \"\"\" def __init__(self, config): self.config = config", "item.function.__doc__ except AttributeError: # for doctests that has no `function`", "def attach(self, title, contents, attach_type): \"\"\" Store attachment object in", "cope with xdist's begavior regarding -x. # see self.pytest_runtest_makereport and", "should file an issue to xdist if self._magicaldoublereport: # to", "fancy shortcuts for severity:: # these are the same pytest.allure.CRITICAL", "hook. # it is here to cope with xdist's begavior", "factory that returns ``pytest.mark`` for a given allure ``level``. 
\"\"\"", "the same ID -- becase logreport can be sent MORE", "def _fill_case(self, report, call, pyteststatus, status): \"\"\" Finalizes with important", "happened via self.test.status) Expected behavior is: FAILED when call fails", "Status.BROKEN # if a test isn't marked as \"unreported\" or", "skip_message and skip_message or '') def report_case(self, item, report): \"\"\"", "else: # mark it broken so, well, someone has idea", "when either setup OR teardown are broken (and call may", "A decorator factory that returns ``pytest.mark`` for a given issues.", "thing :param status: a :py:class:`allure.constants.Status` entry \"\"\" [self.attach(name, contents, AttachmentType.TEXT)", "Starts an new :py:class:`allure.structure.TestStep` with given ``name``, pushes it to", "``self.stack`` \"\"\" step = self.stack.pop() step.stop = now() def _fill_case(self,", "report. if not item.get_marker(\"unreported\") or self.test.status in FAILED_STATUSES: self.report_case(item, report)", "Attaches ``contents`` to a current context with given ``name`` and", "attachments=[], labels=labels_of(item), status=None, steps=[], id=str(uuid.uuid4())) # for later resolution in", "with given ``name``, pushes it to the ``self.stack`` and returns", "and not attr.startswith('_'): return self.severity(getattr(Severity, attr)) else: raise AttributeError MASTER_HELPER", "call `_stop_case` to do that. This method may benefit from", "# for later resolution in AllureAgregatingListener.pytest_sessionfinish self.stack = [self.test] yield", "run. Possible values are:%s.\"\"\" % ', '.join(severities)) parser.getgroup(\"general\").addoption('--allure_features', action=\"store\", dest=\"allurefeatures\",", "step.stop = now() def _fill_case(self, report, call, pyteststatus, status): \"\"\"", "here to cope with xdist's begavior regarding -x. 
# see", "if self._magicaldoublereport: # to minimize ze impact self.report_case(item, report) elif", "step def stop_step(self): \"\"\" Stops the step at the top", "s.tests[::-1]: if t.id not in known_ids: known_ids.add(t.id) refined_tests.append(t) s.tests =", "others OK BROKEN when either setup OR teardown are broken", "we call `_stop_case` to do that. This method may benefit", "record something allure_impl = AllureImpl(reportdir) testlistener = AllureTestListener(config) pytest.allure._allurelistener =", "at CALL failures) TODO: do it in a better, more", "that has no `function` attribute description = item.reportinfo()[2] self.test =", "that fall out of that \"\"\" def a_label_type(string): atoms =", "responsible for recording in-test data and for attaching it to", "testcase.iter_attachments(): self.write_attach(a) self.suites.setdefault(module_id, TestSuite(name=module_name, description=module_doc, tests=[], labels=[], start=testcase.start, # first", "their identity. Of all the test cases in suite.testcases we", "/ teardown if report.failed: self._fill_case(report, call, status, Status.BROKEN) elif report.skipped:", "returns ``pytest.mark`` for a given label. \"\"\" allure_label = getattr(pytest.mark,", "= item.function.__doc__ except AttributeError: # for doctests that has no", "already failed # TODO: think about that once again self.test.status", "a testsuite with collection failures if there were any. \"\"\"", "self.test.status = status if status in FAILED_STATUSES: self.test.failure = Failure(message=get_exception_message(call.excinfo,", "= self.stack.pop() step.stop = now() def _fill_case(self, report, call, pyteststatus,", "trim_msg_len)), trace=status == Status.PENDING and report.longrepr or short_message != skip_message", "or it has failed, add it to the report. 
if", "that # FIXME: this breaks encapsulation a lot if hasattr(listener,", "contents, type=AttachmentType.TEXT): # @ReservedAssignment \"\"\" Attaches ``contents`` to a current", "message trace') class AllureCollectionListener(object): \"\"\" Listens to pytest collection-related hooks", "a lot if hasattr(listener, 'stack'): return listener class AllureHelper(object): \"\"\"", "pass def environment(self, **env_dict): if self._allurelistener: self._allurelistener.environment.update(env_dict) @property def attach_type(self):", "is a step context used for decorated steps. It provides", "for the test \"\"\" if self._allurelistener: self._allurelistener.description(description) def testcase(self, *testcases):", "TestLabel things) :param legal_values: a `set` of values that are", "parent_down_from_module, labels_of, \\ all_of, get_exception_message, now, mangle_testnames from allure.structure import", "parent_down_from_module(item)])), description=description, start=now(), attachments=[], labels=labels_of(item), status=None, steps=[], id=str(uuid.uuid4())) # for", "`TestReport` :param call: py.test's `CallInfo` :param pyteststatus: the failed/xfailed/xpassed thing", "and skip_message or '') def report_case(self, item, report): \"\"\" Adds", "LazyInitStepContext(self, title) def single_step(self, text): \"\"\" Writes single line to", "are legal for this label, if any limit whatsoever :raises", "the `AllureAgregatingListener.write_attach` \"\"\" attach = Attach(source=contents, # we later re-save", "the tests collection phase. 
Failures are modules that failed to", "of that \"\"\" def a_label_type(string): atoms = set(string.split(',')) if legal_values", "MASTER_HELPER = AllureHelper() def pytest_namespace(): return {'allure': MASTER_HELPER} class AllureAgregatingListener(object):", "possess important info) `setup` and `teardown` are always called, but", "is garbage-collected, see https://github.com/allure-framework/allure-python/issues/98 self.impl.environment.update(environment) for a in testcase.iter_attachments(): self.write_attach(a)", "arg_labels and not item_labels & arg_labels: pytest.skip('Not suitable with selected", "the results in `self.suites` Lets write em down. But first", "'wasxfail') and report.wasxfail) elif status in SKIPPED_STATUSES: skip_message = type(report.longrepr)", "be run. Possible values are:%s.\"\"\" % ', '.join(severities)) parser.getgroup(\"general\").addoption('--allure_features', action=\"store\",", "f: self.impl._write_xml(f, s) self.impl.store_environment() def write_attach(self, attachment): \"\"\" Writes attachment", "elif report.when == 'teardown': # as teardown is always called", "to be initialized in this one call <--- when this", "A decorator factory that returns ``pytest.mark`` for a given features.", "*stories): \"\"\" A decorator factory that returns ``pytest.mark`` for a", "= now() def _fill_case(self, report, call, pyteststatus, status): \"\"\" Finalizes", "def make_test_data_baz(): raise ValueError('No data today') def test_baz(): assert make_test_data_baz()", "line to report. \"\"\" if self._allurelistener: with self.step(text): pass def", "if attr in dir(Severity) and not attr.startswith('_'): return self.severity(getattr(Severity, attr))", "import namedtuple from six import text_type from allure.common import AllureImpl,", "when in exits BEFORE THE TEARDOWN test log. Meh, i", "us pre-report failures in the makereport hook. # it is", "yield self.test = None self.stack = [] def attach(self, title,", "``pytest.mark`` for a given features. 
\"\"\" return self.label(Label.FEATURE, *features) def", "with self.impl._reportfile('%s-testsuite.xml' % uuid.uuid4()) as f: self.impl._write_xml(f, s) self.impl.store_environment() def", "as f: self.impl._write_xml(f, s) self.impl.store_environment() def write_attach(self, attachment): \"\"\" Writes", "self.allure_helper.get_listener() # if listener has `stack` we are inside a", "pickled data is garbage-collected, see https://github.com/allure-framework/allure-python/issues/98 self.impl.environment.update(environment) for a in", "fails and others OK BROKEN when either setup OR teardown", "attach(self, name, contents, type=AttachmentType.TEXT): # @ReservedAssignment \"\"\" Attaches ``contents`` to", "to the report. if not item.get_marker(\"unreported\") or self.test.status in FAILED_STATUSES:", "uuid import pickle import pytest import argparse from collections import", "are to be initialized in this one call <--- when", "atoms < legal_values: raise argparse.ArgumentTypeError('Illegal {} values: {}, only [{}]", "tests that have at least one of the specified story", "Adds `self.test` to the `report` in a `AllureAggegatingListener`-understood way \"\"\"", "to generate reports for common tests. \"\"\" def __init__(self, impl,", "`self.suites` Lets write em down. But first we kinda-unify the", "PENDING if skipped and xfailed SKIPPED if skipped and not", "Mark test ``issues`` from inside. 
\"\"\" if self._allurelistener: self._allurelistener.dynamic_issue(*issues) def", "FIXME: that flag makes us pre-report failures in the makereport", "SKIPPED_STATUSES: skip_message = type(report.longrepr) == tuple and report.longrepr[2] or report.wasxfail", "pytest_sessionfinish(self): \"\"\" We are done and have all the results", "still, that's no big deal -- test has already failed", "injected elsewhere, like in the pytest_configure def get_listener(self): return self._allurelistener", "def a_label_type(string): atoms = set(string.split(',')) if legal_values and not atoms", "import AllureImpl, StepContext from allure.constants import Status, AttachmentType, Severity, \\", "finished teardown <--- tears down fixtures (that still possess important", "description): \"\"\" Sets description for the test \"\"\" if self.test:", "pyteststatus, status): \"\"\" Finalizes with important data :param report: py.test's", ":param attachment: a :py:class:`allure.structure.Attach` object \"\"\" # OMG, that is", "fixtures (that still possess important info) `setup` and `teardown` are", "dest=\"allurestories\", metavar=\"STORIES_SET\", default={}, type=label_type(name=Label.STORY), help=\"\"\"Comma-separated list of story names. Run", "has already failed # TODO: think about that once again", "report_case(self, item, report): \"\"\" Adds `self.test` to the `report` in", "Sets description for the test \"\"\" if self._allurelistener: self._allurelistener.description(description) def", "like ``pytest.allure.attach`` \"\"\" def __init__(self): self._allurelistener = None # FIXME:", "module name, test module doc, environment, TestCase) report.__dict__.update(_allure_result=pickle.dumps((parent.nodeid, parent.module.__name__, parent.module.__doc__", "title, contents, attach_type): \"\"\" Store attachment object in current state", "call, pyteststatus, status): \"\"\" Finalizes with important data :param report:", "Attach(source=contents, # we later re-save those, oh my... 
title=title, type=attach_type)", "attach = Attach(source=contents, # we later re-save those, oh my...", "/ ideas. The \"other side\" (AllureAggregatingListener) expects us to send", "self.label(Label.ISSUE, *issues) def dynamic_issue(self, *issues): \"\"\" Mark test ``issues`` from", "that returns ``pytest.mark`` for a given testcases. \"\"\" return self.label(Label.TESTCASE,", "89 short_message = skip_message.split('\\n')[0][:trim_msg_len] # FIXME: see pytest.runner.pytest_runtest_makereport self.test.failure =", "\"\"\" Sets description for the test \"\"\" if self._allurelistener: self._allurelistener.description(description)", "-- report our status here if not report.passed: if self.test.status", "# see label_type arg_labels = set().union(item.config.option.allurefeatures, item.config.option.allurestories, item.config.option.allureseverities) if arg_labels", "object self.suites = {} def pytest_sessionfinish(self): \"\"\" We are done", "are the same pytest.allure.CRITICAL pytest.allure.severity(pytest.allure.severity_level.CRITICAL) \"\"\" if attr in dir(Severity)", "def feature(self, *features): \"\"\" A decorator factory that returns ``pytest.mark``", "return listener class AllureHelper(object): \"\"\" This object holds various utility", "== 'call': if report.passed: self._fill_case(report, call, status, Status.PASSED) elif report.failed:", "self.stack.pop() step.stop = now() def _fill_case(self, report, call, pyteststatus, status):", "'slaveinput') and self.config.getvalue(\"maxfail\") @pytest.mark.hookwrapper def pytest_runtest_protocol(self, item, nextitem): try: #", "StepContext from allure.constants import Status, AttachmentType, Severity, \\ FAILED_STATUSES, Label,", "report) elif report.skipped: if hasattr(report, 'wasxfail'): self._fill_case(report, call, status, Status.PENDING)", "= set((l.name, l.value) for l in labels_of(item)) # see label_type", "= AllureTestListener(config) pytest.allure._allurelistener = testlistener 
config.pluginmanager.register(testlistener) if not hasattr(config, 'slaveinput'):", "\"\"\" Writes attachment object from the `AllureTestListener` to the FS,", "Severity def __getattr__(self, attr): \"\"\" Provides fancy shortcuts for severity::", "work around xdist's stupid -x thing when in exits BEFORE", "if self._allurelistener: self._allurelistener.description(description) def testcase(self, *testcases): \"\"\" A decorator factory", ":param status: a :py:class:`allure.constants.Status` entry \"\"\" [self.attach(name, contents, AttachmentType.TEXT) for", "case starts the suite! stop=None)).tests.append(testcase) CollectFail = namedtuple('CollectFail', 'name status", "if hasattr(listener, 'stack'): return listener class AllureHelper(object): \"\"\" This object", "data') def make_test_data_bar(): raise ValueError('No data today') def test_bar(): assert", "title=name, start=now(), attachments=[], steps=[]) self.stack[-1].steps.append(step) self.stack.append(step) return step def stop_step(self):", "in the specified directory (may not exist)\") severities = [v", "on xdist-master node do all the important stuff config.pluginmanager.register(AllureAgregatingListener(allure_impl, config))", "AllureTestListener -- and the have ._id field to manifest their", "collection phase. Failures are modules that failed to collect.') for", "AttachmentType @property def severity_level(self): return Severity def __getattr__(self, attr): \"\"\"", "``contents`` to a current context with given ``name`` and ``type``.", "teardown is always called for testitem -- report our status", "list of feature names. Run tests that have at least", "is bad. 
So we work hard to decide exact moment", "someone has idea of teardown failure # still, that's no", "write_attach(self, attachment): \"\"\" Writes attachment object from the `AllureTestListener` to", "in self.suites.values(): if s.tests: # nobody likes empty suites s.stop", "'setup': # setup / teardown if report.failed: self._fill_case(report, call, status,", "feature names. Run tests that have at least one of", "all the test cases in suite.testcases we leave LAST with", "config): self.config = config self.environment = {} self.test = None", "report things. pytest runs this (naturally) three times -- with", "# as teardown is always called for testitem -- report", "None # FIXME: this gets injected elsewhere, like in the", "elif report.failed: self._fill_case(report, call, status, Status.FAILED) # FIXME: this is", "for a in testcase.iter_attachments(): self.write_attach(a) self.suites.setdefault(module_id, TestSuite(name=module_name, description=module_doc, tests=[], labels=[],", "be initialized in this one call <--- when this finishes", "``type``. \"\"\" if self._allurelistener: self._allurelistener.attach(name, contents, type) def label(self, name,", "when no AllureListener initiated yet. \"\"\" def __init__(self, allure_helper, title):", "pytest_collectreport(self, report): if not report.passed: if report.failed: status = Status.BROKEN", "factory that returns ``pytest.mark`` for a given features. 
\"\"\" return", "have all the results in `self.suites` Lets write em down.", "we actually record something allure_impl = AllureImpl(reportdir) testlistener = AllureTestListener(config)", "\"\"\" if self._allurelistener: self._allurelistener.dynamic_issue(*issues) def description(self, description): \"\"\" Sets description", "for testitem -- report our status here if not report.passed:", "that have at least one of the specified story labels.\"\"\")", "out of that \"\"\" def a_label_type(string): atoms = set(string.split(',')) if", "@pytest.mark.hookwrapper def pytest_runtest_makereport(self, item, call): \"\"\" Decides when to actually", "<--- tears down fixtures (that still possess important info) `setup`", "callable(title): return LazyInitStepContext(self, title.__name__)(title) else: return LazyInitStepContext(self, title) def single_step(self,", "in this one call <--- when this finishes the main", "common tests. \"\"\" def __init__(self, impl, config): self.impl = impl", "def test_foo(): with pytest.allure.step('mystep'): assert False @pytest.allure.step('make test data') def", "steps=[], id=str(uuid.uuid4())) # for later resolution in AllureAgregatingListener.pytest_sessionfinish self.stack =", "those, oh my... title=title, type=attach_type) self.stack[-1].attachments.append(attach) def dynamic_issue(self, *issues): \"\"\"", "these are the same pytest.allure.CRITICAL pytest.allure.severity(pytest.allure.severity_level.CRITICAL) \"\"\" if attr in", "in sorted(arg_labels))) class LazyInitStepContext(StepContext): \"\"\" This is a step context", "argparse from collections import namedtuple from six import text_type from", "returns ``pytest.mark`` for a given allure ``level``. \"\"\" return self.label(Label.SEVERITY,", "tests=[], labels=[], start=testcase.start, # first case starts the suite! 
stop=None)).tests.append(testcase)", "and not item_labels & arg_labels: pytest.skip('Not suitable with selected labels:", "known_ids = set() refined_tests = [] for t in s.tests[::-1]:", "attaching it to the test report thing. The per-test reports", "for a given label. \"\"\" allure_label = getattr(pytest.mark, '%s.%s' %", "with selected labels: %s.' % ', '.join(text_type(l) for l in", "We expect cases to come from AllureTestListener -- and the", "testcase = pickle.loads(report._allure_result) report._allure_result = None # so actual pickled", "Listens to pytest collection-related hooks to generate reports for modules", "one of the specified feature labels.\"\"\") parser.getgroup(\"general\").addoption('--allure_stories', action=\"store\", dest=\"allurestories\", metavar=\"STORIES_SET\",", "\"\"\" Mark test ``issues`` from inside. \"\"\" if self._allurelistener: self._allurelistener.dynamic_issue(*issues)", "def story(self, *stories): \"\"\" A decorator factory that returns ``pytest.mark``", "start=now(), attachments=[], steps=[]) self.stack[-1].steps.append(step) self.stack.append(step) return step def stop_step(self): \"\"\"", "for labelish things. processed value is set of tuples (name,", "factory that returns ``pytest.mark`` for a given label. \"\"\" allure_label", "specified feature labels.\"\"\") parser.getgroup(\"general\").addoption('--allure_stories', action=\"store\", dest=\"allurestories\", metavar=\"STORIES_SET\", default={}, type=label_type(name=Label.STORY), help=\"\"\"Comma-separated", "label. \"\"\" allure_label = getattr(pytest.mark, '%s.%s' % (Label.DEFAULT, name)) return", "labels=[], start=testcase.start, # first case starts the suite! 
stop=None)).tests.append(testcase) CollectFail", "def issue(self, *issues): \"\"\" A decorator factory that returns ``pytest.mark``", "have at least one of the specified story labels.\"\"\") def", "attach_type): \"\"\" Store attachment object in current state for later", "being: setup <--- fixtures are to be initialized in this", "[] for t in s.tests[::-1]: if t.id not in known_ids:", "= status if status in FAILED_STATUSES: self.test.failure = Failure(message=get_exception_message(call.excinfo, pyteststatus,", "t in s.tests[::-1]: if t.id not in known_ids: known_ids.add(t.id) refined_tests.append(t)", "generate reports for modules that failed to collect. \"\"\" def", "*features): \"\"\" A decorator factory that returns ``pytest.mark`` for a", "the step at the top of ``self.stack`` \"\"\" step =", "or self.test.status in FAILED_STATUSES: self.report_case(item, report) def pytest_runtest_setup(item): item_labels =", "about that once again self.test.status = Status.BROKEN # if a", "initiated yet. \"\"\" def __init__(self, allure_helper, title): self.allure_helper = allure_helper", "= [] for t in s.tests[::-1]: if t.id not in", "== 'teardown': # as teardown is always called for testitem", "in dict(report.sections).items()] self.test.stop = now() self.test.status = status if status", "are handled by `AllureAgregatingListener` at the `pytest_runtest_logreport` hook. \"\"\" def", "report, call, pyteststatus, status): \"\"\" Finalizes with important data :param", "tears down fixtures (that still possess important info) `setup` and", "parent = parent_module(item) # we attach a four-tuple: (test module", "this (naturally) three times -- with report.when being: setup <---", "this one call <--- when this finishes the main code", "steps=[]) self.stack[-1].steps.append(step) self.stack.append(step) return step def stop_step(self): \"\"\" Stops the", "\"\"\" # OMG, that is bad attachment.source = self.impl._save_attach(attachment.source, attachment.type)", "(name, value). 
:param name: of label type (for future TestLabel", "FAILED_STATUSES, Label, SKIPPED_STATUSES from allure.utils import parent_module, parent_down_from_module, labels_of, \\", "label type (for future TestLabel things) :param legal_values: a `set`", "from inside. \"\"\" if self._allurelistener: self._allurelistener.dynamic_issue(*issues) def description(self, description): \"\"\"", "import text_type from allure.common import AllureImpl, StepContext from allure.constants import", "a test isn't marked as \"unreported\" or it has failed,", "OK BROKEN when either setup OR teardown are broken (and", "\\ all_of, get_exception_message, now, mangle_testnames from allure.structure import TestCase, TestStep,", "self._fill_case(report, call, status, Status.CANCELED) elif report.when == 'setup': # setup", "a test # record steps only when that # FIXME:", "teardown failure # still, that's no big deal -- test", "step decorators, being initiated before pytest_configure, when no AllureListener initiated", "item.config.option.allureseverities) if arg_labels and not item_labels & arg_labels: pytest.skip('Not suitable", "def allure(self): listener = self.allure_helper.get_listener() # if listener has `stack`", "inside. \"\"\" if self._allurelistener: self._allurelistener.dynamic_issue(*issues) def description(self, description): \"\"\" Sets", "same ID -- becase logreport can be sent MORE THAN", "Failures are modules that failed to collect.') for fail in", "== tuple and report.longrepr[2] or report.wasxfail trim_msg_len = 89 short_message", ":param legal_values: a `set` of values that are legal for", "benefit from FSM (we keep track of what has already", "FS, fixing it fields :param attachment: a :py:class:`allure.structure.Attach` object \"\"\"", "= [self.test] yield self.test = None self.stack = [] def", "if there were any. \"\"\" if self.fails: self.impl.start_suite(name='test_collection_phase', title='Collection phase',", "Tests only with these severities will be run. 
Possible values", "namedtuple from six import text_type from allure.common import AllureImpl, StepContext", "help=\"\"\"Comma-separated list of feature names. Run tests that have at", "methods used from ``pytest.allure`` namespace, like ``pytest.allure.attach`` \"\"\" def __init__(self):", "A decorator factory that returns ``pytest.mark`` for a given stories.", "def pytest_collectreport(self, report): if not report.passed: if report.failed: status =", "skipped and not xfailed \"\"\" report = (yield).get_result() status =", "are broken (and call may be anything) PENDING if skipped", "not attr.startswith('_'): return self.severity(getattr(Severity, attr)) else: raise AttributeError MASTER_HELPER =", "the test fails and then gets broken -- to cope", "data and for attaching it to the test report thing.", "features. \"\"\" return self.label(Label.FEATURE, *features) def story(self, *stories): \"\"\" A", "in the pytest_configure def get_listener(self): return self._allurelistener def attach(self, name,", "self.environment, self.test))) @pytest.mark.hookwrapper def pytest_runtest_makereport(self, item, call): \"\"\" Decides when", "only to work around xdist's stupid -x thing when in", "xdist's stupid -x thing when in exits BEFORE THE TEARDOWN", "# mark it broken so, well, someone has idea of", "from the `AllureTestListener` to the FS, fixing it fields :param", "an issue to xdist if self._magicaldoublereport: # to minimize ze", "the `report` in a `AllureAggegatingListener`-understood way \"\"\" parent = parent_module(item)", "\"\"\" We are done and have all the results in", "v in atoms) return a_label_type parser.getgroup(\"general\").addoption('--allure_severities', action=\"store\", dest=\"allureseverities\", metavar=\"SEVERITIES_SET\", default={},", "AllureImpl, StepContext from allure.constants import Status, AttachmentType, Severity, \\ FAILED_STATUSES,", "in a better, more efficient way \"\"\" for s in", "in FAILED_STATUSES: # if test was OK but failed at", "in 
SKIPPED_STATUSES: skip_message = type(report.longrepr) == tuple and report.longrepr[2] or", "raise AttributeError MASTER_HELPER = AllureHelper() def pytest_namespace(): return {'allure': MASTER_HELPER}", "not hasattr(config, 'slaveinput'): # on xdist-master node do all the", "that flag makes us pre-report failures in the makereport hook.", "things. processed value is set of tuples (name, value). :param", "trace=report.longrepr)) def pytest_sessionfinish(self): \"\"\" Creates a testsuite with collection failures", "again self.test.status = Status.BROKEN # if a test isn't marked", "actual pickled data is garbage-collected, see https://github.com/allure-framework/allure-python/issues/98 self.impl.environment.update(environment) for a", "item_labels = set((l.name, l.value) for l in labels_of(item)) # see", "it to the test report thing. The per-test reports are", "in s.tests) known_ids = set() refined_tests = [] for t", "labels.\"\"\") def pytest_configure(config): reportdir = config.option.allurereportdir if reportdir: # we", "that. This method may benefit from FSM (we keep track", "type=AttachmentType.TEXT): # @ReservedAssignment \"\"\" Attaches ``contents`` to a current context", "to python 3, rework this with ``contextlib.ContextDecorator``. Usage examples:: import", "per-test reports are handled by `AllureAgregatingListener` at the `pytest_runtest_logreport` hook.", "-x. 
# see self.pytest_runtest_makereport and AllureAgregatingListener.pytest_sessionfinish self._magicaldoublereport = hasattr(self.config, 'slaveinput')", "dict(report.sections).items()] self.test.stop = now() self.test.status = status if status in", "when this finishes the main code has finished teardown <---", "if self._allurelistener: with self.step(text): pass def environment(self, **env_dict): if self._allurelistener:", "@pytest.allure.step('make test data') def make_test_data_bar(): raise ValueError('No data today') def", "Attaches ``issues`` to the current active case \"\"\" if self.test:", "attachment.type = attachment.type.mime_type def pytest_runtest_logreport(self, report): if hasattr(report, '_allure_result'): module_id,", "setup OR teardown are broken (and call may be anything)", "'teardown': # as teardown is always called for testitem --", "pushes it to the ``self.stack`` and returns the step. \"\"\"", "skip_message or '') def report_case(self, item, report): \"\"\" Adds `self.test`", "method may benefit from FSM (we keep track of what", "s.tests = refined_tests[::-1] with self.impl._reportfile('%s-testsuite.xml' % uuid.uuid4()) as f: self.impl._write_xml(f,", "collect.') for fail in self.fails: self.impl.start_case(name=fail.name.split(\".\")[-1]) self.impl.stop_case(status=fail.status, message=fail.message, trace=fail.trace) self.impl.stop_suite()", "= skip_message.split('\\n')[0][:trim_msg_len] # FIXME: see pytest.runner.pytest_runtest_makereport self.test.failure = Failure(message=(short_message +", "do that. This method may benefit from FSM (we keep", "\"\"\" def __init__(self, allure_helper, title): self.allure_helper = allure_helper self.title =", "AllureTestListener(object): \"\"\" Per-test listener. Is responsible for recording in-test data", "cases. We expect cases to come from AllureTestListener -- and", "\"\"\" Writes single line to report. \"\"\" if self._allurelistener: with", "TEARDOWN test log. 
Meh, i should file an issue to", "self.write_attach(a) self.suites.setdefault(module_id, TestSuite(name=module_name, description=module_doc, tests=[], labels=[], start=testcase.start, # first case", "MASTER_HELPER} class AllureAgregatingListener(object): \"\"\" Listens to pytest hooks to generate", "action=\"store\", dest=\"allurereportdir\", metavar=\"DIR\", default=None, help=\"Generate Allure report in the specified", "self._fill_case(report, call, status, Status.BROKEN) elif report.skipped: if hasattr(report, 'wasxfail'): self._fill_case(report,", "@property def allure(self): listener = self.allure_helper.get_listener() # if listener has", "item.config.option.allurestories, item.config.option.allureseverities) if arg_labels and not item_labels & arg_labels: pytest.skip('Not", "__init__(self): self._allurelistener = None # FIXME: this gets injected elsewhere,", "'', self.environment, self.test))) @pytest.mark.hookwrapper def pytest_runtest_makereport(self, item, call): \"\"\" Decides", "a given testcases. \"\"\" return self.label(Label.TESTCASE, *testcases) def step(self, title):", "called for testitem -- report our status here if not", "elif status in SKIPPED_STATUSES: skip_message = type(report.longrepr) == tuple and", "get_listener(self): return self._allurelistener def attach(self, name, contents, type=AttachmentType.TEXT): # @ReservedAssignment", "# module's nodeid => TestSuite object self.suites = {} def", "assert False @pytest.allure.step('make test data') def make_test_data_bar(): raise ValueError('No data", "record steps only when that # FIXME: this breaks encapsulation", "contextmanager/decorator for steps. 
TODO: when moving to python 3, rework", "report.failed: self._fill_case(report, call, status, Status.FAILED) # FIXME: this is here", "import pytest import argparse from collections import namedtuple from six", "\"\"\" Provides fancy shortcuts for severity:: # these are the", "to minimize ze impact self.report_case(item, report) elif report.skipped: if hasattr(report,", "not exist)\") severities = [v for (_, v) in all_of(Severity)]", "failed, add it to the report. if not item.get_marker(\"unreported\") or", "contents, AttachmentType.TEXT) for (name, contents) in dict(report.sections).items()] self.test.stop = now()", "cases in suite.testcases we leave LAST with the same ID", "self._fill_case(report, call, status, Status.PENDING) else: self._fill_case(report, call, status, Status.CANCELED) elif", "namedtuple('CollectFail', 'name status message trace') class AllureCollectionListener(object): \"\"\" Listens to", "\"\"\" return self.label(Label.ISSUE, *issues) def dynamic_issue(self, *issues): \"\"\" Mark test", "*value): \"\"\" A decorator factory that returns ``pytest.mark`` for a", "`call` is called only if `setup` passes. 
See :py:func:`_pytest.runner.runtestprotocol` for", "def __init__(self, allure_helper, title): self.allure_helper = allure_helper self.title = title", "of label type (for future TestLabel things) :param legal_values: a", "report.skipped: if hasattr(report, 'wasxfail'): self._fill_case(report, call, status, Status.PENDING) else: self._fill_case(report,", "self.step = None @property def allure(self): listener = self.allure_helper.get_listener() #", "self.suites.setdefault(module_id, TestSuite(name=module_name, description=module_doc, tests=[], labels=[], start=testcase.start, # first case starts", "TestStep, Attach, TestSuite, Failure, TestLabel def pytest_addoption(parser): parser.getgroup(\"reporting\").addoption('--alluredir', action=\"store\", dest=\"allurereportdir\",", "skipped and xfailed SKIPPED if skipped and not xfailed \"\"\"", "given ``name``, pushes it to the ``self.stack`` and returns the", "AllureHelper(object): \"\"\" This object holds various utility methods used from", "title.__name__)(title) else: return LazyInitStepContext(self, title) def single_step(self, text): \"\"\" Writes", "allure_helper, title): self.allure_helper = allure_helper self.title = title self.step =", "'_allure_result'): module_id, module_name, module_doc, environment, testcase = pickle.loads(report._allure_result) report._allure_result =", "\"\"\" Listens to pytest hooks to generate reports for common", "failed to collect.') for fail in self.fails: self.impl.start_case(name=fail.name.split(\".\")[-1]) self.impl.stop_case(status=fail.status, message=fail.message,", "issue to xdist if self._magicaldoublereport: # to minimize ze impact", "status = Status.CANCELED self.fails.append(CollectFail(name=mangle_testnames(report.nodeid.split(\"::\"))[-1], status=status, message=get_exception_message(None, None, report), trace=report.longrepr)) def", "{}, only [{}] are allowed'.format(name, ', '.join(atoms - legal_values), ',", "with ``contextlib.ContextDecorator``. 
Usage examples:: import pytest def test_foo(): with pytest.allure.step('mystep'):", "has no `function` attribute description = item.reportinfo()[2] self.test = TestCase(name='.'.join(mangle_testnames([x.name", "`AllureTestListener` to the FS, fixing it fields :param attachment: a", "= {} def pytest_sessionfinish(self): \"\"\" We are done and have", "to the test report thing. The per-test reports are handled", "trim_msg_len = 89 short_message = skip_message.split('\\n')[0][:trim_msg_len] # FIXME: see pytest.runner.pytest_runtest_makereport", "then gets broken -- to cope with the xdist's -x", "config.option.allurereportdir if reportdir: # we actually record something allure_impl =", "(_, v) in all_of(Severity)] def label_type(name, legal_values=set()): \"\"\" argparse-type factory", "in FAILED_STATUSES: self.test.failure = Failure(message=get_exception_message(call.excinfo, pyteststatus, report), trace=report.longrepr or hasattr(report,", "testcase(self, *testcases): \"\"\" A decorator factory that returns ``pytest.mark`` for", "case \"\"\" if self.test: self.test.labels.extend([TestLabel(name=Label.ISSUE, value=issue) for issue in issues])", "# record steps only when that # FIXME: this breaks", "reports are handled by `AllureAgregatingListener` at the `pytest_runtest_logreport` hook. \"\"\"", "listener. Is responsible for recording in-test data and for attaching", "a better, more efficient way \"\"\" for s in self.suites.values():", "# we attach a four-tuple: (test module ID, test module", "', '.join(text_type(l) for l in sorted(arg_labels))) class LazyInitStepContext(StepContext): \"\"\" This", "python 3, rework this with ``contextlib.ContextDecorator``. 
Usage examples:: import pytest", "report.passed: self._fill_case(report, call, status, Status.PASSED) elif report.failed: self._fill_case(report, call, status,", "test # record steps only when that # FIXME: this", "self._allurelistener: self._allurelistener.dynamic_issue(*issues) def description(self, description): \"\"\" Sets description for the", "report.longrepr or short_message != skip_message and skip_message or '') def", "\"\"\" if self._allurelistener: self._allurelistener.description(description) def testcase(self, *testcases): \"\"\" A decorator", "in suite.testcases we leave LAST with the same ID --", "self.test.status in FAILED_STATUSES: self.report_case(item, report) def pytest_runtest_setup(item): item_labels = set((l.name,", "send EXACTLY ONE test report (it wont break, but it", "OK but failed at teardown => broken self._fill_case(report, call, status,", "self.label(Label.SEVERITY, severity) def feature(self, *features): \"\"\" A decorator factory that", "not xfailed \"\"\" report = (yield).get_result() status = self.config.hook.pytest_report_teststatus(report=report) status", "if not item.get_marker(\"unreported\") or self.test.status in FAILED_STATUSES: self.report_case(item, report) def", "not item_labels & arg_labels: pytest.skip('Not suitable with selected labels: %s.'", "if `legal_values` are given and there are values that fall", "the pytest_configure def get_listener(self): return self._allurelistener def attach(self, name, contents,", "of feature names. 
Run tests that have at least one", "(test module ID, test module name, test module doc, environment,", "with the same ID -- becase logreport can be sent", "= [] def attach(self, title, contents, attach_type): \"\"\" Store attachment", "used from ``pytest.allure`` namespace, like ``pytest.allure.attach`` \"\"\" def __init__(self): self._allurelistener", "= set(string.split(',')) if legal_values and not atoms < legal_values: raise", "one call <--- when this finishes the main code has", "story labels.\"\"\") def pytest_configure(config): reportdir = config.option.allurereportdir if reportdir: #", "Status.CANCELED) elif report.when == 'setup': # setup / teardown if", "makereport hook. # it is here to cope with xdist's", "are modules that failed to collect.') for fail in self.fails:", "BROKEN when either setup OR teardown are broken (and call", "only with these severities will be run. Possible values are:%s.\"\"\"", "def __init__(self): self._allurelistener = None # FIXME: this gets injected", "and not xfailed \"\"\" report = (yield).get_result() status = self.config.hook.pytest_report_teststatus(report=report)", "failure # still, that's no big deal -- test has", "FIXME: this breaks encapsulation a lot if hasattr(listener, 'stack'): return", "not atoms < legal_values: raise argparse.ArgumentTypeError('Illegal {} values: {}, only", "generate reports for common tests. 
\"\"\" def __init__(self, impl, config):", "fields :param attachment: a :py:class:`allure.structure.Attach` object \"\"\" # OMG, that", "side\" (AllureAggregatingListener) expects us to send EXACTLY ONE test report", "do it in a better, more efficient way \"\"\" for", "report.failed: self._fill_case(report, call, status, Status.BROKEN) elif report.skipped: if hasattr(report, 'wasxfail'):", "= title self.step = None @property def allure(self): listener =", "test_baz(): assert make_test_data_baz() @pytest.fixture() @pytest.allure.step('test fixture') def steppy_fixture(): return 1", "becase logreport can be sent MORE THAN ONE TIME (namely,", "description=description, start=now(), attachments=[], labels=labels_of(item), status=None, steps=[], id=str(uuid.uuid4())) # for later", "for decorated steps. It provides a possibility to create step", "collect. \"\"\" def __init__(self, impl): self.impl = impl self.fails =", "self._fill_case(report, call, status, Status.CANCELED) elif report.when == 'teardown': # as", "action=\"store\", dest=\"allurefeatures\", metavar=\"FEATURES_SET\", default={}, type=label_type(name=Label.FEATURE), help=\"\"\"Comma-separated list of feature names.", ":param name: of label type (for future TestLabel things) :param", "important info) `setup` and `teardown` are always called, but `call`", "self._allurelistener = None # FIXME: this gets injected elsewhere, like", "called, but `call` is called only if `setup` passes. See", "to decide exact moment when we call `_stop_case` to do", "SKIPPED if skipped and not xfailed \"\"\" report = (yield).get_result()", "in dir(Severity) and not attr.startswith('_'): return self.severity(getattr(Severity, attr)) else: raise", "hard to decide exact moment when we call `_stop_case` to", "@pytest.allure.step def make_test_data_baz(): raise ValueError('No data today') def test_baz(): assert", "See :py:func:`_pytest.runner.runtestprotocol` for proofs / ideas. 
The \"other side\" (AllureAggregatingListener)", "attach a four-tuple: (test module ID, test module name, test", "LazyInitStepContext(StepContext): \"\"\" This is a step context used for decorated", "\"\"\" A decorator factory that returns ``pytest.mark`` for a given", "my... title=title, type=attach_type) self.stack[-1].attachments.append(attach) def dynamic_issue(self, *issues): \"\"\" Attaches ``issues``", "import Status, AttachmentType, Severity, \\ FAILED_STATUSES, Label, SKIPPED_STATUSES from allure.utils", "raise ValueError('No data today') def test_baz(): assert make_test_data_baz() @pytest.fixture() @pytest.allure.step('test", "data is garbage-collected, see https://github.com/allure-framework/allure-python/issues/98 self.impl.environment.update(environment) for a in testcase.iter_attachments():", "AllureCollectionListener(object): \"\"\" Listens to pytest collection-related hooks to generate reports", "tests that have at least one of the specified feature", "% ', '.join(text_type(l) for l in sorted(arg_labels))) class LazyInitStepContext(StepContext): \"\"\"", "starts the suite! stop=None)).tests.append(testcase) CollectFail = namedtuple('CollectFail', 'name status message", "# FIXME: this gets injected elsewhere, like in the pytest_configure", "to do that. This method may benefit from FSM (we", "\"\"\" if self._allurelistener: self._allurelistener.attach(name, contents, type) def label(self, name, *value):", "later re-save those, oh my... title=title, type=attach_type) self.stack[-1].attachments.append(attach) def dynamic_issue(self,", "call, status, Status.CANCELED) elif report.when == 'setup': # setup /", "down fixtures (that still possess important info) `setup` and `teardown`", "Label, SKIPPED_STATUSES from allure.utils import parent_module, parent_down_from_module, labels_of, \\ all_of,", "ideas. 
The \"other side\" (AllureAggregatingListener) expects us to send EXACTLY", "collection-related hooks to generate reports for modules that failed to", "parser.getgroup(\"general\").addoption('--allure_stories', action=\"store\", dest=\"allurestories\", metavar=\"STORIES_SET\", default={}, type=label_type(name=Label.STORY), help=\"\"\"Comma-separated list of story", "status, Status.BROKEN) elif report.skipped: if hasattr(report, 'wasxfail'): self._fill_case(report, call, status,", "pytest_sessionfinish(self): \"\"\" Creates a testsuite with collection failures if there", "xdist-master node do all the important stuff config.pluginmanager.register(AllureAgregatingListener(allure_impl, config)) config.pluginmanager.register(AllureCollectionListener(allure_impl))", "tuples (name, value). :param name: of label type (for future", "self.pytest_runtest_makereport and AllureAgregatingListener.pytest_sessionfinish self._magicaldoublereport = hasattr(self.config, 'slaveinput') and self.config.getvalue(\"maxfail\") @pytest.mark.hookwrapper", "no `function` attribute description = item.reportinfo()[2] self.test = TestCase(name='.'.join(mangle_testnames([x.name for", "given and there are values that fall out of that", "self._allurelistener: with self.step(text): pass def environment(self, **env_dict): if self._allurelistener: self._allurelistener.environment.update(env_dict)", "\"\"\" Finalizes with important data :param report: py.test's `TestReport` :param", "severity:: # these are the same pytest.allure.CRITICAL pytest.allure.severity(pytest.allure.severity_level.CRITICAL) \"\"\" if", "description = item.reportinfo()[2] self.test = TestCase(name='.'.join(mangle_testnames([x.name for x in parent_down_from_module(item)])),", "decorator factory that returns ``pytest.mark`` for a given allure ``level``.", "are inside a test # record steps only when that", "issues]) def description(self, description): \"\"\" Sets description for the test", "*testcases): \"\"\" A decorator factory 
that returns ``pytest.mark`` for a", "and there are values that fall out of that \"\"\"", "THE TEARDOWN test log. Meh, i should file an issue", "report.when == 'call': if report.passed: self._fill_case(report, call, status, Status.PASSED) elif", "labels_of(item)) # see label_type arg_labels = set().union(item.config.option.allurefeatures, item.config.option.allurestories, item.config.option.allureseverities) if", "testcases. \"\"\" return self.label(Label.TESTCASE, *testcases) def step(self, title): \"\"\" A", "*issues): \"\"\" A decorator factory that returns ``pytest.mark`` for a", "of tuples (name, value). :param name: of label type (for", "the failed/xfailed/xpassed thing :param status: a :py:class:`allure.constants.Status` entry \"\"\" [self.attach(name,", "in labels_of(item)) # see label_type arg_labels = set().union(item.config.option.allurefeatures, item.config.option.allurestories, item.config.option.allureseverities)", "`setup` passes. See :py:func:`_pytest.runner.runtestprotocol` for proofs / ideas. The \"other", "message=get_exception_message(None, None, report), trace=report.longrepr)) def pytest_sessionfinish(self): \"\"\" Creates a testsuite", "pytest_configure def get_listener(self): return self._allurelistener def attach(self, name, contents, type=AttachmentType.TEXT):", "if self._allurelistener: self._allurelistener.dynamic_issue(*issues) def description(self, description): \"\"\" Sets description for", "\"\"\" Starts an new :py:class:`allure.structure.TestStep` with given ``name``, pushes it", "oh my... title=title, type=attach_type) self.stack[-1].attachments.append(attach) def dynamic_issue(self, *issues): \"\"\" Attaches", "ONE TIME (namely, if the test fails and then gets", "\"\"\" return self.label(Label.STORY, *stories) def issue(self, *issues): \"\"\" A decorator", "field to manifest their identity. Of all the test cases", "returns the step. 
\"\"\" step = TestStep(name=name, title=name, start=now(), attachments=[],", "test isn't marked as \"unreported\" or it has failed, add", "for a given issues. \"\"\" return self.label(Label.ISSUE, *issues) def dynamic_issue(self,", "(naturally) three times -- with report.when being: setup <--- fixtures", "if the test fails and then gets broken -- to", "given stories. \"\"\" return self.label(Label.STORY, *stories) def issue(self, *issues): \"\"\"", "= namedtuple('CollectFail', 'name status message trace') class AllureCollectionListener(object): \"\"\" Listens", "future TestLabel things) :param legal_values: a `set` of values that", "if `setup` passes. See :py:func:`_pytest.runner.runtestprotocol` for proofs / ideas. The", "see self.pytest_runtest_makereport and AllureAgregatingListener.pytest_sessionfinish self._magicaldoublereport = hasattr(self.config, 'slaveinput') and self.config.getvalue(\"maxfail\")", "# so actual pickled data is garbage-collected, see https://github.com/allure-framework/allure-python/issues/98 self.impl.environment.update(environment)", "if report.when == 'call': if report.passed: self._fill_case(report, call, status, Status.PASSED)", "see pytest.runner.pytest_runtest_makereport self.test.failure = Failure(message=(short_message + '...' 
* (len(skip_message) >", "Listens to pytest hooks to generate reports for common tests.", "elif report.skipped: if hasattr(report, 'wasxfail'): self._fill_case(report, call, status, Status.PENDING) else:", "TIME (namely, if the test fails and then gets broken", "def environment(self, **env_dict): if self._allurelistener: self._allurelistener.environment.update(env_dict) @property def attach_type(self): return", "impl self.fails = [] def pytest_collectreport(self, report): if not report.passed:", "if hasattr(report, '_allure_result'): module_id, module_name, module_doc, environment, testcase = pickle.loads(report._allure_result)", "call, status, Status.PENDING) else: self._fill_case(report, call, status, Status.CANCELED) elif report.when", "FIXME: this is here only to work around xdist's stupid", "with self.step(text): pass def environment(self, **env_dict): if self._allurelistener: self._allurelistener.environment.update(env_dict) @property", "object in current state for later actual write in the", "AttachmentType, Severity, \\ FAILED_STATUSES, Label, SKIPPED_STATUSES from allure.utils import parent_module,", "if test was OK but failed at teardown => broken", "self.test: self.test.labels.extend([TestLabel(name=Label.ISSUE, value=issue) for issue in issues]) def description(self, description):", "`pytest_runtest_logreport` hook. \"\"\" def __init__(self, config): self.config = config self.environment", "allure_label = getattr(pytest.mark, '%s.%s' % (Label.DEFAULT, name)) return allure_label(*value) def", "= max(case.stop for case in s.tests) known_ids = set() refined_tests", "self.impl = impl self.fails = [] def pytest_collectreport(self, report): if", "stuff config.pluginmanager.register(AllureAgregatingListener(allure_impl, config)) config.pluginmanager.register(AllureCollectionListener(allure_impl)) class AllureTestListener(object): \"\"\" Per-test listener. 
Is", "if arg_labels and not item_labels & arg_labels: pytest.skip('Not suitable with", "Decides when to actually report things. pytest runs this (naturally)", "= AllureImpl(reportdir) testlistener = AllureTestListener(config) pytest.allure._allurelistener = testlistener config.pluginmanager.register(testlistener) if", "status, Status.CANCELED) elif report.when == 'setup': # setup / teardown", "collection failures if there were any. \"\"\" if self.fails: self.impl.start_suite(name='test_collection_phase',", "legal_values and not atoms < legal_values: raise argparse.ArgumentTypeError('Illegal {} values:", "= self.config.hook.pytest_report_teststatus(report=report) status = status and status[0] if report.when ==", "for a given features. \"\"\" return self.label(Label.FEATURE, *features) def story(self,", "pytest.allure.CRITICAL pytest.allure.severity(pytest.allure.severity_level.CRITICAL) \"\"\" if attr in dir(Severity) and not attr.startswith('_'):", "feature(self, *features): \"\"\" A decorator factory that returns ``pytest.mark`` for", "type=label_type(name=Label.SEVERITY, legal_values=set(severities)), help=\"\"\"Comma-separated list of severity names. Tests only with", "duplicate cases in the report -- which is bad. 
So", "xdist if self._magicaldoublereport: # to minimize ze impact self.report_case(item, report)", "set(string.split(',')) if legal_values and not atoms < legal_values: raise argparse.ArgumentTypeError('Illegal", "all_of, get_exception_message, now, mangle_testnames from allure.structure import TestCase, TestStep, Attach,", "`function` attribute description = item.reportinfo()[2] self.test = TestCase(name='.'.join(mangle_testnames([x.name for x", "*issues): \"\"\" Attaches ``issues`` to the current active case \"\"\"", "to the current active case \"\"\" if self.test: self.test.labels.extend([TestLabel(name=Label.ISSUE, value=issue)", "self.test.description = description def start_step(self, name): \"\"\" Starts an new", "an new :py:class:`allure.structure.TestStep` with given ``name``, pushes it to the", "with important data :param report: py.test's `TestReport` :param call: py.test's", "anything) PENDING if skipped and xfailed SKIPPED if skipped and", "``name``, pushes it to the ``self.stack`` and returns the step.", "= None # so actual pickled data is garbage-collected, see", "is set of tuples (name, value). :param name: of label", "[] def attach(self, title, contents, attach_type): \"\"\" Store attachment object", "and have all the results in `self.suites` Lets write em", "the makereport hook. # it is here to cope with", "arg_labels: pytest.skip('Not suitable with selected labels: %s.' 
% ', '.join(text_type(l)", "[self.attach(name, contents, AttachmentType.TEXT) for (name, contents) in dict(report.sections).items()] self.test.stop =", "teardown if report.failed: self._fill_case(report, call, status, Status.BROKEN) elif report.skipped: if", "parent.module.__doc__ or '', self.environment, self.test))) @pytest.mark.hookwrapper def pytest_runtest_makereport(self, item, call):", "self._allurelistener.description(description) def testcase(self, *testcases): \"\"\" A decorator factory that returns", "status: a :py:class:`allure.constants.Status` entry \"\"\" [self.attach(name, contents, AttachmentType.TEXT) for (name,", "but failed at teardown => broken self._fill_case(report, call, status, Status.BROKEN)", "used for decorated steps. It provides a possibility to create", "with the xdist's -x behavior we have to have tests", "listener has `stack` we are inside a test # record", "x in parent_down_from_module(item)])), description=description, start=now(), attachments=[], labels=labels_of(item), status=None, steps=[], id=str(uuid.uuid4()))", "if t.id not in known_ids: known_ids.add(t.id) refined_tests.append(t) s.tests = refined_tests[::-1]", "def start_step(self, name): \"\"\" Starts an new :py:class:`allure.structure.TestStep` with given", "attachments=[], steps=[]) self.stack[-1].steps.append(step) self.stack.append(step) return step def stop_step(self): \"\"\" Stops", "# it is here to cope with xdist's begavior regarding", "of severity names. Tests only with these severities will be", "``issues`` to the current active case \"\"\" if self.test: self.test.labels.extend([TestLabel(name=Label.ISSUE,", "\"\"\" return self.label(Label.FEATURE, *features) def story(self, *stories): \"\"\" A decorator", "report._allure_result = None # so actual pickled data is garbage-collected,", "begavior regarding -x. # see self.pytest_runtest_makereport and AllureAgregatingListener.pytest_sessionfinish self._magicaldoublereport =", "cases in the report -- which is bad. 
So we", "None @property def allure(self): listener = self.allure_helper.get_listener() # if listener", "\"\"\" if self.test: self.test.labels.extend([TestLabel(name=Label.ISSUE, value=issue) for issue in issues]) def", "environment, TestCase) report.__dict__.update(_allure_result=pickle.dumps((parent.nodeid, parent.module.__name__, parent.module.__doc__ or '', self.environment, self.test))) @pytest.mark.hookwrapper", "decorated steps. It provides a possibility to create step decorators,", "the report. if not item.get_marker(\"unreported\") or self.test.status in FAILED_STATUSES: self.report_case(item,", "initialized in this one call <--- when this finishes the", "(we keep track of what has already happened via self.test.status)", "def pytest_runtest_logreport(self, report): if hasattr(report, '_allure_result'): module_id, module_name, module_doc, environment,", "for a given allure ``level``. \"\"\" return self.label(Label.SEVERITY, severity) def", "pytest def test_foo(): with pytest.allure.step('mystep'): assert False @pytest.allure.step('make test data')", "breaks encapsulation a lot if hasattr(listener, 'stack'): return listener class", "start=now(), attachments=[], labels=labels_of(item), status=None, steps=[], id=str(uuid.uuid4())) # for later resolution", "def write_attach(self, attachment): \"\"\" Writes attachment object from the `AllureTestListener`", "# we later re-save those, oh my... 
title=title, type=attach_type) self.stack[-1].attachments.append(attach)", "stop_step(self): \"\"\" Stops the step at the top of ``self.stack``", "if report.passed: self._fill_case(report, call, status, Status.PASSED) elif report.failed: self._fill_case(report, call,", "CALL failures) TODO: do it in a better, more efficient", "logreport can be sent MORE THAN ONE TIME (namely, if", "bad attachment.source = self.impl._save_attach(attachment.source, attachment.type) attachment.type = attachment.type.mime_type def pytest_runtest_logreport(self,", "def test_bar(): assert make_test_data_bar() @pytest.allure.step def make_test_data_baz(): raise ValueError('No data", "\"\"\" return self.label(Label.TESTCASE, *testcases) def step(self, title): \"\"\" A contextmanager/decorator", "ValueError('No data today') def test_bar(): assert make_test_data_bar() @pytest.allure.step def make_test_data_baz():", "Status.BROKEN) else: # mark it broken so, well, someone has", "title): \"\"\" A contextmanager/decorator for steps. TODO: when moving to", "and report.longrepr or short_message != skip_message and skip_message or '')", "self.config = config self.environment = {} self.test = None #", "initiated before pytest_configure, when no AllureListener initiated yet. \"\"\" def", "least one of the specified story labels.\"\"\") def pytest_configure(config): reportdir", "text): \"\"\" Writes single line to report. 
\"\"\" if self._allurelistener:", "AllureTestListener(config) pytest.allure._allurelistener = testlistener config.pluginmanager.register(testlistener) if not hasattr(config, 'slaveinput'): #", "item, nextitem): try: # for common items description = item.function.__doc__", "label(self, name, *value): \"\"\" A decorator factory that returns ``pytest.mark``", "severity_level(self): return Severity def __getattr__(self, attr): \"\"\" Provides fancy shortcuts", "self._fill_case(report, call, status, Status.FAILED) # FIXME: this is here only", "in atoms) return a_label_type parser.getgroup(\"general\").addoption('--allure_severities', action=\"store\", dest=\"allureseverities\", metavar=\"SEVERITIES_SET\", default={}, type=label_type(name=Label.SEVERITY,", "class AllureAgregatingListener(object): \"\"\" Listens to pytest hooks to generate reports", "of the specified feature labels.\"\"\") parser.getgroup(\"general\").addoption('--allure_stories', action=\"store\", dest=\"allurestories\", metavar=\"STORIES_SET\", default={},", "pytest runs this (naturally) three times -- with report.when being:", "attachment object in current state for later actual write in", "None, report), trace=report.longrepr)) def pytest_sessionfinish(self): \"\"\" Creates a testsuite with", "return self.label(Label.TESTCASE, *testcases) def step(self, title): \"\"\" A contextmanager/decorator for", "parser.getgroup(\"general\").addoption('--allure_features', action=\"store\", dest=\"allurefeatures\", metavar=\"FEATURES_SET\", default={}, type=label_type(name=Label.FEATURE), help=\"\"\"Comma-separated list of feature", "value). :param name: of label type (for future TestLabel things)", "data today') def test_bar(): assert make_test_data_bar() @pytest.allure.step def make_test_data_baz(): raise", ":param pyteststatus: the failed/xfailed/xpassed thing :param status: a :py:class:`allure.constants.Status` entry", "add it to the report. 
if not item.get_marker(\"unreported\") or self.test.status", "(name, contents) in dict(report.sections).items()] self.test.stop = now() self.test.status = status", "It provides a possibility to create step decorators, being initiated", "= now() self.test.status = status if status in FAILED_STATUSES: self.test.failure", "for common tests. \"\"\" def __init__(self, impl, config): self.impl =", "`legal_values` are given and there are values that fall out", "for later actual write in the `AllureAgregatingListener.write_attach` \"\"\" attach =", "this is here only to work around xdist's stupid -x", "to pytest hooks to generate reports for common tests. \"\"\"", "ValueError('No data today') def test_baz(): assert make_test_data_baz() @pytest.fixture() @pytest.allure.step('test fixture')", "def attach(self, name, contents, type=AttachmentType.TEXT): # @ReservedAssignment \"\"\" Attaches ``contents``", "# OMG, that is bad attachment.source = self.impl._save_attach(attachment.source, attachment.type) attachment.type", "a given stories. \"\"\" return self.label(Label.STORY, *stories) def issue(self, *issues):", "self.impl = impl # module's nodeid => TestSuite object self.suites", "idea of teardown failure # still, that's no big deal", "& arg_labels: pytest.skip('Not suitable with selected labels: %s.' % ',", "examples:: import pytest def test_foo(): with pytest.allure.step('mystep'): assert False @pytest.allure.step('make", "parser.getgroup(\"reporting\").addoption('--alluredir', action=\"store\", dest=\"allurereportdir\", metavar=\"DIR\", default=None, help=\"Generate Allure report in the", "report.wasxfail) elif status in SKIPPED_STATUSES: skip_message = type(report.longrepr) == tuple", "or report.wasxfail trim_msg_len = 89 short_message = skip_message.split('\\n')[0][:trim_msg_len] # FIXME:", "def severity_level(self): return Severity def __getattr__(self, attr): \"\"\" Provides fancy", "for a given stories. 
\"\"\" return self.label(Label.STORY, *stories) def issue(self,", "help=\"Generate Allure report in the specified directory (may not exist)\")", "call, status, Status.CANCELED) elif report.when == 'teardown': # as teardown", "create step decorators, being initiated before pytest_configure, when no AllureListener", "tests. \"\"\" def __init__(self, impl, config): self.impl = impl #", "attribute description = item.reportinfo()[2] self.test = TestCase(name='.'.join(mangle_testnames([x.name for x in", "handled by `AllureAgregatingListener` at the `pytest_runtest_logreport` hook. \"\"\" def __init__(self,", "assert steppy_fixture \"\"\" if callable(title): return LazyInitStepContext(self, title.__name__)(title) else: return", "when that # FIXME: this breaks encapsulation a lot if", "description def start_step(self, name): \"\"\" Starts an new :py:class:`allure.structure.TestStep` with", "Status.PENDING) else: self._fill_case(report, call, status, Status.CANCELED) elif report.when == 'teardown':", "FAILED when call fails and others OK BROKEN when either", "given testcases. \"\"\" return self.label(Label.TESTCASE, *testcases) def step(self, title): \"\"\"", "in s.tests[::-1]: if t.id not in known_ids: known_ids.add(t.id) refined_tests.append(t) s.tests", "for t in s.tests[::-1]: if t.id not in known_ids: known_ids.add(t.id)", "= None self.stack = [] def attach(self, title, contents, attach_type):", "raise argparse.ArgumentTypeError('Illegal {} values: {}, only [{}] are allowed'.format(name, ',", "that returns ``pytest.mark`` for a given label. 
\"\"\" allure_label =", "important stuff config.pluginmanager.register(AllureAgregatingListener(allure_impl, config)) config.pluginmanager.register(AllureCollectionListener(allure_impl)) class AllureTestListener(object): \"\"\" Per-test listener.", "trace=status == Status.PENDING and report.longrepr or short_message != skip_message and", "!= skip_message and skip_message or '') def report_case(self, item, report):", "\"\"\" A contextmanager/decorator for steps. TODO: when moving to python", "to the `report` in a `AllureAggegatingListener`-understood way \"\"\" parent =", "\"\"\" Store attachment object in current state for later actual", "test_bar(): assert make_test_data_bar() @pytest.allure.step def make_test_data_baz(): raise ValueError('No data today')", "AllureListener initiated yet. \"\"\" def __init__(self, allure_helper, title): self.allure_helper =", "which is bad. So we work hard to decide exact", "contents, type) def label(self, name, *value): \"\"\" A decorator factory", "for v in atoms) return a_label_type parser.getgroup(\"general\").addoption('--allure_severities', action=\"store\", dest=\"allureseverities\", metavar=\"SEVERITIES_SET\",", "= item.reportinfo()[2] self.test = TestCase(name='.'.join(mangle_testnames([x.name for x in parent_down_from_module(item)])), description=description,", "return LazyInitStepContext(self, title) def single_step(self, text): \"\"\" Writes single line", "shortcuts for severity:: # these are the same pytest.allure.CRITICAL pytest.allure.severity(pytest.allure.severity_level.CRITICAL)", "attachment.source = self.impl._save_attach(attachment.source, attachment.type) attachment.type = attachment.type.mime_type def pytest_runtest_logreport(self, report):", "config)) config.pluginmanager.register(AllureCollectionListener(allure_impl)) class AllureTestListener(object): \"\"\" Per-test listener. 
Is responsible for", "import uuid import pickle import pytest import argparse from collections", "a_label_type parser.getgroup(\"general\").addoption('--allure_severities', action=\"store\", dest=\"allureseverities\", metavar=\"SEVERITIES_SET\", default={}, type=label_type(name=Label.SEVERITY, legal_values=set(severities)), help=\"\"\"Comma-separated list", "test report (it wont break, but it will duplicate cases", "nobody likes empty suites s.stop = max(case.stop for case in", "report thing. The per-test reports are handled by `AllureAgregatingListener` at", "@pytest.fixture() @pytest.allure.step('test fixture') def steppy_fixture(): return 1 def test_baz(steppy_fixture): assert", "`_stop_case` to do that. This method may benefit from FSM", "one of the specified story labels.\"\"\") def pytest_configure(config): reportdir =", "TODO: when moving to python 3, rework this with ``contextlib.ContextDecorator``.", "that returns ``pytest.mark`` for a given issues. \"\"\" return self.label(Label.ISSUE,", "see https://github.com/allure-framework/allure-python/issues/98 self.impl.environment.update(environment) for a in testcase.iter_attachments(): self.write_attach(a) self.suites.setdefault(module_id, TestSuite(name=module_name,", "report.passed: if self.test.status not in FAILED_STATUSES: # if test was", "labelish things. 
processed value is set of tuples (name, value).", "module ID, test module name, test module doc, environment, TestCase)", "suite.testcases we leave LAST with the same ID -- becase", "-x behavior we have to have tests even at CALL", "test cases in suite.testcases we leave LAST with the same", "around xdist's stupid -x thing when in exits BEFORE THE", "come from AllureTestListener -- and the have ._id field to", "[] def pytest_collectreport(self, report): if not report.passed: if report.failed: status", "testlistener = AllureTestListener(config) pytest.allure._allurelistener = testlistener config.pluginmanager.register(testlistener) if not hasattr(config,", "call): \"\"\" Decides when to actually report things. pytest runs", "it in a better, more efficient way \"\"\" for s", "\"\"\" def __init__(self, config): self.config = config self.environment = {}", "report. \"\"\" if self._allurelistener: with self.step(text): pass def environment(self, **env_dict):", "self.config.getvalue(\"maxfail\") @pytest.mark.hookwrapper def pytest_runtest_protocol(self, item, nextitem): try: # for common", "if self._allurelistener: self._allurelistener.environment.update(env_dict) @property def attach_type(self): return AttachmentType @property def", "module_id, module_name, module_doc, environment, testcase = pickle.loads(report._allure_result) report._allure_result = None", "a given allure ``level``. \"\"\" return self.label(Label.SEVERITY, severity) def feature(self,", "pytest collection-related hooks to generate reports for modules that failed", "hasattr(self.config, 'slaveinput') and self.config.getvalue(\"maxfail\") @pytest.mark.hookwrapper def pytest_runtest_protocol(self, item, nextitem): try:", "various utility methods used from ``pytest.allure`` namespace, like ``pytest.allure.attach`` \"\"\"", "``pytest.mark`` for a given allure ``level``. 
\"\"\" return self.label(Label.SEVERITY, severity)", "lot if hasattr(listener, 'stack'): return listener class AllureHelper(object): \"\"\" This", "def pytest_addoption(parser): parser.getgroup(\"reporting\").addoption('--alluredir', action=\"store\", dest=\"allurereportdir\", metavar=\"DIR\", default=None, help=\"Generate Allure report", "has finished teardown <--- tears down fixtures (that still possess", "current active case \"\"\" if self.test: self.test.labels.extend([TestLabel(name=Label.ISSUE, value=issue) for issue", "with given ``name`` and ``type``. \"\"\" if self._allurelistener: self._allurelistener.attach(name, contents,", "for this label, if any limit whatsoever :raises ArgumentTypeError: if", "try: # for common items description = item.function.__doc__ except AttributeError:", "def steppy_fixture(): return 1 def test_baz(steppy_fixture): assert steppy_fixture \"\"\" if", "``pytest.mark`` for a given stories. \"\"\" return self.label(Label.STORY, *stories) def", "FAILED_STATUSES: self.test.failure = Failure(message=get_exception_message(call.excinfo, pyteststatus, report), trace=report.longrepr or hasattr(report, 'wasxfail')", "values that fall out of that \"\"\" def a_label_type(string): atoms", "parent_module, parent_down_from_module, labels_of, \\ all_of, get_exception_message, now, mangle_testnames from allure.structure", "def test_baz(): assert make_test_data_baz() @pytest.fixture() @pytest.allure.step('test fixture') def steppy_fixture(): return", "\"\"\" [self.attach(name, contents, AttachmentType.TEXT) for (name, contents) in dict(report.sections).items()] self.test.stop", "title self.step = None @property def allure(self): listener = self.allure_helper.get_listener()", "are:%s.\"\"\" % ', '.join(severities)) parser.getgroup(\"general\").addoption('--allure_features', action=\"store\", dest=\"allurefeatures\", metavar=\"FEATURES_SET\", default={}, type=label_type(name=Label.FEATURE),", "t.id not in known_ids: known_ids.add(t.id) 
refined_tests.append(t) s.tests = refined_tests[::-1] with", "# if test was OK but failed at teardown =>", "`self.test` to the `report` in a `AllureAggegatingListener`-understood way \"\"\" parent", "we have to have tests even at CALL failures) TODO:", "s.stop = max(case.stop for case in s.tests) known_ids = set()", "not item.get_marker(\"unreported\") or self.test.status in FAILED_STATUSES: self.report_case(item, report) def pytest_runtest_setup(item):", "# still, that's no big deal -- test has already", "being initiated before pytest_configure, when no AllureListener initiated yet. \"\"\"", "\"\"\" This object holds various utility methods used from ``pytest.allure``", "what has already happened via self.test.status) Expected behavior is: FAILED", "stop=None)).tests.append(testcase) CollectFail = namedtuple('CollectFail', 'name status message trace') class AllureCollectionListener(object):", "def single_step(self, text): \"\"\" Writes single line to report. \"\"\"", "if report.failed: self._fill_case(report, call, status, Status.BROKEN) elif report.skipped: if hasattr(report,", "marked as \"unreported\" or it has failed, add it to", "moving to python 3, rework this with ``contextlib.ContextDecorator``. Usage examples::", "allure.common import AllureImpl, StepContext from allure.constants import Status, AttachmentType, Severity,", "dest=\"allurereportdir\", metavar=\"DIR\", default=None, help=\"Generate Allure report in the specified directory", "self.stack = [] def attach(self, title, contents, attach_type): \"\"\" Store", "step(self, title): \"\"\" A contextmanager/decorator for steps. TODO: when moving", "exits BEFORE THE TEARDOWN test log. 
Meh, i should file", "FSM (we keep track of what has already happened via", "name, test module doc, environment, TestCase) report.__dict__.update(_allure_result=pickle.dumps((parent.nodeid, parent.module.__name__, parent.module.__doc__ or", "that once again self.test.status = Status.BROKEN # if a test", "make_test_data_baz() @pytest.fixture() @pytest.allure.step('test fixture') def steppy_fixture(): return 1 def test_baz(steppy_fixture):", "class AllureHelper(object): \"\"\" This object holds various utility methods used", "to cope with xdist's begavior regarding -x. # see self.pytest_runtest_makereport", "at the top of ``self.stack`` \"\"\" step = self.stack.pop() step.stop", "resolution in AllureAgregatingListener.pytest_sessionfinish self.stack = [self.test] yield self.test = None", "This is a step context used for decorated steps. It", "so actual pickled data is garbage-collected, see https://github.com/allure-framework/allure-python/issues/98 self.impl.environment.update(environment) for", "def pytest_configure(config): reportdir = config.option.allurereportdir if reportdir: # we actually", "TestStep(name=name, title=name, start=now(), attachments=[], steps=[]) self.stack[-1].steps.append(step) self.stack.append(step) return step def", "atoms) return a_label_type parser.getgroup(\"general\").addoption('--allure_severities', action=\"store\", dest=\"allureseverities\", metavar=\"SEVERITIES_SET\", default={}, type=label_type(name=Label.SEVERITY, legal_values=set(severities)),", "dest=\"allureseverities\", metavar=\"SEVERITIES_SET\", default={}, type=label_type(name=Label.SEVERITY, legal_values=set(severities)), help=\"\"\"Comma-separated list of severity names.", "later resolution in AllureAgregatingListener.pytest_sessionfinish self.stack = [self.test] yield self.test =", "list of severity names. Tests only with these severities will", "# if a test isn't marked as \"unreported\" or it", "# first case starts the suite! 
stop=None)).tests.append(testcase) CollectFail = namedtuple('CollectFail',", "names. Run tests that have at least one of the", "%s.' % ', '.join(text_type(l) for l in sorted(arg_labels))) class LazyInitStepContext(StepContext):", "status = self.config.hook.pytest_report_teststatus(report=report) status = status and status[0] if report.when", "yet. \"\"\" def __init__(self, allure_helper, title): self.allure_helper = allure_helper self.title", "self.suites = {} def pytest_sessionfinish(self): \"\"\" We are done and", "labels: %s.' % ', '.join(text_type(l) for l in sorted(arg_labels))) class", "for case in s.tests) known_ids = set() refined_tests = []", "report.passed: if report.failed: status = Status.BROKEN else: status = Status.CANCELED", "\"\"\" attach = Attach(source=contents, # we later re-save those, oh", "= type(report.longrepr) == tuple and report.longrepr[2] or report.wasxfail trim_msg_len =", "decorator factory that returns ``pytest.mark`` for a given issues. \"\"\"", "report = (yield).get_result() status = self.config.hook.pytest_report_teststatus(report=report) status = status and", "will be run. Possible values are:%s.\"\"\" % ', '.join(severities)) parser.getgroup(\"general\").addoption('--allure_features',", "always called for testitem -- report our status here if", "`AllureAggegatingListener`-understood way \"\"\" parent = parent_module(item) # we attach a", "something allure_impl = AllureImpl(reportdir) testlistener = AllureTestListener(config) pytest.allure._allurelistener = testlistener", "steppy_fixture(): return 1 def test_baz(steppy_fixture): assert steppy_fixture \"\"\" if callable(title):", "TestSuite(name=module_name, description=module_doc, tests=[], labels=[], start=testcase.start, # first case starts the", "status message trace') class AllureCollectionListener(object): \"\"\" Listens to pytest collection-related", "Status.PENDING and report.longrepr or short_message != skip_message and skip_message or", "modules that failed to collect. 
\"\"\" def __init__(self, impl): self.impl", "<--- when this finishes the main code has finished teardown", "assert make_test_data_bar() @pytest.allure.step def make_test_data_baz(): raise ValueError('No data today') def", "the specified feature labels.\"\"\") parser.getgroup(\"general\").addoption('--allure_stories', action=\"store\", dest=\"allurestories\", metavar=\"STORIES_SET\", default={}, type=label_type(name=Label.STORY),", "status = Status.BROKEN else: status = Status.CANCELED self.fails.append(CollectFail(name=mangle_testnames(report.nodeid.split(\"::\"))[-1], status=status, message=get_exception_message(None,", "legal_values=set()): \"\"\" argparse-type factory for labelish things. processed value is", "least one of the specified feature labels.\"\"\") parser.getgroup(\"general\").addoption('--allure_stories', action=\"store\", dest=\"allurestories\",", "for common items description = item.function.__doc__ except AttributeError: # for", "not in known_ids: known_ids.add(t.id) refined_tests.append(t) s.tests = refined_tests[::-1] with self.impl._reportfile('%s-testsuite.xml'", "self._allurelistener: self._allurelistener.attach(name, contents, type) def label(self, name, *value): \"\"\" A", "all_of(Severity)] def label_type(name, legal_values=set()): \"\"\" argparse-type factory for labelish things.", "is: FAILED when call fails and others OK BROKEN when", "empty suites s.stop = max(case.stop for case in s.tests) known_ids", "that failed to collect. \"\"\" def __init__(self, impl): self.impl =", "start_step(self, name): \"\"\" Starts an new :py:class:`allure.structure.TestStep` with given ``name``,", "a :py:class:`allure.constants.Status` entry \"\"\" [self.attach(name, contents, AttachmentType.TEXT) for (name, contents)", "__init__(self, config): self.config = config self.environment = {} self.test =", "AllureAgregatingListener(object): \"\"\" Listens to pytest hooks to generate reports for", "were any. 
\"\"\" if self.fails: self.impl.start_suite(name='test_collection_phase', title='Collection phase', description='This is", "for a given testcases. \"\"\" return self.label(Label.TESTCASE, *testcases) def step(self,", "= TestCase(name='.'.join(mangle_testnames([x.name for x in parent_down_from_module(item)])), description=description, start=now(), attachments=[], labels=labels_of(item),", "# if listener has `stack` we are inside a test", "deal -- test has already failed # TODO: think about", "this label, if any limit whatsoever :raises ArgumentTypeError: if `legal_values`", "= self.impl._save_attach(attachment.source, attachment.type) attachment.type = attachment.type.mime_type def pytest_runtest_logreport(self, report): if", "test ``issues`` from inside. \"\"\" if self._allurelistener: self._allurelistener.dynamic_issue(*issues) def description(self,", "failed/xfailed/xpassed thing :param status: a :py:class:`allure.constants.Status` entry \"\"\" [self.attach(name, contents,", "info) `setup` and `teardown` are always called, but `call` is", "decorator factory that returns ``pytest.mark`` for a given features. \"\"\"", "for issue in issues]) def description(self, description): \"\"\" Sets description", "new :py:class:`allure.structure.TestStep` with given ``name``, pushes it to the ``self.stack``", "> trim_msg_len)), trace=status == Status.PENDING and report.longrepr or short_message !=", "from allure.common import AllureImpl, StepContext from allure.constants import Status, AttachmentType,", "def pytest_runtest_setup(item): item_labels = set((l.name, l.value) for l in labels_of(item))", "and the have ._id field to manifest their identity. Of", "metavar=\"SEVERITIES_SET\", default={}, type=label_type(name=Label.SEVERITY, legal_values=set(severities)), help=\"\"\"Comma-separated list of severity names. 
Tests", "four-tuple: (test module ID, test module name, test module doc,", "self.step(text): pass def environment(self, **env_dict): if self._allurelistener: self._allurelistener.environment.update(env_dict) @property def", "a :py:class:`allure.structure.Attach` object \"\"\" # OMG, that is bad attachment.source", "ID -- becase logreport can be sent MORE THAN ONE", "actual write in the `AllureAgregatingListener.write_attach` \"\"\" attach = Attach(source=contents, #", "return 1 def test_baz(steppy_fixture): assert steppy_fixture \"\"\" if callable(title): return", "any limit whatsoever :raises ArgumentTypeError: if `legal_values` are given and", "attach_type(self): return AttachmentType @property def severity_level(self): return Severity def __getattr__(self,", "to actually report things. pytest runs this (naturally) three times", "getattr(pytest.mark, '%s.%s' % (Label.DEFAULT, name)) return allure_label(*value) def severity(self, severity):", "return AttachmentType @property def severity_level(self): return Severity def __getattr__(self, attr):", "inside a test # record steps only when that #", "rework this with ``contextlib.ContextDecorator``. Usage examples:: import pytest def test_foo():", "status = status and status[0] if report.when == 'call': if", "exist)\") severities = [v for (_, v) in all_of(Severity)] def", "to report. \"\"\" if self._allurelistener: with self.step(text): pass def environment(self,", "today') def test_bar(): assert make_test_data_bar() @pytest.allure.step def make_test_data_baz(): raise ValueError('No", "l in labels_of(item)) # see label_type arg_labels = set().union(item.config.option.allurefeatures, item.config.option.allurestories,", "'stack'): return listener class AllureHelper(object): \"\"\" This object holds various", "= self.allure_helper.get_listener() # if listener has `stack` we are inside", "suitable with selected labels: %s.' 
% ', '.join(text_type(l) for l", "pytest import argparse from collections import namedtuple from six import", "was OK but failed at teardown => broken self._fill_case(report, call,", "= attachment.type.mime_type def pytest_runtest_logreport(self, report): if hasattr(report, '_allure_result'): module_id, module_name,", "item_labels & arg_labels: pytest.skip('Not suitable with selected labels: %s.' %", "pytest.skip('Not suitable with selected labels: %s.' % ', '.join(text_type(l) for", "FAILED_STATUSES: self.report_case(item, report) def pytest_runtest_setup(item): item_labels = set((l.name, l.value) for", "factory that returns ``pytest.mark`` for a given testcases. \"\"\" return", "in the report -- which is bad. So we work", "{} def pytest_sessionfinish(self): \"\"\" We are done and have all", "name: of label type (for future TestLabel things) :param legal_values:", "regarding -x. # see self.pytest_runtest_makereport and AllureAgregatingListener.pytest_sessionfinish self._magicaldoublereport = hasattr(self.config,", "get_exception_message, now, mangle_testnames from allure.structure import TestCase, TestStep, Attach, TestSuite,", "report: py.test's `TestReport` :param call: py.test's `CallInfo` :param pyteststatus: the", "parent_module(item) # we attach a four-tuple: (test module ID, test", "def label(self, name, *value): \"\"\" A decorator factory that returns", "returns ``pytest.mark`` for a given testcases. \"\"\" return self.label(Label.TESTCASE, *testcases)", "\"\"\" step = self.stack.pop() step.stop = now() def _fill_case(self, report,", "the suite! 
stop=None)).tests.append(testcase) CollectFail = namedtuple('CollectFail', 'name status message trace')", "report.when == 'teardown': # as teardown is always called for", "AllureHelper() def pytest_namespace(): return {'allure': MASTER_HELPER} class AllureAgregatingListener(object): \"\"\" Listens", "', '.join(legal_values))) return set((name, v) for v in atoms) return", "trace') class AllureCollectionListener(object): \"\"\" Listens to pytest collection-related hooks to", "a given features. \"\"\" return self.label(Label.FEATURE, *features) def story(self, *stories):", "gets broken -- to cope with the xdist's -x behavior", "file an issue to xdist if self._magicaldoublereport: # to minimize", "returns ``pytest.mark`` for a given features. \"\"\" return self.label(Label.FEATURE, *features)", "impact self.report_case(item, report) elif report.skipped: if hasattr(report, 'wasxfail'): self._fill_case(report, call,", "testlistener config.pluginmanager.register(testlistener) if not hasattr(config, 'slaveinput'): # on xdist-master node", "\"\"\" Adds `self.test` to the `report` in a `AllureAggegatingListener`-understood way", "self.impl._write_xml(f, s) self.impl.store_environment() def write_attach(self, attachment): \"\"\" Writes attachment object", "name, *value): \"\"\" A decorator factory that returns ``pytest.mark`` for", "import pickle import pytest import argparse from collections import namedtuple", "impl, config): self.impl = impl # module's nodeid => TestSuite", "dynamic_issue(self, *issues): \"\"\" Attaches ``issues`` to the current active case", "config.pluginmanager.register(AllureCollectionListener(allure_impl)) class AllureTestListener(object): \"\"\" Per-test listener. 
Is responsible for recording", "self.label(Label.STORY, *stories) def issue(self, *issues): \"\"\" A decorator factory that", "setup / teardown if report.failed: self._fill_case(report, call, status, Status.BROKEN) elif", "holds various utility methods used from ``pytest.allure`` namespace, like ``pytest.allure.attach``", "contents) in dict(report.sections).items()] self.test.stop = now() self.test.status = status if", "labels.\"\"\") parser.getgroup(\"general\").addoption('--allure_stories', action=\"store\", dest=\"allurestories\", metavar=\"STORIES_SET\", default={}, type=label_type(name=Label.STORY), help=\"\"\"Comma-separated list of", "entry \"\"\" [self.attach(name, contents, AttachmentType.TEXT) for (name, contents) in dict(report.sections).items()]", "TODO: do it in a better, more efficient way \"\"\"", "object \"\"\" # OMG, that is bad attachment.source = self.impl._save_attach(attachment.source,", "namespace, like ``pytest.allure.attach`` \"\"\" def __init__(self): self._allurelistener = None #", "for later resolution in AllureAgregatingListener.pytest_sessionfinish self.stack = [self.test] yield self.test", "collections import namedtuple from six import text_type from allure.common import", "legal_values: a `set` of values that are legal for this", "pickle import pytest import argparse from collections import namedtuple from", "test module doc, environment, TestCase) report.__dict__.update(_allure_result=pickle.dumps((parent.nodeid, parent.module.__name__, parent.module.__doc__ or '',", "*stories) def issue(self, *issues): \"\"\" A decorator factory that returns", "* (len(skip_message) > trim_msg_len)), trace=status == Status.PENDING and report.longrepr or", "expects us to send EXACTLY ONE test report (it wont", "AllureAgregatingListener.pytest_sessionfinish self._magicaldoublereport = hasattr(self.config, 'slaveinput') and self.config.getvalue(\"maxfail\") @pytest.mark.hookwrapper def pytest_runtest_protocol(self,", "def severity(self, severity): 
\"\"\" A decorator factory that returns ``pytest.mark``", "in exits BEFORE THE TEARDOWN test log. Meh, i should", "is the tests collection phase. Failures are modules that failed", "us to send EXACTLY ONE test report (it wont break,", "type=label_type(name=Label.FEATURE), help=\"\"\"Comma-separated list of feature names. Run tests that have", "CollectFail = namedtuple('CollectFail', 'name status message trace') class AllureCollectionListener(object): \"\"\"", "self.test.failure = Failure(message=get_exception_message(call.excinfo, pyteststatus, report), trace=report.longrepr or hasattr(report, 'wasxfail') and", "the important stuff config.pluginmanager.register(AllureAgregatingListener(allure_impl, config)) config.pluginmanager.register(AllureCollectionListener(allure_impl)) class AllureTestListener(object): \"\"\" Per-test", "label_type(name, legal_values=set()): \"\"\" argparse-type factory for labelish things. processed value", "argparse.ArgumentTypeError('Illegal {} values: {}, only [{}] are allowed'.format(name, ', '.join(atoms", "steps. It provides a possibility to create step decorators, being", "is called only if `setup` passes. See :py:func:`_pytest.runner.runtestprotocol` for proofs", "\"\"\" allure_label = getattr(pytest.mark, '%s.%s' % (Label.DEFAULT, name)) return allure_label(*value)", "the report -- which is bad. So we work hard", "to collect. \"\"\" def __init__(self, impl): self.impl = impl self.fails", "We are done and have all the results in `self.suites`", "the specified directory (may not exist)\") severities = [v for", "But first we kinda-unify the test cases. We expect cases", "test was OK but failed at teardown => broken self._fill_case(report,", "object holds various utility methods used from ``pytest.allure`` namespace, like", "results in `self.suites` Lets write em down. 
But first we", "# nobody likes empty suites s.stop = max(case.stop for case", "self.impl._save_attach(attachment.source, attachment.type) attachment.type = attachment.type.mime_type def pytest_runtest_logreport(self, report): if hasattr(report,", "allure ``level``. \"\"\" return self.label(Label.SEVERITY, severity) def feature(self, *features): \"\"\"", "if self.test: self.test.description = description def start_step(self, name): \"\"\" Starts", "passes. See :py:func:`_pytest.runner.runtestprotocol` for proofs / ideas. The \"other side\"", "= impl self.fails = [] def pytest_collectreport(self, report): if not", "A contextmanager/decorator for steps. TODO: when moving to python 3,", "a given issues. \"\"\" return self.label(Label.ISSUE, *issues) def dynamic_issue(self, *issues):", "in AllureAgregatingListener.pytest_sessionfinish self.stack = [self.test] yield self.test = None self.stack", "directory (may not exist)\") severities = [v for (_, v)", "attachment): \"\"\" Writes attachment object from the `AllureTestListener` to the", "and `teardown` are always called, but `call` is called only", "return self._allurelistener def attach(self, name, contents, type=AttachmentType.TEXT): # @ReservedAssignment \"\"\"", "to generate reports for modules that failed to collect. \"\"\"", "allowed'.format(name, ', '.join(atoms - legal_values), ', '.join(legal_values))) return set((name, v)", "it will duplicate cases in the report -- which is", "list of story names. Run tests that have at least", "log. Meh, i should file an issue to xdist if", "listener = self.allure_helper.get_listener() # if listener has `stack` we are", "a `set` of values that are legal for this label,", "``pytest.mark`` for a given issues. \"\"\" return self.label(Label.ISSUE, *issues) def", "\"\"\" def __init__(self, impl): self.impl = impl self.fails = []", "but `call` is called only if `setup` passes. 
See :py:func:`_pytest.runner.runtestprotocol`", "return self.label(Label.SEVERITY, severity) def feature(self, *features): \"\"\" A decorator factory", "self.test.labels.extend([TestLabel(name=Label.ISSUE, value=issue) for issue in issues]) def description(self, description): \"\"\"", "Failure(message=get_exception_message(call.excinfo, pyteststatus, report), trace=report.longrepr or hasattr(report, 'wasxfail') and report.wasxfail) elif", "actually record something allure_impl = AllureImpl(reportdir) testlistener = AllureTestListener(config) pytest.allure._allurelistener", "be anything) PENDING if skipped and xfailed SKIPPED if skipped", "*features) def story(self, *stories): \"\"\" A decorator factory that returns", "status=status, message=get_exception_message(None, None, report), trace=report.longrepr)) def pytest_sessionfinish(self): \"\"\" Creates a", "label_type arg_labels = set().union(item.config.option.allurefeatures, item.config.option.allurestories, item.config.option.allureseverities) if arg_labels and not", "\"\"\" def __init__(self): self._allurelistener = None # FIXME: this gets", "\"\"\" if self.test: self.test.description = description def start_step(self, name): \"\"\"", "def pytest_sessionfinish(self): \"\"\" Creates a testsuite with collection failures if", "self._fill_case(report, call, status, Status.PASSED) elif report.failed: self._fill_case(report, call, status, Status.FAILED)", "or hasattr(report, 'wasxfail') and report.wasxfail) elif status in SKIPPED_STATUSES: skip_message", "else: self._fill_case(report, call, status, Status.CANCELED) elif report.when == 'setup': #", "if callable(title): return LazyInitStepContext(self, title.__name__)(title) else: return LazyInitStepContext(self, title) def", "failed to collect. \"\"\" def __init__(self, impl): self.impl = impl", "the test cases. We expect cases to come from AllureTestListener", "Per-test listener. 
Is responsible for recording in-test data and for", "'%s.%s' % (Label.DEFAULT, name)) return allure_label(*value) def severity(self, severity): \"\"\"", "issue in issues]) def description(self, description): \"\"\" Sets description for", "for the test \"\"\" if self.test: self.test.description = description def", "allure.structure import TestCase, TestStep, Attach, TestSuite, Failure, TestLabel def pytest_addoption(parser):", "self.environment = {} self.test = None # FIXME: that flag", "makes us pre-report failures in the makereport hook. # it", "step. \"\"\" step = TestStep(name=name, title=name, start=now(), attachments=[], steps=[]) self.stack[-1].steps.append(step)", "actually report things. pytest runs this (naturally) three times --", "behavior is: FAILED when call fails and others OK BROKEN", "class AllureCollectionListener(object): \"\"\" Listens to pytest collection-related hooks to generate", "values that are legal for this label, if any limit", "description for the test \"\"\" if self.test: self.test.description = description", "issue(self, *issues): \"\"\" A decorator factory that returns ``pytest.mark`` for", "+ '...' 
* (len(skip_message) > trim_msg_len)), trace=status == Status.PENDING and", ":py:class:`allure.structure.TestStep` with given ``name``, pushes it to the ``self.stack`` and", "Attach, TestSuite, Failure, TestLabel def pytest_addoption(parser): parser.getgroup(\"reporting\").addoption('--alluredir', action=\"store\", dest=\"allurereportdir\", metavar=\"DIR\",", "known_ids.add(t.id) refined_tests.append(t) s.tests = refined_tests[::-1] with self.impl._reportfile('%s-testsuite.xml' % uuid.uuid4()) as", "current state for later actual write in the `AllureAgregatingListener.write_attach` \"\"\"", "Finalizes with important data :param report: py.test's `TestReport` :param call:", "in all_of(Severity)] def label_type(name, legal_values=set()): \"\"\" argparse-type factory for labelish", "Run tests that have at least one of the specified", "self.fails: self.impl.start_suite(name='test_collection_phase', title='Collection phase', description='This is the tests collection phase.", "of ``self.stack`` \"\"\" step = self.stack.pop() step.stop = now() def", "and returns the step. \"\"\" step = TestStep(name=name, title=name, start=now(),", "`setup` and `teardown` are always called, but `call` is called", "{} self.test = None # FIXME: that flag makes us", "minimize ze impact self.report_case(item, report) elif report.skipped: if hasattr(report, 'wasxfail'):", "Status.CANCELED) elif report.when == 'teardown': # as teardown is always", "return set((name, v) for v in atoms) return a_label_type parser.getgroup(\"general\").addoption('--allure_severities',", "broken self._fill_case(report, call, status, Status.BROKEN) else: # mark it broken", "gets injected elsewhere, like in the pytest_configure def get_listener(self): return", "test module name, test module doc, environment, TestCase) report.__dict__.update(_allure_result=pickle.dumps((parent.nodeid, parent.module.__name__,", "item, report): \"\"\" Adds `self.test` to the `report` in a", "at the `pytest_runtest_logreport` hook. 
\"\"\" def __init__(self, config): self.config =", "if skipped and not xfailed \"\"\" report = (yield).get_result() status", "are values that fall out of that \"\"\" def a_label_type(string):", "possibility to create step decorators, being initiated before pytest_configure, when", "report.__dict__.update(_allure_result=pickle.dumps((parent.nodeid, parent.module.__name__, parent.module.__doc__ or '', self.environment, self.test))) @pytest.mark.hookwrapper def pytest_runtest_makereport(self,", "and status[0] if report.when == 'call': if report.passed: self._fill_case(report, call,", "if self.test.status not in FAILED_STATUSES: # if test was OK", "allure.constants import Status, AttachmentType, Severity, \\ FAILED_STATUSES, Label, SKIPPED_STATUSES from", "attr.startswith('_'): return self.severity(getattr(Severity, attr)) else: raise AttributeError MASTER_HELPER = AllureHelper()", "THAN ONE TIME (namely, if the test fails and then", "pytest_runtest_makereport(self, item, call): \"\"\" Decides when to actually report things.", "keep track of what has already happened via self.test.status) Expected", "that failed to collect.') for fail in self.fails: self.impl.start_case(name=fail.name.split(\".\")[-1]) self.impl.stop_case(status=fail.status,", "the `AllureTestListener` to the FS, fixing it fields :param attachment:", "it fields :param attachment: a :py:class:`allure.structure.Attach` object \"\"\" # OMG,", "write in the `AllureAgregatingListener.write_attach` \"\"\" attach = Attach(source=contents, # we", "self.test = TestCase(name='.'.join(mangle_testnames([x.name for x in parent_down_from_module(item)])), description=description, start=now(), attachments=[],", "with these severities will be run. Possible values are:%s.\"\"\" %", "`teardown` are always called, but `call` is called only if", "this with ``contextlib.ContextDecorator``. 
Usage examples:: import pytest def test_foo(): with", "and AllureAgregatingListener.pytest_sessionfinish self._magicaldoublereport = hasattr(self.config, 'slaveinput') and self.config.getvalue(\"maxfail\") @pytest.mark.hookwrapper def", "\"\"\" Decides when to actually report things. pytest runs this", "else: self._fill_case(report, call, status, Status.CANCELED) elif report.when == 'teardown': #", "max(case.stop for case in s.tests) known_ids = set() refined_tests =", "tests collection phase. Failures are modules that failed to collect.')", "nodeid => TestSuite object self.suites = {} def pytest_sessionfinish(self): \"\"\"", "self.fails.append(CollectFail(name=mangle_testnames(report.nodeid.split(\"::\"))[-1], status=status, message=get_exception_message(None, None, report), trace=report.longrepr)) def pytest_sessionfinish(self): \"\"\" Creates", "allure_helper self.title = title self.step = None @property def allure(self):", "[self.test] yield self.test = None self.stack = [] def attach(self,", "argparse-type factory for labelish things. processed value is set of", "self.test.status) Expected behavior is: FAILED when call fails and others", "TestSuite object self.suites = {} def pytest_sessionfinish(self): \"\"\" We are", "# to minimize ze impact self.report_case(item, report) elif report.skipped: if", "a four-tuple: (test module ID, test module name, test module", "severity): \"\"\" A decorator factory that returns ``pytest.mark`` for a", "make_test_data_bar(): raise ValueError('No data today') def test_bar(): assert make_test_data_bar() @pytest.allure.step", "module doc, environment, TestCase) report.__dict__.update(_allure_result=pickle.dumps((parent.nodeid, parent.module.__name__, parent.module.__doc__ or '', self.environment,", "self.test: self.test.description = description def start_step(self, name): \"\"\" Starts an", "object from the `AllureTestListener` to the FS, fixing it fields", "before pytest_configure, when no AllureListener initiated yet. 
\"\"\" def __init__(self,", ":raises ArgumentTypeError: if `legal_values` are given and there are values", "skip_message = type(report.longrepr) == tuple and report.longrepr[2] or report.wasxfail trim_msg_len", "broken -- to cope with the xdist's -x behavior we", "TestSuite, Failure, TestLabel def pytest_addoption(parser): parser.getgroup(\"reporting\").addoption('--alluredir', action=\"store\", dest=\"allurereportdir\", metavar=\"DIR\", default=None,", "it is here to cope with xdist's begavior regarding -x.", "the step. \"\"\" step = TestStep(name=name, title=name, start=now(), attachments=[], steps=[])", "dynamic_issue(self, *issues): \"\"\" Mark test ``issues`` from inside. \"\"\" if", "elsewhere, like in the pytest_configure def get_listener(self): return self._allurelistener def", "status[0] if report.when == 'call': if report.passed: self._fill_case(report, call, status,", "state for later actual write in the `AllureAgregatingListener.write_attach` \"\"\" attach", "# for common items description = item.function.__doc__ except AttributeError: #", "= parent_module(item) # we attach a four-tuple: (test module ID,", "AllureImpl(reportdir) testlistener = AllureTestListener(config) pytest.allure._allurelistener = testlistener config.pluginmanager.register(testlistener) if not", "label, if any limit whatsoever :raises ArgumentTypeError: if `legal_values` are", "'name status message trace') class AllureCollectionListener(object): \"\"\" Listens to pytest", "status, Status.BROKEN) else: # mark it broken so, well, someone", "# FIXME: see pytest.runner.pytest_runtest_makereport self.test.failure = Failure(message=(short_message + '...' *", "dest=\"allurefeatures\", metavar=\"FEATURES_SET\", default={}, type=label_type(name=Label.FEATURE), help=\"\"\"Comma-separated list of feature names. 
Run", "set() refined_tests = [] for t in s.tests[::-1]: if t.id", "SKIPPED_STATUSES from allure.utils import parent_module, parent_down_from_module, labels_of, \\ all_of, get_exception_message,", "report), trace=report.longrepr)) def pytest_sessionfinish(self): \"\"\" Creates a testsuite with collection", "-- to cope with the xdist's -x behavior we have", "= hasattr(self.config, 'slaveinput') and self.config.getvalue(\"maxfail\") @pytest.mark.hookwrapper def pytest_runtest_protocol(self, item, nextitem):", "arg_labels = set().union(item.config.option.allurefeatures, item.config.option.allurestories, item.config.option.allureseverities) if arg_labels and not item_labels", "(it wont break, but it will duplicate cases in the", "default={}, type=label_type(name=Label.FEATURE), help=\"\"\"Comma-separated list of feature names. Run tests that", "ze impact self.report_case(item, report) elif report.skipped: if hasattr(report, 'wasxfail'): self._fill_case(report,", "as \"unreported\" or it has failed, add it to the", "report.wasxfail trim_msg_len = 89 short_message = skip_message.split('\\n')[0][:trim_msg_len] # FIXME: see", "the xdist's -x behavior we have to have tests even", "in a `AllureAggegatingListener`-understood way \"\"\" parent = parent_module(item) # we", ":param report: py.test's `TestReport` :param call: py.test's `CallInfo` :param pyteststatus:", "teardown are broken (and call may be anything) PENDING if", "self.stack[-1].attachments.append(attach) def dynamic_issue(self, *issues): \"\"\" Attaches ``issues`` to the current", "action=\"store\", dest=\"allureseverities\", metavar=\"SEVERITIES_SET\", default={}, type=label_type(name=Label.SEVERITY, legal_values=set(severities)), help=\"\"\"Comma-separated list of severity", "given ``name`` and ``type``. 
\"\"\" if self._allurelistener: self._allurelistener.attach(name, contents, type)", "Status.FAILED) # FIXME: this is here only to work around", "tuple and report.longrepr[2] or report.wasxfail trim_msg_len = 89 short_message =", "status here if not report.passed: if self.test.status not in FAILED_STATUSES:", "allure_impl = AllureImpl(reportdir) testlistener = AllureTestListener(config) pytest.allure._allurelistener = testlistener config.pluginmanager.register(testlistener)", "manifest their identity. Of all the test cases in suite.testcases", "'slaveinput'): # on xdist-master node do all the important stuff", "``issues`` from inside. \"\"\" if self._allurelistener: self._allurelistener.dynamic_issue(*issues) def description(self, description):", "Sets description for the test \"\"\" if self.test: self.test.description =", "wont break, but it will duplicate cases in the report", "test \"\"\" if self._allurelistener: self._allurelistener.description(description) def testcase(self, *testcases): \"\"\" A", "(for future TestLabel things) :param legal_values: a `set` of values", "for l in labels_of(item)) # see label_type arg_labels = set().union(item.config.option.allurefeatures,", "any. \"\"\" if self.fails: self.impl.start_suite(name='test_collection_phase', title='Collection phase', description='This is the", "or '', self.environment, self.test))) @pytest.mark.hookwrapper def pytest_runtest_makereport(self, item, call): \"\"\"", "failed at teardown => broken self._fill_case(report, call, status, Status.BROKEN) else:", "with collection failures if there were any. 
\"\"\" if self.fails:", "report in the specified directory (may not exist)\") severities =", "self.test))) @pytest.mark.hookwrapper def pytest_runtest_makereport(self, item, call): \"\"\" Decides when to", "if legal_values and not atoms < legal_values: raise argparse.ArgumentTypeError('Illegal {}", "three times -- with report.when being: setup <--- fixtures are", "Creates a testsuite with collection failures if there were any.", "of what has already happened via self.test.status) Expected behavior is:", "bad. So we work hard to decide exact moment when", "status, Status.FAILED) # FIXME: this is here only to work", "of the specified story labels.\"\"\") def pytest_configure(config): reportdir = config.option.allurereportdir", "for doctests that has no `function` attribute description = item.reportinfo()[2]", "contents, attach_type): \"\"\" Store attachment object in current state for", "has already happened via self.test.status) Expected behavior is: FAILED when", "self.suites.values(): if s.tests: # nobody likes empty suites s.stop =", "(Label.DEFAULT, name)) return allure_label(*value) def severity(self, severity): \"\"\" A decorator", "Lets write em down. 
But first we kinda-unify the test", "\"\"\" This is a step context used for decorated steps.", "in parent_down_from_module(item)])), description=description, start=now(), attachments=[], labels=labels_of(item), status=None, steps=[], id=str(uuid.uuid4())) #", "(and call may be anything) PENDING if skipped and xfailed", "self._allurelistener.attach(name, contents, type) def label(self, name, *value): \"\"\" A decorator", "finishes the main code has finished teardown <--- tears down", "# setup / teardown if report.failed: self._fill_case(report, call, status, Status.BROKEN)", "= impl # module's nodeid => TestSuite object self.suites =", "status and status[0] if report.when == 'call': if report.passed: self._fill_case(report,", "'wasxfail'): self._fill_case(report, call, status, Status.PENDING) else: self._fill_case(report, call, status, Status.CANCELED)", "later actual write in the `AllureAgregatingListener.write_attach` \"\"\" attach = Attach(source=contents,", "status if status in FAILED_STATUSES: self.test.failure = Failure(message=get_exception_message(call.excinfo, pyteststatus, report),", "case in s.tests) known_ids = set() refined_tests = [] for", "for modules that failed to collect. 
\"\"\" def __init__(self, impl):", "id=str(uuid.uuid4())) # for later resolution in AllureAgregatingListener.pytest_sessionfinish self.stack = [self.test]", "status, Status.PASSED) elif report.failed: self._fill_case(report, call, status, Status.FAILED) # FIXME:", "status, Status.PENDING) else: self._fill_case(report, call, status, Status.CANCELED) elif report.when ==", "self._allurelistener: self._allurelistener.description(description) def testcase(self, *testcases): \"\"\" A decorator factory that", "else: return LazyInitStepContext(self, title) def single_step(self, text): \"\"\" Writes single", "fixtures are to be initialized in this one call <---", "\\ FAILED_STATUSES, Label, SKIPPED_STATUSES from allure.utils import parent_module, parent_down_from_module, labels_of,", "are given and there are values that fall out of", "@property def severity_level(self): return Severity def __getattr__(self, attr): \"\"\" Provides", "return self.label(Label.ISSUE, *issues) def dynamic_issue(self, *issues): \"\"\" Mark test ``issues``", "will duplicate cases in the report -- which is bad.", "selected labels: %s.' % ', '.join(text_type(l) for l in sorted(arg_labels)))", "factory that returns ``pytest.mark`` for a given stories. \"\"\" return", "factory for labelish things. processed value is set of tuples", "AttributeError: # for doctests that has no `function` attribute description", "pickle.loads(report._allure_result) report._allure_result = None # so actual pickled data is", "already happened via self.test.status) Expected behavior is: FAILED when call", "current context with given ``name`` and ``type``. 
\"\"\" if self._allurelistener:", "Failure, TestLabel def pytest_addoption(parser): parser.getgroup(\"reporting\").addoption('--alluredir', action=\"store\", dest=\"allurereportdir\", metavar=\"DIR\", default=None, help=\"Generate", "description = item.function.__doc__ except AttributeError: # for doctests that has", "the current active case \"\"\" if self.test: self.test.labels.extend([TestLabel(name=Label.ISSUE, value=issue) for", "for steps. TODO: when moving to python 3, rework this", "to cope with the xdist's -x behavior we have to", "we attach a four-tuple: (test module ID, test module name,", "not report.passed: if report.failed: status = Status.BROKEN else: status =", "from allure.structure import TestCase, TestStep, Attach, TestSuite, Failure, TestLabel def", "call: py.test's `CallInfo` :param pyteststatus: the failed/xfailed/xpassed thing :param status:", "\"\"\" argparse-type factory for labelish things. processed value is set", "return Severity def __getattr__(self, attr): \"\"\" Provides fancy shortcuts for", "-- becase logreport can be sent MORE THAN ONE TIME", "import argparse from collections import namedtuple from six import text_type", "it to the ``self.stack`` and returns the step. \"\"\" step", "expect cases to come from AllureTestListener -- and the have", "__init__(self, impl): self.impl = impl self.fails = [] def pytest_collectreport(self,", "description for the test \"\"\" if self._allurelistener: self._allurelistener.description(description) def testcase(self,", "= 89 short_message = skip_message.split('\\n')[0][:trim_msg_len] # FIXME: see pytest.runner.pytest_runtest_makereport self.test.failure", "report.when being: setup <--- fixtures are to be initialized in", "= refined_tests[::-1] with self.impl._reportfile('%s-testsuite.xml' % uuid.uuid4()) as f: self.impl._write_xml(f, s)", "the test cases in suite.testcases we leave LAST with the", "', '.join(atoms - legal_values), ', '.join(legal_values))) return set((name, v) for", "hook. 
\"\"\" def __init__(self, config): self.config = config self.environment =", "(len(skip_message) > trim_msg_len)), trace=status == Status.PENDING and report.longrepr or short_message", "`stack` we are inside a test # record steps only", "like in the pytest_configure def get_listener(self): return self._allurelistener def attach(self,", "the ``self.stack`` and returns the step. \"\"\" step = TestStep(name=name,", "FIXME: this gets injected elsewhere, like in the pytest_configure def", "call, status, Status.PASSED) elif report.failed: self._fill_case(report, call, status, Status.FAILED) #", "a in testcase.iter_attachments(): self.write_attach(a) self.suites.setdefault(module_id, TestSuite(name=module_name, description=module_doc, tests=[], labels=[], start=testcase.start,", "refined_tests.append(t) s.tests = refined_tests[::-1] with self.impl._reportfile('%s-testsuite.xml' % uuid.uuid4()) as f:", "there are values that fall out of that \"\"\" def", "the test \"\"\" if self._allurelistener: self._allurelistener.description(description) def testcase(self, *testcases): \"\"\"", "-- and the have ._id field to manifest their identity.", "type(report.longrepr) == tuple and report.longrepr[2] or report.wasxfail trim_msg_len = 89", "metavar=\"FEATURES_SET\", default={}, type=label_type(name=Label.FEATURE), help=\"\"\"Comma-separated list of feature names. 
Run tests", "FAILED_STATUSES: # if test was OK but failed at teardown", "sorted(arg_labels))) class LazyInitStepContext(StepContext): \"\"\" This is a step context used", "v) in all_of(Severity)] def label_type(name, legal_values=set()): \"\"\" argparse-type factory for", "'.join(text_type(l) for l in sorted(arg_labels))) class LazyInitStepContext(StepContext): \"\"\" This is", "well, someone has idea of teardown failure # still, that's", "break, but it will duplicate cases in the report --", "% ', '.join(severities)) parser.getgroup(\"general\").addoption('--allure_features', action=\"store\", dest=\"allurefeatures\", metavar=\"FEATURES_SET\", default={}, type=label_type(name=Label.FEATURE), help=\"\"\"Comma-separated", "main code has finished teardown <--- tears down fixtures (that", "when we call `_stop_case` to do that. This method may", "stories. \"\"\" return self.label(Label.STORY, *stories) def issue(self, *issues): \"\"\" A", "here if not report.passed: if self.test.status not in FAILED_STATUSES: #", "= Status.BROKEN # if a test isn't marked as \"unreported\"", "only if `setup` passes. See :py:func:`_pytest.runner.runtestprotocol` for proofs / ideas.", "`AllureAgregatingListener` at the `pytest_runtest_logreport` hook. \"\"\" def __init__(self, config): self.config", "``level``. \"\"\" return self.label(Label.SEVERITY, severity) def feature(self, *features): \"\"\" A", "for l in sorted(arg_labels))) class LazyInitStepContext(StepContext): \"\"\" This is a", "in `self.suites` Lets write em down. 
But first we kinda-unify", "labels=labels_of(item), status=None, steps=[], id=str(uuid.uuid4())) # for later resolution in AllureAgregatingListener.pytest_sessionfinish", "% uuid.uuid4()) as f: self.impl._write_xml(f, s) self.impl.store_environment() def write_attach(self, attachment):", "labels_of, \\ all_of, get_exception_message, now, mangle_testnames from allure.structure import TestCase,", "name)) return allure_label(*value) def severity(self, severity): \"\"\" A decorator factory", "that returns ``pytest.mark`` for a given features. \"\"\" return self.label(Label.FEATURE,", "step = self.stack.pop() step.stop = now() def _fill_case(self, report, call,", "Store attachment object in current state for later actual write", "A decorator factory that returns ``pytest.mark`` for a given allure", "The \"other side\" (AllureAggregatingListener) expects us to send EXACTLY ONE", "failures in the makereport hook. # it is here to", "issues. \"\"\" return self.label(Label.ISSUE, *issues) def dynamic_issue(self, *issues): \"\"\" Mark", "from collections import namedtuple from six import text_type from allure.common", "<--- fixtures are to be initialized in this one call", "first case starts the suite! 
stop=None)).tests.append(testcase) CollectFail = namedtuple('CollectFail', 'name", "-- test has already failed # TODO: think about that", "legal for this label, if any limit whatsoever :raises ArgumentTypeError:", "fall out of that \"\"\" def a_label_type(string): atoms = set(string.split(','))", "= set().union(item.config.option.allurefeatures, item.config.option.allurestories, item.config.option.allureseverities) if arg_labels and not item_labels &", ":param call: py.test's `CallInfo` :param pyteststatus: the failed/xfailed/xpassed thing :param", "= Failure(message=get_exception_message(call.excinfo, pyteststatus, report), trace=report.longrepr or hasattr(report, 'wasxfail') and report.wasxfail)", "\"unreported\" or it has failed, add it to the report.", "test_foo(): with pytest.allure.step('mystep'): assert False @pytest.allure.step('make test data') def make_test_data_bar():", "\"\"\" Attaches ``contents`` to a current context with given ``name``", "today') def test_baz(): assert make_test_data_baz() @pytest.fixture() @pytest.allure.step('test fixture') def steppy_fixture():", "recording in-test data and for attaching it to the test", "(yield).get_result() status = self.config.hook.pytest_report_teststatus(report=report) status = status and status[0] if", "= [v for (_, v) in all_of(Severity)] def label_type(name, legal_values=set()):", "encapsulation a lot if hasattr(listener, 'stack'): return listener class AllureHelper(object):", "for recording in-test data and for attaching it to the", "short_message != skip_message and skip_message or '') def report_case(self, item,", "-- with report.when being: setup <--- fixtures are to be", "TestLabel def pytest_addoption(parser): parser.getgroup(\"reporting\").addoption('--alluredir', action=\"store\", dest=\"allurereportdir\", metavar=\"DIR\", default=None, help=\"Generate Allure", "decorators, being initiated before pytest_configure, when no AllureListener initiated yet.", "has idea of teardown failure # still, that's 
no big", "report) def pytest_runtest_setup(item): item_labels = set((l.name, l.value) for l in", "report), trace=report.longrepr or hasattr(report, 'wasxfail') and report.wasxfail) elif status in", "= None @property def allure(self): listener = self.allure_helper.get_listener() # if", "no big deal -- test has already failed # TODO:", "pytest.runner.pytest_runtest_makereport self.test.failure = Failure(message=(short_message + '...' * (len(skip_message) > trim_msg_len)),", "title) def single_step(self, text): \"\"\" Writes single line to report.", "self.fails = [] def pytest_collectreport(self, report): if not report.passed: if", "hasattr(report, '_allure_result'): module_id, module_name, module_doc, environment, testcase = pickle.loads(report._allure_result) report._allure_result", "common items description = item.function.__doc__ except AttributeError: # for doctests", "test report thing. The per-test reports are handled by `AllureAgregatingListener`", "def testcase(self, *testcases): \"\"\" A decorator factory that returns ``pytest.mark``", "or '') def report_case(self, item, report): \"\"\" Adds `self.test` to", "is here to cope with xdist's begavior regarding -x. #", "allure.utils import parent_module, parent_down_from_module, labels_of, \\ all_of, get_exception_message, now, mangle_testnames", "or short_message != skip_message and skip_message or '') def report_case(self,", "to a current context with given ``name`` and ``type``. \"\"\"", "=> broken self._fill_case(report, call, status, Status.BROKEN) else: # mark it", "self.impl._reportfile('%s-testsuite.xml' % uuid.uuid4()) as f: self.impl._write_xml(f, s) self.impl.store_environment() def write_attach(self,", "reportdir = config.option.allurereportdir if reportdir: # we actually record something", "more efficient way \"\"\" for s in self.suites.values(): if s.tests:", "mark it broken so, well, someone has idea of teardown", "all the results in `self.suites` Lets write em down. 
But", "has failed, add it to the report. if not item.get_marker(\"unreported\")", "module_doc, environment, testcase = pickle.loads(report._allure_result) report._allure_result = None # so", "__getattr__(self, attr): \"\"\" Provides fancy shortcuts for severity:: # these", "``name`` and ``type``. \"\"\" if self._allurelistener: self._allurelistener.attach(name, contents, type) def", "**env_dict): if self._allurelistener: self._allurelistener.environment.update(env_dict) @property def attach_type(self): return AttachmentType @property", "to have tests even at CALL failures) TODO: do it", "six import text_type from allure.common import AllureImpl, StepContext from allure.constants", "parent.module.__name__, parent.module.__doc__ or '', self.environment, self.test))) @pytest.mark.hookwrapper def pytest_runtest_makereport(self, item,", "are allowed'.format(name, ', '.join(atoms - legal_values), ', '.join(legal_values))) return set((name,", "a possibility to create step decorators, being initiated before pytest_configure,", "elif report.when == 'setup': # setup / teardown if report.failed:", "given issues. \"\"\" return self.label(Label.ISSUE, *issues) def dynamic_issue(self, *issues): \"\"\"", "'call': if report.passed: self._fill_case(report, call, status, Status.PASSED) elif report.failed: self._fill_case(report,", "# these are the same pytest.allure.CRITICAL pytest.allure.severity(pytest.allure.severity_level.CRITICAL) \"\"\" if attr", "3, rework this with ``contextlib.ContextDecorator``. Usage examples:: import pytest def", "decorator factory that returns ``pytest.mark`` for a given stories. \"\"\"", "module's nodeid => TestSuite object self.suites = {} def pytest_sessionfinish(self):", "with pytest.allure.step('mystep'): assert False @pytest.allure.step('make test data') def make_test_data_bar(): raise", "pytest hooks to generate reports for common tests. 
\"\"\" def", "s) self.impl.store_environment() def write_attach(self, attachment): \"\"\" Writes attachment object from", "\"\"\" parent = parent_module(item) # we attach a four-tuple: (test", "metavar=\"STORIES_SET\", default={}, type=label_type(name=Label.STORY), help=\"\"\"Comma-separated list of story names. Run tests", "single_step(self, text): \"\"\" Writes single line to report. \"\"\" if", "Severity, \\ FAILED_STATUSES, Label, SKIPPED_STATUSES from allure.utils import parent_module, parent_down_from_module,", "steppy_fixture \"\"\" if callable(title): return LazyInitStepContext(self, title.__name__)(title) else: return LazyInitStepContext(self,", "be sent MORE THAN ONE TIME (namely, if the test", "suite! stop=None)).tests.append(testcase) CollectFail = namedtuple('CollectFail', 'name status message trace') class", "report.when == 'setup': # setup / teardown if report.failed: self._fill_case(report,", "pre-report failures in the makereport hook. # it is here", "tests even at CALL failures) TODO: do it in a" ]
[ "= message_prefix @staticmethod def _get_queries(): return len(connection.queries) @staticmethod def _get_timer():", "self.__init_timer) if settings.DEBUG: message = '\\r%sexecuted %s %s in %ss.'", "type_, value, traceback): queries = (debug_performance._get_queries() - self.__init_queries) timer =", "traceback): queries = (debug_performance._get_queries() - self.__init_queries) timer = (debug_performance._get_timer() -", "( self.__message_prefix, queries, 'query' if queries == 1 else 'queries',", "from django.conf import settings from django.db import connection import logging", "message_prefix=''): super(debug_performance, self).__init__() self.__message_prefix = message_prefix @staticmethod def _get_queries(): return", "%ss.' % ( self.__message_prefix, queries, 'query' if queries == 1", "= (debug_performance._get_timer() - self.__init_timer) if settings.DEBUG: message = '\\r%sexecuted %s", "utf-8 -*- from django.conf import settings from django.db import connection", "= '\\r%sexecuted %s %s in %ss.' 
% ( self.__message_prefix, queries,", "return None def __exit__(self, type_, value, traceback): queries = (debug_performance._get_queries()", "def _get_queries(): return len(connection.queries) @staticmethod def _get_timer(): return timeit.default_timer() def", "settings from django.db import connection import logging import timeit logger", "def _get_timer(): return timeit.default_timer() def __enter__(self): self.__init_queries = debug_performance._get_queries() self.__init_timer", "django.db import connection import logging import timeit logger = logging.getLogger(__name__)", "timeit logger = logging.getLogger(__name__) class debug_performance(object): def __init__(self, message_prefix=''): super(debug_performance,", "_get_timer(): return timeit.default_timer() def __enter__(self): self.__init_queries = debug_performance._get_queries() self.__init_timer =", "self).__init__() self.__message_prefix = message_prefix @staticmethod def _get_queries(): return len(connection.queries) @staticmethod", "logging import timeit logger = logging.getLogger(__name__) class debug_performance(object): def __init__(self,", "self.__message_prefix, queries, 'query' if queries == 1 else 'queries', timer,", "%s in %ss.' 
% ( self.__message_prefix, queries, 'query' if queries", "from django.db import connection import logging import timeit logger =", "= debug_performance._get_queries() self.__init_timer = debug_performance._get_timer() return None def __exit__(self, type_,", "@staticmethod def _get_queries(): return len(connection.queries) @staticmethod def _get_timer(): return timeit.default_timer()", "- self.__init_timer) if settings.DEBUG: message = '\\r%sexecuted %s %s in", "logging.getLogger(__name__) class debug_performance(object): def __init__(self, message_prefix=''): super(debug_performance, self).__init__() self.__message_prefix =", "(debug_performance._get_queries() - self.__init_queries) timer = (debug_performance._get_timer() - self.__init_timer) if settings.DEBUG:", "% ( self.__message_prefix, queries, 'query' if queries == 1 else", "connection import logging import timeit logger = logging.getLogger(__name__) class debug_performance(object):", "in %ss.' % ( self.__message_prefix, queries, 'query' if queries ==", "coding: utf-8 -*- from django.conf import settings from django.db import", "self.__init_timer = debug_performance._get_timer() return None def __exit__(self, type_, value, traceback):", "(debug_performance._get_timer() - self.__init_timer) if settings.DEBUG: message = '\\r%sexecuted %s %s", "__init__(self, message_prefix=''): super(debug_performance, self).__init__() self.__message_prefix = message_prefix @staticmethod def _get_queries():", "class debug_performance(object): def __init__(self, message_prefix=''): super(debug_performance, self).__init__() self.__message_prefix = message_prefix", "message = '\\r%sexecuted %s %s in %ss.' % ( self.__message_prefix,", "'\\r%sexecuted %s %s in %ss.' 
% ( self.__message_prefix, queries, 'query'", "-*- coding: utf-8 -*- from django.conf import settings from django.db", "@staticmethod def _get_timer(): return timeit.default_timer() def __enter__(self): self.__init_queries = debug_performance._get_queries()", "None def __exit__(self, type_, value, traceback): queries = (debug_performance._get_queries() -", "return timeit.default_timer() def __enter__(self): self.__init_queries = debug_performance._get_queries() self.__init_timer = debug_performance._get_timer()", "debug_performance._get_timer() return None def __exit__(self, type_, value, traceback): queries =", "- self.__init_queries) timer = (debug_performance._get_timer() - self.__init_timer) if settings.DEBUG: message", "import logging import timeit logger = logging.getLogger(__name__) class debug_performance(object): def", "-*- from django.conf import settings from django.db import connection import", "self.__init_queries = debug_performance._get_queries() self.__init_timer = debug_performance._get_timer() return None def __exit__(self,", "def __init__(self, message_prefix=''): super(debug_performance, self).__init__() self.__message_prefix = message_prefix @staticmethod def", "logger = logging.getLogger(__name__) class debug_performance(object): def __init__(self, message_prefix=''): super(debug_performance, self).__init__()", "%s %s in %ss.' 
% ( self.__message_prefix, queries, 'query' if", "return len(connection.queries) @staticmethod def _get_timer(): return timeit.default_timer() def __enter__(self): self.__init_queries", "def __enter__(self): self.__init_queries = debug_performance._get_queries() self.__init_timer = debug_performance._get_timer() return None", "def __exit__(self, type_, value, traceback): queries = (debug_performance._get_queries() - self.__init_queries)", "__exit__(self, type_, value, traceback): queries = (debug_performance._get_queries() - self.__init_queries) timer", "timer = (debug_performance._get_timer() - self.__init_timer) if settings.DEBUG: message = '\\r%sexecuted", "django.conf import settings from django.db import connection import logging import", "if settings.DEBUG: message = '\\r%sexecuted %s %s in %ss.' %", "super(debug_performance, self).__init__() self.__message_prefix = message_prefix @staticmethod def _get_queries(): return len(connection.queries)", "queries, 'query' if queries == 1 else 'queries', timer, )", "= debug_performance._get_timer() return None def __exit__(self, type_, value, traceback): queries", "timeit.default_timer() def __enter__(self): self.__init_queries = debug_performance._get_queries() self.__init_timer = debug_performance._get_timer() return", "'query' if queries == 1 else 'queries', timer, ) print(message)", "len(connection.queries) @staticmethod def _get_timer(): return timeit.default_timer() def __enter__(self): self.__init_queries =", "import timeit logger = logging.getLogger(__name__) class debug_performance(object): def __init__(self, message_prefix=''):", "# -*- coding: utf-8 -*- from django.conf import settings from", "self.__message_prefix = message_prefix @staticmethod def _get_queries(): return len(connection.queries) @staticmethod def", "= logging.getLogger(__name__) class debug_performance(object): def __init__(self, message_prefix=''): super(debug_performance, self).__init__() self.__message_prefix", "value, traceback): 
queries = (debug_performance._get_queries() - self.__init_queries) timer = (debug_performance._get_timer()", "settings.DEBUG: message = '\\r%sexecuted %s %s in %ss.' % (", "_get_queries(): return len(connection.queries) @staticmethod def _get_timer(): return timeit.default_timer() def __enter__(self):", "import settings from django.db import connection import logging import timeit", "__enter__(self): self.__init_queries = debug_performance._get_queries() self.__init_timer = debug_performance._get_timer() return None def", "= (debug_performance._get_queries() - self.__init_queries) timer = (debug_performance._get_timer() - self.__init_timer) if", "self.__init_queries) timer = (debug_performance._get_timer() - self.__init_timer) if settings.DEBUG: message =", "debug_performance(object): def __init__(self, message_prefix=''): super(debug_performance, self).__init__() self.__message_prefix = message_prefix @staticmethod", "debug_performance._get_queries() self.__init_timer = debug_performance._get_timer() return None def __exit__(self, type_, value,", "import connection import logging import timeit logger = logging.getLogger(__name__) class", "queries = (debug_performance._get_queries() - self.__init_queries) timer = (debug_performance._get_timer() - self.__init_timer)", "<reponame>domlysi/django-treenode # -*- coding: utf-8 -*- from django.conf import settings", "message_prefix @staticmethod def _get_queries(): return len(connection.queries) @staticmethod def _get_timer(): return" ]
[ "vertices using the current mesh data into bmesh pipe =", "x < y : Edgify(olist[x], olist[y]) edges.append(bpy.context.active_object) return edges def", "0 else: return 1 oblist = bpy.context.selected_objects Edgelist = string(oblist)", "# Use this as a tooltip for menu items and", "the property values that should be used when the user", "script #################### #to create an edge between two given objects", "edges def piper(xlist): bpy.ops.object.select_all(action='DESELECT') for x in xlist: x.select_set(True) bpy.ops.object.join()", "self.layout props = layout.operator('object.stringtool_ot') def register(): bpy.utils.register_class(STRING) def unregister(): bpy.utils.unregister_class(STRING)", "using the current mesh data into bmesh pipe = bpy.context.object.data", "property values that should be used when the user #", "Moving Script\"\"\" # Use this as a tooltip for menu", "bl_category = \"newprop\" def draw(self, context): # You can set", "bpy.ops.object.shade_smooth() ######################## return {'FINISHED'} # Lets Blender know the operator", "{ \"name\": \"STRING\", \"blender\": (2, 80, 0), \"category\": \"Object\", 'Author'", "operator finished successfully. class STRING_PT(bpy.types.Panel): bl_idname = \"object_stringtool_pt\" bl_label =", "draw(self, context): # You can set the property values that", "bmesh pipe = bpy.context.object.data bm = bmesh.new() for v in", "1 oblist = bpy.context.selected_objects Edgelist = string(oblist) piper(Edgelist) actob =", "as a tooltip for menu items and buttons. bl_idname =", "reference. bl_label = \"String\" # Display name in the interface.", "The original script #################### #to create an edge between two", "for y in range(l): if y != x and x", "bmesh.new() for v in verts: bm.verts.new(v) bpy.ops.object.editmode_toggle() bm.to_mesh(pipe) bm.free() bpy.ops.object.editmode_toggle()", "bl_label = \"String\" # Display name in the interface. 
bl_options", "layout = self.layout props = layout.operator('object.stringtool_ot') def register(): bpy.utils.register_class(STRING) def", "bpy.ops.mesh.delete(type='VERT') #creating the vertices using the current mesh data into", "operator. bdepth: bpy.props.FloatProperty(name = \"String Thickness\", min = 0.1, max", "create an edge between two given objects def Edgify(ob1,ob2): loc1", "bl_idname = \"object_stringtool_pt\" bl_label = \"String\" bl_space_type = 'VIEW_3D' bl_region_type", "and buttons. bl_idname = \"object.stringtool_ot\" # Unique identifier for buttons", "to reference. bl_label = \"String\" # Display name in the", "== 0: self.report({'INFO'},'NONE SELECTED OBJECTS') return 0 else: return 1", "= 'UI' bl_category = \"newprop\" def draw(self, context): # You", "tooltip for menu items and buttons. bl_idname = \"object.stringtool_ot\" #", "for buttons and menu items to reference. bl_label = \"String\"", "# to test the add-on without having to install it.", "# This allows you to run the script directly from", "def Edgify(ob1,ob2): loc1 = ob1.location loc2 = ob2.location verts =", "bpy.context.object.data bm = bmesh.new() for v in verts: bm.verts.new(v) bpy.ops.object.editmode_toggle()", "actob = bpy.context.active_object actob.data.bevel_depth = self.bdepth bpy.ops.object.shade_smooth() ######################## return {'FINISHED'}", "for menu items and buttons. 
bl_idname = \"object.stringtool_ot\" # Unique", "props = layout.operator('object.stringtool_ot') def register(): bpy.utils.register_class(STRING) def unregister(): bpy.utils.unregister_class(STRING) #", "bl_space_type = 'VIEW_3D' bl_region_type = 'UI' bl_category = \"newprop\" def", "Blender's Text editor # to test the add-on without having", "original script #################### #to create an edge between two given", "class STRING_PT(bpy.types.Panel): bl_idname = \"object_stringtool_pt\" bl_label = \"String\" bl_space_type =", "def register(): bpy.utils.register_class(STRING) def unregister(): bpy.utils.unregister_class(STRING) # This allows you", "bpy.utils.unregister_class(STRING) # This allows you to run the script directly", "interface. bl_options = {'REGISTER', 'UNDO'} # Enable undo for the", "= bpy.context.object.data bm = bmesh.new() for v in verts: bm.verts.new(v)", "bpy.ops.object.select_all(action='DESELECT') for x in xlist: x.select_set(True) bpy.ops.object.join() bpy.ops.object.convert(target='CURVE') def check(olist):", "else: return 1 oblist = bpy.context.selected_objects Edgelist = string(oblist) piper(Edgelist)", "script directly from Blender's Text editor # to test the", "def piper(xlist): bpy.ops.object.select_all(action='DESELECT') for x in xlist: x.select_set(True) bpy.ops.object.join() bpy.ops.object.convert(target='CURVE')", "an edge between two given objects def Edgify(ob1,ob2): loc1 =", "objects def Edgify(ob1,ob2): loc1 = ob1.location loc2 = ob2.location verts", "can set the property values that should be used when", "piper(Edgelist) actob = bpy.context.active_object actob.data.bevel_depth = self.bdepth bpy.ops.object.shade_smooth() ######################## return", "from Blender's Text editor # to test the add-on without", "= (0,0,0)) bpy.ops.object.editmode_toggle() bpy.ops.mesh.select_all(action='SELECT') bpy.ops.mesh.delete(type='VERT') #creating the vertices using the", "Object Moving Script\"\"\" # Use this as a tooltip for", "used 
when the user # presses the button in the", "the operator finished successfully. class STRING_PT(bpy.types.Panel): bl_idname = \"object_stringtool_pt\" bl_label", "'Author' : '<NAME>' } import bpy import bmesh class STRING(bpy.types.Operator):", "= [] l = len(olist) for x in range(l): for", "verts: bm.verts.new(v) bpy.ops.object.editmode_toggle() bm.to_mesh(pipe) bm.free() bpy.ops.object.editmode_toggle() bpy.ops.mesh.select_all(action='SELECT') bpy.ops.mesh.edge_face_add() bpy.ops.object.editmode_toggle() def", "\"STRING\", \"blender\": (2, 80, 0), \"category\": \"Object\", 'Author' : '<NAME>'", "in verts: bm.verts.new(v) bpy.ops.object.editmode_toggle() bm.to_mesh(pipe) bm.free() bpy.ops.object.editmode_toggle() bpy.ops.mesh.select_all(action='SELECT') bpy.ops.mesh.edge_face_add() bpy.ops.object.editmode_toggle()", "= { \"name\": \"STRING\", \"blender\": (2, 80, 0), \"category\": \"Object\",", "loc2 = ob2.location verts = [loc1,loc2] bpy.ops.mesh.primitive_plane_add(location = (0,0,0)) bpy.ops.object.editmode_toggle()", "UI. layout = self.layout props = layout.operator('object.stringtool_ot') def register(): bpy.utils.register_class(STRING)", "the vertices using the current mesh data into bmesh pipe", "edge between two given objects def Edgify(ob1,ob2): loc1 = ob1.location", "min = 0.1, max = 5, precision = 2 )", "v in verts: bm.verts.new(v) bpy.ops.object.editmode_toggle() bm.to_mesh(pipe) bm.free() bpy.ops.object.editmode_toggle() bpy.ops.mesh.select_all(action='SELECT') bpy.ops.mesh.edge_face_add()", "finished successfully. class STRING_PT(bpy.types.Panel): bl_idname = \"object_stringtool_pt\" bl_label = \"String\"", "bl_info = { \"name\": \"STRING\", \"blender\": (2, 80, 0), \"category\":", "Display name in the interface. 
bl_options = {'REGISTER', 'UNDO'} #", "the current mesh data into bmesh pipe = bpy.context.object.data bm", "bpy.ops.object.editmode_toggle() bpy.ops.mesh.select_all(action='SELECT') bpy.ops.mesh.edge_face_add() bpy.ops.object.editmode_toggle() def string(olist): edges = [] l", "OBJECTS') return 0 else: return 1 oblist = bpy.context.selected_objects Edgelist", "'<NAME>' } import bpy import bmesh class STRING(bpy.types.Operator): \"\"\"My Object", "edges.append(bpy.context.active_object) return edges def piper(xlist): bpy.ops.object.select_all(action='DESELECT') for x in xlist:", "when the user # presses the button in the UI.", "STRING_PT(bpy.types.Panel): bl_idname = \"object_stringtool_pt\" bl_label = \"String\" bl_space_type = 'VIEW_3D'", "class STRING(bpy.types.Operator): \"\"\"My Object Moving Script\"\"\" # Use this as", "import bpy import bmesh class STRING(bpy.types.Operator): \"\"\"My Object Moving Script\"\"\"", "presses the button in the UI. layout = self.layout props", "ob1.location loc2 = ob2.location verts = [loc1,loc2] bpy.ops.mesh.primitive_plane_add(location = (0,0,0))", "len(olist) for x in range(l): for y in range(l): if", "{'REGISTER', 'UNDO'} # Enable undo for the operator. bdepth: bpy.props.FloatProperty(name", "\"\"\"My Object Moving Script\"\"\" # Use this as a tooltip", "bm.verts.new(v) bpy.ops.object.editmode_toggle() bm.to_mesh(pipe) bm.free() bpy.ops.object.editmode_toggle() bpy.ops.mesh.select_all(action='SELECT') bpy.ops.mesh.edge_face_add() bpy.ops.object.editmode_toggle() def string(olist):", "edges = [] l = len(olist) for x in range(l):", "range(l): if y != x and x < y :", ": Edgify(olist[x], olist[y]) edges.append(bpy.context.active_object) return edges def piper(xlist): bpy.ops.object.select_all(action='DESELECT') for", "= bpy.context.selected_objects Edgelist = string(oblist) piper(Edgelist) actob = bpy.context.active_object actob.data.bevel_depth", "this as a tooltip for menu items and buttons. 
bl_idname", "= \"newprop\" def draw(self, context): # You can set the", "in range(l): if y != x and x < y", "Enable undo for the operator. bdepth: bpy.props.FloatProperty(name = \"String Thickness\",", "Unique identifier for buttons and menu items to reference. bl_label", "string(olist): edges = [] l = len(olist) for x in", "def execute(self, context): # The original script #################### #to create", "and menu items to reference. bl_label = \"String\" # Display", "actob.data.bevel_depth = self.bdepth bpy.ops.object.shade_smooth() ######################## return {'FINISHED'} # Lets Blender", "Edgify(ob1,ob2): loc1 = ob1.location loc2 = ob2.location verts = [loc1,loc2]", "mesh data into bmesh pipe = bpy.context.object.data bm = bmesh.new()", "user # presses the button in the UI. layout =", "SELECTED OBJECTS') return 0 else: return 1 oblist = bpy.context.selected_objects", "test the add-on without having to install it. if __name__", "self.report({'INFO'},'NONE SELECTED OBJECTS') return 0 else: return 1 oblist =", "return 1 oblist = bpy.context.selected_objects Edgelist = string(oblist) piper(Edgelist) actob", "\"newprop\" def draw(self, context): # You can set the property", "= bmesh.new() for v in verts: bm.verts.new(v) bpy.ops.object.editmode_toggle() bm.to_mesh(pipe) bm.free()", "x.select_set(True) bpy.ops.object.join() bpy.ops.object.convert(target='CURVE') def check(olist): if len(olist) == 0: self.report({'INFO'},'NONE", "0: self.report({'INFO'},'NONE SELECTED OBJECTS') return 0 else: return 1 oblist", "You can set the property values that should be used", "unregister(): bpy.utils.unregister_class(STRING) # This allows you to run the script", "import bmesh class STRING(bpy.types.Operator): \"\"\"My Object Moving Script\"\"\" # Use", "you to run the script directly from Blender's Text editor", "\"blender\": (2, 80, 0), \"category\": \"Object\", 'Author' : '<NAME>' }", "in xlist: x.select_set(True) bpy.ops.object.join() 
bpy.ops.object.convert(target='CURVE') def check(olist): if len(olist) ==", "olist[y]) edges.append(bpy.context.active_object) return edges def piper(xlist): bpy.ops.object.select_all(action='DESELECT') for x in", "string(oblist) piper(Edgelist) actob = bpy.context.active_object actob.data.bevel_depth = self.bdepth bpy.ops.object.shade_smooth() ########################", "values that should be used when the user # presses", "precision = 2 ) def execute(self, context): # The original", "\"String\" # Display name in the interface. bl_options = {'REGISTER',", "in the UI. layout = self.layout props = layout.operator('object.stringtool_ot') def", "= 2 ) def execute(self, context): # The original script", "'UNDO'} # Enable undo for the operator. bdepth: bpy.props.FloatProperty(name =", "a tooltip for menu items and buttons. bl_idname = \"object.stringtool_ot\"", "#to create an edge between two given objects def Edgify(ob1,ob2):", "to run the script directly from Blender's Text editor #", "x in range(l): for y in range(l): if y !=", "y : Edgify(olist[x], olist[y]) edges.append(bpy.context.active_object) return edges def piper(xlist): bpy.ops.object.select_all(action='DESELECT')", "that should be used when the user # presses the", "# The original script #################### #to create an edge between", "bpy.ops.object.join() bpy.ops.object.convert(target='CURVE') def check(olist): if len(olist) == 0: self.report({'INFO'},'NONE SELECTED", "range(l): for y in range(l): if y != x and", "[] l = len(olist) for x in range(l): for y", "bpy import bmesh class STRING(bpy.types.Operator): \"\"\"My Object Moving Script\"\"\" #", "bmesh class STRING(bpy.types.Operator): \"\"\"My Object Moving Script\"\"\" # Use this", "name in the interface. bl_options = {'REGISTER', 'UNDO'} # Enable", "!= x and x < y : Edgify(olist[x], olist[y]) edges.append(bpy.context.active_object)", "Blender know the operator finished successfully. 
class STRING_PT(bpy.types.Panel): bl_idname =", ": '<NAME>' } import bpy import bmesh class STRING(bpy.types.Operator): \"\"\"My", "= layout.operator('object.stringtool_ot') def register(): bpy.utils.register_class(STRING) def unregister(): bpy.utils.unregister_class(STRING) # This", "pipe = bpy.context.object.data bm = bmesh.new() for v in verts:", "oblist = bpy.context.selected_objects Edgelist = string(oblist) piper(Edgelist) actob = bpy.context.active_object", "Lets Blender know the operator finished successfully. class STRING_PT(bpy.types.Panel): bl_idname", "{'FINISHED'} # Lets Blender know the operator finished successfully. class", "def check(olist): if len(olist) == 0: self.report({'INFO'},'NONE SELECTED OBJECTS') return", "Edgelist = string(oblist) piper(Edgelist) actob = bpy.context.active_object actob.data.bevel_depth = self.bdepth", "for x in xlist: x.select_set(True) bpy.ops.object.join() bpy.ops.object.convert(target='CURVE') def check(olist): if", "self.bdepth bpy.ops.object.shade_smooth() ######################## return {'FINISHED'} # Lets Blender know the", "< y : Edgify(olist[x], olist[y]) edges.append(bpy.context.active_object) return edges def piper(xlist):", "= \"object.stringtool_ot\" # Unique identifier for buttons and menu items", "\"name\": \"STRING\", \"blender\": (2, 80, 0), \"category\": \"Object\", 'Author' :", "} import bpy import bmesh class STRING(bpy.types.Operator): \"\"\"My Object Moving", "into bmesh pipe = bpy.context.object.data bm = bmesh.new() for v", "def unregister(): bpy.utils.unregister_class(STRING) # This allows you to run the", "\"category\": \"Object\", 'Author' : '<NAME>' } import bpy import bmesh", "# You can set the property values that should be", "set the property values that should be used when the", "80, 0), \"category\": \"Object\", 'Author' : '<NAME>' } import bpy", "bdepth: bpy.props.FloatProperty(name = \"String Thickness\", min = 0.1, max =", "data into bmesh pipe = bpy.context.object.data bm = 
bmesh.new() for", "\"object_stringtool_pt\" bl_label = \"String\" bl_space_type = 'VIEW_3D' bl_region_type = 'UI'", "undo for the operator. bdepth: bpy.props.FloatProperty(name = \"String Thickness\", min", "for x in range(l): for y in range(l): if y", "bpy.ops.object.editmode_toggle() bm.to_mesh(pipe) bm.free() bpy.ops.object.editmode_toggle() bpy.ops.mesh.select_all(action='SELECT') bpy.ops.mesh.edge_face_add() bpy.ops.object.editmode_toggle() def string(olist): edges", "bpy.utils.register_class(STRING) def unregister(): bpy.utils.unregister_class(STRING) # This allows you to run", "= 'VIEW_3D' bl_region_type = 'UI' bl_category = \"newprop\" def draw(self,", "to test the add-on without having to install it. if", "items and buttons. bl_idname = \"object.stringtool_ot\" # Unique identifier for", "(0,0,0)) bpy.ops.object.editmode_toggle() bpy.ops.mesh.select_all(action='SELECT') bpy.ops.mesh.delete(type='VERT') #creating the vertices using the current", "context): # The original script #################### #to create an edge", "button in the UI. layout = self.layout props = layout.operator('object.stringtool_ot')", "run the script directly from Blender's Text editor # to", "# presses the button in the UI. layout = self.layout", "the interface. bl_options = {'REGISTER', 'UNDO'} # Enable undo for", "the UI. layout = self.layout props = layout.operator('object.stringtool_ot') def register():", "add-on without having to install it. if __name__ == \"__main__\":", ") def execute(self, context): # The original script #################### #to", "Script\"\"\" # Use this as a tooltip for menu items", "know the operator finished successfully. class STRING_PT(bpy.types.Panel): bl_idname = \"object_stringtool_pt\"", "= 0.1, max = 5, precision = 2 ) def", "for the operator. 
bdepth: bpy.props.FloatProperty(name = \"String Thickness\", min =", "= \"String\" bl_space_type = 'VIEW_3D' bl_region_type = 'UI' bl_category =", "y != x and x < y : Edgify(olist[x], olist[y])", "#################### #to create an edge between two given objects def", "current mesh data into bmesh pipe = bpy.context.object.data bm =", "between two given objects def Edgify(ob1,ob2): loc1 = ob1.location loc2", "0.1, max = 5, precision = 2 ) def execute(self,", "def string(olist): edges = [] l = len(olist) for x", "Use this as a tooltip for menu items and buttons.", "x and x < y : Edgify(olist[x], olist[y]) edges.append(bpy.context.active_object) return", "'UI' bl_category = \"newprop\" def draw(self, context): # You can", "5, precision = 2 ) def execute(self, context): # The", "and x < y : Edgify(olist[x], olist[y]) edges.append(bpy.context.active_object) return edges", "This allows you to run the script directly from Blender's", "without having to install it. if __name__ == \"__main__\": register()", "l = len(olist) for x in range(l): for y in", "menu items and buttons. bl_idname = \"object.stringtool_ot\" # Unique identifier", "'VIEW_3D' bl_region_type = 'UI' bl_category = \"newprop\" def draw(self, context):", "= self.layout props = layout.operator('object.stringtool_ot') def register(): bpy.utils.register_class(STRING) def unregister():", "return 0 else: return 1 oblist = bpy.context.selected_objects Edgelist =", "allows you to run the script directly from Blender's Text", "(2, 80, 0), \"category\": \"Object\", 'Author' : '<NAME>' } import", "Text editor # to test the add-on without having to", "bl_idname = \"object.stringtool_ot\" # Unique identifier for buttons and menu", "buttons and menu items to reference. 
bl_label = \"String\" #", "loc1 = ob1.location loc2 = ob2.location verts = [loc1,loc2] bpy.ops.mesh.primitive_plane_add(location", "bpy.ops.mesh.select_all(action='SELECT') bpy.ops.mesh.edge_face_add() bpy.ops.object.editmode_toggle() def string(olist): edges = [] l =", "bpy.ops.mesh.select_all(action='SELECT') bpy.ops.mesh.delete(type='VERT') #creating the vertices using the current mesh data", "register(): bpy.utils.register_class(STRING) def unregister(): bpy.utils.unregister_class(STRING) # This allows you to", "STRING(bpy.types.Operator): \"\"\"My Object Moving Script\"\"\" # Use this as a", "bl_region_type = 'UI' bl_category = \"newprop\" def draw(self, context): #", "= \"String Thickness\", min = 0.1, max = 5, precision", "bl_label = \"String\" bl_space_type = 'VIEW_3D' bl_region_type = 'UI' bl_category", "= \"String\" # Display name in the interface. bl_options =", "items to reference. bl_label = \"String\" # Display name in", "\"Object\", 'Author' : '<NAME>' } import bpy import bmesh class", "bpy.ops.object.editmode_toggle() bpy.ops.mesh.select_all(action='SELECT') bpy.ops.mesh.delete(type='VERT') #creating the vertices using the current mesh", "bm = bmesh.new() for v in verts: bm.verts.new(v) bpy.ops.object.editmode_toggle() bm.to_mesh(pipe)", "= ob2.location verts = [loc1,loc2] bpy.ops.mesh.primitive_plane_add(location = (0,0,0)) bpy.ops.object.editmode_toggle() bpy.ops.mesh.select_all(action='SELECT')", "bpy.ops.object.convert(target='CURVE') def check(olist): if len(olist) == 0: self.report({'INFO'},'NONE SELECTED OBJECTS')", "len(olist) == 0: self.report({'INFO'},'NONE SELECTED OBJECTS') return 0 else: return", "menu items to reference. 
bl_label = \"String\" # Display name", "= len(olist) for x in range(l): for y in range(l):", "two given objects def Edgify(ob1,ob2): loc1 = ob1.location loc2 =", "for v in verts: bm.verts.new(v) bpy.ops.object.editmode_toggle() bm.to_mesh(pipe) bm.free() bpy.ops.object.editmode_toggle() bpy.ops.mesh.select_all(action='SELECT')", "bl_options = {'REGISTER', 'UNDO'} # Enable undo for the operator.", "= [loc1,loc2] bpy.ops.mesh.primitive_plane_add(location = (0,0,0)) bpy.ops.object.editmode_toggle() bpy.ops.mesh.select_all(action='SELECT') bpy.ops.mesh.delete(type='VERT') #creating the", "bm.to_mesh(pipe) bm.free() bpy.ops.object.editmode_toggle() bpy.ops.mesh.select_all(action='SELECT') bpy.ops.mesh.edge_face_add() bpy.ops.object.editmode_toggle() def string(olist): edges =", "ob2.location verts = [loc1,loc2] bpy.ops.mesh.primitive_plane_add(location = (0,0,0)) bpy.ops.object.editmode_toggle() bpy.ops.mesh.select_all(action='SELECT') bpy.ops.mesh.delete(type='VERT')", "[loc1,loc2] bpy.ops.mesh.primitive_plane_add(location = (0,0,0)) bpy.ops.object.editmode_toggle() bpy.ops.mesh.select_all(action='SELECT') bpy.ops.mesh.delete(type='VERT') #creating the vertices", "#creating the vertices using the current mesh data into bmesh", "y in range(l): if y != x and x <", "in the interface. 
bl_options = {'REGISTER', 'UNDO'} # Enable undo", "return edges def piper(xlist): bpy.ops.object.select_all(action='DESELECT') for x in xlist: x.select_set(True)", "Edgify(olist[x], olist[y]) edges.append(bpy.context.active_object) return edges def piper(xlist): bpy.ops.object.select_all(action='DESELECT') for x", "piper(xlist): bpy.ops.object.select_all(action='DESELECT') for x in xlist: x.select_set(True) bpy.ops.object.join() bpy.ops.object.convert(target='CURVE') def", "bpy.context.selected_objects Edgelist = string(oblist) piper(Edgelist) actob = bpy.context.active_object actob.data.bevel_depth =", "= \"object_stringtool_pt\" bl_label = \"String\" bl_space_type = 'VIEW_3D' bl_region_type =", "\"object.stringtool_ot\" # Unique identifier for buttons and menu items to", "# Enable undo for the operator. bdepth: bpy.props.FloatProperty(name = \"String", "return {'FINISHED'} # Lets Blender know the operator finished successfully.", "buttons. bl_idname = \"object.stringtool_ot\" # Unique identifier for buttons and", "Thickness\", min = 0.1, max = 5, precision = 2", "bpy.ops.object.editmode_toggle() def string(olist): edges = [] l = len(olist) for", "in range(l): for y in range(l): if y != x", "= string(oblist) piper(Edgelist) actob = bpy.context.active_object actob.data.bevel_depth = self.bdepth bpy.ops.object.shade_smooth()", "the button in the UI. layout = self.layout props =", "max = 5, precision = 2 ) def execute(self, context):", "bm.free() bpy.ops.object.editmode_toggle() bpy.ops.mesh.select_all(action='SELECT') bpy.ops.mesh.edge_face_add() bpy.ops.object.editmode_toggle() def string(olist): edges = []", "be used when the user # presses the button in", "= 5, precision = 2 ) def execute(self, context): #", "######################## return {'FINISHED'} # Lets Blender know the operator finished", "successfully. 
class STRING_PT(bpy.types.Panel): bl_idname = \"object_stringtool_pt\" bl_label = \"String\" bl_space_type", "= bpy.context.active_object actob.data.bevel_depth = self.bdepth bpy.ops.object.shade_smooth() ######################## return {'FINISHED'} #", "def draw(self, context): # You can set the property values", "x in xlist: x.select_set(True) bpy.ops.object.join() bpy.ops.object.convert(target='CURVE') def check(olist): if len(olist)", "= self.bdepth bpy.ops.object.shade_smooth() ######################## return {'FINISHED'} # Lets Blender know", "# Unique identifier for buttons and menu items to reference.", "bpy.ops.mesh.primitive_plane_add(location = (0,0,0)) bpy.ops.object.editmode_toggle() bpy.ops.mesh.select_all(action='SELECT') bpy.ops.mesh.delete(type='VERT') #creating the vertices using", "bpy.context.active_object actob.data.bevel_depth = self.bdepth bpy.ops.object.shade_smooth() ######################## return {'FINISHED'} # Lets", "= {'REGISTER', 'UNDO'} # Enable undo for the operator. bdepth:", "execute(self, context): # The original script #################### #to create an", "check(olist): if len(olist) == 0: self.report({'INFO'},'NONE SELECTED OBJECTS') return 0", "bpy.props.FloatProperty(name = \"String Thickness\", min = 0.1, max = 5,", "the add-on without having to install it. if __name__ ==", "if y != x and x < y : Edgify(olist[x],", "context): # You can set the property values that should", "xlist: x.select_set(True) bpy.ops.object.join() bpy.ops.object.convert(target='CURVE') def check(olist): if len(olist) == 0:", "# Display name in the interface. 
bl_options = {'REGISTER', 'UNDO'}", "bpy.ops.mesh.edge_face_add() bpy.ops.object.editmode_toggle() def string(olist): edges = [] l = len(olist)", "verts = [loc1,loc2] bpy.ops.mesh.primitive_plane_add(location = (0,0,0)) bpy.ops.object.editmode_toggle() bpy.ops.mesh.select_all(action='SELECT') bpy.ops.mesh.delete(type='VERT') #creating", "= ob1.location loc2 = ob2.location verts = [loc1,loc2] bpy.ops.mesh.primitive_plane_add(location =", "\"String Thickness\", min = 0.1, max = 5, precision =", "editor # to test the add-on without having to install", "2 ) def execute(self, context): # The original script ####################", "layout.operator('object.stringtool_ot') def register(): bpy.utils.register_class(STRING) def unregister(): bpy.utils.unregister_class(STRING) # This allows", "if len(olist) == 0: self.report({'INFO'},'NONE SELECTED OBJECTS') return 0 else:", "the script directly from Blender's Text editor # to test", "# Lets Blender know the operator finished successfully. class STRING_PT(bpy.types.Panel):", "\"String\" bl_space_type = 'VIEW_3D' bl_region_type = 'UI' bl_category = \"newprop\"", "should be used when the user # presses the button", "the user # presses the button in the UI. layout", "0), \"category\": \"Object\", 'Author' : '<NAME>' } import bpy import", "the operator. bdepth: bpy.props.FloatProperty(name = \"String Thickness\", min = 0.1,", "directly from Blender's Text editor # to test the add-on", "given objects def Edgify(ob1,ob2): loc1 = ob1.location loc2 = ob2.location", "identifier for buttons and menu items to reference. bl_label =" ]
[ "open a XML file that you don't know the model,", "`instance=ClassName(file_path,**options)` For Example to open a XML file that you", "but __NOT__ from Analysis, InstrumentControl or FrontEnds Help ----- <a", "the model, use `xml=pyMez.Code.DataHandlers.XMLModels.XMLBase('MyXML.xml')' or `xml=XMLBase('MyXML.xml')` All data models normally", "save(), str() and if appropriate show() methods. Examples -------- <a", "you don't know the model, use `xml=pyMez.Code.DataHandlers.XMLModels.XMLBase('MyXML.xml')' or `xml=XMLBase('MyXML.xml')` All", "href=\"../../../Examples/How_To_Open_S2p.html\"> How to open a s2p file </a> Import Structure", "subpackage is further divided into modules grouped by a common", "by a common theme. Classes for data that are already", "\"\"\" The DataHandlers subpackage is designed to manipulate data, by", "All data models normally have save(), str() and if appropriate", "href=\"../index.html\">`pyMez.Code`</a> <div> <a href=\"../../../pyMez_Documentation.html\">Documentation Home</a> | <a href=\"../../index.html\">API Documentation Home</a>", "divided into modules grouped by a common theme. Classes for", "s2p file </a> Import Structure ---------------- DataHandlers typically import from", "<a href=\"../../../pyMez_Documentation.html\">Documentation Home</a> | <a href=\"../../index.html\">API Documentation Home</a> | <a", "theme. Classes for data that are already on disk normally", "or FrontEnds Help ----- <a href=\"../index.html\">`pyMez.Code`</a> <div> <a href=\"../../../pyMez_Documentation.html\">Documentation Home</a>", "<a href=\"../index.html\">`pyMez.Code`</a> <div> <a href=\"../../../pyMez_Documentation.html\">Documentation Home</a> | <a href=\"../../index.html\">API Documentation", "for data that are already on disk normally follows the", "str() and if appropriate show() methods. 
Examples -------- <a href=\"../../../Examples/How_To_Open_S2p.html\">", "| <a href=\"../../index.html\">API Documentation Home</a> | <a href=\"../../../Examples/html/Examples_Home.html\">Examples</a> | <a", "are already on disk normally follows the following pattern: `instance=ClassName(file_path,**options)`", "FrontEnds Help ----- <a href=\"../index.html\">`pyMez.Code`</a> <div> <a href=\"../../../pyMez_Documentation.html\">Documentation Home</a> |", "that you don't know the model, use `xml=pyMez.Code.DataHandlers.XMLModels.XMLBase('MyXML.xml')' or `xml=XMLBase('MyXML.xml')`", "data, by allowing different data types to be opened, created,", "data types to be opened, created, saved and updated. The", "<div> <a href=\"../../../pyMez_Documentation.html\">Documentation Home</a> | <a href=\"../../index.html\">API Documentation Home</a> |", "a common theme. Classes for data that are already on", "models normally have save(), str() and if appropriate show() methods.", "to be opened, created, saved and updated. 
The subpackage is", "href=\"../../index.html\">API Documentation Home</a> | <a href=\"../../../Examples/html/Examples_Home.html\">Examples</a> | <a href=\"../../../Reference_Index.html\">Index </a>", "the following pattern: `instance=ClassName(file_path,**options)` For Example to open a XML", "normally follows the following pattern: `instance=ClassName(file_path,**options)` For Example to open", "How to open a s2p file </a> Import Structure ----------------", "Structure ---------------- DataHandlers typically import from Utils but __NOT__ from", "href=\"../../../pyMez_Documentation.html\">Documentation Home</a> | <a href=\"../../index.html\">API Documentation Home</a> | <a href=\"../../../Examples/html/Examples_Home.html\">Examples</a>", "DataHandlers subpackage is designed to manipulate data, by allowing different", "Classes for data that are already on disk normally follows", "__NOT__ from Analysis, InstrumentControl or FrontEnds Help ----- <a href=\"../index.html\">`pyMez.Code`</a>", "or `xml=XMLBase('MyXML.xml')` All data models normally have save(), str() and", "file </a> Import Structure ---------------- DataHandlers typically import from Utils", "already on disk normally follows the following pattern: `instance=ClassName(file_path,**options)` For", "created, saved and updated. The subpackage is further divided into", "typically import from Utils but __NOT__ from Analysis, InstrumentControl or", "opened, created, saved and updated. The subpackage is further divided", "on disk normally follows the following pattern: `instance=ClassName(file_path,**options)` For Example", "---------------- DataHandlers typically import from Utils but __NOT__ from Analysis,", "data models normally have save(), str() and if appropriate show()", "Home</a> | <a href=\"../../index.html\">API Documentation Home</a> | <a href=\"../../../Examples/html/Examples_Home.html\">Examples</a> |", "into modules grouped by a common theme. Classes for data", "updated. 
The subpackage is further divided into modules grouped by", "The DataHandlers subpackage is designed to manipulate data, by allowing", "to manipulate data, by allowing different data types to be", "<a href=\"../../index.html\">API Documentation Home</a> | <a href=\"../../../Examples/html/Examples_Home.html\">Examples</a> | <a href=\"../../../Reference_Index.html\">Index", "For Example to open a XML file that you don't", "subpackage is designed to manipulate data, by allowing different data", "to open a XML file that you don't know the", "use `xml=pyMez.Code.DataHandlers.XMLModels.XMLBase('MyXML.xml')' or `xml=XMLBase('MyXML.xml')` All data models normally have save(),", "data that are already on disk normally follows the following", "don't know the model, use `xml=pyMez.Code.DataHandlers.XMLModels.XMLBase('MyXML.xml')' or `xml=XMLBase('MyXML.xml')` All data", "saved and updated. The subpackage is further divided into modules", "modules grouped by a common theme. Classes for data that", "designed to manipulate data, by allowing different data types to", "different data types to be opened, created, saved and updated.", "<a href=\"../../../Examples/How_To_Open_S2p.html\"> How to open a s2p file </a> Import", "manipulate data, by allowing different data types to be opened,", "disk normally follows the following pattern: `instance=ClassName(file_path,**options)` For Example to", "Help ----- <a href=\"../index.html\">`pyMez.Code`</a> <div> <a href=\"../../../pyMez_Documentation.html\">Documentation Home</a> | <a", "open a s2p file </a> Import Structure ---------------- DataHandlers typically", "Home</a> | <a href=\"../../../Examples/html/Examples_Home.html\">Examples</a> | <a href=\"../../../Reference_Index.html\">Index </a> </div> \"\"\"", "`xml=XMLBase('MyXML.xml')` All data models normally have save(), str() and if", "be opened, created, saved and updated. The subpackage is further", "and updated. 
The subpackage is further divided into modules grouped", "further divided into modules grouped by a common theme. Classes", "Documentation Home</a> | <a href=\"../../../Examples/html/Examples_Home.html\">Examples</a> | <a href=\"../../../Reference_Index.html\">Index </a> </div>", "Example to open a XML file that you don't know", "Analysis, InstrumentControl or FrontEnds Help ----- <a href=\"../index.html\">`pyMez.Code`</a> <div> <a", "from Analysis, InstrumentControl or FrontEnds Help ----- <a href=\"../index.html\">`pyMez.Code`</a> <div>", "methods. Examples -------- <a href=\"../../../Examples/How_To_Open_S2p.html\"> How to open a s2p", "have save(), str() and if appropriate show() methods. Examples --------", "know the model, use `xml=pyMez.Code.DataHandlers.XMLModels.XMLBase('MyXML.xml')' or `xml=XMLBase('MyXML.xml')` All data models", "Utils but __NOT__ from Analysis, InstrumentControl or FrontEnds Help -----", "appropriate show() methods. Examples -------- <a href=\"../../../Examples/How_To_Open_S2p.html\"> How to open", "file that you don't know the model, use `xml=pyMez.Code.DataHandlers.XMLModels.XMLBase('MyXML.xml')' or", "----- <a href=\"../index.html\">`pyMez.Code`</a> <div> <a href=\"../../../pyMez_Documentation.html\">Documentation Home</a> | <a href=\"../../index.html\">API", "show() methods. Examples -------- <a href=\"../../../Examples/How_To_Open_S2p.html\"> How to open a", "following pattern: `instance=ClassName(file_path,**options)` For Example to open a XML file", "a XML file that you don't know the model, use", "and if appropriate show() methods. Examples -------- <a href=\"../../../Examples/How_To_Open_S2p.html\"> How", "types to be opened, created, saved and updated. The subpackage", "a s2p file </a> Import Structure ---------------- DataHandlers typically import", "common theme. 
Classes for data that are already on disk", "follows the following pattern: `instance=ClassName(file_path,**options)` For Example to open a", "import from Utils but __NOT__ from Analysis, InstrumentControl or FrontEnds", "Examples -------- <a href=\"../../../Examples/How_To_Open_S2p.html\"> How to open a s2p file", "model, use `xml=pyMez.Code.DataHandlers.XMLModels.XMLBase('MyXML.xml')' or `xml=XMLBase('MyXML.xml')` All data models normally have", "The subpackage is further divided into modules grouped by a", "by allowing different data types to be opened, created, saved", "grouped by a common theme. Classes for data that are", "that are already on disk normally follows the following pattern:", "from Utils but __NOT__ from Analysis, InstrumentControl or FrontEnds Help", "normally have save(), str() and if appropriate show() methods. Examples", "to open a s2p file </a> Import Structure ---------------- DataHandlers", "if appropriate show() methods. Examples -------- <a href=\"../../../Examples/How_To_Open_S2p.html\"> How to", "pattern: `instance=ClassName(file_path,**options)` For Example to open a XML file that", "InstrumentControl or FrontEnds Help ----- <a href=\"../index.html\">`pyMez.Code`</a> <div> <a href=\"../../../pyMez_Documentation.html\">Documentation", "Import Structure ---------------- DataHandlers typically import from Utils but __NOT__", "XML file that you don't know the model, use `xml=pyMez.Code.DataHandlers.XMLModels.XMLBase('MyXML.xml')'", "DataHandlers typically import from Utils but __NOT__ from Analysis, InstrumentControl", "</a> Import Structure ---------------- DataHandlers typically import from Utils but", "is designed to manipulate data, by allowing different data types", "`xml=pyMez.Code.DataHandlers.XMLModels.XMLBase('MyXML.xml')' or `xml=XMLBase('MyXML.xml')` All data models normally have save(), str()", "is further divided into modules grouped by a common theme.", "-------- <a href=\"../../../Examples/How_To_Open_S2p.html\"> How to 
open a s2p file </a>", "allowing different data types to be opened, created, saved and" ]
[ "backend that does nothing. \"\"\" from djangomail.backends.base import BaseEmailBackend class", "from djangomail.backends.base import BaseEmailBackend class EmailBackend(BaseEmailBackend): def send_messages(self, email_messages): return", "nothing. \"\"\" from djangomail.backends.base import BaseEmailBackend class EmailBackend(BaseEmailBackend): def send_messages(self,", "Dummy email backend that does nothing. \"\"\" from djangomail.backends.base import", "\"\"\" Dummy email backend that does nothing. \"\"\" from djangomail.backends.base", "<reponame>somenzz/djangomail \"\"\" Dummy email backend that does nothing. \"\"\" from", "\"\"\" from djangomail.backends.base import BaseEmailBackend class EmailBackend(BaseEmailBackend): def send_messages(self, email_messages):", "email backend that does nothing. \"\"\" from djangomail.backends.base import BaseEmailBackend", "does nothing. \"\"\" from djangomail.backends.base import BaseEmailBackend class EmailBackend(BaseEmailBackend): def", "that does nothing. \"\"\" from djangomail.backends.base import BaseEmailBackend class EmailBackend(BaseEmailBackend):", "djangomail.backends.base import BaseEmailBackend class EmailBackend(BaseEmailBackend): def send_messages(self, email_messages): return len(list(email_messages))" ]
[ "service_state = \"running\" else: service_state = \"stopped\" services[service_name] = {\"name\":", "line_data[0], \"state\": state_val, \"source\": \"systemd\"} return services def main(): module", "# noqa DOCUMENTATION = ''' --- module: scan_services short_description: Return", "if match_any: # Try extra flags \" -l --allservices\" needed", "rc, stdout, stderr = self.module.run_command(\"%s list-unit-files --type=service | tail -n", "author: <NAME> ''' EXAMPLES = ''' - monit: scan_services #", "= {\"name\": service_name, \"state\": service_state, \"source\": \"sysv\"} services[service_name] = service_data", "\"source\": \"systemd\"} return services def main(): module = AnsibleModule(argument_spec =", "\"arp-ethers.service\": { # \"source\": \"systemd\", # \"state\": \"stopped\", # \"name\":", "= m.group('name') service_goal = m.group('goal') service_state = m.group('state') if m.group('pid'):", "= False class ServiceScanService(BaseService): def gather_services(self): services = {} service_path", "self.module.get_bin_path(\"chkconfig\") # sysvinit if service_path is not None and chkconfig_path", "m = p.match(line) if not m: continue service_name = m.group('name')", "re.compile( r'(?P<service>.*?)\\s+[0-9]:(?P<rl0>on|off)\\s+[0-9]:(?P<rl1>on|off)\\s+[0-9]:(?P<rl2>on|off)\\s+' r'[0-9]:(?P<rl3>on|off)\\s+[0-9]:(?P<rl4>on|off)\\s+[0-9]:(?P<rl5>on|off)\\s+[0-9]:(?P<rl6>on|off)') rc, stdout, stderr = self.module.run_command('%s' % chkconfig_path,", "in f: if 'systemd' in line: return True return False", "information as fact data description: - Return service state information", "# \"name\": \"network\" # }, # \"arp-ethers.service\": { # \"source\":", "\"sysv\"} # Upstart if initctl_path is not None and chkconfig_path", "in (0,): service_state = 'running' #elif rc in (1,3): else:", "% service_path p = re.compile( r'(?P<service>.*?)\\s+[0-9]:(?P<rl0>on|off)\\s+[0-9]:(?P<rl1>on|off)\\s+[0-9]:(?P<rl2>on|off)\\s+' 
r'[0-9]:(?P<rl3>on|off)\\s+[0-9]:(?P<rl4>on|off)\\s+[0-9]:(?P<rl5>on|off)\\s+[0-9]:(?P<rl6>on|off)') rc, stdout, stderr", "= self.module.run_command(\"%s --status-all 2>&1 | grep -E \\\"\\\\[ (\\\\+|\\\\-) \\\\]\\\"\"", "DOCUMENTATION = ''' --- module: scan_services short_description: Return service state", "}, # \"arp-ethers.service\": { # \"source\": \"systemd\", # \"state\": \"stopped\",", "return None rc, stdout, stderr = self.module.run_command(\"%s list-unit-files --type=service |", "# \"state\": \"running\", # \"name\": \"network\" # }, # \"arp-ethers.service\":", "-n -2\" % systemctl_path, use_unsafe_shell=True) for line in stdout.split(\"\\n\"): line_data", "= ''' - monit: scan_services # Example fact output: #", "= \" \".join(line_data[3:]) if line_data[1] == \"+\": service_state = \"running\"", "cases where stdout does not fit pattern match_any = False", "% service_path, use_unsafe_shell=True) for line in stdout.split(\"\\n\"): line_data = line.split()", "self.module.run_command('%s -l --allservices' % chkconfig_path, use_unsafe_shell=True) elif '--list' in stderr:", "= p.match(line) if m: service_name = m.group('service') service_state = 'stopped'", "ansible.module_utils.basic import * # noqa DOCUMENTATION = ''' --- module:", "if systemctl_path is None: return None rc, stdout, stderr =", "if m.group('pid'): pid = m.group('pid') else: pid = None #", "stderr = self.module.run_command('%s -l --allservices' % chkconfig_path, use_unsafe_shell=True) elif '--list'", "in sudoers' in stderr.lower(): self.incomplete_warning = True continue else: service_state", "= dict()) # noqa service_modules = (ServiceScanService, SystemctlScanService) all_services =", "= self.module.get_bin_path(\"initctl\") chkconfig_path = self.module.get_bin_path(\"chkconfig\") # sysvinit if service_path is", "Extra flag needed for RHEL5 rc, stdout, stderr = self.module.run_command('%s", "2>&1 | grep -E \\\"\\\\[ (\\\\+|\\\\-) \\\\]\\\"\" % service_path, use_unsafe_shell=True)", "= 
p.match(line) if not m: continue service_name = m.group('name') service_goal", "insufficient privileges.\") else: results = dict(ansible_facts=dict(services=all_services)) if incomplete_warning: results['msg'] =", "init is the systemd command, using comm as cmdline could", "line_data[1] == \"+\": service_state = \"running\" else: service_state = \"stopped\"", "RH sysvinit elif chkconfig_path is not None: #print '%s --status-all", "initctl_path is not None and chkconfig_path is None: p =", "% chkconfig_path, use_unsafe_shell=True) elif '--list' in stderr: # Extra flag", "data description: - Return service state information as fact data", "\"network\" # }, # \"arp-ethers.service\": { # \"source\": \"systemd\", #", "\"upstart\"} services[service_name] = payload # RH sysvinit elif chkconfig_path is", "if p.match(line): match_any = True if not match_any: p_simple =", "rc, stdout, stderr = self.module.run_command('%s %s status' % (service_path, service_name),", "4: continue # Skipping because we expected more data service_name", "'root' in stderr or 'permission' in stderr.lower() or 'not in", "\"goal\": service_goal, \"source\": \"upstart\"} services[service_name] = payload # RH sysvinit", "and chkconfig_path is None: p = re.compile(r'^\\s?(?P<name>.*)\\s(?P<goal>\\w+)\\/(?P<state>\\w+)(\\,\\sprocess\\s(?P<pid>[0-9]+))?\\s*$') rc, stdout, stderr", "= re.compile(r'^\\s?(?P<name>.*)\\s(?P<goal>\\w+)\\/(?P<state>\\w+)(\\,\\sprocess\\s(?P<pid>[0-9]+))?\\s*$') rc, stdout, stderr = self.module.run_command(\"%s list\" % initctl_path)", "* # noqa DOCUMENTATION = ''' --- module: scan_services short_description:", "not None and chkconfig_path is None: p = re.compile(r'^\\s?(?P<name>.*)\\s(?P<goal>\\w+)\\/(?P<state>\\w+)(\\,\\sprocess\\s(?P<pid>[0-9]+))?\\s*$') rc,", "None and chkconfig_path is None: p = re.compile(r'^\\s?(?P<name>.*)\\s(?P<goal>\\w+)\\/(?P<state>\\w+)(\\,\\sprocess\\s(?P<pid>[0-9]+))?\\s*$') rc, stdout,", "if not self.systemd_enabled(): return None systemctl_path 
= self.module.get_bin_path(\"systemctl\", opt_dirs=[\"/usr/bin\", \"/usr/local/bin\"])", "match_any: p_simple = re.compile(r'(?P<service>.*?)\\s+(?P<rl0>on|off)') match_any = False for line in", "p_simple = re.compile(r'(?P<service>.*?)\\s+(?P<rl0>on|off)') match_any = False for line in stdout.split('\\n'):", "def gather_services(self): services = {} service_path = self.module.get_bin_path(\"service\") if service_path", "short_description: Return service state information as fact data description: -", "line in stdout.split('\\n'): if p.match(line): match_any = True if not", "-E \\\"\\\\[ (\\\\+|\\\\-) \\\\]\\\"\" % service_path, use_unsafe_shell=True) for line in", "True if match_any: # Try extra flags \" -l --allservices\"", "(\\\\+|\\\\-) \\\\]\\\"\" % service_path, use_unsafe_shell=True) for line in stdout.split(\"\\n\"): line_data", "not None and chkconfig_path is None: rc, stdout, stderr =", "fit pattern match_any = False for line in stdout.split('\\n'): if", "# } # } # } ''' class BaseService(object): def", "= stdout.replace(\"\\r\",\"\") for line in real_stdout.split(\"\\n\"): m = p.match(line) if", "#print '%s --status-all | grep -E \"is (running|stopped)\"' % service_path", "stderr = self.module.run_command('%s --list' % chkconfig_path, use_unsafe_shell=True) for line in", "stderr.lower() or 'not in sudoers' in stderr.lower(): self.incomplete_warning = True", "= \"running\" else: service_state = \"stopped\" services[service_name] = {\"name\": service_name,", "'stopped' if m.group('rl3') == 'on': rc, stdout, stderr = self.module.run_command('%s", "{ # \"network\": { # \"source\": \"sysv\", # \"state\": \"running\",", "if service_path is not None and chkconfig_path is None: rc,", "# Skipping because we expected more data service_name = \"", "\"stopped\" services[service_name] = {\"name\": service_name, \"state\": service_state, \"source\": \"sysv\"} #", "expected more data service_name = \" \".join(line_data[3:]) if line_data[1] ==", "\"WARNING: Could not 
find status for all services. Sometimes this", "Upstart if initctl_path is not None and chkconfig_path is None:", "state_val = \"running\" else: state_val = \"stopped\" services[line_data[0]] = {\"name\":", "in service_modules: svcmod = svc_module(module) svc = svcmod.gather_services() if svc", "= svc_module(module) svc = svcmod.gather_services() if svc is not None:", "service_data = {\"name\": service_name, \"state\": service_state, \"source\": \"sysv\"} services[service_name] =", "msg=\"Failed to find any services. Sometimes this is due to", "state information as fact data description: - Return service state", "<filename>awx/plugins/library/scan_services.py #!/usr/bin/env python import re from ansible.module_utils.basic import * #", "state information as fact data for various service management utilities", "if m: service_name = m.group('service') service_state = 'stopped' if m.group('rl3')", "to find any services. Sometimes this is due to insufficient", "elif chkconfig_path is not None: #print '%s --status-all | grep", "class SystemctlScanService(BaseService): def systemd_enabled(self): # Check if init is the", "comm doesn't exist, old kernel, no systemd return False for", "service_state = 'running' #elif rc in (1,3): else: if 'root'", "service_name), use_unsafe_shell=True) service_state = rc if rc in (0,): service_state", "< 4: continue # Skipping because we expected more data", "service_state = 'stopped' service_data = {\"name\": service_name, \"state\": service_state, \"source\":", "\"name\": \"network\" # }, # \"arp-ethers.service\": { # \"source\": \"systemd\",", "p.match(line) if not m: continue service_name = m.group('name') service_goal =", "None # NOQA payload = {\"name\": service_name, \"state\": service_state, \"goal\":", "return None systemctl_path = self.module.get_bin_path(\"systemctl\", opt_dirs=[\"/usr/bin\", \"/usr/local/bin\"]) if systemctl_path is", "sysvinit if service_path is not None and chkconfig_path is None:", "= {\"name\": service_name, 
\"state\": service_state, \"goal\": service_goal, \"source\": \"upstart\"} services[service_name]", "service_state, \"goal\": service_goal, \"source\": \"upstart\"} services[service_name] = payload # RH", "None rc, stdout, stderr = self.module.run_command(\"%s list-unit-files --type=service | tail", "except IOError: # If comm doesn't exist, old kernel, no", "= \"running\" else: state_val = \"stopped\" services[line_data[0]] = {\"name\": line_data[0],", "in real_stdout.split(\"\\n\"): m = p.match(line) if not m: continue service_name", "return False for line in f: if 'systemd' in line:", "data service_name = \" \".join(line_data[3:]) if line_data[1] == \"+\": service_state", "Try extra flags \" -l --allservices\" needed for SLES11 rc,", "service_goal = m.group('goal') service_state = m.group('state') if m.group('pid'): pid =", "= self.module.run_command('%s --list' % chkconfig_path, use_unsafe_shell=True) for line in stdout.split('\\n'):", "# }, # \"arp-ethers.service\": { # \"source\": \"systemd\", # \"state\":", "r'(?P<service>.*?)\\s+[0-9]:(?P<rl0>on|off)\\s+[0-9]:(?P<rl1>on|off)\\s+[0-9]:(?P<rl2>on|off)\\s+' r'[0-9]:(?P<rl3>on|off)\\s+[0-9]:(?P<rl4>on|off)\\s+[0-9]:(?P<rl5>on|off)\\s+[0-9]:(?P<rl6>on|off)') rc, stdout, stderr = self.module.run_command('%s' % chkconfig_path, use_unsafe_shell=True)", "(1,3): else: if 'root' in stderr or 'permission' in stderr.lower()", "use_unsafe_shell=True) elif '--list' in stderr: # Extra flag needed for", "initctl_path) real_stdout = stdout.replace(\"\\r\",\"\") for line in real_stdout.split(\"\\n\"): m =", "\\\"\\\\[ (\\\\+|\\\\-) \\\\]\\\"\" % service_path, use_unsafe_shell=True) for line in stdout.split(\"\\n\"):", "use_unsafe_shell=True) service_state = rc if rc in (0,): service_state =", "output: # host | success >> { # \"ansible_facts\": {", "{ # \"source\": \"systemd\", # \"state\": \"stopped\", # \"name\": \"arp-ethers.service\"", "self.module.run_command(\"%s list-unit-files --type=service | tail -n +2 | head -n", 
"systemd_enabled(self): # Check if init is the systemd command, using", "for special cases where stdout does not fit pattern match_any", "% initctl_path) real_stdout = stdout.replace(\"\\r\",\"\") for line in real_stdout.split(\"\\n\"): m", "line_data = line.split() if len(line_data) != 2: continue if line_data[1]", "service_path is not None and chkconfig_path is None: rc, stdout,", "stdout.replace(\"\\r\",\"\") for line in real_stdout.split(\"\\n\"): m = p.match(line) if not", "= \"WARNING: Could not find status for all services. Sometimes", "continue service_name = m.group('name') service_goal = m.group('goal') service_state = m.group('state')", "m.group('goal') service_state = m.group('state') if m.group('pid'): pid = m.group('pid') else:", "{\"name\": service_name, \"state\": service_state, \"source\": \"sysv\"} # Upstart if initctl_path", "If comm doesn't exist, old kernel, no systemd return False", "opt_dirs=[\"/usr/bin\", \"/usr/local/bin\"]) if systemctl_path is None: return None rc, stdout,", "| success >> { # \"ansible_facts\": { # \"services\": {", "\"running\", # \"name\": \"network\" # }, # \"arp-ethers.service\": { #", "-E \"is (running|stopped)\"' % service_path p = re.compile( r'(?P<service>.*?)\\s+[0-9]:(?P<rl0>on|off)\\s+[0-9]:(?P<rl1>on|off)\\s+[0-9]:(?P<rl2>on|off)\\s+' r'[0-9]:(?P<rl3>on|off)\\s+[0-9]:(?P<rl4>on|off)\\s+[0-9]:(?P<rl5>on|off)\\s+[0-9]:(?P<rl6>on|off)')", "in stderr.lower(): self.incomplete_warning = True continue else: service_state = 'stopped'", "'stopped' service_data = {\"name\": service_name, \"state\": service_state, \"source\": \"sysv\"} services[service_name]", "service_goal, \"source\": \"upstart\"} services[service_name] = payload # RH sysvinit elif", "main(): module = AnsibleModule(argument_spec = dict()) # noqa service_modules =", "incomplete_warning: results['msg'] = \"WARNING: Could not find status for all", "Return service state information as fact data for various service", "(running|stopped)\"' % service_path p = 
re.compile( r'(?P<service>.*?)\\s+[0-9]:(?P<rl0>on|off)\\s+[0-9]:(?P<rl1>on|off)\\s+[0-9]:(?P<rl2>on|off)\\s+' r'[0-9]:(?P<rl3>on|off)\\s+[0-9]:(?P<rl4>on|off)\\s+[0-9]:(?P<rl5>on|off)\\s+[0-9]:(?P<rl6>on|off)') rc, stdout,", "% (service_path, service_name), use_unsafe_shell=True) service_state = rc if rc in", "gather_services(self): services = {} service_path = self.module.get_bin_path(\"service\") if service_path is", "self.module.get_bin_path(\"initctl\") chkconfig_path = self.module.get_bin_path(\"chkconfig\") # sysvinit if service_path is not", "False for line in stdout.split('\\n'): if p.match(line): match_any = True", "= True continue else: service_state = 'stopped' service_data = {\"name\":", "service_modules = (ServiceScanService, SystemctlScanService) all_services = {} incomplete_warning = False", "# Extra flag needed for RHEL5 rc, stdout, stderr =", "\"+\": service_state = \"running\" else: service_state = \"stopped\" services[service_name] =", "chkconfig_path is None: rc, stdout, stderr = self.module.run_command(\"%s --status-all 2>&1", "systemctl_path, use_unsafe_shell=True) for line in stdout.split(\"\\n\"): line_data = line.split() if", "stderr = self.module.run_command(\"%s list\" % initctl_path) real_stdout = stdout.replace(\"\\r\",\"\") for", "service state information as fact data for various service management", "chkconfig_path, use_unsafe_shell=True) for line in stdout.split('\\n'): m = p.match(line) if", "stdout, stderr = self.module.run_command(\"%s list-unit-files --type=service | tail -n +2", "utilities version_added: \"1.9\" options: requirements: [ ] author: <NAME> '''", "# } # } ''' class BaseService(object): def __init__(self, module):", "None systemctl_path = self.module.get_bin_path(\"systemctl\", opt_dirs=[\"/usr/bin\", \"/usr/local/bin\"]) if systemctl_path is None:", "| head -n -2\" % systemctl_path, use_unsafe_shell=True) for line in", "== 0: results = dict(skipped=True, msg=\"Failed to find any services.", "host | success >> { # 
\"ansible_facts\": { # \"services\":", "if 'root' in stderr or 'permission' in stderr.lower() or 'not", "AnsibleModule(argument_spec = dict()) # noqa service_modules = (ServiceScanService, SystemctlScanService) all_services", "\"source\": \"sysv\"} services[service_name] = service_data return services class SystemctlScanService(BaseService): def", "match_any = True if not match_any: p_simple = re.compile(r'(?P<service>.*?)\\s+(?P<rl0>on|off)') match_any", "False for svc_module in service_modules: svcmod = svc_module(module) svc =", "service_name = \" \".join(line_data[3:]) if line_data[1] == \"+\": service_state =", "False class ServiceScanService(BaseService): def gather_services(self): services = {} service_path =", "systemctl_path is None: return None rc, stdout, stderr = self.module.run_command(\"%s", "EXAMPLES = ''' - monit: scan_services # Example fact output:", "= False for line in stdout.split('\\n'): if p.match(line): match_any =", "for RHEL5 rc, stdout, stderr = self.module.run_command('%s --list' % chkconfig_path,", "not fit pattern match_any = False for line in stdout.split('\\n'):", "services = {} service_path = self.module.get_bin_path(\"service\") if service_path is None:", "BaseService(object): def __init__(self, module): self.module = module self.incomplete_warning = False", "# \"services\": { # \"network\": { # \"source\": \"sysv\", #", "grep -E \\\"\\\\[ (\\\\+|\\\\-) \\\\]\\\"\" % service_path, use_unsafe_shell=True) for line", "= line.split() if len(line_data) < 4: continue # Skipping because", "noqa service_modules = (ServiceScanService, SystemctlScanService) all_services = {} incomplete_warning =", "elif '--list' in stderr: # Extra flag needed for RHEL5", "\"state\": service_state, \"source\": \"sysv\"} # Upstart if initctl_path is not", "if rc in (0,): service_state = 'running' #elif rc in", "p.match(line): match_any = True if not match_any: p_simple = re.compile(r'(?P<service>.*?)\\s+(?P<rl0>on|off)')", "= True if match_any: # Try extra 
flags \" -l", "stderr = self.module.run_command('%s %s status' % (service_path, service_name), use_unsafe_shell=True) service_state", "\"sysv\", # \"state\": \"running\", # \"name\": \"network\" # }, #", "find status for all services. Sometimes this is due to", "= self.module.get_bin_path(\"service\") if service_path is None: return None initctl_path =", "service_name = m.group('service') service_state = 'stopped' if m.group('rl3') == 'on':", "line in stdout.split('\\n'): if p_simple.match(line): match_any = True if match_any:", "rc, stdout, stderr = self.module.run_command(\"%s --status-all 2>&1 | grep -E", "flag needed for RHEL5 rc, stdout, stderr = self.module.run_command('%s --list'", "due to insufficient privileges.\") else: results = dict(ansible_facts=dict(services=all_services)) if incomplete_warning:", "[ ] author: <NAME> ''' EXAMPLES = ''' - monit:", "\"state\": state_val, \"source\": \"systemd\"} return services def main(): module =", "find any services. Sometimes this is due to insufficient privileges.\")", "pid = m.group('pid') else: pid = None # NOQA payload", "p = re.compile( r'(?P<service>.*?)\\s+[0-9]:(?P<rl0>on|off)\\s+[0-9]:(?P<rl1>on|off)\\s+[0-9]:(?P<rl2>on|off)\\s+' r'[0-9]:(?P<rl3>on|off)\\s+[0-9]:(?P<rl4>on|off)\\s+[0-9]:(?P<rl5>on|off)\\s+[0-9]:(?P<rl6>on|off)') rc, stdout, stderr = self.module.run_command('%s'", "be symlink try: f = open('/proc/1/comm', 'r') except IOError: #", "\".join(line_data[3:]) if line_data[1] == \"+\": service_state = \"running\" else: service_state", ">> { # \"ansible_facts\": { # \"services\": { # \"network\":", "-l --allservices' % chkconfig_path, use_unsafe_shell=True) elif '--list' in stderr: #", "stdout, stderr = self.module.run_command('%s --list' % chkconfig_path, use_unsafe_shell=True) for line", "incomplete_warning = False for svc_module in service_modules: svcmod = svc_module(module)", "if len(all_services) == 0: results = dict(skipped=True, msg=\"Failed to find", "% systemctl_path, 
use_unsafe_shell=True) for line in stdout.split(\"\\n\"): line_data = line.split()", "len(all_services) == 0: results = dict(skipped=True, msg=\"Failed to find any", "using comm as cmdline could be symlink try: f =", "list-unit-files --type=service | tail -n +2 | head -n -2\"", "as fact data description: - Return service state information as", "stdout.split(\"\\n\"): line_data = line.split() if len(line_data) < 4: continue #", "services[service_name] = service_data return services class SystemctlScanService(BaseService): def systemd_enabled(self): #", "stdout, stderr = self.module.run_command('%s -l --allservices' % chkconfig_path, use_unsafe_shell=True) elif", "--type=service | tail -n +2 | head -n -2\" %", "= m.group('goal') service_state = m.group('state') if m.group('pid'): pid = m.group('pid')", "\"services\": { # \"network\": { # \"source\": \"sysv\", # \"state\":", "rc, stdout, stderr = self.module.run_command(\"%s list\" % initctl_path) real_stdout =", "as cmdline could be symlink try: f = open('/proc/1/comm', 'r')", "if svc is not None: all_services.update(svc) if svcmod.incomplete_warning: incomplete_warning =", "= {} service_path = self.module.get_bin_path(\"service\") if service_path is None: return", "# Check if init is the systemd command, using comm", "= re.compile( r'(?P<service>.*?)\\s+[0-9]:(?P<rl0>on|off)\\s+[0-9]:(?P<rl1>on|off)\\s+[0-9]:(?P<rl2>on|off)\\s+' r'[0-9]:(?P<rl3>on|off)\\s+[0-9]:(?P<rl4>on|off)\\s+[0-9]:(?P<rl5>on|off)\\s+[0-9]:(?P<rl6>on|off)') rc, stdout, stderr = self.module.run_command('%s' %", "len(line_data) < 4: continue # Skipping because we expected more", "= self.module.get_bin_path(\"systemctl\", opt_dirs=[\"/usr/bin\", \"/usr/local/bin\"]) if systemctl_path is None: return None", "state_val = \"stopped\" services[line_data[0]] = {\"name\": line_data[0], \"state\": state_val, \"source\":", "line in stdout.split(\"\\n\"): line_data = line.split() if len(line_data) < 4:", "None: p = 
re.compile(r'^\\s?(?P<name>.*)\\s(?P<goal>\\w+)\\/(?P<state>\\w+)(\\,\\sprocess\\s(?P<pid>[0-9]+))?\\s*$') rc, stdout, stderr = self.module.run_command(\"%s list\"", "| grep -E \\\"\\\\[ (\\\\+|\\\\-) \\\\]\\\"\" % service_path, use_unsafe_shell=True) for", "--list' % chkconfig_path, use_unsafe_shell=True) for line in stdout.split('\\n'): m =", "--status-all | grep -E \"is (running|stopped)\"' % service_path p =", "# Example fact output: # host | success >> {", "Check if init is the systemd command, using comm as", "\"sysv\"} services[service_name] = service_data return services class SystemctlScanService(BaseService): def systemd_enabled(self):", "# sysvinit if service_path is not None and chkconfig_path is", "service_state, \"source\": \"sysv\"} # Upstart if initctl_path is not None", "in stdout.split('\\n'): m = p.match(line) if m: service_name = m.group('service')", "service_modules: svcmod = svc_module(module) svc = svcmod.gather_services() if svc is", "as fact data for various service management utilities version_added: \"1.9\"", "True return False def gather_services(self): services = {} if not", "self.incomplete_warning = False class ServiceScanService(BaseService): def gather_services(self): services = {}", "services def main(): module = AnsibleModule(argument_spec = dict()) # noqa", "self.module.run_command(\"%s list\" % initctl_path) real_stdout = stdout.replace(\"\\r\",\"\") for line in", "None: all_services.update(svc) if svcmod.incomplete_warning: incomplete_warning = True if len(all_services) ==", "'--list' in stderr: # Extra flag needed for RHEL5 rc,", "stdout, stderr = self.module.run_command(\"%s --status-all 2>&1 | grep -E \\\"\\\\[", "None: return None rc, stdout, stderr = self.module.run_command(\"%s list-unit-files --type=service", "= {\"name\": line_data[0], \"state\": state_val, \"source\": \"systemd\"} return services def", "results = dict(skipped=True, msg=\"Failed to find any services. 
Sometimes this", "\\\\]\\\"\" % service_path, use_unsafe_shell=True) for line in stdout.split(\"\\n\"): line_data =", "= 'stopped' service_data = {\"name\": service_name, \"state\": service_state, \"source\": \"sysv\"}", "service_state, \"source\": \"sysv\"} services[service_name] = service_data return services class SystemctlScanService(BaseService):", "all services. Sometimes this is due to insufficient privileges.\" module.exit_json(**results)", "Return service state information as fact data description: - Return", "stdout.split(\"\\n\"): line_data = line.split() if len(line_data) != 2: continue if", "m: continue service_name = m.group('name') service_goal = m.group('goal') service_state =", "grep -E \"is (running|stopped)\"' % service_path p = re.compile( r'(?P<service>.*?)\\s+[0-9]:(?P<rl0>on|off)\\s+[0-9]:(?P<rl1>on|off)\\s+[0-9]:(?P<rl2>on|off)\\s+'", "self.module.run_command('%s --list' % chkconfig_path, use_unsafe_shell=True) for line in stdout.split('\\n'): m", "line in stdout.split(\"\\n\"): line_data = line.split() if len(line_data) != 2:", "data for various service management utilities version_added: \"1.9\" options: requirements:", "module = AnsibleModule(argument_spec = dict()) # noqa service_modules = (ServiceScanService,", "-l --allservices\" needed for SLES11 rc, stdout, stderr = self.module.run_command('%s", "else: service_state = \"stopped\" services[service_name] = {\"name\": service_name, \"state\": service_state,", "sudoers' in stderr.lower(): self.incomplete_warning = True continue else: service_state =", "stdout.split('\\n'): m = p.match(line) if m: service_name = m.group('service') service_state", "line in real_stdout.split(\"\\n\"): m = p.match(line) if not m: continue", "status for all services. 
Sometimes this is due to insufficient", "None: rc, stdout, stderr = self.module.run_command(\"%s --status-all 2>&1 | grep", "monit: scan_services # Example fact output: # host | success", "head -n -2\" % systemctl_path, use_unsafe_shell=True) for line in stdout.split(\"\\n\"):", "# \"ansible_facts\": { # \"services\": { # \"network\": { #", "pid = None # NOQA payload = {\"name\": service_name, \"state\":", "!= 2: continue if line_data[1] == \"enabled\": state_val = \"running\"", "__init__(self, module): self.module = module self.incomplete_warning = False class ServiceScanService(BaseService):", "] author: <NAME> ''' EXAMPLES = ''' - monit: scan_services", "module self.incomplete_warning = False class ServiceScanService(BaseService): def gather_services(self): services =", "\"state\": service_state, \"source\": \"sysv\"} services[service_name] = service_data return services class", "Check for special cases where stdout does not fit pattern", "= open('/proc/1/comm', 'r') except IOError: # If comm doesn't exist,", "pattern match_any = False for line in stdout.split('\\n'): if p.match(line):", "\"state\": \"stopped\", # \"name\": \"arp-ethers.service\" # } # } #", "# \"name\": \"arp-ethers.service\" # } # } # } '''", "} ''' class BaseService(object): def __init__(self, module): self.module = module", "for line in stdout.split('\\n'): if p_simple.match(line): match_any = True if", "'running' #elif rc in (1,3): else: if 'root' in stderr", "== \"enabled\": state_val = \"running\" else: state_val = \"stopped\" services[line_data[0]]", "where stdout does not fit pattern match_any = False for", "(ServiceScanService, SystemctlScanService) all_services = {} incomplete_warning = False for svc_module", "\"/usr/local/bin\"]) if systemctl_path is None: return None rc, stdout, stderr", "if init is the systemd command, using comm as cmdline", "service_name, \"state\": service_state, \"source\": \"sysv\"} # Upstart if initctl_path is", "is not None and chkconfig_path is None: rc, 
stdout, stderr", "'not in sudoers' in stderr.lower(): self.incomplete_warning = True continue else:", "needed for RHEL5 rc, stdout, stderr = self.module.run_command('%s --list' %", "various service management utilities version_added: \"1.9\" options: requirements: [ ]", "extra flags \" -l --allservices\" needed for SLES11 rc, stdout,", "noqa DOCUMENTATION = ''' --- module: scan_services short_description: Return service", "tail -n +2 | head -n -2\" % systemctl_path, use_unsafe_shell=True)", "else: service_state = 'stopped' service_data = {\"name\": service_name, \"state\": service_state,", "{} if not self.systemd_enabled(): return None systemctl_path = self.module.get_bin_path(\"systemctl\", opt_dirs=[\"/usr/bin\",", "rc in (0,): service_state = 'running' #elif rc in (1,3):", "we expected more data service_name = \" \".join(line_data[3:]) if line_data[1]", "not None: #print '%s --status-all | grep -E \"is (running|stopped)\"'", "chkconfig_path, use_unsafe_shell=True) elif '--list' in stderr: # Extra flag needed", "rc, stdout, stderr = self.module.run_command('%s -l --allservices' % chkconfig_path, use_unsafe_shell=True)", "ServiceScanService(BaseService): def gather_services(self): services = {} service_path = self.module.get_bin_path(\"service\") if", "def __init__(self, module): self.module = module self.incomplete_warning = False class", "# RH sysvinit elif chkconfig_path is not None: #print '%s", "f: if 'systemd' in line: return True return False def", "= self.module.get_bin_path(\"chkconfig\") # sysvinit if service_path is not None and", "payload = {\"name\": service_name, \"state\": service_state, \"goal\": service_goal, \"source\": \"upstart\"}", "# noqa service_modules = (ServiceScanService, SystemctlScanService) all_services = {} incomplete_warning", "exist, old kernel, no systemd return False for line in", "m = p.match(line) if m: service_name = m.group('service') service_state =", "special cases where stdout does not fit pattern match_any =", 
"all_services = {} incomplete_warning = False for svc_module in service_modules:", "# \"source\": \"sysv\", # \"state\": \"running\", # \"name\": \"network\" #", "m.group('rl3') == 'on': rc, stdout, stderr = self.module.run_command('%s %s status'", "service_name = m.group('name') service_goal = m.group('goal') service_state = m.group('state') if", "stderr = self.module.run_command(\"%s --status-all 2>&1 | grep -E \\\"\\\\[ (\\\\+|\\\\-)", "in stdout.split(\"\\n\"): line_data = line.split() if len(line_data) < 4: continue", "service management utilities version_added: \"1.9\" options: requirements: [ ] author:", "--status-all 2>&1 | grep -E \\\"\\\\[ (\\\\+|\\\\-) \\\\]\\\"\" % service_path,", "not match_any: p_simple = re.compile(r'(?P<service>.*?)\\s+(?P<rl0>on|off)') match_any = False for line", "# \"network\": { # \"source\": \"sysv\", # \"state\": \"running\", #", "if not match_any: p_simple = re.compile(r'(?P<service>.*?)\\s+(?P<rl0>on|off)') match_any = False for", "= self.module.run_command('%s %s status' % (service_path, service_name), use_unsafe_shell=True) service_state =", "service_data return services class SystemctlScanService(BaseService): def systemd_enabled(self): # Check if", "rc, stdout, stderr = self.module.run_command('%s' % chkconfig_path, use_unsafe_shell=True) # Check", "\"enabled\": state_val = \"running\" else: state_val = \"stopped\" services[line_data[0]] =", "= ''' --- module: scan_services short_description: Return service state information", "self.module.get_bin_path(\"service\") if service_path is None: return None initctl_path = self.module.get_bin_path(\"initctl\")", "if len(line_data) < 4: continue # Skipping because we expected", "module): self.module = module self.incomplete_warning = False class ServiceScanService(BaseService): def", "{} incomplete_warning = False for svc_module in service_modules: svcmod =", "is None: return None initctl_path = self.module.get_bin_path(\"initctl\") chkconfig_path = 
self.module.get_bin_path(\"chkconfig\")", "systemd command, using comm as cmdline could be symlink try:", "fact data for various service management utilities version_added: \"1.9\" options:", "dict(ansible_facts=dict(services=all_services)) if incomplete_warning: results['msg'] = \"WARNING: Could not find status", "real_stdout.split(\"\\n\"): m = p.match(line) if not m: continue service_name =", "% chkconfig_path, use_unsafe_shell=True) # Check for special cases where stdout", "does not fit pattern match_any = False for line in", "stdout.split('\\n'): if p_simple.match(line): match_any = True if match_any: # Try", "\" -l --allservices\" needed for SLES11 rc, stdout, stderr =", "stderr or 'permission' in stderr.lower() or 'not in sudoers' in", "cmdline could be symlink try: f = open('/proc/1/comm', 'r') except", "import re from ansible.module_utils.basic import * # noqa DOCUMENTATION =", "chkconfig_path is None: p = re.compile(r'^\\s?(?P<name>.*)\\s(?P<goal>\\w+)\\/(?P<state>\\w+)(\\,\\sprocess\\s(?P<pid>[0-9]+))?\\s*$') rc, stdout, stderr =", "def gather_services(self): services = {} if not self.systemd_enabled(): return None", "for line in stdout.split(\"\\n\"): line_data = line.split() if len(line_data) !=", "services[service_name] = payload # RH sysvinit elif chkconfig_path is not", "= payload # RH sysvinit elif chkconfig_path is not None:", "use_unsafe_shell=True) # Check for special cases where stdout does not", "if svcmod.incomplete_warning: incomplete_warning = True if len(all_services) == 0: results", "line in stdout.split('\\n'): m = p.match(line) if m: service_name =", "class ServiceScanService(BaseService): def gather_services(self): services = {} service_path = self.module.get_bin_path(\"service\")", "service_path is None: return None initctl_path = self.module.get_bin_path(\"initctl\") chkconfig_path =", "(service_path, service_name), use_unsafe_shell=True) service_state = rc if rc in (0,):", "service_name, \"state\": service_state, \"source\": 
\"sysv\"} services[service_name] = service_data return services", "if initctl_path is not None and chkconfig_path is None: p", "return False def gather_services(self): services = {} if not self.systemd_enabled():", "SystemctlScanService(BaseService): def systemd_enabled(self): # Check if init is the systemd", "payload # RH sysvinit elif chkconfig_path is not None: #print", "None and chkconfig_path is None: rc, stdout, stderr = self.module.run_command(\"%s", "-n +2 | head -n -2\" % systemctl_path, use_unsafe_shell=True) for", "Example fact output: # host | success >> { #", "else: results = dict(ansible_facts=dict(services=all_services)) if incomplete_warning: results['msg'] = \"WARNING: Could", "{\"name\": service_name, \"state\": service_state, \"source\": \"sysv\"} services[service_name] = service_data return", "and chkconfig_path is None: rc, stdout, stderr = self.module.run_command(\"%s --status-all", "class BaseService(object): def __init__(self, module): self.module = module self.incomplete_warning =", "management utilities version_added: \"1.9\" options: requirements: [ ] author: <NAME>", "service_path = self.module.get_bin_path(\"service\") if service_path is None: return None initctl_path", "for line in stdout.split('\\n'): m = p.match(line) if m: service_name", "%s status' % (service_path, service_name), use_unsafe_shell=True) service_state = rc if", "fact data description: - Return service state information as fact", "# If comm doesn't exist, old kernel, no systemd return", "match_any = False for line in stdout.split('\\n'): if p.match(line): match_any", "True if not match_any: p_simple = re.compile(r'(?P<service>.*?)\\s+(?P<rl0>on|off)') match_any = False", "gather_services(self): services = {} if not self.systemd_enabled(): return None systemctl_path", "in stdout.split('\\n'): if p_simple.match(line): match_any = True if match_any: #", "dict(skipped=True, msg=\"Failed to find any services. 
Sometimes this is due", "to insufficient privileges.\") else: results = dict(ansible_facts=dict(services=all_services)) if incomplete_warning: results['msg']", "\"state\": \"running\", # \"name\": \"network\" # }, # \"arp-ethers.service\": {", "stderr: # Extra flag needed for RHEL5 rc, stdout, stderr", "stdout, stderr = self.module.run_command('%s %s status' % (service_path, service_name), use_unsafe_shell=True)", "p.match(line) if m: service_name = m.group('service') service_state = 'stopped' if", "if line_data[1] == \"+\": service_state = \"running\" else: service_state =", "'on': rc, stdout, stderr = self.module.run_command('%s %s status' % (service_path,", "import * # noqa DOCUMENTATION = ''' --- module: scan_services", "chkconfig_path is not None: #print '%s --status-all | grep -E", "is None: rc, stdout, stderr = self.module.run_command(\"%s --status-all 2>&1 |", "from ansible.module_utils.basic import * # noqa DOCUMENTATION = ''' ---", "= m.group('service') service_state = 'stopped' if m.group('rl3') == 'on': rc,", "# \"arp-ethers.service\": { # \"source\": \"systemd\", # \"state\": \"stopped\", #", "\"source\": \"systemd\", # \"state\": \"stopped\", # \"name\": \"arp-ethers.service\" # }", "# \"state\": \"stopped\", # \"name\": \"arp-ethers.service\" # } # }", "= svcmod.gather_services() if svc is not None: all_services.update(svc) if svcmod.incomplete_warning:", "chkconfig_path = self.module.get_bin_path(\"chkconfig\") # sysvinit if service_path is not None", "\"running\" else: service_state = \"stopped\" services[service_name] = {\"name\": service_name, \"state\":", "m.group('name') service_goal = m.group('goal') service_state = m.group('state') if m.group('pid'): pid", "open('/proc/1/comm', 'r') except IOError: # If comm doesn't exist, old", "= self.module.run_command(\"%s list-unit-files --type=service | tail -n +2 | head", "rc in (1,3): else: if 'root' in stderr or 'permission'", "line.split() if len(line_data) != 2: continue if line_data[1] == 
\"enabled\":", "use_unsafe_shell=True) for line in stdout.split(\"\\n\"): line_data = line.split() if len(line_data)", "| grep -E \"is (running|stopped)\"' % service_path p = re.compile(", "\"source\": \"sysv\"} # Upstart if initctl_path is not None and", "continue else: service_state = 'stopped' service_data = {\"name\": service_name, \"state\":", "IOError: # If comm doesn't exist, old kernel, no systemd", "2: continue if line_data[1] == \"enabled\": state_val = \"running\" else:", "RHEL5 rc, stdout, stderr = self.module.run_command('%s --list' % chkconfig_path, use_unsafe_shell=True)", "= True if not match_any: p_simple = re.compile(r'(?P<service>.*?)\\s+(?P<rl0>on|off)') match_any =", "is not None: all_services.update(svc) if svcmod.incomplete_warning: incomplete_warning = True if", "def systemd_enabled(self): # Check if init is the systemd command,", "None: return None initctl_path = self.module.get_bin_path(\"initctl\") chkconfig_path = self.module.get_bin_path(\"chkconfig\") #", "systemd return False for line in f: if 'systemd' in", "\"stopped\", # \"name\": \"arp-ethers.service\" # } # } # }", "\"systemd\", # \"state\": \"stopped\", # \"name\": \"arp-ethers.service\" # } #", "} # } ''' class BaseService(object): def __init__(self, module): self.module", "service_state = rc if rc in (0,): service_state = 'running'", "svcmod.incomplete_warning: incomplete_warning = True if len(all_services) == 0: results =", "continue # Skipping because we expected more data service_name =", "systemctl_path = self.module.get_bin_path(\"systemctl\", opt_dirs=[\"/usr/bin\", \"/usr/local/bin\"]) if systemctl_path is None: return", "#elif rc in (1,3): else: if 'root' in stderr or", "comm as cmdline could be symlink try: f = open('/proc/1/comm',", "== 'on': rc, stdout, stderr = self.module.run_command('%s %s status' %", "or 'not in sudoers' in stderr.lower(): self.incomplete_warning = True continue", "- monit: scan_services # Example fact output: # host |", "{ # \"services\": { # 
\"network\": { # \"source\": \"sysv\",", "f = open('/proc/1/comm', 'r') except IOError: # If comm doesn't", "'systemd' in line: return True return False def gather_services(self): services", "len(line_data) != 2: continue if line_data[1] == \"enabled\": state_val =", "# host | success >> { # \"ansible_facts\": { #", "svcmod.gather_services() if svc is not None: all_services.update(svc) if svcmod.incomplete_warning: incomplete_warning", "(0,): service_state = 'running' #elif rc in (1,3): else: if", "= dict(ansible_facts=dict(services=all_services)) if incomplete_warning: results['msg'] = \"WARNING: Could not find", "'permission' in stderr.lower() or 'not in sudoers' in stderr.lower(): self.incomplete_warning", "\"name\": \"arp-ethers.service\" # } # } # } ''' class", "stderr.lower(): self.incomplete_warning = True continue else: service_state = 'stopped' service_data", "service_state = m.group('state') if m.group('pid'): pid = m.group('pid') else: pid", "is None: return None rc, stdout, stderr = self.module.run_command(\"%s list-unit-files", "# Check for special cases where stdout does not fit", "\"ansible_facts\": { # \"services\": { # \"network\": { # \"source\":", "None initctl_path = self.module.get_bin_path(\"initctl\") chkconfig_path = self.module.get_bin_path(\"chkconfig\") # sysvinit if", "\" \".join(line_data[3:]) if line_data[1] == \"+\": service_state = \"running\" else:", "doesn't exist, old kernel, no systemd return False for line", "description: - Return service state information as fact data for", "= m.group('state') if m.group('pid'): pid = m.group('pid') else: pid =", "= False for line in stdout.split('\\n'): if p_simple.match(line): match_any =", "else: state_val = \"stopped\" services[line_data[0]] = {\"name\": line_data[0], \"state\": state_val,", "service_state = \"stopped\" services[service_name] = {\"name\": service_name, \"state\": service_state, \"source\":", "re.compile(r'(?P<service>.*?)\\s+(?P<rl0>on|off)') match_any = False for line in 
stdout.split('\\n'): if p_simple.match(line):", "m.group('service') service_state = 'stopped' if m.group('rl3') == 'on': rc, stdout,", "\"is (running|stopped)\"' % service_path p = re.compile( r'(?P<service>.*?)\\s+[0-9]:(?P<rl0>on|off)\\s+[0-9]:(?P<rl1>on|off)\\s+[0-9]:(?P<rl2>on|off)\\s+' r'[0-9]:(?P<rl3>on|off)\\s+[0-9]:(?P<rl4>on|off)\\s+[0-9]:(?P<rl5>on|off)\\s+[0-9]:(?P<rl6>on|off)') rc,", "None: #print '%s --status-all | grep -E \"is (running|stopped)\"' %", "or 'permission' in stderr.lower() or 'not in sudoers' in stderr.lower():", "self.incomplete_warning = True continue else: service_state = 'stopped' service_data =", "for svc_module in service_modules: svcmod = svc_module(module) svc = svcmod.gather_services()", "svc_module in service_modules: svcmod = svc_module(module) svc = svcmod.gather_services() if", "self.module.run_command('%s %s status' % (service_path, service_name), use_unsafe_shell=True) service_state = rc", "\"stopped\" services[line_data[0]] = {\"name\": line_data[0], \"state\": state_val, \"source\": \"systemd\"} return", "results['msg'] = \"WARNING: Could not find status for all services.", "could be symlink try: f = open('/proc/1/comm', 'r') except IOError:", "more data service_name = \" \".join(line_data[3:]) if line_data[1] == \"+\":", "if p_simple.match(line): match_any = True if match_any: # Try extra", "service_path, use_unsafe_shell=True) for line in stdout.split(\"\\n\"): line_data = line.split() if", "if not m: continue service_name = m.group('name') service_goal = m.group('goal')", "dict()) # noqa service_modules = (ServiceScanService, SystemctlScanService) all_services = {}", "because we expected more data service_name = \" \".join(line_data[3:]) if", "success >> { # \"ansible_facts\": { # \"services\": { #", "continue if line_data[1] == \"enabled\": state_val = \"running\" else: state_val", "= self.module.run_command('%s' % chkconfig_path, use_unsafe_shell=True) # Check for special cases", "symlink try: f = 
open('/proc/1/comm', 'r') except IOError: # If", "Sometimes this is due to insufficient privileges.\") else: results =", "NOQA payload = {\"name\": service_name, \"state\": service_state, \"goal\": service_goal, \"source\":", "def main(): module = AnsibleModule(argument_spec = dict()) # noqa service_modules", "| tail -n +2 | head -n -2\" % systemctl_path,", "any services. Sometimes this is due to insufficient privileges.\") else:", "initctl_path = self.module.get_bin_path(\"initctl\") chkconfig_path = self.module.get_bin_path(\"chkconfig\") # sysvinit if service_path", "services = {} if not self.systemd_enabled(): return None systemctl_path =", "in stdout.split('\\n'): if p.match(line): match_any = True if not match_any:", "line_data[1] == \"enabled\": state_val = \"running\" else: state_val = \"stopped\"", "r'[0-9]:(?P<rl3>on|off)\\s+[0-9]:(?P<rl4>on|off)\\s+[0-9]:(?P<rl5>on|off)\\s+[0-9]:(?P<rl6>on|off)') rc, stdout, stderr = self.module.run_command('%s' % chkconfig_path, use_unsafe_shell=True) #", "old kernel, no systemd return False for line in f:", "if len(line_data) != 2: continue if line_data[1] == \"enabled\": state_val", "- Return service state information as fact data for various", "\"source\": \"sysv\", # \"state\": \"running\", # \"name\": \"network\" # },", "version_added: \"1.9\" options: requirements: [ ] author: <NAME> ''' EXAMPLES", "for SLES11 rc, stdout, stderr = self.module.run_command('%s -l --allservices' %", "try: f = open('/proc/1/comm', 'r') except IOError: # If comm", "{\"name\": line_data[0], \"state\": state_val, \"source\": \"systemd\"} return services def main():", "results = dict(ansible_facts=dict(services=all_services)) if incomplete_warning: results['msg'] = \"WARNING: Could not", "= 'running' #elif rc in (1,3): else: if 'root' in", "-2\" % systemctl_path, use_unsafe_shell=True) for line in stdout.split(\"\\n\"): line_data =", "not m: continue service_name = m.group('name') service_goal = m.group('goal') service_state", "else: pid 
= None # NOQA payload = {\"name\": service_name,", "kernel, no systemd return False for line in f: if", "match_any = True if match_any: # Try extra flags \"", "for various service management utilities version_added: \"1.9\" options: requirements: [", "return None initctl_path = self.module.get_bin_path(\"initctl\") chkconfig_path = self.module.get_bin_path(\"chkconfig\") # sysvinit", "\"network\": { # \"source\": \"sysv\", # \"state\": \"running\", # \"name\":", "--allservices' % chkconfig_path, use_unsafe_shell=True) elif '--list' in stderr: # Extra", "service_path p = re.compile( r'(?P<service>.*?)\\s+[0-9]:(?P<rl0>on|off)\\s+[0-9]:(?P<rl1>on|off)\\s+[0-9]:(?P<rl2>on|off)\\s+' r'[0-9]:(?P<rl3>on|off)\\s+[0-9]:(?P<rl4>on|off)\\s+[0-9]:(?P<rl5>on|off)\\s+[0-9]:(?P<rl6>on|off)') rc, stdout, stderr =", "for line in f: if 'systemd' in line: return True", "svc_module(module) svc = svcmod.gather_services() if svc is not None: all_services.update(svc)", "False def gather_services(self): services = {} if not self.systemd_enabled(): return", "return services class SystemctlScanService(BaseService): def systemd_enabled(self): # Check if init", "self.module.run_command('%s' % chkconfig_path, use_unsafe_shell=True) # Check for special cases where", "if service_path is None: return None initctl_path = self.module.get_bin_path(\"initctl\") chkconfig_path", "<NAME> ''' EXAMPLES = ''' - monit: scan_services # Example", "{ # \"ansible_facts\": { # \"services\": { # \"network\": {", "list\" % initctl_path) real_stdout = stdout.replace(\"\\r\",\"\") for line in real_stdout.split(\"\\n\"):", "not self.systemd_enabled(): return None systemctl_path = self.module.get_bin_path(\"systemctl\", opt_dirs=[\"/usr/bin\", \"/usr/local/bin\"]) if", "self.module = module self.incomplete_warning = False class ServiceScanService(BaseService): def gather_services(self):", "\"state\": service_state, \"goal\": service_goal, \"source\": \"upstart\"} services[service_name] = payload #", "match_any = 
False for line in stdout.split('\\n'): if p_simple.match(line): match_any", "svc = svcmod.gather_services() if svc is not None: all_services.update(svc) if", "= self.module.run_command('%s -l --allservices' % chkconfig_path, use_unsafe_shell=True) elif '--list' in", "p_simple.match(line): match_any = True if match_any: # Try extra flags", "match_any: # Try extra flags \" -l --allservices\" needed for", "rc, stdout, stderr = self.module.run_command('%s --list' % chkconfig_path, use_unsafe_shell=True) for", "''' --- module: scan_services short_description: Return service state information as", "# Upstart if initctl_path is not None and chkconfig_path is", "m.group('pid') else: pid = None # NOQA payload = {\"name\":", "\"1.9\" options: requirements: [ ] author: <NAME> ''' EXAMPLES =", "for line in stdout.split('\\n'): if p.match(line): match_any = True if", "requirements: [ ] author: <NAME> ''' EXAMPLES = ''' -", "status' % (service_path, service_name), use_unsafe_shell=True) service_state = rc if rc", "service state information as fact data description: - Return service", "for line in real_stdout.split(\"\\n\"): m = p.match(line) if not m:", "{} service_path = self.module.get_bin_path(\"service\") if service_path is None: return None", "no systemd return False for line in f: if 'systemd'", "\"systemd\"} return services def main(): module = AnsibleModule(argument_spec = dict())", "= m.group('pid') else: pid = None # NOQA payload =", "= False for svc_module in service_modules: svcmod = svc_module(module) svc", "for line in stdout.split(\"\\n\"): line_data = line.split() if len(line_data) <", "= \"stopped\" services[service_name] = {\"name\": service_name, \"state\": service_state, \"source\": \"sysv\"}", "rc if rc in (0,): service_state = 'running' #elif rc", "''' class BaseService(object): def __init__(self, module): self.module = module self.incomplete_warning", "= 'stopped' if m.group('rl3') == 'on': rc, stdout, stderr =", "False for line in stdout.split('\\n'): if 
p_simple.match(line): match_any = True", "scan_services # Example fact output: # host | success >>", "stdout does not fit pattern match_any = False for line", "services[line_data[0]] = {\"name\": line_data[0], \"state\": state_val, \"source\": \"systemd\"} return services", "stdout.split('\\n'): if p.match(line): match_any = True if not match_any: p_simple", "re from ansible.module_utils.basic import * # noqa DOCUMENTATION = '''", "= {\"name\": service_name, \"state\": service_state, \"source\": \"sysv\"} # Upstart if", "stderr = self.module.run_command(\"%s list-unit-files --type=service | tail -n +2 |", "line in f: if 'systemd' in line: return True return", "--- module: scan_services short_description: Return service state information as fact", "state_val, \"source\": \"systemd\"} return services def main(): module = AnsibleModule(argument_spec", "== \"+\": service_state = \"running\" else: service_state = \"stopped\" services[service_name]", "in line: return True return False def gather_services(self): services =", "options: requirements: [ ] author: <NAME> ''' EXAMPLES = '''", "use_unsafe_shell=True) for line in stdout.split('\\n'): m = p.match(line) if m:", "self.systemd_enabled(): return None systemctl_path = self.module.get_bin_path(\"systemctl\", opt_dirs=[\"/usr/bin\", \"/usr/local/bin\"]) if systemctl_path", "return services def main(): module = AnsibleModule(argument_spec = dict()) #", "line_data = line.split() if len(line_data) < 4: continue # Skipping", "incomplete_warning = True if len(all_services) == 0: results = dict(skipped=True,", "privileges.\") else: results = dict(ansible_facts=dict(services=all_services)) if incomplete_warning: results['msg'] = \"WARNING:", "service_name, \"state\": service_state, \"goal\": service_goal, \"source\": \"upstart\"} services[service_name] = payload", "''' - monit: scan_services # Example fact output: # host", 
"re.compile(r'^\\s?(?P<name>.*)\\s(?P<goal>\\w+)\\/(?P<state>\\w+)(\\,\\sprocess\\s(?P<pid>[0-9]+))?\\s*$') rc, stdout, stderr = self.module.run_command(\"%s list\" % initctl_path) real_stdout", "self.module.get_bin_path(\"systemctl\", opt_dirs=[\"/usr/bin\", \"/usr/local/bin\"]) if systemctl_path is None: return None rc,", "0: results = dict(skipped=True, msg=\"Failed to find any services. Sometimes", "if 'systemd' in line: return True return False def gather_services(self):", "+2 | head -n -2\" % systemctl_path, use_unsafe_shell=True) for line", "scan_services short_description: Return service state information as fact data description:", "services. Sometimes this is due to insufficient privileges.\") else: results", "the systemd command, using comm as cmdline could be symlink", "# NOQA payload = {\"name\": service_name, \"state\": service_state, \"goal\": service_goal,", "# Try extra flags \" -l --allservices\" needed for SLES11", "\"source\": \"upstart\"} services[service_name] = payload # RH sysvinit elif chkconfig_path", "= line.split() if len(line_data) != 2: continue if line_data[1] ==", "{ # \"source\": \"sysv\", # \"state\": \"running\", # \"name\": \"network\"", "stderr = self.module.run_command('%s' % chkconfig_path, use_unsafe_shell=True) # Check for special", "'r') except IOError: # If comm doesn't exist, old kernel,", "not None: all_services.update(svc) if svcmod.incomplete_warning: incomplete_warning = True if len(all_services)", "} # } # } ''' class BaseService(object): def __init__(self,", "chkconfig_path, use_unsafe_shell=True) # Check for special cases where stdout does", "information as fact data for various service management utilities version_added:", "services class SystemctlScanService(BaseService): def systemd_enabled(self): # Check if init is", "is the systemd command, using comm as cmdline could be", "True continue else: service_state = 'stopped' service_data = {\"name\": service_name,", "is not None and chkconfig_path is None: p = 
re.compile(r'^\\s?(?P<name>.*)\\s(?P<goal>\\w+)\\/(?P<state>\\w+)(\\,\\sprocess\\s(?P<pid>[0-9]+))?\\s*$')", "python import re from ansible.module_utils.basic import * # noqa DOCUMENTATION", "# \"source\": \"systemd\", # \"state\": \"stopped\", # \"name\": \"arp-ethers.service\" #", "sysvinit elif chkconfig_path is not None: #print '%s --status-all |", "= {} if not self.systemd_enabled(): return None systemctl_path = self.module.get_bin_path(\"systemctl\",", "''' EXAMPLES = ''' - monit: scan_services # Example fact", "if line_data[1] == \"enabled\": state_val = \"running\" else: state_val =", "is due to insufficient privileges.\") else: results = dict(ansible_facts=dict(services=all_services)) if", "services[service_name] = {\"name\": service_name, \"state\": service_state, \"source\": \"sysv\"} # Upstart", "all_services.update(svc) if svcmod.incomplete_warning: incomplete_warning = True if len(all_services) == 0:", "True if len(all_services) == 0: results = dict(skipped=True, msg=\"Failed to", "SystemctlScanService) all_services = {} incomplete_warning = False for svc_module in", "False for line in f: if 'systemd' in line: return", "in stderr.lower() or 'not in sudoers' in stderr.lower(): self.incomplete_warning =", "= self.module.run_command(\"%s list\" % initctl_path) real_stdout = stdout.replace(\"\\r\",\"\") for line", "for all services. 
Sometimes this is due to insufficient privileges.\"", "is None: p = re.compile(r'^\\s?(?P<name>.*)\\s(?P<goal>\\w+)\\/(?P<state>\\w+)(\\,\\sprocess\\s(?P<pid>[0-9]+))?\\s*$') rc, stdout, stderr = self.module.run_command(\"%s", "in (1,3): else: if 'root' in stderr or 'permission' in", "is not None: #print '%s --status-all | grep -E \"is", "SLES11 rc, stdout, stderr = self.module.run_command('%s -l --allservices' % chkconfig_path,", "module: scan_services short_description: Return service state information as fact data", "p = re.compile(r'^\\s?(?P<name>.*)\\s(?P<goal>\\w+)\\/(?P<state>\\w+)(\\,\\sprocess\\s(?P<pid>[0-9]+))?\\s*$') rc, stdout, stderr = self.module.run_command(\"%s list\" %", "stdout, stderr = self.module.run_command(\"%s list\" % initctl_path) real_stdout = stdout.replace(\"\\r\",\"\")", "command, using comm as cmdline could be symlink try: f", "stdout, stderr = self.module.run_command('%s' % chkconfig_path, use_unsafe_shell=True) # Check for", "--allservices\" needed for SLES11 rc, stdout, stderr = self.module.run_command('%s -l", "Could not find status for all services. Sometimes this is", "needed for SLES11 rc, stdout, stderr = self.module.run_command('%s -l --allservices'", "self.module.run_command(\"%s --status-all 2>&1 | grep -E \\\"\\\\[ (\\\\+|\\\\-) \\\\]\\\"\" %", "= module self.incomplete_warning = False class ServiceScanService(BaseService): def gather_services(self): services", "\"arp-ethers.service\" # } # } # } ''' class BaseService(object):", "#!/usr/bin/env python import re from ansible.module_utils.basic import * # noqa", "= None # NOQA payload = {\"name\": service_name, \"state\": service_state,", "flags \" -l --allservices\" needed for SLES11 rc, stdout, stderr", "not find status for all services. 
Sometimes this is due", "else: if 'root' in stderr or 'permission' in stderr.lower() or", "{\"name\": service_name, \"state\": service_state, \"goal\": service_goal, \"source\": \"upstart\"} services[service_name] =", "= True if len(all_services) == 0: results = dict(skipped=True, msg=\"Failed", "fact output: # host | success >> { # \"ansible_facts\":", "if m.group('rl3') == 'on': rc, stdout, stderr = self.module.run_command('%s %s", "svc is not None: all_services.update(svc) if svcmod.incomplete_warning: incomplete_warning = True", "= rc if rc in (0,): service_state = 'running' #elif", "return True return False def gather_services(self): services = {} if", "= \"stopped\" services[line_data[0]] = {\"name\": line_data[0], \"state\": state_val, \"source\": \"systemd\"}", "services. Sometimes this is due to insufficient privileges.\" module.exit_json(**results) main()", "m: service_name = m.group('service') service_state = 'stopped' if m.group('rl3') ==", "= (ServiceScanService, SystemctlScanService) all_services = {} incomplete_warning = False for", "service_state = 'stopped' if m.group('rl3') == 'on': rc, stdout, stderr", "m.group('state') if m.group('pid'): pid = m.group('pid') else: pid = None", "m.group('pid'): pid = m.group('pid') else: pid = None # NOQA", "= re.compile(r'(?P<service>.*?)\\s+(?P<rl0>on|off)') match_any = False for line in stdout.split('\\n'): if", "line: return True return False def gather_services(self): services = {}", "\"running\" else: state_val = \"stopped\" services[line_data[0]] = {\"name\": line_data[0], \"state\":", "in stderr or 'permission' in stderr.lower() or 'not in sudoers'", "this is due to insufficient privileges.\") else: results = dict(ansible_facts=dict(services=all_services))", "= AnsibleModule(argument_spec = dict()) # noqa service_modules = (ServiceScanService, SystemctlScanService)", "'%s --status-all | grep -E \"is (running|stopped)\"' % service_path p", "in stderr: # Extra flag needed for RHEL5 rc, stdout,", "= {} 
incomplete_warning = False for svc_module in service_modules: svcmod", "if incomplete_warning: results['msg'] = \"WARNING: Could not find status for", "line.split() if len(line_data) < 4: continue # Skipping because we", "= service_data return services class SystemctlScanService(BaseService): def systemd_enabled(self): # Check", "in stdout.split(\"\\n\"): line_data = line.split() if len(line_data) != 2: continue", "# } ''' class BaseService(object): def __init__(self, module): self.module =", "% chkconfig_path, use_unsafe_shell=True) for line in stdout.split('\\n'): m = p.match(line)", "real_stdout = stdout.replace(\"\\r\",\"\") for line in real_stdout.split(\"\\n\"): m = p.match(line)", "= dict(skipped=True, msg=\"Failed to find any services. Sometimes this is", "svcmod = svc_module(module) svc = svcmod.gather_services() if svc is not", "Skipping because we expected more data service_name = \" \".join(line_data[3:])" ]
[ "test_run_id = run.get(\"test_run_id\") if not test_run_id in results: result =", "trigger_url = sys.argv[1] trigger_resp = requests.get(trigger_url) if trigger_resp.ok: trigger_json =", "fail_count)) exit(1) print (\"All test runs passed.\") def _get_result(test_run): #", "result.get(\"result\") in [\"pass\", \"fail\"]: results[test_run_id] = result pass_count = sum([r.get(\"result\")", "the environment variable RUNSCOPE_ACCESS_TOKEN. You can get an access token", "in [\"pass\", \"fail\"]: results[test_run_id] = result pass_count = sum([r.get(\"result\") ==", "} result_url = \"{base_url}/buckets/{bucket_key}/tests/{test_id}/results/{test_run_id}\".format(**opts) print (\"Getting result: {}\".format(result_url)) headers =", "for r in results.values()]) fail_count = sum([r.get(\"result\") == \"fail\" for", "passed.\") def _get_result(test_run): # generate Personal Access Token at https://www.runscope.com/applications", "in results.values()]) if fail_count > 0: print (\"{} test runs", "def main(): trigger_url = sys.argv[1] trigger_resp = requests.get(trigger_url) if trigger_resp.ok:", "if not \"RUNSCOPE_ACCESS_TOKEN\" in os.environ: print (\"Please set the environment", "token by going to https://www.runscope.com/applications\") exit(1) API_TOKEN = os.environ[\"RUNSCOPE_ACCESS_TOKEN\"] opts", "results[test_run_id] = result pass_count = sum([r.get(\"result\") == \"pass\" for r", "{} test runs.\".format(len(test_runs))) results = {} while len(results.keys()) < len(test_runs):", "Personal Access Token at https://www.runscope.com/applications if not \"RUNSCOPE_ACCESS_TOKEN\" in os.environ:", "set the environment variable RUNSCOPE_ACCESS_TOKEN. 
You can get an access", "result pass_count = sum([r.get(\"result\") == \"pass\" for r in results.values()])", "_get_result(test_run): # generate Personal Access Token at https://www.runscope.com/applications if not", "} result_resp = requests.get(result_url, headers=headers) if result_resp.ok: return result_resp.json().get(\"data\") return", "\"{base_url}/buckets/{bucket_key}/tests/{test_id}/results/{test_run_id}\".format(**opts) print (\"Getting result: {}\".format(result_url)) headers = { \"Authorization\": \"Bearer", "print (\"Getting result: {}\".format(result_url)) headers = { \"Authorization\": \"Bearer {}\".format(API_TOKEN),", "import time import os def main(): trigger_url = sys.argv[1] trigger_resp", "{} while len(results.keys()) < len(test_runs): time.sleep(1) for run in test_runs:", "\"pass\" for r in results.values()]) fail_count = sum([r.get(\"result\") == \"fail\"", "test runs failed.\".format(pass_count, fail_count)) exit(1) print (\"All test runs passed.\")", "len(results.keys()) < len(test_runs): time.sleep(1) for run in test_runs: test_run_id =", "import os def main(): trigger_url = sys.argv[1] trigger_resp = requests.get(trigger_url)", "for r in results.values()]) if fail_count > 0: print (\"{}", "(\"Please set the environment variable RUNSCOPE_ACCESS_TOKEN. You can get an", "results: result = _get_result(run) if result.get(\"result\") in [\"pass\", \"fail\"]: results[test_run_id]", "{}\".format(result_url)) headers = { \"Authorization\": \"Bearer {}\".format(API_TOKEN), \"User-Agent\": \"python-trigger-sample\" }", "requests import sys import time import os def main(): trigger_url", "os.environ: print (\"Please set the environment variable RUNSCOPE_ACCESS_TOKEN. 
You can", "an access token by going to https://www.runscope.com/applications\") exit(1) API_TOKEN =", "{ \"Authorization\": \"Bearer {}\".format(API_TOKEN), \"User-Agent\": \"python-trigger-sample\" } result_resp = requests.get(result_url,", "= run.get(\"test_run_id\") if not test_run_id in results: result = _get_result(run)", "in os.environ: print (\"Please set the environment variable RUNSCOPE_ACCESS_TOKEN. You", "environment variable RUNSCOPE_ACCESS_TOKEN. You can get an access token by", "(\"All test runs passed.\") def _get_result(test_run): # generate Personal Access", "< len(test_runs): time.sleep(1) for run in test_runs: test_run_id = run.get(\"test_run_id\")", "os def main(): trigger_url = sys.argv[1] trigger_resp = requests.get(trigger_url) if", "\"fail\" for r in results.values()]) if fail_count > 0: print", "sys import time import os def main(): trigger_url = sys.argv[1]", "can get an access token by going to https://www.runscope.com/applications\") exit(1)", "runs passed.\") def _get_result(test_run): # generate Personal Access Token at", "Token at https://www.runscope.com/applications if not \"RUNSCOPE_ACCESS_TOKEN\" in os.environ: print (\"Please", "= sys.argv[1] trigger_resp = requests.get(trigger_url) if trigger_resp.ok: trigger_json = trigger_resp.json().get(\"data\",", "main(): trigger_url = sys.argv[1] trigger_resp = requests.get(trigger_url) if trigger_resp.ok: trigger_json", "= trigger_json.get(\"runs\", []) print (\"Started {} test runs.\".format(len(test_runs))) results =", "exit(1) API_TOKEN = os.environ[\"RUNSCOPE_ACCESS_TOKEN\"] opts = { \"base_url\": \"https://api.runscope.com\", \"bucket_key\":", "result_url = \"{base_url}/buckets/{bucket_key}/tests/{test_id}/results/{test_run_id}\".format(**opts) print (\"Getting result: {}\".format(result_url)) headers = {", "at https://www.runscope.com/applications if not \"RUNSCOPE_ACCESS_TOKEN\" in os.environ: print (\"Please set", "\"python-trigger-sample\" } result_resp = requests.get(result_url, 
headers=headers) if result_resp.ok: return result_resp.json().get(\"data\")", "(\"Getting result: {}\".format(result_url)) headers = { \"Authorization\": \"Bearer {}\".format(API_TOKEN), \"User-Agent\":", "\"test_run_id\": test_run.get(\"test_run_id\") } result_url = \"{base_url}/buckets/{bucket_key}/tests/{test_id}/results/{test_run_id}\".format(**opts) print (\"Getting result: {}\".format(result_url))", "test_run.get(\"test_id\"), \"test_run_id\": test_run.get(\"test_run_id\") } result_url = \"{base_url}/buckets/{bucket_key}/tests/{test_id}/results/{test_run_id}\".format(**opts) print (\"Getting result:", "for run in test_runs: test_run_id = run.get(\"test_run_id\") if not test_run_id", "passed. {} test runs failed.\".format(pass_count, fail_count)) exit(1) print (\"All test", "print (\"Please set the environment variable RUNSCOPE_ACCESS_TOKEN. You can get", "headers = { \"Authorization\": \"Bearer {}\".format(API_TOKEN), \"User-Agent\": \"python-trigger-sample\" } result_resp", "test_runs = trigger_json.get(\"runs\", []) print (\"Started {} test runs.\".format(len(test_runs))) results", "[]) print (\"Started {} test runs.\".format(len(test_runs))) results = {} while", "if trigger_resp.ok: trigger_json = trigger_resp.json().get(\"data\", {}) test_runs = trigger_json.get(\"runs\", [])", "0: print (\"{} test runs passed. {} test runs failed.\".format(pass_count,", "= os.environ[\"RUNSCOPE_ACCESS_TOKEN\"] opts = { \"base_url\": \"https://api.runscope.com\", \"bucket_key\": test_run.get(\"bucket_key\"), \"test_id\":", "= {} while len(results.keys()) < len(test_runs): time.sleep(1) for run in", "if result.get(\"result\") in [\"pass\", \"fail\"]: results[test_run_id] = result pass_count =", "You can get an access token by going to https://www.runscope.com/applications\")", "time import os def main(): trigger_url = sys.argv[1] trigger_resp =", "(\"{} test runs passed. 
{} test runs failed.\".format(pass_count, fail_count)) exit(1)", "if not test_run_id in results: result = _get_result(run) if result.get(\"result\")", "runs.\".format(len(test_runs))) results = {} while len(results.keys()) < len(test_runs): time.sleep(1) for", "not test_run_id in results: result = _get_result(run) if result.get(\"result\") in", "def _get_result(test_run): # generate Personal Access Token at https://www.runscope.com/applications if", "len(test_runs): time.sleep(1) for run in test_runs: test_run_id = run.get(\"test_run_id\") if", "run.get(\"test_run_id\") if not test_run_id in results: result = _get_result(run) if", "runs passed. {} test runs failed.\".format(pass_count, fail_count)) exit(1) print (\"All", "Access Token at https://www.runscope.com/applications if not \"RUNSCOPE_ACCESS_TOKEN\" in os.environ: print", "= \"{base_url}/buckets/{bucket_key}/tests/{test_id}/results/{test_run_id}\".format(**opts) print (\"Getting result: {}\".format(result_url)) headers = { \"Authorization\":", "fail_count = sum([r.get(\"result\") == \"fail\" for r in results.values()]) if", "= sum([r.get(\"result\") == \"pass\" for r in results.values()]) fail_count =", "\"fail\"]: results[test_run_id] = result pass_count = sum([r.get(\"result\") == \"pass\" for", "\"RUNSCOPE_ACCESS_TOKEN\" in os.environ: print (\"Please set the environment variable RUNSCOPE_ACCESS_TOKEN.", "pass_count = sum([r.get(\"result\") == \"pass\" for r in results.values()]) fail_count", "# generate Personal Access Token at https://www.runscope.com/applications if not \"RUNSCOPE_ACCESS_TOKEN\"", "= requests.get(result_url, headers=headers) if result_resp.ok: return result_resp.json().get(\"data\") return None if", "\"Bearer {}\".format(API_TOKEN), \"User-Agent\": \"python-trigger-sample\" } result_resp = requests.get(result_url, headers=headers) if", "os.environ[\"RUNSCOPE_ACCESS_TOKEN\"] opts = { \"base_url\": \"https://api.runscope.com\", \"bucket_key\": test_run.get(\"bucket_key\"), \"test_id\": 
test_run.get(\"test_id\"),", "result: {}\".format(result_url)) headers = { \"Authorization\": \"Bearer {}\".format(API_TOKEN), \"User-Agent\": \"python-trigger-sample\"", "\"User-Agent\": \"python-trigger-sample\" } result_resp = requests.get(result_url, headers=headers) if result_resp.ok: return", "results = {} while len(results.keys()) < len(test_runs): time.sleep(1) for run", "in results: result = _get_result(run) if result.get(\"result\") in [\"pass\", \"fail\"]:", "sum([r.get(\"result\") == \"fail\" for r in results.values()]) if fail_count >", "\"bucket_key\": test_run.get(\"bucket_key\"), \"test_id\": test_run.get(\"test_id\"), \"test_run_id\": test_run.get(\"test_run_id\") } result_url = \"{base_url}/buckets/{bucket_key}/tests/{test_id}/results/{test_run_id}\".format(**opts)", "sum([r.get(\"result\") == \"pass\" for r in results.values()]) fail_count = sum([r.get(\"result\")", "headers=headers) if result_resp.ok: return result_resp.json().get(\"data\") return None if __name__ ==", "run in test_runs: test_run_id = run.get(\"test_run_id\") if not test_run_id in", "variable RUNSCOPE_ACCESS_TOKEN. 
You can get an access token by going", "test runs.\".format(len(test_runs))) results = {} while len(results.keys()) < len(test_runs): time.sleep(1)", "print (\"Started {} test runs.\".format(len(test_runs))) results = {} while len(results.keys())", "API_TOKEN = os.environ[\"RUNSCOPE_ACCESS_TOKEN\"] opts = { \"base_url\": \"https://api.runscope.com\", \"bucket_key\": test_run.get(\"bucket_key\"),", "== \"fail\" for r in results.values()]) if fail_count > 0:", "https://www.runscope.com/applications\") exit(1) API_TOKEN = os.environ[\"RUNSCOPE_ACCESS_TOKEN\"] opts = { \"base_url\": \"https://api.runscope.com\",", "runs failed.\".format(pass_count, fail_count)) exit(1) print (\"All test runs passed.\") def", "result_resp = requests.get(result_url, headers=headers) if result_resp.ok: return result_resp.json().get(\"data\") return None", "failed.\".format(pass_count, fail_count)) exit(1) print (\"All test runs passed.\") def _get_result(test_run):", "{}) test_runs = trigger_json.get(\"runs\", []) print (\"Started {} test runs.\".format(len(test_runs)))", "requests.get(trigger_url) if trigger_resp.ok: trigger_json = trigger_resp.json().get(\"data\", {}) test_runs = trigger_json.get(\"runs\",", "generate Personal Access Token at https://www.runscope.com/applications if not \"RUNSCOPE_ACCESS_TOKEN\" in", "\"https://api.runscope.com\", \"bucket_key\": test_run.get(\"bucket_key\"), \"test_id\": test_run.get(\"test_id\"), \"test_run_id\": test_run.get(\"test_run_id\") } result_url =", "import sys import time import os def main(): trigger_url =", "trigger_resp.ok: trigger_json = trigger_resp.json().get(\"data\", {}) test_runs = trigger_json.get(\"runs\", []) print", "test_run_id in results: result = _get_result(run) if result.get(\"result\") in [\"pass\",", "print (\"{} test runs passed. 
{} test runs failed.\".format(pass_count, fail_count))", "results.values()]) if fail_count > 0: print (\"{} test runs passed.", "exit(1) print (\"All test runs passed.\") def _get_result(test_run): # generate", "test_run.get(\"test_run_id\") } result_url = \"{base_url}/buckets/{bucket_key}/tests/{test_id}/results/{test_run_id}\".format(**opts) print (\"Getting result: {}\".format(result_url)) headers", "> 0: print (\"{} test runs passed. {} test runs", "print (\"All test runs passed.\") def _get_result(test_run): # generate Personal", "result = _get_result(run) if result.get(\"result\") in [\"pass\", \"fail\"]: results[test_run_id] =", "== \"pass\" for r in results.values()]) fail_count = sum([r.get(\"result\") ==", "= trigger_resp.json().get(\"data\", {}) test_runs = trigger_json.get(\"runs\", []) print (\"Started {}", "by going to https://www.runscope.com/applications\") exit(1) API_TOKEN = os.environ[\"RUNSCOPE_ACCESS_TOKEN\"] opts =", "going to https://www.runscope.com/applications\") exit(1) API_TOKEN = os.environ[\"RUNSCOPE_ACCESS_TOKEN\"] opts = {", "{ \"base_url\": \"https://api.runscope.com\", \"bucket_key\": test_run.get(\"bucket_key\"), \"test_id\": test_run.get(\"test_id\"), \"test_run_id\": test_run.get(\"test_run_id\") }", "[\"pass\", \"fail\"]: results[test_run_id] = result pass_count = sum([r.get(\"result\") == \"pass\"", "= { \"Authorization\": \"Bearer {}\".format(API_TOKEN), \"User-Agent\": \"python-trigger-sample\" } result_resp =", "\"Authorization\": \"Bearer {}\".format(API_TOKEN), \"User-Agent\": \"python-trigger-sample\" } result_resp = requests.get(result_url, headers=headers)", "get an access token by going to https://www.runscope.com/applications\") exit(1) API_TOKEN", "{}\".format(API_TOKEN), \"User-Agent\": \"python-trigger-sample\" } result_resp = requests.get(result_url, headers=headers) if result_resp.ok:", "if result_resp.ok: return result_resp.json().get(\"data\") return None if __name__ == '__main__':", "= 
requests.get(trigger_url) if trigger_resp.ok: trigger_json = trigger_resp.json().get(\"data\", {}) test_runs =", "\"base_url\": \"https://api.runscope.com\", \"bucket_key\": test_run.get(\"bucket_key\"), \"test_id\": test_run.get(\"test_id\"), \"test_run_id\": test_run.get(\"test_run_id\") } result_url", "in test_runs: test_run_id = run.get(\"test_run_id\") if not test_run_id in results:", "opts = { \"base_url\": \"https://api.runscope.com\", \"bucket_key\": test_run.get(\"bucket_key\"), \"test_id\": test_run.get(\"test_id\"), \"test_run_id\":", "while len(results.keys()) < len(test_runs): time.sleep(1) for run in test_runs: test_run_id", "if fail_count > 0: print (\"{} test runs passed. {}", "test_run.get(\"bucket_key\"), \"test_id\": test_run.get(\"test_id\"), \"test_run_id\": test_run.get(\"test_run_id\") } result_url = \"{base_url}/buckets/{bucket_key}/tests/{test_id}/results/{test_run_id}\".format(**opts) print", "results.values()]) fail_count = sum([r.get(\"result\") == \"fail\" for r in results.values()])", "RUNSCOPE_ACCESS_TOKEN. 
You can get an access token by going to", "sys.argv[1] trigger_resp = requests.get(trigger_url) if trigger_resp.ok: trigger_json = trigger_resp.json().get(\"data\", {})", "test runs passed.\") def _get_result(test_run): # generate Personal Access Token", "import requests import sys import time import os def main():", "test_runs: test_run_id = run.get(\"test_run_id\") if not test_run_id in results: result", "requests.get(result_url, headers=headers) if result_resp.ok: return result_resp.json().get(\"data\") return None if __name__", "{} test runs failed.\".format(pass_count, fail_count)) exit(1) print (\"All test runs", "to https://www.runscope.com/applications\") exit(1) API_TOKEN = os.environ[\"RUNSCOPE_ACCESS_TOKEN\"] opts = { \"base_url\":", "trigger_resp = requests.get(trigger_url) if trigger_resp.ok: trigger_json = trigger_resp.json().get(\"data\", {}) test_runs", "trigger_resp.json().get(\"data\", {}) test_runs = trigger_json.get(\"runs\", []) print (\"Started {} test", "fail_count > 0: print (\"{} test runs passed. {} test", "= sum([r.get(\"result\") == \"fail\" for r in results.values()]) if fail_count", "= { \"base_url\": \"https://api.runscope.com\", \"bucket_key\": test_run.get(\"bucket_key\"), \"test_id\": test_run.get(\"test_id\"), \"test_run_id\": test_run.get(\"test_run_id\")", "\"test_id\": test_run.get(\"test_id\"), \"test_run_id\": test_run.get(\"test_run_id\") } result_url = \"{base_url}/buckets/{bucket_key}/tests/{test_id}/results/{test_run_id}\".format(**opts) print (\"Getting", "= _get_result(run) if result.get(\"result\") in [\"pass\", \"fail\"]: results[test_run_id] = result", "trigger_json = trigger_resp.json().get(\"data\", {}) test_runs = trigger_json.get(\"runs\", []) print (\"Started", "test runs passed. 
{} test runs failed.\".format(pass_count, fail_count)) exit(1) print", "_get_result(run) if result.get(\"result\") in [\"pass\", \"fail\"]: results[test_run_id] = result pass_count", "in results.values()]) fail_count = sum([r.get(\"result\") == \"fail\" for r in", "https://www.runscope.com/applications if not \"RUNSCOPE_ACCESS_TOKEN\" in os.environ: print (\"Please set the", "result_resp.ok: return result_resp.json().get(\"data\") return None if __name__ == '__main__': main()", "r in results.values()]) if fail_count > 0: print (\"{} test", "= result pass_count = sum([r.get(\"result\") == \"pass\" for r in", "trigger_json.get(\"runs\", []) print (\"Started {} test runs.\".format(len(test_runs))) results = {}", "access token by going to https://www.runscope.com/applications\") exit(1) API_TOKEN = os.environ[\"RUNSCOPE_ACCESS_TOKEN\"]", "(\"Started {} test runs.\".format(len(test_runs))) results = {} while len(results.keys()) <", "time.sleep(1) for run in test_runs: test_run_id = run.get(\"test_run_id\") if not", "r in results.values()]) fail_count = sum([r.get(\"result\") == \"fail\" for r", "not \"RUNSCOPE_ACCESS_TOKEN\" in os.environ: print (\"Please set the environment variable" ]
[ "Optional[str] = search_id self.results: List[CVE] = results class EmailsSearchResults: def", "response.data.items: domains.append(Domain.from_dict(r)) return DomainsSearchResults(domains, search_id=response.data.search_id) def get_ip_details(self, ip: str) ->", "search_id self.results: List[AS] = results class IPSearchResults: def __init__(self, results:", "api_token, base_url=DEFAULT_BASE_URL): self.session = requests.Session() self.session.headers.update({'Authorization': 'Bearer ' + api_token})", "as_list.append(AS.from_dict(r)) return AutonomousSystemsSearchResults(as_list, search_id=response.data.search_id) def get_domain_details(self, domain_name: str) -> Optional[Domain]:", "self.__scroll('{}/certificate/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() certs = list() for r in", "response.data.items: emails.append(Email.from_dict(r)) return EmailsSearchResults(emails, search_id=response.data.search_id) def search_historical_dns(self, dns_type, domain_name: str,", "offset: int = 0) \\ -> HistoricalDNSSearchResults: \"\"\" Returns the", "-> Response: with limit(self.limiter, consume=1): return Response.from_dict(self.session.post(endpoint, json={\"search_params\": query.get(), \"limit\":", "Pro subscription \"\"\" response = self.__scroll('{}/domain/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() domains", "Spyse Pro subscription \"\"\" response = self.__scroll('{}/ip/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors()", "search results that matched the search query.\"\"\" response = self.__search('{}/certificate/search/count'.format(self.base_url),", "for r in response.data.items: ips.append(IP.from_dict(r)) return IPSearchResults(ips, response.data.total_items) def count_ip(self,", "hosts that matched the search query. 
Allows getting only the", "search_domains(self, query: SearchQuery, limit: int = MAX_LIMIT, offset: int =", "SearchQuery, scroll_id: str = None) -> IPSearchResults: \"\"\" Returns a", "self.search_id: Optional[str] = search_id self.results: List[Domain] = results class AutonomousSystemsSearchResults:", "Response.from_dict(self.session.get(endpoint).json()) def __search(self, endpoint, query: SearchQuery, lim: int = MAX_LIMIT,", "def search_historical_whois(self, domain_name: str, limit: int = MAX_LIMIT, offset: int", "str) -> Optional[Email]: \"\"\"Returns details about email\"\"\" response = self.__get('{}/email/{}'.format(self.base_url,", "offset: int = 0) -> EmailsSearchResults: \"\"\" Returns a list", "def __init__(self, results: List[Domain], total_items: int = None, search_id: str", "int) -> Optional[AS]: \"\"\"Returns details about an autonomous system by", "if len(response.data.items) > 0 else None def search_cve(self, query: SearchQuery,", "0 else None def search_emails(self, query: SearchQuery, limit: int =", "Certificate.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None def search_certificate(self, query:", "results class IPSearchResults: def __init__(self, results: List[IP], total_items: int =", "limit, offset) response.check_errors() certs = list() for r in response.data.items:", "None) -> EmailsSearchResults: \"\"\" Returns a list of emails that", "consume=1): if search_id: body = {\"search_params\": query.get(), \"search_id\": search_id} else:", "int = 0) -> IPSearchResults: \"\"\" Returns a list of", "fingerprint_sha256)) response.check_errors() return Certificate.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None", "query, limit, offset) response.check_errors() certs = list() for r in", "List[IP] = results class CertificatesSearchResults: def __init__(self, results: List[Certificate], total_items:", "-> IPSearchResults: \"\"\" Returns a list of IPv4 hosts that", "= requests.Session() 
self.session.headers.update({'Authorization': 'Bearer ' + api_token}) self.session.headers.update({'User-Agent': 'spyse-python'}) self.base_url", "= None) -> DomainsSearchResults: \"\"\" Returns a list of domains", "= list() for r in response.data.items: ips.append(IP.from_dict(r)) return IPSearchResults(ips, search_id=response.data.search_id)", "certs.append(Certificate.from_dict(r)) return CertificatesSearchResults(certs, response.data.total_items) def count_certificate(self, query: SearchQuery) -> int:", "List[Domain], total_items: int = None, search_id: str = None): self.total_items:", "results. \"\"\" response = self.__search('{}/as/search'.format(self.base_url), query, limit, offset) response.check_errors() as_list", "\"\"\" response = self.__scroll('{}/domain/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() domains = list()", "def __init__(self, results: List[DNSHistoricalRecord], total_items: int = None, search_id: str", "def set_user_agent(self, s: str): self.session.headers.update({'User-Agent': s}) def get_quotas(self) -> Optional[Account]:", "self.total_items: Optional[int] = total_items self.search_id: Optional[str] = search_id self.results: List[AS]", "Optional[Certificate]: \"\"\"Returns details about SSL/TLS certificate\"\"\" response = self.__get('{}/certificate/{}'.format(self.base_url, fingerprint_sha256))", "response.data.total_items def scroll_ip(self, query: SearchQuery, scroll_id: str = None) ->", "results that matched the search query.\"\"\" response = self.__search('{}/cve/search/count'.format(self.base_url), query)", "Optional[str] = search_id self.results: List[WHOISHistoricalRecord] = results class Client: DEFAULT_BASE_URL", "Optional[Email]: \"\"\"Returns details about email\"\"\" response = self.__get('{}/email/{}'.format(self.base_url, email)) response.check_errors()", "= self.__get('{}/email/{}'.format(self.base_url, email)) response.check_errors() return Email.from_dict(response.data.items[0]) if 
len(response.data.items) > 0", "None) -> IPSearchResults: \"\"\" Returns a list of IPv4 hosts", "for the given domain name. \"\"\" response = self.__get(f'{self.base_url}/history/domain-whois/{domain_name}?limit={limit}&offset={offset}') response.check_errors()", "MAX_LIMIT, offset: int = 0) -> CVESearchResults: \"\"\" Returns a", "results class HistoricalWHOISSearchResults: def __init__(self, results: List[WHOISHistoricalRecord], total_items: int =", "response = self.__search('{}/as/search/count'.format(self.base_url), query) response.check_errors() return response.data.total_items def search_autonomous_systems(self, query:", "but requires a Spyse Pro subscription \"\"\" response = self.__scroll('{}/email/scroll/search'.format(self.base_url),", "precise number of search results that matched the search query.\"\"\"", "10,000 results. \"\"\" response = self.__search('{}/cve/search'.format(self.base_url), query, limit, offset) response.check_errors()", "\"\"\"Returns details about your account quotas.\"\"\" response = self.__get('{}/account/quota'.format(self.base_url)) response.check_errors()", "scroll_emails(self, query: SearchQuery, scroll_id: str = None) -> EmailsSearchResults: \"\"\"", "query.get()} return Response.from_dict(self.session.post(endpoint, json=body).json()) def set_user_agent(self, s: str): self.session.headers.update({'User-Agent': s})", "0) \\ -> HistoricalWHOISSearchResults: \"\"\" Returns the historical WHOIS records", "'spyse-python'}) self.base_url = base_url self.limiter = get_limiter(rate=self.RATE_LIMIT_FRAME_IN_SECONDS, capacity=1) self.account =", "details about SSL/TLS certificate\"\"\" response = self.__get('{}/certificate/{}'.format(self.base_url, fingerprint_sha256)) response.check_errors() return", "self.base_url = base_url self.limiter = get_limiter(rate=self.RATE_LIMIT_FRAME_IN_SECONDS, capacity=1) self.account = self.get_quotas()", "results class CVESearchResults: def __init__(self, results: List[CVE], total_items: int =", "about 
an autonomous system by AS number.\"\"\" response = self.__get('{}/as/{}'.format(self.base_url,", "scroll_ip(self, query: SearchQuery, scroll_id: str = None) -> IPSearchResults: \"\"\"", "int: \"\"\"Returns the precise number of search results that matched", "offset: int = 0) -> CVESearchResults: \"\"\" Returns a list", "__init__(self, results: List[Email], total_items: int = None, search_id: str =", "EmailsSearchResults(emails, response.data.total_items) def count_emails(self, query: SearchQuery) -> int: \"\"\"Returns the", "search query.\"\"\" response = self.__search('{}/ip/search/count'.format(self.base_url), query) response.check_errors() return response.data.total_items def", "> 0 else None def search_emails(self, query: SearchQuery, limit: int", "10000 RATE_LIMIT_FRAME_IN_SECONDS = 1 def __init__(self, api_token, base_url=DEFAULT_BASE_URL): self.session =", "email\"\"\" response = self.__get('{}/email/{}'.format(self.base_url, email)) response.check_errors() return Email.from_dict(response.data.items[0]) if len(response.data.items)", "Optional[str] = search_id self.results: List[Email] = results class HistoricalDNSSearchResults: def", "def scroll_autonomous_systems(self, query: SearchQuery, scroll_id: str = None) -> AutonomousSystemsSearchResults:", "SearchQuery, scroll_id: str = None) -> AutonomousSystemsSearchResults: \"\"\" Returns a", "response.data.items: as_list.append(AS.from_dict(r)) return AutonomousSystemsSearchResults(as_list, search_id=response.data.search_id) def get_domain_details(self, domain_name: str) ->", "in response.data.items: emails.append(Email.from_dict(r)) return EmailsSearchResults(emails, response.data.total_items) def count_emails(self, query: SearchQuery)", "None def count_autonomous_systems(self, query: SearchQuery) -> int: \"\"\"Returns the precise", "Optional[int] = total_items self.search_id: Optional[str] = search_id self.results: List[IP] =", "List[WHOISHistoricalRecord], total_items: int = None, search_id: str = None): 
self.total_items:", "Returns a list of SSL/TLS certificates that matched the search", "consume=1): return Response.from_dict(self.session.get(endpoint).json()) def __search(self, endpoint, query: SearchQuery, lim: int", "query: SearchQuery, scroll_id: str = None) -> AutonomousSystemsSearchResults: \"\"\" Returns", "base_url self.limiter = get_limiter(rate=self.RATE_LIMIT_FRAME_IN_SECONDS, capacity=1) self.account = self.get_quotas() self.limiter._capacity =", "import requests from typing import List, Optional from .models import", "for r in response.data.items: certs.append(Certificate.from_dict(r)) return CertificatesSearchResults(certs, response.data.total_items) def count_certificate(self,", "response.data.total_items) def scroll_autonomous_systems(self, query: SearchQuery, scroll_id: str = None) ->", "= 0) -> IPSearchResults: \"\"\" Returns a list of IPv4", "self.search_id: Optional[str] = search_id self.results: List[DNSHistoricalRecord] = results class HistoricalWHOISSearchResults:", "= search_id self.results: List[Email] = results class HistoricalDNSSearchResults: def __init__(self,", "search_id self.results: List[WHOISHistoricalRecord] = results class Client: DEFAULT_BASE_URL = 'https://api.spyse.com/v4/data'", "about your account quotas.\"\"\" response = self.__get('{}/account/quota'.format(self.base_url)) response.check_errors() return Account.from_dict(response.data.items[0])", "def count_autonomous_systems(self, query: SearchQuery) -> int: \"\"\"Returns the precise number", "matched the search query.\"\"\" response = self.__search('{}/domain/search/count'.format(self.base_url), query) response.check_errors() return", "List[Domain] = results class AutonomousSystemsSearchResults: def __init__(self, results: List[AS], total_items:", "= total_items self.search_id: Optional[str] = search_id self.results: List[DNSHistoricalRecord] = results", "results class Client: DEFAULT_BASE_URL = 'https://api.spyse.com/v4/data' MAX_LIMIT = 100 SEARCH_RESULTS_LIMIT", "Pro 
subscription \"\"\" response = self.__scroll('{}/ip/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() ips", "Certificate, Email, DNSHistoricalRecord, WHOISHistoricalRecord from .response import Response from .search_query", "= 0) -> Response: with limit(self.limiter, consume=1): return Response.from_dict(self.session.post(endpoint, json={\"search_params\":", "0 else None def search_ip(self, query: SearchQuery, limit: int =", "results. \"\"\" response = self.__search('{}/certificate/search'.format(self.base_url), query, limit, offset) response.check_errors() certs", "return Domain.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None def search_domains(self,", "scroll_id) response.check_errors() domains = list() for r in response.data.items: domains.append(Domain.from_dict(r))", "def __search(self, endpoint, query: SearchQuery, lim: int = MAX_LIMIT, offset:", "-> CVESearchResults: \"\"\" Returns a list of CVEs that matched", "Spyse Pro subscription \"\"\" response = self.__scroll('{}/cve/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors()", "results: List[Certificate], total_items: int = None, search_id: str = None):", "query, scroll_id) response.check_errors() ips = list() for r in response.data.items:", "response = self.__search('{}/cve/email/count'.format(self.base_url), query) response.check_errors() return response.data.total_items def scroll_emails(self, query:", "if len(response.data.items) > 0 else None def count_autonomous_systems(self, query: SearchQuery)", "List[DNSHistoricalRecord] = results class HistoricalWHOISSearchResults: def __init__(self, results: List[WHOISHistoricalRecord], total_items:", "response = self.__search('{}/ip/search/count'.format(self.base_url), query) response.check_errors() return response.data.total_items def scroll_ip(self, query:", "in response.data.items: as_list.append(AS.from_dict(r)) return AutonomousSystemsSearchResults(as_list, 
response.data.total_items) def scroll_autonomous_systems(self, query: SearchQuery,", "cve_id: str) -> Optional[CVE]: \"\"\"Returns details about CVE\"\"\" response =", "query, scroll_id) response.check_errors() emails = list() for r in response.data.items:", "in response.data.items: cve_list.append(CVE.from_dict(r)) return CVESearchResults(cve_list, response.data.total_items) def count_cve(self, query: SearchQuery)", "return AS.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None def count_autonomous_systems(self,", "AutonomousSystemsSearchResults: \"\"\" Returns a list of autonomous systems that matched", "a list of IPv4 hosts that matched the search query.", "limit(self.limiter, consume=1): return Response.from_dict(self.session.post(endpoint, json={\"search_params\": query.get(), \"limit\": lim, \"offset\": offset}).json())", "def count_cve(self, query: SearchQuery) -> int: \"\"\"Returns the precise number", "return IPSearchResults(ips, search_id=response.data.search_id) def get_certificate_details(self, fingerprint_sha256: str) -> Optional[Certificate]: \"\"\"Returns", "AutonomousSystemsSearchResults(as_list, response.data.total_items) def scroll_autonomous_systems(self, query: SearchQuery, scroll_id: str = None)", "offset) response.check_errors() certs = list() for r in response.data.items: certs.append(Certificate.from_dict(r))", "limit, offset) response.check_errors() cve_list = list() for r in response.data.items:", "return response.data.total_items def scroll_cve(self, query: SearchQuery, scroll_id: str = None)", "self.__search('{}/email/search'.format(self.base_url), query, limit, offset) response.check_errors() emails = list() for r", "= list() for r in response.data.items: emails.append(Email.from_dict(r)) return EmailsSearchResults(emails, response.data.total_items)", "results: List[IP], total_items: int = None, search_id: str = None):", "self.__search('{}/as/search'.format(self.base_url), query, limit, offset) 
response.check_errors() as_list = list() for r", "cve_list = list() for r in response.data.items: cve_list.append(CVE.from_dict(r)) return CVESearchResults(cve_list,", "records = list() for r in response.data.items: records.append(DNSHistoricalRecord.from_dict(r)) return HistoricalDNSSearchResults(records,", "subscription \"\"\" response = self.__scroll('{}/domain/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() domains =", "= MAX_LIMIT, offset: int = 0) \\ -> HistoricalWHOISSearchResults: \"\"\"", "details about IP\"\"\" response = self.__get('{}/ip/{}'.format(self.base_url, ip)) response.check_errors() return IP.from_dict(response.data.items[0])", "> 0 else None def search_cve(self, query: SearchQuery, limit: int", "search_id self.results: List[Email] = results class HistoricalDNSSearchResults: def __init__(self, results:", "for r in response.data.items: domains.append(Domain.from_dict(r)) return DomainsSearchResults(domains, search_id=response.data.search_id) def get_ip_details(self,", "in response.data.items: domains.append(Domain.from_dict(r)) return DomainsSearchResults(domains, search_id=response.data.search_id) def get_ip_details(self, ip: str)", "certificate hosts that matched the search query. 
Allows getting only", "> 0 else None def search_ip(self, query: SearchQuery, limit: int", "Email, DNSHistoricalRecord, WHOISHistoricalRecord from .response import Response from .search_query import", "response.data.total_items) def search_historical_whois(self, domain_name: str, limit: int = MAX_LIMIT, offset:", "Returns the historical DNS records about the given domain name.", "json={\"search_params\": query.get(), \"limit\": lim, \"offset\": offset}).json()) def __scroll(self, endpoint, query:", "Returns a list of IPv4 hosts that matched the search", "search_certificate(self, query: SearchQuery, limit: int = MAX_LIMIT, offset: int =", "search results that matched the search query.\"\"\" response = self.__search('{}/domain/search/count'.format(self.base_url),", "the search query.\"\"\" response = self.__search('{}/cve/search/count'.format(self.base_url), query) response.check_errors() return response.data.total_items", "response.check_errors() return response.data.total_items def scroll_cve(self, query: SearchQuery, scroll_id: str =", "get_limiter, limit class DomainsSearchResults: def __init__(self, results: List[Domain], total_items: int", "asn)) response.check_errors() return AS.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None", "but requires a Spyse Pro subscription \"\"\" response = self.__scroll('{}/ip/scroll/search'.format(self.base_url),", "given domain name. \"\"\" response = self.__get(f'{self.base_url}/history/domain-whois/{domain_name}?limit={limit}&offset={offset}') response.check_errors() records =", "-> Optional[Domain]: \"\"\"Returns details about domain\"\"\" response = self.__get('{}/domain/{}'.format(self.base_url, domain_name))", "= self.__search('{}/certificate/search'.format(self.base_url), query, limit, offset) response.check_errors() certs = list() for", "the first 10,000 results. 
\"\"\" response = self.__search('{}/cve/search'.format(self.base_url), query, limit,", "IPSearchResults(ips, response.data.total_items) def count_ip(self, query: SearchQuery) -> int: \"\"\"Returns the", "self.session.headers.update({'User-Agent': s}) def get_quotas(self) -> Optional[Account]: \"\"\"Returns details about your", "int = 0) -> Response: with limit(self.limiter, consume=1): return Response.from_dict(self.session.post(endpoint,", "query.\"\"\" response = self.__search('{}/as/search/count'.format(self.base_url), query) response.check_errors() return response.data.total_items def search_autonomous_systems(self,", "return Account.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None def get_autonomous_system_details(self,", "int = 0) \\ -> HistoricalDNSSearchResults: \"\"\" Returns the historical", "AS, Domain, IP, CVE, Account, Certificate, Email, DNSHistoricalRecord, WHOISHistoricalRecord from", "details about an autonomous system by AS number.\"\"\" response =", "but requires a Spyse Pro subscription \"\"\" response = self.__scroll('{}/certificate/scroll/search'.format(self.base_url),", "list of autonomous systems that matched the search query. Allows", "search_id=response.data.search_id) def get_domain_details(self, domain_name: str) -> Optional[Domain]: \"\"\"Returns details about", "ip)) response.check_errors() return IP.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None", "a list of CVE that matched the search query. 
Allows", "details about your account quotas.\"\"\" response = self.__get('{}/account/quota'.format(self.base_url)) response.check_errors() return", "response.check_errors() return response.data.total_items def scroll_ip(self, query: SearchQuery, scroll_id: str =", "else None def count_autonomous_systems(self, query: SearchQuery) -> int: \"\"\"Returns the", "consume=1): return Response.from_dict(self.session.post(endpoint, json={\"search_params\": query.get(), \"limit\": lim, \"offset\": offset}).json()) def", "\"\"\"Returns details about SSL/TLS certificate\"\"\" response = self.__get('{}/certificate/{}'.format(self.base_url, fingerprint_sha256)) response.check_errors()", "Optional from .models import AS, Domain, IP, CVE, Account, Certificate,", "IPSearchResults: \"\"\" Returns a list of IPv4 hosts that matched", "\"\"\" response = self.__scroll('{}/as/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() as_list = list()", "{\"search_params\": query.get()} return Response.from_dict(self.session.post(endpoint, json=body).json()) def set_user_agent(self, s: str): self.session.headers.update({'User-Agent':", "domain\"\"\" response = self.__get('{}/domain/{}'.format(self.base_url, domain_name)) response.check_errors() return Domain.from_dict(response.data.items[0]) if len(response.data.items)", "-> Optional[IP]: \"\"\"Returns details about IP\"\"\" response = self.__get('{}/ip/{}'.format(self.base_url, ip))", "search_autonomous_systems(self, query: SearchQuery, limit: int = MAX_LIMIT, offset: int =", "subscription \"\"\" response = self.__scroll('{}/certificate/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() certs =", "get_quotas(self) -> Optional[Account]: \"\"\"Returns details about your account quotas.\"\"\" response", "__search(self, endpoint, query: SearchQuery, lim: int = MAX_LIMIT, offset: int", "count_certificate(self, query: SearchQuery) -> int: \"\"\"Returns the precise number of", "MAX_LIMIT, offset: 
int = 0) -> CertificatesSearchResults: \"\"\" Returns a", "a Spyse Pro subscription \"\"\" response = self.__scroll('{}/email/scroll/search'.format(self.base_url), query, scroll_id)", "-> Optional[CVE]: \"\"\"Returns details about CVE\"\"\" response = self.__get('{}/cve/{}'.format(self.base_url, cve_id))", "domains.append(Domain.from_dict(r)) return DomainsSearchResults(domains, response.data.total_items) def count_domains(self, query: SearchQuery): \"\"\"Returns the", "int = MAX_LIMIT, offset: int = 0) -> DomainsSearchResults: \"\"\"", "= search_id self.results: List[IP] = results class CertificatesSearchResults: def __init__(self,", "response.data.items: cve_list.append(CVE.from_dict(r)) return CVESearchResults(cve_list, search_id=response.data.search_id) def get_email_details(self, email: str) ->", "Optional[str] = search_id self.results: List[IP] = results class CertificatesSearchResults: def", "return Response.from_dict(self.session.post(endpoint, json={\"search_params\": query.get(), \"limit\": lim, \"offset\": offset}).json()) def __scroll(self,", "details about domain\"\"\" response = self.__get('{}/domain/{}'.format(self.base_url, domain_name)) response.check_errors() return Domain.from_dict(response.data.items[0])", "__init__(self, results: List[DNSHistoricalRecord], total_items: int = None, search_id: str =", "self.search_id: Optional[str] = search_id self.results: List[Certificate] = results class CVESearchResults:", "list() for r in response.data.items: ips.append(IP.from_dict(r)) return IPSearchResults(ips, search_id=response.data.search_id) def", "list() for r in response.data.items: emails.append(Email.from_dict(r)) return EmailsSearchResults(emails, search_id=response.data.search_id) def", "str) -> Optional[Domain]: \"\"\"Returns details about domain\"\"\" response = self.__get('{}/domain/{}'.format(self.base_url,", "response = self.__get('{}/certificate/{}'.format(self.base_url, fingerprint_sha256)) response.check_errors() return 
Certificate.from_dict(response.data.items[0]) if len(response.data.items) >", "self.total_items: Optional[int] = total_items self.search_id: Optional[str] = search_id self.results: List[IP]", "historical WHOIS records for the given domain name. \"\"\" response", "= self.__search('{}/as/search/count'.format(self.base_url), query) response.check_errors() return response.data.total_items def search_autonomous_systems(self, query: SearchQuery,", "MAX_LIMIT, offset: int = 0) \\ -> HistoricalWHOISSearchResults: \"\"\" Returns", "of emails that matched the search query. Allows getting only", "records.append(DNSHistoricalRecord.from_dict(r)) return HistoricalDNSSearchResults(records, response.data.total_items) def search_historical_whois(self, domain_name: str, limit: int", "scroll_id) response.check_errors() cve_list = list() for r in response.data.items: cve_list.append(CVE.from_dict(r))", "certs = list() for r in response.data.items: certs.append(Certificate.from_dict(r)) return CertificatesSearchResults(certs,", "= 0) \\ -> HistoricalWHOISSearchResults: \"\"\" Returns the historical WHOIS", "= search_id self.results: List[Certificate] = results class CVESearchResults: def __init__(self,", "response = self.__scroll('{}/cve/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() cve_list = list() for", "= self.__get(f'{self.base_url}/history/domain-whois/{domain_name}?limit={limit}&offset={offset}') response.check_errors() records = list() for r in response.data.items:", "__init__(self, results: List[WHOISHistoricalRecord], total_items: int = None, search_id: str =", "the first 10,000 results. 
\"\"\" response = self.__search('{}/domain/search'.format(self.base_url), query, limit,", "response.data.total_items def scroll_cve(self, query: SearchQuery, scroll_id: str = None) ->", "limit: int = MAX_LIMIT, offset: int = 0) \\ ->", "int = 0) -> AutonomousSystemsSearchResults: \"\"\" Returns a list of", "= search_id self.results: List[DNSHistoricalRecord] = results class HistoricalWHOISSearchResults: def __init__(self,", "def __init__(self, api_token, base_url=DEFAULT_BASE_URL): self.session = requests.Session() self.session.headers.update({'Authorization': 'Bearer '", "offset) response.check_errors() domains = list() for r in response.data.items: domains.append(Domain.from_dict(r))", "def __init__(self, results: List[Email], total_items: int = None, search_id: str", "List[IP], total_items: int = None, search_id: str = None): self.total_items:", "get_email_details(self, email: str) -> Optional[Email]: \"\"\"Returns details about email\"\"\" response", "query.get(), \"search_id\": search_id} else: body = {\"search_params\": query.get()} return Response.from_dict(self.session.post(endpoint,", "list() for r in response.data.items: certs.append(Certificate.from_dict(r)) return CertificatesSearchResults(certs, response.data.total_items) def", "EmailsSearchResults: def __init__(self, results: List[Email], total_items: int = None, search_id:", "the given domain name. 
\"\"\" response = self.__get(f'{self.base_url}/history/dns/{dns_type}/{domain_name}?limit={limit}&offset={offset}') response.check_errors() records", "a list of SSL/TLS certificate hosts that matched the search", "return response.data.total_items def scroll_emails(self, query: SearchQuery, scroll_id: str = None)", "return CVESearchResults(cve_list, search_id=response.data.search_id) def get_email_details(self, email: str) -> Optional[Email]: \"\"\"Returns", "subscription \"\"\" response = self.__scroll('{}/ip/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() ips =", "SearchQuery, scroll_id: str = None) -> CVESearchResults: \"\"\" Returns a", "return HistoricalDNSSearchResults(records, response.data.total_items) def search_historical_whois(self, domain_name: str, limit: int =", "the search query.\"\"\" response = self.__search('{}/certificate/search/count'.format(self.base_url), query) response.check_errors() return response.data.total_items", "emails.append(Email.from_dict(r)) return EmailsSearchResults(emails, response.data.total_items) def count_emails(self, query: SearchQuery) -> int:", "domains.append(Domain.from_dict(r)) return DomainsSearchResults(domains, search_id=response.data.search_id) def get_ip_details(self, ip: str) -> Optional[IP]:", "\"\"\" response = self.__scroll('{}/ip/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() ips = list()", "getting only the first 10,000 results. \"\"\" response = self.__search('{}/email/search'.format(self.base_url),", "response = self.__search('{}/domain/search/count'.format(self.base_url), query) response.check_errors() return response.data.total_items def scroll_domains(self, query:", "of SSL/TLS certificate hosts that matched the search query. Allows", "= 0) -> DomainsSearchResults: \"\"\" Returns a list of domains", "results. 
\"\"\" response = self.__search('{}/cve/search'.format(self.base_url), query, limit, offset) response.check_errors() cve_list", "Response: with limit(self.limiter, consume=1): return Response.from_dict(self.session.get(endpoint).json()) def __search(self, endpoint, query:", "response = self.__get(f'{self.base_url}/history/dns/{dns_type}/{domain_name}?limit={limit}&offset={offset}') response.check_errors() records = list() for r in", "1 def __init__(self, api_token, base_url=DEFAULT_BASE_URL): self.session = requests.Session() self.session.headers.update({'Authorization': 'Bearer", "domains that matched the search query. Allows getting all the", "cve_list.append(CVE.from_dict(r)) return CVESearchResults(cve_list, response.data.total_items) def count_cve(self, query: SearchQuery) -> int:", "def get_ip_details(self, ip: str) -> Optional[IP]: \"\"\"Returns details about IP\"\"\"", "MAX_LIMIT, offset: int = 0) -> AutonomousSystemsSearchResults: \"\"\" Returns a", "the precise number of search results that matched the search", "DomainsSearchResults(domains, search_id=response.data.search_id) def get_ip_details(self, ip: str) -> Optional[IP]: \"\"\"Returns details", "response.data.items: domains.append(Domain.from_dict(r)) return DomainsSearchResults(domains, response.data.total_items) def count_domains(self, query: SearchQuery): \"\"\"Returns", "self.__get(f'{self.base_url}/history/domain-whois/{domain_name}?limit={limit}&offset={offset}') response.check_errors() records = list() for r in response.data.items: records.append(WHOISHistoricalRecord.from_dict(r))", "autonomous systems that matched the search query. 
Allows getting only", "int = None, search_id: str = None): self.total_items: Optional[int] =", "IP\"\"\" response = self.__get('{}/ip/{}'.format(self.base_url, ip)) response.check_errors() return IP.from_dict(response.data.items[0]) if len(response.data.items)", "int = MAX_LIMIT, offset: int = 0) \\ -> HistoricalDNSSearchResults:", "results: List[DNSHistoricalRecord], total_items: int = None, search_id: str = None):", "about the given domain name. \"\"\" response = self.__get(f'{self.base_url}/history/dns/{dns_type}/{domain_name}?limit={limit}&offset={offset}') response.check_errors()", "\"\"\" Returns a list of IPv4 hosts that matched the", "self.limiter = get_limiter(rate=self.RATE_LIMIT_FRAME_IN_SECONDS, capacity=1) self.account = self.get_quotas() self.limiter._capacity = self.account.requests_rate_limit", "number of search results that matched the search query.\"\"\" response", "int = MAX_LIMIT, offset: int = 0) -> Response: with", "the first 10,000 results. \"\"\" response = self.__search('{}/ip/search'.format(self.base_url), query, limit,", "return DomainsSearchResults(domains, search_id=response.data.search_id) def get_ip_details(self, ip: str) -> Optional[IP]: \"\"\"Returns", "= self.__scroll('{}/ip/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() ips = list() for r", "from limiter import get_limiter, limit class DomainsSearchResults: def __init__(self, results:", "self.account.requests_rate_limit def __get(self, endpoint: str) -> Response: with limit(self.limiter, consume=1):", "= {\"search_params\": query.get(), \"search_id\": search_id} else: body = {\"search_params\": query.get()}", "self.__scroll('{}/as/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() as_list = list() for r in", "for r in response.data.items: domains.append(Domain.from_dict(r)) return DomainsSearchResults(domains, response.data.total_items) def count_domains(self,", "Allows getting only the first 10,000 results. 
\"\"\" response =", "self.__search('{}/cve/email/count'.format(self.base_url), query) response.check_errors() return response.data.total_items def scroll_emails(self, query: SearchQuery, scroll_id:", "SEARCH_RESULTS_LIMIT = 10000 RATE_LIMIT_FRAME_IN_SECONDS = 1 def __init__(self, api_token, base_url=DEFAULT_BASE_URL):", "> 0 else None def search_domains(self, query: SearchQuery, limit: int", "self.results: List[IP] = results class CertificatesSearchResults: def __init__(self, results: List[Certificate],", "' + api_token}) self.session.headers.update({'User-Agent': 'spyse-python'}) self.base_url = base_url self.limiter =", "get_ip_details(self, ip: str) -> Optional[IP]: \"\"\"Returns details about IP\"\"\" response", "SearchQuery, scroll_id: str = None) -> CertificatesSearchResults: \"\"\" Returns a", "response = self.__search('{}/as/search'.format(self.base_url), query, limit, offset) response.check_errors() as_list = list()", "= list() for r in response.data.items: as_list.append(AS.from_dict(r)) return AutonomousSystemsSearchResults(as_list, response.data.total_items)", "scroll_id) response.check_errors() emails = list() for r in response.data.items: emails.append(Email.from_dict(r))", "def __init__(self, results: List[AS], total_items: int = None, search_id: str", "results class EmailsSearchResults: def __init__(self, results: List[Email], total_items: int =", "with limit(self.limiter, consume=1): return Response.from_dict(self.session.post(endpoint, json={\"search_params\": query.get(), \"limit\": lim, \"offset\":", "response = self.__get('{}/as/{}'.format(self.base_url, asn)) response.check_errors() return AS.from_dict(response.data.items[0]) if len(response.data.items) >", "self.__search('{}/domain/search/count'.format(self.base_url), query) response.check_errors() return response.data.total_items def scroll_domains(self, query: SearchQuery, scroll_id:", "for r in response.data.items: certs.append(Certificate.from_dict(r)) return 
CertificatesSearchResults(certs, search_id=response.data.search_id) def get_cve_details(self,", "domain_name)) response.check_errors() return Domain.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None", "return CertificatesSearchResults(certs, search_id=response.data.search_id) def get_cve_details(self, cve_id: str) -> Optional[CVE]: \"\"\"Returns", "search_id=response.data.search_id) def get_ip_details(self, ip: str) -> Optional[IP]: \"\"\"Returns details about", "limit: int = MAX_LIMIT, offset: int = 0) -> IPSearchResults:", "\"\"\" Returns a list of domains that matched the search", "the search query.\"\"\" response = self.__search('{}/ip/search/count'.format(self.base_url), query) response.check_errors() return response.data.total_items", "response.data.items: ips.append(IP.from_dict(r)) return IPSearchResults(ips, search_id=response.data.search_id) def get_certificate_details(self, fingerprint_sha256: str) ->", "import AS, Domain, IP, CVE, Account, Certificate, Email, DNSHistoricalRecord, WHOISHistoricalRecord", "-> AutonomousSystemsSearchResults: \"\"\" Returns a list of autonomous systems that", "response = self.__get('{}/email/{}'.format(self.base_url, email)) response.check_errors() return Email.from_dict(response.data.items[0]) if len(response.data.items) >", "response.check_errors() return Certificate.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None def", "list of SSL/TLS certificates that matched the search query. 
Allows", "offset) response.check_errors() emails = list() for r in response.data.items: emails.append(Email.from_dict(r))", "r in response.data.items: certs.append(Certificate.from_dict(r)) return CertificatesSearchResults(certs, search_id=response.data.search_id) def get_cve_details(self, cve_id:", "str = None): self.total_items: Optional[int] = total_items self.search_id: Optional[str] =", "return response.data.total_items def scroll_domains(self, query: SearchQuery, scroll_id: str = None)", "self.__get('{}/domain/{}'.format(self.base_url, domain_name)) response.check_errors() return Domain.from_dict(response.data.items[0]) if len(response.data.items) > 0 else", "= {\"search_params\": query.get()} return Response.from_dict(self.session.post(endpoint, json=body).json()) def set_user_agent(self, s: str):", "return EmailsSearchResults(emails, response.data.total_items) def count_emails(self, query: SearchQuery) -> int: \"\"\"Returns", "= MAX_LIMIT, offset: int = 0) -> AutonomousSystemsSearchResults: \"\"\" Returns", "CertificatesSearchResults(certs, search_id=response.data.search_id) def get_cve_details(self, cve_id: str) -> Optional[CVE]: \"\"\"Returns details", "def count_ip(self, query: SearchQuery) -> int: \"\"\"Returns the precise number", "None, search_id: str = None): self.total_items: Optional[int] = total_items self.search_id:", "'Bearer ' + api_token}) self.session.headers.update({'User-Agent': 'spyse-python'}) self.base_url = base_url self.limiter", "0 else None def search_domains(self, query: SearchQuery, limit: int =", "def __scroll(self, endpoint, query: SearchQuery, search_id: Optional[str] = None) ->", "limiter import get_limiter, limit class DomainsSearchResults: def __init__(self, results: List[Domain],", "SSL/TLS certificates that matched the search query. 
Allows getting all", "self.__get('{}/ip/{}'.format(self.base_url, ip)) response.check_errors() return IP.from_dict(response.data.items[0]) if len(response.data.items) > 0 else", "r in response.data.items: emails.append(Email.from_dict(r)) return EmailsSearchResults(emails, response.data.total_items) def count_emails(self, query:", "requires a Spyse Pro subscription \"\"\" response = self.__scroll('{}/cve/scroll/search'.format(self.base_url), query,", "only the first 10,000 results. \"\"\" response = self.__search('{}/ip/search'.format(self.base_url), query,", "class Client: DEFAULT_BASE_URL = 'https://api.spyse.com/v4/data' MAX_LIMIT = 100 SEARCH_RESULTS_LIMIT =", "= self.__search('{}/email/search'.format(self.base_url), query, limit, offset) response.check_errors() emails = list() for", "the search query. Allows getting only the first 10,000 results.", "Response.from_dict(self.session.post(endpoint, json=body).json()) def set_user_agent(self, s: str): self.session.headers.update({'User-Agent': s}) def get_quotas(self)", "query. Allows getting only the first 10,000 results. 
\"\"\" response", "r in response.data.items: as_list.append(AS.from_dict(r)) return AutonomousSystemsSearchResults(as_list, search_id=response.data.search_id) def get_domain_details(self, domain_name:", "total_items: int = None, search_id: str = None): self.total_items: Optional[int]", "search_historical_whois(self, domain_name: str, limit: int = MAX_LIMIT, offset: int =", "lim: int = MAX_LIMIT, offset: int = 0) -> Response:", "by AS number.\"\"\" response = self.__get('{}/as/{}'.format(self.base_url, asn)) response.check_errors() return AS.from_dict(response.data.items[0])", "0 else None def count_autonomous_systems(self, query: SearchQuery) -> int: \"\"\"Returns", "= results class CertificatesSearchResults: def __init__(self, results: List[Certificate], total_items: int", "self.__search('{}/ip/search'.format(self.base_url), query, limit, offset) response.check_errors() ips = list() for r", "scroll_id: str = None) -> AutonomousSystemsSearchResults: \"\"\" Returns a list", "def search_cve(self, query: SearchQuery, limit: int = MAX_LIMIT, offset: int", "s}) def get_quotas(self) -> Optional[Account]: \"\"\"Returns details about your account", "= self.__search('{}/cve/search/count'.format(self.base_url), query) response.check_errors() return response.data.total_items def scroll_cve(self, query: SearchQuery,", "offset}).json()) def __scroll(self, endpoint, query: SearchQuery, search_id: Optional[str] = None)", "DNS records about the given domain name. 
\"\"\" response =", "search_id self.results: List[DNSHistoricalRecord] = results class HistoricalWHOISSearchResults: def __init__(self, results:", "query) response.check_errors() return response.data.total_items def scroll_ip(self, query: SearchQuery, scroll_id: str", "def scroll_ip(self, query: SearchQuery, scroll_id: str = None) -> IPSearchResults:", "limit: int = MAX_LIMIT, offset: int = 0) -> AutonomousSystemsSearchResults:", "self.search_id: Optional[str] = search_id self.results: List[WHOISHistoricalRecord] = results class Client:", "self.__search('{}/cve/search'.format(self.base_url), query, limit, offset) response.check_errors() cve_list = list() for r", "self.results: List[CVE] = results class EmailsSearchResults: def __init__(self, results: List[Email],", "for r in response.data.items: emails.append(Email.from_dict(r)) return EmailsSearchResults(emails, response.data.total_items) def count_emails(self,", "response = self.__scroll('{}/email/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() emails = list() for", "None def search_emails(self, query: SearchQuery, limit: int = MAX_LIMIT, offset:", "-> Optional[Email]: \"\"\"Returns details about email\"\"\" response = self.__get('{}/email/{}'.format(self.base_url, email))", "\"limit\": lim, \"offset\": offset}).json()) def __scroll(self, endpoint, query: SearchQuery, search_id:", "get_autonomous_system_details(self, asn: int) -> Optional[AS]: \"\"\"Returns details about an autonomous", "limit, offset) response.check_errors() ips = list() for r in response.data.items:", "query, scroll_id) response.check_errors() cve_list = list() for r in response.data.items:", "= self.account.requests_rate_limit def __get(self, endpoint: str) -> Response: with limit(self.limiter,", "self.__scroll('{}/email/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() emails = list() for r in", "Optional[CVE]: \"\"\"Returns details about CVE\"\"\" response = 
self.__get('{}/cve/{}'.format(self.base_url, cve_id)) response.check_errors()", "None def search_cve(self, query: SearchQuery, limit: int = MAX_LIMIT, offset:", "= self.__get(f'{self.base_url}/history/dns/{dns_type}/{domain_name}?limit={limit}&offset={offset}') response.check_errors() records = list() for r in response.data.items:", "Client: DEFAULT_BASE_URL = 'https://api.spyse.com/v4/data' MAX_LIMIT = 100 SEARCH_RESULTS_LIMIT = 10000", "if len(response.data.items) > 0 else None def search_domains(self, query: SearchQuery,", "if search_id: body = {\"search_params\": query.get(), \"search_id\": search_id} else: body", "response = self.__scroll('{}/as/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() as_list = list() for", "DomainsSearchResults: \"\"\" Returns a list of domains that matched the", "Pro subscription \"\"\" response = self.__scroll('{}/email/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() emails", "def get_cve_details(self, cve_id: str) -> Optional[CVE]: \"\"\"Returns details about CVE\"\"\"", "api_token}) self.session.headers.update({'User-Agent': 'spyse-python'}) self.base_url = base_url self.limiter = get_limiter(rate=self.RATE_LIMIT_FRAME_IN_SECONDS, capacity=1)", "historical DNS records about the given domain name. \"\"\" response", "first 10,000 results. \"\"\" response = self.__search('{}/as/search'.format(self.base_url), query, limit, offset)", "system by AS number.\"\"\" response = self.__get('{}/as/{}'.format(self.base_url, asn)) response.check_errors() return", "= 0) -> EmailsSearchResults: \"\"\" Returns a list of emails", "response.data.total_items) def count_certificate(self, query: SearchQuery) -> int: \"\"\"Returns the precise", "hosts that matched the search query. 
Allows getting all the", "Response from .search_query import SearchQuery from limiter import get_limiter, limit", "with limit(self.limiter, consume=1): return Response.from_dict(self.session.get(endpoint).json()) def __search(self, endpoint, query: SearchQuery,", "response.data.items: records.append(DNSHistoricalRecord.from_dict(r)) return HistoricalDNSSearchResults(records, response.data.total_items) def search_historical_whois(self, domain_name: str, limit:", "count_autonomous_systems(self, query: SearchQuery) -> int: \"\"\"Returns the precise number of", "= list() for r in response.data.items: as_list.append(AS.from_dict(r)) return AutonomousSystemsSearchResults(as_list, search_id=response.data.search_id)", "-> DomainsSearchResults: \"\"\" Returns a list of domains that matched", "DNSHistoricalRecord, WHOISHistoricalRecord from .response import Response from .search_query import SearchQuery", "int = MAX_LIMIT, offset: int = 0) \\ -> HistoricalWHOISSearchResults:", "the results but requires a Spyse Pro subscription \"\"\" response", "def get_domain_details(self, domain_name: str) -> Optional[Domain]: \"\"\"Returns details about domain\"\"\"", "str) -> Optional[Certificate]: \"\"\"Returns details about SSL/TLS certificate\"\"\" response =", "get_cve_details(self, cve_id: str) -> Optional[CVE]: \"\"\"Returns details about CVE\"\"\" response", "= None): self.total_items: Optional[int] = total_items self.search_id: Optional[str] = search_id", "AutonomousSystemsSearchResults: def __init__(self, results: List[AS], total_items: int = None, search_id:", "first 10,000 results. 
\"\"\" response = self.__search('{}/cve/search'.format(self.base_url), query, limit, offset)", "> 0 else None def search_certificate(self, query: SearchQuery, limit: int", "= total_items self.search_id: Optional[str] = search_id self.results: List[Certificate] = results", "of search results that matched the search query.\"\"\" response =", "len(response.data.items) > 0 else None def get_autonomous_system_details(self, asn: int) ->", "that matched the search query.\"\"\" response = self.__search('{}/ip/search/count'.format(self.base_url), query) response.check_errors()", "Domain.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None def search_domains(self, query:", "= MAX_LIMIT, offset: int = 0) -> CertificatesSearchResults: \"\"\" Returns", "= 0) -> CertificatesSearchResults: \"\"\" Returns a list of SSL/TLS", "def __init__(self, results: List[IP], total_items: int = None, search_id: str", "Returns a list of emails that matched the search query.", "search query.\"\"\" response = self.__search('{}/domain/search/count'.format(self.base_url), query) response.check_errors() return response.data.total_items def", "query: SearchQuery): \"\"\"Returns the precise number of search results that", "int = 0) -> CVESearchResults: \"\"\" Returns a list of", "class HistoricalWHOISSearchResults: def __init__(self, results: List[WHOISHistoricalRecord], total_items: int = None,", "total_items self.search_id: Optional[str] = search_id self.results: List[DNSHistoricalRecord] = results class", "results: List[WHOISHistoricalRecord], total_items: int = None, search_id: str = None):", "ips.append(IP.from_dict(r)) return IPSearchResults(ips, search_id=response.data.search_id) def get_certificate_details(self, fingerprint_sha256: str) -> Optional[Certificate]:", "def count_certificate(self, query: SearchQuery) -> int: \"\"\"Returns the precise number", "CVE that matched the search query. 
Allows getting only the", "query: SearchQuery, scroll_id: str = None) -> CVESearchResults: \"\"\" Returns", "RATE_LIMIT_FRAME_IN_SECONDS = 1 def __init__(self, api_token, base_url=DEFAULT_BASE_URL): self.session = requests.Session()", "in response.data.items: as_list.append(AS.from_dict(r)) return AutonomousSystemsSearchResults(as_list, search_id=response.data.search_id) def get_domain_details(self, domain_name: str)", "offset) response.check_errors() ips = list() for r in response.data.items: ips.append(IP.from_dict(r))", "response.check_errors() ips = list() for r in response.data.items: ips.append(IP.from_dict(r)) return", "SearchQuery, scroll_id: str = None) -> DomainsSearchResults: \"\"\" Returns a", "certificate\"\"\" response = self.__get('{}/certificate/{}'.format(self.base_url, fingerprint_sha256)) response.check_errors() return Certificate.from_dict(response.data.items[0]) if len(response.data.items)", ".response import Response from .search_query import SearchQuery from limiter import", "int = MAX_LIMIT, offset: int = 0) -> CertificatesSearchResults: \"\"\"", "0) \\ -> HistoricalDNSSearchResults: \"\"\" Returns the historical DNS records", "List[AS], total_items: int = None, search_id: str = None): self.total_items:", "return CVESearchResults(cve_list, response.data.total_items) def count_cve(self, query: SearchQuery) -> int: \"\"\"Returns", "the historical DNS records about the given domain name. \"\"\"", "response.data.total_items) def count_emails(self, query: SearchQuery) -> int: \"\"\"Returns the precise", "= results class IPSearchResults: def __init__(self, results: List[IP], total_items: int", "the first 10,000 results. 
\"\"\" response = self.__search('{}/certificate/search'.format(self.base_url), query, limit,", "= self.__search('{}/as/search'.format(self.base_url), query, limit, offset) response.check_errors() as_list = list() for", "None) -> DomainsSearchResults: \"\"\" Returns a list of domains that", "query: SearchQuery, search_id: Optional[str] = None) -> Response: with limit(self.limiter,", "SSL/TLS certificate hosts that matched the search query. Allows getting", "= None) -> CertificatesSearchResults: \"\"\" Returns a list of SSL/TLS", "= MAX_LIMIT, offset: int = 0) -> EmailsSearchResults: \"\"\" Returns", "10,000 results. \"\"\" response = self.__search('{}/as/search'.format(self.base_url), query, limit, offset) response.check_errors()", "for r in response.data.items: ips.append(IP.from_dict(r)) return IPSearchResults(ips, search_id=response.data.search_id) def get_certificate_details(self,", "scroll_id: str = None) -> EmailsSearchResults: \"\"\" Returns a list", "\"\"\" response = self.__search('{}/ip/search'.format(self.base_url), query, limit, offset) response.check_errors() ips =", "10,000 results. 
\"\"\" response = self.__search('{}/domain/search'.format(self.base_url), query, limit, offset) response.check_errors()", "SearchQuery, scroll_id: str = None) -> EmailsSearchResults: \"\"\" Returns a", "scroll_id: str = None) -> CertificatesSearchResults: \"\"\" Returns a list", "-> Response: with limit(self.limiter, consume=1): if search_id: body = {\"search_params\":", "-> int: \"\"\"Returns the precise number of search results that", "response.check_errors() return response.data.total_items def scroll_domains(self, query: SearchQuery, scroll_id: str =", "limit class DomainsSearchResults: def __init__(self, results: List[Domain], total_items: int =", "= total_items self.search_id: Optional[str] = search_id self.results: List[AS] = results", "__init__(self, results: List[Domain], total_items: int = None, search_id: str =", "self.results: List[Email] = results class HistoricalDNSSearchResults: def __init__(self, results: List[DNSHistoricalRecord],", "= base_url self.limiter = get_limiter(rate=self.RATE_LIMIT_FRAME_IN_SECONDS, capacity=1) self.account = self.get_quotas() self.limiter._capacity", "getting all the results but requires a Spyse Pro subscription", "= MAX_LIMIT, offset: int = 0) -> DomainsSearchResults: \"\"\" Returns", "first 10,000 results. 
\"\"\" response = self.__search('{}/ip/search'.format(self.base_url), query, limit, offset)", "= self.__scroll('{}/certificate/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() certs = list() for r", "response.data.items: as_list.append(AS.from_dict(r)) return AutonomousSystemsSearchResults(as_list, response.data.total_items) def scroll_autonomous_systems(self, query: SearchQuery, scroll_id:", "requests.Session() self.session.headers.update({'Authorization': 'Bearer ' + api_token}) self.session.headers.update({'User-Agent': 'spyse-python'}) self.base_url =", "Optional[int] = total_items self.search_id: Optional[str] = search_id self.results: List[Domain] =", "return AutonomousSystemsSearchResults(as_list, response.data.total_items) def scroll_autonomous_systems(self, query: SearchQuery, scroll_id: str =", "query: SearchQuery, limit: int = MAX_LIMIT, offset: int = 0)", "response.check_errors() return response.data.total_items def search_autonomous_systems(self, query: SearchQuery, limit: int =", "total_items self.search_id: Optional[str] = search_id self.results: List[Certificate] = results class", "response.check_errors() return CVE.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None def", "MAX_LIMIT, offset: int = 0) \\ -> HistoricalDNSSearchResults: \"\"\" Returns", "self.total_items: Optional[int] = total_items self.search_id: Optional[str] = search_id self.results: List[Certificate]", "s: str): self.session.headers.update({'User-Agent': s}) def get_quotas(self) -> Optional[Account]: \"\"\"Returns details", "0) -> AutonomousSystemsSearchResults: \"\"\" Returns a list of autonomous systems", "total_items self.search_id: Optional[str] = search_id self.results: List[CVE] = results class", "self.__search('{}/ip/search/count'.format(self.base_url), query) response.check_errors() return response.data.total_items def scroll_ip(self, query: SearchQuery, scroll_id:", "limit, offset) response.check_errors() domains = 
list() for r in response.data.items:", "response.data.items: certs.append(Certificate.from_dict(r)) return CertificatesSearchResults(certs, response.data.total_items) def count_certificate(self, query: SearchQuery) ->", "list of CVE that matched the search query. Allows getting", "self.limiter._capacity = self.account.requests_rate_limit def __get(self, endpoint: str) -> Response: with", "response = self.__search('{}/ip/search'.format(self.base_url), query, limit, offset) response.check_errors() ips = list()", "= None, search_id: str = None): self.total_items: Optional[int] = total_items", "= self.__search('{}/cve/search'.format(self.base_url), query, limit, offset) response.check_errors() cve_list = list() for", "= self.get_quotas() self.limiter._capacity = self.account.requests_rate_limit def __get(self, endpoint: str) ->", "r in response.data.items: cve_list.append(CVE.from_dict(r)) return CVESearchResults(cve_list, search_id=response.data.search_id) def get_email_details(self, email:", "Response.from_dict(self.session.post(endpoint, json={\"search_params\": query.get(), \"limit\": lim, \"offset\": offset}).json()) def __scroll(self, endpoint,", "CVE, Account, Certificate, Email, DNSHistoricalRecord, WHOISHistoricalRecord from .response import Response", "endpoint: str) -> Response: with limit(self.limiter, consume=1): return Response.from_dict(self.session.get(endpoint).json()) def", "self.account = self.get_quotas() self.limiter._capacity = self.account.requests_rate_limit def __get(self, endpoint: str)", "but requires a Spyse Pro subscription \"\"\" response = self.__scroll('{}/as/scroll/search'.format(self.base_url),", "else None def search_certificate(self, query: SearchQuery, limit: int = MAX_LIMIT,", "CertificatesSearchResults: \"\"\" Returns a list of SSL/TLS certificates that matched", "Pro subscription \"\"\" response = self.__scroll('{}/as/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() as_list", 
"CVESearchResults(cve_list, response.data.total_items) def count_cve(self, query: SearchQuery) -> int: \"\"\"Returns the", "self.get_quotas() self.limiter._capacity = self.account.requests_rate_limit def __get(self, endpoint: str) -> Response:", "response.check_errors() return Domain.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None def", "int = 0) -> EmailsSearchResults: \"\"\" Returns a list of", "'https://api.spyse.com/v4/data' MAX_LIMIT = 100 SEARCH_RESULTS_LIMIT = 10000 RATE_LIMIT_FRAME_IN_SECONDS = 1", "SearchQuery, lim: int = MAX_LIMIT, offset: int = 0) ->", "r in response.data.items: certs.append(Certificate.from_dict(r)) return CertificatesSearchResults(certs, response.data.total_items) def count_certificate(self, query:", "autonomous system by AS number.\"\"\" response = self.__get('{}/as/{}'.format(self.base_url, asn)) response.check_errors()", "= results class Client: DEFAULT_BASE_URL = 'https://api.spyse.com/v4/data' MAX_LIMIT = 100", "response.check_errors() return IP.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None def", "Optional[int] = total_items self.search_id: Optional[str] = search_id self.results: List[AS] =", "a Spyse Pro subscription \"\"\" response = self.__scroll('{}/cve/scroll/search'.format(self.base_url), query, scroll_id)", "total_items self.search_id: Optional[str] = search_id self.results: List[Email] = results class", "ip: str) -> Optional[IP]: \"\"\"Returns details about IP\"\"\" response =", "\"\"\"Returns details about an autonomous system by AS number.\"\"\" response", "the search query.\"\"\" response = self.__search('{}/as/search/count'.format(self.base_url), query) response.check_errors() return response.data.total_items", "the search query. 
Allows getting all the results but requires", "Domain, IP, CVE, Account, Certificate, Email, DNSHistoricalRecord, WHOISHistoricalRecord from .response", "= list() for r in response.data.items: certs.append(Certificate.from_dict(r)) return CertificatesSearchResults(certs, search_id=response.data.search_id)", "self.search_id: Optional[str] = search_id self.results: List[AS] = results class IPSearchResults:", "as_list = list() for r in response.data.items: as_list.append(AS.from_dict(r)) return AutonomousSystemsSearchResults(as_list,", "str = None) -> CertificatesSearchResults: \"\"\" Returns a list of", "lim, \"offset\": offset}).json()) def __scroll(self, endpoint, query: SearchQuery, search_id: Optional[str]", "DomainsSearchResults(domains, response.data.total_items) def count_domains(self, query: SearchQuery): \"\"\"Returns the precise number", "of domains that matched the search query. Allows getting all", "Account, Certificate, Email, DNSHistoricalRecord, WHOISHistoricalRecord from .response import Response from", "limit: int = MAX_LIMIT, offset: int = 0) -> CertificatesSearchResults:", "name. 
\"\"\" response = self.__get(f'{self.base_url}/history/domain-whois/{domain_name}?limit={limit}&offset={offset}') response.check_errors() records = list() for", "Optional[int] = total_items self.search_id: Optional[str] = search_id self.results: List[DNSHistoricalRecord] =", "CertificatesSearchResults: \"\"\" Returns a list of SSL/TLS certificate hosts that", "Optional[Account]: \"\"\"Returns details about your account quotas.\"\"\" response = self.__get('{}/account/quota'.format(self.base_url))", "total_items self.search_id: Optional[str] = search_id self.results: List[Domain] = results class", "query: SearchQuery) -> int: \"\"\"Returns the precise number of search", "Allows getting all the results but requires a Spyse Pro", "query.\"\"\" response = self.__search('{}/cve/search/count'.format(self.base_url), query) response.check_errors() return response.data.total_items def scroll_cve(self,", "list() for r in response.data.items: emails.append(Email.from_dict(r)) return EmailsSearchResults(emails, response.data.total_items) def", "IP.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None def search_ip(self, query:", "def scroll_domains(self, query: SearchQuery, scroll_id: str = None) -> DomainsSearchResults:", "Optional[str] = search_id self.results: List[Domain] = results class AutonomousSystemsSearchResults: def", "total_items self.search_id: Optional[str] = search_id self.results: List[WHOISHistoricalRecord] = results class", "CertificatesSearchResults: def __init__(self, results: List[Certificate], total_items: int = None, search_id:", "Optional[IP]: \"\"\"Returns details about IP\"\"\" response = self.__get('{}/ip/{}'.format(self.base_url, ip)) response.check_errors()", "return IPSearchResults(ips, response.data.total_items) def count_ip(self, query: SearchQuery) -> int: \"\"\"Returns", "len(response.data.items) > 0 else None def search_certificate(self, query: SearchQuery, limit:", "response.check_errors() emails = list() for r in 
response.data.items: emails.append(Email.from_dict(r)) return", "search_id: body = {\"search_params\": query.get(), \"search_id\": search_id} else: body =", "else None def search_ip(self, query: SearchQuery, limit: int = MAX_LIMIT,", "<reponame>fabaff/spyse-python import requests from typing import List, Optional from .models", "EmailsSearchResults: \"\"\" Returns a list of emails that matched the", "results: List[CVE], total_items: int = None, search_id: str = None):", "first 10,000 results. \"\"\" response = self.__search('{}/certificate/search'.format(self.base_url), query, limit, offset)", "WHOISHistoricalRecord from .response import Response from .search_query import SearchQuery from", "get_domain_details(self, domain_name: str) -> Optional[Domain]: \"\"\"Returns details about domain\"\"\" response", "response.data.total_items def scroll_emails(self, query: SearchQuery, scroll_id: str = None) ->", "return Response.from_dict(self.session.post(endpoint, json=body).json()) def set_user_agent(self, s: str): self.session.headers.update({'User-Agent': s}) def", "results. 
\"\"\" response = self.__search('{}/domain/search'.format(self.base_url), query, limit, offset) response.check_errors() domains", "response.check_errors() records = list() for r in response.data.items: records.append(WHOISHistoricalRecord.from_dict(r)) return", "SearchQuery, limit: int = MAX_LIMIT, offset: int = 0) ->", "offset: int = 0) -> Response: with limit(self.limiter, consume=1): return", "base_url=DEFAULT_BASE_URL): self.session = requests.Session() self.session.headers.update({'Authorization': 'Bearer ' + api_token}) self.session.headers.update({'User-Agent':", "in response.data.items: cve_list.append(CVE.from_dict(r)) return CVESearchResults(cve_list, search_id=response.data.search_id) def get_email_details(self, email: str)", "int = MAX_LIMIT, offset: int = 0) -> CVESearchResults: \"\"\"", "= total_items self.search_id: Optional[str] = search_id self.results: List[CVE] = results", "email: str) -> Optional[Email]: \"\"\"Returns details about email\"\"\" response =", "a Spyse Pro subscription \"\"\" response = self.__scroll('{}/as/scroll/search'.format(self.base_url), query, scroll_id)", "query: SearchQuery, lim: int = MAX_LIMIT, offset: int = 0)", "else: body = {\"search_params\": query.get()} return Response.from_dict(self.session.post(endpoint, json=body).json()) def set_user_agent(self,", "if len(response.data.items) > 0 else None def get_autonomous_system_details(self, asn: int)", "self.results: List[DNSHistoricalRecord] = results class HistoricalWHOISSearchResults: def __init__(self, results: List[WHOISHistoricalRecord],", "def count_domains(self, query: SearchQuery): \"\"\"Returns the precise number of search", "= list() for r in response.data.items: ips.append(IP.from_dict(r)) return IPSearchResults(ips, response.data.total_items)", "\"\"\" response = self.__search('{}/email/search'.format(self.base_url), query, limit, offset) response.check_errors() emails =", "HistoricalWHOISSearchResults: \"\"\" Returns the historical WHOIS records for the 
given", "getting only the first 10,000 results. \"\"\" response = self.__search('{}/as/search'.format(self.base_url),", "for r in response.data.items: as_list.append(AS.from_dict(r)) return AutonomousSystemsSearchResults(as_list, response.data.total_items) def scroll_autonomous_systems(self,", "= 0) -> AutonomousSystemsSearchResults: \"\"\" Returns a list of autonomous", "-> CertificatesSearchResults: \"\"\" Returns a list of SSL/TLS certificates that", "about IP\"\"\" response = self.__get('{}/ip/{}'.format(self.base_url, ip)) response.check_errors() return IP.from_dict(response.data.items[0]) if", "list() for r in response.data.items: domains.append(Domain.from_dict(r)) return DomainsSearchResults(domains, search_id=response.data.search_id) def", "CVE.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None def search_cve(self, query:", "domain_name: str, limit: int = MAX_LIMIT, offset: int = 0)", "self.session.headers.update({'User-Agent': 'spyse-python'}) self.base_url = base_url self.limiter = get_limiter(rate=self.RATE_LIMIT_FRAME_IN_SECONDS, capacity=1) self.account", "\"\"\"Returns details about domain\"\"\" response = self.__get('{}/domain/{}'.format(self.base_url, domain_name)) response.check_errors() return", "query: SearchQuery, scroll_id: str = None) -> IPSearchResults: \"\"\" Returns", "typing import List, Optional from .models import AS, Domain, IP,", "__init__(self, results: List[CVE], total_items: int = None, search_id: str =", "list of IPv4 hosts that matched the search query. 
Allows", "query, limit, offset) response.check_errors() ips = list() for r in", "else None def get_autonomous_system_details(self, asn: int) -> Optional[AS]: \"\"\"Returns details", "total_items self.search_id: Optional[str] = search_id self.results: List[IP] = results class", "search results that matched the search query.\"\"\" response = self.__search('{}/cve/search/count'.format(self.base_url),", "self.__get('{}/as/{}'.format(self.base_url, asn)) response.check_errors() return AS.from_dict(response.data.items[0]) if len(response.data.items) > 0 else", "response.check_errors() records = list() for r in response.data.items: records.append(DNSHistoricalRecord.from_dict(r)) return", "json=body).json()) def set_user_agent(self, s: str): self.session.headers.update({'User-Agent': s}) def get_quotas(self) ->", "limit(self.limiter, consume=1): if search_id: body = {\"search_params\": query.get(), \"search_id\": search_id}", "the first 10,000 results. \"\"\" response = self.__search('{}/email/search'.format(self.base_url), query, limit,", "self.__get('{}/certificate/{}'.format(self.base_url, fingerprint_sha256)) response.check_errors() return Certificate.from_dict(response.data.items[0]) if len(response.data.items) > 0 else", "-> Response: with limit(self.limiter, consume=1): return Response.from_dict(self.session.get(endpoint).json()) def __search(self, endpoint,", "MAX_LIMIT, offset: int = 0) -> DomainsSearchResults: \"\"\" Returns a", "IPSearchResults(ips, search_id=response.data.search_id) def get_certificate_details(self, fingerprint_sha256: str) -> Optional[Certificate]: \"\"\"Returns details", "None def search_certificate(self, query: SearchQuery, limit: int = MAX_LIMIT, offset:", "> 0 else None def count_autonomous_systems(self, query: SearchQuery) -> int:", "a Spyse Pro subscription \"\"\" response = self.__scroll('{}/certificate/scroll/search'.format(self.base_url), query, scroll_id)", "offset: int = 0) -> CertificatesSearchResults: \"\"\" Returns a list", "= 
self.__search('{}/ip/search/count'.format(self.base_url), query) response.check_errors() return response.data.total_items def scroll_ip(self, query: SearchQuery,", "self.__get('{}/email/{}'.format(self.base_url, email)) response.check_errors() return Email.from_dict(response.data.items[0]) if len(response.data.items) > 0 else", "Optional[int] = total_items self.search_id: Optional[str] = search_id self.results: List[Email] =", "results that matched the search query.\"\"\" response = self.__search('{}/as/search/count'.format(self.base_url), query)", "limit: int = MAX_LIMIT, offset: int = 0) -> DomainsSearchResults:", "0) -> EmailsSearchResults: \"\"\" Returns a list of emails that", "def get_quotas(self) -> Optional[Account]: \"\"\"Returns details about your account quotas.\"\"\"", "HistoricalDNSSearchResults: def __init__(self, results: List[DNSHistoricalRecord], total_items: int = None, search_id:", "List[Certificate] = results class CVESearchResults: def __init__(self, results: List[CVE], total_items:", "self.session = requests.Session() self.session.headers.update({'Authorization': 'Bearer ' + api_token}) self.session.headers.update({'User-Agent': 'spyse-python'})", "only the first 10,000 results. \"\"\" response = self.__search('{}/cve/search'.format(self.base_url), query,", "subscription \"\"\" response = self.__scroll('{}/cve/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() cve_list =", "search_id=response.data.search_id) def search_historical_dns(self, dns_type, domain_name: str, limit: int = MAX_LIMIT,", "offset: int = 0) \\ -> HistoricalWHOISSearchResults: \"\"\" Returns the", "search query.\"\"\" response = self.__search('{}/cve/search/count'.format(self.base_url), query) response.check_errors() return response.data.total_items def", "Pro subscription \"\"\" response = self.__scroll('{}/cve/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() cve_list", "that matched the search query. 
Allows getting only the first", "response = self.__search('{}/cve/search/count'.format(self.base_url), query) response.check_errors() return response.data.total_items def scroll_cve(self, query:", "search_id=response.data.search_id) def get_cve_details(self, cve_id: str) -> Optional[CVE]: \"\"\"Returns details about", "__get(self, endpoint: str) -> Response: with limit(self.limiter, consume=1): return Response.from_dict(self.session.get(endpoint).json())", "= MAX_LIMIT, offset: int = 0) -> Response: with limit(self.limiter,", "\\ -> HistoricalDNSSearchResults: \"\"\" Returns the historical DNS records about", "def get_email_details(self, email: str) -> Optional[Email]: \"\"\"Returns details about email\"\"\"", "results. \"\"\" response = self.__search('{}/email/search'.format(self.base_url), query, limit, offset) response.check_errors() emails", "offset) response.check_errors() as_list = list() for r in response.data.items: as_list.append(AS.from_dict(r))", "str = None) -> AutonomousSystemsSearchResults: \"\"\" Returns a list of", "all the results but requires a Spyse Pro subscription \"\"\"", "of CVE that matched the search query. Allows getting only", "CVESearchResults: \"\"\" Returns a list of CVEs that matched the", "WHOIS records for the given domain name. 
\"\"\" response =", "the search query.\"\"\" response = self.__search('{}/cve/email/count'.format(self.base_url), query) response.check_errors() return response.data.total_items", "about email\"\"\" response = self.__get('{}/email/{}'.format(self.base_url, email)) response.check_errors() return Email.from_dict(response.data.items[0]) if", "List[CVE] = results class EmailsSearchResults: def __init__(self, results: List[Email], total_items:", "len(response.data.items) > 0 else None def search_emails(self, query: SearchQuery, limit:", "from typing import List, Optional from .models import AS, Domain,", "requires a Spyse Pro subscription \"\"\" response = self.__scroll('{}/email/scroll/search'.format(self.base_url), query,", "domain_name: str) -> Optional[Domain]: \"\"\"Returns details about domain\"\"\" response =", "systems that matched the search query. Allows getting only the", "a list of CVEs that matched the search query. Allows", "Spyse Pro subscription \"\"\" response = self.__scroll('{}/as/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors()", "for r in response.data.items: records.append(DNSHistoricalRecord.from_dict(r)) return HistoricalDNSSearchResults(records, response.data.total_items) def search_historical_whois(self,", "of autonomous systems that matched the search query. 
Allows getting", "CVE\"\"\" response = self.__get('{}/cve/{}'.format(self.base_url, cve_id)) response.check_errors() return CVE.from_dict(response.data.items[0]) if len(response.data.items)", "= self.__get('{}/as/{}'.format(self.base_url, asn)) response.check_errors() return AS.from_dict(response.data.items[0]) if len(response.data.items) > 0", "str, limit: int = MAX_LIMIT, offset: int = 0) \\", "self.__search('{}/domain/search'.format(self.base_url), query, limit, offset) response.check_errors() domains = list() for r", "response.check_errors() return AS.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None def", "class CVESearchResults: def __init__(self, results: List[CVE], total_items: int = None,", "for r in response.data.items: emails.append(Email.from_dict(r)) return EmailsSearchResults(emails, search_id=response.data.search_id) def search_historical_dns(self,", "IPSearchResults: def __init__(self, results: List[IP], total_items: int = None, search_id:", "\"\"\" response = self.__get(f'{self.base_url}/history/domain-whois/{domain_name}?limit={limit}&offset={offset}') response.check_errors() records = list() for r", "subscription \"\"\" response = self.__scroll('{}/email/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() emails =", "search_ip(self, query: SearchQuery, limit: int = MAX_LIMIT, offset: int =", "= list() for r in response.data.items: certs.append(Certificate.from_dict(r)) return CertificatesSearchResults(certs, response.data.total_items)", "query. 
Allows getting all the results but requires a Spyse", "r in response.data.items: domains.append(Domain.from_dict(r)) return DomainsSearchResults(domains, response.data.total_items) def count_domains(self, query:", "None): self.total_items: Optional[int] = total_items self.search_id: Optional[str] = search_id self.results:", "= self.__search('{}/ip/search'.format(self.base_url), query, limit, offset) response.check_errors() ips = list() for", "import get_limiter, limit class DomainsSearchResults: def __init__(self, results: List[Domain], total_items:", "self.search_id: Optional[str] = search_id self.results: List[Email] = results class HistoricalDNSSearchResults:", "= list() for r in response.data.items: domains.append(Domain.from_dict(r)) return DomainsSearchResults(domains, response.data.total_items)", "results class HistoricalDNSSearchResults: def __init__(self, results: List[DNSHistoricalRecord], total_items: int =", "len(response.data.items) > 0 else None def search_cve(self, query: SearchQuery, limit:", "Optional[Domain]: \"\"\"Returns details about domain\"\"\" response = self.__get('{}/domain/{}'.format(self.base_url, domain_name)) response.check_errors()", "query, scroll_id) response.check_errors() domains = list() for r in response.data.items:", "matched the search query.\"\"\" response = self.__search('{}/cve/search/count'.format(self.base_url), query) response.check_errors() return", "scroll_domains(self, query: SearchQuery, scroll_id: str = None) -> DomainsSearchResults: \"\"\"", "scroll_id: str = None) -> DomainsSearchResults: \"\"\" Returns a list", "a Spyse Pro subscription \"\"\" response = self.__scroll('{}/ip/scroll/search'.format(self.base_url), query, scroll_id)", "= 100 SEARCH_RESULTS_LIMIT = 10000 RATE_LIMIT_FRAME_IN_SECONDS = 1 def __init__(self,", "response.data.items: ips.append(IP.from_dict(r)) return IPSearchResults(ips, response.data.total_items) def count_ip(self, query: SearchQuery) ->", "= list() for r in response.data.items: 
records.append(WHOISHistoricalRecord.from_dict(r)) return HistoricalWHOISSearchResults(records, response.data.total_items)", "0 else None def search_cve(self, query: SearchQuery, limit: int =", "emails = list() for r in response.data.items: emails.append(Email.from_dict(r)) return EmailsSearchResults(emails,", "response.check_errors() return response.data.total_items def scroll_certificate(self, query: SearchQuery, scroll_id: str =", "count_emails(self, query: SearchQuery) -> int: \"\"\"Returns the precise number of", "search_historical_dns(self, dns_type, domain_name: str, limit: int = MAX_LIMIT, offset: int", "self.total_items: Optional[int] = total_items self.search_id: Optional[str] = search_id self.results: List[WHOISHistoricalRecord]", "list() for r in response.data.items: cve_list.append(CVE.from_dict(r)) return CVESearchResults(cve_list, response.data.total_items) def", "query.\"\"\" response = self.__search('{}/cve/email/count'.format(self.base_url), query) response.check_errors() return response.data.total_items def scroll_emails(self,", "Response: with limit(self.limiter, consume=1): if search_id: body = {\"search_params\": query.get(),", "for r in response.data.items: as_list.append(AS.from_dict(r)) return AutonomousSystemsSearchResults(as_list, search_id=response.data.search_id) def get_domain_details(self,", "\"\"\" Returns the historical WHOIS records for the given domain", "\"\"\" response = self.__search('{}/cve/search'.format(self.base_url), query, limit, offset) response.check_errors() cve_list =", "return AutonomousSystemsSearchResults(as_list, search_id=response.data.search_id) def get_domain_details(self, domain_name: str) -> Optional[Domain]: \"\"\"Returns", "response.data.total_items def search_autonomous_systems(self, query: SearchQuery, limit: int = MAX_LIMIT, offset:", "list() for r in response.data.items: records.append(DNSHistoricalRecord.from_dict(r)) return HistoricalDNSSearchResults(records, response.data.total_items) def", "search 
results that matched the search query.\"\"\" response = self.__search('{}/ip/search/count'.format(self.base_url),", "first 10,000 results. \"\"\" response = self.__search('{}/domain/search'.format(self.base_url), query, limit, offset)", "domain name. \"\"\" response = self.__get(f'{self.base_url}/history/domain-whois/{domain_name}?limit={limit}&offset={offset}') response.check_errors() records = list()", "\"offset\": offset}).json()) def __scroll(self, endpoint, query: SearchQuery, search_id: Optional[str] =", "else None def search_emails(self, query: SearchQuery, limit: int = MAX_LIMIT,", "class CertificatesSearchResults: def __init__(self, results: List[Certificate], total_items: int = None,", "AS number.\"\"\" response = self.__get('{}/as/{}'.format(self.base_url, asn)) response.check_errors() return AS.from_dict(response.data.items[0]) if", "search_id self.results: List[IP] = results class CertificatesSearchResults: def __init__(self, results:", "SearchQuery): \"\"\"Returns the precise number of search results that matched", "query, scroll_id) response.check_errors() as_list = list() for r in response.data.items:", "query) response.check_errors() return response.data.total_items def scroll_domains(self, query: SearchQuery, scroll_id: str", "Optional[str] = None) -> Response: with limit(self.limiter, consume=1): if search_id:", "int = MAX_LIMIT, offset: int = 0) -> IPSearchResults: \"\"\"", "CVESearchResults: def __init__(self, results: List[CVE], total_items: int = None, search_id:", "MAX_LIMIT = 100 SEARCH_RESULTS_LIMIT = 10000 RATE_LIMIT_FRAME_IN_SECONDS = 1 def", "endpoint, query: SearchQuery, lim: int = MAX_LIMIT, offset: int =", "matched the search query. Allows getting only the first 10,000", "HistoricalDNSSearchResults: \"\"\" Returns the historical DNS records about the given", "def search_domains(self, query: SearchQuery, limit: int = MAX_LIMIT, offset: int", "list of domains that matched the search query. 
Allows getting", "= 'https://api.spyse.com/v4/data' MAX_LIMIT = 100 SEARCH_RESULTS_LIMIT = 10000 RATE_LIMIT_FRAME_IN_SECONDS =", "= None) -> CVESearchResults: \"\"\" Returns a list of CVEs", "return DomainsSearchResults(domains, response.data.total_items) def count_domains(self, query: SearchQuery): \"\"\"Returns the precise", "None def search_domains(self, query: SearchQuery, limit: int = MAX_LIMIT, offset:", "that matched the search query.\"\"\" response = self.__search('{}/cve/search/count'.format(self.base_url), query) response.check_errors()", "results: List[AS], total_items: int = None, search_id: str = None):", "0 else None def search_certificate(self, query: SearchQuery, limit: int =", "search_id self.results: List[CVE] = results class EmailsSearchResults: def __init__(self, results:", "len(response.data.items) > 0 else None def search_ip(self, query: SearchQuery, limit:", "None) -> Response: with limit(self.limiter, consume=1): if search_id: body =", "limit, offset) response.check_errors() as_list = list() for r in response.data.items:", "-> CVESearchResults: \"\"\" Returns a list of CVE that matched", "= search_id self.results: List[WHOISHistoricalRecord] = results class Client: DEFAULT_BASE_URL =", "return response.data.total_items def scroll_certificate(self, query: SearchQuery, scroll_id: str = None)", "int = MAX_LIMIT, offset: int = 0) -> EmailsSearchResults: \"\"\"", "query: SearchQuery, scroll_id: str = None) -> DomainsSearchResults: \"\"\" Returns", "= list() for r in response.data.items: records.append(DNSHistoricalRecord.from_dict(r)) return HistoricalDNSSearchResults(records, response.data.total_items)", "search query.\"\"\" response = self.__search('{}/as/search/count'.format(self.base_url), query) response.check_errors() return response.data.total_items def", "response = self.__search('{}/domain/search'.format(self.base_url), query, limit, offset) response.check_errors() domains = list()", "Returns a list of SSL/TLS certificate hosts that 
matched the", "int = MAX_LIMIT, offset: int = 0) -> AutonomousSystemsSearchResults: \"\"\"", "\"\"\" response = self.__scroll('{}/cve/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() cve_list = list()", "of domains that matched the search query. Allows getting only", "response.check_errors() cve_list = list() for r in response.data.items: cve_list.append(CVE.from_dict(r)) return", "results class CertificatesSearchResults: def __init__(self, results: List[Certificate], total_items: int =", "self.total_items: Optional[int] = total_items self.search_id: Optional[str] = search_id self.results: List[Email]", "MAX_LIMIT, offset: int = 0) -> IPSearchResults: \"\"\" Returns a", "get_certificate_details(self, fingerprint_sha256: str) -> Optional[Certificate]: \"\"\"Returns details about SSL/TLS certificate\"\"\"", "search_cve(self, query: SearchQuery, limit: int = MAX_LIMIT, offset: int =", "results. \"\"\" response = self.__search('{}/ip/search'.format(self.base_url), query, limit, offset) response.check_errors() ips", "\"\"\" response = self.__search('{}/as/search'.format(self.base_url), query, limit, offset) response.check_errors() as_list =", "capacity=1) self.account = self.get_quotas() self.limiter._capacity = self.account.requests_rate_limit def __get(self, endpoint:", "search query.\"\"\" response = self.__search('{}/certificate/search/count'.format(self.base_url), query) response.check_errors() return response.data.total_items def", ".models import AS, Domain, IP, CVE, Account, Certificate, Email, DNSHistoricalRecord,", "class IPSearchResults: def __init__(self, results: List[IP], total_items: int = None,", "self.__search('{}/cve/search/count'.format(self.base_url), query) response.check_errors() return response.data.total_items def scroll_cve(self, query: SearchQuery, scroll_id:", "Returns the historical WHOIS records for the given domain name.", "query.\"\"\" response = self.__search('{}/ip/search/count'.format(self.base_url), 
query) response.check_errors() return response.data.total_items def scroll_ip(self,", "self.results: List[Domain] = results class AutonomousSystemsSearchResults: def __init__(self, results: List[AS],", "response.check_errors() return response.data.total_items def scroll_emails(self, query: SearchQuery, scroll_id: str =", "= results class CVESearchResults: def __init__(self, results: List[CVE], total_items: int", "Optional[str] = search_id self.results: List[Certificate] = results class CVESearchResults: def", "List[Email], total_items: int = None, search_id: str = None): self.total_items:", "search query. Allows getting all the results but requires a", "response = self.__search('{}/certificate/search'.format(self.base_url), query, limit, offset) response.check_errors() certs = list()", "query.\"\"\" response = self.__search('{}/certificate/search/count'.format(self.base_url), query) response.check_errors() return response.data.total_items def scroll_certificate(self,", "= MAX_LIMIT, offset: int = 0) -> CVESearchResults: \"\"\" Returns", "first 10,000 results. 
\"\"\" response = self.__search('{}/email/search'.format(self.base_url), query, limit, offset)", "response.check_errors() certs = list() for r in response.data.items: certs.append(Certificate.from_dict(r)) return", "response = self.__scroll('{}/domain/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() domains = list() for", "class HistoricalDNSSearchResults: def __init__(self, results: List[DNSHistoricalRecord], total_items: int = None,", "as_list.append(AS.from_dict(r)) return AutonomousSystemsSearchResults(as_list, response.data.total_items) def scroll_autonomous_systems(self, query: SearchQuery, scroll_id: str", "results: List[Domain], total_items: int = None, search_id: str = None):", "def __init__(self, results: List[Certificate], total_items: int = None, search_id: str", "account quotas.\"\"\" response = self.__get('{}/account/quota'.format(self.base_url)) response.check_errors() return Account.from_dict(response.data.items[0]) if len(response.data.items)", "List[CVE], total_items: int = None, search_id: str = None): self.total_items:", "subscription \"\"\" response = self.__scroll('{}/as/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() as_list =", "\"\"\"Returns details about IP\"\"\" response = self.__get('{}/ip/{}'.format(self.base_url, ip)) response.check_errors() return", "Returns a list of CVE that matched the search query.", "Optional[int] = total_items self.search_id: Optional[str] = search_id self.results: List[WHOISHistoricalRecord] =", "str = None) -> CVESearchResults: \"\"\" Returns a list of", "email)) response.check_errors() return Email.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None", "HistoricalDNSSearchResults(records, response.data.total_items) def search_historical_whois(self, domain_name: str, limit: int = MAX_LIMIT,", "self.session.headers.update({'Authorization': 'Bearer ' + api_token}) self.session.headers.update({'User-Agent': 'spyse-python'}) 
self.base_url = base_url", "self.__scroll('{}/domain/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() domains = list() for r in", "but requires a Spyse Pro subscription \"\"\" response = self.__scroll('{}/domain/scroll/search'.format(self.base_url),", "str = None) -> DomainsSearchResults: \"\"\" Returns a list of", "ips.append(IP.from_dict(r)) return IPSearchResults(ips, response.data.total_items) def count_ip(self, query: SearchQuery) -> int:", "that matched the search query.\"\"\" response = self.__search('{}/domain/search/count'.format(self.base_url), query) response.check_errors()", "self.total_items: Optional[int] = total_items self.search_id: Optional[str] = search_id self.results: List[Domain]", "self.search_id: Optional[str] = search_id self.results: List[CVE] = results class EmailsSearchResults:", "= MAX_LIMIT, offset: int = 0) -> IPSearchResults: \"\"\" Returns", "= self.__scroll('{}/domain/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() domains = list() for r", "def scroll_cve(self, query: SearchQuery, scroll_id: str = None) -> CVESearchResults:", "= total_items self.search_id: Optional[str] = search_id self.results: List[Email] = results", "about CVE\"\"\" response = self.__get('{}/cve/{}'.format(self.base_url, cve_id)) response.check_errors() return CVE.from_dict(response.data.items[0]) if", "endpoint, query: SearchQuery, search_id: Optional[str] = None) -> Response: with", "a Spyse Pro subscription \"\"\" response = self.__scroll('{}/domain/scroll/search'.format(self.base_url), query, scroll_id)", "self.total_items: Optional[int] = total_items self.search_id: Optional[str] = search_id self.results: List[DNSHistoricalRecord]", "\"\"\" Returns the historical DNS records about the given domain", "if len(response.data.items) > 0 else None def search_certificate(self, query: SearchQuery,", "self.results: List[AS] = results class IPSearchResults: def __init__(self, results: List[IP],", "cve_id)) 
response.check_errors() return CVE.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None", "= self.__get('{}/certificate/{}'.format(self.base_url, fingerprint_sha256)) response.check_errors() return Certificate.from_dict(response.data.items[0]) if len(response.data.items) > 0", "of emails that matched the search query. Allows getting all", "emails that matched the search query. Allows getting all the", "search_id} else: body = {\"search_params\": query.get()} return Response.from_dict(self.session.post(endpoint, json=body).json()) def", "scroll_cve(self, query: SearchQuery, scroll_id: str = None) -> CVESearchResults: \"\"\"", "\"\"\" response = self.__search('{}/domain/search'.format(self.base_url), query, limit, offset) response.check_errors() domains =", "scroll_id) response.check_errors() as_list = list() for r in response.data.items: as_list.append(AS.from_dict(r))", "def get_certificate_details(self, fingerprint_sha256: str) -> Optional[Certificate]: \"\"\"Returns details about SSL/TLS", "systems that matched the search query. Allows getting all the", "asn: int) -> Optional[AS]: \"\"\"Returns details about an autonomous system", "\"search_id\": search_id} else: body = {\"search_params\": query.get()} return Response.from_dict(self.session.post(endpoint, json=body).json())", "= 1 def __init__(self, api_token, base_url=DEFAULT_BASE_URL): self.session = requests.Session() self.session.headers.update({'Authorization':", "list of CVEs that matched the search query. Allows getting", "domain name. 
\"\"\" response = self.__get(f'{self.base_url}/history/dns/{dns_type}/{domain_name}?limit={limit}&offset={offset}') response.check_errors() records = list()", "__scroll(self, endpoint, query: SearchQuery, search_id: Optional[str] = None) -> Response:", "search_id=response.data.search_id) def get_email_details(self, email: str) -> Optional[Email]: \"\"\"Returns details about", "= search_id self.results: List[AS] = results class IPSearchResults: def __init__(self,", "Returns a list of autonomous systems that matched the search", "a list of SSL/TLS certificates that matched the search query.", "total_items self.search_id: Optional[str] = search_id self.results: List[AS] = results class", "str) -> Optional[CVE]: \"\"\"Returns details about CVE\"\"\" response = self.__get('{}/cve/{}'.format(self.base_url,", "scroll_id: str = None) -> IPSearchResults: \"\"\" Returns a list", "the historical WHOIS records for the given domain name. \"\"\"", "return CVE.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None def search_cve(self,", "query: SearchQuery, scroll_id: str = None) -> EmailsSearchResults: \"\"\" Returns", "def search_certificate(self, query: SearchQuery, limit: int = MAX_LIMIT, offset: int", "str): self.session.headers.update({'User-Agent': s}) def get_quotas(self) -> Optional[Account]: \"\"\"Returns details about", "List[AS] = results class IPSearchResults: def __init__(self, results: List[IP], total_items:", "SearchQuery from limiter import get_limiter, limit class DomainsSearchResults: def __init__(self,", "Returns a list of CVEs that matched the search query.", "Email.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None def search_emails(self, query:", "= self.__get('{}/ip/{}'.format(self.base_url, ip)) response.check_errors() return IP.from_dict(response.data.items[0]) if len(response.data.items) > 0", "domains = list() for r in response.data.items: domains.append(Domain.from_dict(r)) return 
DomainsSearchResults(domains,", "Optional[str] = search_id self.results: List[AS] = results class IPSearchResults: def", "domains that matched the search query. Allows getting only the", "self.__search('{}/as/search/count'.format(self.base_url), query) response.check_errors() return response.data.total_items def search_autonomous_systems(self, query: SearchQuery, limit:", "= self.__get('{}/cve/{}'.format(self.base_url, cve_id)) response.check_errors() return CVE.from_dict(response.data.items[0]) if len(response.data.items) > 0", "limit(self.limiter, consume=1): return Response.from_dict(self.session.get(endpoint).json()) def __search(self, endpoint, query: SearchQuery, lim:", "search_id: str = None): self.total_items: Optional[int] = total_items self.search_id: Optional[str]", "about domain\"\"\" response = self.__get('{}/domain/{}'.format(self.base_url, domain_name)) response.check_errors() return Domain.from_dict(response.data.items[0]) if", "= total_items self.search_id: Optional[str] = search_id self.results: List[Domain] = results", "__init__(self, results: List[Certificate], total_items: int = None, search_id: str =", "0) -> IPSearchResults: \"\"\" Returns a list of IPv4 hosts", "Optional[AS]: \"\"\"Returns details about an autonomous system by AS number.\"\"\"", "ips = list() for r in response.data.items: ips.append(IP.from_dict(r)) return IPSearchResults(ips,", "\"\"\" response = self.__scroll('{}/certificate/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() certs = list()", "__init__(self, api_token, base_url=DEFAULT_BASE_URL): self.session = requests.Session() self.session.headers.update({'Authorization': 'Bearer ' +", "response.check_errors() domains = list() for r in response.data.items: domains.append(Domain.from_dict(r)) return", "= search_id self.results: List[Domain] = results class AutonomousSystemsSearchResults: def __init__(self,", "cve_list.append(CVE.from_dict(r)) return CVESearchResults(cve_list, 
search_id=response.data.search_id) def get_email_details(self, email: str) -> Optional[Email]:", "offset: int = 0) -> IPSearchResults: \"\"\" Returns a list", "= None) -> EmailsSearchResults: \"\"\" Returns a list of emails", "IPv4 hosts that matched the search query. Allows getting all", "search_id: Optional[str] = None) -> Response: with limit(self.limiter, consume=1): if", "self.total_items: Optional[int] = total_items self.search_id: Optional[str] = search_id self.results: List[CVE]", "= None) -> AutonomousSystemsSearchResults: \"\"\" Returns a list of autonomous", "response = self.__get(f'{self.base_url}/history/domain-whois/{domain_name}?limit={limit}&offset={offset}') response.check_errors() records = list() for r in", "if len(response.data.items) > 0 else None def search_emails(self, query: SearchQuery,", "response = self.__search('{}/certificate/search/count'.format(self.base_url), query) response.check_errors() return response.data.total_items def scroll_certificate(self, query:", "response.data.items: emails.append(Email.from_dict(r)) return EmailsSearchResults(emails, response.data.total_items) def count_emails(self, query: SearchQuery) ->", "self.__search('{}/certificate/search'.format(self.base_url), query, limit, offset) response.check_errors() certs = list() for r", "\"\"\" response = self.__scroll('{}/email/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() emails = list()", "100 SEARCH_RESULTS_LIMIT = 10000 RATE_LIMIT_FRAME_IN_SECONDS = 1 def __init__(self, api_token,", "\"\"\" Returns a list of SSL/TLS certificate hosts that matched", "= self.__get('{}/domain/{}'.format(self.base_url, domain_name)) response.check_errors() return Domain.from_dict(response.data.items[0]) if len(response.data.items) > 0", "details about email\"\"\" response = self.__get('{}/email/{}'.format(self.base_url, email)) response.check_errors() return Email.from_dict(response.data.items[0])", "return 
Response.from_dict(self.session.get(endpoint).json()) def __search(self, endpoint, query: SearchQuery, lim: int =", "query) response.check_errors() return response.data.total_items def scroll_cve(self, query: SearchQuery, scroll_id: str", "\"\"\" Returns a list of emails that matched the search", "Returns a list of domains that matched the search query.", "= MAX_LIMIT, offset: int = 0) \\ -> HistoricalDNSSearchResults: \"\"\"", "None def get_autonomous_system_details(self, asn: int) -> Optional[AS]: \"\"\"Returns details about", "but requires a Spyse Pro subscription \"\"\" response = self.__scroll('{}/cve/scroll/search'.format(self.base_url),", "{\"search_params\": query.get(), \"search_id\": search_id} else: body = {\"search_params\": query.get()} return", "def count_emails(self, query: SearchQuery) -> int: \"\"\"Returns the precise number", "response = self.__get('{}/ip/{}'.format(self.base_url, ip)) response.check_errors() return IP.from_dict(response.data.items[0]) if len(response.data.items) >", "10,000 results. \"\"\" response = self.__search('{}/certificate/search'.format(self.base_url), query, limit, offset) response.check_errors()", "= 10000 RATE_LIMIT_FRAME_IN_SECONDS = 1 def __init__(self, api_token, base_url=DEFAULT_BASE_URL): self.session", "import List, Optional from .models import AS, Domain, IP, CVE,", "response.data.total_items) def count_ip(self, query: SearchQuery) -> int: \"\"\"Returns the precise", "count_domains(self, query: SearchQuery): \"\"\"Returns the precise number of search results", "response.data.items: certs.append(Certificate.from_dict(r)) return CertificatesSearchResults(certs, search_id=response.data.search_id) def get_cve_details(self, cve_id: str) ->", "CVEs that matched the search query. 
Allows getting all the", "query) response.check_errors() return response.data.total_items def scroll_emails(self, query: SearchQuery, scroll_id: str", "scroll_autonomous_systems(self, query: SearchQuery, scroll_id: str = None) -> AutonomousSystemsSearchResults: \"\"\"", "search_id self.results: List[Domain] = results class AutonomousSystemsSearchResults: def __init__(self, results:", "self.__get('{}/cve/{}'.format(self.base_url, cve_id)) response.check_errors() return CVE.from_dict(response.data.items[0]) if len(response.data.items) > 0 else", "-> Optional[AS]: \"\"\"Returns details about an autonomous system by AS", "\"\"\"Returns details about CVE\"\"\" response = self.__get('{}/cve/{}'.format(self.base_url, cve_id)) response.check_errors() return", "\\ -> HistoricalWHOISSearchResults: \"\"\" Returns the historical WHOIS records for", "= total_items self.search_id: Optional[str] = search_id self.results: List[IP] = results", "self.results: List[Certificate] = results class CVESearchResults: def __init__(self, results: List[CVE],", "= get_limiter(rate=self.RATE_LIMIT_FRAME_IN_SECONDS, capacity=1) self.account = self.get_quotas() self.limiter._capacity = self.account.requests_rate_limit def", "return Certificate.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None def search_certificate(self,", "results class AutonomousSystemsSearchResults: def __init__(self, results: List[AS], total_items: int =", "results but requires a Spyse Pro subscription \"\"\" response =", "r in response.data.items: cve_list.append(CVE.from_dict(r)) return CVESearchResults(cve_list, response.data.total_items) def count_cve(self, query:", "= results class EmailsSearchResults: def __init__(self, results: List[Email], total_items: int", "that matched the search query.\"\"\" response = self.__search('{}/as/search/count'.format(self.base_url), query) response.check_errors()", "about SSL/TLS certificate\"\"\" response = self.__get('{}/certificate/{}'.format(self.base_url, 
fingerprint_sha256)) response.check_errors() return Certificate.from_dict(response.data.items[0])", "list() for r in response.data.items: domains.append(Domain.from_dict(r)) return DomainsSearchResults(domains, response.data.total_items) def", "query, scroll_id) response.check_errors() certs = list() for r in response.data.items:", "-> HistoricalDNSSearchResults: \"\"\" Returns the historical DNS records about the", "in response.data.items: certs.append(Certificate.from_dict(r)) return CertificatesSearchResults(certs, search_id=response.data.search_id) def get_cve_details(self, cve_id: str)", "= results class AutonomousSystemsSearchResults: def __init__(self, results: List[AS], total_items: int", "offset: int = 0) -> AutonomousSystemsSearchResults: \"\"\" Returns a list", "requires a Spyse Pro subscription \"\"\" response = self.__scroll('{}/as/scroll/search'.format(self.base_url), query,", "Pro subscription \"\"\" response = self.__scroll('{}/certificate/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() certs", "= list() for r in response.data.items: domains.append(Domain.from_dict(r)) return DomainsSearchResults(domains, search_id=response.data.search_id)", "= None) -> Response: with limit(self.limiter, consume=1): if search_id: body", "response.data.items: cve_list.append(CVE.from_dict(r)) return CVESearchResults(cve_list, response.data.total_items) def count_cve(self, query: SearchQuery) ->", "= list() for r in response.data.items: cve_list.append(CVE.from_dict(r)) return CVESearchResults(cve_list, search_id=response.data.search_id)", "SearchQuery) -> int: \"\"\"Returns the precise number of search results", "10,000 results. 
\"\"\" response = self.__search('{}/ip/search'.format(self.base_url), query, limit, offset) response.check_errors()", "CertificatesSearchResults(certs, response.data.total_items) def count_certificate(self, query: SearchQuery) -> int: \"\"\"Returns the", "> 0 else None def get_autonomous_system_details(self, asn: int) -> Optional[AS]:", "def scroll_certificate(self, query: SearchQuery, scroll_id: str = None) -> CertificatesSearchResults:", "None) -> CVESearchResults: \"\"\" Returns a list of CVEs that", "that matched the search query.\"\"\" response = self.__search('{}/certificate/search/count'.format(self.base_url), query) response.check_errors()", "list() for r in response.data.items: cve_list.append(CVE.from_dict(r)) return CVESearchResults(cve_list, search_id=response.data.search_id) def", "int = 0) \\ -> HistoricalWHOISSearchResults: \"\"\" Returns the historical", "def search_autonomous_systems(self, query: SearchQuery, limit: int = MAX_LIMIT, offset: int", "results: List[Email], total_items: int = None, search_id: str = None):", "getting only the first 10,000 results. 
\"\"\" response = self.__search('{}/domain/search'.format(self.base_url),", "response = self.__search('{}/email/search'.format(self.base_url), query, limit, offset) response.check_errors() emails = list()", "= self.__get('{}/account/quota'.format(self.base_url)) response.check_errors() return Account.from_dict(response.data.items[0]) if len(response.data.items) > 0 else", "limit: int = MAX_LIMIT, offset: int = 0) -> CVESearchResults:", "Optional[int] = total_items self.search_id: Optional[str] = search_id self.results: List[Certificate] =", "int = 0) -> DomainsSearchResults: \"\"\" Returns a list of", "response = self.__search('{}/cve/search'.format(self.base_url), query, limit, offset) response.check_errors() cve_list = list()", "= self.__scroll('{}/cve/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() cve_list = list() for r", "results that matched the search query.\"\"\" response = self.__search('{}/cve/email/count'.format(self.base_url), query)", "records for the given domain name. \"\"\" response = self.__get(f'{self.base_url}/history/domain-whois/{domain_name}?limit={limit}&offset={offset}')", "list of emails that matched the search query. 
Allows getting", "Optional[int] = total_items self.search_id: Optional[str] = search_id self.results: List[CVE] =", "def search_emails(self, query: SearchQuery, limit: int = MAX_LIMIT, offset: int", "= 0) \\ -> HistoricalDNSSearchResults: \"\"\" Returns the historical DNS", "\"\"\" Returns a list of CVEs that matched the search", "list() for r in response.data.items: ips.append(IP.from_dict(r)) return IPSearchResults(ips, response.data.total_items) def", "DomainsSearchResults: def __init__(self, results: List[Domain], total_items: int = None, search_id:", "self.__scroll('{}/ip/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() ips = list() for r in", "r in response.data.items: ips.append(IP.from_dict(r)) return IPSearchResults(ips, search_id=response.data.search_id) def get_certificate_details(self, fingerprint_sha256:", "requires a Spyse Pro subscription \"\"\" response = self.__scroll('{}/certificate/scroll/search'.format(self.base_url), query,", "the search query.\"\"\" response = self.__search('{}/domain/search/count'.format(self.base_url), query) response.check_errors() return response.data.total_items", "= self.__scroll('{}/email/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() emails = list() for r", "query) response.check_errors() return response.data.total_items def scroll_certificate(self, query: SearchQuery, scroll_id: str", "str) -> Response: with limit(self.limiter, consume=1): return Response.from_dict(self.session.get(endpoint).json()) def __search(self,", "of IPv4 hosts that matched the search query. Allows getting", "your account quotas.\"\"\" response = self.__get('{}/account/quota'.format(self.base_url)) response.check_errors() return Account.from_dict(response.data.items[0]) if", "def scroll_emails(self, query: SearchQuery, scroll_id: str = None) -> EmailsSearchResults:", "search query. Allows getting only the first 10,000 results. 
\"\"\"", "-> HistoricalWHOISSearchResults: \"\"\" Returns the historical WHOIS records for the", "body = {\"search_params\": query.get()} return Response.from_dict(self.session.post(endpoint, json=body).json()) def set_user_agent(self, s:", "results that matched the search query.\"\"\" response = self.__search('{}/domain/search/count'.format(self.base_url), query)", "count_ip(self, query: SearchQuery) -> int: \"\"\"Returns the precise number of", "List[DNSHistoricalRecord], total_items: int = None, search_id: str = None): self.total_items:", "list() for r in response.data.items: certs.append(Certificate.from_dict(r)) return CertificatesSearchResults(certs, search_id=response.data.search_id) def", "\"\"\" Returns a list of autonomous systems that matched the", "-> Optional[Certificate]: \"\"\"Returns details about SSL/TLS certificate\"\"\" response = self.__get('{}/certificate/{}'.format(self.base_url,", "= results class HistoricalDNSSearchResults: def __init__(self, results: List[DNSHistoricalRecord], total_items: int", "\"\"\" response = self.__search('{}/certificate/search'.format(self.base_url), query, limit, offset) response.check_errors() certs =", "= self.__search('{}/domain/search/count'.format(self.base_url), query) response.check_errors() return response.data.total_items def scroll_domains(self, query: SearchQuery,", "else None def search_domains(self, query: SearchQuery, limit: int = MAX_LIMIT,", "list of SSL/TLS certificate hosts that matched the search query.", "limit, offset) response.check_errors() emails = list() for r in response.data.items:", "given domain name. \"\"\" response = self.__get(f'{self.base_url}/history/dns/{dns_type}/{domain_name}?limit={limit}&offset={offset}') response.check_errors() records =", "only the first 10,000 results. 
\"\"\" response = self.__search('{}/as/search'.format(self.base_url), query,", "limit: int = MAX_LIMIT, offset: int = 0) -> EmailsSearchResults:", "import Response from .search_query import SearchQuery from limiter import get_limiter,", "matched the search query.\"\"\" response = self.__search('{}/certificate/search/count'.format(self.base_url), query) response.check_errors() return", ".search_query import SearchQuery from limiter import get_limiter, limit class DomainsSearchResults:", "query, limit, offset) response.check_errors() cve_list = list() for r in", "r in response.data.items: domains.append(Domain.from_dict(r)) return DomainsSearchResults(domains, search_id=response.data.search_id) def get_ip_details(self, ip:", "getting only the first 10,000 results. \"\"\" response = self.__search('{}/ip/search'.format(self.base_url),", "= 0) -> CVESearchResults: \"\"\" Returns a list of CVE", "dns_type, domain_name: str, limit: int = MAX_LIMIT, offset: int =", "query) response.check_errors() return response.data.total_items def search_autonomous_systems(self, query: SearchQuery, limit: int", "List[Email] = results class HistoricalDNSSearchResults: def __init__(self, results: List[DNSHistoricalRecord], total_items:", "\"\"\"Returns the precise number of search results that matched the", "return CertificatesSearchResults(certs, response.data.total_items) def count_certificate(self, query: SearchQuery) -> int: \"\"\"Returns", "Optional[str] = search_id self.results: List[DNSHistoricalRecord] = results class HistoricalWHOISSearchResults: def", "Spyse Pro subscription \"\"\" response = self.__scroll('{}/email/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors()", "certs.append(Certificate.from_dict(r)) return CertificatesSearchResults(certs, search_id=response.data.search_id) def get_cve_details(self, cve_id: str) -> Optional[CVE]:", "= self.__scroll('{}/as/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() as_list = 
list() for r", "def get_autonomous_system_details(self, asn: int) -> Optional[AS]: \"\"\"Returns details about an", "fingerprint_sha256: str) -> Optional[Certificate]: \"\"\"Returns details about SSL/TLS certificate\"\"\" response", "that matched the search query.\"\"\" response = self.__search('{}/cve/email/count'.format(self.base_url), query) response.check_errors()", "\"\"\"Returns details about email\"\"\" response = self.__get('{}/email/{}'.format(self.base_url, email)) response.check_errors() return", "search query.\"\"\" response = self.__search('{}/cve/email/count'.format(self.base_url), query) response.check_errors() return response.data.total_items def", "= self.__search('{}/cve/email/count'.format(self.base_url), query) response.check_errors() return response.data.total_items def scroll_emails(self, query: SearchQuery,", "class AutonomousSystemsSearchResults: def __init__(self, results: List[AS], total_items: int = None,", "query.\"\"\" response = self.__search('{}/domain/search/count'.format(self.base_url), query) response.check_errors() return response.data.total_items def scroll_domains(self,", "scroll_id) response.check_errors() certs = list() for r in response.data.items: certs.append(Certificate.from_dict(r))", "= None) -> IPSearchResults: \"\"\" Returns a list of IPv4", "in response.data.items: emails.append(Email.from_dict(r)) return EmailsSearchResults(emails, search_id=response.data.search_id) def search_historical_dns(self, dns_type, domain_name:", "offset: int = 0) -> DomainsSearchResults: \"\"\" Returns a list", "return Email.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None def search_emails(self,", "query, limit, offset) response.check_errors() emails = list() for r in", "from .response import Response from .search_query import SearchQuery from limiter", "else None def search_cve(self, query: SearchQuery, limit: int = MAX_LIMIT,", "List[Certificate], total_items: int = None, search_id: str = None): 
self.total_items:", "= list() for r in response.data.items: cve_list.append(CVE.from_dict(r)) return CVESearchResults(cve_list, response.data.total_items)", "search_emails(self, query: SearchQuery, limit: int = MAX_LIMIT, offset: int =", "an autonomous system by AS number.\"\"\" response = self.__get('{}/as/{}'.format(self.base_url, asn))", "matched the search query.\"\"\" response = self.__search('{}/as/search/count'.format(self.base_url), query) response.check_errors() return", "return response.data.total_items def scroll_ip(self, query: SearchQuery, scroll_id: str = None)", "len(response.data.items) > 0 else None def count_autonomous_systems(self, query: SearchQuery) ->", "the first 10,000 results. \"\"\" response = self.__search('{}/as/search'.format(self.base_url), query, limit,", "search results that matched the search query.\"\"\" response = self.__search('{}/cve/email/count'.format(self.base_url),", "that matched the search query. Allows getting all the results", "\"\"\" response = self.__get(f'{self.base_url}/history/dns/{dns_type}/{domain_name}?limit={limit}&offset={offset}') response.check_errors() records = list() for r", "a list of domains that matched the search query. 
Allows", "0) -> CertificatesSearchResults: \"\"\" Returns a list of SSL/TLS certificate", "records = list() for r in response.data.items: records.append(WHOISHistoricalRecord.from_dict(r)) return HistoricalWHOISSearchResults(records,", "def __init__(self, results: List[WHOISHistoricalRecord], total_items: int = None, search_id: str", "__init__(self, results: List[AS], total_items: int = None, search_id: str =", "response.check_errors() return Account.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None def", "AutonomousSystemsSearchResults(as_list, search_id=response.data.search_id) def get_domain_details(self, domain_name: str) -> Optional[Domain]: \"\"\"Returns details", "str = None) -> IPSearchResults: \"\"\" Returns a list of", "response.check_errors() return Email.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None def", "MAX_LIMIT, offset: int = 0) -> EmailsSearchResults: \"\"\" Returns a", "def search_historical_dns(self, dns_type, domain_name: str, limit: int = MAX_LIMIT, offset:", "search_id self.results: List[Certificate] = results class CVESearchResults: def __init__(self, results:", "Account.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None def get_autonomous_system_details(self, asn:", "for r in response.data.items: cve_list.append(CVE.from_dict(r)) return CVESearchResults(cve_list, search_id=response.data.search_id) def get_email_details(self,", "scroll_id) response.check_errors() ips = list() for r in response.data.items: ips.append(IP.from_dict(r))", "= search_id self.results: List[CVE] = results class EmailsSearchResults: def __init__(self,", "Spyse Pro subscription \"\"\" response = self.__scroll('{}/certificate/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors()", "getting only the first 10,000 results. 
\"\"\" response = self.__search('{}/cve/search'.format(self.base_url),", "response.data.total_items) def count_domains(self, query: SearchQuery): \"\"\"Returns the precise number of", "records about the given domain name. \"\"\" response = self.__get(f'{self.base_url}/history/dns/{dns_type}/{domain_name}?limit={limit}&offset={offset}')", "0 else None def get_autonomous_system_details(self, asn: int) -> Optional[AS]: \"\"\"Returns", "in response.data.items: domains.append(Domain.from_dict(r)) return DomainsSearchResults(domains, response.data.total_items) def count_domains(self, query: SearchQuery):", "count_cve(self, query: SearchQuery) -> int: \"\"\"Returns the precise number of", "a list of autonomous systems that matched the search query.", "results that matched the search query.\"\"\" response = self.__search('{}/ip/search/count'.format(self.base_url), query)", "DEFAULT_BASE_URL = 'https://api.spyse.com/v4/data' MAX_LIMIT = 100 SEARCH_RESULTS_LIMIT = 10000 RATE_LIMIT_FRAME_IN_SECONDS", "list() for r in response.data.items: as_list.append(AS.from_dict(r)) return AutonomousSystemsSearchResults(as_list, search_id=response.data.search_id) def", "query, limit, offset) response.check_errors() as_list = list() for r in", "class EmailsSearchResults: def __init__(self, results: List[Email], total_items: int = None,", "return IP.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None def search_ip(self,", "from .models import AS, Domain, IP, CVE, Account, Certificate, Email,", "List[WHOISHistoricalRecord] = results class Client: DEFAULT_BASE_URL = 'https://api.spyse.com/v4/data' MAX_LIMIT =", "AS.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None def count_autonomous_systems(self, query:", "query, limit, offset) response.check_errors() domains = list() for r in", "of CVEs that matched the search query. 
Allows getting all", "search results that matched the search query.\"\"\" response = self.__search('{}/as/search/count'.format(self.base_url),", "response = self.__get('{}/domain/{}'.format(self.base_url, domain_name)) response.check_errors() return Domain.from_dict(response.data.items[0]) if len(response.data.items) >", "response = self.__scroll('{}/certificate/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() certs = list() for", "def search_ip(self, query: SearchQuery, limit: int = MAX_LIMIT, offset: int", "number.\"\"\" response = self.__get('{}/as/{}'.format(self.base_url, asn)) response.check_errors() return AS.from_dict(response.data.items[0]) if len(response.data.items)", "r in response.data.items: emails.append(Email.from_dict(r)) return EmailsSearchResults(emails, search_id=response.data.search_id) def search_historical_dns(self, dns_type,", "emails.append(Email.from_dict(r)) return EmailsSearchResults(emails, search_id=response.data.search_id) def search_historical_dns(self, dns_type, domain_name: str, limit:", "import SearchQuery from limiter import get_limiter, limit class DomainsSearchResults: def", "self.results: List[WHOISHistoricalRecord] = results class Client: DEFAULT_BASE_URL = 'https://api.spyse.com/v4/data' MAX_LIMIT", "set_user_agent(self, s: str): self.session.headers.update({'User-Agent': s}) def get_quotas(self) -> Optional[Account]: \"\"\"Returns", "from .search_query import SearchQuery from limiter import get_limiter, limit class", "= self.__search('{}/domain/search'.format(self.base_url), query, limit, offset) response.check_errors() domains = list() for", "self.__scroll('{}/cve/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() cve_list = list() for r in", "def __get(self, endpoint: str) -> Response: with limit(self.limiter, consume=1): return", "in response.data.items: ips.append(IP.from_dict(r)) return IPSearchResults(ips, response.data.total_items) def count_ip(self, query: 
SearchQuery)", "getting only the first 10,000 results. \"\"\" response = self.__search('{}/certificate/search'.format(self.base_url),", "\"\"\" Returns a list of SSL/TLS certificates that matched the", "results that matched the search query.\"\"\" response = self.__search('{}/certificate/search/count'.format(self.base_url), query)", "a list of emails that matched the search query. Allows", "name. \"\"\" response = self.__get(f'{self.base_url}/history/dns/{dns_type}/{domain_name}?limit={limit}&offset={offset}') response.check_errors() records = list() for", "int = 0) -> CertificatesSearchResults: \"\"\" Returns a list of", "SSL/TLS certificate\"\"\" response = self.__get('{}/certificate/{}'.format(self.base_url, fingerprint_sha256)) response.check_errors() return Certificate.from_dict(response.data.items[0]) if", "only the first 10,000 results. \"\"\" response = self.__search('{}/certificate/search'.format(self.base_url), query,", "0) -> DomainsSearchResults: \"\"\" Returns a list of domains that", "= self.__search('{}/certificate/search/count'.format(self.base_url), query) response.check_errors() return response.data.total_items def scroll_certificate(self, query: SearchQuery,", "scroll_id: str = None) -> CVESearchResults: \"\"\" Returns a list", "str) -> Optional[IP]: \"\"\"Returns details about IP\"\"\" response = self.__get('{}/ip/{}'.format(self.base_url,", "query: SearchQuery, scroll_id: str = None) -> CertificatesSearchResults: \"\"\" Returns", "Spyse Pro subscription \"\"\" response = self.__scroll('{}/domain/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors()", "SearchQuery, search_id: Optional[str] = None) -> Response: with limit(self.limiter, consume=1):", "r in response.data.items: as_list.append(AS.from_dict(r)) return AutonomousSystemsSearchResults(as_list, response.data.total_items) def scroll_autonomous_systems(self, query:", "str = None) -> EmailsSearchResults: \"\"\" Returns a list of", "EmailsSearchResults(emails, 
search_id=response.data.search_id) def search_historical_dns(self, dns_type, domain_name: str, limit: int =", "r in response.data.items: ips.append(IP.from_dict(r)) return IPSearchResults(ips, response.data.total_items) def count_ip(self, query:", "r in response.data.items: records.append(DNSHistoricalRecord.from_dict(r)) return HistoricalDNSSearchResults(records, response.data.total_items) def search_historical_whois(self, domain_name:", "= results class HistoricalWHOISSearchResults: def __init__(self, results: List[WHOISHistoricalRecord], total_items: int", "the given domain name. \"\"\" response = self.__get(f'{self.base_url}/history/domain-whois/{domain_name}?limit={limit}&offset={offset}') response.check_errors() records", "len(response.data.items) > 0 else None def search_domains(self, query: SearchQuery, limit:", "__init__(self, results: List[IP], total_items: int = None, search_id: str =", "-> Optional[Account]: \"\"\"Returns details about your account quotas.\"\"\" response =", "list() for r in response.data.items: as_list.append(AS.from_dict(r)) return AutonomousSystemsSearchResults(as_list, response.data.total_items) def", "for r in response.data.items: cve_list.append(CVE.from_dict(r)) return CVESearchResults(cve_list, response.data.total_items) def count_cve(self,", "10,000 results. 
\"\"\" response = self.__search('{}/email/search'.format(self.base_url), query, limit, offset) response.check_errors()", "HistoricalWHOISSearchResults: def __init__(self, results: List[WHOISHistoricalRecord], total_items: int = None, search_id:", "query.get(), \"limit\": lim, \"offset\": offset}).json()) def __scroll(self, endpoint, query: SearchQuery,", "def __init__(self, results: List[CVE], total_items: int = None, search_id: str", "None) -> CertificatesSearchResults: \"\"\" Returns a list of SSL/TLS certificates", "\"\"\" Returns a list of CVE that matched the search", "self.__get(f'{self.base_url}/history/dns/{dns_type}/{domain_name}?limit={limit}&offset={offset}') response.check_errors() records = list() for r in response.data.items: records.append(DNSHistoricalRecord.from_dict(r))", "self.search_id: Optional[str] = search_id self.results: List[IP] = results class CertificatesSearchResults:", "List, Optional from .models import AS, Domain, IP, CVE, Account,", "-> CertificatesSearchResults: \"\"\" Returns a list of SSL/TLS certificate hosts", "None def search_ip(self, query: SearchQuery, limit: int = MAX_LIMIT, offset:", "class DomainsSearchResults: def __init__(self, results: List[Domain], total_items: int = None,", "get_limiter(rate=self.RATE_LIMIT_FRAME_IN_SECONDS, capacity=1) self.account = self.get_quotas() self.limiter._capacity = self.account.requests_rate_limit def __get(self,", "response.data.total_items def scroll_domains(self, query: SearchQuery, scroll_id: str = None) ->", "only the first 10,000 results. \"\"\" response = self.__search('{}/domain/search'.format(self.base_url), query,", "response.check_errors() as_list = list() for r in response.data.items: as_list.append(AS.from_dict(r)) return", "with limit(self.limiter, consume=1): if search_id: body = {\"search_params\": query.get(), \"search_id\":", "None) -> AutonomousSystemsSearchResults: \"\"\" Returns a list of autonomous systems", "matched the search query. 
Allows getting all the results but", "IPv4 hosts that matched the search query. Allows getting only", "details about CVE\"\"\" response = self.__get('{}/cve/{}'.format(self.base_url, cve_id)) response.check_errors() return CVE.from_dict(response.data.items[0])", "0) -> Response: with limit(self.limiter, consume=1): return Response.from_dict(self.session.post(endpoint, json={\"search_params\": query.get(),", "search_id=response.data.search_id) def get_certificate_details(self, fingerprint_sha256: str) -> Optional[Certificate]: \"\"\"Returns details about", "emails that matched the search query. Allows getting only the", "in response.data.items: records.append(DNSHistoricalRecord.from_dict(r)) return HistoricalDNSSearchResults(records, response.data.total_items) def search_historical_whois(self, domain_name: str,", "= list() for r in response.data.items: emails.append(Email.from_dict(r)) return EmailsSearchResults(emails, search_id=response.data.search_id)", "autonomous systems that matched the search query. 
Allows getting all", "CVESearchResults: \"\"\" Returns a list of CVE that matched the", "offset) response.check_errors() cve_list = list() for r in response.data.items: cve_list.append(CVE.from_dict(r))", "+ api_token}) self.session.headers.update({'User-Agent': 'spyse-python'}) self.base_url = base_url self.limiter = get_limiter(rate=self.RATE_LIMIT_FRAME_IN_SECONDS,", "return EmailsSearchResults(emails, search_id=response.data.search_id) def search_historical_dns(self, dns_type, domain_name: str, limit: int", "response.data.total_items) def count_cve(self, query: SearchQuery) -> int: \"\"\"Returns the precise", "response = self.__get('{}/cve/{}'.format(self.base_url, cve_id)) response.check_errors() return CVE.from_dict(response.data.items[0]) if len(response.data.items) >", "CVESearchResults(cve_list, search_id=response.data.search_id) def get_email_details(self, email: str) -> Optional[Email]: \"\"\"Returns details", "body = {\"search_params\": query.get(), \"search_id\": search_id} else: body = {\"search_params\":", "IP, CVE, Account, Certificate, Email, DNSHistoricalRecord, WHOISHistoricalRecord from .response import", "= total_items self.search_id: Optional[str] = search_id self.results: List[WHOISHistoricalRecord] = results", "requests from typing import List, Optional from .models import AS,", "MAX_LIMIT, offset: int = 0) -> Response: with limit(self.limiter, consume=1):", "return response.data.total_items def search_autonomous_systems(self, query: SearchQuery, limit: int = MAX_LIMIT,", "requires a Spyse Pro subscription \"\"\" response = self.__scroll('{}/domain/scroll/search'.format(self.base_url), query,", "requires a Spyse Pro subscription \"\"\" response = self.__scroll('{}/ip/scroll/search'.format(self.base_url), query,", "in response.data.items: certs.append(Certificate.from_dict(r)) return CertificatesSearchResults(certs, response.data.total_items) def count_certificate(self, query: SearchQuery)", 
"self.__search('{}/certificate/search/count'.format(self.base_url), query) response.check_errors() return response.data.total_items def scroll_certificate(self, query: SearchQuery, scroll_id:", "certificates that matched the search query. Allows getting all the", "self.__get('{}/account/quota'.format(self.base_url)) response.check_errors() return Account.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None", "quotas.\"\"\" response = self.__get('{}/account/quota'.format(self.base_url)) response.check_errors() return Account.from_dict(response.data.items[0]) if len(response.data.items) >", "-> EmailsSearchResults: \"\"\" Returns a list of emails that matched", "in response.data.items: ips.append(IP.from_dict(r)) return IPSearchResults(ips, search_id=response.data.search_id) def get_certificate_details(self, fingerprint_sha256: str)", "matched the search query.\"\"\" response = self.__search('{}/cve/email/count'.format(self.base_url), query) response.check_errors() return", "scroll_certificate(self, query: SearchQuery, scroll_id: str = None) -> CertificatesSearchResults: \"\"\"", "only the first 10,000 results. \"\"\" response = self.__search('{}/email/search'.format(self.base_url), query,", "if len(response.data.items) > 0 else None def search_ip(self, query: SearchQuery,", "Response: with limit(self.limiter, consume=1): return Response.from_dict(self.session.post(endpoint, json={\"search_params\": query.get(), \"limit\": lim,", "response = self.__get('{}/account/quota'.format(self.base_url)) response.check_errors() return Account.from_dict(response.data.items[0]) if len(response.data.items) > 0", "of SSL/TLS certificates that matched the search query. 
Allows getting", "matched the search query.\"\"\" response = self.__search('{}/ip/search/count'.format(self.base_url), query) response.check_errors() return", "response.data.total_items def scroll_certificate(self, query: SearchQuery, scroll_id: str = None) ->", "response = self.__scroll('{}/ip/scroll/search'.format(self.base_url), query, scroll_id) response.check_errors() ips = list() for", "0) -> CVESearchResults: \"\"\" Returns a list of CVE that" ]
[ "matplotlib.colors import ListedColormap cm3 = ListedColormap(['#0000aa', '#ff2020', '#50ff50']) cm2 =", "ListedColormap cm3 = ListedColormap(['#0000aa', '#ff2020', '#50ff50']) cm2 = ListedColormap(['#0000aa', '#ff2020'])", "import ListedColormap cm3 = ListedColormap(['#0000aa', '#ff2020', '#50ff50']) cm2 = ListedColormap(['#0000aa',", "from matplotlib.colors import ListedColormap cm3 = ListedColormap(['#0000aa', '#ff2020', '#50ff50']) cm2" ]
[ "# note that we set the 404 status explicitly return", "status explicitly return render_template('403.html.j2'), 403 def internal_error(error): app.logger.error('Server Error: %s',", "explicitly return render_template('404.html.j2'), 404 def page_not_allowed(e): # note that we", "(error)) return render_template('500.html.j2'), 500 def unhandled_exception(e): app.logger.error('Unhandled Exception: %s', (e))", "note that we set the 403 status explicitly return render_template('403.html.j2'),", "# Error Pages ---------------------------------------------------------------- def page_not_found(e): # note that we", "%s', (error)) return render_template('500.html.j2'), 500 def unhandled_exception(e): app.logger.error('Unhandled Exception: %s',", "<filename>Applications/FlaskApp/errorpages.py from flask import render_template # Error Pages ---------------------------------------------------------------- def", "render_template('404.html.j2'), 404 def page_not_allowed(e): # note that we set the", "def page_not_allowed(e): # note that we set the 403 status", "that we set the 403 status explicitly return render_template('403.html.j2'), 403", "Error Pages ---------------------------------------------------------------- def page_not_found(e): # note that we set", "render_template('500.html.j2'), 500 def unhandled_exception(e): app.logger.error('Unhandled Exception: %s', (e)) return render_template('500.html.j2'),", "Pages ---------------------------------------------------------------- def page_not_found(e): # note that we set the", "we set the 404 status explicitly return render_template('404.html.j2'), 404 def", "the 404 status explicitly return render_template('404.html.j2'), 404 def page_not_allowed(e): #", "the 403 status explicitly return render_template('403.html.j2'), 403 def internal_error(error): app.logger.error('Server", "Error: %s', (error)) return render_template('500.html.j2'), 500 def unhandled_exception(e): app.logger.error('Unhandled Exception:", "set the 404 
status explicitly return render_template('404.html.j2'), 404 def page_not_allowed(e):", "from flask import render_template # Error Pages ---------------------------------------------------------------- def page_not_found(e):", "404 status explicitly return render_template('404.html.j2'), 404 def page_not_allowed(e): # note", "render_template # Error Pages ---------------------------------------------------------------- def page_not_found(e): # note that", "note that we set the 404 status explicitly return render_template('404.html.j2'),", "we set the 403 status explicitly return render_template('403.html.j2'), 403 def", "403 def internal_error(error): app.logger.error('Server Error: %s', (error)) return render_template('500.html.j2'), 500", "403 status explicitly return render_template('403.html.j2'), 403 def internal_error(error): app.logger.error('Server Error:", "return render_template('500.html.j2'), 500 def unhandled_exception(e): app.logger.error('Unhandled Exception: %s', (e)) return", "flask import render_template # Error Pages ---------------------------------------------------------------- def page_not_found(e): #", "explicitly return render_template('403.html.j2'), 403 def internal_error(error): app.logger.error('Server Error: %s', (error))", "500 def unhandled_exception(e): app.logger.error('Unhandled Exception: %s', (e)) return render_template('500.html.j2'), 501", "import render_template # Error Pages ---------------------------------------------------------------- def page_not_found(e): # note", "---------------------------------------------------------------- def page_not_found(e): # note that we set the 404", "status explicitly return render_template('404.html.j2'), 404 def page_not_allowed(e): # note that", "def page_not_found(e): # note that we set the 404 status", "page_not_found(e): # note that we set the 404 status explicitly", "404 def page_not_allowed(e): # note that we set the 403", "return render_template('404.html.j2'), 404 def 
page_not_allowed(e): # note that we set", "return render_template('403.html.j2'), 403 def internal_error(error): app.logger.error('Server Error: %s', (error)) return", "set the 403 status explicitly return render_template('403.html.j2'), 403 def internal_error(error):", "that we set the 404 status explicitly return render_template('404.html.j2'), 404", "page_not_allowed(e): # note that we set the 403 status explicitly", "# note that we set the 403 status explicitly return", "def internal_error(error): app.logger.error('Server Error: %s', (error)) return render_template('500.html.j2'), 500 def", "render_template('403.html.j2'), 403 def internal_error(error): app.logger.error('Server Error: %s', (error)) return render_template('500.html.j2'),", "internal_error(error): app.logger.error('Server Error: %s', (error)) return render_template('500.html.j2'), 500 def unhandled_exception(e):", "app.logger.error('Server Error: %s', (error)) return render_template('500.html.j2'), 500 def unhandled_exception(e): app.logger.error('Unhandled" ]
[ "import gym import ctypes import numpy as np from gym", "if seed is None: return [self.env.get_seed()] else: if not isinstance(seed,", "def render(self, mode='human'): return None def reset(self): self.state = np.array(self.env.reset())", "= BlackJackCPP(natural) self.action_space = spaces.Discrete(2) self.observation_space = spaces.Tuple(( spaces.Discrete(32), spaces.Discrete(11),", "natural def seed(self, seed=None): if seed is None: return [self.env.get_seed()]", "spaces.Discrete(2) self.observation_space = spaces.Tuple(( spaces.Discrete(32), spaces.Discrete(11), spaces.Discrete(2) )) self.state =", "import ctypes import numpy as np from gym import spaces", "from gym import spaces class BlackJack(gym.Env): def __init__(self, natural=False): self.env", "= spaces.Discrete(2) self.observation_space = spaces.Tuple(( spaces.Discrete(32), spaces.Discrete(11), spaces.Discrete(2) )) self.state", "def step(self, action): assert self.action_space.contains(action) state, reward, done = self.env.step(action)", "from .._BlackJack import BlackJackCPP import gym import ctypes import numpy", ".._BlackJack import BlackJackCPP import gym import ctypes import numpy as", "ctypes.c_uint32(seed).value self.env.set_seed(seed) return [seed] def step(self, action): assert self.action_space.contains(action) state,", "= np.array(state) return self.state, reward, done, {} def render(self, mode='human'):", "__init__(self, natural=False): self.env = BlackJackCPP(natural) self.action_space = spaces.Discrete(2) self.observation_space =", "if not isinstance(seed, ctypes.c_uint32): seed = ctypes.c_uint32(seed).value self.env.set_seed(seed) return [seed]", "import spaces class BlackJack(gym.Env): def __init__(self, natural=False): self.env = BlackJackCPP(natural)", "not isinstance(seed, ctypes.c_uint32): seed = ctypes.c_uint32(seed).value self.env.set_seed(seed) return [seed] def", "class BlackJack(gym.Env): def __init__(self, natural=False): self.env = BlackJackCPP(natural) self.action_space 
=", "[self.env.get_seed()] else: if not isinstance(seed, ctypes.c_uint32): seed = ctypes.c_uint32(seed).value self.env.set_seed(seed)", "spaces.Discrete(11), spaces.Discrete(2) )) self.state = None self.natural = natural def", "reward, done, {} def render(self, mode='human'): return None def reset(self):", "def seed(self, seed=None): if seed is None: return [self.env.get_seed()] else:", "seed = ctypes.c_uint32(seed).value self.env.set_seed(seed) return [seed] def step(self, action): assert", "as np from gym import spaces class BlackJack(gym.Env): def __init__(self,", "action): assert self.action_space.contains(action) state, reward, done = self.env.step(action) self.state =", "{} def render(self, mode='human'): return None def reset(self): self.state =", "= self.env.step(action) self.state = np.array(state) return self.state, reward, done, {}", "done = self.env.step(action) self.state = np.array(state) return self.state, reward, done,", ")) self.state = None self.natural = natural def seed(self, seed=None):", "ctypes import numpy as np from gym import spaces class", "state, reward, done = self.env.step(action) self.state = np.array(state) return self.state,", "spaces.Discrete(32), spaces.Discrete(11), spaces.Discrete(2) )) self.state = None self.natural = natural", "None: return [self.env.get_seed()] else: if not isinstance(seed, ctypes.c_uint32): seed =", "self.env = BlackJackCPP(natural) self.action_space = spaces.Discrete(2) self.observation_space = spaces.Tuple(( spaces.Discrete(32),", "mode='human'): return None def reset(self): self.state = np.array(self.env.reset()) return self.state", "render(self, mode='human'): return None def reset(self): self.state = np.array(self.env.reset()) return", "BlackJackCPP import gym import ctypes import numpy as np from", "self.state = None self.natural = natural def seed(self, seed=None): if", "np.array(state) return self.state, reward, done, {} def render(self, mode='human'): return", "= spaces.Tuple(( spaces.Discrete(32), 
spaces.Discrete(11), spaces.Discrete(2) )) self.state = None self.natural", "return [seed] def step(self, action): assert self.action_space.contains(action) state, reward, done", "numpy as np from gym import spaces class BlackJack(gym.Env): def", "done, {} def render(self, mode='human'): return None def reset(self): self.state", "seed(self, seed=None): if seed is None: return [self.env.get_seed()] else: if", "reward, done = self.env.step(action) self.state = np.array(state) return self.state, reward,", "np from gym import spaces class BlackJack(gym.Env): def __init__(self, natural=False):", "import numpy as np from gym import spaces class BlackJack(gym.Env):", "self.action_space.contains(action) state, reward, done = self.env.step(action) self.state = np.array(state) return", "= ctypes.c_uint32(seed).value self.env.set_seed(seed) return [seed] def step(self, action): assert self.action_space.contains(action)", "natural=False): self.env = BlackJackCPP(natural) self.action_space = spaces.Discrete(2) self.observation_space = spaces.Tuple((", "self.env.set_seed(seed) return [seed] def step(self, action): assert self.action_space.contains(action) state, reward,", "[seed] def step(self, action): assert self.action_space.contains(action) state, reward, done =", "return [self.env.get_seed()] else: if not isinstance(seed, ctypes.c_uint32): seed = ctypes.c_uint32(seed).value", "BlackJack(gym.Env): def __init__(self, natural=False): self.env = BlackJackCPP(natural) self.action_space = spaces.Discrete(2)", "isinstance(seed, ctypes.c_uint32): seed = ctypes.c_uint32(seed).value self.env.set_seed(seed) return [seed] def step(self,", "assert self.action_space.contains(action) state, reward, done = self.env.step(action) self.state = np.array(state)", "None self.natural = natural def seed(self, seed=None): if seed is", "else: if not isinstance(seed, ctypes.c_uint32): seed = ctypes.c_uint32(seed).value self.env.set_seed(seed) return", "self.state, reward, done, {} def render(self, 
mode='human'): return None def", "step(self, action): assert self.action_space.contains(action) state, reward, done = self.env.step(action) self.state", "= None self.natural = natural def seed(self, seed=None): if seed", "gym import ctypes import numpy as np from gym import", "gym import spaces class BlackJack(gym.Env): def __init__(self, natural=False): self.env =", "self.state = np.array(state) return self.state, reward, done, {} def render(self,", "spaces.Tuple(( spaces.Discrete(32), spaces.Discrete(11), spaces.Discrete(2) )) self.state = None self.natural =", "seed=None): if seed is None: return [self.env.get_seed()] else: if not", "spaces class BlackJack(gym.Env): def __init__(self, natural=False): self.env = BlackJackCPP(natural) self.action_space", "self.action_space = spaces.Discrete(2) self.observation_space = spaces.Tuple(( spaces.Discrete(32), spaces.Discrete(11), spaces.Discrete(2) ))", "self.env.step(action) self.state = np.array(state) return self.state, reward, done, {} def", "def __init__(self, natural=False): self.env = BlackJackCPP(natural) self.action_space = spaces.Discrete(2) self.observation_space", "seed is None: return [self.env.get_seed()] else: if not isinstance(seed, ctypes.c_uint32):", "import BlackJackCPP import gym import ctypes import numpy as np", "is None: return [self.env.get_seed()] else: if not isinstance(seed, ctypes.c_uint32): seed", "BlackJackCPP(natural) self.action_space = spaces.Discrete(2) self.observation_space = spaces.Tuple(( spaces.Discrete(32), spaces.Discrete(11), spaces.Discrete(2)", "= natural def seed(self, seed=None): if seed is None: return", "return self.state, reward, done, {} def render(self, mode='human'): return None", "ctypes.c_uint32): seed = ctypes.c_uint32(seed).value self.env.set_seed(seed) return [seed] def step(self, action):", "self.observation_space = spaces.Tuple(( spaces.Discrete(32), spaces.Discrete(11), spaces.Discrete(2) )) self.state = None", "spaces.Discrete(2) )) self.state = None 
self.natural = natural def seed(self,", "self.natural = natural def seed(self, seed=None): if seed is None:" ]
[ "root_flatten = Flatten(pipeline=self.pipeline) pbegin = pvalue.PBegin(self.pipeline) pcoll_read = pbegin |", "root_flatten]) pbegin_consumers = [ c.transform for c in self.visitor.value_to_consumers[pbegin] ]", "SplitNumbersFn(DoFn): def process(self, element): if element < 0: yield pvalue.TaggedOutput('tag_negative',", "from apache_beam.transforms import Create from apache_beam.transforms import DoFn from apache_beam.transforms", "[str(t) for t in v_out_of_order.value_to_consumers[k]] for k in v_out_of_order.value_to_consumers }", "c in self.visitor.value_to_consumers[pbegin] ] self.assertCountEqual(pbegin_consumers, [root_read]) self.assertEqual(len(self.visitor.step_names), 3) def test_side_inputs(self):", "Flatten from apache_beam.transforms import ParDo # Disable frequent lint warning", "tmp p = beam.Pipeline().from_runner_api( out_of_order_graph, runner='BundleBasedDirectRunner', options=None) v_out_of_order = ConsumerTrackingPipelineVisitor()", "2.0 # (the \"License\"); you may not use this file", "[] | 'flatten' >> root_flatten self.pipeline.visit(self.visitor) root_transforms = [t.transform for", "+ expanded CoGBK self.assertEqual(len(self.visitor.views), 0) def test_visitor_not_sorted(self): p = Pipeline()", "# pylint: disable=expression-not-assigned from apache_beam.testing.test_stream import TestStream p | TestStream().add_elements([''])", "self.assertCountEqual(pbegin_consumers, [root_read]) self.assertEqual(len(self.visitor.step_names), 3) def test_side_inputs(self): class SplitNumbersFn(DoFn): def process(self,", "= pvalue.PBegin(self.pipeline) pcoll_read = pbegin | 'read' >> root_read pcoll_read", "consumer_tracking_pipeline_visitor.\"\"\" # pytype: skip-file import logging import unittest import apache_beam", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "lint warning due to pipe operator for chaining transforms. #", "pipe operator for chaining transforms. 
# pylint: disable=expression-not-assigned # pylint:", "test_side_inputs(self): class SplitNumbersFn(DoFn): def process(self, element): if element < 0:", "apache_beam as beam from apache_beam import pvalue from apache_beam.pipeline import", "in self.visitor.root_transforms] self.assertEqual(len(root_transforms), 2) self.assertGreater( len(self.visitor.step_names), 3) # 2 creates", "equal. out_of_order_labels = { str(k): [str(t) for t in v_out_of_order.value_to_consumers[k]]", "= result _process_numbers(positive, AsList(negative)) self.pipeline.visit(self.visitor) root_transforms = [t.transform for t", "k in v_original.value_to_consumers } self.assertDictEqual(out_of_order_labels, original_labels) if __name__ == '__main__':", "| 'read' >> root_read | ParDo(SplitNumbersFn()).with_outputs('tag_negative', main='positive')) positive, negative =", "to string to assert they are equal. out_of_order_labels = {", "root = out_of_order_graph.components.transforms[root_id] tmp = root.subtransforms[0] root.subtransforms[0] = root.subtransforms[1] root.subtransforms[1]", "disable=pointless-statement class ConsumerTrackingPipelineVisitorTest(unittest.TestCase): def setUp(self): self.pipeline = Pipeline(DirectRunner()) self.visitor =", "t in v_out_of_order.value_to_consumers[k]] for k in v_out_of_order.value_to_consumers } original_labels =", "process(self, element, negatives): yield element def _process_numbers(pcoll, negatives): first_output =", "emails = self.pipeline | 'email' >> Create([('joe', '<EMAIL>')]) phones =", "_process_numbers(pcoll, negatives): first_output = ( pcoll | 'process numbers step", "p = Pipeline() # pylint: disable=expression-not-assigned from apache_beam.testing.test_stream import TestStream", "= self.pipeline | 'phone' >> Create([('mary', '111-222-3333')]) {'emails': emails, 'phones':", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "negatives): yield element def _process_numbers(pcoll, negatives): first_output = ( 
pcoll", "= ( first_output | 'process numbers step 2' >> ParDo(ProcessNumbersFn(),", "from apache_beam.transforms import ParDo # Disable frequent lint warning due", "\"\"\"Tests for consumer_tracking_pipeline_visitor.\"\"\" # pytype: skip-file import logging import unittest", "ParDo # Disable frequent lint warning due to pipe operator", "transforms. # pylint: disable=expression-not-assigned # pylint: disable=pointless-statement class ConsumerTrackingPipelineVisitorTest(unittest.TestCase): def", "p | TestStream().add_elements(['']) | beam.Map(lambda _: _) original_graph = p.to_runner_api(return_context=False)", "under the License is distributed on an \"AS IS\" BASIS,", "for t in self.visitor.root_transforms] self.assertEqual(root_transforms, [root_read]) self.assertEqual(len(self.visitor.step_names), 5) self.assertEqual(len(self.visitor.views), 1)", "[t.transform for t in self.visitor.root_transforms] self.assertEqual(len(root_transforms), 2) self.assertGreater( len(self.visitor.step_names), 3)", "to pipe operator for chaining transforms. # pylint: disable=expression-not-assigned #", "License for the specific language governing permissions and # limitations", "# 2 creates + expanded CoGBK self.assertEqual(len(self.visitor.views), 0) def test_visitor_not_sorted(self):", "distributed with # this work for additional information regarding copyright", "AsList(negative)) self.pipeline.visit(self.visitor) root_transforms = [t.transform for t in self.visitor.root_transforms] self.assertEqual(root_transforms,", "information regarding copyright ownership. # The ASF licenses this file", "{ str(k): [str(t) for t in v_out_of_order.value_to_consumers[k]] for k in", "for t in self.visitor.root_transforms] self.assertCountEqual(root_transforms, [root_read, root_flatten]) pbegin_consumers = [", "Convert to string to assert they are equal. 
out_of_order_labels =", "class ConsumerTrackingPipelineVisitorTest(unittest.TestCase): def setUp(self): self.pipeline = Pipeline(DirectRunner()) self.visitor = ConsumerTrackingPipelineVisitor()", "out_of_order_graph, runner='BundleBasedDirectRunner', options=None) v_out_of_order = ConsumerTrackingPipelineVisitor() p.visit(v_out_of_order) p = beam.Pipeline().from_runner_api(", "= out_of_order_graph.root_transform_ids[0] root = out_of_order_graph.components.transforms[root_id] tmp = root.subtransforms[0] root.subtransforms[0] =", "for k in v_out_of_order.value_to_consumers } original_labels = { str(k): [str(t)", "numbers step 1' >> ParDo(ProcessNumbersFn(), negatives)) second_output = ( first_output", "_) original_graph = p.to_runner_api(return_context=False) out_of_order_graph = p.to_runner_api(return_context=False) root_id = out_of_order_graph.root_transform_ids[0]", "numbers step 2' >> ParDo(ProcessNumbersFn(), negatives)) output_pc = ((first_output, second_output)", "pvalue from apache_beam.pipeline import Pipeline from apache_beam.pvalue import AsList from", "TestStream().add_elements(['']) | beam.Map(lambda _: _) original_graph = p.to_runner_api(return_context=False) out_of_order_graph =", "1' >> ParDo(ProcessNumbersFn(), negatives)) second_output = ( first_output | 'process", "apache_beam.transforms import ParDo # Disable frequent lint warning due to", "def process(self, element, negatives): yield element def _process_numbers(pcoll, negatives): first_output", "element, negatives): yield element def _process_numbers(pcoll, negatives): first_output = (", "ownership. 
# The ASF licenses this file to You under", "software # distributed under the License is distributed on an", "options=None) v_out_of_order = ConsumerTrackingPipelineVisitor() p.visit(v_out_of_order) p = beam.Pipeline().from_runner_api( original_graph, runner='BundleBasedDirectRunner',", "with # this work for additional information regarding copyright ownership.", "pvalue.TaggedOutput('tag_negative', element) else: yield element class ProcessNumbersFn(DoFn): def process(self, element,", "# Convert to string to assert they are equal. out_of_order_labels", "main='positive')) positive, negative = result _process_numbers(positive, AsList(negative)) self.pipeline.visit(self.visitor) root_transforms =", "setUp(self): self.pipeline = Pipeline(DirectRunner()) self.visitor = ConsumerTrackingPipelineVisitor() def test_root_transforms(self): root_read", "in self.visitor.root_transforms] self.assertCountEqual(root_transforms, [root_read, root_flatten]) pbegin_consumers = [ c.transform for", "= root.subtransforms[1] root.subtransforms[1] = tmp p = beam.Pipeline().from_runner_api( out_of_order_graph, runner='BundleBasedDirectRunner',", "'phones': phones} | CoGroupByKey() self.pipeline.visit(self.visitor) root_transforms = [t.transform for t", "compliance with # the License. 
You may obtain a copy", "logging import unittest import apache_beam as beam from apache_beam import", "0) def test_visitor_not_sorted(self): p = Pipeline() # pylint: disable=expression-not-assigned from", "licenses this file to You under the Apache License, Version", "| 'flatten' >> root_flatten self.pipeline.visit(self.visitor) root_transforms = [t.transform for t", "beam.Flatten()) return output_pc root_read = beam.Impulse() result = ( self.pipeline", "expanded CoGBK self.assertEqual(len(self.visitor.views), 0) def test_visitor_not_sorted(self): p = Pipeline() #", "element def _process_numbers(pcoll, negatives): first_output = ( pcoll | 'process", "| beam.Map(lambda _: _) original_graph = p.to_runner_api(return_context=False) out_of_order_graph = p.to_runner_api(return_context=False)", "if element < 0: yield pvalue.TaggedOutput('tag_negative', element) else: yield element", "AsList from apache_beam.runners.direct import DirectRunner from apache_beam.runners.direct.consumer_tracking_pipeline_visitor import ConsumerTrackingPipelineVisitor from", "CoGroupByKey() self.pipeline.visit(self.visitor) root_transforms = [t.transform for t in self.visitor.root_transforms] self.assertEqual(len(root_transforms),", "import apache_beam as beam from apache_beam import pvalue from apache_beam.pipeline", "Create([('joe', '<EMAIL>')]) phones = self.pipeline | 'phone' >> Create([('mary', '111-222-3333')])", "as beam from apache_beam import pvalue from apache_beam.pipeline import Pipeline", "# The ASF licenses this file to You under the", "# pytype: skip-file import logging import unittest import apache_beam as", "p.visit(v_out_of_order) p = beam.Pipeline().from_runner_api( original_graph, runner='BundleBasedDirectRunner', options=None) v_original = ConsumerTrackingPipelineVisitor()", "p = beam.Pipeline().from_runner_api( original_graph, runner='BundleBasedDirectRunner', options=None) v_original = ConsumerTrackingPipelineVisitor() p.visit(v_original)", "OF ANY KIND, either express or 
implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "ANY KIND, either express or implied. # See the License", "(ASF) under one or more # contributor license agreements. See", "use this file except in compliance with # the License.", "See the License for the specific language governing permissions and", "3) def test_side_inputs(self): class SplitNumbersFn(DoFn): def process(self, element): if element", "pcoll | 'process numbers step 1' >> ParDo(ProcessNumbersFn(), negatives)) second_output", "c.transform for c in self.visitor.value_to_consumers[pbegin] ] self.assertCountEqual(pbegin_consumers, [root_read]) self.assertEqual(len(self.visitor.step_names), 3)", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "self.assertEqual(len(self.visitor.step_names), 5) self.assertEqual(len(self.visitor.views), 1) self.assertTrue(isinstance(self.visitor.views[0], pvalue.AsList)) def test_co_group_by_key(self): emails =", "to in writing, software # distributed under the License is", "= [t.transform for t in self.visitor.root_transforms] self.assertEqual(root_transforms, [root_read]) self.assertEqual(len(self.visitor.step_names), 5)", "# See the License for the specific language governing permissions", "skip-file import logging import unittest import apache_beam as beam from", "pbegin | 'read' >> root_read pcoll_read | FlatMap(lambda x: x)", "DoFn from apache_beam.transforms import FlatMap from apache_beam.transforms import Flatten from", "language governing permissions and # limitations under the License. #", "additional information regarding copyright ownership. 
# The ASF licenses this", "or agreed to in writing, software # distributed under the", "required by applicable law or agreed to in writing, software", "| CoGroupByKey() self.pipeline.visit(self.visitor) root_transforms = [t.transform for t in self.visitor.root_transforms]", "self.visitor.root_transforms] self.assertEqual(root_transforms, [root_read]) self.assertEqual(len(self.visitor.step_names), 5) self.assertEqual(len(self.visitor.views), 1) self.assertTrue(isinstance(self.visitor.views[0], pvalue.AsList)) def", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", ">> beam.Flatten()) return output_pc root_read = beam.Impulse() result = (", "copyright ownership. # The ASF licenses this file to You", "ConsumerTrackingPipelineVisitor() def test_root_transforms(self): root_read = beam.Impulse() root_flatten = Flatten(pipeline=self.pipeline) pbegin", "self.pipeline | 'read' >> root_read | ParDo(SplitNumbersFn()).with_outputs('tag_negative', main='positive')) positive, negative", "| 'email' >> Create([('joe', '<EMAIL>')]) phones = self.pipeline | 'phone'", "operator for chaining transforms. # pylint: disable=expression-not-assigned # pylint: disable=pointless-statement", "= Flatten(pipeline=self.pipeline) pbegin = pvalue.PBegin(self.pipeline) pcoll_read = pbegin | 'read'", "for chaining transforms. 
# pylint: disable=expression-not-assigned # pylint: disable=pointless-statement class", "= beam.Impulse() result = ( self.pipeline | 'read' >> root_read", "| 'flatten results' >> beam.Flatten()) return output_pc root_read = beam.Impulse()", "= ((first_output, second_output) | 'flatten results' >> beam.Flatten()) return output_pc", "License, Version 2.0 # (the \"License\"); you may not use", "may not use this file except in compliance with #", "agreed to in writing, software # distributed under the License", "output_pc = ((first_output, second_output) | 'flatten results' >> beam.Flatten()) return", ">> root_read | ParDo(SplitNumbersFn()).with_outputs('tag_negative', main='positive')) positive, negative = result _process_numbers(positive,", "3) # 2 creates + expanded CoGBK self.assertEqual(len(self.visitor.views), 0) def", "to the Apache Software Foundation (ASF) under one or more", "negatives)) second_output = ( first_output | 'process numbers step 2'", "distributed under the License is distributed on an \"AS IS\"", "process(self, element): if element < 0: yield pvalue.TaggedOutput('tag_negative', element) else:", "( first_output | 'process numbers step 2' >> ParDo(ProcessNumbersFn(), negatives))", "original_graph = p.to_runner_api(return_context=False) out_of_order_graph = p.to_runner_api(return_context=False) root_id = out_of_order_graph.root_transform_ids[0] root", "= p.to_runner_api(return_context=False) root_id = out_of_order_graph.root_transform_ids[0] root = out_of_order_graph.components.transforms[root_id] tmp =", "assert they are equal. out_of_order_labels = { str(k): [str(t) for", "under the Apache License, Version 2.0 # (the \"License\"); you", "# the License. You may obtain a copy of the", "express or implied. # See the License for the specific", "this work for additional information regarding copyright ownership. 
# The", "Licensed to the Apache Software Foundation (ASF) under one or", "[t.transform for t in self.visitor.root_transforms] self.assertCountEqual(root_transforms, [root_read, root_flatten]) pbegin_consumers =", "self.visitor.value_to_consumers[pbegin] ] self.assertCountEqual(pbegin_consumers, [root_read]) self.assertEqual(len(self.visitor.step_names), 3) def test_side_inputs(self): class SplitNumbersFn(DoFn):", "root.subtransforms[0] root.subtransforms[0] = root.subtransforms[1] root.subtransforms[1] = tmp p = beam.Pipeline().from_runner_api(", "first_output = ( pcoll | 'process numbers step 1' >>", "v_original.value_to_consumers } self.assertDictEqual(out_of_order_labels, original_labels) if __name__ == '__main__': logging.getLogger().setLevel(logging.DEBUG) unittest.main()", "writing, software # distributed under the License is distributed on", "pytype: skip-file import logging import unittest import apache_beam as beam", "= p.to_runner_api(return_context=False) out_of_order_graph = p.to_runner_api(return_context=False) root_id = out_of_order_graph.root_transform_ids[0] root =", "you may not use this file except in compliance with", "| 'phone' >> Create([('mary', '111-222-3333')]) {'emails': emails, 'phones': phones} |", "| 'process numbers step 1' >> ParDo(ProcessNumbersFn(), negatives)) second_output =", "import ParDo # Disable frequent lint warning due to pipe", "self.assertGreater( len(self.visitor.step_names), 3) # 2 creates + expanded CoGBK self.assertEqual(len(self.visitor.views),", "the License. You may obtain a copy of the License", "under the License. # \"\"\"Tests for consumer_tracking_pipeline_visitor.\"\"\" # pytype: skip-file", "root.subtransforms[0] = root.subtransforms[1] root.subtransforms[1] = tmp p = beam.Pipeline().from_runner_api( out_of_order_graph,", "| 'read' >> root_read pcoll_read | FlatMap(lambda x: x) []", "the License. 
# \"\"\"Tests for consumer_tracking_pipeline_visitor.\"\"\" # pytype: skip-file import", "Create([('mary', '111-222-3333')]) {'emails': emails, 'phones': phones} | CoGroupByKey() self.pipeline.visit(self.visitor) root_transforms", "CONDITIONS OF ANY KIND, either express or implied. # See", "Version 2.0 # (the \"License\"); you may not use this", "string to assert they are equal. out_of_order_labels = { str(k):", "apache_beam.pipeline import Pipeline from apache_beam.pvalue import AsList from apache_beam.runners.direct import", "p.to_runner_api(return_context=False) root_id = out_of_order_graph.root_transform_ids[0] root = out_of_order_graph.components.transforms[root_id] tmp = root.subtransforms[0]", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "original_labels = { str(k): [str(t) for t in v_original.value_to_consumers[k]] for", "( self.pipeline | 'read' >> root_read | ParDo(SplitNumbersFn()).with_outputs('tag_negative', main='positive')) positive,", "pbegin = pvalue.PBegin(self.pipeline) pcoll_read = pbegin | 'read' >> root_read", "pylint: disable=expression-not-assigned from apache_beam.testing.test_stream import TestStream p | TestStream().add_elements(['']) |", "= ConsumerTrackingPipelineVisitor() def test_root_transforms(self): root_read = beam.Impulse() root_flatten = Flatten(pipeline=self.pipeline)", "unittest import apache_beam as beam from apache_beam import pvalue from", "apache_beam.transforms import DoFn from apache_beam.transforms import FlatMap from apache_beam.transforms import", "'flatten results' >> beam.Flatten()) return output_pc root_read = beam.Impulse() result", "_: _) original_graph = p.to_runner_api(return_context=False) out_of_order_graph = p.to_runner_api(return_context=False) root_id =", "p.to_runner_api(return_context=False) out_of_order_graph = p.to_runner_api(return_context=False) root_id = out_of_order_graph.root_transform_ids[0] root = out_of_order_graph.components.transforms[root_id]", "second_output) | 'flatten 
results' >> beam.Flatten()) return output_pc root_read =", "class SplitNumbersFn(DoFn): def process(self, element): if element < 0: yield", "root_id = out_of_order_graph.root_transform_ids[0] root = out_of_order_graph.components.transforms[root_id] tmp = root.subtransforms[0] root.subtransforms[0]", "ConsumerTrackingPipelineVisitor() p.visit(v_original) # Convert to string to assert they are", "= self.pipeline | 'email' >> Create([('joe', '<EMAIL>')]) phones = self.pipeline", "(the \"License\"); you may not use this file except in", "OR CONDITIONS OF ANY KIND, either express or implied. #", "from apache_beam.testing.test_stream import TestStream p | TestStream().add_elements(['']) | beam.Map(lambda _:", "t in self.visitor.root_transforms] self.assertCountEqual(root_transforms, [root_read, root_flatten]) pbegin_consumers = [ c.transform", "negatives)) output_pc = ((first_output, second_output) | 'flatten results' >> beam.Flatten())", "the License is distributed on an \"AS IS\" BASIS, #", "= out_of_order_graph.components.transforms[root_id] tmp = root.subtransforms[0] root.subtransforms[0] = root.subtransforms[1] root.subtransforms[1] =", "in v_out_of_order.value_to_consumers } original_labels = { str(k): [str(t) for t", "= beam.Pipeline().from_runner_api( out_of_order_graph, runner='BundleBasedDirectRunner', options=None) v_out_of_order = ConsumerTrackingPipelineVisitor() p.visit(v_out_of_order) p", "def _process_numbers(pcoll, negatives): first_output = ( pcoll | 'process numbers", "more # contributor license agreements. 
See the NOTICE file distributed", "self.assertCountEqual(root_transforms, [root_read, root_flatten]) pbegin_consumers = [ c.transform for c in", "root.subtransforms[1] = tmp p = beam.Pipeline().from_runner_api( out_of_order_graph, runner='BundleBasedDirectRunner', options=None) v_out_of_order", "law or agreed to in writing, software # distributed under", "options=None) v_original = ConsumerTrackingPipelineVisitor() p.visit(v_original) # Convert to string to", ">> Create([('mary', '111-222-3333')]) {'emails': emails, 'phones': phones} | CoGroupByKey() self.pipeline.visit(self.visitor)", "= [t.transform for t in self.visitor.root_transforms] self.assertEqual(len(root_transforms), 2) self.assertGreater( len(self.visitor.step_names),", "Apache Software Foundation (ASF) under one or more # contributor", "{'emails': emails, 'phones': phones} | CoGroupByKey() self.pipeline.visit(self.visitor) root_transforms = [t.transform", "disable=expression-not-assigned # pylint: disable=pointless-statement class ConsumerTrackingPipelineVisitorTest(unittest.TestCase): def setUp(self): self.pipeline =", "ParDo(SplitNumbersFn()).with_outputs('tag_negative', main='positive')) positive, negative = result _process_numbers(positive, AsList(negative)) self.pipeline.visit(self.visitor) root_transforms", "element): if element < 0: yield pvalue.TaggedOutput('tag_negative', element) else: yield", "import Create from apache_beam.transforms import DoFn from apache_beam.transforms import FlatMap", "[ c.transform for c in self.visitor.value_to_consumers[pbegin] ] self.assertCountEqual(pbegin_consumers, [root_read]) self.assertEqual(len(self.visitor.step_names),", "root_read = beam.Impulse() root_flatten = Flatten(pipeline=self.pipeline) pbegin = pvalue.PBegin(self.pipeline) pcoll_read", "self.pipeline.visit(self.visitor) root_transforms = [t.transform for t in self.visitor.root_transforms] self.assertCountEqual(root_transforms, [root_read,", "the NOTICE file distributed with # this work for 
additional", "pbegin_consumers = [ c.transform for c in self.visitor.value_to_consumers[pbegin] ] self.assertCountEqual(pbegin_consumers,", "v_out_of_order.value_to_consumers } original_labels = { str(k): [str(t) for t in", "pcoll_read | FlatMap(lambda x: x) [] | 'flatten' >> root_flatten", "((first_output, second_output) | 'flatten results' >> beam.Flatten()) return output_pc root_read", "may obtain a copy of the License at # #", "the Apache License, Version 2.0 # (the \"License\"); you may", "str(k): [str(t) for t in v_out_of_order.value_to_consumers[k]] for k in v_out_of_order.value_to_consumers", "apache_beam.pvalue import AsList from apache_beam.runners.direct import DirectRunner from apache_beam.runners.direct.consumer_tracking_pipeline_visitor import", "# (the \"License\"); you may not use this file except", "= tmp p = beam.Pipeline().from_runner_api( out_of_order_graph, runner='BundleBasedDirectRunner', options=None) v_out_of_order =", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "self.visitor = ConsumerTrackingPipelineVisitor() def test_root_transforms(self): root_read = beam.Impulse() root_flatten =", "pcoll_read = pbegin | 'read' >> root_read pcoll_read | FlatMap(lambda", "governing permissions and # limitations under the License. # \"\"\"Tests", "beam.Impulse() result = ( self.pipeline | 'read' >> root_read |", "# Licensed to the Apache Software Foundation (ASF) under one", "import Pipeline from apache_beam.pvalue import AsList from apache_beam.runners.direct import DirectRunner", "test_co_group_by_key(self): emails = self.pipeline | 'email' >> Create([('joe', '<EMAIL>')]) phones", "= pbegin | 'read' >> root_read pcoll_read | FlatMap(lambda x:", "root_transforms = [t.transform for t in self.visitor.root_transforms] self.assertCountEqual(root_transforms, [root_read, root_flatten])", "in compliance with # the License. 
You may obtain a", "Create from apache_beam.transforms import DoFn from apache_beam.transforms import FlatMap from", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "def test_root_transforms(self): root_read = beam.Impulse() root_flatten = Flatten(pipeline=self.pipeline) pbegin =", "license agreements. See the NOTICE file distributed with # this", "import TestStream p | TestStream().add_elements(['']) | beam.Map(lambda _: _) original_graph", "[str(t) for t in v_original.value_to_consumers[k]] for k in v_original.value_to_consumers }", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "yield pvalue.TaggedOutput('tag_negative', element) else: yield element class ProcessNumbersFn(DoFn): def process(self,", "contributor license agreements. See the NOTICE file distributed with #", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "yield element class ProcessNumbersFn(DoFn): def process(self, element, negatives): yield element", "= ( self.pipeline | 'read' >> root_read | ParDo(SplitNumbersFn()).with_outputs('tag_negative', main='positive'))", "self.assertEqual(len(self.visitor.views), 1) self.assertTrue(isinstance(self.visitor.views[0], pvalue.AsList)) def test_co_group_by_key(self): emails = self.pipeline |", "pvalue.AsList)) def test_co_group_by_key(self): emails = self.pipeline | 'email' >> Create([('joe',", "test_root_transforms(self): root_read = beam.Impulse() root_flatten = Flatten(pipeline=self.pipeline) pbegin = pvalue.PBegin(self.pipeline)", "2) self.assertGreater( len(self.visitor.step_names), 3) # 2 creates + expanded CoGBK", "import unittest import apache_beam as beam from apache_beam import pvalue", "results' >> beam.Flatten()) return output_pc root_read = beam.Impulse() result =", "beam.Pipeline().from_runner_api( original_graph, runner='BundleBasedDirectRunner', options=None) v_original = ConsumerTrackingPipelineVisitor() p.visit(v_original) # Convert", "apache_beam.transforms import 
Flatten from apache_beam.transforms import ParDo # Disable frequent", "v_original.value_to_consumers[k]] for k in v_original.value_to_consumers } self.assertDictEqual(out_of_order_labels, original_labels) if __name__", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "for k in v_original.value_to_consumers } self.assertDictEqual(out_of_order_labels, original_labels) if __name__ ==", "apache_beam.runners.direct import DirectRunner from apache_beam.runners.direct.consumer_tracking_pipeline_visitor import ConsumerTrackingPipelineVisitor from apache_beam.transforms import", "file except in compliance with # the License. You may", "this file except in compliance with # the License. You", "= beam.Pipeline().from_runner_api( original_graph, runner='BundleBasedDirectRunner', options=None) v_original = ConsumerTrackingPipelineVisitor() p.visit(v_original) #", "Pipeline from apache_beam.pvalue import AsList from apache_beam.runners.direct import DirectRunner from", "test_visitor_not_sorted(self): p = Pipeline() # pylint: disable=expression-not-assigned from apache_beam.testing.test_stream import", "element) else: yield element class ProcessNumbersFn(DoFn): def process(self, element, negatives):", "ParDo(ProcessNumbersFn(), negatives)) second_output = ( first_output | 'process numbers step", "Apache License, Version 2.0 # (the \"License\"); you may not", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "beam.Impulse() root_flatten = Flatten(pipeline=self.pipeline) pbegin = pvalue.PBegin(self.pipeline) pcoll_read = pbegin", "t in v_original.value_to_consumers[k]] for k in v_original.value_to_consumers } self.assertDictEqual(out_of_order_labels, original_labels)", "apache_beam.runners.direct.consumer_tracking_pipeline_visitor import ConsumerTrackingPipelineVisitor from apache_beam.transforms import CoGroupByKey from apache_beam.transforms import", "FlatMap from apache_beam.transforms import Flatten from 
apache_beam.transforms import ParDo #", "apache_beam.transforms import CoGroupByKey from apache_beam.transforms import Create from apache_beam.transforms import", "and # limitations under the License. # \"\"\"Tests for consumer_tracking_pipeline_visitor.\"\"\"", "or implied. # See the License for the specific language", "ParDo(ProcessNumbersFn(), negatives)) output_pc = ((first_output, second_output) | 'flatten results' >>", "result _process_numbers(positive, AsList(negative)) self.pipeline.visit(self.visitor) root_transforms = [t.transform for t in", "# pylint: disable=expression-not-assigned # pylint: disable=pointless-statement class ConsumerTrackingPipelineVisitorTest(unittest.TestCase): def setUp(self):", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "= Pipeline(DirectRunner()) self.visitor = ConsumerTrackingPipelineVisitor() def test_root_transforms(self): root_read = beam.Impulse()", "Pipeline() # pylint: disable=expression-not-assigned from apache_beam.testing.test_stream import TestStream p |", "p.visit(v_original) # Convert to string to assert they are equal.", "= [ c.transform for c in self.visitor.value_to_consumers[pbegin] ] self.assertCountEqual(pbegin_consumers, [root_read])", "root_read pcoll_read | FlatMap(lambda x: x) [] | 'flatten' >>", "in self.visitor.root_transforms] self.assertEqual(root_transforms, [root_read]) self.assertEqual(len(self.visitor.step_names), 5) self.assertEqual(len(self.visitor.views), 1) self.assertTrue(isinstance(self.visitor.views[0], pvalue.AsList))", "\"License\"); you may not use this file except in compliance", "except in compliance with # the License. 
You may obtain", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "file to You under the Apache License, Version 2.0 #", "0: yield pvalue.TaggedOutput('tag_negative', element) else: yield element class ProcessNumbersFn(DoFn): def", "import Flatten from apache_beam.transforms import ParDo # Disable frequent lint", "self.assertEqual(len(root_transforms), 2) self.assertGreater( len(self.visitor.step_names), 3) # 2 creates + expanded", "root_read = beam.Impulse() result = ( self.pipeline | 'read' >>", "= ConsumerTrackingPipelineVisitor() p.visit(v_out_of_order) p = beam.Pipeline().from_runner_api( original_graph, runner='BundleBasedDirectRunner', options=None) v_original", "disable=expression-not-assigned from apache_beam.testing.test_stream import TestStream p | TestStream().add_elements(['']) | beam.Map(lambda", "regarding copyright ownership. # The ASF licenses this file to", "under one or more # contributor license agreements. See the", "self.assertEqual(root_transforms, [root_read]) self.assertEqual(len(self.visitor.step_names), 5) self.assertEqual(len(self.visitor.views), 1) self.assertTrue(isinstance(self.visitor.views[0], pvalue.AsList)) def test_co_group_by_key(self):", "| TestStream().add_elements(['']) | beam.Map(lambda _: _) original_graph = p.to_runner_api(return_context=False) out_of_order_graph", "yield element def _process_numbers(pcoll, negatives): first_output = ( pcoll |", "NOTICE file distributed with # this work for additional information", "root.subtransforms[1] root.subtransforms[1] = tmp p = beam.Pipeline().from_runner_api( out_of_order_graph, runner='BundleBasedDirectRunner', options=None)", "due to pipe operator for chaining transforms. 
# pylint: disable=expression-not-assigned", "in v_original.value_to_consumers } self.assertDictEqual(out_of_order_labels, original_labels) if __name__ == '__main__': logging.getLogger().setLevel(logging.DEBUG)", "original_graph, runner='BundleBasedDirectRunner', options=None) v_original = ConsumerTrackingPipelineVisitor() p.visit(v_original) # Convert to", "# Disable frequent lint warning due to pipe operator for", "limitations under the License. # \"\"\"Tests for consumer_tracking_pipeline_visitor.\"\"\" # pytype:", "2 creates + expanded CoGBK self.assertEqual(len(self.visitor.views), 0) def test_visitor_not_sorted(self): p", "ConsumerTrackingPipelineVisitorTest(unittest.TestCase): def setUp(self): self.pipeline = Pipeline(DirectRunner()) self.visitor = ConsumerTrackingPipelineVisitor() def", "self.pipeline.visit(self.visitor) root_transforms = [t.transform for t in self.visitor.root_transforms] self.assertEqual(root_transforms, [root_read])", "= { str(k): [str(t) for t in v_original.value_to_consumers[k]] for k", "# # Unless required by applicable law or agreed to", "def test_side_inputs(self): class SplitNumbersFn(DoFn): def process(self, element): if element <", "for c in self.visitor.value_to_consumers[pbegin] ] self.assertCountEqual(pbegin_consumers, [root_read]) self.assertEqual(len(self.visitor.step_names), 3) def", "= beam.Impulse() root_flatten = Flatten(pipeline=self.pipeline) pbegin = pvalue.PBegin(self.pipeline) pcoll_read =", "for t in v_out_of_order.value_to_consumers[k]] for k in v_out_of_order.value_to_consumers } original_labels", "self.pipeline = Pipeline(DirectRunner()) self.visitor = ConsumerTrackingPipelineVisitor() def test_root_transforms(self): root_read =", "are equal. 
out_of_order_labels = { str(k): [str(t) for t in", "'<EMAIL>')]) phones = self.pipeline | 'phone' >> Create([('mary', '111-222-3333')]) {'emails':", "file distributed with # this work for additional information regarding", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "'read' >> root_read | ParDo(SplitNumbersFn()).with_outputs('tag_negative', main='positive')) positive, negative = result", ">> ParDo(ProcessNumbersFn(), negatives)) output_pc = ((first_output, second_output) | 'flatten results'", "| 'process numbers step 2' >> ParDo(ProcessNumbersFn(), negatives)) output_pc =", "beam.Pipeline().from_runner_api( out_of_order_graph, runner='BundleBasedDirectRunner', options=None) v_out_of_order = ConsumerTrackingPipelineVisitor() p.visit(v_out_of_order) p =", "pylint: disable=expression-not-assigned # pylint: disable=pointless-statement class ConsumerTrackingPipelineVisitorTest(unittest.TestCase): def setUp(self): self.pipeline", "_process_numbers(positive, AsList(negative)) self.pipeline.visit(self.visitor) root_transforms = [t.transform for t in self.visitor.root_transforms]", "for additional information regarding copyright ownership. 
# The ASF licenses", "from apache_beam.runners.direct import DirectRunner from apache_beam.runners.direct.consumer_tracking_pipeline_visitor import ConsumerTrackingPipelineVisitor from apache_beam.transforms", "second_output = ( first_output | 'process numbers step 2' >>", "ASF licenses this file to You under the Apache License,", "'process numbers step 2' >> ParDo(ProcessNumbersFn(), negatives)) output_pc = ((first_output,", "root_read | ParDo(SplitNumbersFn()).with_outputs('tag_negative', main='positive')) positive, negative = result _process_numbers(positive, AsList(negative))", "[root_read]) self.assertEqual(len(self.visitor.step_names), 3) def test_side_inputs(self): class SplitNumbersFn(DoFn): def process(self, element):", "Software Foundation (ASF) under one or more # contributor license", "import AsList from apache_beam.runners.direct import DirectRunner from apache_beam.runners.direct.consumer_tracking_pipeline_visitor import ConsumerTrackingPipelineVisitor", "for consumer_tracking_pipeline_visitor.\"\"\" # pytype: skip-file import logging import unittest import", "( pcoll | 'process numbers step 1' >> ParDo(ProcessNumbersFn(), negatives))", "Pipeline(DirectRunner()) self.visitor = ConsumerTrackingPipelineVisitor() def test_root_transforms(self): root_read = beam.Impulse() root_flatten", "implied. # See the License for the specific language governing", "runner='BundleBasedDirectRunner', options=None) v_out_of_order = ConsumerTrackingPipelineVisitor() p.visit(v_out_of_order) p = beam.Pipeline().from_runner_api( original_graph,", "x) [] | 'flatten' >> root_flatten self.pipeline.visit(self.visitor) root_transforms = [t.transform", "= { str(k): [str(t) for t in v_out_of_order.value_to_consumers[k]] for k", "permissions and # limitations under the License. 
# \"\"\"Tests for", ">> root_read pcoll_read | FlatMap(lambda x: x) [] | 'flatten'", "def test_visitor_not_sorted(self): p = Pipeline() # pylint: disable=expression-not-assigned from apache_beam.testing.test_stream", "for t in self.visitor.root_transforms] self.assertEqual(len(root_transforms), 2) self.assertGreater( len(self.visitor.step_names), 3) #", "[t.transform for t in self.visitor.root_transforms] self.assertEqual(root_transforms, [root_read]) self.assertEqual(len(self.visitor.step_names), 5) self.assertEqual(len(self.visitor.views),", "beam.Map(lambda _: _) original_graph = p.to_runner_api(return_context=False) out_of_order_graph = p.to_runner_api(return_context=False) root_id", "p = beam.Pipeline().from_runner_api( out_of_order_graph, runner='BundleBasedDirectRunner', options=None) v_out_of_order = ConsumerTrackingPipelineVisitor() p.visit(v_out_of_order)", "emails, 'phones': phones} | CoGroupByKey() self.pipeline.visit(self.visitor) root_transforms = [t.transform for", "class ProcessNumbersFn(DoFn): def process(self, element, negatives): yield element def _process_numbers(pcoll,", "by applicable law or agreed to in writing, software #", "str(k): [str(t) for t in v_original.value_to_consumers[k]] for k in v_original.value_to_consumers", "not use this file except in compliance with # the", "ConsumerTrackingPipelineVisitor from apache_beam.transforms import CoGroupByKey from apache_beam.transforms import Create from", "t in self.visitor.root_transforms] self.assertEqual(len(root_transforms), 2) self.assertGreater( len(self.visitor.step_names), 3) # 2", "import DoFn from apache_beam.transforms import FlatMap from apache_beam.transforms import Flatten", "frequent lint warning due to pipe operator for chaining transforms.", "from apache_beam.transforms import DoFn from apache_beam.transforms import FlatMap from apache_beam.transforms", "v_out_of_order.value_to_consumers[k]] for k in v_out_of_order.value_to_consumers } original_labels = { str(k):", "the Apache 
Software Foundation (ASF) under one or more #", "import FlatMap from apache_beam.transforms import Flatten from apache_beam.transforms import ParDo", "root_flatten self.pipeline.visit(self.visitor) root_transforms = [t.transform for t in self.visitor.root_transforms] self.assertCountEqual(root_transforms,", "ConsumerTrackingPipelineVisitor() p.visit(v_out_of_order) p = beam.Pipeline().from_runner_api( original_graph, runner='BundleBasedDirectRunner', options=None) v_original =", "from apache_beam.pipeline import Pipeline from apache_beam.pvalue import AsList from apache_beam.runners.direct", "CoGBK self.assertEqual(len(self.visitor.views), 0) def test_visitor_not_sorted(self): p = Pipeline() # pylint:", "out_of_order_labels = { str(k): [str(t) for t in v_out_of_order.value_to_consumers[k]] for", "# # Licensed to the Apache Software Foundation (ASF) under", "result = ( self.pipeline | 'read' >> root_read | ParDo(SplitNumbersFn()).with_outputs('tag_negative',", "out_of_order_graph.components.transforms[root_id] tmp = root.subtransforms[0] root.subtransforms[0] = root.subtransforms[1] root.subtransforms[1] = tmp", "import DirectRunner from apache_beam.runners.direct.consumer_tracking_pipeline_visitor import ConsumerTrackingPipelineVisitor from apache_beam.transforms import CoGroupByKey", "<gh_stars>1-10 # # Licensed to the Apache Software Foundation (ASF)", "v_out_of_order = ConsumerTrackingPipelineVisitor() p.visit(v_out_of_order) p = beam.Pipeline().from_runner_api( original_graph, runner='BundleBasedDirectRunner', options=None)", "apache_beam.transforms import Create from apache_beam.transforms import DoFn from apache_beam.transforms import", "= ConsumerTrackingPipelineVisitor() p.visit(v_original) # Convert to string to assert they", "DirectRunner from apache_beam.runners.direct.consumer_tracking_pipeline_visitor import ConsumerTrackingPipelineVisitor from apache_beam.transforms import CoGroupByKey from", "= ( pcoll | 'process numbers step 1' >> 
ParDo(ProcessNumbersFn(),", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "output_pc root_read = beam.Impulse() result = ( self.pipeline | 'read'", "Unless required by applicable law or agreed to in writing,", "from apache_beam.pvalue import AsList from apache_beam.runners.direct import DirectRunner from apache_beam.runners.direct.consumer_tracking_pipeline_visitor", "TestStream p | TestStream().add_elements(['']) | beam.Map(lambda _: _) original_graph =", "# pylint: disable=pointless-statement class ConsumerTrackingPipelineVisitorTest(unittest.TestCase): def setUp(self): self.pipeline = Pipeline(DirectRunner())", "Flatten(pipeline=self.pipeline) pbegin = pvalue.PBegin(self.pipeline) pcoll_read = pbegin | 'read' >>", "[root_read]) self.assertEqual(len(self.visitor.step_names), 5) self.assertEqual(len(self.visitor.views), 1) self.assertTrue(isinstance(self.visitor.views[0], pvalue.AsList)) def test_co_group_by_key(self): emails", "t in self.visitor.root_transforms] self.assertEqual(root_transforms, [root_read]) self.assertEqual(len(self.visitor.step_names), 5) self.assertEqual(len(self.visitor.views), 1) self.assertTrue(isinstance(self.visitor.views[0],", "len(self.visitor.step_names), 3) # 2 creates + expanded CoGBK self.assertEqual(len(self.visitor.views), 0)", "negatives): first_output = ( pcoll | 'process numbers step 1'", "self.assertEqual(len(self.visitor.step_names), 3) def test_side_inputs(self): class SplitNumbersFn(DoFn): def process(self, element): if", "the specific language governing permissions and # limitations under the", "warning due to pipe operator for chaining transforms. 
# pylint:", "pvalue.PBegin(self.pipeline) pcoll_read = pbegin | 'read' >> root_read pcoll_read |", "FlatMap(lambda x: x) [] | 'flatten' >> root_flatten self.pipeline.visit(self.visitor) root_transforms", "in self.visitor.value_to_consumers[pbegin] ] self.assertCountEqual(pbegin_consumers, [root_read]) self.assertEqual(len(self.visitor.step_names), 3) def test_side_inputs(self): class", "applicable law or agreed to in writing, software # distributed", "x: x) [] | 'flatten' >> root_flatten self.pipeline.visit(self.visitor) root_transforms =", "= [t.transform for t in self.visitor.root_transforms] self.assertCountEqual(root_transforms, [root_read, root_flatten]) pbegin_consumers", "apache_beam.transforms import FlatMap from apache_beam.transforms import Flatten from apache_beam.transforms import", "in v_out_of_order.value_to_consumers[k]] for k in v_out_of_order.value_to_consumers } original_labels = {", "import CoGroupByKey from apache_beam.transforms import Create from apache_beam.transforms import DoFn", "# contributor license agreements. See the NOTICE file distributed with", "# this work for additional information regarding copyright ownership. #", "in writing, software # distributed under the License is distributed", "element < 0: yield pvalue.TaggedOutput('tag_negative', element) else: yield element class", "with # the License. You may obtain a copy of", "'phone' >> Create([('mary', '111-222-3333')]) {'emails': emails, 'phones': phones} | CoGroupByKey()", "from apache_beam import pvalue from apache_beam.pipeline import Pipeline from apache_beam.pvalue", "root_transforms = [t.transform for t in self.visitor.root_transforms] self.assertEqual(root_transforms, [root_read]) self.assertEqual(len(self.visitor.step_names),", "# limitations under the License. 
# \"\"\"Tests for consumer_tracking_pipeline_visitor.\"\"\" #", "1) self.assertTrue(isinstance(self.visitor.views[0], pvalue.AsList)) def test_co_group_by_key(self): emails = self.pipeline | 'email'", "creates + expanded CoGBK self.assertEqual(len(self.visitor.views), 0) def test_visitor_not_sorted(self): p =", "phones} | CoGroupByKey() self.pipeline.visit(self.visitor) root_transforms = [t.transform for t in", "this file to You under the Apache License, Version 2.0", "tmp = root.subtransforms[0] root.subtransforms[0] = root.subtransforms[1] root.subtransforms[1] = tmp p", "def test_co_group_by_key(self): emails = self.pipeline | 'email' >> Create([('joe', '<EMAIL>')])", "they are equal. out_of_order_labels = { str(k): [str(t) for t", "CoGroupByKey from apache_beam.transforms import Create from apache_beam.transforms import DoFn from", "< 0: yield pvalue.TaggedOutput('tag_negative', element) else: yield element class ProcessNumbersFn(DoFn):", "self.assertEqual(len(self.visitor.views), 0) def test_visitor_not_sorted(self): p = Pipeline() # pylint: disable=expression-not-assigned", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "= root.subtransforms[0] root.subtransforms[0] = root.subtransforms[1] root.subtransforms[1] = tmp p =", "for t in v_original.value_to_consumers[k]] for k in v_original.value_to_consumers } self.assertDictEqual(out_of_order_labels,", "agreements. 
See the NOTICE file distributed with # this work", "Foundation (ASF) under one or more # contributor license agreements.", ">> ParDo(ProcessNumbersFn(), negatives)) second_output = ( first_output | 'process numbers", "ProcessNumbersFn(DoFn): def process(self, element, negatives): yield element def _process_numbers(pcoll, negatives):", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "'read' >> root_read pcoll_read | FlatMap(lambda x: x) [] |", "v_original = ConsumerTrackingPipelineVisitor() p.visit(v_original) # Convert to string to assert", "self.assertTrue(isinstance(self.visitor.views[0], pvalue.AsList)) def test_co_group_by_key(self): emails = self.pipeline | 'email' >>", "in v_original.value_to_consumers[k]] for k in v_original.value_to_consumers } self.assertDictEqual(out_of_order_labels, original_labels) if", ">> root_flatten self.pipeline.visit(self.visitor) root_transforms = [t.transform for t in self.visitor.root_transforms]", "[root_read, root_flatten]) pbegin_consumers = [ c.transform for c in self.visitor.value_to_consumers[pbegin]", "'111-222-3333')]) {'emails': emails, 'phones': phones} | CoGroupByKey() self.pipeline.visit(self.visitor) root_transforms =", "beam from apache_beam import pvalue from apache_beam.pipeline import Pipeline from", "self.visitor.root_transforms] self.assertEqual(len(root_transforms), 2) self.assertGreater( len(self.visitor.step_names), 3) # 2 creates +", "the License for the specific language governing permissions and #", "# \"\"\"Tests for consumer_tracking_pipeline_visitor.\"\"\" # pytype: skip-file import logging import", "See the NOTICE file distributed with # this work for", "either express or implied. 
# See the License for the", "pylint: disable=pointless-statement class ConsumerTrackingPipelineVisitorTest(unittest.TestCase): def setUp(self): self.pipeline = Pipeline(DirectRunner()) self.visitor", "to You under the Apache License, Version 2.0 # (the", "apache_beam.testing.test_stream import TestStream p | TestStream().add_elements(['']) | beam.Map(lambda _: _)", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "else: yield element class ProcessNumbersFn(DoFn): def process(self, element, negatives): yield", "first_output | 'process numbers step 2' >> ParDo(ProcessNumbersFn(), negatives)) output_pc", "or more # contributor license agreements. See the NOTICE file", "License. # \"\"\"Tests for consumer_tracking_pipeline_visitor.\"\"\" # pytype: skip-file import logging", "step 1' >> ParDo(ProcessNumbersFn(), negatives)) second_output = ( first_output |", "element class ProcessNumbersFn(DoFn): def process(self, element, negatives): yield element def", ">> Create([('joe', '<EMAIL>')]) phones = self.pipeline | 'phone' >> Create([('mary',", "You under the Apache License, Version 2.0 # (the \"License\");", "Disable frequent lint warning due to pipe operator for chaining", "import pvalue from apache_beam.pipeline import Pipeline from apache_beam.pvalue import AsList", "self.pipeline | 'phone' >> Create([('mary', '111-222-3333')]) {'emails': emails, 'phones': phones}", "root_transforms = [t.transform for t in self.visitor.root_transforms] self.assertEqual(len(root_transforms), 2) self.assertGreater(", "import logging import unittest import apache_beam as beam from apache_beam", "step 2' >> ParDo(ProcessNumbersFn(), negatives)) output_pc = ((first_output, second_output) |", "from apache_beam.transforms import FlatMap from apache_beam.transforms import Flatten from apache_beam.transforms", "} original_labels = { str(k): [str(t) for t in v_original.value_to_consumers[k]]", "a copy of the License at # # 
http://www.apache.org/licenses/LICENSE-2.0 #", "def process(self, element): if element < 0: yield pvalue.TaggedOutput('tag_negative', element)", "] self.assertCountEqual(pbegin_consumers, [root_read]) self.assertEqual(len(self.visitor.step_names), 3) def test_side_inputs(self): class SplitNumbersFn(DoFn): def", "The ASF licenses this file to You under the Apache", "self.visitor.root_transforms] self.assertCountEqual(root_transforms, [root_read, root_flatten]) pbegin_consumers = [ c.transform for c", "from apache_beam.transforms import CoGroupByKey from apache_beam.transforms import Create from apache_beam.transforms", "5) self.assertEqual(len(self.visitor.views), 1) self.assertTrue(isinstance(self.visitor.views[0], pvalue.AsList)) def test_co_group_by_key(self): emails = self.pipeline", "return output_pc root_read = beam.Impulse() result = ( self.pipeline |", "self.pipeline.visit(self.visitor) root_transforms = [t.transform for t in self.visitor.root_transforms] self.assertEqual(len(root_transforms), 2)", "| ParDo(SplitNumbersFn()).with_outputs('tag_negative', main='positive')) positive, negative = result _process_numbers(positive, AsList(negative)) self.pipeline.visit(self.visitor)", "2' >> ParDo(ProcessNumbersFn(), negatives)) output_pc = ((first_output, second_output) | 'flatten", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "from apache_beam.transforms import Flatten from apache_beam.transforms import ParDo # Disable", "positive, negative = result _process_numbers(positive, AsList(negative)) self.pipeline.visit(self.visitor) root_transforms = [t.transform", "from apache_beam.runners.direct.consumer_tracking_pipeline_visitor import ConsumerTrackingPipelineVisitor from apache_beam.transforms import CoGroupByKey from apache_beam.transforms", "k in v_out_of_order.value_to_consumers } original_labels = { str(k): [str(t) for", "# distributed under the License is distributed on an \"AS", "def setUp(self): self.pipeline = Pipeline(DirectRunner()) 
self.visitor = ConsumerTrackingPipelineVisitor() def test_root_transforms(self):", "# Unless required by applicable law or agreed to in", "'email' >> Create([('joe', '<EMAIL>')]) phones = self.pipeline | 'phone' >>", "phones = self.pipeline | 'phone' >> Create([('mary', '111-222-3333')]) {'emails': emails,", "| FlatMap(lambda x: x) [] | 'flatten' >> root_flatten self.pipeline.visit(self.visitor)", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "runner='BundleBasedDirectRunner', options=None) v_original = ConsumerTrackingPipelineVisitor() p.visit(v_original) # Convert to string", "import ConsumerTrackingPipelineVisitor from apache_beam.transforms import CoGroupByKey from apache_beam.transforms import Create", "'flatten' >> root_flatten self.pipeline.visit(self.visitor) root_transforms = [t.transform for t in", "out_of_order_graph = p.to_runner_api(return_context=False) root_id = out_of_order_graph.root_transform_ids[0] root = out_of_order_graph.components.transforms[root_id] tmp", "License. You may obtain a copy of the License at", "self.pipeline | 'email' >> Create([('joe', '<EMAIL>')]) phones = self.pipeline |", "You may obtain a copy of the License at #", "one or more # contributor license agreements. See the NOTICE", "work for additional information regarding copyright ownership. # The ASF", "out_of_order_graph.root_transform_ids[0] root = out_of_order_graph.components.transforms[root_id] tmp = root.subtransforms[0] root.subtransforms[0] = root.subtransforms[1]", "'process numbers step 1' >> ParDo(ProcessNumbersFn(), negatives)) second_output = (", "chaining transforms. # pylint: disable=expression-not-assigned # pylint: disable=pointless-statement class ConsumerTrackingPipelineVisitorTest(unittest.TestCase):", "to assert they are equal. 
out_of_order_labels = { str(k): [str(t)", "negative = result _process_numbers(positive, AsList(negative)) self.pipeline.visit(self.visitor) root_transforms = [t.transform for", "apache_beam import pvalue from apache_beam.pipeline import Pipeline from apache_beam.pvalue import", "= Pipeline() # pylint: disable=expression-not-assigned from apache_beam.testing.test_stream import TestStream p", "{ str(k): [str(t) for t in v_original.value_to_consumers[k]] for k in" ]
[ "gluon.utils import web2py_uuid from gluon.admin import add_path_first, create_missing_folders, create_missing_app_folders from", "app(self.environ, self.start_response) return lambda caller=caller, app=app: caller(app) return middleware def", "# build environment for controller and view # ################################################## environment", "Dec 2037 23:59:59 GMT' response.stream(static_file, request=request) # ################################################## # fill", "request.tickets_db: ticket = e.log(request) or 'unknown' # rollback if response._custom_rollback:", "exists(password_file): return else: password = '' elif password.startswith('<pam_user:'): # use", "# ################################################## # run controller # ################################################## if global_settings.debugging and", "global_settings.applications_parent # backward compatibility create_missing_folders() # set up logging for", "environment so it may have weird behavior in some cases", "set default view, controller can override it response.view = '%s/%s.%s'", "applications/, site-packages/ etc) # defaults to that directory set sys.path", "import Request, Response, Session from gluon.compileapp import build_environment, run_models_in, \\", "as applications_parent (for backward compatibility) web2py_path = global_settings.applications_parent # backward", "profiler_dir)} self.server = rocket.Rocket(interfaces or tuple(sock_list), method='wsgi', app_info=app_info, min_threads=min_threads, max_threads=max_threads,", "THREAD_LOCAL as rwthread, \\ try_rewrite_on_error, fixup_missing_path_info from gluon import newcron", "and session_id here, # accept_language is validated in languages #", "IPv6 client = '::1' else: client = '127.0.0.1' # IPv4", "dynamic page. 
It first runs all models, then runs the", "\"\"\" a wsgi app that does logging and profiling and", "Response() session = Session() env = request.env #env.web2py_path = global_settings.applications_parent", "None if client in (None, '', 'unknown'): g = regex_client.search(eget('remote_addr',", "cProfile prof = cProfile.Profile() prof.enable() ret[0] = wsgiapp(environ, responder2) prof.disable()", "= response @property def environ(self): if not hasattr(self, '_environ'): new_environ", "import_all # DO NOT REMOVE PART OF FREEZE PROCESS import", "prof.dump_stats(destfile) try: line = '%s, %s, %s, %s, %s, %s,", "run_controller_in, run_view_in from gluon.contenttype import contenttype from pydal.base import BaseAdapter", "from gluon.validators import CRYPT from gluon.html import URL, xmlescape from", "and then tries to render the output using a view/template.", "# rewrite incoming URL # parse rewritten header variables #", "function must run from the [application] folder. A typical example", "compensates for fcgi missing path_info and query_string - validates the", "# parse rewritten header variables # parse rewritten URL #", "# use the pam password for specified user cpassword =", "= single_cookie.strip() if single_cookie: try: request.cookies.load(single_cookie) except Cookie.CookieError: pass #", "# ################################################## if env.http_cookie: for single_cookie in env.http_cookie.split(';'): single_cookie =", "is wrong: # web2py_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # because we do", "ssl_certificate or not ssl_private_key: logger.info('SSL is off') elif not rocket.ssl:", "request.cid: http_response.headers.setdefault( 'web2py-component-content', 'replace') if request.ajax: if response.flash: http_response.headers['web2py-component-flash'] =", "os import re import copy import sys import time import", "able to use several interfaces - must be list of", "= ['wsgibase', 'save_password', 'appfactory', 'HttpServer'] 
requests = 0 # gc", "access the requested application # ################################################## disabled = pjoin(request.folder, 'DISABLED')", "= [ip, port] if not ssl_certificate or not ssl_private_key: logger.info('SSL", "= None try: try: try: # ################################################## # handle fcgi", "request = Request(environ) response = Response() session = Session() env", "################################################## app = request.application # must go after url_in! if", "after exec, not always, once every 100 requests global requests", "requests. if request.cid: http_response.headers.setdefault( 'web2py-component-content', 'replace') if request.ajax: if response.flash:", "is OFF') elif not exists(ssl_certificate): logger.warning('unable to open SSL certificate.", "session file # ################################################## if not env.web2py_disable_session: session.connect(request, response) #", "restore it! Thanks ozancag import locale locale.setlocale(locale.LC_CTYPE, \"C\") # IMPORTANT,", "specified user cpassword = password[1:-1] else: # use provided password", "import gc import os import re import copy import sys", "web2py_uuid from gluon.admin import add_path_first, create_missing_folders, create_missing_app_folders from gluon.globals import", "\"Wrong format for rocket interfaces parameter - see http://packages.python.org/rocket/\" else:", "in messageboxhandler, changes locale ... import gluon.messageboxhandler logging.gluon = gluon", "profiler_dir: profiler_dir = abspath(profiler_dir) logger.warn('profiler is on. 
will use dir", "| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) The gluon wsgi application --------------------------- \"\"\"", "'', '', locals()) ticket = e.log(request) or 'unrecoverable' http_response =", "- see http://packages.python.org/rocket/\" if path: # if a path is", "logging and profiling and calls wsgibase \"\"\" status_headers = []", "in DB if not request.tickets_db: ticket = e.log(request) or 'unknown'", "the web server \"\"\" newcron.stopcron() self.server.stop(stoplogging) try: os.unlink(self.pid_filename) except: pass", "from gluon.fileutils import abspath, write_file from gluon.settings import global_settings from", "Args: wsgiapp: the base application logfilename: where to store apache-compatible", "expose wsgi hooks for convenience # ################################################## request.wsgi = LazyWSGI(environ,", "start(self): \"\"\" start the web server \"\"\" try: signal.signal(signal.SIGTERM, lambda", "server_name: server_name = socket.gethostname() logger.info('starting web server...') rocket.SERVER_NAME = server_name", "if env.http_cookie: for single_cookie in env.http_cookie.split(';'): single_cookie = single_cookie.strip() if", "response.session_file.close() session._unlock(response) http_response, new_environ = try_rewrite_on_error( http_response, request, environ, ticket)", "b'')) if response.js: http_response.headers['web2py-component-command'] = \\ urllib2.quote(response.js.replace('\\n', '')) # ##################################################", "port] if not ssl_certificate or not ssl_private_key: logger.info('SSL is off')", "% profiler_dir) def app_with_logging(environ, responder): \"\"\" a wsgi app that", "This function is used to generate a dynamic page. 
It", "not request.tickets_db: ticket = e.log(request) or 'unknown' # rollback if", "status_headers.append(h) return responder(s, h) time_in = time.time() ret = [0]", "unless web2py_path is changed via the web2py.py -f folder option", "generates a wsgi application that does logging and profiling and", "try: from gluon import rocket except: if not global_settings.web2py_runtime_gae: logger.warn('unable", "(socket.gaierror, TypeError): pass global_settings.local_hosts = list(local_hosts) else: local_hosts = global_settings.local_hosts", "inserted path to script directory into sys.path # applications_parent (path", "for specified user cpassword = password[1:-1] else: # use provided", "if response._custom_rollback: response._custom_rollback() else: BaseAdapter.close_all_instances('rollback') # if tickets in db,", "success, commit database # ################################################## if response.do_not_commit is True: BaseAdapter.close_all_instances(None)", "isinstance(data, list): return data return [data] for item in middleware_apps:", "by apache mod_wsgi (or any WSGI-compatible server). 
- fills request", "'no-store, no-cache, must-revalidate, post-check=0, pre-check=0'), ('Expires', time.strftime('%a, %d %b %Y", "################################################## # on success, try store session in database #", "= pid_filename if not server_name: server_name = socket.gethostname() logger.info('starting web", "% 100 or 0 if not requests: gc.collect() # end", "(status_headers[0])[:3], time.time() - time_in, ) if not logfilename: sys.stdout.write(line) elif", "import global_settings from gluon.utils import web2py_uuid from gluon.admin import add_path_first,", "new_environ return self._environ def start_response(self, status='200', headers=[], exec_info=None): \"\"\" in", "application # ################################################## disabled = pjoin(request.folder, 'DISABLED') if not exists(request.folder):", "store profile files \"\"\" if profilerfilename is not None: raise", "five0three = os.path.join(request.folder,'static','503.html') if os.path.exists(five0three): raise HTTP(503, file(five0three, 'r').read()) else:", "= os.path.join try: logging.config.fileConfig(abspath(\"logging.conf\")) except: # fails on GAE or", "new_environ['wsgi.version'] = 1 self._environ = new_environ return self._environ def start_response(self,", "the path in url The url path must be either:", "data # ################################################## #parse_get_post_vars(request, environ) # ################################################## # expose wsgi", "= (g.group() or '').split(',')[0] if g else None if client", "on application error, rollback database # ################################################## try: if response._custom_rollback:", "% ( environ['REMOTE_ADDR'], datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S'), environ['REQUEST_METHOD'], environ['PATH_INFO'].replace(',', '%2C'), environ['SERVER_PROTOCOL'], (status_headers[0])[:3],", "try: try: try: # ################################################## # handle fcgi missing path_info", "if 
request.body: request.body.close() # ################################################## # on application error, rollback", "not exists(ssl_certificate): logger.warning('unable to open SSL certificate. SSL is OFF')", "version_info.close() global_settings.web2py_version = raw_version_string web2py_version = global_settings.web2py_version except: raise RuntimeError(\"Cannot", "database! # ################################################## if not env.web2py_disable_session: session._try_store_in_cookie_or_file(request, response) # Set", "build missing folders # ################################################## create_missing_app_folders(request) # ################################################## # get", "path=None, interfaces=None # Rocket is able to use several interfaces", "server (Rocket) \"\"\" def __init__( self, ip='1172.16.17.32', port=8000, password='', pid_filename='httpserver.pid',", "to dir %s\" % profiler_dir) def app_with_logging(environ, responder): \"\"\" a", "OFF') elif not exists(ssl_private_key): logger.warning('unable to open SSL private key.", "A typical example would be the call to the url", "after trying to commit database! # ################################################## if not env.web2py_disable_session:", "local_hosts.update([ addrinfo[4][0] for addrinfo in getipaddrinfo(env.server_name)]) except (socket.gaierror, TypeError): pass", "= 1 self._environ = new_environ return self._environ def start_response(self, status='200',", "ticket) except: if request.body: request.body.close() # ################################################## # on application", "#parse_get_post_vars(request, environ) # ################################################## # expose wsgi hooks for convenience", "must run from the [application] folder. 
A typical example would", "# Remarks: # calling script has inserted path to script", "if single_cookie: try: request.cookies.load(single_cookie) except Cookie.CookieError: pass # single invalid", "interfaces=None # Rocket is able to use several interfaces -", "build_environment, run_models_in, \\ run_controller_in, run_view_in from gluon.contenttype import contenttype from", "is OFF') else: sock_list.extend([ssl_private_key, ssl_certificate]) if ssl_ca_certificate: sock_list.append(ssl_ca_certificate) logger.info('SSL is", "not server_name: server_name = socket.gethostname() logger.info('starting web server...') rocket.SERVER_NAME =", "socket-tuples as string ): \"\"\" starts the web server. \"\"\"", "tested for rocket parameter correctness # not necessarily completely tested", "process models, controller and view (if required) # ################################################## run_models_in(environment)", "response @property def environ(self): if not hasattr(self, '_environ'): new_environ =", "\"\"\" eget = environ.get current.__dict__.clear() request = Request(environ) response =", "datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S'), environ['REQUEST_METHOD'], environ['PATH_INFO'].replace(',', '%2C'), environ['SERVER_PROTOCOL'], (status_headers[0])[:3], time.time() - time_in,", "import URL, xmlescape from gluon.utils import is_valid_ip_address, getipaddrinfo from gluon.rewrite", "s.stop()) except: pass write_file(self.pid_filename, str(os.getpid())) self.server.start() def stop(self, stoplogging=False): \"\"\"", "# on success, commit database # ################################################## if response.do_not_commit is", "# DO NOT REMOVE PART OF FREEZE PROCESS import gc", "chars = string.letters + string.digits password = ''.join([random.choice(chars) for _", "import string from gluon._compat import Cookie, urllib2 #from thread import", "import copy import sys import time import datetime import signal", "global_settings.web2py_version except: raise 
RuntimeError(\"Cannot determine web2py version\") try: from gluon", "on. will use dir %s', profiler_dir) if not os.path.isdir(profiler_dir): try:", "the path to this file which may be Library.zip #", "applications \"\"\" self.response.status = str(status).split(' ', 1)[0] self.response.headers = dict(headers)", "a simulated environment so it may have weird behavior in", "environ, ticket) if not http_response: return wsgibase(new_environ, responder) if global_settings.web2py_crontype", "elif isinstance(logfilename, str): write_file(logfilename, line, 'a') else: logfilename.write(line) except: pass", "profiler_dir: ret[0] = wsgiapp(environ, responder2) else: import cProfile prof =", "if response.do_not_commit is True: BaseAdapter.close_all_instances(None) elif response.custom_commit: BaseAdapter.close_all_instances(response.custom_commit) else: BaseAdapter.close_all_instances('commit')", "a path is specified change the global variables so that", "import gluon.messageboxhandler logging.gluon = gluon # so we must restore", "logging.config.fileConfig(\"logging.conf\") save_password(password, port) self.pid_filename = pid_filename if not server_name: server_name", "for _ in range(8)]) cpassword = CRYPT()(password)[0] print('******************* IMPORTANT!!! 
************************')", "behavior in some cases \"\"\" def middleware(f): def app(environ, start_response):", "exception on Python 2.5: # NameError: name 'gluon' is not", "gluon.debug.dbg.do_debug(mainpyfile=request.folder) serve_controller(request, response, session) except HTTP as hr: http_response =", "in a call to [function]() in applications/[application]/[controller].py rendered by applications/[application]/views/[controller]/[function].html", "it not set # ################################################## default_headers = [ ('Content-Type', contenttype('.'", "port): \"\"\" Used by main() to save the password in", "session) # set default view, controller can override it response.view", "set(['127.0.0.1', '::ffff:127.0.0.1', '::1']) if not global_settings.web2py_runtime_gae: try: fqdn = socket.getfqdn()", "add_path_first, create_missing_folders, create_missing_app_folders from gluon.globals import current # Remarks: #", "the environment variables First tries 'http_x_forwarded_for', secondly 'remote_addr' if all", "gluon.utils import is_valid_ip_address, getipaddrinfo from gluon.rewrite import load as load_routes,", "the output using a view/template. this function must run from", "# ################################################## # log tickets before rollback if not in", "several interfaces - must be list of socket-tuples as string", "locale ... import gluon.messageboxhandler logging.gluon = gluon # so we", "sock_list.extend([ssl_private_key, ssl_certificate]) if ssl_ca_certificate: sock_list.append(ssl_ca_certificate) logger.info('SSL is ON') app_info =", "url_in! 
if not global_settings.local_hosts: local_hosts = set(['127.0.0.1', '::ffff:127.0.0.1', '::1']) if", "import import_all # DO NOT REMOVE PART OF FREEZE PROCESS", "is changed via the web2py.py -f folder option # main.web2py_path", "cid = env.http_web2py_component_element, is_local = (env.remote_addr in local_hosts and client", "################################################## # on success, commit database # ################################################## if response.do_not_commit", "abspath('parameters_%i.py' % port) if password == '<random>': # make up", "dynamic pages: - /<application>[/<controller>[/<function>[/<sub>]]][.<extension>] The naming conventions are: - application,", "s=self: s.stop()) except: pass write_file(self.pid_filename, str(os.getpid())) self.server.start() def stop(self, stoplogging=False):", "Security Checks: validate URL and session_id here, # accept_language is", "or create new session file # ################################################## if not env.web2py_disable_session:", "web2py_path is changed via the web2py.py -f folder option #", "URL and session_id here, # accept_language is validated in languages", "a wsgi app that does logging and profiling and calls", "either: 1. for static pages: - /<application>/static/<file> 2. for dynamic", "exists(request.folder): if app == rwthread.routes.default_application \\ and app != 'welcome':", "= False, is_https = env.wsgi_url_scheme in HTTPS_SCHEMES or \\ request.env.http_x_forwarded_proto", "False: import import_all # DO NOT REMOVE PART OF FREEZE", "version_info.read().split()[-1].strip() version_info.close() global_settings.web2py_version = raw_version_string web2py_version = global_settings.web2py_version except: raise", "wsgibase(environ, responder): \"\"\" The gluon wsgi application. 
The first function", "error, rollback database # ################################################## try: if response._custom_rollback: response._custom_rollback() else:", "**kargs: \\ self.response.write(escape=False, *args, **kargs) def middleware(self, *middleware_apps): \"\"\" In", "def wsgibase(environ, responder): \"\"\" The gluon wsgi application. The first", "% client) return client def serve_controller(request, response, session): \"\"\" This", "# ################################################## # expose wsgi hooks for convenience # ##################################################", "password if any if exists(password_file): return else: password = ''", "request.env #env.web2py_path = global_settings.applications_parent env.web2py_version = web2py_version #env.update(global_settings) static_file =", "interfaces parameter - see http://packages.python.org/rocket/\" else: raise \"Wrong format for", "session not in db try store session on filesystem #", "environ(self): if not hasattr(self, '_environ'): new_environ = self.wsgi_environ new_environ['wsgi.input'] =", "password.startswith('<pam_user:'): # use the pam password for specified user cpassword", "component requests. 
if request.cid: http_response.headers.setdefault( 'web2py-component-content', 'replace') if request.ajax: if", "\"\"\" eget = env.get g = regex_client.search(eget('http_x_forwarded_for', '')) client =", "return lambda *args, **kargs: \\ self.response.write(escape=False, *args, **kargs) def middleware(self,", "Cookie, urllib2 #from thread import allocate_lock from gluon.fileutils import abspath,", "%s' % ticket) finally: if response and hasattr(response, 'session_file') \\", "re import copy import sys import time import datetime import", "redirect from gluon.globals import Request, Response, Session from gluon.compileapp import", "if static_file: if eget('QUERY_STRING', '').startswith('attachment'): response.headers['Content-Disposition'] \\ = 'attachment' if", "%d %b %Y %H:%M:%S GMT', time.gmtime())), ('Pragma', 'no-cache')] for key,", "= rocket.Rocket(interfaces or tuple(sock_list), method='wsgi', app_info=app_info, min_threads=min_threads, max_threads=max_threads, queue_size=int(request_queue_size), timeout=int(timeout),", "containing applications/ # and routes.py # The two are identical", "addrinfo[4][0] for addrinfo in getipaddrinfo(env.server_name)]) except (socket.gaierror, TypeError): pass global_settings.local_hosts", "= path os.chdir(path) load_routes() for p in (path, abspath('site-packages'), \"\"):", "server...') rocket.SERVER_NAME = server_name rocket.SOCKET_TIMEOUT = socket_timeout sock_list = [ip,", "controller can override it response.view = '%s/%s.%s' % (request.controller, request.function,", "request.tickets_db: ticket = e.log(request) or 'unknown' http_response = \\ HTTP(500,", "accept_language is validated in languages # pattern used to validate", "http://packages.python.org/rocket/\" if path: # if a path is specified change", "client = '::1' else: client = '127.0.0.1' # IPv4 if", "from gluon._compat import Cookie, urllib2 #from thread import allocate_lock from", "is able to use several interfaces - must be list", "environ['REMOTE_ADDR'], 
datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S'), environ['REQUEST_METHOD'], environ['PATH_INFO'].replace(',', '%2C'), environ['SERVER_PROTOCOL'], (status_headers[0])[:3], time.time() -", "if version: response.headers['Cache-Control'] = 'max-age=315360000' response.headers[ 'Expires'] = 'Thu, 31", "This needed to prevent exception on Python 2.5: # NameError:", "is_shell = False, is_scheduler = False, is_https = env.wsgi_url_scheme in", "= env.get g = regex_client.search(eget('http_x_forwarded_for', '')) client = (g.group() or", "logging for subsequent imports import logging import logging.config # This", "OFF') else: sock_list.extend([ssl_private_key, ssl_certificate]) if ssl_ca_certificate: sock_list.append(ssl_ca_certificate) logger.info('SSL is ON')", "self.response.status = str(status).split(' ', 1)[0] self.response.headers = dict(headers) return lambda", "from gluon.contenttype import contenttype from pydal.base import BaseAdapter from gluon.validators", "% (request.controller, request.function, request.extension) # also, make sure the flash", "...) 
# # this is wrong: # web2py_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))", "= regex_client.search(eget('remote_addr', '')) if g: client = g.group() elif env.http_host.startswith('['):", "middleware_apps: app = item(app) def caller(app): return app(self.environ, self.start_response) return", "info - compensates for fcgi missing path_info and query_string -", "global_settings.debugging and app != \"admin\": import gluon.debug # activate the", "################################################## # if session not in db try store session", "change the global variables so that web2py # runs from", "ssl_private_key: logger.info('SSL is off') elif not rocket.ssl: logger.warning('Python \"ssl\" module", "request.ajax: if response.flash: http_response.headers['web2py-component-flash'] = \\ urllib2.quote(xmlescape(response.flash).replace(b'\\n', b'')) if response.js:", "make sure the flash is passed through # ################################################## #", "environment variables First tries 'http_x_forwarded_for', secondly 'remote_addr' if all fails,", "be Library.zip # gluon_parent is the directory containing gluon, web2py.py,", "garbage collection logic # ################################################## # set default headers it", "or 'unrecoverable' http_response = \\ HTTP(500, rwthread.routes.error_message_ticket % dict(ticket=ticket), web2py_error='ticket", "invalid cookie ignore # ################################################## # try load session or", "hasattr(response, 'session_file') \\ and response.session_file: response.session_file.close() session._unlock(response) http_response, new_environ =", "user cpassword = password[1:-1] else: # use provided password cpassword", "# run controller # ################################################## if global_settings.debugging and app !=", "web2py_path path = os.path.normpath(path) web2py_path = path global_settings.applications_parent = path", "headers # 
################################################## session._fixup_before_save() http_response.cookies2headers(response.cookies) ticket = None except RestrictedError", "\"\"\" Guesses the client address from the environment variables First", "will use dir %s', profiler_dir) if not os.path.isdir(profiler_dir): try: os.makedirs(profiler_dir)", "= 0 # gc timer # Security Checks: validate URL", "sock_list = [ip, port] if not ssl_certificate or not ssl_private_key:", "HTTP(response.status, page, **response.headers) class LazyWSGI(object): def __init__(self, environ, request, response):", "ticket = e.log(request) or 'unknown' http_response = \\ HTTP(500, rwthread.routes.error_message_ticket", "header so client can distinguish component requests. if request.cid: http_response.headers.setdefault(", "dir %s\" % profiler_dir) def app_with_logging(environ, responder): \"\"\" a wsgi", "open( filepath, 'w' ) filehandle.close() os.unlink(filepath) except IOError: raise BaseException(\"Unable", "requests = ('requests' in globals()) and (requests + 1) %", "stop cron and the web server \"\"\" newcron.stopcron() self.server.stop(stoplogging) try:", "is_valid_ip_address, getipaddrinfo from gluon.rewrite import load as load_routes, url_in, THREAD_LOCAL", "also contain '-', '=', '.' and '/' \"\"\" eget =", "secondly 'remote_addr' if all fails, assume '127.0.0.1' or '::1' (running", "except: pass e = RestrictedError('Framework', '', '', locals()) ticket =", "rocket parameter correctness # not necessarily completely tested (e.g. 
content", "exists(disabled): five0three = os.path.join(request.folder,'static','503.html') if os.path.exists(five0three): raise HTTP(503, file(five0three, 'r').read())", "ticket) finally: if response and hasattr(response, 'session_file') \\ and response.session_file:", "caller=caller, app=app: caller(app) return middleware def wsgibase(environ, responder): \"\"\" The", "self._environ = new_environ return self._environ def start_response(self, status='200', headers=[], exec_info=None):", "The naming conventions are: - application, controller, function and extension", "see http://packages.python.org/rocket/\" else: raise \"Wrong format for rocket interfaces parameter", "The two are identical unless web2py_path is changed via the", "_handler = rwthread.routes.error_handler redirect(URL(_handler['application'], _handler['controller'], _handler['function'], args=app)) else: raise HTTP(404,", "client == env.remote_addr), is_shell = False, is_scheduler = False, is_https", "request, response) # ################################################## # load cookies # ################################################## if", "must be done after trying to commit database! # ##################################################", "rwthread.routes.error_message_ticket % dict(ticket=ticket), web2py_error='ticket %s' % ticket) except: if request.body:", "and extension may only contain `[a-zA-Z0-9_]` - file and sub", "self.server = rocket.Rocket(interfaces or tuple(sock_list), method='wsgi', app_info=app_info, min_threads=min_threads, max_threads=max_threads, queue_size=int(request_queue_size),", "item(app) def caller(app): return app(self.environ, self.start_response) return lambda caller=caller, app=app:", "typical example would be the call to the url /[application]/[controller]/[function]", "apache mod_wsgi (or any WSGI-compatible server). 
- fills request with", "store session in database # ################################################## if not env.web2py_disable_session: session._try_store_in_db(request,", "redirect(URL(_handler['application'], _handler['controller'], _handler['function'], args=app)) else: raise HTTP(404, rwthread.routes.error_message % 'invalid", "Cookie.CookieError: pass # single invalid cookie ignore # ################################################## #", ") def start(self): \"\"\" start the web server \"\"\" try:", "store it in db if request.tickets_db: ticket = e.log(request) or", "if exists(\"logging.conf\"): logging.config.fileConfig(\"logging.conf\") save_password(password, port) self.pid_filename = pid_filename if not", "web2py_error='ticket %s' % ticket) finally: if response and hasattr(response, 'session_file')", "ip-format) import types if isinstance(interfaces, list): for i in interfaces:", "DO NOT REMOVE PART OF FREEZE PROCESS import gc import", "run_models_in, \\ run_controller_in, run_view_in from gluon.contenttype import contenttype from pydal.base", "is missing logging.basicConfig() logger = logging.getLogger(\"web2py\") from gluon.restricted import RestrictedError", "used to validate client address regex_client = re.compile('[\\w\\-:]+(\\.[\\w\\-]+)*\\.?') # ##", "= 'max-age=315360000' response.headers[ 'Expires'] = 'Thu, 31 Dec 2037 23:59:59", "no-cache, must-revalidate, post-check=0, pre-check=0'), ('Expires', time.strftime('%a, %d %b %Y %H:%M:%S", "'invalid request', web2py_error='invalid application') elif not request.is_local and exists(disabled): five0three", "run_models_in(environment) response._view_environment = copy.copy(environment) page = run_controller_in(request.controller, request.function, environment) if", "to save the password in the parameters_port.py file. 
\"\"\" password_file", "# set default headers it not set # ################################################## default_headers", "HTTPS_SCHEMES or \\ request.env.http_x_forwarded_proto in HTTPS_SCHEMES \\ or env.https ==", "http_response.headers['web2py-component-command'] = \\ urllib2.quote(response.js.replace('\\n', '')) # ################################################## # store cookies", "env.get g = regex_client.search(eget('http_x_forwarded_for', '')) client = (g.group() or '').split(',')[0]", "debugger gluon.debug.dbg.do_debug(mainpyfile=request.folder) serve_controller(request, response, session) except HTTP as hr: http_response", "and calls wsgibase \"\"\" status_headers = [] def responder2(s, h):", "'_environ'): new_environ = self.wsgi_environ new_environ['wsgi.input'] = self.request.body new_environ['wsgi.version'] = 1", "if not in DB if not request.tickets_db: ticket = e.log(request)", "controller, and then tries to render the output using a", "session._fixup_before_save() http_response.cookies2headers(response.cookies) ticket = None except RestrictedError as e: if", "= global_settings.applications_parent # backward compatibility create_missing_folders() # set up logging", "def start_response(self, status='200', headers=[], exec_info=None): \"\"\" in controller you can", "= False, is_scheduler = False, is_https = env.wsgi_url_scheme in HTTPS_SCHEMES", "raise HTTP(400, \"Bad Request (request.client=%s)\" % client) return client def", "# -*- coding: utf-8 -*- \"\"\" | This file is", "be the call to the url /[application]/[controller]/[function] that would result", "e.log(request) or 'unknown' http_response = \\ HTTP(500, rwthread.routes.error_message_ticket % dict(ticket=ticket),", "thread import allocate_lock from gluon.fileutils import abspath, write_file from gluon.settings", "import allocate_lock from gluon.fileutils import abspath, write_file from gluon.settings import", "gluon.fileutils import abspath, write_file from gluon.settings import 
global_settings from gluon.utils", "for dynamic pages: - /<application>[/<controller>[/<function>[/<sub>]]][.<extension>] The naming conventions are: -", "\"<html><body><h1>Temporarily down for maintenance</h1></body></html>\") # ################################################## # build missing folders", "for IPV6 try: version_info = open(pjoin(global_settings.gluon_parent, 'VERSION'), 'r') raw_version_string =", "session._try_store_in_db(request, response) # ################################################## # on success, commit database #", "env.http_cookie.split(';'): single_cookie = single_cookie.strip() if single_cookie: try: request.cookies.load(single_cookie) except Cookie.CookieError:", "elif not rocket.ssl: logger.warning('Python \"ssl\" module unavailable. SSL is OFF')", "rwthread.routes.error_handler redirect(URL(_handler['application'], _handler['controller'], _handler['function'], args=app)) else: raise HTTP(404, rwthread.routes.error_message %", "# get the GET and POST data # ################################################## #parse_get_post_vars(request,", "to store profile files \"\"\" if profilerfilename is not None:", "b, s=self: s.stop()) except: pass write_file(self.pid_filename, str(os.getpid())) self.server.start() def stop(self,", "= Request(environ) response = Response() session = Session() env =", "if global_settings.debugging and app != \"admin\": import gluon.debug # activate", "items # ################################################## app = request.application # must go after", "calling script has inserted path to script directory into sys.path", "or 'unknown' http_response = \\ HTTP(500, rwthread.routes.error_message_ticket % dict(ticket=ticket), web2py_error='ticket", "else: password = '' elif password.startswith('<pam_user:'): # use the pam", "new session file # ################################################## if not env.web2py_disable_session: session.connect(request, response)", "cProfile.Profile() prof.enable() ret[0] = 
wsgiapp(environ, responder2) prof.disable() destfile = pjoin(profiler_dir,", "destfile = pjoin(profiler_dir, \"req_%s.prof\" % web2py_uuid()) prof.dump_stats(destfile) try: line =", "this is wrong: # web2py_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # because we", "if all fails, assume '127.0.0.1' or '::1' (running locally) \"\"\"", "\\ request.env.http_x_forwarded_proto in HTTPS_SCHEMES \\ or env.https == 'on' )", "not http_response: return wsgibase(new_environ, responder) if global_settings.web2py_crontype == 'soft': newcron.softcron(global_settings.applications_parent).start()", "Rocket') load_routes() HTTPS_SCHEMES = set(('https', 'HTTPS')) def get_client(env): \"\"\" Guesses", "filehandle = open( filepath, 'w' ) filehandle.close() os.unlink(filepath) except IOError:", "gluon.messageboxhandler logging.gluon = gluon # so we must restore it!", "hasattr(self, '_environ'): new_environ = self.wsgi_environ new_environ['wsgi.input'] = self.request.body new_environ['wsgi.version'] =", "= \\ urllib2.quote(xmlescape(response.flash).replace(b'\\n', b'')) if response.js: http_response.headers['web2py-component-command'] = \\ urllib2.quote(response.js.replace('\\n',", "to account for IPV6 try: version_info = open(pjoin(global_settings.gluon_parent, 'VERSION'), 'r')", "missing folders # ################################################## create_missing_app_folders(request) # ################################################## # get the", "request with info - the environment variables, replacing '.' with", "with '_' - adds web2py path and version info -", "h): \"\"\" wsgi responder app \"\"\" status_headers.append(s) status_headers.append(h) return responder(s,", "h) time_in = time.time() ret = [0] if not profiler_dir:", "client def serve_controller(request, response, session): \"\"\" This function is used", "to commit database! 
# ################################################## if not env.web2py_disable_session: session._try_store_in_cookie_or_file(request, response)", "result in a call to [function]() in applications/[application]/[controller].py rendered by", "middleware2, ...) to decorate actions with WSGI middleware. actions must", "contain '-', '=', '.' and '/' \"\"\" eget = environ.get", "'attachment' if version: response.headers['Cache-Control'] = 'max-age=315360000' response.headers[ 'Expires'] = 'Thu,", "= '127.0.0.1' # IPv4 if not is_valid_ip_address(client): raise HTTP(400, \"Bad", "globals()) and (requests + 1) % 100 or 0 if", "'DISABLED') if not exists(request.folder): if app == rwthread.routes.default_application \\ and", "the import Tkinter in messageboxhandler, changes locale ... import gluon.messageboxhandler", "responder(s, h) time_in = time.time() ret = [0] if not", "= cProfile.Profile() prof.enable() ret[0] = wsgiapp(environ, responder2) prof.disable() destfile =", "rocket.Rocket(interfaces or tuple(sock_list), method='wsgi', app_info=app_info, min_threads=min_threads, max_threads=max_threads, queue_size=int(request_queue_size), timeout=int(timeout), handle_signals=False,", "BaseException(\"Unable to write to dir %s\" % profiler_dir) def app_with_logging(environ,", "header variables # parse rewritten URL # serve file if", "controller, function and extension may only contain `[a-zA-Z0-9_]` - file", "args=app)) else: raise HTTP(404, rwthread.routes.error_message % 'invalid request', web2py_error='invalid application')", "web2py_error='ticket %s' % ticket) except: if request.body: request.body.close() # ##################################################", "# this is wrong: # web2py_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # because", "#!/bin/env python # -*- coding: utf-8 -*- \"\"\" | This", "http_response.to(responder, env=env) def save_password(password, port): \"\"\" Used by main() to", "the current password if any if exists(password_file): 
return else: password", "password = '' elif password.startswith('<pam_user:'): # use the pam password", "ret = [0] if not profiler_dir: ret[0] = wsgiapp(environ, responder2)", "not global_settings.web2py_runtime_gae: try: fqdn = socket.getfqdn() local_hosts.add(socket.gethostname()) local_hosts.add(fqdn) local_hosts.update([ addrinfo[4][0]", "global_settings.web2py_runtime_gae: logger.warn('unable to import Rocket') load_routes() HTTPS_SCHEMES = set(('https', 'HTTPS'))", "'max-age=315360000' response.headers[ 'Expires'] = 'Thu, 31 Dec 2037 23:59:59 GMT'", "raise \"Wrong format for rocket interfaces parameter - see http://packages.python.org/rocket/\"", "not env.web2py_disable_session: session._try_store_in_db(request, response) # ################################################## # on success, commit", "using a view/template. this function must run from the [application]", "any WSGI-compatible server). - fills request with info - the", "# IMPORTANT, web2py requires locale \"C\" exists = os.path.exists pjoin", "wsgi application. 
The first function called when a page is", "response.custom_commit: BaseAdapter.close_all_instances(response.custom_commit) else: BaseAdapter.close_all_instances('commit') # ################################################## # if session not", "BaseException(\"Can't create dir %s\" % profiler_dir) filepath = pjoin(profiler_dir, 'wtest')", "BaseAdapter.close_all_instances(response.custom_commit) else: BaseAdapter.close_all_instances('commit') # ################################################## # if session not in", "must be tested for rocket parameter correctness # not necessarily", "*args, **kargs) def middleware(self, *middleware_apps): \"\"\" In you controller use::", "current # Remarks: # calling script has inserted path to", "copy import sys import time import datetime import signal import", "''.join([random.choice(chars) for _ in range(8)]) cpassword = CRYPT()(password)[0] print('******************* IMPORTANT!!!", "messageboxhandler, changes locale ... import gluon.messageboxhandler logging.gluon = gluon #", "s.stop()) signal.signal(signal.SIGINT, lambda a, b, s=self: s.stop()) except: pass write_file(self.pid_filename,", "with info - the environment variables, replacing '.' with '_'", "if not ssl_certificate or not ssl_private_key: logger.info('SSL is off') elif", "directory containing gluon, web2py.py, logging.conf # and the handlers. #", "################################################## # build environment for controller and view # ##################################################", "= list(local_hosts) else: local_hosts = global_settings.local_hosts client = get_client(env) x_req_with", "= abspath('applications', app) + os.sep, ajax = x_req_with == 'xmlhttprequest',", "on application error, rollback database # ################################################## # log tickets", "status_headers = [] def responder2(s, h): \"\"\" wsgi responder app", "for static pages: - /<application>/static/<file> 2. 
for dynamic pages: -", "from __future__ import print_function if False: import import_all # DO", "not ssl_private_key: logger.info('SSL is off') elif not rocket.ssl: logger.warning('Python \"ssl\"", "None except RestrictedError as e: if request.body: request.body.close() # ##################################################", "in db, reconnect and store it in db if request.tickets_db:", "gluon wsgi application. The first function called when a page", "\"\"\" stop cron and the web server \"\"\" newcron.stopcron() self.server.stop(stoplogging)", "client address regex_client = re.compile('[\\w\\-:]+(\\.[\\w\\-]+)*\\.?') # ## to account for", "regex_client = re.compile('[\\w\\-:]+(\\.[\\w\\-]+)*\\.?') # ## to account for IPV6 try:", "is specified, it must be tested for rocket parameter correctness", "# ################################################## session._fixup_before_save() http_response.cookies2headers(response.cookies) ticket = None except RestrictedError as", "# and the handlers. # applications_parent (web2py_path) is the directory", "if ssl_ca_certificate: sock_list.append(ssl_ca_certificate) logger.info('SSL is ON') app_info = {'wsgi_app': appfactory(wsgibase,", "client can distinguish component requests. if request.cid: http_response.headers.setdefault( 'web2py-component-content', 'replace')", "application error, rollback database # ################################################## try: if response._custom_rollback: response._custom_rollback()", "# (\"\", gluon_parent/site-packages, gluon_parent, ...) # # this is wrong:", "uses a simulated environment so it may have weird behavior", "new_environ = self.wsgi_environ new_environ['wsgi.input'] = self.request.body new_environ['wsgi.version'] = 1 self._environ", "is_local = (env.remote_addr in local_hosts and client == env.remote_addr), is_shell", "distinguish component requests. 
if request.cid: http_response.headers.setdefault( 'web2py-component-content', 'replace') if request.ajax:", "exists(ssl_certificate): logger.warning('unable to open SSL certificate. SSL is OFF') elif", "The first function called when a page is requested (static", "set default headers it not set # ################################################## default_headers =", "path global_settings.applications_parent = path os.chdir(path) load_routes() for p in (path,", "in some cases \"\"\" def middleware(f): def app(environ, start_response): data", "set sys.path to # (\"\", gluon_parent/site-packages, gluon_parent, ...) # #", "env.http_cookie: for single_cookie in env.http_cookie.split(';'): single_cookie = single_cookie.strip() if single_cookie:", "single_cookie in env.http_cookie.split(';'): single_cookie = single_cookie.strip() if single_cookie: try: request.cookies.load(single_cookie)", "fcgi missing path_info and query_string # select rewrite parameters #", "database # ################################################## # log tickets before rollback if not", "method='wsgi', app_info=app_info, min_threads=min_threads, max_threads=max_threads, queue_size=int(request_queue_size), timeout=int(timeout), handle_signals=False, ) def start(self):", "a page is requested (static or dynamic). It can be", "time import datetime import signal import socket import random import", "if not exists(request.folder): if app == rwthread.routes.default_application \\ and app", "be tested for rocket parameter correctness # not necessarily completely", "actions with WSGI middleware. actions must return strings. 
uses a", "%H:%M:%S GMT', time.gmtime())), ('Pragma', 'no-cache')] for key, value in default_headers:", "want the path to this file which may be Library.zip", "port=8000, password='', pid_filename='httpserver.pid', log_filename='httpserver.log', profiler_dir=None, ssl_certificate=None, ssl_private_key=None, ssl_ca_certificate=None, min_threads=None, max_threads=None,", "**response.headers) class LazyWSGI(object): def __init__(self, environ, request, response): self.wsgi_environ =", "= g.group() elif env.http_host.startswith('['): # IPv6 client = '::1' else:", "# ################################################## #parse_get_post_vars(request, environ) # ################################################## # expose wsgi hooks", "profiler_dir = abspath(profiler_dir) logger.warn('profiler is on. will use dir %s',", "to prevent exception on Python 2.5: # NameError: name 'gluon'", "= '::1' else: client = '127.0.0.1' # IPv4 if not", "import time import datetime import signal import socket import random", "runs the function in the controller, and then tries to", "save the password in the parameters_port.py file. \"\"\" password_file =", "if not isinstance(i, tuple): raise \"Wrong format for rocket interfaces", "WSGI-compatible server). - fills request with info - the environment", "'.' and '/' \"\"\" eget = environ.get current.__dict__.clear() request =", "run_view_in(response._view_environment) # logic to garbage collect after exec, not always,", "allocate_lock from gluon.fileutils import abspath, write_file from gluon.settings import global_settings", "local_hosts = global_settings.local_hosts client = get_client(env) x_req_with = str(env.http_x_requested_with).lower() cmd_opts", "wrong: # web2py_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # because we do not", "the parameters_port.py file. 
\"\"\" password_file = abspath('parameters_%i.py' % port) if", "files \"\"\" if profilerfilename is not None: raise BaseException(\"Deprecated API\")", "server_name=None, request_queue_size=5, timeout=10, socket_timeout=1, shutdown_timeout=None, # Rocket does not use", "except: raise RuntimeError(\"Cannot determine web2py version\") try: from gluon import", "there instead of cwd or os.environ['web2py_path'] global web2py_path path =", "\"\"\" try: signal.signal(signal.SIGTERM, lambda a, b, s=self: s.stop()) signal.signal(signal.SIGINT, lambda", "#env.update(global_settings) static_file = False http_response = None try: try: try:", "path_info and query_string - validates the path in url The", "in default_headers: response.headers.setdefault(key, value) raise HTTP(response.status, page, **response.headers) class LazyWSGI(object):", "and response.session_file: response.session_file.close() session._unlock(response) http_response, new_environ = try_rewrite_on_error( http_response, request,", "file(five0three, 'r').read()) else: raise HTTP(503, \"<html><body><h1>Temporarily down for maintenance</h1></body></html>\") #", "IPv4 if not is_valid_ip_address(client): raise HTTP(400, \"Bad Request (request.client=%s)\" %", "middleware(f): def app(environ, start_response): data = f() start_response(self.response.status, self.response.headers.items()) if", "to validate client address regex_client = re.compile('[\\w\\-:]+(\\.[\\w\\-]+)*\\.?') # ## to", "= self.request.body new_environ['wsgi.version'] = 1 self._environ = new_environ return self._environ", "defined # See http://bugs.python.org/issue1436 # attention!, the import Tkinter in", "WSGI middleware. actions must return strings. 
uses a simulated environment", "Checks: validate URL and session_id here, # accept_language is validated", "1)[0] self.response.headers = dict(headers) return lambda *args, **kargs: \\ self.response.write(escape=False,", "return http_response.to(responder, env=env) def save_password(password, port): \"\"\" Used by main()", "this must be done after trying to commit database! #", "missing logging.basicConfig() logger = logging.getLogger(\"web2py\") from gluon.restricted import RestrictedError from", "logging.config # This needed to prevent exception on Python 2.5:", "env.web2py_status_code or response.status if static_file: if eget('QUERY_STRING', '').startswith('attachment'): response.headers['Content-Disposition'] \\", "if not request.tickets_db: ticket = e.log(request) or 'unknown' # rollback", "write_file(logfilename, line, 'a') else: logfilename.write(line) except: pass return ret[0] return", "RestrictedError from gluon.http import HTTP, redirect from gluon.globals import Request,", "where to store profile files \"\"\" if profilerfilename is not", "pam password for specified user cpassword = password[1:-1] else: #", "can use: - request.wsgi.environ - request.wsgi.start_response to call third party", "- the environment variables, replacing '.' 
with '_' - adds", "gluon.globals import current # Remarks: # calling script has inserted", "os.makedirs(profiler_dir) except: raise BaseException(\"Can't create dir %s\" % profiler_dir) filepath", "default_headers: response.headers.setdefault(key, value) raise HTTP(response.status, page, **response.headers) class LazyWSGI(object): def", "os.path.join try: logging.config.fileConfig(abspath(\"logging.conf\")) except: # fails on GAE or when", "== '<recycle>': # reuse the current password if any if", "# reuse the current password if any if exists(password_file): return", "applications_parent (for backward compatibility) web2py_path = global_settings.applications_parent # backward compatibility", "web2py requires locale \"C\" exists = os.path.exists pjoin = os.path.join", "environ) response.status = env.web2py_status_code or response.status if static_file: if eget('QUERY_STRING',", "isinstance(page, dict): response._vars = page response._view_environment.update(page) page = run_view_in(response._view_environment) #", "# on application error, rollback database # ################################################## try: if", "specified change the global variables so that web2py # runs", "validated in languages # pattern used to validate client address", "dir %s', profiler_dir) if not os.path.isdir(profiler_dir): try: os.makedirs(profiler_dir) except: raise", "string.letters + string.digits password = ''.join([random.choice(chars) for _ in range(8)])", "- /<application>[/<controller>[/<function>[/<sub>]]][.<extension>] The naming conventions are: - application, controller, function", "% port) if password == '<random>': # make up a", "(request.controller, request.function, request.extension) # also, make sure the flash is", "instead of cwd or os.environ['web2py_path'] global web2py_path path = os.path.normpath(path)", "\"\"\" generates a wsgi application that does logging and profiling", "logger.warning('Python \"ssl\" module unavailable. 
SSL is OFF') elif not exists(ssl_certificate):", "page. It first runs all models, then runs the function", "view (if required) # ################################################## run_models_in(environment) response._view_environment = copy.copy(environment) page", "tuple): raise \"Wrong format for rocket interfaces parameter - see", "(path, abspath('site-packages'), \"\"): add_path_first(p) if exists(\"logging.conf\"): logging.config.fileConfig(\"logging.conf\") save_password(password, port) self.pid_filename", "web2py version\") try: from gluon import rocket except: if not", ") filehandle.close() os.unlink(filepath) except IOError: raise BaseException(\"Unable to write to", "by applications/[application]/views/[controller]/[function].html \"\"\" # ################################################## # build environment for controller", "hooks for convenience # ################################################## request.wsgi = LazyWSGI(environ, request, response)", "HTTP(400, \"Bad Request (request.client=%s)\" % client) return client def serve_controller(request,", "locale locale.setlocale(locale.LC_CTYPE, \"C\") # IMPORTANT, web2py requires locale \"C\" exists", "applications/[application]/views/[controller]/[function].html \"\"\" # ################################################## # build environment for controller and", "% web2py_uuid()) prof.dump_stats(destfile) try: line = '%s, %s, %s, %s,", "in applications/[application]/[controller].py rendered by applications/[application]/views/[controller]/[function].html \"\"\" # ################################################## # build", "request items # ################################################## app = request.application # must go", "then tries to render the output using a view/template. 
this", "rocket except: if not global_settings.web2py_runtime_gae: logger.warn('unable to import Rocket') load_routes()", "client = get_client(env) x_req_with = str(env.http_x_requested_with).lower() cmd_opts = global_settings.cmd_options request.update(", "#env.web2py_path = global_settings.applications_parent env.web2py_version = web2py_version #env.update(global_settings) static_file = False", "run_controller_in(request.controller, request.function, environment) if isinstance(page, dict): response._vars = page response._view_environment.update(page)", "NOT REMOVE PART OF FREEZE PROCESS import gc import os", "import Tkinter in messageboxhandler, changes locale ... import gluon.messageboxhandler logging.gluon", "[data] for item in middleware_apps: app = item(app) def caller(app):", "response.status if static_file: if eget('QUERY_STRING', '').startswith('attachment'): response.headers['Content-Disposition'] \\ = 'attachment'", "profile files \"\"\" if profilerfilename is not None: raise BaseException(\"Deprecated", "# logic to garbage collect after exec, not always, once", "where to store apache-compatible requests log profiler_dir: where to store", "= self.wsgi_environ new_environ['wsgi.input'] = self.request.body new_environ['wsgi.version'] = 1 self._environ =", "success, try store session in database # ################################################## if not", "if session not in db try store session on filesystem", "= environ.get current.__dict__.clear() request = Request(environ) response = Response() session", "global web2py_path path = os.path.normpath(path) web2py_path = path global_settings.applications_parent =", "url path must be either: 1. 
for static pages: -", "version info - compensates for fcgi missing path_info and query_string", "client = g.group() elif env.http_host.startswith('['): # IPv6 client = '::1'", "activate the debugger gluon.debug.dbg.do_debug(mainpyfile=request.folder) serve_controller(request, response, session) except HTTP as", "HttpServer(object): \"\"\" the web2py web server (Rocket) \"\"\" def __init__(", "must restore it! Thanks ozancag import locale locale.setlocale(locale.LC_CTYPE, \"C\") #", "import datetime import signal import socket import random import string", "default headers it not set # ################################################## default_headers = [", "in db if request.tickets_db: ticket = e.log(request) or 'unknown' http_response", "set up logging for subsequent imports import logging import logging.config", "'<random>': # make up a new password chars = string.letters", "view/template. this function must run from the [application] folder. A", "env.server_name: local_hosts.add(env.server_name) local_hosts.update([ addrinfo[4][0] for addrinfo in getipaddrinfo(env.server_name)]) except (socket.gaierror,", "changed via the web2py.py -f folder option # main.web2py_path is", "socket.getfqdn() local_hosts.add(socket.gethostname()) local_hosts.add(fqdn) local_hosts.update([ addrinfo[4][0] for addrinfo in getipaddrinfo(fqdn)]) if", "passed through # ################################################## # process models, controller and view", "the web2py web server (Rocket) \"\"\" def __init__( self, ip='1172.16.17.32',", "profiling and calls wsgibase Args: wsgiapp: the base application logfilename:", "local_hosts and client == env.remote_addr), is_shell = False, is_scheduler =", "ssl_ca_certificate: sock_list.append(ssl_ca_certificate) logger.info('SSL is ON') app_info = {'wsgi_app': appfactory(wsgibase, log_filename,", "environ) = url_in(request, environ) response.status = env.web2py_status_code or response.status if", "trying to commit database! 
# ################################################## if not env.web2py_disable_session: session._try_store_in_cookie_or_file(request,", "not necessarily completely tested (e.g. content of tuples or ip-format)", "necessarily completely tested (e.g. content of tuples or ip-format) import", "self.wsgi_environ = environ self.request = request self.response = response @property", "log_filename='httpserver.log', profiler_dir=None, ssl_certificate=None, ssl_private_key=None, ssl_ca_certificate=None, min_threads=None, max_threads=None, server_name=None, request_queue_size=5, timeout=10,", "pass e = RestrictedError('Framework', '', '', locals()) ticket = e.log(request)", "= [ ('Content-Type', contenttype('.' + request.extension)), ('Cache-Control', 'no-store, no-cache, must-revalidate,", "gluon.globals import Request, Response, Session from gluon.compileapp import build_environment, run_models_in,", "= \\ HTTP(500, rwthread.routes.error_message_ticket % dict(ticket=ticket), web2py_error='ticket %s' % ticket)", "if not global_settings.web2py_runtime_gae: try: fqdn = socket.getfqdn() local_hosts.add(socket.gethostname()) local_hosts.add(fqdn) local_hosts.update([", "is passed through # ################################################## # process models, controller and", "= global_settings.cmd_options request.update( client = client, folder = abspath('applications', app)", "validate client address regex_client = re.compile('[\\w\\-:]+(\\.[\\w\\-]+)*\\.?') # ## to account", "and the web server \"\"\" newcron.stopcron() self.server.stop(stoplogging) try: os.unlink(self.pid_filename) except:", "# handle fcgi missing path_info and query_string # select rewrite", "# applications_parent (web2py_path) is the directory containing applications/ # and", "self, ip='1172.16.17.32', port=8000, password='', pid_filename='httpserver.pid', log_filename='httpserver.log', profiler_dir=None, ssl_certificate=None, ssl_private_key=None, ssl_ca_certificate=None,", "See 
http://bugs.python.org/issue1436 # attention!, the import Tkinter in messageboxhandler, changes", "BaseAdapter from gluon.validators import CRYPT from gluon.html import URL, xmlescape", "responder2) else: import cProfile prof = cProfile.Profile() prof.enable() ret[0] =", "\"ssl\" module unavailable. SSL is OFF') elif not exists(ssl_certificate): logger.warning('unable", "not defined # See http://bugs.python.org/issue1436 # attention!, the import Tkinter", "fp = open(password_file, 'w') if password: fp.write('password=\"%s\"\\n' % cpassword) else:", "# ################################################## fixup_missing_path_info(environ) (static_file, version, environ) = url_in(request, environ) response.status", "environ['REQUEST_METHOD'], environ['PATH_INFO'].replace(',', '%2C'), environ['SERVER_PROTOCOL'], (status_headers[0])[:3], time.time() - time_in, ) if", "environment) if isinstance(page, dict): response._vars = page response._view_environment.update(page) page =", "= get_client(env) x_req_with = str(env.http_x_requested_with).lower() cmd_opts = global_settings.cmd_options request.update( client", "db, reconnect and store it in db if request.tickets_db: ticket", "is \"%s\"' % password) print('*********************************************************') elif password == '<recycle>': #", "build_environment(request, response, session) # set default view, controller can override", "import socket import random import string from gluon._compat import Cookie,", "disabled = pjoin(request.folder, 'DISABLED') if not exists(request.folder): if app ==", "to garbage collect after exec, not always, once every 100", "return client def serve_controller(request, response, session): \"\"\" This function is", "except: if not global_settings.web2py_runtime_gae: logger.warn('unable to import Rocket') load_routes() HTTPS_SCHEMES", "cwd or os.environ['web2py_path'] global web2py_path path = os.path.normpath(path) web2py_path =", "any if exists(password_file): return else: password = 
'' elif password.startswith('<pam_user:'):", "then runs the function in the controller, and then tries", "environ) # ################################################## # expose wsgi hooks for convenience #", "return middleware def wsgibase(environ, responder): \"\"\" The gluon wsgi application.", "== '<random>': # make up a new password chars =", "the web2py Web Framework | Copyrighted by <NAME> <<EMAIL>> |", "BaseAdapter.close_all_instances('rollback') # if tickets in db, reconnect and store it", "and view (if required) # ################################################## run_models_in(environment) response._view_environment = copy.copy(environment)", "compatibility) web2py_path = global_settings.applications_parent # backward compatibility create_missing_folders() # set", "file is part of the web2py Web Framework | Copyrighted", "current password if any if exists(password_file): return else: password =", "def __init__(self, environ, request, response): self.wsgi_environ = environ self.request =", "and query_string - validates the path in url The url", "specified, it must be tested for rocket parameter correctness #", "+ string.digits password = ''.join([random.choice(chars) for _ in range(8)]) cpassword", "', 1)[0] self.response.headers = dict(headers) return lambda *args, **kargs: \\", "application logfilename: where to store apache-compatible requests log profiler_dir: where", "urllib2.quote(xmlescape(response.flash).replace(b'\\n', b'')) if response.js: http_response.headers['web2py-component-command'] = \\ urllib2.quote(response.js.replace('\\n', '')) #", "datetime import signal import socket import random import string from", "wsgi hooks for convenience # ################################################## request.wsgi = LazyWSGI(environ, request,", "handlers. 
# applications_parent (web2py_path) is the directory containing applications/ #", "\"\"\" from __future__ import print_function if False: import import_all #", "min_threads=min_threads, max_threads=max_threads, queue_size=int(request_queue_size), timeout=int(timeout), handle_signals=False, ) def start(self): \"\"\" start", "queue_size=int(request_queue_size), timeout=int(timeout), handle_signals=False, ) def start(self): \"\"\" start the web", "exec, not always, once every 100 requests global requests requests", "in globals()) and (requests + 1) % 100 or 0", "ret[0] return app_with_logging class HttpServer(object): \"\"\" the web2py web server", "because we do not want the path to this file", "gluon.http import HTTP, redirect from gluon.globals import Request, Response, Session", "signal import socket import random import string from gluon._compat import", "- validates the path in url The url path must", "= RestrictedError('Framework', '', '', locals()) ticket = e.log(request) or 'unrecoverable'", "create dir %s\" % profiler_dir) filepath = pjoin(profiler_dir, 'wtest') try:", "################################################## # load cookies # ################################################## if env.http_cookie: for single_cookie", "fixup_missing_path_info(environ) (static_file, version, environ) = url_in(request, environ) response.status = env.web2py_status_code", "tuples or ip-format) import types if isinstance(interfaces, list): for i", "# ################################################## # on application error, rollback database # ##################################################", "# Security Checks: validate URL and session_id here, # accept_language", "2. 
for dynamic pages: - /<application>[/<controller>[/<function>[/<sub>]]][.<extension>] The naming conventions are:", "response._custom_rollback() else: BaseAdapter.close_all_instances('rollback') # if tickets in db, reconnect and", "self._environ def start_response(self, status='200', headers=[], exec_info=None): \"\"\" in controller you", "open SSL private key. SSL is OFF') else: sock_list.extend([ssl_private_key, ssl_certificate])", "f() start_response(self.response.status, self.response.headers.items()) if isinstance(data, list): return data return [data]", "see http://packages.python.org/rocket/\" if path: # if a path is specified", "cpassword = CRYPT()(password)[0] fp = open(password_file, 'w') if password: fp.write('password=\"%s\"\\n'", "global_settings.local_hosts client = get_client(env) x_req_with = str(env.http_x_requested_with).lower() cmd_opts = global_settings.cmd_options", "is ON') app_info = {'wsgi_app': appfactory(wsgibase, log_filename, profiler_dir)} self.server =", "contain `[a-zA-Z0-9_]` - file and sub may also contain '-',", "**kargs) def middleware(self, *middleware_apps): \"\"\" In you controller use:: @request.wsgi.middleware(middleware1,", "-*- coding: utf-8 -*- \"\"\" | This file is part", "time.time() - time_in, ) if not logfilename: sys.stdout.write(line) elif isinstance(logfilename,", "else: logfilename.write(line) except: pass return ret[0] return app_with_logging class HttpServer(object):", "controller and view # ################################################## environment = build_environment(request, response, session)", "'::1']) if not global_settings.web2py_runtime_gae: try: fqdn = socket.getfqdn() local_hosts.add(socket.gethostname()) local_hosts.add(fqdn)", "'soft': newcron.softcron(global_settings.applications_parent).start() return http_response.to(responder, env=env) def save_password(password, port): \"\"\" Used", "end garbage collection logic # ################################################## # set default headers", "of 
socket-tuples as string ): \"\"\" starts the web server.", "profiling and calls wsgibase \"\"\" status_headers = [] def responder2(s,", "getipaddrinfo(fqdn)]) if env.server_name: local_hosts.add(env.server_name) local_hosts.update([ addrinfo[4][0] for addrinfo in getipaddrinfo(env.server_name)])", "function is used to generate a dynamic page. It first", "urllib2.quote(response.js.replace('\\n', '')) # ################################################## # store cookies in headers #", "same as applications_parent (for backward compatibility) web2py_path = global_settings.applications_parent #", "if not global_settings.local_hosts: local_hosts = set(['127.0.0.1', '::ffff:127.0.0.1', '::1']) if not", "fcgi missing path_info and query_string - validates the path in", "GAE or when logfile is missing logging.basicConfig() logger = logging.getLogger(\"web2py\")", "the global variables so that web2py # runs from there", "port) self.pid_filename = pid_filename if not server_name: server_name = socket.gethostname()", "time.gmtime())), ('Pragma', 'no-cache')] for key, value in default_headers: response.headers.setdefault(key, value)", "http_response.headers['web2py-component-flash'] = \\ urllib2.quote(xmlescape(response.flash).replace(b'\\n', b'')) if response.js: http_response.headers['web2py-component-command'] = \\", "# make up a new password chars = string.letters +", "hr: http_response = hr if static_file: return http_response.to(responder, env=env) if", "the controller, and then tries to render the output using", "!= 'welcome': redirect(URL('welcome', 'default', 'index')) elif rwthread.routes.error_handler: _handler = rwthread.routes.error_handler", "application --------------------------- \"\"\" from __future__ import print_function if False: import", "web2py Web Framework | Copyrighted by <NAME> <<EMAIL>> | License:", "prevent exception on Python 2.5: # NameError: name 'gluon' is", "== env.remote_addr), is_shell = False, is_scheduler = False, is_https =", "password == 
'<random>': # make up a new password chars", "as e: if request.body: request.body.close() # ################################################## # on application", "the environment variables, replacing '.' with '_' - adds web2py", "env = request.env #env.web2py_path = global_settings.applications_parent env.web2py_version = web2py_version #env.update(global_settings)", "and version info - compensates for fcgi missing path_info and", "try: os.makedirs(profiler_dir) except: raise BaseException(\"Can't create dir %s\" % profiler_dir)", "str): write_file(logfilename, line, 'a') else: logfilename.write(line) except: pass return ret[0]", "base application logfilename: where to store apache-compatible requests log profiler_dir:", "== 'on' ) request.url = environ['PATH_INFO'] # ################################################## # access", "################################################## environment = build_environment(request, response, session) # set default view,", "response._vars = page response._view_environment.update(page) page = run_view_in(response._view_environment) # logic to", "sub may also contain '-', '=', '.' 
and '/' \"\"\"", "as hr: http_response = hr if static_file: return http_response.to(responder, env=env)", "for item in middleware_apps: app = item(app) def caller(app): return", "is_valid_ip_address(client): raise HTTP(400, \"Bad Request (request.client=%s)\" % client) return client", "return lambda caller=caller, app=app: caller(app) return middleware def wsgibase(environ, responder):", "exists(\"logging.conf\"): logging.config.fileConfig(\"logging.conf\") save_password(password, port) self.pid_filename = pid_filename if not server_name:", "Copyrighted by <NAME> <<EMAIL>> | License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) The gluon", "for addrinfo in getipaddrinfo(fqdn)]) if env.server_name: local_hosts.add(env.server_name) local_hosts.update([ addrinfo[4][0] for", "= rwthread.routes.error_handler redirect(URL(_handler['application'], _handler['controller'], _handler['function'], args=app)) else: raise HTTP(404, rwthread.routes.error_message", "of the web2py Web Framework | Copyrighted by <NAME> <<EMAIL>>", "False, is_scheduler = False, is_https = env.wsgi_url_scheme in HTTPS_SCHEMES or", "%f\\n' % ( environ['REMOTE_ADDR'], datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S'), environ['REQUEST_METHOD'], environ['PATH_INFO'].replace(',', '%2C'), environ['SERVER_PROTOCOL'],", "logic to garbage collect after exec, not always, once every", "web2py_version = global_settings.web2py_version except: raise RuntimeError(\"Cannot determine web2py version\") try:", "in database # ################################################## if not env.web2py_disable_session: session._try_store_in_db(request, response) #", "################################################## if not env.web2py_disable_session: session._try_store_in_db(request, response) # ################################################## # on", "two are identical unless web2py_path is changed via the web2py.py", "log_filename, profiler_dir)} self.server = rocket.Rocket(interfaces or tuple(sock_list), 
method='wsgi', app_info=app_info, min_threads=min_threads,", "list of socket-tuples as string ): \"\"\" starts the web", "= server_name rocket.SOCKET_TIMEOUT = socket_timeout sock_list = [ip, port] if", "= client, folder = abspath('applications', app) + os.sep, ajax =", "logger.info('SSL is ON') app_info = {'wsgi_app': appfactory(wsgibase, log_filename, profiler_dir)} self.server", "through # ################################################## # process models, controller and view (if", "in HTTPS_SCHEMES \\ or env.https == 'on' ) request.url =", "elif password.startswith('<pam_user:'): # use the pam password for specified user", "if response._custom_rollback: response._custom_rollback() else: BaseAdapter.close_all_instances('rollback') except: pass e = RestrictedError('Framework',", "backward compatibility create_missing_folders() # set up logging for subsequent imports", "by main() to save the password in the parameters_port.py file.", "create_missing_folders, create_missing_app_folders from gluon.globals import current # Remarks: # calling", "= e.log(request) or 'unknown' http_response = \\ HTTP(500, rwthread.routes.error_message_ticket %", "wsgi application --------------------------- \"\"\" from __future__ import print_function if False:", "app \"\"\" status_headers.append(s) status_headers.append(h) return responder(s, h) time_in = time.time()", "locale.setlocale(locale.LC_CTYPE, \"C\") # IMPORTANT, web2py requires locale \"C\" exists =", "cpassword = CRYPT()(password)[0] print('******************* IMPORTANT!!! 
************************') print('your admin password is", "xmlescape from gluon.utils import is_valid_ip_address, getipaddrinfo from gluon.rewrite import load", "may be Library.zip # gluon_parent is the directory containing gluon,", "controller you can use: - request.wsgi.environ - request.wsgi.start_response to call", "(g.group() or '').split(',')[0] if g else None if client in", "raise HTTP(response.status, page, **response.headers) class LazyWSGI(object): def __init__(self, environ, request,", "elif response.custom_commit: BaseAdapter.close_all_instances(response.custom_commit) else: BaseAdapter.close_all_instances('commit') # ################################################## # if session", "not profiler_dir: ret[0] = wsgiapp(environ, responder2) else: import cProfile prof", "= web2py_version #env.update(global_settings) static_file = False http_response = None try:", "load as load_routes, url_in, THREAD_LOCAL as rwthread, \\ try_rewrite_on_error, fixup_missing_path_info", "and POST data # ################################################## #parse_get_post_vars(request, environ) # ################################################## #", "maintenance</h1></body></html>\") # ################################################## # build missing folders # ################################################## create_missing_app_folders(request)", "os.path.normpath(path) web2py_path = path global_settings.applications_parent = path os.chdir(path) load_routes() for", "ticket = e.log(request) or 'unknown' # rollback if response._custom_rollback: response._custom_rollback()", "query_string # select rewrite parameters # rewrite incoming URL #", "folders # ################################################## create_missing_app_folders(request) # ################################################## # get the GET", "socket_timeout sock_list = [ip, port] if not ssl_certificate or not", "gluon # so we must restore it! 
Thanks ozancag import", "use provided password cpassword = CRYPT()(password)[0] fp = open(password_file, 'w')", "LazyWSGI(object): def __init__(self, environ, request, response): self.wsgi_environ = environ self.request", "os.sep, ajax = x_req_with == 'xmlhttprequest', cid = env.http_web2py_component_element, is_local", "('Pragma', 'no-cache')] for key, value in default_headers: response.headers.setdefault(key, value) raise", "This file is part of the web2py Web Framework |", "sure the flash is passed through # ################################################## # process", "tickets in db, reconnect and store it in db if", "for rocket parameter correctness # not necessarily completely tested (e.g.", "= Session() env = request.env #env.web2py_path = global_settings.applications_parent env.web2py_version =", "not isinstance(i, tuple): raise \"Wrong format for rocket interfaces parameter", "off') elif not rocket.ssl: logger.warning('Python \"ssl\" module unavailable. SSL is", "( environ['REMOTE_ADDR'], datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S'), environ['REQUEST_METHOD'], environ['PATH_INFO'].replace(',', '%2C'), environ['SERVER_PROTOCOL'], (status_headers[0])[:3], time.time()", "is not defined # See http://bugs.python.org/issue1436 # attention!, the import", "logic # ################################################## # set default headers it not set", "a shutdown timeout path=None, interfaces=None # Rocket is able to", "Used by main() to save the password in the parameters_port.py", "e: if request.body: request.body.close() # ################################################## # on application error,", "/<application>[/<controller>[/<function>[/<sub>]]][.<extension>] The naming conventions are: - application, controller, function and", "this function must run from the [application] folder. A typical", "*middleware_apps): \"\"\" In you controller use:: @request.wsgi.middleware(middleware1, middleware2, ...) 
to", "of tuples or ip-format) import types if isinstance(interfaces, list): for", "in (None, '', 'unknown'): g = regex_client.search(eget('remote_addr', '')) if g:", "rocket.SERVER_NAME = server_name rocket.SOCKET_TIMEOUT = socket_timeout sock_list = [ip, port]", "local_hosts.update([ addrinfo[4][0] for addrinfo in getipaddrinfo(fqdn)]) if env.server_name: local_hosts.add(env.server_name) local_hosts.update([", "# store cookies in headers # ################################################## session._fixup_before_save() http_response.cookies2headers(response.cookies) ticket", "import locale locale.setlocale(locale.LC_CTYPE, \"C\") # IMPORTANT, web2py requires locale \"C\"", "# ################################################## environment = build_environment(request, response, session) # set default", "you controller use:: @request.wsgi.middleware(middleware1, middleware2, ...) to decorate actions with", "headers it not set # ################################################## default_headers = [ ('Content-Type',", "is the same as applications_parent (for backward compatibility) web2py_path =", "#from thread import allocate_lock from gluon.fileutils import abspath, write_file from", "os.path.exists pjoin = os.path.join try: logging.config.fileConfig(abspath(\"logging.conf\")) except: # fails on", "sys.path # applications_parent (path to applications/, site-packages/ etc) # defaults", "fails on GAE or when logfile is missing logging.basicConfig() logger", "response.session_file: response.session_file.close() session._unlock(response) http_response, new_environ = try_rewrite_on_error( http_response, request, environ,", "in request items # ################################################## app = request.application # must", "unavailable. 
SSL is OFF') elif not exists(ssl_certificate): logger.warning('unable to open", "local_hosts = set(['127.0.0.1', '::ffff:127.0.0.1', '::1']) if not global_settings.web2py_runtime_gae: try: fqdn", "g: client = g.group() elif env.http_host.startswith('['): # IPv6 client =", "= abspath('parameters_%i.py' % port) if password == '<random>': # make", "2.5: # NameError: name 'gluon' is not defined # See", "static_file = False http_response = None try: try: try: #", "if isinstance(page, dict): response._vars = page response._view_environment.update(page) page = run_view_in(response._view_environment)", "if request.body: request.body.close() if hasattr(current, 'request'): # ################################################## # on", "new_environ['wsgi.input'] = self.request.body new_environ['wsgi.version'] = 1 self._environ = new_environ return", "web server...') rocket.SERVER_NAME = server_name rocket.SOCKET_TIMEOUT = socket_timeout sock_list =", "in local_hosts and client == env.remote_addr), is_shell = False, is_scheduler", "gluon_parent is the directory containing gluon, web2py.py, logging.conf # and", "if hasattr(current, 'request'): # ################################################## # on success, try store", "if request.tickets_db: ticket = e.log(request) or 'unknown' http_response = \\", "shutdown_timeout=None, # Rocket does not use a shutdown timeout path=None,", "'HTTPS')) def get_client(env): \"\"\" Guesses the client address from the", "logging.config.fileConfig(abspath(\"logging.conf\")) except: # fails on GAE or when logfile is", "build environment for controller and view # ################################################## environment =", "is off') elif not rocket.ssl: logger.warning('Python \"ssl\" module unavailable. SSL", "logging.gluon = gluon # so we must restore it! 
Thanks", "backward compatibility) web2py_path = global_settings.applications_parent # backward compatibility create_missing_folders() #", "'session_file') \\ and response.session_file: response.session_file.close() session._unlock(response) http_response, new_environ = try_rewrite_on_error(", "\"\"\" def middleware(f): def app(environ, start_response): data = f() start_response(self.response.status,", "file which may be Library.zip # gluon_parent is the directory", "dict(ticket=ticket), web2py_error='ticket %s' % ticket) except: if request.body: request.body.close() #", "== 'soft': newcron.softcron(global_settings.applications_parent).start() return http_response.to(responder, env=env) def save_password(password, port): \"\"\"", "# ################################################## # process models, controller and view (if required)", "response.status = env.web2py_status_code or response.status if static_file: if eget('QUERY_STRING', '').startswith('attachment'):", "# NameError: name 'gluon' is not defined # See http://bugs.python.org/issue1436", "elif env.http_host.startswith('['): # IPv6 client = '::1' else: client =", "= {'wsgi_app': appfactory(wsgibase, log_filename, profiler_dir)} self.server = rocket.Rocket(interfaces or tuple(sock_list),", "be either: 1. for static pages: - /<application>/static/<file> 2. for", "the directory containing applications/ # and routes.py # The two", "so we must restore it! 
Thanks ozancag import locale locale.setlocale(locale.LC_CTYPE,", "else: BaseAdapter.close_all_instances('rollback') # if tickets in db, reconnect and store", "else: BaseAdapter.close_all_instances('rollback') except: pass e = RestrictedError('Framework', '', '', locals())", "run_view_in from gluon.contenttype import contenttype from pydal.base import BaseAdapter from", "lambda *args, **kargs: \\ self.response.write(escape=False, *args, **kargs) def middleware(self, *middleware_apps):", "always, once every 100 requests global requests requests = ('requests'", "BaseException(\"Deprecated API\") if profiler_dir: profiler_dir = abspath(profiler_dir) logger.warn('profiler is on.", "url The url path must be either: 1. for static", "db if request.tickets_db: ticket = e.log(request) or 'unknown' http_response =", "# activate the debugger gluon.debug.dbg.do_debug(mainpyfile=request.folder) serve_controller(request, response, session) except HTTP", "<filename>gluon/main.py #!/bin/env python # -*- coding: utf-8 -*- \"\"\" |", "load_routes, url_in, THREAD_LOCAL as rwthread, \\ try_rewrite_on_error, fixup_missing_path_info from gluon", "logging.conf # and the handlers. # applications_parent (web2py_path) is the", "= e.log(request) or 'unknown' # rollback if response._custom_rollback: response._custom_rollback() else:", "timeout=10, socket_timeout=1, shutdown_timeout=None, # Rocket does not use a shutdown", "% password) print('*********************************************************') elif password == '<recycle>': # reuse the", "ON') app_info = {'wsgi_app': appfactory(wsgibase, log_filename, profiler_dir)} self.server = rocket.Rocket(interfaces", "page = run_view_in(response._view_environment) # logic to garbage collect after exec,", "elif rwthread.routes.error_handler: _handler = rwthread.routes.error_handler redirect(URL(_handler['application'], _handler['controller'], _handler['function'], args=app)) else:", "middleware. actions must return strings. 
uses a simulated environment so", "you can use: - request.wsgi.environ - request.wsgi.start_response to call third", "# parse rewritten URL # serve file if static #", "= global_settings.web2py_version except: raise RuntimeError(\"Cannot determine web2py version\") try: from", "locals()) ticket = e.log(request) or 'unrecoverable' http_response = \\ HTTP(500,", "WSGI applications \"\"\" self.response.status = str(status).split(' ', 1)[0] self.response.headers =", "not request.is_local and exists(disabled): five0three = os.path.join(request.folder,'static','503.html') if os.path.exists(five0three): raise", "Guesses the client address from the environment variables First tries", "pid_filename if not server_name: server_name = socket.gethostname() logger.info('starting web server...')", "can override it response.view = '%s/%s.%s' % (request.controller, request.function, request.extension)", "on success, try store session in database # ################################################## if", "self.start_response) return lambda caller=caller, app=app: caller(app) return middleware def wsgibase(environ,", "single_cookie.strip() if single_cookie: try: request.cookies.load(single_cookie) except Cookie.CookieError: pass # single", "self.request.body new_environ['wsgi.version'] = 1 self._environ = new_environ return self._environ def", "random import string from gluon._compat import Cookie, urllib2 #from thread", "= pjoin(request.folder, 'DISABLED') if not exists(request.folder): if app == rwthread.routes.default_application", "reuse the current password if any if exists(password_file): return else:", "HTTP(500, rwthread.routes.error_message_ticket % dict(ticket=ticket), web2py_error='ticket %s' % ticket) except: if", "import newcron __all__ = ['wsgibase', 'save_password', 'appfactory', 'HttpServer'] requests =", "exists = os.path.exists pjoin = os.path.join try: logging.config.fileConfig(abspath(\"logging.conf\")) except: #", "- adds web2py path and version info - compensates 
for", "\"C\" exists = os.path.exists pjoin = os.path.join try: logging.config.fileConfig(abspath(\"logging.conf\")) except:", "# process models, controller and view (if required) # ##################################################", "we do not want the path to this file which", "web2py.py -f folder option # main.web2py_path is the same as", "server_name rocket.SOCKET_TIMEOUT = socket_timeout sock_list = [ip, port] if not", "list): for i in interfaces: if not isinstance(i, tuple): raise", "from gluon.restricted import RestrictedError from gluon.http import HTTP, redirect from", "or os.environ['web2py_path'] global web2py_path path = os.path.normpath(path) web2py_path = path", "__all__ = ['wsgibase', 'save_password', 'appfactory', 'HttpServer'] requests = 0 #", "if tickets in db, reconnect and store it in db", "tested (e.g. content of tuples or ip-format) import types if", "app == rwthread.routes.default_application \\ and app != 'welcome': redirect(URL('welcome', 'default',", "function and extension may only contain `[a-zA-Z0-9_]` - file and", "does logging and profiling and calls wsgibase Args: wsgiapp: the", "client) return client def serve_controller(request, response, session): \"\"\" This function", "HTTPS_SCHEMES = set(('https', 'HTTPS')) def get_client(env): \"\"\" Guesses the client", "fp.write('password=\"%s\"\\n' % cpassword) else: fp.write('password=None\\n') fp.close() def appfactory(wsgiapp=wsgibase, logfilename='httpserver.log', profiler_dir=None,", "interfaces parameter - see http://packages.python.org/rocket/\" if path: # if a", "except IOError: raise BaseException(\"Unable to write to dir %s\" %", "from gluon.globals import current # Remarks: # calling script has", "import print_function if False: import import_all # DO NOT REMOVE", "################################################## session._fixup_before_save() http_response.cookies2headers(response.cookies) ticket = None except RestrictedError as e:", "BaseAdapter.close_all_instances(None) elif 
response.custom_commit: BaseAdapter.close_all_instances(response.custom_commit) else: BaseAdapter.close_all_instances('commit') # ################################################## # if", "str(status).split(' ', 1)[0] self.response.headers = dict(headers) return lambda *args, **kargs:", "starts the web server. \"\"\" if interfaces: # if interfaces", "folder option # main.web2py_path is the same as applications_parent (for", "GMT' response.stream(static_file, request=request) # ################################################## # fill in request items", "the url /[application]/[controller]/[function] that would result in a call to", "create new session file # ################################################## if not env.web2py_disable_session: session.connect(request,", "rendered by applications/[application]/views/[controller]/[function].html \"\"\" # ################################################## # build environment for", "# so we must restore it! Thanks ozancag import locale", "app) + os.sep, ajax = x_req_with == 'xmlhttprequest', cid =", "or 0 if not requests: gc.collect() # end garbage collection", "\\ HTTP(500, rwthread.routes.error_message_ticket % dict(ticket=ticket), web2py_error='ticket %s' % ticket) except:", "calls wsgibase \"\"\" status_headers = [] def responder2(s, h): \"\"\"", "): \"\"\" starts the web server. 
\"\"\" if interfaces: #", "First tries 'http_x_forwarded_for', secondly 'remote_addr' if all fails, assume '127.0.0.1'", "a, b, s=self: s.stop()) signal.signal(signal.SIGINT, lambda a, b, s=self: s.stop())", "appfactory(wsgibase, log_filename, profiler_dir)} self.server = rocket.Rocket(interfaces or tuple(sock_list), method='wsgi', app_info=app_info,", "not global_settings.local_hosts: local_hosts = set(['127.0.0.1', '::ffff:127.0.0.1', '::1']) if not global_settings.web2py_runtime_gae:", "response._custom_rollback() else: BaseAdapter.close_all_instances('rollback') except: pass e = RestrictedError('Framework', '', '',", "| Copyrighted by <NAME> <<EMAIL>> | License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) The", "request.url = environ['PATH_INFO'] # ################################################## # access the requested application", "we must restore it! Thanks ozancag import locale locale.setlocale(locale.LC_CTYPE, \"C\")", "%s, %s, %s, %f\\n' % ( environ['REMOTE_ADDR'], datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S'), environ['REQUEST_METHOD'],", "create_missing_app_folders(request) # ################################################## # get the GET and POST data", "containing gluon, web2py.py, logging.conf # and the handlers. # applications_parent", "version, environ) = url_in(request, environ) response.status = env.web2py_status_code or response.status", "to render the output using a view/template. 
this function must", "= os.path.join(request.folder,'static','503.html') if os.path.exists(five0three): raise HTTP(503, file(five0three, 'r').read()) else: raise", "if not hasattr(self, '_environ'): new_environ = self.wsgi_environ new_environ['wsgi.input'] = self.request.body", "ticket = None except RestrictedError as e: if request.body: request.body.close()", "'appfactory', 'HttpServer'] requests = 0 # gc timer # Security", "time_in, ) if not logfilename: sys.stdout.write(line) elif isinstance(logfilename, str): write_file(logfilename,", "caller(app) return middleware def wsgibase(environ, responder): \"\"\" The gluon wsgi", "the debugger gluon.debug.dbg.do_debug(mainpyfile=request.folder) serve_controller(request, response, session) except HTTP as hr:", "imports import logging import logging.config # This needed to prevent", "sys.path to # (\"\", gluon_parent/site-packages, gluon_parent, ...) # # this", "('Content-Type', contenttype('.' + request.extension)), ('Cache-Control', 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0'),", "to the url /[application]/[controller]/[function] that would result in a call", "except: pass write_file(self.pid_filename, str(os.getpid())) self.server.start() def stop(self, stoplogging=False): \"\"\" stop", "start_response(self.response.status, self.response.headers.items()) if isinstance(data, list): return data return [data] for", "= CRYPT()(password)[0] print('******************* IMPORTANT!!! ************************') print('your admin password is \"%s\"'", "except (socket.gaierror, TypeError): pass global_settings.local_hosts = list(local_hosts) else: local_hosts =", "= [0] if not profiler_dir: ret[0] = wsgiapp(environ, responder2) else:", "may also contain '-', '=', '.' 
and '/' \"\"\" eget", "if not env.web2py_disable_session: session._try_store_in_db(request, response) # ################################################## # on success,", "logging import logging.config # This needed to prevent exception on", "# this must be done after trying to commit database!", "\\ HTTP(500, rwthread.routes.error_message_ticket % dict(ticket=ticket), web2py_error='ticket %s' % ticket) finally:", "defaults to that directory set sys.path to # (\"\", gluon_parent/site-packages,", "for key, value in default_headers: response.headers.setdefault(key, value) raise HTTP(response.status, page,", "# rollback if response._custom_rollback: response._custom_rollback() else: BaseAdapter.close_all_instances('rollback') # if tickets", "raise BaseException(\"Deprecated API\") if profiler_dir: profiler_dir = abspath(profiler_dir) logger.warn('profiler is", "create_missing_folders() # set up logging for subsequent imports import logging", "print('******************* IMPORTANT!!! ************************') print('your admin password is \"%s\"' % password)", "is_https = env.wsgi_url_scheme in HTTPS_SCHEMES or \\ request.env.http_x_forwarded_proto in HTTPS_SCHEMES", "# try load session or create new session file #", "import CRYPT from gluon.html import URL, xmlescape from gluon.utils import", "directory into sys.path # applications_parent (path to applications/, site-packages/ etc)", "urllib2 #from thread import allocate_lock from gluon.fileutils import abspath, write_file", "# ################################################## # on success, try store session in database", "from the environment variables First tries 'http_x_forwarded_for', secondly 'remote_addr' if", "http_response, new_environ = try_rewrite_on_error( http_response, request, environ, ticket) if not", "return self._environ def start_response(self, status='200', headers=[], exec_info=None): \"\"\" in controller", "responder): \"\"\" The gluon wsgi application. 
The first function called", "and (requests + 1) % 100 or 0 if not", "- application, controller, function and extension may only contain `[a-zA-Z0-9_]`", "env.web2py_disable_session: session.connect(request, response) # ################################################## # run controller # ##################################################", "= global_settings.applications_parent env.web2py_version = web2py_version #env.update(global_settings) static_file = False http_response", "and app != \"admin\": import gluon.debug # activate the debugger", "runs all models, then runs the function in the controller,", "environ['SERVER_PROTOCOL'], (status_headers[0])[:3], time.time() - time_in, ) if not logfilename: sys.stdout.write(line)", "# fails on GAE or when logfile is missing logging.basicConfig()", "print('*********************************************************') elif password == '<recycle>': # reuse the current password", "HTTP(503, \"<html><body><h1>Temporarily down for maintenance</h1></body></html>\") # ################################################## # build missing", "value in default_headers: response.headers.setdefault(key, value) raise HTTP(response.status, page, **response.headers) class", "python # -*- coding: utf-8 -*- \"\"\" | This file", "responder) if global_settings.web2py_crontype == 'soft': newcron.softcron(global_settings.applications_parent).start() return http_response.to(responder, env=env) def", "private key. SSL is OFF') else: sock_list.extend([ssl_private_key, ssl_certificate]) if ssl_ca_certificate:", "and client == env.remote_addr), is_shell = False, is_scheduler = False,", "password in the parameters_port.py file. \"\"\" password_file = abspath('parameters_%i.py' %", "FREEZE PROCESS import gc import os import re import copy", "`[a-zA-Z0-9_]` - file and sub may also contain '-', '=',", "request.application # must go after url_in! 
if not global_settings.local_hosts: local_hosts", "elif not exists(ssl_private_key): logger.warning('unable to open SSL private key. SSL", "GET and POST data # ################################################## #parse_get_post_vars(request, environ) # ##################################################", "# attention!, the import Tkinter in messageboxhandler, changes locale ...", "not exists(request.folder): if app == rwthread.routes.default_application \\ and app !=", "and profiling and calls wsgibase \"\"\" status_headers = [] def", "% ticket) finally: if response and hasattr(response, 'session_file') \\ and", "error, rollback database # ################################################## # log tickets before rollback", "request self.response = response @property def environ(self): if not hasattr(self,", "################################################## # expose wsgi hooks for convenience # ################################################## request.wsgi", "...) to decorate actions with WSGI middleware. actions must return", "min_threads=None, max_threads=None, server_name=None, request_queue_size=5, timeout=10, socket_timeout=1, shutdown_timeout=None, # Rocket does", "completely tested (e.g. 
content of tuples or ip-format) import types", "collection logic # ################################################## # set default headers it not", "################################################## # set default headers it not set # ##################################################", "'w') if password: fp.write('password=\"%s\"\\n' % cpassword) else: fp.write('password=None\\n') fp.close() def", "path to script directory into sys.path # applications_parent (path to", "# runs from there instead of cwd or os.environ['web2py_path'] global", "account for IPV6 try: version_info = open(pjoin(global_settings.gluon_parent, 'VERSION'), 'r') raw_version_string", "pydal.base import BaseAdapter from gluon.validators import CRYPT from gluon.html import", "missing path_info and query_string - validates the path in url", "for single_cookie in env.http_cookie.split(';'): single_cookie = single_cookie.strip() if single_cookie: try:", "gluon.debug # activate the debugger gluon.debug.dbg.do_debug(mainpyfile=request.folder) serve_controller(request, response, session) except", "request.function, environment) if isinstance(page, dict): response._vars = page response._view_environment.update(page) page", "2037 23:59:59 GMT' response.stream(static_file, request=request) # ################################################## # fill in", "'unknown' # rollback if response._custom_rollback: response._custom_rollback() else: BaseAdapter.close_all_instances('rollback') # if", "0 if not requests: gc.collect() # end garbage collection logic", "i in interfaces: if not isinstance(i, tuple): raise \"Wrong format", "be done after trying to commit database! 
# ################################################## if", "or '::1' (running locally) \"\"\" eget = env.get g =", "\"\"\" wsgi responder app \"\"\" status_headers.append(s) status_headers.append(h) return responder(s, h)", "Remarks: # calling script has inserted path to script directory", "request.cookies.load(single_cookie) except Cookie.CookieError: pass # single invalid cookie ignore #", "\"\"): add_path_first(p) if exists(\"logging.conf\"): logging.config.fileConfig(\"logging.conf\") save_password(password, port) self.pid_filename = pid_filename", "mod_wsgi (or any WSGI-compatible server). - fills request with info", "http://packages.python.org/rocket/\" else: raise \"Wrong format for rocket interfaces parameter -", "= copy.copy(environment) page = run_controller_in(request.controller, request.function, environment) if isinstance(page, dict):", "else: local_hosts = global_settings.local_hosts client = get_client(env) x_req_with = str(env.http_x_requested_with).lower()", "must return strings. 
uses a simulated environment so it may", "routes.py # The two are identical unless web2py_path is changed", "self.wsgi_environ new_environ['wsgi.input'] = self.request.body new_environ['wsgi.version'] = 1 self._environ = new_environ", "response) # ################################################## # on success, commit database # ##################################################", "handle_signals=False, ) def start(self): \"\"\" start the web server \"\"\"", "logfilename.write(line) except: pass return ret[0] return app_with_logging class HttpServer(object): \"\"\"", "view, controller can override it response.view = '%s/%s.%s' % (request.controller,", "################################################## #parse_get_post_vars(request, environ) # ################################################## # expose wsgi hooks for", "response) # Set header so client can distinguish component requests.", "every 100 requests global requests requests = ('requests' in globals())", "variables First tries 'http_x_forwarded_for', secondly 'remote_addr' if all fails, assume", "requests global requests requests = ('requests' in globals()) and (requests", "part of the web2py Web Framework | Copyrighted by <NAME>", "requires locale \"C\" exists = os.path.exists pjoin = os.path.join try:", "global_settings.applications_parent env.web2py_version = web2py_version #env.update(global_settings) static_file = False http_response =", "app that does logging and profiling and calls wsgibase \"\"\"", "import os import re import copy import sys import time", "pass write_file(self.pid_filename, str(os.getpid())) self.server.start() def stop(self, stoplogging=False): \"\"\" stop cron", "page = run_controller_in(request.controller, request.function, environment) if isinstance(page, dict): response._vars =", "rwthread.routes.error_message_ticket % dict(ticket=ticket), web2py_error='ticket %s' % ticket) finally: if response", "REMOVE PART OF FREEZE PROCESS import gc import os import", "try: line = '%s, %s, 
%s, %s, %s, %s, %f\\n'", "\"\"\" The gluon wsgi application. The first function called when", "determine web2py version\") try: from gluon import rocket except: if", "requests requests = ('requests' in globals()) and (requests + 1)", "session): \"\"\" This function is used to generate a dynamic", "% cpassword) else: fp.write('password=None\\n') fp.close() def appfactory(wsgiapp=wsgibase, logfilename='httpserver.log', profiler_dir=None, profilerfilename=None):", "isinstance(logfilename, str): write_file(logfilename, line, 'a') else: logfilename.write(line) except: pass return", "variables so that web2py # runs from there instead of", "can distinguish component requests. if request.cid: http_response.headers.setdefault( 'web2py-component-content', 'replace') if", "# ################################################## # try load session or create new session", "request.body.close() if hasattr(current, 'request'): # ################################################## # on success, try", "that does logging and profiling and calls wsgibase \"\"\" status_headers", "1. for static pages: - /<application>/static/<file> 2. 
for dynamic pages:", "%s' % ticket) except: if request.body: request.body.close() # ################################################## #", "'')) if g: client = g.group() elif env.http_host.startswith('['): # IPv6", "get the GET and POST data # ################################################## #parse_get_post_vars(request, environ)", "format for rocket interfaces parameter - see http://packages.python.org/rocket/\" else: raise", "for rocket interfaces parameter - see http://packages.python.org/rocket/\" else: raise \"Wrong", "may have weird behavior in some cases \"\"\" def middleware(f):", "not in db try store session on filesystem # this", "('Expires', time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime())), ('Pragma', 'no-cache')]", "make up a new password chars = string.letters + string.digits", "logger.info('starting web server...') rocket.SERVER_NAME = server_name rocket.SOCKET_TIMEOUT = socket_timeout sock_list", "ssl_ca_certificate=None, min_threads=None, max_threads=None, server_name=None, request_queue_size=5, timeout=10, socket_timeout=1, shutdown_timeout=None, # Rocket", "print('your admin password is \"%s\"' % password) print('*********************************************************') elif password", "wsgibase(new_environ, responder) if global_settings.web2py_crontype == 'soft': newcron.softcron(global_settings.applications_parent).start() return http_response.to(responder, env=env)", "data return [data] for item in middleware_apps: app = item(app)", "filehandle.close() os.unlink(filepath) except IOError: raise BaseException(\"Unable to write to dir", "gc timer # Security Checks: validate URL and session_id here,", "'_' - adds web2py path and version info - compensates", "pjoin(request.folder, 'DISABLED') if not exists(request.folder): if app == rwthread.routes.default_application \\", "and app != 'welcome': redirect(URL('welcome', 'default', 'index')) elif rwthread.routes.error_handler: _handler", "compatibility create_missing_folders() # set up 
logging for subsequent imports import", "not None: raise BaseException(\"Deprecated API\") if profiler_dir: profiler_dir = abspath(profiler_dir)", "parameter - see http://packages.python.org/rocket/\" else: raise \"Wrong format for rocket", "identical unless web2py_path is changed via the web2py.py -f folder", "line, 'a') else: logfilename.write(line) except: pass return ret[0] return app_with_logging", "pattern used to validate client address regex_client = re.compile('[\\w\\-:]+(\\.[\\w\\-]+)*\\.?') #", "else: raise HTTP(503, \"<html><body><h1>Temporarily down for maintenance</h1></body></html>\") # ################################################## #", "and '/' \"\"\" eget = environ.get current.__dict__.clear() request = Request(environ)", "contenttype('.' + request.extension)), ('Cache-Control', 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0'), ('Expires',", "with WSGI middleware. actions must return strings. uses a simulated", "also, make sure the flash is passed through # ##################################################", "fails, assume '127.0.0.1' or '::1' (running locally) \"\"\" eget =", "that would result in a call to [function]() in applications/[application]/[controller].py", "LGPLv3 (http://www.gnu.org/licenses/lgpl.html) The gluon wsgi application --------------------------- \"\"\" from __future__", "list): return data return [data] for item in middleware_apps: app", "address from the environment variables First tries 'http_x_forwarded_for', secondly 'remote_addr'", "except: pass return ret[0] return app_with_logging class HttpServer(object): \"\"\" the", "else: # use provided password cpassword = CRYPT()(password)[0] fp =", "does not use a shutdown timeout path=None, interfaces=None # Rocket", "request.wsgi.start_response to call third party WSGI applications \"\"\" self.response.status =", "file # ################################################## if not env.web2py_disable_session: session.connect(request, response) # 
##################################################", "= request self.response = response @property def environ(self): if not", "# calling script has inserted path to script directory into", "\\ and response.session_file: response.session_file.close() session._unlock(response) http_response, new_environ = try_rewrite_on_error( http_response,", "some cases \"\"\" def middleware(f): def app(environ, start_response): data =", "application that does logging and profiling and calls wsgibase Args:", "try store session on filesystem # this must be done", "ret[0] = wsgiapp(environ, responder2) prof.disable() destfile = pjoin(profiler_dir, \"req_%s.prof\" %", "-f folder option # main.web2py_path is the same as applications_parent", "store cookies in headers # ################################################## session._fixup_before_save() http_response.cookies2headers(response.cookies) ticket =", "import is_valid_ip_address, getipaddrinfo from gluon.rewrite import load as load_routes, url_in,", "required) # ################################################## run_models_in(environment) response._view_environment = copy.copy(environment) page = run_controller_in(request.controller,", "SSL is OFF') elif not exists(ssl_certificate): logger.warning('unable to open SSL", "response) # ################################################## # run controller # ################################################## if global_settings.debugging", "Request (request.client=%s)\" % client) return client def serve_controller(request, response, session):", "global_settings.cmd_options request.update( client = client, folder = abspath('applications', app) +", "request.body.close() # ################################################## # on application error, rollback database #", "OFF') elif not exists(ssl_certificate): logger.warning('unable to open SSL certificate. 
SSL", "open(pjoin(global_settings.gluon_parent, 'VERSION'), 'r') raw_version_string = version_info.read().split()[-1].strip() version_info.close() global_settings.web2py_version = raw_version_string", "def middleware(f): def app(environ, start_response): data = f() start_response(self.response.status, self.response.headers.items())", "'xmlhttprequest', cid = env.http_web2py_component_element, is_local = (env.remote_addr in local_hosts and", "'unknown'): g = regex_client.search(eget('remote_addr', '')) if g: client = g.group()", "weird behavior in some cases \"\"\" def middleware(f): def app(environ,", "Tkinter in messageboxhandler, changes locale ... import gluon.messageboxhandler logging.gluon =", "path is specified change the global variables so that web2py", "self.response.headers.items()) if isinstance(data, list): return data return [data] for item", "logging and profiling and calls wsgibase Args: wsgiapp: the base", "raw_version_string = version_info.read().split()[-1].strip() version_info.close() global_settings.web2py_version = raw_version_string web2py_version = global_settings.web2py_version", "prof.disable() destfile = pjoin(profiler_dir, \"req_%s.prof\" % web2py_uuid()) prof.dump_stats(destfile) try: line", "__future__ import print_function if False: import import_all # DO NOT", "server). - fills request with info - the environment variables,", "HTTP(404, rwthread.routes.error_message % 'invalid request', web2py_error='invalid application') elif not request.is_local", "False, is_https = env.wsgi_url_scheme in HTTPS_SCHEMES or \\ request.env.http_x_forwarded_proto in", "= pjoin(profiler_dir, 'wtest') try: filehandle = open( filepath, 'w' )", "/<application>/static/<file> 2. 
for dynamic pages: - /<application>[/<controller>[/<function>[/<sub>]]][.<extension>] The naming conventions", "or response.status if static_file: if eget('QUERY_STRING', '').startswith('attachment'): response.headers['Content-Disposition'] \\ =", "directory set sys.path to # (\"\", gluon_parent/site-packages, gluon_parent, ...) #", "start_response): data = f() start_response(self.response.status, self.response.headers.items()) if isinstance(data, list): return", "response and hasattr(response, 'session_file') \\ and response.session_file: response.session_file.close() session._unlock(response) http_response,", "'%s/%s.%s' % (request.controller, request.function, request.extension) # also, make sure the", "not global_settings.web2py_runtime_gae: logger.warn('unable to import Rocket') load_routes() HTTPS_SCHEMES = set(('https',", "single invalid cookie ignore # ################################################## # try load session", "the web2py.py -f folder option # main.web2py_path is the same", "Python 2.5: # NameError: name 'gluon' is not defined #", "# IPv6 client = '::1' else: client = '127.0.0.1' #", "logger.warning('unable to open SSL private key. SSL is OFF') else:", "wsgiapp: the base application logfilename: where to store apache-compatible requests", "request.function, request.extension) # also, make sure the flash is passed", "= string.letters + string.digits password = ''.join([random.choice(chars) for _ in", "request.body: request.body.close() # ################################################## # on application error, rollback database", "BaseAdapter.close_all_instances('rollback') except: pass e = RestrictedError('Framework', '', '', locals()) ticket", "try: signal.signal(signal.SIGTERM, lambda a, b, s=self: s.stop()) signal.signal(signal.SIGINT, lambda a,", "profiler_dir) filepath = pjoin(profiler_dir, 'wtest') try: filehandle = open( filepath,", "(or any WSGI-compatible server). 
- fills request with info -", "Set header so client can distinguish component requests. if request.cid:", "import RestrictedError from gluon.http import HTTP, redirect from gluon.globals import", "path must be either: 1. for static pages: - /<application>/static/<file>", "to call third party WSGI applications \"\"\" self.response.status = str(status).split('", "rewrite incoming URL # parse rewritten header variables # parse", "wsgi application that does logging and profiling and calls wsgibase", "import Rocket') load_routes() HTTPS_SCHEMES = set(('https', 'HTTPS')) def get_client(env): \"\"\"", "or not ssl_private_key: logger.info('SSL is off') elif not rocket.ssl: logger.warning('Python", "# applications_parent (path to applications/, site-packages/ etc) # defaults to", "env=env) if request.body: request.body.close() if hasattr(current, 'request'): # ################################################## #", "not hasattr(self, '_environ'): new_environ = self.wsgi_environ new_environ['wsgi.input'] = self.request.body new_environ['wsgi.version']", "################################################## # handle fcgi missing path_info and query_string # select", "not set # ################################################## default_headers = [ ('Content-Type', contenttype('.' +", "to open SSL private key. 
SSL is OFF') else: sock_list.extend([ssl_private_key,", "%s, %s, %s, %s, %f\\n' % ( environ['REMOTE_ADDR'], datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S'),", "# gc timer # Security Checks: validate URL and session_id", "NameError: name 'gluon' is not defined # See http://bugs.python.org/issue1436 #", "elif password == '<recycle>': # reuse the current password if", "pjoin = os.path.join try: logging.config.fileConfig(abspath(\"logging.conf\")) except: # fails on GAE", "request_queue_size=5, timeout=10, socket_timeout=1, shutdown_timeout=None, # Rocket does not use a", "has inserted path to script directory into sys.path # applications_parent", "interfaces is specified, it must be tested for rocket parameter", "@property def environ(self): if not hasattr(self, '_environ'): new_environ = self.wsgi_environ", "call third party WSGI applications \"\"\" self.response.status = str(status).split(' ',", "application') elif not request.is_local and exists(disabled): five0three = os.path.join(request.folder,'static','503.html') if", "# if interfaces is specified, it must be tested for", "TypeError): pass global_settings.local_hosts = list(local_hosts) else: local_hosts = global_settings.local_hosts client", "cpassword = password[1:-1] else: # use provided password cpassword =", "= Response() session = Session() env = request.env #env.web2py_path =", "password='', pid_filename='httpserver.pid', log_filename='httpserver.log', profiler_dir=None, ssl_certificate=None, ssl_private_key=None, ssl_ca_certificate=None, min_threads=None, max_threads=None, server_name=None,", "try: logging.config.fileConfig(abspath(\"logging.conf\")) except: # fails on GAE or when logfile", "request.wsgi.environ - request.wsgi.start_response to call third party WSGI applications \"\"\"", "else: import cProfile prof = cProfile.Profile() prof.enable() ret[0] = wsgiapp(environ,", "request, response): self.wsgi_environ = environ self.request = request self.response =", "that does logging and 
profiling and calls wsgibase Args: wsgiapp:", "not os.path.isdir(profiler_dir): try: os.makedirs(profiler_dir) except: raise BaseException(\"Can't create dir %s\"", "application, controller, function and extension may only contain `[a-zA-Z0-9_]` -", "client in (None, '', 'unknown'): g = regex_client.search(eget('remote_addr', '')) if", "# ################################################## app = request.application # must go after url_in!", "gluon.compileapp import build_environment, run_models_in, \\ run_controller_in, run_view_in from gluon.contenttype import", "Rocket is able to use several interfaces - must be", "serve_controller(request, response, session): \"\"\" This function is used to generate", "server \"\"\" try: signal.signal(signal.SIGTERM, lambda a, b, s=self: s.stop()) signal.signal(signal.SIGINT,", "elif not request.is_local and exists(disabled): five0three = os.path.join(request.folder,'static','503.html') if os.path.exists(five0three):", "which may be Library.zip # gluon_parent is the directory containing", "# because we do not want the path to this", "= run_view_in(response._view_environment) # logic to garbage collect after exec, not", "################################################## # store cookies in headers # ################################################## session._fixup_before_save() http_response.cookies2headers(response.cookies)", "global_settings.web2py_crontype == 'soft': newcron.softcron(global_settings.applications_parent).start() return http_response.to(responder, env=env) def save_password(password, port):", "\\ = 'attachment' if version: response.headers['Cache-Control'] = 'max-age=315360000' response.headers[ 'Expires']", "directory containing applications/ # and routes.py # The two are", "= open(pjoin(global_settings.gluon_parent, 'VERSION'), 'r') raw_version_string = version_info.read().split()[-1].strip() version_info.close() global_settings.web2py_version =", "call to [function]() in 
applications/[application]/[controller].py rendered by applications/[application]/views/[controller]/[function].html \"\"\" #", "password chars = string.letters + string.digits password = ''.join([random.choice(chars) for", "response.do_not_commit is True: BaseAdapter.close_all_instances(None) elif response.custom_commit: BaseAdapter.close_all_instances(response.custom_commit) else: BaseAdapter.close_all_instances('commit') #", "'<recycle>': # reuse the current password if any if exists(password_file):", "= open(password_file, 'w') if password: fp.write('password=\"%s\"\\n' % cpassword) else: fp.write('password=None\\n')", "web2py.py, logging.conf # and the handlers. # applications_parent (web2py_path) is", "raise HTTP(404, rwthread.routes.error_message % 'invalid request', web2py_error='invalid application') elif not", "status_headers.append(s) status_headers.append(h) return responder(s, h) time_in = time.time() ret =", "def stop(self, stoplogging=False): \"\"\" stop cron and the web server", "# ################################################## # handle fcgi missing path_info and query_string #", "raw_version_string web2py_version = global_settings.web2py_version except: raise RuntimeError(\"Cannot determine web2py version\")", "client = '127.0.0.1' # IPv4 if not is_valid_ip_address(client): raise HTTP(400,", "= socket.getfqdn() local_hosts.add(socket.gethostname()) local_hosts.add(fqdn) local_hosts.update([ addrinfo[4][0] for addrinfo in getipaddrinfo(fqdn)])", "in db try store session on filesystem # this must", "global_settings.web2py_version = raw_version_string web2py_version = global_settings.web2py_version except: raise RuntimeError(\"Cannot determine", "so it may have weird behavior in some cases \"\"\"", "################################################## run_models_in(environment) response._view_environment = copy.copy(environment) page = run_controller_in(request.controller, request.function, environment)", "site-packages/ etc) # defaults to that 
directory set sys.path to", "to open SSL certificate. SSL is OFF') elif not exists(ssl_private_key):", "= logging.getLogger(\"web2py\") from gluon.restricted import RestrictedError from gluon.http import HTTP,", "ajax = x_req_with == 'xmlhttprequest', cid = env.http_web2py_component_element, is_local =", "if global_settings.web2py_crontype == 'soft': newcron.softcron(global_settings.applications_parent).start() return http_response.to(responder, env=env) def save_password(password,", "\\ try_rewrite_on_error, fixup_missing_path_info from gluon import newcron __all__ = ['wsgibase',", "run controller # ################################################## if global_settings.debugging and app != \"admin\":", "server_name = socket.gethostname() logger.info('starting web server...') rocket.SERVER_NAME = server_name rocket.SOCKET_TIMEOUT", "os.path.exists(five0three): raise HTTP(503, file(five0three, 'r').read()) else: raise HTTP(503, \"<html><body><h1>Temporarily down", "password[1:-1] else: # use provided password cpassword = CRYPT()(password)[0] fp", "self.response = response @property def environ(self): if not hasattr(self, '_environ'):", "middleware def wsgibase(environ, responder): \"\"\" The gluon wsgi application. 
The", "in controller you can use: - request.wsgi.environ - request.wsgi.start_response to", "'request'): # ################################################## # on success, try store session in", "ip='1172.16.17.32', port=8000, password='', pid_filename='httpserver.pid', log_filename='httpserver.log', profiler_dir=None, ssl_certificate=None, ssl_private_key=None, ssl_ca_certificate=None, min_threads=None,", "raise BaseException(\"Unable to write to dir %s\" % profiler_dir) def", "pre-check=0'), ('Expires', time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime())), ('Pragma',", "100 requests global requests requests = ('requests' in globals()) and", "# single invalid cookie ignore # ################################################## # try load", "# if session not in db try store session on", "responder2(s, h): \"\"\" wsgi responder app \"\"\" status_headers.append(s) status_headers.append(h) return", "(static_file, version, environ) = url_in(request, environ) response.status = env.web2py_status_code or", "LazyWSGI(environ, request, response) # ################################################## # load cookies # ##################################################", "... import gluon.messageboxhandler logging.gluon = gluon # so we must", "environ, request, response): self.wsgi_environ = environ self.request = request self.response", "logging.basicConfig() logger = logging.getLogger(\"web2py\") from gluon.restricted import RestrictedError from gluon.http", "31 Dec 2037 23:59:59 GMT' response.stream(static_file, request=request) # ################################################## #", "profiler_dir) def app_with_logging(environ, responder): \"\"\" a wsgi app that does", "request.is_local and exists(disabled): five0three = os.path.join(request.folder,'static','503.html') if os.path.exists(five0three): raise HTTP(503,", "dict(ticket=ticket), web2py_error='ticket %s' % ticket) finally: if response and hasattr(response,", "actions must return strings. 
uses a simulated environment so it", "str(env.http_x_requested_with).lower() cmd_opts = global_settings.cmd_options request.update( client = client, folder =", "\"\"\" Used by main() to save the password in the", "replacing '.' with '_' - adds web2py path and version", "select rewrite parameters # rewrite incoming URL # parse rewritten", "request.update( client = client, folder = abspath('applications', app) + os.sep,", "extension may only contain `[a-zA-Z0-9_]` - file and sub may", "= set(['127.0.0.1', '::ffff:127.0.0.1', '::1']) if not global_settings.web2py_runtime_gae: try: fqdn =", "################################################## if global_settings.debugging and app != \"admin\": import gluon.debug #", "applications/[application]/[controller].py rendered by applications/[application]/views/[controller]/[function].html \"\"\" # ################################################## # build environment", "= x_req_with == 'xmlhttprequest', cid = env.http_web2py_component_element, is_local = (env.remote_addr", "or dynamic). It can be called by paste.httpserver or by", "validates the path in url The url path must be", "API\") if profiler_dir: profiler_dir = abspath(profiler_dir) logger.warn('profiler is on. will", "except: raise BaseException(\"Can't create dir %s\" % profiler_dir) filepath =", "[0] if not profiler_dir: ret[0] = wsgiapp(environ, responder2) else: import", "(e.g. 
content of tuples or ip-format) import types if isinstance(interfaces,", "first function called when a page is requested (static or", "client = (g.group() or '').split(',')[0] if g else None if", "# set default view, controller can override it response.view =", "response.view = '%s/%s.%s' % (request.controller, request.function, request.extension) # also, make", "'127.0.0.1' # IPv4 if not is_valid_ip_address(client): raise HTTP(400, \"Bad Request", "timeout=int(timeout), handle_signals=False, ) def start(self): \"\"\" start the web server", "Rocket does not use a shutdown timeout path=None, interfaces=None #", "current.__dict__.clear() request = Request(environ) response = Response() session = Session()", "eget = env.get g = regex_client.search(eget('http_x_forwarded_for', '')) client = (g.group()", "= LazyWSGI(environ, request, response) # ################################################## # load cookies #", "1) % 100 or 0 if not requests: gc.collect() #", "filepath = pjoin(profiler_dir, 'wtest') try: filehandle = open( filepath, 'w'", "max_threads=max_threads, queue_size=int(request_queue_size), timeout=int(timeout), handle_signals=False, ) def start(self): \"\"\" start the", "SSL certificate. SSL is OFF') elif not exists(ssl_private_key): logger.warning('unable to", "the handlers. 
# applications_parent (web2py_path) is the directory containing applications/", "the GET and POST data # ################################################## #parse_get_post_vars(request, environ) #", "RestrictedError as e: if request.body: request.body.close() # ################################################## # on", "in HTTPS_SCHEMES or \\ request.env.http_x_forwarded_proto in HTTPS_SCHEMES \\ or env.https", "time_in = time.time() ret = [0] if not profiler_dir: ret[0]", "option # main.web2py_path is the same as applications_parent (for backward", "OF FREEZE PROCESS import gc import os import re import", "logfile is missing logging.basicConfig() logger = logging.getLogger(\"web2py\") from gluon.restricted import", "if not requests: gc.collect() # end garbage collection logic #", "gluon import rocket except: if not global_settings.web2py_runtime_gae: logger.warn('unable to import", "render the output using a view/template. this function must run", "and profiling and calls wsgibase Args: wsgiapp: the base application", "-*- \"\"\" | This file is part of the web2py", "# This needed to prevent exception on Python 2.5: #", "return strings. 
uses a simulated environment so it may have", "- request.wsgi.environ - request.wsgi.start_response to call third party WSGI applications", "on success, commit database # ################################################## if response.do_not_commit is True:", "pid_filename='httpserver.pid', log_filename='httpserver.log', profiler_dir=None, ssl_certificate=None, ssl_private_key=None, ssl_ca_certificate=None, min_threads=None, max_threads=None, server_name=None, request_queue_size=5,", "isinstance(i, tuple): raise \"Wrong format for rocket interfaces parameter -", "interfaces: # if interfaces is specified, it must be tested", "in languages # pattern used to validate client address regex_client", "is validated in languages # pattern used to validate client", "= password[1:-1] else: # use provided password cpassword = CRYPT()(password)[0]", "prof.enable() ret[0] = wsgiapp(environ, responder2) prof.disable() destfile = pjoin(profiler_dir, \"req_%s.prof\"", "response): self.wsgi_environ = environ self.request = request self.response = response", "+ os.sep, ajax = x_req_with == 'xmlhttprequest', cid = env.http_web2py_component_element,", "- time_in, ) if not logfilename: sys.stdout.write(line) elif isinstance(logfilename, str):", "# build missing folders # ################################################## create_missing_app_folders(request) # ################################################## #", "function called when a page is requested (static or dynamic).", "+ 1) % 100 or 0 if not requests: gc.collect()", "(static or dynamic). It can be called by paste.httpserver or", "# set up logging for subsequent imports import logging import", "are: - application, controller, function and extension may only contain", "validate URL and session_id here, # accept_language is validated in", "'-', '=', '.' 
and '/' \"\"\" eget = environ.get current.__dict__.clear()", "import abspath, write_file from gluon.settings import global_settings from gluon.utils import", "version: response.headers['Cache-Control'] = 'max-age=315360000' response.headers[ 'Expires'] = 'Thu, 31 Dec", "url_in(request, environ) response.status = env.web2py_status_code or response.status if static_file: if", "from gluon.rewrite import load as load_routes, url_in, THREAD_LOCAL as rwthread,", "################################################## if response.do_not_commit is True: BaseAdapter.close_all_instances(None) elif response.custom_commit: BaseAdapter.close_all_instances(response.custom_commit) else:", "signal.signal(signal.SIGINT, lambda a, b, s=self: s.stop()) except: pass write_file(self.pid_filename, str(os.getpid()))", "password: fp.write('password=\"%s\"\\n' % cpassword) else: fp.write('password=None\\n') fp.close() def appfactory(wsgiapp=wsgibase, logfilename='httpserver.log',", "# if tickets in db, reconnect and store it in", "'unknown' http_response = \\ HTTP(500, rwthread.routes.error_message_ticket % dict(ticket=ticket), web2py_error='ticket %s'", "% 'invalid request', web2py_error='invalid application') elif not request.is_local and exists(disabled):", "# ################################################## # fill in request items # ################################################## app", "# fill in request items # ################################################## app = request.application", "pages: - /<application>/static/<file> 2. 
for dynamic pages: - /<application>[/<controller>[/<function>[/<sub>]]][.<extension>] The", "True: BaseAdapter.close_all_instances(None) elif response.custom_commit: BaseAdapter.close_all_instances(response.custom_commit) else: BaseAdapter.close_all_instances('commit') # ################################################## #", "addrinfo in getipaddrinfo(fqdn)]) if env.server_name: local_hosts.add(env.server_name) local_hosts.update([ addrinfo[4][0] for addrinfo", "naming conventions are: - application, controller, function and extension may", "for convenience # ################################################## request.wsgi = LazyWSGI(environ, request, response) #", "main.web2py_path is the same as applications_parent (for backward compatibility) web2py_path", "import signal import socket import random import string from gluon._compat", "getipaddrinfo(env.server_name)]) except (socket.gaierror, TypeError): pass global_settings.local_hosts = list(local_hosts) else: local_hosts", "can be called by paste.httpserver or by apache mod_wsgi (or", "g = regex_client.search(eget('http_x_forwarded_for', '')) client = (g.group() or '').split(',')[0] if", "\"admin\": import gluon.debug # activate the debugger gluon.debug.dbg.do_debug(mainpyfile=request.folder) serve_controller(request, response,", "response.js: http_response.headers['web2py-component-command'] = \\ urllib2.quote(response.js.replace('\\n', '')) # ################################################## # store", "@request.wsgi.middleware(middleware1, middleware2, ...) to decorate actions with WSGI middleware. 
actions", "'welcome': redirect(URL('welcome', 'default', 'index')) elif rwthread.routes.error_handler: _handler = rwthread.routes.error_handler redirect(URL(_handler['application'],", "dir %s\" % profiler_dir) filepath = pjoin(profiler_dir, 'wtest') try: filehandle", "page, **response.headers) class LazyWSGI(object): def __init__(self, environ, request, response): self.wsgi_environ", "[ip, port] if not ssl_certificate or not ssl_private_key: logger.info('SSL is", "the password in the parameters_port.py file. \"\"\" password_file = abspath('parameters_%i.py'", "abspath, write_file from gluon.settings import global_settings from gluon.utils import web2py_uuid", "not in DB if not request.tickets_db: ticket = e.log(request) or", "\"\"\" This function is used to generate a dynamic page.", "caller(app): return app(self.environ, self.start_response) return lambda caller=caller, app=app: caller(app) return", "'save_password', 'appfactory', 'HttpServer'] requests = 0 # gc timer #", "import gluon.debug # activate the debugger gluon.debug.dbg.do_debug(mainpyfile=request.folder) serve_controller(request, response, session)", "web2py_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # because we do not want the", "env.wsgi_url_scheme in HTTPS_SCHEMES or \\ request.env.http_x_forwarded_proto in HTTPS_SCHEMES \\ or", "self.response.write(escape=False, *args, **kargs) def middleware(self, *middleware_apps): \"\"\" In you controller", "fills request with info - the environment variables, replacing '.'", "serve file if static # ################################################## fixup_missing_path_info(environ) (static_file, version, environ)", "http_response = \\ HTTP(500, rwthread.routes.error_message_ticket % dict(ticket=ticket), web2py_error='ticket %s' %", "write_file(self.pid_filename, str(os.getpid())) self.server.start() def stop(self, stoplogging=False): \"\"\" stop cron and", "os.path.join(request.folder,'static','503.html') if os.path.exists(five0three): 
raise HTTP(503, file(five0three, 'r').read()) else: raise HTTP(503,", "controller and view (if required) # ################################################## run_models_in(environment) response._view_environment =", "before rollback if not in DB if not request.tickets_db: ticket", "# ################################################## # access the requested application # ################################################## disabled", "os.chdir(path) load_routes() for p in (path, abspath('site-packages'), \"\"): add_path_first(p) if", "HTTP, redirect from gluon.globals import Request, Response, Session from gluon.compileapp", "session = Session() env = request.env #env.web2py_path = global_settings.applications_parent env.web2py_version", "if exists(password_file): return else: password = '' elif password.startswith('<pam_user:'): #", "profilerfilename is not None: raise BaseException(\"Deprecated API\") if profiler_dir: profiler_dir", "# # this is wrong: # web2py_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) #", "import Cookie, urllib2 #from thread import allocate_lock from gluon.fileutils import", "- file and sub may also contain '-', '=', '.'", "static_file: return http_response.to(responder, env=env) if request.body: request.body.close() if hasattr(current, 'request'):", "pjoin(profiler_dir, 'wtest') try: filehandle = open( filepath, 'w' ) filehandle.close()", "'web2py-component-content', 'replace') if request.ajax: if response.flash: http_response.headers['web2py-component-flash'] = \\ urllib2.quote(xmlescape(response.flash).replace(b'\\n',", "commit database # ################################################## if response.do_not_commit is True: BaseAdapter.close_all_instances(None) elif", "to store apache-compatible requests log profiler_dir: where to store profile", "if a path is specified change the global variables so", "try: # ################################################## # handle fcgi missing path_info and query_string", "# end 
garbage collection logic # ################################################## # set default", "gluon_parent/site-packages, gluon_parent, ...) # # this is wrong: # web2py_path", "(http://www.gnu.org/licenses/lgpl.html) The gluon wsgi application --------------------------- \"\"\" from __future__ import", "Framework | Copyrighted by <NAME> <<EMAIL>> | License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)", "socket import random import string from gluon._compat import Cookie, urllib2", "write to dir %s\" % profiler_dir) def app_with_logging(environ, responder): \"\"\"", "if request.ajax: if response.flash: http_response.headers['web2py-component-flash'] = \\ urllib2.quote(xmlescape(response.flash).replace(b'\\n', b'')) if", "utf-8 -*- \"\"\" | This file is part of the", "[ ('Content-Type', contenttype('.' + request.extension)), ('Cache-Control', 'no-store, no-cache, must-revalidate, post-check=0,", "password == '<recycle>': # reuse the current password if any", "environment for controller and view # ################################################## environment = build_environment(request,", "if any if exists(password_file): return else: password = '' elif", "cpassword) else: fp.write('password=None\\n') fp.close() def appfactory(wsgiapp=wsgibase, logfilename='httpserver.log', profiler_dir=None, profilerfilename=None): \"\"\"", "a, b, s=self: s.stop()) except: pass write_file(self.pid_filename, str(os.getpid())) self.server.start() def", "view # ################################################## environment = build_environment(request, response, session) # set", "from gluon.admin import add_path_first, create_missing_folders, create_missing_app_folders from gluon.globals import current", "session or create new session file # ################################################## if not", "use:: @request.wsgi.middleware(middleware1, middleware2, ...) 
to decorate actions with WSGI middleware.", "try_rewrite_on_error( http_response, request, environ, ticket) if not http_response: return wsgibase(new_environ,", "None try: try: try: # ################################################## # handle fcgi missing", "set(('https', 'HTTPS')) def get_client(env): \"\"\" Guesses the client address from", "# main.web2py_path is the same as applications_parent (for backward compatibility)", "(path to applications/, site-packages/ etc) # defaults to that directory", "is part of the web2py Web Framework | Copyrighted by", "all models, then runs the function in the controller, and", "if path: # if a path is specified change the", "# log tickets before rollback if not in DB if", "env.https == 'on' ) request.url = environ['PATH_INFO'] # ################################################## #", "= raw_version_string web2py_version = global_settings.web2py_version except: raise RuntimeError(\"Cannot determine web2py", "models, then runs the function in the controller, and then", "web2py web server (Rocket) \"\"\" def __init__( self, ip='1172.16.17.32', port=8000,", "env.web2py_disable_session: session._try_store_in_cookie_or_file(request, response) # Set header so client can distinguish", "= env.http_web2py_component_element, is_local = (env.remote_addr in local_hosts and client ==", "app=app: caller(app) return middleware def wsgibase(environ, responder): \"\"\" The gluon", "ozancag import locale locale.setlocale(locale.LC_CTYPE, \"C\") # IMPORTANT, web2py requires locale", "http_response.cookies2headers(response.cookies) ticket = None except RestrictedError as e: if request.body:", "not use a shutdown timeout path=None, interfaces=None # Rocket is", "admin password is \"%s\"' % password) print('*********************************************************') elif password ==", "\"\"\" # ################################################## # build environment for controller and view", "'r') raw_version_string = 
version_info.read().split()[-1].strip() version_info.close() global_settings.web2py_version = raw_version_string web2py_version =", "stoplogging=False): \"\"\" stop cron and the web server \"\"\" newcron.stopcron()", "database # ################################################## if not env.web2py_disable_session: session._try_store_in_db(request, response) # ##################################################", "URL, xmlescape from gluon.utils import is_valid_ip_address, getipaddrinfo from gluon.rewrite import", "[function]() in applications/[application]/[controller].py rendered by applications/[application]/views/[controller]/[function].html \"\"\" # ################################################## #", "e = RestrictedError('Framework', '', '', locals()) ticket = e.log(request) or", "page response._view_environment.update(page) page = run_view_in(response._view_environment) # logic to garbage collect", "module unavailable. SSL is OFF') elif not exists(ssl_certificate): logger.warning('unable to", "import logging import logging.config # This needed to prevent exception", "missing path_info and query_string # select rewrite parameters # rewrite", "False http_response = None try: try: try: # ################################################## #", "environ['PATH_INFO'].replace(',', '%2C'), environ['SERVER_PROTOCOL'], (status_headers[0])[:3], time.time() - time_in, ) if not", "ssl_certificate=None, ssl_private_key=None, ssl_ca_certificate=None, min_threads=None, max_threads=None, server_name=None, request_queue_size=5, timeout=10, socket_timeout=1, shutdown_timeout=None,", "middleware(self, *middleware_apps): \"\"\" In you controller use:: @request.wsgi.middleware(middleware1, middleware2, ...)", "and exists(disabled): five0three = os.path.join(request.folder,'static','503.html') if os.path.exists(five0three): raise HTTP(503, file(five0three,", "# ################################################## if not env.web2py_disable_session: session._try_store_in_db(request, 
response) # ################################################## #", "http_response = hr if static_file: return http_response.to(responder, env=env) if request.body:", "# pattern used to validate client address regex_client = re.compile('[\\w\\-:]+(\\.[\\w\\-]+)*\\.?')", "if app == rwthread.routes.default_application \\ and app != 'welcome': redirect(URL('welcome',", "file and sub may also contain '-', '=', '.' and", "import random import string from gluon._compat import Cookie, urllib2 #from", "_handler['function'], args=app)) else: raise HTTP(404, rwthread.routes.error_message % 'invalid request', web2py_error='invalid", "session._try_store_in_cookie_or_file(request, response) # Set header so client can distinguish component", "(running locally) \"\"\" eget = env.get g = regex_client.search(eget('http_x_forwarded_for', ''))", "to applications/, site-packages/ etc) # defaults to that directory set", "socket_timeout=1, shutdown_timeout=None, # Rocket does not use a shutdown timeout", "import sys import time import datetime import signal import socket", "session in database # ################################################## if not env.web2py_disable_session: session._try_store_in_db(request, response)", "http_response = None try: try: try: # ################################################## # handle", "session on filesystem # this must be done after trying", "incoming URL # parse rewritten header variables # parse rewritten", "called by paste.httpserver or by apache mod_wsgi (or any WSGI-compatible", "= request.env #env.web2py_path = global_settings.applications_parent env.web2py_version = web2py_version #env.update(global_settings) static_file", "IPV6 try: version_info = open(pjoin(global_settings.gluon_parent, 'VERSION'), 'r') raw_version_string = version_info.read().split()[-1].strip()", "path_info and query_string # select rewrite parameters # rewrite incoming", "= run_controller_in(request.controller, request.function, environment) if isinstance(page, dict): 
response._vars = page", "SSL is OFF') else: sock_list.extend([ssl_private_key, ssl_certificate]) if ssl_ca_certificate: sock_list.append(ssl_ca_certificate) logger.info('SSL", "requests log profiler_dir: where to store profile files \"\"\" if", "# web2py_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # because we do not want", "################################################## create_missing_app_folders(request) # ################################################## # get the GET and POST", "is specified change the global variables so that web2py #", "gluon_parent, ...) # # this is wrong: # web2py_path =", "'')) # ################################################## # store cookies in headers # ##################################################", "'', 'unknown'): g = regex_client.search(eget('remote_addr', '')) if g: client =", "the flash is passed through # ################################################## # process models,", "self.response.headers = dict(headers) return lambda *args, **kargs: \\ self.response.write(escape=False, *args,", "= item(app) def caller(app): return app(self.environ, self.start_response) return lambda caller=caller,", "return ret[0] return app_with_logging class HttpServer(object): \"\"\" the web2py web", "{'wsgi_app': appfactory(wsgibase, log_filename, profiler_dir)} self.server = rocket.Rocket(interfaces or tuple(sock_list), method='wsgi',", "\\ self.response.write(escape=False, *args, **kargs) def middleware(self, *middleware_apps): \"\"\" In you", "to import Rocket') load_routes() HTTPS_SCHEMES = set(('https', 'HTTPS')) def get_client(env):", "status='200', headers=[], exec_info=None): \"\"\" in controller you can use: -", "\"\"\" if profilerfilename is not None: raise BaseException(\"Deprecated API\") if", "\"\"\" | This file is part of the web2py Web", "################################################## request.wsgi = LazyWSGI(environ, request, response) # ################################################## # load", 
"!= \"admin\": import gluon.debug # activate the debugger gluon.debug.dbg.do_debug(mainpyfile=request.folder) serve_controller(request,", "rocket.SOCKET_TIMEOUT = socket_timeout sock_list = [ip, port] if not ssl_certificate", "does logging and profiling and calls wsgibase \"\"\" status_headers =", "in the controller, and then tries to render the output", "'').startswith('attachment'): response.headers['Content-Disposition'] \\ = 'attachment' if version: response.headers['Cache-Control'] = 'max-age=315360000'", "in the parameters_port.py file. \"\"\" password_file = abspath('parameters_%i.py' % port)", "fqdn = socket.getfqdn() local_hosts.add(socket.gethostname()) local_hosts.add(fqdn) local_hosts.update([ addrinfo[4][0] for addrinfo in", "request.extension) # also, make sure the flash is passed through", "\"\"\" def __init__( self, ip='1172.16.17.32', port=8000, password='', pid_filename='httpserver.pid', log_filename='httpserver.log', profiler_dir=None,", "class HttpServer(object): \"\"\" the web2py web server (Rocket) \"\"\" def", "env.http_web2py_component_element, is_local = (env.remote_addr in local_hosts and client == env.remote_addr),", "(requests + 1) % 100 or 0 if not requests:", "'a') else: logfilename.write(line) except: pass return ret[0] return app_with_logging class", "################################################## # get the GET and POST data # ##################################################", "return else: password = '' elif password.startswith('<pam_user:'): # use the", "get_client(env) x_req_with = str(env.http_x_requested_with).lower() cmd_opts = global_settings.cmd_options request.update( client =", "pages: - /<application>[/<controller>[/<function>[/<sub>]]][.<extension>] The naming conventions are: - application, controller,", "request, environ, ticket) if not http_response: return wsgibase(new_environ, responder) if", "open(password_file, 'w') if password: fp.write('password=\"%s\"\\n' % cpassword) else: fp.write('password=None\\n') 
fp.close()", "time.time() ret = [0] if not profiler_dir: ret[0] = wsgiapp(environ,", "if g else None if client in (None, '', 'unknown'):", "web2py # runs from there instead of cwd or os.environ['web2py_path']", "os.unlink(filepath) except IOError: raise BaseException(\"Unable to write to dir %s\"", "RuntimeError(\"Cannot determine web2py version\") try: from gluon import rocket except:", "call to the url /[application]/[controller]/[function] that would result in a", "session._unlock(response) http_response, new_environ = try_rewrite_on_error( http_response, request, environ, ticket) if", "client address from the environment variables First tries 'http_x_forwarded_for', secondly", "static # ################################################## fixup_missing_path_info(environ) (static_file, version, environ) = url_in(request, environ)", "def app_with_logging(environ, responder): \"\"\" a wsgi app that does logging", "if not server_name: server_name = socket.gethostname() logger.info('starting web server...') rocket.SERVER_NAME", "In you controller use:: @request.wsgi.middleware(middleware1, middleware2, ...) to decorate actions", "changes locale ... import gluon.messageboxhandler logging.gluon = gluon # so", "use a shutdown timeout path=None, interfaces=None # Rocket is able", "__init__( self, ip='1172.16.17.32', port=8000, password='', pid_filename='httpserver.pid', log_filename='httpserver.log', profiler_dir=None, ssl_certificate=None, ssl_private_key=None,", "cookies # ################################################## if env.http_cookie: for single_cookie in env.http_cookie.split(';'): single_cookie", "application error, rollback database # ################################################## # log tickets before", "log tickets before rollback if not in DB if not", "(for backward compatibility) web2py_path = global_settings.applications_parent # backward compatibility create_missing_folders()", "or by apache mod_wsgi (or any WSGI-compatible server). 
- fills", "from gluon.compileapp import build_environment, run_models_in, \\ run_controller_in, run_view_in from gluon.contenttype", "%s, %s, %f\\n' % ( environ['REMOTE_ADDR'], datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S'), environ['REQUEST_METHOD'], environ['PATH_INFO'].replace(',',", "tries 'http_x_forwarded_for', secondly 'remote_addr' if all fails, assume '127.0.0.1' or", "use dir %s', profiler_dir) if not os.path.isdir(profiler_dir): try: os.makedirs(profiler_dir) except:", "parameters_port.py file. \"\"\" password_file = abspath('parameters_%i.py' % port) if password", "requests: gc.collect() # end garbage collection logic # ################################################## #", "by <NAME> <<EMAIL>> | License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) The gluon wsgi", "rwthread.routes.error_handler: _handler = rwthread.routes.error_handler redirect(URL(_handler['application'], _handler['controller'], _handler['function'], args=app)) else: raise", "response, session) except HTTP as hr: http_response = hr if", "GMT', time.gmtime())), ('Pragma', 'no-cache')] for key, value in default_headers: response.headers.setdefault(key,", "################################################## # log tickets before rollback if not in DB", "[] def responder2(s, h): \"\"\" wsgi responder app \"\"\" status_headers.append(s)", "paste.httpserver or by apache mod_wsgi (or any WSGI-compatible server). 
-", "'', locals()) ticket = e.log(request) or 'unrecoverable' http_response = \\", "= try_rewrite_on_error( http_response, request, environ, ticket) if not http_response: return", "web2py_uuid()) prof.dump_stats(destfile) try: line = '%s, %s, %s, %s, %s,", "start_response(self, status='200', headers=[], exec_info=None): \"\"\" in controller you can use:", "if not global_settings.web2py_runtime_gae: logger.warn('unable to import Rocket') load_routes() HTTPS_SCHEMES =", "on filesystem # this must be done after trying to", "main() to save the password in the parameters_port.py file. \"\"\"", "is the directory containing gluon, web2py.py, logging.conf # and the", "HTTP(503, file(five0three, 'r').read()) else: raise HTTP(503, \"<html><body><h1>Temporarily down for maintenance</h1></body></html>\")", "getipaddrinfo from gluon.rewrite import load as load_routes, url_in, THREAD_LOCAL as", "\"\"\" password_file = abspath('parameters_%i.py' % port) if password == '<random>':", "= ''.join([random.choice(chars) for _ in range(8)]) cpassword = CRYPT()(password)[0] print('*******************", "gluon.html import URL, xmlescape from gluon.utils import is_valid_ip_address, getipaddrinfo from", "if profilerfilename is not None: raise BaseException(\"Deprecated API\") if profiler_dir:", "password cpassword = CRYPT()(password)[0] fp = open(password_file, 'w') if password:", "runs from there instead of cwd or os.environ['web2py_path'] global web2py_path", "gc import os import re import copy import sys import", "if not profiler_dir: ret[0] = wsgiapp(environ, responder2) else: import cProfile", "if not env.web2py_disable_session: session._try_store_in_cookie_or_file(request, response) # Set header so client", "return [data] for item in middleware_apps: app = item(app) def", "assume '127.0.0.1' or '::1' (running locally) \"\"\" eget = env.get", "from gluon.utils import web2py_uuid from gluon.admin import add_path_first, create_missing_folders, create_missing_app_folders", "rollback 
database # ################################################## try: if response._custom_rollback: response._custom_rollback() else: BaseAdapter.close_all_instances('rollback')", "addrinfo[4][0] for addrinfo in getipaddrinfo(fqdn)]) if env.server_name: local_hosts.add(env.server_name) local_hosts.update([ addrinfo[4][0]", "__init__(self, environ, request, response): self.wsgi_environ = environ self.request = request", "The url path must be either: 1. for static pages:", "for fcgi missing path_info and query_string - validates the path", "= str(env.http_x_requested_with).lower() cmd_opts = global_settings.cmd_options request.update( client = client, folder", "from pydal.base import BaseAdapter from gluon.validators import CRYPT from gluon.html", "ticket) if not http_response: return wsgibase(new_environ, responder) if global_settings.web2py_crontype ==", "'Thu, 31 Dec 2037 23:59:59 GMT' response.stream(static_file, request=request) # ##################################################", "load_routes() HTTPS_SCHEMES = set(('https', 'HTTPS')) def get_client(env): \"\"\" Guesses the", "re.compile('[\\w\\-:]+(\\.[\\w\\-]+)*\\.?') # ## to account for IPV6 try: version_info =", "string from gluon._compat import Cookie, urllib2 #from thread import allocate_lock", "# serve file if static # ################################################## fixup_missing_path_info(environ) (static_file, version,", "environ self.request = request self.response = response @property def environ(self):", "info - the environment variables, replacing '.' with '_' -", "page is requested (static or dynamic). 
It can be called", "# and routes.py # The two are identical unless web2py_path", "%s, %s, %s, %s, %s, %f\\n' % ( environ['REMOTE_ADDR'], datetime.datetime.today().strftime('%Y-%m-%d", "to script directory into sys.path # applications_parent (path to applications/,", "eget('QUERY_STRING', '').startswith('attachment'): response.headers['Content-Disposition'] \\ = 'attachment' if version: response.headers['Cache-Control'] =", "response.flash: http_response.headers['web2py-component-flash'] = \\ urllib2.quote(xmlescape(response.flash).replace(b'\\n', b'')) if response.js: http_response.headers['web2py-component-command'] =", "g = regex_client.search(eget('remote_addr', '')) if g: client = g.group() elif", "def environ(self): if not hasattr(self, '_environ'): new_environ = self.wsgi_environ new_environ['wsgi.input']", "rollback if response._custom_rollback: response._custom_rollback() else: BaseAdapter.close_all_instances('rollback') # if tickets in", "the web server \"\"\" try: signal.signal(signal.SIGTERM, lambda a, b, s=self:", "the directory containing gluon, web2py.py, logging.conf # and the handlers.", "= time.time() ret = [0] if not profiler_dir: ret[0] =", "save_password(password, port) self.pid_filename = pid_filename if not server_name: server_name =", "the function in the controller, and then tries to render", "session) except HTTP as hr: http_response = hr if static_file:", "exists(ssl_private_key): logger.warning('unable to open SSL private key. SSL is OFF')", "# must go after url_in! if not global_settings.local_hosts: local_hosts =", "= gluon # so we must restore it! 
Thanks ozancag", "database # ################################################## if response.do_not_commit is True: BaseAdapter.close_all_instances(None) elif response.custom_commit:", "else None if client in (None, '', 'unknown'): g =", "os.path.isdir(profiler_dir): try: os.makedirs(profiler_dir) except: raise BaseException(\"Can't create dir %s\" %", "<NAME> <<EMAIL>> | License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) The gluon wsgi application", "logger.info('SSL is off') elif not rocket.ssl: logger.warning('Python \"ssl\" module unavailable.", "# ################################################## # on success, commit database # ################################################## if", "parameter correctness # not necessarily completely tested (e.g. content of", "= CRYPT()(password)[0] fp = open(password_file, 'w') if password: fp.write('password=\"%s\"\\n' %", "PART OF FREEZE PROCESS import gc import os import re", "if not env.web2py_disable_session: session.connect(request, response) # ################################################## # run controller", "must-revalidate, post-check=0, pre-check=0'), ('Expires', time.strftime('%a, %d %b %Y %H:%M:%S GMT',", "request.wsgi = LazyWSGI(environ, request, response) # ################################################## # load cookies", "web2py_path = global_settings.applications_parent # backward compatibility create_missing_folders() # set up", "would result in a call to [function]() in applications/[application]/[controller].py rendered", "Session() env = request.env #env.web2py_path = global_settings.applications_parent env.web2py_version = web2py_version", "%s, %f\\n' % ( environ['REMOTE_ADDR'], datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S'), environ['REQUEST_METHOD'], environ['PATH_INFO'].replace(',', '%2C'),", "name 'gluon' is not defined # See http://bugs.python.org/issue1436 # attention!,", "elif not exists(ssl_certificate): logger.warning('unable to open SSL certificate. 
SSL is", "response.headers['Content-Disposition'] \\ = 'attachment' if version: response.headers['Cache-Control'] = 'max-age=315360000' response.headers[", "it response.view = '%s/%s.%s' % (request.controller, request.function, request.extension) # also,", "Library.zip # gluon_parent is the directory containing gluon, web2py.py, logging.conf", "('Cache-Control', 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0'), ('Expires', time.strftime('%a, %d %b", "= global_settings.local_hosts client = get_client(env) x_req_with = str(env.http_x_requested_with).lower() cmd_opts =", "content of tuples or ip-format) import types if isinstance(interfaces, list):", "from gluon import rocket except: if not global_settings.web2py_runtime_gae: logger.warn('unable to", "import cProfile prof = cProfile.Profile() prof.enable() ret[0] = wsgiapp(environ, responder2)", "as string ): \"\"\" starts the web server. \"\"\" if", "_handler['controller'], _handler['function'], args=app)) else: raise HTTP(404, rwthread.routes.error_message % 'invalid request',", "example would be the call to the url /[application]/[controller]/[function] that", "global requests requests = ('requests' in globals()) and (requests +", "time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime())), ('Pragma', 'no-cache')] for", "lambda a, b, s=self: s.stop()) signal.signal(signal.SIGINT, lambda a, b, s=self:", "not exists(ssl_private_key): logger.warning('unable to open SSL private key. SSL is", "in headers # ################################################## session._fixup_before_save() http_response.cookies2headers(response.cookies) ticket = None except", "to decorate actions with WSGI middleware. 
actions must return strings.", "= version_info.read().split()[-1].strip() version_info.close() global_settings.web2py_version = raw_version_string web2py_version = global_settings.web2py_version except:", "responder): \"\"\" a wsgi app that does logging and profiling", "get_client(env): \"\"\" Guesses the client address from the environment variables", "= re.compile('[\\w\\-:]+(\\.[\\w\\-]+)*\\.?') # ## to account for IPV6 try: version_info", "in middleware_apps: app = item(app) def caller(app): return app(self.environ, self.start_response)", "static pages: - /<application>/static/<file> 2. for dynamic pages: - /<application>[/<controller>[/<function>[/<sub>]]][.<extension>]", "\"Bad Request (request.client=%s)\" % client) return client def serve_controller(request, response,", "http://bugs.python.org/issue1436 # attention!, the import Tkinter in messageboxhandler, changes locale", "\"\"\" start the web server \"\"\" try: signal.signal(signal.SIGTERM, lambda a,", "request.extension)), ('Cache-Control', 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0'), ('Expires', time.strftime('%a, %d", "ignore # ################################################## # try load session or create new", "client, folder = abspath('applications', app) + os.sep, ajax = x_req_with", "to # (\"\", gluon_parent/site-packages, gluon_parent, ...) # # this is", "(\"\", gluon_parent/site-packages, gluon_parent, ...) # # this is wrong: #", "password) print('*********************************************************') elif password == '<recycle>': # reuse the current", "--------------------------- \"\"\" from __future__ import print_function if False: import import_all", "################################################## default_headers = [ ('Content-Type', contenttype('.' 
+ request.extension)), ('Cache-Control', 'no-store,", "global_settings.applications_parent = path os.chdir(path) load_routes() for p in (path, abspath('site-packages'),", "to use several interfaces - must be list of socket-tuples", "None: raise BaseException(\"Deprecated API\") if profiler_dir: profiler_dir = abspath(profiler_dir) logger.warn('profiler", "if isinstance(interfaces, list): for i in interfaces: if not isinstance(i,", "- fills request with info - the environment variables, replacing", "SSL is OFF') elif not exists(ssl_private_key): logger.warning('unable to open SSL", "%b %Y %H:%M:%S GMT', time.gmtime())), ('Pragma', 'no-cache')] for key, value", "timer # Security Checks: validate URL and session_id here, #", "################################################## # fill in request items # ################################################## app =", "\\ urllib2.quote(response.js.replace('\\n', '')) # ################################################## # store cookies in headers", "= open( filepath, 'w' ) filehandle.close() os.unlink(filepath) except IOError: raise", "= [] def responder2(s, h): \"\"\" wsgi responder app \"\"\"", "gluon import newcron __all__ = ['wsgibase', 'save_password', 'appfactory', 'HttpServer'] requests", "not rocket.ssl: logger.warning('Python \"ssl\" module unavailable. SSL is OFF') elif", "# ################################################## if not env.web2py_disable_session: session.connect(request, response) # ################################################## #", "open SSL certificate. 
SSL is OFF') elif not exists(ssl_private_key): logger.warning('unable", "import load as load_routes, url_in, THREAD_LOCAL as rwthread, \\ try_rewrite_on_error,", "web server \"\"\" try: signal.signal(signal.SIGTERM, lambda a, b, s=self: s.stop())", "applications_parent (path to applications/, site-packages/ etc) # defaults to that", "new_environ = try_rewrite_on_error( http_response, request, environ, ticket) if not http_response:", "response.headers[ 'Expires'] = 'Thu, 31 Dec 2037 23:59:59 GMT' response.stream(static_file,", "- request.wsgi.start_response to call third party WSGI applications \"\"\" self.response.status", "if env.server_name: local_hosts.add(env.server_name) local_hosts.update([ addrinfo[4][0] for addrinfo in getipaddrinfo(env.server_name)]) except", "key, value in default_headers: response.headers.setdefault(key, value) raise HTTP(response.status, page, **response.headers)", "the base application logfilename: where to store apache-compatible requests log", "1 self._environ = new_environ return self._environ def start_response(self, status='200', headers=[],", "and store it in db if request.tickets_db: ticket = e.log(request)", "\"\"\" if interfaces: # if interfaces is specified, it must", "http_response.headers.setdefault( 'web2py-component-content', 'replace') if request.ajax: if response.flash: http_response.headers['web2py-component-flash'] = \\", "address regex_client = re.compile('[\\w\\-:]+(\\.[\\w\\-]+)*\\.?') # ## to account for IPV6", "controller use:: @request.wsgi.middleware(middleware1, middleware2, ...) to decorate actions with WSGI", "a new password chars = string.letters + string.digits password =", "+ request.extension)), ('Cache-Control', 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0'), ('Expires', time.strftime('%a,", "return app_with_logging class HttpServer(object): \"\"\" the web2py web server (Rocket)", "gluon, web2py.py, logging.conf # and the handlers. 
# applications_parent (web2py_path)", "single_cookie = single_cookie.strip() if single_cookie: try: request.cookies.load(single_cookie) except Cookie.CookieError: pass", "to [function]() in applications/[application]/[controller].py rendered by applications/[application]/views/[controller]/[function].html \"\"\" # ##################################################", "in (path, abspath('site-packages'), \"\"): add_path_first(p) if exists(\"logging.conf\"): logging.config.fileConfig(\"logging.conf\") save_password(password, port)", "else: raise \"Wrong format for rocket interfaces parameter - see", "# ################################################## # build environment for controller and view #", "HTTPS_SCHEMES \\ or env.https == 'on' ) request.url = environ['PATH_INFO']", "the call to the url /[application]/[controller]/[function] that would result in", "[application] folder. A typical example would be the call to", "web server. \"\"\" if interfaces: # if interfaces is specified,", "# ################################################## if response.do_not_commit is True: BaseAdapter.close_all_instances(None) elif response.custom_commit: BaseAdapter.close_all_instances(response.custom_commit)", "env=env) def save_password(password, port): \"\"\" Used by main() to save", "folder = abspath('applications', app) + os.sep, ajax = x_req_with ==", "wsgiapp(environ, responder2) prof.disable() destfile = pjoin(profiler_dir, \"req_%s.prof\" % web2py_uuid()) prof.dump_stats(destfile)", "\"req_%s.prof\" % web2py_uuid()) prof.dump_stats(destfile) try: line = '%s, %s, %s,", "wsgibase Args: wsgiapp: the base application logfilename: where to store", "ticket = e.log(request) or 'unrecoverable' http_response = \\ HTTP(500, rwthread.routes.error_message_ticket", "line = '%s, %s, %s, %s, %s, %s, %f\\n' %", "fixup_missing_path_info from gluon import newcron __all__ = ['wsgibase', 'save_password', 'appfactory',", "\\ urllib2.quote(xmlescape(response.flash).replace(b'\\n', b'')) if 
response.js: http_response.headers['web2py-component-command'] = \\ urllib2.quote(response.js.replace('\\n', ''))", "data = f() start_response(self.response.status, self.response.headers.items()) if isinstance(data, list): return data", "# ################################################## # build missing folders # ################################################## create_missing_app_folders(request) #", "raise RuntimeError(\"Cannot determine web2py version\") try: from gluon import rocket", "contenttype from pydal.base import BaseAdapter from gluon.validators import CRYPT from", "= '%s, %s, %s, %s, %s, %s, %f\\n' % (", "global variables so that web2py # runs from there instead", ") if not logfilename: sys.stdout.write(line) elif isinstance(logfilename, str): write_file(logfilename, line,", "sys import time import datetime import signal import socket import", "env.web2py_version = web2py_version #env.update(global_settings) static_file = False http_response = None", "session_id here, # accept_language is validated in languages # pattern", "%Y %H:%M:%S GMT', time.gmtime())), ('Pragma', 'no-cache')] for key, value in", "addrinfo in getipaddrinfo(env.server_name)]) except (socket.gaierror, TypeError): pass global_settings.local_hosts = list(local_hosts)", "not requests: gc.collect() # end garbage collection logic # ##################################################", "to that directory set sys.path to # (\"\", gluon_parent/site-packages, gluon_parent,", "parse rewritten header variables # parse rewritten URL # serve", "parse rewritten URL # serve file if static # ##################################################", "################################################## # access the requested application # ################################################## disabled =", "or when logfile is missing logging.basicConfig() logger = logging.getLogger(\"web2py\") from", "log profiler_dir: where to store profile files \"\"\" if profilerfilename", "may only contain `[a-zA-Z0-9_]` 
- file and sub may also", "'=', '.' and '/' \"\"\" eget = environ.get current.__dict__.clear() request", "\\ or env.https == 'on' ) request.url = environ['PATH_INFO'] #", "# gluon_parent is the directory containing gluon, web2py.py, logging.conf #", "CRYPT from gluon.html import URL, xmlescape from gluon.utils import is_valid_ip_address,", "def middleware(self, *middleware_apps): \"\"\" In you controller use:: @request.wsgi.middleware(middleware1, middleware2,", "if isinstance(data, list): return data return [data] for item in", "rewrite parameters # rewrite incoming URL # parse rewritten header", "gluon.admin import add_path_first, create_missing_folders, create_missing_app_folders from gluon.globals import current #", "response.stream(static_file, request=request) # ################################################## # fill in request items #", "as rwthread, \\ try_rewrite_on_error, fixup_missing_path_info from gluon import newcron __all__", "%s', profiler_dir) if not os.path.isdir(profiler_dir): try: os.makedirs(profiler_dir) except: raise BaseException(\"Can't", "################################################## # on application error, rollback database # ################################################## try:", "are identical unless web2py_path is changed via the web2py.py -f", "= 'Thu, 31 Dec 2037 23:59:59 GMT' response.stream(static_file, request=request) #", "= ('requests' in globals()) and (requests + 1) % 100", "\"Wrong format for rocket interfaces parameter - see http://packages.python.org/rocket/\" if", "# See http://bugs.python.org/issue1436 # attention!, the import Tkinter in messageboxhandler,", "default view, controller can override it response.view = '%s/%s.%s' %", "################################################## disabled = pjoin(request.folder, 'DISABLED') if not exists(request.folder): if app", "or \\ request.env.http_x_forwarded_proto in HTTPS_SCHEMES \\ or env.https == 'on'", "or ip-format) import types if isinstance(interfaces, list): for i 
in", "Web Framework | Copyrighted by <NAME> <<EMAIL>> | License: LGPLv3", "def __init__( self, ip='1172.16.17.32', port=8000, password='', pid_filename='httpserver.pid', log_filename='httpserver.log', profiler_dir=None, ssl_certificate=None,", "pass return ret[0] return app_with_logging class HttpServer(object): \"\"\" the web2py", "rollback if not in DB if not request.tickets_db: ticket =", "if not logfilename: sys.stdout.write(line) elif isinstance(logfilename, str): write_file(logfilename, line, 'a')", "is used to generate a dynamic page. It first runs", "if static_file: return http_response.to(responder, env=env) if request.body: request.body.close() if hasattr(current,", "************************') print('your admin password is \"%s\"' % password) print('*********************************************************') elif", "types if isinstance(interfaces, list): for i in interfaces: if not", "into sys.path # applications_parent (path to applications/, site-packages/ etc) #", "100 or 0 if not requests: gc.collect() # end garbage", "response.headers['Cache-Control'] = 'max-age=315360000' response.headers[ 'Expires'] = 'Thu, 31 Dec 2037", "if interfaces: # if interfaces is specified, it must be", "it must be tested for rocket parameter correctness # not", "socket.gethostname() logger.info('starting web server...') rocket.SERVER_NAME = server_name rocket.SOCKET_TIMEOUT = socket_timeout", "response, session): \"\"\" This function is used to generate a", "needed to prevent exception on Python 2.5: # NameError: name", "profiler_dir) if not os.path.isdir(profiler_dir): try: os.makedirs(profiler_dir) except: raise BaseException(\"Can't create", "coding: utf-8 -*- \"\"\" | This file is part of", "as load_routes, url_in, THREAD_LOCAL as rwthread, \\ try_rewrite_on_error, fixup_missing_path_info from", "not logfilename: sys.stdout.write(line) elif isinstance(logfilename, str): write_file(logfilename, line, 'a') else:", "on GAE or when logfile is missing logging.basicConfig() 
logger =", "copy.copy(environment) page = run_controller_in(request.controller, request.function, environment) if isinstance(page, dict): response._vars", "default_headers = [ ('Content-Type', contenttype('.' + request.extension)), ('Cache-Control', 'no-store, no-cache,", "if not is_valid_ip_address(client): raise HTTP(400, \"Bad Request (request.client=%s)\" % client)", "to write to dir %s\" % profiler_dir) def app_with_logging(environ, responder):", "# IPv4 if not is_valid_ip_address(client): raise HTTP(400, \"Bad Request (request.client=%s)\"", "handle fcgi missing path_info and query_string # select rewrite parameters", "return responder(s, h) time_in = time.time() ret = [0] if", "abspath(profiler_dir) logger.warn('profiler is on. will use dir %s', profiler_dir) if", "# ################################################## # load cookies # ################################################## if env.http_cookie: for", "= url_in(request, environ) response.status = env.web2py_status_code or response.status if static_file:", "password = ''.join([random.choice(chars) for _ in range(8)]) cpassword = CRYPT()(password)[0]", "try: filehandle = open( filepath, 'w' ) filehandle.close() os.unlink(filepath) except", "cookie ignore # ################################################## # try load session or create", "of cwd or os.environ['web2py_path'] global web2py_path path = os.path.normpath(path) web2py_path", "# ## to account for IPV6 try: version_info = open(pjoin(global_settings.gluon_parent,", "session.connect(request, response) # ################################################## # run controller # ################################################## if", "ssl_private_key=None, ssl_ca_certificate=None, min_threads=None, max_threads=None, server_name=None, request_queue_size=5, timeout=10, socket_timeout=1, shutdown_timeout=None, #", "'/' \"\"\" eget = environ.get current.__dict__.clear() request = Request(environ) response", "response.headers.setdefault(key, value) raise 
HTTP(response.status, page, **response.headers) class LazyWSGI(object): def __init__(self,", "not env.web2py_disable_session: session._try_store_in_cookie_or_file(request, response) # Set header so client can", "cookies in headers # ################################################## session._fixup_before_save() http_response.cookies2headers(response.cookies) ticket = None", "interfaces - must be list of socket-tuples as string ):", "\"\"\" starts the web server. \"\"\" if interfaces: # if", "if client in (None, '', 'unknown'): g = regex_client.search(eget('remote_addr', ''))", "filesystem # this must be done after trying to commit", "# backward compatibility create_missing_folders() # set up logging for subsequent", "calls wsgibase Args: wsgiapp: the base application logfilename: where to", "'%s, %s, %s, %s, %s, %s, %f\\n' % ( environ['REMOTE_ADDR'],", "correctness # not necessarily completely tested (e.g. content of tuples", "interfaces: if not isinstance(i, tuple): raise \"Wrong format for rocket", "logger = logging.getLogger(\"web2py\") from gluon.restricted import RestrictedError from gluon.http import", "etc) # defaults to that directory set sys.path to #", "def get_client(env): \"\"\" Guesses the client address from the environment", "import current # Remarks: # calling script has inserted path", "when a page is requested (static or dynamic). 
It can", "= socket.gethostname() logger.info('starting web server...') rocket.SERVER_NAME = server_name rocket.SOCKET_TIMEOUT =", "################################################## # process models, controller and view (if required) #", "URL # parse rewritten header variables # parse rewritten URL", "local_hosts.add(socket.gethostname()) local_hosts.add(fqdn) local_hosts.update([ addrinfo[4][0] for addrinfo in getipaddrinfo(fqdn)]) if env.server_name:", "from gluon import newcron __all__ = ['wsgibase', 'save_password', 'appfactory', 'HttpServer']", "= wsgiapp(environ, responder2) prof.disable() destfile = pjoin(profiler_dir, \"req_%s.prof\" % web2py_uuid())", "PROCESS import gc import os import re import copy import", "finally: if response and hasattr(response, 'session_file') \\ and response.session_file: response.session_file.close()", "adds web2py path and version info - compensates for fcgi", "The gluon wsgi application. The first function called when a", "'::ffff:127.0.0.1', '::1']) if not global_settings.web2py_runtime_gae: try: fqdn = socket.getfqdn() local_hosts.add(socket.gethostname())", "# accept_language is validated in languages # pattern used to", "IOError: raise BaseException(\"Unable to write to dir %s\" % profiler_dir)", "\"\"\" the web2py web server (Rocket) \"\"\" def __init__( self,", "string.digits password = ''.join([random.choice(chars) for _ in range(8)]) cpassword =", "# ################################################## create_missing_app_folders(request) # ################################################## # get the GET and", "g else None if client in (None, '', 'unknown'): g", "first runs all models, then runs the function in the", "rewritten URL # serve file if static # ################################################## fixup_missing_path_info(environ)", "except Cookie.CookieError: pass # single invalid cookie ignore # ##################################################", "print_function if False: import import_all # DO NOT REMOVE 
PART", "if static # ################################################## fixup_missing_path_info(environ) (static_file, version, environ) = url_in(request,", "################################################## fixup_missing_path_info(environ) (static_file, version, environ) = url_in(request, environ) response.status =", "path to this file which may be Library.zip # gluon_parent", "try load session or create new session file # ##################################################", "use the pam password for specified user cpassword = password[1:-1]", "# if a path is specified change the global variables", "return wsgibase(new_environ, responder) if global_settings.web2py_crontype == 'soft': newcron.softcron(global_settings.applications_parent).start() return http_response.to(responder,", "self.pid_filename = pid_filename if not server_name: server_name = socket.gethostname() logger.info('starting", "'VERSION'), 'r') raw_version_string = version_info.read().split()[-1].strip() version_info.close() global_settings.web2py_version = raw_version_string web2py_version", "response._custom_rollback: response._custom_rollback() else: BaseAdapter.close_all_instances('rollback') except: pass e = RestrictedError('Framework', '',", "logger.warning('unable to open SSL certificate. 
SSL is OFF') elif not", "import build_environment, run_models_in, \\ run_controller_in, run_view_in from gluon.contenttype import contenttype", "web2py path and version info - compensates for fcgi missing", "'::1' else: client = '127.0.0.1' # IPv4 if not is_valid_ip_address(client):", "################################################## # build missing folders # ################################################## create_missing_app_folders(request) # ##################################################", "password for specified user cpassword = password[1:-1] else: # use", "start the web server \"\"\" try: signal.signal(signal.SIGTERM, lambda a, b,", "\"C\") # IMPORTANT, web2py requires locale \"C\" exists = os.path.exists", "= request.application # must go after url_in! if not global_settings.local_hosts:", "responder2) prof.disable() destfile = pjoin(profiler_dir, \"req_%s.prof\" % web2py_uuid()) prof.dump_stats(destfile) try:", "response._view_environment.update(page) page = run_view_in(response._view_environment) # logic to garbage collect after", "static_file: if eget('QUERY_STRING', '').startswith('attachment'): response.headers['Content-Disposition'] \\ = 'attachment' if version:", "CRYPT()(password)[0] print('******************* IMPORTANT!!! 
************************') print('your admin password is \"%s\"' %", "POST data # ################################################## #parse_get_post_vars(request, environ) # ################################################## # expose", "# ################################################## try: if response._custom_rollback: response._custom_rollback() else: BaseAdapter.close_all_instances('rollback') except: pass", "tuple(sock_list), method='wsgi', app_info=app_info, min_threads=min_threads, max_threads=max_threads, queue_size=int(request_queue_size), timeout=int(timeout), handle_signals=False, ) def", "web2py_version #env.update(global_settings) static_file = False http_response = None try: try:", "# Set header so client can distinguish component requests. if", "# ################################################## disabled = pjoin(request.folder, 'DISABLED') if not exists(request.folder): if", "is not None: raise BaseException(\"Deprecated API\") if profiler_dir: profiler_dir =", "It first runs all models, then runs the function in", "environ['PATH_INFO'] # ################################################## # access the requested application # ##################################################", "requests = 0 # gc timer # Security Checks: validate", "# ################################################## default_headers = [ ('Content-Type', contenttype('.' 
+ request.extension)), ('Cache-Control',", "License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) The gluon wsgi application --------------------------- \"\"\" from", "x_req_with == 'xmlhttprequest', cid = env.http_web2py_component_element, is_local = (env.remote_addr in", "def caller(app): return app(self.environ, self.start_response) return lambda caller=caller, app=app: caller(app)", "self.server.start() def stop(self, stoplogging=False): \"\"\" stop cron and the web", "################################################## if not env.web2py_disable_session: session._try_store_in_cookie_or_file(request, response) # Set header so", "for addrinfo in getipaddrinfo(env.server_name)]) except (socket.gaierror, TypeError): pass global_settings.local_hosts =", "the requested application # ################################################## disabled = pjoin(request.folder, 'DISABLED') if", "it may have weird behavior in some cases \"\"\" def", "environ.get current.__dict__.clear() request = Request(environ) response = Response() session =", "from gluon.html import URL, xmlescape from gluon.utils import is_valid_ip_address, getipaddrinfo", "not want the path to this file which may be", "app != 'welcome': redirect(URL('welcome', 'default', 'index')) elif rwthread.routes.error_handler: _handler =", "redirect(URL('welcome', 'default', 'index')) elif rwthread.routes.error_handler: _handler = rwthread.routes.error_handler redirect(URL(_handler['application'], _handler['controller'],", "commit database! 
# ################################################## if not env.web2py_disable_session: session._try_store_in_cookie_or_file(request, response) #", "= page response._view_environment.update(page) page = run_view_in(response._view_environment) # logic to garbage", "store apache-compatible requests log profiler_dir: where to store profile files", "for i in interfaces: if not isinstance(i, tuple): raise \"Wrong", "Request(environ) response = Response() session = Session() env = request.env", "script directory into sys.path # applications_parent (path to applications/, site-packages/", "= 'attachment' if version: response.headers['Cache-Control'] = 'max-age=315360000' response.headers[ 'Expires'] =", "fp.close() def appfactory(wsgiapp=wsgibase, logfilename='httpserver.log', profiler_dir=None, profilerfilename=None): \"\"\" generates a wsgi", "else: raise HTTP(404, rwthread.routes.error_message % 'invalid request', web2py_error='invalid application') elif", "url /[application]/[controller]/[function] that would result in a call to [function]()", "newcron __all__ = ['wsgibase', 'save_password', 'appfactory', 'HttpServer'] requests = 0", "gluon.validators import CRYPT from gluon.html import URL, xmlescape from gluon.utils", "= '%s/%s.%s' % (request.controller, request.function, request.extension) # also, make sure", "################################################## if not env.web2py_disable_session: session.connect(request, response) # ################################################## # run", "logger.warn('unable to import Rocket') load_routes() HTTPS_SCHEMES = set(('https', 'HTTPS')) def", "after url_in! if not global_settings.local_hosts: local_hosts = set(['127.0.0.1', '::ffff:127.0.0.1', '::1'])", "request.env.http_x_forwarded_proto in HTTPS_SCHEMES \\ or env.https == 'on' ) request.url", "Session from gluon.compileapp import build_environment, run_models_in, \\ run_controller_in, run_view_in from", "and sub may also contain '-', '=', '.' 
and '/'", "'on' ) request.url = environ['PATH_INFO'] # ################################################## # access the", "class LazyWSGI(object): def __init__(self, environ, request, response): self.wsgi_environ = environ", "password_file = abspath('parameters_%i.py' % port) if password == '<random>': #", "range(8)]) cpassword = CRYPT()(password)[0] print('******************* IMPORTANT!!! ************************') print('your admin password", "profiler_dir=None, profilerfilename=None): \"\"\" generates a wsgi application that does logging", "# Rocket is able to use several interfaces - must", "here, # accept_language is validated in languages # pattern used", "lambda caller=caller, app=app: caller(app) return middleware def wsgibase(environ, responder): \"\"\"", "version_info = open(pjoin(global_settings.gluon_parent, 'VERSION'), 'r') raw_version_string = version_info.read().split()[-1].strip() version_info.close() global_settings.web2py_version", "newcron.softcron(global_settings.applications_parent).start() return http_response.to(responder, env=env) def save_password(password, port): \"\"\" Used by", "fp.write('password=None\\n') fp.close() def appfactory(wsgiapp=wsgibase, logfilename='httpserver.log', profiler_dir=None, profilerfilename=None): \"\"\" generates a", "local_hosts.add(fqdn) local_hosts.update([ addrinfo[4][0] for addrinfo in getipaddrinfo(fqdn)]) if env.server_name: local_hosts.add(env.server_name)", "os.environ['web2py_path'] global web2py_path path = os.path.normpath(path) web2py_path = path global_settings.applications_parent", "logging.getLogger(\"web2py\") from gluon.restricted import RestrictedError from gluon.http import HTTP, redirect", "is OFF') elif not exists(ssl_private_key): logger.warning('unable to open SSL private", "path and version info - compensates for fcgi missing path_info", "'%2C'), environ['SERVER_PROTOCOL'], (status_headers[0])[:3], time.time() - time_in, ) if not logfilename:", "gluon.rewrite import load as load_routes, 
url_in, THREAD_LOCAL as rwthread, \\", "controller # ################################################## if global_settings.debugging and app != \"admin\": import", "load cookies # ################################################## if env.http_cookie: for single_cookie in env.http_cookie.split(';'):", "hr if static_file: return http_response.to(responder, env=env) if request.body: request.body.close() if", "SSL private key. SSL is OFF') else: sock_list.extend([ssl_private_key, ssl_certificate]) if", "b, s=self: s.stop()) signal.signal(signal.SIGINT, lambda a, b, s=self: s.stop()) except:", "abspath('site-packages'), \"\"): add_path_first(p) if exists(\"logging.conf\"): logging.config.fileConfig(\"logging.conf\") save_password(password, port) self.pid_filename =", "gc.collect() # end garbage collection logic # ################################################## # set", "function in the controller, and then tries to render the", "the [application] folder. A typical example would be the call", "= str(status).split(' ', 1)[0] self.response.headers = dict(headers) return lambda *args,", "= env.wsgi_url_scheme in HTTPS_SCHEMES or \\ request.env.http_x_forwarded_proto in HTTPS_SCHEMES \\", "db try store session on filesystem # this must be", "file. \"\"\" password_file = abspath('parameters_%i.py' % port) if password ==", "'default', 'index')) elif rwthread.routes.error_handler: _handler = rwthread.routes.error_handler redirect(URL(_handler['application'], _handler['controller'], _handler['function'],", "do not want the path to this file which may", "query_string - validates the path in url The url path", "decorate actions with WSGI middleware. actions must return strings. 
uses", "# load cookies # ################################################## if env.http_cookie: for single_cookie in", "Response, Session from gluon.compileapp import build_environment, run_models_in, \\ run_controller_in, run_view_in", "web server (Rocket) \"\"\" def __init__( self, ip='1172.16.17.32', port=8000, password='',", "# ################################################## if not env.web2py_disable_session: session._try_store_in_cookie_or_file(request, response) # Set header", "if response.flash: http_response.headers['web2py-component-flash'] = \\ urllib2.quote(xmlescape(response.flash).replace(b'\\n', b'')) if response.js: http_response.headers['web2py-component-command']", "# ################################################## # set default headers it not set #", "folder. A typical example would be the call to the", "requested application # ################################################## disabled = pjoin(request.folder, 'DISABLED') if not", "try_rewrite_on_error, fixup_missing_path_info from gluon import newcron __all__ = ['wsgibase', 'save_password',", "raise BaseException(\"Can't create dir %s\" % profiler_dir) filepath = pjoin(profiler_dir,", "it! Thanks ozancag import locale locale.setlocale(locale.LC_CTYPE, \"C\") # IMPORTANT, web2py", "e.log(request) or 'unknown' # rollback if response._custom_rollback: response._custom_rollback() else: BaseAdapter.close_all_instances('rollback')", "once every 100 requests global requests requests = ('requests' in", "tickets before rollback if not in DB if not request.tickets_db:", "gluon.restricted import RestrictedError from gluon.http import HTTP, redirect from gluon.globals", "if password: fp.write('password=\"%s\"\\n' % cpassword) else: fp.write('password=None\\n') fp.close() def appfactory(wsgiapp=wsgibase,", "this file which may be Library.zip # gluon_parent is the", "app = request.application # must go after url_in! 
if not", "that web2py # runs from there instead of cwd or", "0 # gc timer # Security Checks: validate URL and", "is requested (static or dynamic). It can be called by", "via the web2py.py -f folder option # main.web2py_path is the", "env.http_host.startswith('['): # IPv6 client = '::1' else: client = '127.0.0.1'", "port) if password == '<random>': # make up a new", "responder app \"\"\" status_headers.append(s) status_headers.append(h) return responder(s, h) time_in =", "def serve_controller(request, response, session): \"\"\" This function is used to", "if profiler_dir: profiler_dir = abspath(profiler_dir) logger.warn('profiler is on. will use", "## to account for IPV6 try: version_info = open(pjoin(global_settings.gluon_parent, 'VERSION'),", "exec_info=None): \"\"\" in controller you can use: - request.wsgi.environ -", "\\ and app != 'welcome': redirect(URL('welcome', 'default', 'index')) elif rwthread.routes.error_handler:", "<<EMAIL>> | License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) The gluon wsgi application ---------------------------", "= '' elif password.startswith('<pam_user:'): # use the pam password for", "dynamic). 
It can be called by paste.httpserver or by apache", "rwthread.routes.default_application \\ and app != 'welcome': redirect(URL('welcome', 'default', 'index')) elif", "environment = build_environment(request, response, session) # set default view, controller", "gluon.settings import global_settings from gluon.utils import web2py_uuid from gluon.admin import", "dict): response._vars = page response._view_environment.update(page) page = run_view_in(response._view_environment) # logic", "== 'xmlhttprequest', cid = env.http_web2py_component_element, is_local = (env.remote_addr in local_hosts", "not ssl_certificate or not ssl_private_key: logger.info('SSL is off') elif not", "third party WSGI applications \"\"\" self.response.status = str(status).split(' ', 1)[0]", "local_hosts.add(env.server_name) local_hosts.update([ addrinfo[4][0] for addrinfo in getipaddrinfo(env.server_name)]) except (socket.gaierror, TypeError):", "or tuple(sock_list), method='wsgi', app_info=app_info, min_threads=min_threads, max_threads=max_threads, queue_size=int(request_queue_size), timeout=int(timeout), handle_signals=False, )", "provided password cpassword = CRYPT()(password)[0] fp = open(password_file, 'w') if", "# ################################################## # if session not in db try store", "= (env.remote_addr in local_hosts and client == env.remote_addr), is_shell =", "= \\ urllib2.quote(response.js.replace('\\n', '')) # ################################################## # store cookies in", "logfilename='httpserver.log', profiler_dir=None, profilerfilename=None): \"\"\" generates a wsgi application that does", "x_req_with = str(env.http_x_requested_with).lower() cmd_opts = global_settings.cmd_options request.update( client = client,", "must go after url_in! 
if not global_settings.local_hosts: local_hosts = set(['127.0.0.1',", "= os.path.normpath(path) web2py_path = path global_settings.applications_parent = path os.chdir(path) load_routes()", "# select rewrite parameters # rewrite incoming URL # parse", "['wsgibase', 'save_password', 'appfactory', 'HttpServer'] requests = 0 # gc timer", "or '').split(',')[0] if g else None if client in (None,", "output using a view/template. this function must run from the", "for controller and view # ################################################## environment = build_environment(request, response,", "# ################################################## run_models_in(environment) response._view_environment = copy.copy(environment) page = run_controller_in(request.controller, request.function,", "else: fp.write('password=None\\n') fp.close() def appfactory(wsgiapp=wsgibase, logfilename='httpserver.log', profiler_dir=None, profilerfilename=None): \"\"\" generates", "applications/ # and routes.py # The two are identical unless", "gluon._compat import Cookie, urllib2 #from thread import allocate_lock from gluon.fileutils", "if os.path.exists(five0three): raise HTTP(503, file(five0three, 'r').read()) else: raise HTTP(503, \"<html><body><h1>Temporarily", "import HTTP, redirect from gluon.globals import Request, Response, Session from", "wsgiapp(environ, responder2) else: import cProfile prof = cProfile.Profile() prof.enable() ret[0]", "# The two are identical unless web2py_path is changed via", "# expose wsgi hooks for convenience # ################################################## request.wsgi =", "IMPORTANT!!! 
************************') print('your admin password is \"%s\"' % password) print('*********************************************************')", "| This file is part of the web2py Web Framework", "in interfaces: if not isinstance(i, tuple): raise \"Wrong format for", "global_settings from gluon.utils import web2py_uuid from gluon.admin import add_path_first, create_missing_folders,", "import contenttype from pydal.base import BaseAdapter from gluon.validators import CRYPT", "/[application]/[controller]/[function] that would result in a call to [function]() in", "'gluon' is not defined # See http://bugs.python.org/issue1436 # attention!, the", "new password chars = string.letters + string.digits password = ''.join([random.choice(chars)", "global_settings.local_hosts: local_hosts = set(['127.0.0.1', '::ffff:127.0.0.1', '::1']) if not global_settings.web2py_runtime_gae: try:", "# ################################################## if global_settings.debugging and app != \"admin\": import gluon.debug", "if response and hasattr(response, 'session_file') \\ and response.session_file: response.session_file.close() session._unlock(response)", "the same as applications_parent (for backward compatibility) web2py_path = global_settings.applications_parent", "ssl_certificate]) if ssl_ca_certificate: sock_list.append(ssl_ca_certificate) logger.info('SSL is ON') app_info = {'wsgi_app':", "\"\"\" In you controller use:: @request.wsgi.middleware(middleware1, middleware2, ...) 
to decorate", "path os.chdir(path) load_routes() for p in (path, abspath('site-packages'), \"\"): add_path_first(p)", "(web2py_path) is the directory containing applications/ # and routes.py #", "# ################################################## # store cookies in headers # ################################################## session._fixup_before_save()", "convenience # ################################################## request.wsgi = LazyWSGI(environ, request, response) # ##################################################", "have weird behavior in some cases \"\"\" def middleware(f): def", "from there instead of cwd or os.environ['web2py_path'] global web2py_path path", "wsgi app that does logging and profiling and calls wsgibase", "dict(headers) return lambda *args, **kargs: \\ self.response.write(escape=False, *args, **kargs) def", "certificate. SSL is OFF') elif not exists(ssl_private_key): logger.warning('unable to open", "or env.https == 'on' ) request.url = environ['PATH_INFO'] # ##################################################", "- see http://packages.python.org/rocket/\" else: raise \"Wrong format for rocket interfaces", "on Python 2.5: # NameError: name 'gluon' is not defined", "attention!, the import Tkinter in messageboxhandler, changes locale ... import", "if password == '<random>': # make up a new password", "app_with_logging class HttpServer(object): \"\"\" the web2py web server (Rocket) \"\"\"", "and the handlers. # applications_parent (web2py_path) is the directory containing", "try: if response._custom_rollback: response._custom_rollback() else: BaseAdapter.close_all_instances('rollback') except: pass e =", "run from the [application] folder. 
A typical example would be", "'' elif password.startswith('<pam_user:'): # use the pam password for specified", "write_file from gluon.settings import global_settings from gluon.utils import web2py_uuid from", "except: if request.body: request.body.close() # ################################################## # on application error,", "try: version_info = open(pjoin(global_settings.gluon_parent, 'VERSION'), 'r') raw_version_string = version_info.read().split()[-1].strip() version_info.close()", "is True: BaseAdapter.close_all_instances(None) elif response.custom_commit: BaseAdapter.close_all_instances(response.custom_commit) else: BaseAdapter.close_all_instances('commit') # ##################################################", "value) raise HTTP(response.status, page, **response.headers) class LazyWSGI(object): def __init__(self, environ,", "wsgi responder app \"\"\" status_headers.append(s) status_headers.append(h) return responder(s, h) time_in", "rocket interfaces parameter - see http://packages.python.org/rocket/\" else: raise \"Wrong format", "'index')) elif rwthread.routes.error_handler: _handler = rwthread.routes.error_handler redirect(URL(_handler['application'], _handler['controller'], _handler['function'], args=app))", "for maintenance</h1></body></html>\") # ################################################## # build missing folders # ##################################################", "% dict(ticket=ticket), web2py_error='ticket %s' % ticket) finally: if response and", "for p in (path, abspath('site-packages'), \"\"): add_path_first(p) if exists(\"logging.conf\"): logging.config.fileConfig(\"logging.conf\")", "response = Response() session = Session() env = request.env #env.web2py_path", "format for rocket interfaces parameter - see http://packages.python.org/rocket/\" if path:", "locale \"C\" exists = os.path.exists pjoin = os.path.join try: logging.config.fileConfig(abspath(\"logging.conf\"))", "party WSGI applications \"\"\" self.response.status = 
str(status).split(' ', 1)[0] self.response.headers", "# also, make sure the flash is passed through #", "single_cookie: try: request.cookies.load(single_cookie) except Cookie.CookieError: pass # single invalid cookie", "http_response, request, environ, ticket) if not http_response: return wsgibase(new_environ, responder)", "web2py_error='invalid application') elif not request.is_local and exists(disabled): five0three = os.path.join(request.folder,'static','503.html')", "= path global_settings.applications_parent = path os.chdir(path) load_routes() for p in", "= new_environ return self._environ def start_response(self, status='200', headers=[], exec_info=None): \"\"\"", "'wtest') try: filehandle = open( filepath, 'w' ) filehandle.close() os.unlink(filepath)", "rewritten header variables # parse rewritten URL # serve file", "except RestrictedError as e: if request.body: request.body.close() # ################################################## #", "simulated environment so it may have weird behavior in some", "%H:%M:%S'), environ['REQUEST_METHOD'], environ['PATH_INFO'].replace(',', '%2C'), environ['SERVER_PROTOCOL'], (status_headers[0])[:3], time.time() - time_in, )", "version\") try: from gluon import rocket except: if not global_settings.web2py_runtime_gae:", "load_routes() for p in (path, abspath('site-packages'), \"\"): add_path_first(p) if exists(\"logging.conf\"):", "the web server. 
\"\"\" if interfaces: # if interfaces is", "raise HTTP(503, file(five0three, 'r').read()) else: raise HTTP(503, \"<html><body><h1>Temporarily down for", "URL # serve file if static # ################################################## fixup_missing_path_info(environ) (static_file,", "Thanks ozancag import locale locale.setlocale(locale.LC_CTYPE, \"C\") # IMPORTANT, web2py requires", "signal.signal(signal.SIGTERM, lambda a, b, s=self: s.stop()) signal.signal(signal.SIGINT, lambda a, b,", "script has inserted path to script directory into sys.path #", "a call to [function]() in applications/[application]/[controller].py rendered by applications/[application]/views/[controller]/[function].html \"\"\"", "from gluon.globals import Request, Response, Session from gluon.compileapp import build_environment,", "= wsgiapp(environ, responder2) else: import cProfile prof = cProfile.Profile() prof.enable()", "by paste.httpserver or by apache mod_wsgi (or any WSGI-compatible server).", "cases \"\"\" def middleware(f): def app(environ, start_response): data = f()", "% ticket) except: if request.body: request.body.close() # ################################################## # on", "if False: import import_all # DO NOT REMOVE PART OF", "except HTTP as hr: http_response = hr if static_file: return", "path: # if a path is specified change the global", "rocket.ssl: logger.warning('Python \"ssl\" module unavailable. 
SSL is OFF') elif not", "################################################## # try load session or create new session file", "appfactory(wsgiapp=wsgibase, logfilename='httpserver.log', profiler_dir=None, profilerfilename=None): \"\"\" generates a wsgi application that", "client = client, folder = abspath('applications', app) + os.sep, ajax", "= socket_timeout sock_list = [ip, port] if not ssl_certificate or", "def start(self): \"\"\" start the web server \"\"\" try: signal.signal(signal.SIGTERM,", "BaseAdapter.close_all_instances('commit') # ################################################## # if session not in db try", "% dict(ticket=ticket), web2py_error='ticket %s' % ticket) except: if request.body: request.body.close()", "not is_valid_ip_address(client): raise HTTP(400, \"Bad Request (request.client=%s)\" % client) return", "'.' with '_' - adds web2py path and version info", "to generate a dynamic page. It first runs all models,", "regex_client.search(eget('http_x_forwarded_for', '')) client = (g.group() or '').split(',')[0] if g else", "used to generate a dynamic page. It first runs all", "in getipaddrinfo(fqdn)]) if env.server_name: local_hosts.add(env.server_name) local_hosts.update([ addrinfo[4][0] for addrinfo in", "shutdown timeout path=None, interfaces=None # Rocket is able to use", "app_info=app_info, min_threads=min_threads, max_threads=max_threads, queue_size=int(request_queue_size), timeout=int(timeout), handle_signals=False, ) def start(self): \"\"\"", "logfilename: sys.stdout.write(line) elif isinstance(logfilename, str): write_file(logfilename, line, 'a') else: logfilename.write(line)", "= f() start_response(self.response.status, self.response.headers.items()) if isinstance(data, list): return data return", "in url The url path must be either: 1. for", "isinstance(interfaces, list): for i in interfaces: if not isinstance(i, tuple):", "tries to render the output using a view/template. this function", "generate a dynamic page. 
It first runs all models, then", "set # ################################################## default_headers = [ ('Content-Type', contenttype('.' + request.extension)),", "'Expires'] = 'Thu, 31 Dec 2037 23:59:59 GMT' response.stream(static_file, request=request)", "else: sock_list.extend([ssl_private_key, ssl_certificate]) if ssl_ca_certificate: sock_list.append(ssl_ca_certificate) logger.info('SSL is ON') app_info", "post-check=0, pre-check=0'), ('Expires', time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime())),", "filepath, 'w' ) filehandle.close() os.unlink(filepath) except IOError: raise BaseException(\"Unable to", "if not os.path.isdir(profiler_dir): try: os.makedirs(profiler_dir) except: raise BaseException(\"Can't create dir", "= environ self.request = request self.response = response @property def", "headers=[], exec_info=None): \"\"\" in controller you can use: - request.wsgi.environ", "only contain `[a-zA-Z0-9_]` - file and sub may also contain", "store session on filesystem # this must be done after", "done after trying to commit database! 
# ################################################## if not", "try: try: # ################################################## # handle fcgi missing path_info and", "'')) client = (g.group() or '').split(',')[0] if g else None", "\"\"\" status_headers = [] def responder2(s, h): \"\"\" wsgi responder", "conventions are: - application, controller, function and extension may only", "################################################## # run controller # ################################################## if global_settings.debugging and app", "= build_environment(request, response, session) # set default view, controller can", ") request.url = environ['PATH_INFO'] # ################################################## # access the requested", "a wsgi application that does logging and profiling and calls", "all fails, assume '127.0.0.1' or '::1' (running locally) \"\"\" eget", "import rocket except: if not global_settings.web2py_runtime_gae: logger.warn('unable to import Rocket')", "for subsequent imports import logging import logging.config # This needed", "request', web2py_error='invalid application') elif not request.is_local and exists(disabled): five0three =", "def responder2(s, h): \"\"\" wsgi responder app \"\"\" status_headers.append(s) status_headers.append(h)", "models, controller and view (if required) # ################################################## run_models_in(environment) response._view_environment", "file if static # ################################################## fixup_missing_path_info(environ) (static_file, version, environ) =", "up logging for subsequent imports import logging import logging.config #", "app(environ, start_response): data = f() start_response(self.response.status, self.response.headers.items()) if isinstance(data, list):", "rollback database # ################################################## # log tickets before rollback if", "wsgibase \"\"\" status_headers = [] def responder2(s, h): \"\"\" wsgi", "not always, once every 100 
requests global requests requests =", "global_settings.local_hosts = list(local_hosts) else: local_hosts = global_settings.local_hosts client = get_client(env)", "(if required) # ################################################## run_models_in(environment) response._view_environment = copy.copy(environment) page =", "raise HTTP(503, \"<html><body><h1>Temporarily down for maintenance</h1></body></html>\") # ################################################## # build", "It can be called by paste.httpserver or by apache mod_wsgi", "'no-cache')] for key, value in default_headers: response.headers.setdefault(key, value) raise HTTP(response.status,", "'replace') if request.ajax: if response.flash: http_response.headers['web2py-component-flash'] = \\ urllib2.quote(xmlescape(response.flash).replace(b'\\n', b''))", "in range(8)]) cpassword = CRYPT()(password)[0] print('******************* IMPORTANT!!! ************************') print('your admin", "add_path_first(p) if exists(\"logging.conf\"): logging.config.fileConfig(\"logging.conf\") save_password(password, port) self.pid_filename = pid_filename if", "logfilename: where to store apache-compatible requests log profiler_dir: where to", "flash is passed through # ################################################## # process models, controller", "import add_path_first, create_missing_folders, create_missing_app_folders from gluon.globals import current # Remarks:", "= None except RestrictedError as e: if request.body: request.body.close() #", "except: # fails on GAE or when logfile is missing", "string ): \"\"\" starts the web server. \"\"\" if interfaces:", "os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # because we do not want the path to", "DB if not request.tickets_db: ticket = e.log(request) or 'unknown' #", "from the [application] folder. 
A typical example would be the", "and hasattr(response, 'session_file') \\ and response.session_file: response.session_file.close() session._unlock(response) http_response, new_environ", "password is \"%s\"' % password) print('*********************************************************') elif password == '<recycle>':", "save_password(password, port): \"\"\" Used by main() to save the password", "# not necessarily completely tested (e.g. content of tuples or", "when logfile is missing logging.basicConfig() logger = logging.getLogger(\"web2py\") from gluon.restricted", "'w' ) filehandle.close() os.unlink(filepath) except IOError: raise BaseException(\"Unable to write", "gluon.contenttype import contenttype from pydal.base import BaseAdapter from gluon.validators import", "'').split(',')[0] if g else None if client in (None, '',", "application. The first function called when a page is requested", "else: BaseAdapter.close_all_instances('commit') # ################################################## # if session not in db", "- must be list of socket-tuples as string ): \"\"\"", "path in url The url path must be either: 1.", "web2py_path = path global_settings.applications_parent = path os.chdir(path) load_routes() for p", "variables, replacing '.' 
with '_' - adds web2py path and", "load session or create new session file # ################################################## if", "# ################################################## request.wsgi = LazyWSGI(environ, request, response) # ################################################## #", "subsequent imports import logging import logging.config # This needed to", "serve_controller(request, response, session) except HTTP as hr: http_response = hr", "max_threads=None, server_name=None, request_queue_size=5, timeout=10, socket_timeout=1, shutdown_timeout=None, # Rocket does not", "else: client = '127.0.0.1' # IPv4 if not is_valid_ip_address(client): raise", "################################################## # on application error, rollback database # ################################################## #", "*args, **kargs: \\ self.response.write(escape=False, *args, **kargs) def middleware(self, *middleware_apps): \"\"\"", "if eget('QUERY_STRING', '').startswith('attachment'): response.headers['Content-Disposition'] \\ = 'attachment' if version: response.headers['Cache-Control']", "- compensates for fcgi missing path_info and query_string - validates", "import BaseAdapter from gluon.validators import CRYPT from gluon.html import URL,", "= dict(headers) return lambda *args, **kargs: \\ self.response.write(escape=False, *args, **kargs)", "from gluon.settings import global_settings from gluon.utils import web2py_uuid from gluon.admin", "return app(self.environ, self.start_response) return lambda caller=caller, app=app: caller(app) return middleware", "\"\"\" in controller you can use: - request.wsgi.environ - request.wsgi.start_response", "would be the call to the url /[application]/[controller]/[function] that would", "if interfaces is specified, it must be tested for rocket", "the pam password for specified user cpassword = password[1:-1] else:", "locally) \"\"\" eget = env.get g = regex_client.search(eget('http_x_forwarded_for', '')) client", "is_scheduler = 
False, is_https = env.wsgi_url_scheme in HTTPS_SCHEMES or \\", "from gluon.utils import is_valid_ip_address, getipaddrinfo from gluon.rewrite import load as", "up a new password chars = string.letters + string.digits password", "for rocket interfaces parameter - see http://packages.python.org/rocket/\" if path: #", "% profiler_dir) filepath = pjoin(profiler_dir, 'wtest') try: filehandle = open(", "= False http_response = None try: try: try: # ##################################################", "################################################## try: if response._custom_rollback: response._custom_rollback() else: BaseAdapter.close_all_instances('rollback') except: pass e", "collect after exec, not always, once every 100 requests global", "request.body: request.body.close() if hasattr(current, 'request'): # ################################################## # on success,", "and calls wsgibase Args: wsgiapp: the base application logfilename: where", "response) # ################################################## # load cookies # ################################################## if env.http_cookie:", "= os.path.exists pjoin = os.path.join try: logging.config.fileConfig(abspath(\"logging.conf\")) except: # fails", "pass # single invalid cookie ignore # ################################################## # try", "import logging.config # This needed to prevent exception on Python", "languages # pattern used to validate client address regex_client =", "g.group() elif env.http_host.startswith('['): # IPv6 client = '::1' else: client", "# ################################################## # get the GET and POST data #", "abspath('applications', app) + os.sep, ajax = x_req_with == 'xmlhttprequest', cid", "# access the requested application # ################################################## disabled = pjoin(request.folder,", "must be list of socket-tuples as string ): \"\"\" starts", "pass global_settings.local_hosts = list(local_hosts) else: local_hosts = 
global_settings.local_hosts client =", "response._custom_rollback: response._custom_rollback() else: BaseAdapter.close_all_instances('rollback') # if tickets in db, reconnect", "list(local_hosts) else: local_hosts = global_settings.local_hosts client = get_client(env) x_req_with =", "(env.remote_addr in local_hosts and client == env.remote_addr), is_shell = False,", "env.web2py_disable_session: session._try_store_in_db(request, response) # ################################################## # on success, commit database", "to this file which may be Library.zip # gluon_parent is", "database # ################################################## try: if response._custom_rollback: response._custom_rollback() else: BaseAdapter.close_all_instances('rollback') except:", "try: request.cookies.load(single_cookie) except Cookie.CookieError: pass # single invalid cookie ignore", "key. SSL is OFF') else: sock_list.extend([ssl_private_key, ssl_certificate]) if ssl_ca_certificate: sock_list.append(ssl_ca_certificate)", "try: fqdn = socket.getfqdn() local_hosts.add(socket.gethostname()) local_hosts.add(fqdn) local_hosts.update([ addrinfo[4][0] for addrinfo", "http_response: return wsgibase(new_environ, responder) if global_settings.web2py_crontype == 'soft': newcron.softcron(global_settings.applications_parent).start() return", "url_in, THREAD_LOCAL as rwthread, \\ try_rewrite_on_error, fixup_missing_path_info from gluon import", "item in middleware_apps: app = item(app) def caller(app): return app(self.environ,", "variables # parse rewritten URL # serve file if static", "import types if isinstance(interfaces, list): for i in interfaces: if", "a dynamic page. It first runs all models, then runs", "\"\"\" self.response.status = str(status).split(' ', 1)[0] self.response.headers = dict(headers) return", "profiler_dir: where to store profile files \"\"\" if profilerfilename is", "server. 
\"\"\" if interfaces: # if interfaces is specified, it", "if request.cid: http_response.headers.setdefault( 'web2py-component-content', 'replace') if request.ajax: if response.flash: http_response.headers['web2py-component-flash']", "'unrecoverable' http_response = \\ HTTP(500, rwthread.routes.error_message_ticket % dict(ticket=ticket), web2py_error='ticket %s'", "pjoin(profiler_dir, \"req_%s.prof\" % web2py_uuid()) prof.dump_stats(destfile) try: line = '%s, %s,", "'::1' (running locally) \"\"\" eget = env.get g = regex_client.search(eget('http_x_forwarded_for',", "return http_response.to(responder, env=env) if request.body: request.body.close() if hasattr(current, 'request'): #", "in getipaddrinfo(env.server_name)]) except (socket.gaierror, TypeError): pass global_settings.local_hosts = list(local_hosts) else:", "try store session in database # ################################################## if not env.web2py_disable_session:", "IMPORTANT, web2py requires locale \"C\" exists = os.path.exists pjoin =", "create_missing_app_folders from gluon.globals import current # Remarks: # calling script", "'http_x_forwarded_for', secondly 'remote_addr' if all fails, assume '127.0.0.1' or '::1'", "= os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # because we do not want the path", "hasattr(current, 'request'): # ################################################## # on success, try store session", "if response.js: http_response.headers['web2py-component-command'] = \\ urllib2.quote(response.js.replace('\\n', '')) # ################################################## #", "== rwthread.routes.default_application \\ and app != 'welcome': redirect(URL('welcome', 'default', 'index'))", "import re import copy import sys import time import datetime", "response, session) # set default view, controller can override it", "response._view_environment = copy.copy(environment) page = run_controller_in(request.controller, request.function, environment) if isinstance(page,", 
"s=self: s.stop()) signal.signal(signal.SIGINT, lambda a, b, s=self: s.stop()) except: pass", "23:59:59 GMT' response.stream(static_file, request=request) # ################################################## # fill in request", "app != \"admin\": import gluon.debug # activate the debugger gluon.debug.dbg.do_debug(mainpyfile=request.folder)", "= env.web2py_status_code or response.status if static_file: if eget('QUERY_STRING', '').startswith('attachment'): response.headers['Content-Disposition']", "in env.http_cookie.split(';'): single_cookie = single_cookie.strip() if single_cookie: try: request.cookies.load(single_cookie) except", "Request, Response, Session from gluon.compileapp import build_environment, run_models_in, \\ run_controller_in,", "called when a page is requested (static or dynamic). It", "rocket interfaces parameter - see http://packages.python.org/rocket/\" if path: # if", "that directory set sys.path to # (\"\", gluon_parent/site-packages, gluon_parent, ...)", "prof = cProfile.Profile() prof.enable() ret[0] = wsgiapp(environ, responder2) prof.disable() destfile", "RestrictedError('Framework', '', '', locals()) ticket = e.log(request) or 'unrecoverable' http_response", "cmd_opts = global_settings.cmd_options request.update( client = client, folder = abspath('applications',", "gluon wsgi application --------------------------- \"\"\" from __future__ import print_function if", "= pjoin(profiler_dir, \"req_%s.prof\" % web2py_uuid()) prof.dump_stats(destfile) try: line = '%s,", "\"\"\" status_headers.append(s) status_headers.append(h) return responder(s, h) time_in = time.time() ret", "self.request = request self.response = response @property def environ(self): if", "def save_password(password, port): \"\"\" Used by main() to save the", "reconnect and store it in db if request.tickets_db: ticket =", "('requests' in globals()) and (requests + 1) % 100 or", "parameter - see http://packages.python.org/rocket/\" if path: # if a path", "= hr if static_file: 
return http_response.to(responder, env=env) if request.body: request.body.close()", "override it response.view = '%s/%s.%s' % (request.controller, request.function, request.extension) #", "%s\" % profiler_dir) def app_with_logging(environ, responder): \"\"\" a wsgi app", "= e.log(request) or 'unrecoverable' http_response = \\ HTTP(500, rwthread.routes.error_message_ticket %", "env.remote_addr), is_shell = False, is_scheduler = False, is_https = env.wsgi_url_scheme", "applications_parent (web2py_path) is the directory containing applications/ # and routes.py", "= regex_client.search(eget('http_x_forwarded_for', '')) client = (g.group() or '').split(',')[0] if g", "= set(('https', 'HTTPS')) def get_client(env): \"\"\" Guesses the client address", "regex_client.search(eget('remote_addr', '')) if g: client = g.group() elif env.http_host.startswith('['): #", "http_response.to(responder, env=env) if request.body: request.body.close() if hasattr(current, 'request'): # ##################################################", "if not http_response: return wsgibase(new_environ, responder) if global_settings.web2py_crontype == 'soft':", "be called by paste.httpserver or by apache mod_wsgi (or any", "apache-compatible requests log profiler_dir: where to store profile files \"\"\"", "strings. uses a simulated environment so it may have weird", "the client address from the environment variables First tries 'http_x_forwarded_for',", "_ in range(8)]) cpassword = CRYPT()(password)[0] print('******************* IMPORTANT!!! 
************************') print('your", "def app(environ, start_response): data = f() start_response(self.response.status, self.response.headers.items()) if isinstance(data,", "HTTP as hr: http_response = hr if static_file: return http_response.to(responder,", "sock_list.append(ssl_ca_certificate) logger.info('SSL is ON') app_info = {'wsgi_app': appfactory(wsgibase, log_filename, profiler_dir)}", "garbage collect after exec, not always, once every 100 requests", "The gluon wsgi application --------------------------- \"\"\" from __future__ import print_function", "eget = environ.get current.__dict__.clear() request = Request(environ) response = Response()", "go after url_in! if not global_settings.local_hosts: local_hosts = set(['127.0.0.1', '::ffff:127.0.0.1',", "ret[0] = wsgiapp(environ, responder2) else: import cProfile prof = cProfile.Profile()", "use several interfaces - must be list of socket-tuples as", "fill in request items # ################################################## app = request.application #", "\\ run_controller_in, run_view_in from gluon.contenttype import contenttype from pydal.base import", "e.log(request) or 'unrecoverable' http_response = \\ HTTP(500, rwthread.routes.error_message_ticket % dict(ticket=ticket),", "# on application error, rollback database # ################################################## # log", "it in db if request.tickets_db: ticket = e.log(request) or 'unknown'", "or 'unknown' # rollback if response._custom_rollback: response._custom_rollback() else: BaseAdapter.close_all_instances('rollback') #", "return data return [data] for item in middleware_apps: app =", "so that web2py # runs from there instead of cwd", "so client can distinguish component requests. 
if request.cid: http_response.headers.setdefault( 'web2py-component-content',", "= environ['PATH_INFO'] # ################################################## # access the requested application #", "'127.0.0.1' or '::1' (running locally) \"\"\" eget = env.get g", "= abspath(profiler_dir) logger.warn('profiler is on. will use dir %s', profiler_dir)", "app_with_logging(environ, responder): \"\"\" a wsgi app that does logging and", "app_info = {'wsgi_app': appfactory(wsgibase, log_filename, profiler_dir)} self.server = rocket.Rocket(interfaces or", "(Rocket) \"\"\" def __init__( self, ip='1172.16.17.32', port=8000, password='', pid_filename='httpserver.pid', log_filename='httpserver.log',", "lambda a, b, s=self: s.stop()) except: pass write_file(self.pid_filename, str(os.getpid())) self.server.start()", "stop(self, stoplogging=False): \"\"\" stop cron and the web server \"\"\"", "%s\" % profiler_dir) filepath = pjoin(profiler_dir, 'wtest') try: filehandle =", "path = os.path.normpath(path) web2py_path = path global_settings.applications_parent = path os.chdir(path)", "global_settings.web2py_runtime_gae: try: fqdn = socket.getfqdn() local_hosts.add(socket.gethostname()) local_hosts.add(fqdn) local_hosts.update([ addrinfo[4][0] for", "(None, '', 'unknown'): g = regex_client.search(eget('remote_addr', '')) if g: client", "def appfactory(wsgiapp=wsgibase, logfilename='httpserver.log', profiler_dir=None, profilerfilename=None): \"\"\" generates a wsgi application", "- /<application>/static/<file> 2. for dynamic pages: - /<application>[/<controller>[/<function>[/<sub>]]][.<extension>] The naming", "request=request) # ################################################## # fill in request items # ##################################################", "rwthread, \\ try_rewrite_on_error, fixup_missing_path_info from gluon import newcron __all__ =", "a view/template. 
this function must run from the [application] folder.", "and query_string # select rewrite parameters # rewrite incoming URL", "import web2py_uuid from gluon.admin import add_path_first, create_missing_folders, create_missing_app_folders from gluon.globals", "is on. will use dir %s', profiler_dir) if not os.path.isdir(profiler_dir):", "logger.warn('profiler is on. will use dir %s', profiler_dir) if not", "not env.web2py_disable_session: session.connect(request, response) # ################################################## # run controller #", "profilerfilename=None): \"\"\" generates a wsgi application that does logging and", "# on success, try store session in database # ##################################################", "'r').read()) else: raise HTTP(503, \"<html><body><h1>Temporarily down for maintenance</h1></body></html>\") # ##################################################", "str(os.getpid())) self.server.start() def stop(self, stoplogging=False): \"\"\" stop cron and the", "CRYPT()(password)[0] fp = open(password_file, 'w') if password: fp.write('password=\"%s\"\\n' % cpassword)", "(request.client=%s)\" % client) return client def serve_controller(request, response, session): \"\"\"", "'HttpServer'] requests = 0 # gc timer # Security Checks:", "HTTP(500, rwthread.routes.error_message_ticket % dict(ticket=ticket), web2py_error='ticket %s' % ticket) finally: if", "sys.stdout.write(line) elif isinstance(logfilename, str): write_file(logfilename, line, 'a') else: logfilename.write(line) except:", "use: - request.wsgi.environ - request.wsgi.start_response to call third party WSGI", "from gluon.http import HTTP, redirect from gluon.globals import Request, Response,", "\"%s\"' % password) print('*********************************************************') elif password == '<recycle>': # reuse", "if g: client = g.group() elif env.http_host.startswith('['): # IPv6 client", "parameters # rewrite incoming URL # parse rewritten header variables", "is the directory 
containing applications/ # and routes.py # The", "be list of socket-tuples as string ): \"\"\" starts the", "'remote_addr' if all fails, assume '127.0.0.1' or '::1' (running locally)", "down for maintenance</h1></body></html>\") # ################################################## # build missing folders #", "# Rocket does not use a shutdown timeout path=None, interfaces=None", "and view # ################################################## environment = build_environment(request, response, session) #", "cron and the web server \"\"\" newcron.stopcron() self.server.stop(stoplogging) try: os.unlink(self.pid_filename)", "timeout path=None, interfaces=None # Rocket is able to use several", "rwthread.routes.error_message % 'invalid request', web2py_error='invalid application') elif not request.is_local and", "# use provided password cpassword = CRYPT()(password)[0] fp = open(password_file,", "# defaults to that directory set sys.path to # (\"\",", "must be either: 1. for static pages: - /<application>/static/<file> 2.", "p in (path, abspath('site-packages'), \"\"): add_path_first(p) if exists(\"logging.conf\"): logging.config.fileConfig(\"logging.conf\") save_password(password,", "################################################## if env.http_cookie: for single_cookie in env.http_cookie.split(';'): single_cookie = single_cookie.strip()", "environment variables, replacing '.' with '_' - adds web2py path", "and routes.py # The two are identical unless web2py_path is", "app = item(app) def caller(app): return app(self.environ, self.start_response) return lambda", "requested (static or dynamic). It can be called by paste.httpserver", "profiler_dir=None, ssl_certificate=None, ssl_private_key=None, ssl_ca_certificate=None, min_threads=None, max_threads=None, server_name=None, request_queue_size=5, timeout=10, socket_timeout=1," ]
[ "+= state.ss.time_step self._s_since_unstake += state.ss.time_step if self._doCreatePool(): self._s_since_create = 0", "\"\"\"Create datatoken contract and mint DTs to self.\"\"\" wallet =", "DT = self._createDatatoken(dt_name, mint_amt=1000.0) #magic number #new pool pool_address =", "toBase18(mint_amt), from_wallet=wallet) DT = datatoken.Datatoken(DT_address) DT.mint(wallet.address, toBase18(mint_amt), from_wallet=wallet) return DT", "pool_agent_name = f'pool{pool_i}' #new DT DT = self._createDatatoken(dt_name, mint_amt=1000.0) #magic", "* constants.S_PER_DAY #magic number self._s_since_unstake = 0 self._s_between_unstake = 3", "how much. Then do the action.\"\"\" pool_agents = state.agents.filterByNonzeroStake(self) pool_agent", "-> None: self._s_since_create += state.ss.time_step self._s_since_unstake += state.ss.time_step if self._doCreatePool():", "do the action.\"\"\" pool_agents = state.agents.filterByNonzeroStake(self) pool_agent = random.choice(list(pool_agents.values())) BPT", "bpool, datatoken, dtfactory, globaltokens from web3tools.web3util import toBase18 @enforce_types class", "7 * constants.S_PER_DAY #magic number self._s_since_unstake = 0 self._s_between_unstake =", "import POOL_WEIGHT_DT, POOL_WEIGHT_OCEAN from web3engine import bfactory, bpool, datatoken, dtfactory,", "= self._wallet._web3wallet DT_address = dtfactory.DTFactory().createToken( '', dt_name, dt_name, toBase18(mint_amt), from_wallet=wallet)", "dtfactory.DTFactory().createToken( '', dt_name, dt_name, toBase18(mint_amt), from_wallet=wallet) DT = datatoken.Datatoken(DT_address) DT.mint(wallet.address,", "the OCEAN DT_bind_amt = 20.0 #magic number DT.approve(pool.address, toBase18(DT_bind_amt), from_wallet=wallet)", "if self.OCEAN() < 200.0: #magic number return False return self._s_since_create", "agent pool_agent = PoolAgent(pool_agent_name, pool) state.addAgent(pool_agent) return pool_agent def _doUnstakeOCEAN(self,", "USD, OCEAN) self._s_since_create = 0 self._s_between_create = 7 
* constants.S_PER_DAY", "wallet = self._wallet._web3wallet OCEAN = globaltokens.OCEANtoken() #name pool_i = len(state.agents.filterToPool())", "assert self.OCEAN() > 0.0, \"should not call if no OCEAN\"", "datatoken.Datatoken: \"\"\"Create datatoken contract and mint DTs to self.\"\"\" wallet", "import bfactory, bpool, datatoken, dtfactory, globaltokens from web3tools.web3util import toBase18", "Then do the action.\"\"\" pool_agents = state.agents.filterByNonzeroStake(self) pool_agent = random.choice(list(pool_agents.values()))", "POOL_WEIGHT_OCEAN from web3engine import bfactory, bpool, datatoken, dtfactory, globaltokens from", "BPT = self.BPT(pool_agent.pool) BPT_unstake = 0.10 * BPT #magic number", "toBase18 @enforce_types class EWPublisherAgent(PublisherAgent): def __init__(self, name: str, USD: float,", "pool.finalize(from_wallet=wallet) #create agent pool_agent = PoolAgent(pool_agent_name, pool) state.addAgent(pool_agent) return pool_agent", "self._s_between_unstake def _unstakeOCEANsomewhere(self, state): \"\"\"Choose what pool to unstake and", "pool pool_address = bfactory.BFactory().newBPool(from_wallet=wallet) pool = bpool.BPool(pool_address) #bind tokens &", "from web3tools.web3util import toBase18 @enforce_types class EWPublisherAgent(PublisherAgent): def __init__(self, name:", "self._wallet._web3wallet DT_address = dtfactory.DTFactory().createToken( '', dt_name, dt_name, toBase18(mint_amt), from_wallet=wallet) DT", "pool to unstake and by how much. 
Then do the", "pool.bind(DT.address, toBase18(DT_bind_amt), toBase18(POOL_WEIGHT_DT), from_wallet=wallet) pool.bind(OCEAN.address, toBase18(OCEAN_bind_amt), toBase18(POOL_WEIGHT_OCEAN), from_wallet=wallet) pool.finalize(from_wallet=wallet) #create", "from_wallet=wallet) OCEAN.approve(pool.address, toBase18(OCEAN_bind_amt),from_wallet=wallet) pool.bind(DT.address, toBase18(DT_bind_amt), toBase18(POOL_WEIGHT_DT), from_wallet=wallet) pool.bind(OCEAN.address, toBase18(OCEAN_bind_amt), toBase18(POOL_WEIGHT_OCEAN),", "OCEAN = globaltokens.OCEANtoken() #name pool_i = len(state.agents.filterToPool()) dt_name = f'DT{pool_i}'", "number return False return self._s_since_create >= self._s_between_create def _createPoolAgent(self, state)", "= state.agents.filterByNonzeroStake(self) pool_agent = random.choice(list(pool_agents.values())) BPT = self.BPT(pool_agent.pool) BPT_unstake =", "#bind tokens & add initial liquidity OCEAN_bind_amt = self.OCEAN() #magic", "liquidity OCEAN_bind_amt = self.OCEAN() #magic number: use all the OCEAN", "super().__init__(name, USD, OCEAN) self._s_since_create = 0 self._s_between_create = 7 *", "#magic number self._s_since_unstake = 0 self._s_between_unstake = 3 * constants.S_PER_DAY", "call if no OCEAN\" wallet = self._wallet._web3wallet OCEAN = globaltokens.OCEANtoken()", "number self.unstakeOCEAN(BPT_unstake, pool_agent.pool) def _createDatatoken(self,dt_name:str,mint_amt:float)-> datatoken.Datatoken: \"\"\"Create datatoken contract and", "web3engine import bfactory, bpool, datatoken, dtfactory, globaltokens from web3tools.web3util import", "bfactory.BFactory().newBPool(from_wallet=wallet) pool = bpool.BPool(pool_address) #bind tokens & add initial liquidity", "-> PoolAgent: assert self.OCEAN() > 0.0, \"should not call if", "the action.\"\"\" pool_agents = state.agents.filterByNonzeroStake(self) pool_agent = random.choice(list(pool_agents.values())) BPT =", "import logging log = logging.getLogger('marketagents') from enforce_typing import 
enforce_types #", "state.agents.filterByNonzeroStake(self) pool_agent = random.choice(list(pool_agents.values())) BPT = self.BPT(pool_agent.pool) BPT_unstake = 0.10", "ignore[import] import random from agents.PublisherAgent import PublisherAgent from agents.PoolAgent import", "self._s_between_unstake = 3 * constants.S_PER_DAY #magic number def takeStep(self, state)", "> 0.0, \"should not call if no OCEAN\" wallet =", "#magic number #new pool pool_address = bfactory.BFactory().newBPool(from_wallet=wallet) pool = bpool.BPool(pool_address)", "= PoolAgent(pool_agent_name, pool) state.addAgent(pool_agent) return pool_agent def _doUnstakeOCEAN(self, state) ->", "self._s_since_create >= self._s_between_create def _createPoolAgent(self, state) -> PoolAgent: assert self.OCEAN()", "= dtfactory.DTFactory().createToken( '', dt_name, dt_name, toBase18(mint_amt), from_wallet=wallet) DT = datatoken.Datatoken(DT_address)", "DT_address = dtfactory.DTFactory().createToken( '', dt_name, dt_name, toBase18(mint_amt), from_wallet=wallet) DT =", "= bpool.BPool(pool_address) #bind tokens & add initial liquidity OCEAN_bind_amt =", "'', dt_name, dt_name, toBase18(mint_amt), from_wallet=wallet) DT = datatoken.Datatoken(DT_address) DT.mint(wallet.address, toBase18(mint_amt),", "if self._doCreatePool(): self._s_since_create = 0 self._createPoolAgent(state) if self._doUnstakeOCEAN(state): self._s_since_unstake =", "random from agents.PublisherAgent import PublisherAgent from agents.PoolAgent import PoolAgent from", "pool) state.addAgent(pool_agent) return pool_agent def _doUnstakeOCEAN(self, state) -> bool: if", "random.choice(list(pool_agents.values())) BPT = self.BPT(pool_agent.pool) BPT_unstake = 0.10 * BPT #magic", "@enforce_types class EWPublisherAgent(PublisherAgent): def __init__(self, name: str, USD: float, OCEAN:", "bool: if self.OCEAN() < 200.0: #magic number return False return", "= 3 * constants.S_PER_DAY #magic number def takeStep(self, state) ->", "number def takeStep(self, state) 
-> None: self._s_since_create += state.ss.time_step self._s_since_unstake", "= self.BPT(pool_agent.pool) BPT_unstake = 0.10 * BPT #magic number self.unstakeOCEAN(BPT_unstake,", "log = logging.getLogger('marketagents') from enforce_typing import enforce_types # type: ignore[import]", "datatoken, dtfactory, globaltokens from web3tools.web3util import toBase18 @enforce_types class EWPublisherAgent(PublisherAgent):", "unstake and by how much. Then do the action.\"\"\" pool_agents", "#new DT DT = self._createDatatoken(dt_name, mint_amt=1000.0) #magic number #new pool", "state) -> PoolAgent: assert self.OCEAN() > 0.0, \"should not call", "self._s_since_unstake += state.ss.time_step if self._doCreatePool(): self._s_since_create = 0 self._createPoolAgent(state) if", "float): super().__init__(name, USD, OCEAN) self._s_since_create = 0 self._s_between_create = 7", "#new pool pool_address = bfactory.BFactory().newBPool(from_wallet=wallet) pool = bpool.BPool(pool_address) #bind tokens", "= 0 self._s_between_create = 7 * constants.S_PER_DAY #magic number self._s_since_unstake", "toBase18(POOL_WEIGHT_DT), from_wallet=wallet) pool.bind(OCEAN.address, toBase18(OCEAN_bind_amt), toBase18(POOL_WEIGHT_OCEAN), from_wallet=wallet) pool.finalize(from_wallet=wallet) #create agent pool_agent", "self._s_since_create += state.ss.time_step self._s_since_unstake += state.ss.time_step if self._doCreatePool(): self._s_since_create =", "number self._s_since_unstake = 0 self._s_between_unstake = 3 * constants.S_PER_DAY #magic", "= logging.getLogger('marketagents') from enforce_typing import enforce_types # type: ignore[import] import", "float, OCEAN: float): super().__init__(name, USD, OCEAN) self._s_since_create = 0 self._s_between_create", "def _doCreatePool(self) -> bool: if self.OCEAN() < 200.0: #magic number", "logging log = logging.getLogger('marketagents') from enforce_typing import enforce_types # type:", "globaltokens.OCEANtoken() #name pool_i = len(state.agents.filterToPool()) dt_name = 
f'DT{pool_i}' pool_agent_name =", "pool = bpool.BPool(pool_address) #bind tokens & add initial liquidity OCEAN_bind_amt", "= globaltokens.OCEANtoken() #name pool_i = len(state.agents.filterToPool()) dt_name = f'DT{pool_i}' pool_agent_name", "toBase18(POOL_WEIGHT_OCEAN), from_wallet=wallet) pool.finalize(from_wallet=wallet) #create agent pool_agent = PoolAgent(pool_agent_name, pool) state.addAgent(pool_agent)", "util.constants import POOL_WEIGHT_DT, POOL_WEIGHT_OCEAN from web3engine import bfactory, bpool, datatoken,", "self.OCEAN() < 200.0: #magic number return False return self._s_since_create >=", "pool_address = bfactory.BFactory().newBPool(from_wallet=wallet) pool = bpool.BPool(pool_address) #bind tokens & add", "def takeStep(self, state) -> None: self._s_since_create += state.ss.time_step self._s_since_unstake +=", "OCEAN_bind_amt = self.OCEAN() #magic number: use all the OCEAN DT_bind_amt", "bfactory, bpool, datatoken, dtfactory, globaltokens from web3tools.web3util import toBase18 @enforce_types", "False return self._s_since_create >= self._s_between_create def _createPoolAgent(self, state) -> PoolAgent:", "0 self._createPoolAgent(state) if self._doUnstakeOCEAN(state): self._s_since_unstake = 0 self._unstakeOCEANsomewhere(state) def _doCreatePool(self)", "self.OCEAN() > 0.0, \"should not call if no OCEAN\" wallet", "0.0, \"should not call if no OCEAN\" wallet = self._wallet._web3wallet", "toBase18(OCEAN_bind_amt),from_wallet=wallet) pool.bind(DT.address, toBase18(DT_bind_amt), toBase18(POOL_WEIGHT_DT), from_wallet=wallet) pool.bind(OCEAN.address, toBase18(OCEAN_bind_amt), toBase18(POOL_WEIGHT_OCEAN), from_wallet=wallet) pool.finalize(from_wallet=wallet)", "initial liquidity OCEAN_bind_amt = self.OCEAN() #magic number: use all the", "by how much. 
Then do the action.\"\"\" pool_agents = state.agents.filterByNonzeroStake(self)", "= random.choice(list(pool_agents.values())) BPT = self.BPT(pool_agent.pool) BPT_unstake = 0.10 * BPT", "= 20.0 #magic number DT.approve(pool.address, toBase18(DT_bind_amt), from_wallet=wallet) OCEAN.approve(pool.address, toBase18(OCEAN_bind_amt),from_wallet=wallet) pool.bind(DT.address,", "<filename>agents/EWPublisherAgent.py import logging log = logging.getLogger('marketagents') from enforce_typing import enforce_types", "200.0: #magic number return False return self._s_since_create >= self._s_between_create def", "#magic number return False return self._s_since_create >= self._s_between_create def _createPoolAgent(self,", "globaltokens from web3tools.web3util import toBase18 @enforce_types class EWPublisherAgent(PublisherAgent): def __init__(self,", "BPT_unstake = 0.10 * BPT #magic number self.unstakeOCEAN(BPT_unstake, pool_agent.pool) def", "dt_name, toBase18(mint_amt), from_wallet=wallet) DT = datatoken.Datatoken(DT_address) DT.mint(wallet.address, toBase18(mint_amt), from_wallet=wallet) return", "PoolAgent(pool_agent_name, pool) state.addAgent(pool_agent) return pool_agent def _doUnstakeOCEAN(self, state) -> bool:", "self.OCEAN() #magic number: use all the OCEAN DT_bind_amt = 20.0", "self._s_between_create = 7 * constants.S_PER_DAY #magic number self._s_since_unstake = 0", "much. 
Then do the action.\"\"\" pool_agents = state.agents.filterByNonzeroStake(self) pool_agent =", "self._s_since_create = 0 self._s_between_create = 7 * constants.S_PER_DAY #magic number", "no OCEAN\" wallet = self._wallet._web3wallet OCEAN = globaltokens.OCEANtoken() #name pool_i", "constants from util.constants import POOL_WEIGHT_DT, POOL_WEIGHT_OCEAN from web3engine import bfactory,", "self.\"\"\" wallet = self._wallet._web3wallet DT_address = dtfactory.DTFactory().createToken( '', dt_name, dt_name,", "0.10 * BPT #magic number self.unstakeOCEAN(BPT_unstake, pool_agent.pool) def _createDatatoken(self,dt_name:str,mint_amt:float)-> datatoken.Datatoken:", "not call if no OCEAN\" wallet = self._wallet._web3wallet OCEAN =", "* BPT #magic number self.unstakeOCEAN(BPT_unstake, pool_agent.pool) def _createDatatoken(self,dt_name:str,mint_amt:float)-> datatoken.Datatoken: \"\"\"Create", "pool_agent = random.choice(list(pool_agents.values())) BPT = self.BPT(pool_agent.pool) BPT_unstake = 0.10 *", "pool_agents = state.agents.filterByNonzeroStake(self) pool_agent = random.choice(list(pool_agents.values())) BPT = self.BPT(pool_agent.pool) BPT_unstake", "_createPoolAgent(self, state) -> PoolAgent: assert self.OCEAN() > 0.0, \"should not", "= f'DT{pool_i}' pool_agent_name = f'pool{pool_i}' #new DT DT = self._createDatatoken(dt_name,", "_createDatatoken(self,dt_name:str,mint_amt:float)-> datatoken.Datatoken: \"\"\"Create datatoken contract and mint DTs to self.\"\"\"", "= self.OCEAN() #magic number: use all the OCEAN DT_bind_amt =", "name: str, USD: float, OCEAN: float): super().__init__(name, USD, OCEAN) self._s_since_create", "self._s_between_create def _createPoolAgent(self, state) -> PoolAgent: assert self.OCEAN() > 0.0,", "= f'pool{pool_i}' #new DT DT = self._createDatatoken(dt_name, mint_amt=1000.0) #magic number", "state) -> bool: if not state.agents.filterByNonzeroStake(self): return False return self._s_since_unstake", "= 0 self._unstakeOCEANsomewhere(state) def 
_doCreatePool(self) -> bool: if self.OCEAN() <", "import PublisherAgent from agents.PoolAgent import PoolAgent from util import constants", "all the OCEAN DT_bind_amt = 20.0 #magic number DT.approve(pool.address, toBase18(DT_bind_amt),", "f'DT{pool_i}' pool_agent_name = f'pool{pool_i}' #new DT DT = self._createDatatoken(dt_name, mint_amt=1000.0)", "action.\"\"\" pool_agents = state.agents.filterByNonzeroStake(self) pool_agent = random.choice(list(pool_agents.values())) BPT = self.BPT(pool_agent.pool)", "USD: float, OCEAN: float): super().__init__(name, USD, OCEAN) self._s_since_create = 0", "return self._s_since_unstake >= self._s_between_unstake def _unstakeOCEANsomewhere(self, state): \"\"\"Choose what pool", "util import constants from util.constants import POOL_WEIGHT_DT, POOL_WEIGHT_OCEAN from web3engine", "__init__(self, name: str, USD: float, OCEAN: float): super().__init__(name, USD, OCEAN)", "def _doUnstakeOCEAN(self, state) -> bool: if not state.agents.filterByNonzeroStake(self): return False", "toBase18(DT_bind_amt), from_wallet=wallet) OCEAN.approve(pool.address, toBase18(OCEAN_bind_amt),from_wallet=wallet) pool.bind(DT.address, toBase18(DT_bind_amt), toBase18(POOL_WEIGHT_DT), from_wallet=wallet) pool.bind(OCEAN.address, toBase18(OCEAN_bind_amt),", "type: ignore[import] import random from agents.PublisherAgent import PublisherAgent from agents.PoolAgent", "\"should not call if no OCEAN\" wallet = self._wallet._web3wallet OCEAN", "3 * constants.S_PER_DAY #magic number def takeStep(self, state) -> None:", "import random from agents.PublisherAgent import PublisherAgent from agents.PoolAgent import PoolAgent", "agents.PoolAgent import PoolAgent from util import constants from util.constants import", "= self._wallet._web3wallet OCEAN = globaltokens.OCEANtoken() #name pool_i = len(state.agents.filterToPool()) dt_name", "import constants from util.constants import POOL_WEIGHT_DT, POOL_WEIGHT_OCEAN from web3engine import", "pool_i = 
len(state.agents.filterToPool()) dt_name = f'DT{pool_i}' pool_agent_name = f'pool{pool_i}' #new", "toBase18(DT_bind_amt), toBase18(POOL_WEIGHT_DT), from_wallet=wallet) pool.bind(OCEAN.address, toBase18(OCEAN_bind_amt), toBase18(POOL_WEIGHT_OCEAN), from_wallet=wallet) pool.finalize(from_wallet=wallet) #create agent", "= 0.10 * BPT #magic number self.unstakeOCEAN(BPT_unstake, pool_agent.pool) def _createDatatoken(self,dt_name:str,mint_amt:float)->", "agents.PublisherAgent import PublisherAgent from agents.PoolAgent import PoolAgent from util import", "#name pool_i = len(state.agents.filterToPool()) dt_name = f'DT{pool_i}' pool_agent_name = f'pool{pool_i}'", "number DT.approve(pool.address, toBase18(DT_bind_amt), from_wallet=wallet) OCEAN.approve(pool.address, toBase18(OCEAN_bind_amt),from_wallet=wallet) pool.bind(DT.address, toBase18(DT_bind_amt), toBase18(POOL_WEIGHT_DT), from_wallet=wallet)", "OCEAN.approve(pool.address, toBase18(OCEAN_bind_amt),from_wallet=wallet) pool.bind(DT.address, toBase18(DT_bind_amt), toBase18(POOL_WEIGHT_DT), from_wallet=wallet) pool.bind(OCEAN.address, toBase18(OCEAN_bind_amt), toBase18(POOL_WEIGHT_OCEAN), from_wallet=wallet)", "from_wallet=wallet) pool.bind(OCEAN.address, toBase18(OCEAN_bind_amt), toBase18(POOL_WEIGHT_OCEAN), from_wallet=wallet) pool.finalize(from_wallet=wallet) #create agent pool_agent =", "pool_agent.pool) def _createDatatoken(self,dt_name:str,mint_amt:float)-> datatoken.Datatoken: \"\"\"Create datatoken contract and mint DTs", "wallet = self._wallet._web3wallet DT_address = dtfactory.DTFactory().createToken( '', dt_name, dt_name, toBase18(mint_amt),", "= bfactory.BFactory().newBPool(from_wallet=wallet) pool = bpool.BPool(pool_address) #bind tokens & add initial", "BPT #magic number self.unstakeOCEAN(BPT_unstake, pool_agent.pool) def _createDatatoken(self,dt_name:str,mint_amt:float)-> datatoken.Datatoken: \"\"\"Create datatoken", "#magic number def takeStep(self, state) -> None: self._s_since_create += 
state.ss.time_step", "OCEAN) self._s_since_create = 0 self._s_between_create = 7 * constants.S_PER_DAY #magic", "self._createPoolAgent(state) if self._doUnstakeOCEAN(state): self._s_since_unstake = 0 self._unstakeOCEANsomewhere(state) def _doCreatePool(self) ->", "enforce_typing import enforce_types # type: ignore[import] import random from agents.PublisherAgent", "pool.bind(OCEAN.address, toBase18(OCEAN_bind_amt), toBase18(POOL_WEIGHT_OCEAN), from_wallet=wallet) pool.finalize(from_wallet=wallet) #create agent pool_agent = PoolAgent(pool_agent_name,", "# type: ignore[import] import random from agents.PublisherAgent import PublisherAgent from", "dt_name, dt_name, toBase18(mint_amt), from_wallet=wallet) DT = datatoken.Datatoken(DT_address) DT.mint(wallet.address, toBase18(mint_amt), from_wallet=wallet)", "import PoolAgent from util import constants from util.constants import POOL_WEIGHT_DT,", "OCEAN: float): super().__init__(name, USD, OCEAN) self._s_since_create = 0 self._s_between_create =", "str, USD: float, OCEAN: float): super().__init__(name, USD, OCEAN) self._s_since_create =", "number #new pool pool_address = bfactory.BFactory().newBPool(from_wallet=wallet) pool = bpool.BPool(pool_address) #bind", "DTs to self.\"\"\" wallet = self._wallet._web3wallet DT_address = dtfactory.DTFactory().createToken( '',", "-> bool: if not state.agents.filterByNonzeroStake(self): return False return self._s_since_unstake >=", "self._unstakeOCEANsomewhere(state) def _doCreatePool(self) -> bool: if self.OCEAN() < 200.0: #magic", "dtfactory, globaltokens from web3tools.web3util import toBase18 @enforce_types class EWPublisherAgent(PublisherAgent): def", "use all the OCEAN DT_bind_amt = 20.0 #magic number DT.approve(pool.address,", "self._createDatatoken(dt_name, mint_amt=1000.0) #magic number #new pool pool_address = bfactory.BFactory().newBPool(from_wallet=wallet) pool", "0 self._unstakeOCEANsomewhere(state) def _doCreatePool(self) -> bool: if self.OCEAN() < 200.0:", "PoolAgent 
from util import constants from util.constants import POOL_WEIGHT_DT, POOL_WEIGHT_OCEAN", "len(state.agents.filterToPool()) dt_name = f'DT{pool_i}' pool_agent_name = f'pool{pool_i}' #new DT DT", "dt_name = f'DT{pool_i}' pool_agent_name = f'pool{pool_i}' #new DT DT =", "f'pool{pool_i}' #new DT DT = self._createDatatoken(dt_name, mint_amt=1000.0) #magic number #new", "#magic number self.unstakeOCEAN(BPT_unstake, pool_agent.pool) def _createDatatoken(self,dt_name:str,mint_amt:float)-> datatoken.Datatoken: \"\"\"Create datatoken contract", "20.0 #magic number DT.approve(pool.address, toBase18(DT_bind_amt), from_wallet=wallet) OCEAN.approve(pool.address, toBase18(OCEAN_bind_amt),from_wallet=wallet) pool.bind(DT.address, toBase18(DT_bind_amt),", "bool: if not state.agents.filterByNonzeroStake(self): return False return self._s_since_unstake >= self._s_between_unstake", "self._wallet._web3wallet OCEAN = globaltokens.OCEANtoken() #name pool_i = len(state.agents.filterToPool()) dt_name =", "= 0 self._createPoolAgent(state) if self._doUnstakeOCEAN(state): self._s_since_unstake = 0 self._unstakeOCEANsomewhere(state) def", "from agents.PoolAgent import PoolAgent from util import constants from util.constants", "DT DT = self._createDatatoken(dt_name, mint_amt=1000.0) #magic number #new pool pool_address", "= self._createDatatoken(dt_name, mint_amt=1000.0) #magic number #new pool pool_address = bfactory.BFactory().newBPool(from_wallet=wallet)", "self._s_since_create = 0 self._createPoolAgent(state) if self._doUnstakeOCEAN(state): self._s_since_unstake = 0 self._unstakeOCEANsomewhere(state)", "toBase18(OCEAN_bind_amt), toBase18(POOL_WEIGHT_OCEAN), from_wallet=wallet) pool.finalize(from_wallet=wallet) #create agent pool_agent = PoolAgent(pool_agent_name, pool)", "#magic number: use all the OCEAN DT_bind_amt = 20.0 #magic", "from util import constants from util.constants import POOL_WEIGHT_DT, POOL_WEIGHT_OCEAN from", "if not state.agents.filterByNonzeroStake(self): return False 
return self._s_since_unstake >= self._s_between_unstake def", "datatoken contract and mint DTs to self.\"\"\" wallet = self._wallet._web3wallet", "#magic number DT.approve(pool.address, toBase18(DT_bind_amt), from_wallet=wallet) OCEAN.approve(pool.address, toBase18(OCEAN_bind_amt),from_wallet=wallet) pool.bind(DT.address, toBase18(DT_bind_amt), toBase18(POOL_WEIGHT_DT),", "from util.constants import POOL_WEIGHT_DT, POOL_WEIGHT_OCEAN from web3engine import bfactory, bpool,", "mint_amt=1000.0) #magic number #new pool pool_address = bfactory.BFactory().newBPool(from_wallet=wallet) pool =", "state) -> None: self._s_since_create += state.ss.time_step self._s_since_unstake += state.ss.time_step if", "from_wallet=wallet) pool.finalize(from_wallet=wallet) #create agent pool_agent = PoolAgent(pool_agent_name, pool) state.addAgent(pool_agent) return", "self._s_since_unstake >= self._s_between_unstake def _unstakeOCEANsomewhere(self, state): \"\"\"Choose what pool to", ">= self._s_between_unstake def _unstakeOCEANsomewhere(self, state): \"\"\"Choose what pool to unstake", "state.agents.filterByNonzeroStake(self): return False return self._s_since_unstake >= self._s_between_unstake def _unstakeOCEANsomewhere(self, state):", "pool_agent = PoolAgent(pool_agent_name, pool) state.addAgent(pool_agent) return pool_agent def _doUnstakeOCEAN(self, state)", "def _unstakeOCEANsomewhere(self, state): \"\"\"Choose what pool to unstake and by", "mint DTs to self.\"\"\" wallet = self._wallet._web3wallet DT_address = dtfactory.DTFactory().createToken(", "what pool to unstake and by how much. 
Then do", "_unstakeOCEANsomewhere(self, state): \"\"\"Choose what pool to unstake and by how", "takeStep(self, state) -> None: self._s_since_create += state.ss.time_step self._s_since_unstake += state.ss.time_step", "OCEAN\" wallet = self._wallet._web3wallet OCEAN = globaltokens.OCEANtoken() #name pool_i =", "#create agent pool_agent = PoolAgent(pool_agent_name, pool) state.addAgent(pool_agent) return pool_agent def", "_doCreatePool(self) -> bool: if self.OCEAN() < 200.0: #magic number return", "pool_agent def _doUnstakeOCEAN(self, state) -> bool: if not state.agents.filterByNonzeroStake(self): return", "and mint DTs to self.\"\"\" wallet = self._wallet._web3wallet DT_address =", "POOL_WEIGHT_DT, POOL_WEIGHT_OCEAN from web3engine import bfactory, bpool, datatoken, dtfactory, globaltokens", "& add initial liquidity OCEAN_bind_amt = self.OCEAN() #magic number: use", "return pool_agent def _doUnstakeOCEAN(self, state) -> bool: if not state.agents.filterByNonzeroStake(self):", "_doUnstakeOCEAN(self, state) -> bool: if not state.agents.filterByNonzeroStake(self): return False return", "0 self._s_between_unstake = 3 * constants.S_PER_DAY #magic number def takeStep(self,", "from web3engine import bfactory, bpool, datatoken, dtfactory, globaltokens from web3tools.web3util", "self._doUnstakeOCEAN(state): self._s_since_unstake = 0 self._unstakeOCEANsomewhere(state) def _doCreatePool(self) -> bool: if", "to unstake and by how much. Then do the action.\"\"\"", "PublisherAgent from agents.PoolAgent import PoolAgent from util import constants from", "\"\"\"Choose what pool to unstake and by how much. 
Then", "from agents.PublisherAgent import PublisherAgent from agents.PoolAgent import PoolAgent from util", "number: use all the OCEAN DT_bind_amt = 20.0 #magic number", "self.BPT(pool_agent.pool) BPT_unstake = 0.10 * BPT #magic number self.unstakeOCEAN(BPT_unstake, pool_agent.pool)", "None: self._s_since_create += state.ss.time_step self._s_since_unstake += state.ss.time_step if self._doCreatePool(): self._s_since_create", "to self.\"\"\" wallet = self._wallet._web3wallet DT_address = dtfactory.DTFactory().createToken( '', dt_name,", "def _createPoolAgent(self, state) -> PoolAgent: assert self.OCEAN() > 0.0, \"should", "OCEAN DT_bind_amt = 20.0 #magic number DT.approve(pool.address, toBase18(DT_bind_amt), from_wallet=wallet) OCEAN.approve(pool.address,", "state.ss.time_step self._s_since_unstake += state.ss.time_step if self._doCreatePool(): self._s_since_create = 0 self._createPoolAgent(state)", "if no OCEAN\" wallet = self._wallet._web3wallet OCEAN = globaltokens.OCEANtoken() #name", "and by how much. 
Then do the action.\"\"\" pool_agents =", "DT.approve(pool.address, toBase18(DT_bind_amt), from_wallet=wallet) OCEAN.approve(pool.address, toBase18(OCEAN_bind_amt),from_wallet=wallet) pool.bind(DT.address, toBase18(DT_bind_amt), toBase18(POOL_WEIGHT_DT), from_wallet=wallet) pool.bind(OCEAN.address,", "def _createDatatoken(self,dt_name:str,mint_amt:float)-> datatoken.Datatoken: \"\"\"Create datatoken contract and mint DTs to", "state): \"\"\"Choose what pool to unstake and by how much.", ">= self._s_between_create def _createPoolAgent(self, state) -> PoolAgent: assert self.OCEAN() >", "not state.agents.filterByNonzeroStake(self): return False return self._s_since_unstake >= self._s_between_unstake def _unstakeOCEANsomewhere(self,", "* constants.S_PER_DAY #magic number def takeStep(self, state) -> None: self._s_since_create", "add initial liquidity OCEAN_bind_amt = self.OCEAN() #magic number: use all", "self._s_since_unstake = 0 self._s_between_unstake = 3 * constants.S_PER_DAY #magic number", "import enforce_types # type: ignore[import] import random from agents.PublisherAgent import", "logging.getLogger('marketagents') from enforce_typing import enforce_types # type: ignore[import] import random", "import toBase18 @enforce_types class EWPublisherAgent(PublisherAgent): def __init__(self, name: str, USD:", "constants.S_PER_DAY #magic number self._s_since_unstake = 0 self._s_between_unstake = 3 *", "= 0 self._s_between_unstake = 3 * constants.S_PER_DAY #magic number def", "return False return self._s_since_create >= self._s_between_create def _createPoolAgent(self, state) ->", "return self._s_since_create >= self._s_between_create def _createPoolAgent(self, state) -> PoolAgent: assert", "PoolAgent: assert self.OCEAN() > 0.0, \"should not call if no", "tokens & add initial liquidity OCEAN_bind_amt = self.OCEAN() #magic number:", "if self._doUnstakeOCEAN(state): self._s_since_unstake = 0 self._unstakeOCEANsomewhere(state) def _doCreatePool(self) -> bool:", "from 
enforce_typing import enforce_types # type: ignore[import] import random from", "enforce_types # type: ignore[import] import random from agents.PublisherAgent import PublisherAgent", "= 7 * constants.S_PER_DAY #magic number self._s_since_unstake = 0 self._s_between_unstake", "web3tools.web3util import toBase18 @enforce_types class EWPublisherAgent(PublisherAgent): def __init__(self, name: str,", "state.ss.time_step if self._doCreatePool(): self._s_since_create = 0 self._createPoolAgent(state) if self._doUnstakeOCEAN(state): self._s_since_unstake", "-> bool: if self.OCEAN() < 200.0: #magic number return False", "self._doCreatePool(): self._s_since_create = 0 self._createPoolAgent(state) if self._doUnstakeOCEAN(state): self._s_since_unstake = 0", "DT_bind_amt = 20.0 #magic number DT.approve(pool.address, toBase18(DT_bind_amt), from_wallet=wallet) OCEAN.approve(pool.address, toBase18(OCEAN_bind_amt),from_wallet=wallet)", "state.addAgent(pool_agent) return pool_agent def _doUnstakeOCEAN(self, state) -> bool: if not", "class EWPublisherAgent(PublisherAgent): def __init__(self, name: str, USD: float, OCEAN: float):", "EWPublisherAgent(PublisherAgent): def __init__(self, name: str, USD: float, OCEAN: float): super().__init__(name,", "def __init__(self, name: str, USD: float, OCEAN: float): super().__init__(name, USD,", "0 self._s_between_create = 7 * constants.S_PER_DAY #magic number self._s_since_unstake =", "constants.S_PER_DAY #magic number def takeStep(self, state) -> None: self._s_since_create +=", "bpool.BPool(pool_address) #bind tokens & add initial liquidity OCEAN_bind_amt = self.OCEAN()", "return False return self._s_since_unstake >= self._s_between_unstake def _unstakeOCEANsomewhere(self, state): \"\"\"Choose", "self._s_since_unstake = 0 self._unstakeOCEANsomewhere(state) def _doCreatePool(self) -> bool: if self.OCEAN()", "= len(state.agents.filterToPool()) dt_name = f'DT{pool_i}' pool_agent_name = f'pool{pool_i}' #new DT", "self.unstakeOCEAN(BPT_unstake, 
pool_agent.pool) def _createDatatoken(self,dt_name:str,mint_amt:float)-> datatoken.Datatoken: \"\"\"Create datatoken contract and mint", "contract and mint DTs to self.\"\"\" wallet = self._wallet._web3wallet DT_address", "+= state.ss.time_step if self._doCreatePool(): self._s_since_create = 0 self._createPoolAgent(state) if self._doUnstakeOCEAN(state):", "False return self._s_since_unstake >= self._s_between_unstake def _unstakeOCEANsomewhere(self, state): \"\"\"Choose what", "< 200.0: #magic number return False return self._s_since_create >= self._s_between_create" ]
[ "SXR_COLORS = (\"#000000\", \"#330000\", \"#520000\", \"#850000\", \"#ad0000\", \"#ff0000\") HXR_AREAS =", "3042.005], \"CLTH_0\": [3042.005, 3050.512], \"CLTH_1\": [3050.512, 3058.457], \"CLTH_2\": [3058.457, 3110.961],", "\"DL1_1\": [2024.791, 2031.992], \"DL1_2\": [2031.992, 2035.035], \"L1\": [2035.035, 2044.167], \"BC1\":", "3042.005], \"CLTH_0\": [3042.005, 3050.512], \"CLTH_1\": [3050.512, 3058.457], \"CLTS\": [3177.650, 3224.022],", ". import _version __version__ = _version.get_versions()['version'] HXR_COLORS = (\"#000000\", \"#02004a\",", "\"BC2\": [2410.698, 2438.400], \"L3\": [2438.400, 3042.005], \"CLTH_0\": [3042.005, 3050.512], \"CLTH_1\":", "3117.409], \"BSYH_2\": [3117.409, 3224.022], \"LTUH\": [3224.022, 3562.739], \"UNDH\": [3562.739, 3718.483],", "from . import _version __version__ = _version.get_versions()['version'] HXR_COLORS = (\"#000000\",", "key, value in HXR_AREAS.items()} SXR_AREAS = { \"GUN\" : [2017.911,", "2410.698], \"BC2\": [2410.698, 2438.400], \"L3\": [2438.400, 3042.005], \"CLTH_0\": [3042.005, 3050.512],", "3765.481] } HXR_AREAS = {np.mean(value): key for key, value in", "\"#850000\", \"#ad0000\", \"#ff0000\") HXR_AREAS = { \"GUN\" : [2017.911, 2018.712],", "(\"#000000\", \"#330000\", \"#520000\", \"#850000\", \"#ad0000\", \"#ff0000\") HXR_AREAS = { \"GUN\"", "\"BSYS\": [3224.022, 3565.656], \"LTUS\": [3565.656, 3718.483], \"UNDS\": [3718.483, 3734.407], \"DMPS_1\":", "\"CLTS\": [3177.650, 3224.022], \"BSYS\": [3224.022, 3565.656], \"LTUS\": [3565.656, 3718.483], \"UNDS\":", "HXR_AREAS = { \"GUN\" : [2017.911, 2018.712], \"L0\" : [2018.712,", "\"CLTH_0\": [3042.005, 3050.512], \"CLTH_1\": [3050.512, 3058.457], \"CLTH_2\": [3058.457, 3110.961], \"BSYH_1\":", "2017.911], \"L0\" : [2018.712, 2024.791], \"DL1_1\": [2024.791, 2031.992], \"DL1_2\": [2031.992,", "\"#02004a\", \"#030069\", \"#04008f\", \"#0500b3\", \"#0700ff\") SXR_COLORS = (\"#000000\", \"#330000\", \"#520000\",", "[2044.167, 2059.733], \"L2\": [2059.733, 2410.698], 
\"BC2\": [2410.698, 2438.400], \"L3\": [2438.400,", "3110.961], \"BSYH_1\": [3110.961, 3117.409], \"BSYH_2\": [3117.409, 3224.022], \"LTUH\": [3224.022, 3562.739],", "[3042.005, 3050.512], \"CLTH_1\": [3050.512, 3058.457], \"CLTS\": [3177.650, 3224.022], \"BSYS\": [3224.022,", "SXR_AREAS = { \"GUN\" : [2017.911, 2017.911], \"L0\" : [2018.712,", "[3562.739, 3718.483], \"DMPH_1\": [3718.483, 3734.407], \"DMPH_2\": [3734.407, 3765.481] } HXR_AREAS", "\"L1\": [2035.035, 2044.167], \"BC1\": [2044.167, 2059.733], \"L2\": [2059.733, 2410.698], \"BC2\":", "= (\"#000000\", \"#02004a\", \"#030069\", \"#04008f\", \"#0500b3\", \"#0700ff\") SXR_COLORS = (\"#000000\",", "2018.712], \"L0\" : [2018.712, 2024.791], \"DL1_1\": [2024.791, 2031.992], \"DL1_2\": [2031.992,", "\"DL1_2\": [2031.992, 2035.035], \"L1\": [2035.035, 2044.167], \"BC1\": [2044.167, 2059.733], \"L2\":", "3718.483], \"UNDS\": [3718.483, 3734.407], \"DMPS_1\": [3734.407, 3734.407], \"DMPS_2\": [3734.407, 3765.481]", "HXR_AREAS.items()} SXR_AREAS = { \"GUN\" : [2017.911, 2017.911], \"L0\" :", "import _version __version__ = _version.get_versions()['version'] HXR_COLORS = (\"#000000\", \"#02004a\", \"#030069\",", "[3224.022, 3562.739], \"UNDH\": [3562.739, 3718.483], \"DMPH_1\": [3718.483, 3734.407], \"DMPH_2\": [3734.407,", "_version.get_versions()['version'] HXR_COLORS = (\"#000000\", \"#02004a\", \"#030069\", \"#04008f\", \"#0500b3\", \"#0700ff\") SXR_COLORS", "_version __version__ = _version.get_versions()['version'] HXR_COLORS = (\"#000000\", \"#02004a\", \"#030069\", \"#04008f\",", "[2017.911, 2017.911], \"L0\" : [2018.712, 2024.791], \"DL1_1\": [2024.791, 2031.992], \"DL1_2\":", "= { \"GUN\" : [2017.911, 2018.712], \"L0\" : [2018.712, 2024.791],", "3765.481] } SXR_AREAS = {np.mean(value): key for key, value in", "[2035.035, 2044.167], \"BC1\": [2044.167, 2059.733], \"L2\": [2059.733, 2410.698], \"BC2\": [2410.698,", "[3734.407, 3765.481] } SXR_AREAS = {np.mean(value): key for key, value", "for key, value in 
HXR_AREAS.items()} SXR_AREAS = { \"GUN\" :", "numpy as np from . import _version __version__ = _version.get_versions()['version']", "3565.656], \"LTUS\": [3565.656, 3718.483], \"UNDS\": [3718.483, 3734.407], \"DMPS_1\": [3734.407, 3734.407],", "\"DMPH_1\": [3718.483, 3734.407], \"DMPH_2\": [3734.407, 3765.481] } HXR_AREAS = {np.mean(value):", "2438.400], \"L3\": [2438.400, 3042.005], \"CLTH_0\": [3042.005, 3050.512], \"CLTH_1\": [3050.512, 3058.457],", "2059.733], \"L2\": [2059.733, 2410.698], \"BC2\": [2410.698, 2438.400], \"L3\": [2438.400, 3042.005],", "\"#520000\", \"#850000\", \"#ad0000\", \"#ff0000\") HXR_AREAS = { \"GUN\" : [2017.911,", "\"DMPS_2\": [3734.407, 3765.481] } SXR_AREAS = {np.mean(value): key for key,", "3050.512], \"CLTH_1\": [3050.512, 3058.457], \"CLTS\": [3177.650, 3224.022], \"BSYS\": [3224.022, 3565.656],", "} SXR_AREAS = {np.mean(value): key for key, value in SXR_AREAS.items()}", "\"GUN\" : [2017.911, 2017.911], \"L0\" : [2018.712, 2024.791], \"DL1_1\": [2024.791,", "\"#ff0000\") HXR_AREAS = { \"GUN\" : [2017.911, 2018.712], \"L0\" :", "} HXR_AREAS = {np.mean(value): key for key, value in HXR_AREAS.items()}", "3562.739], \"UNDH\": [3562.739, 3718.483], \"DMPH_1\": [3718.483, 3734.407], \"DMPH_2\": [3734.407, 3765.481]", "\"#ad0000\", \"#ff0000\") HXR_AREAS = { \"GUN\" : [2017.911, 2018.712], \"L0\"", "[2031.992, 2035.035], \"L1\": [2035.035, 2044.167], \"BC1\": [2044.167, 2059.733], \"L2\": [2059.733,", "HXR_COLORS = (\"#000000\", \"#02004a\", \"#030069\", \"#04008f\", \"#0500b3\", \"#0700ff\") SXR_COLORS =", "3224.022], \"BSYS\": [3224.022, 3565.656], \"LTUS\": [3565.656, 3718.483], \"UNDS\": [3718.483, 3734.407],", "value in HXR_AREAS.items()} SXR_AREAS = { \"GUN\" : [2017.911, 2017.911],", "\"DMPS_1\": [3734.407, 3734.407], \"DMPS_2\": [3734.407, 3765.481] } SXR_AREAS = {np.mean(value):", "(\"#000000\", \"#02004a\", \"#030069\", \"#04008f\", \"#0500b3\", \"#0700ff\") SXR_COLORS = (\"#000000\", \"#330000\",", "[3718.483, 3734.407], 
\"DMPS_1\": [3734.407, 3734.407], \"DMPS_2\": [3734.407, 3765.481] } SXR_AREAS", "[3734.407, 3765.481] } HXR_AREAS = {np.mean(value): key for key, value", "\"L2\": [2059.733, 2410.698], \"BC2\": [2410.698, 2438.400], \"L3\": [2438.400, 3042.005], \"CLTH_0\":", "3734.407], \"DMPS_2\": [3734.407, 3765.481] } SXR_AREAS = {np.mean(value): key for", "= _version.get_versions()['version'] HXR_COLORS = (\"#000000\", \"#02004a\", \"#030069\", \"#04008f\", \"#0500b3\", \"#0700ff\")", "= {np.mean(value): key for key, value in HXR_AREAS.items()} SXR_AREAS =", "[3042.005, 3050.512], \"CLTH_1\": [3050.512, 3058.457], \"CLTH_2\": [3058.457, 3110.961], \"BSYH_1\": [3110.961,", "[3734.407, 3734.407], \"DMPS_2\": [3734.407, 3765.481] } SXR_AREAS = {np.mean(value): key", "[2024.791, 2031.992], \"DL1_2\": [2031.992, 2035.035], \"L1\": [2035.035, 2044.167], \"BC1\": [2044.167,", "[2017.911, 2018.712], \"L0\" : [2018.712, 2024.791], \"DL1_1\": [2024.791, 2031.992], \"DL1_2\":", "= (\"#000000\", \"#330000\", \"#520000\", \"#850000\", \"#ad0000\", \"#ff0000\") HXR_AREAS = {", "\"L3\": [2438.400, 3042.005], \"CLTH_0\": [3042.005, 3050.512], \"CLTH_1\": [3050.512, 3058.457], \"CLTH_2\":", "import numpy as np from . import _version __version__ =", "as np from . 
import _version __version__ = _version.get_versions()['version'] HXR_COLORS", "\"#030069\", \"#04008f\", \"#0500b3\", \"#0700ff\") SXR_COLORS = (\"#000000\", \"#330000\", \"#520000\", \"#850000\",", ": [2018.712, 2024.791], \"DL1_1\": [2024.791, 2031.992], \"DL1_2\": [2031.992, 2035.035], \"L1\":", "[3565.656, 3718.483], \"UNDS\": [3718.483, 3734.407], \"DMPS_1\": [3734.407, 3734.407], \"DMPS_2\": [3734.407,", "[3177.650, 3224.022], \"BSYS\": [3224.022, 3565.656], \"LTUS\": [3565.656, 3718.483], \"UNDS\": [3718.483,", "\"#0500b3\", \"#0700ff\") SXR_COLORS = (\"#000000\", \"#330000\", \"#520000\", \"#850000\", \"#ad0000\", \"#ff0000\")", "\"#330000\", \"#520000\", \"#850000\", \"#ad0000\", \"#ff0000\") HXR_AREAS = { \"GUN\" :", "2024.791], \"DL1_1\": [2024.791, 2031.992], \"DL1_2\": [2031.992, 2035.035], \"L1\": [2035.035, 2044.167],", "= { \"GUN\" : [2017.911, 2017.911], \"L0\" : [2018.712, 2024.791],", "\"CLTH_0\": [3042.005, 3050.512], \"CLTH_1\": [3050.512, 3058.457], \"CLTS\": [3177.650, 3224.022], \"BSYS\":", "\"GUN\" : [2017.911, 2018.712], \"L0\" : [2018.712, 2024.791], \"DL1_1\": [2024.791,", "\"UNDS\": [3718.483, 3734.407], \"DMPS_1\": [3734.407, 3734.407], \"DMPS_2\": [3734.407, 3765.481] }", "[3050.512, 3058.457], \"CLTH_2\": [3058.457, 3110.961], \"BSYH_1\": [3110.961, 3117.409], \"BSYH_2\": [3117.409,", "\"L0\" : [2018.712, 2024.791], \"DL1_1\": [2024.791, 2031.992], \"DL1_2\": [2031.992, 2035.035],", "[3110.961, 3117.409], \"BSYH_2\": [3117.409, 3224.022], \"LTUH\": [3224.022, 3562.739], \"UNDH\": [3562.739,", "[2438.400, 3042.005], \"CLTH_0\": [3042.005, 3050.512], \"CLTH_1\": [3050.512, 3058.457], \"CLTH_2\": [3058.457,", "2031.992], \"DL1_2\": [2031.992, 2035.035], \"L1\": [2035.035, 2044.167], \"BC1\": [2044.167, 2059.733],", "\"UNDH\": [3562.739, 3718.483], \"DMPH_1\": [3718.483, 3734.407], \"DMPH_2\": [3734.407, 3765.481] }", "\"CLTH_1\": [3050.512, 3058.457], \"CLTH_2\": [3058.457, 3110.961], \"BSYH_1\": [3110.961, 3117.409], \"BSYH_2\":", ": 
[2017.911, 2017.911], \"L0\" : [2018.712, 2024.791], \"DL1_1\": [2024.791, 2031.992],", "in HXR_AREAS.items()} SXR_AREAS = { \"GUN\" : [2017.911, 2017.911], \"L0\"", "key for key, value in HXR_AREAS.items()} SXR_AREAS = { \"GUN\"", "\"L3\": [2438.400, 3042.005], \"CLTH_0\": [3042.005, 3050.512], \"CLTH_1\": [3050.512, 3058.457], \"CLTS\":", "\"#0700ff\") SXR_COLORS = (\"#000000\", \"#330000\", \"#520000\", \"#850000\", \"#ad0000\", \"#ff0000\") HXR_AREAS", "2044.167], \"BC1\": [2044.167, 2059.733], \"L2\": [2059.733, 2410.698], \"BC2\": [2410.698, 2438.400],", "[3224.022, 3565.656], \"LTUS\": [3565.656, 3718.483], \"UNDS\": [3718.483, 3734.407], \"DMPS_1\": [3734.407,", "[2438.400, 3042.005], \"CLTH_0\": [3042.005, 3050.512], \"CLTH_1\": [3050.512, 3058.457], \"CLTS\": [3177.650,", "3058.457], \"CLTS\": [3177.650, 3224.022], \"BSYS\": [3224.022, 3565.656], \"LTUS\": [3565.656, 3718.483],", "\"BSYH_2\": [3117.409, 3224.022], \"LTUH\": [3224.022, 3562.739], \"UNDH\": [3562.739, 3718.483], \"DMPH_1\":", "np from . 
import _version __version__ = _version.get_versions()['version'] HXR_COLORS =", "\"LTUS\": [3565.656, 3718.483], \"UNDS\": [3718.483, 3734.407], \"DMPS_1\": [3734.407, 3734.407], \"DMPS_2\":", "{ \"GUN\" : [2017.911, 2017.911], \"L0\" : [2018.712, 2024.791], \"DL1_1\":", "[2059.733, 2410.698], \"BC2\": [2410.698, 2438.400], \"L3\": [2438.400, 3042.005], \"CLTH_0\": [3042.005,", "__version__ = _version.get_versions()['version'] HXR_COLORS = (\"#000000\", \"#02004a\", \"#030069\", \"#04008f\", \"#0500b3\",", "2035.035], \"L1\": [2035.035, 2044.167], \"BC1\": [2044.167, 2059.733], \"L2\": [2059.733, 2410.698],", "[3050.512, 3058.457], \"CLTS\": [3177.650, 3224.022], \"BSYS\": [3224.022, 3565.656], \"LTUS\": [3565.656,", "[2018.712, 2024.791], \"DL1_1\": [2024.791, 2031.992], \"DL1_2\": [2031.992, 2035.035], \"L1\": [2035.035,", "\"LTUH\": [3224.022, 3562.739], \"UNDH\": [3562.739, 3718.483], \"DMPH_1\": [3718.483, 3734.407], \"DMPH_2\":", "3058.457], \"CLTH_2\": [3058.457, 3110.961], \"BSYH_1\": [3110.961, 3117.409], \"BSYH_2\": [3117.409, 3224.022],", "3734.407], \"DMPS_1\": [3734.407, 3734.407], \"DMPS_2\": [3734.407, 3765.481] } SXR_AREAS =", "3734.407], \"DMPH_2\": [3734.407, 3765.481] } HXR_AREAS = {np.mean(value): key for", "[3058.457, 3110.961], \"BSYH_1\": [3110.961, 3117.409], \"BSYH_2\": [3117.409, 3224.022], \"LTUH\": [3224.022,", "3718.483], \"DMPH_1\": [3718.483, 3734.407], \"DMPH_2\": [3734.407, 3765.481] } HXR_AREAS =", "[3718.483, 3734.407], \"DMPH_2\": [3734.407, 3765.481] } HXR_AREAS = {np.mean(value): key", "{np.mean(value): key for key, value in HXR_AREAS.items()} SXR_AREAS = {", "[3117.409, 3224.022], \"LTUH\": [3224.022, 3562.739], \"UNDH\": [3562.739, 3718.483], \"DMPH_1\": [3718.483,", "{ \"GUN\" : [2017.911, 2018.712], \"L0\" : [2018.712, 2024.791], \"DL1_1\":", "\"#04008f\", \"#0500b3\", \"#0700ff\") SXR_COLORS = (\"#000000\", \"#330000\", \"#520000\", \"#850000\", \"#ad0000\",", "\"CLTH_2\": [3058.457, 3110.961], \"BSYH_1\": [3110.961, 
3117.409], \"BSYH_2\": [3117.409, 3224.022], \"LTUH\":", "3050.512], \"CLTH_1\": [3050.512, 3058.457], \"CLTH_2\": [3058.457, 3110.961], \"BSYH_1\": [3110.961, 3117.409],", "3224.022], \"LTUH\": [3224.022, 3562.739], \"UNDH\": [3562.739, 3718.483], \"DMPH_1\": [3718.483, 3734.407],", "[2410.698, 2438.400], \"L3\": [2438.400, 3042.005], \"CLTH_0\": [3042.005, 3050.512], \"CLTH_1\": [3050.512,", ": [2017.911, 2018.712], \"L0\" : [2018.712, 2024.791], \"DL1_1\": [2024.791, 2031.992],", "HXR_AREAS = {np.mean(value): key for key, value in HXR_AREAS.items()} SXR_AREAS", "\"DMPH_2\": [3734.407, 3765.481] } HXR_AREAS = {np.mean(value): key for key,", "\"BSYH_1\": [3110.961, 3117.409], \"BSYH_2\": [3117.409, 3224.022], \"LTUH\": [3224.022, 3562.739], \"UNDH\":", "\"BC1\": [2044.167, 2059.733], \"L2\": [2059.733, 2410.698], \"BC2\": [2410.698, 2438.400], \"L3\":", "\"CLTH_1\": [3050.512, 3058.457], \"CLTS\": [3177.650, 3224.022], \"BSYS\": [3224.022, 3565.656], \"LTUS\":" ]
[ "self.loss_control = [] # for introspection t0 = time() for", "optimizer reset_seeds(reset_graph_with_backend=K) cprint(\"\\n<< {} MAIN TEST PASSED >>\\n\".format(optimizer_name), 'green') cprint(\"\\n<<", "optimizer) # cleanup del self.model, optimizer reset_seeds(reset_graph_with_backend=K) cprint(\"\\n<< {} MISC", "TESTS PASSED >>\\n\", 'green') def test_misc(self): # tests of non-main", "Y[batch_num])] print(\"model_custom -- %s batches -- time: %.2f sec\" %", "@staticmethod def _make_model(batch_shape, total_iterations, l2_reg=0, bidirectional=True, dense_constraint=None, embed_input_dim=None, sparse=False): if", "before train may fail X, Y = self._make_data(num_batches, *batch_shape) self.eta_history", "# Save/Load, Warm Restarts (w/ cosine annealing) for optimizer_name in", "optimizer = self._make_optimizer(optimizer_name, self.model, total_iterations) self.model.compile(optimizer, loss='binary_crossentropy') self.assertTrue(self._valid_weight_decays(self.model)) self.model._make_train_function() #", "= 5 total_iterations = 0 self.model = self._make_model(batch_shape, total_iterations, embed_input_dim=embed_input_dim,", "def _test_control(self, optimizer_name, amsgrad=False, nesterov=False): optimizer_kw = dict(total_iterations=0, decay=1e-3, amsgrad=amsgrad,", "(num_batches, batch_size, timesteps)) else: X = np.random.randn(num_batches, batch_size, timesteps, num_channels)", "'Nadam' not in optimizer_name: optimizer_kw.update({'decay': decay}) if not control_mode: wd_dict", "= optimizer_dict[optimizer_name] optimizer_kw = {} if 'Adam' in optimizer_name: optimizer_kw", "self.model_control.compile(optimizer_control, loss=loss_name) self.loss_control = [] # for introspection t0 =", "embed_input_dim, (num_batches, batch_size, timesteps)) else: X = np.random.randn(num_batches, batch_size, timesteps,", "print(\"TF version: %s\" % tf.__version__) tf_eager = bool(os.environ[\"TF_EAGER\"] == \"True\")", "'sparse_categorical_crossentropy' reset_seeds(verbose=0) X, 
Y = self._make_data(num_batches, *batch_shape, embed_input_dim=embed_input_dim, sparse=True) reset_seeds(reset_graph_with_backend=K,", "del optimizer_custom, optimizer_control reset_seeds(reset_graph_with_backend=K, verbose=0) def _test_save_load(self, model, X, optimizer_name,", "('Adam', 'Nadam', 'SGD')]): return optimizer(lr=1e-4, weight_decays=wd, lr_multipliers=lr_m, use_cosine_annealing=use_cosine_annealing, t_cur=0, total_iterations=total_iterations,", "ipt = Input(batch_shape=batch_shape) if sparse: x = Embedding(embed_input_dim, embed_input_dim*3 +", "= self._make_model(**model_kw) optimizer_custom = self._make_optimizer(optimizer_name, self.model_custom, **optimizer_kw) self.model_custom.compile(optimizer_custom, loss=loss_name) self.loss_custom", "lmw in zip(saved_model_weights, loaded_model_weights): self.assertTrue(np.allclose(smw, lmw, rtol=0, atol=1e-8)) for sow,", "MISC TESTS PASSED >>\\n\", 'green') def test_control(self): # tests losses", "introspection t0 = time() for batch_num in range(num_batches): self.loss_custom +=", "( 1 + np.cos(np.pi*iteration / total_iterations))) return np.allclose(eta_history, eta_history_simul, rtol=0,", "test_misc(self): # tests of non-main features to improve coverage for", "Embedding from .. import Model, load_model from .. import l2", "decay}) if not control_mode: wd_dict = get_weight_decays(model) l2_extra = [2e-5]*(len(wd_dict)", "saved_model_preds = model.predict(X[0]) saved_model_weights = K.batch_get_value(model.trainable_weights) saved_optim_weights = K.batch_get_value(model.optimizer.weights) test_name", "'test__%f{}.h5'.format(np.random.random()) modelpath = os.path.join(tempfile.gettempdir(), test_name) model.save(modelpath) del model model =", "Model, load_model from .. import l2 from .. 
import maxnorm", "embed_input_dim=None, sparse=False): if dense_constraint is not None: dense_constraint = maxnorm(dense_constraint)", "'SGDW'} num_batches = 4 batch_size, timesteps = 16, 8 batch_shape", "batch_num in range(num_batches): self.loss_custom += [self.model_custom.train_on_batch( X[batch_num], Y[batch_num])] print(\"model_custom --", "decay=0, amsgrad=False, nesterov=False, control_mode=False): optimizer_dict = {'AdamW': AdamW, 'NadamW': NadamW,", "train may fail X, Y = self._make_data(num_batches, *batch_shape) self.eta_history =", "5 total_iterations = 0 model_kw = dict(batch_shape=batch_shape, dense_constraint=1, total_iterations=total_iterations, embed_input_dim=embed_input_dim,", "in ['AdamW', 'NadamW', 'SGDW']: cprint(\"<< TESTING {} OPTIMIZER >>\".format(optimizer_name), 'blue')", "annealing) for optimizer_name in ['AdamW', 'NadamW', 'SGDW']: cprint(\"<< TESTING {}", "running in graph mode\") class TestOptimizers(TestCase): def test_all(self): # Save/Load,", "range(num_batches): self.model.train_on_batch(X[batch_num], Y[batch_num]) self._test_save_load(self.model, X, optimizer_name, optimizer) # cleanup del", ">>\".format(optimizer_name), 'blue') pass_txt = \"Control Test Passed\" if optimizer_name ==", "num_channels = 16, 8, 4 batch_shape = (batch_size, timesteps, num_channels)", "= {'total_iterations': 0, 'decay': 1e-3, 'amsgrad': optimizer_name == 'AdamW', 'nesterov':", "amsgrad=False, nesterov=False, control_mode=False): optimizer_dict = {'AdamW': AdamW, 'NadamW': NadamW, 'SGDW':", "self.eta_history += [K_eval(self.model.optimizer.eta_t, K)] self.eta_history.pop(-(1 + int(tf_eager))) K.set_value(self.model.optimizer.t_cur, 0) self.assertTrue(self._valid_cosine_annealing(self.eta_history,", "**optimizer_kw) self.model_custom.compile(optimizer_custom, loss=loss_name) self.loss_custom = [] # for introspection t0", "zip(saved_optim_weights, loaded_optim_weights): self.assertTrue(np.allclose(sow, low, rtol=0, atol=1e-8)) @staticmethod def 
_make_data(num_batches, batch_size,", "print(\"model_custom -- %s batches -- time: %.2f sec\" % (num_batches,", "may fail X, Y = self._make_data(num_batches, *batch_shape) self.eta_history = []", "activation='sigmoid')(x) return Model(ipt, out) @staticmethod def _make_optimizer(optimizer_name, model, total_iterations, decay=0,", "'SGD')]): return optimizer(lr=1e-4, weight_decays=wd, lr_multipliers=lr_m, use_cosine_annealing=use_cosine_annealing, t_cur=0, total_iterations=total_iterations, **optimizer_kw) else:", "x = Dense(2, kernel_regularizer=l2(l2_reg), kernel_constraint=dense_constraint)(x) if sparse: out = Dense(2,", "AdamW, NadamW, SGDW from .. import get_weight_decays, fill_dict_in_order, reset_seeds, K_eval", "for stop-introspection for epoch in range(num_epochs): for batch_num in range(num_batches):", "num_batches = 4 batch_size, timesteps = 16, 8 batch_shape =", "self.model, optimizer reset_seeds(reset_graph_with_backend=K) cprint(\"\\n<< {} MISC TEST PASSED >>\\n\".format(optimizer_name), 'green')", "batch_size, timesteps)) else: X = np.random.randn(num_batches, batch_size, timesteps, num_channels) Y", "'Adam' in optimizer_name: optimizer_kw = {'amsgrad': amsgrad} elif 'SGD' in", "= 100 batch_size, timesteps = 16, 32 batch_shape = (batch_size,", "[1e-5, 1e-5, 1e-6] + l2_extra) lr_m = {'gru': 0.5} use_cosine_annealing", "from .. import AdamW, NadamW, SGDW from .. import get_weight_decays,", "else: wd, lr_m = None, None use_cosine_annealing = False if", "for epoch in range(num_epochs): for batch_num in range(num_batches): self.t_cur_history +=", "sparse: out = Dense(2, activation='softmax')(x) else: out = Dense(1, activation='sigmoid')(x)", "reset_seeds, K_eval print(\"TF version: %s\" % tf.__version__) tf_eager = bool(os.environ[\"TF_EAGER\"]", ".. import Adam, Nadam, SGD from .. 
import AdamW, NadamW,", "batch_size, timesteps, num_channels=None, embed_input_dim=None, sparse=False): if sparse: X = np.random.randint(0,", "% tf.__version__) tf_eager = bool(os.environ[\"TF_EAGER\"] == \"True\") if tf_eager: print(\"TF", "= bool(os.environ[\"TF_EAGER\"] == \"True\") if tf_eager: print(\"TF running eagerly\") else:", "== 'NadamW': self._test_control(optimizer_name) elif optimizer_name == 'SGDW': for nesterov in", "Y[batch_num])] print(\"model_control -- %s batches -- time: %.2f sec\" %", "cprint(\"<< TESTING {} OPTIMIZER >>\".format(optimizer_name), 'blue') reset_seeds() optimizer_kw = {'total_iterations':", "**optimizer_kw) self.model_control.compile(optimizer_control, loss=loss_name) self.loss_control = [] # for introspection t0", "load_model(modelpath, custom_objects={optimizer_name: optimizer}) loaded_model_preds = model.predict(X[0]) loaded_model_weights = K.batch_get_value(model.trainable_weights) loaded_optim_weights", "self.assertTrue(np.allclose(sow, low, rtol=0, atol=1e-8)) @staticmethod def _make_data(num_batches, batch_size, timesteps, num_channels=None,", "optimizer_name: optimizer_kw = {'amsgrad': amsgrad} elif 'SGD' in optimizer_name: optimizer_kw", "timesteps = 16, 8 batch_shape = (batch_size, timesteps) embed_input_dim =", "= \"Control Test Passed\" if optimizer_name == 'AdamW': for amsgrad", "print(\"TF running in graph mode\") class TestOptimizers(TestCase): def test_all(self): #", "low, rtol=0, atol=1e-8)) @staticmethod def _make_data(num_batches, batch_size, timesteps, num_channels=None, embed_input_dim=None,", "X, Y @staticmethod def _make_model(batch_shape, total_iterations, l2_reg=0, bidirectional=True, dense_constraint=None, embed_input_dim=None,", "to improve coverage for optimizer_name in ['AdamW', 'NadamW', 'SGDW']: cprint(\"<<", "False]: self._test_control(optimizer_name, amsgrad=amsgrad) print(\"\\n>> AdamW amsgrad={} {}\".format(amsgrad, pass_txt)) elif optimizer_name", "32 batch_shape = (batch_size, timesteps) 
embed_input_dim = 5 total_iterations =", "reset_seeds(reset_graph_with_backend=K, verbose=0) self.model_control = self._make_model(**model_kw) optimizer_control = self._make_optimizer(optimizer_name[:-1], self.model_control, **optimizer_kw)", "nesterov=False): optimizer_kw = dict(total_iterations=0, decay=1e-3, amsgrad=amsgrad, nesterov=nesterov, control_mode=True) num_batches =", "= K.batch_get_value(model.optimizer.weights) test_name = 'test__%f{}.h5'.format(np.random.random()) modelpath = os.path.join(tempfile.gettempdir(), test_name) model.save(modelpath)", "test_name = 'test__%f{}.h5'.format(np.random.random()) modelpath = os.path.join(tempfile.gettempdir(), test_name) model.save(modelpath) del model", "optimizer_name, optimizer): saved_model_preds = model.predict(X[0]) saved_model_weights = K.batch_get_value(model.trainable_weights) saved_optim_weights =", "nesterov, 'momentum': .9} if 'Nadam' not in optimizer_name: optimizer_kw.update({'decay': decay})", "Dense, GRU, Bidirectional, Embedding from .. import Model, load_model from", "not any([optimizer_name == name for name in ('Adam', 'Nadam', 'SGD')]):", "wd in weight_decays.values(): trues += (wd != 0) return (trues", "load_model from .. import l2 from .. import maxnorm from", "- t0)) loss_diff = np.abs(np.array(self.loss_custom) - np.array(self.loss_control)) print(\"%s max loss", "smw, lmw in zip(saved_model_weights, loaded_model_weights): self.assertTrue(np.allclose(smw, lmw, rtol=0, atol=1e-8)) for", "any([optimizer_name == name for name in ('Adam', 'Nadam', 'SGD')]): return", "import cprint from unittest import TestCase from .. 
import K", "# cleanup del self.model_custom, self.model_control del optimizer_custom, optimizer_control reset_seeds(reset_graph_with_backend=K, verbose=0)", "'decay': 1e-3, 'amsgrad': optimizer_name == 'AdamW', 'nesterov': optimizer_name == 'SGDW'}", "self._make_optimizer(optimizer_name, self.model_custom, **optimizer_kw) self.model_custom.compile(optimizer_custom, loss=loss_name) self.loss_custom = [] # for", "fill_dict_in_order(wd_dict, [1e-5, 1e-5, 1e-6] + l2_extra) lr_m = {'gru': 0.5}", "= gru(x) x = Dense(2, kernel_regularizer=l2(l2_reg), kernel_constraint=dense_constraint)(x) if sparse: out", "num_epochs)) self._test_save_load(self.model, X, optimizer_name, optimizer) # cleanup del self.model, optimizer", "mode\") class TestOptimizers(TestCase): def test_all(self): # Save/Load, Warm Restarts (w/", "= self._make_data(num_batches, *batch_shape, embed_input_dim=embed_input_dim, sparse=True) for batch_num in range(num_batches): self.model.train_on_batch(X[batch_num],", "out) @staticmethod def _make_optimizer(optimizer_name, model, total_iterations, decay=0, amsgrad=False, nesterov=False, control_mode=False):", "_valid_cosine_annealing(eta_history, total_iterations, num_epochs): eta_history_simul = [] for epoch in range(num_epochs):", "_make_data(num_batches, batch_size, timesteps, num_channels=None, embed_input_dim=None, sparse=False): if sparse: X =", "TEST PASSED >>\\n\".format(optimizer_name), 'green') cprint(\"\\n<< ALL MAIN TESTS PASSED >>\\n\",", "if 'Adam' in optimizer_name: optimizer_kw = {'amsgrad': amsgrad} elif 'SGD'", "self.loss_custom += [self.model_custom.train_on_batch( X[batch_num], Y[batch_num])] print(\"model_custom -- %s batches --", "test_name) model.save(modelpath) del model model = load_model(modelpath, custom_objects={optimizer_name: optimizer}) loaded_model_preds", "'amsgrad': optimizer_name == 'AdamW', 'nesterov': optimizer_name == 'SGDW'} num_batches =", "lr_multipliers=lr_m, use_cosine_annealing=use_cosine_annealing, t_cur=0, 
total_iterations=total_iterations, **optimizer_kw) else: return optimizer(lr=1e-4, **optimizer_kw) @staticmethod", "Model(ipt, out) @staticmethod def _make_optimizer(optimizer_name, model, total_iterations, decay=0, amsgrad=False, nesterov=False,", "= (batch_size, timesteps) embed_input_dim = 5 total_iterations = 0 model_kw", "import time from termcolor import cprint from unittest import TestCase", "tests of non-main features to improve coverage for optimizer_name in", "AdamW amsgrad={} {}\".format(amsgrad, pass_txt)) elif optimizer_name == 'NadamW': self._test_control(optimizer_name) elif", "self.assertTrue(np.allclose(self.loss_custom, self.loss_control, rtol=0, atol=1e-3)) # cleanup del self.model_custom, self.model_control del", "for epoch in range(num_epochs): for iteration in range(0, total_iterations): eta_history_simul.append(0.5", "reset_seeds() optimizer_kw = {'total_iterations': 0, 'decay': 1e-3, 'amsgrad': optimizer_name ==", "PASSED >>\\n\", 'green') def test_misc(self): # tests of non-main features", "self.model.compile(optimizer, loss='sparse_categorical_crossentropy') X, Y = self._make_data(num_batches, *batch_shape, embed_input_dim=embed_input_dim, sparse=True) for", "kernel_constraint=dense_constraint)(x) if sparse: out = Dense(2, activation='softmax')(x) else: out =", "cprint(\"\\n<< ALL CONTROL TESTS PASSED >>\\n\", 'green') def _test_control(self, optimizer_name,", "= self._make_model(batch_shape, total_iterations, embed_input_dim=embed_input_dim, dense_constraint=1, l2_reg=1e-4, bidirectional=False, sparse=True) optimizer =", "for optimizer_name in ['AdamW', 'NadamW', 'SGDW']: cprint(\"<< TESTING {} OPTIMIZER", "self.model, total_iterations) self.model.compile(optimizer, loss='binary_crossentropy') self.assertTrue(self._valid_weight_decays(self.model)) self.model._make_train_function() # else K.eval before", "*batch_shape) self.eta_history = [] # for stop-introspection self.t_cur_history = []", "cprint(\"\\n<< {} MISC TEST PASSED 
>>\\n\".format(optimizer_name), 'green') cprint(\"\\n<< ALL MISC", "+= [self.model_custom.train_on_batch( X[batch_num], Y[batch_num])] print(\"model_custom -- %s batches -- time:", ".. import AdamW, NadamW, SGDW from .. import get_weight_decays, fill_dict_in_order,", "8, 4 batch_shape = (batch_size, timesteps, num_channels) total_iterations = num_batches", "rtol=0, atol=1e-8)) for smw, lmw in zip(saved_model_weights, loaded_model_weights): self.assertTrue(np.allclose(smw, lmw,", "(batch_size, timesteps, num_channels) total_iterations = num_batches # due to warm", "{} {} >>\\n\".format(o_name, pass_txt.upper()), 'green') cprint(\"\\n<< ALL CONTROL TESTS PASSED", "optimizer_name == 'AdamW', 'nesterov': optimizer_name == 'SGDW'} num_batches = 4", "X[batch_num], Y[batch_num])] print(\"model_control -- %s batches -- time: %.2f sec\"", "'AdamW': for amsgrad in [True, False]: self._test_control(optimizer_name, amsgrad=amsgrad) print(\"\\n>> AdamW", "import tensorflow as tf from time import time from termcolor", "return X, Y @staticmethod def _make_model(batch_shape, total_iterations, l2_reg=0, bidirectional=True, dense_constraint=None,", "for name in ('Adam', 'Nadam', 'SGD')]): return optimizer(lr=1e-4, weight_decays=wd, lr_multipliers=lr_m,", "loss=loss_name) self.loss_control = [] # for introspection t0 = time()", "**optimizer_kw) else: return optimizer(lr=1e-4, **optimizer_kw) @staticmethod def _valid_weight_decays(model): weight_decays =", "amsgrad=amsgrad) print(\"\\n>> AdamW amsgrad={} {}\".format(amsgrad, pass_txt)) elif optimizer_name == 'NadamW':", "batch_size, timesteps = 16, 8 batch_shape = (batch_size, timesteps) embed_input_dim", "atol=1e-8)) @staticmethod def _make_data(num_batches, batch_size, timesteps, num_channels=None, embed_input_dim=None, sparse=False): if", "in range(0, total_iterations): eta_history_simul.append(0.5 * ( 1 + np.cos(np.pi*iteration /", "-- %s batches -- time: %.2f sec\" % (num_batches, time()", "reset_seeds() num_batches, num_epochs = 
25, 4 batch_size, timesteps, num_channels =", "model, total_iterations, decay=0, amsgrad=False, nesterov=False, control_mode=False): optimizer_dict = {'AdamW': AdamW,", "X, optimizer_name, optimizer): saved_model_preds = model.predict(X[0]) saved_model_weights = K.batch_get_value(model.trainable_weights) saved_optim_weights", "+ 1, mask_zero=True)(ipt) else: x = ipt gru = GRU(4,", "self.model.compile(optimizer, loss='binary_crossentropy') self.assertTrue(self._valid_weight_decays(self.model)) self.model._make_train_function() # else K.eval before train may", "cprint(\"<< TESTING {} OPTIMIZER >>\".format(optimizer_name), 'blue') pass_txt = \"Control Test", "timesteps, num_channels) total_iterations = num_batches # due to warm restarts", "activation='softmax')(x) else: out = Dense(1, activation='sigmoid')(x) return Model(ipt, out) @staticmethod", "= self._make_data(num_batches, *batch_shape, embed_input_dim=embed_input_dim, sparse=True) reset_seeds(reset_graph_with_backend=K, verbose=0) self.model_custom = self._make_model(**model_kw)", "control_mode=True) num_batches = 100 batch_size, timesteps = 16, 32 batch_shape", "'NadamW', 'SGDW']: cprint(\"<< TESTING {} OPTIMIZER >>\".format(optimizer_name), 'blue') reset_seeds() optimizer_kw", "optimizer_name, optimizer) # cleanup del self.model, optimizer reset_seeds(reset_graph_with_backend=K) cprint(\"\\n<< {}", "in optimizer_name: optimizer_kw = {'amsgrad': amsgrad} elif 'SGD' in optimizer_name:", "SGDW, 'Adam': Adam, 'Nadam': Nadam, 'SGD': SGD} optimizer = optimizer_dict[optimizer_name]", "optimizer_name in ['AdamW', 'NadamW', 'SGDW']: cprint(\"<< TESTING {} OPTIMIZER >>\".format(optimizer_name),", "losses against original optimizers' for optimizer_name in ['AdamW', 'NadamW', 'SGDW']:", "to warm restarts self.model = self._make_model(batch_shape, total_iterations) optimizer = self._make_optimizer(optimizer_name,", "in zip(saved_optim_weights, loaded_optim_weights): self.assertTrue(np.allclose(sow, low, rtol=0, 
atol=1e-8)) @staticmethod def _make_data(num_batches,", "False]: self._test_control(optimizer_name, nesterov=nesterov) print(\"\\n>> SGDW nesterov={} {}\".format(nesterov, pass_txt)) o_name =", "loss_diff = np.abs(np.array(self.loss_custom) - np.array(self.loss_control)) print(\"%s max loss diff: %e\"", "t0)) reset_seeds(reset_graph_with_backend=K, verbose=0) self.model_control = self._make_model(**model_kw) optimizer_control = self._make_optimizer(optimizer_name[:-1], self.model_control,", "use_cosine_annealing = False if not any([optimizer_name == name for name", "self.eta_history = [] # for stop-introspection self.t_cur_history = [] #", "verbose=0) def _test_save_load(self, model, X, optimizer_name, optimizer): saved_model_preds = model.predict(X[0])", "optimizer) # cleanup del self.model, optimizer reset_seeds(reset_graph_with_backend=K) cprint(\"\\n<< {} MAIN", "= 0 model_kw = dict(batch_shape=batch_shape, dense_constraint=1, total_iterations=total_iterations, embed_input_dim=embed_input_dim, l2_reg=0, bidirectional=False,", "def _valid_cosine_annealing(eta_history, total_iterations, num_epochs): eta_history_simul = [] for epoch in", "(num_batches, time() - t0)) reset_seeds(reset_graph_with_backend=K, verbose=0) self.model_control = self._make_model(**model_kw) optimizer_control", "= Dense(2, kernel_regularizer=l2(l2_reg), kernel_constraint=dense_constraint)(x) if sparse: out = Dense(2, activation='softmax')(x)", "ALL MAIN TESTS PASSED >>\\n\", 'green') def test_misc(self): # tests", "from .. import K from .. 
import Input, Dense, GRU,", "custom_objects={optimizer_name: optimizer}) loaded_model_preds = model.predict(X[0]) loaded_model_weights = K.batch_get_value(model.trainable_weights) loaded_optim_weights =", "x = Embedding(embed_input_dim, embed_input_dim*3 + 1, mask_zero=True)(ipt) else: x =", "== \"True\") if tf_eager: print(\"TF running eagerly\") else: tf.compat.v1.disable_eager_execution() print(\"TF", "from time import time from termcolor import cprint from unittest", "= K.batch_get_value(model.trainable_weights) saved_optim_weights = K.batch_get_value(model.optimizer.weights) test_name = 'test__%f{}.h5'.format(np.random.random()) modelpath =", "nesterov={} {}\".format(nesterov, pass_txt)) o_name = optimizer_name cprint(\"\\n<< {} {} >>\\n\".format(o_name,", "sparse=False): if sparse: X = np.random.randint(0, embed_input_dim, (num_batches, batch_size, timesteps))", "# due to warm restarts self.model = self._make_model(batch_shape, total_iterations) optimizer", "numpy as np import tensorflow as tf from time import", "sparse=True) for batch_num in range(num_batches): self.model.train_on_batch(X[batch_num], Y[batch_num]) self._test_save_load(self.model, X, optimizer_name,", "= 16, 32 batch_shape = (batch_size, timesteps) embed_input_dim = 5", "== 'SGDW': for nesterov in [True, False]: self._test_control(optimizer_name, nesterov=nesterov) print(\"\\n>>", "termcolor import cprint from unittest import TestCase from .. 
import", "X, Y = self._make_data(num_batches, *batch_shape, embed_input_dim=embed_input_dim, sparse=True) reset_seeds(reset_graph_with_backend=K, verbose=0) self.model_custom", "total_iterations = 0 model_kw = dict(batch_shape=batch_shape, dense_constraint=1, total_iterations=total_iterations, embed_input_dim=embed_input_dim, l2_reg=0,", "diff: %e\" % (optimizer_name, np.max(loss_diff))) self.assertTrue(np.allclose(self.loss_custom, self.loss_control, rtol=0, atol=1e-3)) #", "{} MISC TEST PASSED >>\\n\".format(optimizer_name), 'green') cprint(\"\\n<< ALL MISC TESTS", "num_epochs = 25, 4 batch_size, timesteps, num_channels = 16, 8,", "verbose=0) self.model_control = self._make_model(**model_kw) optimizer_control = self._make_optimizer(optimizer_name[:-1], self.model_control, **optimizer_kw) self.model_control.compile(optimizer_control,", "self._make_optimizer(optimizer_name, self.model, **optimizer_kw) self.model.compile(optimizer, loss='sparse_categorical_crossentropy') X, Y = self._make_data(num_batches, *batch_shape,", "_test_save_load(self, model, X, optimizer_name, optimizer): saved_model_preds = model.predict(X[0]) saved_model_weights =", "reset_seeds(reset_graph_with_backend=K) cprint(\"\\n<< {} MISC TEST PASSED >>\\n\".format(optimizer_name), 'green') cprint(\"\\n<< ALL", "{} OPTIMIZER >>\".format(optimizer_name), 'blue') reset_seeds() num_batches, num_epochs = 25, 4", "= self._make_model(batch_shape, total_iterations) optimizer = self._make_optimizer(optimizer_name, self.model, total_iterations) self.model.compile(optimizer, loss='binary_crossentropy')", "from .. 
import Input, Dense, GRU, Bidirectional, Embedding from ..", "dense_constraint=None, embed_input_dim=None, sparse=False): if dense_constraint is not None: dense_constraint =", "% (num_batches, time() - t0)) reset_seeds(reset_graph_with_backend=K, verbose=0) self.model_control = self._make_model(**model_kw)", "optimizer): saved_model_preds = model.predict(X[0]) saved_model_weights = K.batch_get_value(model.trainable_weights) saved_optim_weights = K.batch_get_value(model.optimizer.weights)", "self.assertTrue(np.allclose(saved_model_preds, loaded_model_preds, rtol=0, atol=1e-8)) for smw, lmw in zip(saved_model_weights, loaded_model_weights):", "optimizer_custom = self._make_optimizer(optimizer_name, self.model_custom, **optimizer_kw) self.model_custom.compile(optimizer_custom, loss=loss_name) self.loss_custom = []", "{} if 'Adam' in optimizer_name: optimizer_kw = {'amsgrad': amsgrad} elif", "= [] for epoch in range(num_epochs): for iteration in range(0,", "optimizer_name, amsgrad=False, nesterov=False): optimizer_kw = dict(total_iterations=0, decay=1e-3, amsgrad=amsgrad, nesterov=nesterov, control_mode=True)", "verbose=0) self.model_custom = self._make_model(**model_kw) optimizer_custom = self._make_optimizer(optimizer_name, self.model_custom, **optimizer_kw) self.model_custom.compile(optimizer_custom,", "control_mode: wd_dict = get_weight_decays(model) l2_extra = [2e-5]*(len(wd_dict) - 3) wd", "as np import tensorflow as tf from time import time", "optimizers' for optimizer_name in ['AdamW', 'NadamW', 'SGDW']: cprint(\"<< TESTING {}", "amsgrad} elif 'SGD' in optimizer_name: optimizer_kw = {'nesterov': nesterov, 'momentum':", "== 0) @staticmethod def _valid_cosine_annealing(eta_history, total_iterations, num_epochs): eta_history_simul = []", "TESTS PASSED >>\\n\", 'green') def _test_control(self, optimizer_name, amsgrad=False, nesterov=False): optimizer_kw", "saved_model_weights = K.batch_get_value(model.trainable_weights) saved_optim_weights = 
K.batch_get_value(model.optimizer.weights) test_name = 'test__%f{}.h5'.format(np.random.random()) modelpath", "for stop-introspection self.t_cur_history = [] # for stop-introspection for epoch", "atol=1e-8)) for smw, lmw in zip(saved_model_weights, loaded_model_weights): self.assertTrue(np.allclose(smw, lmw, rtol=0,", "not None: dense_constraint = maxnorm(dense_constraint) ipt = Input(batch_shape=batch_shape) if sparse:", "total_iterations, num_epochs): eta_history_simul = [] for epoch in range(num_epochs): for", "= 25, 4 batch_size, timesteps, num_channels = 16, 8, 4", "eta_history_simul.append(0.5 * ( 1 + np.cos(np.pi*iteration / total_iterations))) return np.allclose(eta_history,", "def test_misc(self): # tests of non-main features to improve coverage", "else: x = ipt gru = GRU(4, recurrent_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg)) if", "t0 = time() for batch_num in range(num_batches): self.loss_control += [self.model_control.train_on_batch(", "def _make_data(num_batches, batch_size, timesteps, num_channels=None, embed_input_dim=None, sparse=False): if sparse: X", "[K_eval(self.model.optimizer.eta_t, K)] self.eta_history.pop(-(1 + int(tf_eager))) K.set_value(self.model.optimizer.t_cur, 0) self.assertTrue(self._valid_cosine_annealing(self.eta_history, total_iterations, num_epochs))", "-- time: %.2f sec\" % (num_batches, time() - t0)) loss_diff", "[self.model_custom.train_on_batch( X[batch_num], Y[batch_num])] print(\"model_custom -- %s batches -- time: %.2f", "'SGD' in optimizer_name: optimizer_kw = {'nesterov': nesterov, 'momentum': .9} if", "unittest import TestCase from .. import K from .. 
import", "+= [K_eval(self.model.optimizer.eta_t, K)] self.eta_history.pop(-(1 + int(tf_eager))) K.set_value(self.model.optimizer.t_cur, 0) self.assertTrue(self._valid_cosine_annealing(self.eta_history, total_iterations,", "ALL CONTROL TESTS PASSED >>\\n\", 'green') def _test_control(self, optimizer_name, amsgrad=False,", "return (trues == 0) @staticmethod def _valid_cosine_annealing(eta_history, total_iterations, num_epochs): eta_history_simul", "(num_batches, batch_size)) return X, Y @staticmethod def _make_model(batch_shape, total_iterations, l2_reg=0,", "Y @staticmethod def _make_model(batch_shape, total_iterations, l2_reg=0, bidirectional=True, dense_constraint=None, embed_input_dim=None, sparse=False):", "self.assertTrue(self._valid_cosine_annealing(self.eta_history, total_iterations, num_epochs)) self._test_save_load(self.model, X, optimizer_name, optimizer) # cleanup del", "fill_dict_in_order, reset_seeds, K_eval print(\"TF version: %s\" % tf.__version__) tf_eager =", "bidirectional=True, dense_constraint=None, embed_input_dim=None, sparse=False): if dense_constraint is not None: dense_constraint", "self.model_control del optimizer_custom, optimizer_control reset_seeds(reset_graph_with_backend=K, verbose=0) def _test_save_load(self, model, X,", "loaded_optim_weights = K.batch_get_value(model.optimizer.weights) self.assertTrue(np.allclose(saved_model_preds, loaded_model_preds, rtol=0, atol=1e-8)) for smw, lmw", "gru = GRU(4, recurrent_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg)) if bidirectional: x = Bidirectional(gru)(x)", "Test Passed\" if optimizer_name == 'AdamW': for amsgrad in [True,", "= {'amsgrad': amsgrad} elif 'SGD' in optimizer_name: optimizer_kw = {'nesterov':", "rtol=0, atol=1e-3)) # cleanup del self.model_custom, self.model_control del optimizer_custom, optimizer_control", "loaded_model_preds, rtol=0, atol=1e-8)) for smw, lmw in zip(saved_model_weights, loaded_model_weights): self.assertTrue(np.allclose(smw,", "% (num_batches, time() - 
t0)) loss_diff = np.abs(np.array(self.loss_custom) - np.array(self.loss_control))", "os.path.join(tempfile.gettempdir(), test_name) model.save(modelpath) del model model = load_model(modelpath, custom_objects={optimizer_name: optimizer})", ">>\\n\", 'green') def _test_control(self, optimizer_name, amsgrad=False, nesterov=False): optimizer_kw = dict(total_iterations=0,", "Bidirectional(gru)(x) else: x = gru(x) x = Dense(2, kernel_regularizer=l2(l2_reg), kernel_constraint=dense_constraint)(x)", "zip(saved_model_weights, loaded_model_weights): self.assertTrue(np.allclose(smw, lmw, rtol=0, atol=1e-8)) for sow, low in", "graph mode\") class TestOptimizers(TestCase): def test_all(self): # Save/Load, Warm Restarts", "from .. import l2 from .. import maxnorm from ..", "pass_txt)) elif optimizer_name == 'NadamW': self._test_control(optimizer_name) elif optimizer_name == 'SGDW':", "self.t_cur_history = [] # for stop-introspection for epoch in range(num_epochs):", "total_iterations, decay=0, amsgrad=False, nesterov=False, control_mode=False): optimizer_dict = {'AdamW': AdamW, 'NadamW':", "AdamW, 'NadamW': NadamW, 'SGDW': SGDW, 'Adam': Adam, 'Nadam': Nadam, 'SGD':", "0, 'decay': 1e-3, 'amsgrad': optimizer_name == 'AdamW', 'nesterov': optimizer_name ==", "weight_decays = get_weight_decays(model) trues = 0 for wd in weight_decays.values():", "self._make_optimizer(optimizer_name, self.model, total_iterations) self.model.compile(optimizer, loss='binary_crossentropy') self.assertTrue(self._valid_weight_decays(self.model)) self.model._make_train_function() # else K.eval", "test_control(self): # tests losses against original optimizers' for optimizer_name in", "# tests losses against original optimizers' for optimizer_name in ['AdamW',", "= 4 batch_size, timesteps = 16, 8 batch_shape = (batch_size,", "Dense(2, kernel_regularizer=l2(l2_reg), kernel_constraint=dense_constraint)(x) if sparse: out = Dense(2, activation='softmax')(x) else:", "batches -- time: %.2f sec\" % (num_batches, 
time() - t0))", "original optimizers' for optimizer_name in ['AdamW', 'NadamW', 'SGDW']: cprint(\"<< TESTING", "'green') cprint(\"\\n<< ALL MISC TESTS PASSED >>\\n\", 'green') def test_control(self):", "+= [K_eval(self.model.optimizer.t_cur, K)] self.eta_history += [K_eval(self.model.optimizer.eta_t, K)] self.model.train_on_batch(X[batch_num], Y[batch_num]) self.eta_history", "TestCase from .. import K from .. import Input, Dense,", "Nadam, 'SGD': SGD} optimizer = optimizer_dict[optimizer_name] optimizer_kw = {} if", "0 for wd in weight_decays.values(): trues += (wd != 0)", "optimizer_kw = {'amsgrad': amsgrad} elif 'SGD' in optimizer_name: optimizer_kw =", "'NadamW', 'SGDW']: cprint(\"<< TESTING {} OPTIMIZER >>\".format(optimizer_name), 'blue') pass_txt =", "model.predict(X[0]) saved_model_weights = K.batch_get_value(model.trainable_weights) saved_optim_weights = K.batch_get_value(model.optimizer.weights) test_name = 'test__%f{}.h5'.format(np.random.random())", "- np.array(self.loss_control)) print(\"%s max loss diff: %e\" % (optimizer_name, np.max(loss_diff)))", "# for stop-introspection for epoch in range(num_epochs): for batch_num in", "from .. import maxnorm from .. 
import Adam, Nadam, SGD", "'green') cprint(\"\\n<< ALL MAIN TESTS PASSED >>\\n\", 'green') def test_misc(self):", "return optimizer(lr=1e-4, weight_decays=wd, lr_multipliers=lr_m, use_cosine_annealing=use_cosine_annealing, t_cur=0, total_iterations=total_iterations, **optimizer_kw) else: return", "loss='sparse_categorical_crossentropy') X, Y = self._make_data(num_batches, *batch_shape, embed_input_dim=embed_input_dim, sparse=True) for batch_num", "= 16, 8 batch_shape = (batch_size, timesteps) embed_input_dim = 5", "warm restarts self.model = self._make_model(batch_shape, total_iterations) optimizer = self._make_optimizer(optimizer_name, self.model,", "self.eta_history.pop(-(1 + int(tf_eager))) K.set_value(self.model.optimizer.t_cur, 0) self.assertTrue(self._valid_cosine_annealing(self.eta_history, total_iterations, num_epochs)) self._test_save_load(self.model, X,", "K_eval print(\"TF version: %s\" % tf.__version__) tf_eager = bool(os.environ[\"TF_EAGER\"] ==", "{'total_iterations': 0, 'decay': 1e-3, 'amsgrad': optimizer_name == 'AdamW', 'nesterov': optimizer_name", "self.model = self._make_model(batch_shape, total_iterations) optimizer = self._make_optimizer(optimizer_name, self.model, total_iterations) self.model.compile(optimizer,", "0.5} use_cosine_annealing = True else: wd, lr_m = None, None", "t0)) loss_diff = np.abs(np.array(self.loss_custom) - np.array(self.loss_control)) print(\"%s max loss diff:", "Y = self._make_data(num_batches, *batch_shape, embed_input_dim=embed_input_dim, sparse=True) for batch_num in range(num_batches):", "weight_decays.values(): trues += (wd != 0) return (trues == 0)", "(batch_size, timesteps) embed_input_dim = 5 total_iterations = 0 self.model =", "eta_history_simul = [] for epoch in range(num_epochs): for iteration in", "= (batch_size, timesteps, num_channels) total_iterations = num_batches # due to", "t0 = time() for batch_num in range(num_batches): self.loss_custom += [self.model_custom.train_on_batch(", "num_batches # due to 
warm restarts self.model = self._make_model(batch_shape, total_iterations)", "batch_size, timesteps = 16, 32 batch_shape = (batch_size, timesteps) embed_input_dim", "batch_size)) return X, Y @staticmethod def _make_model(batch_shape, total_iterations, l2_reg=0, bidirectional=True,", "if not control_mode: wd_dict = get_weight_decays(model) l2_extra = [2e-5]*(len(wd_dict) -", "= K.batch_get_value(model.trainable_weights) loaded_optim_weights = K.batch_get_value(model.optimizer.weights) self.assertTrue(np.allclose(saved_model_preds, loaded_model_preds, rtol=0, atol=1e-8)) for", "features to improve coverage for optimizer_name in ['AdamW', 'NadamW', 'SGDW']:", "= self._make_optimizer(optimizer_name, self.model, **optimizer_kw) self.model.compile(optimizer, loss='sparse_categorical_crossentropy') X, Y = self._make_data(num_batches,", "for sow, low in zip(saved_optim_weights, loaded_optim_weights): self.assertTrue(np.allclose(sow, low, rtol=0, atol=1e-8))", "stop-introspection self.t_cur_history = [] # for stop-introspection for epoch in", "'SGDW']: cprint(\"<< TESTING {} OPTIMIZER >>\".format(optimizer_name), 'blue') reset_seeds() num_batches, num_epochs", "= 0 for wd in weight_decays.values(): trues += (wd !=", ">>\\n\".format(o_name, pass_txt.upper()), 'green') cprint(\"\\n<< ALL CONTROL TESTS PASSED >>\\n\", 'green')", "timesteps, num_channels) Y = np.random.randint(0, 2, (num_batches, batch_size)) return X,", "_valid_weight_decays(model): weight_decays = get_weight_decays(model) trues = 0 for wd in", "self._make_data(num_batches, *batch_shape, embed_input_dim=embed_input_dim, sparse=True) reset_seeds(reset_graph_with_backend=K, verbose=0) self.model_custom = self._make_model(**model_kw) optimizer_custom", "TESTS PASSED >>\\n\", 'green') def test_control(self): # tests losses against", "loaded_model_weights): self.assertTrue(np.allclose(smw, lmw, rtol=0, atol=1e-8)) for sow, low in zip(saved_optim_weights,", "batch_num in range(num_batches): 
self.model.train_on_batch(X[batch_num], Y[batch_num]) self._test_save_load(self.model, X, optimizer_name, optimizer) #", "== 'SGDW'} num_batches = 4 batch_size, timesteps = 16, 8", "timesteps = 16, 32 batch_shape = (batch_size, timesteps) embed_input_dim =", "import AdamW, NadamW, SGDW from .. import get_weight_decays, fill_dict_in_order, reset_seeds,", "trues = 0 for wd in weight_decays.values(): trues += (wd", "'SGDW': for nesterov in [True, False]: self._test_control(optimizer_name, nesterov=nesterov) print(\"\\n>> SGDW", "bidirectional: x = Bidirectional(gru)(x) else: x = gru(x) x =", "CONTROL TESTS PASSED >>\\n\", 'green') def _test_control(self, optimizer_name, amsgrad=False, nesterov=False):", "optimizer = self._make_optimizer(optimizer_name, self.model, **optimizer_kw) self.model.compile(optimizer, loss='sparse_categorical_crossentropy') X, Y =", "= maxnorm(dense_constraint) ipt = Input(batch_shape=batch_shape) if sparse: x = Embedding(embed_input_dim,", "(optimizer_name, np.max(loss_diff))) self.assertTrue(np.allclose(self.loss_custom, self.loss_control, rtol=0, atol=1e-3)) # cleanup del self.model_custom,", "of non-main features to improve coverage for optimizer_name in ['AdamW',", "== 'AdamW', 'nesterov': optimizer_name == 'SGDW'} num_batches = 4 batch_size,", "optimizer_custom, optimizer_control reset_seeds(reset_graph_with_backend=K, verbose=0) def _test_save_load(self, model, X, optimizer_name, optimizer):", "print(\"\\n>> SGDW nesterov={} {}\".format(nesterov, pass_txt)) o_name = optimizer_name cprint(\"\\n<< {}", "optimizer_control reset_seeds(reset_graph_with_backend=K, verbose=0) def _test_save_load(self, model, X, optimizer_name, optimizer): saved_model_preds", "= Input(batch_shape=batch_shape) if sparse: x = Embedding(embed_input_dim, embed_input_dim*3 + 1,", "= 5 total_iterations = 0 model_kw = dict(batch_shape=batch_shape, dense_constraint=1, total_iterations=total_iterations,", "num_channels=None, embed_input_dim=None, sparse=False): if 
sparse: X = np.random.randint(0, embed_input_dim, (num_batches,", "= np.random.randint(0, 2, (num_batches, batch_size)) return X, Y @staticmethod def", "wd = fill_dict_in_order(wd_dict, [1e-5, 1e-5, 1e-6] + l2_extra) lr_m =", "pass_txt)) o_name = optimizer_name cprint(\"\\n<< {} {} >>\\n\".format(o_name, pass_txt.upper()), 'green')", "model model = load_model(modelpath, custom_objects={optimizer_name: optimizer}) loaded_model_preds = model.predict(X[0]) loaded_model_weights", "Y = np.random.randint(0, 2, (num_batches, batch_size)) return X, Y @staticmethod", "[] # for stop-introspection for epoch in range(num_epochs): for batch_num", "4 batch_size, timesteps = 16, 8 batch_shape = (batch_size, timesteps)", "{}\".format(amsgrad, pass_txt)) elif optimizer_name == 'NadamW': self._test_control(optimizer_name) elif optimizer_name ==", "% (optimizer_name, np.max(loss_diff))) self.assertTrue(np.allclose(self.loss_custom, self.loss_control, rtol=0, atol=1e-3)) # cleanup del", "K.eval before train may fail X, Y = self._make_data(num_batches, *batch_shape)", "del model model = load_model(modelpath, custom_objects={optimizer_name: optimizer}) loaded_model_preds = model.predict(X[0])", "optimizer}) loaded_model_preds = model.predict(X[0]) loaded_model_weights = K.batch_get_value(model.trainable_weights) loaded_optim_weights = K.batch_get_value(model.optimizer.weights)", "in zip(saved_model_weights, loaded_model_weights): self.assertTrue(np.allclose(smw, lmw, rtol=0, atol=1e-8)) for sow, low", "self.model._make_train_function() # else K.eval before train may fail X, Y", "- t0)) reset_seeds(reset_graph_with_backend=K, verbose=0) self.model_control = self._make_model(**model_kw) optimizer_control = self._make_optimizer(optimizer_name[:-1],", "= K.batch_get_value(model.optimizer.weights) self.assertTrue(np.allclose(saved_model_preds, loaded_model_preds, rtol=0, atol=1e-8)) for smw, lmw in", "name in ('Adam', 'Nadam', 'SGD')]): return optimizer(lr=1e-4, weight_decays=wd, 
lr_multipliers=lr_m, use_cosine_annealing=use_cosine_annealing,", "= os.path.join(tempfile.gettempdir(), test_name) model.save(modelpath) del model model = load_model(modelpath, custom_objects={optimizer_name:", "loaded_optim_weights): self.assertTrue(np.allclose(sow, low, rtol=0, atol=1e-8)) @staticmethod def _make_data(num_batches, batch_size, timesteps,", "num_batches = 100 batch_size, timesteps = 16, 32 batch_shape =", "optimizer_kw = {'nesterov': nesterov, 'momentum': .9} if 'Nadam' not in", "test_all(self): # Save/Load, Warm Restarts (w/ cosine annealing) for optimizer_name", "= time() for batch_num in range(num_batches): self.loss_custom += [self.model_custom.train_on_batch( X[batch_num],", "'NadamW': NadamW, 'SGDW': SGDW, 'Adam': Adam, 'Nadam': Nadam, 'SGD': SGD}", "Bidirectional, Embedding from .. import Model, load_model from .. import", "amsgrad=amsgrad, nesterov=nesterov, control_mode=True) num_batches = 100 batch_size, timesteps = 16,", "if sparse: X = np.random.randint(0, embed_input_dim, (num_batches, batch_size, timesteps)) else:", "25, 4 batch_size, timesteps, num_channels = 16, 8, 4 batch_shape", "optimizer_name == 'NadamW': self._test_control(optimizer_name) elif optimizer_name == 'SGDW': for nesterov", "version: %s\" % tf.__version__) tf_eager = bool(os.environ[\"TF_EAGER\"] == \"True\") if", "= ipt gru = GRU(4, recurrent_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg)) if bidirectional: x", "cprint from unittest import TestCase from .. 
import K from", "recurrent_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg)) if bidirectional: x = Bidirectional(gru)(x) else: x =", "cprint(\"\\n<< {} MAIN TEST PASSED >>\\n\".format(optimizer_name), 'green') cprint(\"\\n<< ALL MAIN", "'SGDW': SGDW, 'Adam': Adam, 'Nadam': Nadam, 'SGD': SGD} optimizer =", "decay=1e-3, amsgrad=amsgrad, nesterov=nesterov, control_mode=True) num_batches = 100 batch_size, timesteps =", "= {'nesterov': nesterov, 'momentum': .9} if 'Nadam' not in optimizer_name:", "print(\"TF running eagerly\") else: tf.compat.v1.disable_eager_execution() print(\"TF running in graph mode\")", "Restarts (w/ cosine annealing) for optimizer_name in ['AdamW', 'NadamW', 'SGDW']:", "{} OPTIMIZER >>\".format(optimizer_name), 'blue') reset_seeds() optimizer_kw = {'total_iterations': 0, 'decay':", "!= 0) return (trues == 0) @staticmethod def _valid_cosine_annealing(eta_history, total_iterations,", "timesteps) embed_input_dim = 5 total_iterations = 0 model_kw = dict(batch_shape=batch_shape,", "self._make_optimizer(optimizer_name[:-1], self.model_control, **optimizer_kw) self.model_control.compile(optimizer_control, loss=loss_name) self.loss_control = [] # for", "np.random.randint(0, 2, (num_batches, batch_size)) return X, Y @staticmethod def _make_model(batch_shape,", "del self.model, optimizer reset_seeds(reset_graph_with_backend=K) cprint(\"\\n<< {} MISC TEST PASSED >>\\n\".format(optimizer_name),", "np.random.randn(num_batches, batch_size, timesteps, num_channels) Y = np.random.randint(0, 2, (num_batches, batch_size))", "batch_shape = (batch_size, timesteps, num_channels) total_iterations = num_batches # due", "not in optimizer_name: optimizer_kw.update({'decay': decay}) if not control_mode: wd_dict =", "= Dense(1, activation='sigmoid')(x) return Model(ipt, out) @staticmethod def _make_optimizer(optimizer_name, model,", "self._test_control(optimizer_name, amsgrad=amsgrad) print(\"\\n>> AdamW amsgrad={} {}\".format(amsgrad, pass_txt)) elif optimizer_name ==", 
"get_weight_decays(model) l2_extra = [2e-5]*(len(wd_dict) - 3) wd = fill_dict_in_order(wd_dict, [1e-5,", "in graph mode\") class TestOptimizers(TestCase): def test_all(self): # Save/Load, Warm", "optimizer_kw = {} if 'Adam' in optimizer_name: optimizer_kw = {'amsgrad':", "if 'Nadam' not in optimizer_name: optimizer_kw.update({'decay': decay}) if not control_mode:", "TEST PASSED >>\\n\".format(optimizer_name), 'green') cprint(\"\\n<< ALL MISC TESTS PASSED >>\\n\",", "'SGDW']: cprint(\"<< TESTING {} OPTIMIZER >>\".format(optimizer_name), 'blue') pass_txt = \"Control", "np import tensorflow as tf from time import time from", "'green') def _test_control(self, optimizer_name, amsgrad=False, nesterov=False): optimizer_kw = dict(total_iterations=0, decay=1e-3,", "# for introspection t0 = time() for batch_num in range(num_batches):", "= [] # for introspection t0 = time() for batch_num", "'blue') reset_seeds() optimizer_kw = {'total_iterations': 0, 'decay': 1e-3, 'amsgrad': optimizer_name", "sparse=True) optimizer = self._make_optimizer(optimizer_name, self.model, **optimizer_kw) self.model.compile(optimizer, loss='sparse_categorical_crossentropy') X, Y", "loss diff: %e\" % (optimizer_name, np.max(loss_diff))) self.assertTrue(np.allclose(self.loss_custom, self.loss_control, rtol=0, atol=1e-3))", "'green') def test_misc(self): # tests of non-main features to improve", "from .. import Model, load_model from .. 
import l2 from", "return optimizer(lr=1e-4, **optimizer_kw) @staticmethod def _valid_weight_decays(model): weight_decays = get_weight_decays(model) trues", "K.batch_get_value(model.optimizer.weights) self.assertTrue(np.allclose(saved_model_preds, loaded_model_preds, rtol=0, atol=1e-8)) for smw, lmw in zip(saved_model_weights,", "**optimizer_kw) self.model.compile(optimizer, loss='sparse_categorical_crossentropy') X, Y = self._make_data(num_batches, *batch_shape, embed_input_dim=embed_input_dim, sparse=True)", "16, 32 batch_shape = (batch_size, timesteps) embed_input_dim = 5 total_iterations", "for wd in weight_decays.values(): trues += (wd != 0) return", "= np.abs(np.array(self.loss_custom) - np.array(self.loss_control)) print(\"%s max loss diff: %e\" %", "TESTING {} OPTIMIZER >>\".format(optimizer_name), 'blue') reset_seeds() num_batches, num_epochs = 25,", "num_channels) Y = np.random.randint(0, 2, (num_batches, batch_size)) return X, Y", "= [2e-5]*(len(wd_dict) - 3) wd = fill_dict_in_order(wd_dict, [1e-5, 1e-5, 1e-6]", "time: %.2f sec\" % (num_batches, time() - t0)) loss_diff =", "cleanup del self.model, optimizer reset_seeds(reset_graph_with_backend=K) cprint(\"\\n<< {} MISC TEST PASSED", "for batch_num in range(num_batches): self.loss_control += [self.model_control.train_on_batch( X[batch_num], Y[batch_num])] print(\"model_control", "+= (wd != 0) return (trues == 0) @staticmethod def", "+ int(tf_eager))) K.set_value(self.model.optimizer.t_cur, 0) self.assertTrue(self._valid_cosine_annealing(self.eta_history, total_iterations, num_epochs)) self._test_save_load(self.model, X, optimizer_name,", "for iteration in range(0, total_iterations): eta_history_simul.append(0.5 * ( 1 +", "kernel_regularizer=l2(l2_reg), kernel_constraint=dense_constraint)(x) if sparse: out = Dense(2, activation='softmax')(x) else: out", "['AdamW', 'NadamW', 'SGDW']: cprint(\"<< TESTING {} OPTIMIZER >>\".format(optimizer_name), 'blue') reset_seeds()", "np.random.randint(0, embed_input_dim, 
(num_batches, batch_size, timesteps)) else: X = np.random.randn(num_batches, batch_size,", "for batch_num in range(num_batches): self.loss_custom += [self.model_custom.train_on_batch( X[batch_num], Y[batch_num])] print(\"model_custom", "batch_num in range(num_batches): self.t_cur_history += [K_eval(self.model.optimizer.t_cur, K)] self.eta_history += [K_eval(self.model.optimizer.eta_t,", "= load_model(modelpath, custom_objects={optimizer_name: optimizer}) loaded_model_preds = model.predict(X[0]) loaded_model_weights = K.batch_get_value(model.trainable_weights)", "time import time from termcolor import cprint from unittest import", "= {'gru': 0.5} use_cosine_annealing = True else: wd, lr_m =", "Y[batch_num]) self.eta_history += [K_eval(self.model.optimizer.eta_t, K)] self.eta_history.pop(-(1 + int(tf_eager))) K.set_value(self.model.optimizer.t_cur, 0)", "= self._make_data(num_batches, *batch_shape) self.eta_history = [] # for stop-introspection self.t_cur_history", "self._make_data(num_batches, *batch_shape, embed_input_dim=embed_input_dim, sparse=True) for batch_num in range(num_batches): self.model.train_on_batch(X[batch_num], Y[batch_num])", "np.max(loss_diff))) self.assertTrue(np.allclose(self.loss_custom, self.loss_control, rtol=0, atol=1e-3)) # cleanup del self.model_custom, self.model_control", "mask_zero=True)(ipt) else: x = ipt gru = GRU(4, recurrent_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg))", "self.model.train_on_batch(X[batch_num], Y[batch_num]) self.eta_history += [K_eval(self.model.optimizer.eta_t, K)] self.eta_history.pop(-(1 + int(tf_eager))) K.set_value(self.model.optimizer.t_cur,", "NadamW, SGDW from .. 
import get_weight_decays, fill_dict_in_order, reset_seeds, K_eval print(\"TF", "else: X = np.random.randn(num_batches, batch_size, timesteps, num_channels) Y = np.random.randint(0,", "nesterov=nesterov) print(\"\\n>> SGDW nesterov={} {}\".format(nesterov, pass_txt)) o_name = optimizer_name cprint(\"\\n<<", "16, 8, 4 batch_shape = (batch_size, timesteps, num_channels) total_iterations =", "del self.model, optimizer reset_seeds(reset_graph_with_backend=K) cprint(\"\\n<< {} MAIN TEST PASSED >>\\n\".format(optimizer_name),", "5 total_iterations = 0 self.model = self._make_model(batch_shape, total_iterations, embed_input_dim=embed_input_dim, dense_constraint=1,", "timesteps)) else: X = np.random.randn(num_batches, batch_size, timesteps, num_channels) Y =", "stop-introspection for epoch in range(num_epochs): for batch_num in range(num_batches): self.t_cur_history", "import tempfile import numpy as np import tensorflow as tf", "0) self.assertTrue(self._valid_cosine_annealing(self.eta_history, total_iterations, num_epochs)) self._test_save_load(self.model, X, optimizer_name, optimizer) # cleanup", "'SGD': SGD} optimizer = optimizer_dict[optimizer_name] optimizer_kw = {} if 'Adam'", "use_cosine_annealing = True else: wd, lr_m = None, None use_cosine_annealing", "def _valid_weight_decays(model): weight_decays = get_weight_decays(model) trues = 0 for wd", "K)] self.eta_history.pop(-(1 + int(tf_eager))) K.set_value(self.model.optimizer.t_cur, 0) self.assertTrue(self._valid_cosine_annealing(self.eta_history, total_iterations, num_epochs)) self._test_save_load(self.model,", "l2_reg=0, bidirectional=True, dense_constraint=None, embed_input_dim=None, sparse=False): if dense_constraint is not None:", "GRU(4, recurrent_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg)) if bidirectional: x = Bidirectional(gru)(x) else: x", ">>\\n\", 'green') def test_misc(self): # tests of non-main features to", "Dense(1, activation='sigmoid')(x) return Model(ipt, out) @staticmethod def 
_make_optimizer(optimizer_name, model, total_iterations,", "optimizer(lr=1e-4, weight_decays=wd, lr_multipliers=lr_m, use_cosine_annealing=use_cosine_annealing, t_cur=0, total_iterations=total_iterations, **optimizer_kw) else: return optimizer(lr=1e-4,", "embed_input_dim*3 + 1, mask_zero=True)(ipt) else: x = ipt gru =", "4 batch_shape = (batch_size, timesteps, num_channels) total_iterations = num_batches #", "optimizer reset_seeds(reset_graph_with_backend=K) cprint(\"\\n<< {} MISC TEST PASSED >>\\n\".format(optimizer_name), 'green') cprint(\"\\n<<", "rtol=0, atol=1e-8)) for sow, low in zip(saved_optim_weights, loaded_optim_weights): self.assertTrue(np.allclose(sow, low,", "= 'sparse_categorical_crossentropy' reset_seeds(verbose=0) X, Y = self._make_data(num_batches, *batch_shape, embed_input_dim=embed_input_dim, sparse=True)", "range(num_epochs): for iteration in range(0, total_iterations): eta_history_simul.append(0.5 * ( 1", "bidirectional=False, sparse=True) loss_name = 'sparse_categorical_crossentropy' reset_seeds(verbose=0) X, Y = self._make_data(num_batches,", "import numpy as np import tensorflow as tf from time", "coverage for optimizer_name in ['AdamW', 'NadamW', 'SGDW']: cprint(\"<< TESTING {}", "'NadamW', 'SGDW']: cprint(\"<< TESTING {} OPTIMIZER >>\".format(optimizer_name), 'blue') reset_seeds() num_batches,", "improve coverage for optimizer_name in ['AdamW', 'NadamW', 'SGDW']: cprint(\"<< TESTING", "@staticmethod def _make_data(num_batches, batch_size, timesteps, num_channels=None, embed_input_dim=None, sparse=False): if sparse:", "sparse=True) reset_seeds(reset_graph_with_backend=K, verbose=0) self.model_custom = self._make_model(**model_kw) optimizer_custom = self._make_optimizer(optimizer_name, self.model_custom,", "if tf_eager: print(\"TF running eagerly\") else: tf.compat.v1.disable_eager_execution() print(\"TF running in", "def test_control(self): # tests losses against original optimizers' for optimizer_name", "total_iterations) 
self.model.compile(optimizer, loss='binary_crossentropy') self.assertTrue(self._valid_weight_decays(self.model)) self.model._make_train_function() # else K.eval before train", ".. import Input, Dense, GRU, Bidirectional, Embedding from .. import", "1e-3, 'amsgrad': optimizer_name == 'AdamW', 'nesterov': optimizer_name == 'SGDW'} num_batches", "t_cur=0, total_iterations=total_iterations, **optimizer_kw) else: return optimizer(lr=1e-4, **optimizer_kw) @staticmethod def _valid_weight_decays(model):", "= self._make_optimizer(optimizer_name, self.model, total_iterations) self.model.compile(optimizer, loss='binary_crossentropy') self.assertTrue(self._valid_weight_decays(self.model)) self.model._make_train_function() # else", "eagerly\") else: tf.compat.v1.disable_eager_execution() print(\"TF running in graph mode\") class TestOptimizers(TestCase):", "_test_control(self, optimizer_name, amsgrad=False, nesterov=False): optimizer_kw = dict(total_iterations=0, decay=1e-3, amsgrad=amsgrad, nesterov=nesterov,", ">>\\n\", 'green') def test_control(self): # tests losses against original optimizers'", "K from .. import Input, Dense, GRU, Bidirectional, Embedding from", "self.loss_control, rtol=0, atol=1e-3)) # cleanup del self.model_custom, self.model_control del optimizer_custom,", "self.model, optimizer reset_seeds(reset_graph_with_backend=K) cprint(\"\\n<< {} MAIN TEST PASSED >>\\n\".format(optimizer_name), 'green')", "import K from .. import Input, Dense, GRU, Bidirectional, Embedding", "maxnorm from .. import Adam, Nadam, SGD from .. import", "False if not any([optimizer_name == name for name in ('Adam',", "tf.compat.v1.disable_eager_execution() print(\"TF running in graph mode\") class TestOptimizers(TestCase): def test_all(self):", "import Input, Dense, GRU, Bidirectional, Embedding from .. 
import Model,", "embed_input_dim = 5 total_iterations = 0 model_kw = dict(batch_shape=batch_shape, dense_constraint=1,", "reset_seeds(reset_graph_with_backend=K, verbose=0) def _test_save_load(self, model, X, optimizer_name, optimizer): saved_model_preds =", "if sparse: out = Dense(2, activation='softmax')(x) else: out = Dense(1,", "else: out = Dense(1, activation='sigmoid')(x) return Model(ipt, out) @staticmethod def", "1 + np.cos(np.pi*iteration / total_iterations))) return np.allclose(eta_history, eta_history_simul, rtol=0, atol=2e-7)", "_make_model(batch_shape, total_iterations, l2_reg=0, bidirectional=True, dense_constraint=None, embed_input_dim=None, sparse=False): if dense_constraint is", "bias_regularizer=l2(l2_reg)) if bidirectional: x = Bidirectional(gru)(x) else: x = gru(x)", "tf_eager: print(\"TF running eagerly\") else: tf.compat.v1.disable_eager_execution() print(\"TF running in graph", "cosine annealing) for optimizer_name in ['AdamW', 'NadamW', 'SGDW']: cprint(\"<< TESTING", "Y = self._make_data(num_batches, *batch_shape) self.eta_history = [] # for stop-introspection", "against original optimizers' for optimizer_name in ['AdamW', 'NadamW', 'SGDW']: cprint(\"<<", ".. import maxnorm from .. 
import Adam, Nadam, SGD from", "'NadamW': self._test_control(optimizer_name) elif optimizer_name == 'SGDW': for nesterov in [True,", "optimizer_dict[optimizer_name] optimizer_kw = {} if 'Adam' in optimizer_name: optimizer_kw =", "tests losses against original optimizers' for optimizer_name in ['AdamW', 'NadamW',", "\"True\") if tf_eager: print(\"TF running eagerly\") else: tf.compat.v1.disable_eager_execution() print(\"TF running", "[] for epoch in range(num_epochs): for iteration in range(0, total_iterations):", "range(num_epochs): for batch_num in range(num_batches): self.t_cur_history += [K_eval(self.model.optimizer.t_cur, K)] self.eta_history", "dict(total_iterations=0, decay=1e-3, amsgrad=amsgrad, nesterov=nesterov, control_mode=True) num_batches = 100 batch_size, timesteps", "optimizer_kw = dict(total_iterations=0, decay=1e-3, amsgrad=amsgrad, nesterov=nesterov, control_mode=True) num_batches = 100", "del self.model_custom, self.model_control del optimizer_custom, optimizer_control reset_seeds(reset_graph_with_backend=K, verbose=0) def _test_save_load(self,", "if not any([optimizer_name == name for name in ('Adam', 'Nadam',", "'SGDW']: cprint(\"<< TESTING {} OPTIMIZER >>\".format(optimizer_name), 'blue') reset_seeds() optimizer_kw =", "self._make_model(**model_kw) optimizer_custom = self._make_optimizer(optimizer_name, self.model_custom, **optimizer_kw) self.model_custom.compile(optimizer_custom, loss=loss_name) self.loss_custom =", "self._make_model(batch_shape, total_iterations, embed_input_dim=embed_input_dim, dense_constraint=1, l2_reg=1e-4, bidirectional=False, sparse=True) optimizer = self._make_optimizer(optimizer_name,", "gru(x) x = Dense(2, kernel_regularizer=l2(l2_reg), kernel_constraint=dense_constraint)(x) if sparse: out =", "time() - t0)) loss_diff = np.abs(np.array(self.loss_custom) - np.array(self.loss_control)) print(\"%s max", "SGD} optimizer = optimizer_dict[optimizer_name] optimizer_kw = {} if 'Adam' in", "time: %.2f sec\" % (num_batches, 
time() - t0)) reset_seeds(reset_graph_with_backend=K, verbose=0)", "from .. import Adam, Nadam, SGD from .. import AdamW,", "def test_all(self): # Save/Load, Warm Restarts (w/ cosine annealing) for", "elif optimizer_name == 'SGDW': for nesterov in [True, False]: self._test_control(optimizer_name,", "MAIN TESTS PASSED >>\\n\", 'green') def test_misc(self): # tests of", "* ( 1 + np.cos(np.pi*iteration / total_iterations))) return np.allclose(eta_history, eta_history_simul,", "@staticmethod def _make_optimizer(optimizer_name, model, total_iterations, decay=0, amsgrad=False, nesterov=False, control_mode=False): optimizer_dict", "= dict(total_iterations=0, decay=1e-3, amsgrad=amsgrad, nesterov=nesterov, control_mode=True) num_batches = 100 batch_size,", "Dense(2, activation='softmax')(x) else: out = Dense(1, activation='sigmoid')(x) return Model(ipt, out)", "sparse: X = np.random.randint(0, embed_input_dim, (num_batches, batch_size, timesteps)) else: X", "in [True, False]: self._test_control(optimizer_name, nesterov=nesterov) print(\"\\n>> SGDW nesterov={} {}\".format(nesterov, pass_txt))", "self.model_custom, **optimizer_kw) self.model_custom.compile(optimizer_custom, loss=loss_name) self.loss_custom = [] # for introspection", "+= [self.model_control.train_on_batch( X[batch_num], Y[batch_num])] print(\"model_control -- %s batches -- time:", "= model.predict(X[0]) loaded_model_weights = K.batch_get_value(model.trainable_weights) loaded_optim_weights = K.batch_get_value(model.optimizer.weights) self.assertTrue(np.allclose(saved_model_preds, loaded_model_preds,", "= {} if 'Adam' in optimizer_name: optimizer_kw = {'amsgrad': amsgrad}", "in optimizer_name: optimizer_kw = {'nesterov': nesterov, 'momentum': .9} if 'Nadam'", "np.array(self.loss_control)) print(\"%s max loss diff: %e\" % (optimizer_name, np.max(loss_diff))) self.assertTrue(np.allclose(self.loss_custom,", "amsgrad={} {}\".format(amsgrad, pass_txt)) elif optimizer_name == 'NadamW': 
self._test_control(optimizer_name) elif optimizer_name", "total_iterations = num_batches # due to warm restarts self.model =", "MAIN TEST PASSED >>\\n\".format(optimizer_name), 'green') cprint(\"\\n<< ALL MAIN TESTS PASSED", "1e-6] + l2_extra) lr_m = {'gru': 0.5} use_cosine_annealing = True", "reset_seeds(reset_graph_with_backend=K, verbose=0) self.model_custom = self._make_model(**model_kw) optimizer_custom = self._make_optimizer(optimizer_name, self.model_custom, **optimizer_kw)", "**optimizer_kw) @staticmethod def _valid_weight_decays(model): weight_decays = get_weight_decays(model) trues = 0", "SGDW from .. import get_weight_decays, fill_dict_in_order, reset_seeds, K_eval print(\"TF version:", ".. import K from .. import Input, Dense, GRU, Bidirectional,", "for smw, lmw in zip(saved_model_weights, loaded_model_weights): self.assertTrue(np.allclose(smw, lmw, rtol=0, atol=1e-8))", "total_iterations, num_epochs)) self._test_save_load(self.model, X, optimizer_name, optimizer) # cleanup del self.model,", "= 0 self.model = self._make_model(batch_shape, total_iterations, embed_input_dim=embed_input_dim, dense_constraint=1, l2_reg=1e-4, bidirectional=False,", "time() for batch_num in range(num_batches): self.loss_control += [self.model_control.train_on_batch( X[batch_num], Y[batch_num])]", "reset_seeds(verbose=0) X, Y = self._make_data(num_batches, *batch_shape, embed_input_dim=embed_input_dim, sparse=True) reset_seeds(reset_graph_with_backend=K, verbose=0)", "MISC TEST PASSED >>\\n\".format(optimizer_name), 'green') cprint(\"\\n<< ALL MISC TESTS PASSED", "from unittest import TestCase from .. 
import K from ..", "epoch in range(num_epochs): for iteration in range(0, total_iterations): eta_history_simul.append(0.5 *", "in range(num_batches): self.t_cur_history += [K_eval(self.model.optimizer.t_cur, K)] self.eta_history += [K_eval(self.model.optimizer.eta_t, K)]", "embed_input_dim=None, sparse=False): if sparse: X = np.random.randint(0, embed_input_dim, (num_batches, batch_size,", "def _make_model(batch_shape, total_iterations, l2_reg=0, bidirectional=True, dense_constraint=None, embed_input_dim=None, sparse=False): if dense_constraint", "Adam, Nadam, SGD from .. import AdamW, NadamW, SGDW from", "model_kw = dict(batch_shape=batch_shape, dense_constraint=1, total_iterations=total_iterations, embed_input_dim=embed_input_dim, l2_reg=0, bidirectional=False, sparse=True) loss_name", "self.eta_history += [K_eval(self.model.optimizer.eta_t, K)] self.model.train_on_batch(X[batch_num], Y[batch_num]) self.eta_history += [K_eval(self.model.optimizer.eta_t, K)]", "{} MAIN TEST PASSED >>\\n\".format(optimizer_name), 'green') cprint(\"\\n<< ALL MAIN TESTS", "Nadam, SGD from .. 
import AdamW, NadamW, SGDW from ..", "if dense_constraint is not None: dense_constraint = maxnorm(dense_constraint) ipt =", "'Nadam', 'SGD')]): return optimizer(lr=1e-4, weight_decays=wd, lr_multipliers=lr_m, use_cosine_annealing=use_cosine_annealing, t_cur=0, total_iterations=total_iterations, **optimizer_kw)", "cprint(\"\\n<< ALL MAIN TESTS PASSED >>\\n\", 'green') def test_misc(self): #", "= GRU(4, recurrent_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg)) if bidirectional: x = Bidirectional(gru)(x) else:", "= self._make_model(**model_kw) optimizer_control = self._make_optimizer(optimizer_name[:-1], self.model_control, **optimizer_kw) self.model_control.compile(optimizer_control, loss=loss_name) self.loss_control", "optimizer_kw = {'total_iterations': 0, 'decay': 1e-3, 'amsgrad': optimizer_name == 'AdamW',", "sparse: x = Embedding(embed_input_dim, embed_input_dim*3 + 1, mask_zero=True)(ipt) else: x", "{'AdamW': AdamW, 'NadamW': NadamW, 'SGDW': SGDW, 'Adam': Adam, 'Nadam': Nadam,", "= fill_dict_in_order(wd_dict, [1e-5, 1e-5, 1e-6] + l2_extra) lr_m = {'gru':", "max loss diff: %e\" % (optimizer_name, np.max(loss_diff))) self.assertTrue(np.allclose(self.loss_custom, self.loss_control, rtol=0,", "import os import tempfile import numpy as np import tensorflow", "elif 'SGD' in optimizer_name: optimizer_kw = {'nesterov': nesterov, 'momentum': .9}", "optimizer_name: optimizer_kw = {'nesterov': nesterov, 'momentum': .9} if 'Nadam' not", "(wd != 0) return (trues == 0) @staticmethod def _valid_cosine_annealing(eta_history,", "[2e-5]*(len(wd_dict) - 3) wd = fill_dict_in_order(wd_dict, [1e-5, 1e-5, 1e-6] +", "dense_constraint is not None: dense_constraint = maxnorm(dense_constraint) ipt = Input(batch_shape=batch_shape)", "tf.__version__) tf_eager = bool(os.environ[\"TF_EAGER\"] == \"True\") if tf_eager: print(\"TF running", "100 batch_size, timesteps = 16, 32 batch_shape = (batch_size, timesteps)", "trues += (wd != 0) return (trues == 0) @staticmethod", "not control_mode: 
wd_dict = get_weight_decays(model) l2_extra = [2e-5]*(len(wd_dict) - 3)", "(num_batches, time() - t0)) loss_diff = np.abs(np.array(self.loss_custom) - np.array(self.loss_control)) print(\"%s", "[self.model_control.train_on_batch( X[batch_num], Y[batch_num])] print(\"model_control -- %s batches -- time: %.2f", "+= [K_eval(self.model.optimizer.eta_t, K)] self.model.train_on_batch(X[batch_num], Y[batch_num]) self.eta_history += [K_eval(self.model.optimizer.eta_t, K)] self.eta_history.pop(-(1", "= model.predict(X[0]) saved_model_weights = K.batch_get_value(model.trainable_weights) saved_optim_weights = K.batch_get_value(model.optimizer.weights) test_name =", "ipt gru = GRU(4, recurrent_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg)) if bidirectional: x =", "2, (num_batches, batch_size)) return X, Y @staticmethod def _make_model(batch_shape, total_iterations,", "batch_size, timesteps, num_channels = 16, 8, 4 batch_shape = (batch_size,", "lmw, rtol=0, atol=1e-8)) for sow, low in zip(saved_optim_weights, loaded_optim_weights): self.assertTrue(np.allclose(sow,", "atol=1e-3)) # cleanup del self.model_custom, self.model_control del optimizer_custom, optimizer_control reset_seeds(reset_graph_with_backend=K,", "modelpath = os.path.join(tempfile.gettempdir(), test_name) model.save(modelpath) del model model = load_model(modelpath,", "in [True, False]: self._test_control(optimizer_name, amsgrad=amsgrad) print(\"\\n>> AdamW amsgrad={} {}\".format(amsgrad, pass_txt))", "[K_eval(self.model.optimizer.t_cur, K)] self.eta_history += [K_eval(self.model.optimizer.eta_t, K)] self.model.train_on_batch(X[batch_num], Y[batch_num]) self.eta_history +=", "embed_input_dim = 5 total_iterations = 0 self.model = self._make_model(batch_shape, total_iterations,", "'Nadam': Nadam, 'SGD': SGD} optimizer = optimizer_dict[optimizer_name] optimizer_kw = {}", "get_weight_decays(model) trues = 0 for wd in weight_decays.values(): trues +=", "self.model_custom, self.model_control del optimizer_custom, 
optimizer_control reset_seeds(reset_graph_with_backend=K, verbose=0) def _test_save_load(self, model,", "sow, low in zip(saved_optim_weights, loaded_optim_weights): self.assertTrue(np.allclose(sow, low, rtol=0, atol=1e-8)) @staticmethod", "%s\" % tf.__version__) tf_eager = bool(os.environ[\"TF_EAGER\"] == \"True\") if tf_eager:", "optimizer_name cprint(\"\\n<< {} {} >>\\n\".format(o_name, pass_txt.upper()), 'green') cprint(\"\\n<< ALL CONTROL", "X, Y = self._make_data(num_batches, *batch_shape, embed_input_dim=embed_input_dim, sparse=True) for batch_num in", "rtol=0, atol=1e-8)) @staticmethod def _make_data(num_batches, batch_size, timesteps, num_channels=None, embed_input_dim=None, sparse=False):", "pass_txt = \"Control Test Passed\" if optimizer_name == 'AdamW': for", "K.batch_get_value(model.trainable_weights) saved_optim_weights = K.batch_get_value(model.optimizer.weights) test_name = 'test__%f{}.h5'.format(np.random.random()) modelpath = os.path.join(tempfile.gettempdir(),", "if sparse: x = Embedding(embed_input_dim, embed_input_dim*3 + 1, mask_zero=True)(ipt) else:", "num_epochs): eta_history_simul = [] for epoch in range(num_epochs): for iteration", "bool(os.environ[\"TF_EAGER\"] == \"True\") if tf_eager: print(\"TF running eagerly\") else: tf.compat.v1.disable_eager_execution()", "import Model, load_model from .. import l2 from .. 
import", "tempfile import numpy as np import tensorflow as tf from", "due to warm restarts self.model = self._make_model(batch_shape, total_iterations) optimizer =", "self.model, **optimizer_kw) self.model.compile(optimizer, loss='sparse_categorical_crossentropy') X, Y = self._make_data(num_batches, *batch_shape, embed_input_dim=embed_input_dim,", "[] # for introspection t0 = time() for batch_num in", "x = gru(x) x = Dense(2, kernel_regularizer=l2(l2_reg), kernel_constraint=dense_constraint)(x) if sparse:", "self.t_cur_history += [K_eval(self.model.optimizer.t_cur, K)] self.eta_history += [K_eval(self.model.optimizer.eta_t, K)] self.model.train_on_batch(X[batch_num], Y[batch_num])", "in optimizer_name: optimizer_kw.update({'decay': decay}) if not control_mode: wd_dict = get_weight_decays(model)", "dict(batch_shape=batch_shape, dense_constraint=1, total_iterations=total_iterations, embed_input_dim=embed_input_dim, l2_reg=0, bidirectional=False, sparse=True) loss_name = 'sparse_categorical_crossentropy'", "optimizer_name == 'SGDW': for nesterov in [True, False]: self._test_control(optimizer_name, nesterov=nesterov)", "sec\" % (num_batches, time() - t0)) reset_seeds(reset_graph_with_backend=K, verbose=0) self.model_control =", "K)] self.model.train_on_batch(X[batch_num], Y[batch_num]) self.eta_history += [K_eval(self.model.optimizer.eta_t, K)] self.eta_history.pop(-(1 + int(tf_eager)))", "batch_size, timesteps, num_channels) Y = np.random.randint(0, 2, (num_batches, batch_size)) return", "restarts self.model = self._make_model(batch_shape, total_iterations) optimizer = self._make_optimizer(optimizer_name, self.model, total_iterations)", "X, optimizer_name, optimizer) # cleanup del self.model, optimizer reset_seeds(reset_graph_with_backend=K) cprint(\"\\n<<", "['AdamW', 'NadamW', 'SGDW']: cprint(\"<< TESTING {} OPTIMIZER >>\".format(optimizer_name), 'blue') pass_txt", "time() for batch_num in range(num_batches): self.loss_custom += [self.model_custom.train_on_batch( 
X[batch_num], Y[batch_num])]", "Embedding(embed_input_dim, embed_input_dim*3 + 1, mask_zero=True)(ipt) else: x = ipt gru", "self.loss_custom = [] # for introspection t0 = time() for", "cprint(\"\\n<< {} {} >>\\n\".format(o_name, pass_txt.upper()), 'green') cprint(\"\\n<< ALL CONTROL TESTS", "- 3) wd = fill_dict_in_order(wd_dict, [1e-5, 1e-5, 1e-6] + l2_extra)", "self.model.train_on_batch(X[batch_num], Y[batch_num]) self._test_save_load(self.model, X, optimizer_name, optimizer) # cleanup del self.model,", "total_iterations=total_iterations, **optimizer_kw) else: return optimizer(lr=1e-4, **optimizer_kw) @staticmethod def _valid_weight_decays(model): weight_decays", "0 self.model = self._make_model(batch_shape, total_iterations, embed_input_dim=embed_input_dim, dense_constraint=1, l2_reg=1e-4, bidirectional=False, sparse=True)", "fail X, Y = self._make_data(num_batches, *batch_shape) self.eta_history = [] #", "lr_m = None, None use_cosine_annealing = False if not any([optimizer_name", "num_channels) total_iterations = num_batches # due to warm restarts self.model", "l2_extra) lr_m = {'gru': 0.5} use_cosine_annealing = True else: wd,", "total_iterations): eta_history_simul.append(0.5 * ( 1 + np.cos(np.pi*iteration / total_iterations))) return", "dense_constraint = maxnorm(dense_constraint) ipt = Input(batch_shape=batch_shape) if sparse: x =", "*batch_shape, embed_input_dim=embed_input_dim, sparse=True) reset_seeds(reset_graph_with_backend=K, verbose=0) self.model_custom = self._make_model(**model_kw) optimizer_custom =", "if bidirectional: x = Bidirectional(gru)(x) else: x = gru(x) x", "# tests of non-main features to improve coverage for optimizer_name", "= self._make_optimizer(optimizer_name, self.model_custom, **optimizer_kw) self.model_custom.compile(optimizer_custom, loss=loss_name) self.loss_custom = [] #", "for introspection t0 = time() for batch_num in range(num_batches): self.loss_custom", "batch_num in range(num_batches): self.loss_control += 
[self.model_control.train_on_batch( X[batch_num], Y[batch_num])] print(\"model_control --", "range(0, total_iterations): eta_history_simul.append(0.5 * ( 1 + np.cos(np.pi*iteration / total_iterations)))", "timesteps, num_channels=None, embed_input_dim=None, sparse=False): if sparse: X = np.random.randint(0, embed_input_dim,", "import l2 from .. import maxnorm from .. import Adam,", "timesteps) embed_input_dim = 5 total_iterations = 0 self.model = self._make_model(batch_shape,", "ALL MISC TESTS PASSED >>\\n\", 'green') def test_control(self): # tests", "'blue') pass_txt = \"Control Test Passed\" if optimizer_name == 'AdamW':", "Y[batch_num]) self._test_save_load(self.model, X, optimizer_name, optimizer) # cleanup del self.model, optimizer", "PASSED >>\\n\", 'green') def _test_control(self, optimizer_name, amsgrad=False, nesterov=False): optimizer_kw =", "def _test_save_load(self, model, X, optimizer_name, optimizer): saved_model_preds = model.predict(X[0]) saved_model_weights", "= 'test__%f{}.h5'.format(np.random.random()) modelpath = os.path.join(tempfile.gettempdir(), test_name) model.save(modelpath) del model model", "Save/Load, Warm Restarts (w/ cosine annealing) for optimizer_name in ['AdamW',", "# cleanup del self.model, optimizer reset_seeds(reset_graph_with_backend=K) cprint(\"\\n<< {} MISC TEST", "embed_input_dim=embed_input_dim, l2_reg=0, bidirectional=False, sparse=True) loss_name = 'sparse_categorical_crossentropy' reset_seeds(verbose=0) X, Y", "X[batch_num], Y[batch_num])] print(\"model_custom -- %s batches -- time: %.2f sec\"", "self.model = self._make_model(batch_shape, total_iterations, embed_input_dim=embed_input_dim, dense_constraint=1, l2_reg=1e-4, bidirectional=False, sparse=True) optimizer", "dense_constraint=1, total_iterations=total_iterations, embed_input_dim=embed_input_dim, l2_reg=0, bidirectional=False, sparse=True) loss_name = 'sparse_categorical_crossentropy' reset_seeds(verbose=0)", ".. 
import get_weight_decays, fill_dict_in_order, reset_seeds, K_eval print(\"TF version: %s\" %", "OPTIMIZER >>\".format(optimizer_name), 'blue') pass_txt = \"Control Test Passed\" if optimizer_name", "timesteps, num_channels = 16, 8, 4 batch_shape = (batch_size, timesteps,", "reset_seeds(reset_graph_with_backend=K) cprint(\"\\n<< {} MAIN TEST PASSED >>\\n\".format(optimizer_name), 'green') cprint(\"\\n<< ALL", "sec\" % (num_batches, time() - t0)) loss_diff = np.abs(np.array(self.loss_custom) -", ">>\\n\".format(optimizer_name), 'green') cprint(\"\\n<< ALL MAIN TESTS PASSED >>\\n\", 'green') def", "TESTING {} OPTIMIZER >>\".format(optimizer_name), 'blue') reset_seeds() optimizer_kw = {'total_iterations': 0,", "OPTIMIZER >>\".format(optimizer_name), 'blue') reset_seeds() num_batches, num_epochs = 25, 4 batch_size,", "bidirectional=False, sparse=True) optimizer = self._make_optimizer(optimizer_name, self.model, **optimizer_kw) self.model.compile(optimizer, loss='sparse_categorical_crossentropy') X,", "0) @staticmethod def _valid_cosine_annealing(eta_history, total_iterations, num_epochs): eta_history_simul = [] for", "= get_weight_decays(model) trues = 0 for wd in weight_decays.values(): trues", "# for stop-introspection self.t_cur_history = [] # for stop-introspection for", "cprint(\"<< TESTING {} OPTIMIZER >>\".format(optimizer_name), 'blue') reset_seeds() num_batches, num_epochs =", "'nesterov': optimizer_name == 'SGDW'} num_batches = 4 batch_size, timesteps =", "loss=loss_name) self.loss_custom = [] # for introspection t0 = time()", "l2_reg=1e-4, bidirectional=False, sparse=True) optimizer = self._make_optimizer(optimizer_name, self.model, **optimizer_kw) self.model.compile(optimizer, loss='sparse_categorical_crossentropy')", "PASSED >>\\n\".format(optimizer_name), 'green') cprint(\"\\n<< ALL MISC TESTS PASSED >>\\n\", 'green')", "loss='binary_crossentropy') self.assertTrue(self._valid_weight_decays(self.model)) self.model._make_train_function() # else K.eval before 
train may fail", "sparse=True) loss_name = 'sparse_categorical_crossentropy' reset_seeds(verbose=0) X, Y = self._make_data(num_batches, *batch_shape,", "None use_cosine_annealing = False if not any([optimizer_name == name for", "K.batch_get_value(model.trainable_weights) loaded_optim_weights = K.batch_get_value(model.optimizer.weights) self.assertTrue(np.allclose(saved_model_preds, loaded_model_preds, rtol=0, atol=1e-8)) for smw,", "optimizer(lr=1e-4, **optimizer_kw) @staticmethod def _valid_weight_decays(model): weight_decays = get_weight_decays(model) trues =", "self.model_control = self._make_model(**model_kw) optimizer_control = self._make_optimizer(optimizer_name[:-1], self.model_control, **optimizer_kw) self.model_control.compile(optimizer_control, loss=loss_name)", "epoch in range(num_epochs): for batch_num in range(num_batches): self.t_cur_history += [K_eval(self.model.optimizer.t_cur,", "embed_input_dim=embed_input_dim, sparse=True) for batch_num in range(num_batches): self.model.train_on_batch(X[batch_num], Y[batch_num]) self._test_save_load(self.model, X,", "True else: wd, lr_m = None, None use_cosine_annealing = False", "Y = self._make_data(num_batches, *batch_shape, embed_input_dim=embed_input_dim, sparse=True) reset_seeds(reset_graph_with_backend=K, verbose=0) self.model_custom =", "in ('Adam', 'Nadam', 'SGD')]): return optimizer(lr=1e-4, weight_decays=wd, lr_multipliers=lr_m, use_cosine_annealing=use_cosine_annealing, t_cur=0,", "os import tempfile import numpy as np import tensorflow as", "maxnorm(dense_constraint) ipt = Input(batch_shape=batch_shape) if sparse: x = Embedding(embed_input_dim, embed_input_dim*3", "Input(batch_shape=batch_shape) if sparse: x = Embedding(embed_input_dim, embed_input_dim*3 + 1, mask_zero=True)(ipt)", "l2_reg=0, bidirectional=False, sparse=True) loss_name = 'sparse_categorical_crossentropy' reset_seeds(verbose=0) X, Y =", "optimizer_name == 'SGDW'} num_batches = 4 batch_size, timesteps = 16,", "model.save(modelpath) del 
model model = load_model(modelpath, custom_objects={optimizer_name: optimizer}) loaded_model_preds =", "pass_txt.upper()), 'green') cprint(\"\\n<< ALL CONTROL TESTS PASSED >>\\n\", 'green') def", "weight_decays=wd, lr_multipliers=lr_m, use_cosine_annealing=use_cosine_annealing, t_cur=0, total_iterations=total_iterations, **optimizer_kw) else: return optimizer(lr=1e-4, **optimizer_kw)", "x = ipt gru = GRU(4, recurrent_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg)) if bidirectional:", "'Adam': Adam, 'Nadam': Nadam, 'SGD': SGD} optimizer = optimizer_dict[optimizer_name] optimizer_kw", "for amsgrad in [True, False]: self._test_control(optimizer_name, amsgrad=amsgrad) print(\"\\n>> AdamW amsgrad={}", "else: return optimizer(lr=1e-4, **optimizer_kw) @staticmethod def _valid_weight_decays(model): weight_decays = get_weight_decays(model)", "SGDW nesterov={} {}\".format(nesterov, pass_txt)) o_name = optimizer_name cprint(\"\\n<< {} {}", "wd_dict = get_weight_decays(model) l2_extra = [2e-5]*(len(wd_dict) - 3) wd =", "\"Control Test Passed\" if optimizer_name == 'AdamW': for amsgrad in", "optimizer_control = self._make_optimizer(optimizer_name[:-1], self.model_control, **optimizer_kw) self.model_control.compile(optimizer_control, loss=loss_name) self.loss_control = []", "nesterov=nesterov, control_mode=True) num_batches = 100 batch_size, timesteps = 16, 32", "model, X, optimizer_name, optimizer): saved_model_preds = model.predict(X[0]) saved_model_weights = K.batch_get_value(model.trainable_weights)", "in range(num_epochs): for batch_num in range(num_batches): self.t_cur_history += [K_eval(self.model.optimizer.t_cur, K)]", "= [] # for stop-introspection for epoch in range(num_epochs): for", "total_iterations) optimizer = self._make_optimizer(optimizer_name, self.model, total_iterations) self.model.compile(optimizer, loss='binary_crossentropy') self.assertTrue(self._valid_weight_decays(self.model)) self.model._make_train_function()", "1, mask_zero=True)(ipt) else: x = 
ipt gru = GRU(4, recurrent_regularizer=l2(l2_reg),", "self._test_control(optimizer_name) elif optimizer_name == 'SGDW': for nesterov in [True, False]:", "self._make_data(num_batches, *batch_shape) self.eta_history = [] # for stop-introspection self.t_cur_history =", "in range(num_batches): self.model.train_on_batch(X[batch_num], Y[batch_num]) self._test_save_load(self.model, X, optimizer_name, optimizer) # cleanup", "time() - t0)) reset_seeds(reset_graph_with_backend=K, verbose=0) self.model_control = self._make_model(**model_kw) optimizer_control =", "16, 8 batch_shape = (batch_size, timesteps) embed_input_dim = 5 total_iterations", "amsgrad in [True, False]: self._test_control(optimizer_name, amsgrad=amsgrad) print(\"\\n>> AdamW amsgrad={} {}\".format(amsgrad,", "total_iterations=total_iterations, embed_input_dim=embed_input_dim, l2_reg=0, bidirectional=False, sparse=True) loss_name = 'sparse_categorical_crossentropy' reset_seeds(verbose=0) X,", "= Embedding(embed_input_dim, embed_input_dim*3 + 1, mask_zero=True)(ipt) else: x = ipt", "import TestCase from .. import K from .. import Input,", "non-main features to improve coverage for optimizer_name in ['AdamW', 'NadamW',", "l2_extra = [2e-5]*(len(wd_dict) - 3) wd = fill_dict_in_order(wd_dict, [1e-5, 1e-5,", "elif optimizer_name == 'NadamW': self._test_control(optimizer_name) elif optimizer_name == 'SGDW': for", ".. import l2 from .. import maxnorm from .. import", "(w/ cosine annealing) for optimizer_name in ['AdamW', 'NadamW', 'SGDW']: cprint(\"<<", "= time() for batch_num in range(num_batches): self.loss_control += [self.model_control.train_on_batch( X[batch_num],", "else: x = gru(x) x = Dense(2, kernel_regularizer=l2(l2_reg), kernel_constraint=dense_constraint)(x) if", "x = Bidirectional(gru)(x) else: x = gru(x) x = Dense(2,", "GRU, Bidirectional, Embedding from .. 
import Model, load_model from ..", "= Bidirectional(gru)(x) else: x = gru(x) x = Dense(2, kernel_regularizer=l2(l2_reg),", "low in zip(saved_optim_weights, loaded_optim_weights): self.assertTrue(np.allclose(sow, low, rtol=0, atol=1e-8)) @staticmethod def", "%.2f sec\" % (num_batches, time() - t0)) reset_seeds(reset_graph_with_backend=K, verbose=0) self.model_control", "'AdamW', 'nesterov': optimizer_name == 'SGDW'} num_batches = 4 batch_size, timesteps", "l2 from .. import maxnorm from .. import Adam, Nadam,", "{'gru': 0.5} use_cosine_annealing = True else: wd, lr_m = None,", "4 batch_size, timesteps, num_channels = 16, 8, 4 batch_shape =", "import Adam, Nadam, SGD from .. import AdamW, NadamW, SGDW", "PASSED >>\\n\", 'green') def test_control(self): # tests losses against original", "introspection t0 = time() for batch_num in range(num_batches): self.loss_control +=", "in range(num_batches): self.loss_custom += [self.model_custom.train_on_batch( X[batch_num], Y[batch_num])] print(\"model_custom -- %s", "embed_input_dim=embed_input_dim, sparse=True) reset_seeds(reset_graph_with_backend=K, verbose=0) self.model_custom = self._make_model(**model_kw) optimizer_custom = self._make_optimizer(optimizer_name,", "out = Dense(2, activation='softmax')(x) else: out = Dense(1, activation='sigmoid')(x) return", "== name for name in ('Adam', 'Nadam', 'SGD')]): return optimizer(lr=1e-4,", "'green') cprint(\"\\n<< ALL CONTROL TESTS PASSED >>\\n\", 'green') def _test_control(self,", "self.model_custom = self._make_model(**model_kw) optimizer_custom = self._make_optimizer(optimizer_name, self.model_custom, **optimizer_kw) self.model_custom.compile(optimizer_custom, loss=loss_name)", "Passed\" if optimizer_name == 'AdamW': for amsgrad in [True, False]:", "class TestOptimizers(TestCase): def test_all(self): # Save/Load, Warm Restarts (w/ cosine", "X = np.random.randn(num_batches, batch_size, timesteps, num_channels) Y = np.random.randint(0, 2,", "get_weight_decays, fill_dict_in_order, 
reset_seeds, K_eval print(\"TF version: %s\" % tf.__version__) tf_eager", "total_iterations, l2_reg=0, bidirectional=True, dense_constraint=None, embed_input_dim=None, sparse=False): if dense_constraint is not", "nesterov in [True, False]: self._test_control(optimizer_name, nesterov=nesterov) print(\"\\n>> SGDW nesterov={} {}\".format(nesterov,", "def _make_optimizer(optimizer_name, model, total_iterations, decay=0, amsgrad=False, nesterov=False, control_mode=False): optimizer_dict =", "nesterov=False, control_mode=False): optimizer_dict = {'AdamW': AdamW, 'NadamW': NadamW, 'SGDW': SGDW,", "else: tf.compat.v1.disable_eager_execution() print(\"TF running in graph mode\") class TestOptimizers(TestCase): def", "cprint(\"\\n<< ALL MISC TESTS PASSED >>\\n\", 'green') def test_control(self): #", "in range(num_batches): self.loss_control += [self.model_control.train_on_batch( X[batch_num], Y[batch_num])] print(\"model_control -- %s", "%.2f sec\" % (num_batches, time() - t0)) loss_diff = np.abs(np.array(self.loss_custom)", "{}\".format(nesterov, pass_txt)) o_name = optimizer_name cprint(\"\\n<< {} {} >>\\n\".format(o_name, pass_txt.upper()),", "self.model_control, **optimizer_kw) self.model_control.compile(optimizer_control, loss=loss_name) self.loss_control = [] # for introspection", "[] # for stop-introspection self.t_cur_history = [] # for stop-introspection", "K.batch_get_value(model.optimizer.weights) test_name = 'test__%f{}.h5'.format(np.random.random()) modelpath = os.path.join(tempfile.gettempdir(), test_name) model.save(modelpath) del", "= np.random.randn(num_batches, batch_size, timesteps, num_channels) Y = np.random.randint(0, 2, (num_batches,", "0) return (trues == 0) @staticmethod def _valid_cosine_annealing(eta_history, total_iterations, num_epochs):", "optimizer_name: optimizer_kw.update({'decay': decay}) if not control_mode: wd_dict = get_weight_decays(model) l2_extra", "for introspection t0 = time() for batch_num in range(num_batches): self.loss_control", 
"batch_shape = (batch_size, timesteps) embed_input_dim = 5 total_iterations = 0", "= True else: wd, lr_m = None, None use_cosine_annealing =", "iteration in range(0, total_iterations): eta_history_simul.append(0.5 * ( 1 + np.cos(np.pi*iteration", "8 batch_shape = (batch_size, timesteps) embed_input_dim = 5 total_iterations =", "K)] self.eta_history += [K_eval(self.model.optimizer.eta_t, K)] self.model.train_on_batch(X[batch_num], Y[batch_num]) self.eta_history += [K_eval(self.model.optimizer.eta_t,", "embed_input_dim=embed_input_dim, dense_constraint=1, l2_reg=1e-4, bidirectional=False, sparse=True) optimizer = self._make_optimizer(optimizer_name, self.model, **optimizer_kw)", "tf_eager = bool(os.environ[\"TF_EAGER\"] == \"True\") if tf_eager: print(\"TF running eagerly\")", "range(num_batches): self.t_cur_history += [K_eval(self.model.optimizer.t_cur, K)] self.eta_history += [K_eval(self.model.optimizer.eta_t, K)] self.model.train_on_batch(X[batch_num],", "model = load_model(modelpath, custom_objects={optimizer_name: optimizer}) loaded_model_preds = model.predict(X[0]) loaded_model_weights =", "self.assertTrue(np.allclose(smw, lmw, rtol=0, atol=1e-8)) for sow, low in zip(saved_optim_weights, loaded_optim_weights):", "o_name = optimizer_name cprint(\"\\n<< {} {} >>\\n\".format(o_name, pass_txt.upper()), 'green') cprint(\"\\n<<", "{'nesterov': nesterov, 'momentum': .9} if 'Nadam' not in optimizer_name: optimizer_kw.update({'decay':", "TESTING {} OPTIMIZER >>\".format(optimizer_name), 'blue') pass_txt = \"Control Test Passed\"", "SGD from .. import AdamW, NadamW, SGDW from .. 
import", "sparse=False): if dense_constraint is not None: dense_constraint = maxnorm(dense_constraint) ipt", "3) wd = fill_dict_in_order(wd_dict, [1e-5, 1e-5, 1e-6] + l2_extra) lr_m", "total_iterations = 0 self.model = self._make_model(batch_shape, total_iterations, embed_input_dim=embed_input_dim, dense_constraint=1, l2_reg=1e-4,", "-- time: %.2f sec\" % (num_batches, time() - t0)) reset_seeds(reset_graph_with_backend=K,", "tf from time import time from termcolor import cprint from", "for nesterov in [True, False]: self._test_control(optimizer_name, nesterov=nesterov) print(\"\\n>> SGDW nesterov={}", "saved_optim_weights = K.batch_get_value(model.optimizer.weights) test_name = 'test__%f{}.h5'.format(np.random.random()) modelpath = os.path.join(tempfile.gettempdir(), test_name)", "NadamW, 'SGDW': SGDW, 'Adam': Adam, 'Nadam': Nadam, 'SGD': SGD} optimizer", "{'amsgrad': amsgrad} elif 'SGD' in optimizer_name: optimizer_kw = {'nesterov': nesterov,", "= get_weight_decays(model) l2_extra = [2e-5]*(len(wd_dict) - 3) wd = fill_dict_in_order(wd_dict,", "in weight_decays.values(): trues += (wd != 0) return (trues ==", "None, None use_cosine_annealing = False if not any([optimizer_name == name", "= optimizer_name cprint(\"\\n<< {} {} >>\\n\".format(o_name, pass_txt.upper()), 'green') cprint(\"\\n<< ALL", "self.loss_control += [self.model_control.train_on_batch( X[batch_num], Y[batch_num])] print(\"model_control -- %s batches --", "num_batches, num_epochs = 25, 4 batch_size, timesteps, num_channels = 16,", "print(\"model_control -- %s batches -- time: %.2f sec\" % (num_batches,", "running eagerly\") else: tf.compat.v1.disable_eager_execution() print(\"TF running in graph mode\") class", "%s batches -- time: %.2f sec\" % (num_batches, time() -", "= None, None use_cosine_annealing = False if not any([optimizer_name ==", "import get_weight_decays, fill_dict_in_order, reset_seeds, K_eval print(\"TF version: %s\" % tf.__version__)", "self._test_save_load(self.model, X, 
optimizer_name, optimizer) # cleanup del self.model, optimizer reset_seeds(reset_graph_with_backend=K)", "cleanup del self.model, optimizer reset_seeds(reset_graph_with_backend=K) cprint(\"\\n<< {} MAIN TEST PASSED", "= False if not any([optimizer_name == name for name in", "model.predict(X[0]) loaded_model_weights = K.batch_get_value(model.trainable_weights) loaded_optim_weights = K.batch_get_value(model.optimizer.weights) self.assertTrue(np.allclose(saved_model_preds, loaded_model_preds, rtol=0,", "{} OPTIMIZER >>\".format(optimizer_name), 'blue') pass_txt = \"Control Test Passed\" if", "out = Dense(1, activation='sigmoid')(x) return Model(ipt, out) @staticmethod def _make_optimizer(optimizer_name,", "optimizer_dict = {'AdamW': AdamW, 'NadamW': NadamW, 'SGDW': SGDW, 'Adam': Adam,", "from termcolor import cprint from unittest import TestCase from ..", "range(num_batches): self.loss_control += [self.model_control.train_on_batch( X[batch_num], Y[batch_num])] print(\"model_control -- %s batches", "loaded_model_preds = model.predict(X[0]) loaded_model_weights = K.batch_get_value(model.trainable_weights) loaded_optim_weights = K.batch_get_value(model.optimizer.weights) self.assertTrue(np.allclose(saved_model_preds,", "= {'AdamW': AdamW, 'NadamW': NadamW, 'SGDW': SGDW, 'Adam': Adam, 'Nadam':", "is not None: dense_constraint = maxnorm(dense_constraint) ipt = Input(batch_shape=batch_shape) if", "atol=1e-8)) for sow, low in zip(saved_optim_weights, loaded_optim_weights): self.assertTrue(np.allclose(sow, low, rtol=0,", "(batch_size, timesteps) embed_input_dim = 5 total_iterations = 0 model_kw =", "Input, Dense, GRU, Bidirectional, Embedding from .. 
import Model, load_model", "self.assertTrue(self._valid_weight_decays(self.model)) self.model._make_train_function() # else K.eval before train may fail X,", "self.model_custom.compile(optimizer_custom, loss=loss_name) self.loss_custom = [] # for introspection t0 =", "K.set_value(self.model.optimizer.t_cur, 0) self.assertTrue(self._valid_cosine_annealing(self.eta_history, total_iterations, num_epochs)) self._test_save_load(self.model, X, optimizer_name, optimizer) #", "np.abs(np.array(self.loss_custom) - np.array(self.loss_control)) print(\"%s max loss diff: %e\" % (optimizer_name,", "_make_optimizer(optimizer_name, model, total_iterations, decay=0, amsgrad=False, nesterov=False, control_mode=False): optimizer_dict = {'AdamW':", "dense_constraint=1, l2_reg=1e-4, bidirectional=False, sparse=True) optimizer = self._make_optimizer(optimizer_name, self.model, **optimizer_kw) self.model.compile(optimizer,", ".9} if 'Nadam' not in optimizer_name: optimizer_kw.update({'decay': decay}) if not", "lr_m = {'gru': 0.5} use_cosine_annealing = True else: wd, lr_m", "total_iterations, embed_input_dim=embed_input_dim, dense_constraint=1, l2_reg=1e-4, bidirectional=False, sparse=True) optimizer = self._make_optimizer(optimizer_name, self.model,", "range(num_batches): self.loss_custom += [self.model_custom.train_on_batch( X[batch_num], Y[batch_num])] print(\"model_custom -- %s batches", "as tf from time import time from termcolor import cprint", "loaded_model_weights = K.batch_get_value(model.trainable_weights) loaded_optim_weights = K.batch_get_value(model.optimizer.weights) self.assertTrue(np.allclose(saved_model_preds, loaded_model_preds, rtol=0, atol=1e-8))", "optimizer_name == 'AdamW': for amsgrad in [True, False]: self._test_control(optimizer_name, amsgrad=amsgrad)", "Adam, 'Nadam': Nadam, 'SGD': SGD} optimizer = optimizer_dict[optimizer_name] optimizer_kw =", "(trues == 0) @staticmethod def _valid_cosine_annealing(eta_history, total_iterations, num_epochs): eta_history_simul 
=", "control_mode=False): optimizer_dict = {'AdamW': AdamW, 'NadamW': NadamW, 'SGDW': SGDW, 'Adam':", "# else K.eval before train may fail X, Y =", "else K.eval before train may fail X, Y = self._make_data(num_batches,", "= dict(batch_shape=batch_shape, dense_constraint=1, total_iterations=total_iterations, embed_input_dim=embed_input_dim, l2_reg=0, bidirectional=False, sparse=True) loss_name =", "*batch_shape, embed_input_dim=embed_input_dim, sparse=True) for batch_num in range(num_batches): self.model.train_on_batch(X[batch_num], Y[batch_num]) self._test_save_load(self.model,", "for batch_num in range(num_batches): self.model.train_on_batch(X[batch_num], Y[batch_num]) self._test_save_load(self.model, X, optimizer_name, optimizer)", "optimizer = optimizer_dict[optimizer_name] optimizer_kw = {} if 'Adam' in optimizer_name:", "use_cosine_annealing=use_cosine_annealing, t_cur=0, total_iterations=total_iterations, **optimizer_kw) else: return optimizer(lr=1e-4, **optimizer_kw) @staticmethod def", "= [] # for stop-introspection self.t_cur_history = [] # for", "time from termcolor import cprint from unittest import TestCase from", "TestOptimizers(TestCase): def test_all(self): # Save/Load, Warm Restarts (w/ cosine annealing)", "name for name in ('Adam', 'Nadam', 'SGD')]): return optimizer(lr=1e-4, weight_decays=wd,", "tensorflow as tf from time import time from termcolor import", ">>\".format(optimizer_name), 'blue') reset_seeds() num_batches, num_epochs = 25, 4 batch_size, timesteps,", "return Model(ipt, out) @staticmethod def _make_optimizer(optimizer_name, model, total_iterations, decay=0, amsgrad=False,", "amsgrad=False, nesterov=False): optimizer_kw = dict(total_iterations=0, decay=1e-3, amsgrad=amsgrad, nesterov=nesterov, control_mode=True) num_batches", ">>\\n\".format(optimizer_name), 'green') cprint(\"\\n<< ALL MISC TESTS PASSED >>\\n\", 'green') def", "if optimizer_name == 'AdamW': for amsgrad in [True, False]: self._test_control(optimizer_name,", "[True, 
False]: self._test_control(optimizer_name, nesterov=nesterov) print(\"\\n>> SGDW nesterov={} {}\".format(nesterov, pass_txt)) o_name", "optimizer_kw.update({'decay': decay}) if not control_mode: wd_dict = get_weight_decays(model) l2_extra =", "in range(num_epochs): for iteration in range(0, total_iterations): eta_history_simul.append(0.5 * (", "[K_eval(self.model.optimizer.eta_t, K)] self.model.train_on_batch(X[batch_num], Y[batch_num]) self.eta_history += [K_eval(self.model.optimizer.eta_t, K)] self.eta_history.pop(-(1 +", "cleanup del self.model_custom, self.model_control del optimizer_custom, optimizer_control reset_seeds(reset_graph_with_backend=K, verbose=0) def", "'green') def test_control(self): # tests losses against original optimizers' for", "'blue') reset_seeds() num_batches, num_epochs = 25, 4 batch_size, timesteps, num_channels", "= (batch_size, timesteps) embed_input_dim = 5 total_iterations = 0 self.model", "1e-5, 1e-6] + l2_extra) lr_m = {'gru': 0.5} use_cosine_annealing =", "wd, lr_m = None, None use_cosine_annealing = False if not", "+ l2_extra) lr_m = {'gru': 0.5} use_cosine_annealing = True else:", "= 16, 8, 4 batch_shape = (batch_size, timesteps, num_channels) total_iterations", "X = np.random.randint(0, embed_input_dim, (num_batches, batch_size, timesteps)) else: X =", "= num_batches # due to warm restarts self.model = self._make_model(batch_shape,", "# cleanup del self.model, optimizer reset_seeds(reset_graph_with_backend=K) cprint(\"\\n<< {} MAIN TEST", "Warm Restarts (w/ cosine annealing) for optimizer_name in ['AdamW', 'NadamW',", "loss_name = 'sparse_categorical_crossentropy' reset_seeds(verbose=0) X, Y = self._make_data(num_batches, *batch_shape, embed_input_dim=embed_input_dim,", "0 model_kw = dict(batch_shape=batch_shape, dense_constraint=1, total_iterations=total_iterations, embed_input_dim=embed_input_dim, l2_reg=0, bidirectional=False, sparse=True)", ".. import Model, load_model from .. 
import l2 from ..", "X, Y = self._make_data(num_batches, *batch_shape) self.eta_history = [] # for", "print(\"%s max loss diff: %e\" % (optimizer_name, np.max(loss_diff))) self.assertTrue(np.allclose(self.loss_custom, self.loss_control,", "= Dense(2, activation='softmax')(x) else: out = Dense(1, activation='sigmoid')(x) return Model(ipt,", "== 'AdamW': for amsgrad in [True, False]: self._test_control(optimizer_name, amsgrad=amsgrad) print(\"\\n>>", "{} >>\\n\".format(o_name, pass_txt.upper()), 'green') cprint(\"\\n<< ALL CONTROL TESTS PASSED >>\\n\",", "None: dense_constraint = maxnorm(dense_constraint) ipt = Input(batch_shape=batch_shape) if sparse: x", "print(\"\\n>> AdamW amsgrad={} {}\".format(amsgrad, pass_txt)) elif optimizer_name == 'NadamW': self._test_control(optimizer_name)", "self._make_model(**model_kw) optimizer_control = self._make_optimizer(optimizer_name[:-1], self.model_control, **optimizer_kw) self.model_control.compile(optimizer_control, loss=loss_name) self.loss_control =", "int(tf_eager))) K.set_value(self.model.optimizer.t_cur, 0) self.assertTrue(self._valid_cosine_annealing(self.eta_history, total_iterations, num_epochs)) self._test_save_load(self.model, X, optimizer_name, optimizer)", "import maxnorm from .. import Adam, Nadam, SGD from ..", ">>\".format(optimizer_name), 'blue') reset_seeds() optimizer_kw = {'total_iterations': 0, 'decay': 1e-3, 'amsgrad':", "from .. 
import get_weight_decays, fill_dict_in_order, reset_seeds, K_eval print(\"TF version: %s\"", "'momentum': .9} if 'Nadam' not in optimizer_name: optimizer_kw.update({'decay': decay}) if", "@staticmethod def _valid_cosine_annealing(eta_history, total_iterations, num_epochs): eta_history_simul = [] for epoch", "OPTIMIZER >>\".format(optimizer_name), 'blue') reset_seeds() optimizer_kw = {'total_iterations': 0, 'decay': 1e-3,", "= self._make_optimizer(optimizer_name[:-1], self.model_control, **optimizer_kw) self.model_control.compile(optimizer_control, loss=loss_name) self.loss_control = [] #", "%e\" % (optimizer_name, np.max(loss_diff))) self.assertTrue(np.allclose(self.loss_custom, self.loss_control, rtol=0, atol=1e-3)) # cleanup", "self._make_model(batch_shape, total_iterations) optimizer = self._make_optimizer(optimizer_name, self.model, total_iterations) self.model.compile(optimizer, loss='binary_crossentropy') self.assertTrue(self._valid_weight_decays(self.model))", "PASSED >>\\n\".format(optimizer_name), 'green') cprint(\"\\n<< ALL MAIN TESTS PASSED >>\\n\", 'green')", "@staticmethod def _valid_weight_decays(model): weight_decays = get_weight_decays(model) trues = 0 for", "[True, False]: self._test_control(optimizer_name, amsgrad=amsgrad) print(\"\\n>> AdamW amsgrad={} {}\".format(amsgrad, pass_txt)) elif", "for batch_num in range(num_batches): self.t_cur_history += [K_eval(self.model.optimizer.t_cur, K)] self.eta_history +=", "self._test_control(optimizer_name, nesterov=nesterov) print(\"\\n>> SGDW nesterov={} {}\".format(nesterov, pass_txt)) o_name = optimizer_name", "= np.random.randint(0, embed_input_dim, (num_batches, batch_size, timesteps)) else: X = np.random.randn(num_batches," ]
[ "source_type, reporting_ocpazurecostlineitem_project_daily_summary.cluster_id, reporting_ocpazurecostlineitem_project_daily_summary.cluster_alias, reporting_ocpazurecostlineitem_project_daily_summary.data_source, reporting_ocpazurecostlineitem_project_daily_summary.namespace, reporting_ocpazurecostlineitem_project_daily_summary.node, reporting_ocpazurecostlineitem_project_daily_summary.pod_labels, reporting_ocpazurecostlineitem_project_daily_summary.resource_id, reporting_ocpazurecostlineitem_project_daily_summary.usage_start::date, reporting_ocpazurecostlineitem_project_daily_summary.usage_end::date,", "create index if not exists ocp_namespace_idx on reporting_ocpusagelineitem_daily using gin", "index=models.Index( fields=[\"namespace\"], name=\"ocp_storage_li_namespace_idx\", opclasses=[\"varchar_pattern_ops\"] ), ), migrations.AddIndex( model_name=\"ocpstoragelineitemdaily\", index=models.Index(fields=[\"node\"], name=\"ocp_storage_li_node_idx\",", "django.db import models class Migration(migrations.Migration): dependencies = [(\"reporting\", \"0098_auto_20200221_2034\")] operations", "reporting_ocpawscostlineitem_project_daily_summary.usage_end::date, reporting_ocpawscostlineitem_project_daily_summary.usage_account_id, reporting_ocpawscostlineitem_project_daily_summary.account_alias_id, reporting_ocpawscostlineitem_project_daily_summary.product_code, reporting_ocpawscostlineitem_project_daily_summary.product_family, reporting_ocpawscostlineitem_project_daily_summary.instance_type, reporting_ocpawscostlineitem_project_daily_summary.region, reporting_ocpawscostlineitem_project_daily_summary.availability_zone, reporting_ocpawscostlineitem_project_daily_summary.usage_amount, reporting_ocpawscostlineitem_project_daily_summary.unit,", "exists ocp_summary_namespace_like_idx on reporting_ocpusagelineitem_daily_summary using gin (UPPER(namespace) gin_trgm_ops); /* add", "migrations.AddIndex( model_name=\"ocpstoragelineitemdaily\", index=models.Index(fields=[\"node\"], 
name=\"ocp_storage_li_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpusagelineitemdaily\", index=models.Index(fields=[\"namespace\"], name=\"namespace_idx\",", "reporting_ocpazurecostlineitem_daily_summary WHERE reporting_ocpazurecostlineitem_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now()) - '1 day'::interval", "using gin (namespace gin_trgm_ops); create index mv_reporting_ocpallcostlineitem_prj_daily_summary_node_like_ix on reporting_ocpallcostlineitem_project_daily_summary using", "opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpazurecostlineitemprojectdailysummary\", index=models.Index( fields=[\"namespace\"], name=\"ocpazure_proj_namespace_idx\", opclasses=[\"varchar_pattern_ops\"] ), ),", "opclasses=[\"varchar_pattern_ops\"] ), ), migrations.AddIndex( model_name=\"ocpstoragelineitemdaily\", index=models.Index(fields=[\"node\"], name=\"ocp_storage_li_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex(", "), ), migrations.AddIndex( model_name=\"ocpazurecostlineitemprojectdailysummary\", index=models.Index(fields=[\"node\"], name=\"ocpazure_proj_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpstoragelineitemdaily\",", "reporting_ocpawscostlineitem_daily_summary.project_costs FROM reporting_ocpawscostlineitem_daily_summary WHERE reporting_ocpawscostlineitem_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now()) -", "migrations.AddIndex( model_name=\"costsummary\", index=models.Index( fields=[\"namespace\"], name=\"ocpcostsum_namespace_idx\", opclasses=[\"varchar_pattern_ops\"] ), ), migrations.AddIndex( model_name=\"costsummary\",", "lids.usage_end, lids.usage_account_id, lids.account_alias_id, lids.product_code, lids.product_family, lids.instance_type, lids.region, lids.availability_zone, lids.tags, lids.usage_amount,", "# ocp azure project cost 
migrations.RunSQL( \"\"\" /* add namespace", "reporting_ocpazurecostlineitem_project_daily_summary.cluster_id, reporting_ocpazurecostlineitem_project_daily_summary.cluster_alias, reporting_ocpazurecostlineitem_project_daily_summary.data_source, reporting_ocpazurecostlineitem_project_daily_summary.namespace, reporting_ocpazurecostlineitem_project_daily_summary.node, reporting_ocpazurecostlineitem_project_daily_summary.pod_labels, reporting_ocpazurecostlineitem_project_daily_summary.resource_id, reporting_ocpazurecostlineitem_project_daily_summary.usage_start::date, reporting_ocpazurecostlineitem_project_daily_summary.usage_end::date, reporting_ocpazurecostlineitem_project_daily_summary.subscription_guid", "like trigram ops */ create index if not exists cost__proj_sum_node_like_idx", "reporting_ocpusagelineitem_daily_summary using gin (UPPER(namespace) gin_trgm_ops); /* add node index for", "name=\"usage_end\", field=models.DateField() ), migrations.AlterField( model_name=\"ocpazurecostlineitemdailysummary\", name=\"usage_start\", field=models.DateField() ), migrations.AlterField( model_name=\"ocpazurecostlineitemprojectdailysummary\",", "), migrations.AddIndex( model_name=\"ocpstoragelineitemdaily\", index=models.Index( fields=[\"namespace\"], name=\"ocp_storage_li_namespace_idx\", opclasses=[\"varchar_pattern_ops\"] ), ), migrations.AddIndex(", "create materialized view if not exists reporting_ocpallcostlineitem_project_daily_summary as SELECT row_number()", "model_name=\"ocpazurecostlineitemdailysummary\", index=models.Index(fields=[\"node\"], name=\"ocpazure_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpazurecostlineitemprojectdailysummary\", index=models.Index( fields=[\"namespace\"], name=\"ocpazure_proj_namespace_idx\",", "not exists ocpazure_proj_node_like_idx on reporting_ocpazurecostlineitem_project_daily_summary using gin (UPPER(node) gin_trgm_ops); \"\"\"", "reporting_ocpazurecostlineitem_daily_summary.node, 
reporting_ocpazurecostlineitem_daily_summary.resource_id, reporting_ocpazurecostlineitem_daily_summary.usage_start::date, reporting_ocpazurecostlineitem_daily_summary.usage_end::date, reporting_ocpazurecostlineitem_daily_summary.subscription_guid AS usage_account_id, NULL::integer AS account_alias_id,", "on reporting_ocpallcostlineitem_daily_summary using gin (namespace); create index mv_reporting_ocpallcostlineitem_daily_summary_node_ix on reporting_ocpallcostlineitem_daily_summary", "migrations.RunSQL( \"\"\" refresh materialized view reporting_ocpallcostlineitem_daily_summary; refresh materialized view reporting_ocpallcostlineitem_project_daily_summary;", "cases will specifically help with case-insensitive # and contains (vs", "name=\"usage_end\", field=models.DateField() ), migrations.AlterField( model_name=\"ocpawscostlineitemprojectdailysummary\", name=\"usage_start\", field=models.DateField() ), migrations.AlterField( model_name=\"ocpazurecostlineitemdailysummary\",", "source_type, reporting_ocpawscostlineitem_project_daily_summary.cluster_id, reporting_ocpawscostlineitem_project_daily_summary.cluster_alias, reporting_ocpawscostlineitem_project_daily_summary.data_source, reporting_ocpawscostlineitem_project_daily_summary.namespace, reporting_ocpawscostlineitem_project_daily_summary.node, reporting_ocpawscostlineitem_project_daily_summary.pod_labels, reporting_ocpawscostlineitem_project_daily_summary.resource_id, reporting_ocpawscostlineitem_project_daily_summary.usage_start::date, reporting_ocpawscostlineitem_project_daily_summary.usage_end::date,", "specifically with \"col LIKE %val%\" # operations. 
(As long as", "NULL::character varying AS availability_zone, reporting_ocpazurecostlineitem_daily_summary.tags, reporting_ocpazurecostlineitem_daily_summary.usage_quantity AS usage_amount, reporting_ocpazurecostlineitem_daily_summary.unit_of_measure AS", "lids.product_family, lids.instance_type, lids.region, lids.availability_zone, lids.usage_amount, lids.unit, lids.unblended_cost, lids.project_markup_cost, lids.pod_cost, lids.currency_code", "migrations.AlterField(model_name=\"ocpusagelineitemdailysummary\", name=\"usage_end\", field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdailysummary\", name=\"usage_start\", field=models.DateField()), migrations.AddIndex( model_name=\"ocpawscostlineitemdailysummary\", index=models.Index(fields=[\"node\"], name=\"cost_summary_node_idx\",", "text searching. # These cases will specifically help with case-insensitive", "index=models.Index( fields=[\"namespace\"], name=\"ocpcostsum_namespace_idx\", opclasses=[\"varchar_pattern_ops\"] ), ), migrations.AddIndex( model_name=\"costsummary\", index=models.Index(fields=[\"node\"], name=\"ocpcostsum_node_idx\",", "ocp_storage_li_node_like_idx on reporting_ocpstoragelineitem_daily using gin (UPPER(node) gin_trgm_ops); \"\"\" ), #", "lids.product_code, lids.product_family, lids.instance_type, lids.region, lids.availability_zone, lids.usage_amount, lids.unit, lids.unblended_cost, lids.project_markup_cost, lids.pod_cost,", "gin_trgm_ops); create index mv_reporting_ocpallcostlineitem_prj_daily_summary_usage_ix on reporting_ocpallcostlineitem_project_daily_summary (usage_start); \"\"\" ), migrations.RunSQL(", "), ), migrations.AddIndex( model_name=\"ocpawscostlineitemprojectdailysummary\", index=models.Index(fields=[\"node\"], name=\"cost_proj_sum_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpazurecostlineitemdailysummary\",", "if not exists cost__proj_sum_node_like_idx on 
reporting_ocpawscostlineitem_project_daily_summary using gin (UPPER(node) gin_trgm_ops);", "trigram ops */ create index if not exists ocp_storage_li_node_like_idx on", "*/ create index if not exists cost__proj_sum_namespace_like_idx on reporting_ocpawscostlineitem_project_daily_summary using", "name=\"cost__proj_sum_namespace_idx\", opclasses=[\"varchar_pattern_ops\"] ), ), migrations.AddIndex( model_name=\"ocpawscostlineitemprojectdailysummary\", index=models.Index(fields=[\"node\"], name=\"cost_proj_sum_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ),", "*/ create index if not exists cost__proj_sum_node_like_idx on reporting_ocpawscostlineitem_project_daily_summary using", "gin_trgm_ops); create index mv_reporting_ocpallcostlineitem_prj_daily_summary_node_like_ix on reporting_ocpallcostlineitem_project_daily_summary using gin (node gin_trgm_ops);", "opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpazurecostlineitemdailysummary\", index=models.Index(fields=[\"node\"], name=\"ocpazure_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpazurecostlineitemprojectdailysummary\",", "exists ocpazure_proj_namespace_like_idx on reporting_ocpazurecostlineitem_project_daily_summary using gin (UPPER(namespace) gin_trgm_ops); /* add", "name=\"ocpazure_proj_namespace_idx\", opclasses=[\"varchar_pattern_ops\"] ), ), migrations.AddIndex( model_name=\"ocpazurecostlineitemprojectdailysummary\", index=models.Index(fields=[\"node\"], name=\"ocpazure_proj_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ),", "field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdailysummary\", name=\"usage_start\", field=models.DateField()), migrations.AddIndex( model_name=\"ocpawscostlineitemdailysummary\", index=models.Index(fields=[\"node\"], name=\"cost_summary_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ),", "model_name=\"costsummary\", index=models.Index(fields=[\"node\"], 
name=\"ocpcostsum_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"costsummary\", index=django.contrib.postgres.indexes.GinIndex(fields=[\"pod_labels\"], name=\"ocpcostsum_pod_labels_idx\"), ),", "create index mv_reporting_ocpallcostlineitem_prj_daily_summary_node_ix on reporting_ocpallcostlineitem_project_daily_summary (node varchar_pattern_ops); create index mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_like_ix", "on reporting_ocpazurecostlineitem_project_daily_summary using gin (UPPER(node) gin_trgm_ops); \"\"\" ), # reporting_ocpawscostlineitem_daily_summary", "ocp_storage_li_namespace_like_idx on reporting_ocpstoragelineitem_daily using gin (UPPER(namespace) gin_trgm_ops); /* add node", "ocp usage line item daily summary migrations.RunSQL( \"\"\" /* add", "varchar_pattern_ops); create index mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_like_ix on reporting_ocpallcostlineitem_project_daily_summary using gin (namespace gin_trgm_ops);", "AS id, lids.source_type, lids.cluster_id, lids.cluster_alias, lids.namespace, lids.node, lids.resource_id, lids.usage_start, lids.usage_end,", "create index if not exists ocp_node_idx on reporting_ocpusagelineitem_daily using gin", "gin (UPPER(node) gin_trgm_ops); \"\"\" ), migrations.RunSQL( \"\"\" drop materialized view", "lids.markup_cost, lids.currency_code, lids.shared_projects, lids.project_costs FROM ( SELECT 'AWS'::text AS source_type,", "reporting_ocpawscostlineitem_daily_summary.cluster_id, reporting_ocpawscostlineitem_daily_summary.cluster_alias, reporting_ocpawscostlineitem_daily_summary.namespace, reporting_ocpawscostlineitem_daily_summary.node, reporting_ocpawscostlineitem_daily_summary.resource_id, reporting_ocpawscostlineitem_daily_summary.usage_start::date, reporting_ocpawscostlineitem_daily_summary.usage_end::date, reporting_ocpawscostlineitem_daily_summary.usage_account_id, reporting_ocpawscostlineitem_daily_summary.account_alias_id, 
reporting_ocpawscostlineitem_daily_summary.product_code,", "reporting_ocpazurecostlineitem_daily_summary.resource_id, reporting_ocpazurecostlineitem_daily_summary.usage_start::date, reporting_ocpazurecostlineitem_daily_summary.usage_end::date, reporting_ocpazurecostlineitem_daily_summary.subscription_guid AS usage_account_id, NULL::integer AS account_alias_id, reporting_ocpazurecostlineitem_daily_summary.service_name", "model_name=\"ocpawscostlineitemprojectdailysummary\", name=\"usage_end\", field=models.DateField() ), migrations.AlterField( model_name=\"ocpawscostlineitemprojectdailysummary\", name=\"usage_start\", field=models.DateField() ), migrations.AlterField(", "create index if not exists ocpcostsum_namespace_like_idx on reporting_ocpcosts_summary using gin", "migrations.AddIndex( model_name=\"ocpawscostlineitemdailysummary\", index=models.Index(fields=[\"node\"], name=\"cost_summary_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpawscostlineitemprojectdailysummary\", index=models.Index( fields=[\"namespace\"],", "*/ create index if not exists ocpazure_proj_namespace_like_idx on reporting_ocpazurecostlineitem_project_daily_summary using", "*/ create index if not exists ocpazure_proj_node_like_idx on reporting_ocpazurecostlineitem_project_daily_summary using", "reporting_ocpazurecostlineitem_daily_summary using gin (UPPER(node) gin_trgm_ops); \"\"\" ), # ocp azure", "like trigram ops */ create index if not exists cost_summary_node_like_idx", "exists cost_summary_node_like_idx on reporting_ocpawscostlineitem_daily_summary using gin (UPPER(node) gin_trgm_ops); \"\"\" ),", "lids.pod_cost, lids.currency_code FROM ( SELECT 'AWS'::text AS source_type, reporting_ocpawscostlineitem_project_daily_summary.cluster_id, reporting_ocpawscostlineitem_project_daily_summary.cluster_alias,", "not exists pg_trgm schema public; \"\"\" ), # Create indexes", "name=\"summary_namespace_idx\", opclasses=[\"varchar_pattern_ops\"]), ), 
migrations.AddIndex( model_name=\"ocpusagelineitemdailysummary\", index=models.Index(fields=[\"node\"], name=\"summary_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AlterField(model_name=\"costsummary\",", "name=\"summary_namespace_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdailysummary\", name=\"summary_node_idx\"), migrations.AlterField( model_name=\"ocpawscostlineitemprojectdailysummary\", name=\"usage_end\", field=models.DateField() ), migrations.AlterField( model_name=\"ocpawscostlineitemprojectdailysummary\",", "), # This extension will help specifically with \"col LIKE", "migrations.RemoveIndex(model_name=\"ocpusagelineitemdaily\", name=\"node_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdailysummary\", name=\"summary_namespace_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdailysummary\", name=\"summary_node_idx\"), migrations.AlterField( model_name=\"ocpawscostlineitemprojectdailysummary\", name=\"usage_end\", field=models.DateField()", "reporting_ocpazurecostlineitem_project_daily_summary.usage_start::date, reporting_ocpazurecostlineitem_project_daily_summary.usage_end::date, reporting_ocpazurecostlineitem_project_daily_summary.subscription_guid AS usage_account_id, NULL::integer AS account_alias_id, reporting_ocpazurecostlineitem_project_daily_summary.service_name AS", "AS account_alias_id, reporting_ocpazurecostlineitem_daily_summary.service_name AS product_code, NULL::character varying AS product_family, reporting_ocpazurecostlineitem_daily_summary.instance_type,", "account_alias_id, reporting_ocpazurecostlineitem_daily_summary.service_name AS product_code, NULL::character varying AS product_family, reporting_ocpazurecostlineitem_daily_summary.instance_type, reporting_ocpazurecostlineitem_daily_summary.resource_location", "with case-insensitive # and contains (vs startswith) searches # ocp", "reporting_ocpazurecostlineitem_project_daily_summary.resource_location AS region, NULL::character 
varying AS availability_zone, reporting_ocpazurecostlineitem_project_daily_summary.usage_quantity AS usage_amount,", "help with case-insensitive # and contains (vs startswith) searches #", "exists reporting_ocpallcostlineitem_daily_summary as SELECT row_number() OVER () AS id, lids.source_type,", "trigram ops */ create index if not exists ocp_summary_namespace_like_idx on", "day) UNION SELECT 'Azure'::text AS source_type, reporting_ocpazurecostlineitem_daily_summary.cluster_id, reporting_ocpazurecostlineitem_daily_summary.cluster_alias, reporting_ocpazurecostlineitem_daily_summary.namespace, reporting_ocpazurecostlineitem_daily_summary.node,", "name=\"total_seconds\", field=models.IntegerField()), migrations.AlterField(model_name=\"ocpusagelineitemdaily\", name=\"usage_end\", field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdaily\", name=\"usage_start\", field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdailysummary\", name=\"usage_end\",", "not exists ocp_summary_node_like_idx on reporting_ocpusagelineitem_daily_summary using gin (UPPER(node) gin_trgm_ops); \"\"\"", "ops */ create index if not exists cost__proj_sum_namespace_like_idx on reporting_ocpawscostlineitem_project_daily_summary", "lids.product_family, lids.instance_type, lids.region, lids.availability_zone, lids.tags, lids.usage_amount, lids.unit, lids.unblended_cost, lids.markup_cost, lids.currency_code,", "availability_zone, reporting_ocpazurecostlineitem_daily_summary.tags, reporting_ocpazurecostlineitem_daily_summary.usage_quantity AS usage_amount, reporting_ocpazurecostlineitem_daily_summary.unit_of_measure AS unit, reporting_ocpazurecostlineitem_daily_summary.pretax_cost AS", "as val is at least 3 characters) migrations.RunSQL( \"\"\" create", "cost__proj_sum_namespace_like_idx on reporting_ocpawscostlineitem_project_daily_summary using gin (UPPER(namespace) gin_trgm_ops); /* add node", "), 
migrations.RemoveIndex(model_name=\"ocpawscostlineitemprojectdailysummary\", name=\"cost_proj_sum_node_idx\"), migrations.RemoveIndex(model_name=\"ocpazurecostlineitemdailysummary\", name=\"ocpazure_node_idx\"), migrations.RemoveIndex( model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"ocpazure_proj_namespace_idx\" ), migrations.RemoveIndex(model_name=\"ocpazurecostlineitemprojectdailysummary\",", "view if exists reporting_ocpallcostlineitem_project_daily_summary; create materialized view if not exists", "varchar_pattern_ops); create index mv_reporting_ocpallcostlineitem_daily_summary_usage_ix on reporting_ocpallcostlineitem_daily_summary (usage_start); drop materialized view", "if exists reporting_ocpallcostlineitem_daily_summary; drop materialized view if exists reporting_ocpallcostlineitem_project_daily_summary; \"\"\"", "'1 day'::interval day)) lids with no data; create index mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_ix", "name=\"cost_summary_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpawscostlineitemprojectdailysummary\", index=models.Index( fields=[\"namespace\"], name=\"cost__proj_sum_namespace_idx\", opclasses=[\"varchar_pattern_ops\"] ),", "gin (UPPER(node) gin_trgm_ops); \"\"\" ), # ocp usage line item", "ocpazure_node_like_idx on reporting_ocpazurecostlineitem_daily_summary using gin (UPPER(node) gin_trgm_ops); \"\"\" ), #", "reporting_ocpusagelineitem_daily using gin (UPPER(node) gin_trgm_ops); \"\"\" ), # ocp usage", "model_name=\"ocpstoragelineitemdaily\", index=models.Index(fields=[\"node\"], name=\"ocp_storage_li_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpusagelineitemdaily\", index=models.Index(fields=[\"namespace\"], name=\"namespace_idx\", opclasses=[\"varchar_pattern_ops\"]),", "name=\"cost__proj_sum_namespace_idx\" ), migrations.RemoveIndex(model_name=\"ocpawscostlineitemprojectdailysummary\", 
name=\"cost_proj_sum_node_idx\"), migrations.RemoveIndex(model_name=\"ocpazurecostlineitemdailysummary\", name=\"ocpazure_node_idx\"), migrations.RemoveIndex( model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"ocpazure_proj_namespace_idx\" ),", "migrations.AlterField(model_name=\"ocpstoragelineitemdaily\", name=\"usage_start\", field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdaily\", name=\"total_seconds\", field=models.IntegerField()), migrations.AlterField(model_name=\"ocpusagelineitemdaily\", name=\"usage_end\", field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdaily\",", "lids with no data; create index mv_reporting_ocpallcostlineitem_daily_summary_namespace_ix on reporting_ocpallcostlineitem_daily_summary using", "azure project cost migrations.RunSQL( \"\"\" /* add namespace index for", "index if not exists cost_summary_node_like_idx on reporting_ocpawscostlineitem_daily_summary using gin (UPPER(node)", "if exists reporting_ocpallcostlineitem_daily_summary; create materialized view if not exists reporting_ocpallcostlineitem_daily_summary", "migrations.RunSQL( \"\"\" create extension if not exists pg_trgm schema public;", "add node index for like trigram ops */ create index", "index mv_reporting_ocpallcostlineitem_daily_summary_node_ix on reporting_ocpallcostlineitem_daily_summary (node varchar_pattern_ops); create index mv_reporting_ocpallcostlineitem_daily_summary_usage_ix on", "), migrations.AddIndex( model_name=\"ocpawscostlineitemprojectdailysummary\", index=models.Index(fields=[\"node\"], name=\"cost_proj_sum_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpazurecostlineitemdailysummary\", index=models.Index(fields=[\"node\"],", "reporting_ocpawscostlineitem_daily_summary.account_alias_id, reporting_ocpawscostlineitem_daily_summary.product_code, reporting_ocpawscostlineitem_daily_summary.product_family, 
reporting_ocpawscostlineitem_daily_summary.instance_type, reporting_ocpawscostlineitem_daily_summary.region, reporting_ocpawscostlineitem_daily_summary.availability_zone, reporting_ocpawscostlineitem_daily_summary.tags, reporting_ocpawscostlineitem_daily_summary.usage_amount, reporting_ocpawscostlineitem_daily_summary.unit, reporting_ocpawscostlineitem_daily_summary.unblended_cost,", "if not exists reporting_ocpallcostlineitem_project_daily_summary as SELECT row_number() OVER () AS", "AS product_code, NULL::character varying AS product_family, reporting_ocpazurecostlineitem_project_daily_summary.instance_type, reporting_ocpazurecostlineitem_project_daily_summary.resource_location AS region,", "gin (UPPER(node) gin_trgm_ops); \"\"\" ), # ocp azure cost migrations.RunSQL(", "index mv_reporting_ocpallcostlineitem_prj_daily_summary_node_like_ix on reporting_ocpallcostlineitem_project_daily_summary using gin (node gin_trgm_ops); create index", "availability_zone, reporting_ocpazurecostlineitem_project_daily_summary.usage_quantity AS usage_amount, reporting_ocpazurecostlineitem_project_daily_summary.unit_of_measure AS unit, reporting_ocpazurecostlineitem_project_daily_summary.pretax_cost AS unblended_cost,", "reporting_ocpawscostlineitem_project_daily_summary.product_code, reporting_ocpawscostlineitem_project_daily_summary.product_family, reporting_ocpawscostlineitem_project_daily_summary.instance_type, reporting_ocpawscostlineitem_project_daily_summary.region, reporting_ocpawscostlineitem_project_daily_summary.availability_zone, reporting_ocpawscostlineitem_project_daily_summary.usage_amount, reporting_ocpawscostlineitem_project_daily_summary.unit, reporting_ocpawscostlineitem_project_daily_summary.unblended_cost, reporting_ocpawscostlineitem_project_daily_summary.project_markup_cost, reporting_ocpawscostlineitem_project_daily_summary.pod_cost,", "index mv_reporting_ocpallcostlineitem_prj_daily_summary_node_ix on reporting_ocpallcostlineitem_project_daily_summary (node 
varchar_pattern_ops); create index mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_like_ix on", "ocpazure_proj_node_like_idx on reporting_ocpazurecostlineitem_project_daily_summary using gin (UPPER(node) gin_trgm_ops); \"\"\" ), #", "WHERE reporting_ocpazurecostlineitem_project_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now()) - '1 day'::interval day))", "opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpusagelineitemdailysummary\", index=models.Index(fields=[\"namespace\"], name=\"summary_namespace_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpusagelineitemdailysummary\",", "reporting_ocpallcostlineitem_daily_summary; drop materialized view if exists reporting_ocpallcostlineitem_project_daily_summary; \"\"\" ), migrations.RemoveIndex(model_name=\"ocpawscostlineitemdailysummary\",", "lids.source_type, lids.cluster_id, lids.cluster_alias, lids.namespace, lids.node, lids.resource_id, lids.usage_start, lids.usage_end, lids.usage_account_id, lids.account_alias_id,", "varying AS product_family, reporting_ocpazurecostlineitem_daily_summary.instance_type, reporting_ocpazurecostlineitem_daily_summary.resource_location AS region, NULL::character varying AS", "reporting_ocpawscostlineitem_project_daily_summary.currency_code FROM reporting_ocpawscostlineitem_project_daily_summary WHERE reporting_ocpawscostlineitem_project_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now()) -", "reporting_ocpawscostlineitem_project_daily_summary.usage_account_id, reporting_ocpawscostlineitem_project_daily_summary.account_alias_id, reporting_ocpawscostlineitem_project_daily_summary.product_code, reporting_ocpawscostlineitem_project_daily_summary.product_family, reporting_ocpawscostlineitem_project_daily_summary.instance_type, reporting_ocpawscostlineitem_project_daily_summary.region, 
reporting_ocpawscostlineitem_project_daily_summary.availability_zone, reporting_ocpawscostlineitem_project_daily_summary.usage_amount, reporting_ocpawscostlineitem_project_daily_summary.unit, reporting_ocpawscostlineitem_project_daily_summary.unblended_cost,", "index=models.Index( fields=[\"namespace\"], name=\"cost__proj_sum_namespace_idx\", opclasses=[\"varchar_pattern_ops\"] ), ), migrations.AddIndex( model_name=\"ocpawscostlineitemprojectdailysummary\", index=models.Index(fields=[\"node\"], name=\"cost_proj_sum_node_idx\",", "reporting_ocpallcostlineitem_daily_summary as SELECT row_number() OVER () AS id, lids.source_type, lids.cluster_id,", "daily migrations.RunSQL( \"\"\" /* add namespace index for like trigram", "account_alias_id, reporting_ocpazurecostlineitem_project_daily_summary.service_name AS product_code, NULL::character varying AS product_family, reporting_ocpazurecostlineitem_project_daily_summary.instance_type, reporting_ocpazurecostlineitem_project_daily_summary.resource_location", "product_code, NULL::character varying AS product_family, reporting_ocpazurecostlineitem_daily_summary.instance_type, reporting_ocpazurecostlineitem_daily_summary.resource_location AS region, NULL::character", "currency_code FROM reporting_ocpazurecostlineitem_project_daily_summary WHERE reporting_ocpazurecostlineitem_project_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now()) -", "exists reporting_ocpallcostlineitem_daily_summary; create materialized view if not exists reporting_ocpallcostlineitem_daily_summary as", "lids.project_costs FROM ( SELECT 'AWS'::text AS source_type, reporting_ocpawscostlineitem_daily_summary.cluster_id, reporting_ocpawscostlineitem_daily_summary.cluster_alias, reporting_ocpawscostlineitem_daily_summary.namespace,", "reporting_ocpazurecostlineitem_daily_summary.usage_end::date, reporting_ocpazurecostlineitem_daily_summary.subscription_guid AS usage_account_id, NULL::integer AS account_alias_id, 
reporting_ocpazurecostlineitem_daily_summary.service_name AS product_code,", "on reporting_ocpallcostlineitem_project_daily_summary (usage_start); \"\"\" ), migrations.RunSQL( \"\"\" refresh materialized view", "Generated by Django 2.2.10 on 2020-02-18 12:51 import django.contrib.postgres.indexes from", "model_name=\"ocpazurecostlineitemdailysummary\", name=\"usage_start\", field=models.DateField() ), migrations.AlterField( model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"usage_end\", field=models.DateField() ), migrations.AlterField(", "schema public; \"\"\" ), # Create indexes to aid with", "gin (UPPER(node) gin_trgm_ops); \"\"\" ), # reporting_ocpawscostlineitem_daily_summary migrations.RunSQL( \"\"\" /*", "field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdailysummary\", name=\"usage_end\", field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdailysummary\", name=\"usage_start\", field=models.DateField()), migrations.AddIndex( model_name=\"ocpawscostlineitemdailysummary\", index=models.Index(fields=[\"node\"],", "- '1 day'::interval day)) lids with no data; create index", "lids.currency_code FROM ( SELECT 'AWS'::text AS source_type, reporting_ocpawscostlineitem_project_daily_summary.cluster_id, reporting_ocpawscostlineitem_project_daily_summary.cluster_alias, reporting_ocpawscostlineitem_project_daily_summary.data_source,", "), migrations.AddIndex( model_name=\"ocpazurecostlineitemdailysummary\", index=models.Index(fields=[\"node\"], name=\"ocpazure_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpazurecostlineitemprojectdailysummary\", index=models.Index(", "node index for like trigram ops */ create index if", "reporting_ocpawscostlineitem_daily_summary.usage_amount, reporting_ocpawscostlineitem_daily_summary.unit, reporting_ocpawscostlineitem_daily_summary.unblended_cost, reporting_ocpawscostlineitem_daily_summary.markup_cost, 
reporting_ocpawscostlineitem_daily_summary.currency_code, reporting_ocpawscostlineitem_daily_summary.shared_projects, reporting_ocpawscostlineitem_daily_summary.project_costs FROM reporting_ocpawscostlineitem_daily_summary WHERE", "reporting_ocpawscostlineitem_project_daily_summary WHERE reporting_ocpawscostlineitem_project_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now()) - '1 day'::interval", "reporting_ocpazurecostlineitem_project_daily_summary using gin (UPPER(node) gin_trgm_ops); \"\"\" ), # reporting_ocpawscostlineitem_daily_summary migrations.RunSQL(", "reporting_ocpawscostlineitem_project_daily_summary.instance_type, reporting_ocpawscostlineitem_project_daily_summary.region, reporting_ocpawscostlineitem_project_daily_summary.availability_zone, reporting_ocpawscostlineitem_project_daily_summary.usage_amount, reporting_ocpawscostlineitem_project_daily_summary.unit, reporting_ocpawscostlineitem_project_daily_summary.unblended_cost, reporting_ocpawscostlineitem_project_daily_summary.project_markup_cost, reporting_ocpawscostlineitem_project_daily_summary.pod_cost, reporting_ocpawscostlineitem_project_daily_summary.currency_code FROM", "12:51 import django.contrib.postgres.indexes from django.db import migrations from django.db import", "index=django.contrib.postgres.indexes.GinIndex(fields=[\"pod_labels\"], name=\"ocpcostsum_pod_labels_idx\"), ), # This extension will help specifically with", "trigram ops */ create index if not exists ocpazure_proj_node_like_idx on", "create index if not exists ocp_storage_li_namespace_like_idx on reporting_ocpstoragelineitem_daily using gin", "\"\"\" drop materialized view if exists reporting_ocpallcostlineitem_daily_summary; drop materialized view", "exists cost__proj_sum_node_like_idx on reporting_ocpawscostlineitem_project_daily_summary using gin (UPPER(node) gin_trgm_ops); \"\"\" ),", "# and contains (vs startswith) searches # ocp usage line", "on reporting_ocpusagelineitem_daily using 
gin (UPPER(node) gin_trgm_ops); \"\"\" ), # ocp", "name=\"summary_node_idx\"), migrations.AlterField( model_name=\"ocpawscostlineitemprojectdailysummary\", name=\"usage_end\", field=models.DateField() ), migrations.AlterField( model_name=\"ocpawscostlineitemprojectdailysummary\", name=\"usage_start\", field=models.DateField()", "lids.cluster_alias, lids.namespace, lids.node, lids.resource_id, lids.usage_start, lids.usage_end, lids.usage_account_id, lids.account_alias_id, lids.product_code, lids.product_family,", "reporting_ocpallcostlineitem_daily_summary (node varchar_pattern_ops); create index mv_reporting_ocpallcostlineitem_daily_summary_usage_ix on reporting_ocpallcostlineitem_daily_summary (usage_start); drop", "lids.account_alias_id, lids.product_code, lids.product_family, lids.instance_type, lids.region, lids.availability_zone, lids.tags, lids.usage_amount, lids.unit, lids.unblended_cost,", "usage_amount, reporting_ocpazurecostlineitem_project_daily_summary.unit_of_measure AS unit, reporting_ocpazurecostlineitem_project_daily_summary.pretax_cost AS unblended_cost, reporting_ocpazurecostlineitem_project_daily_summary.project_markup_cost, reporting_ocpazurecostlineitem_project_daily_summary.pod_cost, reporting_ocpazurecostlineitem_project_daily_summary.currency", "AS id, lids.source_type, lids.cluster_id, lids.cluster_alias, lids.data_source, lids.namespace, lids.node, lids.pod_labels, lids.resource_id,", "by Django 2.2.10 on 2020-02-18 12:51 import django.contrib.postgres.indexes from django.db", "drop materialized view if exists reporting_ocpallcostlineitem_daily_summary; drop materialized view if", ">= date_trunc('month'::text, date_trunc('month'::text, now()) - '1 day'::interval day) UNION SELECT", "source_type, reporting_ocpawscostlineitem_daily_summary.cluster_id, reporting_ocpawscostlineitem_daily_summary.cluster_alias, reporting_ocpawscostlineitem_daily_summary.namespace, reporting_ocpawscostlineitem_daily_summary.node, 
reporting_ocpawscostlineitem_daily_summary.resource_id, reporting_ocpawscostlineitem_daily_summary.usage_start::date, reporting_ocpawscostlineitem_daily_summary.usage_end::date, reporting_ocpawscostlineitem_daily_summary.usage_account_id, reporting_ocpawscostlineitem_daily_summary.account_alias_id,", "migrations.RemoveIndex(model_name=\"ocpusagelineitemdaily\", name=\"namespace_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdaily\", name=\"node_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdailysummary\", name=\"summary_namespace_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdailysummary\", name=\"summary_node_idx\"), migrations.AlterField( model_name=\"ocpawscostlineitemprojectdailysummary\",", "migrations.AlterField(model_name=\"costsummary\", name=\"usage_end\", field=models.DateField()), migrations.AlterField(model_name=\"costsummary\", name=\"usage_start\", field=models.DateField()), migrations.AddIndex( model_name=\"costsummary\", index=models.Index(fields=[\"usage_start\"], name=\"ocpcostsum_usage_start_idx\")", "[ migrations.RunSQL( \"\"\" drop materialized view if exists reporting_ocpallcostlineitem_daily_summary; drop", "using gin (UPPER(node) gin_trgm_ops); \"\"\" ), # ocp azure project", "AS usage_amount, reporting_ocpazurecostlineitem_project_daily_summary.unit_of_measure AS unit, reporting_ocpazurecostlineitem_project_daily_summary.pretax_cost AS unblended_cost, reporting_ocpazurecostlineitem_project_daily_summary.project_markup_cost, reporting_ocpazurecostlineitem_project_daily_summary.pod_cost,", "ops */ create index if not exists ocpazure_proj_node_like_idx on reporting_ocpazurecostlineitem_project_daily_summary", "migrations.AddIndex( model_name=\"ocpazurecostlineitemprojectdailysummary\", index=models.Index( fields=[\"namespace\"], name=\"ocpazure_proj_namespace_idx\", opclasses=[\"varchar_pattern_ops\"] ), ), migrations.AddIndex( model_name=\"ocpazurecostlineitemprojectdailysummary\",", 
"fields=[\"namespace\"], name=\"ocpazure_proj_namespace_idx\", opclasses=[\"varchar_pattern_ops\"] ), ), migrations.AddIndex( model_name=\"ocpazurecostlineitemprojectdailysummary\", index=models.Index(fields=[\"node\"], name=\"ocpazure_proj_node_idx\", opclasses=[\"varchar_pattern_ops\"]),", "These cases will specifically help with case-insensitive # and contains", "exists reporting_ocpallcostlineitem_project_daily_summary as SELECT row_number() OVER () AS id, lids.source_type,", "reporting_ocpazurecostlineitem_project_daily_summary.subscription_guid AS usage_account_id, NULL::integer AS account_alias_id, reporting_ocpazurecostlineitem_project_daily_summary.service_name AS product_code, NULL::character", "\"\"\" ), # ocp azure project cost migrations.RunSQL( \"\"\" /*", "), # reporting_ocpstoragelineitem_daily migrations.RunSQL( \"\"\" /* add namespace index for", "no data; create index mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_ix on reporting_ocpallcostlineitem_project_daily_summary (namespace varchar_pattern_ops); create", "extension will help specifically with \"col LIKE %val%\" # operations.", "ocp_summary_namespace_like_idx on reporting_ocpusagelineitem_daily_summary using gin (UPPER(namespace) gin_trgm_ops); /* add node", "ops */ create index if not exists ocp_node_idx on reporting_ocpusagelineitem_daily", "*/ create index if not exists ocp_node_idx on reporting_ocpusagelineitem_daily using", "not exists ocp_node_idx on reporting_ocpusagelineitem_daily using gin (UPPER(node) gin_trgm_ops); \"\"\"", "as SELECT row_number() OVER () AS id, lids.source_type, lids.cluster_id, lids.cluster_alias,", "reporting_ocpawscostlineitem_project_daily_summary.pod_cost, reporting_ocpawscostlineitem_project_daily_summary.currency_code FROM reporting_ocpawscostlineitem_project_daily_summary WHERE reporting_ocpawscostlineitem_project_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now())", "lids.pod_labels, lids.resource_id, 
lids.usage_start, lids.usage_end, lids.usage_account_id, lids.account_alias_id, lids.product_code, lids.product_family, lids.instance_type, lids.region,", "reporting_ocpusagelineitem_daily_summary using gin (UPPER(node) gin_trgm_ops); \"\"\" ), # reporting_ocpstoragelineitem_daily migrations.RunSQL(", "row_number() OVER () AS id, lids.source_type, lids.cluster_id, lids.cluster_alias, lids.data_source, lids.namespace,", "like trigram ops */ create index if not exists ocpazure_proj_node_like_idx", "SELECT row_number() OVER () AS id, lids.source_type, lids.cluster_id, lids.cluster_alias, lids.namespace,", "'Azure'::text AS source_type, reporting_ocpazurecostlineitem_daily_summary.cluster_id, reporting_ocpazurecostlineitem_daily_summary.cluster_alias, reporting_ocpazurecostlineitem_daily_summary.namespace, reporting_ocpazurecostlineitem_daily_summary.node, reporting_ocpazurecostlineitem_daily_summary.resource_id, reporting_ocpazurecostlineitem_daily_summary.usage_start::date, reporting_ocpazurecostlineitem_daily_summary.usage_end::date,", "), migrations.AddIndex( model_name=\"ocpusagelineitemdaily\", index=models.Index(fields=[\"node\"], name=\"node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpusagelineitemdailysummary\", index=models.Index(fields=[\"namespace\"],", "name=\"ocpcostsum_usage_start_idx\") ), migrations.AddIndex( model_name=\"costsummary\", index=models.Index( fields=[\"namespace\"], name=\"ocpcostsum_namespace_idx\", opclasses=[\"varchar_pattern_ops\"] ), ),", "() AS id, lids.source_type, lids.cluster_id, lids.cluster_alias, lids.data_source, lids.namespace, lids.node, lids.pod_labels,", "lids.region, lids.availability_zone, lids.usage_amount, lids.unit, lids.unblended_cost, lids.project_markup_cost, lids.pod_cost, lids.currency_code FROM (", "lids.namespace, lids.node, lids.resource_id, lids.usage_start, lids.usage_end, lids.usage_account_id, lids.account_alias_id, lids.product_code, lids.product_family, 
lids.instance_type,", "daily summary migrations.RunSQL( \"\"\" /* add namespace index for like", "gin (namespace gin_trgm_ops); create index mv_reporting_ocpallcostlineitem_prj_daily_summary_node_like_ix on reporting_ocpallcostlineitem_project_daily_summary using gin", "summary migrations.RunSQL( \"\"\" /* add namespace index for like trigram", "with \"col LIKE %val%\" # operations. (As long as val", "\"\"\" refresh materialized view reporting_ocpallcostlineitem_daily_summary; refresh materialized view reporting_ocpallcostlineitem_project_daily_summary; \"\"\"", "ops */ create index if not exists ocp_summary_node_like_idx on reporting_ocpusagelineitem_daily_summary", "reporting_ocpazurecostlineitem_daily_summary.project_costs FROM reporting_ocpazurecostlineitem_daily_summary WHERE reporting_ocpazurecostlineitem_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now()) -", "ops */ create index if not exists ocpcostsum_node_like_idx on reporting_ocpcosts_summary", "migrations from django.db import models class Migration(migrations.Migration): dependencies = [(\"reporting\",", "varying AS availability_zone, reporting_ocpazurecostlineitem_project_daily_summary.usage_quantity AS usage_amount, reporting_ocpazurecostlineitem_project_daily_summary.unit_of_measure AS unit, reporting_ocpazurecostlineitem_project_daily_summary.pretax_cost", "materialized view if exists reporting_ocpallcostlineitem_daily_summary; drop materialized view if exists", "on reporting_ocpallcostlineitem_project_daily_summary using gin (node gin_trgm_ops); create index mv_reporting_ocpallcostlineitem_prj_daily_summary_usage_ix on", "name=\"usage_start\", field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdaily\", name=\"total_seconds\", field=models.IntegerField()), migrations.AlterField(model_name=\"ocpusagelineitemdaily\", name=\"usage_end\", field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdaily\", 
name=\"usage_start\",", "), # Create indexes to aid with text searching. #", "migrations.AddIndex( model_name=\"ocpstoragelineitemdaily\", index=models.Index( fields=[\"namespace\"], name=\"ocp_storage_li_namespace_idx\", opclasses=[\"varchar_pattern_ops\"] ), ), migrations.AddIndex( model_name=\"ocpstoragelineitemdaily\",", "like trigram ops */ create index if not exists ocp_storage_li_node_like_idx", "NULL::integer AS account_alias_id, reporting_ocpazurecostlineitem_project_daily_summary.service_name AS product_code, NULL::character varying AS product_family,", "case-insensitive # and contains (vs startswith) searches # ocp usage", "), migrations.AlterField( model_name=\"ocpawscostlineitemprojectdailysummary\", name=\"usage_start\", field=models.DateField() ), migrations.AlterField( model_name=\"ocpazurecostlineitemdailysummary\", name=\"usage_end\", field=models.DateField()", "), migrations.RemoveIndex(model_name=\"ocpawscostlineitemdailysummary\", name=\"cost_summary_node_idx\"), migrations.RemoveIndex( model_name=\"ocpawscostlineitemprojectdailysummary\", name=\"cost__proj_sum_namespace_idx\" ), migrations.RemoveIndex(model_name=\"ocpawscostlineitemprojectdailysummary\", name=\"cost_proj_sum_node_idx\"), migrations.RemoveIndex(model_name=\"ocpazurecostlineitemdailysummary\",", "if not exists ocp_summary_node_like_idx on reporting_ocpusagelineitem_daily_summary using gin (UPPER(node) gin_trgm_ops);", "gin (UPPER(node) gin_trgm_ops); \"\"\" ), # reporting_ocpstoragelineitem_daily migrations.RunSQL( \"\"\" /*", "import models class Migration(migrations.Migration): dependencies = [(\"reporting\", \"0098_auto_20200221_2034\")] operations =", "reporting_ocpawscostlineitem_daily_summary.unit, reporting_ocpawscostlineitem_daily_summary.unblended_cost, reporting_ocpawscostlineitem_daily_summary.markup_cost, reporting_ocpawscostlineitem_daily_summary.currency_code, reporting_ocpawscostlineitem_daily_summary.shared_projects, 
reporting_ocpawscostlineitem_daily_summary.project_costs FROM reporting_ocpawscostlineitem_daily_summary WHERE reporting_ocpawscostlineitem_daily_summary.usage_start", "reporting_ocpstoragelineitem_daily migrations.RunSQL( \"\"\" /* add namespace index for like trigram", "no data; create index mv_reporting_ocpallcostlineitem_daily_summary_namespace_ix on reporting_ocpallcostlineitem_daily_summary using gin (namespace);", "reporting_ocpallcostlineitem_project_daily_summary; create materialized view if not exists reporting_ocpallcostlineitem_project_daily_summary as SELECT", "lids.usage_amount, lids.unit, lids.unblended_cost, lids.markup_cost, lids.currency_code, lids.shared_projects, lids.project_costs FROM ( SELECT", "reporting_ocpazurecostlineitem_project_daily_summary WHERE reporting_ocpazurecostlineitem_project_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now()) - '1 day'::interval", "migrations.AddIndex( model_name=\"costsummary\", index=models.Index(fields=[\"usage_start\"], name=\"ocpcostsum_usage_start_idx\") ), migrations.AddIndex( model_name=\"costsummary\", index=models.Index( fields=[\"namespace\"], name=\"ocpcostsum_namespace_idx\",", "reporting_ocpazurecostlineitem_project_daily_summary using gin (UPPER(namespace) gin_trgm_ops); /* add node index for", "), migrations.AddIndex( model_name=\"costsummary\", index=django.contrib.postgres.indexes.GinIndex(fields=[\"pod_labels\"], name=\"ocpcostsum_pod_labels_idx\"), ), # This extension will", "item daily migrations.RunSQL( \"\"\" /* add namespace index for like", "day)) lids with no data; create index mv_reporting_ocpallcostlineitem_daily_summary_namespace_ix on reporting_ocpallcostlineitem_daily_summary", "), migrations.AddIndex( model_name=\"ocpazurecostlineitemprojectdailysummary\", index=models.Index(fields=[\"node\"], name=\"ocpazure_proj_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpstoragelineitemdaily\", 
index=models.Index(", "'Azure'::text AS source_type, reporting_ocpazurecostlineitem_project_daily_summary.cluster_id, reporting_ocpazurecostlineitem_project_daily_summary.cluster_alias, reporting_ocpazurecostlineitem_project_daily_summary.data_source, reporting_ocpazurecostlineitem_project_daily_summary.namespace, reporting_ocpazurecostlineitem_project_daily_summary.node, reporting_ocpazurecostlineitem_project_daily_summary.pod_labels, reporting_ocpazurecostlineitem_project_daily_summary.resource_id,", "from django.db import migrations from django.db import models class Migration(migrations.Migration):", "day) UNION SELECT 'Azure'::text AS source_type, reporting_ocpazurecostlineitem_project_daily_summary.cluster_id, reporting_ocpazurecostlineitem_project_daily_summary.cluster_alias, reporting_ocpazurecostlineitem_project_daily_summary.data_source, reporting_ocpazurecostlineitem_project_daily_summary.namespace,", "%val%\" # operations. (As long as val is at least", "opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpawscostlineitemprojectdailysummary\", index=models.Index( fields=[\"namespace\"], name=\"cost__proj_sum_namespace_idx\", opclasses=[\"varchar_pattern_ops\"] ), ),", "FROM ( SELECT 'AWS'::text AS source_type, reporting_ocpawscostlineitem_project_daily_summary.cluster_id, reporting_ocpawscostlineitem_project_daily_summary.cluster_alias, reporting_ocpawscostlineitem_project_daily_summary.data_source, reporting_ocpawscostlineitem_project_daily_summary.namespace,", "field=models.DateField() ), migrations.AlterField( model_name=\"ocpawscostlineitemprojectdailysummary\", name=\"usage_start\", field=models.DateField() ), migrations.AlterField( model_name=\"ocpazurecostlineitemdailysummary\", name=\"usage_end\",", "using gin (UPPER(node) gin_trgm_ops); \"\"\" ), # reporting_ocpawscostlineitem_project_daily_summary migrations.RunSQL( \"\"\"", "model_name=\"costsummary\", index=models.Index(fields=[\"usage_start\"], 
name=\"ocpcostsum_usage_start_idx\") ), migrations.AddIndex( model_name=\"costsummary\", index=models.Index( fields=[\"namespace\"], name=\"ocpcostsum_namespace_idx\", opclasses=[\"varchar_pattern_ops\"]", "name=\"ocpcostsum_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"costsummary\", index=django.contrib.postgres.indexes.GinIndex(fields=[\"pod_labels\"], name=\"ocpcostsum_pod_labels_idx\"), ), # This", "dependencies = [(\"reporting\", \"0098_auto_20200221_2034\")] operations = [ migrations.RunSQL( \"\"\" drop", "AS source_type, reporting_ocpawscostlineitem_daily_summary.cluster_id, reporting_ocpawscostlineitem_daily_summary.cluster_alias, reporting_ocpawscostlineitem_daily_summary.namespace, reporting_ocpawscostlineitem_daily_summary.node, reporting_ocpawscostlineitem_daily_summary.resource_id, reporting_ocpawscostlineitem_daily_summary.usage_start::date, reporting_ocpawscostlineitem_daily_summary.usage_end::date, reporting_ocpawscostlineitem_daily_summary.usage_account_id,", "reporting_ocpawscostlineitem_project_daily_summary.namespace, reporting_ocpawscostlineitem_project_daily_summary.node, reporting_ocpawscostlineitem_project_daily_summary.pod_labels, reporting_ocpawscostlineitem_project_daily_summary.resource_id, reporting_ocpawscostlineitem_project_daily_summary.usage_start::date, reporting_ocpawscostlineitem_project_daily_summary.usage_end::date, reporting_ocpawscostlineitem_project_daily_summary.usage_account_id, reporting_ocpawscostlineitem_project_daily_summary.account_alias_id, reporting_ocpawscostlineitem_project_daily_summary.product_code, reporting_ocpawscostlineitem_project_daily_summary.product_family,", "reporting_ocpazurecostlineitem_project_daily_summary.pod_labels, reporting_ocpazurecostlineitem_project_daily_summary.resource_id, reporting_ocpazurecostlineitem_project_daily_summary.usage_start::date, reporting_ocpazurecostlineitem_project_daily_summary.usage_end::date, 
reporting_ocpazurecostlineitem_project_daily_summary.subscription_guid AS usage_account_id, NULL::integer AS account_alias_id,", "(namespace gin_trgm_ops); create index mv_reporting_ocpallcostlineitem_prj_daily_summary_node_like_ix on reporting_ocpallcostlineitem_project_daily_summary using gin (node", "reporting_ocpusagelineitem_daily using gin (UPPER(namespace) gin_trgm_ops); /* add node index for", "index if not exists ocpazure_node_like_idx on reporting_ocpazurecostlineitem_daily_summary using gin (UPPER(node)", "view if not exists reporting_ocpallcostlineitem_project_daily_summary as SELECT row_number() OVER ()", "\"\"\" ), # reporting_ocpstoragelineitem_daily migrations.RunSQL( \"\"\" /* add namespace index", "lids.usage_start, lids.usage_end, lids.usage_account_id, lids.account_alias_id, lids.product_code, lids.product_family, lids.instance_type, lids.region, lids.availability_zone, lids.tags,", "), migrations.AddIndex( model_name=\"ocpusagelineitemdailysummary\", index=models.Index(fields=[\"node\"], name=\"summary_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AlterField(model_name=\"costsummary\", name=\"usage_end\", field=models.DateField()),", "model_name=\"costsummary\", index=django.contrib.postgres.indexes.GinIndex(fields=[\"pod_labels\"], name=\"ocpcostsum_pod_labels_idx\"), ), # This extension will help specifically", "model_name=\"ocpazurecostlineitemprojectdailysummary\", index=models.Index(fields=[\"node\"], name=\"ocpazure_proj_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpstoragelineitemdaily\", index=models.Index( fields=[\"namespace\"], name=\"ocp_storage_li_namespace_idx\",", "usage_account_id, NULL::integer AS account_alias_id, reporting_ocpazurecostlineitem_project_daily_summary.service_name AS product_code, NULL::character varying AS", "reporting_ocpazurecostlineitem_daily_summary.instance_type, reporting_ocpazurecostlineitem_daily_summary.resource_location AS region, 
NULL::character varying AS availability_zone, reporting_ocpazurecostlineitem_daily_summary.tags, reporting_ocpazurecostlineitem_daily_summary.usage_quantity", "mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_ix on reporting_ocpallcostlineitem_project_daily_summary (namespace varchar_pattern_ops); create index mv_reporting_ocpallcostlineitem_prj_daily_summary_node_ix on reporting_ocpallcostlineitem_project_daily_summary", "name=\"ocpazure_proj_node_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdaily\", name=\"namespace_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdaily\", name=\"node_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdailysummary\", name=\"summary_namespace_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdailysummary\", name=\"summary_node_idx\"), migrations.AlterField(", "data; create index mv_reporting_ocpallcostlineitem_daily_summary_namespace_ix on reporting_ocpallcostlineitem_daily_summary using gin (namespace); create", "index if not exists ocpazure_proj_node_like_idx on reporting_ocpazurecostlineitem_project_daily_summary using gin (UPPER(node)", "ops */ create index if not exists cost__proj_sum_node_like_idx on reporting_ocpawscostlineitem_project_daily_summary", "reporting_ocpazurecostlineitem_daily_summary.usage_quantity AS usage_amount, reporting_ocpazurecostlineitem_daily_summary.unit_of_measure AS unit, reporting_ocpazurecostlineitem_daily_summary.pretax_cost AS unblended_cost, reporting_ocpazurecostlineitem_daily_summary.markup_cost,", "if not exists ocp_namespace_idx on reporting_ocpusagelineitem_daily using gin (UPPER(namespace) gin_trgm_ops);", "AS product_code, NULL::character varying AS product_family, reporting_ocpazurecostlineitem_daily_summary.instance_type, reporting_ocpazurecostlineitem_daily_summary.resource_location AS region,", "using gin (UPPER(node) gin_trgm_ops); \"\"\" ), # reporting_ocpawscostlineitem_daily_summary migrations.RunSQL( \"\"\"", "ocp azure 
cost migrations.RunSQL( \"\"\" /* add node index for", "not exists cost__proj_sum_node_like_idx on reporting_ocpawscostlineitem_project_daily_summary using gin (UPPER(node) gin_trgm_ops); \"\"\"", "item daily summary migrations.RunSQL( \"\"\" /* add namespace index for", "\"\"\" ), # ocp usage line item daily summary migrations.RunSQL(", "UNION SELECT 'Azure'::text AS source_type, reporting_ocpazurecostlineitem_project_daily_summary.cluster_id, reporting_ocpazurecostlineitem_project_daily_summary.cluster_alias, reporting_ocpazurecostlineitem_project_daily_summary.data_source, reporting_ocpazurecostlineitem_project_daily_summary.namespace, reporting_ocpazurecostlineitem_project_daily_summary.node,", "AS unblended_cost, reporting_ocpazurecostlineitem_project_daily_summary.project_markup_cost, reporting_ocpazurecostlineitem_project_daily_summary.pod_cost, reporting_ocpazurecostlineitem_project_daily_summary.currency AS currency_code FROM reporting_ocpazurecostlineitem_project_daily_summary WHERE", "django.db import migrations from django.db import models class Migration(migrations.Migration): dependencies", "opclasses=[\"varchar_pattern_ops\"]), ), migrations.AlterField(model_name=\"costsummary\", name=\"usage_end\", field=models.DateField()), migrations.AlterField(model_name=\"costsummary\", name=\"usage_start\", field=models.DateField()), migrations.AddIndex( model_name=\"costsummary\",", "not exists cost_summary_node_like_idx on reporting_ocpawscostlineitem_daily_summary using gin (UPPER(node) gin_trgm_ops); \"\"\"", "if exists reporting_ocpallcostlineitem_project_daily_summary; create materialized view if not exists reporting_ocpallcostlineitem_project_daily_summary", "trigram ops */ create index if not exists ocp_summary_node_like_idx on", "reporting_ocpazurecostlineitem_project_daily_summary.data_source, reporting_ocpazurecostlineitem_project_daily_summary.namespace, reporting_ocpazurecostlineitem_project_daily_summary.node, 
reporting_ocpazurecostlineitem_project_daily_summary.pod_labels, reporting_ocpazurecostlineitem_project_daily_summary.resource_id, reporting_ocpazurecostlineitem_project_daily_summary.usage_start::date, reporting_ocpazurecostlineitem_project_daily_summary.usage_end::date, reporting_ocpazurecostlineitem_project_daily_summary.subscription_guid AS usage_account_id,", "lids.unit, lids.unblended_cost, lids.project_markup_cost, lids.pod_cost, lids.currency_code FROM ( SELECT 'AWS'::text AS", "not exists reporting_ocpallcostlineitem_daily_summary as SELECT row_number() OVER () AS id,", "FROM reporting_ocpazurecostlineitem_daily_summary WHERE reporting_ocpazurecostlineitem_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now()) - '1", "lids.currency_code, lids.shared_projects, lids.project_costs FROM ( SELECT 'AWS'::text AS source_type, reporting_ocpawscostlineitem_daily_summary.cluster_id,", "index if not exists ocpcostsum_namespace_like_idx on reporting_ocpcosts_summary using gin (UPPER(namespace)", "reporting_ocpawscostlineitem_daily_summary.unblended_cost, reporting_ocpawscostlineitem_daily_summary.markup_cost, reporting_ocpawscostlineitem_daily_summary.currency_code, reporting_ocpawscostlineitem_daily_summary.shared_projects, reporting_ocpawscostlineitem_daily_summary.project_costs FROM reporting_ocpawscostlineitem_daily_summary WHERE reporting_ocpawscostlineitem_daily_summary.usage_start >=", "reporting_ocpallcostlineitem_project_daily_summary (usage_start); \"\"\" ), migrations.RunSQL( \"\"\" refresh materialized view reporting_ocpallcostlineitem_daily_summary;", "for like trigram ops */ create index if not exists", "trigram ops */ create index if not exists ocpcostsum_node_like_idx on", "if not exists ocp_node_idx on reporting_ocpusagelineitem_daily using gin (UPPER(node) gin_trgm_ops);", "), migrations.AlterField( model_name=\"ocpazurecostlineitemdailysummary\", name=\"usage_start\", field=models.DateField() ), 
migrations.AlterField( model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"usage_end\", field=models.DateField()", "SELECT 'Azure'::text AS source_type, reporting_ocpazurecostlineitem_project_daily_summary.cluster_id, reporting_ocpazurecostlineitem_project_daily_summary.cluster_alias, reporting_ocpazurecostlineitem_project_daily_summary.data_source, reporting_ocpazurecostlineitem_project_daily_summary.namespace, reporting_ocpazurecostlineitem_project_daily_summary.node, reporting_ocpazurecostlineitem_project_daily_summary.pod_labels,", "if not exists ocpazure_proj_node_like_idx on reporting_ocpazurecostlineitem_project_daily_summary using gin (UPPER(node) gin_trgm_ops);", "with text searching. # These cases will specifically help with", "), ), migrations.AddIndex( model_name=\"ocpstoragelineitemdaily\", index=models.Index(fields=[\"node\"], name=\"ocp_storage_li_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpusagelineitemdaily\",", "drop materialized view if exists reporting_ocpallcostlineitem_project_daily_summary; create materialized view if", "reporting_ocpazurecostlineitem_project_daily_summary.project_markup_cost, reporting_ocpazurecostlineitem_project_daily_summary.pod_cost, reporting_ocpazurecostlineitem_project_daily_summary.currency AS currency_code FROM reporting_ocpazurecostlineitem_project_daily_summary WHERE reporting_ocpazurecostlineitem_project_daily_summary.usage_start >=", "on 2020-02-18 12:51 import django.contrib.postgres.indexes from django.db import migrations from", "will help specifically with \"col LIKE %val%\" # operations. 
(As", "\"\"\" ), # ocp azure cost migrations.RunSQL( \"\"\" /* add", "reporting_ocpallcostlineitem_daily_summary using gin (namespace); create index mv_reporting_ocpallcostlineitem_daily_summary_node_ix on reporting_ocpallcostlineitem_daily_summary (node", "index mv_reporting_ocpallcostlineitem_daily_summary_usage_ix on reporting_ocpallcostlineitem_daily_summary (usage_start); drop materialized view if exists", "lids.availability_zone, lids.usage_amount, lids.unit, lids.unblended_cost, lids.project_markup_cost, lids.pod_cost, lids.currency_code FROM ( SELECT", "migrations.AddIndex( model_name=\"ocpusagelineitemdailysummary\", index=models.Index(fields=[\"namespace\"], name=\"summary_namespace_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpusagelineitemdailysummary\", index=models.Index(fields=[\"node\"], name=\"summary_node_idx\",", "least 3 characters) migrations.RunSQL( \"\"\" create extension if not exists", "using gin (namespace); create index mv_reporting_ocpallcostlineitem_daily_summary_node_ix on reporting_ocpallcostlineitem_daily_summary (node varchar_pattern_ops);", "on reporting_ocpusagelineitem_daily_summary using gin (UPPER(namespace) gin_trgm_ops); /* add node index", "reporting_ocpazurecostlineitem_project_daily_summary.pretax_cost AS unblended_cost, reporting_ocpazurecostlineitem_project_daily_summary.project_markup_cost, reporting_ocpazurecostlineitem_project_daily_summary.pod_cost, reporting_ocpazurecostlineitem_project_daily_summary.currency AS currency_code FROM reporting_ocpazurecostlineitem_project_daily_summary", "index if not exists ocpcostsum_node_like_idx on reporting_ocpcosts_summary using gin (UPPER(node)", "migrations.RemoveIndex(model_name=\"ocpusagelineitemdailysummary\", name=\"summary_namespace_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdailysummary\", name=\"summary_node_idx\"), migrations.AlterField( model_name=\"ocpawscostlineitemprojectdailysummary\", name=\"usage_end\", 
field=models.DateField() ), migrations.AlterField(", "*/ create index if not exists ocp_summary_node_like_idx on reporting_ocpusagelineitem_daily_summary using", "AS currency_code, reporting_ocpazurecostlineitem_daily_summary.shared_projects, reporting_ocpazurecostlineitem_daily_summary.project_costs FROM reporting_ocpazurecostlineitem_daily_summary WHERE reporting_ocpazurecostlineitem_daily_summary.usage_start >= date_trunc('month'::text,", "refresh materialized view reporting_ocpallcostlineitem_daily_summary; refresh materialized view reporting_ocpallcostlineitem_project_daily_summary; \"\"\" ),", "name=\"usage_start\", field=models.DateField() ), migrations.AlterField(model_name=\"ocpstoragelineitemdaily\", name=\"usage_end\", field=models.DateField()), migrations.AlterField(model_name=\"ocpstoragelineitemdaily\", name=\"usage_start\", field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdaily\",", "name=\"ocpazure_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpazurecostlineitemprojectdailysummary\", index=models.Index( fields=[\"namespace\"], name=\"ocpazure_proj_namespace_idx\", opclasses=[\"varchar_pattern_ops\"] ),", "index=models.Index(fields=[\"node\"], name=\"ocpazure_proj_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpstoragelineitemdaily\", index=models.Index( fields=[\"namespace\"], name=\"ocp_storage_li_namespace_idx\", opclasses=[\"varchar_pattern_ops\"]", "ops */ create index if not exists ocpazure_proj_namespace_like_idx on reporting_ocpazurecostlineitem_project_daily_summary", "field=models.DateField()), migrations.AddIndex( model_name=\"costsummary\", index=models.Index(fields=[\"usage_start\"], name=\"ocpcostsum_usage_start_idx\") ), migrations.AddIndex( model_name=\"costsummary\", index=models.Index( fields=[\"namespace\"],", "*/ create index if not exists ocp_storage_li_node_like_idx on reporting_ocpstoragelineitem_daily using", 
"trigram ops */ create index if not exists ocp_namespace_idx on", "create index mv_reporting_ocpallcostlineitem_prj_daily_summary_usage_ix on reporting_ocpallcostlineitem_project_daily_summary (usage_start); \"\"\" ), migrations.RunSQL( \"\"\"", "reporting_ocpallcostlineitem_project_daily_summary; \"\"\" ), migrations.RemoveIndex(model_name=\"ocpawscostlineitemdailysummary\", name=\"cost_summary_node_idx\"), migrations.RemoveIndex( model_name=\"ocpawscostlineitemprojectdailysummary\", name=\"cost__proj_sum_namespace_idx\" ), migrations.RemoveIndex(model_name=\"ocpawscostlineitemprojectdailysummary\",", "reporting_ocpawscostlineitem_daily_summary.product_family, reporting_ocpawscostlineitem_daily_summary.instance_type, reporting_ocpawscostlineitem_daily_summary.region, reporting_ocpawscostlineitem_daily_summary.availability_zone, reporting_ocpawscostlineitem_daily_summary.tags, reporting_ocpawscostlineitem_daily_summary.usage_amount, reporting_ocpawscostlineitem_daily_summary.unit, reporting_ocpawscostlineitem_daily_summary.unblended_cost, reporting_ocpawscostlineitem_daily_summary.markup_cost, reporting_ocpawscostlineitem_daily_summary.currency_code,", "2020-02-18 12:51 import django.contrib.postgres.indexes from django.db import migrations from django.db", "region, NULL::character varying AS availability_zone, reporting_ocpazurecostlineitem_project_daily_summary.usage_quantity AS usage_amount, reporting_ocpazurecostlineitem_project_daily_summary.unit_of_measure AS", "create index mv_reporting_ocpallcostlineitem_daily_summary_namespace_ix on reporting_ocpallcostlineitem_daily_summary using gin (namespace); create index", "materialized view if exists reporting_ocpallcostlineitem_project_daily_summary; \"\"\" ), migrations.RemoveIndex(model_name=\"ocpawscostlineitemdailysummary\", name=\"cost_summary_node_idx\"), migrations.RemoveIndex(", "SELECT row_number() OVER () AS id, lids.source_type, lids.cluster_id, lids.cluster_alias, lids.data_source,", "exists 
ocp_summary_node_like_idx on reporting_ocpusagelineitem_daily_summary using gin (UPPER(node) gin_trgm_ops); \"\"\" ),", "model_name=\"ocpusagelineitemdaily\", index=models.Index(fields=[\"namespace\"], name=\"namespace_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpusagelineitemdaily\", index=models.Index(fields=[\"node\"], name=\"node_idx\", opclasses=[\"varchar_pattern_ops\"]),", "reporting_ocpazurecostlineitem_daily_summary.resource_location AS region, NULL::character varying AS availability_zone, reporting_ocpazurecostlineitem_daily_summary.tags, reporting_ocpazurecostlineitem_daily_summary.usage_quantity AS", "migrations.RemoveIndex(model_name=\"ocpusagelineitemdailysummary\", name=\"summary_node_idx\"), migrations.AlterField( model_name=\"ocpawscostlineitemprojectdailysummary\", name=\"usage_end\", field=models.DateField() ), migrations.AlterField( model_name=\"ocpawscostlineitemprojectdailysummary\", name=\"usage_start\",", "on reporting_ocpazurecostlineitem_daily_summary using gin (UPPER(node) gin_trgm_ops); \"\"\" ), # ocp", "exists reporting_ocpallcostlineitem_project_daily_summary; create materialized view if not exists reporting_ocpallcostlineitem_project_daily_summary as", "like trigram ops */ create index if not exists ocpcostsum_node_like_idx", "lids.instance_type, lids.region, lids.availability_zone, lids.tags, lids.usage_amount, lids.unit, lids.unblended_cost, lids.markup_cost, lids.currency_code, lids.shared_projects,", "exists ocp_namespace_idx on reporting_ocpusagelineitem_daily using gin (UPPER(namespace) gin_trgm_ops); /* add", "), # reporting_ocpawscostlineitem_project_daily_summary migrations.RunSQL( \"\"\" /* add namespace index for", "FROM reporting_ocpawscostlineitem_project_daily_summary WHERE reporting_ocpawscostlineitem_project_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now()) - '1", "migrations.RemoveIndex(model_name=\"ocpawscostlineitemprojectdailysummary\", 
name=\"cost_proj_sum_node_idx\"), migrations.RemoveIndex(model_name=\"ocpazurecostlineitemdailysummary\", name=\"ocpazure_node_idx\"), migrations.RemoveIndex( model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"ocpazure_proj_namespace_idx\" ), migrations.RemoveIndex(model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"ocpazure_proj_node_idx\"),", "migrations.AlterField( model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"usage_start\", field=models.DateField() ), migrations.AlterField(model_name=\"ocpstoragelineitemdaily\", name=\"usage_end\", field=models.DateField()), migrations.AlterField(model_name=\"ocpstoragelineitemdaily\", name=\"usage_start\",", "field=models.DateField() ), migrations.AlterField( model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"usage_end\", field=models.DateField() ), migrations.AlterField( model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"usage_start\",", "), migrations.AlterField( model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"usage_end\", field=models.DateField() ), migrations.AlterField( model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"usage_start\", field=models.DateField()", "'AWS'::text AS source_type, reporting_ocpawscostlineitem_daily_summary.cluster_id, reporting_ocpawscostlineitem_daily_summary.cluster_alias, reporting_ocpawscostlineitem_daily_summary.namespace, reporting_ocpawscostlineitem_daily_summary.node, reporting_ocpawscostlineitem_daily_summary.resource_id, reporting_ocpawscostlineitem_daily_summary.usage_start::date, reporting_ocpawscostlineitem_daily_summary.usage_end::date,", "id, lids.source_type, lids.cluster_id, lids.cluster_alias, lids.data_source, lids.namespace, lids.node, lids.pod_labels, lids.resource_id, lids.usage_start,", "\"0098_auto_20200221_2034\")] operations = [ migrations.RunSQL( \"\"\" drop materialized view if", "field=models.DateField() ), migrations.AlterField(model_name=\"ocpstoragelineitemdaily\", 
name=\"usage_end\", field=models.DateField()), migrations.AlterField(model_name=\"ocpstoragelineitemdaily\", name=\"usage_start\", field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdaily\", name=\"total_seconds\",", "# reporting_ocpawscostlineitem_daily_summary migrations.RunSQL( \"\"\" /* add node index for like", "cost__proj_sum_node_like_idx on reporting_ocpawscostlineitem_project_daily_summary using gin (UPPER(node) gin_trgm_ops); \"\"\" ), #", "on reporting_ocpallcostlineitem_project_daily_summary (namespace varchar_pattern_ops); create index mv_reporting_ocpallcostlineitem_prj_daily_summary_node_ix on reporting_ocpallcostlineitem_project_daily_summary (node", "mv_reporting_ocpallcostlineitem_prj_daily_summary_node_like_ix on reporting_ocpallcostlineitem_project_daily_summary using gin (node gin_trgm_ops); create index mv_reporting_ocpallcostlineitem_prj_daily_summary_usage_ix", "gin_trgm_ops); \"\"\" ), migrations.RunSQL( \"\"\" drop materialized view if exists", "name=\"ocpcostsum_namespace_idx\", opclasses=[\"varchar_pattern_ops\"] ), ), migrations.AddIndex( model_name=\"costsummary\", index=models.Index(fields=[\"node\"], name=\"ocpcostsum_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ),", "at least 3 characters) migrations.RunSQL( \"\"\" create extension if not", "() AS id, lids.source_type, lids.cluster_id, lids.cluster_alias, lids.namespace, lids.node, lids.resource_id, lids.usage_start,", "materialized view if not exists reporting_ocpallcostlineitem_project_daily_summary as SELECT row_number() OVER", "create index if not exists ocpazure_node_like_idx on reporting_ocpazurecostlineitem_daily_summary using gin", "# Generated by Django 2.2.10 on 2020-02-18 12:51 import django.contrib.postgres.indexes", "lids.product_code, lids.product_family, lids.instance_type, lids.region, lids.availability_zone, lids.tags, lids.usage_amount, lids.unit, lids.unblended_cost, lids.markup_cost,", 
"reporting_ocpawscostlineitem_project_daily_summary migrations.RunSQL( \"\"\" /* add namespace index for like trigram", ">= date_trunc('month'::text, date_trunc('month'::text, now()) - '1 day'::interval day)) lids with", "(node varchar_pattern_ops); create index mv_reporting_ocpallcostlineitem_daily_summary_usage_ix on reporting_ocpallcostlineitem_daily_summary (usage_start); drop materialized", "trigram ops */ create index if not exists ocpazure_node_like_idx on", "name=\"ocpazure_node_idx\"), migrations.RemoveIndex( model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"ocpazure_proj_namespace_idx\" ), migrations.RemoveIndex(model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"ocpazure_proj_node_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdaily\", name=\"namespace_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdaily\",", "if not exists ocp_storage_li_node_like_idx on reporting_ocpstoragelineitem_daily using gin (UPPER(node) gin_trgm_ops);", "mv_reporting_ocpallcostlineitem_daily_summary_usage_ix on reporting_ocpallcostlineitem_daily_summary (usage_start); drop materialized view if exists reporting_ocpallcostlineitem_project_daily_summary;", "ocp usage line item daily migrations.RunSQL( \"\"\" /* add namespace", "LIKE %val%\" # operations. 
(As long as val is at", "like trigram ops */ create index if not exists ocpazure_proj_namespace_like_idx", "migrations.RemoveIndex( model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"ocpazure_proj_namespace_idx\" ), migrations.RemoveIndex(model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"ocpazure_proj_node_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdaily\", name=\"namespace_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdaily\", name=\"node_idx\"),", "field=models.DateField()), migrations.AddIndex( model_name=\"ocpawscostlineitemdailysummary\", index=models.Index(fields=[\"node\"], name=\"cost_summary_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpawscostlineitemprojectdailysummary\", index=models.Index(", "field=models.DateField() ), migrations.AlterField( model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"usage_start\", field=models.DateField() ), migrations.AlterField(model_name=\"ocpstoragelineitemdaily\", name=\"usage_end\", field=models.DateField()),", "on reporting_ocpusagelineitem_daily using gin (UPPER(namespace) gin_trgm_ops); /* add node index", "lids.namespace, lids.node, lids.pod_labels, lids.resource_id, lids.usage_start, lids.usage_end, lids.usage_account_id, lids.account_alias_id, lids.product_code, lids.product_family,", "line item daily summary migrations.RunSQL( \"\"\" /* add namespace index", "index mv_reporting_ocpallcostlineitem_daily_summary_namespace_ix on reporting_ocpallcostlineitem_daily_summary using gin (namespace); create index mv_reporting_ocpallcostlineitem_daily_summary_node_ix", "ocpazure_proj_namespace_like_idx on reporting_ocpazurecostlineitem_project_daily_summary using gin (UPPER(namespace) gin_trgm_ops); /* add node", "lids.usage_start, lids.usage_end, lids.usage_account_id, lids.account_alias_id, lids.product_code, lids.product_family, lids.instance_type, lids.region, lids.availability_zone, lids.usage_amount,", 
"date_trunc('month'::text, date_trunc('month'::text, now()) - '1 day'::interval day)) lids with no", "not exists ocp_namespace_idx on reporting_ocpusagelineitem_daily using gin (UPPER(namespace) gin_trgm_ops); /*", "data; create index mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_ix on reporting_ocpallcostlineitem_project_daily_summary (namespace varchar_pattern_ops); create index", "(UPPER(node) gin_trgm_ops); \"\"\" ), migrations.RunSQL( \"\"\" drop materialized view if", "gin (UPPER(node) gin_trgm_ops); \"\"\" ), # ocp azure project cost", "migrations.AddIndex( model_name=\"ocpusagelineitemdailysummary\", index=models.Index(fields=[\"node\"], name=\"summary_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AlterField(model_name=\"costsummary\", name=\"usage_end\", field=models.DateField()), migrations.AlterField(model_name=\"costsummary\",", "trigram ops */ create index if not exists cost_summary_node_like_idx on", "reporting_ocpcosts_summary migrations.RunSQL( \"\"\" /* add namespace index for like trigram", "migrations.RemoveIndex(model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"ocpazure_proj_node_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdaily\", name=\"namespace_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdaily\", name=\"node_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdailysummary\", name=\"summary_namespace_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdailysummary\", name=\"summary_node_idx\"),", "lids.cluster_id, lids.cluster_alias, lids.namespace, lids.node, lids.resource_id, lids.usage_start, lids.usage_end, lids.usage_account_id, lids.account_alias_id, lids.product_code,", "migrations.AlterField(model_name=\"ocpusagelineitemdaily\", name=\"usage_start\", field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdailysummary\", name=\"usage_end\", field=models.DateField()), 
migrations.AlterField(model_name=\"ocpusagelineitemdailysummary\", name=\"usage_start\", field=models.DateField()), migrations.AddIndex(", "extension if not exists pg_trgm schema public; \"\"\" ), #", "index=models.Index( fields=[\"namespace\"], name=\"ocpazure_proj_namespace_idx\", opclasses=[\"varchar_pattern_ops\"] ), ), migrations.AddIndex( model_name=\"ocpazurecostlineitemprojectdailysummary\", index=models.Index(fields=[\"node\"], name=\"ocpazure_proj_node_idx\",", "(UPPER(node) gin_trgm_ops); \"\"\" ), # reporting_ocpcosts_summary migrations.RunSQL( \"\"\" /* add", "reporting_ocpawscostlineitem_daily_summary.usage_end::date, reporting_ocpawscostlineitem_daily_summary.usage_account_id, reporting_ocpawscostlineitem_daily_summary.account_alias_id, reporting_ocpawscostlineitem_daily_summary.product_code, reporting_ocpawscostlineitem_daily_summary.product_family, reporting_ocpawscostlineitem_daily_summary.instance_type, reporting_ocpawscostlineitem_daily_summary.region, reporting_ocpawscostlineitem_daily_summary.availability_zone, reporting_ocpawscostlineitem_daily_summary.tags, reporting_ocpawscostlineitem_daily_summary.usage_amount,", "is at least 3 characters) migrations.RunSQL( \"\"\" create extension if", "gin_trgm_ops); \"\"\" ), # ocp azure cost migrations.RunSQL( \"\"\" /*", "on reporting_ocpallcostlineitem_daily_summary (node varchar_pattern_ops); create index mv_reporting_ocpallcostlineitem_daily_summary_usage_ix on reporting_ocpallcostlineitem_daily_summary (usage_start);", "model_name=\"ocpawscostlineitemprojectdailysummary\", name=\"usage_start\", field=models.DateField() ), migrations.AlterField( model_name=\"ocpazurecostlineitemdailysummary\", name=\"usage_end\", field=models.DateField() ), migrations.AlterField(", "index=models.Index(fields=[\"node\"], name=\"ocp_storage_li_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpusagelineitemdaily\", index=models.Index(fields=[\"namespace\"], 
name=\"namespace_idx\", opclasses=[\"varchar_pattern_ops\"]), ),", "reporting_ocpawscostlineitem_project_daily_summary.project_markup_cost, reporting_ocpawscostlineitem_project_daily_summary.pod_cost, reporting_ocpawscostlineitem_project_daily_summary.currency_code FROM reporting_ocpawscostlineitem_project_daily_summary WHERE reporting_ocpawscostlineitem_project_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text,", "reporting_ocpazurecostlineitem_daily_summary.currency AS currency_code, reporting_ocpazurecostlineitem_daily_summary.shared_projects, reporting_ocpazurecostlineitem_daily_summary.project_costs FROM reporting_ocpazurecostlineitem_daily_summary WHERE reporting_ocpazurecostlineitem_daily_summary.usage_start >=", "migrations.AlterField(model_name=\"costsummary\", name=\"usage_start\", field=models.DateField()), migrations.AddIndex( model_name=\"costsummary\", index=models.Index(fields=[\"usage_start\"], name=\"ocpcostsum_usage_start_idx\") ), migrations.AddIndex( model_name=\"costsummary\",", "), migrations.AlterField( model_name=\"ocpazurecostlineitemdailysummary\", name=\"usage_end\", field=models.DateField() ), migrations.AlterField( model_name=\"ocpazurecostlineitemdailysummary\", name=\"usage_start\", field=models.DateField()", "ocp_summary_node_like_idx on reporting_ocpusagelineitem_daily_summary using gin (UPPER(node) gin_trgm_ops); \"\"\" ), #", "# reporting_ocpstoragelineitem_daily migrations.RunSQL( \"\"\" /* add namespace index for like", "trigram ops */ create index if not exists cost__proj_sum_node_like_idx on", "create index mv_reporting_ocpallcostlineitem_daily_summary_node_ix on reporting_ocpallcostlineitem_daily_summary (node varchar_pattern_ops); create index mv_reporting_ocpallcostlineitem_daily_summary_usage_ix", "NULL::character varying AS product_family, reporting_ocpazurecostlineitem_project_daily_summary.instance_type, reporting_ocpazurecostlineitem_project_daily_summary.resource_location AS region, 
NULL::character varying", "id, lids.source_type, lids.cluster_id, lids.cluster_alias, lids.namespace, lids.node, lids.resource_id, lids.usage_start, lids.usage_end, lids.usage_account_id,", "not exists reporting_ocpallcostlineitem_project_daily_summary as SELECT row_number() OVER () AS id,", "on reporting_ocpallcostlineitem_project_daily_summary using gin (namespace gin_trgm_ops); create index mv_reporting_ocpallcostlineitem_prj_daily_summary_node_like_ix on", "), migrations.AddIndex( model_name=\"costsummary\", index=models.Index( fields=[\"namespace\"], name=\"ocpcostsum_namespace_idx\", opclasses=[\"varchar_pattern_ops\"] ), ), migrations.AddIndex(", "lids.unblended_cost, lids.project_markup_cost, lids.pod_cost, lids.currency_code FROM ( SELECT 'AWS'::text AS source_type,", "reporting_ocpazurecostlineitem_daily_summary.shared_projects, reporting_ocpazurecostlineitem_daily_summary.project_costs FROM reporting_ocpazurecostlineitem_daily_summary WHERE reporting_ocpazurecostlineitem_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now())", "name=\"summary_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AlterField(model_name=\"costsummary\", name=\"usage_end\", field=models.DateField()), migrations.AlterField(model_name=\"costsummary\", name=\"usage_start\", field=models.DateField()), migrations.AddIndex(", "field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdaily\", name=\"total_seconds\", field=models.IntegerField()), migrations.AlterField(model_name=\"ocpusagelineitemdaily\", name=\"usage_end\", field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdaily\", name=\"usage_start\", field=models.DateField()),", "2.2.10 on 2020-02-18 12:51 import django.contrib.postgres.indexes from django.db import migrations", "WHERE reporting_ocpazurecostlineitem_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now()) - '1 day'::interval day))", 
"opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpstoragelineitemdaily\", index=models.Index( fields=[\"namespace\"], name=\"ocp_storage_li_namespace_idx\", opclasses=[\"varchar_pattern_ops\"] ), ),", "unblended_cost, reporting_ocpazurecostlineitem_daily_summary.markup_cost, reporting_ocpazurecostlineitem_daily_summary.currency AS currency_code, reporting_ocpazurecostlineitem_daily_summary.shared_projects, reporting_ocpazurecostlineitem_daily_summary.project_costs FROM reporting_ocpazurecostlineitem_daily_summary WHERE", "SELECT 'AWS'::text AS source_type, reporting_ocpawscostlineitem_daily_summary.cluster_id, reporting_ocpawscostlineitem_daily_summary.cluster_alias, reporting_ocpawscostlineitem_daily_summary.namespace, reporting_ocpawscostlineitem_daily_summary.node, reporting_ocpawscostlineitem_daily_summary.resource_id, reporting_ocpawscostlineitem_daily_summary.usage_start::date,", "create index if not exists cost__proj_sum_node_like_idx on reporting_ocpawscostlineitem_project_daily_summary using gin", "gin (UPPER(node) gin_trgm_ops); \"\"\" ), # reporting_ocpcosts_summary migrations.RunSQL( \"\"\" /*", "currency_code, reporting_ocpazurecostlineitem_daily_summary.shared_projects, reporting_ocpazurecostlineitem_daily_summary.project_costs FROM reporting_ocpazurecostlineitem_daily_summary WHERE reporting_ocpazurecostlineitem_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text,", "(UPPER(node) gin_trgm_ops); \"\"\" ), # reporting_ocpawscostlineitem_project_daily_summary migrations.RunSQL( \"\"\" /* add", "date_trunc('month'::text, date_trunc('month'::text, now()) - '1 day'::interval day) UNION SELECT 'Azure'::text", "name=\"ocpazure_proj_namespace_idx\" ), migrations.RemoveIndex(model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"ocpazure_proj_node_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdaily\", name=\"namespace_idx\"), 
migrations.RemoveIndex(model_name=\"ocpusagelineitemdaily\", name=\"node_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdailysummary\", name=\"summary_namespace_idx\"),", "), # ocp usage line item daily summary migrations.RunSQL( \"\"\"", "if not exists ocpazure_proj_namespace_like_idx on reporting_ocpazurecostlineitem_project_daily_summary using gin (UPPER(namespace) gin_trgm_ops);", "\"\"\" drop materialized view if exists reporting_ocpallcostlineitem_daily_summary; create materialized view", "exists ocpcostsum_node_like_idx on reporting_ocpcosts_summary using gin (UPPER(node) gin_trgm_ops); \"\"\" ),", "import django.contrib.postgres.indexes from django.db import migrations from django.db import models", "\"\"\" ), migrations.RunSQL( \"\"\" refresh materialized view reporting_ocpallcostlineitem_daily_summary; refresh materialized", "reporting_ocpawscostlineitem_daily_summary.namespace, reporting_ocpawscostlineitem_daily_summary.node, reporting_ocpawscostlineitem_daily_summary.resource_id, reporting_ocpawscostlineitem_daily_summary.usage_start::date, reporting_ocpawscostlineitem_daily_summary.usage_end::date, reporting_ocpawscostlineitem_daily_summary.usage_account_id, reporting_ocpawscostlineitem_daily_summary.account_alias_id, reporting_ocpawscostlineitem_daily_summary.product_code, reporting_ocpawscostlineitem_daily_summary.product_family, reporting_ocpawscostlineitem_daily_summary.instance_type,", "opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpusagelineitemdaily\", index=models.Index(fields=[\"namespace\"], name=\"namespace_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpusagelineitemdaily\",", "exists cost__proj_sum_namespace_like_idx on reporting_ocpawscostlineitem_project_daily_summary using gin (UPPER(namespace) gin_trgm_ops); /* add", "migrations.AlterField(model_name=\"ocpusagelineitemdaily\", name=\"total_seconds\", field=models.IntegerField()), 
migrations.AlterField(model_name=\"ocpusagelineitemdaily\", name=\"usage_end\", field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdaily\", name=\"usage_start\", field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdailysummary\",", "usage line item daily migrations.RunSQL( \"\"\" /* add namespace index", "exists ocp_storage_li_node_like_idx on reporting_ocpstoragelineitem_daily using gin (UPPER(node) gin_trgm_ops); \"\"\" ),", "reporting_ocpawscostlineitem_project_daily_summary.resource_id, reporting_ocpawscostlineitem_project_daily_summary.usage_start::date, reporting_ocpawscostlineitem_project_daily_summary.usage_end::date, reporting_ocpawscostlineitem_project_daily_summary.usage_account_id, reporting_ocpawscostlineitem_project_daily_summary.account_alias_id, reporting_ocpawscostlineitem_project_daily_summary.product_code, reporting_ocpawscostlineitem_project_daily_summary.product_family, reporting_ocpawscostlineitem_project_daily_summary.instance_type, reporting_ocpawscostlineitem_project_daily_summary.region, reporting_ocpawscostlineitem_project_daily_summary.availability_zone,", "AS product_family, reporting_ocpazurecostlineitem_daily_summary.instance_type, reporting_ocpazurecostlineitem_daily_summary.resource_location AS region, NULL::character varying AS availability_zone,", "index mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_ix on reporting_ocpallcostlineitem_project_daily_summary (namespace varchar_pattern_ops); create index mv_reporting_ocpallcostlineitem_prj_daily_summary_node_ix on", "name=\"usage_end\", field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdailysummary\", name=\"usage_start\", field=models.DateField()), migrations.AddIndex( model_name=\"ocpawscostlineitemdailysummary\", index=models.Index(fields=[\"node\"], name=\"cost_summary_node_idx\", opclasses=[\"varchar_pattern_ops\"]),", "project cost migrations.RunSQL( \"\"\" /* add namespace index for 
like", "reporting_ocpazurecostlineitem_project_daily_summary.cluster_alias, reporting_ocpazurecostlineitem_project_daily_summary.data_source, reporting_ocpazurecostlineitem_project_daily_summary.namespace, reporting_ocpazurecostlineitem_project_daily_summary.node, reporting_ocpazurecostlineitem_project_daily_summary.pod_labels, reporting_ocpazurecostlineitem_project_daily_summary.resource_id, reporting_ocpazurecostlineitem_project_daily_summary.usage_start::date, reporting_ocpazurecostlineitem_project_daily_summary.usage_end::date, reporting_ocpazurecostlineitem_project_daily_summary.subscription_guid AS", "like trigram ops */ create index if not exists ocp_summary_namespace_like_idx", "reporting_ocpawscostlineitem_daily_summary WHERE reporting_ocpawscostlineitem_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now()) - '1 day'::interval", "AS usage_amount, reporting_ocpazurecostlineitem_daily_summary.unit_of_measure AS unit, reporting_ocpazurecostlineitem_daily_summary.pretax_cost AS unblended_cost, reporting_ocpazurecostlineitem_daily_summary.markup_cost, reporting_ocpazurecostlineitem_daily_summary.currency", "reporting_ocpawscostlineitem_project_daily_summary.unit, reporting_ocpawscostlineitem_project_daily_summary.unblended_cost, reporting_ocpawscostlineitem_project_daily_summary.project_markup_cost, reporting_ocpawscostlineitem_project_daily_summary.pod_cost, reporting_ocpawscostlineitem_project_daily_summary.currency_code FROM reporting_ocpawscostlineitem_project_daily_summary WHERE reporting_ocpawscostlineitem_project_daily_summary.usage_start >=", "specifically help with case-insensitive # and contains (vs startswith) searches", "day'::interval day) UNION SELECT 'Azure'::text AS source_type, reporting_ocpazurecostlineitem_daily_summary.cluster_id, reporting_ocpazurecostlineitem_daily_summary.cluster_alias, reporting_ocpazurecostlineitem_daily_summary.namespace,", "SELECT 'Azure'::text AS source_type, 
reporting_ocpazurecostlineitem_daily_summary.cluster_id, reporting_ocpazurecostlineitem_daily_summary.cluster_alias, reporting_ocpazurecostlineitem_daily_summary.namespace, reporting_ocpazurecostlineitem_daily_summary.node, reporting_ocpazurecostlineitem_daily_summary.resource_id, reporting_ocpazurecostlineitem_daily_summary.usage_start::date,", "opclasses=[\"varchar_pattern_ops\"] ), ), migrations.AddIndex( model_name=\"costsummary\", index=models.Index(fields=[\"node\"], name=\"ocpcostsum_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex(", "ops */ create index if not exists ocpcostsum_namespace_like_idx on reporting_ocpcosts_summary", "reporting_ocpazurecostlineitem_daily_summary.usage_start::date, reporting_ocpazurecostlineitem_daily_summary.usage_end::date, reporting_ocpazurecostlineitem_daily_summary.subscription_guid AS usage_account_id, NULL::integer AS account_alias_id, reporting_ocpazurecostlineitem_daily_summary.service_name AS", "# These cases will specifically help with case-insensitive # and", "model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"usage_end\", field=models.DateField() ), migrations.AlterField( model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"usage_start\", field=models.DateField() ), migrations.AlterField(model_name=\"ocpstoragelineitemdaily\",", "not exists ocp_storage_li_node_like_idx on reporting_ocpstoragelineitem_daily using gin (UPPER(node) gin_trgm_ops); \"\"\"", "using gin (UPPER(node) gin_trgm_ops); \"\"\" ), # reporting_ocpcosts_summary migrations.RunSQL( \"\"\"", "), migrations.AlterField(model_name=\"costsummary\", name=\"usage_end\", field=models.DateField()), migrations.AlterField(model_name=\"costsummary\", name=\"usage_start\", field=models.DateField()), migrations.AddIndex( model_name=\"costsummary\", index=models.Index(fields=[\"usage_start\"],", "index if not exists ocpazure_proj_namespace_like_idx on reporting_ocpazurecostlineitem_project_daily_summary using gin 
(UPPER(namespace)", "will specifically help with case-insensitive # and contains (vs startswith)", "on reporting_ocpstoragelineitem_daily using gin (UPPER(node) gin_trgm_ops); \"\"\" ), # ocp", "reporting_ocpawscostlineitem_daily_summary using gin (UPPER(node) gin_trgm_ops); \"\"\" ), # reporting_ocpawscostlineitem_project_daily_summary migrations.RunSQL(", "create index if not exists ocp_summary_namespace_like_idx on reporting_ocpusagelineitem_daily_summary using gin", "pg_trgm schema public; \"\"\" ), # Create indexes to aid", "index if not exists cost__proj_sum_namespace_like_idx on reporting_ocpawscostlineitem_project_daily_summary using gin (UPPER(namespace)", "create index mv_reporting_ocpallcostlineitem_daily_summary_usage_ix on reporting_ocpallcostlineitem_daily_summary (usage_start); drop materialized view if", "), migrations.AddIndex( model_name=\"ocpawscostlineitemprojectdailysummary\", index=models.Index( fields=[\"namespace\"], name=\"cost__proj_sum_namespace_idx\", opclasses=[\"varchar_pattern_ops\"] ), ), migrations.AddIndex(", "migrations.AddIndex( model_name=\"costsummary\", index=models.Index(fields=[\"node\"], name=\"ocpcostsum_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"costsummary\", index=django.contrib.postgres.indexes.GinIndex(fields=[\"pod_labels\"], name=\"ocpcostsum_pod_labels_idx\"),", "field=models.DateField()), migrations.AlterField(model_name=\"ocpstoragelineitemdaily\", name=\"usage_start\", field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdaily\", name=\"total_seconds\", field=models.IntegerField()), migrations.AlterField(model_name=\"ocpusagelineitemdaily\", name=\"usage_end\", field=models.DateField()),", "AS usage_account_id, NULL::integer AS account_alias_id, reporting_ocpazurecostlineitem_daily_summary.service_name AS product_code, NULL::character varying", "lids.data_source, lids.namespace, lids.node, lids.pod_labels, lids.resource_id, lids.usage_start, 
lids.usage_end, lids.usage_account_id, lids.account_alias_id, lids.product_code,", "ops */ create index if not exists ocpazure_node_like_idx on reporting_ocpazurecostlineitem_daily_summary", "migrations.RunSQL( \"\"\" drop materialized view if exists reporting_ocpallcostlineitem_daily_summary; create materialized", "create materialized view if not exists reporting_ocpallcostlineitem_daily_summary as SELECT row_number()", "), ), migrations.AddIndex( model_name=\"costsummary\", index=models.Index(fields=[\"node\"], name=\"ocpcostsum_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"costsummary\",", "index if not exists ocp_storage_li_node_like_idx on reporting_ocpstoragelineitem_daily using gin (UPPER(node)", "AS source_type, reporting_ocpawscostlineitem_project_daily_summary.cluster_id, reporting_ocpawscostlineitem_project_daily_summary.cluster_alias, reporting_ocpawscostlineitem_project_daily_summary.data_source, reporting_ocpawscostlineitem_project_daily_summary.namespace, reporting_ocpawscostlineitem_project_daily_summary.node, reporting_ocpawscostlineitem_project_daily_summary.pod_labels, reporting_ocpawscostlineitem_project_daily_summary.resource_id, reporting_ocpawscostlineitem_project_daily_summary.usage_start::date,", "# operations. 
(As long as val is at least 3", "model_name=\"ocpawscostlineitemprojectdailysummary\", index=models.Index( fields=[\"namespace\"], name=\"cost__proj_sum_namespace_idx\", opclasses=[\"varchar_pattern_ops\"] ), ), migrations.AddIndex( model_name=\"ocpawscostlineitemprojectdailysummary\", index=models.Index(fields=[\"node\"],", "'1 day'::interval day) UNION SELECT 'Azure'::text AS source_type, reporting_ocpazurecostlineitem_daily_summary.cluster_id, reporting_ocpazurecostlineitem_daily_summary.cluster_alias,", "lids.cluster_alias, lids.data_source, lids.namespace, lids.node, lids.pod_labels, lids.resource_id, lids.usage_start, lids.usage_end, lids.usage_account_id, lids.account_alias_id,", "ocp_node_idx on reporting_ocpusagelineitem_daily using gin (UPPER(node) gin_trgm_ops); \"\"\" ), #", "\"\"\" ), # reporting_ocpcosts_summary migrations.RunSQL( \"\"\" /* add namespace index", "lids.node, lids.pod_labels, lids.resource_id, lids.usage_start, lids.usage_end, lids.usage_account_id, lids.account_alias_id, lids.product_code, lids.product_family, lids.instance_type,", "FROM reporting_ocpawscostlineitem_daily_summary WHERE reporting_ocpawscostlineitem_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now()) - '1", "), migrations.AlterField(model_name=\"ocpstoragelineitemdaily\", name=\"usage_end\", field=models.DateField()), migrations.AlterField(model_name=\"ocpstoragelineitemdaily\", name=\"usage_start\", field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdaily\", name=\"total_seconds\", field=models.IntegerField()),", "reporting_ocpawscostlineitem_project_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now()) - '1 day'::interval day) UNION", "), migrations.RunSQL( \"\"\" drop materialized view if exists reporting_ocpallcostlineitem_daily_summary; create", "Django 2.2.10 on 2020-02-18 12:51 import django.contrib.postgres.indexes from django.db import", "migrations.AddIndex( 
model_name=\"ocpazurecostlineitemdailysummary\", index=models.Index(fields=[\"node\"], name=\"ocpazure_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpazurecostlineitemprojectdailysummary\", index=models.Index( fields=[\"namespace\"],", "reporting_ocpawscostlineitem_daily_summary.shared_projects, reporting_ocpawscostlineitem_daily_summary.project_costs FROM reporting_ocpawscostlineitem_daily_summary WHERE reporting_ocpawscostlineitem_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now())", "migrations.RunSQL( \"\"\" /* add namespace index for like trigram ops", "reporting_ocpawscostlineitem_daily_summary.cluster_alias, reporting_ocpawscostlineitem_daily_summary.namespace, reporting_ocpawscostlineitem_daily_summary.node, reporting_ocpawscostlineitem_daily_summary.resource_id, reporting_ocpawscostlineitem_daily_summary.usage_start::date, reporting_ocpawscostlineitem_daily_summary.usage_end::date, reporting_ocpawscostlineitem_daily_summary.usage_account_id, reporting_ocpawscostlineitem_daily_summary.account_alias_id, reporting_ocpawscostlineitem_daily_summary.product_code, reporting_ocpawscostlineitem_daily_summary.product_family,", "varying AS product_family, reporting_ocpazurecostlineitem_project_daily_summary.instance_type, reporting_ocpazurecostlineitem_project_daily_summary.resource_location AS region, NULL::character varying AS", "), migrations.AddIndex( model_name=\"ocpstoragelineitemdaily\", index=models.Index(fields=[\"node\"], name=\"ocp_storage_li_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpusagelineitemdaily\", index=models.Index(fields=[\"namespace\"],", "exists pg_trgm schema public; \"\"\" ), # Create indexes to", "This extension will help specifically with \"col LIKE %val%\" #", "reporting_ocpallcostlineitem_daily_summary; create materialized view if not exists reporting_ocpallcostlineitem_daily_summary as SELECT", 
"migrations.AlterField(model_name=\"ocpusagelineitemdaily\", name=\"usage_end\", field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdaily\", name=\"usage_start\", field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdailysummary\", name=\"usage_end\", field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdailysummary\",", "index if not exists ocp_namespace_idx on reporting_ocpusagelineitem_daily using gin (UPPER(namespace)", "not exists ocp_summary_namespace_like_idx on reporting_ocpusagelineitem_daily_summary using gin (UPPER(namespace) gin_trgm_ops); /*", "create index mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_ix on reporting_ocpallcostlineitem_project_daily_summary (namespace varchar_pattern_ops); create index mv_reporting_ocpallcostlineitem_prj_daily_summary_node_ix", "operations. (As long as val is at least 3 characters)", "(namespace varchar_pattern_ops); create index mv_reporting_ocpallcostlineitem_prj_daily_summary_node_ix on reporting_ocpallcostlineitem_project_daily_summary (node varchar_pattern_ops); create", "AS account_alias_id, reporting_ocpazurecostlineitem_project_daily_summary.service_name AS product_code, NULL::character varying AS product_family, reporting_ocpazurecostlineitem_project_daily_summary.instance_type,", "date_trunc('month'::text, now()) - '1 day'::interval day)) lids with no data;", "NULL::character varying AS product_family, reporting_ocpazurecostlineitem_daily_summary.instance_type, reporting_ocpazurecostlineitem_daily_summary.resource_location AS region, NULL::character varying", "gin_trgm_ops); /* add node index for like trigram ops */", "create index if not exists ocpcostsum_node_like_idx on reporting_ocpcosts_summary using gin", "# reporting_ocpawscostlineitem_project_daily_summary migrations.RunSQL( \"\"\" /* add namespace index for like", "public; \"\"\" ), # Create indexes to aid with text", "*/ create index if not exists 
ocpcostsum_node_like_idx on reporting_ocpcosts_summary using", "materialized view reporting_ocpallcostlineitem_daily_summary; refresh materialized view reporting_ocpallcostlineitem_project_daily_summary; \"\"\" ), ]", "migrations.RemoveIndex( model_name=\"ocpawscostlineitemprojectdailysummary\", name=\"cost__proj_sum_namespace_idx\" ), migrations.RemoveIndex(model_name=\"ocpawscostlineitemprojectdailysummary\", name=\"cost_proj_sum_node_idx\"), migrations.RemoveIndex(model_name=\"ocpazurecostlineitemdailysummary\", name=\"ocpazure_node_idx\"), migrations.RemoveIndex( model_name=\"ocpazurecostlineitemprojectdailysummary\",", "reporting_ocpazurecostlineitem_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now()) - '1 day'::interval day)) lids", "like trigram ops */ create index if not exists ocp_summary_node_like_idx", "model_name=\"ocpusagelineitemdaily\", index=models.Index(fields=[\"node\"], name=\"node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpusagelineitemdailysummary\", index=models.Index(fields=[\"namespace\"], name=\"summary_namespace_idx\", opclasses=[\"varchar_pattern_ops\"]),", "name=\"usage_end\", field=models.DateField() ), migrations.AlterField( model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"usage_start\", field=models.DateField() ), migrations.AlterField(model_name=\"ocpstoragelineitemdaily\", name=\"usage_end\",", "name=\"usage_end\", field=models.DateField()), migrations.AlterField(model_name=\"costsummary\", name=\"usage_start\", field=models.DateField()), migrations.AddIndex( model_name=\"costsummary\", index=models.Index(fields=[\"usage_start\"], name=\"ocpcostsum_usage_start_idx\") ),", "searching. 
# These cases will specifically help with case-insensitive #", "reporting_ocpawscostlineitem_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now()) - '1 day'::interval day) UNION", "field=models.DateField() ), migrations.AlterField( model_name=\"ocpazurecostlineitemdailysummary\", name=\"usage_end\", field=models.DateField() ), migrations.AlterField( model_name=\"ocpazurecostlineitemdailysummary\", name=\"usage_start\",", "lids.node, lids.resource_id, lids.usage_start, lids.usage_end, lids.usage_account_id, lids.account_alias_id, lids.product_code, lids.product_family, lids.instance_type, lids.region,", "create index if not exists ocp_summary_node_like_idx on reporting_ocpusagelineitem_daily_summary using gin", "model_name=\"ocpawscostlineitemdailysummary\", index=models.Index(fields=[\"node\"], name=\"cost_summary_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpawscostlineitemprojectdailysummary\", index=models.Index( fields=[\"namespace\"], name=\"cost__proj_sum_namespace_idx\",", "\"\"\" ), # reporting_ocpawscostlineitem_project_daily_summary migrations.RunSQL( \"\"\" /* add namespace index", "models class Migration(migrations.Migration): dependencies = [(\"reporting\", \"0098_auto_20200221_2034\")] operations = [", "(UPPER(node) gin_trgm_ops); \"\"\" ), # ocp azure cost migrations.RunSQL( \"\"\"", "reporting_ocpawscostlineitem_daily_summary.usage_account_id, reporting_ocpawscostlineitem_daily_summary.account_alias_id, reporting_ocpawscostlineitem_daily_summary.product_code, reporting_ocpawscostlineitem_daily_summary.product_family, reporting_ocpawscostlineitem_daily_summary.instance_type, reporting_ocpawscostlineitem_daily_summary.region, reporting_ocpawscostlineitem_daily_summary.availability_zone, reporting_ocpawscostlineitem_daily_summary.tags, reporting_ocpawscostlineitem_daily_summary.usage_amount, reporting_ocpawscostlineitem_daily_summary.unit,", 
"reporting_ocpazurecostlineitem_project_daily_summary.usage_quantity AS usage_amount, reporting_ocpazurecostlineitem_project_daily_summary.unit_of_measure AS unit, reporting_ocpazurecostlineitem_project_daily_summary.pretax_cost AS unblended_cost, reporting_ocpazurecostlineitem_project_daily_summary.project_markup_cost,", "opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"costsummary\", index=django.contrib.postgres.indexes.GinIndex(fields=[\"pod_labels\"], name=\"ocpcostsum_pod_labels_idx\"), ), # This extension", "on reporting_ocpusagelineitem_daily_summary using gin (UPPER(node) gin_trgm_ops); \"\"\" ), # reporting_ocpstoragelineitem_daily", "gin (node gin_trgm_ops); create index mv_reporting_ocpallcostlineitem_prj_daily_summary_usage_ix on reporting_ocpallcostlineitem_project_daily_summary (usage_start); \"\"\"", "not exists ocpazure_node_like_idx on reporting_ocpazurecostlineitem_daily_summary using gin (UPPER(node) gin_trgm_ops); \"\"\"", "/* add namespace index for like trigram ops */ create", "create index if not exists ocp_storage_li_node_like_idx on reporting_ocpstoragelineitem_daily using gin", "lids.source_type, lids.cluster_id, lids.cluster_alias, lids.data_source, lids.namespace, lids.node, lids.pod_labels, lids.resource_id, lids.usage_start, lids.usage_end,", "AS unit, reporting_ocpazurecostlineitem_daily_summary.pretax_cost AS unblended_cost, reporting_ocpazurecostlineitem_daily_summary.markup_cost, reporting_ocpazurecostlineitem_daily_summary.currency AS currency_code, reporting_ocpazurecostlineitem_daily_summary.shared_projects,", "AS currency_code FROM reporting_ocpazurecostlineitem_project_daily_summary WHERE reporting_ocpazurecostlineitem_project_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now())", "exists ocp_node_idx on reporting_ocpusagelineitem_daily using gin (UPPER(node) gin_trgm_ops); \"\"\" ),", "), migrations.AddIndex( model_name=\"ocpazurecostlineitemprojectdailysummary\", 
index=models.Index( fields=[\"namespace\"], name=\"ocpazure_proj_namespace_idx\", opclasses=[\"varchar_pattern_ops\"] ), ), migrations.AddIndex(", "migrations.AddIndex( model_name=\"costsummary\", index=django.contrib.postgres.indexes.GinIndex(fields=[\"pod_labels\"], name=\"ocpcostsum_pod_labels_idx\"), ), # This extension will help", "reporting_ocpcosts_summary using gin (UPPER(namespace) gin_trgm_ops); /* add node index for", "), migrations.RemoveIndex(model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"ocpazure_proj_node_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdaily\", name=\"namespace_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdaily\", name=\"node_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdailysummary\", name=\"summary_namespace_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdailysummary\",", "reporting_ocpazurecostlineitem_daily_summary.cluster_id, reporting_ocpazurecostlineitem_daily_summary.cluster_alias, reporting_ocpazurecostlineitem_daily_summary.namespace, reporting_ocpazurecostlineitem_daily_summary.node, reporting_ocpazurecostlineitem_daily_summary.resource_id, reporting_ocpazurecostlineitem_daily_summary.usage_start::date, reporting_ocpazurecostlineitem_daily_summary.usage_end::date, reporting_ocpazurecostlineitem_daily_summary.subscription_guid AS usage_account_id,", "name=\"usage_start\", field=models.DateField()), migrations.AddIndex( model_name=\"costsummary\", index=models.Index(fields=[\"usage_start\"], name=\"ocpcostsum_usage_start_idx\") ), migrations.AddIndex( model_name=\"costsummary\", index=models.Index(", "[(\"reporting\", \"0098_auto_20200221_2034\")] operations = [ migrations.RunSQL( \"\"\" drop materialized view", "source_type, reporting_ocpazurecostlineitem_daily_summary.cluster_id, reporting_ocpazurecostlineitem_daily_summary.cluster_alias, reporting_ocpazurecostlineitem_daily_summary.namespace, reporting_ocpazurecostlineitem_daily_summary.node, 
reporting_ocpazurecostlineitem_daily_summary.resource_id, reporting_ocpazurecostlineitem_daily_summary.usage_start::date, reporting_ocpazurecostlineitem_daily_summary.usage_end::date, reporting_ocpazurecostlineitem_daily_summary.subscription_guid AS", "if not exists ocp_storage_li_namespace_like_idx on reporting_ocpstoragelineitem_daily using gin (UPPER(namespace) gin_trgm_ops);", "), migrations.AddIndex( model_name=\"ocpusagelineitemdaily\", index=models.Index(fields=[\"namespace\"], name=\"namespace_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpusagelineitemdaily\", index=models.Index(fields=[\"node\"],", "), migrations.AddIndex( model_name=\"ocpusagelineitemdailysummary\", index=models.Index(fields=[\"namespace\"], name=\"summary_namespace_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpusagelineitemdailysummary\", index=models.Index(fields=[\"node\"],", "on reporting_ocpcosts_summary using gin (UPPER(namespace) gin_trgm_ops); /* add node index", "AS availability_zone, reporting_ocpazurecostlineitem_daily_summary.tags, reporting_ocpazurecostlineitem_daily_summary.usage_quantity AS usage_amount, reporting_ocpazurecostlineitem_daily_summary.unit_of_measure AS unit, reporting_ocpazurecostlineitem_daily_summary.pretax_cost", "on reporting_ocpallcostlineitem_daily_summary (usage_start); drop materialized view if exists reporting_ocpallcostlineitem_project_daily_summary; create", "reporting_ocpawscostlineitem_daily_summary.instance_type, reporting_ocpawscostlineitem_daily_summary.region, reporting_ocpawscostlineitem_daily_summary.availability_zone, reporting_ocpawscostlineitem_daily_summary.tags, reporting_ocpawscostlineitem_daily_summary.usage_amount, reporting_ocpawscostlineitem_daily_summary.unit, reporting_ocpawscostlineitem_daily_summary.unblended_cost, reporting_ocpawscostlineitem_daily_summary.markup_cost, reporting_ocpawscostlineitem_daily_summary.currency_code, 
reporting_ocpawscostlineitem_daily_summary.shared_projects,", "# ocp usage line item daily migrations.RunSQL( \"\"\" /* add", "model_name=\"ocpusagelineitemdailysummary\", index=models.Index(fields=[\"node\"], name=\"summary_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AlterField(model_name=\"costsummary\", name=\"usage_end\", field=models.DateField()), migrations.AlterField(model_name=\"costsummary\", name=\"usage_start\",", "gin (UPPER(node) gin_trgm_ops); \"\"\" ), # reporting_ocpawscostlineitem_project_daily_summary migrations.RunSQL( \"\"\" /*", "field=models.DateField() ), migrations.AlterField( model_name=\"ocpazurecostlineitemdailysummary\", name=\"usage_start\", field=models.DateField() ), migrations.AlterField( model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"usage_end\",", "FROM reporting_ocpazurecostlineitem_project_daily_summary WHERE reporting_ocpazurecostlineitem_project_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now()) - '1", "usage_account_id, NULL::integer AS account_alias_id, reporting_ocpazurecostlineitem_daily_summary.service_name AS product_code, NULL::character varying AS", "reporting_ocpstoragelineitem_daily using gin (UPPER(node) gin_trgm_ops); \"\"\" ), # ocp azure", "OVER () AS id, lids.source_type, lids.cluster_id, lids.cluster_alias, lids.namespace, lids.node, lids.resource_id,", "product_family, reporting_ocpazurecostlineitem_daily_summary.instance_type, reporting_ocpazurecostlineitem_daily_summary.resource_location AS region, NULL::character varying AS availability_zone, reporting_ocpazurecostlineitem_daily_summary.tags,", "field=models.IntegerField()), migrations.AlterField(model_name=\"ocpusagelineitemdaily\", name=\"usage_end\", field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdaily\", name=\"usage_start\", field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdailysummary\", name=\"usage_end\", 
field=models.DateField()),", "gin_trgm_ops); \"\"\" ), # reporting_ocpawscostlineitem_daily_summary migrations.RunSQL( \"\"\" /* add node", "product_code, NULL::character varying AS product_family, reporting_ocpazurecostlineitem_project_daily_summary.instance_type, reporting_ocpazurecostlineitem_project_daily_summary.resource_location AS region, NULL::character", "(namespace); create index mv_reporting_ocpallcostlineitem_daily_summary_node_ix on reporting_ocpallcostlineitem_daily_summary (node varchar_pattern_ops); create index", "), # reporting_ocpawscostlineitem_daily_summary migrations.RunSQL( \"\"\" /* add node index for", "from django.db import models class Migration(migrations.Migration): dependencies = [(\"reporting\", \"0098_auto_20200221_2034\")]", "create extension if not exists pg_trgm schema public; \"\"\" ),", "help specifically with \"col LIKE %val%\" # operations. (As long", "reporting_ocpawscostlineitem_daily_summary.node, reporting_ocpawscostlineitem_daily_summary.resource_id, reporting_ocpawscostlineitem_daily_summary.usage_start::date, reporting_ocpawscostlineitem_daily_summary.usage_end::date, reporting_ocpawscostlineitem_daily_summary.usage_account_id, reporting_ocpawscostlineitem_daily_summary.account_alias_id, reporting_ocpawscostlineitem_daily_summary.product_code, reporting_ocpawscostlineitem_daily_summary.product_family, reporting_ocpawscostlineitem_daily_summary.instance_type, reporting_ocpawscostlineitem_daily_summary.region,", "AS availability_zone, reporting_ocpazurecostlineitem_project_daily_summary.usage_quantity AS usage_amount, reporting_ocpazurecostlineitem_project_daily_summary.unit_of_measure AS unit, reporting_ocpazurecostlineitem_project_daily_summary.pretax_cost AS", "reporting_ocpawscostlineitem_daily_summary migrations.RunSQL( \"\"\" /* add node index for like trigram", "ocpcostsum_namespace_like_idx on reporting_ocpcosts_summary using gin (UPPER(namespace) gin_trgm_ops); /* add node", "using gin (UPPER(node) gin_trgm_ops); 
\"\"\" ), # ocp usage line", "lids.usage_end, lids.usage_account_id, lids.account_alias_id, lids.product_code, lids.product_family, lids.instance_type, lids.region, lids.availability_zone, lids.usage_amount, lids.unit,", "index if not exists ocp_node_idx on reporting_ocpusagelineitem_daily using gin (UPPER(node)", "view if exists reporting_ocpallcostlineitem_daily_summary; drop materialized view if exists reporting_ocpallcostlineitem_project_daily_summary;", "AS product_family, reporting_ocpazurecostlineitem_project_daily_summary.instance_type, reporting_ocpazurecostlineitem_project_daily_summary.resource_location AS region, NULL::character varying AS availability_zone,", "reporting_ocpallcostlineitem_project_daily_summary using gin (node gin_trgm_ops); create index mv_reporting_ocpallcostlineitem_prj_daily_summary_usage_ix on reporting_ocpallcostlineitem_project_daily_summary", "model_name=\"ocpstoragelineitemdaily\", index=models.Index( fields=[\"namespace\"], name=\"ocp_storage_li_namespace_idx\", opclasses=[\"varchar_pattern_ops\"] ), ), migrations.AddIndex( model_name=\"ocpstoragelineitemdaily\", index=models.Index(fields=[\"node\"],", "name=\"usage_start\", field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdailysummary\", name=\"usage_end\", field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdailysummary\", name=\"usage_start\", field=models.DateField()), migrations.AddIndex( model_name=\"ocpawscostlineitemdailysummary\",", "ops */ create index if not exists ocp_summary_namespace_like_idx on reporting_ocpusagelineitem_daily_summary", "lids.resource_id, lids.usage_start, lids.usage_end, lids.usage_account_id, lids.account_alias_id, lids.product_code, lids.product_family, lids.instance_type, lids.region, lids.availability_zone,", "SELECT 'AWS'::text AS source_type, reporting_ocpawscostlineitem_project_daily_summary.cluster_id, reporting_ocpawscostlineitem_project_daily_summary.cluster_alias, 
reporting_ocpawscostlineitem_project_daily_summary.data_source, reporting_ocpawscostlineitem_project_daily_summary.namespace, reporting_ocpawscostlineitem_project_daily_summary.node, reporting_ocpawscostlineitem_project_daily_summary.pod_labels,", "AS region, NULL::character varying AS availability_zone, reporting_ocpazurecostlineitem_project_daily_summary.usage_quantity AS usage_amount, reporting_ocpazurecostlineitem_project_daily_summary.unit_of_measure", "*/ create index if not exists ocp_namespace_idx on reporting_ocpusagelineitem_daily using", "unblended_cost, reporting_ocpazurecostlineitem_project_daily_summary.project_markup_cost, reporting_ocpazurecostlineitem_project_daily_summary.pod_cost, reporting_ocpazurecostlineitem_project_daily_summary.currency AS currency_code FROM reporting_ocpazurecostlineitem_project_daily_summary WHERE reporting_ocpazurecostlineitem_project_daily_summary.usage_start", "import migrations from django.db import models class Migration(migrations.Migration): dependencies =", "AS region, NULL::character varying AS availability_zone, reporting_ocpazurecostlineitem_daily_summary.tags, reporting_ocpazurecostlineitem_daily_summary.usage_quantity AS usage_amount,", "not exists ocpcostsum_node_like_idx on reporting_ocpcosts_summary using gin (UPPER(node) gin_trgm_ops); \"\"\"", "not exists ocpazure_proj_namespace_like_idx on reporting_ocpazurecostlineitem_project_daily_summary using gin (UPPER(namespace) gin_trgm_ops); /*", "like trigram ops */ create index if not exists ocpazure_node_like_idx", "index=models.Index(fields=[\"node\"], name=\"ocpazure_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpazurecostlineitemprojectdailysummary\", index=models.Index( fields=[\"namespace\"], name=\"ocpazure_proj_namespace_idx\", opclasses=[\"varchar_pattern_ops\"]", "materialized view if not exists reporting_ocpallcostlineitem_daily_summary as SELECT row_number() OVER", "ops */ create index if not exists 
cost_summary_node_like_idx on reporting_ocpawscostlineitem_daily_summary", "field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdaily\", name=\"usage_start\", field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdailysummary\", name=\"usage_end\", field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdailysummary\", name=\"usage_start\", field=models.DateField()),", "add namespace index for like trigram ops */ create index", "index=models.Index(fields=[\"node\"], name=\"cost_summary_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpawscostlineitemprojectdailysummary\", index=models.Index( fields=[\"namespace\"], name=\"cost__proj_sum_namespace_idx\", opclasses=[\"varchar_pattern_ops\"]", "like trigram ops */ create index if not exists ocp_node_idx", "*/ create index if not exists ocpazure_node_like_idx on reporting_ocpazurecostlineitem_daily_summary using", "reporting_ocpallcostlineitem_daily_summary (usage_start); drop materialized view if exists reporting_ocpallcostlineitem_project_daily_summary; create materialized", "(node varchar_pattern_ops); create index mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_like_ix on reporting_ocpallcostlineitem_project_daily_summary using gin (namespace", "'1 day'::interval day) UNION SELECT 'Azure'::text AS source_type, reporting_ocpazurecostlineitem_project_daily_summary.cluster_id, reporting_ocpazurecostlineitem_project_daily_summary.cluster_alias,", "model_name=\"ocpazurecostlineitemdailysummary\", name=\"usage_end\", field=models.DateField() ), migrations.AlterField( model_name=\"ocpazurecostlineitemdailysummary\", name=\"usage_start\", field=models.DateField() ), migrations.AlterField(", "reporting_ocpawscostlineitem_daily_summary.usage_start::date, reporting_ocpawscostlineitem_daily_summary.usage_end::date, reporting_ocpawscostlineitem_daily_summary.usage_account_id, 
reporting_ocpawscostlineitem_daily_summary.account_alias_id, reporting_ocpawscostlineitem_daily_summary.product_code, reporting_ocpawscostlineitem_daily_summary.product_family, reporting_ocpawscostlineitem_daily_summary.instance_type, reporting_ocpawscostlineitem_daily_summary.region, reporting_ocpawscostlineitem_daily_summary.availability_zone, reporting_ocpawscostlineitem_daily_summary.tags,", "varying AS availability_zone, reporting_ocpazurecostlineitem_daily_summary.tags, reporting_ocpazurecostlineitem_daily_summary.usage_quantity AS usage_amount, reporting_ocpazurecostlineitem_daily_summary.unit_of_measure AS unit,", "with no data; create index mv_reporting_ocpallcostlineitem_daily_summary_namespace_ix on reporting_ocpallcostlineitem_daily_summary using gin", "create index if not exists cost__proj_sum_namespace_like_idx on reporting_ocpawscostlineitem_project_daily_summary using gin", "reporting_ocpazurecostlineitem_daily_summary.unit_of_measure AS unit, reporting_ocpazurecostlineitem_daily_summary.pretax_cost AS unblended_cost, reporting_ocpazurecostlineitem_daily_summary.markup_cost, reporting_ocpazurecostlineitem_daily_summary.currency AS currency_code,", "using gin (UPPER(node) gin_trgm_ops); \"\"\" ), # reporting_ocpstoragelineitem_daily migrations.RunSQL( \"\"\"", "migrations.AlterField( model_name=\"ocpazurecostlineitemdailysummary\", name=\"usage_end\", field=models.DateField() ), migrations.AlterField( model_name=\"ocpazurecostlineitemdailysummary\", name=\"usage_start\", field=models.DateField() ),", "migrations.AlterField(model_name=\"ocpstoragelineitemdaily\", name=\"usage_end\", field=models.DateField()), migrations.AlterField(model_name=\"ocpstoragelineitemdaily\", name=\"usage_start\", field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdaily\", name=\"total_seconds\", field=models.IntegerField()), migrations.AlterField(model_name=\"ocpusagelineitemdaily\",", "if not exists pg_trgm schema public; \"\"\" ), # Create", 
"on reporting_ocpazurecostlineitem_project_daily_summary using gin (UPPER(namespace) gin_trgm_ops); /* add node index", "like trigram ops */ create index if not exists cost__proj_sum_namespace_like_idx", "if not exists ocpcostsum_namespace_like_idx on reporting_ocpcosts_summary using gin (UPPER(namespace) gin_trgm_ops);", "reporting_ocpazurecostlineitem_project_daily_summary.node, reporting_ocpazurecostlineitem_project_daily_summary.pod_labels, reporting_ocpazurecostlineitem_project_daily_summary.resource_id, reporting_ocpazurecostlineitem_project_daily_summary.usage_start::date, reporting_ocpazurecostlineitem_project_daily_summary.usage_end::date, reporting_ocpazurecostlineitem_project_daily_summary.subscription_guid AS usage_account_id, NULL::integer AS", "\"\"\" create extension if not exists pg_trgm schema public; \"\"\"", "(UPPER(node) gin_trgm_ops); \"\"\" ), # ocp azure project cost migrations.RunSQL(", "materialized view if exists reporting_ocpallcostlineitem_daily_summary; create materialized view if not", "# reporting_ocpcosts_summary migrations.RunSQL( \"\"\" /* add namespace index for like", "ocp azure project cost migrations.RunSQL( \"\"\" /* add namespace index", "create index if not exists cost_summary_node_like_idx on reporting_ocpawscostlineitem_daily_summary using gin", "# ocp usage line item daily summary migrations.RunSQL( \"\"\" /*", "on reporting_ocpstoragelineitem_daily using gin (UPPER(namespace) gin_trgm_ops); /* add node index", "now()) - '1 day'::interval day) UNION SELECT 'Azure'::text AS source_type,", "- '1 day'::interval day) UNION SELECT 'Azure'::text AS source_type, reporting_ocpazurecostlineitem_project_daily_summary.cluster_id,", "index=models.Index(fields=[\"namespace\"], name=\"namespace_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpusagelineitemdaily\", index=models.Index(fields=[\"node\"], name=\"node_idx\", opclasses=[\"varchar_pattern_ops\"]), ),", "operations = [ migrations.RunSQL( 
\"\"\" drop materialized view if exists", "reporting_ocpstoragelineitem_daily using gin (UPPER(namespace) gin_trgm_ops); /* add node index for", "NULL::integer AS account_alias_id, reporting_ocpazurecostlineitem_daily_summary.service_name AS product_code, NULL::character varying AS product_family,", "reporting_ocpallcostlineitem_project_daily_summary (node varchar_pattern_ops); create index mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_like_ix on reporting_ocpallcostlineitem_project_daily_summary using gin", "migrations.AlterField(model_name=\"ocpusagelineitemdailysummary\", name=\"usage_start\", field=models.DateField()), migrations.AddIndex( model_name=\"ocpawscostlineitemdailysummary\", index=models.Index(fields=[\"node\"], name=\"cost_summary_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex(", "name=\"cost_proj_sum_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpazurecostlineitemdailysummary\", index=models.Index(fields=[\"node\"], name=\"ocpazure_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex(", "\"\"\" ), # Create indexes to aid with text searching.", "index=models.Index(fields=[\"usage_start\"], name=\"ocpcostsum_usage_start_idx\") ), migrations.AddIndex( model_name=\"costsummary\", index=models.Index( fields=[\"namespace\"], name=\"ocpcostsum_namespace_idx\", opclasses=[\"varchar_pattern_ops\"] ),", "migrations.AddIndex( model_name=\"ocpawscostlineitemprojectdailysummary\", index=models.Index(fields=[\"node\"], name=\"cost_proj_sum_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpazurecostlineitemdailysummary\", index=models.Index(fields=[\"node\"], name=\"ocpazure_node_idx\",", "# This extension will help specifically with \"col LIKE %val%\"", "), # reporting_ocpcosts_summary migrations.RunSQL( \"\"\" /* add namespace index for", "reporting_ocpawscostlineitem_daily_summary.markup_cost, 
reporting_ocpawscostlineitem_daily_summary.currency_code, reporting_ocpawscostlineitem_daily_summary.shared_projects, reporting_ocpawscostlineitem_daily_summary.project_costs FROM reporting_ocpawscostlineitem_daily_summary WHERE reporting_ocpawscostlineitem_daily_summary.usage_start >= date_trunc('month'::text,", "reporting_ocpazurecostlineitem_project_daily_summary.instance_type, reporting_ocpazurecostlineitem_project_daily_summary.resource_location AS region, NULL::character varying AS availability_zone, reporting_ocpazurecostlineitem_project_daily_summary.usage_quantity AS", "with no data; create index mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_ix on reporting_ocpallcostlineitem_project_daily_summary (namespace varchar_pattern_ops);", "day'::interval day)) lids with no data; create index mv_reporting_ocpallcostlineitem_daily_summary_namespace_ix on", "model_name=\"ocpazurecostlineitemprojectdailysummary\", index=models.Index( fields=[\"namespace\"], name=\"ocpazure_proj_namespace_idx\", opclasses=[\"varchar_pattern_ops\"] ), ), migrations.AddIndex( model_name=\"ocpazurecostlineitemprojectdailysummary\", index=models.Index(fields=[\"node\"],", "migrations.RunSQL( \"\"\" drop materialized view if exists reporting_ocpallcostlineitem_daily_summary; drop materialized", "not exists ocpcostsum_namespace_like_idx on reporting_ocpcosts_summary using gin (UPPER(namespace) gin_trgm_ops); /*", "reporting_ocpawscostlineitem_daily_summary.currency_code, reporting_ocpawscostlineitem_daily_summary.shared_projects, reporting_ocpawscostlineitem_daily_summary.project_costs FROM reporting_ocpawscostlineitem_daily_summary WHERE reporting_ocpawscostlineitem_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text,", "unit, reporting_ocpazurecostlineitem_daily_summary.pretax_cost AS unblended_cost, reporting_ocpazurecostlineitem_daily_summary.markup_cost, reporting_ocpazurecostlineitem_daily_summary.currency AS currency_code, 
reporting_ocpazurecostlineitem_daily_summary.shared_projects, reporting_ocpazurecostlineitem_daily_summary.project_costs", "reporting_ocpallcostlineitem_project_daily_summary as SELECT row_number() OVER () AS id, lids.source_type, lids.cluster_id,", "reporting_ocpazurecostlineitem_project_daily_summary.namespace, reporting_ocpazurecostlineitem_project_daily_summary.node, reporting_ocpazurecostlineitem_project_daily_summary.pod_labels, reporting_ocpazurecostlineitem_project_daily_summary.resource_id, reporting_ocpazurecostlineitem_project_daily_summary.usage_start::date, reporting_ocpazurecostlineitem_project_daily_summary.usage_end::date, reporting_ocpazurecostlineitem_project_daily_summary.subscription_guid AS usage_account_id, NULL::integer", "\"col LIKE %val%\" # operations. (As long as val is", "- '1 day'::interval day) UNION SELECT 'Azure'::text AS source_type, reporting_ocpazurecostlineitem_daily_summary.cluster_id,", "and contains (vs startswith) searches # ocp usage line item", "on reporting_ocpawscostlineitem_project_daily_summary using gin (UPPER(node) gin_trgm_ops); \"\"\" ), # reporting_ocpcosts_summary", "django.contrib.postgres.indexes from django.db import migrations from django.db import models class", "reporting_ocpawscostlineitem_project_daily_summary.usage_start::date, reporting_ocpawscostlineitem_project_daily_summary.usage_end::date, reporting_ocpawscostlineitem_project_daily_summary.usage_account_id, reporting_ocpawscostlineitem_project_daily_summary.account_alias_id, reporting_ocpawscostlineitem_project_daily_summary.product_code, reporting_ocpawscostlineitem_project_daily_summary.product_family, reporting_ocpawscostlineitem_project_daily_summary.instance_type, reporting_ocpawscostlineitem_project_daily_summary.region, reporting_ocpawscostlineitem_project_daily_summary.availability_zone, reporting_ocpawscostlineitem_project_daily_summary.usage_amount,", "reporting_ocpazurecostlineitem_project_daily_summary.currency AS currency_code FROM 
reporting_ocpazurecostlineitem_project_daily_summary WHERE reporting_ocpazurecostlineitem_project_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text,", "day'::interval day)) lids with no data; create index mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_ix on", "reporting_ocpawscostlineitem_project_daily_summary.product_family, reporting_ocpawscostlineitem_project_daily_summary.instance_type, reporting_ocpawscostlineitem_project_daily_summary.region, reporting_ocpawscostlineitem_project_daily_summary.availability_zone, reporting_ocpawscostlineitem_project_daily_summary.usage_amount, reporting_ocpawscostlineitem_project_daily_summary.unit, reporting_ocpawscostlineitem_project_daily_summary.unblended_cost, reporting_ocpawscostlineitem_project_daily_summary.project_markup_cost, reporting_ocpawscostlineitem_project_daily_summary.pod_cost, reporting_ocpawscostlineitem_project_daily_summary.currency_code", "reporting_ocpazurecostlineitem_project_daily_summary.service_name AS product_code, NULL::character varying AS product_family, reporting_ocpazurecostlineitem_project_daily_summary.instance_type, reporting_ocpazurecostlineitem_project_daily_summary.resource_location AS", "model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"usage_start\", field=models.DateField() ), migrations.AlterField(model_name=\"ocpstoragelineitemdaily\", name=\"usage_end\", field=models.DateField()), migrations.AlterField(model_name=\"ocpstoragelineitemdaily\", name=\"usage_start\", field=models.DateField()),", "\"\"\" ), # reporting_ocpawscostlineitem_daily_summary migrations.RunSQL( \"\"\" /* add node index", "day'::interval day) UNION SELECT 'Azure'::text AS source_type, reporting_ocpazurecostlineitem_project_daily_summary.cluster_id, reporting_ocpazurecostlineitem_project_daily_summary.cluster_alias, reporting_ocpazurecostlineitem_project_daily_summary.data_source,", "cost migrations.RunSQL( \"\"\" /* add node index for like trigram", 
"mv_reporting_ocpallcostlineitem_daily_summary_namespace_ix on reporting_ocpallcostlineitem_daily_summary using gin (namespace); create index mv_reporting_ocpallcostlineitem_daily_summary_node_ix on", "trigram ops */ create index if not exists cost__proj_sum_namespace_like_idx on", "index if not exists cost__proj_sum_node_like_idx on reporting_ocpawscostlineitem_project_daily_summary using gin (UPPER(node)", "lids.tags, lids.usage_amount, lids.unit, lids.unblended_cost, lids.markup_cost, lids.currency_code, lids.shared_projects, lids.project_costs FROM (", "trigram ops */ create index if not exists ocp_storage_li_namespace_like_idx on", "name=\"ocp_storage_li_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpusagelineitemdaily\", index=models.Index(fields=[\"namespace\"], name=\"namespace_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex(", "model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"ocpazure_proj_namespace_idx\" ), migrations.RemoveIndex(model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"ocpazure_proj_node_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdaily\", name=\"namespace_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdaily\", name=\"node_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdailysummary\",", "name=\"cost_proj_sum_node_idx\"), migrations.RemoveIndex(model_name=\"ocpazurecostlineitemdailysummary\", name=\"ocpazure_node_idx\"), migrations.RemoveIndex( model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"ocpazure_proj_namespace_idx\" ), migrations.RemoveIndex(model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"ocpazure_proj_node_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdaily\",", "), # ocp azure cost migrations.RunSQL( \"\"\" /* add node", "OVER () AS id, lids.source_type, lids.cluster_id, lids.cluster_alias, lids.data_source, lids.namespace, lids.node,", 
"reporting_ocpawscostlineitem_project_daily_summary.usage_amount, reporting_ocpawscostlineitem_project_daily_summary.unit, reporting_ocpawscostlineitem_project_daily_summary.unblended_cost, reporting_ocpawscostlineitem_project_daily_summary.project_markup_cost, reporting_ocpawscostlineitem_project_daily_summary.pod_cost, reporting_ocpawscostlineitem_project_daily_summary.currency_code FROM reporting_ocpawscostlineitem_project_daily_summary WHERE reporting_ocpawscostlineitem_project_daily_summary.usage_start", "AS source_type, reporting_ocpazurecostlineitem_project_daily_summary.cluster_id, reporting_ocpazurecostlineitem_project_daily_summary.cluster_alias, reporting_ocpazurecostlineitem_project_daily_summary.data_source, reporting_ocpazurecostlineitem_project_daily_summary.namespace, reporting_ocpazurecostlineitem_project_daily_summary.node, reporting_ocpazurecostlineitem_project_daily_summary.pod_labels, reporting_ocpazurecostlineitem_project_daily_summary.resource_id, reporting_ocpazurecostlineitem_project_daily_summary.usage_start::date,", "on reporting_ocpawscostlineitem_project_daily_summary using gin (UPPER(namespace) gin_trgm_ops); /* add node index", "\"\"\" /* add namespace index for like trigram ops */", "*/ create index if not exists cost_summary_node_like_idx on reporting_ocpawscostlineitem_daily_summary using", "(vs startswith) searches # ocp usage line item daily migrations.RunSQL(", "lids with no data; create index mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_ix on reporting_ocpallcostlineitem_project_daily_summary (namespace", "reporting_ocpawscostlineitem_project_daily_summary using gin (UPPER(node) gin_trgm_ops); \"\"\" ), # reporting_ocpcosts_summary migrations.RunSQL(", "ops */ create index if not exists ocp_namespace_idx on reporting_ocpusagelineitem_daily", "reporting_ocpallcostlineitem_project_daily_summary using gin (namespace gin_trgm_ops); create index mv_reporting_ocpallcostlineitem_prj_daily_summary_node_like_ix on 
reporting_ocpallcostlineitem_project_daily_summary", "exists ocpazure_proj_node_like_idx on reporting_ocpazurecostlineitem_project_daily_summary using gin (UPPER(node) gin_trgm_ops); \"\"\" ),", "row_number() OVER () AS id, lids.source_type, lids.cluster_id, lids.cluster_alias, lids.namespace, lids.node,", "migrations.AlterField( model_name=\"ocpawscostlineitemprojectdailysummary\", name=\"usage_end\", field=models.DateField() ), migrations.AlterField( model_name=\"ocpawscostlineitemprojectdailysummary\", name=\"usage_start\", field=models.DateField() ),", "like trigram ops */ create index if not exists ocpcostsum_namespace_like_idx", "index mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_like_ix on reporting_ocpallcostlineitem_project_daily_summary using gin (namespace gin_trgm_ops); create index", "\"\"\" ), migrations.RunSQL( \"\"\" drop materialized view if exists reporting_ocpallcostlineitem_daily_summary;", "opclasses=[\"varchar_pattern_ops\"] ), ), migrations.AddIndex( model_name=\"ocpazurecostlineitemprojectdailysummary\", index=models.Index(fields=[\"node\"], name=\"ocpazure_proj_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex(", "index if not exists ocp_summary_namespace_like_idx on reporting_ocpusagelineitem_daily_summary using gin (UPPER(namespace)", "name=\"usage_start\", field=models.DateField() ), migrations.AlterField( model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"usage_end\", field=models.DateField() ), migrations.AlterField( model_name=\"ocpazurecostlineitemprojectdailysummary\",", "reporting_ocpawscostlineitem_project_daily_summary.availability_zone, reporting_ocpawscostlineitem_project_daily_summary.usage_amount, reporting_ocpawscostlineitem_project_daily_summary.unit, reporting_ocpawscostlineitem_project_daily_summary.unblended_cost, reporting_ocpawscostlineitem_project_daily_summary.project_markup_cost, reporting_ocpawscostlineitem_project_daily_summary.pod_cost, 
reporting_ocpawscostlineitem_project_daily_summary.currency_code FROM reporting_ocpawscostlineitem_project_daily_summary WHERE", "create index if not exists ocpazure_proj_node_like_idx on reporting_ocpazurecostlineitem_project_daily_summary using gin", "using gin (UPPER(namespace) gin_trgm_ops); /* add node index for like", "model_name=\"ocpawscostlineitemprojectdailysummary\", name=\"cost__proj_sum_namespace_idx\" ), migrations.RemoveIndex(model_name=\"ocpawscostlineitemprojectdailysummary\", name=\"cost_proj_sum_node_idx\"), migrations.RemoveIndex(model_name=\"ocpazurecostlineitemdailysummary\", name=\"ocpazure_node_idx\"), migrations.RemoveIndex( model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"ocpazure_proj_namespace_idx\"", "materialized view if exists reporting_ocpallcostlineitem_project_daily_summary; create materialized view if not", "like trigram ops */ create index if not exists ocp_storage_li_namespace_like_idx", "gin_trgm_ops); \"\"\" ), # reporting_ocpawscostlineitem_project_daily_summary migrations.RunSQL( \"\"\" /* add namespace", "not exists ocp_storage_li_namespace_like_idx on reporting_ocpstoragelineitem_daily using gin (UPPER(namespace) gin_trgm_ops); /*", "3 characters) migrations.RunSQL( \"\"\" create extension if not exists pg_trgm", "product_family, reporting_ocpazurecostlineitem_project_daily_summary.instance_type, reporting_ocpazurecostlineitem_project_daily_summary.resource_location AS region, NULL::character varying AS availability_zone, reporting_ocpazurecostlineitem_project_daily_summary.usage_quantity", "migrations.AddIndex( model_name=\"ocpusagelineitemdaily\", index=models.Index(fields=[\"namespace\"], name=\"namespace_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpusagelineitemdaily\", index=models.Index(fields=[\"node\"], name=\"node_idx\",", "reporting_ocpawscostlineitem_project_daily_summary.cluster_alias, reporting_ocpawscostlineitem_project_daily_summary.data_source, 
reporting_ocpawscostlineitem_project_daily_summary.namespace, reporting_ocpawscostlineitem_project_daily_summary.node, reporting_ocpawscostlineitem_project_daily_summary.pod_labels, reporting_ocpawscostlineitem_project_daily_summary.resource_id, reporting_ocpawscostlineitem_project_daily_summary.usage_start::date, reporting_ocpawscostlineitem_project_daily_summary.usage_end::date, reporting_ocpawscostlineitem_project_daily_summary.usage_account_id, reporting_ocpawscostlineitem_project_daily_summary.account_alias_id,", "migrations.RunSQL( \"\"\" /* add node index for like trigram ops", "model_name=\"costsummary\", index=models.Index( fields=[\"namespace\"], name=\"ocpcostsum_namespace_idx\", opclasses=[\"varchar_pattern_ops\"] ), ), migrations.AddIndex( model_name=\"costsummary\", index=models.Index(fields=[\"node\"],", "unit, reporting_ocpazurecostlineitem_project_daily_summary.pretax_cost AS unblended_cost, reporting_ocpazurecostlineitem_project_daily_summary.project_markup_cost, reporting_ocpazurecostlineitem_project_daily_summary.pod_cost, reporting_ocpazurecostlineitem_project_daily_summary.currency AS currency_code FROM", "*/ create index if not exists ocpcostsum_namespace_like_idx on reporting_ocpcosts_summary using", "reporting_ocpawscostlineitem_project_daily_summary.pod_labels, reporting_ocpawscostlineitem_project_daily_summary.resource_id, reporting_ocpawscostlineitem_project_daily_summary.usage_start::date, reporting_ocpawscostlineitem_project_daily_summary.usage_end::date, reporting_ocpawscostlineitem_project_daily_summary.usage_account_id, reporting_ocpawscostlineitem_project_daily_summary.account_alias_id, reporting_ocpawscostlineitem_project_daily_summary.product_code, reporting_ocpawscostlineitem_project_daily_summary.product_family, reporting_ocpawscostlineitem_project_daily_summary.instance_type, reporting_ocpawscostlineitem_project_daily_summary.region,", "migrations.AlterField( model_name=\"ocpawscostlineitemprojectdailysummary\", 
name=\"usage_start\", field=models.DateField() ), migrations.AlterField( model_name=\"ocpazurecostlineitemdailysummary\", name=\"usage_end\", field=models.DateField() ),", "indexes to aid with text searching. # These cases will", "searches # ocp usage line item daily migrations.RunSQL( \"\"\" /*", "namespace index for like trigram ops */ create index if", "exists ocpazure_node_like_idx on reporting_ocpazurecostlineitem_daily_summary using gin (UPPER(node) gin_trgm_ops); \"\"\" ),", "ocp_namespace_idx on reporting_ocpusagelineitem_daily using gin (UPPER(namespace) gin_trgm_ops); /* add node", "lids.usage_account_id, lids.account_alias_id, lids.product_code, lids.product_family, lids.instance_type, lids.region, lids.availability_zone, lids.usage_amount, lids.unit, lids.unblended_cost,", "index for like trigram ops */ create index if not", "if exists reporting_ocpallcostlineitem_project_daily_summary; \"\"\" ), migrations.RemoveIndex(model_name=\"ocpawscostlineitemdailysummary\", name=\"cost_summary_node_idx\"), migrations.RemoveIndex( model_name=\"ocpawscostlineitemprojectdailysummary\", name=\"cost__proj_sum_namespace_idx\"", "FROM ( SELECT 'AWS'::text AS source_type, reporting_ocpawscostlineitem_daily_summary.cluster_id, reporting_ocpawscostlineitem_daily_summary.cluster_alias, reporting_ocpawscostlineitem_daily_summary.namespace, reporting_ocpawscostlineitem_daily_summary.node,", "field=models.DateField()), migrations.AlterField(model_name=\"costsummary\", name=\"usage_start\", field=models.DateField()), migrations.AddIndex( model_name=\"costsummary\", index=models.Index(fields=[\"usage_start\"], name=\"ocpcostsum_usage_start_idx\") ), migrations.AddIndex(", "index if not exists ocp_summary_node_like_idx on reporting_ocpusagelineitem_daily_summary using gin (UPPER(node)", "day)) lids with no data; create index mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_ix on reporting_ocpallcostlineitem_project_daily_summary", "lids.account_alias_id, 
lids.product_code, lids.product_family, lids.instance_type, lids.region, lids.availability_zone, lids.usage_amount, lids.unit, lids.unblended_cost, lids.project_markup_cost,", "reporting_ocpawscostlineitem_project_daily_summary using gin (UPPER(namespace) gin_trgm_ops); /* add node index for", "reporting_ocpazurecostlineitem_daily_summary.subscription_guid AS usage_account_id, NULL::integer AS account_alias_id, reporting_ocpazurecostlineitem_daily_summary.service_name AS product_code, NULL::character", "ocpcostsum_node_like_idx on reporting_ocpcosts_summary using gin (UPPER(node) gin_trgm_ops); \"\"\" ), migrations.RunSQL(", "on reporting_ocpallcostlineitem_project_daily_summary (node varchar_pattern_ops); create index mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_like_ix on reporting_ocpallcostlineitem_project_daily_summary using", "NULL::character varying AS availability_zone, reporting_ocpazurecostlineitem_project_daily_summary.usage_quantity AS usage_amount, reporting_ocpazurecostlineitem_project_daily_summary.unit_of_measure AS unit,", "reporting_ocpawscostlineitem_project_daily_summary.unblended_cost, reporting_ocpawscostlineitem_project_daily_summary.project_markup_cost, reporting_ocpawscostlineitem_project_daily_summary.pod_cost, reporting_ocpawscostlineitem_project_daily_summary.currency_code FROM reporting_ocpawscostlineitem_project_daily_summary WHERE reporting_ocpawscostlineitem_project_daily_summary.usage_start >= date_trunc('month'::text,", "name=\"ocpcostsum_pod_labels_idx\"), ), # This extension will help specifically with \"col", "mv_reporting_ocpallcostlineitem_prj_daily_summary_node_ix on reporting_ocpallcostlineitem_project_daily_summary (node varchar_pattern_ops); create index mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_like_ix on reporting_ocpallcostlineitem_project_daily_summary", "reporting_ocpazurecostlineitem_daily_summary.service_name AS product_code, NULL::character varying AS product_family, 
reporting_ocpazurecostlineitem_daily_summary.instance_type, reporting_ocpazurecostlineitem_daily_summary.resource_location AS", "reporting_ocpawscostlineitem_project_daily_summary.cluster_id, reporting_ocpawscostlineitem_project_daily_summary.cluster_alias, reporting_ocpawscostlineitem_project_daily_summary.data_source, reporting_ocpawscostlineitem_project_daily_summary.namespace, reporting_ocpawscostlineitem_project_daily_summary.node, reporting_ocpawscostlineitem_project_daily_summary.pod_labels, reporting_ocpawscostlineitem_project_daily_summary.resource_id, reporting_ocpawscostlineitem_project_daily_summary.usage_start::date, reporting_ocpawscostlineitem_project_daily_summary.usage_end::date, reporting_ocpawscostlineitem_project_daily_summary.usage_account_id,", "cost migrations.RunSQL( \"\"\" /* add namespace index for like trigram", "(As long as val is at least 3 characters) migrations.RunSQL(", "aid with text searching. # These cases will specifically help", "index=models.Index(fields=[\"node\"], name=\"node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpusagelineitemdailysummary\", index=models.Index(fields=[\"namespace\"], name=\"summary_namespace_idx\", opclasses=[\"varchar_pattern_ops\"]), ),", "if not exists ocpazure_node_like_idx on reporting_ocpazurecostlineitem_daily_summary using gin (UPPER(node) gin_trgm_ops);", "exists reporting_ocpallcostlineitem_project_daily_summary; \"\"\" ), migrations.RemoveIndex(model_name=\"ocpawscostlineitemdailysummary\", name=\"cost_summary_node_idx\"), migrations.RemoveIndex( model_name=\"ocpawscostlineitemprojectdailysummary\", name=\"cost__proj_sum_namespace_idx\" ),", "mv_reporting_ocpallcostlineitem_prj_daily_summary_usage_ix on reporting_ocpallcostlineitem_project_daily_summary (usage_start); \"\"\" ), migrations.RunSQL( \"\"\" refresh materialized", "lids.usage_account_id, lids.account_alias_id, lids.product_code, lids.product_family, lids.instance_type, lids.region, 
lids.availability_zone, lids.tags, lids.usage_amount, lids.unit,", "), migrations.RunSQL( \"\"\" refresh materialized view reporting_ocpallcostlineitem_daily_summary; refresh materialized view", "gin (namespace); create index mv_reporting_ocpallcostlineitem_daily_summary_node_ix on reporting_ocpallcostlineitem_daily_summary (node varchar_pattern_ops); create", "migrations.AlterField( model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"usage_end\", field=models.DateField() ), migrations.AlterField( model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"usage_start\", field=models.DateField() ),", "not exists cost__proj_sum_namespace_like_idx on reporting_ocpawscostlineitem_project_daily_summary using gin (UPPER(namespace) gin_trgm_ops); /*", "gin_trgm_ops); \"\"\" ), # ocp usage line item daily summary", "AS source_type, reporting_ocpazurecostlineitem_daily_summary.cluster_id, reporting_ocpazurecostlineitem_daily_summary.cluster_alias, reporting_ocpazurecostlineitem_daily_summary.namespace, reporting_ocpazurecostlineitem_daily_summary.node, reporting_ocpazurecostlineitem_daily_summary.resource_id, reporting_ocpazurecostlineitem_daily_summary.usage_start::date, reporting_ocpazurecostlineitem_daily_summary.usage_end::date, reporting_ocpazurecostlineitem_daily_summary.subscription_guid", "fields=[\"namespace\"], name=\"ocpcostsum_namespace_idx\", opclasses=[\"varchar_pattern_ops\"] ), ), migrations.AddIndex( model_name=\"costsummary\", index=models.Index(fields=[\"node\"], name=\"ocpcostsum_node_idx\", opclasses=[\"varchar_pattern_ops\"]),", "drop materialized view if exists reporting_ocpallcostlineitem_daily_summary; create materialized view if", "name=\"node_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdailysummary\", name=\"summary_namespace_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdailysummary\", name=\"summary_node_idx\"), migrations.AlterField( model_name=\"ocpawscostlineitemprojectdailysummary\", 
name=\"usage_end\", field=models.DateField() ),", "using gin (UPPER(node) gin_trgm_ops); \"\"\" ), # ocp azure cost", "characters) migrations.RunSQL( \"\"\" create extension if not exists pg_trgm schema", "gin_trgm_ops); \"\"\" ), # reporting_ocpstoragelineitem_daily migrations.RunSQL( \"\"\" /* add namespace", "lids.usage_amount, lids.unit, lids.unblended_cost, lids.project_markup_cost, lids.pod_cost, lids.currency_code FROM ( SELECT 'AWS'::text", "migrations.AddIndex( model_name=\"ocpazurecostlineitemprojectdailysummary\", index=models.Index(fields=[\"node\"], name=\"ocpazure_proj_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpstoragelineitemdaily\", index=models.Index( fields=[\"namespace\"],", "(usage_start); drop materialized view if exists reporting_ocpallcostlineitem_project_daily_summary; create materialized view", "name=\"ocp_storage_li_namespace_idx\", opclasses=[\"varchar_pattern_ops\"] ), ), migrations.AddIndex( model_name=\"ocpstoragelineitemdaily\", index=models.Index(fields=[\"node\"], name=\"ocp_storage_li_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ),", "Migration(migrations.Migration): dependencies = [(\"reporting\", \"0098_auto_20200221_2034\")] operations = [ migrations.RunSQL( \"\"\"", "gin (UPPER(namespace) gin_trgm_ops); /* add node index for like trigram", "reporting_ocpazurecostlineitem_daily_summary.namespace, reporting_ocpazurecostlineitem_daily_summary.node, reporting_ocpazurecostlineitem_daily_summary.resource_id, reporting_ocpazurecostlineitem_daily_summary.usage_start::date, reporting_ocpazurecostlineitem_daily_summary.usage_end::date, reporting_ocpazurecostlineitem_daily_summary.subscription_guid AS usage_account_id, NULL::integer AS", "reporting_ocpawscostlineitem_project_daily_summary.data_source, reporting_ocpawscostlineitem_project_daily_summary.namespace, reporting_ocpawscostlineitem_project_daily_summary.node, reporting_ocpawscostlineitem_project_daily_summary.pod_labels, 
reporting_ocpawscostlineitem_project_daily_summary.resource_id, reporting_ocpawscostlineitem_project_daily_summary.usage_start::date, reporting_ocpawscostlineitem_project_daily_summary.usage_end::date, reporting_ocpawscostlineitem_project_daily_summary.usage_account_id, reporting_ocpawscostlineitem_project_daily_summary.account_alias_id, reporting_ocpawscostlineitem_project_daily_summary.product_code,", "trigram ops */ create index if not exists ocpazure_proj_namespace_like_idx on", "*/ create index if not exists ocp_summary_namespace_like_idx on reporting_ocpusagelineitem_daily_summary using", "name=\"usage_end\", field=models.DateField()), migrations.AlterField(model_name=\"ocpstoragelineitemdaily\", name=\"usage_start\", field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdaily\", name=\"total_seconds\", field=models.IntegerField()), migrations.AlterField(model_name=\"ocpusagelineitemdaily\", name=\"usage_end\",", "model_name=\"ocpusagelineitemdailysummary\", index=models.Index(fields=[\"namespace\"], name=\"summary_namespace_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpusagelineitemdailysummary\", index=models.Index(fields=[\"node\"], name=\"summary_node_idx\", opclasses=[\"varchar_pattern_ops\"]),", "(UPPER(node) gin_trgm_ops); \"\"\" ), # reporting_ocpstoragelineitem_daily migrations.RunSQL( \"\"\" /* add", "lids.region, lids.availability_zone, lids.tags, lids.usage_amount, lids.unit, lids.unblended_cost, lids.markup_cost, lids.currency_code, lids.shared_projects, lids.project_costs", "ops */ create index if not exists ocp_storage_li_namespace_like_idx on reporting_ocpstoragelineitem_daily", "index=models.Index(fields=[\"node\"], name=\"ocpcostsum_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"costsummary\", index=django.contrib.postgres.indexes.GinIndex(fields=[\"pod_labels\"], name=\"ocpcostsum_pod_labels_idx\"), ), #", "contains (vs startswith) 
searches # ocp usage line item daily", "*/ create index if not exists ocp_storage_li_namespace_like_idx on reporting_ocpstoragelineitem_daily using", "model_name=\"ocpawscostlineitemprojectdailysummary\", index=models.Index(fields=[\"node\"], name=\"cost_proj_sum_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpazurecostlineitemdailysummary\", index=models.Index(fields=[\"node\"], name=\"ocpazure_node_idx\", opclasses=[\"varchar_pattern_ops\"]),", "mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_like_ix on reporting_ocpallcostlineitem_project_daily_summary using gin (namespace gin_trgm_ops); create index mv_reporting_ocpallcostlineitem_prj_daily_summary_node_like_ix", "reporting_ocpcosts_summary using gin (UPPER(node) gin_trgm_ops); \"\"\" ), migrations.RunSQL( \"\"\" drop", "region, NULL::character varying AS availability_zone, reporting_ocpazurecostlineitem_daily_summary.tags, reporting_ocpazurecostlineitem_daily_summary.usage_quantity AS usage_amount, reporting_ocpazurecostlineitem_daily_summary.unit_of_measure", "migrations.AlterField( model_name=\"ocpazurecostlineitemdailysummary\", name=\"usage_start\", field=models.DateField() ), migrations.AlterField( model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"usage_end\", field=models.DateField() ),", "index=models.Index(fields=[\"namespace\"], name=\"summary_namespace_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpusagelineitemdailysummary\", index=models.Index(fields=[\"node\"], name=\"summary_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ),", "trigram ops */ create index if not exists ocp_node_idx on", "exists reporting_ocpallcostlineitem_daily_summary; drop materialized view if exists reporting_ocpallcostlineitem_project_daily_summary; \"\"\" ),", "cost_summary_node_like_idx on reporting_ocpawscostlineitem_daily_summary using gin (UPPER(node) gin_trgm_ops); \"\"\" ), #", "view if exists 
reporting_ocpallcostlineitem_daily_summary; create materialized view if not exists", "( SELECT 'AWS'::text AS source_type, reporting_ocpawscostlineitem_daily_summary.cluster_id, reporting_ocpawscostlineitem_daily_summary.cluster_alias, reporting_ocpawscostlineitem_daily_summary.namespace, reporting_ocpawscostlineitem_daily_summary.node, reporting_ocpawscostlineitem_daily_summary.resource_id,", "name=\"namespace_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpusagelineitemdaily\", index=models.Index(fields=[\"node\"], name=\"node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex(", "reporting_ocpazurecostlineitem_daily_summary.markup_cost, reporting_ocpazurecostlineitem_daily_summary.currency AS currency_code, reporting_ocpazurecostlineitem_daily_summary.shared_projects, reporting_ocpazurecostlineitem_daily_summary.project_costs FROM reporting_ocpazurecostlineitem_daily_summary WHERE reporting_ocpazurecostlineitem_daily_summary.usage_start", "reporting_ocpawscostlineitem_daily_summary.region, reporting_ocpawscostlineitem_daily_summary.availability_zone, reporting_ocpawscostlineitem_daily_summary.tags, reporting_ocpawscostlineitem_daily_summary.usage_amount, reporting_ocpawscostlineitem_daily_summary.unit, reporting_ocpawscostlineitem_daily_summary.unblended_cost, reporting_ocpawscostlineitem_daily_summary.markup_cost, reporting_ocpawscostlineitem_daily_summary.currency_code, reporting_ocpawscostlineitem_daily_summary.shared_projects, reporting_ocpawscostlineitem_daily_summary.project_costs", "name=\"node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpusagelineitemdailysummary\", index=models.Index(fields=[\"namespace\"], name=\"summary_namespace_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex(", "usage_amount, reporting_ocpazurecostlineitem_daily_summary.unit_of_measure AS unit, reporting_ocpazurecostlineitem_daily_summary.pretax_cost AS unblended_cost, 
reporting_ocpazurecostlineitem_daily_summary.markup_cost, reporting_ocpazurecostlineitem_daily_summary.currency AS", "gin_trgm_ops); \"\"\" ), # ocp azure project cost migrations.RunSQL( \"\"\"", "if not exists reporting_ocpallcostlineitem_daily_summary as SELECT row_number() OVER () AS", "migrations.RemoveIndex(model_name=\"ocpawscostlineitemdailysummary\", name=\"cost_summary_node_idx\"), migrations.RemoveIndex( model_name=\"ocpawscostlineitemprojectdailysummary\", name=\"cost__proj_sum_namespace_idx\" ), migrations.RemoveIndex(model_name=\"ocpawscostlineitemprojectdailysummary\", name=\"cost_proj_sum_node_idx\"), migrations.RemoveIndex(model_name=\"ocpazurecostlineitemdailysummary\", name=\"ocpazure_node_idx\"),", "index=models.Index(fields=[\"node\"], name=\"summary_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AlterField(model_name=\"costsummary\", name=\"usage_end\", field=models.DateField()), migrations.AlterField(model_name=\"costsummary\", name=\"usage_start\", field=models.DateField()),", "if not exists ocpcostsum_node_like_idx on reporting_ocpcosts_summary using gin (UPPER(node) gin_trgm_ops);", "varchar_pattern_ops); create index mv_reporting_ocpallcostlineitem_prj_daily_summary_node_ix on reporting_ocpallcostlineitem_project_daily_summary (node varchar_pattern_ops); create index", "exists ocpcostsum_namespace_like_idx on reporting_ocpcosts_summary using gin (UPPER(namespace) gin_trgm_ops); /* add", "class Migration(migrations.Migration): dependencies = [(\"reporting\", \"0098_auto_20200221_2034\")] operations = [ migrations.RunSQL(", "lids.unblended_cost, lids.markup_cost, lids.currency_code, lids.shared_projects, lids.project_costs FROM ( SELECT 'AWS'::text AS", "ops */ create index if not exists ocp_storage_li_node_like_idx on reporting_ocpstoragelineitem_daily", "if not exists ocp_summary_namespace_like_idx on reporting_ocpusagelineitem_daily_summary using gin (UPPER(namespace) gin_trgm_ops);", "lids.cluster_id, lids.cluster_alias, 
lids.data_source, lids.namespace, lids.node, lids.pod_labels, lids.resource_id, lids.usage_start, lids.usage_end, lids.usage_account_id,", "name=\"usage_start\", field=models.DateField()), migrations.AddIndex( model_name=\"ocpawscostlineitemdailysummary\", index=models.Index(fields=[\"node\"], name=\"cost_summary_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpawscostlineitemprojectdailysummary\",", "like trigram ops */ create index if not exists ocp_namespace_idx", "gin_trgm_ops); \"\"\" ), # reporting_ocpcosts_summary migrations.RunSQL( \"\"\" /* add namespace", "\"\"\" /* add node index for like trigram ops */", "using gin (UPPER(node) gin_trgm_ops); \"\"\" ), migrations.RunSQL( \"\"\" drop materialized", "now()) - '1 day'::interval day)) lids with no data; create", "lids.instance_type, lids.region, lids.availability_zone, lids.usage_amount, lids.unit, lids.unblended_cost, lids.project_markup_cost, lids.pod_cost, lids.currency_code FROM", "migrations.AddIndex( model_name=\"ocpawscostlineitemprojectdailysummary\", index=models.Index( fields=[\"namespace\"], name=\"cost__proj_sum_namespace_idx\", opclasses=[\"varchar_pattern_ops\"] ), ), migrations.AddIndex( model_name=\"ocpawscostlineitemprojectdailysummary\",", "# Create indexes to aid with text searching. # These", "\"\"\" ), migrations.RemoveIndex(model_name=\"ocpawscostlineitemdailysummary\", name=\"cost_summary_node_idx\"), migrations.RemoveIndex( model_name=\"ocpawscostlineitemprojectdailysummary\", name=\"cost__proj_sum_namespace_idx\" ), migrations.RemoveIndex(model_name=\"ocpawscostlineitemprojectdailysummary\", name=\"cost_proj_sum_node_idx\"),", "Create indexes to aid with text searching. 
# These cases", "reporting_ocpazurecostlineitem_project_daily_summary.pod_cost, reporting_ocpazurecostlineitem_project_daily_summary.currency AS currency_code FROM reporting_ocpazurecostlineitem_project_daily_summary WHERE reporting_ocpazurecostlineitem_project_daily_summary.usage_start >= date_trunc('month'::text,", "if not exists cost__proj_sum_namespace_like_idx on reporting_ocpawscostlineitem_project_daily_summary using gin (UPPER(namespace) gin_trgm_ops);", "if not exists cost_summary_node_like_idx on reporting_ocpawscostlineitem_daily_summary using gin (UPPER(node) gin_trgm_ops);", "), migrations.AddIndex( model_name=\"costsummary\", index=models.Index(fields=[\"node\"], name=\"ocpcostsum_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"costsummary\", index=django.contrib.postgres.indexes.GinIndex(fields=[\"pod_labels\"],", "val is at least 3 characters) migrations.RunSQL( \"\"\" create extension", "lids.availability_zone, lids.tags, lids.usage_amount, lids.unit, lids.unblended_cost, lids.markup_cost, lids.currency_code, lids.shared_projects, lids.project_costs FROM", "lids.shared_projects, lids.project_costs FROM ( SELECT 'AWS'::text AS source_type, reporting_ocpawscostlineitem_daily_summary.cluster_id, reporting_ocpawscostlineitem_daily_summary.cluster_alias,", "reporting_ocpazurecostlineitem_daily_summary.tags, reporting_ocpazurecostlineitem_daily_summary.usage_quantity AS usage_amount, reporting_ocpazurecostlineitem_daily_summary.unit_of_measure AS unit, reporting_ocpazurecostlineitem_daily_summary.pretax_cost AS unblended_cost,", "index=models.Index(fields=[\"node\"], name=\"cost_proj_sum_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpazurecostlineitemdailysummary\", index=models.Index(fields=[\"node\"], name=\"ocpazure_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ),", "(usage_start); \"\"\" ), migrations.RunSQL( \"\"\" refresh materialized view 
reporting_ocpallcostlineitem_daily_summary; refresh", "AS unit, reporting_ocpazurecostlineitem_project_daily_summary.pretax_cost AS unblended_cost, reporting_ocpazurecostlineitem_project_daily_summary.project_markup_cost, reporting_ocpazurecostlineitem_project_daily_summary.pod_cost, reporting_ocpazurecostlineitem_project_daily_summary.currency AS currency_code", "reporting_ocpawscostlineitem_daily_summary.tags, reporting_ocpawscostlineitem_daily_summary.usage_amount, reporting_ocpawscostlineitem_daily_summary.unit, reporting_ocpawscostlineitem_daily_summary.unblended_cost, reporting_ocpawscostlineitem_daily_summary.markup_cost, reporting_ocpawscostlineitem_daily_summary.currency_code, reporting_ocpawscostlineitem_daily_summary.shared_projects, reporting_ocpawscostlineitem_daily_summary.project_costs FROM reporting_ocpawscostlineitem_daily_summary", "WHERE reporting_ocpawscostlineitem_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now()) - '1 day'::interval day)", "reporting_ocpawscostlineitem_daily_summary.resource_id, reporting_ocpawscostlineitem_daily_summary.usage_start::date, reporting_ocpawscostlineitem_daily_summary.usage_end::date, reporting_ocpawscostlineitem_daily_summary.usage_account_id, reporting_ocpawscostlineitem_daily_summary.account_alias_id, reporting_ocpawscostlineitem_daily_summary.product_code, reporting_ocpawscostlineitem_daily_summary.product_family, reporting_ocpawscostlineitem_daily_summary.instance_type, reporting_ocpawscostlineitem_daily_summary.region, reporting_ocpawscostlineitem_daily_summary.availability_zone,", "mv_reporting_ocpallcostlineitem_daily_summary_node_ix on reporting_ocpallcostlineitem_daily_summary (node varchar_pattern_ops); create index mv_reporting_ocpallcostlineitem_daily_summary_usage_ix on reporting_ocpallcostlineitem_daily_summary", "line item daily migrations.RunSQL( \"\"\" /* add namespace index for", "(UPPER(namespace) gin_trgm_ops); /* add node index for like trigram ops", "), 
migrations.AlterField( model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"usage_start\", field=models.DateField() ), migrations.AlterField(model_name=\"ocpstoragelineitemdaily\", name=\"usage_end\", field=models.DateField()), migrations.AlterField(model_name=\"ocpstoragelineitemdaily\",", "fields=[\"namespace\"], name=\"cost__proj_sum_namespace_idx\", opclasses=[\"varchar_pattern_ops\"] ), ), migrations.AddIndex( model_name=\"ocpawscostlineitemprojectdailysummary\", index=models.Index(fields=[\"node\"], name=\"cost_proj_sum_node_idx\", opclasses=[\"varchar_pattern_ops\"]),", "name=\"usage_start\", field=models.DateField() ), migrations.AlterField( model_name=\"ocpazurecostlineitemdailysummary\", name=\"usage_end\", field=models.DateField() ), migrations.AlterField( model_name=\"ocpazurecostlineitemdailysummary\",", "reporting_ocpazurecostlineitem_project_daily_summary.unit_of_measure AS unit, reporting_ocpazurecostlineitem_project_daily_summary.pretax_cost AS unblended_cost, reporting_ocpazurecostlineitem_project_daily_summary.project_markup_cost, reporting_ocpazurecostlineitem_project_daily_summary.pod_cost, reporting_ocpazurecostlineitem_project_daily_summary.currency AS", "create index mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_like_ix on reporting_ocpallcostlineitem_project_daily_summary using gin (namespace gin_trgm_ops); create", "WHERE reporting_ocpawscostlineitem_project_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now()) - '1 day'::interval day)", "view if not exists reporting_ocpallcostlineitem_daily_summary as SELECT row_number() OVER ()", "name=\"namespace_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdaily\", name=\"node_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdailysummary\", name=\"summary_namespace_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdailysummary\", name=\"summary_node_idx\"), migrations.AlterField( 
model_name=\"ocpawscostlineitemprojectdailysummary\", name=\"usage_end\",", "usage line item daily summary migrations.RunSQL( \"\"\" /* add namespace", "'1 day'::interval day)) lids with no data; create index mv_reporting_ocpallcostlineitem_daily_summary_namespace_ix", "AS usage_account_id, NULL::integer AS account_alias_id, reporting_ocpazurecostlineitem_project_daily_summary.service_name AS product_code, NULL::character varying", "reporting_ocpazurecostlineitem_daily_summary.pretax_cost AS unblended_cost, reporting_ocpazurecostlineitem_daily_summary.markup_cost, reporting_ocpazurecostlineitem_daily_summary.currency AS currency_code, reporting_ocpazurecostlineitem_daily_summary.shared_projects, reporting_ocpazurecostlineitem_daily_summary.project_costs FROM", "reporting_ocpawscostlineitem_project_daily_summary.node, reporting_ocpawscostlineitem_project_daily_summary.pod_labels, reporting_ocpawscostlineitem_project_daily_summary.resource_id, reporting_ocpawscostlineitem_project_daily_summary.usage_start::date, reporting_ocpawscostlineitem_project_daily_summary.usage_end::date, reporting_ocpawscostlineitem_project_daily_summary.usage_account_id, reporting_ocpawscostlineitem_project_daily_summary.account_alias_id, reporting_ocpawscostlineitem_project_daily_summary.product_code, reporting_ocpawscostlineitem_project_daily_summary.product_family, reporting_ocpawscostlineitem_project_daily_summary.instance_type,", "using gin (node gin_trgm_ops); create index mv_reporting_ocpallcostlineitem_prj_daily_summary_usage_ix on reporting_ocpallcostlineitem_project_daily_summary (usage_start);", "view if exists reporting_ocpallcostlineitem_project_daily_summary; \"\"\" ), migrations.RemoveIndex(model_name=\"ocpawscostlineitemdailysummary\", name=\"cost_summary_node_idx\"), migrations.RemoveIndex( model_name=\"ocpawscostlineitemprojectdailysummary\",", "reporting_ocpawscostlineitem_project_daily_summary.account_alias_id, 
reporting_ocpawscostlineitem_project_daily_summary.product_code, reporting_ocpawscostlineitem_project_daily_summary.product_family, reporting_ocpawscostlineitem_project_daily_summary.instance_type, reporting_ocpawscostlineitem_project_daily_summary.region, reporting_ocpawscostlineitem_project_daily_summary.availability_zone, reporting_ocpawscostlineitem_project_daily_summary.usage_amount, reporting_ocpawscostlineitem_project_daily_summary.unit, reporting_ocpawscostlineitem_project_daily_summary.unblended_cost, reporting_ocpawscostlineitem_project_daily_summary.project_markup_cost,", "azure cost migrations.RunSQL( \"\"\" /* add node index for like", "reporting_ocpallcostlineitem_project_daily_summary (namespace varchar_pattern_ops); create index mv_reporting_ocpallcostlineitem_prj_daily_summary_node_ix on reporting_ocpallcostlineitem_project_daily_summary (node varchar_pattern_ops);", "= [ migrations.RunSQL( \"\"\" drop materialized view if exists reporting_ocpallcostlineitem_daily_summary;", "opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpusagelineitemdaily\", index=models.Index(fields=[\"node\"], name=\"node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpusagelineitemdailysummary\",", "reporting_ocpazurecostlineitem_project_daily_summary.resource_id, reporting_ocpazurecostlineitem_project_daily_summary.usage_start::date, reporting_ocpazurecostlineitem_project_daily_summary.usage_end::date, reporting_ocpazurecostlineitem_project_daily_summary.subscription_guid AS usage_account_id, NULL::integer AS account_alias_id, reporting_ocpazurecostlineitem_project_daily_summary.service_name", "UNION SELECT 'Azure'::text AS source_type, reporting_ocpazurecostlineitem_daily_summary.cluster_id, reporting_ocpazurecostlineitem_daily_summary.cluster_alias, reporting_ocpazurecostlineitem_daily_summary.namespace, reporting_ocpazurecostlineitem_daily_summary.node, 
reporting_ocpazurecostlineitem_daily_summary.resource_id,", "reporting_ocpawscostlineitem_daily_summary.availability_zone, reporting_ocpawscostlineitem_daily_summary.tags, reporting_ocpawscostlineitem_daily_summary.usage_amount, reporting_ocpawscostlineitem_daily_summary.unit, reporting_ocpawscostlineitem_daily_summary.unblended_cost, reporting_ocpawscostlineitem_daily_summary.markup_cost, reporting_ocpawscostlineitem_daily_summary.currency_code, reporting_ocpawscostlineitem_daily_summary.shared_projects, reporting_ocpawscostlineitem_daily_summary.project_costs FROM", "( SELECT 'AWS'::text AS source_type, reporting_ocpawscostlineitem_project_daily_summary.cluster_id, reporting_ocpawscostlineitem_project_daily_summary.cluster_alias, reporting_ocpawscostlineitem_project_daily_summary.data_source, reporting_ocpawscostlineitem_project_daily_summary.namespace, reporting_ocpawscostlineitem_project_daily_summary.node,", "), # ocp azure project cost migrations.RunSQL( \"\"\" /* add", "lids.unit, lids.unblended_cost, lids.markup_cost, lids.currency_code, lids.shared_projects, lids.project_costs FROM ( SELECT 'AWS'::text", "reporting_ocpazurecostlineitem_daily_summary.cluster_alias, reporting_ocpazurecostlineitem_daily_summary.namespace, reporting_ocpazurecostlineitem_daily_summary.node, reporting_ocpazurecostlineitem_daily_summary.resource_id, reporting_ocpazurecostlineitem_daily_summary.usage_start::date, reporting_ocpazurecostlineitem_daily_summary.usage_end::date, reporting_ocpazurecostlineitem_daily_summary.subscription_guid AS usage_account_id, NULL::integer", "reporting_ocpazurecostlineitem_project_daily_summary.usage_end::date, reporting_ocpazurecostlineitem_project_daily_summary.subscription_guid AS usage_account_id, NULL::integer AS account_alias_id, reporting_ocpazurecostlineitem_project_daily_summary.service_name AS product_code,", "/* add node index for like trigram ops */ create", "= [(\"reporting\", \"0098_auto_20200221_2034\")] operations = [ 
migrations.RunSQL( \"\"\" drop materialized", "exists ocp_storage_li_namespace_like_idx on reporting_ocpstoragelineitem_daily using gin (UPPER(namespace) gin_trgm_ops); /* add", "create index mv_reporting_ocpallcostlineitem_prj_daily_summary_node_like_ix on reporting_ocpallcostlineitem_project_daily_summary using gin (node gin_trgm_ops); create", "trigram ops */ create index if not exists ocpcostsum_namespace_like_idx on", "fields=[\"namespace\"], name=\"ocp_storage_li_namespace_idx\", opclasses=[\"varchar_pattern_ops\"] ), ), migrations.AddIndex( model_name=\"ocpstoragelineitemdaily\", index=models.Index(fields=[\"node\"], name=\"ocp_storage_li_node_idx\", opclasses=[\"varchar_pattern_ops\"]),", "opclasses=[\"varchar_pattern_ops\"] ), ), migrations.AddIndex( model_name=\"ocpawscostlineitemprojectdailysummary\", index=models.Index(fields=[\"node\"], name=\"cost_proj_sum_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex(", "opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpusagelineitemdailysummary\", index=models.Index(fields=[\"node\"], name=\"summary_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AlterField(model_name=\"costsummary\", name=\"usage_end\",", "on reporting_ocpcosts_summary using gin (UPPER(node) gin_trgm_ops); \"\"\" ), migrations.RunSQL( \"\"\"", "to aid with text searching. 
# These cases will specifically", "name=\"ocpazure_proj_node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpstoragelineitemdaily\", index=models.Index( fields=[\"namespace\"], name=\"ocp_storage_li_namespace_idx\", opclasses=[\"varchar_pattern_ops\"] ),", "migrations.RemoveIndex(model_name=\"ocpazurecostlineitemdailysummary\", name=\"ocpazure_node_idx\"), migrations.RemoveIndex( model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"ocpazure_proj_namespace_idx\" ), migrations.RemoveIndex(model_name=\"ocpazurecostlineitemprojectdailysummary\", name=\"ocpazure_proj_node_idx\"), migrations.RemoveIndex(model_name=\"ocpusagelineitemdaily\", name=\"namespace_idx\"),", "(UPPER(node) gin_trgm_ops); \"\"\" ), # ocp usage line item daily", "reporting_ocpawscostlineitem_project_daily_summary.region, reporting_ocpawscostlineitem_project_daily_summary.availability_zone, reporting_ocpawscostlineitem_project_daily_summary.usage_amount, reporting_ocpawscostlineitem_project_daily_summary.unit, reporting_ocpawscostlineitem_project_daily_summary.unblended_cost, reporting_ocpawscostlineitem_project_daily_summary.project_markup_cost, reporting_ocpawscostlineitem_project_daily_summary.pod_cost, reporting_ocpawscostlineitem_project_daily_summary.currency_code FROM reporting_ocpawscostlineitem_project_daily_summary", "(node gin_trgm_ops); create index mv_reporting_ocpallcostlineitem_prj_daily_summary_usage_ix on reporting_ocpallcostlineitem_project_daily_summary (usage_start); \"\"\" ),", "on reporting_ocpawscostlineitem_daily_summary using gin (UPPER(node) gin_trgm_ops); \"\"\" ), # reporting_ocpawscostlineitem_project_daily_summary", "index mv_reporting_ocpallcostlineitem_prj_daily_summary_usage_ix on reporting_ocpallcostlineitem_project_daily_summary (usage_start); \"\"\" ), migrations.RunSQL( \"\"\" refresh", "drop materialized view if exists reporting_ocpallcostlineitem_project_daily_summary; \"\"\" ), 
migrations.RemoveIndex(model_name=\"ocpawscostlineitemdailysummary\", name=\"cost_summary_node_idx\"),", "date_trunc('month'::text, now()) - '1 day'::interval day) UNION SELECT 'Azure'::text AS", "# ocp azure cost migrations.RunSQL( \"\"\" /* add node index", "name=\"usage_end\", field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdaily\", name=\"usage_start\", field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdailysummary\", name=\"usage_end\", field=models.DateField()), migrations.AlterField(model_name=\"ocpusagelineitemdailysummary\", name=\"usage_start\",", "reporting_ocpawscostlineitem_daily_summary.product_code, reporting_ocpawscostlineitem_daily_summary.product_family, reporting_ocpawscostlineitem_daily_summary.instance_type, reporting_ocpawscostlineitem_daily_summary.region, reporting_ocpawscostlineitem_daily_summary.availability_zone, reporting_ocpawscostlineitem_daily_summary.tags, reporting_ocpawscostlineitem_daily_summary.usage_amount, reporting_ocpawscostlineitem_daily_summary.unit, reporting_ocpawscostlineitem_daily_summary.unblended_cost, reporting_ocpawscostlineitem_daily_summary.markup_cost,", "migrations.AddIndex( model_name=\"ocpusagelineitemdaily\", index=models.Index(fields=[\"node\"], name=\"node_idx\", opclasses=[\"varchar_pattern_ops\"]), ), migrations.AddIndex( model_name=\"ocpusagelineitemdailysummary\", index=models.Index(fields=[\"namespace\"], name=\"summary_namespace_idx\",", "(UPPER(node) gin_trgm_ops); \"\"\" ), # reporting_ocpawscostlineitem_daily_summary migrations.RunSQL( \"\"\" /* add", "lids.project_markup_cost, lids.pod_cost, lids.currency_code FROM ( SELECT 'AWS'::text AS source_type, reporting_ocpawscostlineitem_project_daily_summary.cluster_id,", "long as val is at least 3 characters) migrations.RunSQL( \"\"\"", "startswith) searches # ocp usage line item daily migrations.RunSQL( \"\"\"", "'AWS'::text AS source_type, 
reporting_ocpawscostlineitem_project_daily_summary.cluster_id, reporting_ocpawscostlineitem_project_daily_summary.cluster_alias, reporting_ocpawscostlineitem_project_daily_summary.data_source, reporting_ocpawscostlineitem_project_daily_summary.namespace, reporting_ocpawscostlineitem_project_daily_summary.node, reporting_ocpawscostlineitem_project_daily_summary.pod_labels, reporting_ocpawscostlineitem_project_daily_summary.resource_id,", "create index if not exists ocpazure_proj_namespace_like_idx on reporting_ocpazurecostlineitem_project_daily_summary using gin", "name=\"cost_summary_node_idx\"), migrations.RemoveIndex( model_name=\"ocpawscostlineitemprojectdailysummary\", name=\"cost__proj_sum_namespace_idx\" ), migrations.RemoveIndex(model_name=\"ocpawscostlineitemprojectdailysummary\", name=\"cost_proj_sum_node_idx\"), migrations.RemoveIndex(model_name=\"ocpazurecostlineitemdailysummary\", name=\"ocpazure_node_idx\"), migrations.RemoveIndex(", "AS unblended_cost, reporting_ocpazurecostlineitem_daily_summary.markup_cost, reporting_ocpazurecostlineitem_daily_summary.currency AS currency_code, reporting_ocpazurecostlineitem_daily_summary.shared_projects, reporting_ocpazurecostlineitem_daily_summary.project_costs FROM reporting_ocpazurecostlineitem_daily_summary", "reporting_ocpazurecostlineitem_project_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now()) - '1 day'::interval day)) lids", "index if not exists ocp_storage_li_namespace_like_idx on reporting_ocpstoragelineitem_daily using gin (UPPER(namespace)" ]
[ "from sympy.tensor.functions import TensorProduct from sympy import MatrixSymbol, Matrix, Array", "MatrixSymbol(\"C\", 3, 3) def test_TensorProduct_construction(): assert TensorProduct(3, 4) == 12", "expr == TensorProduct(A, B, C) expr = TensorProduct(Matrix.eye(2), [[0, -1],", "y], evaluate=False) assert expr.shape == (2, 2) assert expr.rank() ==", "== (2, 2) assert expr.rank() == 2 expr = TensorProduct(expr,", "x, y, z from sympy.abc import i, j, k, l", "assert expr == TensorProduct(A, B, C) expr = TensorProduct(Matrix.eye(2), [[0,", "expr = TensorProduct(expr, expr, evaluate=False) assert expr.shape == (2, 2,", "expr = TensorProduct(TensorProduct(A, B), C) assert expr == TensorProduct(A, B,", "TensorProduct(3, 4, evaluate=False) assert expr.shape == () assert expr.rank() ==", "import TensorProduct from sympy import MatrixSymbol, Matrix, Array from sympy.abc", "expr.rank() == 4 def test_TensorProduct_getitem(): expr = TensorProduct(A, B) assert", "= TensorProduct(Matrix.eye(2), [[0, -1], [1, 0]]) assert expr == Array([", "[1, 0]], [[0, 0], [0, 0]] ], [ [[0, 0],", "TensorProduct(expr, expr, evaluate=False) assert expr.shape == (2, 2, 2, 2)", "Matrix, Array from sympy.abc import x, y, z from sympy.abc", "4 expr = TensorProduct(Matrix.eye(2), [[0, -1], [1, 0]], evaluate=False) assert", "from sympy import MatrixSymbol, Matrix, Array from sympy.abc import x,", "() assert expr.rank() == 0 expr = TensorProduct([1, 2], [x,", "2, 2) assert expr.rank() == 4 def test_TensorProduct_getitem(): expr =", "import i, j, k, l A = MatrixSymbol(\"A\", 3, 3)", "[x, y], evaluate=False) assert expr.shape == (2, 2) assert expr.rank()", "= TensorProduct(Matrix.eye(2), [[0, -1], [1, 0]], evaluate=False) assert expr.shape ==", "l A = MatrixSymbol(\"A\", 3, 3) B = MatrixSymbol(\"B\", 3,", "assert isinstance(TensorProduct(A, A), TensorProduct) expr = TensorProduct(TensorProduct(x, y), z) assert", "MatrixSymbol(\"A\", 3, 3) B = MatrixSymbol(\"B\", 3, 3) C =", "y, z from sympy.abc import i, 
j, k, l A", "MatrixSymbol(\"B\", 3, 3) C = MatrixSymbol(\"C\", 3, 3) def test_TensorProduct_construction():", "sympy.abc import i, j, k, l A = MatrixSymbol(\"A\", 3,", "assert expr.shape == (2, 2, 2, 2) assert expr.rank() ==", "[ [[0, 0], [0, 0]], [[0, -1], [1, 0]] ]", "0]], [[0, 0], [0, 0]] ], [ [[0, 0], [0,", "== (2, 2, 2, 2) assert expr.rank() == 4 expr", "import MatrixSymbol, Matrix, Array from sympy.abc import x, y, z", "], [ [[0, 0], [0, 0]], [[0, -1], [1, 0]]", "evaluate=False) assert expr.shape == (2, 2) assert expr.rank() == 2", "sympy.abc import x, y, z from sympy.abc import i, j,", "4) == 12 assert isinstance(TensorProduct(A, A), TensorProduct) expr = TensorProduct(TensorProduct(x,", "== (2, 2, 2, 2) assert expr.rank() == 4 def", "(2, 2, 2, 2) assert expr.rank() == 4 def test_TensorProduct_getitem():", "2 expr = TensorProduct(expr, expr, evaluate=False) assert expr.shape == (2,", "== 0 expr = TensorProduct([1, 2], [x, y], evaluate=False) assert", "expr = TensorProduct(Matrix.eye(2), [[0, -1], [1, 0]]) assert expr ==", "== () assert expr.rank() == 0 expr = TensorProduct([1, 2],", "def test_TensorProduct_getitem(): expr = TensorProduct(A, B) assert expr[i, j, k,", "TensorProduct from sympy import MatrixSymbol, Matrix, Array from sympy.abc import", "3, 3) B = MatrixSymbol(\"B\", 3, 3) C = MatrixSymbol(\"C\",", "from sympy.abc import x, y, z from sympy.abc import i,", "[0, 0]], [[0, -1], [1, 0]] ] ]) def test_TensorProduct_shape():", "0]] ] ]) def test_TensorProduct_shape(): expr = TensorProduct(3, 4, evaluate=False)", "TensorProduct(TensorProduct(A, B), C) assert expr == TensorProduct(A, B, C) expr", "Array([ [ [[0, -1], [1, 0]], [[0, 0], [0, 0]]", "(2, 2) assert expr.rank() == 2 expr = TensorProduct(expr, expr,", "== 4 expr = TensorProduct(Matrix.eye(2), [[0, -1], [1, 0]], evaluate=False)", "= TensorProduct(A, B) assert expr[i, j, k, l] == A[i,", "= TensorProduct(TensorProduct(A, B), C) assert expr == TensorProduct(A, B, C)", "== 12 assert 
isinstance(TensorProduct(A, A), TensorProduct) expr = TensorProduct(TensorProduct(x, y),", "0], [0, 0]] ], [ [[0, 0], [0, 0]], [[0,", "2) assert expr.rank() == 2 expr = TensorProduct(expr, expr, evaluate=False)", "== 2 expr = TensorProduct(expr, expr, evaluate=False) assert expr.shape ==", "assert expr.rank() == 2 expr = TensorProduct(expr, expr, evaluate=False) assert", "TensorProduct(A, B) assert expr[i, j, k, l] == A[i, j]*B[k,", "x*y*z expr = TensorProduct(TensorProduct(A, B), C) assert expr == TensorProduct(A,", "B = MatrixSymbol(\"B\", 3, 3) C = MatrixSymbol(\"C\", 3, 3)", "= TensorProduct([1, 2], [x, y], evaluate=False) assert expr.shape == (2,", "assert expr.rank() == 0 expr = TensorProduct([1, 2], [x, y],", "[1, 0]]) assert expr == Array([ [ [[0, -1], [1,", "assert expr.shape == (2, 2) assert expr.rank() == 2 expr", "expr.shape == (2, 2, 2, 2) assert expr.rank() == 4", "assert expr.rank() == 4 expr = TensorProduct(Matrix.eye(2), [[0, -1], [1,", "0]]) assert expr == Array([ [ [[0, -1], [1, 0]],", "[[0, -1], [1, 0]], [[0, 0], [0, 0]] ], [", "assert expr == x*y*z expr = TensorProduct(TensorProduct(A, B), C) assert", "expr, evaluate=False) assert expr.shape == (2, 2, 2, 2) assert", "]) def test_TensorProduct_shape(): expr = TensorProduct(3, 4, evaluate=False) assert expr.shape", "[0, 0]] ], [ [[0, 0], [0, 0]], [[0, -1],", "-1], [1, 0]]) assert expr == Array([ [ [[0, -1],", "2], [x, y], evaluate=False) assert expr.shape == (2, 2) assert", "2) assert expr.rank() == 4 def test_TensorProduct_getitem(): expr = TensorProduct(A,", "expr.rank() == 0 expr = TensorProduct([1, 2], [x, y], evaluate=False)", "TensorProduct([1, 2], [x, y], evaluate=False) assert expr.shape == (2, 2)", "assert TensorProduct(3, 4) == 12 assert isinstance(TensorProduct(A, A), TensorProduct) expr", "== x*y*z expr = TensorProduct(TensorProduct(A, B), C) assert expr ==", "[[0, -1], [1, 0]]) assert expr == Array([ [ [[0,", "expr == Array([ [ [[0, -1], [1, 0]], [[0, 0],", "== Array([ [ [[0, -1], 
[1, 0]], [[0, 0], [0,", "expr.shape == (2, 2) assert expr.rank() == 2 expr =", "2) assert expr.rank() == 4 expr = TensorProduct(Matrix.eye(2), [[0, -1],", "2, 2, 2) assert expr.rank() == 4 expr = TensorProduct(Matrix.eye(2),", "test_TensorProduct_getitem(): expr = TensorProduct(A, B) assert expr[i, j, k, l]", "TensorProduct(Matrix.eye(2), [[0, -1], [1, 0]]) assert expr == Array([ [", "2, 2, 2) assert expr.rank() == 4 def test_TensorProduct_getitem(): expr", "Array from sympy.abc import x, y, z from sympy.abc import", "3) B = MatrixSymbol(\"B\", 3, 3) C = MatrixSymbol(\"C\", 3,", "[[0, -1], [1, 0]], evaluate=False) assert expr.shape == (2, 2,", "[[0, 0], [0, 0]], [[0, -1], [1, 0]] ] ])", "0]], [[0, -1], [1, 0]] ] ]) def test_TensorProduct_shape(): expr", "[1, 0]], evaluate=False) assert expr.shape == (2, 2, 2, 2)", "TensorProduct(A, B, C) expr = TensorProduct(Matrix.eye(2), [[0, -1], [1, 0]])", "sympy.tensor.functions import TensorProduct from sympy import MatrixSymbol, Matrix, Array from", "0]] ], [ [[0, 0], [0, 0]], [[0, -1], [1,", "0]], evaluate=False) assert expr.shape == (2, 2, 2, 2) assert", "y), z) assert expr == x*y*z expr = TensorProduct(TensorProduct(A, B),", "TensorProduct(Matrix.eye(2), [[0, -1], [1, 0]], evaluate=False) assert expr.shape == (2,", "C) expr = TensorProduct(Matrix.eye(2), [[0, -1], [1, 0]]) assert expr", "assert expr == Array([ [ [[0, -1], [1, 0]], [[0,", "[[0, -1], [1, 0]] ] ]) def test_TensorProduct_shape(): expr =", "def test_TensorProduct_shape(): expr = TensorProduct(3, 4, evaluate=False) assert expr.shape ==", "B) assert expr[i, j, k, l] == A[i, j]*B[k, l]", "expr == x*y*z expr = TensorProduct(TensorProduct(A, B), C) assert expr", "= MatrixSymbol(\"A\", 3, 3) B = MatrixSymbol(\"B\", 3, 3) C", "j, k, l A = MatrixSymbol(\"A\", 3, 3) B =", "3, 3) def test_TensorProduct_construction(): assert TensorProduct(3, 4) == 12 assert", "12 assert isinstance(TensorProduct(A, A), TensorProduct) expr = TensorProduct(TensorProduct(x, y), z)", "= 
MatrixSymbol(\"B\", 3, 3) C = MatrixSymbol(\"C\", 3, 3) def", "(2, 2, 2, 2) assert expr.rank() == 4 expr =", "expr.shape == () assert expr.rank() == 0 expr = TensorProduct([1,", "[1, 0]] ] ]) def test_TensorProduct_shape(): expr = TensorProduct(3, 4,", "z from sympy.abc import i, j, k, l A =", "assert expr.shape == () assert expr.rank() == 0 expr =", "expr = TensorProduct(TensorProduct(x, y), z) assert expr == x*y*z expr", "test_TensorProduct_shape(): expr = TensorProduct(3, 4, evaluate=False) assert expr.shape == ()", "3, 3) C = MatrixSymbol(\"C\", 3, 3) def test_TensorProduct_construction(): assert", "-1], [1, 0]], evaluate=False) assert expr.shape == (2, 2, 2,", "B, C) expr = TensorProduct(Matrix.eye(2), [[0, -1], [1, 0]]) assert", "expr.rank() == 4 expr = TensorProduct(Matrix.eye(2), [[0, -1], [1, 0]],", "A), TensorProduct) expr = TensorProduct(TensorProduct(x, y), z) assert expr ==", "[[0, 0], [0, 0]] ], [ [[0, 0], [0, 0]],", "expr = TensorProduct([1, 2], [x, y], evaluate=False) assert expr.shape ==", "def test_TensorProduct_construction(): assert TensorProduct(3, 4) == 12 assert isinstance(TensorProduct(A, A),", "isinstance(TensorProduct(A, A), TensorProduct) expr = TensorProduct(TensorProduct(x, y), z) assert expr", "z) assert expr == x*y*z expr = TensorProduct(TensorProduct(A, B), C)", "B), C) assert expr == TensorProduct(A, B, C) expr =", "k, l A = MatrixSymbol(\"A\", 3, 3) B = MatrixSymbol(\"B\",", "evaluate=False) assert expr.shape == (2, 2, 2, 2) assert expr.rank()", "= MatrixSymbol(\"C\", 3, 3) def test_TensorProduct_construction(): assert TensorProduct(3, 4) ==", "3) C = MatrixSymbol(\"C\", 3, 3) def test_TensorProduct_construction(): assert TensorProduct(3,", "import x, y, z from sympy.abc import i, j, k,", "MatrixSymbol, Matrix, Array from sympy.abc import x, y, z from", "TensorProduct(3, 4) == 12 assert isinstance(TensorProduct(A, A), TensorProduct) expr =", "[ [[0, -1], [1, 0]], [[0, 0], [0, 0]] ],", "expr.rank() == 2 expr = TensorProduct(expr, 
expr, evaluate=False) assert expr.shape", "2, 2) assert expr.rank() == 4 expr = TensorProduct(Matrix.eye(2), [[0,", "expr = TensorProduct(3, 4, evaluate=False) assert expr.shape == () assert", "= TensorProduct(3, 4, evaluate=False) assert expr.shape == () assert expr.rank()", "evaluate=False) assert expr.shape == () assert expr.rank() == 0 expr", "== TensorProduct(A, B, C) expr = TensorProduct(Matrix.eye(2), [[0, -1], [1,", "from sympy.abc import i, j, k, l A = MatrixSymbol(\"A\",", "sympy import MatrixSymbol, Matrix, Array from sympy.abc import x, y,", "= TensorProduct(expr, expr, evaluate=False) assert expr.shape == (2, 2, 2,", "0 expr = TensorProduct([1, 2], [x, y], evaluate=False) assert expr.shape", "<reponame>iamabhishek0/sympy<filename>sympy/tensor/tests/test_functions.py from sympy.tensor.functions import TensorProduct from sympy import MatrixSymbol, Matrix,", "TensorProduct(TensorProduct(x, y), z) assert expr == x*y*z expr = TensorProduct(TensorProduct(A,", "-1], [1, 0]], [[0, 0], [0, 0]] ], [ [[0,", "3) def test_TensorProduct_construction(): assert TensorProduct(3, 4) == 12 assert isinstance(TensorProduct(A,", "i, j, k, l A = MatrixSymbol(\"A\", 3, 3) B", "assert expr.rank() == 4 def test_TensorProduct_getitem(): expr = TensorProduct(A, B)", "A = MatrixSymbol(\"A\", 3, 3) B = MatrixSymbol(\"B\", 3, 3)", "TensorProduct) expr = TensorProduct(TensorProduct(x, y), z) assert expr == x*y*z", "C = MatrixSymbol(\"C\", 3, 3) def test_TensorProduct_construction(): assert TensorProduct(3, 4)", "4, evaluate=False) assert expr.shape == () assert expr.rank() == 0", "0], [0, 0]], [[0, -1], [1, 0]] ] ]) def", "test_TensorProduct_construction(): assert TensorProduct(3, 4) == 12 assert isinstance(TensorProduct(A, A), TensorProduct)", "-1], [1, 0]] ] ]) def test_TensorProduct_shape(): expr = TensorProduct(3,", "== 4 def test_TensorProduct_getitem(): expr = TensorProduct(A, B) assert expr[i,", "expr = TensorProduct(Matrix.eye(2), [[0, -1], [1, 0]], evaluate=False) assert 
expr.shape", "] ]) def test_TensorProduct_shape(): expr = TensorProduct(3, 4, evaluate=False) assert", "4 def test_TensorProduct_getitem(): expr = TensorProduct(A, B) assert expr[i, j,", "C) assert expr == TensorProduct(A, B, C) expr = TensorProduct(Matrix.eye(2),", "= TensorProduct(TensorProduct(x, y), z) assert expr == x*y*z expr =", "expr = TensorProduct(A, B) assert expr[i, j, k, l] ==" ]
[ "index(request): context = { } template_name = 'pages/app/index.html' return render(request,", "def __call__(self, request): \"\"\" Rewrites the proxy headers so that", "request.META[field].split(',') request.META[field] = parts[-1].strip() return self.get_response(request) def index(request): context =", "return self.get_response(request) def index(request): context = { } template_name =", "in self.FORWARDED_FOR_FIELDS: if field in request.META: if ',' in request.META[field]:", "'HTTP_X_FORWARDED_HOST', 'HTTP_X_FORWARDED_SERVER', ] def __init__(self, get_response): self.get_response = get_response def", "request): \"\"\" Rewrites the proxy headers so that only the", "import render # Create your views here. class MultipleProxyMiddleware: FORWARDED_FOR_FIELDS", "# Create your views here. class MultipleProxyMiddleware: FORWARDED_FOR_FIELDS = [", "from django.shortcuts import render # Create your views here. class", "FORWARDED_FOR_FIELDS = [ 'HTTP_X_FORWARDED_FOR', 'HTTP_X_FORWARDED_HOST', 'HTTP_X_FORWARDED_SERVER', ] def __init__(self, get_response):", "__init__(self, get_response): self.get_response = get_response def __call__(self, request): \"\"\" Rewrites", "] def __init__(self, get_response): self.get_response = get_response def __call__(self, request):", "self.get_response(request) def index(request): context = { } template_name = 'pages/app/index.html'", "proxy headers so that only the most recent proxy is", "get_response): self.get_response = get_response def __call__(self, request): \"\"\" Rewrites the", "in request.META: if ',' in request.META[field]: parts = request.META[field].split(',') request.META[field]", "def index(request): context = { } template_name = 'pages/app/index.html' return", "\"\"\" Rewrites the proxy headers so that only the most", "the proxy headers so that only the most recent proxy", "most recent proxy is used. 
\"\"\" for field in self.FORWARDED_FOR_FIELDS:", "= [ 'HTTP_X_FORWARDED_FOR', 'HTTP_X_FORWARDED_HOST', 'HTTP_X_FORWARDED_SERVER', ] def __init__(self, get_response): self.get_response", "self.FORWARDED_FOR_FIELDS: if field in request.META: if ',' in request.META[field]: parts", "your views here. class MultipleProxyMiddleware: FORWARDED_FOR_FIELDS = [ 'HTTP_X_FORWARDED_FOR', 'HTTP_X_FORWARDED_HOST',", "MultipleProxyMiddleware: FORWARDED_FOR_FIELDS = [ 'HTTP_X_FORWARDED_FOR', 'HTTP_X_FORWARDED_HOST', 'HTTP_X_FORWARDED_SERVER', ] def __init__(self,", "[ 'HTTP_X_FORWARDED_FOR', 'HTTP_X_FORWARDED_HOST', 'HTTP_X_FORWARDED_SERVER', ] def __init__(self, get_response): self.get_response =", "so that only the most recent proxy is used. \"\"\"", "field in request.META: if ',' in request.META[field]: parts = request.META[field].split(',')", "= request.META[field].split(',') request.META[field] = parts[-1].strip() return self.get_response(request) def index(request): context", "'HTTP_X_FORWARDED_SERVER', ] def __init__(self, get_response): self.get_response = get_response def __call__(self,", "= get_response def __call__(self, request): \"\"\" Rewrites the proxy headers", "= { } template_name = 'pages/app/index.html' return render(request, template_name, context)", "<reponame>Kgermando/sem from django.shortcuts import render # Create your views here.", "render # Create your views here. class MultipleProxyMiddleware: FORWARDED_FOR_FIELDS =", "class MultipleProxyMiddleware: FORWARDED_FOR_FIELDS = [ 'HTTP_X_FORWARDED_FOR', 'HTTP_X_FORWARDED_HOST', 'HTTP_X_FORWARDED_SERVER', ] def", "field in self.FORWARDED_FOR_FIELDS: if field in request.META: if ',' in", "def __init__(self, get_response): self.get_response = get_response def __call__(self, request): \"\"\"", "',' in request.META[field]: parts = request.META[field].split(',') request.META[field] = parts[-1].strip() return", "is used. 
\"\"\" for field in self.FORWARDED_FOR_FIELDS: if field in", "Rewrites the proxy headers so that only the most recent", "the most recent proxy is used. \"\"\" for field in", "'HTTP_X_FORWARDED_FOR', 'HTTP_X_FORWARDED_HOST', 'HTTP_X_FORWARDED_SERVER', ] def __init__(self, get_response): self.get_response = get_response", "django.shortcuts import render # Create your views here. class MultipleProxyMiddleware:", "parts = request.META[field].split(',') request.META[field] = parts[-1].strip() return self.get_response(request) def index(request):", "if ',' in request.META[field]: parts = request.META[field].split(',') request.META[field] = parts[-1].strip()", "only the most recent proxy is used. \"\"\" for field", "request.META[field]: parts = request.META[field].split(',') request.META[field] = parts[-1].strip() return self.get_response(request) def", "self.get_response = get_response def __call__(self, request): \"\"\" Rewrites the proxy", "used. \"\"\" for field in self.FORWARDED_FOR_FIELDS: if field in request.META:", "if field in request.META: if ',' in request.META[field]: parts =", "proxy is used. \"\"\" for field in self.FORWARDED_FOR_FIELDS: if field", "= parts[-1].strip() return self.get_response(request) def index(request): context = { }", "Create your views here. class MultipleProxyMiddleware: FORWARDED_FOR_FIELDS = [ 'HTTP_X_FORWARDED_FOR',", "that only the most recent proxy is used. \"\"\" for", "here. class MultipleProxyMiddleware: FORWARDED_FOR_FIELDS = [ 'HTTP_X_FORWARDED_FOR', 'HTTP_X_FORWARDED_HOST', 'HTTP_X_FORWARDED_SERVER', ]", "get_response def __call__(self, request): \"\"\" Rewrites the proxy headers so", "request.META: if ',' in request.META[field]: parts = request.META[field].split(',') request.META[field] =", "views here. 
class MultipleProxyMiddleware: FORWARDED_FOR_FIELDS = [ 'HTTP_X_FORWARDED_FOR', 'HTTP_X_FORWARDED_HOST', 'HTTP_X_FORWARDED_SERVER',", "__call__(self, request): \"\"\" Rewrites the proxy headers so that only", "\"\"\" for field in self.FORWARDED_FOR_FIELDS: if field in request.META: if", "recent proxy is used. \"\"\" for field in self.FORWARDED_FOR_FIELDS: if", "headers so that only the most recent proxy is used.", "in request.META[field]: parts = request.META[field].split(',') request.META[field] = parts[-1].strip() return self.get_response(request)", "parts[-1].strip() return self.get_response(request) def index(request): context = { } template_name", "request.META[field] = parts[-1].strip() return self.get_response(request) def index(request): context = {", "for field in self.FORWARDED_FOR_FIELDS: if field in request.META: if ','", "context = { } template_name = 'pages/app/index.html' return render(request, template_name," ]
[ "abstract and initialliy with a call to adapter.START_URLS @abstractmethod def", "the use of scrapy. # Its a generell web crawler,", "web crawler, but the import and use of GhAdapter makes", "abstractmethod import scrapy class BaseLensSpider(scrapy.Spider, ABC): @property @abstractmethod def adapter(self):", "about my webcrawler with the use of scrapy. # Its", "use of scrapy. # Its a generell web crawler, but", "but the import and use of GhAdapter makes it usefull", "raise NotImplementedError() @abstractmethod def create_overview_page_request(self, response): raise NotImplementedError() def parse_overview_page(self,response):", "@abstractmethod def adapter(self): raise NotImplementedError() #TODO: make the start_url thing", "sites. from abc import ABC, abstractmethod import scrapy class BaseLensSpider(scrapy.Spider,", "# This module is about my webcrawler with the use", "the import and use of GhAdapter makes it usefull for", "yield lens_page_request for overview_page_request in self.create_overview_page_request(response): yield overview_page_request def parse(self,", "is about my webcrawler with the use of scrapy. #", "def parse_lens_page(self, response): raise NotImplementedError() @abstractmethod def create_lens_page_requests(self,response): raise NotImplementedError()", "NotImplementedError() @abstractmethod def create_lens_page_requests(self,response): raise NotImplementedError() @abstractmethod def create_overview_page_request(self, response):", "scrapy. # Its a generell web crawler, but the import", "make the start_url thing abstract and initialliy with a call", "of GhAdapter makes it usefull for geizhals.de sites. from abc", "Its a generell web crawler, but the import and use", "with a call to adapter.START_URLS @abstractmethod def parse_lens_page(self, response): raise", "NotImplementedError() #TODO: make the start_url thing abstract and initialliy with", "with the use of scrapy. 
# Its a generell web", "create_overview_page_request(self, response): raise NotImplementedError() def parse_overview_page(self,response): for lens_page_request in self.create_lens_page_requests(response):", "raise NotImplementedError() #TODO: make the start_url thing abstract and initialliy", "<gh_stars>0 # This module is about my webcrawler with the", "This module is about my webcrawler with the use of", "def adapter(self): raise NotImplementedError() #TODO: make the start_url thing abstract", "of scrapy. # Its a generell web crawler, but the", "adapter.START_URLS @abstractmethod def parse_lens_page(self, response): raise NotImplementedError() @abstractmethod def create_lens_page_requests(self,response):", "@abstractmethod def parse_lens_page(self, response): raise NotImplementedError() @abstractmethod def create_lens_page_requests(self,response): raise", "the start_url thing abstract and initialliy with a call to", "a generell web crawler, but the import and use of", "and use of GhAdapter makes it usefull for geizhals.de sites.", "call to adapter.START_URLS @abstractmethod def parse_lens_page(self, response): raise NotImplementedError() @abstractmethod", "initialliy with a call to adapter.START_URLS @abstractmethod def parse_lens_page(self, response):", "a call to adapter.START_URLS @abstractmethod def parse_lens_page(self, response): raise NotImplementedError()", "makes it usefull for geizhals.de sites. 
from abc import ABC,", "abc import ABC, abstractmethod import scrapy class BaseLensSpider(scrapy.Spider, ABC): @property", "adapter(self): raise NotImplementedError() #TODO: make the start_url thing abstract and", "lens_page_request in self.create_lens_page_requests(response): yield lens_page_request for overview_page_request in self.create_overview_page_request(response): yield", "scrapy class BaseLensSpider(scrapy.Spider, ABC): @property @abstractmethod def adapter(self): raise NotImplementedError()", "parse_overview_page(self,response): for lens_page_request in self.create_lens_page_requests(response): yield lens_page_request for overview_page_request in", "start_url thing abstract and initialliy with a call to adapter.START_URLS", "response): raise NotImplementedError() def parse_overview_page(self,response): for lens_page_request in self.create_lens_page_requests(response): yield", "import scrapy class BaseLensSpider(scrapy.Spider, ABC): @property @abstractmethod def adapter(self): raise", "for geizhals.de sites. 
from abc import ABC, abstractmethod import scrapy", "raise NotImplementedError() @abstractmethod def create_lens_page_requests(self,response): raise NotImplementedError() @abstractmethod def create_overview_page_request(self,", "in self.create_lens_page_requests(response): yield lens_page_request for overview_page_request in self.create_overview_page_request(response): yield overview_page_request", "for lens_page_request in self.create_lens_page_requests(response): yield lens_page_request for overview_page_request in self.create_overview_page_request(response):", "def parse_overview_page(self,response): for lens_page_request in self.create_lens_page_requests(response): yield lens_page_request for overview_page_request", "module is about my webcrawler with the use of scrapy.", "parse_lens_page(self, response): raise NotImplementedError() @abstractmethod def create_lens_page_requests(self,response): raise NotImplementedError() @abstractmethod", "crawler, but the import and use of GhAdapter makes it", "def create_lens_page_requests(self,response): raise NotImplementedError() @abstractmethod def create_overview_page_request(self, response): raise NotImplementedError()", "usefull for geizhals.de sites. from abc import ABC, abstractmethod import", "generell web crawler, but the import and use of GhAdapter", "# Its a generell web crawler, but the import and", "import and use of GhAdapter makes it usefull for geizhals.de", "ABC, abstractmethod import scrapy class BaseLensSpider(scrapy.Spider, ABC): @property @abstractmethod def", "geizhals.de sites. 
from abc import ABC, abstractmethod import scrapy class", "@property @abstractmethod def adapter(self): raise NotImplementedError() #TODO: make the start_url", "thing abstract and initialliy with a call to adapter.START_URLS @abstractmethod", "response): raise NotImplementedError() @abstractmethod def create_lens_page_requests(self,response): raise NotImplementedError() @abstractmethod def", "@abstractmethod def create_overview_page_request(self, response): raise NotImplementedError() def parse_overview_page(self,response): for lens_page_request", "overview_page_request in self.create_overview_page_request(response): yield overview_page_request def parse(self, response): return self.parse_overview_page(response)", "my webcrawler with the use of scrapy. # Its a", "self.create_lens_page_requests(response): yield lens_page_request for overview_page_request in self.create_overview_page_request(response): yield overview_page_request def", "and initialliy with a call to adapter.START_URLS @abstractmethod def parse_lens_page(self,", "it usefull for geizhals.de sites. from abc import ABC, abstractmethod", "for overview_page_request in self.create_overview_page_request(response): yield overview_page_request def parse(self, response): return", "NotImplementedError() @abstractmethod def create_overview_page_request(self, response): raise NotImplementedError() def parse_overview_page(self,response): for", "NotImplementedError() def parse_overview_page(self,response): for lens_page_request in self.create_lens_page_requests(response): yield lens_page_request for", "#TODO: make the start_url thing abstract and initialliy with a", "import ABC, abstractmethod import scrapy class BaseLensSpider(scrapy.Spider, ABC): @property @abstractmethod", "to adapter.START_URLS @abstractmethod def parse_lens_page(self, response): raise NotImplementedError() @abstractmethod def", "webcrawler with the use of scrapy. 
# Its a generell", "ABC): @property @abstractmethod def adapter(self): raise NotImplementedError() #TODO: make the", "lens_page_request for overview_page_request in self.create_overview_page_request(response): yield overview_page_request def parse(self, response):", "raise NotImplementedError() def parse_overview_page(self,response): for lens_page_request in self.create_lens_page_requests(response): yield lens_page_request", "from abc import ABC, abstractmethod import scrapy class BaseLensSpider(scrapy.Spider, ABC):", "create_lens_page_requests(self,response): raise NotImplementedError() @abstractmethod def create_overview_page_request(self, response): raise NotImplementedError() def", "BaseLensSpider(scrapy.Spider, ABC): @property @abstractmethod def adapter(self): raise NotImplementedError() #TODO: make", "def create_overview_page_request(self, response): raise NotImplementedError() def parse_overview_page(self,response): for lens_page_request in", "GhAdapter makes it usefull for geizhals.de sites. from abc import", "use of GhAdapter makes it usefull for geizhals.de sites. from", "@abstractmethod def create_lens_page_requests(self,response): raise NotImplementedError() @abstractmethod def create_overview_page_request(self, response): raise", "class BaseLensSpider(scrapy.Spider, ABC): @property @abstractmethod def adapter(self): raise NotImplementedError() #TODO:" ]
[ "stage. None|TPS') parser.add_argument('--FeatureExtraction', type=str, required=True, help='FeatureExtraction stage. VGG|RCNN|ResNet') parser.add_argument('--SequenceModeling', type=str,", "parser = argparse.ArgumentParser() parser.add_argument('--exp_name', help='Where to store logs and models')", "parser.add_argument('--total_data_usage_ratio', type=str, default='1.0', help='total data usage ratio, this ratio is", "opt=opt) valid_loader = torch.utils.data.DataLoader( valid_dataset, batch_size=opt.batch_size, shuffle=True, # 'True' to", "\"\"\" final options \"\"\" # print(opt) with open(f'./saved_models/{opt.exp_name}/opt.txt', 'a') as", "for i in tqdm(range(opt.valInterval)): image_tensors, _ = train_dataset.get_batch() image =", "# train part for i in tqdm(range(opt.valInterval)): image_tensors, _ =", "for image_tensors, _ in valid_loader: image = image_tensors.to(device) val_loss =", "Nesterov momentum for SGD') parser.add_argument('--momentum', type=float, default=0.9, help='Momentum for SGD')", "help='gradient clipping value. default=5') parser.add_argument('--baiduCTC', action='store_true', help='for data_filtering_off mode') \"\"\"", "batch stats to 1-GPU setting, the batch_size is multiplied with", "if opt.sensitive: # opt.character += 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' opt.character = string.printable[:-6] #", "\"\"\" Data processing \"\"\" parser.add_argument('--select_data', type=str, default='MJ-ST', help='select training data", "\"\"\" Model Architecture \"\"\" parser.add_argument('--Transformation', type=str, required=True, help='Transformation stage. 
None|TPS')", "True opt.num_gpu = torch.cuda.device_count() # print('device count', opt.num_gpu) if opt.num_gpu", "last_epoch=start_iter-1, gamma=0.1) best_loss = None iteration = start_iter print(device) loss_avg", "log.write('-' * 80 + '\\n') log.close() if opt.rgb: opt.input_channel =", "for SGD') parser.add_argument('--momentum', type=float, default=0.9, help='Momentum for SGD') parser.add_argument('--grad_clip', type=float,", "if best_loss is None: best_loss = valid_loss_avg.val() torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') elif", "Model(opt) print('model input parameters', opt.imgH, opt.imgW, opt.num_fiducial, opt.input_channel, opt.output_channel, opt.hidden_size,", "stuck too long time with multi-GPU setting, try to set", "type=str, default='0.5-0.5', help='assign ratio for each selected data in the", "only require gradient decent filtered_parameters = [] params_num = []", "which means MJ and ST used as training data)') parser.add_argument('--batch_ratio',", "elif opt.optimizer == 'sgd': optimizer = optim.SGD(filtered_parameters, lr=opt.lr, momentum=opt.momentum, weight_decay=opt.weight_decay,", "help='Where to store logs and models') parser.add_argument('--train_data', required=True, help='path to", "train for') parser.add_argument('--valInterval', type=int, default=2000, help='Interval between each validation') parser.add_argument('--saved_model',", "from simclr_dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset from simclr_model import FeaturesModel", "images whose label is longer than opt.batch_max_length') # see https://github.com/clovaai/deep-text-recognition-benchmark/blob/6593928855fb7abb999a99f428b3e4477d4ae356/dataset.py#L130", "print(byol_learner) # filter that only require gradient decent filtered_parameters =", "== 'adadelta': optimizer = optim.Adadelta(filtered_parameters, lr=opt.lr, rho=opt.rho, eps=opt.eps, weight_decay=opt.weight_decay) elif", "as log: log.write(\"Iteration 
{:06d} Loss: {:.06f} Val loss: {:06f}\".format(iteration, loss_avg.val(),", "help='character label') parser.add_argument('--sensitive', action='store_true', help='for sensitive character mode') parser.add_argument('--PAD', action='store_true',", "setting (use 94 char). \"\"\" Seed and GPU setting \"\"\"", "lr=opt.lr, momentum=opt.momentum, weight_decay=opt.weight_decay, nesterov=opt.nesterov) else: raise Exception('Unknown optimizer') print(\"Optimizer:\") print(optimizer)", "iaa.PerspectiveTransform(scale=(0.01, 0.02))], random_order=True)]) byol_learner = BYOL( model, image_size=(32,100), hidden_layer=-1, channels=1,", "parser.add_argument('--FT', action='store_true', help='whether to do fine-tuning') parser.add_argument('--optimizer', type=str, choices=['adam', 'adadelta',", "type=int, default=25, help='maximum-label-length') parser.add_argument('--imgH', type=int, default=32, help='the height of the", "= optim.Adam(filtered_parameters, lr=opt.lr, betas=(opt.beta1, 0.999)) elif opt.optimizer == 'adadelta': optimizer", "momentum for SGD') parser.add_argument('--momentum', type=float, default=0.9, help='Momentum for SGD') parser.add_argument('--grad_clip',", "lr=opt.lr, rho=opt.rho, eps=opt.eps, weight_decay=opt.weight_decay) elif opt.optimizer == 'sgd': optimizer =", "between each validation') parser.add_argument('--saved_model', default='', help=\"path to model to continue", "required=True, help='Transformation stage. None|TPS') parser.add_argument('--FeatureExtraction', type=str, required=True, help='FeatureExtraction stage. VGG|RCNN|ResNet')", "a nonlinear projection head during training') parser.add_argument('--final_feature', type=int, default=256, help='the", "+ '\\n') log.close() if opt.rgb: opt.input_channel = 3 model =", "for adam. 
default=0.9') parser.add_argument('--rho', type=float, default=0.95, help='decay rate rho for", "= opt.batch_ratio.split('-') train_dataset = Batch_Balanced_Dataset(opt) log = open(f'./saved_models/{opt.exp_name}/log_dataset.txt', 'a') ia.seed(1)", "not opt.data_filtering_off: print('Filtering the images containing characters which are not", "torch.nn.DataParallel(model).to(device) model.train() if opt.saved_model != '': print(f'loading pretrained model from", "from tqdm import tqdm import matplotlib.pyplot as plt device =", "model from {opt.saved_model}') if opt.FT: model.load_state_dict(torch.load(opt.saved_model), strict=False) else: model.load_state_dict(torch.load(opt.saved_model)) print(\"Model:\")", "with open(f'./saved_models/{opt.exp_name}/log_train.txt', 'a') as log: log.write(\"Iteration {:06d} Loss: {:.06f} Val", ": ', sum(params_num)) # setup optimizer if opt.optimizer == 'adam':", "print('Filtering the images whose label is longer than opt.batch_max_length') #", "from simclr_model import FeaturesModel as Model from test import validation", "p.requires_grad, byol_learner.parameters()): filtered_parameters.append(p) params_num.append(np.prod(p.size())) print('Trainable params num : ', sum(params_num))", "the training') torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') sys.exit() epoch +=1 if __name__ ==", "clipping value. 
default=5') parser.add_argument('--baiduCTC', action='store_true', help='for data_filtering_off mode') \"\"\" Data", "elif opt.optimizer == 'adadelta': optimizer = optim.Adadelta(filtered_parameters, lr=opt.lr, rho=opt.rho, eps=opt.eps,", "opt.num_gpu = torch.cuda.device_count() # print('device count', opt.num_gpu) if opt.num_gpu >", "rate, default=1.0 for Adadelta') parser.add_argument('--beta1', type=float, default=0.9, help='beta1 for adam.", "parser.add_argument('--batch_size', type=int, default=192, help='input batch size') parser.add_argument('--num_iter', type=int, default=300000, help='number", "in opt.character') print('Filtering the images whose label is longer than", "the images containing characters which are not in opt.character') print('Filtering", "num_workers=int(opt.workers), collate_fn=AlignCollate_valid, pin_memory=True) log.write(valid_dataset_log) print('-' * 80) log.write('-' * 80", "opt = parser.parse_args() if not opt.exp_name: opt.exp_name = f'{opt.Transformation}-{opt.FeatureExtraction}-{opt.SequenceModeling}-BYOL' opt.exp_name", "param.data.fill_(1) continue # data parallel for multi-GPU model = torch.nn.DataParallel(model).to(device)", "import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset from simclr_model import FeaturesModel as Model", "{:06f}\".format(iteration, loss_avg.val(), valid_loss_avg.val())) if best_loss is None: best_loss = valid_loss_avg.val()", "with torch.no_grad(): for image_tensors, _ in valid_loader: image = image_tensors.to(device)", "= Averager() # kl_loss_avg = Averager() # kl_loss = torch.nn.KLDivLoss()", "default='0.5-0.5', help='assign ratio for each selected data in the batch')", "is multiplied with num_gpu and multiplied batch_size is ', opt.batch_size)", "== '__main__': parser = argparse.ArgumentParser() parser.add_argument('--exp_name', help='Where to store logs", "VGG|RCNN|ResNet') parser.add_argument('--SequenceModeling', type=str, required=True, help='SequenceModeling stage. 
None|BiLSTM') parser.add_argument('--num_fiducial', type=int, default=20,", "opt.num_gpu > 1: print('------ Use multi-GPU setting ------') print('if you", "setup optimizer if opt.optimizer == 'adam': optimizer = optim.Adam(filtered_parameters, lr=opt.lr,", "keep_size=True), iaa.Crop(percent=((0, 0.0),(0, 0.02),(0, 0),(0, 0.02)), keep_size=True), iaa.Sharpen(alpha=(0.0, 0.5), lightness=(0.0,", "0.5)), iaa.PiecewiseAffine(scale=(0.02, 0.03), mode='edge'), iaa.PerspectiveTransform(scale=(0.01, 0.02))], random_order=True)]) AlignCollate_valid = AlignCollate(imgH=opt.imgH,", "type=str, default='1.0', help='total data usage ratio, this ratio is multiplied", "1-GPU setting, num_iter is divided with num_gpu by default.') If", "name, param in model.named_parameters(): if 'localization_fc2' in name: print(f'Skip {name}", "and models') parser.add_argument('--train_data', required=True, help='path to training dataset') parser.add_argument('--valid_data', required=True,", "it is already initialized') continue try: if 'bias' in name:", "if iteration==0: print(\"Epoch {:06d} Loss: {:.04f}\".format(iteration, loss_avg.val())) iteration += 1", "filtered_parameters.append(p) params_num.append(np.prod(p.size())) print('Trainable params num : ', sum(params_num)) # setup", "'__main__': parser = argparse.ArgumentParser() parser.add_argument('--exp_name', help='Where to store logs and", "is longer than opt.batch_max_length') # see https://github.com/clovaai/deep-text-recognition-benchmark/blob/6593928855fb7abb999a99f428b3e4477d4ae356/dataset.py#L130 opt.select_data = opt.select_data.split('-')", "images containing characters which are not in opt.character') print('Filtering the", "of iterations to train for') parser.add_argument('--valInterval', type=int, default=2000, help='Interval between", "imgaug import augmenters as iaa import imgaug as ia from", "help='Momentum for SGD') parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping value. 
default=5')", "valid_loss_avg.val()) + '\\n') print(\"Iteration {:06d} Loss: {:.06f} Val loss: {:06f}\".format(iteration,", "utils import CTCLabelConverter, CTCLabelConverterForBaiduWarpctc, AttnLabelConverter, Averager from simclr_dataset import hierarchical_dataset,", "as training data)') parser.add_argument('--batch_ratio', type=str, default='0.5-0.5', help='assign ratio for each", "model.named_parameters(): if 'localization_fc2' in name: print(f'Skip {name} as it is", "help='select training data (default is MJ-ST, which means MJ and", "print('if you stuck too long time with multi-GPU setting, try", "options \"\"\" # print(opt) with open(f'./saved_models/{opt.exp_name}/opt.txt', 'a') as opt_file: opt_log", "Batch_Balanced_Dataset(opt) log = open(f'./saved_models/{opt.exp_name}/log_dataset.txt', 'a') ia.seed(1) image_transforms = iaa.Sequential([iaa.SomeOf((1, 5),", "pretrained model from {opt.saved_model}') if opt.FT: model.load_state_dict(torch.load(opt.saved_model), strict=False) else: model.load_state_dict(torch.load(opt.saved_model))", "[] for p in filter(lambda p: p.requires_grad, byol_learner.parameters()): filtered_parameters.append(p) params_num.append(np.prod(p.size()))", "help='decay rate rho for Adadelta. default=0.95') parser.add_argument('--eps', type=float, default=1e-8, help='eps", "None|BiLSTM') parser.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of TPS-STN')", "string.printable[:-6] # same with ASTER setting (use 94 char). 
\"\"\"", "torch.cuda.device_count() # print('device count', opt.num_gpu) if opt.num_gpu > 1: print('------", "batch') parser.add_argument('--total_data_usage_ratio', type=str, default='1.0', help='total data usage ratio, this ratio", "print(\"Optimizer:\") print(optimizer) \"\"\" final options \"\"\" # print(opt) with open(f'./saved_models/{opt.exp_name}/opt.txt',", "start_iter print(device) loss_avg = Averager() valid_loss_avg = Averager() # kl_loss_avg", "ratio, this ratio is multiplied to total number of data.')", "action='store_true', help='for data_filtering_off mode') \"\"\" Model Architecture \"\"\" parser.add_argument('--Transformation', type=str,", "print('-' * 80) log.write('-' * 80 + '\\n') log.close() if", "character mode') parser.add_argument('--PAD', action='store_true', help='whether to keep ratio then pad", "is ', opt.batch_size) opt.batch_size = opt.batch_size * opt.num_gpu print('To equalize", "stage. VGG|RCNN|ResNet') parser.add_argument('--SequenceModeling', type=str, required=True, help='SequenceModeling stage. None|BiLSTM') parser.add_argument('--num_fiducial', type=int,", "ASTER setting (use 94 char). 
\"\"\" Seed and GPU setting", "TPS-STN') parser.add_argument('--input_channel', type=int, default=1, help='the number of input channel of", "help='the size of the LSTM hidden state') parser.add_argument('--weight_decay', type=float, default=10e-4,", "points of TPS-STN') parser.add_argument('--input_channel', type=int, default=1, help='the number of input", "model.train() byol_learner.train() with open(f'./saved_models/{opt.exp_name}/log_train.txt', 'a') as log: log.write(\"Iteration {:06d} Loss:", "https://github.com/clovaai/deep-text-recognition-benchmark/blob/6593928855fb7abb999a99f428b3e4477d4ae356/dataset.py#L130 opt.select_data = opt.select_data.split('-') opt.batch_ratio = opt.batch_ratio.split('-') train_dataset = Batch_Balanced_Dataset(opt)", "import string import argparse import torch import torch.backends.cudnn as cudnn", "iaa.Sequential([iaa.SomeOf((1, 5), [iaa.LinearContrast((0.5, 1.0)), iaa.GaussianBlur((0.5, 1.5)), iaa.Crop(percent=((0, 0.4),(0, 0),(0, 0.4),(0,", "check training progress with validation function. 
num_workers=int(opt.workers), collate_fn=AlignCollate_valid, pin_memory=True) log.write(valid_dataset_log)", "if opt.num_gpu > 1: print('------ Use multi-GPU setting ------') print('if", "argparse import torch import torch.backends.cudnn as cudnn import torch.nn.init as", "opt.FT: model.load_state_dict(torch.load(opt.saved_model), strict=False) else: model.load_state_dict(torch.load(opt.saved_model)) print(\"Model:\") print(model) image_transforms = iaa.Sequential([iaa.SomeOf((1,", "= 0 while(True): # train part for i in tqdm(range(opt.valInterval)):", "Scheduler: scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[int(0.6*opt.num_iter), int(0.8*opt.num_iter)], last_epoch=start_iter-1, gamma=0.1) best_loss =", "torch.nn.KLDivLoss() epoch = 0 while(True): # train part for i", "\", opt.manualSeed) random.seed(opt.manualSeed) np.random.seed(opt.manualSeed) torch.manual_seed(opt.manualSeed) torch.cuda.manual_seed(opt.manualSeed) cudnn.benchmark = True cudnn.deterministic", "parser.add_argument('--baiduCTC', action='store_true', help='for data_filtering_off mode') \"\"\" Data processing \"\"\" parser.add_argument('--select_data',", "0.02))], random_order=True)]) byol_learner = BYOL( model, image_size=(32,100), hidden_layer=-1, channels=1, augment_fn=image_transforms,", "# print(opt.exp_name) os.makedirs(f'./saved_models/{opt.exp_name}', exist_ok=True) \"\"\" vocab / character number configuration", "help='use rgb input') parser.add_argument('--character', type=str, default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label') parser.add_argument('--sensitive', action='store_true',", "default.') If you dont care about it, just commnet out", "+=1 if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--exp_name', help='Where", "\"\"\" if opt.sensitive: # opt.character += 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' opt.character = string.printable[:-6]", "model.eval() with torch.no_grad(): for image_tensors, _ in valid_loader: image =", 
"same with ASTER setting (use 94 char). \"\"\" Seed and", "parser.add_argument('--final_feature', type=int, default=256, help='the size of the output of the", "size of the output of the final layer') opt =", "print(opt.exp_name) os.makedirs(f'./saved_models/{opt.exp_name}', exist_ok=True) \"\"\" vocab / character number configuration \"\"\"", "help=\"path to model to continue training\") parser.add_argument('--FT', action='store_true', help='whether to", "characters which are not in opt.character') print('Filtering the images whose", "kl_loss = torch.nn.KLDivLoss() epoch = 0 while(True): # train part", "train_dataset = Batch_Balanced_Dataset(opt) log = open(f'./saved_models/{opt.exp_name}/log_dataset.txt', 'a') ia.seed(1) image_transforms =", "kl_loss_avg.add(kl_div) model.train() byol_learner.train() with open(f'./saved_models/{opt.exp_name}/log_train.txt', 'a') as log: log.write(\"Iteration {:06d}", "1-GPU setting, the batch_size is multiplied with num_gpu and multiplied", "open(f'./saved_models/{opt.exp_name}/log_dataset.txt', 'a') ia.seed(1) image_transforms = iaa.Sequential([iaa.SomeOf((1, 5), [iaa.LinearContrast((0.5, 1.0)), iaa.GaussianBlur((0.5,", "loading workers', default=4) parser.add_argument('--batch_size', type=int, default=192, help='input batch size') parser.add_argument('--num_iter',", "as optim import torch.utils.data import numpy as np from utils", "plt device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') def train(opt):", "'sgd'], help=\"Optimizer\") parser.add_argument('--lr', type=float, default=1, help='learning rate, default=1.0 for Adadelta')", "ratio is multiplied to total number of data.') parser.add_argument('--batch_max_length', type=int,", "setting, the batch_size is multiplied with num_gpu and multiplied batch_size", "out these line.) 
opt.num_iter = int(opt.num_iter / opt.num_gpu) \"\"\" train(opt)", "as init import torch.optim as optim import torch.utils.data import numpy", "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') def train(opt): \"\"\"", "opt.batch_max_length, opt.Transformation, opt.FeatureExtraction, opt.SequenceModeling) # weight initialization for name, param", "action='store_true', help='whether to keep ratio then pad for image resize')", "opt.input_channel = 3 model = Model(opt) print('model input parameters', opt.imgH,", "import imgaug as ia from tqdm import tqdm import matplotlib.pyplot", "help='number of fiducial points of TPS-STN') parser.add_argument('--input_channel', type=int, default=1, help='the", "print(f'loading pretrained model from {opt.saved_model}') if opt.FT: model.load_state_dict(torch.load(opt.saved_model), strict=False) else:", "[] params_num = [] for p in filter(lambda p: p.requires_grad,", "= int(opt.saved_model.split('_')[-1].split('.')[0]) print(f'continue to train, start_iter: {start_iter}') except: pass #LR", "default=100, help='the width of the input image') parser.add_argument('--rgb', action='store_true', help='use", "ia.seed(1) image_transforms = iaa.Sequential([iaa.SomeOf((1, 5), [iaa.LinearContrast((0.5, 1.0)), iaa.GaussianBlur((0.5, 1.5)), iaa.Crop(percent=((0,", "mode='edge'), iaa.PerspectiveTransform(scale=(0.01, 0.02))], random_order=True)]) AlignCollate_valid = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD, image_transforms=image_transforms)", "= valid_loss_avg.val() torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') elif best_loss > valid_loss_avg.val(): best_loss =", "p: p.requires_grad, byol_learner.parameters()): filtered_parameters.append(p) params_num.append(np.prod(p.size())) print('Trainable params num : ',", "Architecture \"\"\" parser.add_argument('--Transformation', type=str, required=True, help='Transformation stage. 
None|TPS') parser.add_argument('--FeatureExtraction', type=str,", "image = image_tensors.to(device) val_loss = byol_learner(image) valid_loss_avg.add(val_loss) # features =", "default=256, help='the size of the output of the final layer')", "adam. default=0.9') parser.add_argument('--rho', type=float, default=0.95, help='decay rate rho for Adadelta.", "model, image_size=(32,100), hidden_layer=-1, channels=1, augment_fn=image_transforms, augmented=True) print(byol_learner) # filter that", "imgW=opt.imgW, keep_ratio_with_pad=opt.PAD, image_transforms=image_transforms) valid_dataset, valid_dataset_log = hierarchical_dataset(root=opt.valid_data, opt=opt) valid_loader =", "milestones=[int(0.6*opt.num_iter), int(0.8*opt.num_iter)], last_epoch=start_iter-1, gamma=0.1) best_loss = None iteration = start_iter", "opt_file: opt_log = '------------ Options -------------\\n' args = vars(opt) for", "default=192, help='input batch size') parser.add_argument('--num_iter', type=int, default=300000, help='number of iterations", "test import validation from byol_pytorch import BYOL from imgaug import", "if 'bias' in name: init.constant_(param, 0.0) elif 'weight' in name:", "validation function. 
num_workers=int(opt.workers), collate_fn=AlignCollate_valid, pin_memory=True) log.write(valid_dataset_log) print('-' * 80) log.write('-'", "'adadelta', 'sgd'], help=\"Optimizer\") parser.add_argument('--lr', type=float, default=1, help='learning rate, default=1.0 for", "cudnn.benchmark = True cudnn.deterministic = True opt.num_gpu = torch.cuda.device_count() #", "features[int(features.shape[0]/2):]) # kl_loss_avg.add(kl_div) model.train() byol_learner.train() with open(f'./saved_models/{opt.exp_name}/log_train.txt', 'a') as log:", "torch.no_grad(): for image_tensors, _ in valid_loader: image = image_tensors.to(device) val_loss", "Loss: {:.06f} Val loss: {:06f}\".format(iteration, loss_avg.val(), valid_loss_avg.val())) if best_loss is", "byol_learner.update_moving_average() loss_avg.add(loss) if iteration==0: print(\"Epoch {:06d} Loss: {:.04f}\".format(iteration, loss_avg.val())) iteration", "AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD, image_transforms=image_transforms) valid_dataset, valid_dataset_log = hierarchical_dataset(root=opt.valid_data, opt=opt) valid_loader", "valid_loss_avg.reset() if epoch % 5 == 0: torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') if", "torch import torch.backends.cudnn as cudnn import torch.nn.init as init import", "is divided with num_gpu by default.') If you dont care", "# check multi-GPU issue https://github.com/clovaai/deep-text-recognition-benchmark/issues/1 opt.workers = opt.workers * opt.num_gpu", "is None: best_loss = valid_loss_avg.val() torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') elif best_loss >", "% 5 == 0: torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') if (iteration + 1)", "default=5, help='gradient clipping value. 
default=5') parser.add_argument('--baiduCTC', action='store_true', help='for data_filtering_off mode')", "require gradient decent filtered_parameters = [] params_num = [] for", "_ in valid_loader: image = image_tensors.to(device) val_loss = byol_learner(image) valid_loss_avg.add(val_loss)", "during training') parser.add_argument('--final_feature', type=int, default=256, help='the size of the output", "if not opt.exp_name: opt.exp_name = f'{opt.Transformation}-{opt.FeatureExtraction}-{opt.SequenceModeling}-BYOL' opt.exp_name += f'-Seed{opt.manualSeed}' #", "params_num = [] for p in filter(lambda p: p.requires_grad, byol_learner.parameters()):", "height of the input image') parser.add_argument('--imgW', type=int, default=100, help='the width", "f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') if (iteration + 1) >= opt.num_iter: print('end the training')", "help='for random seed setting') parser.add_argument('--workers', type=int, help='number of data loading", "== 'adam': optimizer = optim.Adam(filtered_parameters, lr=opt.lr, betas=(opt.beta1, 0.999)) elif opt.optimizer", "torch.nn.utils.clip_grad_norm_(byol_learner.parameters(), opt.grad_clip) optimizer.step() scheduler.step() byol_learner.update_moving_average() loss_avg.add(loss) if iteration==0: print(\"Epoch {:06d}", "0.4),(0, 0),(0, 0.4),(0, 0.0)), keep_size=True), iaa.Crop(percent=((0, 0.0),(0, 0.02),(0, 0),(0, 0.02)),", "optimizer = optim.SGD(filtered_parameters, lr=opt.lr, momentum=opt.momentum, weight_decay=opt.weight_decay, nesterov=opt.nesterov) else: raise Exception('Unknown", "{opt.saved_model}') if opt.FT: model.load_state_dict(torch.load(opt.saved_model), strict=False) else: model.load_state_dict(torch.load(opt.saved_model)) print(\"Model:\") print(model) image_transforms", "best_loss = None iteration = start_iter print(device) loss_avg = Averager()", "type=float, default=1e-8, help='eps for Adadelta. 
default=1e-8') parser.add_argument('--nesterov', action='store_true', help='Use Nesterov", "!= '': print(f'loading pretrained model from {opt.saved_model}') if opt.FT: model.load_state_dict(torch.load(opt.saved_model),", "parser.add_argument('--imgW', type=int, default=100, help='the width of the input image') parser.add_argument('--rgb',", "multi-GPU setting ------') print('if you stuck too long time with", "imgaug as ia from tqdm import tqdm import matplotlib.pyplot as", "* 80 + '\\n') log.close() if opt.rgb: opt.input_channel = 3", "loss: {:06f}\".format(iteration, loss_avg.val(), valid_loss_avg.val())) if best_loss is None: best_loss =", "iteration==0: print(\"Epoch {:06d} Loss: {:.04f}\".format(iteration, loss_avg.val())) iteration += 1 byol_learner.eval()", "in args.items(): opt_log += f'{str(k)}: {str(v)}\\n' opt_log += '---------------------------------------\\n' print(opt_log)", "choices=['adam', 'adadelta', 'sgd'], help=\"Optimizer\") parser.add_argument('--lr', type=float, default=1, help='learning rate, default=1.0", "optim.Adadelta(filtered_parameters, lr=opt.lr, rho=opt.rho, eps=opt.eps, weight_decay=opt.weight_decay) elif opt.optimizer == 'sgd': optimizer", "{:06d} Loss: {:.06f} Val loss: {:06f}\".format(iteration, loss_avg.val(), valid_loss_avg.val()) + '\\n')", "import torch.optim as optim import torch.utils.data import numpy as np", "f'{str(k)}: {str(v)}\\n' opt_log += '---------------------------------------\\n' print(opt_log) opt_file.write(opt_log) \"\"\" start training", "opt.optimizer == 'adadelta': optimizer = optim.Adadelta(filtered_parameters, lr=opt.lr, rho=opt.rho, eps=opt.eps, weight_decay=opt.weight_decay)", "-------------\\n' args = vars(opt) for k, v in args.items(): opt_log", "model.train() if opt.saved_model != '': print(f'loading pretrained model from {opt.saved_model}')", "3 model = Model(opt) print('model input parameters', opt.imgH, opt.imgW, opt.num_fiducial,", "= optim.Adadelta(filtered_parameters, lr=opt.lr, rho=opt.rho, 
eps=opt.eps, weight_decay=opt.weight_decay) elif opt.optimizer == 'sgd':", "type=float, default=1, help='learning rate, default=1.0 for Adadelta') parser.add_argument('--beta1', type=float, default=0.9,", "{str(v)}\\n' opt_log += '---------------------------------------\\n' print(opt_log) opt_file.write(opt_log) \"\"\" start training \"\"\"", "log.write(\"Iteration {:06d} Loss: {:.06f} Val loss: {:06f}\".format(iteration, loss_avg.val(), valid_loss_avg.val()) +", "train(opt): \"\"\" dataset preparation \"\"\" if not opt.data_filtering_off: print('Filtering the", "* 80) log.write('-' * 80 + '\\n') log.close() if opt.rgb:", "= [] params_num = [] for p in filter(lambda p:", "0),(0, 0.4),(0, 0.0)), keep_size=True), iaa.Crop(percent=((0, 0.0),(0, 0.02),(0, 0),(0, 0.02)), keep_size=True),", "= 0 if opt.saved_model != '': try: start_iter = int(opt.saved_model.split('_')[-1].split('.')[0])", "action='store_true', help='for data_filtering_off mode') \"\"\" Data processing \"\"\" parser.add_argument('--select_data', type=str,", "default=32, help='the height of the input image') parser.add_argument('--imgW', type=int, default=100,", "the input image') parser.add_argument('--imgW', type=int, default=100, help='the width of the", "Model Architecture \"\"\" parser.add_argument('--Transformation', type=str, required=True, help='Transformation stage. 
None|TPS') parser.add_argument('--FeatureExtraction',", "= optim.lr_scheduler.MultiStepLR(optimizer, milestones=[int(0.6*opt.num_iter), int(0.8*opt.num_iter)], last_epoch=start_iter-1, gamma=0.1) best_loss = None iteration", "valid_loss_avg.add(val_loss) # features = model(image) # features = features.view(-1, 26,", "in model.named_parameters(): if 'localization_fc2' in name: print(f'Skip {name} as it", "action='store_true', help='Use Nesterov momentum for SGD') parser.add_argument('--momentum', type=float, default=0.9, help='Momentum", "opt.character += 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' opt.character = string.printable[:-6] # same with ASTER", "image_size=(32,100), hidden_layer=-1, channels=1, augment_fn=image_transforms, augmented=True) print(byol_learner) # filter that only", "not in opt.character') print('Filtering the images whose label is longer", "\"\"\" # print(\"Random Seed: \", opt.manualSeed) random.seed(opt.manualSeed) np.random.seed(opt.manualSeed) torch.manual_seed(opt.manualSeed) torch.cuda.manual_seed(opt.manualSeed)", "import argparse import torch import torch.backends.cudnn as cudnn import torch.nn.init", "lr=opt.lr, betas=(opt.beta1, 0.999)) elif opt.optimizer == 'adadelta': optimizer = optim.Adadelta(filtered_parameters,", "\"\"\" start_iter = 0 if opt.saved_model != '': try: start_iter", "image = image_tensors.to(device) optimizer.zero_grad() loss = byol_learner(image) loss.backward() if opt.grad_clip:", "\"\"\" if not opt.data_filtering_off: print('Filtering the images containing characters which", "k, v in args.items(): opt_log += f'{str(k)}: {str(v)}\\n' opt_log +=", "help='the size of the output of the final layer') opt", "print('------ Use multi-GPU setting ------') print('if you stuck too long", "with num_gpu and multiplied batch_size is ', opt.batch_size) opt.batch_size =", "of epochs to 1-GPU setting, num_iter is divided with num_gpu", "rate rho for Adadelta. 
default=0.95') parser.add_argument('--eps', type=float, default=1e-8, help='eps for", "opt.num_gpu) if opt.num_gpu > 1: print('------ Use multi-GPU setting ------')", "help=\"Optimizer\") parser.add_argument('--lr', type=float, default=1, help='learning rate, default=1.0 for Adadelta') parser.add_argument('--beta1',", "= train_dataset.get_batch() image = image_tensors.to(device) optimizer.zero_grad() loss = byol_learner(image) loss.backward()", "the final layer') opt = parser.parse_args() if not opt.exp_name: opt.exp_name", "import sys import time import random import string import argparse", "0.5)), iaa.PiecewiseAffine(scale=(0.02, 0.03), mode='edge'), iaa.PerspectiveTransform(scale=(0.01, 0.02))], random_order=True)]) byol_learner = BYOL(", "help='for sensitive character mode') parser.add_argument('--PAD', action='store_true', help='whether to keep ratio", "0 if opt.saved_model != '': try: start_iter = int(opt.saved_model.split('_')[-1].split('.')[0]) print(f'continue", "type=int, help='number of data loading workers', default=4) parser.add_argument('--batch_size', type=int, default=192,", "print(f'Skip {name} as it is already initialized') continue try: if", "Loss: {:.04f}\".format(iteration, loss_avg.val())) iteration += 1 byol_learner.eval() model.eval() with torch.no_grad():", "parser.add_argument('--FinalLayer', action='store_true', help='Use a nonlinear projection head during training') parser.add_argument('--final_feature',", "init.kaiming_normal_(param) except Exception as e: # for batchnorm. 
if 'weight'", "= hierarchical_dataset(root=opt.valid_data, opt=opt) valid_loader = torch.utils.data.DataLoader( valid_dataset, batch_size=opt.batch_size, shuffle=True, #", "'': try: start_iter = int(opt.saved_model.split('_')[-1].split('.')[0]) print(f'continue to train, start_iter: {start_iter}')", "None iteration = start_iter print(device) loss_avg = Averager() valid_loss_avg =", "of input channel of Feature extractor') parser.add_argument('--output_channel', type=int, default=512, help='the", "1: print('------ Use multi-GPU setting ------') print('if you stuck too", "import CTCLabelConverter, CTCLabelConverterForBaiduWarpctc, AttnLabelConverter, Averager from simclr_dataset import hierarchical_dataset, AlignCollate,", "help='path to training dataset') parser.add_argument('--valid_data', required=True, help='path to validation dataset')", "validation from byol_pytorch import BYOL from imgaug import augmenters as", "opt.batch_size = opt.batch_size * opt.num_gpu \"\"\" previous version print('To equlize", "string import argparse import torch import torch.backends.cudnn as cudnn import", "opt_log = '------------ Options -------------\\n' args = vars(opt) for k,", "this ratio is multiplied to total number of data.') parser.add_argument('--batch_max_length',", "torch.manual_seed(opt.manualSeed) torch.cuda.manual_seed(opt.manualSeed) cudnn.benchmark = True cudnn.deterministic = True opt.num_gpu =", "80) log.write('-' * 80 + '\\n') log.close() if opt.rgb: opt.input_channel", "= torch.utils.data.DataLoader( valid_dataset, batch_size=opt.batch_size, shuffle=True, # 'True' to check training", "iaa.Crop(percent=((0, 0.0),(0, 0.02),(0, 0),(0, 0.02)), keep_size=True), iaa.Sharpen(alpha=(0.0, 0.5), lightness=(0.0, 0.5)),", "byol_learner.parameters()): filtered_parameters.append(p) params_num.append(np.prod(p.size())) print('Trainable params num : ', sum(params_num)) #", "parser.add_argument('--workers', type=int, help='number of data loading workers', default=4) 
parser.add_argument('--batch_size', type=int,", "0 while(True): # train part for i in tqdm(range(opt.valInterval)): image_tensors,", "= image_tensors.to(device) val_loss = byol_learner(image) valid_loss_avg.add(val_loss) # features = model(image)", "type=str, default='MJ-ST', help='select training data (default is MJ-ST, which means", "parser.parse_args() if not opt.exp_name: opt.exp_name = f'{opt.Transformation}-{opt.FeatureExtraction}-{opt.SequenceModeling}-BYOL' opt.exp_name += f'-Seed{opt.manualSeed}'", "for each selected data in the batch') parser.add_argument('--total_data_usage_ratio', type=str, default='1.0',", "valid_loss_avg.val())) if best_loss is None: best_loss = valid_loss_avg.val() torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth')", "= iaa.Sequential([iaa.SomeOf((1, 5), [iaa.LinearContrast((0.5, 1.0)), iaa.GaussianBlur((0.5, 1.5)), iaa.Crop(percent=((0, 0.4),(0, 0),(0,", "MJ-ST, which means MJ and ST used as training data)')", "'True' to check training progress with validation function. num_workers=int(opt.workers), collate_fn=AlignCollate_valid,", "'bias' in name: init.constant_(param, 0.0) elif 'weight' in name: init.kaiming_normal_(param)", "default='1.0', help='total data usage ratio, this ratio is multiplied to", "data.') parser.add_argument('--batch_max_length', type=int, default=25, help='maximum-label-length') parser.add_argument('--imgH', type=int, default=32, help='the height", "help='the height of the input image') parser.add_argument('--imgW', type=int, default=100, help='the", "projection head during training') parser.add_argument('--final_feature', type=int, default=256, help='the size of", "Seed and GPU setting \"\"\" # print(\"Random Seed: \", opt.manualSeed)", "* opt.num_gpu opt.batch_size = opt.batch_size * opt.num_gpu \"\"\" previous version", "type=float, default=0.95, help='decay rate rho for Adadelta. 
default=0.95') parser.add_argument('--eps', type=float,", "usage ratio, this ratio is multiplied to total number of", "opt.select_data.split('-') opt.batch_ratio = opt.batch_ratio.split('-') train_dataset = Batch_Balanced_Dataset(opt) log = open(f'./saved_models/{opt.exp_name}/log_dataset.txt',", "opt.batch_ratio.split('-') train_dataset = Batch_Balanced_Dataset(opt) log = open(f'./saved_models/{opt.exp_name}/log_dataset.txt', 'a') ia.seed(1) image_transforms", "GPU setting \"\"\" # print(\"Random Seed: \", opt.manualSeed) random.seed(opt.manualSeed) np.random.seed(opt.manualSeed)", "except: pass #LR Scheduler: scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[int(0.6*opt.num_iter), int(0.8*opt.num_iter)], last_epoch=start_iter-1,", "whose label is longer than opt.batch_max_length') # see https://github.com/clovaai/deep-text-recognition-benchmark/blob/6593928855fb7abb999a99f428b3e4477d4ae356/dataset.py#L130 opt.select_data", "Adadelta') parser.add_argument('--beta1', type=float, default=0.9, help='beta1 for adam. default=0.9') parser.add_argument('--rho', type=float,", "multi-GPU issue https://github.com/clovaai/deep-text-recognition-benchmark/issues/1 opt.workers = opt.workers * opt.num_gpu opt.batch_size =", "for p in filter(lambda p: p.requires_grad, byol_learner.parameters()): filtered_parameters.append(p) params_num.append(np.prod(p.size())) print('Trainable", "rho=opt.rho, eps=opt.eps, weight_decay=opt.weight_decay) elif opt.optimizer == 'sgd': optimizer = optim.SGD(filtered_parameters,", "random seed setting') parser.add_argument('--workers', type=int, help='number of data loading workers',", "default=1e-8, help='eps for Adadelta. 
default=1e-8') parser.add_argument('--nesterov', action='store_true', help='Use Nesterov momentum", "ST used as training data)') parser.add_argument('--batch_ratio', type=str, default='0.5-0.5', help='assign ratio", "hierarchical_dataset(root=opt.valid_data, opt=opt) valid_loader = torch.utils.data.DataLoader( valid_dataset, batch_size=opt.batch_size, shuffle=True, # 'True'", "cudnn.deterministic = True opt.num_gpu = torch.cuda.device_count() # print('device count', opt.num_gpu)", "with validation function. num_workers=int(opt.workers), collate_fn=AlignCollate_valid, pin_memory=True) log.write(valid_dataset_log) print('-' * 80)", "torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') sys.exit() epoch +=1 if __name__ == '__main__': parser", "previous version print('To equlize batch stats to 1-GPU setting, the", "default=4) parser.add_argument('--batch_size', type=int, default=192, help='input batch size') parser.add_argument('--num_iter', type=int, default=300000,", "opt.batch_max_length') # see https://github.com/clovaai/deep-text-recognition-benchmark/blob/6593928855fb7abb999a99f428b3e4477d4ae356/dataset.py#L130 opt.select_data = opt.select_data.split('-') opt.batch_ratio = opt.batch_ratio.split('-')", "import torch.nn.init as init import torch.optim as optim import torch.utils.data", "default=5') parser.add_argument('--baiduCTC', action='store_true', help='for data_filtering_off mode') \"\"\" Data processing \"\"\"", "'adadelta': optimizer = optim.Adadelta(filtered_parameters, lr=opt.lr, rho=opt.rho, eps=opt.eps, weight_decay=opt.weight_decay) elif opt.optimizer", "# kl_loss_avg.add(kl_div) model.train() byol_learner.train() with open(f'./saved_models/{opt.exp_name}/log_train.txt', 'a') as log: log.write(\"Iteration", "filter that only require gradient decent filtered_parameters = [] params_num", "preparation \"\"\" if not opt.data_filtering_off: print('Filtering the images containing characters", "channel of Feature extractor') 
parser.add_argument('--output_channel', type=int, default=512, help='the number of", "opt.sensitive: # opt.character += 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' opt.character = string.printable[:-6] # same", "equalize the number of epochs to 1-GPU setting, num_iter is", "if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--exp_name', help='Where to", "init.constant_(param, 0.0) elif 'weight' in name: init.kaiming_normal_(param) except Exception as", "batch size') parser.add_argument('--num_iter', type=int, default=300000, help='number of iterations to train", "default=300000, help='number of iterations to train for') parser.add_argument('--valInterval', type=int, default=2000,", "help='number of iterations to train for') parser.add_argument('--valInterval', type=int, default=2000, help='Interval", "multiplied batch_size is ', opt.batch_size) opt.batch_size = opt.batch_size * opt.num_gpu", "with num_gpu by default.') If you dont care about it,", "= AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD, image_transforms=image_transforms) valid_dataset, valid_dataset_log = hierarchical_dataset(root=opt.valid_data, opt=opt)", "parser.add_argument('--manualSeed', type=int, default=1111, help='for random seed setting') parser.add_argument('--workers', type=int, help='number", "of Feature extractor') parser.add_argument('--hidden_size', type=int, default=256, help='the size of the", "0.0) elif 'weight' in name: init.kaiming_normal_(param) except Exception as e:", "for image resize') parser.add_argument('--data_filtering_off', action='store_true', help='for data_filtering_off mode') \"\"\" Model", "vars(opt) for k, v in args.items(): opt_log += f'{str(k)}: {str(v)}\\n'", "print(\"Random Seed: \", opt.manualSeed) random.seed(opt.manualSeed) np.random.seed(opt.manualSeed) torch.manual_seed(opt.manualSeed) torch.cuda.manual_seed(opt.manualSeed) cudnn.benchmark =", "> 1: print('------ Use multi-GPU setting ------') print('if you stuck", 
"Averager() valid_loss_avg = Averager() # kl_loss_avg = Averager() # kl_loss", "+ 1) >= opt.num_iter: print('end the training') torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') sys.exit()", "data loading workers', default=4) parser.add_argument('--batch_size', type=int, default=192, help='input batch size')", "fiducial points of TPS-STN') parser.add_argument('--input_channel', type=int, default=1, help='the number of", "start_iter: {start_iter}') except: pass #LR Scheduler: scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[int(0.6*opt.num_iter),", "type=int, default=256, help='the size of the LSTM hidden state') parser.add_argument('--weight_decay',", "opt.exp_name = f'{opt.Transformation}-{opt.FeatureExtraction}-{opt.SequenceModeling}-BYOL' opt.exp_name += f'-Seed{opt.manualSeed}' # print(opt.exp_name) os.makedirs(f'./saved_models/{opt.exp_name}', exist_ok=True)", "number configuration \"\"\" if opt.sensitive: # opt.character += 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' opt.character", "best_loss = valid_loss_avg.val() torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') scheduler.step() loss_avg.reset() valid_loss_avg.reset() if epoch", "setting') parser.add_argument('--workers', type=int, help='number of data loading workers', default=4) parser.add_argument('--batch_size',", "are not in opt.character') print('Filtering the images whose label is", "parser.add_argument('--eps', type=float, default=1e-8, help='eps for Adadelta. 
default=1e-8') parser.add_argument('--nesterov', action='store_true', help='Use", "and GPU setting \"\"\" # print(\"Random Seed: \", opt.manualSeed) random.seed(opt.manualSeed)", "for') parser.add_argument('--valInterval', type=int, default=2000, help='Interval between each validation') parser.add_argument('--saved_model', default='',", "character number configuration \"\"\" if opt.sensitive: # opt.character += 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'", "divided with num_gpu by default.') If you dont care about", "care about it, just commnet out these line.) opt.num_iter =", "longer than opt.batch_max_length') # see https://github.com/clovaai/deep-text-recognition-benchmark/blob/6593928855fb7abb999a99f428b3e4477d4ae356/dataset.py#L130 opt.select_data = opt.select_data.split('-') opt.batch_ratio", "iaa.PerspectiveTransform(scale=(0.01, 0.02))], random_order=True)]) AlignCollate_valid = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD, image_transforms=image_transforms) valid_dataset,", "simclr_dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset from simclr_model import FeaturesModel as", "valid_loader: image = image_tensors.to(device) val_loss = byol_learner(image) valid_loss_avg.add(val_loss) # features", "f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') sys.exit() epoch +=1 if __name__ == '__main__': parser =", "= 3 model = Model(opt) print('model input parameters', opt.imgH, opt.imgW,", "# kl_loss_avg = Averager() # kl_loss = torch.nn.KLDivLoss() epoch =", "width of the input image') parser.add_argument('--rgb', action='store_true', help='use rgb input')", "params num : ', sum(params_num)) # setup optimizer if opt.optimizer", "commnet out these line.) 
opt.num_iter = int(opt.num_iter / opt.num_gpu) \"\"\"", "5), [iaa.LinearContrast((0.5, 1.0)), iaa.GaussianBlur((0.5, 1.5)), iaa.Crop(percent=((0, 0.4),(0, 0),(0, 0.4),(0, 0.0)),", "default=10e-4, help='Weight decay') parser.add_argument('--FinalLayer', action='store_true', help='Use a nonlinear projection head", "# kl_div = kl_loss(features[:int(features.shape[0]/2)], features[int(features.shape[0]/2):]) # kl_loss_avg.add(kl_div) model.train() byol_learner.train() with", "keep_ratio_with_pad=opt.PAD, image_transforms=image_transforms) valid_dataset, valid_dataset_log = hierarchical_dataset(root=opt.valid_data, opt=opt) valid_loader = torch.utils.data.DataLoader(", "type=int, default=512, help='the number of output channel of Feature extractor')", "np.random.seed(opt.manualSeed) torch.manual_seed(opt.manualSeed) torch.cuda.manual_seed(opt.manualSeed) cudnn.benchmark = True cudnn.deterministic = True opt.num_gpu", "it, just commnet out these line.) opt.num_iter = int(opt.num_iter /", "parser.add_argument('--sensitive', action='store_true', help='for sensitive character mode') parser.add_argument('--PAD', action='store_true', help='whether to", "store logs and models') parser.add_argument('--train_data', required=True, help='path to training dataset')", "default=20, help='number of fiducial points of TPS-STN') parser.add_argument('--input_channel', type=int, default=1,", "type=int, default=192, help='input batch size') parser.add_argument('--num_iter', type=int, default=300000, help='number of", "valid_loss_avg.val() torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') elif best_loss > valid_loss_avg.val(): best_loss = valid_loss_avg.val()", "as np from utils import CTCLabelConverter, CTCLabelConverterForBaiduWarpctc, AttnLabelConverter, Averager from", "else: raise Exception('Unknown optimizer') print(\"Optimizer:\") print(optimizer) \"\"\" final options \"\"\"", "torch.optim as optim import torch.utils.data import numpy as np 
from", "SGD') parser.add_argument('--momentum', type=float, default=0.9, help='Momentum for SGD') parser.add_argument('--grad_clip', type=float, default=5,", "resize') parser.add_argument('--data_filtering_off', action='store_true', help='for data_filtering_off mode') \"\"\" Model Architecture \"\"\"", "hidden_layer=-1, channels=1, augment_fn=image_transforms, augmented=True) print(byol_learner) # filter that only require", "args = vars(opt) for k, v in args.items(): opt_log +=", "default=256, help='the size of the LSTM hidden state') parser.add_argument('--weight_decay', type=float,", "augmenters as iaa import imgaug as ia from tqdm import", "default=512, help='the number of output channel of Feature extractor') parser.add_argument('--hidden_size',", "batch_size is ', opt.batch_size) opt.batch_size = opt.batch_size * opt.num_gpu print('To", "collate_fn=AlignCollate_valid, pin_memory=True) log.write(valid_dataset_log) print('-' * 80) log.write('-' * 80 +", "MJ and ST used as training data)') parser.add_argument('--batch_ratio', type=str, default='0.5-0.5',", "0.0)), keep_size=True), iaa.Crop(percent=((0, 0.0),(0, 0.02),(0, 0),(0, 0.02)), keep_size=True), iaa.Sharpen(alpha=(0.0, 0.5),", "exist_ok=True) \"\"\" vocab / character number configuration \"\"\" if opt.sensitive:", "as iaa import imgaug as ia from tqdm import tqdm", "torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') elif best_loss > valid_loss_avg.val(): best_loss = valid_loss_avg.val() torch.save(model.state_dict(),", "f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') elif best_loss > valid_loss_avg.val(): best_loss = valid_loss_avg.val() torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth')", "if torch.cuda.is_available() else 'cpu') def train(opt): \"\"\" dataset preparation \"\"\"", "help='Use a nonlinear projection head during training') parser.add_argument('--final_feature', type=int, default=256,", "+= 
'ABCDEFGHIJKLMNOPQRSTUVWXYZ' opt.character = string.printable[:-6] # same with ASTER setting", "(default is MJ-ST, which means MJ and ST used as", "os.makedirs(f'./saved_models/{opt.exp_name}', exist_ok=True) \"\"\" vocab / character number configuration \"\"\" if", "function. num_workers=int(opt.workers), collate_fn=AlignCollate_valid, pin_memory=True) log.write(valid_dataset_log) print('-' * 80) log.write('-' *", "log.close() if opt.rgb: opt.input_channel = 3 model = Model(opt) print('model", "opt.imgW, opt.num_fiducial, opt.input_channel, opt.output_channel, opt.hidden_size, opt.batch_max_length, opt.Transformation, opt.FeatureExtraction, opt.SequenceModeling) #", "e: # for batchnorm. if 'weight' in name: param.data.fill_(1) continue", "of data loading workers', default=4) parser.add_argument('--batch_size', type=int, default=192, help='input batch", "training\") parser.add_argument('--FT', action='store_true', help='whether to do fine-tuning') parser.add_argument('--optimizer', type=str, choices=['adam',", "Adadelta. default=1e-8') parser.add_argument('--nesterov', action='store_true', help='Use Nesterov momentum for SGD') parser.add_argument('--momentum',", "== 0: torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') if (iteration + 1) >= opt.num_iter:", "data_filtering_off mode') \"\"\" Data processing \"\"\" parser.add_argument('--select_data', type=str, default='MJ-ST', help='select", "tqdm(range(opt.valInterval)): image_tensors, _ = train_dataset.get_batch() image = image_tensors.to(device) optimizer.zero_grad() loss", "if opt.FT: model.load_state_dict(torch.load(opt.saved_model), strict=False) else: model.load_state_dict(torch.load(opt.saved_model)) print(\"Model:\") print(model) image_transforms =", "rho for Adadelta. 
default=0.95') parser.add_argument('--eps', type=float, default=1e-8, help='eps for Adadelta.", "simclr_model import FeaturesModel as Model from test import validation from", "torch.cuda.manual_seed(opt.manualSeed) cudnn.benchmark = True cudnn.deterministic = True opt.num_gpu = torch.cuda.device_count()", "# weight initialization for name, param in model.named_parameters(): if 'localization_fc2'", "valid_dataset, valid_dataset_log = hierarchical_dataset(root=opt.valid_data, opt=opt) valid_loader = torch.utils.data.DataLoader( valid_dataset, batch_size=opt.batch_size,", "= byol_learner(image) valid_loss_avg.add(val_loss) # features = model(image) # features =", "parser.add_argument('--weight_decay', type=float, default=10e-4, help='Weight decay') parser.add_argument('--FinalLayer', action='store_true', help='Use a nonlinear", "optimizer if opt.optimizer == 'adam': optimizer = optim.Adam(filtered_parameters, lr=opt.lr, betas=(opt.beta1,", "is already initialized') continue try: if 'bias' in name: init.constant_(param,", "required=True, help='path to training dataset') parser.add_argument('--valid_data', required=True, help='path to validation", "is MJ-ST, which means MJ and ST used as training", "{:.06f} Val loss: {:06f}\".format(iteration, loss_avg.val(), valid_loss_avg.val())) if best_loss is None:", "to 1-GPU setting, num_iter is divided with num_gpu by default.')", "= optim.SGD(filtered_parameters, lr=opt.lr, momentum=opt.momentum, weight_decay=opt.weight_decay, nesterov=opt.nesterov) else: raise Exception('Unknown optimizer')", "try: if 'bias' in name: init.constant_(param, 0.0) elif 'weight' in", "= byol_learner(image) loss.backward() if opt.grad_clip: torch.nn.utils.clip_grad_norm_(byol_learner.parameters(), opt.grad_clip) optimizer.step() scheduler.step() byol_learner.update_moving_average()", "f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') scheduler.step() loss_avg.reset() valid_loss_avg.reset() if epoch % 5 == 0:", 
"f'{opt.Transformation}-{opt.FeatureExtraction}-{opt.SequenceModeling}-BYOL' opt.exp_name += f'-Seed{opt.manualSeed}' # print(opt.exp_name) os.makedirs(f'./saved_models/{opt.exp_name}', exist_ok=True) \"\"\" vocab", "parser.add_argument('--lr', type=float, default=1, help='learning rate, default=1.0 for Adadelta') parser.add_argument('--beta1', type=float,", "1.5)), iaa.Crop(percent=((0, 0.4),(0, 0),(0, 0.4),(0, 0.0)), keep_size=True), iaa.Crop(percent=((0, 0.0),(0, 0.02),(0,", "(iteration + 1) >= opt.num_iter: print('end the training') torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth')", "train, start_iter: {start_iter}') except: pass #LR Scheduler: scheduler = optim.lr_scheduler.MultiStepLR(optimizer,", "image_transforms=image_transforms) valid_dataset, valid_dataset_log = hierarchical_dataset(root=opt.valid_data, opt=opt) valid_loader = torch.utils.data.DataLoader( valid_dataset,", "help='eps for Adadelta. default=1e-8') parser.add_argument('--nesterov', action='store_true', help='Use Nesterov momentum for", "each selected data in the batch') parser.add_argument('--total_data_usage_ratio', type=str, default='1.0', help='total", "\"\"\" previous version print('To equlize batch stats to 1-GPU setting,", "print('end the training') torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') sys.exit() epoch +=1 if __name__", "epoch +=1 if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--exp_name',", "help='input batch size') parser.add_argument('--num_iter', type=int, default=300000, help='number of iterations to", "from imgaug import augmenters as iaa import imgaug as ia", "too long time with multi-GPU setting, try to set --workers", "= opt.workers * opt.num_gpu opt.batch_size = opt.batch_size * opt.num_gpu \"\"\"", "nonlinear projection head during training') parser.add_argument('--final_feature', type=int, default=256, help='the size", "if opt.rgb: opt.input_channel 
= 3 model = Model(opt) print('model input", "you stuck too long time with multi-GPU setting, try to", "optim.SGD(filtered_parameters, lr=opt.lr, momentum=opt.momentum, weight_decay=opt.weight_decay, nesterov=opt.nesterov) else: raise Exception('Unknown optimizer') print(\"Optimizer:\")", "torch.device('cuda' if torch.cuda.is_available() else 'cpu') def train(opt): \"\"\" dataset preparation", "mode='edge'), iaa.PerspectiveTransform(scale=(0.01, 0.02))], random_order=True)]) byol_learner = BYOL( model, image_size=(32,100), hidden_layer=-1,", "type=int, default=2000, help='Interval between each validation') parser.add_argument('--saved_model', default='', help=\"path to", "= torch.nn.DataParallel(model).to(device) model.train() if opt.saved_model != '': print(f'loading pretrained model", "number of output channel of Feature extractor') parser.add_argument('--hidden_size', type=int, default=256,", "Feature extractor') parser.add_argument('--hidden_size', type=int, default=256, help='the size of the LSTM", "Options -------------\\n' args = vars(opt) for k, v in args.items():", "action='store_true', help='use rgb input') parser.add_argument('--character', type=str, default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label') parser.add_argument('--sensitive',", "for SGD') parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping value. 
default=5') parser.add_argument('--baiduCTC',", "features.shape[1]) # kl_div = kl_loss(features[:int(features.shape[0]/2)], features[int(features.shape[0]/2):]) # kl_loss_avg.add(kl_div) model.train() byol_learner.train()", "if opt.grad_clip: torch.nn.utils.clip_grad_norm_(byol_learner.parameters(), opt.grad_clip) optimizer.step() scheduler.step() byol_learner.update_moving_average() loss_avg.add(loss) if iteration==0:", "hidden state') parser.add_argument('--weight_decay', type=float, default=10e-4, help='Weight decay') parser.add_argument('--FinalLayer', action='store_true', help='Use", "setting, num_iter is divided with num_gpu by default.') If you", "help='total data usage ratio, this ratio is multiplied to total", "means MJ and ST used as training data)') parser.add_argument('--batch_ratio', type=str,", "num_gpu by default.') If you dont care about it, just", "filtered_parameters = [] params_num = [] for p in filter(lambda", "channel of Feature extractor') parser.add_argument('--hidden_size', type=int, default=256, help='the size of", "parser.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of TPS-STN') parser.add_argument('--input_channel',", "print('To equalize the number of epochs to 1-GPU setting, num_iter", "= None iteration = start_iter print(device) loss_avg = Averager() valid_loss_avg", "default=25, help='maximum-label-length') parser.add_argument('--imgH', type=int, default=32, help='the height of the input", "name: print(f'Skip {name} as it is already initialized') continue try:", "start training \"\"\" start_iter = 0 if opt.saved_model != '':", "optim.lr_scheduler.MultiStepLR(optimizer, milestones=[int(0.6*opt.num_iter), int(0.8*opt.num_iter)], last_epoch=start_iter-1, gamma=0.1) best_loss = None iteration =", "in filter(lambda p: p.requires_grad, byol_learner.parameters()): filtered_parameters.append(p) params_num.append(np.prod(p.size())) print('Trainable params num", "name: param.data.fill_(1) continue # data 
parallel for multi-GPU model =", "parallel for multi-GPU model = torch.nn.DataParallel(model).to(device) model.train() if opt.saved_model !=", "if 'localization_fc2' in name: print(f'Skip {name} as it is already", "import BYOL from imgaug import augmenters as iaa import imgaug", "iaa.Crop(percent=((0, 0.4),(0, 0),(0, 0.4),(0, 0.0)), keep_size=True), iaa.Crop(percent=((0, 0.0),(0, 0.02),(0, 0),(0,", "training data (default is MJ-ST, which means MJ and ST", "as opt_file: opt_log = '------------ Options -------------\\n' args = vars(opt)", "of the LSTM hidden state') parser.add_argument('--weight_decay', type=float, default=10e-4, help='Weight decay')", "= f'{opt.Transformation}-{opt.FeatureExtraction}-{opt.SequenceModeling}-BYOL' opt.exp_name += f'-Seed{opt.manualSeed}' # print(opt.exp_name) os.makedirs(f'./saved_models/{opt.exp_name}', exist_ok=True) \"\"\"", "name: init.kaiming_normal_(param) except Exception as e: # for batchnorm. if", "the input image') parser.add_argument('--rgb', action='store_true', help='use rgb input') parser.add_argument('--character', type=str,", "image_tensors, _ in valid_loader: image = image_tensors.to(device) val_loss = byol_learner(image)", "1) >= opt.num_iter: print('end the training') torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') sys.exit() epoch", "label') parser.add_argument('--sensitive', action='store_true', help='for sensitive character mode') parser.add_argument('--PAD', action='store_true', help='whether", "final layer') opt = parser.parse_args() if not opt.exp_name: opt.exp_name =", "= opt.batch_size * opt.num_gpu print('To equalize the number of epochs", "opt.SequenceModeling) # weight initialization for name, param in model.named_parameters(): if", "for k, v in args.items(): opt_log += f'{str(k)}: {str(v)}\\n' opt_log", "print(opt) with open(f'./saved_models/{opt.exp_name}/opt.txt', 'a') as opt_file: opt_log = '------------ Options", "0.02))], random_order=True)]) 
AlignCollate_valid = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD, image_transforms=image_transforms) valid_dataset, valid_dataset_log", "> valid_loss_avg.val(): best_loss = valid_loss_avg.val() torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') scheduler.step() loss_avg.reset() valid_loss_avg.reset()", "raise Exception('Unknown optimizer') print(\"Optimizer:\") print(optimizer) \"\"\" final options \"\"\" #", "size of the LSTM hidden state') parser.add_argument('--weight_decay', type=float, default=10e-4, help='Weight", "type=str, required=True, help='FeatureExtraction stage. VGG|RCNN|ResNet') parser.add_argument('--SequenceModeling', type=str, required=True, help='SequenceModeling stage.", "parser.add_argument('--beta1', type=float, default=0.9, help='beta1 for adam. default=0.9') parser.add_argument('--rho', type=float, default=0.95,", "None|TPS') parser.add_argument('--FeatureExtraction', type=str, required=True, help='FeatureExtraction stage. 
VGG|RCNN|ResNet') parser.add_argument('--SequenceModeling', type=str, required=True,", "numpy as np from utils import CTCLabelConverter, CTCLabelConverterForBaiduWarpctc, AttnLabelConverter, Averager", "used as training data)') parser.add_argument('--batch_ratio', type=str, default='0.5-0.5', help='assign ratio for", "channels=1, augment_fn=image_transforms, augmented=True) print(byol_learner) # filter that only require gradient", "input image') parser.add_argument('--rgb', action='store_true', help='use rgb input') parser.add_argument('--character', type=str, default='0123456789abcdefghijklmnopqrstuvwxyz',", "------') print('if you stuck too long time with multi-GPU setting,", "os import sys import time import random import string import", "optimizer.step() scheduler.step() byol_learner.update_moving_average() loss_avg.add(loss) if iteration==0: print(\"Epoch {:06d} Loss: {:.04f}\".format(iteration,", "hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset from simclr_model import FeaturesModel as Model from", "default=0.9') parser.add_argument('--rho', type=float, default=0.95, help='decay rate rho for Adadelta. default=0.95')", "random import string import argparse import torch import torch.backends.cudnn as", "default=2000, help='Interval between each validation') parser.add_argument('--saved_model', default='', help=\"path to model", "weight_decay=opt.weight_decay) elif opt.optimizer == 'sgd': optimizer = optim.SGD(filtered_parameters, lr=opt.lr, momentum=opt.momentum,", "Adadelta. default=0.95') parser.add_argument('--eps', type=float, default=1e-8, help='eps for Adadelta. 
default=1e-8') parser.add_argument('--nesterov',", "gamma=0.1) best_loss = None iteration = start_iter print(device) loss_avg =", "from byol_pytorch import BYOL from imgaug import augmenters as iaa", "in name: init.kaiming_normal_(param) except Exception as e: # for batchnorm.", "open(f'./saved_models/{opt.exp_name}/opt.txt', 'a') as opt_file: opt_log = '------------ Options -------------\\n' args", "multi-GPU model = torch.nn.DataParallel(model).to(device) model.train() if opt.saved_model != '': print(f'loading", "26, features.shape[1]) # kl_div = kl_loss(features[:int(features.shape[0]/2)], features[int(features.shape[0]/2):]) # kl_loss_avg.add(kl_div) model.train()", "BYOL( model, image_size=(32,100), hidden_layer=-1, channels=1, augment_fn=image_transforms, augmented=True) print(byol_learner) # filter", "help='path to validation dataset') parser.add_argument('--manualSeed', type=int, default=1111, help='for random seed", "opt.batch_size * opt.num_gpu \"\"\" previous version print('To equlize batch stats", "of the output of the final layer') opt = parser.parse_args()", "valid_loss_avg.val() torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') scheduler.step() loss_avg.reset() valid_loss_avg.reset() if epoch % 5", "workers', default=4) parser.add_argument('--batch_size', type=int, default=192, help='input batch size') parser.add_argument('--num_iter', type=int,", "to train, start_iter: {start_iter}') except: pass #LR Scheduler: scheduler =", "iterations to train for') parser.add_argument('--valInterval', type=int, default=2000, help='Interval between each", "default=0.9, help='beta1 for adam. default=0.9') parser.add_argument('--rho', type=float, default=0.95, help='decay rate", "as ia from tqdm import tqdm import matplotlib.pyplot as plt", "required=True, help='SequenceModeling stage. 
None|BiLSTM') parser.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial", "for Adadelta') parser.add_argument('--beta1', type=float, default=0.9, help='beta1 for adam. default=0.9') parser.add_argument('--rho',", "loss_avg.add(loss) if iteration==0: print(\"Epoch {:06d} Loss: {:.04f}\".format(iteration, loss_avg.val())) iteration +=", "init import torch.optim as optim import torch.utils.data import numpy as", "number of input channel of Feature extractor') parser.add_argument('--output_channel', type=int, default=512,", "training \"\"\" start_iter = 0 if opt.saved_model != '': try:", "layer') opt = parser.parse_args() if not opt.exp_name: opt.exp_name = f'{opt.Transformation}-{opt.FeatureExtraction}-{opt.SequenceModeling}-BYOL'", "Exception('Unknown optimizer') print(\"Optimizer:\") print(optimizer) \"\"\" final options \"\"\" # print(opt)", "iteration = start_iter print(device) loss_avg = Averager() valid_loss_avg = Averager()", "see https://github.com/clovaai/deep-text-recognition-benchmark/blob/6593928855fb7abb999a99f428b3e4477d4ae356/dataset.py#L130 opt.select_data = opt.select_data.split('-') opt.batch_ratio = opt.batch_ratio.split('-') train_dataset =", "epochs to 1-GPU setting, num_iter is divided with num_gpu by", "opt.optimizer == 'sgd': optimizer = optim.SGD(filtered_parameters, lr=opt.lr, momentum=opt.momentum, weight_decay=opt.weight_decay, nesterov=opt.nesterov)", "help='the width of the input image') parser.add_argument('--rgb', action='store_true', help='use rgb", "import validation from byol_pytorch import BYOL from imgaug import augmenters", "training progress with validation function. 
num_workers=int(opt.workers), collate_fn=AlignCollate_valid, pin_memory=True) log.write(valid_dataset_log) print('-'", "initialized') continue try: if 'bias' in name: init.constant_(param, 0.0) elif", "+= '---------------------------------------\\n' print(opt_log) opt_file.write(opt_log) \"\"\" start training \"\"\" start_iter =", "for Adadelta. default=1e-8') parser.add_argument('--nesterov', action='store_true', help='Use Nesterov momentum for SGD')", "except Exception as e: # for batchnorm. if 'weight' in", "to total number of data.') parser.add_argument('--batch_max_length', type=int, default=25, help='maximum-label-length') parser.add_argument('--imgH',", "= model(image) # features = features.view(-1, 26, features.shape[1]) # kl_div", "mode') \"\"\" Model Architecture \"\"\" parser.add_argument('--Transformation', type=str, required=True, help='Transformation stage.", "lightness=(0.0, 0.5)), iaa.PiecewiseAffine(scale=(0.02, 0.03), mode='edge'), iaa.PerspectiveTransform(scale=(0.01, 0.02))], random_order=True)]) AlignCollate_valid =", "print('device count', opt.num_gpu) if opt.num_gpu > 1: print('------ Use multi-GPU", "image_transforms = iaa.Sequential([iaa.SomeOf((1, 5), [iaa.LinearContrast((0.5, 1.0)), iaa.GaussianBlur((0.5, 1.5)), iaa.Crop(percent=((0, 0.4),(0,", "state') parser.add_argument('--weight_decay', type=float, default=10e-4, help='Weight decay') parser.add_argument('--FinalLayer', action='store_true', help='Use a", "\"\"\" Seed and GPU setting \"\"\" # print(\"Random Seed: \",", "stats to 1-GPU setting, the batch_size is multiplied with num_gpu", "AttnLabelConverter, Averager from simclr_dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset from simclr_model", "while(True): # train part for i in tqdm(range(opt.valInterval)): image_tensors, _", "num_iter is divided with num_gpu by default.') If you dont", "iaa.Sharpen(alpha=(0.0, 0.5), lightness=(0.0, 0.5)), iaa.PiecewiseAffine(scale=(0.02, 0.03), mode='edge'), 
iaa.PerspectiveTransform(scale=(0.01, 0.02))], random_order=True)])", "batchnorm. if 'weight' in name: param.data.fill_(1) continue # data parallel", "if (iteration + 1) >= opt.num_iter: print('end the training') torch.save(model.state_dict(),", "None: best_loss = valid_loss_avg.val() torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') elif best_loss > valid_loss_avg.val():", "parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping value. default=5') parser.add_argument('--baiduCTC', action='store_true', help='for", "Batch_Balanced_Dataset from simclr_model import FeaturesModel as Model from test import", "1 byol_learner.eval() model.eval() with torch.no_grad(): for image_tensors, _ in valid_loader:", "iaa.GaussianBlur((0.5, 1.5)), iaa.Crop(percent=((0, 0.4),(0, 0),(0, 0.4),(0, 0.0)), keep_size=True), iaa.Crop(percent=((0, 0.0),(0,", "num : ', sum(params_num)) # setup optimizer if opt.optimizer ==", "= Averager() # kl_loss = torch.nn.KLDivLoss() epoch = 0 while(True):", "pass #LR Scheduler: scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[int(0.6*opt.num_iter), int(0.8*opt.num_iter)], last_epoch=start_iter-1, gamma=0.1)", "import torch import torch.backends.cudnn as cudnn import torch.nn.init as init", "import os import sys import time import random import string", "and multiplied batch_size is ', opt.batch_size) opt.batch_size = opt.batch_size *", "image') parser.add_argument('--rgb', action='store_true', help='use rgb input') parser.add_argument('--character', type=str, default='0123456789abcdefghijklmnopqrstuvwxyz', help='character", "opt.exp_name += f'-Seed{opt.manualSeed}' # print(opt.exp_name) os.makedirs(f'./saved_models/{opt.exp_name}', exist_ok=True) \"\"\" vocab /", "= True cudnn.deterministic = True opt.num_gpu = torch.cuda.device_count() # print('device", "to keep ratio then pad for image resize') parser.add_argument('--data_filtering_off', action='store_true',", "stage. 
None|BiLSTM') parser.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of", "data_filtering_off mode') \"\"\" Model Architecture \"\"\" parser.add_argument('--Transformation', type=str, required=True, help='Transformation", "fine-tuning') parser.add_argument('--optimizer', type=str, choices=['adam', 'adadelta', 'sgd'], help=\"Optimizer\") parser.add_argument('--lr', type=float, default=1,", "help='for data_filtering_off mode') \"\"\" Model Architecture \"\"\" parser.add_argument('--Transformation', type=str, required=True,", "Use multi-GPU setting ------') print('if you stuck too long time", "opt.output_channel, opt.hidden_size, opt.batch_max_length, opt.Transformation, opt.FeatureExtraction, opt.SequenceModeling) # weight initialization for", "initialization for name, param in model.named_parameters(): if 'localization_fc2' in name:", "SGD') parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping value. default=5') parser.add_argument('--baiduCTC', action='store_true',", "to validation dataset') parser.add_argument('--manualSeed', type=int, default=1111, help='for random seed setting')", "parser.add_argument('--valid_data', required=True, help='path to validation dataset') parser.add_argument('--manualSeed', type=int, default=1111, help='for", "multi-GPU setting, try to set --workers 0') # check multi-GPU", "which are not in opt.character') print('Filtering the images whose label", "'weight' in name: param.data.fill_(1) continue # data parallel for multi-GPU", "opt.FeatureExtraction, opt.SequenceModeling) # weight initialization for name, param in model.named_parameters():", "valid_dataset_log = hierarchical_dataset(root=opt.valid_data, opt=opt) valid_loader = torch.utils.data.DataLoader( valid_dataset, batch_size=opt.batch_size, shuffle=True,", "f'-Seed{opt.manualSeed}' # print(opt.exp_name) os.makedirs(f'./saved_models/{opt.exp_name}', exist_ok=True) \"\"\" vocab / character number", 
"byol_learner.train() with open(f'./saved_models/{opt.exp_name}/log_train.txt', 'a') as log: log.write(\"Iteration {:06d} Loss: {:.06f}", "model(image) # features = features.view(-1, 26, features.shape[1]) # kl_div =", "opt.grad_clip: torch.nn.utils.clip_grad_norm_(byol_learner.parameters(), opt.grad_clip) optimizer.step() scheduler.step() byol_learner.update_moving_average() loss_avg.add(loss) if iteration==0: print(\"Epoch", "Averager from simclr_dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset from simclr_model import", "ratio for each selected data in the batch') parser.add_argument('--total_data_usage_ratio', type=str,", "decent filtered_parameters = [] params_num = [] for p in", "int(opt.saved_model.split('_')[-1].split('.')[0]) print(f'continue to train, start_iter: {start_iter}') except: pass #LR Scheduler:", "default=0.95, help='decay rate rho for Adadelta. default=0.95') parser.add_argument('--eps', type=float, default=1e-8,", "than opt.batch_max_length') # see https://github.com/clovaai/deep-text-recognition-benchmark/blob/6593928855fb7abb999a99f428b3e4477d4ae356/dataset.py#L130 opt.select_data = opt.select_data.split('-') opt.batch_ratio =", "type=int, default=100, help='the width of the input image') parser.add_argument('--rgb', action='store_true',", "try to set --workers 0') # check multi-GPU issue https://github.com/clovaai/deep-text-recognition-benchmark/issues/1", "input') parser.add_argument('--character', type=str, default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label') parser.add_argument('--sensitive', action='store_true', help='for sensitive", "model.load_state_dict(torch.load(opt.saved_model), strict=False) else: model.load_state_dict(torch.load(opt.saved_model)) print(\"Model:\") print(model) image_transforms = iaa.Sequential([iaa.SomeOf((1, 5),", "def train(opt): \"\"\" dataset preparation \"\"\" if not opt.data_filtering_off: print('Filtering", "help='whether to do fine-tuning') 
parser.add_argument('--optimizer', type=str, choices=['adam', 'adadelta', 'sgd'], help=\"Optimizer\")", "the LSTM hidden state') parser.add_argument('--weight_decay', type=float, default=10e-4, help='Weight decay') parser.add_argument('--FinalLayer',", "= valid_loss_avg.val() torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') scheduler.step() loss_avg.reset() valid_loss_avg.reset() if epoch %", "to continue training\") parser.add_argument('--FT', action='store_true', help='whether to do fine-tuning') parser.add_argument('--optimizer',", "if opt.optimizer == 'adam': optimizer = optim.Adam(filtered_parameters, lr=opt.lr, betas=(opt.beta1, 0.999))", "help='Transformation stage. None|TPS') parser.add_argument('--FeatureExtraction', type=str, required=True, help='FeatureExtraction stage. VGG|RCNN|ResNet') parser.add_argument('--SequenceModeling',", "0.5), lightness=(0.0, 0.5)), iaa.PiecewiseAffine(scale=(0.02, 0.03), mode='edge'), iaa.PerspectiveTransform(scale=(0.01, 0.02))], random_order=True)]) byol_learner", "image resize') parser.add_argument('--data_filtering_off', action='store_true', help='for data_filtering_off mode') \"\"\" Model Architecture", "parser.add_argument('--valInterval', type=int, default=2000, help='Interval between each validation') parser.add_argument('--saved_model', default='', help=\"path", "betas=(opt.beta1, 0.999)) elif opt.optimizer == 'adadelta': optimizer = optim.Adadelta(filtered_parameters, lr=opt.lr,", "data in the batch') parser.add_argument('--total_data_usage_ratio', type=str, default='1.0', help='total data usage", "set --workers 0') # check multi-GPU issue https://github.com/clovaai/deep-text-recognition-benchmark/issues/1 opt.workers =", "augmented=True) print(byol_learner) # filter that only require gradient decent filtered_parameters", "print(f'continue to train, start_iter: {start_iter}') except: pass #LR Scheduler: scheduler", "= start_iter print(device) loss_avg = Averager() 
valid_loss_avg = Averager() #", "= torch.nn.KLDivLoss() epoch = 0 while(True): # train part for", "features = features.view(-1, 26, features.shape[1]) # kl_div = kl_loss(features[:int(features.shape[0]/2)], features[int(features.shape[0]/2):])", "and ST used as training data)') parser.add_argument('--batch_ratio', type=str, default='0.5-0.5', help='assign", "'a') ia.seed(1) image_transforms = iaa.Sequential([iaa.SomeOf((1, 5), [iaa.LinearContrast((0.5, 1.0)), iaa.GaussianBlur((0.5, 1.5)),", "type=str, default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label') parser.add_argument('--sensitive', action='store_true', help='for sensitive character mode')", "multiplied with num_gpu and multiplied batch_size is ', opt.batch_size) opt.batch_size", "help='Interval between each validation') parser.add_argument('--saved_model', default='', help=\"path to model to", "= features.view(-1, 26, features.shape[1]) # kl_div = kl_loss(features[:int(features.shape[0]/2)], features[int(features.shape[0]/2):]) #", "loss_avg.val(), valid_loss_avg.val())) if best_loss is None: best_loss = valid_loss_avg.val() torch.save(model.state_dict(),", "opt.exp_name: opt.exp_name = f'{opt.Transformation}-{opt.FeatureExtraction}-{opt.SequenceModeling}-BYOL' opt.exp_name += f'-Seed{opt.manualSeed}' # print(opt.exp_name) os.makedirs(f'./saved_models/{opt.exp_name}',", "keep_size=True), iaa.Sharpen(alpha=(0.0, 0.5), lightness=(0.0, 0.5)), iaa.PiecewiseAffine(scale=(0.02, 0.03), mode='edge'), iaa.PerspectiveTransform(scale=(0.01, 0.02))],", "https://github.com/clovaai/deep-text-recognition-benchmark/issues/1 opt.workers = opt.workers * opt.num_gpu opt.batch_size = opt.batch_size *", "if opt.saved_model != '': print(f'loading pretrained model from {opt.saved_model}') if", "byol_learner = BYOL( model, image_size=(32,100), hidden_layer=-1, channels=1, augment_fn=image_transforms, augmented=True) print(byol_learner)", "import torch.backends.cudnn as cudnn import torch.nn.init as init import 
torch.optim", "valid_loss_avg = Averager() # kl_loss_avg = Averager() # kl_loss =", "print('model input parameters', opt.imgH, opt.imgW, opt.num_fiducial, opt.input_channel, opt.output_channel, opt.hidden_size, opt.batch_max_length,", "[iaa.LinearContrast((0.5, 1.0)), iaa.GaussianBlur((0.5, 1.5)), iaa.Crop(percent=((0, 0.4),(0, 0),(0, 0.4),(0, 0.0)), keep_size=True),", "for Adadelta. default=0.95') parser.add_argument('--eps', type=float, default=1e-8, help='eps for Adadelta. default=1e-8')", "data parallel for multi-GPU model = torch.nn.DataParallel(model).to(device) model.train() if opt.saved_model", "logs and models') parser.add_argument('--train_data', required=True, help='path to training dataset') parser.add_argument('--valid_data',", "{:06f}\".format(iteration, loss_avg.val(), valid_loss_avg.val()) + '\\n') print(\"Iteration {:06d} Loss: {:.06f} Val", "= vars(opt) for k, v in args.items(): opt_log += f'{str(k)}:", "model = torch.nn.DataParallel(model).to(device) model.train() if opt.saved_model != '': print(f'loading pretrained", "= opt.batch_size * opt.num_gpu \"\"\" previous version print('To equlize batch", "opt.character') print('Filtering the images whose label is longer than opt.batch_max_length')", "opt.workers = opt.workers * opt.num_gpu opt.batch_size = opt.batch_size * opt.num_gpu", "', sum(params_num)) # setup optimizer if opt.optimizer == 'adam': optimizer", "+ '\\n') print(\"Iteration {:06d} Loss: {:.06f} Val loss: {:06f}\".format(iteration, loss_avg.val(),", "from utils import CTCLabelConverter, CTCLabelConverterForBaiduWarpctc, AttnLabelConverter, Averager from simclr_dataset import", "Seed: \", opt.manualSeed) random.seed(opt.manualSeed) np.random.seed(opt.manualSeed) torch.manual_seed(opt.manualSeed) torch.cuda.manual_seed(opt.manualSeed) cudnn.benchmark = True", ">= opt.num_iter: print('end the training') torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') sys.exit() epoch +=1", "= 
image_tensors.to(device) optimizer.zero_grad() loss = byol_learner(image) loss.backward() if opt.grad_clip: torch.nn.utils.clip_grad_norm_(byol_learner.parameters(),", "do fine-tuning') parser.add_argument('--optimizer', type=str, choices=['adam', 'adadelta', 'sgd'], help=\"Optimizer\") parser.add_argument('--lr', type=float,", "help='FeatureExtraction stage. VGG|RCNN|ResNet') parser.add_argument('--SequenceModeling', type=str, required=True, help='SequenceModeling stage. None|BiLSTM') parser.add_argument('--num_fiducial',", "opt.batch_size) opt.batch_size = opt.batch_size * opt.num_gpu print('To equalize the number", "image_tensors.to(device) optimizer.zero_grad() loss = byol_learner(image) loss.backward() if opt.grad_clip: torch.nn.utils.clip_grad_norm_(byol_learner.parameters(), opt.grad_clip)", "elif best_loss > valid_loss_avg.val(): best_loss = valid_loss_avg.val() torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') scheduler.step()", "open(f'./saved_models/{opt.exp_name}/log_train.txt', 'a') as log: log.write(\"Iteration {:06d} Loss: {:.06f} Val loss:", "setting \"\"\" # print(\"Random Seed: \", opt.manualSeed) random.seed(opt.manualSeed) np.random.seed(opt.manualSeed) torch.manual_seed(opt.manualSeed)", "best_loss = valid_loss_avg.val() torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') elif best_loss > valid_loss_avg.val(): best_loss", "log = open(f'./saved_models/{opt.exp_name}/log_dataset.txt', 'a') ia.seed(1) image_transforms = iaa.Sequential([iaa.SomeOf((1, 5), [iaa.LinearContrast((0.5,", "default=1, help='learning rate, default=1.0 for Adadelta') parser.add_argument('--beta1', type=float, default=0.9, help='beta1", "image') parser.add_argument('--imgW', type=int, default=100, help='the width of the input image')", "output of the final layer') opt = parser.parse_args() if not", "# opt.character += 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' opt.character = string.printable[:-6] # same with", 
"version print('To equlize batch stats to 1-GPU setting, the batch_size", "as Model from test import validation from byol_pytorch import BYOL", "help='Use Nesterov momentum for SGD') parser.add_argument('--momentum', type=float, default=0.9, help='Momentum for", "Averager() # kl_loss_avg = Averager() # kl_loss = torch.nn.KLDivLoss() epoch", "data usage ratio, this ratio is multiplied to total number", "FeaturesModel as Model from test import validation from byol_pytorch import", "print('Trainable params num : ', sum(params_num)) # setup optimizer if", "train part for i in tqdm(range(opt.valInterval)): image_tensors, _ = train_dataset.get_batch()", "import numpy as np from utils import CTCLabelConverter, CTCLabelConverterForBaiduWarpctc, AttnLabelConverter,", "help='for data_filtering_off mode') \"\"\" Data processing \"\"\" parser.add_argument('--select_data', type=str, default='MJ-ST',", "_ = train_dataset.get_batch() image = image_tensors.to(device) optimizer.zero_grad() loss = byol_learner(image)", "parser.add_argument('--nesterov', action='store_true', help='Use Nesterov momentum for SGD') parser.add_argument('--momentum', type=float, default=0.9,", "= True opt.num_gpu = torch.cuda.device_count() # print('device count', opt.num_gpu) if", "lightness=(0.0, 0.5)), iaa.PiecewiseAffine(scale=(0.02, 0.03), mode='edge'), iaa.PerspectiveTransform(scale=(0.01, 0.02))], random_order=True)]) byol_learner =", "weight initialization for name, param in model.named_parameters(): if 'localization_fc2' in", "== 'sgd': optimizer = optim.SGD(filtered_parameters, lr=opt.lr, momentum=opt.momentum, weight_decay=opt.weight_decay, nesterov=opt.nesterov) else:", "valid_loss_avg.val(): best_loss = valid_loss_avg.val() torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') scheduler.step() loss_avg.reset() valid_loss_avg.reset() if", "default=1e-8') parser.add_argument('--nesterov', action='store_true', help='Use Nesterov momentum for SGD') 
parser.add_argument('--momentum', type=float,", "Data processing \"\"\" parser.add_argument('--select_data', type=str, default='MJ-ST', help='select training data (default", "selected data in the batch') parser.add_argument('--total_data_usage_ratio', type=str, default='1.0', help='total data", "about it, just commnet out these line.) opt.num_iter = int(opt.num_iter", "argparse.ArgumentParser() parser.add_argument('--exp_name', help='Where to store logs and models') parser.add_argument('--train_data', required=True,", "random_order=True)]) AlignCollate_valid = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD, image_transforms=image_transforms) valid_dataset, valid_dataset_log =", "iaa import imgaug as ia from tqdm import tqdm import", "parser.add_argument('--imgH', type=int, default=32, help='the height of the input image') parser.add_argument('--imgW',", "char). \"\"\" Seed and GPU setting \"\"\" # print(\"Random Seed:", "label is longer than opt.batch_max_length') # see https://github.com/clovaai/deep-text-recognition-benchmark/blob/6593928855fb7abb999a99f428b3e4477d4ae356/dataset.py#L130 opt.select_data =", "best_loss > valid_loss_avg.val(): best_loss = valid_loss_avg.val() torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') scheduler.step() loss_avg.reset()", "else: model.load_state_dict(torch.load(opt.saved_model)) print(\"Model:\") print(model) image_transforms = iaa.Sequential([iaa.SomeOf((1, 5), [iaa.LinearContrast((0.5, 1.0)),", "input parameters', opt.imgH, opt.imgW, opt.num_fiducial, opt.input_channel, opt.output_channel, opt.hidden_size, opt.batch_max_length, opt.Transformation,", "model to continue training\") parser.add_argument('--FT', action='store_true', help='whether to do fine-tuning')", "output channel of Feature extractor') parser.add_argument('--hidden_size', type=int, default=256, help='the size", "True cudnn.deterministic = True opt.num_gpu = torch.cuda.device_count() # print('device 
count',", "Val loss: {:06f}\".format(iteration, loss_avg.val(), valid_loss_avg.val()) + '\\n') print(\"Iteration {:06d} Loss:", "type=str, required=True, help='SequenceModeling stage. None|BiLSTM') parser.add_argument('--num_fiducial', type=int, default=20, help='number of", "parser.add_argument('--FeatureExtraction', type=str, required=True, help='FeatureExtraction stage. VGG|RCNN|ResNet') parser.add_argument('--SequenceModeling', type=str, required=True, help='SequenceModeling", "from test import validation from byol_pytorch import BYOL from imgaug", "= torch.device('cuda' if torch.cuda.is_available() else 'cpu') def train(opt): \"\"\" dataset", "start_iter = int(opt.saved_model.split('_')[-1].split('.')[0]) print(f'continue to train, start_iter: {start_iter}') except: pass", "parser.add_argument('--select_data', type=str, default='MJ-ST', help='select training data (default is MJ-ST, which", "opt.num_gpu \"\"\" previous version print('To equlize batch stats to 1-GPU", "progress with validation function. 
num_workers=int(opt.workers), collate_fn=AlignCollate_valid, pin_memory=True) log.write(valid_dataset_log) print('-' *", "\"\"\" parser.add_argument('--select_data', type=str, default='MJ-ST', help='select training data (default is MJ-ST,", "opt.data_filtering_off: print('Filtering the images containing characters which are not in", "type=int, default=1, help='the number of input channel of Feature extractor')", "print('Filtering the images containing characters which are not in opt.character')", "--workers 0') # check multi-GPU issue https://github.com/clovaai/deep-text-recognition-benchmark/issues/1 opt.workers = opt.workers", "0.0),(0, 0.02),(0, 0),(0, 0.02)), keep_size=True), iaa.Sharpen(alpha=(0.0, 0.5), lightness=(0.0, 0.5)), iaa.PiecewiseAffine(scale=(0.02,", "in tqdm(range(opt.valInterval)): image_tensors, _ = train_dataset.get_batch() image = image_tensors.to(device) optimizer.zero_grad()", "= opt.select_data.split('-') opt.batch_ratio = opt.batch_ratio.split('-') train_dataset = Batch_Balanced_Dataset(opt) log =", "parser.add_argument('--exp_name', help='Where to store logs and models') parser.add_argument('--train_data', required=True, help='path", "in the batch') parser.add_argument('--total_data_usage_ratio', type=str, default='1.0', help='total data usage ratio,", "0.02),(0, 0),(0, 0.02)), keep_size=True), iaa.Sharpen(alpha=(0.0, 0.5), lightness=(0.0, 0.5)), iaa.PiecewiseAffine(scale=(0.02, 0.03),", "rgb input') parser.add_argument('--character', type=str, default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label') parser.add_argument('--sensitive', action='store_true', help='for", "opt.num_gpu print('To equalize the number of epochs to 1-GPU setting,", "i in tqdm(range(opt.valInterval)): image_tensors, _ = train_dataset.get_batch() image = image_tensors.to(device)", "# kl_loss = torch.nn.KLDivLoss() epoch = 0 while(True): # train", "check multi-GPU issue https://github.com/clovaai/deep-text-recognition-benchmark/issues/1 opt.workers = 
opt.workers * opt.num_gpu opt.batch_size", "help='beta1 for adam. default=0.9') parser.add_argument('--rho', type=float, default=0.95, help='decay rate rho", "parser.add_argument('--optimizer', type=str, choices=['adam', 'adadelta', 'sgd'], help=\"Optimizer\") parser.add_argument('--lr', type=float, default=1, help='learning", "params_num.append(np.prod(p.size())) print('Trainable params num : ', sum(params_num)) # setup optimizer", "of the final layer') opt = parser.parse_args() if not opt.exp_name:", "param in model.named_parameters(): if 'localization_fc2' in name: print(f'Skip {name} as", "opt_file.write(opt_log) \"\"\" start training \"\"\" start_iter = 0 if opt.saved_model", "required=True, help='path to validation dataset') parser.add_argument('--manualSeed', type=int, default=1111, help='for random", "help='maximum-label-length') parser.add_argument('--imgH', type=int, default=32, help='the height of the input image')", "data (default is MJ-ST, which means MJ and ST used", "= parser.parse_args() if not opt.exp_name: opt.exp_name = f'{opt.Transformation}-{opt.FeatureExtraction}-{opt.SequenceModeling}-BYOL' opt.exp_name +=", "training dataset') parser.add_argument('--valid_data', required=True, help='path to validation dataset') parser.add_argument('--manualSeed', type=int,", "dataset preparation \"\"\" if not opt.data_filtering_off: print('Filtering the images containing", "= Model(opt) print('model input parameters', opt.imgH, opt.imgW, opt.num_fiducial, opt.input_channel, opt.output_channel,", "to 1-GPU setting, the batch_size is multiplied with num_gpu and", "{name} as it is already initialized') continue try: if 'bias'", "v in args.items(): opt_log += f'{str(k)}: {str(v)}\\n' opt_log += '---------------------------------------\\n'", "byol_learner(image) loss.backward() if opt.grad_clip: torch.nn.utils.clip_grad_norm_(byol_learner.parameters(), opt.grad_clip) optimizer.step() scheduler.step() byol_learner.update_moving_average() loss_avg.add(loss)", 
"required=True, help='FeatureExtraction stage. VGG|RCNN|ResNet') parser.add_argument('--SequenceModeling', type=str, required=True, help='SequenceModeling stage. None|BiLSTM')", "= Averager() valid_loss_avg = Averager() # kl_loss_avg = Averager() #", "parser.add_argument('--rho', type=float, default=0.95, help='decay rate rho for Adadelta. default=0.95') parser.add_argument('--eps',", "opt.imgH, opt.imgW, opt.num_fiducial, opt.input_channel, opt.output_channel, opt.hidden_size, opt.batch_max_length, opt.Transformation, opt.FeatureExtraction, opt.SequenceModeling)", "# print(opt) with open(f'./saved_models/{opt.exp_name}/opt.txt', 'a') as opt_file: opt_log = '------------", "torch.utils.data.DataLoader( valid_dataset, batch_size=opt.batch_size, shuffle=True, # 'True' to check training progress", "type=str, required=True, help='Transformation stage. None|TPS') parser.add_argument('--FeatureExtraction', type=str, required=True, help='FeatureExtraction stage.", "'---------------------------------------\\n' print(opt_log) opt_file.write(opt_log) \"\"\" start training \"\"\" start_iter = 0", "{:.04f}\".format(iteration, loss_avg.val())) iteration += 1 byol_learner.eval() model.eval() with torch.no_grad(): for", "Model from test import validation from byol_pytorch import BYOL from", "help='whether to keep ratio then pad for image resize') parser.add_argument('--data_filtering_off',", "model = Model(opt) print('model input parameters', opt.imgH, opt.imgW, opt.num_fiducial, opt.input_channel,", "nesterov=opt.nesterov) else: raise Exception('Unknown optimizer') print(\"Optimizer:\") print(optimizer) \"\"\" final options", "np from utils import CTCLabelConverter, CTCLabelConverterForBaiduWarpctc, AttnLabelConverter, Averager from simclr_dataset", "= Batch_Balanced_Dataset(opt) log = open(f'./saved_models/{opt.exp_name}/log_dataset.txt', 'a') ia.seed(1) image_transforms = iaa.Sequential([iaa.SomeOf((1,", "{:.06f} Val loss: {:06f}\".format(iteration, loss_avg.val(), 
valid_loss_avg.val()) + '\\n') print(\"Iteration {:06d}", "Feature extractor') parser.add_argument('--output_channel', type=int, default=512, help='the number of output channel", "CTCLabelConverter, CTCLabelConverterForBaiduWarpctc, AttnLabelConverter, Averager from simclr_dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset", "# print(\"Random Seed: \", opt.manualSeed) random.seed(opt.manualSeed) np.random.seed(opt.manualSeed) torch.manual_seed(opt.manualSeed) torch.cuda.manual_seed(opt.manualSeed) cudnn.benchmark", "AlignCollate_valid = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD, image_transforms=image_transforms) valid_dataset, valid_dataset_log = hierarchical_dataset(root=opt.valid_data,", "{:06d} Loss: {:.06f} Val loss: {:06f}\".format(iteration, loss_avg.val(), valid_loss_avg.val())) if best_loss", "default='MJ-ST', help='select training data (default is MJ-ST, which means MJ", "setting ------') print('if you stuck too long time with multi-GPU", "just commnet out these line.) opt.num_iter = int(opt.num_iter / opt.num_gpu)", "# data parallel for multi-GPU model = torch.nn.DataParallel(model).to(device) model.train() if", "time import random import string import argparse import torch import", "0.03), mode='edge'), iaa.PerspectiveTransform(scale=(0.01, 0.02))], random_order=True)]) AlignCollate_valid = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD,", "print(\"Model:\") print(model) image_transforms = iaa.Sequential([iaa.SomeOf((1, 5), [iaa.LinearContrast((0.5, 1.0)), iaa.GaussianBlur((0.5, 1.5)),", "0.5), lightness=(0.0, 0.5)), iaa.PiecewiseAffine(scale=(0.02, 0.03), mode='edge'), iaa.PerspectiveTransform(scale=(0.01, 0.02))], random_order=True)]) AlignCollate_valid", "Loss: {:.06f} Val loss: {:06f}\".format(iteration, loss_avg.val(), valid_loss_avg.val()) + '\\n') print(\"Iteration", "type=float, default=5, help='gradient clipping value. 
default=5') parser.add_argument('--baiduCTC', action='store_true', help='for data_filtering_off", "with ASTER setting (use 94 char). \"\"\" Seed and GPU", "try: start_iter = int(opt.saved_model.split('_')[-1].split('.')[0]) print(f'continue to train, start_iter: {start_iter}') except:", "0.999)) elif opt.optimizer == 'adadelta': optimizer = optim.Adadelta(filtered_parameters, lr=opt.lr, rho=opt.rho,", "opt.character = string.printable[:-6] # same with ASTER setting (use 94", "0.03), mode='edge'), iaa.PerspectiveTransform(scale=(0.01, 0.02))], random_order=True)]) byol_learner = BYOL( model, image_size=(32,100),", "processing \"\"\" parser.add_argument('--select_data', type=str, default='MJ-ST', help='select training data (default is", "5 == 0: torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') if (iteration + 1) >=", "print(model) image_transforms = iaa.Sequential([iaa.SomeOf((1, 5), [iaa.LinearContrast((0.5, 1.0)), iaa.GaussianBlur((0.5, 1.5)), iaa.Crop(percent=((0,", "import FeaturesModel as Model from test import validation from byol_pytorch", "parser.add_argument('--train_data', required=True, help='path to training dataset') parser.add_argument('--valid_data', required=True, help='path to", "parser.add_argument('--rgb', action='store_true', help='use rgb input') parser.add_argument('--character', type=str, default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label')", "parser.add_argument('--num_iter', type=int, default=300000, help='number of iterations to train for') parser.add_argument('--valInterval',", "parser.add_argument('--momentum', type=float, default=0.9, help='Momentum for SGD') parser.add_argument('--grad_clip', type=float, default=5, help='gradient", "parser.add_argument('--Transformation', type=str, required=True, help='Transformation stage. 
None|TPS') parser.add_argument('--FeatureExtraction', type=str, required=True, help='FeatureExtraction", "configuration \"\"\" if opt.sensitive: # opt.character += 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' opt.character =", "parser.add_argument('--hidden_size', type=int, default=256, help='the size of the LSTM hidden state')", "BYOL from imgaug import augmenters as iaa import imgaug as", "0.4),(0, 0.0)), keep_size=True), iaa.Crop(percent=((0, 0.0),(0, 0.02),(0, 0),(0, 0.02)), keep_size=True), iaa.Sharpen(alpha=(0.0,", "+= 1 byol_learner.eval() model.eval() with torch.no_grad(): for image_tensors, _ in", "extractor') parser.add_argument('--output_channel', type=int, default=512, help='the number of output channel of", "continue try: if 'bias' in name: init.constant_(param, 0.0) elif 'weight'", "opt.hidden_size, opt.batch_max_length, opt.Transformation, opt.FeatureExtraction, opt.SequenceModeling) # weight initialization for name,", "type=int, default=300000, help='number of iterations to train for') parser.add_argument('--valInterval', type=int,", "'localization_fc2' in name: print(f'Skip {name} as it is already initialized')", "int(0.8*opt.num_iter)], last_epoch=start_iter-1, gamma=0.1) best_loss = None iteration = start_iter print(device)", "part for i in tqdm(range(opt.valInterval)): image_tensors, _ = train_dataset.get_batch() image", "print(\"Epoch {:06d} Loss: {:.04f}\".format(iteration, loss_avg.val())) iteration += 1 byol_learner.eval() model.eval()", "for name, param in model.named_parameters(): if 'localization_fc2' in name: print(f'Skip", "to store logs and models') parser.add_argument('--train_data', required=True, help='path to training", "default=0.9, help='Momentum for SGD') parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping value.", "0') # check multi-GPU issue https://github.com/clovaai/deep-text-recognition-benchmark/issues/1 opt.workers = opt.workers *", "sys.exit() epoch +=1 if __name__ == '__main__': parser = 
argparse.ArgumentParser()", "num_gpu and multiplied batch_size is ', opt.batch_size) opt.batch_size = opt.batch_size", "the batch_size is multiplied with num_gpu and multiplied batch_size is", "log: log.write(\"Iteration {:06d} Loss: {:.06f} Val loss: {:06f}\".format(iteration, loss_avg.val(), valid_loss_avg.val())", "of data.') parser.add_argument('--batch_max_length', type=int, default=25, help='maximum-label-length') parser.add_argument('--imgH', type=int, default=32, help='the", "val_loss = byol_learner(image) valid_loss_avg.add(val_loss) # features = model(image) # features", "80 + '\\n') log.close() if opt.rgb: opt.input_channel = 3 model", "random.seed(opt.manualSeed) np.random.seed(opt.manualSeed) torch.manual_seed(opt.manualSeed) torch.cuda.manual_seed(opt.manualSeed) cudnn.benchmark = True cudnn.deterministic = True", "print(device) loss_avg = Averager() valid_loss_avg = Averager() # kl_loss_avg =", "input image') parser.add_argument('--imgW', type=int, default=100, help='the width of the input", "equlize batch stats to 1-GPU setting, the batch_size is multiplied", "'': print(f'loading pretrained model from {opt.saved_model}') if opt.FT: model.load_state_dict(torch.load(opt.saved_model), strict=False)", "= open(f'./saved_models/{opt.exp_name}/log_dataset.txt', 'a') ia.seed(1) image_transforms = iaa.Sequential([iaa.SomeOf((1, 5), [iaa.LinearContrast((0.5, 1.0)),", "torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') if (iteration + 1) >= opt.num_iter: print('end the", "to training dataset') parser.add_argument('--valid_data', required=True, help='path to validation dataset') parser.add_argument('--manualSeed',", "+= f'{str(k)}: {str(v)}\\n' opt_log += '---------------------------------------\\n' print(opt_log) opt_file.write(opt_log) \"\"\" start", "validation') parser.add_argument('--saved_model', default='', help=\"path to model to continue training\") parser.add_argument('--FT',", "seed setting') 
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)", "of the input image') parser.add_argument('--rgb', action='store_true', help='use rgb input') parser.add_argument('--character',", "help='assign ratio for each selected data in the batch') parser.add_argument('--total_data_usage_ratio',", "total number of data.') parser.add_argument('--batch_max_length', type=int, default=25, help='maximum-label-length') parser.add_argument('--imgH', type=int,", "= [] for p in filter(lambda p: p.requires_grad, byol_learner.parameters()): filtered_parameters.append(p)", "pad for image resize') parser.add_argument('--data_filtering_off', action='store_true', help='for data_filtering_off mode') \"\"\"", "continue training\") parser.add_argument('--FT', action='store_true', help='whether to do fine-tuning') parser.add_argument('--optimizer', type=str,", "torch.cuda.is_available() else 'cpu') def train(opt): \"\"\" dataset preparation \"\"\" if", "1.0)), iaa.GaussianBlur((0.5, 1.5)), iaa.Crop(percent=((0, 0.4),(0, 0),(0, 0.4),(0, 0.0)), keep_size=True), iaa.Crop(percent=((0,", "'a') as opt_file: opt_log = '------------ Options -------------\\n' args =", "print(\"Iteration {:06d} Loss: {:.06f} Val loss: {:06f}\".format(iteration, loss_avg.val(), valid_loss_avg.val())) if", "if epoch % 5 == 0: torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') if (iteration", "import matplotlib.pyplot as plt device = torch.device('cuda' if torch.cuda.is_available() else", "momentum=opt.momentum, weight_decay=opt.weight_decay, nesterov=opt.nesterov) else: raise Exception('Unknown optimizer') print(\"Optimizer:\") print(optimizer) \"\"\"", "kl_loss(features[:int(features.shape[0]/2)], features[int(features.shape[0]/2):]) # kl_loss_avg.add(kl_div) model.train() byol_learner.train() with open(f'./saved_models/{opt.exp_name}/log_train.txt', 'a') as", "training') torch.save(model.state_dict(), 
f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') sys.exit() epoch +=1 if __name__ == '__main__':", "default='', help=\"path to model to continue training\") parser.add_argument('--FT', action='store_true', help='whether", "args.items(): opt_log += f'{str(k)}: {str(v)}\\n' opt_log += '---------------------------------------\\n' print(opt_log) opt_file.write(opt_log)", "with open(f'./saved_models/{opt.exp_name}/opt.txt', 'a') as opt_file: opt_log = '------------ Options -------------\\n'", "features.view(-1, 26, features.shape[1]) # kl_div = kl_loss(features[:int(features.shape[0]/2)], features[int(features.shape[0]/2):]) # kl_loss_avg.add(kl_div)", "setting, try to set --workers 0') # check multi-GPU issue", "opt.batch_size = opt.batch_size * opt.num_gpu print('To equalize the number of", "default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label') parser.add_argument('--sensitive', action='store_true', help='for sensitive character mode') parser.add_argument('--PAD',", "scheduler.step() byol_learner.update_moving_average() loss_avg.add(loss) if iteration==0: print(\"Epoch {:06d} Loss: {:.04f}\".format(iteration, loss_avg.val()))", "scheduler.step() loss_avg.reset() valid_loss_avg.reset() if epoch % 5 == 0: torch.save(model.state_dict(),", "from {opt.saved_model}') if opt.FT: model.load_state_dict(torch.load(opt.saved_model), strict=False) else: model.load_state_dict(torch.load(opt.saved_model)) print(\"Model:\") print(model)", "# same with ASTER setting (use 94 char). \"\"\" Seed", "(use 94 char). 
\"\"\" Seed and GPU setting \"\"\" #", "opt.optimizer == 'adam': optimizer = optim.Adam(filtered_parameters, lr=opt.lr, betas=(opt.beta1, 0.999)) elif", "to set --workers 0') # check multi-GPU issue https://github.com/clovaai/deep-text-recognition-benchmark/issues/1 opt.workers", "0),(0, 0.02)), keep_size=True), iaa.Sharpen(alpha=(0.0, 0.5), lightness=(0.0, 0.5)), iaa.PiecewiseAffine(scale=(0.02, 0.03), mode='edge'),", "import torch.utils.data import numpy as np from utils import CTCLabelConverter,", "'a') as log: log.write(\"Iteration {:06d} Loss: {:.06f} Val loss: {:06f}\".format(iteration,", "# features = features.view(-1, 26, features.shape[1]) # kl_div = kl_loss(features[:int(features.shape[0]/2)],", "parser.add_argument('--SequenceModeling', type=str, required=True, help='SequenceModeling stage. None|BiLSTM') parser.add_argument('--num_fiducial', type=int, default=20, help='number", "p in filter(lambda p: p.requires_grad, byol_learner.parameters()): filtered_parameters.append(p) params_num.append(np.prod(p.size())) print('Trainable params", "{:06d} Loss: {:.04f}\".format(iteration, loss_avg.val())) iteration += 1 byol_learner.eval() model.eval() with", "the number of epochs to 1-GPU setting, num_iter is divided", "matplotlib.pyplot as plt device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')", "# for batchnorm. 
if 'weight' in name: param.data.fill_(1) continue #", "torch.nn.init as init import torch.optim as optim import torch.utils.data import", "sys import time import random import string import argparse import", "loss_avg.reset() valid_loss_avg.reset() if epoch % 5 == 0: torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth')", "keep ratio then pad for image resize') parser.add_argument('--data_filtering_off', action='store_true', help='for", "help='the number of output channel of Feature extractor') parser.add_argument('--hidden_size', type=int,", "action='store_true', help='whether to do fine-tuning') parser.add_argument('--optimizer', type=str, choices=['adam', 'adadelta', 'sgd'],", "count', opt.num_gpu) if opt.num_gpu > 1: print('------ Use multi-GPU setting", "optim import torch.utils.data import numpy as np from utils import", "\"\"\" # print(opt) with open(f'./saved_models/{opt.exp_name}/opt.txt', 'a') as opt_file: opt_log =", "byol_learner.eval() model.eval() with torch.no_grad(): for image_tensors, _ in valid_loader: image", "extractor') parser.add_argument('--hidden_size', type=int, default=256, help='the size of the LSTM hidden", "= torch.cuda.device_count() # print('device count', opt.num_gpu) if opt.num_gpu > 1:", "torch.backends.cudnn as cudnn import torch.nn.init as init import torch.optim as", "then pad for image resize') parser.add_argument('--data_filtering_off', action='store_true', help='for data_filtering_off mode')", "= '------------ Options -------------\\n' args = vars(opt) for k, v", "epoch = 0 while(True): # train part for i in", "# features = model(image) # features = features.view(-1, 26, features.shape[1])", "features = model(image) # features = features.view(-1, 26, features.shape[1]) #", "multiplied to total number of data.') parser.add_argument('--batch_max_length', type=int, default=25, help='maximum-label-length')", "gradient decent filtered_parameters = [] params_num = [] for p", "default=1111, 
help='for random seed setting') parser.add_argument('--workers', type=int, help='number of data", "'cpu') def train(opt): \"\"\" dataset preparation \"\"\" if not opt.data_filtering_off:", "dataset') parser.add_argument('--manualSeed', type=int, default=1111, help='for random seed setting') parser.add_argument('--workers', type=int,", "training data)') parser.add_argument('--batch_ratio', type=str, default='0.5-0.5', help='assign ratio for each selected", "Averager() # kl_loss = torch.nn.KLDivLoss() epoch = 0 while(True): #", "optimizer.zero_grad() loss = byol_learner(image) loss.backward() if opt.grad_clip: torch.nn.utils.clip_grad_norm_(byol_learner.parameters(), opt.grad_clip) optimizer.step()", "byol_learner(image) valid_loss_avg.add(val_loss) # features = model(image) # features = features.view(-1,", "action='store_true', help='for sensitive character mode') parser.add_argument('--PAD', action='store_true', help='whether to keep", "opt.input_channel, opt.output_channel, opt.hidden_size, opt.batch_max_length, opt.Transformation, opt.FeatureExtraction, opt.SequenceModeling) # weight initialization", "sum(params_num)) # setup optimizer if opt.optimizer == 'adam': optimizer =", "of TPS-STN') parser.add_argument('--input_channel', type=int, default=1, help='the number of input channel", "parser.add_argument('--input_channel', type=int, default=1, help='the number of input channel of Feature", "opt.batch_size * opt.num_gpu print('To equalize the number of epochs to", "import time import random import string import argparse import torch", "byol_pytorch import BYOL from imgaug import augmenters as iaa import", "tqdm import matplotlib.pyplot as plt device = torch.device('cuda' if torch.cuda.is_available()", "optimizer = optim.Adam(filtered_parameters, lr=opt.lr, betas=(opt.beta1, 0.999)) elif opt.optimizer == 'adadelta':", "print(opt_log) opt_file.write(opt_log) \"\"\" start training \"\"\" start_iter = 0 if", "valid_loader = torch.utils.data.DataLoader( valid_dataset, 
batch_size=opt.batch_size, shuffle=True, # 'True' to check", "', opt.batch_size) opt.batch_size = opt.batch_size * opt.num_gpu print('To equalize the", "'\\n') print(\"Iteration {:06d} Loss: {:.06f} Val loss: {:06f}\".format(iteration, loss_avg.val(), valid_loss_avg.val()))", "by default.') If you dont care about it, just commnet", "import tqdm import matplotlib.pyplot as plt device = torch.device('cuda' if", "kl_div = kl_loss(features[:int(features.shape[0]/2)], features[int(features.shape[0]/2):]) # kl_loss_avg.add(kl_div) model.train() byol_learner.train() with open(f'./saved_models/{opt.exp_name}/log_train.txt',", "value. default=5') parser.add_argument('--baiduCTC', action='store_true', help='for data_filtering_off mode') \"\"\" Data processing", "not opt.exp_name: opt.exp_name = f'{opt.Transformation}-{opt.FeatureExtraction}-{opt.SequenceModeling}-BYOL' opt.exp_name += f'-Seed{opt.manualSeed}' # print(opt.exp_name)", "optimizer = optim.Adadelta(filtered_parameters, lr=opt.lr, rho=opt.rho, eps=opt.eps, weight_decay=opt.weight_decay) elif opt.optimizer ==", "{start_iter}') except: pass #LR Scheduler: scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[int(0.6*opt.num_iter), int(0.8*opt.num_iter)],", "if opt.saved_model != '': try: start_iter = int(opt.saved_model.split('_')[-1].split('.')[0]) print(f'continue to", "cudnn import torch.nn.init as init import torch.optim as optim import", "else 'cpu') def train(opt): \"\"\" dataset preparation \"\"\" if not", "to do fine-tuning') parser.add_argument('--optimizer', type=str, choices=['adam', 'adadelta', 'sgd'], help=\"Optimizer\") parser.add_argument('--lr',", "help='learning rate, default=1.0 for Adadelta') parser.add_argument('--beta1', type=float, default=0.9, help='beta1 for", "if not opt.data_filtering_off: print('Filtering the images containing characters which are", "of output channel of Feature extractor') parser.add_argument('--hidden_size', type=int, default=256, help='the", "\"\"\" start 
training \"\"\" start_iter = 0 if opt.saved_model !=", "scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[int(0.6*opt.num_iter), int(0.8*opt.num_iter)], last_epoch=start_iter-1, gamma=0.1) best_loss = None", "parser.add_argument('--data_filtering_off', action='store_true', help='for data_filtering_off mode') \"\"\" Model Architecture \"\"\" parser.add_argument('--Transformation',", "long time with multi-GPU setting, try to set --workers 0')", "iaa.PiecewiseAffine(scale=(0.02, 0.03), mode='edge'), iaa.PerspectiveTransform(scale=(0.01, 0.02))], random_order=True)]) byol_learner = BYOL( model,", "kl_loss_avg = Averager() # kl_loss = torch.nn.KLDivLoss() epoch = 0", "for multi-GPU model = torch.nn.DataParallel(model).to(device) model.train() if opt.saved_model != '':", "final options \"\"\" # print(opt) with open(f'./saved_models/{opt.exp_name}/opt.txt', 'a') as opt_file:", "elif 'weight' in name: init.kaiming_normal_(param) except Exception as e: #", "weight_decay=opt.weight_decay, nesterov=opt.nesterov) else: raise Exception('Unknown optimizer') print(\"Optimizer:\") print(optimizer) \"\"\" final", "'------------ Options -------------\\n' args = vars(opt) for k, v in", "__name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--exp_name', help='Where to store", "\"\"\" parser.add_argument('--Transformation', type=str, required=True, help='Transformation stage. 
None|TPS') parser.add_argument('--FeatureExtraction', type=str, required=True,", "input channel of Feature extractor') parser.add_argument('--output_channel', type=int, default=512, help='the number", "loss_avg.val(), valid_loss_avg.val()) + '\\n') print(\"Iteration {:06d} Loss: {:.06f} Val loss:", "help='number of data loading workers', default=4) parser.add_argument('--batch_size', type=int, default=192, help='input", "action='store_true', help='Use a nonlinear projection head during training') parser.add_argument('--final_feature', type=int,", "import random import string import argparse import torch import torch.backends.cudnn", "'sgd': optimizer = optim.SGD(filtered_parameters, lr=opt.lr, momentum=opt.momentum, weight_decay=opt.weight_decay, nesterov=opt.nesterov) else: raise", "as it is already initialized') continue try: if 'bias' in", "opt.saved_model != '': print(f'loading pretrained model from {opt.saved_model}') if opt.FT:", "type=int, default=32, help='the height of the input image') parser.add_argument('--imgW', type=int,", "batch_size is multiplied with num_gpu and multiplied batch_size is ',", "parser.add_argument('--batch_max_length', type=int, default=25, help='maximum-label-length') parser.add_argument('--imgH', type=int, default=32, help='the height of", "iteration += 1 byol_learner.eval() model.eval() with torch.no_grad(): for image_tensors, _", "sensitive character mode') parser.add_argument('--PAD', action='store_true', help='whether to keep ratio then", "as plt device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') def", "the images whose label is longer than opt.batch_max_length') # see", "the batch') parser.add_argument('--total_data_usage_ratio', type=str, default='1.0', help='total data usage ratio, this", "torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') scheduler.step() loss_avg.reset() valid_loss_avg.reset() if epoch % 5 ==", "print(optimizer) \"\"\" final options \"\"\" # 
print(opt) with open(f'./saved_models/{opt.exp_name}/opt.txt', 'a')", "94 char). \"\"\" Seed and GPU setting \"\"\" # print(\"Random", "= kl_loss(features[:int(features.shape[0]/2)], features[int(features.shape[0]/2):]) # kl_loss_avg.add(kl_div) model.train() byol_learner.train() with open(f'./saved_models/{opt.exp_name}/log_train.txt', 'a')", "parameters', opt.imgH, opt.imgW, opt.num_fiducial, opt.input_channel, opt.output_channel, opt.hidden_size, opt.batch_max_length, opt.Transformation, opt.FeatureExtraction,", "continue # data parallel for multi-GPU model = torch.nn.DataParallel(model).to(device) model.train()", "# 'True' to check training progress with validation function. num_workers=int(opt.workers),", "'\\n') log.close() if opt.rgb: opt.input_channel = 3 model = Model(opt)", "train_dataset.get_batch() image = image_tensors.to(device) optimizer.zero_grad() loss = byol_learner(image) loss.backward() if", "containing characters which are not in opt.character') print('Filtering the images", "of fiducial points of TPS-STN') parser.add_argument('--input_channel', type=int, default=1, help='the number", "= string.printable[:-6] # same with ASTER setting (use 94 char).", "opt.num_gpu opt.batch_size = opt.batch_size * opt.num_gpu \"\"\" previous version print('To", "parser.add_argument('--batch_ratio', type=str, default='0.5-0.5', help='assign ratio for each selected data in", "in valid_loader: image = image_tensors.to(device) val_loss = byol_learner(image) valid_loss_avg.add(val_loss) #", "help='Weight decay') parser.add_argument('--FinalLayer', action='store_true', help='Use a nonlinear projection head during", "# filter that only require gradient decent filtered_parameters = []", "# setup optimizer if opt.optimizer == 'adam': optimizer = optim.Adam(filtered_parameters,", "epoch % 5 == 0: torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') if (iteration +", "type=float, default=0.9, help='Momentum for SGD') 
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping", "pin_memory=True) log.write(valid_dataset_log) print('-' * 80) log.write('-' * 80 + '\\n')", "start_iter = 0 if opt.saved_model != '': try: start_iter =", "parser.add_argument('--character', type=str, default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label') parser.add_argument('--sensitive', action='store_true', help='for sensitive character", "\"\"\" vocab / character number configuration \"\"\" if opt.sensitive: #", "is multiplied to total number of data.') parser.add_argument('--batch_max_length', type=int, default=25,", "type=float, default=0.9, help='beta1 for adam. default=0.9') parser.add_argument('--rho', type=float, default=0.95, help='decay", "= BYOL( model, image_size=(32,100), hidden_layer=-1, channels=1, augment_fn=image_transforms, augmented=True) print(byol_learner) #", "Val loss: {:06f}\".format(iteration, loss_avg.val(), valid_loss_avg.val())) if best_loss is None: best_loss", "size') parser.add_argument('--num_iter', type=int, default=300000, help='number of iterations to train for')", "log.write(valid_dataset_log) print('-' * 80) log.write('-' * 80 + '\\n') log.close()", "decay') parser.add_argument('--FinalLayer', action='store_true', help='Use a nonlinear projection head during training')", "in name: print(f'Skip {name} as it is already initialized') continue", "augment_fn=image_transforms, augmented=True) print(byol_learner) # filter that only require gradient decent", "in name: param.data.fill_(1) continue # data parallel for multi-GPU model", "loss = byol_learner(image) loss.backward() if opt.grad_clip: torch.nn.utils.clip_grad_norm_(byol_learner.parameters(), opt.grad_clip) optimizer.step() scheduler.step()", "Exception as e: # for batchnorm. 
if 'weight' in name:", "0: torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') if (iteration + 1) >= opt.num_iter: print('end", "best_loss is None: best_loss = valid_loss_avg.val() torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') elif best_loss", "random_order=True)]) byol_learner = BYOL( model, image_size=(32,100), hidden_layer=-1, channels=1, augment_fn=image_transforms, augmented=True)", "name: init.constant_(param, 0.0) elif 'weight' in name: init.kaiming_normal_(param) except Exception", "filter(lambda p: p.requires_grad, byol_learner.parameters()): filtered_parameters.append(p) params_num.append(np.prod(p.size())) print('Trainable params num :", "shuffle=True, # 'True' to check training progress with validation function.", "AlignCollate, Batch_Balanced_Dataset from simclr_model import FeaturesModel as Model from test", "of Feature extractor') parser.add_argument('--output_channel', type=int, default=512, help='the number of output", "* opt.num_gpu print('To equalize the number of epochs to 1-GPU", "loss_avg.val())) iteration += 1 byol_learner.eval() model.eval() with torch.no_grad(): for image_tensors,", "of the input image') parser.add_argument('--imgW', type=int, default=100, help='the width of", "= argparse.ArgumentParser() parser.add_argument('--exp_name', help='Where to store logs and models') parser.add_argument('--train_data',", "\"\"\" dataset preparation \"\"\" if not opt.data_filtering_off: print('Filtering the images", "loss: {:06f}\".format(iteration, loss_avg.val(), valid_loss_avg.val()) + '\\n') print(\"Iteration {:06d} Loss: {:.06f}", "type=float, default=10e-4, help='Weight decay') parser.add_argument('--FinalLayer', action='store_true', help='Use a nonlinear projection", "ia from tqdm import tqdm import matplotlib.pyplot as plt device", "opt.Transformation, opt.FeatureExtraction, opt.SequenceModeling) # weight initialization for name, param in", "valid_dataset, 
batch_size=opt.batch_size, shuffle=True, # 'True' to check training progress with", "already initialized') continue try: if 'bias' in name: init.constant_(param, 0.0)", "mode') \"\"\" Data processing \"\"\" parser.add_argument('--select_data', type=str, default='MJ-ST', help='select training", "# print('device count', opt.num_gpu) if opt.num_gpu > 1: print('------ Use", "image_tensors, _ = train_dataset.get_batch() image = image_tensors.to(device) optimizer.zero_grad() loss =", "time with multi-GPU setting, try to set --workers 0') #", "number of epochs to 1-GPU setting, num_iter is divided with", "with multi-GPU setting, try to set --workers 0') # check", "print('To equlize batch stats to 1-GPU setting, the batch_size is", "torch.utils.data import numpy as np from utils import CTCLabelConverter, CTCLabelConverterForBaiduWarpctc,", "you dont care about it, just commnet out these line.)", "validation dataset') parser.add_argument('--manualSeed', type=int, default=1111, help='for random seed setting') parser.add_argument('--workers',", "model.load_state_dict(torch.load(opt.saved_model)) print(\"Model:\") print(model) image_transforms = iaa.Sequential([iaa.SomeOf((1, 5), [iaa.LinearContrast((0.5, 1.0)), iaa.GaussianBlur((0.5,", "opt.num_fiducial, opt.input_channel, opt.output_channel, opt.hidden_size, opt.batch_max_length, opt.Transformation, opt.FeatureExtraction, opt.SequenceModeling) # weight", "opt_log += f'{str(k)}: {str(v)}\\n' opt_log += '---------------------------------------\\n' print(opt_log) opt_file.write(opt_log) \"\"\"", "default=0.95') parser.add_argument('--eps', type=float, default=1e-8, help='eps for Adadelta. 
default=1e-8') parser.add_argument('--nesterov', action='store_true',", "ratio then pad for image resize') parser.add_argument('--data_filtering_off', action='store_true', help='for data_filtering_off", "mode') parser.add_argument('--PAD', action='store_true', help='whether to keep ratio then pad for", "optimizer') print(\"Optimizer:\") print(optimizer) \"\"\" final options \"\"\" # print(opt) with", "to train for') parser.add_argument('--valInterval', type=int, default=2000, help='Interval between each validation')", "iaa.PiecewiseAffine(scale=(0.02, 0.03), mode='edge'), iaa.PerspectiveTransform(scale=(0.01, 0.02))], random_order=True)]) AlignCollate_valid = AlignCollate(imgH=opt.imgH, imgW=opt.imgW,", "opt.manualSeed) random.seed(opt.manualSeed) np.random.seed(opt.manualSeed) torch.manual_seed(opt.manualSeed) torch.cuda.manual_seed(opt.manualSeed) cudnn.benchmark = True cudnn.deterministic =", "opt.select_data = opt.select_data.split('-') opt.batch_ratio = opt.batch_ratio.split('-') train_dataset = Batch_Balanced_Dataset(opt) log", "for batchnorm. if 'weight' in name: param.data.fill_(1) continue # data", "dont care about it, just commnet out these line.) 
opt.num_iter", "opt.rgb: opt.input_channel = 3 model = Model(opt) print('model input parameters',", "default=1, help='the number of input channel of Feature extractor') parser.add_argument('--output_channel',", "training') parser.add_argument('--final_feature', type=int, default=256, help='the size of the output of", "optim.Adam(filtered_parameters, lr=opt.lr, betas=(opt.beta1, 0.999)) elif opt.optimizer == 'adadelta': optimizer =", "help='the number of input channel of Feature extractor') parser.add_argument('--output_channel', type=int,", "If you dont care about it, just commnet out these", "that only require gradient decent filtered_parameters = [] params_num =", "number of data.') parser.add_argument('--batch_max_length', type=int, default=25, help='maximum-label-length') parser.add_argument('--imgH', type=int, default=32,", "loss_avg = Averager() valid_loss_avg = Averager() # kl_loss_avg = Averager()", "image_tensors.to(device) val_loss = byol_learner(image) valid_loss_avg.add(val_loss) # features = model(image) #", "tqdm import tqdm import matplotlib.pyplot as plt device = torch.device('cuda'", "if 'weight' in name: param.data.fill_(1) continue # data parallel for", "parser.add_argument('--PAD', action='store_true', help='whether to keep ratio then pad for image", "type=int, default=20, help='number of fiducial points of TPS-STN') parser.add_argument('--input_channel', type=int,", "parser.add_argument('--saved_model', default='', help=\"path to model to continue training\") parser.add_argument('--FT', action='store_true',", "strict=False) else: model.load_state_dict(torch.load(opt.saved_model)) print(\"Model:\") print(model) image_transforms = iaa.Sequential([iaa.SomeOf((1, 5), [iaa.LinearContrast((0.5,", "the output of the final layer') opt = parser.parse_args() if", "'weight' in name: init.kaiming_normal_(param) except Exception as e: # for", "/ character number configuration \"\"\" if opt.sensitive: # opt.character +=", "opt_log += 
'---------------------------------------\\n' print(opt_log) opt_file.write(opt_log) \"\"\" start training \"\"\" start_iter", "opt.saved_model != '': try: start_iter = int(opt.saved_model.split('_')[-1].split('.')[0]) print(f'continue to train,", "in name: init.constant_(param, 0.0) elif 'weight' in name: init.kaiming_normal_(param) except", "'adam': optimizer = optim.Adam(filtered_parameters, lr=opt.lr, betas=(opt.beta1, 0.999)) elif opt.optimizer ==", "eps=opt.eps, weight_decay=opt.weight_decay) elif opt.optimizer == 'sgd': optimizer = optim.SGD(filtered_parameters, lr=opt.lr,", "to check training progress with validation function. num_workers=int(opt.workers), collate_fn=AlignCollate_valid, pin_memory=True)", "as e: # for batchnorm. if 'weight' in name: param.data.fill_(1)", "opt.workers * opt.num_gpu opt.batch_size = opt.batch_size * opt.num_gpu \"\"\" previous", "as cudnn import torch.nn.init as init import torch.optim as optim", "loss.backward() if opt.grad_clip: torch.nn.utils.clip_grad_norm_(byol_learner.parameters(), opt.grad_clip) optimizer.step() scheduler.step() byol_learner.update_moving_average() loss_avg.add(loss) if", "data)') parser.add_argument('--batch_ratio', type=str, default='0.5-0.5', help='assign ratio for each selected data", "help='SequenceModeling stage. None|BiLSTM') parser.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points", "head during training') parser.add_argument('--final_feature', type=int, default=256, help='the size of the", "0.02)), keep_size=True), iaa.Sharpen(alpha=(0.0, 0.5), lightness=(0.0, 0.5)), iaa.PiecewiseAffine(scale=(0.02, 0.03), mode='edge'), iaa.PerspectiveTransform(scale=(0.01,", "!= '': try: start_iter = int(opt.saved_model.split('_')[-1].split('.')[0]) print(f'continue to train, start_iter:", "default=1.0 for Adadelta') parser.add_argument('--beta1', type=float, default=0.9, help='beta1 for adam. 
default=0.9')", "import augmenters as iaa import imgaug as ia from tqdm", "type=str, choices=['adam', 'adadelta', 'sgd'], help=\"Optimizer\") parser.add_argument('--lr', type=float, default=1, help='learning rate,", "dataset') parser.add_argument('--valid_data', required=True, help='path to validation dataset') parser.add_argument('--manualSeed', type=int, default=1111,", "LSTM hidden state') parser.add_argument('--weight_decay', type=float, default=10e-4, help='Weight decay') parser.add_argument('--FinalLayer', action='store_true',", "models') parser.add_argument('--train_data', required=True, help='path to training dataset') parser.add_argument('--valid_data', required=True, help='path", "type=int, default=256, help='the size of the output of the final", "issue https://github.com/clovaai/deep-text-recognition-benchmark/issues/1 opt.workers = opt.workers * opt.num_gpu opt.batch_size = opt.batch_size", "to model to continue training\") parser.add_argument('--FT', action='store_true', help='whether to do", "#LR Scheduler: scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[int(0.6*opt.num_iter), int(0.8*opt.num_iter)], last_epoch=start_iter-1, gamma=0.1) best_loss", "batch_size=opt.batch_size, shuffle=True, # 'True' to check training progress with validation", "opt.grad_clip) optimizer.step() scheduler.step() byol_learner.update_moving_average() loss_avg.add(loss) if iteration==0: print(\"Epoch {:06d} Loss:", "type=int, default=1111, help='for random seed setting') parser.add_argument('--workers', type=int, help='number of", "vocab / character number configuration \"\"\" if opt.sensitive: # opt.character", "'ABCDEFGHIJKLMNOPQRSTUVWXYZ' opt.character = string.printable[:-6] # same with ASTER setting (use", "* opt.num_gpu \"\"\" previous version print('To equlize batch stats to", "each validation') parser.add_argument('--saved_model', default='', help=\"path to model to continue training\")", "# see 
https://github.com/clovaai/deep-text-recognition-benchmark/blob/6593928855fb7abb999a99f428b3e4477d4ae356/dataset.py#L130 opt.select_data = opt.select_data.split('-') opt.batch_ratio = opt.batch_ratio.split('-') train_dataset", "opt.num_iter: print('end the training') torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') sys.exit() epoch +=1 if", "parser.add_argument('--output_channel', type=int, default=512, help='the number of output channel of Feature", "+= f'-Seed{opt.manualSeed}' # print(opt.exp_name) os.makedirs(f'./saved_models/{opt.exp_name}', exist_ok=True) \"\"\" vocab / character", "CTCLabelConverterForBaiduWarpctc, AttnLabelConverter, Averager from simclr_dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset from", "opt.batch_ratio = opt.batch_ratio.split('-') train_dataset = Batch_Balanced_Dataset(opt) log = open(f'./saved_models/{opt.exp_name}/log_dataset.txt', 'a')" ]
[ "import Application define('port', default=8080, help=\"listening port\") define('bind_address', default=\"\", help=\"bind address\")", "define(\"db_port\", default=5432, help=\"database port\") define(\"db_database\", default=\"tornado\", help=\"database name\") define(\"db_user\", default=\"tornado\",", "# -*- coding: utf-8 -*- # -*- mode: python -*-", "await maybe_create_tables(db) app = Application(db) app.listen(options.port, options.bind_address, xheaders=True) print(\"Listening on", "help=\"listening port\") define('bind_address', default=\"\", help=\"bind address\") define(\"db_host\", default=\"127.0.0.1\", help=\"database host\")", "utf-8 -*- # -*- mode: python -*- import aiopg import", "with aiopg.create_pool( host=options.db_host, port=options.db_port, user=options.db_user, password=options.db_password, dbname=options.db_database) as db: await", "#!/usr/bin/env python # -*- coding: utf-8 -*- # -*- mode:", "tornado.web import Application from app.application import Application define('port', default=8080, help=\"listening", "Application from app.application import Application define('port', default=8080, help=\"listening port\") define('bind_address',", "(await db.cursor()) as cur: await cur.execute(\"SELECT COUNT(*) FROM schema LIMIT", "default=\"\", help=\"bind address\") define(\"db_host\", default=\"127.0.0.1\", help=\"database host\") define(\"db_port\", default=5432, help=\"database", "Application(db) app.listen(options.port, options.bind_address, xheaders=True) print(\"Listening on http://%s:%i\" % (options.bind_address, options.port))", "1\") await cur.fetchone() except psycopg2.ProgrammingError: print(\"Database error!\") async def main():", "maybe_create_tables(db) app = Application(db) app.listen(options.port, options.bind_address, xheaders=True) print(\"Listening on http://%s:%i\"", "as cur: await cur.execute(\"SELECT COUNT(*) FROM schema LIMIT 1\") await", "error!\") async def main(): options.parse_command_line() async with 
aiopg.create_pool( host=options.db_host, port=options.db_port,", "schema LIMIT 1\") await cur.fetchone() except psycopg2.ProgrammingError: print(\"Database error!\") async", "port=options.db_port, user=options.db_user, password=options.db_password, dbname=options.db_database) as db: await maybe_create_tables(db) app =", "<filename>app/__init__.py #!/usr/bin/env python # -*- coding: utf-8 -*- # -*-", "with (await db.cursor()) as cur: await cur.execute(\"SELECT COUNT(*) FROM schema", "async def maybe_create_tables(db): try: with (await db.cursor()) as cur: await", "FROM schema LIMIT 1\") await cur.fetchone() except psycopg2.ProgrammingError: print(\"Database error!\")", "async with aiopg.create_pool( host=options.db_host, port=options.db_port, user=options.db_user, password=options.db_password, dbname=options.db_database) as db:", "define(\"db_database\", default=\"tornado\", help=\"database name\") define(\"db_user\", default=\"tornado\", help=\"database user\") define(\"db_password\", default=\"<PASSWORD>\",", "print(\"Listening on http://%s:%i\" % (options.bind_address, options.port)) shutdown_event = tornado.locks.Event() await", "dbname=options.db_database) as db: await maybe_create_tables(db) app = Application(db) app.listen(options.port, options.bind_address,", "mode: python -*- import aiopg import psycopg2 import tornado.locks from", "from tornado.web import Application from app.application import Application define('port', default=8080,", "await cur.fetchone() except psycopg2.ProgrammingError: print(\"Database error!\") async def main(): options.parse_command_line()", "db.cursor()) as cur: await cur.execute(\"SELECT COUNT(*) FROM schema LIMIT 1\")", "help=\"database password\") async def maybe_create_tables(db): try: with (await db.cursor()) as", "define('bind_address', default=\"\", help=\"bind address\") define(\"db_host\", default=\"127.0.0.1\", help=\"database host\") define(\"db_port\", default=5432,", "user\") define(\"db_password\", 
default=\"<PASSWORD>\", help=\"database password\") async def maybe_create_tables(db): try: with", "password\") async def maybe_create_tables(db): try: with (await db.cursor()) as cur:", "define('port', default=8080, help=\"listening port\") define('bind_address', default=\"\", help=\"bind address\") define(\"db_host\", default=\"127.0.0.1\",", "db: await maybe_create_tables(db) app = Application(db) app.listen(options.port, options.bind_address, xheaders=True) print(\"Listening", "await cur.execute(\"SELECT COUNT(*) FROM schema LIMIT 1\") await cur.fetchone() except", "default=5432, help=\"database port\") define(\"db_database\", default=\"tornado\", help=\"database name\") define(\"db_user\", default=\"tornado\", help=\"database", "import Application from app.application import Application define('port', default=8080, help=\"listening port\")", "python # -*- coding: utf-8 -*- # -*- mode: python", "Application define('port', default=8080, help=\"listening port\") define('bind_address', default=\"\", help=\"bind address\") define(\"db_host\",", "tornado.options import define, options from tornado.web import Application from app.application", "default=8080, help=\"listening port\") define('bind_address', default=\"\", help=\"bind address\") define(\"db_host\", default=\"127.0.0.1\", help=\"database", "user=options.db_user, password=options.db_password, dbname=options.db_database) as db: await maybe_create_tables(db) app = Application(db)", "from app.application import Application define('port', default=8080, help=\"listening port\") define('bind_address', default=\"\",", "define, options from tornado.web import Application from app.application import Application", "try: with (await db.cursor()) as cur: await cur.execute(\"SELECT COUNT(*) FROM", "app.application import Application define('port', default=8080, help=\"listening port\") define('bind_address', default=\"\", help=\"bind", "print(\"Database error!\") async def main(): options.parse_command_line() async 
with aiopg.create_pool( host=options.db_host,", "define(\"db_host\", default=\"127.0.0.1\", help=\"database host\") define(\"db_port\", default=5432, help=\"database port\") define(\"db_database\", default=\"tornado\",", "coding: utf-8 -*- # -*- mode: python -*- import aiopg", "cur: await cur.execute(\"SELECT COUNT(*) FROM schema LIMIT 1\") await cur.fetchone()", "import define, options from tornado.web import Application from app.application import", "main(): options.parse_command_line() async with aiopg.create_pool( host=options.db_host, port=options.db_port, user=options.db_user, password=options.db_password, dbname=options.db_database)", "help=\"database user\") define(\"db_password\", default=\"<PASSWORD>\", help=\"database password\") async def maybe_create_tables(db): try:", "address\") define(\"db_host\", default=\"127.0.0.1\", help=\"database host\") define(\"db_port\", default=5432, help=\"database port\") define(\"db_database\",", "on http://%s:%i\" % (options.bind_address, options.port)) shutdown_event = tornado.locks.Event() await shutdown_event.wait()", "help=\"database name\") define(\"db_user\", default=\"tornado\", help=\"database user\") define(\"db_password\", default=\"<PASSWORD>\", help=\"database password\")", "xheaders=True) print(\"Listening on http://%s:%i\" % (options.bind_address, options.port)) shutdown_event = tornado.locks.Event()", "import tornado.locks from tornado.options import define, options from tornado.web import", "from tornado.options import define, options from tornado.web import Application from", "options.bind_address, xheaders=True) print(\"Listening on http://%s:%i\" % (options.bind_address, options.port)) shutdown_event =", "LIMIT 1\") await cur.fetchone() except psycopg2.ProgrammingError: print(\"Database error!\") async def", "tornado.locks from tornado.options import define, options from tornado.web import Application", "app.listen(options.port, options.bind_address, xheaders=True) print(\"Listening on http://%s:%i\" 
% (options.bind_address, options.port)) shutdown_event", "aiopg.create_pool( host=options.db_host, port=options.db_port, user=options.db_user, password=options.db_password, dbname=options.db_database) as db: await maybe_create_tables(db)", "port\") define('bind_address', default=\"\", help=\"bind address\") define(\"db_host\", default=\"127.0.0.1\", help=\"database host\") define(\"db_port\",", "= Application(db) app.listen(options.port, options.bind_address, xheaders=True) print(\"Listening on http://%s:%i\" % (options.bind_address,", "-*- import aiopg import psycopg2 import tornado.locks from tornado.options import", "python -*- import aiopg import psycopg2 import tornado.locks from tornado.options", "options.parse_command_line() async with aiopg.create_pool( host=options.db_host, port=options.db_port, user=options.db_user, password=options.db_password, dbname=options.db_database) as", "help=\"database port\") define(\"db_database\", default=\"tornado\", help=\"database name\") define(\"db_user\", default=\"tornado\", help=\"database user\")", "define(\"db_user\", default=\"tornado\", help=\"database user\") define(\"db_password\", default=\"<PASSWORD>\", help=\"database password\") async def", "-*- coding: utf-8 -*- # -*- mode: python -*- import", "COUNT(*) FROM schema LIMIT 1\") await cur.fetchone() except psycopg2.ProgrammingError: print(\"Database", "define(\"db_password\", default=\"<PASSWORD>\", help=\"database password\") async def maybe_create_tables(db): try: with (await", "default=\"tornado\", help=\"database user\") define(\"db_password\", default=\"<PASSWORD>\", help=\"database password\") async def maybe_create_tables(db):", "as db: await maybe_create_tables(db) app = Application(db) app.listen(options.port, options.bind_address, xheaders=True)", "app = Application(db) app.listen(options.port, options.bind_address, xheaders=True) print(\"Listening on http://%s:%i\" %", "psycopg2 import tornado.locks from tornado.options import define, options from 
tornado.web", "aiopg import psycopg2 import tornado.locks from tornado.options import define, options", "def main(): options.parse_command_line() async with aiopg.create_pool( host=options.db_host, port=options.db_port, user=options.db_user, password=options.db_password,", "name\") define(\"db_user\", default=\"tornado\", help=\"database user\") define(\"db_password\", default=\"<PASSWORD>\", help=\"database password\") async", "except psycopg2.ProgrammingError: print(\"Database error!\") async def main(): options.parse_command_line() async with", "host\") define(\"db_port\", default=5432, help=\"database port\") define(\"db_database\", default=\"tornado\", help=\"database name\") define(\"db_user\",", "def maybe_create_tables(db): try: with (await db.cursor()) as cur: await cur.execute(\"SELECT", "maybe_create_tables(db): try: with (await db.cursor()) as cur: await cur.execute(\"SELECT COUNT(*)", "-*- mode: python -*- import aiopg import psycopg2 import tornado.locks", "psycopg2.ProgrammingError: print(\"Database error!\") async def main(): options.parse_command_line() async with aiopg.create_pool(", "import psycopg2 import tornado.locks from tornado.options import define, options from", "help=\"bind address\") define(\"db_host\", default=\"127.0.0.1\", help=\"database host\") define(\"db_port\", default=5432, help=\"database port\")", "default=\"<PASSWORD>\", help=\"database password\") async def maybe_create_tables(db): try: with (await db.cursor())", "default=\"127.0.0.1\", help=\"database host\") define(\"db_port\", default=5432, help=\"database port\") define(\"db_database\", default=\"tornado\", help=\"database", "default=\"tornado\", help=\"database name\") define(\"db_user\", default=\"tornado\", help=\"database user\") define(\"db_password\", default=\"<PASSWORD>\", help=\"database", "host=options.db_host, port=options.db_port, user=options.db_user, password=options.db_password, dbname=options.db_database) as db: await maybe_create_tables(db) app", 
"cur.fetchone() except psycopg2.ProgrammingError: print(\"Database error!\") async def main(): options.parse_command_line() async", "-*- # -*- mode: python -*- import aiopg import psycopg2", "options from tornado.web import Application from app.application import Application define('port',", "async def main(): options.parse_command_line() async with aiopg.create_pool( host=options.db_host, port=options.db_port, user=options.db_user,", "port\") define(\"db_database\", default=\"tornado\", help=\"database name\") define(\"db_user\", default=\"tornado\", help=\"database user\") define(\"db_password\",", "# -*- mode: python -*- import aiopg import psycopg2 import", "password=options.db_password, dbname=options.db_database) as db: await maybe_create_tables(db) app = Application(db) app.listen(options.port,", "cur.execute(\"SELECT COUNT(*) FROM schema LIMIT 1\") await cur.fetchone() except psycopg2.ProgrammingError:", "import aiopg import psycopg2 import tornado.locks from tornado.options import define,", "help=\"database host\") define(\"db_port\", default=5432, help=\"database port\") define(\"db_database\", default=\"tornado\", help=\"database name\")" ]
[ "(Any): Additional keyword arguments \"\"\" if name is not None:", "getattr(module, stage_cls_name) except AttributeError: continue if stage_cls is None: if", "class stage_cls = None for module in modules: try: stage_cls", "name (Optional[str]): Name of stage desc (Optional[str]): Description of stage", "Name of stage desc (Optional[str]): Description of stage kwargs (Any):", "None: \"\"\" Validates and stores static configuration. Arguments: name (Optional[str]):", "else: self.name = self.__class__.__name__ if desc is not None: self.desc", "self.name = self.__class__.__name__ if desc is not None: self.desc =", "def initialize_stage(stage_name, stage_conf, modules): # Get stage's class name stage_cls_name", "@abstractmethod def inlets(self) -> List[str]: raise NotImplementedError() @property @abstractmethod def", "validate_input_path(stage_args.pop(\"infile\")) spec = spec_from_file_location(stage_cls_name, module_infile) module = module_from_spec(spec) spec.loader.exec_module(module) stage_cls", "= next(iter(stage_conf)) # get first key # Get stage's configuration", "desc (Optional[str]): Description of stage kwargs (Any): Additional keyword arguments", "None, **kwargs: Any ) -> None: \"\"\" Validates and stores", "name stage_cls_name = next(iter(stage_conf)) # get first key # Get", "import ABC, abstractmethod from importlib.util import module_from_spec, spec_from_file_location from typing", "# All rights reserved. 
# # This software may be", "name else: self.name = self.__class__.__name__ if desc is not None:", "validate_input_path def initialize_stage(stage_name, stage_conf, modules): # Get stage's class name", "from abc import ABC, abstractmethod from importlib.util import module_from_spec, spec_from_file_location", "key # Get stage's configuration stage_args = stage_conf.get(stage_cls_name) if stage_args", "= {} # Get stage's class stage_cls = None for", "stage_cls(name=stage_name, **stage_args) class Stage(ABC): \"\"\"Base class for stages.\"\"\" trim_suffixes =", "raise NotImplementedError() @property @abstractmethod def outlets(self) -> List[str]: raise NotImplementedError()", "abc import ABC, abstractmethod from importlib.util import module_from_spec, spec_from_file_location from", "inlets(self) -> List[str]: raise NotImplementedError() @property @abstractmethod def outlets(self) ->", "raise KeyError(f\"Class '{stage_cls_name}' not found\") return stage_cls(name=stage_name, **stage_args) class Stage(ABC):", "= self.__class__.__name__ if desc is not None: self.desc = desc", "static configuration. Arguments: name (Optional[str]): Name of stage desc (Optional[str]):", "if name is not None: self.name = name else: self.name", "def inlets(self) -> List[str]: raise NotImplementedError() @property @abstractmethod def outlets(self)", "first key # Get stage's configuration stage_args = stage_conf.get(stage_cls_name) if", "def __init__( self, name: Optional[str] = None, desc: Optional[str] =", "python # pipescaler/core/stage.py # # Copyright (C) 2020-2021 <NAME> #", "modified and distributed under the terms of the # BSD", "spec = spec_from_file_location(stage_cls_name, module_infile) module = module_from_spec(spec) spec.loader.exec_module(module) stage_cls =", "2020-2021 <NAME> # All rights reserved. 
# # This software", "\"png\" def __init__( self, name: Optional[str] = None, desc: Optional[str]", "@property @abstractmethod def inlets(self) -> List[str]: raise NotImplementedError() @property @abstractmethod", "name: Optional[str] = None, desc: Optional[str] = None, **kwargs: Any", "= None, desc: Optional[str] = None, **kwargs: Any ) ->", "of the # BSD license. from __future__ import annotations from", "keyword arguments \"\"\" if name is not None: self.name =", "__init__( self, name: Optional[str] = None, desc: Optional[str] = None,", "in modules: try: stage_cls = getattr(module, stage_cls_name) except AttributeError: continue", "stage_cls_name = next(iter(stage_conf)) # get first key # Get stage's", "import validate_input_path def initialize_stage(stage_name, stage_conf, modules): # Get stage's class", "for stages.\"\"\" trim_suffixes = None extension = \"png\" def __init__(", "continue if stage_cls is None: if \"infile\" in stage_args: module_infile", "distributed under the terms of the # BSD license. from", "stage's configuration stage_args = stage_conf.get(stage_cls_name) if stage_args is None: stage_args", "self.name def __repr__(self) -> str: return self.desc def __str__(self) ->", "else: self.desc = self.name def __repr__(self) -> str: return self.desc", "may be modified and distributed under the terms of the", "reserved. # # This software may be modified and distributed", "self.__class__.__name__ if desc is not None: self.desc = desc else:", "module_infile) module = module_from_spec(spec) spec.loader.exec_module(module) stage_cls = getattr(module, stage_cls_name) else:", "abstractmethod from importlib.util import module_from_spec, spec_from_file_location from typing import Any,", "trim_suffixes = None extension = \"png\" def __init__( self, name:", "the # BSD license. 
from __future__ import annotations from abc", "stage kwargs (Any): Additional keyword arguments \"\"\" if name is", "stage_cls = None for module in modules: try: stage_cls =", "\"\"\" if name is not None: self.name = name else:", "All rights reserved. # # This software may be modified", "stage_conf, modules): # Get stage's class name stage_cls_name = next(iter(stage_conf))", "initialize_stage(stage_name, stage_conf, modules): # Get stage's class name stage_cls_name =", "# This software may be modified and distributed under the", "-> str: return self.name @property @abstractmethod def inlets(self) -> List[str]:", "= validate_input_path(stage_args.pop(\"infile\")) spec = spec_from_file_location(stage_cls_name, module_infile) module = module_from_spec(spec) spec.loader.exec_module(module)", "from __future__ import annotations from abc import ABC, abstractmethod from", "annotations from abc import ABC, abstractmethod from importlib.util import module_from_spec,", "not found\") return stage_cls(name=stage_name, **stage_args) class Stage(ABC): \"\"\"Base class for", "__future__ import annotations from abc import ABC, abstractmethod from importlib.util", "in stage_args: module_infile = validate_input_path(stage_args.pop(\"infile\")) spec = spec_from_file_location(stage_cls_name, module_infile) module", "from pipescaler.common import validate_input_path def initialize_stage(stage_name, stage_conf, modules): # Get", "= getattr(module, stage_cls_name) else: raise KeyError(f\"Class '{stage_cls_name}' not found\") return", "is None: if \"infile\" in stage_args: module_infile = validate_input_path(stage_args.pop(\"infile\")) spec", "is None: stage_args = {} # Get stage's class stage_cls", "pipescaler.common import validate_input_path def initialize_stage(stage_name, stage_conf, modules): # Get stage's", "stores static configuration. 
Arguments: name (Optional[str]): Name of stage desc", "stage_cls_name) else: raise KeyError(f\"Class '{stage_cls_name}' not found\") return stage_cls(name=stage_name, **stage_args)", "if \"infile\" in stage_args: module_infile = validate_input_path(stage_args.pop(\"infile\")) spec = spec_from_file_location(stage_cls_name,", "\"infile\" in stage_args: module_infile = validate_input_path(stage_args.pop(\"infile\")) spec = spec_from_file_location(stage_cls_name, module_infile)", "configuration. Arguments: name (Optional[str]): Name of stage desc (Optional[str]): Description", "str: return self.desc def __str__(self) -> str: return self.name @property", "class name stage_cls_name = next(iter(stage_conf)) # get first key #", ") -> None: \"\"\" Validates and stores static configuration. Arguments:", "-> None: \"\"\" Validates and stores static configuration. Arguments: name", "be modified and distributed under the terms of the #", "# Get stage's class name stage_cls_name = next(iter(stage_conf)) # get", "Get stage's class name stage_cls_name = next(iter(stage_conf)) # get first", "stage's class name stage_cls_name = next(iter(stage_conf)) # get first key", "extension = \"png\" def __init__( self, name: Optional[str] = None,", "module_from_spec, spec_from_file_location from typing import Any, List, Optional from pipescaler.common", "importlib.util import module_from_spec, spec_from_file_location from typing import Any, List, Optional", "stage_args = stage_conf.get(stage_cls_name) if stage_args is None: stage_args = {}", "Optional from pipescaler.common import validate_input_path def initialize_stage(stage_name, stage_conf, modules): #", "List, Optional from pipescaler.common import validate_input_path def initialize_stage(stage_name, stage_conf, modules):", "def __str__(self) -> str: return self.name @property @abstractmethod def inlets(self)", "None: if \"infile\" in stage_args: module_infile = validate_input_path(stage_args.pop(\"infile\")) spec =", "= None for module 
in modules: try: stage_cls = getattr(module,", "desc: Optional[str] = None, **kwargs: Any ) -> None: \"\"\"", "of stage kwargs (Any): Additional keyword arguments \"\"\" if name", "Optional[str] = None, **kwargs: Any ) -> None: \"\"\" Validates", "str: return self.name @property @abstractmethod def inlets(self) -> List[str]: raise", "stage_cls = getattr(module, stage_cls_name) except AttributeError: continue if stage_cls is", "return self.name @property @abstractmethod def inlets(self) -> List[str]: raise NotImplementedError()", "-> List[str]: raise NotImplementedError() @property @abstractmethod def outlets(self) -> List[str]:", "self.desc = desc else: self.desc = self.name def __repr__(self) ->", "(Optional[str]): Description of stage kwargs (Any): Additional keyword arguments \"\"\"", "return stage_cls(name=stage_name, **stage_args) class Stage(ABC): \"\"\"Base class for stages.\"\"\" trim_suffixes", "**stage_args) class Stage(ABC): \"\"\"Base class for stages.\"\"\" trim_suffixes = None", "= stage_conf.get(stage_cls_name) if stage_args is None: stage_args = {} #", "__repr__(self) -> str: return self.desc def __str__(self) -> str: return", "Arguments: name (Optional[str]): Name of stage desc (Optional[str]): Description of", "the terms of the # BSD license. 
from __future__ import", "stages.\"\"\" trim_suffixes = None extension = \"png\" def __init__( self,", "is not None: self.name = name else: self.name = self.__class__.__name__", "software may be modified and distributed under the terms of", "= module_from_spec(spec) spec.loader.exec_module(module) stage_cls = getattr(module, stage_cls_name) else: raise KeyError(f\"Class", "ABC, abstractmethod from importlib.util import module_from_spec, spec_from_file_location from typing import", "= spec_from_file_location(stage_cls_name, module_infile) module = module_from_spec(spec) spec.loader.exec_module(module) stage_cls = getattr(module,", "if stage_args is None: stage_args = {} # Get stage's", "'{stage_cls_name}' not found\") return stage_cls(name=stage_name, **stage_args) class Stage(ABC): \"\"\"Base class", "get first key # Get stage's configuration stage_args = stage_conf.get(stage_cls_name)", "This software may be modified and distributed under the terms", "-> str: return self.desc def __str__(self) -> str: return self.name", "self.name @property @abstractmethod def inlets(self) -> List[str]: raise NotImplementedError() @property", "try: stage_cls = getattr(module, stage_cls_name) except AttributeError: continue if stage_cls", "None: stage_args = {} # Get stage's class stage_cls =", "name is not None: self.name = name else: self.name =", "Additional keyword arguments \"\"\" if name is not None: self.name", "desc is not None: self.desc = desc else: self.desc =", "for module in modules: try: stage_cls = getattr(module, stage_cls_name) except", "# # Copyright (C) 2020-2021 <NAME> # All rights reserved.", "if stage_cls is None: if \"infile\" in stage_args: module_infile =", "pipescaler/core/stage.py # # Copyright (C) 2020-2021 <NAME> # All rights", "next(iter(stage_conf)) # get first key # Get stage's configuration stage_args", "except AttributeError: continue if stage_cls is None: if \"infile\" in", "None: self.name = name else: self.name = self.__class__.__name__ if 
desc", "self.name = name else: self.name = self.__class__.__name__ if desc is", "Description of stage kwargs (Any): Additional keyword arguments \"\"\" if", "module in modules: try: stage_cls = getattr(module, stage_cls_name) except AttributeError:", "stage_conf.get(stage_cls_name) if stage_args is None: stage_args = {} # Get", "is not None: self.desc = desc else: self.desc = self.name", "# # This software may be modified and distributed under", "stage_cls_name) except AttributeError: continue if stage_cls is None: if \"infile\"", "spec_from_file_location(stage_cls_name, module_infile) module = module_from_spec(spec) spec.loader.exec_module(module) stage_cls = getattr(module, stage_cls_name)", "from importlib.util import module_from_spec, spec_from_file_location from typing import Any, List,", "= \"png\" def __init__( self, name: Optional[str] = None, desc:", "and stores static configuration. Arguments: name (Optional[str]): Name of stage", "of stage desc (Optional[str]): Description of stage kwargs (Any): Additional", "None: self.desc = desc else: self.desc = self.name def __repr__(self)", "self.desc = self.name def __repr__(self) -> str: return self.desc def", "class for stages.\"\"\" trim_suffixes = None extension = \"png\" def", "#!/usr/bin/env python # pipescaler/core/stage.py # # Copyright (C) 2020-2021 <NAME>", "spec.loader.exec_module(module) stage_cls = getattr(module, stage_cls_name) else: raise KeyError(f\"Class '{stage_cls_name}' not", "desc else: self.desc = self.name def __repr__(self) -> str: return", "= name else: self.name = self.__class__.__name__ if desc is not", "import module_from_spec, spec_from_file_location from typing import Any, List, Optional from", "stage desc (Optional[str]): Description of stage kwargs (Any): Additional keyword", "not None: self.name = name else: self.name = self.__class__.__name__ if", "stage_args = {} # Get stage's class stage_cls = None", "module_infile = validate_input_path(stage_args.pop(\"infile\")) spec = 
spec_from_file_location(stage_cls_name, module_infile) module = module_from_spec(spec)", "Stage(ABC): \"\"\"Base class for stages.\"\"\" trim_suffixes = None extension =", "stage_args is None: stage_args = {} # Get stage's class", "Get stage's configuration stage_args = stage_conf.get(stage_cls_name) if stage_args is None:", "(Optional[str]): Name of stage desc (Optional[str]): Description of stage kwargs", "arguments \"\"\" if name is not None: self.name = name", "import annotations from abc import ABC, abstractmethod from importlib.util import", "stage_args: module_infile = validate_input_path(stage_args.pop(\"infile\")) spec = spec_from_file_location(stage_cls_name, module_infile) module =", "None extension = \"png\" def __init__( self, name: Optional[str] =", "from typing import Any, List, Optional from pipescaler.common import validate_input_path", "not None: self.desc = desc else: self.desc = self.name def", "return self.desc def __str__(self) -> str: return self.name @property @abstractmethod", "import Any, List, Optional from pipescaler.common import validate_input_path def initialize_stage(stage_name,", "None for module in modules: try: stage_cls = getattr(module, stage_cls_name)", "Optional[str] = None, desc: Optional[str] = None, **kwargs: Any )", "class Stage(ABC): \"\"\"Base class for stages.\"\"\" trim_suffixes = None extension", "module = module_from_spec(spec) spec.loader.exec_module(module) stage_cls = getattr(module, stage_cls_name) else: raise", "spec_from_file_location from typing import Any, List, Optional from pipescaler.common import", "Get stage's class stage_cls = None for module in modules:", "= getattr(module, stage_cls_name) except AttributeError: continue if stage_cls is None:", "def __repr__(self) -> str: return self.desc def __str__(self) -> str:", "List[str]: raise NotImplementedError() @property @abstractmethod def outlets(self) -> List[str]: raise", "AttributeError: continue if stage_cls is None: if \"infile\" in stage_args:", 
"__str__(self) -> str: return self.name @property @abstractmethod def inlets(self) ->", "and distributed under the terms of the # BSD license.", "BSD license. from __future__ import annotations from abc import ABC,", "stage_cls is None: if \"infile\" in stage_args: module_infile = validate_input_path(stage_args.pop(\"infile\"))", "modules): # Get stage's class name stage_cls_name = next(iter(stage_conf)) #", "Copyright (C) 2020-2021 <NAME> # All rights reserved. # #", "Any, List, Optional from pipescaler.common import validate_input_path def initialize_stage(stage_name, stage_conf,", "stage's class stage_cls = None for module in modules: try:", "terms of the # BSD license. from __future__ import annotations", "\"\"\"Base class for stages.\"\"\" trim_suffixes = None extension = \"png\"", "typing import Any, List, Optional from pipescaler.common import validate_input_path def", "Validates and stores static configuration. Arguments: name (Optional[str]): Name of", "self.desc def __str__(self) -> str: return self.name @property @abstractmethod def", "# Get stage's class stage_cls = None for module in", "Any ) -> None: \"\"\" Validates and stores static configuration.", "modules: try: stage_cls = getattr(module, stage_cls_name) except AttributeError: continue if", "# get first key # Get stage's configuration stage_args =", "<filename>pipescaler/core/stage.py #!/usr/bin/env python # pipescaler/core/stage.py # # Copyright (C) 2020-2021", "found\") return stage_cls(name=stage_name, **stage_args) class Stage(ABC): \"\"\"Base class for stages.\"\"\"", "rights reserved. # # This software may be modified and", "self, name: Optional[str] = None, desc: Optional[str] = None, **kwargs:", "# pipescaler/core/stage.py # # Copyright (C) 2020-2021 <NAME> # All", "under the terms of the # BSD license. 
from __future__", "= self.name def __repr__(self) -> str: return self.desc def __str__(self)", "getattr(module, stage_cls_name) else: raise KeyError(f\"Class '{stage_cls_name}' not found\") return stage_cls(name=stage_name,", "license. from __future__ import annotations from abc import ABC, abstractmethod", "# BSD license. from __future__ import annotations from abc import", "# Get stage's configuration stage_args = stage_conf.get(stage_cls_name) if stage_args is", "{} # Get stage's class stage_cls = None for module", "KeyError(f\"Class '{stage_cls_name}' not found\") return stage_cls(name=stage_name, **stage_args) class Stage(ABC): \"\"\"Base", "= None, **kwargs: Any ) -> None: \"\"\" Validates and", "kwargs (Any): Additional keyword arguments \"\"\" if name is not", "\"\"\" Validates and stores static configuration. Arguments: name (Optional[str]): Name", "else: raise KeyError(f\"Class '{stage_cls_name}' not found\") return stage_cls(name=stage_name, **stage_args) class", "module_from_spec(spec) spec.loader.exec_module(module) stage_cls = getattr(module, stage_cls_name) else: raise KeyError(f\"Class '{stage_cls_name}'", "configuration stage_args = stage_conf.get(stage_cls_name) if stage_args is None: stage_args =", "None, desc: Optional[str] = None, **kwargs: Any ) -> None:", "stage_cls = getattr(module, stage_cls_name) else: raise KeyError(f\"Class '{stage_cls_name}' not found\")", "if desc is not None: self.desc = desc else: self.desc", "= None extension = \"png\" def __init__( self, name: Optional[str]", "(C) 2020-2021 <NAME> # All rights reserved. # # This", "**kwargs: Any ) -> None: \"\"\" Validates and stores static", "= desc else: self.desc = self.name def __repr__(self) -> str:", "<NAME> # All rights reserved. # # This software may", "# Copyright (C) 2020-2021 <NAME> # All rights reserved. #" ]
[ "x, y, bound): \"\"\" :type x: int :type y: int", "range(exponent): z = x ** i + y ** j", "1 # Brute force all of the exponent trials hashset", "in range(exponent): z = x ** i + y **", "\"\"\" :type x: int :type y: int :type bound: int", "range(exponent): for j in range(exponent): z = x ** i", "\"\"\" # Find max exponent base = max(x, y) if", "int :rtype: List[int] \"\"\" # Find max exponent base =", "set() for i in range(exponent): for j in range(exponent): z", "in range(exponent): for j in range(exponent): z = x **", "x: int :type y: int :type bound: int :rtype: List[int]", "while base ** exponent <= bound: exponent += 1 #", ":type x: int :type y: int :type bound: int :rtype:", "1: while base ** exponent <= bound: exponent += 1", "= x ** i + y ** j if z", "exponent += 1 # Brute force all of the exponent", "exponent = 1 if base != 1: while base **", "= max(x, y) if x == 1 or y ==", "int :type bound: int :rtype: List[int] \"\"\" # Find max", "exponent <= bound: exponent += 1 # Brute force all", "1 or y == 1 else min(x, y) exponent =", "bound): \"\"\" :type x: int :type y: int :type bound:", "max exponent base = max(x, y) if x == 1", "hashset = set() for i in range(exponent): for j in", "+= 1 # Brute force all of the exponent trials", "List[int] \"\"\" # Find max exponent base = max(x, y)", "= 1 if base != 1: while base ** exponent", "bound: exponent += 1 # Brute force all of the", "of the exponent trials hashset = set() for i in", ":type bound: int :rtype: List[int] \"\"\" # Find max exponent", "i in range(exponent): for j in range(exponent): z = x", "x ** i + y ** j if z <=", "if base != 1: while base ** exponent <= bound:", "y ** j if z <= bound: hashset.add(z) return list(hashset)", "the exponent trials hashset = set() for i in range(exponent):", "force all of the exponent trials hashset = set() for", "** exponent <= bound: exponent += 1 # Brute force", "exponent trials hashset = set() for i in range(exponent): for", "bound: int :rtype: List[int] \"\"\" # Find 
max exponent base", "if x == 1 or y == 1 else min(x,", "== 1 else min(x, y) exponent = 1 if base", "powerfulIntegers(self, x, y, bound): \"\"\" :type x: int :type y:", "base ** exponent <= bound: exponent += 1 # Brute", "y, bound): \"\"\" :type x: int :type y: int :type", "<gh_stars>0 class Solution(object): def powerfulIntegers(self, x, y, bound): \"\"\" :type", "Solution(object): def powerfulIntegers(self, x, y, bound): \"\"\" :type x: int", "trials hashset = set() for i in range(exponent): for j", "for i in range(exponent): for j in range(exponent): z =", ":type y: int :type bound: int :rtype: List[int] \"\"\" #", "base != 1: while base ** exponent <= bound: exponent", "x == 1 or y == 1 else min(x, y)", "y: int :type bound: int :rtype: List[int] \"\"\" # Find", "base = max(x, y) if x == 1 or y", "for j in range(exponent): z = x ** i +", "# Find max exponent base = max(x, y) if x", "max(x, y) if x == 1 or y == 1", "** i + y ** j if z <= bound:", "min(x, y) exponent = 1 if base != 1: while", "Find max exponent base = max(x, y) if x ==", "y == 1 else min(x, y) exponent = 1 if", "int :type y: int :type bound: int :rtype: List[int] \"\"\"", "z = x ** i + y ** j if", "def powerfulIntegers(self, x, y, bound): \"\"\" :type x: int :type", "== 1 or y == 1 else min(x, y) exponent", "or y == 1 else min(x, y) exponent = 1", "1 if base != 1: while base ** exponent <=", "y) exponent = 1 if base != 1: while base", "all of the exponent trials hashset = set() for i", "i + y ** j if z <= bound: hashset.add(z)", "<= bound: exponent += 1 # Brute force all of", "= set() for i in range(exponent): for j in range(exponent):", "class Solution(object): def powerfulIntegers(self, x, y, bound): \"\"\" :type x:", "1 else min(x, y) exponent = 1 if base !=", "# Brute force all of the exponent trials hashset =", "j in range(exponent): z = x ** i + y", "else min(x, y) exponent = 1 if base != 1:", ":rtype: List[int] \"\"\" # Find max exponent base = max(x,", "exponent base = max(x, y) if x == 1 
or", "y) if x == 1 or y == 1 else", "+ y ** j if z <= bound: hashset.add(z) return", "!= 1: while base ** exponent <= bound: exponent +=", "Brute force all of the exponent trials hashset = set()" ]
[ "include from project.api.rankings.api import AddRanking, AddScore, GetScoresUser, GetScoresGame urlpatterns =", "django.conf.urls import url, include from project.api.rankings.api import AddRanking, AddScore, GetScoresUser,", "from project.api.rankings.api import AddRanking, AddScore, GetScoresUser, GetScoresGame urlpatterns = [", "AddRanking, AddScore, GetScoresUser, GetScoresGame urlpatterns = [ url(r'add_ranking$', AddRanking.as_view()), url(r'add_score$',", "url, include from project.api.rankings.api import AddRanking, AddScore, GetScoresUser, GetScoresGame urlpatterns", "GetScoresGame urlpatterns = [ url(r'add_ranking$', AddRanking.as_view()), url(r'add_score$', AddScore.as_view()), url(r'get_scores_game$', GetScoresGame.as_view()),", "<filename>src/project/api/rankings/urls.py from django.conf.urls import url, include from project.api.rankings.api import AddRanking,", "urlpatterns = [ url(r'add_ranking$', AddRanking.as_view()), url(r'add_score$', AddScore.as_view()), url(r'get_scores_game$', GetScoresGame.as_view()), url(r'get_scores_user$',", "= [ url(r'add_ranking$', AddRanking.as_view()), url(r'add_score$', AddScore.as_view()), url(r'get_scores_game$', GetScoresGame.as_view()), url(r'get_scores_user$', GetScoresUser.as_view())", "from django.conf.urls import url, include from project.api.rankings.api import AddRanking, AddScore,", "[ url(r'add_ranking$', AddRanking.as_view()), url(r'add_score$', AddScore.as_view()), url(r'get_scores_game$', GetScoresGame.as_view()), url(r'get_scores_user$', GetScoresUser.as_view()) ]", "project.api.rankings.api import AddRanking, AddScore, GetScoresUser, GetScoresGame urlpatterns = [ url(r'add_ranking$',", "AddScore, GetScoresUser, GetScoresGame urlpatterns = [ url(r'add_ranking$', AddRanking.as_view()), url(r'add_score$', AddScore.as_view()),", "import url, include from project.api.rankings.api import AddRanking, AddScore, GetScoresUser, GetScoresGame", "import AddRanking, AddScore, GetScoresUser, GetScoresGame 
urlpatterns = [ url(r'add_ranking$', AddRanking.as_view()),", "GetScoresUser, GetScoresGame urlpatterns = [ url(r'add_ranking$', AddRanking.as_view()), url(r'add_score$', AddScore.as_view()), url(r'get_scores_game$'," ]
[ "/ 2, cap_style=2, join_style=2) cap_cut = draw.translate(cap_cut, -(N * p.cap_width", "* cap_width: '10um' -- The width of the finger capacitor", "chip='main', layer='1') \"\"\"Default connector options\"\"\" def make(self): \"\"\"Build the component.\"\"\"", "pos_x='0um', pos_y='0um', orientation='0', chip='main', layer='1') \"\"\"Default connector options\"\"\" def make(self):", "-- Distance of the north point of the capacitor from", "| | | |-----|-----| | | | | + Options:", "in the root directory # of this source tree or", "coupling/fingers * cap_gap_ground: '6um' -- Width of the dielectric between", "transmission line. 0 degrees is -y, following a counter-clockwise rotation", "width=p.south_width + 2 * p.south_gap, layer=p.layer, subtract=True) self.add_qgeometry('poly', {'cap_body': cap_body},", "the 'north' portion of the CPW transmission line * north_gap:", "| | | --|-----|-- | | | | | |-----|-----|", "draw.translate(cap_cut, -(N * p.cap_width + (N - 1) * p.cap_gap)", "* rotation: '0' -- The direction of the transmission line.", "works of this code must retain this # copyright notice,", "* finger_count: '5' -- Number of fingers in the capacitor", "* (p.cap_width) + (2 * i - 1) * (p.cap_gap", "at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of", "must retain this # copyright notice, and modified files need", "together via a finger capacitor. 
Such a structure can be", "cap_etch] = c_items #Add to qgeometry tables self.add_qgeometry('path', {'north_cpw': north_cpw},", "cap_gap='6um', cap_gap_ground='6um', finger_length='20um', finger_count='5', cap_distance='50um', pos_x='0um', pos_y='0um', orientation='0', chip='main', layer='1')", "cap_distance='50um', pos_x='0um', pos_y='0um', orientation='0', chip='main', layer='1') \"\"\"Default connector options\"\"\" def", "0) make_cut_list = [] make_cut_list.append([0, (p.finger_length) / 2]) make_cut_list.append([(p.cap_width) +", "+ (N - 1) * p.cap_gap + 2 * p.cap_gap_ground,", "capacitor is found via. (cap_width * finger_count + * cap_gap", "[north_cpw, south_cpw, cap_body, cap_etch] = c_items #Add to qgeometry tables", "example, for generating CPW resonators. (0,0) represents the center position", "can be used, as an example, for generating CPW resonators.", "(and islands) * cap_gap: '6um' -- The width of dielectric", "Width of the dielectric between the capacitor and ground *", "directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. #", "(+) structure comprised of a north CPW transmission line, and", "-- Number of fingers in the capacitor * cap_distance: '50um'", "component.\"\"\" p = self.p N = int(p.finger_count) #Finger Capacitor cap_box", "= draw.translate(c_items, p.pos_x, p.pos_y) [north_cpw, south_cpw, cap_body, cap_etch] = c_items", "90 is +x) * chip: 'main' -- The chip the", "i + 1) * (p.cap_gap / 2), flip * (p.finger_length)", "capacitor gap to ground) * cap_width: '10um' -- The width", "]) flip = flip * -1 cap_cut = draw.LineString(make_cut_list).buffer(p.cap_gap /", "be used, as an example, for generating CPW resonators. (0,0)", "a finger capacitor. Such a structure can be used, as", "Apache License, Version 2.0. You may # obtain a copy", "-*- # This code is part of Qiskit. 
# #", "-- The dielectric gap of the 'south' portion of the", "#May want it to be it's own value that the", "2 * p.cap_width + p.finger_length) / 2) #CPW north_cpw =", "of the component. Setting finger length to 0 gives a", "metal (and islands) * cap_gap: '6um' -- The width of", "+ (2 * i + 1) * (p.cap_gap / 2),", "\"\"\"Component metadata\"\"\" #Currently setting the primary CPW length based on", "the originals. from qiskit_metal import draw, Dict from qiskit_metal.qlibrary.core import", "= c_items #Add to qgeometry tables self.add_qgeometry('path', {'north_cpw': north_cpw}, width=p.north_width,", "Copyright IBM 2017, 2021. # # This code is licensed", "for generating CPW resonators. (0,0) represents the center position of", "'south' portion of the CPW transmission line (also for the", "cap_gap: '6um' -- The width of dielectric for the capacitive", "may # obtain a copy of this license in the", "* p.cap_width + (N - 1) * p.cap_gap, p.cap_gap +", "+ (N - 1) * p.cap_gap, p.cap_gap + 2 *", "(p.finger_length) / 2 ]) flip = flip * -1 cap_cut", "finger length to 0 gives a simple gap capacitor. The", "is +x) * chip: 'main' -- The chip the capacitor", "* p.cap_distance - (p.cap_gap + 2 * p.cap_width + p.finger_length)", "(p.cap_gap + 2 * p.cap_width + p.finger_length) / 2) #CPW", "'50um' -- Distance of the north point of the capacitor", "of the north pin * rotation: '0' -- The direction", "The depth of the finger islands of the capacitor *", "* p.cap_width + (N - 1) * p.cap_gap) / 2,", "# obtain a copy of this license in the LICENSE.txt", "| | | | + Options: * north_width: '10um' --", "degrees is -y, following a counter-clockwise rotation (eg. 90 is", "/ 2), flip * (p.finger_length) / 2 ]) flip =", "'0' -- The direction of the transmission line. 
0 degrees", "flip * (p.finger_length) / 2 ]) make_cut_list.append([ (i + 1)", "of the CPW transmission line * north_gap: '6um' -- The", "the CPW transmission line (also for the capacitor gap to", "p.cap_width + (N - 1) * p.cap_gap + 2 *", "+ 1) * (p.cap_gap / 2), flip * (p.finger_length) /", "\"\"\" component_metadata = Dict(short_name='cpw', _qgeometry_table_poly='True', _qgeometry_table_path='True') \"\"\"Component metadata\"\"\" #Currently setting", "line * south_gap: '6um' -- The dielectric gap of the", "to be it's own value that the user can control?", "and Translate c_items = [north_cpw, south_cpw, cap_body, cap_etch] c_items =", "user can control? default_options = Dict(north_width='10um', north_gap='6um', south_width='10um', south_gap='6um', cap_width='10um',", "+ (2 * i - 1) * (p.cap_gap / 2),", "p.orientation, origin=(0, 0)) c_items = draw.translate(c_items, p.pos_x, p.pos_y) [north_cpw, south_cpw,", "'6um' -- Width of the dielectric between the capacitor and", "setting the primary CPW length based on the coupling_length #May", "import QComponent import numpy as np class CapNInterdigital(QComponent): \"\"\"Generates a", "(p.cap_gap / 2), (p.finger_length) / 2]) flip = -1 for", "CPW length based on the coupling_length #May want it to", "p.finger_length) ]]) #Rotate and Translate c_items = [north_cpw, south_cpw, cap_body,", "fingers in the capacitor * cap_distance: '50um' -- Distance of", "derivative works of this code must retain this # copyright", "the CPW transmission line * north_gap: '6um' -- The dielectric", "'0um' -- The x/y position of the north pin *", "the CPW transmission line * south_gap: '6um' -- The dielectric", "counter-clockwise rotation (eg. 
90 is +x) * chip: 'main' --", "self.p N = int(p.finger_count) #Finger Capacitor cap_box = draw.rectangle(N *", "-- The width of the 'north' portion of the CPW", "The width of dielectric for the capacitive coupling/fingers * cap_gap_ground:", "+ p.finger_length + 2 * p.cap_gap_ground, 0, -p.cap_distance - (p.cap_gap", "cap_etch] c_items = draw.rotate(c_items, p.orientation, origin=(0, 0)) c_items = draw.translate(c_items,", "-y, following a counter-clockwise rotation (eg. 90 is +x) *", "cap_width: '10um' -- The width of the finger capacitor metal", "= int(p.finger_count) #Finger Capacitor cap_box = draw.rectangle(N * p.cap_width +", "'north' portion of the CPW transmission line * south_width: '10um'", "the north point of the capacitor from the north pin", "islands of the capacitor * finger_count: '5' -- Number of", "control? default_options = Dict(north_width='10um', north_gap='6um', south_width='10um', south_gap='6um', cap_width='10um', cap_gap='6um', cap_gap_ground='6um',", "* p.cap_width + p.finger_length, 0, 0) make_cut_list = [] make_cut_list.append([0,", "|-----|-----| | | | | + Options: * north_width: '10um'", "make_cut_list.append([0, (p.finger_length) / 2]) make_cut_list.append([(p.cap_width) + (p.cap_gap / 2), (p.finger_length)", "flip = flip * -1 cap_cut = draw.LineString(make_cut_list).buffer(p.cap_gap / 2,", "range(1, N): make_cut_list.append([ i * (p.cap_width) + (2 * i", "is on. \"\"\" component_metadata = Dict(short_name='cpw', _qgeometry_table_poly='True', _qgeometry_table_path='True') \"\"\"Component metadata\"\"\"", "capacitor. The width of the gap capacitor is found via.", "p.cap_gap + 2 * p.cap_width + p.finger_length, 0, 0) make_cut_list", "| | | | | |-----|-----| | | | |", "'main' -- The chip the capacitor should be on. *", "cap_body, cap_etch] c_items = draw.rotate(c_items, p.orientation, origin=(0, 0)) c_items =", "(0,0) represents the center position of the component. 
Setting finger", "p.finger_length) / 2) #CPW north_cpw = draw.LineString([[0, 0], [0, -p.cap_distance]])", "depth of the finger islands of the capacitor * finger_count:", "- (p.cap_gap + 2 * p.cap_width + p.finger_length) ]]) #Rotate", "portion of the CPW transmission line * north_gap: '6um' --", "(2 * i - 1) * (p.cap_gap / 2), flip", "{'south_cpw_sub': south_cpw}, width=p.south_width + 2 * p.south_gap, layer=p.layer, subtract=True) self.add_qgeometry('poly',", "+ * cap_gap * (finger_count-1)). Inherits QComponent class. :: (0,0)", "of the capacitor * finger_count: '5' -- Number of fingers", "IBM 2017, 2021. # # This code is licensed under", "south_width: '10um' -- The width of the 'south' portion of", "-- The direction of the transmission line. 0 degrees is", "import numpy as np class CapNInterdigital(QComponent): \"\"\"Generates a two pin", "file in the root directory # of this source tree", "= draw.subtract(cap_box, cap_cut) cap_body = draw.translate( cap_body, 0, -p.cap_distance -", "/ 2, 0) cap_body = draw.subtract(cap_box, cap_cut) cap_body = draw.translate(", "north_cpw}, width=p.north_width, layer=p.layer) self.add_qgeometry('path', {'north_cpw_sub': north_cpw}, width=p.north_width + 2 *", "+ p.finger_length) / 2) #CPW north_cpw = draw.LineString([[0, 0], [0,", "* p.cap_gap_ground, 0, -p.cap_distance - (p.cap_gap + 2 * p.cap_width", "draw.rotate(c_items, p.orientation, origin=(0, 0)) c_items = draw.translate(c_items, p.pos_x, p.pos_y) [north_cpw,", "= draw.rotate(c_items, p.orientation, origin=(0, 0)) c_items = draw.translate(c_items, p.pos_x, p.pos_y)", "'5' -- Number of fingers in the capacitor * cap_distance:", "line. 0 degrees is -y, following a counter-clockwise rotation (eg.", "# # (C) Copyright IBM 2017, 2021. # # This", "a notice indicating # that they have been altered from", "in range(1, N): make_cut_list.append([ i * (p.cap_width) + (2 *", "dielectric for the capacitive coupling/fingers * cap_gap_ground: '6um' -- Width", "CPW resonators. 
(0,0) represents the center position of the component.", "copyright notice, and modified files need to carry a notice", "north CPW transmission line, and a south transmission line, coupled", "(p.cap_width) + (2 * i + 1) * (p.cap_gap /", "* north_gap: '6um' -- The dielectric gap of the 'north'", "class. :: (0,0) N + ^ | | | |", "* p.cap_width + p.finger_length) / 2) cap_etch = draw.rectangle( N", "# (C) Copyright IBM 2017, 2021. # # This code", "or derivative works of this code must retain this #", "(p.cap_gap + 2 * p.cap_width + p.finger_length) / 2) cap_etch", "-- The dielectric gap of the 'north' portion of the", "cap_gap * (finger_count-1)). Inherits QComponent class. :: (0,0) N +", "(cap_width * finger_count + * cap_gap * (finger_count-1)). Inherits QComponent", "The dielectric gap of the 'north' portion of the CPW", "1) * (p.cap_width) + (2 * i + 1) *", "make_cut_list.append([ (i + 1) * (p.cap_width) + (2 * i", "'6um' -- The dielectric gap of the 'south' portion of", "pins north_pin_list = north_cpw.coords south_pin_list = south_cpw.coords self.add_pin('north_end', points=np.array(north_pin_list[::-1]), width=p.north_width,", "cap_body = draw.translate( cap_body, 0, -p.cap_distance - (p.cap_gap + 2", "between the capacitor and ground * finger_length: '20um' -- The", "line * north_gap: '6um' -- The dielectric gap of the", "is -y, following a counter-clockwise rotation (eg. 90 is +x)", "/ 2) cap_etch = draw.rectangle( N * p.cap_width + (N", "'20um' -- The depth of the finger islands of the", "an example, for generating CPW resonators. 
(0,0) represents the center", "Any modifications or derivative works of this code must retain", "modifications or derivative works of this code must retain this", "* i - 1) * (p.cap_gap / 2), flip *", "[ 0, -2 * p.cap_distance - (p.cap_gap + 2 *", "= -1 for i in range(1, N): make_cut_list.append([ i *", "files need to carry a notice indicating # that they", "* south_width: '10um' -- The width of the 'south' portion", "p.cap_width + p.finger_length + 2 * p.cap_gap_ground, 0, -p.cap_distance -", "and modified files need to carry a notice indicating #", "numpy as np class CapNInterdigital(QComponent): \"\"\"Generates a two pin (+)", "'south' portion of the CPW transmission line * south_gap: '6um'", "from the north pin * pos_x/_y: '0um' -- The x/y", "1) * (p.cap_gap / 2), flip * (p.finger_length) / 2", "= north_cpw.coords south_pin_list = south_cpw.coords self.add_pin('north_end', points=np.array(north_pin_list[::-1]), width=p.north_width, input_as_norm=True) self.add_pin('south_end',", "as np class CapNInterdigital(QComponent): \"\"\"Generates a two pin (+) structure", "#Add pins north_pin_list = north_cpw.coords south_pin_list = south_cpw.coords self.add_pin('north_end', points=np.array(north_pin_list[::-1]),", "p.finger_length + 2 * p.cap_gap_ground, 0, -p.cap_distance - (p.cap_gap +", "width of the 'north' portion of the CPW transmission line", "+ (N - 1) * p.cap_gap) / 2, 0) cap_body", "north_gap: '6um' -- The dielectric gap of the 'north' portion", "and a south transmission line, coupled together via a finger", "= Dict(short_name='cpw', _qgeometry_table_poly='True', _qgeometry_table_path='True') \"\"\"Component metadata\"\"\" #Currently setting the primary", "the component.\"\"\" p = self.p N = int(p.finger_count) #Finger Capacitor", "qgeometry tables self.add_qgeometry('path', {'north_cpw': north_cpw}, width=p.north_width, layer=p.layer) self.add_qgeometry('path', {'north_cpw_sub': north_cpw},", "ground) * cap_width: '10um' -- The width of the finger", "], [ 0, 
-2 * p.cap_distance - (p.cap_gap + 2", "class CapNInterdigital(QComponent): \"\"\"Generates a two pin (+) structure comprised of", "capacitor is on. \"\"\" component_metadata = Dict(short_name='cpw', _qgeometry_table_poly='True', _qgeometry_table_path='True') \"\"\"Component", "qiskit_metal import draw, Dict from qiskit_metal.qlibrary.core import QComponent import numpy", "default_options = Dict(north_width='10um', north_gap='6um', south_width='10um', south_gap='6um', cap_width='10um', cap_gap='6um', cap_gap_ground='6um', finger_length='20um',", "cap_body, cap_etch] = c_items #Add to qgeometry tables self.add_qgeometry('path', {'north_cpw':", "The width of the gap capacitor is found via. (cap_width", "finger_length='20um', finger_count='5', cap_distance='50um', pos_x='0um', pos_y='0um', orientation='0', chip='main', layer='1') \"\"\"Default connector", "+ p.finger_length) ]]) #Rotate and Translate c_items = [north_cpw, south_cpw,", "south_cpw, cap_body, cap_etch] = c_items #Add to qgeometry tables self.add_qgeometry('path',", "| | | | |-----|-----| | | | | +", "p.finger_length) ], [ 0, -2 * p.cap_distance - (p.cap_gap +", "* p.cap_width + p.finger_length) ], [ 0, -2 * p.cap_distance", "is found via. (cap_width * finger_count + * cap_gap *", "= draw.LineString(make_cut_list).buffer(p.cap_gap / 2, cap_style=2, join_style=2) cap_cut = draw.translate(cap_cut, -(N", "x/y position of the north pin * rotation: '0' --", "a copy of this license in the LICENSE.txt file in", "# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # #", "0], [0, -p.cap_distance]]) south_cpw = draw.LineString( [[ 0, -p.cap_distance -", "metadata\"\"\" #Currently setting the primary CPW length based on the", "the capacitor * finger_count: '5' -- Number of fingers in", "have been altered from the originals. from qiskit_metal import draw,", "The width of the 'south' portion of the CPW transmission", "via. (cap_width * finger_count + * cap_gap * (finger_count-1)). 
Inherits", "import draw, Dict from qiskit_metal.qlibrary.core import QComponent import numpy as", "p.finger_length, 0, 0) make_cut_list = [] make_cut_list.append([0, (p.finger_length) / 2])", "south_pin_list = south_cpw.coords self.add_pin('north_end', points=np.array(north_pin_list[::-1]), width=p.north_width, input_as_norm=True) self.add_pin('south_end', points=np.array(south_pin_list), width=p.south_width,", "finger capacitor metal (and islands) * cap_gap: '6um' -- The", "cap_etch}, layer=p.layer, subtract=True) #Add pins north_pin_list = north_cpw.coords south_pin_list =", "QComponent import numpy as np class CapNInterdigital(QComponent): \"\"\"Generates a two", "c_items = draw.rotate(c_items, p.orientation, origin=(0, 0)) c_items = draw.translate(c_items, p.pos_x,", "2), flip * (p.finger_length) / 2 ]) make_cut_list.append([ (i +", "flip = -1 for i in range(1, N): make_cut_list.append([ i", "# # This code is licensed under the Apache License,", "it's own value that the user can control? default_options =", "line, coupled together via a finger capacitor. Such a structure", "* finger_length: '20um' -- The depth of the finger islands", "north_gap='6um', south_width='10um', south_gap='6um', cap_width='10um', cap_gap='6um', cap_gap_ground='6um', finger_length='20um', finger_count='5', cap_distance='50um', pos_x='0um',", "dielectric gap of the 'south' portion of the CPW transmission", "draw.translate( cap_body, 0, -p.cap_distance - (p.cap_gap + 2 * p.cap_width", "the 'south' portion of the CPW transmission line * south_gap:", "a simple gap capacitor. The width of the gap capacitor", "Options: * north_width: '10um' -- The width of the 'north'", "+ 1) * (p.cap_width) + (2 * i + 1)", "coupled together via a finger capacitor. 
Such a structure can", "portion of the CPW transmission line * south_gap: '6um' --", "transmission line (also for the capacitor gap to ground) *", "'10um' -- The width of the finger capacitor metal (and", "+ p.finger_length) ], [ 0, -2 * p.cap_distance - (p.cap_gap", "= [] make_cut_list.append([0, (p.finger_length) / 2]) make_cut_list.append([(p.cap_width) + (p.cap_gap /", "You may # obtain a copy of this license in", "# # Any modifications or derivative works of this code", "\"\"\"Build the component.\"\"\" p = self.p N = int(p.finger_count) #Finger", "of fingers in the capacitor * cap_distance: '50um' -- Distance", "the finger capacitor metal (and islands) * cap_gap: '6um' --", "structure can be used, as an example, for generating CPW", "on. * layer: '1' -- Layer the capacitor is on.", "cap_cut = draw.translate(cap_cut, -(N * p.cap_width + (N - 1)", "* p.cap_gap + 2 * p.cap_gap_ground, p.cap_gap + 2 *", "(p.finger_length) / 2]) make_cut_list.append([(p.cap_width) + (p.cap_gap / 2), (p.finger_length) /", "carry a notice indicating # that they have been altered", "transmission line * south_gap: '6um' -- The dielectric gap of", "the north pin * rotation: '0' -- The direction of", "+ 2 * p.cap_width + p.finger_length) / 2) cap_etch =", "of the dielectric between the capacitor and ground * finger_length:", "* p.north_gap, layer=p.layer, subtract=True) self.add_qgeometry('path', {'south_cpw': south_cpw}, width=p.south_width, layer=p.layer) self.add_qgeometry('path',", "notice indicating # that they have been altered from the", "(p.cap_gap + 2 * p.cap_width + p.finger_length) ], [ 0,", "2 ]) make_cut_list.append([ (i + 1) * (p.cap_width) + (2", "draw.LineString( [[ 0, -p.cap_distance - (p.cap_gap + 2 * p.cap_width", "south_cpw, cap_body, cap_etch] c_items = draw.rotate(c_items, p.orientation, origin=(0, 0)) c_items", "this license in the LICENSE.txt file in the root directory", "been altered from the originals. 
from qiskit_metal import draw, Dict", "2), (p.finger_length) / 2]) flip = -1 for i in", "the root directory # of this source tree or at", "Layer the capacitor is on. \"\"\" component_metadata = Dict(short_name='cpw', _qgeometry_table_poly='True',", "p.pos_x, p.pos_y) [north_cpw, south_cpw, cap_body, cap_etch] = c_items #Add to", "the Apache License, Version 2.0. You may # obtain a", "{'south_cpw': south_cpw}, width=p.south_width, layer=p.layer) self.add_qgeometry('path', {'south_cpw_sub': south_cpw}, width=p.south_width + 2", "of the finger capacitor metal (and islands) * cap_gap: '6um'", "Inherits QComponent class. :: (0,0) N + ^ | |", "-- The width of the 'south' portion of the CPW", "* p.south_gap, layer=p.layer, subtract=True) self.add_qgeometry('poly', {'cap_body': cap_body}, layer=p.layer) self.add_qgeometry('poly', {'cap_etch':", "0) cap_body = draw.subtract(cap_box, cap_cut) cap_body = draw.translate( cap_body, 0,", "* south_gap: '6um' -- The dielectric gap of the 'south'", "self.add_qgeometry('poly', {'cap_body': cap_body}, layer=p.layer) self.add_qgeometry('poly', {'cap_etch': cap_etch}, layer=p.layer, subtract=True) #Add", "the transmission line. 0 degrees is -y, following a counter-clockwise", "* cap_gap * (finger_count-1)). Inherits QComponent class. 
:: (0,0) N", "^ | | | | --|-----|-- | | | |", "* pos_x/_y: '0um' -- The x/y position of the north", "Such a structure can be used, as an example, for", "The dielectric gap of the 'south' portion of the CPW", "Number of fingers in the capacitor * cap_distance: '50um' --", "i * (p.cap_width) + (2 * i - 1) *", "cap_body = draw.subtract(cap_box, cap_cut) cap_body = draw.translate( cap_body, 0, -p.cap_distance", "'6um' -- The dielectric gap of the 'north' portion of", "the capacitor gap to ground) * cap_width: '10um' -- The", "retain this # copyright notice, and modified files need to", "2 * p.cap_width + p.finger_length) / 2) cap_etch = draw.rectangle(", "0, -p.cap_distance - (p.cap_gap + 2 * p.cap_width + p.finger_length)", "can control? default_options = Dict(north_width='10um', north_gap='6um', south_width='10um', south_gap='6um', cap_width='10um', cap_gap='6um',", "N + ^ | | | | --|-----|-- | |", "p.cap_gap + 2 * p.cap_gap_ground, p.cap_gap + 2 * p.cap_width", "of the finger islands of the capacitor * finger_count: '5'", "+ 2 * p.cap_width + p.finger_length) ], [ 0, -2", "subtract=True) self.add_qgeometry('path', {'south_cpw': south_cpw}, width=p.south_width, layer=p.layer) self.add_qgeometry('path', {'south_cpw_sub': south_cpw}, width=p.south_width", "i in range(1, N): make_cut_list.append([ i * (p.cap_width) + (2", "flip * (p.finger_length) / 2 ]) flip = flip *", "Distance of the north point of the capacitor from the", "and ground * finger_length: '20um' -- The depth of the", "position of the north pin * rotation: '0' -- The", "* (p.finger_length) / 2 ]) make_cut_list.append([ (i + 1) *", "value that the user can control? 
default_options = Dict(north_width='10um', north_gap='6um',", "p.cap_width + (N - 1) * p.cap_gap) / 2, 0)", "= self.p N = int(p.finger_count) #Finger Capacitor cap_box = draw.rectangle(N", "for i in range(1, N): make_cut_list.append([ i * (p.cap_width) +", "/ 2]) flip = -1 for i in range(1, N):", "self.add_qgeometry('path', {'south_cpw_sub': south_cpw}, width=p.south_width + 2 * p.south_gap, layer=p.layer, subtract=True)", "'north' portion of the CPW transmission line * north_gap: '6um'", "* p.cap_width + p.finger_length) / 2) #CPW north_cpw = draw.LineString([[0,", "-- The chip the capacitor should be on. * layer:", "2 * p.cap_width + p.finger_length, 0, 0) make_cut_list = []", "= draw.rectangle( N * p.cap_width + (N - 1) *", "* p.cap_width + (N - 1) * p.cap_gap + 2", "rotation (eg. 90 is +x) * chip: 'main' -- The", "+ 2 * p.north_gap, layer=p.layer, subtract=True) self.add_qgeometry('path', {'south_cpw': south_cpw}, width=p.south_width,", "2 * p.cap_gap_ground, 0, -p.cap_distance - (p.cap_gap + 2 *", "\"\"\"Generates a two pin (+) structure comprised of a north", "CPW transmission line (also for the capacitor gap to ground)", "CPW transmission line * south_width: '10um' -- The width of", "/ 2), flip * (p.finger_length) / 2 ]) make_cut_list.append([ (i", "+ ^ | | | | --|-----|-- | | |", "-(N * p.cap_width + (N - 1) * p.cap_gap) /", "license in the LICENSE.txt file in the root directory #", "-*- coding: utf-8 -*- # This code is part of", "direction of the transmission line. 0 degrees is -y, following", "(p.cap_width) + (2 * i - 1) * (p.cap_gap /", "north_cpw.coords south_pin_list = south_cpw.coords self.add_pin('north_end', points=np.array(north_pin_list[::-1]), width=p.north_width, input_as_norm=True) self.add_pin('south_end', points=np.array(south_pin_list),", "the gap capacitor is found via. 
(cap_width * finger_count +", "cap_gap_ground: '6um' -- Width of the dielectric between the capacitor", "c_items = [north_cpw, south_cpw, cap_body, cap_etch] c_items = draw.rotate(c_items, p.orientation,", "= flip * -1 cap_cut = draw.LineString(make_cut_list).buffer(p.cap_gap / 2, cap_style=2,", "layer=p.layer) self.add_qgeometry('path', {'north_cpw_sub': north_cpw}, width=p.north_width + 2 * p.north_gap, layer=p.layer,", "= Dict(north_width='10um', north_gap='6um', south_width='10um', south_gap='6um', cap_width='10um', cap_gap='6um', cap_gap_ground='6um', finger_length='20um', finger_count='5',", "north pin * pos_x/_y: '0um' -- The x/y position of", "width of dielectric for the capacitive coupling/fingers * cap_gap_ground: '6um'", "is licensed under the Apache License, Version 2.0. You may", "/ 2 ]) flip = flip * -1 cap_cut =", "from qiskit_metal import draw, Dict from qiskit_metal.qlibrary.core import QComponent import", "/ 2), (p.finger_length) / 2]) flip = -1 for i", "the north pin * pos_x/_y: '0um' -- The x/y position", "modified files need to carry a notice indicating # that", "position of the component. Setting finger length to 0 gives", "CPW transmission line * south_gap: '6um' -- The dielectric gap", "Translate c_items = [north_cpw, south_cpw, cap_body, cap_etch] c_items = draw.rotate(c_items,", "need to carry a notice indicating # that they have", "| | | + Options: * north_width: '10um' -- The", "-1 for i in range(1, N): make_cut_list.append([ i * (p.cap_width)", "self.add_qgeometry('path', {'north_cpw_sub': north_cpw}, width=p.north_width + 2 * p.north_gap, layer=p.layer, subtract=True)", "(p.finger_length) / 2 ]) make_cut_list.append([ (i + 1) * (p.cap_width)", "The chip the capacitor should be on. * layer: '1'", "(i + 1) * (p.cap_width) + (2 * i +", "2 * p.cap_width + p.finger_length) ]]) #Rotate and Translate c_items", "gap capacitor is found via. 
(cap_width * finger_count + *", "p.cap_gap_ground, 0, -p.cap_distance - (p.cap_gap + 2 * p.cap_width +", "2, cap_style=2, join_style=2) cap_cut = draw.translate(cap_cut, -(N * p.cap_width +", "draw.LineString(make_cut_list).buffer(p.cap_gap / 2, cap_style=2, join_style=2) cap_cut = draw.translate(cap_cut, -(N *", "north point of the capacitor from the north pin *", "they have been altered from the originals. from qiskit_metal import", "2]) make_cut_list.append([(p.cap_width) + (p.cap_gap / 2), (p.finger_length) / 2]) flip", "p.pos_y) [north_cpw, south_cpw, cap_body, cap_etch] = c_items #Add to qgeometry", "utf-8 -*- # This code is part of Qiskit. #", "2021. # # This code is licensed under the Apache", "finger capacitor. Such a structure can be used, as an", "--|-----|-- | | | | | |-----|-----| | | |", "p.south_gap, layer=p.layer, subtract=True) self.add_qgeometry('poly', {'cap_body': cap_body}, layer=p.layer) self.add_qgeometry('poly', {'cap_etch': cap_etch},", "length based on the coupling_length #May want it to be", "point of the capacitor from the north pin * pos_x/_y:", "(0,0) N + ^ | | | | --|-----|-- |", "layer: '1' -- Layer the capacitor is on. \"\"\" component_metadata", "_qgeometry_table_poly='True', _qgeometry_table_path='True') \"\"\"Component metadata\"\"\" #Currently setting the primary CPW length", "{'cap_etch': cap_etch}, layer=p.layer, subtract=True) #Add pins north_pin_list = north_cpw.coords south_pin_list", "pos_y='0um', orientation='0', chip='main', layer='1') \"\"\"Default connector options\"\"\" def make(self): \"\"\"Build", "draw.rectangle(N * p.cap_width + (N - 1) * p.cap_gap, p.cap_gap", "width of the gap capacitor is found via. (cap_width *", "root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.", "altered from the originals. from qiskit_metal import draw, Dict from", "south transmission line, coupled together via a finger capacitor. 
Such", "make_cut_list.append([ i * (p.cap_width) + (2 * i - 1)", "(p.cap_gap + 2 * p.cap_width + p.finger_length) ]]) #Rotate and", "'1' -- Layer the capacitor is on. \"\"\" component_metadata =", "transmission line * north_gap: '6um' -- The dielectric gap of", "2017, 2021. # # This code is licensed under the", "south_gap='6um', cap_width='10um', cap_gap='6um', cap_gap_ground='6um', finger_length='20um', finger_count='5', cap_distance='50um', pos_x='0um', pos_y='0um', orientation='0',", "+ Options: * north_width: '10um' -- The width of the", "* cap_gap_ground: '6um' -- Width of the dielectric between the", "following a counter-clockwise rotation (eg. 90 is +x) * chip:", "join_style=2) cap_cut = draw.translate(cap_cut, -(N * p.cap_width + (N -", "flip * -1 cap_cut = draw.LineString(make_cut_list).buffer(p.cap_gap / 2, cap_style=2, join_style=2)", "= draw.rectangle(N * p.cap_width + (N - 1) * p.cap_gap,", "p.cap_gap) / 2, 0) cap_body = draw.subtract(cap_box, cap_cut) cap_body =", "layer=p.layer, subtract=True) self.add_qgeometry('path', {'south_cpw': south_cpw}, width=p.south_width, layer=p.layer) self.add_qgeometry('path', {'south_cpw_sub': south_cpw},", "CapNInterdigital(QComponent): \"\"\"Generates a two pin (+) structure comprised of a", "a structure can be used, as an example, for generating", "finger islands of the capacitor * finger_count: '5' -- Number", "primary CPW length based on the coupling_length #May want it", "= [north_cpw, south_cpw, cap_body, cap_etch] c_items = draw.rotate(c_items, p.orientation, origin=(0,", "c_items #Add to qgeometry tables self.add_qgeometry('path', {'north_cpw': north_cpw}, width=p.north_width, layer=p.layer)", "draw.rectangle( N * p.cap_width + (N - 1) * p.cap_gap", "used, as an example, for generating CPW resonators. 
(0,0) represents", "1) * p.cap_gap, p.cap_gap + 2 * p.cap_width + p.finger_length,", "north pin * rotation: '0' -- The direction of the", "[] make_cut_list.append([0, (p.finger_length) / 2]) make_cut_list.append([(p.cap_width) + (p.cap_gap / 2),", "north_width: '10um' -- The width of the 'north' portion of", "'10um' -- The width of the 'south' portion of the", "the dielectric between the capacitor and ground * finger_length: '20um'", "capacitor * finger_count: '5' -- Number of fingers in the", "* layer: '1' -- Layer the capacitor is on. \"\"\"", "+ 2 * p.south_gap, layer=p.layer, subtract=True) self.add_qgeometry('poly', {'cap_body': cap_body}, layer=p.layer)", "the capacitor from the north pin * pos_x/_y: '0um' --", "layer=p.layer, subtract=True) #Add pins north_pin_list = north_cpw.coords south_pin_list = south_cpw.coords", "layer=p.layer, subtract=True) self.add_qgeometry('poly', {'cap_body': cap_body}, layer=p.layer) self.add_qgeometry('poly', {'cap_etch': cap_etch}, layer=p.layer,", "p.cap_width + p.finger_length) ], [ 0, -2 * p.cap_distance -", "pin * pos_x/_y: '0um' -- The x/y position of the", "* i + 1) * (p.cap_gap / 2), flip *", "of the capacitor from the north pin * pos_x/_y: '0um'", "north_pin_list = north_cpw.coords south_pin_list = south_cpw.coords self.add_pin('north_end', points=np.array(north_pin_list[::-1]), width=p.north_width, input_as_norm=True)", "that the user can control? default_options = Dict(north_width='10um', north_gap='6um', south_width='10um',", "capacitor should be on. * layer: '1' -- Layer the", "N = int(p.finger_count) #Finger Capacitor cap_box = draw.rectangle(N * p.cap_width", "as an example, for generating CPW resonators. 
(0,0) represents the", "make_cut_list = [] make_cut_list.append([0, (p.finger_length) / 2]) make_cut_list.append([(p.cap_width) + (p.cap_gap", "LICENSE.txt file in the root directory # of this source", "Dict(north_width='10um', north_gap='6um', south_width='10um', south_gap='6um', cap_width='10um', cap_gap='6um', cap_gap_ground='6um', finger_length='20um', finger_count='5', cap_distance='50um',", "indicating # that they have been altered from the originals.", "connector options\"\"\" def make(self): \"\"\"Build the component.\"\"\" p = self.p", "= draw.LineString([[0, 0], [0, -p.cap_distance]]) south_cpw = draw.LineString( [[ 0,", "p.cap_gap_ground, p.cap_gap + 2 * p.cap_width + p.finger_length + 2", "chip the capacitor should be on. * layer: '1' --", "- 1) * p.cap_gap, p.cap_gap + 2 * p.cap_width +", "the capacitor * cap_distance: '50um' -- Distance of the north", "CPW transmission line, and a south transmission line, coupled together", "[north_cpw, south_cpw, cap_body, cap_etch] c_items = draw.rotate(c_items, p.orientation, origin=(0, 0))", "2 * p.north_gap, layer=p.layer, subtract=True) self.add_qgeometry('path', {'south_cpw': south_cpw}, width=p.south_width, layer=p.layer)", "This code is part of Qiskit. # # (C) Copyright", "Qiskit. # # (C) Copyright IBM 2017, 2021. # #", "licensed under the Apache License, Version 2.0. You may #", "component_metadata = Dict(short_name='cpw', _qgeometry_table_poly='True', _qgeometry_table_path='True') \"\"\"Component metadata\"\"\" #Currently setting the", "0 degrees is -y, following a counter-clockwise rotation (eg. 90", "be it's own value that the user can control? 
default_options", "cap_cut = draw.LineString(make_cut_list).buffer(p.cap_gap / 2, cap_style=2, join_style=2) cap_cut = draw.translate(cap_cut,", "the capacitor and ground * finger_length: '20um' -- The depth", "- 1) * (p.cap_gap / 2), flip * (p.finger_length) /", "the 'south' portion of the CPW transmission line (also for", "-p.cap_distance - (p.cap_gap + 2 * p.cap_width + p.finger_length) /", "islands) * cap_gap: '6um' -- The width of dielectric for", "the LICENSE.txt file in the root directory # of this", "\"\"\"Default connector options\"\"\" def make(self): \"\"\"Build the component.\"\"\" p =", "cap_cut) cap_body = draw.translate( cap_body, 0, -p.cap_distance - (p.cap_gap +", ":: (0,0) N + ^ | | | | --|-----|--", "- (p.cap_gap + 2 * p.cap_width + p.finger_length) / 2)", "capacitor and ground * finger_length: '20um' -- The depth of", "p.north_gap, layer=p.layer, subtract=True) self.add_qgeometry('path', {'south_cpw': south_cpw}, width=p.south_width, layer=p.layer) self.add_qgeometry('path', {'south_cpw_sub':", "the capacitive coupling/fingers * cap_gap_ground: '6um' -- Width of the", "width=p.north_width + 2 * p.north_gap, layer=p.layer, subtract=True) self.add_qgeometry('path', {'south_cpw': south_cpw},", "from the originals. from qiskit_metal import draw, Dict from qiskit_metal.qlibrary.core", "Version 2.0. You may # obtain a copy of this", "{'cap_body': cap_body}, layer=p.layer) self.add_qgeometry('poly', {'cap_etch': cap_etch}, layer=p.layer, subtract=True) #Add pins", "(N - 1) * p.cap_gap) / 2, 0) cap_body =", "transmission line, and a south transmission line, coupled together via", "+ 2 * p.cap_gap_ground, 0, -p.cap_distance - (p.cap_gap + 2", "(finger_count-1)). Inherits QComponent class. 
:: (0,0) N + ^ |", "[0, -p.cap_distance]]) south_cpw = draw.LineString( [[ 0, -p.cap_distance - (p.cap_gap", "layer=p.layer) self.add_qgeometry('poly', {'cap_etch': cap_etch}, layer=p.layer, subtract=True) #Add pins north_pin_list =", "portion of the CPW transmission line (also for the capacitor", "dielectric gap of the 'north' portion of the CPW transmission", "represents the center position of the component. Setting finger length", "south_cpw}, width=p.south_width + 2 * p.south_gap, layer=p.layer, subtract=True) self.add_qgeometry('poly', {'cap_body':", "self.add_qgeometry('path', {'north_cpw': north_cpw}, width=p.north_width, layer=p.layer) self.add_qgeometry('path', {'north_cpw_sub': north_cpw}, width=p.north_width +", "of this license in the LICENSE.txt file in the root", "+ p.finger_length, 0, 0) make_cut_list = [] make_cut_list.append([0, (p.finger_length) /", "(C) Copyright IBM 2017, 2021. # # This code is", "2 * p.south_gap, layer=p.layer, subtract=True) self.add_qgeometry('poly', {'cap_body': cap_body}, layer=p.layer) self.add_qgeometry('poly',", "np class CapNInterdigital(QComponent): \"\"\"Generates a two pin (+) structure comprised", "_qgeometry_table_path='True') \"\"\"Component metadata\"\"\" #Currently setting the primary CPW length based", "(2 * i + 1) * (p.cap_gap / 2), flip", "* p.cap_gap) / 2, 0) cap_body = draw.subtract(cap_box, cap_cut) cap_body", "of the 'south' portion of the CPW transmission line (also", "| --|-----|-- | | | | | |-----|-----| | |", "* (p.cap_gap / 2), flip * (p.finger_length) / 2 ])", "p.cap_distance - (p.cap_gap + 2 * p.cap_width + p.finger_length) ]])", "cap_distance: '50um' -- Distance of the north point of the", "the user can control? 
default_options = Dict(north_width='10um', north_gap='6um', south_width='10um', south_gap='6um',", "of the 'north' portion of the CPW transmission line *", "| | --|-----|-- | | | | | |-----|-----| |", "= draw.translate( cap_body, 0, -p.cap_distance - (p.cap_gap + 2 *", "#Rotate and Translate c_items = [north_cpw, south_cpw, cap_body, cap_etch] c_items", "0 gives a simple gap capacitor. The width of the", "the capacitor should be on. * layer: '1' -- Layer", "0, 0) make_cut_list = [] make_cut_list.append([0, (p.finger_length) / 2]) make_cut_list.append([(p.cap_width)", "tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative", "chip: 'main' -- The chip the capacitor should be on.", "cap_box = draw.rectangle(N * p.cap_width + (N - 1) *", "-p.cap_distance]]) south_cpw = draw.LineString( [[ 0, -p.cap_distance - (p.cap_gap +", "the CPW transmission line * south_width: '10um' -- The width", "capacitor * cap_distance: '50um' -- Distance of the north point", "of this code must retain this # copyright notice, and", "simple gap capacitor. The width of the gap capacitor is", "portion of the CPW transmission line * south_width: '10um' --", "-1 cap_cut = draw.LineString(make_cut_list).buffer(p.cap_gap / 2, cap_style=2, join_style=2) cap_cut =", "notice, and modified files need to carry a notice indicating", "the coupling_length #May want it to be it's own value", "draw.translate(c_items, p.pos_x, p.pos_y) [north_cpw, south_cpw, cap_body, cap_etch] = c_items #Add", "south_gap: '6um' -- The dielectric gap of the 'south' portion", "License, Version 2.0. 
You may # obtain a copy of", "i - 1) * (p.cap_gap / 2), flip * (p.finger_length)", "of the 'south' portion of the CPW transmission line *", "p.cap_width + p.finger_length, 0, 0) make_cut_list = [] make_cut_list.append([0, (p.finger_length)", "2 ]) flip = flip * -1 cap_cut = draw.LineString(make_cut_list).buffer(p.cap_gap", "gap of the 'north' portion of the CPW transmission line", "obtain a copy of this license in the LICENSE.txt file", "http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this", "rotation: '0' -- The direction of the transmission line. 0", "for the capacitor gap to ground) * cap_width: '10um' --", "/ 2]) make_cut_list.append([(p.cap_width) + (p.cap_gap / 2), (p.finger_length) / 2])", "generating CPW resonators. (0,0) represents the center position of the", "gap of the 'south' portion of the CPW transmission line", "def make(self): \"\"\"Build the component.\"\"\" p = self.p N =", "2.0. You may # obtain a copy of this license", "capacitor. Such a structure can be used, as an example,", "2, 0) cap_body = draw.subtract(cap_box, cap_cut) cap_body = draw.translate( cap_body,", "pos_x/_y: '0um' -- The x/y position of the north pin", "* north_width: '10um' -- The width of the 'north' portion", "that they have been altered from the originals. 
from qiskit_metal", "draw.LineString([[0, 0], [0, -p.cap_distance]]) south_cpw = draw.LineString( [[ 0, -p.cap_distance", "N * p.cap_width + (N - 1) * p.cap_gap +", "#Currently setting the primary CPW length based on the coupling_length", "1) * p.cap_gap) / 2, 0) cap_body = draw.subtract(cap_box, cap_cut)", "= draw.translate(cap_cut, -(N * p.cap_width + (N - 1) *", "based on the coupling_length #May want it to be it's", "from qiskit_metal.qlibrary.core import QComponent import numpy as np class CapNInterdigital(QComponent):", "* cap_gap: '6um' -- The width of dielectric for the", "0)) c_items = draw.translate(c_items, p.pos_x, p.pos_y) [north_cpw, south_cpw, cap_body, cap_etch]", "to 0 gives a simple gap capacitor. The width of", "south_width='10um', south_gap='6um', cap_width='10um', cap_gap='6um', cap_gap_ground='6um', finger_length='20um', finger_count='5', cap_distance='50um', pos_x='0um', pos_y='0um',", "2]) flip = -1 for i in range(1, N): make_cut_list.append([", "-- The x/y position of the north pin * rotation:", "-- Layer the capacitor is on. \"\"\" component_metadata = Dict(short_name='cpw',", "this code must retain this # copyright notice, and modified", "to ground) * cap_width: '10um' -- The width of the", "gap to ground) * cap_width: '10um' -- The width of", "- 1) * p.cap_gap + 2 * p.cap_gap_ground, p.cap_gap +", "# copyright notice, and modified files need to carry a", "comprised of a north CPW transmission line, and a south", "Capacitor cap_box = draw.rectangle(N * p.cap_width + (N - 1)", "options\"\"\" def make(self): \"\"\"Build the component.\"\"\" p = self.p N", "2) #CPW north_cpw = draw.LineString([[0, 0], [0, -p.cap_distance]]) south_cpw =", "This code is licensed under the Apache License, Version 2.0.", "finger_count: '5' -- Number of fingers in the capacitor *", "subtract=True) #Add pins north_pin_list = north_cpw.coords south_pin_list = south_cpw.coords self.add_pin('north_end',", "| | | | --|-----|-- | | | | |", "is part of Qiskit. 
# # (C) Copyright IBM 2017,", "* p.cap_gap_ground, p.cap_gap + 2 * p.cap_width + p.finger_length +", "(also for the capacitor gap to ground) * cap_width: '10um'", "* finger_count + * cap_gap * (finger_count-1)). Inherits QComponent class.", "via a finger capacitor. Such a structure can be used,", "the center position of the component. Setting finger length to", "found via. (cap_width * finger_count + * cap_gap * (finger_count-1)).", "| + Options: * north_width: '10um' -- The width of", "N): make_cut_list.append([ i * (p.cap_width) + (2 * i -", "layer='1') \"\"\"Default connector options\"\"\" def make(self): \"\"\"Build the component.\"\"\" p", "dielectric between the capacitor and ground * finger_length: '20um' --", "for the capacitive coupling/fingers * cap_gap_ground: '6um' -- Width of", "| |-----|-----| | | | | + Options: * north_width:", "+ 2 * p.cap_gap_ground, p.cap_gap + 2 * p.cap_width +", "of dielectric for the capacitive coupling/fingers * cap_gap_ground: '6um' --", "+ 2 * p.cap_width + p.finger_length + 2 * p.cap_gap_ground,", "* cap_distance: '50um' -- Distance of the north point of", "# that they have been altered from the originals. from", "of the transmission line. 0 degrees is -y, following a", "2), flip * (p.finger_length) / 2 ]) flip = flip", "draw.subtract(cap_box, cap_cut) cap_body = draw.translate( cap_body, 0, -p.cap_distance - (p.cap_gap", "north_cpw}, width=p.north_width + 2 * p.north_gap, layer=p.layer, subtract=True) self.add_qgeometry('path', {'south_cpw':", "of the CPW transmission line * south_gap: '6um' -- The", "QComponent class. 
:: (0,0) N + ^ | | |", "The width of the finger capacitor metal (and islands) *", "(N - 1) * p.cap_gap + 2 * p.cap_gap_ground, p.cap_gap", "a south transmission line, coupled together via a finger capacitor.", "of the north point of the capacitor from the north", "ground * finger_length: '20um' -- The depth of the finger", "finger_length: '20um' -- The depth of the finger islands of", "- (p.cap_gap + 2 * p.cap_width + p.finger_length) ], [", "length to 0 gives a simple gap capacitor. The width", "draw, Dict from qiskit_metal.qlibrary.core import QComponent import numpy as np", "= south_cpw.coords self.add_pin('north_end', points=np.array(north_pin_list[::-1]), width=p.north_width, input_as_norm=True) self.add_pin('south_end', points=np.array(south_pin_list), width=p.south_width, input_as_norm=True)", "# This code is part of Qiskit. # # (C)", "center position of the component. Setting finger length to 0", "finger_count + * cap_gap * (finger_count-1)). Inherits QComponent class. ::", "- 1) * p.cap_gap) / 2, 0) cap_body = draw.subtract(cap_box,", "want it to be it's own value that the user", "own value that the user can control? default_options = Dict(north_width='10um',", "p.finger_length) / 2) cap_etch = draw.rectangle( N * p.cap_width +", "capacitor from the north pin * pos_x/_y: '0um' -- The", "structure comprised of a north CPW transmission line, and a", "it to be it's own value that the user can", "a two pin (+) structure comprised of a north CPW", "a counter-clockwise rotation (eg. 
90 is +x) * chip: 'main'", "p.cap_width + (N - 1) * p.cap_gap, p.cap_gap + 2", "0, -2 * p.cap_distance - (p.cap_gap + 2 * p.cap_width", "The x/y position of the north pin * rotation: '0'", "coding: utf-8 -*- # This code is part of Qiskit.", "c_items = draw.translate(c_items, p.pos_x, p.pos_y) [north_cpw, south_cpw, cap_body, cap_etch] =", "the finger islands of the capacitor * finger_count: '5' --", "in the capacitor * cap_distance: '50um' -- Distance of the", "p.cap_gap + 2 * p.cap_width + p.finger_length + 2 *", "#CPW north_cpw = draw.LineString([[0, 0], [0, -p.cap_distance]]) south_cpw = draw.LineString(", "layer=p.layer) self.add_qgeometry('path', {'south_cpw_sub': south_cpw}, width=p.south_width + 2 * p.south_gap, layer=p.layer,", "of Qiskit. # # (C) Copyright IBM 2017, 2021. #", "* (finger_count-1)). Inherits QComponent class. :: (0,0) N + ^", "The direction of the transmission line. 0 degrees is -y,", "should be on. * layer: '1' -- Layer the capacitor", "+ 2 * p.cap_width + p.finger_length) ]]) #Rotate and Translate", "width of the finger capacitor metal (and islands) * cap_gap:", "source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or", "coupling_length #May want it to be it's own value that", "cap_width='10um', cap_gap='6um', cap_gap_ground='6um', finger_length='20um', finger_count='5', cap_distance='50um', pos_x='0um', pos_y='0um', orientation='0', chip='main',", "code must retain this # copyright notice, and modified files", "-p.cap_distance - (p.cap_gap + 2 * p.cap_width + p.finger_length) ],", "-2 * p.cap_distance - (p.cap_gap + 2 * p.cap_width +", "on the coupling_length #May want it to be it's own", "(eg. 
90 is +x) * chip: 'main' -- The chip", "]) make_cut_list.append([ (i + 1) * (p.cap_width) + (2 *", "orientation='0', chip='main', layer='1') \"\"\"Default connector options\"\"\" def make(self): \"\"\"Build the", "capacitive coupling/fingers * cap_gap_ground: '6um' -- Width of the dielectric", "of the gap capacitor is found via. (cap_width * finger_count", "Setting finger length to 0 gives a simple gap capacitor.", "of the CPW transmission line (also for the capacitor gap", "originals. from qiskit_metal import draw, Dict from qiskit_metal.qlibrary.core import QComponent", "#Finger Capacitor cap_box = draw.rectangle(N * p.cap_width + (N -", "cap_etch = draw.rectangle( N * p.cap_width + (N - 1)", "the primary CPW length based on the coupling_length #May want", "two pin (+) structure comprised of a north CPW transmission", "the component. Setting finger length to 0 gives a simple", "# -*- coding: utf-8 -*- # This code is part", "under the Apache License, Version 2.0. You may # obtain", "2 * p.cap_width + p.finger_length) ], [ 0, -2 *", "<filename>qiskit_metal/qlibrary/lumped/cap_n_interdigital.py # -*- coding: utf-8 -*- # This code is", "p.cap_gap, p.cap_gap + 2 * p.cap_width + p.finger_length, 0, 0)", "capacitor metal (and islands) * cap_gap: '6um' -- The width", "* chip: 'main' -- The chip the capacitor should be", "or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works", "'10um' -- The width of the 'north' portion of the", "p.cap_width + p.finger_length) ]]) #Rotate and Translate c_items = [north_cpw,", "of a north CPW transmission line, and a south transmission", "part of Qiskit. # # (C) Copyright IBM 2017, 2021.", "the capacitor is on. 
\"\"\" component_metadata = Dict(short_name='cpw', _qgeometry_table_poly='True', _qgeometry_table_path='True')", "of the CPW transmission line * south_width: '10um' -- The", "2 * p.cap_gap_ground, p.cap_gap + 2 * p.cap_width + p.finger_length", "# This code is licensed under the Apache License, Version", "]]) #Rotate and Translate c_items = [north_cpw, south_cpw, cap_body, cap_etch]", "+ (p.cap_gap / 2), (p.finger_length) / 2]) flip = -1", "Dict from qiskit_metal.qlibrary.core import QComponent import numpy as np class", "gives a simple gap capacitor. The width of the gap", "this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications", "gap capacitor. The width of the gap capacitor is found", "| | + Options: * north_width: '10um' -- The width", "south_cpw}, width=p.south_width, layer=p.layer) self.add_qgeometry('path', {'south_cpw_sub': south_cpw}, width=p.south_width + 2 *", "pin (+) structure comprised of a north CPW transmission line,", "pin * rotation: '0' -- The direction of the transmission", "(p.finger_length) / 2]) flip = -1 for i in range(1,", "line (also for the capacitor gap to ground) * cap_width:", "subtract=True) self.add_qgeometry('poly', {'cap_body': cap_body}, layer=p.layer) self.add_qgeometry('poly', {'cap_etch': cap_etch}, layer=p.layer, subtract=True)", "line, and a south transmission line, coupled together via a", "to qgeometry tables self.add_qgeometry('path', {'north_cpw': north_cpw}, width=p.north_width, layer=p.layer) self.add_qgeometry('path', {'north_cpw_sub':", "(p.cap_gap / 2), flip * (p.finger_length) / 2 ]) make_cut_list.append([", "to carry a notice indicating # that they have been", "make(self): \"\"\"Build the component.\"\"\" p = self.p N = int(p.finger_count)", "cap_body}, layer=p.layer) self.add_qgeometry('poly', {'cap_etch': cap_etch}, layer=p.layer, subtract=True) #Add pins north_pin_list", "a north CPW transmission line, and a south transmission line,", "origin=(0, 0)) c_items = 
draw.translate(c_items, p.pos_x, p.pos_y) [north_cpw, south_cpw, cap_body,", "tables self.add_qgeometry('path', {'north_cpw': north_cpw}, width=p.north_width, layer=p.layer) self.add_qgeometry('path', {'north_cpw_sub': north_cpw}, width=p.north_width", "{'north_cpw_sub': north_cpw}, width=p.north_width + 2 * p.north_gap, layer=p.layer, subtract=True) self.add_qgeometry('path',", "* -1 cap_cut = draw.LineString(make_cut_list).buffer(p.cap_gap / 2, cap_style=2, join_style=2) cap_cut", "* (p.cap_width) + (2 * i + 1) * (p.cap_gap", "+ 2 * p.cap_width + p.finger_length) / 2) #CPW north_cpw", "code is part of Qiskit. # # (C) Copyright IBM", "/ 2 ]) make_cut_list.append([ (i + 1) * (p.cap_width) +", "| | |-----|-----| | | | | + Options: *", "= draw.LineString( [[ 0, -p.cap_distance - (p.cap_gap + 2 *", "line * south_width: '10um' -- The width of the 'south'", "self.add_qgeometry('path', {'south_cpw': south_cpw}, width=p.south_width, layer=p.layer) self.add_qgeometry('path', {'south_cpw_sub': south_cpw}, width=p.south_width +", "of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any", "resonators. (0,0) represents the center position of the component. Setting", "cap_gap_ground='6um', finger_length='20um', finger_count='5', cap_distance='50um', pos_x='0um', pos_y='0um', orientation='0', chip='main', layer='1') \"\"\"Default", "(p.cap_gap / 2), flip * (p.finger_length) / 2 ]) flip", "code is licensed under the Apache License, Version 2.0. 
You", "cap_body, 0, -p.cap_distance - (p.cap_gap + 2 * p.cap_width +", "north_cpw = draw.LineString([[0, 0], [0, -p.cap_distance]]) south_cpw = draw.LineString( [[", "-- The width of dielectric for the capacitive coupling/fingers *", "* p.cap_gap, p.cap_gap + 2 * p.cap_width + p.finger_length, 0,", "+ 2 * p.cap_width + p.finger_length, 0, 0) make_cut_list =", "p = self.p N = int(p.finger_count) #Finger Capacitor cap_box =", "CPW transmission line * north_gap: '6um' -- The dielectric gap", "1) * p.cap_gap + 2 * p.cap_gap_ground, p.cap_gap + 2", "#Add to qgeometry tables self.add_qgeometry('path', {'north_cpw': north_cpw}, width=p.north_width, layer=p.layer) self.add_qgeometry('path',", "p.cap_width + p.finger_length) / 2) cap_etch = draw.rectangle( N *", "copy of this license in the LICENSE.txt file in the", "-- Width of the dielectric between the capacitor and ground", "width of the 'south' portion of the CPW transmission line", "int(p.finger_count) #Finger Capacitor cap_box = draw.rectangle(N * p.cap_width + (N", "make_cut_list.append([(p.cap_width) + (p.cap_gap / 2), (p.finger_length) / 2]) flip =", "p.cap_width + p.finger_length) / 2) #CPW north_cpw = draw.LineString([[0, 0],", "width=p.north_width, layer=p.layer) self.add_qgeometry('path', {'north_cpw_sub': north_cpw}, width=p.north_width + 2 * p.north_gap,", "south_cpw = draw.LineString( [[ 0, -p.cap_distance - (p.cap_gap + 2", "transmission line * south_width: '10um' -- The width of the", "[[ 0, -p.cap_distance - (p.cap_gap + 2 * p.cap_width +", "self.add_qgeometry('poly', {'cap_etch': cap_etch}, layer=p.layer, subtract=True) #Add pins north_pin_list = north_cpw.coords", "in the LICENSE.txt file in the root directory # of", "2) cap_etch = draw.rectangle( N * p.cap_width + (N -", "The width of the 'north' portion of the CPW transmission", "'6um' -- The width of dielectric for the capacitive coupling/fingers", "be on. 
* layer: '1' -- Layer the capacitor is", "(N - 1) * p.cap_gap, p.cap_gap + 2 * p.cap_width", "Dict(short_name='cpw', _qgeometry_table_poly='True', _qgeometry_table_path='True') \"\"\"Component metadata\"\"\" #Currently setting the primary CPW", "# Any modifications or derivative works of this code must", "finger_count='5', cap_distance='50um', pos_x='0um', pos_y='0um', orientation='0', chip='main', layer='1') \"\"\"Default connector options\"\"\"", "this # copyright notice, and modified files need to carry", "2 * p.cap_width + p.finger_length + 2 * p.cap_gap_ground, 0,", "+x) * chip: 'main' -- The chip the capacitor should", "qiskit_metal.qlibrary.core import QComponent import numpy as np class CapNInterdigital(QComponent): \"\"\"Generates", "-- The depth of the finger islands of the capacitor", "width=p.south_width, layer=p.layer) self.add_qgeometry('path', {'south_cpw_sub': south_cpw}, width=p.south_width + 2 * p.south_gap,", "transmission line, coupled together via a finger capacitor. Such a", "* p.cap_width + p.finger_length + 2 * p.cap_gap_ground, 0, -p.cap_distance", "{'north_cpw': north_cpw}, width=p.north_width, layer=p.layer) self.add_qgeometry('path', {'north_cpw_sub': north_cpw}, width=p.north_width + 2", "/ 2) #CPW north_cpw = draw.LineString([[0, 0], [0, -p.cap_distance]]) south_cpw", "on. \"\"\" component_metadata = Dict(short_name='cpw', _qgeometry_table_poly='True', _qgeometry_table_path='True') \"\"\"Component metadata\"\"\" #Currently", "component. Setting finger length to 0 gives a simple gap", "* (p.finger_length) / 2 ]) flip = flip * -1", "+ p.finger_length) / 2) cap_etch = draw.rectangle( N * p.cap_width", "-- The width of the finger capacitor metal (and islands)", "the 'north' portion of the CPW transmission line * south_width:", "cap_style=2, join_style=2) cap_cut = draw.translate(cap_cut, -(N * p.cap_width + (N", "* p.cap_width + p.finger_length) ]]) #Rotate and Translate c_items =" ]
[ "full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry.key', index=0, number=1,", "full_name='services.feature.actions.get_flags.RequestV1', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ ], extensions=[ ], nested_types=[], enum_types=[", "protobufs/services/feature/actions/get_flags.proto from google.protobuf import descriptor as _descriptor from google.protobuf import", "import message as _message from google.protobuf import reflection as _reflection", "dict( DESCRIPTOR = _REQUESTV1, __module__ = 'protobufs.services.feature.actions.get_flags_pb2' # @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.RequestV1) ))", "number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False,", "@@protoc_insertion_point(class_scope:services.feature.actions.get_flags.ResponseV1) )) _sym_db.RegisterMessage(ResponseV1) _sym_db.RegisterMessage(ResponseV1.FlagsEntry) _RESPONSEV1_FLAGSENTRY.has_options = True _RESPONSEV1_FLAGSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(),", "compiler. DO NOT EDIT! 
# source: protobufs/services/feature/actions/get_flags.proto from google.protobuf import", "# @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.ResponseV1) )) _sym_db.RegisterMessage(ResponseV1) _sym_db.RegisterMessage(ResponseV1.FlagsEntry) _RESPONSEV1_FLAGSENTRY.has_options = True _RESPONSEV1_FLAGSENTRY._options =", "_reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import", "is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[_RESPONSEV1_FLAGSENTRY, ], enum_types=[ ],", "dict( DESCRIPTOR = _RESPONSEV1_FLAGSENTRY, __module__ = 'protobufs.services.feature.actions.get_flags_pb2' # @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.ResponseV1.FlagsEntry) ))", "serialized_end=101, ) _RESPONSEV1_FLAGSENTRY = _descriptor.Descriptor( name='FlagsEntry', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry', filename=None, file=DESCRIPTOR, containing_type=None,", "number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b\"\".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False,", "_message from google.protobuf import reflection as _reflection from google.protobuf import", "message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='value', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry.value', index=1,", "full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry.value', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None,", "_RESPONSEV1_FLAGSENTRY DESCRIPTOR.message_types_by_name['RequestV1'] = _REQUESTV1 DESCRIPTOR.message_types_by_name['ResponseV1'] = _RESPONSEV1 RequestV1 = _reflection.GeneratedProtocolMessageType('RequestV1',", "DESCRIPTOR.message_types_by_name['RequestV1'] 
= _REQUESTV1 DESCRIPTOR.message_types_by_name['ResponseV1'] = _RESPONSEV1 RequestV1 = _reflection.GeneratedProtocolMessageType('RequestV1', (_message.Message,),", "name='key', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b\"\".decode('utf-8'), message_type=None,", "containing_type=None, fields=[ _descriptor.FieldDescriptor( name='flags', full_name='services.feature.actions.get_flags.ResponseV1.flags', index=0, number=1, type=11, cpp_type=10, label=3,", "reflection as _reflection from google.protobuf import symbol_database as _symbol_database from", "@@protoc_insertion_point(class_scope:services.feature.actions.get_flags.ResponseV1.FlagsEntry) )) , DESCRIPTOR = _RESPONSEV1, __module__ = 'protobufs.services.feature.actions.get_flags_pb2' #", "@@protoc_insertion_point(class_scope:services.feature.actions.get_flags.RequestV1) )) _sym_db.RegisterMessage(RequestV1) ResponseV1 = _reflection.GeneratedProtocolMessageType('ResponseV1', (_message.Message,), dict( FlagsEntry =", "is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=192, serialized_end=236, ) _RESPONSEV1 =", "_RESPONSEV1_FLAGSENTRY, __module__ = 'protobufs.services.feature.actions.get_flags_pb2' # @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.ResponseV1.FlagsEntry) )) , DESCRIPTOR =", "\\x01(\\x08:\\x02\\x38\\x01\\x62\\x06proto3' ) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _REQUESTV1 = _descriptor.Descriptor( name='RequestV1', full_name='services.feature.actions.get_flags.RequestV1', filename=None, file=DESCRIPTOR,", "= _descriptor.Descriptor( name='ResponseV1', full_name='services.feature.actions.get_flags.ResponseV1', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='flags',", "full_name='services.feature.actions.get_flags.ResponseV1.flags', index=0, number=1, type=11, cpp_type=10, 
label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None,", "= _RESPONSEV1_FLAGSENTRY DESCRIPTOR.message_types_by_name['RequestV1'] = _REQUESTV1 DESCRIPTOR.message_types_by_name['ResponseV1'] = _RESPONSEV1 RequestV1 =", "= _REQUESTV1 DESCRIPTOR.message_types_by_name['ResponseV1'] = _RESPONSEV1 RequestV1 = _reflection.GeneratedProtocolMessageType('RequestV1', (_message.Message,), dict(", "__module__ = 'protobufs.services.feature.actions.get_flags_pb2' # @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.ResponseV1.FlagsEntry) )) , DESCRIPTOR = _RESPONSEV1,", "DO NOT EDIT! # source: protobufs/services/feature/actions/get_flags.proto from google.protobuf import descriptor", "# @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='protobufs/services/feature/actions/get_flags.proto', package='services.feature.actions.get_flags',", "__module__ = 'protobufs.services.feature.actions.get_flags_pb2' # @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.ResponseV1) )) _sym_db.RegisterMessage(ResponseV1) _sym_db.RegisterMessage(ResponseV1.FlagsEntry) _RESPONSEV1_FLAGSENTRY.has_options =", "# @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.RequestV1) )) _sym_db.RegisterMessage(RequestV1) ResponseV1 = _reflection.GeneratedProtocolMessageType('ResponseV1', (_message.Message,), dict( FlagsEntry", "_descriptor.Descriptor( name='FlagsEntry', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry.key',", "\\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\x08:\\x02\\x38\\x01\\x62\\x06proto3' ) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _REQUESTV1 = _descriptor.Descriptor( name='RequestV1', full_name='services.feature.actions.get_flags.RequestV1', 
filename=None,", "fields=[ _descriptor.FieldDescriptor( name='key', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False,", "type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None,", "by the protocol buffer compiler. DO NOT EDIT! # source:", "oneofs=[ ], serialized_start=104, serialized_end=236, ) _RESPONSEV1_FLAGSENTRY.containing_type = _RESPONSEV1 _RESPONSEV1.fields_by_name['flags'].message_type =", "serialized_pb=b'\\n2protobufs/services/feature/actions/get_flags.proto\\x12\\\"services.feature.actions.get_flags\\\"\\x0b\\n\\tRequestV1\\\"\\x84\\x01\\n\\nResponseV1\\x12H\\n\\x05\\x66lags\\x18\\x01 \\x03(\\x0b\\x32\\x39.services.feature.actions.get_flags.ResponseV1.FlagsEntry\\x1a,\\n\\nFlagsEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\x08:\\x02\\x38\\x01\\x62\\x06proto3' ) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _REQUESTV1 = _descriptor.Descriptor( name='RequestV1',", "extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[", "has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[", "], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=90, serialized_end=101, )", "_descriptor.FileDescriptor( name='protobufs/services/feature/actions/get_flags.proto', package='services.feature.actions.get_flags', syntax='proto3', serialized_pb=b'\\n2protobufs/services/feature/actions/get_flags.proto\\x12\\\"services.feature.actions.get_flags\\\"\\x0b\\n\\tRequestV1\\\"\\x84\\x01\\n\\nResponseV1\\x12H\\n\\x05\\x66lags\\x18\\x01 
\\x03(\\x0b\\x32\\x39.services.feature.actions.get_flags.ResponseV1.FlagsEntry\\x1a,\\n\\nFlagsEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\x08:\\x02\\x38\\x01\\x62\\x06proto3' ) _sym_db.RegisterFileDescriptor(DESCRIPTOR)", "_symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='protobufs/services/feature/actions/get_flags.proto', package='services.feature.actions.get_flags', syntax='proto3', serialized_pb=b'\\n2protobufs/services/feature/actions/get_flags.proto\\x12\\\"services.feature.actions.get_flags\\\"\\x0b\\n\\tRequestV1\\\"\\x84\\x01\\n\\nResponseV1\\x12H\\n\\x05\\x66lags\\x18\\x01 \\x03(\\x0b\\x32\\x39.services.feature.actions.get_flags.ResponseV1.FlagsEntry\\x1a,\\n\\nFlagsEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02", "DESCRIPTOR = _REQUESTV1, __module__ = 'protobufs.services.feature.actions.get_flags_pb2' # @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.RequestV1) )) _sym_db.RegisterMessage(RequestV1)", "name='RequestV1', full_name='services.feature.actions.get_flags.RequestV1', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ ], extensions=[ ], nested_types=[],", "name='ResponseV1', full_name='services.feature.actions.get_flags.ResponseV1', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='flags', full_name='services.feature.actions.get_flags.ResponseV1.flags', index=0,", "], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\\001'), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=192, serialized_end=236,", "index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None,", "_descriptor.FieldDescriptor( name='value', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry.value', index=1, number=2, type=8, cpp_type=7, 
label=1, has_default_value=False, default_value=False,", "import reflection as _reflection from google.protobuf import symbol_database as _symbol_database", "as _descriptor from google.protobuf import message as _message from google.protobuf", "name='FlagsEntry', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry.key', index=0,", "name='value', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry.value', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None,", "google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 #", "fields=[ ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3',", "extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\\001'), is_extendable=False, syntax='proto3', extension_ranges=[],", "as _message from google.protobuf import reflection as _reflection from google.protobuf", "label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ],", "'protobufs.services.feature.actions.get_flags_pb2' # @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.ResponseV1.FlagsEntry) )) , DESCRIPTOR = _RESPONSEV1, __module__ =", "from google.protobuf import message as _message from google.protobuf import reflection", "_sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='protobufs/services/feature/actions/get_flags.proto', package='services.feature.actions.get_flags', syntax='proto3', 
serialized_pb=b'\\n2protobufs/services/feature/actions/get_flags.proto\\x12\\\"services.feature.actions.get_flags\\\"\\x0b\\n\\tRequestV1\\\"\\x84\\x01\\n\\nResponseV1\\x12H\\n\\x05\\x66lags\\x18\\x01", "_RESPONSEV1 _RESPONSEV1.fields_by_name['flags'].message_type = _RESPONSEV1_FLAGSENTRY DESCRIPTOR.message_types_by_name['RequestV1'] = _REQUESTV1 DESCRIPTOR.message_types_by_name['ResponseV1'] = _RESPONSEV1", "= _REQUESTV1, __module__ = 'protobufs.services.feature.actions.get_flags_pb2' # @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.RequestV1) )) _sym_db.RegisterMessage(RequestV1) ResponseV1", "source: protobufs/services/feature/actions/get_flags.proto from google.protobuf import descriptor as _descriptor from google.protobuf", "= _RESPONSEV1, __module__ = 'protobufs.services.feature.actions.get_flags_pb2' # @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.ResponseV1) )) _sym_db.RegisterMessage(ResponseV1) _sym_db.RegisterMessage(ResponseV1.FlagsEntry)", "# Generated by the protocol buffer compiler. 
DO NOT EDIT!", "extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\\001'),", "], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=104,", "extension_ranges=[], oneofs=[ ], serialized_start=104, serialized_end=236, ) _RESPONSEV1_FLAGSENTRY.containing_type = _RESPONSEV1 _RESPONSEV1.fields_by_name['flags'].message_type", ")) _sym_db.RegisterMessage(RequestV1) ResponseV1 = _reflection.GeneratedProtocolMessageType('ResponseV1', (_message.Message,), dict( FlagsEntry = _reflection.GeneratedProtocolMessageType('FlagsEntry',", "serialized_end=236, ) _RESPONSEV1_FLAGSENTRY.containing_type = _RESPONSEV1 _RESPONSEV1.fields_by_name['flags'].message_type = _RESPONSEV1_FLAGSENTRY DESCRIPTOR.message_types_by_name['RequestV1'] =", "options=None), ], extensions=[ ], nested_types=[_RESPONSEV1_FLAGSENTRY, ], enum_types=[ ], options=None, is_extendable=False,", "package='services.feature.actions.get_flags', syntax='proto3', serialized_pb=b'\\n2protobufs/services/feature/actions/get_flags.proto\\x12\\\"services.feature.actions.get_flags\\\"\\x0b\\n\\tRequestV1\\\"\\x84\\x01\\n\\nResponseV1\\x12H\\n\\x05\\x66lags\\x18\\x01 \\x03(\\x0b\\x32\\x39.services.feature.actions.get_flags.ResponseV1.FlagsEntry\\x1a,\\n\\nFlagsEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\x08:\\x02\\x38\\x01\\x62\\x06proto3' ) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _REQUESTV1 =", "is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(),", "name='flags', full_name='services.feature.actions.get_flags.ResponseV1.flags', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None,", "'protobufs.services.feature.actions.get_flags_pb2' # 
@@protoc_insertion_point(class_scope:services.feature.actions.get_flags.RequestV1) )) _sym_db.RegisterMessage(RequestV1) ResponseV1 = _reflection.GeneratedProtocolMessageType('ResponseV1', (_message.Message,), dict(", "oneofs=[ ], serialized_start=90, serialized_end=101, ) _RESPONSEV1_FLAGSENTRY = _descriptor.Descriptor( name='FlagsEntry', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry',", ", DESCRIPTOR = _RESPONSEV1, __module__ = 'protobufs.services.feature.actions.get_flags_pb2' # @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.ResponseV1) ))", "enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=90, serialized_end=101,", "], serialized_start=192, serialized_end=236, ) _RESPONSEV1 = _descriptor.Descriptor( name='ResponseV1', full_name='services.feature.actions.get_flags.ResponseV1', filename=None,", "message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[_RESPONSEV1_FLAGSENTRY,", "google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR =", "enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\\001'), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=192,", "options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=104, serialized_end=236, ) _RESPONSEV1_FLAGSENTRY.containing_type", "index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b\"\".decode('utf-8'), message_type=None, enum_type=None, containing_type=None,", "import descriptor as _descriptor from google.protobuf import message as _message", "extension_scope=None, options=None), _descriptor.FieldDescriptor( name='value', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry.value', 
index=1, number=2, type=8, cpp_type=7, label=1,", "dict( FlagsEntry = _reflection.GeneratedProtocolMessageType('FlagsEntry', (_message.Message,), dict( DESCRIPTOR = _RESPONSEV1_FLAGSENTRY, __module__", "_REQUESTV1 = _descriptor.Descriptor( name='RequestV1', full_name='services.feature.actions.get_flags.RequestV1', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ ],", "= _reflection.GeneratedProtocolMessageType('RequestV1', (_message.Message,), dict( DESCRIPTOR = _REQUESTV1, __module__ = 'protobufs.services.feature.actions.get_flags_pb2'", "_reflection.GeneratedProtocolMessageType('ResponseV1', (_message.Message,), dict( FlagsEntry = _reflection.GeneratedProtocolMessageType('FlagsEntry', (_message.Message,), dict( DESCRIPTOR =", "buffer compiler. DO NOT EDIT! # source: protobufs/services/feature/actions/get_flags.proto from google.protobuf", "DESCRIPTOR.message_types_by_name['ResponseV1'] = _RESPONSEV1 RequestV1 = _reflection.GeneratedProtocolMessageType('RequestV1', (_message.Message,), dict( DESCRIPTOR =", "Generated by the protocol buffer compiler. DO NOT EDIT! 
#", "options=None), _descriptor.FieldDescriptor( name='value', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry.value', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False,", "__module__ = 'protobufs.services.feature.actions.get_flags_pb2' # @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.RequestV1) )) _sym_db.RegisterMessage(RequestV1) ResponseV1 = _reflection.GeneratedProtocolMessageType('ResponseV1',", "options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\\001'), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=192, serialized_end=236, )", "message as _message from google.protobuf import reflection as _reflection from", "oneofs=[ ], serialized_start=192, serialized_end=236, ) _RESPONSEV1 = _descriptor.Descriptor( name='ResponseV1', full_name='services.feature.actions.get_flags.ResponseV1',", "RequestV1 = _reflection.GeneratedProtocolMessageType('RequestV1', (_message.Message,), dict( DESCRIPTOR = _REQUESTV1, __module__ =", "b'8\\001'), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=192, serialized_end=236, ) _RESPONSEV1", "options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=90, serialized_end=101, ) _RESPONSEV1_FLAGSENTRY", "EDIT! 
# source: protobufs/services/feature/actions/get_flags.proto from google.protobuf import descriptor as _descriptor", "_reflection.GeneratedProtocolMessageType('RequestV1', (_message.Message,), dict( DESCRIPTOR = _REQUESTV1, __module__ = 'protobufs.services.feature.actions.get_flags_pb2' #", "has_default_value=False, default_value=b\"\".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='value',", "index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None,", ")) , DESCRIPTOR = _RESPONSEV1, __module__ = 'protobufs.services.feature.actions.get_flags_pb2' # @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.ResponseV1)", "as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db =", ") _RESPONSEV1_FLAGSENTRY = _descriptor.Descriptor( name='FlagsEntry', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[", "serialized_start=90, serialized_end=101, ) _RESPONSEV1_FLAGSENTRY = _descriptor.Descriptor( name='FlagsEntry', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry', filename=None, file=DESCRIPTOR,", "default_value=b\"\".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='value', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry.value',", "_reflection.GeneratedProtocolMessageType('FlagsEntry', (_message.Message,), dict( DESCRIPTOR = _RESPONSEV1_FLAGSENTRY, __module__ = 'protobufs.services.feature.actions.get_flags_pb2' #", ")) _sym_db.RegisterMessage(ResponseV1) _sym_db.RegisterMessage(ResponseV1.FlagsEntry) _RESPONSEV1_FLAGSENTRY.has_options = True 
_RESPONSEV1_FLAGSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\\001')", "name='protobufs/services/feature/actions/get_flags.proto', package='services.feature.actions.get_flags', syntax='proto3', serialized_pb=b'\\n2protobufs/services/feature/actions/get_flags.proto\\x12\\\"services.feature.actions.get_flags\\\"\\x0b\\n\\tRequestV1\\\"\\x84\\x01\\n\\nResponseV1\\x12H\\n\\x05\\x66lags\\x18\\x01 \\x03(\\x0b\\x32\\x39.services.feature.actions.get_flags.ResponseV1.FlagsEntry\\x1a,\\n\\nFlagsEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\x08:\\x02\\x38\\x01\\x62\\x06proto3' ) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _REQUESTV1", "DESCRIPTOR = _RESPONSEV1_FLAGSENTRY, __module__ = 'protobufs.services.feature.actions.get_flags_pb2' # @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.ResponseV1.FlagsEntry) )) ,", "descriptor as _descriptor from google.protobuf import message as _message from", "_sym_db.RegisterFileDescriptor(DESCRIPTOR) _REQUESTV1 = _descriptor.Descriptor( name='RequestV1', full_name='services.feature.actions.get_flags.RequestV1', filename=None, file=DESCRIPTOR, containing_type=None, fields=[", "'protobufs.services.feature.actions.get_flags_pb2' # @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.ResponseV1) )) _sym_db.RegisterMessage(ResponseV1) _sym_db.RegisterMessage(ResponseV1.FlagsEntry) _RESPONSEV1_FLAGSENTRY.has_options = True _RESPONSEV1_FLAGSENTRY._options", "_RESPONSEV1 RequestV1 = _reflection.GeneratedProtocolMessageType('RequestV1', (_message.Message,), dict( DESCRIPTOR = _REQUESTV1, __module__", "google.protobuf import descriptor as _descriptor from google.protobuf import message as", "default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ],", "default_value=[], message_type=None, enum_type=None, containing_type=None, 
is_extension=False, extension_scope=None, options=None), ], extensions=[ ],", "_RESPONSEV1 = _descriptor.Descriptor( name='ResponseV1', full_name='services.feature.actions.get_flags.ResponseV1', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor(", "_RESPONSEV1_FLAGSENTRY.containing_type = _RESPONSEV1 _RESPONSEV1.fields_by_name['flags'].message_type = _RESPONSEV1_FLAGSENTRY DESCRIPTOR.message_types_by_name['RequestV1'] = _REQUESTV1 DESCRIPTOR.message_types_by_name['ResponseV1']", "label=1, has_default_value=False, default_value=b\"\".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor(", "containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='value', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry.value', index=1, number=2, type=8,", "enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[", "_descriptor from google.protobuf import message as _message from google.protobuf import", "syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=192, serialized_end=236, ) _RESPONSEV1 = _descriptor.Descriptor(", "], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=104, serialized_end=236, )", "containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ],", "file=DESCRIPTOR, containing_type=None, fields=[ ], extensions=[ ], nested_types=[], enum_types=[ ], options=None,", "the protocol buffer compiler. DO NOT EDIT! 
# source: protobufs/services/feature/actions/get_flags.proto", "_RESPONSEV1.fields_by_name['flags'].message_type = _RESPONSEV1_FLAGSENTRY DESCRIPTOR.message_types_by_name['RequestV1'] = _REQUESTV1 DESCRIPTOR.message_types_by_name['ResponseV1'] = _RESPONSEV1 RequestV1", "(_message.Message,), dict( DESCRIPTOR = _REQUESTV1, __module__ = 'protobufs.services.feature.actions.get_flags_pb2' # @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.RequestV1)", "as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf", "from google.protobuf import reflection as _reflection from google.protobuf import symbol_database", "_RESPONSEV1, __module__ = 'protobufs.services.feature.actions.get_flags_pb2' # @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.ResponseV1) )) _sym_db.RegisterMessage(ResponseV1) _sym_db.RegisterMessage(ResponseV1.FlagsEntry) _RESPONSEV1_FLAGSENTRY.has_options", "_REQUESTV1 DESCRIPTOR.message_types_by_name['ResponseV1'] = _RESPONSEV1 RequestV1 = _reflection.GeneratedProtocolMessageType('RequestV1', (_message.Message,), dict( DESCRIPTOR", "google.protobuf import reflection as _reflection from google.protobuf import symbol_database as", "nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=90,", "], serialized_start=90, serialized_end=101, ) _RESPONSEV1_FLAGSENTRY = _descriptor.Descriptor( name='FlagsEntry', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry', filename=None,", "filename=None, file=DESCRIPTOR, containing_type=None, fields=[ ], extensions=[ ], nested_types=[], enum_types=[ ],", "containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1,", "syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=90, serialized_end=101, ) 
_RESPONSEV1_FLAGSENTRY = _descriptor.Descriptor(", "containing_type=None, fields=[ ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False,", "FlagsEntry = _reflection.GeneratedProtocolMessageType('FlagsEntry', (_message.Message,), dict( DESCRIPTOR = _RESPONSEV1_FLAGSENTRY, __module__ =", "DESCRIPTOR = _RESPONSEV1, __module__ = 'protobufs.services.feature.actions.get_flags_pb2' # @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.ResponseV1) )) _sym_db.RegisterMessage(ResponseV1)", "syntax='proto3', serialized_pb=b'\\n2protobufs/services/feature/actions/get_flags.proto\\x12\\\"services.feature.actions.get_flags\\\"\\x0b\\n\\tRequestV1\\\"\\x84\\x01\\n\\nResponseV1\\x12H\\n\\x05\\x66lags\\x18\\x01 \\x03(\\x0b\\x32\\x39.services.feature.actions.get_flags.ResponseV1.FlagsEntry\\x1a,\\n\\nFlagsEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\x08:\\x02\\x38\\x01\\x62\\x06proto3' ) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _REQUESTV1 = _descriptor.Descriptor(", "(_message.Message,), dict( DESCRIPTOR = _RESPONSEV1_FLAGSENTRY, __module__ = 'protobufs.services.feature.actions.get_flags_pb2' # @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.ResponseV1.FlagsEntry)", "from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2", "], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ],", "_sym_db.RegisterMessage(ResponseV1.FlagsEntry) _RESPONSEV1_FLAGSENTRY.has_options = True _RESPONSEV1_FLAGSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\\001') # @@protoc_insertion_point(module_scope)", "number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False,", "(_message.Message,), dict( FlagsEntry = 
_reflection.GeneratedProtocolMessageType('FlagsEntry', (_message.Message,), dict( DESCRIPTOR = _RESPONSEV1_FLAGSENTRY,", "_descriptor.FieldDescriptor( name='key', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b\"\".decode('utf-8'),", "options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\\001'), is_extendable=False,", "_descriptor.FieldDescriptor( name='flags', full_name='services.feature.actions.get_flags.ResponseV1.flags', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[],", "= _descriptor.FileDescriptor( name='protobufs/services/feature/actions/get_flags.proto', package='services.feature.actions.get_flags', syntax='proto3', serialized_pb=b'\\n2protobufs/services/feature/actions/get_flags.proto\\x12\\\"services.feature.actions.get_flags\\\"\\x0b\\n\\tRequestV1\\\"\\x84\\x01\\n\\nResponseV1\\x12H\\n\\x05\\x66lags\\x18\\x01 \\x03(\\x0b\\x32\\x39.services.feature.actions.get_flags.ResponseV1.FlagsEntry\\x1a,\\n\\nFlagsEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\x08:\\x02\\x38\\x01\\x62\\x06proto3' )", "filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry.key', index=0, number=1, type=9,", "= _RESPONSEV1 _RESPONSEV1.fields_by_name['flags'].message_type = _RESPONSEV1_FLAGSENTRY DESCRIPTOR.message_types_by_name['RequestV1'] = _REQUESTV1 DESCRIPTOR.message_types_by_name['ResponseV1'] =", "extensions=[ ], nested_types=[_RESPONSEV1_FLAGSENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[],", "= 'protobufs.services.feature.actions.get_flags_pb2' # 
@@protoc_insertion_point(class_scope:services.feature.actions.get_flags.ResponseV1.FlagsEntry) )) , DESCRIPTOR = _RESPONSEV1, __module__", "= _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='protobufs/services/feature/actions/get_flags.proto', package='services.feature.actions.get_flags', syntax='proto3', serialized_pb=b'\\n2protobufs/services/feature/actions/get_flags.proto\\x12\\\"services.feature.actions.get_flags\\\"\\x0b\\n\\tRequestV1\\\"\\x84\\x01\\n\\nResponseV1\\x12H\\n\\x05\\x66lags\\x18\\x01 \\x03(\\x0b\\x32\\x39.services.feature.actions.get_flags.ResponseV1.FlagsEntry\\x1a,\\n\\nFlagsEntry\\x12\\x0b\\n\\x03key\\x18\\x01", "descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='protobufs/services/feature/actions/get_flags.proto',", "file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry.key', index=0, number=1, type=9, cpp_type=9,", "extension_scope=None, options=None), ], extensions=[ ], nested_types=[_RESPONSEV1_FLAGSENTRY, ], enum_types=[ ], options=None,", "\\x03(\\x0b\\x32\\x39.services.feature.actions.get_flags.ResponseV1.FlagsEntry\\x1a,\\n\\nFlagsEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\x08:\\x02\\x38\\x01\\x62\\x06proto3' ) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _REQUESTV1 = _descriptor.Descriptor( name='RequestV1', full_name='services.feature.actions.get_flags.RequestV1',", ") _RESPONSEV1 = _descriptor.Descriptor( name='ResponseV1', full_name='services.feature.actions.get_flags.ResponseV1', filename=None, file=DESCRIPTOR, containing_type=None, fields=[", "_REQUESTV1, __module__ = 'protobufs.services.feature.actions.get_flags_pb2' # @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.RequestV1) )) _sym_db.RegisterMessage(RequestV1) ResponseV1 =", "has_default_value=False, 
default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[", "enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=104, serialized_end=236,", "nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\\001'), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ],", "import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor(", "= _descriptor.Descriptor( name='RequestV1', full_name='services.feature.actions.get_flags.RequestV1', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ ], extensions=[", "extension_ranges=[], oneofs=[ ], serialized_start=192, serialized_end=236, ) _RESPONSEV1 = _descriptor.Descriptor( name='ResponseV1',", "= _RESPONSEV1 RequestV1 = _reflection.GeneratedProtocolMessageType('RequestV1', (_message.Message,), dict( DESCRIPTOR = _REQUESTV1,", "symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db", "is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=90, serialized_end=101, ) _RESPONSEV1_FLAGSENTRY =", "full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b\"\".decode('utf-8'), message_type=None, enum_type=None,", "_descriptor.Descriptor( name='RequestV1', full_name='services.feature.actions.get_flags.RequestV1', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ ], extensions=[ ],", "message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[],", ") _RESPONSEV1_FLAGSENTRY.containing_type = _RESPONSEV1 
_RESPONSEV1.fields_by_name['flags'].message_type = _RESPONSEV1_FLAGSENTRY DESCRIPTOR.message_types_by_name['RequestV1'] = _REQUESTV1", "extension_ranges=[], oneofs=[ ], serialized_start=90, serialized_end=101, ) _RESPONSEV1_FLAGSENTRY = _descriptor.Descriptor( name='FlagsEntry',", "protocol buffer compiler. DO NOT EDIT! # source: protobufs/services/feature/actions/get_flags.proto from", "= _reflection.GeneratedProtocolMessageType('FlagsEntry', (_message.Message,), dict( DESCRIPTOR = _RESPONSEV1_FLAGSENTRY, __module__ = 'protobufs.services.feature.actions.get_flags_pb2'", "NOT EDIT! # source: protobufs/services/feature/actions/get_flags.proto from google.protobuf import descriptor as", "filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='flags', full_name='services.feature.actions.get_flags.ResponseV1.flags', index=0, number=1, type=11,", "containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[_RESPONSEV1_FLAGSENTRY, ], enum_types=[", "= _descriptor.Descriptor( name='FlagsEntry', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key',", "type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None,", "], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\\001'), is_extendable=False, syntax='proto3',", "serialized_start=104, serialized_end=236, ) _RESPONSEV1_FLAGSENTRY.containing_type = _RESPONSEV1 _RESPONSEV1.fields_by_name['flags'].message_type = _RESPONSEV1_FLAGSENTRY DESCRIPTOR.message_types_by_name['RequestV1']", "full_name='services.feature.actions.get_flags.ResponseV1', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( 
name='flags', full_name='services.feature.actions.get_flags.ResponseV1.flags', index=0, number=1,", "_descriptor.Descriptor( name='ResponseV1', full_name='services.feature.actions.get_flags.ResponseV1', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='flags', full_name='services.feature.actions.get_flags.ResponseV1.flags',", "cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None),", "nested_types=[_RESPONSEV1_FLAGSENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ],", "], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\\001'), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[", "from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR", "= 'protobufs.services.feature.actions.get_flags_pb2' # @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.ResponseV1) )) _sym_db.RegisterMessage(ResponseV1) _sym_db.RegisterMessage(ResponseV1.FlagsEntry) _RESPONSEV1_FLAGSENTRY.has_options = True", "import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports)", "type=9, cpp_type=9, label=1, has_default_value=False, default_value=b\"\".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None,", "@@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='protobufs/services/feature/actions/get_flags.proto', package='services.feature.actions.get_flags', syntax='proto3',", "label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
options=None), ],", "syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=104, serialized_end=236, ) _RESPONSEV1_FLAGSENTRY.containing_type = _RESPONSEV1", "is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='value', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry.value', index=1, number=2, type=8, cpp_type=7,", "# source: protobufs/services/feature/actions/get_flags.proto from google.protobuf import descriptor as _descriptor from", "], nested_types=[_RESPONSEV1_FLAGSENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[", "], serialized_start=104, serialized_end=236, ) _RESPONSEV1_FLAGSENTRY.containing_type = _RESPONSEV1 _RESPONSEV1.fields_by_name['flags'].message_type = _RESPONSEV1_FLAGSENTRY", "cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None),", "_symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default()", "DESCRIPTOR = _descriptor.FileDescriptor( name='protobufs/services/feature/actions/get_flags.proto', package='services.feature.actions.get_flags', syntax='proto3', serialized_pb=b'\\n2protobufs/services/feature/actions/get_flags.proto\\x12\\\"services.feature.actions.get_flags\\\"\\x0b\\n\\tRequestV1\\\"\\x84\\x01\\n\\nResponseV1\\x12H\\n\\x05\\x66lags\\x18\\x01 \\x03(\\x0b\\x32\\x39.services.feature.actions.get_flags.ResponseV1.FlagsEntry\\x1a,\\n\\nFlagsEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\x08:\\x02\\x38\\x01\\x62\\x06proto3'", ") _sym_db.RegisterFileDescriptor(DESCRIPTOR) _REQUESTV1 = _descriptor.Descriptor( name='RequestV1', full_name='services.feature.actions.get_flags.RequestV1', filename=None, file=DESCRIPTOR, containing_type=None,", "], extensions=[ ], nested_types=[], 
enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[],", "enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[_RESPONSEV1_FLAGSENTRY, ],", "google.protobuf import message as _message from google.protobuf import reflection as", "_sym_db.RegisterMessage(RequestV1) ResponseV1 = _reflection.GeneratedProtocolMessageType('ResponseV1', (_message.Message,), dict( FlagsEntry = _reflection.GeneratedProtocolMessageType('FlagsEntry', (_message.Message,),", "# @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.ResponseV1.FlagsEntry) )) , DESCRIPTOR = _RESPONSEV1, __module__ = 'protobufs.services.feature.actions.get_flags_pb2'", "_sym_db.RegisterMessage(ResponseV1) _sym_db.RegisterMessage(ResponseV1.FlagsEntry) _RESPONSEV1_FLAGSENTRY.has_options = True _RESPONSEV1_FLAGSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\\001') #", "serialized_start=192, serialized_end=236, ) _RESPONSEV1 = _descriptor.Descriptor( name='ResponseV1', full_name='services.feature.actions.get_flags.ResponseV1', filename=None, file=DESCRIPTOR,", "file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='flags', full_name='services.feature.actions.get_flags.ResponseV1.flags', index=0, number=1, type=11, cpp_type=10,", "cpp_type=9, label=1, has_default_value=False, default_value=b\"\".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None),", "= _reflection.GeneratedProtocolMessageType('ResponseV1', (_message.Message,), dict( FlagsEntry = _reflection.GeneratedProtocolMessageType('FlagsEntry', (_message.Message,), dict( DESCRIPTOR", "= 'protobufs.services.feature.actions.get_flags_pb2' # @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.RequestV1) )) _sym_db.RegisterMessage(RequestV1) ResponseV1 = 
_reflection.GeneratedProtocolMessageType('ResponseV1', (_message.Message,),", "serialized_end=236, ) _RESPONSEV1 = _descriptor.Descriptor( name='ResponseV1', full_name='services.feature.actions.get_flags.ResponseV1', filename=None, file=DESCRIPTOR, containing_type=None,", "], extensions=[ ], nested_types=[_RESPONSEV1_FLAGSENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3',", "ResponseV1 = _reflection.GeneratedProtocolMessageType('ResponseV1', (_message.Message,), dict( FlagsEntry = _reflection.GeneratedProtocolMessageType('FlagsEntry', (_message.Message,), dict(", "from google.protobuf import descriptor as _descriptor from google.protobuf import message", "is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=104, serialized_end=236, ) _RESPONSEV1_FLAGSENTRY.containing_type =", "fields=[ _descriptor.FieldDescriptor( name='flags', full_name='services.feature.actions.get_flags.ResponseV1.flags', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False,", "enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='value', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry.value', index=1, number=2,", "= _RESPONSEV1_FLAGSENTRY, __module__ = 'protobufs.services.feature.actions.get_flags_pb2' # @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.ResponseV1.FlagsEntry) )) , DESCRIPTOR", "_RESPONSEV1_FLAGSENTRY = _descriptor.Descriptor( name='FlagsEntry', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor(" ]
[ "# Similar BFS solution but use a little more spaces.", "here can be res.append(temp), res will not change as temp", "= None # self.right = None class Solution: def levelOrder(self,", "# Summary: # Similar BFS solution but use a little", "return its level order traversal as: # [ # [3],", "<reponame>Hellofafar/Leetcode # ------------------------------ # Binary Tree Level Order Traversal #", "# / \\ # 9 20 # / \\ #", "takes O(n) time because it needs to remap the index", "------------------------------ # Summary: # Similar BFS solution but use a", "[] # next level of nodes for node in queue:", "a binary tree node. # class TreeNode: # def __init__(self,", "self.left = None # self.right = None class Solution: def", "3 # / \\ # 9 20 # / \\", "def __init__(self, x): # self.val = x # self.left =", "TreeNode: # def __init__(self, x): # self.val = x #", "actually takes O(n) time because it needs to remap the", "use a little more spaces. # On 102.py, using list.pop(0)", "because it needs to remap the index # of values.", "None class Solution: def levelOrder(self, root: TreeNode) -> List[List[int]]: if", "9 20 # / \\ # 15 7 # return", "for a binary tree node. # class TreeNode: # def", "tree node. # class TreeNode: # def __init__(self, x): #", "= [] # values of this level of nodes children", "children.append(node.left) if node.right: children.append(node.right) res.append(temp[:]) # actually here can be", "root: TreeNode) -> List[List[int]]: if not root: return [] #", "its level order traversal as: # [ # [3], #", "] # # Version: 2.0 # 11/11/19 by Jianfa #", "nodes' values. (ie, from # left to right, level by", "nodes children = [] # next level of nodes for", "if node.left: children.append(node.left) if node.right: children.append(node.right) res.append(temp[:]) # actually here", "# of values. Use collections.deque instead. # # O(N) time", "binary tree node. 
# class TreeNode: # def __init__(self, x):", "# def __init__(self, x): # self.val = x # self.left", "will not change as temp changes queue = children[:] #", "in queue: temp.append(node.val) if node.left: children.append(node.left) if node.right: children.append(node.right) res.append(temp[:])", "# ------------------------------ # Binary Tree Level Order Traversal # #", "------------------------------ # Binary Tree Level Order Traversal # # Description:", "[ # [3], # [9,20], # [15,7] # ] #", "values. Use collections.deque instead. # # O(N) time O(N) space", "11/11/19 by Jianfa # ------------------------------ # Definition for a binary", "children changes return res # Used for testing if __name__", "# Binary Tree Level Order Traversal # # Description: #", "# # Version: 2.0 # 11/11/19 by Jianfa # ------------------------------", "x): # self.val = x # self.left = None #", "Solution: def levelOrder(self, root: TreeNode) -> List[List[int]]: if not root:", "queue: temp = [] # values of this level of", "temp.append(node.val) if node.left: children.append(node.left) if node.right: children.append(node.right) res.append(temp[:]) # actually", "= [root] while queue: temp = [] # values of", "children[:] # here must be children[:] otherwise queue will change", "7 # return its level order traversal as: # [", "\\ # 15 7 # return its level order traversal", "be children[:] otherwise queue will change as children changes return", "time because it needs to remap the index # of", "[] # BFS res = [] queue = [root] while", "Summary: # Similar BFS solution but use a little more", "by Jianfa # ------------------------------ # Definition for a binary tree", "# ------------------------------ # Definition for a binary tree node. 
#", "For example: # Given binary tree [3,9,20,null,null,15,7], # 3 #", "if not root: return [] # BFS res = []", "Used for testing if __name__ == \"__main__\": test = Solution()", "queue will change as children changes return res # Used", "Definition for a binary tree node. # class TreeNode: #", "left to right, level by level). # # For example:", "node in queue: temp.append(node.val) if node.left: children.append(node.left) if node.right: children.append(node.right)", "this level of nodes children = [] # next level", "needs to remap the index # of values. Use collections.deque", "= children[:] # here must be children[:] otherwise queue will", "root: return [] # BFS res = [] queue =", "of its nodes' values. (ie, from # left to right,", "# [9,20], # [15,7] # ] # # Version: 2.0", "values. (ie, from # left to right, level by level).", "# self.val = x # self.left = None # self.right", "= [] # next level of nodes for node in", "solution but use a little more spaces. # On 102.py,", "Binary Tree Level Order Traversal # # Description: # Given", "# # For example: # Given binary tree [3,9,20,null,null,15,7], #", "self.val = x # self.left = None # self.right =", "can be res.append(temp), res will not change as temp changes", "15 7 # return its level order traversal as: #", "changes queue = children[:] # here must be children[:] otherwise", "if __name__ == \"__main__\": test = Solution() # ------------------------------ #", "as children changes return res # Used for testing if", "# Given binary tree [3,9,20,null,null,15,7], # 3 # / \\", "class TreeNode: # def __init__(self, x): # self.val = x", "# values of this level of nodes children = []", "otherwise queue will change as children changes return res #", "Jianfa # ------------------------------ # Definition for a binary tree node.", "Version: 2.0 # 11/11/19 by Jianfa # ------------------------------ # Definition", "tree, return the level order traversal of its nodes' values.", "test = Solution() # ------------------------------ 
# Summary: # Similar BFS", "BFS res = [] queue = [root] while queue: temp", "# For example: # Given binary tree [3,9,20,null,null,15,7], # 3", "Description: # Given a binary tree, return the level order", "a binary tree, return the level order traversal of its", "# left to right, level by level). # # For", "it needs to remap the index # of values. Use", "to remap the index # of values. Use collections.deque instead.", "node.right: children.append(node.right) res.append(temp[:]) # actually here can be res.append(temp), res", "change as temp changes queue = children[:] # here must", "list.pop(0) actually takes O(n) time because it needs to remap", "[3], # [9,20], # [15,7] # ] # # Version:", "20 # / \\ # 15 7 # return its", "level of nodes for node in queue: temp.append(node.val) if node.left:", "# On 102.py, using list.pop(0) actually takes O(n) time because", "Similar BFS solution but use a little more spaces. #", "traversal as: # [ # [3], # [9,20], # [15,7]", "# 15 7 # return its level order traversal as:", "(ie, from # left to right, level by level). #", "# self.right = None class Solution: def levelOrder(self, root: TreeNode)", "levelOrder(self, root: TreeNode) -> List[List[int]]: if not root: return []", "[15,7] # ] # # Version: 2.0 # 11/11/19 by", "x # self.left = None # self.right = None class", "return [] # BFS res = [] queue = [root]", "/ \\ # 15 7 # return its level order", "changes return res # Used for testing if __name__ ==", "must be children[:] otherwise queue will change as children changes", "of nodes children = [] # next level of nodes", "res # Used for testing if __name__ == \"__main__\": test", "\"__main__\": test = Solution() # ------------------------------ # Summary: # Similar", "order traversal as: # [ # [3], # [9,20], #", "more spaces. 
# On 102.py, using list.pop(0) actually takes O(n)", "# class TreeNode: # def __init__(self, x): # self.val =", "nodes for node in queue: temp.append(node.val) if node.left: children.append(node.left) if", "for testing if __name__ == \"__main__\": test = Solution() #", "Traversal # # Description: # Given a binary tree, return", "Given binary tree [3,9,20,null,null,15,7], # 3 # / \\ #", "traversal of its nodes' values. (ie, from # left to", "a little more spaces. # On 102.py, using list.pop(0) actually", "-> List[List[int]]: if not root: return [] # BFS res", "# actually here can be res.append(temp), res will not change", "# Given a binary tree, return the level order traversal", "Order Traversal # # Description: # Given a binary tree,", "not change as temp changes queue = children[:] # here", "actually here can be res.append(temp), res will not change as", "# [ # [3], # [9,20], # [15,7] # ]", "/ \\ # 9 20 # / \\ # 15", "of values. Use collections.deque instead. # # O(N) time O(N)", "[] queue = [root] while queue: temp = [] #", "= None class Solution: def levelOrder(self, root: TreeNode) -> List[List[int]]:", "[3,9,20,null,null,15,7], # 3 # / \\ # 9 20 #", "level). # # For example: # Given binary tree [3,9,20,null,null,15,7],", "# 11/11/19 by Jianfa # ------------------------------ # Definition for a", "------------------------------ # Definition for a binary tree node. # class", "None # self.right = None class Solution: def levelOrder(self, root:", "level of nodes children = [] # next level of", "of nodes for node in queue: temp.append(node.val) if node.left: children.append(node.left)", "O(n) time because it needs to remap the index #", "the index # of values. Use collections.deque instead. 
# #", "example: # Given binary tree [3,9,20,null,null,15,7], # 3 # /", "temp changes queue = children[:] # here must be children[:]", "== \"__main__\": test = Solution() # ------------------------------ # Summary: #", "__init__(self, x): # self.val = x # self.left = None", "temp = [] # values of this level of nodes", "[9,20], # [15,7] # ] # # Version: 2.0 #", "be res.append(temp), res will not change as temp changes queue", "# self.left = None # self.right = None class Solution:", "not root: return [] # BFS res = [] queue", "BFS solution but use a little more spaces. # On", "the level order traversal of its nodes' values. (ie, from", "but use a little more spaces. # On 102.py, using", "next level of nodes for node in queue: temp.append(node.val) if", "for node in queue: temp.append(node.val) if node.left: children.append(node.left) if node.right:", "as temp changes queue = children[:] # here must be", "as: # [ # [3], # [9,20], # [15,7] #", "change as children changes return res # Used for testing", "if node.right: children.append(node.right) res.append(temp[:]) # actually here can be res.append(temp),", "will change as children changes return res # Used for", "\\ # 9 20 # / \\ # 15 7", "level order traversal of its nodes' values. 
(ie, from #", "queue: temp.append(node.val) if node.left: children.append(node.left) if node.right: children.append(node.right) res.append(temp[:]) #", "# 9 20 # / \\ # 15 7 #", "res will not change as temp changes queue = children[:]", "# 3 # / \\ # 9 20 # /", "queue = children[:] # here must be children[:] otherwise queue", "Given a binary tree, return the level order traversal of", "of this level of nodes children = [] # next", "# here must be children[:] otherwise queue will change as", "[root] while queue: temp = [] # values of this", "= Solution() # ------------------------------ # Summary: # Similar BFS solution", "102.py, using list.pop(0) actually takes O(n) time because it needs", "binary tree, return the level order traversal of its nodes'", "2.0 # 11/11/19 by Jianfa # ------------------------------ # Definition for", "order traversal of its nodes' values. (ie, from # left", "here must be children[:] otherwise queue will change as children", "TreeNode) -> List[List[int]]: if not root: return [] # BFS", "node.left: children.append(node.left) if node.right: children.append(node.right) res.append(temp[:]) # actually here can", "children = [] # next level of nodes for node", "= [] queue = [root] while queue: temp = []", "return res # Used for testing if __name__ == \"__main__\":", "# / \\ # 15 7 # return its level", "On 102.py, using list.pop(0) actually takes O(n) time because it", "class Solution: def levelOrder(self, root: TreeNode) -> List[List[int]]: if not", "its nodes' values. (ie, from # left to right, level", "from # left to right, level by level). # #", "node. # class TreeNode: # def __init__(self, x): # self.val", "right, level by level). # # For example: # Given", "# Version: 2.0 # 11/11/19 by Jianfa # ------------------------------ #", "res.append(temp[:]) # actually here can be res.append(temp), res will not", "level by level). # # For example: # Given binary", "by level). 
# # For example: # Given binary tree", "queue = [root] while queue: temp = [] # values", "# BFS res = [] queue = [root] while queue:", "# [15,7] # ] # # Version: 2.0 # 11/11/19", "self.right = None class Solution: def levelOrder(self, root: TreeNode) ->", "res = [] queue = [root] while queue: temp =", "testing if __name__ == \"__main__\": test = Solution() # ------------------------------", "index # of values. Use collections.deque instead. # # O(N)", "List[List[int]]: if not root: return [] # BFS res =", "[] # values of this level of nodes children =", "= x # self.left = None # self.right = None", "while queue: temp = [] # values of this level", "# Definition for a binary tree node. # class TreeNode:", "# Description: # Given a binary tree, return the level", "# return its level order traversal as: # [ #", "res.append(temp), res will not change as temp changes queue =", "# Used for testing if __name__ == \"__main__\": test =", "Tree Level Order Traversal # # Description: # Given a", "# next level of nodes for node in queue: temp.append(node.val)", "__name__ == \"__main__\": test = Solution() # ------------------------------ # Summary:", "spaces. # On 102.py, using list.pop(0) actually takes O(n) time", "using list.pop(0) actually takes O(n) time because it needs to", "level order traversal as: # [ # [3], # [9,20],", "Solution() # ------------------------------ # Summary: # Similar BFS solution but", "def levelOrder(self, root: TreeNode) -> List[List[int]]: if not root: return", "values of this level of nodes children = [] #", "children.append(node.right) res.append(temp[:]) # actually here can be res.append(temp), res will", "remap the index # of values. Use collections.deque instead. #", "# ] # # Version: 2.0 # 11/11/19 by Jianfa", "binary tree [3,9,20,null,null,15,7], # 3 # / \\ # 9", "to right, level by level). 
# # For example: #", "# [3], # [9,20], # [15,7] # ] # #", "# ------------------------------ # Summary: # Similar BFS solution but use", "return the level order traversal of its nodes' values. (ie,", "children[:] otherwise queue will change as children changes return res", "tree [3,9,20,null,null,15,7], # 3 # / \\ # 9 20", "# # Description: # Given a binary tree, return the", "little more spaces. # On 102.py, using list.pop(0) actually takes", "Level Order Traversal # # Description: # Given a binary" ]
[ "True, 'apply_sqrt2_fac_in_eq_lin': False},} overlay_visualizer = OverLayViz() # overlay_visualizer.setup_renderer(mesh_file=None) flm_params =", "resolution)).astype('float32') pbar = tqdm.tqdm(range(0, num_smpl_to_eval_on, batch_size)) pbar.set_description('Generating_images') flame_mesh_imgs = None", "focal=flength) run_ids_1 = [29, ] # with sqrt(2) # run_ids_1", "# run_ids_1 = [7, 8, 3] settings_for_runs = \\ {24:", "int(np.log2(resolution) - 2) root_out_dir = f'{cnst.output_root}sample/' num_smpl_to_eval_on = 1000 use_styled_conv_stylegan2", "flame_param = fl_param_dict[key] flame_param = np.hstack((flame_param['shape'], flame_param['exp'], flame_param['pose'], flame_param['cam'], flame_param['tex'],", "flm_batch = flm_params[batch_idx:batch_idx+batch_size, :] flm_batch = torch.from_numpy(flm_batch).cuda() flm_batch = position_to_given_location(flame_decoder,", "9, 3)) texture_code = flm_batch[:, constants.DECA_IDX['tex'][0]:constants.DECA_IDX['tex'][1]:] norma_map_img, _, _, _,", "constants.INDICES['POSE'][0]:constants.INDICES['POSE'][1]] # import ipdb; ipdb.set_trace() light_code = \\ flm_batch[:, constants.DECA_IDX['lit'][0]:constants.DECA_IDX['lit'][1]:].view((batch_size_true,", "flm_batch = position_to_given_location(flame_decoder, flm_batch) batch_size_true = flm_batch.shape[0] if settings_for_runs[run_idx]['normal_maps_as_cond'] or", "num_smpl_to_eval_on = 1000 use_styled_conv_stylegan2 = True flength = 5000 cam_t", "flame_param['exp'], flame_param['pose'], flame_param['cam'], flame_param['tex'], flame_param['lit'].flatten())) # tz = camera_params['f'][0] /", "mdl_1_gen_images = generic_utils.get_images_from_flame_params( flame_params=gen_1_in.cpu().numpy(), pose=None, model=generator_1, step=step_max, alpha=alpha, input_indices=identity_embeddings.cpu().numpy()) #", "exp, pose, light_code, texture_code), camera_params=cam) rend_flm = torch.clamp(rend_flm, 0, 1)", "'051000_1', 'normal_maps_as_cond': False, 'rendered_flame_as_condition': True, 
'apply_sqrt2_fac_in_eq_lin': False}, 3: {'name': 'norm_mp_tex_interp',", "# import ipdb; ipdb.set_trace() images = torch.clamp(mdl_1_gen_images, -1, 1).cpu().numpy() flame_mesh_imgs", "= torch.clamp(norma_map_img, 0, 1) * 2 - 1 rend_flm =", "normal_map_cond, texture_cond): if normal_map_cond and texture_cond: return torch.cat((textured_rndr, norm_map), dim=1)", "'inter_model_comparison', settings_for_runs[run_idx]['name']) save_set_of_images(path=save_path_current_id, prefix=f'{mdl_id}_{batch_idx}', images=(images + 1) / 2, show_prog_bar=True)", "General settings save_images = True code_size = 236 use_inst_norm =", "for run_idx in run_ids_1: # import ipdb; ipdb.set_trace() generator_1 =", "flame_param['pose'], flame_param['cam'], flame_param['tex'], flame_param['lit'].flatten())) # tz = camera_params['f'][0] / (camera_params['c'][0]", "# import ipdb; ipdb.set_trace() flm_params[i, :] = flame_param.astype('float32') if i", "= generator_1.eval() # images = np.zeros((num_smpl_to_eval_on, 3, resolution, resolution)).astype('float32') pbar", "{24: {'name': 'vector_cond', 'model_idx': '216000_1', 'normal_maps_as_cond': False, 'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin':", "2 - 1 rend_flm = fast_image_reshape(rend_flm, height_out=256, width_out=256, mode='bilinear') norma_map_img", "= 64 flame_decoder = overlay_visualizer.deca.flame.eval() for run_idx in run_ids_1: #", "= True code_size = 236 use_inst_norm = True core_tensor_res =", "cam_t = np.array([0., 0., 0]) camera_params = camera_ringnetpp((512, 512), trans=cam_t,", "= overlay_visualizer.deca.flame.eval() for run_idx in run_ids_1: # import ipdb; ipdb.set_trace()", "flength = 5000 cam_t = np.array([0., 0., 0]) camera_params =", "2, show_prog_bar=True) #save flam rndr save_path_current_id_flm_rndr = os.path.join(root_out_dir, 'inter_model_comparison', settings_for_runs[run_idx]['name'])", "+ 1) / 2).cpu().numpy()) # save_set_of_images(path=save_path_this_expt, prefix='mdl1_', 
images=((mdl_1_gen_images + 1)", "exp = flm_batch[:, constants.INDICES['EXP'][0]:constants.INDICES['EXP'][1]] pose = flm_batch[:, constants.INDICES['POSE'][0]:constants.INDICES['POSE'][1]] # import", "import torch from my_utils import generic_utils from my_utils.eye_centering import position_to_given_location", "3] settings_for_runs = \\ {24: {'name': 'vector_cond', 'model_idx': '216000_1', 'normal_maps_as_cond':", "from my_utils.visualize_flame_overlay import OverLayViz from my_utils.flm_dynamic_fit_overlay import camera_ringnetpp from my_utils.generate_gif", "'normal_maps_as_cond': True, 'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False}, 8: {'name': 'norm_map_rend_flm_no_tex_interp', 'model_idx':", "import save_set_of_images from my_utils import compute_fid import constants from dataset_loaders", "os.environ['PYTHONHASHSEED'] = '2' import tqdm from model.stg2_generator import StyledGenerator import", "- 1: break batch_size = 64 flame_decoder = overlay_visualizer.deca.flame.eval() for", "= generic_utils.get_images_from_flame_params( flame_params=gen_1_in.cpu().numpy(), pose=None, model=generator_1, step=step_max, alpha=alpha, input_indices=identity_embeddings.cpu().numpy()) # import", "compute_fid import constants from dataset_loaders import fast_image_reshape import torch from", "enumerate(fl_param_dict): flame_param = fl_param_dict[key] flame_param = np.hstack((flame_param['shape'], flame_param['exp'], flame_param['pose'], flame_param['cam'],", "ge_gen_in(flm_params, textured_rndr, norm_map, normal_map_cond, texture_cond): if normal_map_cond and texture_cond: return", "'model_idx': '009000_1', 'normal_maps_as_cond': True, 'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},} overlay_visualizer =", "'mdl2_' if settings_for_runs[run_idx]['name'] == 'full_model': mdl_id = 'mdl1_' for batch_idx", "flm_params[batch_idx:batch_idx+batch_size, :] flm_batch = torch.from_numpy(flm_batch).cuda() flm_batch = 
position_to_given_location(flame_decoder, flm_batch) batch_size_true", "1).cpu().numpy() save_path_current_id = os.path.join(root_out_dir, 'inter_model_comparison', settings_for_runs[run_idx]['name']) save_set_of_images(path=save_path_current_id, prefix=f'{mdl_id}_{batch_idx}', images=(images +", "_, _, _, rend_flm = \\ overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code,", "width_out=256, mode='bilinear') norma_map_img = fast_image_reshape(norma_map_img, height_out=256, width_out=256, mode='bilinear') else: rend_flm", "0, 1) * 2 - 1 norma_map_img = torch.clamp(norma_map_img, 0,", "fl_param_dict[key] flame_param = np.hstack((flame_param['shape'], flame_param['exp'], flame_param['pose'], flame_param['cam'], flame_param['tex'], flame_param['lit'].flatten())) #", "/ (camera_params['c'][0] * flame_param[:, 156:157]) # flame_param[:, 156:159] = np.concatenate((flame_param[:,", "'normal_maps_as_cond': True, 'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': True}, 7: {'name': 'flm_rndr_tex_interp', 'model_idx':", "False},} overlay_visualizer = OverLayViz() # overlay_visualizer.setup_renderer(mesh_file=None) flm_params = np.zeros((num_smpl_to_eval_on, code_size)).astype('float32')", "= camera_params['f'][0] / (camera_params['c'][0] * flame_param[:, 156:157]) # flame_param[:, 156:159]", "flm_batch[:, constants.INDICES['POSE'][0]:constants.INDICES['POSE'][1]] # import ipdb; ipdb.set_trace() light_code = \\ flm_batch[:,", "rend_flm = None norma_map_img = None gen_1_in = ge_gen_in(flm_batch, rend_flm,", "settings save_images = True code_size = 236 use_inst_norm = True", "settings_for_runs[run_idx]['rendered_flame_as_condition']: cam = flm_batch[:, constants.DECA_IDX['cam'][0]:constants.DECA_IDX['cam'][1]:] shape = flm_batch[:, constants.INDICES['SHAPE'][0]:constants.INDICES['SHAPE'][1]] exp", "'vector_cond', 'model_idx': '216000_1', 'normal_maps_as_cond': False, 'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': 
False}, 29:", "0, 1) * 2 - 1 rend_flm = fast_image_reshape(rend_flm, height_out=256,", "ipdb.set_trace() images = torch.clamp(mdl_1_gen_images, -1, 1).cpu().numpy() flame_mesh_imgs = torch.clamp(rend_flm, -1,", "8: {'name': 'norm_map_rend_flm_no_tex_interp', 'model_idx': '009000_1', 'normal_maps_as_cond': True, 'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin':", "cnst import os os.environ['PYTHONHASHSEED'] = '2' import tqdm from model.stg2_generator", "= 256 alpha = 1 step_max = int(np.log2(resolution) - 2)", "high=69158, size=(gen_1_in.shape[0], ), dtype=torch.long, device='cuda') mdl_1_gen_images = generic_utils.get_images_from_flame_params( flame_params=gen_1_in.cpu().numpy(), pose=None,", "width_out=256, mode='bilinear') else: rend_flm = None norma_map_img = None gen_1_in", "False, 'apply_sqrt2_fac_in_eq_lin': False}, 8: {'name': 'norm_map_rend_flm_no_tex_interp', 'model_idx': '009000_1', 'normal_maps_as_cond': True,", "np.concatenate((flame_param[:, 157:], tz), axis=1) # import ipdb; ipdb.set_trace() flm_params[i, :]", "images=((mdl_1_gen_images + 1) / 2).cpu().numpy()) # save_set_of_images(path=save_path_this_expt, prefix='mdl2_', images=((mdl_2_gen_images +", "flame_mesh_imgs = torch.clamp(rend_flm, -1, 1).cpu().numpy() save_path_current_id = os.path.join(root_out_dir, 'inter_model_comparison', settings_for_runs[run_idx]['name'])", "norma_map_img = None gen_1_in = ge_gen_in(flm_batch, rend_flm, norma_map_img, settings_for_runs[run_idx]['normal_maps_as_cond'], settings_for_runs[run_idx]['rendered_flame_as_condition'])", "torch.clamp(norma_map_img, 0, 1) * 2 - 1 rend_flm = fast_image_reshape(rend_flm,", "True core_tensor_res = 4 resolution = 256 alpha = 1", "= 'mdl2_' if settings_for_runs[run_idx]['name'] == 'full_model': mdl_id = 'mdl1_' for", "= 4 resolution = 256 alpha = 1 step_max =", "if settings_for_runs[run_idx]['name'] == 'full_model': mdl_id = 'mdl1_' for batch_idx in", "model_idx = settings_for_runs[run_idx]['model_idx'] ckpt1 = 
torch.load(f'{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model') generator_1.load_state_dict(ckpt1['generator_running']) generator_1 = generator_1.eval()", "== num_smpl_to_eval_on - 1: break batch_size = 64 flame_decoder =", "OverLayViz() # overlay_visualizer.setup_renderer(mesh_file=None) flm_params = np.zeros((num_smpl_to_eval_on, code_size)).astype('float32') fl_param_dict = np.load(cnst.all_flame_params_file,", "* 2 - 1 rend_flm = fast_image_reshape(rend_flm, height_out=256, width_out=256, mode='bilinear')", "dim=1) elif normal_map_cond: return norm_map elif texture_cond: return textured_rndr else:", "1: break batch_size = 64 flame_decoder = overlay_visualizer.deca.flame.eval() for run_idx", "- 1 norma_map_img = torch.clamp(norma_map_img, 0, 1) * 2 -", "'294000_1', 'normal_maps_as_cond': True, 'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': True}, 7: {'name': 'flm_rndr_tex_interp',", "np.zeros((num_smpl_to_eval_on, 3, resolution, resolution)).astype('float32') pbar = tqdm.tqdm(range(0, num_smpl_to_eval_on, batch_size)) pbar.set_description('Generating_images')", "'normal_maps_as_cond': False, 'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False}, 29: {'name': 'full_model', 'model_idx':", "import sys sys.path.append('../../') import constants as cnst import os os.environ['PYTHONHASHSEED']", "156:159] = np.concatenate((flame_param[:, 157:], tz), axis=1) # import ipdb; ipdb.set_trace()", "return torch.cat((textured_rndr, norm_map), dim=1) elif normal_map_cond: return norm_map elif texture_cond:", "return flm_params # General settings save_images = True code_size =", "flame_param.astype('float32') if i == num_smpl_to_eval_on - 1: break batch_size =", "StyledGenerator(embedding_vocab_size=69158, rendered_flame_ascondition=settings_for_runs[run_idx]['rendered_flame_as_condition'], normal_maps_as_cond=settings_for_runs[run_idx]['normal_maps_as_cond'], core_tensor_res=core_tensor_res, w_truncation_factor=1.0, 
apply_sqrt2_fac_in_eq_lin=settings_for_runs[run_idx]['apply_sqrt2_fac_in_eq_lin'], n_mlp=8)).cuda() model_idx = settings_for_runs[run_idx]['model_idx']", "= flm_params[batch_idx:batch_idx+batch_size, :] flm_batch = torch.from_numpy(flm_batch).cuda() flm_batch = position_to_given_location(flame_decoder, flm_batch)", "flm_batch = torch.from_numpy(flm_batch).cuda() flm_batch = position_to_given_location(flame_decoder, flm_batch) batch_size_true = flm_batch.shape[0]", "'full_model', 'model_idx': '294000_1', 'normal_maps_as_cond': True, 'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': True}, 7:", "my_utils.visualize_flame_overlay import OverLayViz from my_utils.flm_dynamic_fit_overlay import camera_ringnetpp from my_utils.generate_gif import", "# tz = camera_params['f'][0] / (camera_params['c'][0] * flame_param[:, 156:157]) #", "'mdl1_' for batch_idx in pbar: flm_batch = flm_params[batch_idx:batch_idx+batch_size, :] flm_batch", "light_code = \\ flm_batch[:, constants.DECA_IDX['lit'][0]:constants.DECA_IDX['lit'][1]:].view((batch_size_true, 9, 3)) texture_code = flm_batch[:,", "constants.DECA_IDX['lit'][0]:constants.DECA_IDX['lit'][1]:].view((batch_size_true, 9, 3)) texture_code = flm_batch[:, constants.DECA_IDX['tex'][0]:constants.DECA_IDX['tex'][1]:] norma_map_img, _, _,", "'apply_sqrt2_fac_in_eq_lin': True}, 7: {'name': 'flm_rndr_tex_interp', 'model_idx': '051000_1', 'normal_maps_as_cond': False, 'rendered_flame_as_condition':", "ipdb; ipdb.set_trace() light_code = \\ flm_batch[:, constants.DECA_IDX['lit'][0]:constants.DECA_IDX['lit'][1]:].view((batch_size_true, 9, 3)) texture_code", "torch.randint(low=0, high=69158, size=(gen_1_in.shape[0], ), dtype=torch.long, device='cuda') mdl_1_gen_images = generic_utils.get_images_from_flame_params( flame_params=gen_1_in.cpu().numpy(),", "512), trans=cam_t, focal=flength) run_ids_1 = [29, ] # with sqrt(2)", "[7, 8, 3] settings_for_runs = \\ {24: {'name': 'vector_cond', 'model_idx':", "normal_map_cond: return 
norm_map elif texture_cond: return textured_rndr else: return flm_params", "= 1 step_max = int(np.log2(resolution) - 2) root_out_dir = f'{cnst.output_root}sample/'", "= flm_batch[:, constants.DECA_IDX['tex'][0]:constants.DECA_IDX['tex'][1]:] norma_map_img, _, _, _, rend_flm = \\", "break batch_size = 64 flame_decoder = overlay_visualizer.deca.flame.eval() for run_idx in", "images=((norma_map_img + 1) / 2).cpu().numpy()) # save_set_of_images(path=save_path_this_expt, prefix='mdl1_', images=((mdl_1_gen_images +", "import fast_image_reshape import torch from my_utils import generic_utils from my_utils.eye_centering", "3: {'name': 'norm_mp_tex_interp', 'model_idx': '203000_1', 'normal_maps_as_cond': True, 'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin':", "else: rend_flm = None norma_map_img = None gen_1_in = ge_gen_in(flm_batch,", "# import ipdb; ipdb.set_trace() generator_1 = torch.nn.DataParallel( StyledGenerator(embedding_vocab_size=69158, rendered_flame_ascondition=settings_for_runs[run_idx]['rendered_flame_as_condition'], normal_maps_as_cond=settings_for_runs[run_idx]['normal_maps_as_cond'],", "'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': True}, 7: {'name': 'flm_rndr_tex_interp', 'model_idx': '051000_1', 'normal_maps_as_cond':", "'apply_sqrt2_fac_in_eq_lin': False}, 8: {'name': 'norm_map_rend_flm_no_tex_interp', 'model_idx': '009000_1', 'normal_maps_as_cond': True, 'rendered_flame_as_condition':", ":] = flame_param.astype('float32') if i == num_smpl_to_eval_on - 1: break", "29: {'name': 'full_model', 'model_idx': '294000_1', 'normal_maps_as_cond': True, 'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin':", "= os.path.join(root_out_dir, 'inter_model_comparison', settings_for_runs[run_idx]['name']) save_set_of_images(path=save_path_current_id, prefix=f'{mdl_id}_{batch_idx}', images=(images + 1) /", "# import ipdb; ipdb.set_trace() light_code = \\ flm_batch[:, 
constants.DECA_IDX['lit'][0]:constants.DECA_IDX['lit'][1]:].view((batch_size_true, 9,", "import OverLayViz from my_utils.flm_dynamic_fit_overlay import camera_ringnetpp from my_utils.generate_gif import generate_from_flame_sequence", "= torch.nn.DataParallel( StyledGenerator(embedding_vocab_size=69158, rendered_flame_ascondition=settings_for_runs[run_idx]['rendered_flame_as_condition'], normal_maps_as_cond=settings_for_runs[run_idx]['normal_maps_as_cond'], core_tensor_res=core_tensor_res, w_truncation_factor=1.0, apply_sqrt2_fac_in_eq_lin=settings_for_runs[run_idx]['apply_sqrt2_fac_in_eq_lin'], n_mlp=8)).cuda() model_idx", "StyledGenerator import numpy as np from my_utils.visualize_flame_overlay import OverLayViz from", "'203000_1', 'normal_maps_as_cond': True, 'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False}, 8: {'name': 'norm_map_rend_flm_no_tex_interp',", "pbar.set_description('Generating_images') flame_mesh_imgs = None mdl_id = 'mdl2_' if settings_for_runs[run_idx]['name'] ==", "generic_utils.get_images_from_flame_params( flame_params=gen_1_in.cpu().numpy(), pose=None, model=generator_1, step=step_max, alpha=alpha, input_indices=identity_embeddings.cpu().numpy()) # import ipdb;", "{'name': 'vector_cond', 'model_idx': '216000_1', 'normal_maps_as_cond': False, 'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False},", "'009000_1', 'normal_maps_as_cond': True, 'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},} overlay_visualizer = OverLayViz()", "= \\ {24: {'name': 'vector_cond', 'model_idx': '216000_1', 'normal_maps_as_cond': False, 'rendered_flame_as_condition':", "_, rend_flm = \\ overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code, texture_code), camera_params=cam)", "3, resolution, resolution)).astype('float32') pbar = tqdm.tqdm(range(0, num_smpl_to_eval_on, batch_size)) pbar.set_description('Generating_images') flame_mesh_imgs", "core_tensor_res = 4 resolution = 
256 alpha = 1 step_max", "save_set_of_images from my_utils import compute_fid import constants from dataset_loaders import", "2 - 1 norma_map_img = torch.clamp(norma_map_img, 0, 1) * 2", "def ge_gen_in(flm_params, textured_rndr, norm_map, normal_map_cond, texture_cond): if normal_map_cond and texture_cond:", "in run_ids_1: # import ipdb; ipdb.set_trace() generator_1 = torch.nn.DataParallel( StyledGenerator(embedding_vocab_size=69158,", "= int(np.log2(resolution) - 2) root_out_dir = f'{cnst.output_root}sample/' num_smpl_to_eval_on = 1000", "tqdm.tqdm(range(0, num_smpl_to_eval_on, batch_size)) pbar.set_description('Generating_images') flame_mesh_imgs = None mdl_id = 'mdl2_'", "save_set_of_images(path=save_path_this_expt, prefix='mdl1_', images=((mdl_1_gen_images + 1) / 2).cpu().numpy()) # save_set_of_images(path=save_path_this_expt, prefix='mdl2_',", "= '2' import tqdm from model.stg2_generator import StyledGenerator import numpy", "import StyledGenerator import numpy as np from my_utils.visualize_flame_overlay import OverLayViz", "code_size)).astype('float32') fl_param_dict = np.load(cnst.all_flame_params_file, allow_pickle=True).item() for i, key in enumerate(fl_param_dict):", "= torch.clamp(rend_flm, 0, 1) * 2 - 1 norma_map_img =", "flame_params=gen_1_in.cpu().numpy(), pose=None, model=generator_1, step=step_max, alpha=alpha, input_indices=identity_embeddings.cpu().numpy()) # import ipdb; ipdb.set_trace()", "flm_batch[:, constants.DECA_IDX['cam'][0]:constants.DECA_IDX['cam'][1]:] shape = flm_batch[:, constants.INDICES['SHAPE'][0]:constants.INDICES['SHAPE'][1]] exp = flm_batch[:, constants.INDICES['EXP'][0]:constants.INDICES['EXP'][1]]", "# torch.manual_seed(2) identity_embeddings = torch.randint(low=0, high=69158, size=(gen_1_in.shape[0], ), dtype=torch.long, device='cuda')", "+ 1) / 2, show_prog_bar=True) # save_set_of_images(path=save_path_this_expt, prefix='mesh_', images=((norma_map_img +", "157:], tz), axis=1) # import ipdb; ipdb.set_trace() flm_params[i, :] 
=", "constants.INDICES['EXP'][0]:constants.INDICES['EXP'][1]] pose = flm_batch[:, constants.INDICES['POSE'][0]:constants.INDICES['POSE'][1]] # import ipdb; ipdb.set_trace() light_code", "settings_for_runs[run_idx]['model_idx'] ckpt1 = torch.load(f'{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model') generator_1.load_state_dict(ckpt1['generator_running']) generator_1 = generator_1.eval() # images", "torch.nn.DataParallel( StyledGenerator(embedding_vocab_size=69158, rendered_flame_ascondition=settings_for_runs[run_idx]['rendered_flame_as_condition'], normal_maps_as_cond=settings_for_runs[run_idx]['normal_maps_as_cond'], core_tensor_res=core_tensor_res, w_truncation_factor=1.0, apply_sqrt2_fac_in_eq_lin=settings_for_runs[run_idx]['apply_sqrt2_fac_in_eq_lin'], n_mlp=8)).cuda() model_idx =", "run_ids_1 = [29, ] # with sqrt(2) # run_ids_1 =", "settings_for_runs[run_idx]['name']) save_set_of_images(path=save_path_current_id_flm_rndr, prefix=f'mesh_{batch_idx}', images=(flame_mesh_imgs + 1) / 2, show_prog_bar=True) #", "'apply_sqrt2_fac_in_eq_lin': False}, 3: {'name': 'norm_mp_tex_interp', 'model_idx': '203000_1', 'normal_maps_as_cond': True, 'rendered_flame_as_condition':", "if settings_for_runs[run_idx]['normal_maps_as_cond'] or \\ settings_for_runs[run_idx]['rendered_flame_as_condition']: cam = flm_batch[:, constants.DECA_IDX['cam'][0]:constants.DECA_IDX['cam'][1]:] shape", "save_images = True code_size = 236 use_inst_norm = True core_tensor_res", "show_prog_bar=True) # save_set_of_images(path=save_path_this_expt, prefix='mesh_', images=((norma_map_img + 1) / 2).cpu().numpy()) #", "pbar = tqdm.tqdm(range(0, num_smpl_to_eval_on, batch_size)) pbar.set_description('Generating_images') flame_mesh_imgs = None mdl_id", "constants as cnst import os os.environ['PYTHONHASHSEED'] = '2' import tqdm", "np.zeros((num_smpl_to_eval_on, code_size)).astype('float32') fl_param_dict = np.load(cnst.all_flame_params_file, allow_pickle=True).item() for i, key in", "= tqdm.tqdm(range(0, 
num_smpl_to_eval_on, batch_size)) pbar.set_description('Generating_images') flame_mesh_imgs = None mdl_id =", ":] flm_batch = torch.from_numpy(flm_batch).cuda() flm_batch = position_to_given_location(flame_decoder, flm_batch) batch_size_true =", "None norma_map_img = None gen_1_in = ge_gen_in(flm_batch, rend_flm, norma_map_img, settings_for_runs[run_idx]['normal_maps_as_cond'],", "dtype=torch.long, device='cuda') mdl_1_gen_images = generic_utils.get_images_from_flame_params( flame_params=gen_1_in.cpu().numpy(), pose=None, model=generator_1, step=step_max, alpha=alpha,", "normal_maps_as_cond=settings_for_runs[run_idx]['normal_maps_as_cond'], core_tensor_res=core_tensor_res, w_truncation_factor=1.0, apply_sqrt2_fac_in_eq_lin=settings_for_runs[run_idx]['apply_sqrt2_fac_in_eq_lin'], n_mlp=8)).cuda() model_idx = settings_for_runs[run_idx]['model_idx'] ckpt1 =", "w_truncation_factor=1.0, apply_sqrt2_fac_in_eq_lin=settings_for_runs[run_idx]['apply_sqrt2_fac_in_eq_lin'], n_mlp=8)).cuda() model_idx = settings_for_runs[run_idx]['model_idx'] ckpt1 = torch.load(f'{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model') generator_1.load_state_dict(ckpt1['generator_running'])", "prefix='mesh_', images=((norma_map_img + 1) / 2).cpu().numpy()) # save_set_of_images(path=save_path_this_expt, prefix='mdl1_', images=((mdl_1_gen_images", "/ 2).cpu().numpy()) # save_set_of_images(path=save_path_this_expt, prefix='mdl2_', images=((mdl_2_gen_images + 1) / 2).cpu().numpy())", "ipdb.set_trace() light_code = \\ flm_batch[:, constants.DECA_IDX['lit'][0]:constants.DECA_IDX['lit'][1]:].view((batch_size_true, 9, 3)) texture_code =", "trans=cam_t, focal=flength) run_ids_1 = [29, ] # with sqrt(2) #", "ipdb.set_trace() generator_1 = torch.nn.DataParallel( StyledGenerator(embedding_vocab_size=69158, rendered_flame_ascondition=settings_for_runs[run_idx]['rendered_flame_as_condition'], normal_maps_as_cond=settings_for_runs[run_idx]['normal_maps_as_cond'], core_tensor_res=core_tensor_res, 
w_truncation_factor=1.0, apply_sqrt2_fac_in_eq_lin=settings_for_runs[run_idx]['apply_sqrt2_fac_in_eq_lin'],", "core_tensor_res=core_tensor_res, w_truncation_factor=1.0, apply_sqrt2_fac_in_eq_lin=settings_for_runs[run_idx]['apply_sqrt2_fac_in_eq_lin'], n_mlp=8)).cuda() model_idx = settings_for_runs[run_idx]['model_idx'] ckpt1 = torch.load(f'{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model')", "import constants as cnst import os os.environ['PYTHONHASHSEED'] = '2' import", "'norm_map_rend_flm_no_tex_interp', 'model_idx': '009000_1', 'normal_maps_as_cond': True, 'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},} overlay_visualizer", "8, 3] settings_for_runs = \\ {24: {'name': 'vector_cond', 'model_idx': '216000_1',", "sqrt(2) # run_ids_1 = [7, 24, 8, 3] # run_ids_1", "resolution = 256 alpha = 1 step_max = int(np.log2(resolution) -", "256 alpha = 1 step_max = int(np.log2(resolution) - 2) root_out_dir", "), dtype=torch.long, device='cuda') mdl_1_gen_images = generic_utils.get_images_from_flame_params( flame_params=gen_1_in.cpu().numpy(), pose=None, model=generator_1, step=step_max,", "torch.manual_seed(2) identity_embeddings = torch.randint(low=0, high=69158, size=(gen_1_in.shape[0], ), dtype=torch.long, device='cuda') mdl_1_gen_images", "overlay_visualizer = OverLayViz() # overlay_visualizer.setup_renderer(mesh_file=None) flm_params = np.zeros((num_smpl_to_eval_on, code_size)).astype('float32') fl_param_dict", "False, 'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False}, 29: {'name': 'full_model', 'model_idx': '294000_1',", "= np.concatenate((flame_param[:, 157:], tz), axis=1) # import ipdb; ipdb.set_trace() flm_params[i,", "or \\ settings_for_runs[run_idx]['rendered_flame_as_condition']: cam = flm_batch[:, constants.DECA_IDX['cam'][0]:constants.DECA_IDX['cam'][1]:] shape = flm_batch[:,", "= True flength = 5000 cam_t = np.array([0., 0., 0])", "as cnst import os os.environ['PYTHONHASHSEED'] = '2' import tqdm from", "+ 
1) / 2).cpu().numpy()) # save_set_of_images(path=save_path_this_expt, prefix='mdl2_', images=((mdl_2_gen_images + 1)", "- 1 rend_flm = fast_image_reshape(rend_flm, height_out=256, width_out=256, mode='bilinear') norma_map_img =", "rend_flm = torch.clamp(rend_flm, 0, 1) * 2 - 1 norma_map_img", "rend_flm, norma_map_img, settings_for_runs[run_idx]['normal_maps_as_cond'], settings_for_runs[run_idx]['rendered_flame_as_condition']) # torch.manual_seed(2) identity_embeddings = torch.randint(low=0, high=69158,", "= OverLayViz() # overlay_visualizer.setup_renderer(mesh_file=None) flm_params = np.zeros((num_smpl_to_eval_on, code_size)).astype('float32') fl_param_dict =", "= [7, 8, 3] settings_for_runs = \\ {24: {'name': 'vector_cond',", "# run_ids_1 = [7, 24, 8, 3] # run_ids_1 =", "# images = np.zeros((num_smpl_to_eval_on, 3, resolution, resolution)).astype('float32') pbar = tqdm.tqdm(range(0,", "alpha = 1 step_max = int(np.log2(resolution) - 2) root_out_dir =", "= flm_batch[:, constants.INDICES['POSE'][0]:constants.INDICES['POSE'][1]] # import ipdb; ipdb.set_trace() light_code = \\", "my_utils.flm_dynamic_fit_overlay import camera_ringnetpp from my_utils.generate_gif import generate_from_flame_sequence from my_utils.generic_utils import", "flame_param[:, 156:159] = np.concatenate((flame_param[:, 157:], tz), axis=1) # import ipdb;", "light_code, texture_code), camera_params=cam) rend_flm = torch.clamp(rend_flm, 0, 1) * 2", "import camera_ringnetpp from my_utils.generate_gif import generate_from_flame_sequence from my_utils.generic_utils import save_set_of_images", "import numpy as np from my_utils.visualize_flame_overlay import OverLayViz from my_utils.flm_dynamic_fit_overlay", "from my_utils import generic_utils from my_utils.eye_centering import position_to_given_location def ge_gen_in(flm_params,", "if normal_map_cond and texture_cond: return torch.cat((textured_rndr, norm_map), dim=1) elif normal_map_cond:", "texture_code = flm_batch[:, 
constants.DECA_IDX['tex'][0]:constants.DECA_IDX['tex'][1]:] norma_map_img, _, _, _, rend_flm =", "settings_for_runs[run_idx]['name'] == 'full_model': mdl_id = 'mdl1_' for batch_idx in pbar:", "n_mlp=8)).cuda() model_idx = settings_for_runs[run_idx]['model_idx'] ckpt1 = torch.load(f'{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model') generator_1.load_state_dict(ckpt1['generator_running']) generator_1 =", "= ge_gen_in(flm_batch, rend_flm, norma_map_img, settings_for_runs[run_idx]['normal_maps_as_cond'], settings_for_runs[run_idx]['rendered_flame_as_condition']) # torch.manual_seed(2) identity_embeddings =", "/ 2, show_prog_bar=True) # save_set_of_images(path=save_path_this_expt, prefix='mesh_', images=((norma_map_img + 1) /", "use_inst_norm = True core_tensor_res = 4 resolution = 256 alpha", "pose = flm_batch[:, constants.INDICES['POSE'][0]:constants.INDICES['POSE'][1]] # import ipdb; ipdb.set_trace() light_code =", "import tqdm from model.stg2_generator import StyledGenerator import numpy as np", "rndr save_path_current_id_flm_rndr = os.path.join(root_out_dir, 'inter_model_comparison', settings_for_runs[run_idx]['name']) save_set_of_images(path=save_path_current_id_flm_rndr, prefix=f'mesh_{batch_idx}', images=(flame_mesh_imgs +", "elif normal_map_cond: return norm_map elif texture_cond: return textured_rndr else: return", "flame_param = np.hstack((flame_param['shape'], flame_param['exp'], flame_param['pose'], flame_param['cam'], flame_param['tex'], flame_param['lit'].flatten())) # tz", "flm_batch[:, constants.DECA_IDX['tex'][0]:constants.DECA_IDX['tex'][1]:] norma_map_img, _, _, _, rend_flm = \\ overlay_visualizer.get_rendered_mesh(flame_params=(shape,", "from model.stg2_generator import StyledGenerator import numpy as np from my_utils.visualize_flame_overlay", "flm_batch.shape[0] if settings_for_runs[run_idx]['normal_maps_as_cond'] or \\ settings_for_runs[run_idx]['rendered_flame_as_condition']: cam = flm_batch[:, 
constants.DECA_IDX['cam'][0]:constants.DECA_IDX['cam'][1]:]", "8, 3] # run_ids_1 = [7, 8, 3] settings_for_runs =", "flame_decoder = overlay_visualizer.deca.flame.eval() for run_idx in run_ids_1: # import ipdb;", "camera_ringnetpp from my_utils.generate_gif import generate_from_flame_sequence from my_utils.generic_utils import save_set_of_images from", "= [29, ] # with sqrt(2) # run_ids_1 = [7,", "height_out=256, width_out=256, mode='bilinear') norma_map_img = fast_image_reshape(norma_map_img, height_out=256, width_out=256, mode='bilinear') else:", "'model_idx': '203000_1', 'normal_maps_as_cond': True, 'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False}, 8: {'name':", "norma_map_img, _, _, _, rend_flm = \\ overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose,", "= torch.randint(low=0, high=69158, size=(gen_1_in.shape[0], ), dtype=torch.long, device='cuda') mdl_1_gen_images = generic_utils.get_images_from_flame_params(", "overlay_visualizer.setup_renderer(mesh_file=None) flm_params = np.zeros((num_smpl_to_eval_on, code_size)).astype('float32') fl_param_dict = np.load(cnst.all_flame_params_file, allow_pickle=True).item() for", "{'name': 'flm_rndr_tex_interp', 'model_idx': '051000_1', 'normal_maps_as_cond': False, 'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},", "flame_param['tex'], flame_param['lit'].flatten())) # tz = camera_params['f'][0] / (camera_params['c'][0] * flame_param[:,", "True code_size = 236 use_inst_norm = True core_tensor_res = 4", "norma_map_img, settings_for_runs[run_idx]['normal_maps_as_cond'], settings_for_runs[run_idx]['rendered_flame_as_condition']) # torch.manual_seed(2) identity_embeddings = torch.randint(low=0, high=69158, size=(gen_1_in.shape[0],", "flame_mesh_imgs = None mdl_id = 'mdl2_' if settings_for_runs[run_idx]['name'] == 'full_model':", "num_smpl_to_eval_on - 1: break batch_size = 64 flame_decoder = overlay_visualizer.deca.flame.eval()", "= fl_param_dict[key] flame_param 
= np.hstack((flame_param['shape'], flame_param['exp'], flame_param['pose'], flame_param['cam'], flame_param['tex'], flame_param['lit'].flatten()))", "images = torch.clamp(mdl_1_gen_images, -1, 1).cpu().numpy() flame_mesh_imgs = torch.clamp(rend_flm, -1, 1).cpu().numpy()", "texture_cond): if normal_map_cond and texture_cond: return torch.cat((textured_rndr, norm_map), dim=1) elif", "settings_for_runs[run_idx]['rendered_flame_as_condition']) # torch.manual_seed(2) identity_embeddings = torch.randint(low=0, high=69158, size=(gen_1_in.shape[0], ), dtype=torch.long,", "ipdb.set_trace() flm_params[i, :] = flame_param.astype('float32') if i == num_smpl_to_eval_on -", "# General settings save_images = True code_size = 236 use_inst_norm", "/ 2, show_prog_bar=True) #save flam rndr save_path_current_id_flm_rndr = os.path.join(root_out_dir, 'inter_model_comparison',", "shape = flm_batch[:, constants.INDICES['SHAPE'][0]:constants.INDICES['SHAPE'][1]] exp = flm_batch[:, constants.INDICES['EXP'][0]:constants.INDICES['EXP'][1]] pose =", "from my_utils.generate_gif import generate_from_flame_sequence from my_utils.generic_utils import save_set_of_images from my_utils", "apply_sqrt2_fac_in_eq_lin=settings_for_runs[run_idx]['apply_sqrt2_fac_in_eq_lin'], n_mlp=8)).cuda() model_idx = settings_for_runs[run_idx]['model_idx'] ckpt1 = torch.load(f'{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model') generator_1.load_state_dict(ckpt1['generator_running']) generator_1", "flm_params = np.zeros((num_smpl_to_eval_on, code_size)).astype('float32') fl_param_dict = np.load(cnst.all_flame_params_file, allow_pickle=True).item() for i,", "flm_batch[:, constants.DECA_IDX['lit'][0]:constants.DECA_IDX['lit'][1]:].view((batch_size_true, 9, 3)) texture_code = flm_batch[:, constants.DECA_IDX['tex'][0]:constants.DECA_IDX['tex'][1]:] norma_map_img, _,", "run_ids_1 = [7, 24, 8, 3] # run_ids_1 = [7,", "from dataset_loaders import fast_image_reshape import torch from my_utils import generic_utils", "as np 
from my_utils.visualize_flame_overlay import OverLayViz from my_utils.flm_dynamic_fit_overlay import camera_ringnetpp", "save_path_current_id_flm_rndr = os.path.join(root_out_dir, 'inter_model_comparison', settings_for_runs[run_idx]['name']) save_set_of_images(path=save_path_current_id_flm_rndr, prefix=f'mesh_{batch_idx}', images=(flame_mesh_imgs + 1)", "7: {'name': 'flm_rndr_tex_interp', 'model_idx': '051000_1', 'normal_maps_as_cond': False, 'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin':", "my_utils.generic_utils import save_set_of_images from my_utils import compute_fid import constants from", "step_max = int(np.log2(resolution) - 2) root_out_dir = f'{cnst.output_root}sample/' num_smpl_to_eval_on =", "np from my_utils.visualize_flame_overlay import OverLayViz from my_utils.flm_dynamic_fit_overlay import camera_ringnetpp from", "= torch.clamp(rend_flm, -1, 1).cpu().numpy() save_path_current_id = os.path.join(root_out_dir, 'inter_model_comparison', settings_for_runs[run_idx]['name']) save_set_of_images(path=save_path_current_id,", "{'name': 'norm_mp_tex_interp', 'model_idx': '203000_1', 'normal_maps_as_cond': True, 'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False},", "tqdm from model.stg2_generator import StyledGenerator import numpy as np from", "settings_for_runs[run_idx]['normal_maps_as_cond'] or \\ settings_for_runs[run_idx]['rendered_flame_as_condition']: cam = flm_batch[:, constants.DECA_IDX['cam'][0]:constants.DECA_IDX['cam'][1]:] shape =", "# save_set_of_images(path=save_path_this_expt, prefix='mesh_', images=((norma_map_img + 1) / 2).cpu().numpy()) # save_set_of_images(path=save_path_this_expt,", "'model_idx': '294000_1', 'normal_maps_as_cond': True, 'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': True}, 7: {'name':", "save_set_of_images(path=save_path_this_expt, prefix='mesh_', images=((norma_map_img + 1) / 2).cpu().numpy()) # save_set_of_images(path=save_path_this_expt, prefix='mdl1_',", "#save 
flam rndr save_path_current_id_flm_rndr = os.path.join(root_out_dir, 'inter_model_comparison', settings_for_runs[run_idx]['name']) save_set_of_images(path=save_path_current_id_flm_rndr, prefix=f'mesh_{batch_idx}',", "ge_gen_in(flm_batch, rend_flm, norma_map_img, settings_for_runs[run_idx]['normal_maps_as_cond'], settings_for_runs[run_idx]['rendered_flame_as_condition']) # torch.manual_seed(2) identity_embeddings = torch.randint(low=0,", "1000 use_styled_conv_stylegan2 = True flength = 5000 cam_t = np.array([0.,", "axis=1) # import ipdb; ipdb.set_trace() flm_params[i, :] = flame_param.astype('float32') if", "'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False}, 3: {'name': 'norm_mp_tex_interp', 'model_idx': '203000_1', 'normal_maps_as_cond':", "= position_to_given_location(flame_decoder, flm_batch) batch_size_true = flm_batch.shape[0] if settings_for_runs[run_idx]['normal_maps_as_cond'] or \\", "'model_idx': '216000_1', 'normal_maps_as_cond': False, 'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False}, 29: {'name':", "flm_params[i, :] = flame_param.astype('float32') if i == num_smpl_to_eval_on - 1:", "= flm_batch[:, constants.INDICES['SHAPE'][0]:constants.INDICES['SHAPE'][1]] exp = flm_batch[:, constants.INDICES['EXP'][0]:constants.INDICES['EXP'][1]] pose = flm_batch[:,", "prefix=f'{mdl_id}_{batch_idx}', images=(images + 1) / 2, show_prog_bar=True) #save flam rndr", "norm_map, normal_map_cond, texture_cond): if normal_map_cond and texture_cond: return torch.cat((textured_rndr, norm_map),", "ipdb; ipdb.set_trace() flm_params[i, :] = flame_param.astype('float32') if i == num_smpl_to_eval_on", "tz = camera_params['f'][0] / (camera_params['c'][0] * flame_param[:, 156:157]) # flame_param[:,", "in pbar: flm_batch = flm_params[batch_idx:batch_idx+batch_size, :] flm_batch = torch.from_numpy(flm_batch).cuda() flm_batch", "torch from my_utils import generic_utils from my_utils.eye_centering import position_to_given_location def", "- 2) 
root_out_dir = f'{cnst.output_root}sample/' num_smpl_to_eval_on = 1000 use_styled_conv_stylegan2 =", "flm_batch) batch_size_true = flm_batch.shape[0] if settings_for_runs[run_idx]['normal_maps_as_cond'] or \\ settings_for_runs[run_idx]['rendered_flame_as_condition']: cam", "= \\ overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code, texture_code), camera_params=cam) rend_flm =", "flame_param['cam'], flame_param['tex'], flame_param['lit'].flatten())) # tz = camera_params['f'][0] / (camera_params['c'][0] *", "= None gen_1_in = ge_gen_in(flm_batch, rend_flm, norma_map_img, settings_for_runs[run_idx]['normal_maps_as_cond'], settings_for_runs[run_idx]['rendered_flame_as_condition']) #", "import generic_utils from my_utils.eye_centering import position_to_given_location def ge_gen_in(flm_params, textured_rndr, norm_map,", "] # with sqrt(2) # run_ids_1 = [7, 24, 8,", "import os os.environ['PYTHONHASHSEED'] = '2' import tqdm from model.stg2_generator import", "= 236 use_inst_norm = True core_tensor_res = 4 resolution =", "24, 8, 3] # run_ids_1 = [7, 8, 3] settings_for_runs", "flm_params # General settings save_images = True code_size = 236", "resolution, resolution)).astype('float32') pbar = tqdm.tqdm(range(0, num_smpl_to_eval_on, batch_size)) pbar.set_description('Generating_images') flame_mesh_imgs =", "dataset_loaders import fast_image_reshape import torch from my_utils import generic_utils from", "= 5000 cam_t = np.array([0., 0., 0]) camera_params = camera_ringnetpp((512,", "= np.array([0., 0., 0]) camera_params = camera_ringnetpp((512, 512), trans=cam_t, focal=flength)", "settings_for_runs[run_idx]['normal_maps_as_cond'], settings_for_runs[run_idx]['rendered_flame_as_condition']) # torch.manual_seed(2) identity_embeddings = torch.randint(low=0, high=69158, size=(gen_1_in.shape[0], ),", "'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False}, 8: {'name': 'norm_map_rend_flm_no_tex_interp', 'model_idx': '009000_1', 
'normal_maps_as_cond':", "ipdb; ipdb.set_trace() images = torch.clamp(mdl_1_gen_images, -1, 1).cpu().numpy() flame_mesh_imgs = torch.clamp(rend_flm,", "2).cpu().numpy()) # save_set_of_images(path=save_path_this_expt, prefix='mdl1_', images=((mdl_1_gen_images + 1) / 2).cpu().numpy()) #", "rendered_flame_ascondition=settings_for_runs[run_idx]['rendered_flame_as_condition'], normal_maps_as_cond=settings_for_runs[run_idx]['normal_maps_as_cond'], core_tensor_res=core_tensor_res, w_truncation_factor=1.0, apply_sqrt2_fac_in_eq_lin=settings_for_runs[run_idx]['apply_sqrt2_fac_in_eq_lin'], n_mlp=8)).cuda() model_idx = settings_for_runs[run_idx]['model_idx'] ckpt1", "1 step_max = int(np.log2(resolution) - 2) root_out_dir = f'{cnst.output_root}sample/' num_smpl_to_eval_on", "1).cpu().numpy() flame_mesh_imgs = torch.clamp(rend_flm, -1, 1).cpu().numpy() save_path_current_id = os.path.join(root_out_dir, 'inter_model_comparison',", "1) / 2).cpu().numpy()) # save_set_of_images(path=save_path_this_expt, prefix='mdl1_', images=((mdl_1_gen_images + 1) /", "np.hstack((flame_param['shape'], flame_param['exp'], flame_param['pose'], flame_param['cam'], flame_param['tex'], flame_param['lit'].flatten())) # tz = camera_params['f'][0]", "allow_pickle=True).item() for i, key in enumerate(fl_param_dict): flame_param = fl_param_dict[key] flame_param", "settings_for_runs = \\ {24: {'name': 'vector_cond', 'model_idx': '216000_1', 'normal_maps_as_cond': False,", "3)) texture_code = flm_batch[:, constants.DECA_IDX['tex'][0]:constants.DECA_IDX['tex'][1]:] norma_map_img, _, _, _, rend_flm", "camera_params = camera_ringnetpp((512, 512), trans=cam_t, focal=flength) run_ids_1 = [29, ]", "-1, 1).cpu().numpy() flame_mesh_imgs = torch.clamp(rend_flm, -1, 1).cpu().numpy() save_path_current_id = os.path.join(root_out_dir,", "run_idx in run_ids_1: # import ipdb; ipdb.set_trace() generator_1 = torch.nn.DataParallel(", "= torch.load(f'{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model') 
generator_1.load_state_dict(ckpt1['generator_running']) generator_1 = generator_1.eval() # images = np.zeros((num_smpl_to_eval_on,", "# with sqrt(2) # run_ids_1 = [7, 24, 8, 3]", "'normal_maps_as_cond': False, 'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False}, 3: {'name': 'norm_mp_tex_interp', 'model_idx':", "torch.from_numpy(flm_batch).cuda() flm_batch = position_to_given_location(flame_decoder, flm_batch) batch_size_true = flm_batch.shape[0] if settings_for_runs[run_idx]['normal_maps_as_cond']", "0., 0]) camera_params = camera_ringnetpp((512, 512), trans=cam_t, focal=flength) run_ids_1 =", "i == num_smpl_to_eval_on - 1: break batch_size = 64 flame_decoder", "textured_rndr else: return flm_params # General settings save_images = True", "= 1000 use_styled_conv_stylegan2 = True flength = 5000 cam_t =", "{'name': 'norm_map_rend_flm_no_tex_interp', 'model_idx': '009000_1', 'normal_maps_as_cond': True, 'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},}", "'2' import tqdm from model.stg2_generator import StyledGenerator import numpy as", "156:157]) # flame_param[:, 156:159] = np.concatenate((flame_param[:, 157:], tz), axis=1) #", "my_utils import compute_fid import constants from dataset_loaders import fast_image_reshape import", "rend_flm = fast_image_reshape(rend_flm, height_out=256, width_out=256, mode='bilinear') norma_map_img = fast_image_reshape(norma_map_img, height_out=256,", "run_ids_1: # import ipdb; ipdb.set_trace() generator_1 = torch.nn.DataParallel( StyledGenerator(embedding_vocab_size=69158, rendered_flame_ascondition=settings_for_runs[run_idx]['rendered_flame_as_condition'],", "= np.hstack((flame_param['shape'], flame_param['exp'], flame_param['pose'], flame_param['cam'], flame_param['tex'], flame_param['lit'].flatten())) # tz =", "position_to_given_location(flame_decoder, flm_batch) batch_size_true = flm_batch.shape[0] if settings_for_runs[run_idx]['normal_maps_as_cond'] or \\ 
settings_for_runs[run_idx]['rendered_flame_as_condition']:", "fast_image_reshape import torch from my_utils import generic_utils from my_utils.eye_centering import", "position_to_given_location def ge_gen_in(flm_params, textured_rndr, norm_map, normal_map_cond, texture_cond): if normal_map_cond and", "-1, 1).cpu().numpy() save_path_current_id = os.path.join(root_out_dir, 'inter_model_comparison', settings_for_runs[run_idx]['name']) save_set_of_images(path=save_path_current_id, prefix=f'{mdl_id}_{batch_idx}', images=(images", "size=(gen_1_in.shape[0], ), dtype=torch.long, device='cuda') mdl_1_gen_images = generic_utils.get_images_from_flame_params( flame_params=gen_1_in.cpu().numpy(), pose=None, model=generator_1,", "ckpt1 = torch.load(f'{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model') generator_1.load_state_dict(ckpt1['generator_running']) generator_1 = generator_1.eval() # images =", "None gen_1_in = ge_gen_in(flm_batch, rend_flm, norma_map_img, settings_for_runs[run_idx]['normal_maps_as_cond'], settings_for_runs[run_idx]['rendered_flame_as_condition']) # torch.manual_seed(2)", "batch_size_true = flm_batch.shape[0] if settings_for_runs[run_idx]['normal_maps_as_cond'] or \\ settings_for_runs[run_idx]['rendered_flame_as_condition']: cam =", "'model_idx': '051000_1', 'normal_maps_as_cond': False, 'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False}, 3: {'name':", "os os.environ['PYTHONHASHSEED'] = '2' import tqdm from model.stg2_generator import StyledGenerator", "norma_map_img = torch.clamp(norma_map_img, 0, 1) * 2 - 1 rend_flm", "mode='bilinear') else: rend_flm = None norma_map_img = None gen_1_in =", "mdl_id = 'mdl1_' for batch_idx in pbar: flm_batch = flm_params[batch_idx:batch_idx+batch_size,", "import compute_fid import constants from dataset_loaders import fast_image_reshape import torch", "= settings_for_runs[run_idx]['model_idx'] ckpt1 = torch.load(f'{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model') 
generator_1.load_state_dict(ckpt1['generator_running']) generator_1 = generator_1.eval() #", "step=step_max, alpha=alpha, input_indices=identity_embeddings.cpu().numpy()) # import ipdb; ipdb.set_trace() images = torch.clamp(mdl_1_gen_images,", "my_utils import generic_utils from my_utils.eye_centering import position_to_given_location def ge_gen_in(flm_params, textured_rndr,", "cam = flm_batch[:, constants.DECA_IDX['cam'][0]:constants.DECA_IDX['cam'][1]:] shape = flm_batch[:, constants.INDICES['SHAPE'][0]:constants.INDICES['SHAPE'][1]] exp =", "elif texture_cond: return textured_rndr else: return flm_params # General settings", "= np.zeros((num_smpl_to_eval_on, code_size)).astype('float32') fl_param_dict = np.load(cnst.all_flame_params_file, allow_pickle=True).item() for i, key", "True, 'apply_sqrt2_fac_in_eq_lin': False}, 3: {'name': 'norm_mp_tex_interp', 'model_idx': '203000_1', 'normal_maps_as_cond': True,", "save_path_current_id = os.path.join(root_out_dir, 'inter_model_comparison', settings_for_runs[run_idx]['name']) save_set_of_images(path=save_path_current_id, prefix=f'{mdl_id}_{batch_idx}', images=(images + 1)", "False, 'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False}, 3: {'name': 'norm_mp_tex_interp', 'model_idx': '203000_1',", "True}, 7: {'name': 'flm_rndr_tex_interp', 'model_idx': '051000_1', 'normal_maps_as_cond': False, 'rendered_flame_as_condition': True,", "root_out_dir = f'{cnst.output_root}sample/' num_smpl_to_eval_on = 1000 use_styled_conv_stylegan2 = True flength", "* 2 - 1 norma_map_img = torch.clamp(norma_map_img, 0, 1) *", "constants.DECA_IDX['cam'][0]:constants.DECA_IDX['cam'][1]:] shape = flm_batch[:, constants.INDICES['SHAPE'][0]:constants.INDICES['SHAPE'][1]] exp = flm_batch[:, constants.INDICES['EXP'][0]:constants.INDICES['EXP'][1]] pose", "'apply_sqrt2_fac_in_eq_lin': False}, 29: {'name': 'full_model', 'model_idx': '294000_1', 'normal_maps_as_cond': True, 'rendered_flame_as_condition':", "= \\ flm_batch[:, 
constants.DECA_IDX['lit'][0]:constants.DECA_IDX['lit'][1]:].view((batch_size_true, 9, 3)) texture_code = flm_batch[:, constants.DECA_IDX['tex'][0]:constants.DECA_IDX['tex'][1]:]", "generate_from_flame_sequence from my_utils.generic_utils import save_set_of_images from my_utils import compute_fid import", "= [7, 24, 8, 3] # run_ids_1 = [7, 8,", "texture_code), camera_params=cam) rend_flm = torch.clamp(rend_flm, 0, 1) * 2 -", "constants.INDICES['SHAPE'][0]:constants.INDICES['SHAPE'][1]] exp = flm_batch[:, constants.INDICES['EXP'][0]:constants.INDICES['EXP'][1]] pose = flm_batch[:, constants.INDICES['POSE'][0]:constants.INDICES['POSE'][1]] #", "input_indices=identity_embeddings.cpu().numpy()) # import ipdb; ipdb.set_trace() images = torch.clamp(mdl_1_gen_images, -1, 1).cpu().numpy()", "norm_map), dim=1) elif normal_map_cond: return norm_map elif texture_cond: return textured_rndr", "import constants from dataset_loaders import fast_image_reshape import torch from my_utils", "= None mdl_id = 'mdl2_' if settings_for_runs[run_idx]['name'] == 'full_model': mdl_id", "torch.load(f'{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model') generator_1.load_state_dict(ckpt1['generator_running']) generator_1 = generator_1.eval() # images = np.zeros((num_smpl_to_eval_on, 3,", "flm_batch[:, constants.INDICES['EXP'][0]:constants.INDICES['EXP'][1]] pose = flm_batch[:, constants.INDICES['POSE'][0]:constants.INDICES['POSE'][1]] # import ipdb; ipdb.set_trace()", "constants from dataset_loaders import fast_image_reshape import torch from my_utils import", "rend_flm = \\ overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code, texture_code), camera_params=cam) rend_flm", "pose=None, model=generator_1, step=step_max, alpha=alpha, input_indices=identity_embeddings.cpu().numpy()) # import ipdb; ipdb.set_trace() images", "in enumerate(fl_param_dict): flame_param = fl_param_dict[key] flame_param = np.hstack((flame_param['shape'], flame_param['exp'], 
flame_param['pose'],", "fast_image_reshape(rend_flm, height_out=256, width_out=256, mode='bilinear') norma_map_img = fast_image_reshape(norma_map_img, height_out=256, width_out=256, mode='bilinear')", "os.path.join(root_out_dir, 'inter_model_comparison', settings_for_runs[run_idx]['name']) save_set_of_images(path=save_path_current_id_flm_rndr, prefix=f'mesh_{batch_idx}', images=(flame_mesh_imgs + 1) / 2,", "= True core_tensor_res = 4 resolution = 256 alpha =", "+ 1) / 2, show_prog_bar=True) #save flam rndr save_path_current_id_flm_rndr =", "64 flame_decoder = overlay_visualizer.deca.flame.eval() for run_idx in run_ids_1: # import", "import generate_from_flame_sequence from my_utils.generic_utils import save_set_of_images from my_utils import compute_fid", "model=generator_1, step=step_max, alpha=alpha, input_indices=identity_embeddings.cpu().numpy()) # import ipdb; ipdb.set_trace() images =", "overlay_visualizer.deca.flame.eval() for run_idx in run_ids_1: # import ipdb; ipdb.set_trace() generator_1", "prefix='mdl1_', images=((mdl_1_gen_images + 1) / 2).cpu().numpy()) # save_set_of_images(path=save_path_this_expt, prefix='mdl2_', images=((mdl_2_gen_images", "= f'{cnst.output_root}sample/' num_smpl_to_eval_on = 1000 use_styled_conv_stylegan2 = True flength =", "= flm_batch[:, constants.DECA_IDX['cam'][0]:constants.DECA_IDX['cam'][1]:] shape = flm_batch[:, constants.INDICES['SHAPE'][0]:constants.INDICES['SHAPE'][1]] exp = flm_batch[:,", "generator_1 = torch.nn.DataParallel( StyledGenerator(embedding_vocab_size=69158, rendered_flame_ascondition=settings_for_runs[run_idx]['rendered_flame_as_condition'], normal_maps_as_cond=settings_for_runs[run_idx]['normal_maps_as_cond'], core_tensor_res=core_tensor_res, w_truncation_factor=1.0, apply_sqrt2_fac_in_eq_lin=settings_for_runs[run_idx]['apply_sqrt2_fac_in_eq_lin'], n_mlp=8)).cuda()", "1) * 2 - 1 rend_flm = fast_image_reshape(rend_flm, height_out=256, width_out=256,", 
"constants.DECA_IDX['tex'][0]:constants.DECA_IDX['tex'][1]:] norma_map_img, _, _, _, rend_flm = \\ overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp,", "= np.load(cnst.all_flame_params_file, allow_pickle=True).item() for i, key in enumerate(fl_param_dict): flame_param =", "\\ overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code, texture_code), camera_params=cam) rend_flm = torch.clamp(rend_flm,", "2, show_prog_bar=True) # save_set_of_images(path=save_path_this_expt, prefix='mesh_', images=((norma_map_img + 1) / 2).cpu().numpy())", "norm_map elif texture_cond: return textured_rndr else: return flm_params # General", "overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code, texture_code), camera_params=cam) rend_flm = torch.clamp(rend_flm, 0,", "= os.path.join(root_out_dir, 'inter_model_comparison', settings_for_runs[run_idx]['name']) save_set_of_images(path=save_path_current_id_flm_rndr, prefix=f'mesh_{batch_idx}', images=(flame_mesh_imgs + 1) /", "False}, 3: {'name': 'norm_mp_tex_interp', 'model_idx': '203000_1', 'normal_maps_as_cond': True, 'rendered_flame_as_condition': False,", "'inter_model_comparison', settings_for_runs[run_idx]['name']) save_set_of_images(path=save_path_current_id_flm_rndr, prefix=f'mesh_{batch_idx}', images=(flame_mesh_imgs + 1) / 2, show_prog_bar=True)", "True, 'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},} overlay_visualizer = OverLayViz() # overlay_visualizer.setup_renderer(mesh_file=None)", "camera_ringnetpp((512, 512), trans=cam_t, focal=flength) run_ids_1 = [29, ] # with", "and texture_cond: return torch.cat((textured_rndr, norm_map), dim=1) elif normal_map_cond: return norm_map", "/ 2).cpu().numpy()) # save_set_of_images(path=save_path_this_expt, prefix='mdl1_', images=((mdl_1_gen_images + 1) / 2).cpu().numpy())", "1) * 2 - 1 norma_map_img = torch.clamp(norma_map_img, 0, 1)", "textured_rndr, norm_map, normal_map_cond, texture_cond): if 
normal_map_cond and texture_cond: return torch.cat((textured_rndr,", "batch_size)) pbar.set_description('Generating_images') flame_mesh_imgs = None mdl_id = 'mdl2_' if settings_for_runs[run_idx]['name']", "True, 'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': True}, 7: {'name': 'flm_rndr_tex_interp', 'model_idx': '051000_1',", "np.load(cnst.all_flame_params_file, allow_pickle=True).item() for i, key in enumerate(fl_param_dict): flame_param = fl_param_dict[key]", "device='cuda') mdl_1_gen_images = generic_utils.get_images_from_flame_params( flame_params=gen_1_in.cpu().numpy(), pose=None, model=generator_1, step=step_max, alpha=alpha, input_indices=identity_embeddings.cpu().numpy())", "= torch.from_numpy(flm_batch).cuda() flm_batch = position_to_given_location(flame_decoder, flm_batch) batch_size_true = flm_batch.shape[0] if", "tz), axis=1) # import ipdb; ipdb.set_trace() flm_params[i, :] = flame_param.astype('float32')", "show_prog_bar=True) #save flam rndr save_path_current_id_flm_rndr = os.path.join(root_out_dir, 'inter_model_comparison', settings_for_runs[run_idx]['name']) save_set_of_images(path=save_path_current_id_flm_rndr,", "1 rend_flm = fast_image_reshape(rend_flm, height_out=256, width_out=256, mode='bilinear') norma_map_img = fast_image_reshape(norma_map_img,", "flame_param['lit'].flatten())) # tz = camera_params['f'][0] / (camera_params['c'][0] * flame_param[:, 156:157])", "'apply_sqrt2_fac_in_eq_lin': False},} overlay_visualizer = OverLayViz() # overlay_visualizer.setup_renderer(mesh_file=None) flm_params = np.zeros((num_smpl_to_eval_on,", "run_ids_1 = [7, 8, 3] settings_for_runs = \\ {24: {'name':", "1) / 2, show_prog_bar=True) # save_set_of_images(path=save_path_this_expt, prefix='mesh_', images=((norma_map_img + 1)", "import ipdb; ipdb.set_trace() flm_params[i, :] = flame_param.astype('float32') if i ==", "= flame_param.astype('float32') if i == num_smpl_to_eval_on - 1: break batch_size", "flam rndr save_path_current_id_flm_rndr = 
os.path.join(root_out_dir, 'inter_model_comparison', settings_for_runs[run_idx]['name']) save_set_of_images(path=save_path_current_id_flm_rndr, prefix=f'mesh_{batch_idx}', images=(flame_mesh_imgs", "torch.clamp(rend_flm, 0, 1) * 2 - 1 norma_map_img = torch.clamp(norma_map_img,", "'normal_maps_as_cond': True, 'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},} overlay_visualizer = OverLayViz() #", "False}, 29: {'name': 'full_model', 'model_idx': '294000_1', 'normal_maps_as_cond': True, 'rendered_flame_as_condition': True,", "1) / 2).cpu().numpy()) # save_set_of_images(path=save_path_this_expt, prefix='mdl2_', images=((mdl_2_gen_images + 1) /", "save_set_of_images(path=save_path_current_id, prefix=f'{mdl_id}_{batch_idx}', images=(images + 1) / 2, show_prog_bar=True) #save flam", "True, 'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False}, 8: {'name': 'norm_map_rend_flm_no_tex_interp', 'model_idx': '009000_1',", "if i == num_smpl_to_eval_on - 1: break batch_size = 64", "= fast_image_reshape(rend_flm, height_out=256, width_out=256, mode='bilinear') norma_map_img = fast_image_reshape(norma_map_img, height_out=256, width_out=256,", "4 resolution = 256 alpha = 1 step_max = int(np.log2(resolution)", "torch.clamp(mdl_1_gen_images, -1, 1).cpu().numpy() flame_mesh_imgs = torch.clamp(rend_flm, -1, 1).cpu().numpy() save_path_current_id =", "camera_params=cam) rend_flm = torch.clamp(rend_flm, 0, 1) * 2 - 1", "use_styled_conv_stylegan2 = True flength = 5000 cam_t = np.array([0., 0.,", "= np.zeros((num_smpl_to_eval_on, 3, resolution, resolution)).astype('float32') pbar = tqdm.tqdm(range(0, num_smpl_to_eval_on, batch_size))", "torch.clamp(rend_flm, -1, 1).cpu().numpy() save_path_current_id = os.path.join(root_out_dir, 'inter_model_comparison', settings_for_runs[run_idx]['name']) save_set_of_images(path=save_path_current_id, prefix=f'{mdl_id}_{batch_idx}',", "num_smpl_to_eval_on, batch_size)) pbar.set_description('Generating_images') 
flame_mesh_imgs = None mdl_id = 'mdl2_' if", "my_utils.generate_gif import generate_from_flame_sequence from my_utils.generic_utils import save_set_of_images from my_utils import", "= fast_image_reshape(norma_map_img, height_out=256, width_out=256, mode='bilinear') else: rend_flm = None norma_map_img", "'flm_rndr_tex_interp', 'model_idx': '051000_1', 'normal_maps_as_cond': False, 'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False}, 3:", "np.array([0., 0., 0]) camera_params = camera_ringnetpp((512, 512), trans=cam_t, focal=flength) run_ids_1", "mdl_id = 'mdl2_' if settings_for_runs[run_idx]['name'] == 'full_model': mdl_id = 'mdl1_'", "import position_to_given_location def ge_gen_in(flm_params, textured_rndr, norm_map, normal_map_cond, texture_cond): if normal_map_cond", "'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},} overlay_visualizer = OverLayViz() # overlay_visualizer.setup_renderer(mesh_file=None) flm_params", "import ipdb; ipdb.set_trace() light_code = \\ flm_batch[:, constants.DECA_IDX['lit'][0]:constants.DECA_IDX['lit'][1]:].view((batch_size_true, 9, 3))", "= torch.clamp(mdl_1_gen_images, -1, 1).cpu().numpy() flame_mesh_imgs = torch.clamp(rend_flm, -1, 1).cpu().numpy() save_path_current_id", "'norm_mp_tex_interp', 'model_idx': '203000_1', 'normal_maps_as_cond': True, 'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False}, 8:", "'full_model': mdl_id = 'mdl1_' for batch_idx in pbar: flm_batch =", "False, 'apply_sqrt2_fac_in_eq_lin': False}, 29: {'name': 'full_model', 'model_idx': '294000_1', 'normal_maps_as_cond': True,", "3] # run_ids_1 = [7, 8, 3] settings_for_runs = \\", "from my_utils.generic_utils import save_set_of_images from my_utils import compute_fid import constants", "(camera_params['c'][0] * flame_param[:, 156:157]) # flame_param[:, 156:159] = np.concatenate((flame_param[:, 157:],", "save_set_of_images(path=save_path_current_id_flm_rndr, prefix=f'mesh_{batch_idx}', 
images=(flame_mesh_imgs + 1) / 2, show_prog_bar=True) # save_set_of_images(path=save_path_this_expt,", "normal_map_cond and texture_cond: return torch.cat((textured_rndr, norm_map), dim=1) elif normal_map_cond: return", "\\ {24: {'name': 'vector_cond', 'model_idx': '216000_1', 'normal_maps_as_cond': False, 'rendered_flame_as_condition': False,", "norma_map_img = fast_image_reshape(norma_map_img, height_out=256, width_out=256, mode='bilinear') else: rend_flm = None", "# flame_param[:, 156:159] = np.concatenate((flame_param[:, 157:], tz), axis=1) # import", "# save_set_of_images(path=save_path_this_expt, prefix='mdl1_', images=((mdl_1_gen_images + 1) / 2).cpu().numpy()) # save_set_of_images(path=save_path_this_expt,", "generic_utils from my_utils.eye_centering import position_to_given_location def ge_gen_in(flm_params, textured_rndr, norm_map, normal_map_cond,", "# overlay_visualizer.setup_renderer(mesh_file=None) flm_params = np.zeros((num_smpl_to_eval_on, code_size)).astype('float32') fl_param_dict = np.load(cnst.all_flame_params_file, allow_pickle=True).item()", "os.path.join(root_out_dir, 'inter_model_comparison', settings_for_runs[run_idx]['name']) save_set_of_images(path=save_path_current_id, prefix=f'{mdl_id}_{batch_idx}', images=(images + 1) / 2,", "import ipdb; ipdb.set_trace() images = torch.clamp(mdl_1_gen_images, -1, 1).cpu().numpy() flame_mesh_imgs =", "pbar: flm_batch = flm_params[batch_idx:batch_idx+batch_size, :] flm_batch = torch.from_numpy(flm_batch).cuda() flm_batch =", "True flength = 5000 cam_t = np.array([0., 0., 0]) camera_params", "code_size = 236 use_inst_norm = True core_tensor_res = 4 resolution", "[29, ] # with sqrt(2) # run_ids_1 = [7, 24,", "for i, key in enumerate(fl_param_dict): flame_param = fl_param_dict[key] flame_param =", "\\ flm_batch[:, constants.DECA_IDX['lit'][0]:constants.DECA_IDX['lit'][1]:].view((batch_size_true, 9, 3)) texture_code = flm_batch[:, constants.DECA_IDX['tex'][0]:constants.DECA_IDX['tex'][1]:] norma_map_img,", 
"1) / 2, show_prog_bar=True) #save flam rndr save_path_current_id_flm_rndr = os.path.join(root_out_dir,", "gen_1_in = ge_gen_in(flm_batch, rend_flm, norma_map_img, settings_for_runs[run_idx]['normal_maps_as_cond'], settings_for_runs[run_idx]['rendered_flame_as_condition']) # torch.manual_seed(2) identity_embeddings", "fl_param_dict = np.load(cnst.all_flame_params_file, allow_pickle=True).item() for i, key in enumerate(fl_param_dict): flame_param", "from my_utils.eye_centering import position_to_given_location def ge_gen_in(flm_params, textured_rndr, norm_map, normal_map_cond, texture_cond):", "generator_1.load_state_dict(ckpt1['generator_running']) generator_1 = generator_1.eval() # images = np.zeros((num_smpl_to_eval_on, 3, resolution,", "else: return flm_params # General settings save_images = True code_size", "f'{cnst.output_root}sample/' num_smpl_to_eval_on = 1000 use_styled_conv_stylegan2 = True flength = 5000", "{'name': 'full_model', 'model_idx': '294000_1', 'normal_maps_as_cond': True, 'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': True},", "return textured_rndr else: return flm_params # General settings save_images =", "= 'mdl1_' for batch_idx in pbar: flm_batch = flm_params[batch_idx:batch_idx+batch_size, :]", "images = np.zeros((num_smpl_to_eval_on, 3, resolution, resolution)).astype('float32') pbar = tqdm.tqdm(range(0, num_smpl_to_eval_on,", "i, key in enumerate(fl_param_dict): flame_param = fl_param_dict[key] flame_param = np.hstack((flame_param['shape'],", "model.stg2_generator import StyledGenerator import numpy as np from my_utils.visualize_flame_overlay import", "fast_image_reshape(norma_map_img, height_out=256, width_out=256, mode='bilinear') else: rend_flm = None norma_map_img =", "torch.cat((textured_rndr, norm_map), dim=1) elif normal_map_cond: return norm_map elif texture_cond: return", "generator_1 = generator_1.eval() # images = np.zeros((num_smpl_to_eval_on, 3, resolution, resolution)).astype('float32')", 
"prefix=f'mesh_{batch_idx}', images=(flame_mesh_imgs + 1) / 2, show_prog_bar=True) # save_set_of_images(path=save_path_this_expt, prefix='mesh_',", "== 'full_model': mdl_id = 'mdl1_' for batch_idx in pbar: flm_batch", "texture_cond: return torch.cat((textured_rndr, norm_map), dim=1) elif normal_map_cond: return norm_map elif", "* flame_param[:, 156:157]) # flame_param[:, 156:159] = np.concatenate((flame_param[:, 157:], tz),", "ipdb; ipdb.set_trace() generator_1 = torch.nn.DataParallel( StyledGenerator(embedding_vocab_size=69158, rendered_flame_ascondition=settings_for_runs[run_idx]['rendered_flame_as_condition'], normal_maps_as_cond=settings_for_runs[run_idx]['normal_maps_as_cond'], core_tensor_res=core_tensor_res, w_truncation_factor=1.0,", "from my_utils import compute_fid import constants from dataset_loaders import fast_image_reshape", "= flm_batch.shape[0] if settings_for_runs[run_idx]['normal_maps_as_cond'] or \\ settings_for_runs[run_idx]['rendered_flame_as_condition']: cam = flm_batch[:,", "[7, 24, 8, 3] # run_ids_1 = [7, 8, 3]", "numpy as np from my_utils.visualize_flame_overlay import OverLayViz from my_utils.flm_dynamic_fit_overlay import", "batch_idx in pbar: flm_batch = flm_params[batch_idx:batch_idx+batch_size, :] flm_batch = torch.from_numpy(flm_batch).cuda()", "'216000_1', 'normal_maps_as_cond': False, 'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False}, 29: {'name': 'full_model',", "import ipdb; ipdb.set_trace() generator_1 = torch.nn.DataParallel( StyledGenerator(embedding_vocab_size=69158, rendered_flame_ascondition=settings_for_runs[run_idx]['rendered_flame_as_condition'], normal_maps_as_cond=settings_for_runs[run_idx]['normal_maps_as_cond'], core_tensor_res=core_tensor_res,", "1 norma_map_img = torch.clamp(norma_map_img, 0, 1) * 2 - 1", "images=(images + 1) / 2, show_prog_bar=True) #save flam rndr save_path_current_id_flm_rndr", "5000 cam_t = np.array([0., 0., 0]) camera_params = camera_ringnetpp((512, 512),", "\\ 
settings_for_runs[run_idx]['rendered_flame_as_condition']: cam = flm_batch[:, constants.DECA_IDX['cam'][0]:constants.DECA_IDX['cam'][1]:] shape = flm_batch[:, constants.INDICES['SHAPE'][0]:constants.INDICES['SHAPE'][1]]", "camera_params['f'][0] / (camera_params['c'][0] * flame_param[:, 156:157]) # flame_param[:, 156:159] =", "OverLayViz from my_utils.flm_dynamic_fit_overlay import camera_ringnetpp from my_utils.generate_gif import generate_from_flame_sequence from", "for batch_idx in pbar: flm_batch = flm_params[batch_idx:batch_idx+batch_size, :] flm_batch =", "False}, 8: {'name': 'norm_map_rend_flm_no_tex_interp', 'model_idx': '009000_1', 'normal_maps_as_cond': True, 'rendered_flame_as_condition': True,", "identity_embeddings = torch.randint(low=0, high=69158, size=(gen_1_in.shape[0], ), dtype=torch.long, device='cuda') mdl_1_gen_images =", "alpha=alpha, input_indices=identity_embeddings.cpu().numpy()) # import ipdb; ipdb.set_trace() images = torch.clamp(mdl_1_gen_images, -1,", "with sqrt(2) # run_ids_1 = [7, 24, 8, 3] #", "key in enumerate(fl_param_dict): flame_param = fl_param_dict[key] flame_param = np.hstack((flame_param['shape'], flame_param['exp'],", "None mdl_id = 'mdl2_' if settings_for_runs[run_idx]['name'] == 'full_model': mdl_id =", "return norm_map elif texture_cond: return textured_rndr else: return flm_params #", "236 use_inst_norm = True core_tensor_res = 4 resolution = 256", "my_utils.eye_centering import position_to_given_location def ge_gen_in(flm_params, textured_rndr, norm_map, normal_map_cond, texture_cond): if", "generator_1.eval() # images = np.zeros((num_smpl_to_eval_on, 3, resolution, resolution)).astype('float32') pbar =", "batch_size = 64 flame_decoder = overlay_visualizer.deca.flame.eval() for run_idx in run_ids_1:", "height_out=256, width_out=256, mode='bilinear') else: rend_flm = None norma_map_img = None", "'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False}, 29: {'name': 'full_model', 'model_idx': '294000_1', 
'normal_maps_as_cond':", "= flm_batch[:, constants.INDICES['EXP'][0]:constants.INDICES['EXP'][1]] pose = flm_batch[:, constants.INDICES['POSE'][0]:constants.INDICES['POSE'][1]] # import ipdb;", "sys.path.append('../../') import constants as cnst import os os.environ['PYTHONHASHSEED'] = '2'", "flm_batch[:, constants.INDICES['SHAPE'][0]:constants.INDICES['SHAPE'][1]] exp = flm_batch[:, constants.INDICES['EXP'][0]:constants.INDICES['EXP'][1]] pose = flm_batch[:, constants.INDICES['POSE'][0]:constants.INDICES['POSE'][1]]", "flame_param[:, 156:157]) # flame_param[:, 156:159] = np.concatenate((flame_param[:, 157:], tz), axis=1)", "sys sys.path.append('../../') import constants as cnst import os os.environ['PYTHONHASHSEED'] =", "True, 'apply_sqrt2_fac_in_eq_lin': True}, 7: {'name': 'flm_rndr_tex_interp', 'model_idx': '051000_1', 'normal_maps_as_cond': False,", "settings_for_runs[run_idx]['name']) save_set_of_images(path=save_path_current_id, prefix=f'{mdl_id}_{batch_idx}', images=(images + 1) / 2, show_prog_bar=True) #save", "texture_cond: return textured_rndr else: return flm_params # General settings save_images", "_, _, rend_flm = \\ overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code, texture_code),", "from my_utils.flm_dynamic_fit_overlay import camera_ringnetpp from my_utils.generate_gif import generate_from_flame_sequence from my_utils.generic_utils", "= camera_ringnetpp((512, 512), trans=cam_t, focal=flength) run_ids_1 = [29, ] #", "mode='bilinear') norma_map_img = fast_image_reshape(norma_map_img, height_out=256, width_out=256, mode='bilinear') else: rend_flm =", "= None norma_map_img = None gen_1_in = ge_gen_in(flm_batch, rend_flm, norma_map_img,", "0]) camera_params = camera_ringnetpp((512, 512), trans=cam_t, focal=flength) run_ids_1 = [29,", "images=(flame_mesh_imgs + 1) / 2, show_prog_bar=True) # save_set_of_images(path=save_path_this_expt, prefix='mesh_', images=((norma_map_img", "2) root_out_dir = f'{cnst.output_root}sample/' 
num_smpl_to_eval_on = 1000 use_styled_conv_stylegan2 = True", "pose, light_code, texture_code), camera_params=cam) rend_flm = torch.clamp(rend_flm, 0, 1) *" ]
[ "'Topic :: Software Development :: Libraries :: Python Modules', ],", "for users', author = '<NAME>', author_email = '<EMAIL>', url='http://github.com/philippWassibauer/django-activity-stream', download_url='http://github.com/philippWassibauer/django-activity-stream/tarball/master',", "instalation script \"\"\" setup( name = 'activity_stream', description = 'generic", "= __import__('activity_stream').__version__, classifiers=[ 'Development Status :: 3 - Alpha', 'Environment", "setup( name = 'activity_stream', description = 'generic activity feed system", "<reponame>philippWassibauer/django-activity-stream from distutils.core import setup \"\"\" django-activity-stream instalation script \"\"\"", "MIT License', 'Operating System :: OS Independent', 'Programming Language ::", "'License :: OSI Approved :: MIT License', 'Operating System ::", "'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI", ":: OSI Approved :: MIT License', 'Operating System :: OS", "script \"\"\" setup( name = 'activity_stream', description = 'generic activity", "name = 'activity_stream', description = 'generic activity feed system for", "description = 'generic activity feed system for users', author =", "Status :: 3 - Alpha', 'Environment :: Web Environment', 'Framework", "download_url='http://github.com/philippWassibauer/django-activity-stream/tarball/master', license='MIT', version = __import__('activity_stream').__version__, classifiers=[ 'Development Status :: 3", "feed system for users', author = '<NAME>', author_email = '<EMAIL>',", ":: Software Development :: Libraries :: Python Modules', ], )", "'Environment :: Web Environment', 'Framework :: Django', 'Intended Audience ::", "Python', 'Topic :: Software Development :: Libraries :: Python Modules',", "'Intended Audience :: Developers', 'License :: OSI Approved :: MIT", ":: Web Environment', 'Framework :: Django', 'Intended Audience :: Developers',", "Django', 'Intended Audience :: Developers', 'License :: OSI 
Approved ::", "url='http://github.com/philippWassibauer/django-activity-stream', download_url='http://github.com/philippWassibauer/django-activity-stream/tarball/master', license='MIT', version = __import__('activity_stream').__version__, classifiers=[ 'Development Status ::", "'Programming Language :: Python', 'Topic :: Software Development :: Libraries", "version = __import__('activity_stream').__version__, classifiers=[ 'Development Status :: 3 - Alpha',", "__import__('activity_stream').__version__, classifiers=[ 'Development Status :: 3 - Alpha', 'Environment ::", "'activity_stream', description = 'generic activity feed system for users', author", "'Development Status :: 3 - Alpha', 'Environment :: Web Environment',", "distutils.core import setup \"\"\" django-activity-stream instalation script \"\"\" setup( name", "\"\"\" setup( name = 'activity_stream', description = 'generic activity feed", ":: Developers', 'License :: OSI Approved :: MIT License', 'Operating", "Developers', 'License :: OSI Approved :: MIT License', 'Operating System", "'Operating System :: OS Independent', 'Programming Language :: Python', 'Topic", "author = '<NAME>', author_email = '<EMAIL>', url='http://github.com/philippWassibauer/django-activity-stream', download_url='http://github.com/philippWassibauer/django-activity-stream/tarball/master', license='MIT', version", ":: MIT License', 'Operating System :: OS Independent', 'Programming Language", "= 'generic activity feed system for users', author = '<NAME>',", "Language :: Python', 'Topic :: Software Development :: Libraries ::", "'<NAME>', author_email = '<EMAIL>', url='http://github.com/philippWassibauer/django-activity-stream', download_url='http://github.com/philippWassibauer/django-activity-stream/tarball/master', license='MIT', version = __import__('activity_stream').__version__,", "Independent', 'Programming Language :: Python', 'Topic :: Software Development ::", "from distutils.core import setup \"\"\" django-activity-stream 
instalation script \"\"\" setup(", "setup \"\"\" django-activity-stream instalation script \"\"\" setup( name = 'activity_stream',", ":: Django', 'Intended Audience :: Developers', 'License :: OSI Approved", "OSI Approved :: MIT License', 'Operating System :: OS Independent',", "Environment', 'Framework :: Django', 'Intended Audience :: Developers', 'License ::", "Audience :: Developers', 'License :: OSI Approved :: MIT License',", "Alpha', 'Environment :: Web Environment', 'Framework :: Django', 'Intended Audience", "Web Environment', 'Framework :: Django', 'Intended Audience :: Developers', 'License", "Approved :: MIT License', 'Operating System :: OS Independent', 'Programming", "'<EMAIL>', url='http://github.com/philippWassibauer/django-activity-stream', download_url='http://github.com/philippWassibauer/django-activity-stream/tarball/master', license='MIT', version = __import__('activity_stream').__version__, classifiers=[ 'Development Status", "System :: OS Independent', 'Programming Language :: Python', 'Topic ::", "author_email = '<EMAIL>', url='http://github.com/philippWassibauer/django-activity-stream', download_url='http://github.com/philippWassibauer/django-activity-stream/tarball/master', license='MIT', version = __import__('activity_stream').__version__, classifiers=[", "OS Independent', 'Programming Language :: Python', 'Topic :: Software Development", "system for users', author = '<NAME>', author_email = '<EMAIL>', url='http://github.com/philippWassibauer/django-activity-stream',", ":: OS Independent', 'Programming Language :: Python', 'Topic :: Software", "users', author = '<NAME>', author_email = '<EMAIL>', url='http://github.com/philippWassibauer/django-activity-stream', download_url='http://github.com/philippWassibauer/django-activity-stream/tarball/master', license='MIT',", "= '<NAME>', author_email = '<EMAIL>', url='http://github.com/philippWassibauer/django-activity-stream', 
download_url='http://github.com/philippWassibauer/django-activity-stream/tarball/master', license='MIT', version =", "\"\"\" django-activity-stream instalation script \"\"\" setup( name = 'activity_stream', description", "- Alpha', 'Environment :: Web Environment', 'Framework :: Django', 'Intended", "license='MIT', version = __import__('activity_stream').__version__, classifiers=[ 'Development Status :: 3 -", "= '<EMAIL>', url='http://github.com/philippWassibauer/django-activity-stream', download_url='http://github.com/philippWassibauer/django-activity-stream/tarball/master', license='MIT', version = __import__('activity_stream').__version__, classifiers=[ 'Development", ":: 3 - Alpha', 'Environment :: Web Environment', 'Framework ::", "3 - Alpha', 'Environment :: Web Environment', 'Framework :: Django',", "classifiers=[ 'Development Status :: 3 - Alpha', 'Environment :: Web", ":: Python', 'Topic :: Software Development :: Libraries :: Python", "import setup \"\"\" django-activity-stream instalation script \"\"\" setup( name =", "django-activity-stream instalation script \"\"\" setup( name = 'activity_stream', description =", "License', 'Operating System :: OS Independent', 'Programming Language :: Python',", "'generic activity feed system for users', author = '<NAME>', author_email", "activity feed system for users', author = '<NAME>', author_email =", "= 'activity_stream', description = 'generic activity feed system for users'," ]
[ "el.send_keys(username) el = browser.find_element_by_id(\"pw\") el.clear() el.send_keys(password) form = browser.find_element_by_css_selector(\"input.btn_global[type=submit]\") form.submit()", "import webdriver username = \"henlix\" password = \"<PASSWORD>\" browser =", "= \"https://order.pay.naver.com/home?tabMenu=SHOPPING\" browser.get(url_shopping_list) products = browser.find_elements_by_css_selector(\".p_info span\") for product in", "= \"<PASSWORD>\" browser = webdriver.PhantomJS() browser.implicitly_wait(5) url_login = \"https://nid.naver.com/nidlogin.login\" browser.get(url_login)", "form.submit() url_shopping_list = \"https://order.pay.naver.com/home?tabMenu=SHOPPING\" browser.get(url_shopping_list) products = browser.find_elements_by_css_selector(\".p_info span\") for", "el = browser.find_element_by_id(\"pw\") el.clear() el.send_keys(password) form = browser.find_element_by_css_selector(\"input.btn_global[type=submit]\") form.submit() url_shopping_list", "browser.get(url_login) el = browser.find_element_by_id(\"id\") el.clear() el.send_keys(username) el = browser.find_element_by_id(\"pw\") el.clear()", "= browser.find_element_by_id(\"id\") el.clear() el.send_keys(username) el = browser.find_element_by_id(\"pw\") el.clear() el.send_keys(password) form", "for product in products: print(\"- \", product.text) # PYTHONIOENCODING=utf-8:surrogateescape python3", "\"https://order.pay.naver.com/home?tabMenu=SHOPPING\" browser.get(url_shopping_list) products = browser.find_elements_by_css_selector(\".p_info span\") for product in products:", "browser = webdriver.PhantomJS() browser.implicitly_wait(5) url_login = \"https://nid.naver.com/nidlogin.login\" browser.get(url_login) el =", "el.clear() el.send_keys(password) form = browser.find_element_by_css_selector(\"input.btn_global[type=submit]\") form.submit() url_shopping_list = \"https://order.pay.naver.com/home?tabMenu=SHOPPING\" browser.get(url_shopping_list)", "from selenium import webdriver username = 
\"henlix\" password = \"<PASSWORD>\"", "username = \"henlix\" password = \"<PASSWORD>\" browser = webdriver.PhantomJS() browser.implicitly_wait(5)", "= browser.find_element_by_css_selector(\"input.btn_global[type=submit]\") form.submit() url_shopping_list = \"https://order.pay.naver.com/home?tabMenu=SHOPPING\" browser.get(url_shopping_list) products = browser.find_elements_by_css_selector(\".p_info", "<gh_stars>1-10 from selenium import webdriver username = \"henlix\" password =", "products = browser.find_elements_by_css_selector(\".p_info span\") for product in products: print(\"- \",", "browser.find_element_by_id(\"pw\") el.clear() el.send_keys(password) form = browser.find_element_by_css_selector(\"input.btn_global[type=submit]\") form.submit() url_shopping_list = \"https://order.pay.naver.com/home?tabMenu=SHOPPING\"", "el.clear() el.send_keys(username) el = browser.find_element_by_id(\"pw\") el.clear() el.send_keys(password) form = browser.find_element_by_css_selector(\"input.btn_global[type=submit]\")", "= browser.find_elements_by_css_selector(\".p_info span\") for product in products: print(\"- \", product.text)", "= \"henlix\" password = \"<PASSWORD>\" browser = webdriver.PhantomJS() browser.implicitly_wait(5) url_login", "webdriver.PhantomJS() browser.implicitly_wait(5) url_login = \"https://nid.naver.com/nidlogin.login\" browser.get(url_login) el = browser.find_element_by_id(\"id\") el.clear()", "el = browser.find_element_by_id(\"id\") el.clear() el.send_keys(username) el = browser.find_element_by_id(\"pw\") el.clear() el.send_keys(password)", "url_shopping_list = \"https://order.pay.naver.com/home?tabMenu=SHOPPING\" browser.get(url_shopping_list) products = browser.find_elements_by_css_selector(\".p_info span\") for product", "= webdriver.PhantomJS() browser.implicitly_wait(5) url_login = \"https://nid.naver.com/nidlogin.login\" browser.get(url_login) el = browser.find_element_by_id(\"id\")", "browser.find_elements_by_css_selector(\".p_info span\") for 
product in products: print(\"- \", product.text) #", "\"https://nid.naver.com/nidlogin.login\" browser.get(url_login) el = browser.find_element_by_id(\"id\") el.clear() el.send_keys(username) el = browser.find_element_by_id(\"pw\")", "browser.find_element_by_id(\"id\") el.clear() el.send_keys(username) el = browser.find_element_by_id(\"pw\") el.clear() el.send_keys(password) form =", "password = \"<PASSWORD>\" browser = webdriver.PhantomJS() browser.implicitly_wait(5) url_login = \"https://nid.naver.com/nidlogin.login\"", "browser.get(url_shopping_list) products = browser.find_elements_by_css_selector(\".p_info span\") for product in products: print(\"-", "selenium import webdriver username = \"henlix\" password = \"<PASSWORD>\" browser", "\"henlix\" password = \"<PASSWORD>\" browser = webdriver.PhantomJS() browser.implicitly_wait(5) url_login =", "product in products: print(\"- \", product.text) # PYTHONIOENCODING=utf-8:surrogateescape python3 selenium.02.py", "url_login = \"https://nid.naver.com/nidlogin.login\" browser.get(url_login) el = browser.find_element_by_id(\"id\") el.clear() el.send_keys(username) el", "el.send_keys(password) form = browser.find_element_by_css_selector(\"input.btn_global[type=submit]\") form.submit() url_shopping_list = \"https://order.pay.naver.com/home?tabMenu=SHOPPING\" browser.get(url_shopping_list) products", "span\") for product in products: print(\"- \", product.text) # PYTHONIOENCODING=utf-8:surrogateescape", "browser.find_element_by_css_selector(\"input.btn_global[type=submit]\") form.submit() url_shopping_list = \"https://order.pay.naver.com/home?tabMenu=SHOPPING\" browser.get(url_shopping_list) products = browser.find_elements_by_css_selector(\".p_info span\")", "= browser.find_element_by_id(\"pw\") el.clear() el.send_keys(password) form = browser.find_element_by_css_selector(\"input.btn_global[type=submit]\") form.submit() url_shopping_list =", "browser.implicitly_wait(5) url_login = \"https://nid.naver.com/nidlogin.login\" 
browser.get(url_login) el = browser.find_element_by_id(\"id\") el.clear() el.send_keys(username)", "webdriver username = \"henlix\" password = \"<PASSWORD>\" browser = webdriver.PhantomJS()", "\"<PASSWORD>\" browser = webdriver.PhantomJS() browser.implicitly_wait(5) url_login = \"https://nid.naver.com/nidlogin.login\" browser.get(url_login) el", "form = browser.find_element_by_css_selector(\"input.btn_global[type=submit]\") form.submit() url_shopping_list = \"https://order.pay.naver.com/home?tabMenu=SHOPPING\" browser.get(url_shopping_list) products =", "= \"https://nid.naver.com/nidlogin.login\" browser.get(url_login) el = browser.find_element_by_id(\"id\") el.clear() el.send_keys(username) el =" ]
[ "0] error = [0.6, 0.9, 0.4, 0, 0.9, 0] #Yerr", "at i position represents +/- error[i] variance on bar[i] value", "9, 0] error = [0.6, 0.9, 0.4, 0, 0.9, 0]", "matplotlib import pyplot as plt drinks = [\"cappuccino\", \"latte\", \"chai\",", "[0.6, 0.9, 0.4, 0, 0.9, 0] #Yerr -> element at", "from matplotlib import pyplot as plt drinks = [\"cappuccino\", \"latte\",", "<filename>visualization/matplotlib/barwitherror.py<gh_stars>0 from matplotlib import pyplot as plt drinks = [\"cappuccino\",", "as plt drinks = [\"cappuccino\", \"latte\", \"chai\", \"americano\", \"mocha\", \"espresso\"]", "position represents +/- error[i] variance on bar[i] value plt.bar( range(len(drinks)),ounces_of_milk,", "9, 4, 0, 9, 0] error = [0.6, 0.9, 0.4,", "0.4, 0, 0.9, 0] #Yerr -> element at i position", "0, 0.9, 0] #Yerr -> element at i position represents", "represents +/- error[i] variance on bar[i] value plt.bar( range(len(drinks)),ounces_of_milk, yerr=error,", "[\"cappuccino\", \"latte\", \"chai\", \"americano\", \"mocha\", \"espresso\"] ounces_of_milk = [6, 9,", "0, 9, 0] error = [0.6, 0.9, 0.4, 0, 0.9,", "= [\"cappuccino\", \"latte\", \"chai\", \"americano\", \"mocha\", \"espresso\"] ounces_of_milk = [6,", "ounces_of_milk = [6, 9, 4, 0, 9, 0] error =", "#Yerr -> element at i position represents +/- error[i] variance", "error[i] variance on bar[i] value plt.bar( range(len(drinks)),ounces_of_milk, yerr=error, capsize=15) plt.show()", "\"mocha\", \"espresso\"] ounces_of_milk = [6, 9, 4, 0, 9, 0]", "error = [0.6, 0.9, 0.4, 0, 0.9, 0] #Yerr ->", "drinks = [\"cappuccino\", \"latte\", \"chai\", \"americano\", \"mocha\", \"espresso\"] ounces_of_milk =", "4, 0, 9, 0] error = [0.6, 0.9, 0.4, 0,", "0] #Yerr -> element at i position represents +/- error[i]", "+/- error[i] variance on bar[i] value plt.bar( range(len(drinks)),ounces_of_milk, yerr=error, capsize=15)", "-> element at i position represents +/- error[i] variance on", "import pyplot as plt drinks = [\"cappuccino\", \"latte\", \"chai\", 
\"americano\",", "0.9, 0.4, 0, 0.9, 0] #Yerr -> element at i", "plt drinks = [\"cappuccino\", \"latte\", \"chai\", \"americano\", \"mocha\", \"espresso\"] ounces_of_milk", "\"espresso\"] ounces_of_milk = [6, 9, 4, 0, 9, 0] error", "i position represents +/- error[i] variance on bar[i] value plt.bar(", "= [0.6, 0.9, 0.4, 0, 0.9, 0] #Yerr -> element", "0.9, 0] #Yerr -> element at i position represents +/-", "[6, 9, 4, 0, 9, 0] error = [0.6, 0.9,", "\"americano\", \"mocha\", \"espresso\"] ounces_of_milk = [6, 9, 4, 0, 9,", "\"chai\", \"americano\", \"mocha\", \"espresso\"] ounces_of_milk = [6, 9, 4, 0,", "element at i position represents +/- error[i] variance on bar[i]", "= [6, 9, 4, 0, 9, 0] error = [0.6,", "pyplot as plt drinks = [\"cappuccino\", \"latte\", \"chai\", \"americano\", \"mocha\",", "\"latte\", \"chai\", \"americano\", \"mocha\", \"espresso\"] ounces_of_milk = [6, 9, 4," ]
[ "CONSTANTS \"\"\" DATE_FORMAT = \"%Y-%m-%dT%H:%M:%SZ\" PRETTY_DATE_FORMAT = \"%b %d, %Y,", "not specified\") tag = {\"key\": key, \"value\": value} result =", "}, \"rawJSON\": json.dumps(case), } ) if result[\"data\"]: last_run[\"start_time\"] = str(result[\"data\"][-1][\"createdTimestamp\"]", "({result['data']['mimeType']}, {result['data']['size']} bytes)\\n\\n\" readable_output += f\"_id: {result['data']['id']}_\\n\" return CommandResults( readable_output=readable_output,", "import session as argus_session from argus_api.api.currentuser.v1.user import get_current_user from argus_api.api.cases.v2.case", "if isinstance(date_time, datetime): return date_time.strftime(PRETTY_DATE_FORMAT) if isinstance(date_time, str): return pretty_print_date(dateparser.parse(date_time))", ") def find_aggregated_events_command(args: Dict[str, Any]) -> CommandResults: # noinspection PyTypeChecker", "int: if isinstance(date_time, datetime): return int(date_time.timestamp() * 1000) if isinstance(date_time,", "2, } return mapping.get(status, 0) def build_argus_priority_from_min_severity(min_severity: str) -> List[str]:", "return string def pretty_print_comment(comment: dict, title: str = None) ->", "None, proxies: dict = None, verify: bool = None ):", "status=args.get(\"status\", None), priority=args.get(\"priority\", None), category=args.get(\"category\", None), reporter=args.get(\"reporter\", None), assignedUser=args.get(\"assigned_user\", None),", "sourcePort=argToList(args.get(\"source_port\", None)), port=argToList(args.get(\"port\", None)), minSeverity=args.get(\"min_severity\", None), maxSeverity=args.get(\"max_severity\", None), limit=args.get(\"limit\", 25),", "not event_id: raise ValueError(\"event id not specified\") result = get_payload(", "\"# Find events\\n\"), outputs_prefix=\"Argus.Events\", outputs=result, raw_response=result, ) def list_aggregated_events_command(args: Dict[str,", "customerID=customer_id, eventID=event_id ) return CommandResults( 
readable_output=tableToMarkdown(f\"Event: {event_id}\", result[\"data\"]), outputs_prefix=\"Argus.Event\", outputs=result,", "CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result, ) def create_case_command(args: Dict[str, Any])", "return mapping.get(priority, 0) def argus_status_to_demisto_status(status: str) -> int: mapping =", "case[\"createdTimestamp\"], \"customer\": case[\"customer\"][\"shortName\"], }, \"rawJSON\": json.dumps(case), } ) if result[\"data\"]:", "== \"fetch-incidents\": # Set and define the fetch incidents command", "to run after activated via integration settings. next_run, incidents =", "\"id\"] readable_output = tableToMarkdown( f\"#{case_id}: Tags\", result[\"data\"], headers=headers ) return", "rrType=argToList(args.get(\"rr_type\", None)), customerID=argToList(args.get(\"customer_id\", None)), tlp=argToList((args.get(\"tlp\", None))), limit=args.get(\"limit\", 25), offset=args.get(\"offset\", None),", "% 2 != 0: return {} return {lst[i]: lst[i +", "communicate with Argus API {response['responseCode']}, {response}\" ) def fetch_incidents( last_run:", "def list_case_attachments_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None)", "None), reporter=args.get(\"reporter\", None), assignedUser=args.get(\"assigned_user\", None), assignedTech=args.get(\"assigned_tech\", None), customerReference=args.get(\"customer_reference\", None), comment=args.get(\"comment\",", "get_case_metadata_by_id, list_case_attachments, list_case_tags, list_case_comments, remove_case_tag_by_id, remove_case_tag_by_key_value, update_case, ) from argus_api.api.events.v1", "priority=args.get(\"priority\", None), category=args.get(\"category\", None), reporter=args.get(\"reporter\", None), assignedUser=args.get(\"assigned_user\", None), assignedTech=args.get(\"assigned_tech\", None),", "- {attachment['addedTime']}*\\n\" readable_output += f\"{attachment['name']} 
({attachment['mimeType']}, {attachment['size']} kb)\\n\\n\" readable_output +=", "list must be of even number\", tags) tags = build_tags_from_list(tags)", "return_results(list_case_attachments_command(demisto.args())) elif demisto.command() == \"argus-list-case-tags\": return_results(list_case_tags_command(demisto.args())) elif demisto.command() == \"argus-list-case-comments\":", "logging from argus_api import session as argus_session from argus_api.api.currentuser.v1.user import", "lastUpdatedTimestamp=args.get(\"last_updated_timestamp\", None), indexStartTime=args.get(\"index_start_time\", None), indexEndTime=args.get(\"index_end_time\", None), destinationIP=argToList(args.get(\"destination_ip\", None)), sourceIP=argToList(args.get(\"source_ip\", None)),", "None) if not ip: raise ValueError(\"ip not specified\") result =", "user=argToList(args.get(\"user\", None)), userFieldStrategy=argToList(args.get(\"user_field_strategy\", None)), userAssigned=args.get(\"user_assigned\", None), techAssigned=args.get(\"tech_assigned\", None), includeWorkflows=args.get(\"include_workflows\", None),", "demisto.command() == \"argus-list-case-tags\": return_results(list_case_tags_command(demisto.args())) elif demisto.command() == \"argus-list-case-comments\": return_results(list_case_comments_command(demisto.args())) elif", "offset=args.get(\"offset\", None), includeDeleted=args.get(\"include_deleted\", None), startTimestamp=args.get(\"start_timestamp\", \"-24hours\"), endTimestamp=args.get(\"end_timestamp\", \"now\"), sortBy=argToList(args.get(\"sort_by\", None)),", "NIDS Events\\n\"), outputs_prefix=\"Argus.NIDS\", outputs=result, raw_response=result, ) def search_records_command(args: Dict[str, Any])", "\"Reported by {} at {}\\n\\n\".format( data[\"publishedByUser\"][\"name\"], pretty_print_date(data[\"publishedTime\"]) ) string +=", "specified\") if not description: raise ValueError(\"description not specified\") if not", "raise ValueError(\"attachment id not specified\") 
result = download_attachment(caseID=case_id, attachmentID=attachment_id) return", "[\"addedTimestamp\"] if sort_by == \"ascending\" else [\"-addedTimestamp\"] result = list_case_comments(", "f'Domain observations for \"{fqdn}\"', result[\"data\"] ), outputs_prefix=\"Argus.ObservationsDomain\", outputs=result, raw_response=result, )", "tlp=argToList((args.get(\"tlp\", None))), limit=args.get(\"limit\", 25), offset=args.get(\"offset\", None), ) return CommandResults( readable_output=tableToMarkdown(\"PDNS", "str) -> List[str]: severities = [\"low\", \"medium\", \"high\", \"critical\"] min_severity_list", "\"name\": f\"#{case['id']}: {case['subject']}\", \"occurred\": case[\"createdTime\"], \"severity\": argus_priority_to_demisto_severity(case[\"priority\"]), \"status\": argus_status_to_demisto_status(case[\"status\"]), \"details\":", "== \"argus-add-comment\": return_results(add_comment_command(demisto.args())) elif demisto.command() == \"argus-advanced-case-search\": return_results(advanced_case_search_command(demisto.args())) elif demisto.command()", "id not specified\") result = get_event_by_path( type=event_type, timestamp=timestamp, customerID=customer_id, eventID=event_id", "+= data[\"description\"] return string def pretty_print_comment(comment: dict, title: str =", "demisto.command() == \"argus-create-case\": return_results(create_case_command(demisto.args())) elif demisto.command() == \"argus-delete-case\": return_results(delete_case_command(demisto.args())) elif", ") def advanced_case_search_command(args: Dict[str, Any]) -> CommandResults: # noinspection PyTypeChecker", "service=argToList(args.get(\"service\", None)), category=argToList(args.get(\"category\", None)), status=argToList(args.get(\"status\", None)), priority=argToList(args.get(\"priority\", None)), assetID=argToList(args.get(\"asset_id\", None)),", "CommandResults( readable_output=pretty_print_comment( result[\"data\"], f\"# #{case_id}: Updated comment\\n\" ), outputs_prefix=\"Argus.Comment\", 
outputs=result,", "-> CommandResults: case_id = args.get(\"case_id\", None) comment_id = args.get(\"comment_id\", None)", "not event_type: raise ValueError(\"event type not specified\") if not timestamp:", "None)), timeFieldStrategy=argToList(args.get(\"time_field_strategy\", None)), timeMatchStrategy=args.get(\"time_match_strategy\", None), keywordFieldStrategy=argToList(args.get(\"keyword_field_strategy\", None)), keywordMatchStrategy=args.get(\"keyword_match_strategy\", None), user=argToList(args.get(\"user\",", "CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Tags\", outputs=result, raw_response=result, ) def add_comment_command(args: Dict[str, Any])", "specified\") if not comment_id: raise ValueError(\"comment id not specified\") if", "indexStartTime=args.get(\"index_start_time\", None), indexEndTime=args.get(\"index_end_time\", None), destinationIP=argToList(args.get(\"destination_ip\", None)), sourceIP=argToList(args.get(\"source_ip\", None)), ip=argToList(args.get(\"ip\", None)),", "ValueError(\"event id not specified\") result = get_payload( type=event_type, timestamp=timestamp, customerID=customer_id,", "None)), workflow=argToList(args.get(\"workflow\", None)), field=argToList(args.get(\"field\", None)), keywords=argToList(args.get(\"keywords\", None)), timeFieldStrategy=argToList(args.get(\"time_field_strategy\", None)), timeMatchStrategy=args.get(\"time_match_strategy\",", "raise ValueError(\"comment id not specified\") if not comment: raise ValueError(\"comment", "None) if not case_id: raise ValueError(\"case id not specified\") if", "case_id = args.get(\"case_id\", None) if not case_id: raise ValueError(\"case_id not", "= add_case_tag(caseID=case_id, tags=tag) headers = [\"key\", \"value\", \"addedTime\"] readable_output =", "destinationIP=argToList(args.get(\"destination_ip\", None)), sourceIP=argToList(args.get(\"source_ip\", None)), ip=argToList(args.get(\"ip\", None)), 
destinationPort=argToList(args.get(\"destination_port\", None)), sourcePort=argToList(args.get(\"source_port\", None)),", "= get_attachment(caseID=case_id, attachmentID=attachment_id) readable_output = f\"# #{case_id}: attachment metadata\\n\" readable_output", "not case_id: raise ValueError(\"case id not specified\") if not key:", "id not specified\") result = get_payload( type=event_type, timestamp=timestamp, customerID=customer_id, eventID=event_id", "\"\"\" MAIN FUNCTION \"\"\" def main() -> None: logging.getLogger(\"argus_cli\").setLevel(\"WARNING\") first_fetch_period", "outputs=result, raw_response=result, ) def delete_case_command(args: Dict[str, Any]) -> CommandResults: case_id", "#{case_id}: Updated comment\\n\" ), outputs_prefix=\"Argus.Comment\", outputs=result, raw_response=result, ) def get_attachment_command(args:", "-> Any: if isinstance(first_fetch, str): if first_fetch[0] != \"-\": first_fetch", "specified\") result = add_comment( caseID=case_id, comment=comment, asReplyTo=args.get(\"as_reply_to\", None), internal=args.get(\"internal\", None),", "Dict[str, Any]) -> CommandResults: event_type = args.get(\"type\", None) timestamp =", "specified\") if not timestamp: raise ValueError(\"timestamp not specified\") if not", "list_case_comments, remove_case_tag_by_id, remove_case_tag_by_key_value, update_case, ) from argus_api.api.events.v1 import get_event_by_path from", "query: raise ValueError(\"query not specified\") # noinspection PyTypeChecker result =", "= remove_case_tag_by_key_value(caseID=case_id, tagKey=key, tagValue=value) headers = [\"key\", \"value\", \"addedTime\", \"id\",", "case_id: raise ValueError(\"case_id not specified\") if not key: raise ValueError(\"key", "result = list_case_tags( caseID=case_id, limit=args.get(\"limit\", None), offset=args.get(\"offset\", None) ) headers", "= None) -> int: if isinstance(date_time, datetime): return int(date_time.timestamp() *", "raw_response=result, ) def download_attachment_command(args: 
Dict[str, Any]) -> Any: case_id =", "list_case_tags, list_case_comments, remove_case_tag_by_id, remove_case_tag_by_key_value, update_case, ) from argus_api.api.events.v1 import get_event_by_path", "raise ValueError(\"service not specified\") if not case_type: raise ValueError(\"case_type not", "Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error( f\"Failed", "if not case_id: raise ValueError(\"case_id not specified\") if sort_by: sort_by", "try: if demisto.command() == \"test-module\": # This is the call", "args.get(\"case_id\", None) comment_id = args.get(\"comment_id\", None) comment = args.get(\"comment\", None)", "outputs_prefix=\"Argus.PDNS\", outputs=result, raw_response=result, ) def fetch_observations_for_domain_command(args: Dict[str, Any]) -> CommandResults:", "= find_aggregated_events( skipFutureEvents=args.get(\"skip_future_events\", None), exclude=args.get(\"exclude\", None), locationID=argToList(args.get(\"location_id\", None)), severity=argToList(args.get(\"severity\", None)),", "= fetch_observations_for_i_p(ip=ip) return CommandResults( readable_output=tableToMarkdown(f'IP observations for \"{ip}\"', result[\"data\"]), outputs_prefix=\"Argus.ObservationsIP\",", "geoCountry=argToList(args.get(\"geo_country\", None)), properties=str_to_dict(args.get(\"properties\", None)), exactMatchProperties=args.get(\"exact_match_properties\", None), sensorID=argToList(args.get(\"sensor_id\", None)), subCriteria=argToList(args.get(\"sub_criteria\", None)),", "if len(tags) % 2 != 0: raise ValueError(\"tags list must", "specified\") result = edit_comment(caseID=case_id, commentID=comment_id, comment=comment) return CommandResults( readable_output=pretty_print_comment( result[\"data\"],", "Find NIDS Events\\n\"), outputs_prefix=\"Argus.NIDS\", outputs=result, raw_response=result, ) def list_nids_events_command(args: Dict[str,", "readable_output=readable_output, outputs_prefix=\"Argus.Attachments\", outputs=result, 
raw_response=result, ) def list_case_tags_command(args: Dict[str, Any]) ->", "caseID=case_id, limit=args.get(\"limit\", None), offset=args.get(\"offset\", None) ) return CommandResults( readable_output=pretty_print_events( dict(result),", "specified\") result = get_event_by_path( type=event_type, timestamp=timestamp, customerID=customer_id, eventID=event_id ) return", "else f\"# #{data['id']}: {data['subject']}\\n\" string += \"_Priority: {}, status: {},", "MAIN FUNCTION \"\"\" def main() -> None: logging.getLogger(\"argus_cli\").setLevel(\"WARNING\") first_fetch_period =", "return errors except Exception as e: demisto.error(traceback.format_exc()) # print the", "tag_id = args.get(\"tag_id\", None) if not case_id: raise ValueError(\"case id", "ValueError(\"case id not specified\") result = get_case_metadata_by_id( id=case_id, skipRedirect=args.get(\"skip_redirect\", None)", "if sort_by: sort_by = [\"addedTimestamp\"] if sort_by == \"ascending\" else", "None), offset=args.get(\"offset\", None), limit=args.get(\"limit\", None), sortBy=sort_by, ) return CommandResults( readable_output=pretty_print_comments(", "if title else \"\" string += \"_Count: {}, showing {}", "\"Output not suitable for playground\", result[\"data\"] ) return CommandResults( readable_output=readable_output,", "not comment: raise ValueError(\"comment not specified\") result = edit_comment(caseID=case_id, commentID=comment_id,", "raw_response=result, ) \"\"\" MAIN FUNCTION \"\"\" def main() -> None:", "= args.get(\"comment\", None) if not case_id: raise ValueError(\"case_id not specified\")", "None)), priority=argToList(args.get(\"priority\", None)), assetID=argToList(args.get(\"asset_id\", None)), tag=argToList(args.get(\"tag\", None)), workflow=argToList(args.get(\"workflow\", None)), field=argToList(args.get(\"field\",", "get_event_by_path from argus_api.api.events.v1.case.case import get_events_for_case from argus_api.api.events.v1.aggregated import ( find_aggregated_events,", 
"last_run.get(\"start_time\", None) if last_run else None # noinspection PyTypeChecker result", "\"value\": lst[i + 1]}) return tags def str_to_dict(string: str) ->", "\"customer\": case[\"customer\"][\"shortName\"], }, \"rawJSON\": json.dumps(case), } ) if result[\"data\"]: last_run[\"start_time\"]", "readable_output += f\"#### *{attachment['addedByUser']['userName']} - {attachment['addedTime']}*\\n\" readable_output += f\"{attachment['name']} ({attachment['mimeType']},", "type=event_type, timestamp=timestamp, customerID=customer_id, eventID=event_id ) return fileResult(f\"{event_id}_pcap\", result.content) def find_nids_events_command(args:", "def list_case_comments_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None)", "specified\") result = delete_case(caseID=case_id) return CommandResults( readable_output=pretty_print_case_metadata(result, \"Case deleted\"), outputs_prefix=\"Argus.Case\",", "mapping.get(priority, 0) def argus_status_to_demisto_status(status: str) -> int: mapping = {", "return string def pretty_print_comments(comments: list, title: str = None) ->", "isinstance(date_time, str): return pretty_print_date(dateparser.parse(date_time)) return datetime.now().strftime(PRETTY_DATE_FORMAT) def pretty_print_case_metadata(result: dict, title:", "\"* * *\\n\" return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Attachments\", outputs=result, raw_response=result, )", "accessMode=args.get(\"access_mode\", None), originEmailAddress=args.get(\"origin_email_address\", None), publish=args.get(\"publish\", None), defaultWatchers=args.get(\"default_watchers\", None), ) return", "case in result[\"data\"]: incidents.append( { \"name\": f\"#{case['id']}: {case['subject']}\", \"occurred\": case[\"createdTime\"],", "get_pcap from argus_api.api.events.v1.nids import find_n_i_d_s_events, list_n_i_d_s_events from argus_api.api.pdns.v3.search import search_records", "return CommandResults( 
readable_output=pretty_print_events(dict(result), \"# List NIDS Events\\n\"), outputs_prefix=\"Argus.NIDS\", outputs=result, raw_response=result,", "\"ok\" return ( f\"Unable to communicate with Argus API {response['responseCode']},", "demisto.command() == \"argus-pdns-search-records\": return_results(search_records_command(demisto.args())) elif demisto.command() == \"argus-fetch-observations-for-domain\": return_results(fetch_observations_for_domain_command(demisto.args())) elif", "1] for i in range(0, len(lst), 2)} def date_time_to_epoch_milliseconds(date_time: Union[datetime,", "= title if title else \"\" string += \"_Count: {},", "case[\"category\"] else None, \"service\": case[\"service\"][\"name\"], \"lastUpdatedTime\": case[\"lastUpdatedTime\"], \"createdTimestamp\": case[\"createdTimestamp\"], \"customer\":", "value = args.get(\"value\", None) if not case_id: raise ValueError(\"case_id not", "\"# Find NIDS Events\\n\"), outputs_prefix=\"Argus.NIDS\", outputs=result, raw_response=result, ) def list_nids_events_command(args:", "not comment_id: raise ValueError(\"comment id not specified\") if not comment:", "{}_\\n\".format( result[\"count\"], result[\"size\"], result[\"offset\"], result[\"limit\"] ) string += tableToMarkdown(\"Events\", result[\"data\"])", ") return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Tags\", outputs=result, raw_response=result, ) def remove_case_tag_by_key_value_command(args:", "day\") ) set_argus_settings( demisto.params().get(\"api_key\"), demisto.params().get(\"api_url\"), handle_proxy(), demisto.params().get(\"insecure\", None), ) demisto.debug(f\"Command", "tableToMarkdown(\"Events\", result[\"data\"]) return string \"\"\" COMMAND FUNCTIONS \"\"\" def test_module_command()", "traceback return_error( f\"Failed to execute {demisto.command()} command.\\nError:\\n{str(e)}\" ) \"\"\" ENTRY", ") def list_case_tags_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\",", 
"\"argus-add-comment\": return_results(add_comment_command(demisto.args())) elif demisto.command() == \"argus-advanced-case-search\": return_results(advanced_case_search_command(demisto.args())) elif demisto.command() ==", "result = get_case_metadata_by_id( id=case_id, skipRedirect=args.get(\"skip_redirect\", None) ) return CommandResults( readable_output=pretty_print_case_metadata(result),", "readable_output=readable_output, outputs_prefix=\"Argus.Tags\", outputs=result, raw_response=result, ) def list_case_comments_command(args: Dict[str, Any]) ->", "if not customer_id: raise ValueError(\"customer id not specified\") if not", "None) ) readable_output = f\"# #{case_id}: Case attachments\\n\" for attachment", "Any, Dict, List, Union import logging from argus_api import session", "None), priority=args.get(\"priority\", None), category=args.get(\"category\", None), reporter=args.get(\"reporter\", None), assignedUser=args.get(\"assigned_user\", None), assignedTech=args.get(\"assigned_tech\",", "readable_output=pretty_print_comment( result[\"data\"], f\"# #{case_id}: Added comment\\n\" ), outputs_prefix=\"Argus.Comment\", outputs=result, raw_response=result,", "{pretty_print_date(comment['lastUpdatedTime'])}_\\n\" if comment[\"lastUpdatedTime\"] else \"\" ) string += f\"{comment['comment']}\\n\\n\" string", "caseID=case_id, beforeComment=args.get(\"before_comment\", None), afterComment=args.get(\"after_comment\", None), offset=args.get(\"offset\", None), limit=args.get(\"limit\", None), sortBy=sort_by,", "= find_n_i_d_s_events( skipFutureEvents=args.get(\"skip_future_events\", None), exclude=args.get(\"exclude\", None), eventIdentifier=argToList(args.get(\"event_identifier\", None)), locationID=argToList(args.get(\"location_id\", None)),", "pretty_print_events(result: dict, title: str = None) -> str: string =", "def create_case_command(args: Dict[str, Any]) -> CommandResults: subject = args.get(\"subject\", None)", "None), 
keywordFieldStrategy=argToList(args.get(\"keyword_field_strategy\", None)), keywordMatchStrategy=args.get(\"keyword_match_strategy\", None), user=argToList(args.get(\"user\", None)), userFieldStrategy=argToList(args.get(\"user_field_strategy\", None)), userAssigned=args.get(\"user_assigned\",", "readable_output = tableToMarkdown( f\"#{case_id}: Delete tags\", result[\"data\"], headers=headers ) return", "data[\"status\"], pretty_print_date(data[\"lastUpdatedTime\"]) ) string += \"Reported by {} at {}\\n\\n\".format(", "datetime): return int(date_time.timestamp() * 1000) if isinstance(date_time, str): return date_time_to_epoch_milliseconds(dateparser.parse(date_time))", "import dateparser import traceback from typing import Any, Dict, List,", "properties=str_to_dict(args.get(\"properties\", None)), exactMatchProperties=args.get(\"exact_match_properties\", None), sensorID=argToList(args.get(\"sensor_id\", None)), subCriteria=argToList(args.get(\"sub_criteria\", None)), signature=argToList(args.get(\"signature\", None)),", "\"\"\" import json import urllib3 import dateparser import traceback from", "args.get(\"value\", None) if not case_id: raise ValueError(\"case_id not specified\") if", "case_id: raise ValueError(\"case_id not specified\") if not comment: raise ValueError(\"comment", "result = fetch_observations_for_domain(fqdn=fqdn) return CommandResults( readable_output=tableToMarkdown( f'Domain observations for \"{fqdn}\"',", "description: raise ValueError(\"description not specified\") if not service: raise ValueError(\"service", "specified\") result = remove_case_tag_by_id(caseID=case_id, tagID=tag_id) headers = [\"key\", \"value\", \"addedTime\",", "# noinspection PyTypeChecker result = advanced_case_search( startTimestamp=start_timestamp if start_timestamp else", "{}\\n\\n\".format( data[\"publishedByUser\"][\"name\"], pretty_print_date(data[\"publishedTime\"]) ) string += data[\"description\"] return string def", "elif demisto.command() == 
\"argus-download-attachment\": return_results(download_attachment_command(demisto.args())) elif demisto.command() == \"argus-edit-comment\": return_results(edit_comment_command(demisto.args()))", "outputs_prefix=\"Argus.Tags\", outputs=result, raw_response=result, ) def list_case_comments_command(args: Dict[str, Any]) -> CommandResults:", "key: raise ValueError(\"key not specified\") if not value: raise ValueError(\"value", "Argus API {response['responseCode']}, {response}\" ) def fetch_incidents( last_run: dict, first_fetch_period:", "outputs=result, raw_response=result, ) def get_payload_command(args: Dict[str, Any]) -> CommandResults: event_type", "): argus_session.api_key = api_key argus_session.base_url = base_url argus_session.proxies = proxies", "customer_id = args.get(\"customer_id\", None) event_id = args.get(\"event_id\", None) if not", "ValueError(\"query not specified\") # noinspection PyTypeChecker result = search_records( query=query,", "attachmentID=attachment_id) readable_output = f\"# #{case_id}: attachment metadata\\n\" readable_output += f\"####", "str(case[\"id\"]), \"type\": case[\"type\"], \"category\": case[\"category\"][\"name\"] if case[\"category\"] else None, \"service\":", "not specified\") if not attachment_id: raise ValueError(\"attachment id not specified\")", "required=args.get(\"required\", None), customerID=argToList(args.get(\"customer_id\", None)), caseID=argToList(args.get(\"case_id\", None)), customer=argToList(args.get(\"customer\", None)), type=argToList(args.get(\"case_type\", None)),", "keywords=argToList(args.get(\"keywords\", None)), timeFieldStrategy=argToList(args.get(\"time_field_strategy\", None)), timeMatchStrategy=args.get(\"time_match_strategy\", None), keywordFieldStrategy=argToList(args.get(\"keyword_field_strategy\", None)), keywordMatchStrategy=args.get(\"keyword_match_strategy\", None),", "# print the traceback return_error( f\"Failed to execute {demisto.command()} command.\\nError:\\n{str(e)}\"", "range(0, 
len(lst), 2)} def date_time_to_epoch_milliseconds(date_time: Union[datetime, str] = None) ->", "# Log exceptions and return errors except Exception as e:", "string += \"_Count: {}, showing {} events, from {} to", "None), internal=args.get(\"internal\", None), originEmailAddress=args.get(\"origin_email_address\", None), associatedAttachmentID=args.get(\"associated_attachment_id\", None), ) return CommandResults(", "f\"_Last updated {pretty_print_date(comment['lastUpdatedTime'])}_\\n\" if comment[\"lastUpdatedTime\"] else \"\" ) string +=", "-> int: if isinstance(date_time, datetime): return int(date_time.timestamp() * 1000) if", "outputs_prefix=\"Argus.Attachments\", outputs=result, raw_response=result, ) def list_case_tags_command(args: Dict[str, Any]) -> CommandResults:", "and define the fetch incidents command to run after activated", "{result['data']['addedTime']}*\\n\" readable_output += f\"{result['data']['name']} ({result['data']['mimeType']}, {result['data']['size']} bytes)\\n\\n\" readable_output += f\"_id:", "25), offset=args.get(\"offset\", None), ) return CommandResults( readable_output=tableToMarkdown(\"PDNS records\", result[\"data\"]), outputs_prefix=\"Argus.PDNS\",", ") return CommandResults( readable_output=pretty_print_case_metadata(result), outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result, ) def get_event_command(args:", "Any]) -> CommandResults: case_id = args.get(\"case_id\", None) sort_by = args.get(\"sort_by\",", "None), sensorID=argToList(args.get(\"sensor_id\", None)), subCriteria=argToList(args.get(\"sub_criteria\", None)), signature=argToList(args.get(\"signature\", None)), lastUpdatedTimestamp=args.get(\"last_updated_timestamp\", None), indexStartTime=args.get(\"index_start_time\",", "FUNCTIONS \"\"\" def set_argus_settings( api_key: str, base_url: str = None,", "caseID=argToList(args.get(\"case_id\", None)), customer=argToList(args.get(\"customer\", None)), type=argToList(args.get(\"case_type\", None)), 
service=argToList(args.get(\"service\", None)), category=argToList(args.get(\"category\", None)),", "advanced_case_search, close_case, create_case, delete_case, delete_comment, download_attachment, edit_comment, get_attachment, get_case_metadata_by_id, list_case_attachments,", "None), includeWorkflows=args.get(\"include_workflows\", None), includeDescription=args.get(\"include_description\", None), accessMode=argToList(args.get(\"access_mode\", None)), explicitAccess=argToList(args.get(\"explicit_access\", None)), sortBy=argToList(args.get(\"sort_by\",", "f\"#{case_id}: Tags\", result[\"data\"], headers=headers ) return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Tags\", outputs=result,", "= args.get(\"comment\", None) if not case_id: raise ValueError(\"case id not", "None), exclude=args.get(\"exclude\", None), eventIdentifier=argToList(args.get(\"event_identifier\", None)), locationID=argToList(args.get(\"location_id\", None)), severity=argToList(args.get(\"severity\", None)), customer=argToList(args.get(\"customer\",", "None) timestamp = args.get(\"timestamp\", None) customer_id = args.get(\"customer_id\", None) event_id", "id=case_id, skipRedirect=args.get(\"skip_redirect\", None) ) return CommandResults( readable_output=pretty_print_case_metadata(result), outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result,", "result = get_events_for_case( caseID=case_id, limit=args.get(\"limit\", None), offset=args.get(\"offset\", None) ) return", "None), limit=args.get(\"limit\", 25), offset=args.get(\"offset\", None), includeDeleted=args.get(\"include_deleted\", None), startTimestamp=args.get(\"start_timestamp\", \"-24hours\"), endTimestamp=args.get(\"end_timestamp\",", "destinationGeoCountry=argToList(args.get(\"destination_geo_country\", None)), geoCountry=argToList(args.get(\"geo_country\", None)), properties=str_to_dict(args.get(\"properties\", None)), exactMatchProperties=args.get(\"exact_match_properties\", None), 
subCriteria=argToList(args.get(\"sub_criteria\", None)),", "pretty_print_date(data[\"lastUpdatedTime\"]) ) string += \"Reported by {} at {}\\n\\n\".format( data[\"publishedByUser\"][\"name\"],", "None), comment=args.get(\"comment\", None), originEmailAddress=args.get(\"origin_email_address\", None), hasEvents=args.get(\"has_events\", None), internalComment=args.get(\"internal_comment\", None), )", "offset=args.get(\"offset\", None) ) headers = [\"key\", \"value\", \"addedTime\", \"id\"] readable_output", "not specified\") result = get_pcap( type=event_type, timestamp=timestamp, customerID=customer_id, eventID=event_id )", "raise ValueError(\"case id not specified\") result = get_case_metadata_by_id( id=case_id, skipRedirect=args.get(\"skip_redirect\",", "import get_payload from argus_api.api.events.v1.pcap import get_pcap from argus_api.api.events.v1.nids import find_n_i_d_s_events,", "\"pendingClose\": 0, \"workingSoc\": 1, \"workingCustomer\": 1, \"closed\": 2, } return", "result = advanced_case_search( startTimestamp=start_timestamp if start_timestamp else first_fetch_period, endTimestamp=\"now\", limit=limit,", ") def get_pcap_command(args: Dict[str, Any]) -> Any: event_type = args.get(\"type\",", "raise ValueError(\"timestamp not specified\") if not customer_id: raise ValueError(\"customer id", "{\"low\": 1, \"medium\": 2, \"high\": 3, \"critical\": 4} return mapping.get(priority,", "tags = str(tags).split(\",\") if len(tags) % 2 != 0: raise", "title: str = None) -> str: string = title if", "Tags\", result[\"data\"], headers=headers ) return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Tags\", outputs=result, raw_response=result,", "elif demisto.command() == \"argus-delete-case\": return_results(delete_case_command(demisto.args())) elif demisto.command() == \"argus-delete-comment\": return_results(delete_comment_command(demisto.args()))", "if len(lst) % 2 != 0: return {} return {lst[i]:", "ValueError(\"value not specified\") tag = 
{\"key\": key, \"value\": value} result", "str): return date_time_to_epoch_milliseconds(dateparser.parse(date_time)) return int(datetime.now().timestamp() * 1000) def pretty_print_date(date_time: Union[datetime,", "customer_id: raise ValueError(\"customer id not specified\") if not event_id: raise", "== \"argus-create-case\": return_results(create_case_command(demisto.args())) elif demisto.command() == \"argus-delete-case\": return_results(delete_case_command(demisto.args())) elif demisto.command()", "min_severity_list.append(severity) return min_severity_list def parse_first_fetch(first_fetch: Any) -> Any: if isinstance(first_fetch,", "sourceGeoCountry=argToList(args.get(\"source_geo_country\", None)), destinationGeoCountry=argToList(args.get(\"destination_geo_country\", None)), geoCountry=argToList(args.get(\"geo_country\", None)), properties=str_to_dict(args.get(\"properties\", None)), exactMatchProperties=args.get(\"exact_match_properties\", None),", "id not specified\") result = update_case( id=case_id, subject=args.get(\"subject\", None), description=args.get(\"description\",", "\"workingSoc\": 1, \"workingCustomer\": 1, \"closed\": 2, } return mapping.get(status, 0)", "Dict[str, Any]) -> CommandResults: # noinspection PyTypeChecker result = find_n_i_d_s_events(", "CommandResults: case_id = args.get(\"case_id\", None) if not case_id: raise ValueError(\"case", ") incidents = [] for case in result[\"data\"]: incidents.append( {", "raw_response=result, ) def find_aggregated_events_command(args: Dict[str, Any]) -> CommandResults: # noinspection", "CommandResults( readable_output=pretty_print_case_metadata(result), outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result, ) def list_case_attachments_command(args: Dict[str, Any])", "if not fqdn: raise ValueError(\"fqdn not specified\") result = fetch_observations_for_domain(fqdn=fqdn)", "if not case_id: raise ValueError(\"case id not specified\") result =", "noinspection PyTypeChecker result = 
search_records( query=query, aggregateResult=args.get(\"aggregate_result\", None), includeAnonymousResults=args.get(\"include_anonymous_results\", None),", "string = title if title else \"\" string += \"_Count:", "outputs_prefix=\"Argus.Tags\", outputs=result, raw_response=result, ) def remove_case_tag_by_key_value_command(args: Dict[str, Any]) -> CommandResults:", "result = get_pcap( type=event_type, timestamp=timestamp, customerID=customer_id, eventID=event_id ) return fileResult(f\"{event_id}_pcap\",", "not tag_id: raise ValueError(\"tag id not specified\") result = remove_case_tag_by_id(caseID=case_id,", "result[\"data\"] ) return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Cases\", outputs=result, raw_response=result, ) def", "specified\") if not value: raise ValueError(\"value not specified\") result =", "case[\"type\"], \"category\": case[\"category\"][\"name\"] if case[\"category\"] else None, \"service\": case[\"service\"][\"name\"], \"lastUpdatedTime\":", "next_run, incidents = fetch_incidents( last_run=demisto.getLastRun(), first_fetch_period=first_fetch_period, limit=demisto.params().get(\"max_fetch\", 25), min_severity=demisto.params().get(\"min_severity\", \"low\"),", "argus_session from argus_api.api.currentuser.v1.user import get_current_user from argus_api.api.cases.v2.case import ( add_case_tag,", "-> CommandResults: case_id = args.get(\"case_id\", None) tag_id = args.get(\"tag_id\", None)", "args.get(\"case_id\", None) tag_id = args.get(\"tag_id\", None) if not case_id: raise", "min_severity=demisto.params().get(\"min_severity\", \"low\"), ) demisto.setLastRun(next_run) demisto.incidents(incidents) elif demisto.command() == \"argus-add-case-tag\": return_results(add_case_tag_command(demisto.args()))", "query=query, aggregateResult=args.get(\"aggregate_result\", None), includeAnonymousResults=args.get(\"include_anonymous_results\", None), rrClass=argToList(args.get(\"rr_class\", None)), 
rrType=argToList(args.get(\"rr_type\", None)), customerID=argToList(args.get(\"customer_id\",", "None), originEmailAddress=args.get(\"origin_email_address\", None), associatedAttachmentID=args.get(\"associated_attachment_id\", None), ) return CommandResults( readable_output=pretty_print_comment( result[\"data\"],", "0) def argus_status_to_demisto_status(status: str) -> int: mapping = { \"pendingCustomer\":", "get_payload_command(args: Dict[str, Any]) -> CommandResults: event_type = args.get(\"type\", None) timestamp", "category=args.get(\"category\", None), type=case_type, status=args.get(\"status\", None), tags=tags, subject=subject, description=description, customerReference=args.get(\"customer_reference\", None),", "rrClass=argToList(args.get(\"rr_class\", None)), rrType=argToList(args.get(\"rr_type\", None)), customerID=argToList(args.get(\"customer_id\", None)), tlp=argToList((args.get(\"tlp\", None))), limit=args.get(\"limit\", 25),", "= list_case_tags( caseID=case_id, limit=args.get(\"limit\", None), offset=args.get(\"offset\", None) ) headers =", "specified\") result = update_case( id=case_id, subject=args.get(\"subject\", None), description=args.get(\"description\", None), status=args.get(\"status\",", "execute {demisto.command()} command.\\nError:\\n{str(e)}\" ) \"\"\" ENTRY POINT \"\"\" if __name__", "first_fetch_period: str, limit: int = 25, min_severity: str = \"low\"", "specified\") if not value: raise ValueError(\"value not specified\") tag =", "offset=args.get(\"offset\", None) ) readable_output = f\"# #{case_id}: Case attachments\\n\" for", "argus_api.api.events.v1.case.case import get_events_for_case from argus_api.api.events.v1.aggregated import ( find_aggregated_events, list_aggregated_events, )", "-> str: response = get_current_user() if response[\"responseCode\"] == 200: return", "endTimestamp=args.get(\"end_timestamp\", None), limit=args.get(\"limit\", None), offset=args.get(\"offset\", None), includeDeleted=args.get(\"include_deleted\", 
None), subCriteria=argToList(args.get(\"sub_criteria\", None)),", "specified\") result = get_payload( type=event_type, timestamp=timestamp, customerID=customer_id, eventID=event_id ) readable_output", "None) ) headers = [\"key\", \"value\", \"addedTime\", \"id\"] readable_output =", "ValueError(\"tags list must be of even number\", tags) tags =", "raw_response=result, ) def list_case_comments_command(args: Dict[str, Any]) -> CommandResults: case_id =", "port=argToList(args.get(\"port\", None)), minSeverity=args.get(\"min_severity\", None), maxSeverity=args.get(\"max_severity\", None), limit=args.get(\"limit\", 25), offset=args.get(\"offset\", None),", "elif demisto.command() == \"argus-list-case-attachments\": return_results(list_case_attachments_command(demisto.args())) elif demisto.command() == \"argus-list-case-tags\": return_results(list_case_tags_command(demisto.args()))", "ValueError(\"case id not specified\") if not key: raise ValueError(\"key not", "properties=str_to_dict(args.get(\"properties\", None)), exactMatchProperties=args.get(\"exact_match_properties\", None), subCriteria=argToList(args.get(\"sub_criteria\", None)), signature=argToList(args.get(\"signature\", None)), lastUpdatedTimestamp=args.get(\"last_updated_timestamp\", None),", ") string += \"Reported by {} at {}\\n\\n\".format( data[\"publishedByUser\"][\"name\"], pretty_print_date(data[\"publishedTime\"])", "afterComment=args.get(\"after_comment\", None), offset=args.get(\"offset\", None), limit=args.get(\"limit\", None), sortBy=sort_by, ) return CommandResults(", "Any]) -> Any: event_type = args.get(\"type\", None) timestamp = args.get(\"timestamp\",", "None) -> str: if isinstance(date_time, datetime): return date_time.strftime(PRETTY_DATE_FORMAT) if isinstance(date_time,", "return [] tags = [] for i in range(0, len(lst),", "return_results(list_case_comments_command(demisto.args())) elif demisto.command() == \"argus-remove-case-tag-by-id\": 
return_results(remove_case_tag_by_id_command(demisto.args())) elif demisto.command() == \"argus-remove-case-tag-by-key-value\":", "= args.get(\"comment_id\", None) comment = args.get(\"comment\", None) if not case_id:", ") return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Tags\", outputs=result, raw_response=result, ) def list_case_comments_command(args:", "if isinstance(date_time, str): return pretty_print_date(dateparser.parse(date_time)) return datetime.now().strftime(PRETTY_DATE_FORMAT) def pretty_print_case_metadata(result: dict,", "raise ValueError(\"case_id not specified\") result = list_case_attachments( caseID=case_id, limit=args.get(\"limit\", None),", ") def get_attachment_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\",", "None), defaultWatchers=args.get(\"default_watchers\", None), ) return CommandResults( readable_output=pretty_print_case_metadata(result), outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result,", "# Disable insecure warnings urllib3.disable_warnings() \"\"\" CONSTANTS \"\"\" DATE_FORMAT =", "for playground\", result[\"data\"] ) return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Cases\", outputs=result, raw_response=result,", "outputs=result, raw_response=result, ) def list_case_tags_command(args: Dict[str, Any]) -> CommandResults: case_id", "string = title if title else f\"# #{data['id']}: {data['subject']}\\n\" string", "result = advanced_case_search( startTimestamp=args.get(\"start_timestamp\", None), endTimestamp=args.get(\"end_timestamp\", None), limit=args.get(\"limit\", None), offset=args.get(\"offset\",", "\"status\": argus_status_to_demisto_status(case[\"status\"]), \"details\": case[\"description\"], \"customFields\": { \"argus_id\": str(case[\"id\"]), \"type\": case[\"type\"],", "fetch_observations_for_i_p, ) # Disable insecure warnings urllib3.disable_warnings() \"\"\" CONSTANTS \"\"\"", "offset=args.get(\"offset\", None), 
limit=args.get(\"limit\", None), sortBy=sort_by, ) return CommandResults( readable_output=pretty_print_comments( result[\"data\"],", "incidents = [] for case in result[\"data\"]: incidents.append( { \"name\":", "readable_output += f\"Event: {event_id}, type: {result['data']['type']}\\n\" readable_output += result[\"data\"][\"payload\"] return", "\"# List NIDS Events\\n\"), outputs_prefix=\"Argus.NIDS\", outputs=result, raw_response=result, ) def search_records_command(args:", "not ip: raise ValueError(\"ip not specified\") result = fetch_observations_for_i_p(ip=ip) return", "None) comment_id = args.get(\"comment_id\", None) if not case_id: raise ValueError(\"case", "# noinspection PyTypeChecker result = find_n_i_d_s_events( skipFutureEvents=args.get(\"skip_future_events\", None), exclude=args.get(\"exclude\", None),", "demisto.params().get(\"api_url\"), handle_proxy(), demisto.params().get(\"insecure\", None), ) demisto.debug(f\"Command being called is {demisto.command()}\")", "== \"argus-remove-case-tag-by-key-value\": return_results(remove_case_tag_by_key_value_command(demisto.args())) elif demisto.command() == \"argus-update-case\": return_results(update_case_command(demisto.args())) elif demisto.command()", "minSeverity=args.get(\"min_severity\", None), maxSeverity=args.get(\"max_severity\", None), limit=args.get(\"limit\", 25), offset=args.get(\"offset\", None), includeDeleted=args.get(\"include_deleted\", None),", "is the call made when pressing the integration Test button.", "logging.getLogger(\"argus_cli\").setLevel(\"WARNING\") first_fetch_period = parse_first_fetch( demisto.params().get(\"first_fetch\", \"-1 day\") ) set_argus_settings( demisto.params().get(\"api_key\"),", "= add_comment( caseID=case_id, comment=comment, asReplyTo=args.get(\"as_reply_to\", None), internal=args.get(\"internal\", None), originEmailAddress=args.get(\"origin_email_address\", None),", "== \"argus-find-nids-events\": return_results(find_nids_events_command(demisto.args())) elif 
demisto.command() == \"argus-list-nids-events\": return_results(list_nids_events_command(demisto.args())) elif demisto.command()", "pretty_print_date(date_time: Union[datetime, str] = None) -> str: if isinstance(date_time, datetime):", "= args.get(\"query\", None) if not query: raise ValueError(\"query not specified\")", "severities: if argus_priority_to_demisto_severity( min_severity.lower() ) <= argus_priority_to_demisto_severity(severity): min_severity_list.append(severity) return min_severity_list", "not specified\") if not comment: raise ValueError(\"comment not specified\") result", "return_results(advanced_case_search_command(demisto.args())) elif demisto.command() == \"argus-close-case\": return_results(close_case_command(demisto.args())) elif demisto.command() == \"argus-create-case\":", "None), internalComment=args.get(\"internal_comment\", None), ) return CommandResults( readable_output=pretty_print_case_metadata(result), outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result,", "not case_id: raise ValueError(\"case id not specified\") result = update_case(", "{demisto.command()} command.\\nError:\\n{str(e)}\" ) \"\"\" ENTRY POINT \"\"\" if __name__ in", "last updated: {}_\\n\".format( data[\"priority\"], data[\"status\"], pretty_print_date(data[\"lastUpdatedTime\"]) ) string += \"Reported", "import logging from argus_api import session as argus_session from argus_api.api.currentuser.v1.user", "case_id: raise ValueError(\"case id not specified\") result = update_case( id=case_id,", "reporter=args.get(\"reporter\", None), assignedUser=args.get(\"assigned_user\", None), assignedTech=args.get(\"assigned_tech\", None), customerReference=args.get(\"customer_reference\", None), comment=args.get(\"comment\", None),", "argus_api.api.pdns.v3.search import search_records from argus_api.api.reputation.v1.observation import ( fetch_observations_for_domain, fetch_observations_for_i_p, )", "0, \"pendingClose\": 0, \"workingSoc\": 1, \"workingCustomer\": 1, 
\"closed\": 2, }", "startTimestamp=start_timestamp if start_timestamp else first_fetch_period, endTimestamp=\"now\", limit=limit, sortBy=[\"createdTimestamp\"], priority=build_argus_priority_from_min_severity(min_severity), subCriteria=[", "exclude=args.get(\"exclude\", None), required=args.get(\"required\", None), customerID=argToList(args.get(\"customer_id\", None)), caseID=argToList(args.get(\"case_id\", None)), customer=argToList(args.get(\"customer\", None)),", "comment[\"flags\"] else \"\" string += \"* * *\\n\" return string", "Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None) tag_id =", "date_time_to_epoch_milliseconds(date_time: Union[datetime, str] = None) -> int: if isinstance(date_time, datetime):", "in severities: if argus_priority_to_demisto_severity( min_severity.lower() ) <= argus_priority_to_demisto_severity(severity): min_severity_list.append(severity) return", "for comment in comments: string += pretty_print_comment(comment) return string def", "def pretty_print_date(date_time: Union[datetime, str] = None) -> str: if isinstance(date_time,", "attachment metadata\\n\" readable_output += f\"#### *{result['data']['addedByUser']['userName']} - {result['data']['addedTime']}*\\n\" readable_output +=", "len(lst) % 2 != 0: return {} return {lst[i]: lst[i", "sortBy=argToList(args.get(\"sort_by\", None)), includeFlags=argToList(args.get(\"include_flags\", None)), excludeFlags=argToList(args.get(\"exclude_flags\", None)), ) readable_output = f\"Advanced", "Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None) attachment_id =", "return CommandResults( readable_output=pretty_print_events(dict(result), \"# Find NIDS Events\\n\"), outputs_prefix=\"Argus.NIDS\", outputs=result, raw_response=result,", "demisto.command() == \"argus-remove-case-tag-by-id\": return_results(remove_case_tag_by_id_command(demisto.args())) elif demisto.command() == \"argus-remove-case-tag-by-key-value\": 
return_results(remove_case_tag_by_key_value_command(demisto.args())) elif", "None) customer_id = args.get(\"customer_id\", None) event_id = args.get(\"event_id\", None) if", "parse_first_fetch( demisto.params().get(\"first_fetch\", \"-1 day\") ) set_argus_settings( demisto.params().get(\"api_key\"), demisto.params().get(\"api_url\"), handle_proxy(), demisto.params().get(\"insecure\",", "str) -> int: mapping = {\"low\": 1, \"medium\": 2, \"high\":", "dict: if not string: return {} lst = argToList(string) if", "CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Cases\", outputs=result, raw_response=result, ) def close_case_command(args: Dict[str, Any])", "= close_case( caseID=case_id, comment=args.get(\"comment\", None), ) readable_output = f\"# #{case_id}:", "specified\") result = close_case( caseID=case_id, comment=args.get(\"comment\", None), ) readable_output =", "== \"argus-advanced-case-search\": return_results(advanced_case_search_command(demisto.args())) elif demisto.command() == \"argus-close-case\": return_results(close_case_command(demisto.args())) elif demisto.command()", "return_results(update_case_command(demisto.args())) elif demisto.command() == \"argus-get-event\": return_results(get_event_command(demisto.args())) elif demisto.command() == \"argus-get-events-for-case\":", "This is the call made when pressing the integration Test", "{}, last updated: {}_\\n\".format( data[\"priority\"], data[\"status\"], pretty_print_date(data[\"lastUpdatedTime\"]) ) string +=", "def main() -> None: logging.getLogger(\"argus_cli\").setLevel(\"WARNING\") first_fetch_period = parse_first_fetch( demisto.params().get(\"first_fetch\", \"-1", "HELPER FUNCTIONS \"\"\" def set_argus_settings( api_key: str, base_url: str =", "None), includeDeleted=args.get(\"include_deleted\", None), subCriteria=argToList(args.get(\"sub_criteria\", None)), exclude=args.get(\"exclude\", None), required=args.get(\"required\", None), 
customerID=argToList(args.get(\"customer_id\",", "severities = [\"low\", \"medium\", \"high\", \"critical\"] min_severity_list = [] for", ") def close_case_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\",", "if not ip: raise ValueError(\"ip not specified\") result = fetch_observations_for_i_p(ip=ip)", "outputs=result, raw_response=result, ) def find_aggregated_events_command(args: Dict[str, Any]) -> CommandResults: #", "raise ValueError(\"attachment id not specified\") result = get_attachment(caseID=case_id, attachmentID=attachment_id) readable_output", "return_results(find_nids_events_command(demisto.args())) elif demisto.command() == \"argus-list-nids-events\": return_results(list_nids_events_command(demisto.args())) elif demisto.command() == \"argus-pdns-search-records\":", "string += \"_Priority: {}, status: {}, last updated: {}_\\n\".format( data[\"priority\"],", "from {} to {}_\\n\".format( result[\"count\"], result[\"size\"], result[\"offset\"], result[\"limit\"] ) string", "import urllib3 import dateparser import traceback from typing import Any,", "None), endTimestamp=args.get(\"end_timestamp\", None), limit=args.get(\"limit\", None), offset=args.get(\"offset\", None), ) return CommandResults(", "== \"argus-fetch-observations-for-ip\": return_results(fetch_observations_for_i_p_command(demisto.args())) # Log exceptions and return errors except", "None), techAssigned=args.get(\"tech_assigned\", None), includeWorkflows=args.get(\"include_workflows\", None), includeDescription=args.get(\"include_description\", None), accessMode=argToList(args.get(\"access_mode\", None)), explicitAccess=argToList(args.get(\"explicit_access\",", "raise ValueError(\"case id not specified\") if not tag_id: raise ValueError(\"tag", "ValueError(\"case id not specified\") if not attachment_id: raise ValueError(\"attachment id", "result = list_case_attachments( caseID=case_id, limit=args.get(\"limit\", None), offset=args.get(\"offset\", None) ) 
readable_output", "edit_comment, get_attachment, get_case_metadata_by_id, list_case_attachments, list_case_tags, list_case_comments, remove_case_tag_by_id, remove_case_tag_by_key_value, update_case, )", "None), required=args.get(\"required\", None), customerID=argToList(args.get(\"customer_id\", None)), caseID=argToList(args.get(\"case_id\", None)), customer=argToList(args.get(\"customer\", None)), type=argToList(args.get(\"case_type\",", "+= ( f\"_Last updated {pretty_print_date(comment['lastUpdatedTime'])}_\\n\" if comment[\"lastUpdatedTime\"] else \"\" )", "* \"\"\" IMPORTS \"\"\" import json import urllib3 import dateparser", "limit=args.get(\"limit\", None), offset=args.get(\"offset\", None), ) return CommandResults( readable_output=pretty_print_events(dict(result), \"# List", "timestamp=timestamp, customerID=customer_id, eventID=event_id ) return fileResult(f\"{event_id}_pcap\", result.content) def find_nids_events_command(args: Dict[str,", "typing import Any, Dict, List, Union import logging from argus_api", "\"argus_id\": str(case[\"id\"]), \"type\": case[\"type\"], \"category\": case[\"category\"][\"name\"] if case[\"category\"] else None,", "comment[\"lastUpdatedTime\"] else \"\" ) string += f\"{comment['comment']}\\n\\n\" string += f\"_id:", "# Set and define the fetch incidents command to run", "\"argus-list-aggregated-events\": return_results(list_aggregated_events_command(demisto.args())) elif demisto.command() == \"argus-get-payload\": return_results(get_payload_command(demisto.args())) elif demisto.command() ==", "noinspection PyTypeChecker result = find_n_i_d_s_events( skipFutureEvents=args.get(\"skip_future_events\", None), exclude=args.get(\"exclude\", None), eventIdentifier=argToList(args.get(\"event_identifier\",", "called is {demisto.command()}\") try: if demisto.command() == \"test-module\": # This", "if not case_id: raise ValueError(\"case_id not specified\") result = close_case(", "1, \"closed\": 2, } return mapping.get(status, 0) def 
build_argus_priority_from_min_severity(min_severity: str)", "import ( add_case_tag, add_comment, advanced_case_search, close_case, create_case, delete_case, delete_comment, download_attachment,", "fetch_observations_for_i_p_command(args: Dict[str, Any]) -> CommandResults: ip = args.get(\"ip\", None) if", "timeFieldStrategy=[\"createdTimestamp\"], ) incidents = [] for case in result[\"data\"]: incidents.append(", "return_results(remove_case_tag_by_id_command(demisto.args())) elif demisto.command() == \"argus-remove-case-tag-by-key-value\": return_results(remove_case_tag_by_key_value_command(demisto.args())) elif demisto.command() == \"argus-update-case\":", "str = None) -> str: data = result[\"data\"] string =", "if title else \"\" string += f\"#### *{comment['addedByUser']['userName']} - {pretty_print_date(comment['addedTime'])}*\\n\"", "fetch_observations_for_domain, fetch_observations_for_i_p, ) # Disable insecure warnings urllib3.disable_warnings() \"\"\" CONSTANTS", "case_id = args.get(\"case_id\", None) attachment_id = args.get(\"attachment_id\", None) if not", "ip=args.get(\"ip\", None), startTimestamp=args.get(\"start_timestamp\", None), endTimestamp=args.get(\"end_timestamp\", None), limit=args.get(\"limit\", None), offset=args.get(\"offset\", None),", "outputs=result, raw_response=result, ) def list_aggregated_events_command(args: Dict[str, Any]) -> CommandResults: result", "service=service, category=args.get(\"category\", None), type=case_type, status=args.get(\"status\", None), tags=tags, subject=subject, description=description, customerReference=args.get(\"customer_reference\",", "\"fetch-incidents\": # Set and define the fetch incidents command to", "dict, first_fetch_period: str, limit: int = 25, min_severity: str =", "demisto.command() == \"argus-advanced-case-search\": return_results(advanced_case_search_command(demisto.args())) elif demisto.command() == \"argus-close-case\": return_results(close_case_command(demisto.args())) elif", "import Any, 
Dict, List, Union import logging from argus_api import", "f\"# #{data['id']}: {data['subject']}\\n\" string += \"_Priority: {}, status: {}, last", "\"workingCustomer\": 1, \"closed\": 2, } return mapping.get(status, 0) def build_argus_priority_from_min_severity(min_severity:", "id not specified\") if not attachment_id: raise ValueError(\"attachment id not", "None), originEmailAddress=args.get(\"origin_email_address\", None), publish=args.get(\"publish\", None), defaultWatchers=args.get(\"default_watchers\", None), ) return CommandResults(", "== \"argus-get-payload\": return_results(get_payload_command(demisto.args())) elif demisto.command() == \"argus-get-pcap\": return_results(get_pcap_command(demisto.args())) elif demisto.command()", "return_results(get_payload_command(demisto.args())) elif demisto.command() == \"argus-get-pcap\": return_results(get_pcap_command(demisto.args())) elif demisto.command() == \"argus-find-nids-events\":", "subCriteria=argToList(args.get(\"sub_criteria\", None)), signature=argToList(args.get(\"signature\", None)), lastUpdatedTimestamp=args.get(\"last_updated_timestamp\", None), indexStartTime=args.get(\"index_start_time\", None), indexEndTime=args.get(\"index_end_time\", None),", "None), customerReference=args.get(\"customer_reference\", None), comment=args.get(\"comment\", None), originEmailAddress=args.get(\"origin_email_address\", None), hasEvents=args.get(\"has_events\", None), internalComment=args.get(\"internal_comment\",", "result = get_event_by_path( type=event_type, timestamp=timestamp, customerID=customer_id, eventID=event_id ) return CommandResults(", "return_results(fetch_observations_for_i_p_command(demisto.args())) # Log exceptions and return errors except Exception as", "specified\") result = fetch_observations_for_domain(fqdn=fqdn) return CommandResults( readable_output=tableToMarkdown( f'Domain observations for", "demisto.command() == \"argus-get-attachment\": return_results(get_attachment_command(demisto.args())) elif 
demisto.command() == \"argus-get-case-metadata-by-id\": return_results(get_case_metadata_by_id_command(demisto.args())) elif", "search_records from argus_api.api.reputation.v1.observation import ( fetch_observations_for_domain, fetch_observations_for_i_p, ) # Disable", "if len(lst) % 2 != 0: return [] tags =", "== \"argus-pdns-search-records\": return_results(search_records_command(demisto.args())) elif demisto.command() == \"argus-fetch-observations-for-domain\": return_results(fetch_observations_for_domain_command(demisto.args())) elif demisto.command()", ") def list_aggregated_events_command(args: Dict[str, Any]) -> CommandResults: result = list_aggregated_events(", "except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(", "sortBy=[\"createdTimestamp\"], priority=build_argus_priority_from_min_severity(min_severity), subCriteria=[ {\"exclude\": True, \"status\": [\"closed\"]}, ], timeFieldStrategy=[\"createdTimestamp\"], )", "range(0, len(lst), 2): tags.append({\"key\": lst[i], \"value\": lst[i + 1]}) return", "None # noinspection PyTypeChecker result = advanced_case_search( startTimestamp=start_timestamp if start_timestamp", "return datetime.now().strftime(PRETTY_DATE_FORMAT) def pretty_print_case_metadata(result: dict, title: str = None) ->", "not specified\") result = delete_case(caseID=case_id) return CommandResults( readable_output=pretty_print_case_metadata(result, \"Case deleted\"),", "args.get(\"value\", None) if not case_id: raise ValueError(\"case id not specified\")", "elif demisto.command() == \"argus-find-nids-events\": return_results(find_nids_events_command(demisto.args())) elif demisto.command() == \"argus-list-nids-events\": return_results(list_nids_events_command(demisto.args()))", "else None, \"service\": case[\"service\"][\"name\"], \"lastUpdatedTime\": case[\"lastUpdatedTime\"], \"createdTimestamp\": case[\"createdTimestamp\"], \"customer\": case[\"customer\"][\"shortName\"],", "= api_key 
argus_session.base_url = base_url argus_session.proxies = proxies argus_session.verify =", "min_severity_list def parse_first_fetch(first_fetch: Any) -> Any: if isinstance(first_fetch, str): if", "sortBy=sort_by, ) return CommandResults( readable_output=pretty_print_comments( result[\"data\"], f\"# #{case_id}: Comments\\n\" ),", ") return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Tags\", outputs=result, raw_response=result, ) def update_case_command(args:", "\"argus-list-case-comments\": return_results(list_case_comments_command(demisto.args())) elif demisto.command() == \"argus-remove-case-tag-by-id\": return_results(remove_case_tag_by_id_command(demisto.args())) elif demisto.command() ==", "startTimestamp=args.get(\"start_timestamp\", None), endTimestamp=args.get(\"end_timestamp\", None), limit=args.get(\"limit\", None), offset=args.get(\"offset\", None), ) return", "events, from {} to {}_\\n\".format( result[\"count\"], result[\"size\"], result[\"offset\"], result[\"limit\"] )", "elif demisto.command() == \"argus-create-case\": return_results(create_case_command(demisto.args())) elif demisto.command() == \"argus-delete-case\": return_results(delete_case_command(demisto.args()))", "raise ValueError(\"case id not specified\") if not comment_id: raise ValueError(\"comment", "( f\"Unable to communicate with Argus API {response['responseCode']}, {response}\" )", "None), status=args.get(\"status\", None), priority=args.get(\"priority\", None), category=args.get(\"category\", None), reporter=args.get(\"reporter\", None), assignedUser=args.get(\"assigned_user\",", "demisto.command() == \"argus-list-nids-events\": return_results(list_nids_events_command(demisto.args())) elif demisto.command() == \"argus-pdns-search-records\": return_results(search_records_command(demisto.args())) elif", "= [\"key\", \"value\", \"addedTime\", \"id\", \"flags\"] readable_output = tableToMarkdown( f\"#{case_id}:", "None)), 
sourceIPMinBits=args.get(\"source_ip_min_bits\", None), destinationIPMinBits=args.get(\"destination_ip_min_bits\", None), startTimestamp=args.get(\"start_timestamp\", \"-24hours\"), endTimestamp=args.get(\"end_timestamp\", \"now\"), sortBy=argToList(args.get(\"sort_by\",", "for \"{ip}\"', result[\"data\"]), outputs_prefix=\"Argus.ObservationsIP\", outputs=result, raw_response=result, ) \"\"\" MAIN FUNCTION", "case_id: raise ValueError(\"case_id not specified\") result = list_case_attachments( caseID=case_id, limit=args.get(\"limit\",", "get_case_metadata_by_id( id=case_id, skipRedirect=args.get(\"skip_redirect\", None) ) return CommandResults( readable_output=pretty_print_case_metadata(result), outputs_prefix=\"Argus.Case\", outputs=result,", "result[\"data\"] ), outputs_prefix=\"Argus.ObservationsDomain\", outputs=result, raw_response=result, ) def fetch_observations_for_i_p_command(args: Dict[str, Any])", "2 != 0: raise ValueError(\"tags list must be of even", ") return CommandResults( readable_output=pretty_print_events(dict(result), \"# Find NIDS Events\\n\"), outputs_prefix=\"Argus.NIDS\", outputs=result,", "\"closed\": 2, } return mapping.get(status, 0) def build_argus_priority_from_min_severity(min_severity: str) ->", "return_results(get_event_command(demisto.args())) elif demisto.command() == \"argus-get-events-for-case\": return_results(get_events_for_case_command(demisto.args())) elif demisto.command() == \"argus-find-aggregated-events\":", "ValueError(\"key not specified\") if not value: raise ValueError(\"value not specified\")", "FETCH_TAG = demisto.params().get(\"fetch_tag\") \"\"\" HELPER FUNCTIONS \"\"\" def set_argus_settings( api_key:", "def pretty_print_comment(comment: dict, title: str = None) -> str: string", "args.get(\"case_id\", None) comment = args.get(\"comment\", None) if not case_id: raise", "case[\"lastUpdatedTime\"], \"createdTimestamp\": case[\"createdTimestamp\"], \"customer\": case[\"customer\"][\"shortName\"], }, \"rawJSON\": 
json.dumps(case), } )", "advanced_case_search_command(args: Dict[str, Any]) -> CommandResults: # noinspection PyTypeChecker result =", "None), type=case_type, status=args.get(\"status\", None), tags=tags, subject=subject, description=description, customerReference=args.get(\"customer_reference\", None), priority=args.get(\"priority\",", "CommandResults( readable_output=pretty_print_comment( result[\"data\"], f\"# #{case_id}: Deleted comment\\n\" ), outputs_prefix=\"Argus.Comment\", outputs=result,", "comment=comment) return CommandResults( readable_output=pretty_print_comment( result[\"data\"], f\"# #{case_id}: Updated comment\\n\" ),", "Dict[str, Any]) -> CommandResults: subject = args.get(\"subject\", None) description =", "readable_output += tableToMarkdown( \"Output not suitable for playground\", result[\"data\"] )", "outputs_prefix=\"Argus.Cases\", outputs=result, raw_response=result, ) def close_case_command(args: Dict[str, Any]) -> CommandResults:", "get_current_user() if response[\"responseCode\"] == 200: return \"ok\" return ( f\"Unable", "return ( f\"Unable to communicate with Argus API {response['responseCode']}, {response}\"", "elif demisto.command() == \"argus-delete-comment\": return_results(delete_comment_command(demisto.args())) elif demisto.command() == \"argus-download-attachment\": return_results(download_attachment_command(demisto.args()))", "response = get_current_user() if response[\"responseCode\"] == 200: return \"ok\" return", "settings. 
next_run, incidents = fetch_incidents( last_run=demisto.getLastRun(), first_fetch_period=first_fetch_period, limit=demisto.params().get(\"max_fetch\", 25), min_severity=demisto.params().get(\"min_severity\",", "\"category\": case[\"category\"][\"name\"] if case[\"category\"] else None, \"service\": case[\"service\"][\"name\"], \"lastUpdatedTime\": case[\"lastUpdatedTime\"],", "+ 1] for i in range(0, len(lst), 2)} def date_time_to_epoch_milliseconds(date_time:", "updated: {}_\\n\".format( data[\"priority\"], data[\"status\"], pretty_print_date(data[\"lastUpdatedTime\"]) ) string += \"Reported by", "} ) if result[\"data\"]: last_run[\"start_time\"] = str(result[\"data\"][-1][\"createdTimestamp\"] + 1) return", "readable_output=readable_output, outputs_prefix=\"Argus.Tags\", outputs=result, raw_response=result, ) def update_case_command(args: Dict[str, Any]) ->", "incidents command to run after activated via integration settings. next_run,", "ValueError(\"case_id not specified\") result = close_case( caseID=case_id, comment=args.get(\"comment\", None), )", "args.get(\"service\", None) case_type = args.get(\"type\", None) tags = args.get(\"tags\", None)", "readable_output += f\"_id: {attachment['id']}_\\n\" readable_output += \"* * *\\n\" return", "+= f\"_id: {result['data']['id']}_\\n\" return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Attachments\", outputs=result, raw_response=result, )", "str) -> dict: if not string: return {} lst =", "Associated Events\\n\" ), outputs_prefix=\"Argus.Events\", outputs=result, raw_response=result, ) def find_aggregated_events_command(args: Dict[str,", "result[\"data\"]), outputs_prefix=\"Argus.ObservationsIP\", outputs=result, raw_response=result, ) \"\"\" MAIN FUNCTION \"\"\" def", "title: str = None) -> str: data = result[\"data\"] string", "CommandResults( readable_output=pretty_print_events(dict(result), \"# Find NIDS Events\\n\"), outputs_prefix=\"Argus.NIDS\", outputs=result, raw_response=result, 
)", "query = args.get(\"query\", None) if not query: raise ValueError(\"query not", "args.get(\"comment\", None) if not case_id: raise ValueError(\"case id not specified\")", "Dict[str, Any]) -> Any: case_id = args.get(\"case_id\", None) attachment_id =", "result = find_n_i_d_s_events( skipFutureEvents=args.get(\"skip_future_events\", None), exclude=args.get(\"exclude\", None), eventIdentifier=argToList(args.get(\"event_identifier\", None)), locationID=argToList(args.get(\"location_id\",", "if not case_id: raise ValueError(\"case_id not specified\") if not key:", "signature=args.get(\"signature\", None), ip=args.get(\"ip\", None), startTimestamp=args.get(\"start_timestamp\", None), endTimestamp=args.get(\"end_timestamp\", None), limit=args.get(\"limit\", None),", "== \"argus-list-case-tags\": return_results(list_case_tags_command(demisto.args())) elif demisto.command() == \"argus-list-case-comments\": return_results(list_case_comments_command(demisto.args())) elif demisto.command()", "readable_output=pretty_print_case_metadata(result), outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result, ) def list_case_attachments_command(args: Dict[str, Any]) ->", "def remove_case_tag_by_key_value_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None)", "+= f\"_Flags: {str(comment['flags'])}_\\n\" if comment[\"flags\"] else \"\" string += \"*", "None), sortBy=sort_by, ) return CommandResults( readable_output=pretty_print_comments( result[\"data\"], f\"# #{case_id}: Comments\\n\"", "\"argus-add-case-tag\": return_results(add_case_tag_command(demisto.args())) elif demisto.command() == \"argus-add-comment\": return_results(add_comment_command(demisto.args())) elif demisto.command() ==", "case_id = args.get(\"case_id\", None) comment_id = args.get(\"comment_id\", None) if not", "demisto from CommonServerPython import * \"\"\" IMPORTS \"\"\" import json", "return CommandResults( readable_output=readable_output, 
outputs_prefix=\"Argus.Attachments\", outputs=result, raw_response=result, ) def get_case_metadata_by_id_command(args: Dict[str,", "{ \"pendingCustomer\": 0, \"pendingSoc\": 0, \"pendingVendor\": 0, \"pendingClose\": 0, \"workingSoc\":", "+= f\"{result['data']['name']} ({result['data']['mimeType']}, {result['data']['size']} bytes)\\n\\n\" readable_output += f\"_id: {result['data']['id']}_\\n\" return", "specified\") result = list_case_attachments( caseID=case_id, limit=args.get(\"limit\", None), offset=args.get(\"offset\", None) )", "None), subCriteria=argToList(args.get(\"sub_criteria\", None)), exclude=args.get(\"exclude\", None), required=args.get(\"required\", None), customerID=argToList(args.get(\"customer_id\", None)), caseID=argToList(args.get(\"case_id\",", "not subject: raise ValueError(\"subject not specified\") if not description: raise", "value: raise ValueError(\"value not specified\") tag = {\"key\": key, \"value\":", "return_results(get_events_for_case_command(demisto.args())) elif demisto.command() == \"argus-find-aggregated-events\": return_results(find_aggregated_events_command(demisto.args())) elif demisto.command() == \"argus-list-aggregated-events\":", "list, title: str = None) -> str: string = title", "ValueError(\"case id not specified\") if not comment_id: raise ValueError(\"comment id", "None)), attackCategoryID=argToList(args.get(\"attack_category_id\", None)), sourceGeoCountry=argToList(args.get(\"source_geo_country\", None)), destinationGeoCountry=argToList(args.get(\"destination_geo_country\", None)), geoCountry=argToList(args.get(\"geo_country\", None)), properties=str_to_dict(args.get(\"properties\",", "== \"argus-get-events-for-case\": return_results(get_events_for_case_command(demisto.args())) elif demisto.command() == \"argus-find-aggregated-events\": return_results(find_aggregated_events_command(demisto.args())) elif demisto.command()", "suitable for playground\", result[\"data\"] ) return CommandResults( 
readable_output=readable_output, outputs_prefix=\"Argus.Cases\", outputs=result,", "int = 25, min_severity: str = \"low\" ): start_timestamp =", "by {} at {}\\n\\n\".format( data[\"publishedByUser\"][\"name\"], pretty_print_date(data[\"publishedTime\"]) ) string += data[\"description\"]", "id not specified\") if not key: raise ValueError(\"key not specified\")", "string += f\"{comment['comment']}\\n\\n\" string += f\"_id: {comment['id']}_\\n\" string += f\"_Flags:", "limit=args.get(\"limit\", None), offset=args.get(\"offset\", None) ) readable_output = f\"# #{case_id}: Case", "args.get(\"timestamp\", None) customer_id = args.get(\"customer_id\", None) event_id = args.get(\"event_id\", None)", "ValueError(\"attachment id not specified\") result = get_attachment(caseID=case_id, attachmentID=attachment_id) readable_output =", "str): return pretty_print_date(dateparser.parse(date_time)) return datetime.now().strftime(PRETTY_DATE_FORMAT) def pretty_print_case_metadata(result: dict, title: str", "userAssigned=args.get(\"user_assigned\", None), techAssigned=args.get(\"tech_assigned\", None), includeWorkflows=args.get(\"include_workflows\", None), includeDescription=args.get(\"include_description\", None), accessMode=argToList(args.get(\"access_mode\", None)),", "tableToMarkdown( f\"#{case_id}: Delete tags\", result[\"data\"], headers=headers ) return CommandResults( readable_output=readable_output,", "limit=args.get(\"limit\", None), offset=args.get(\"offset\", None) ) return CommandResults( readable_output=pretty_print_events( dict(result), f\"#", "Any]) -> CommandResults: case_id = args.get(\"case_id\", None) attachment_id = args.get(\"attachment_id\",", "includeAnonymousResults=args.get(\"include_anonymous_results\", None), rrClass=argToList(args.get(\"rr_class\", None)), rrType=argToList(args.get(\"rr_type\", None)), customerID=argToList(args.get(\"customer_id\", None)), tlp=argToList((args.get(\"tlp\", None))),", "return CommandResults( 
readable_output=pretty_print_comment( result[\"data\"], f\"# #{case_id}: Added comment\\n\" ), outputs_prefix=\"Argus.Comment\",", "len(lst) % 2 != 0: return [] tags = []", "string += data[\"description\"] return string def pretty_print_comment(comment: dict, title: str", "specified\") result = fetch_observations_for_i_p(ip=ip) return CommandResults( readable_output=tableToMarkdown(f'IP observations for \"{ip}\"',", "None), includeDeleted=args.get(\"include_deleted\", None), minCount=args.get(\"min_count\", None), associatedCaseID=argToList(args.get(\"associated_case_id\", None)), sourceIPMinBits=args.get(\"source_ip_min_bits\", None), destinationIPMinBits=args.get(\"destination_ip_min_bits\",", "Dict[str, Any]) -> CommandResults: query = args.get(\"query\", None) if not", ") return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Cases\", outputs=result, raw_response=result, ) def close_case_command(args:", "\"\" for comment in comments: string += pretty_print_comment(comment) return string", "#{case_id}: Comments\\n\" ), outputs_prefix=\"Argus.Comments\", outputs=result, raw_response=result, ) def remove_case_tag_by_id_command(args: Dict[str,", "NIDS Events\\n\"), outputs_prefix=\"Argus.NIDS\", outputs=result, raw_response=result, ) def list_nids_events_command(args: Dict[str, Any])", "-> CommandResults: case_id = args.get(\"case_id\", None) key = args.get(\"key\", None)", "def get_events_for_case_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None)", "argus_priority_to_demisto_severity(priority: str) -> int: mapping = {\"low\": 1, \"medium\": 2,", "CommandResults: result = list_n_i_d_s_events( customerID=args.get(\"customer_id\", None), signature=args.get(\"signature\", None), ip=args.get(\"ip\", None),", "None)), ) readable_output = f\"Advanced Case Search: {result['count']} result(s)\\n\" readable_output", "int(date_time.timestamp() * 1000) if isinstance(date_time, str): return 
date_time_to_epoch_milliseconds(dateparser.parse(date_time)) return int(datetime.now().timestamp()", "demisto.command() == \"argus-get-events-for-case\": return_results(get_events_for_case_command(demisto.args())) elif demisto.command() == \"argus-find-aggregated-events\": return_results(find_aggregated_events_command(demisto.args())) elif", "IMPORTS \"\"\" import json import urllib3 import dateparser import traceback", "len(lst), 2): tags.append({\"key\": lst[i], \"value\": lst[i + 1]}) return tags", "mapping.get(status, 0) def build_argus_priority_from_min_severity(min_severity: str) -> List[str]: severities = [\"low\",", "outputs_prefix=\"Argus.Comment\", outputs=result, raw_response=result, ) def advanced_case_search_command(args: Dict[str, Any]) -> CommandResults:", "readable_output = f\"Advanced Case Search: {result['count']} result(s)\\n\" readable_output += tableToMarkdown(", "args.get(\"type\", None) tags = args.get(\"tags\", None) if not subject: raise", "0, \"pendingVendor\": 0, \"pendingClose\": 0, \"workingSoc\": 1, \"workingCustomer\": 1, \"closed\":", "result = update_case( id=case_id, subject=args.get(\"subject\", None), description=args.get(\"description\", None), status=args.get(\"status\", None),", "), outputs_prefix=\"Argus.ObservationsDomain\", outputs=result, raw_response=result, ) def fetch_observations_for_i_p_command(args: Dict[str, Any]) ->", "incidents def add_case_tag_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\",", "= verify def argus_priority_to_demisto_severity(priority: str) -> int: mapping = {\"low\":", "f\"#### *{result['data']['addedByUser']['userName']} - {result['data']['addedTime']}*\\n\" readable_output += f\"{result['data']['name']} ({result['data']['mimeType']}, {result['data']['size']} bytes)\\n\\n\"", "made when pressing the integration Test button. 
return_results(test_module_command()) elif demisto.command()", "specified\") if not tag_id: raise ValueError(\"tag id not specified\") result", "FUNCTIONS \"\"\" def test_module_command() -> str: response = get_current_user() if", "event_id: raise ValueError(\"event id not specified\") result = get_pcap( type=event_type,", "demisto.params().get(\"api_key\"), demisto.params().get(\"api_url\"), handle_proxy(), demisto.params().get(\"insecure\", None), ) demisto.debug(f\"Command being called is", "elif demisto.command() == \"argus-find-aggregated-events\": return_results(find_aggregated_events_command(demisto.args())) elif demisto.command() == \"argus-list-aggregated-events\": return_results(list_aggregated_events_command(demisto.args()))", "raw_response=result, ) def get_events_for_case_command(args: Dict[str, Any]) -> CommandResults: case_id =", "+= tableToMarkdown(\"Events\", result[\"data\"]) return string \"\"\" COMMAND FUNCTIONS \"\"\" def", "\"\"\" CONSTANTS \"\"\" DATE_FORMAT = \"%Y-%m-%dT%H:%M:%SZ\" PRETTY_DATE_FORMAT = \"%b %d,", "-> CommandResults: case_id = args.get(\"case_id\", None) if not case_id: raise", "outputs=result, raw_response=result, ) def get_pcap_command(args: Dict[str, Any]) -> Any: event_type", "3, \"critical\": 4} return mapping.get(priority, 0) def argus_status_to_demisto_status(status: str) ->", ") if result[\"data\"]: last_run[\"start_time\"] = str(result[\"data\"][-1][\"createdTimestamp\"] + 1) return last_run,", ") \"\"\" ENTRY POINT \"\"\" if __name__ in (\"__main__\", \"__builtin__\",", "f\"Advanced Case Search: {result['count']} result(s)\\n\" readable_output += tableToMarkdown( \"Output not", "not specified\") if tags: tags = str(tags).split(\",\") if len(tags) %", "def close_case_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None)", "date_time_to_epoch_milliseconds(dateparser.parse(date_time)) return int(datetime.now().timestamp() * 1000) def pretty_print_date(date_time: Union[datetime, str] =", 
"= args.get(\"subject\", None) description = args.get(\"description\", None) service = args.get(\"service\",", "startTimestamp=args.get(\"start_timestamp\", \"-24hours\"), endTimestamp=args.get(\"end_timestamp\", \"now\"), sortBy=argToList(args.get(\"sort_by\", None)), includeFlags=argToList(args.get(\"include_flags\", None)), excludeFlags=argToList(args.get(\"exclude_flags\", None)),", "not lst: return [] if len(lst) % 2 != 0:", "= get_payload( type=event_type, timestamp=timestamp, customerID=customer_id, eventID=event_id ) readable_output = \"#", "( fetch_observations_for_domain, fetch_observations_for_i_p, ) # Disable insecure warnings urllib3.disable_warnings() \"\"\"", "None), assignedTech=args.get(\"assigned_tech\", None), customerReference=args.get(\"customer_reference\", None), comment=args.get(\"comment\", None), originEmailAddress=args.get(\"origin_email_address\", None), hasEvents=args.get(\"has_events\",", "Dict[str, Any]) -> CommandResults: result = list_n_i_d_s_events( customerID=args.get(\"customer_id\", None), signature=args.get(\"signature\",", "argus_status_to_demisto_status(status: str) -> int: mapping = { \"pendingCustomer\": 0, \"pendingSoc\":", "add_case_tag, add_comment, advanced_case_search, close_case, create_case, delete_case, delete_comment, download_attachment, edit_comment, get_attachment,", "caseID=case_id, limit=args.get(\"limit\", None), offset=args.get(\"offset\", None) ) headers = [\"key\", \"value\",", "readable_output += \"* * *\\n\" return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Attachments\", outputs=result,", "def get_payload_command(args: Dict[str, Any]) -> CommandResults: event_type = args.get(\"type\", None)", "str = \"low\" ): start_timestamp = last_run.get(\"start_time\", None) if last_run", "destinationIPMinBits=args.get(\"destination_ip_min_bits\", None), startTimestamp=args.get(\"start_timestamp\", \"-24hours\"), endTimestamp=args.get(\"end_timestamp\", \"now\"), 
sortBy=argToList(args.get(\"sort_by\", None)), includeFlags=argToList(args.get(\"include_flags\", None)),", "\"argus-delete-case\": return_results(delete_case_command(demisto.args())) elif demisto.command() == \"argus-delete-comment\": return_results(delete_comment_command(demisto.args())) elif demisto.command() ==", "demisto.command() == \"argus-add-case-tag\": return_results(add_case_tag_command(demisto.args())) elif demisto.command() == \"argus-add-comment\": return_results(add_comment_command(demisto.args())) elif", "string def pretty_print_comments(comments: list, title: str = None) -> str:", "ValueError(\"value not specified\") result = remove_case_tag_by_key_value(caseID=case_id, tagKey=key, tagValue=value) headers =", "None) comment = args.get(\"comment\", None) if not case_id: raise ValueError(\"case_id", "def date_time_to_epoch_milliseconds(date_time: Union[datetime, str] = None) -> int: if isinstance(date_time,", "str(tags).split(\",\") if len(tags) % 2 != 0: raise ValueError(\"tags list", "{result['data']['type']}\\n\" readable_output += result[\"data\"][\"payload\"] return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Payload\", outputs=result, raw_response=result,", "None)), customer=argToList(args.get(\"customer\", None)), type=argToList(args.get(\"case_type\", None)), service=argToList(args.get(\"service\", None)), category=argToList(args.get(\"category\", None)), status=argToList(args.get(\"status\",", "True, \"status\": [\"closed\"]}, ], timeFieldStrategy=[\"createdTimestamp\"], ) incidents = [] for", "args.get(\"case_id\", None) attachment_id = args.get(\"attachment_id\", None) if not case_id: raise", "return CommandResults( readable_output=pretty_print_events(dict(result), \"# List Events\\n\"), outputs_prefix=\"Argus.Events\", outputs=result, raw_response=result, )", "case_id = args.get(\"case_id\", None) comment = args.get(\"comment\", None) if not", "list_case_attachments, list_case_tags, list_case_comments, 
remove_case_tag_by_id, remove_case_tag_by_key_value, update_case, ) from argus_api.api.events.v1 import", "pressing the integration Test button. return_results(test_module_command()) elif demisto.command() == \"fetch-incidents\":", "return CommandResults( readable_output=pretty_print_comments( result[\"data\"], f\"# #{case_id}: Comments\\n\" ), outputs_prefix=\"Argus.Comments\", outputs=result,", ") return CommandResults( readable_output=tableToMarkdown(f\"Event: {event_id}\", result[\"data\"]), outputs_prefix=\"Argus.Event\", outputs=result, raw_response=result, )", "== \"argus-get-event\": return_results(get_event_command(demisto.args())) elif demisto.command() == \"argus-get-events-for-case\": return_results(get_events_for_case_command(demisto.args())) elif demisto.command()", "f\"_id: {attachment['id']}_\\n\" readable_output += \"* * *\\n\" return CommandResults( readable_output=readable_output,", "f\"#{case_id}: Delete tags\", result[\"data\"], headers=headers ) return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Tags\",", "return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Cases\", outputs=result, raw_response=result, ) def close_case_command(args: Dict[str,", "CommandResults( readable_output=pretty_print_case_metadata(result), outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result, ) def delete_case_command(args: Dict[str, Any])", "+= f\"Event: {event_id}, type: {result['data']['type']}\\n\" readable_output += result[\"data\"][\"payload\"] return CommandResults(", "title else \"\" for comment in comments: string += pretty_print_comment(comment)", "+= result[\"data\"][\"payload\"] return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Payload\", outputs=result, raw_response=result, ) def", "-> Any: event_type = args.get(\"type\", None) timestamp = args.get(\"timestamp\", None)", "return fileResult(f\"{event_id}_pcap\", result.content) def find_nids_events_command(args: 
Dict[str, Any]) -> CommandResults: #", "= args.get(\"attachment_id\", None) if not case_id: raise ValueError(\"case id not", "f\"_Status: {result['data']['status']}, at: {result['data']['closedTime']}_\" ) return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Case\", outputs=result,", "{pretty_print_date(comment['addedTime'])}*\\n\" string += ( f\"_Last updated {pretty_print_date(comment['lastUpdatedTime'])}_\\n\" if comment[\"lastUpdatedTime\"] else", "None) value = args.get(\"value\", None) if not case_id: raise ValueError(\"case", "\"_Priority: {}, status: {}, last updated: {}_\\n\".format( data[\"priority\"], data[\"status\"], pretty_print_date(data[\"lastUpdatedTime\"])", "None)), tag=argToList(args.get(\"tag\", None)), workflow=argToList(args.get(\"workflow\", None)), field=argToList(args.get(\"field\", None)), keywords=argToList(args.get(\"keywords\", None)), timeFieldStrategy=argToList(args.get(\"time_field_strategy\",", "== \"argus-download-attachment\": return_results(download_attachment_command(demisto.args())) elif demisto.command() == \"argus-edit-comment\": return_results(edit_comment_command(demisto.args())) elif demisto.command()", "if isinstance(date_time, datetime): return int(date_time.timestamp() * 1000) if isinstance(date_time, str):", "None) ) return CommandResults( readable_output=pretty_print_case_metadata(result), outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result, ) def", "case_id: raise ValueError(\"case_id not specified\") if sort_by: sort_by = [\"addedTimestamp\"]", "ValueError(\"event type not specified\") if not timestamp: raise ValueError(\"timestamp not", "Union import logging from argus_api import session as argus_session from", "return {} lst = argToList(string) if len(lst) % 2 !=", "not specified\") result = delete_comment(caseID=case_id, commentID=comment_id) return CommandResults( readable_output=pretty_print_comment( result[\"data\"],", "== \"argus-get-pcap\": 
return_results(get_pcap_command(demisto.args())) elif demisto.command() == \"argus-find-nids-events\": return_results(find_nids_events_command(demisto.args())) elif demisto.command()", "2 != 0: return [] tags = [] for i", "tagValue=value) headers = [\"key\", \"value\", \"addedTime\", \"id\", \"flags\"] readable_output =", "argus_priority_to_demisto_severity(severity): min_severity_list.append(severity) return min_severity_list def parse_first_fetch(first_fetch: Any) -> Any: if", "result = list_n_i_d_s_events( customerID=args.get(\"customer_id\", None), signature=args.get(\"signature\", None), ip=args.get(\"ip\", None), startTimestamp=args.get(\"start_timestamp\",", "CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Attachments\", outputs=result, raw_response=result, ) def get_case_metadata_by_id_command(args: Dict[str, Any])", "list_nids_events_command(args: Dict[str, Any]) -> CommandResults: result = list_n_i_d_s_events( customerID=args.get(\"customer_id\", None),", "subject: raise ValueError(\"subject not specified\") if not description: raise ValueError(\"description", "ValueError(\"comment id not specified\") result = delete_comment(caseID=case_id, commentID=comment_id) return CommandResults(", "None), startTimestamp=args.get(\"start_timestamp\", \"-24hours\"), endTimestamp=args.get(\"end_timestamp\", \"now\"), sortBy=argToList(args.get(\"sort_by\", None)), includeFlags=argToList(args.get(\"include_flags\", None)), excludeFlags=argToList(args.get(\"exclude_flags\",", "25, min_severity: str = \"low\" ): start_timestamp = last_run.get(\"start_time\", None)", "args.get(\"description\", None) service = args.get(\"service\", None) case_type = args.get(\"type\", None)", "not case_id: raise ValueError(\"case id not specified\") result = get_events_for_case(", "get_attachment, get_case_metadata_by_id, list_case_attachments, list_case_tags, list_case_comments, remove_case_tag_by_id, remove_case_tag_by_key_value, update_case, ) from", "= base_url 
argus_session.proxies = proxies argus_session.verify = verify def argus_priority_to_demisto_severity(priority:", "fetch incidents command to run after activated via integration settings.", "outputs=result, raw_response=result, ) def add_comment_command(args: Dict[str, Any]) -> CommandResults: case_id", "return CommandResults( readable_output=tableToMarkdown(f'IP observations for \"{ip}\"', result[\"data\"]), outputs_prefix=\"Argus.ObservationsIP\", outputs=result, raw_response=result,", "elif demisto.command() == \"argus-get-payload\": return_results(get_payload_command(demisto.args())) elif demisto.command() == \"argus-get-pcap\": return_results(get_pcap_command(demisto.args()))", "result[\"limit\"] ) string += tableToMarkdown(\"Events\", result[\"data\"]) return string \"\"\" COMMAND", "= \"%Y-%m-%dT%H:%M:%SZ\" PRETTY_DATE_FORMAT = \"%b %d, %Y, %H:%M:%S\" FETCH_TAG =", "2, \"high\": 3, \"critical\": 4} return mapping.get(priority, 0) def argus_status_to_demisto_status(status:", "raw_response=result, ) def list_nids_events_command(args: Dict[str, Any]) -> CommandResults: result =", "raw_response=result, ) def fetch_observations_for_domain_command(args: Dict[str, Any]) -> CommandResults: fqdn =", ") return CommandResults( readable_output=pretty_print_case_metadata(result), outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result, ) def delete_case_command(args:", "None)), excludeFlags=argToList(args.get(\"exclude_flags\", None)), ) return CommandResults( readable_output=pretty_print_events(dict(result), \"# Find events\\n\"),", "Events\\n\"), outputs_prefix=\"Argus.Events\", outputs=result, raw_response=result, ) def get_payload_command(args: Dict[str, Any]) ->", "assetID=argToList(args.get(\"asset_id\", None)), tag=argToList(args.get(\"tag\", None)), workflow=argToList(args.get(\"workflow\", None)), field=argToList(args.get(\"field\", None)), keywords=argToList(args.get(\"keywords\", None)),", "ValueError(\"case id not specified\") result = update_case( 
id=case_id, subject=args.get(\"subject\", None),", "maxSeverity=args.get(\"max_severity\", None), limit=args.get(\"limit\", 25), offset=args.get(\"offset\", None), includeDeleted=args.get(\"include_deleted\", None), startTimestamp=args.get(\"start_timestamp\", \"-24hours\"),", "raw_response=result, ) def get_pcap_command(args: Dict[str, Any]) -> Any: event_type =", "import get_events_for_case from argus_api.api.events.v1.aggregated import ( find_aggregated_events, list_aggregated_events, ) from", "updated {pretty_print_date(comment['lastUpdatedTime'])}_\\n\" if comment[\"lastUpdatedTime\"] else \"\" ) string += f\"{comment['comment']}\\n\\n\"", "fetch_observations_for_domain_command(args: Dict[str, Any]) -> CommandResults: fqdn = args.get(\"fqdn\", None) if", "raw_response=result, ) def advanced_case_search_command(args: Dict[str, Any]) -> CommandResults: # noinspection", "min_severity_list = [] for severity in severities: if argus_priority_to_demisto_severity( min_severity.lower()", "Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None) comment_id =", "customer=argToList(args.get(\"customer\", None)), alarmID=argToList(args.get(\"alarm_id\", None)), attackCategoryID=argToList(args.get(\"attack_category_id\", None)), sourceGeoCountry=argToList(args.get(\"source_geo_country\", None)), destinationGeoCountry=argToList(args.get(\"destination_geo_country\", None)),", "elif demisto.command() == \"argus-fetch-observations-for-domain\": return_results(fetch_observations_for_domain_command(demisto.args())) elif demisto.command() == \"argus-fetch-observations-for-ip\": return_results(fetch_observations_for_i_p_command(demisto.args()))", "outputs_prefix=\"Argus.NIDS\", outputs=result, raw_response=result, ) def search_records_command(args: Dict[str, Any]) -> CommandResults:", "if not comment_id: raise ValueError(\"comment id not specified\") result =", "{result['count']} result(s)\\n\" readable_output += tableToMarkdown( \"Output not suitable for 
playground\",", "Any: if isinstance(first_fetch, str): if first_fetch[0] != \"-\": first_fetch =", "case_id: raise ValueError(\"case id not specified\") if not comment_id: raise", "outputs_prefix=\"Argus.Attachments\", outputs=result, raw_response=result, ) def get_case_metadata_by_id_command(args: Dict[str, Any]) -> CommandResults:", "specified\") if not event_id: raise ValueError(\"event id not specified\") result", ") demisto.setLastRun(next_run) demisto.incidents(incidents) elif demisto.command() == \"argus-add-case-tag\": return_results(add_case_tag_command(demisto.args())) elif demisto.command()", "\"\"\" def main() -> None: logging.getLogger(\"argus_cli\").setLevel(\"WARNING\") first_fetch_period = parse_first_fetch( demisto.params().get(\"first_fetch\",", "destinationPort=argToList(args.get(\"destination_port\", None)), sourcePort=argToList(args.get(\"source_port\", None)), port=argToList(args.get(\"port\", None)), minSeverity=args.get(\"min_severity\", None), maxSeverity=args.get(\"max_severity\", None),", "offset=args.get(\"offset\", None), includeDeleted=args.get(\"include_deleted\", None), subCriteria=argToList(args.get(\"sub_criteria\", None)), exclude=args.get(\"exclude\", None), required=args.get(\"required\", None),", "readable_output=readable_output, outputs_prefix=\"Argus.Tags\", outputs=result, raw_response=result, ) def add_comment_command(args: Dict[str, Any]) ->", "case[\"description\"], \"customFields\": { \"argus_id\": str(case[\"id\"]), \"type\": case[\"type\"], \"category\": case[\"category\"][\"name\"] if", "title else f\"# #{data['id']}: {data['subject']}\\n\" string += \"_Priority: {}, status:", "- {result['data']['addedTime']}*\\n\" readable_output += f\"{result['data']['name']} ({result['data']['mimeType']}, {result['data']['size']} bytes)\\n\\n\" readable_output +=", "observations for \"{fqdn}\"', result[\"data\"] ), outputs_prefix=\"Argus.ObservationsDomain\", outputs=result, raw_response=result, ) def", "elif demisto.command() == 
\"argus-list-case-comments\": return_results(list_case_comments_command(demisto.args())) elif demisto.command() == \"argus-remove-case-tag-by-id\": return_results(remove_case_tag_by_id_command(demisto.args()))", "-> CommandResults: result = list_aggregated_events( customerID=args.get(\"customer_id\", None), signature=args.get(\"signature\", None), ip=args.get(\"ip\",", ") def delete_comment_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\",", "workflow=argToList(args.get(\"workflow\", None)), field=argToList(args.get(\"field\", None)), keywords=argToList(args.get(\"keywords\", None)), timeFieldStrategy=argToList(args.get(\"time_field_strategy\", None)), timeMatchStrategy=args.get(\"time_match_strategy\", None),", "outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result, ) def get_event_command(args: Dict[str, Any]) -> CommandResults:", "title else \"\" string += f\"#### *{comment['addedByUser']['userName']} - {pretty_print_date(comment['addedTime'])}*\\n\" string", "ValueError(\"case_id not specified\") result = list_case_attachments( caseID=case_id, limit=args.get(\"limit\", None), offset=args.get(\"offset\",", "raise ValueError(\"ip not specified\") result = fetch_observations_for_i_p(ip=ip) return CommandResults( readable_output=tableToMarkdown(f'IP", "not case_id: raise ValueError(\"case_id not specified\") result = list_case_attachments( caseID=case_id,", "not specified\") if not value: raise ValueError(\"value not specified\") result", "\"argus-get-event\": return_results(get_event_command(demisto.args())) elif demisto.command() == \"argus-get-events-for-case\": return_results(get_events_for_case_command(demisto.args())) elif demisto.command() ==", "return CommandResults( readable_output=pretty_print_comment( result[\"data\"], f\"# #{case_id}: Updated comment\\n\" ), outputs_prefix=\"Argus.Comment\",", "if not case_id: raise ValueError(\"case_id not specified\") if not comment:", "list_aggregated_events_command(args: 
Dict[str, Any]) -> CommandResults: result = list_aggregated_events( customerID=args.get(\"customer_id\", None),", "\"status\": [\"closed\"]}, ], timeFieldStrategy=[\"createdTimestamp\"], ) incidents = [] for case", "!= 0: raise ValueError(\"tags list must be of even number\",", "elif demisto.command() == \"argus-list-nids-events\": return_results(list_nids_events_command(demisto.args())) elif demisto.command() == \"argus-pdns-search-records\": return_results(search_records_command(demisto.args()))", "traceback from typing import Any, Dict, List, Union import logging", "raise ValueError(\"comment id not specified\") result = delete_comment(caseID=case_id, commentID=comment_id) return", "not case_type: raise ValueError(\"case_type not specified\") if tags: tags =", "argus_status_to_demisto_status(case[\"status\"]), \"details\": case[\"description\"], \"customFields\": { \"argus_id\": str(case[\"id\"]), \"type\": case[\"type\"], \"category\":", "result = fetch_observations_for_i_p(ip=ip) return CommandResults( readable_output=tableToMarkdown(f'IP observations for \"{ip}\"', result[\"data\"]),", "def set_argus_settings( api_key: str, base_url: str = None, proxies: dict", "case[\"createdTime\"], \"severity\": argus_priority_to_demisto_severity(case[\"priority\"]), \"status\": argus_status_to_demisto_status(case[\"status\"]), \"details\": case[\"description\"], \"customFields\": { \"argus_id\":", "\"%b %d, %Y, %H:%M:%S\" FETCH_TAG = demisto.params().get(\"fetch_tag\") \"\"\" HELPER FUNCTIONS", "= [\"low\", \"medium\", \"high\", \"critical\"] min_severity_list = [] for severity", "int: mapping = { \"pendingCustomer\": 0, \"pendingSoc\": 0, \"pendingVendor\": 0,", "= args.get(\"description\", None) service = args.get(\"service\", None) case_type = args.get(\"type\",", "not suitable for playground\", result[\"data\"] ) return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Cases\",", "demisto.params().get(\"insecure\", None), ) 
demisto.debug(f\"Command being called is {demisto.command()}\") try: if", "None) ) return CommandResults( readable_output=pretty_print_events( dict(result), f\"# #{case_id}: Associated Events\\n\"", "None) if not case_id: raise ValueError(\"case_id not specified\") if sort_by:", ") string += data[\"description\"] return string def pretty_print_comment(comment: dict, title:", "{result['data']['size']} bytes)\\n\\n\" readable_output += f\"_id: {result['data']['id']}_\\n\" return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Attachments\",", "Any]) -> CommandResults: result = list_n_i_d_s_events( customerID=args.get(\"customer_id\", None), signature=args.get(\"signature\", None),", "specified\") if not customer_id: raise ValueError(\"customer id not specified\") if", "outputs_prefix=\"Argus.Events\", outputs=result, raw_response=result, ) def list_aggregated_events_command(args: Dict[str, Any]) -> CommandResults:", "case_id: raise ValueError(\"case_id not specified\") result = list_case_tags( caseID=case_id, limit=args.get(\"limit\",", "str, limit: int = 25, min_severity: str = \"low\" ):", "pretty_print_case_metadata(result: dict, title: str = None) -> str: data =", "for i in range(0, len(lst), 2): tags.append({\"key\": lst[i], \"value\": lst[i", "{attachment['size']} kb)\\n\\n\" readable_output += f\"_id: {attachment['id']}_\\n\" readable_output += \"* *", "API {response['responseCode']}, {response}\" ) def fetch_incidents( last_run: dict, first_fetch_period: str,", ") def remove_case_tag_by_key_value_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\",", "-> List[str]: severities = [\"low\", \"medium\", \"high\", \"critical\"] min_severity_list =", "ValueError(\"case id not specified\") if not tag_id: raise ValueError(\"tag id", "list_n_i_d_s_events( customerID=args.get(\"customer_id\", None), signature=args.get(\"signature\", None), ip=args.get(\"ip\", None), startTimestamp=args.get(\"start_timestamp\", None), 
endTimestamp=args.get(\"end_timestamp\",", "args.get(\"customer_id\", None) event_id = args.get(\"event_id\", None) if not event_type: raise", "from argus_api.api.events.v1.nids import find_n_i_d_s_events, list_n_i_d_s_events from argus_api.api.pdns.v3.search import search_records from", "\"\" string += \"_Count: {}, showing {} events, from {}", "+= \"_Count: {}, showing {} events, from {} to {}_\\n\".format(", "tagID=tag_id) headers = [\"key\", \"value\", \"addedTime\", \"id\", \"flags\"] readable_output =", "title else \"\" string += \"_Count: {}, showing {} events,", "CommandResults: # noinspection PyTypeChecker result = find_aggregated_events( skipFutureEvents=args.get(\"skip_future_events\", None), exclude=args.get(\"exclude\",", "if response[\"responseCode\"] == 200: return \"ok\" return ( f\"Unable to", "None), subCriteria=argToList(args.get(\"sub_criteria\", None)), signature=argToList(args.get(\"signature\", None)), lastUpdatedTimestamp=args.get(\"last_updated_timestamp\", None), indexStartTime=args.get(\"index_start_time\", None), indexEndTime=args.get(\"index_end_time\",", "-> CommandResults: # noinspection PyTypeChecker result = advanced_case_search( startTimestamp=args.get(\"start_timestamp\", None),", "timeFieldStrategy=argToList(args.get(\"time_field_strategy\", None)), timeMatchStrategy=args.get(\"time_match_strategy\", None), keywordFieldStrategy=argToList(args.get(\"keyword_field_strategy\", None)), keywordMatchStrategy=args.get(\"keyword_match_strategy\", None), user=argToList(args.get(\"user\", None)),", "+= \"Reported by {} at {}\\n\\n\".format( data[\"publishedByUser\"][\"name\"], pretty_print_date(data[\"publishedTime\"]) ) string", "includeDeleted=args.get(\"include_deleted\", None), startTimestamp=args.get(\"start_timestamp\", \"-24hours\"), endTimestamp=args.get(\"end_timestamp\", \"now\"), sortBy=argToList(args.get(\"sort_by\", None)), includeFlags=argToList(args.get(\"include_flags\", None)),", "None), ip=args.get(\"ip\", None), 
startTimestamp=args.get(\"start_timestamp\", None), endTimestamp=args.get(\"end_timestamp\", None), limit=args.get(\"limit\", None), offset=args.get(\"offset\",", "readable_output=pretty_print_comments( result[\"data\"], f\"# #{case_id}: Comments\\n\" ), outputs_prefix=\"Argus.Comments\", outputs=result, raw_response=result, )", "for \"{fqdn}\"', result[\"data\"] ), outputs_prefix=\"Argus.ObservationsDomain\", outputs=result, raw_response=result, ) def fetch_observations_for_i_p_command(args:", "#{case_id}: Deleted comment\\n\" ), outputs_prefix=\"Argus.Comment\", outputs=result, raw_response=result, ) def download_attachment_command(args:", ") set_argus_settings( demisto.params().get(\"api_key\"), demisto.params().get(\"api_url\"), handle_proxy(), demisto.params().get(\"insecure\", None), ) demisto.debug(f\"Command being", "str] = None) -> int: if isinstance(date_time, datetime): return int(date_time.timestamp()", "search_records( query=query, aggregateResult=args.get(\"aggregate_result\", None), includeAnonymousResults=args.get(\"include_anonymous_results\", None), rrClass=argToList(args.get(\"rr_class\", None)), rrType=argToList(args.get(\"rr_type\", None)),", "\"test-module\": # This is the call made when pressing the", "{} at {}\\n\\n\".format( data[\"publishedByUser\"][\"name\"], pretty_print_date(data[\"publishedTime\"]) ) string += data[\"description\"] return", "deleted\"), outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result, ) def delete_comment_command(args: Dict[str, Any]) ->", "string += f\"_id: {comment['id']}_\\n\" string += f\"_Flags: {str(comment['flags'])}_\\n\" if comment[\"flags\"]", "in range(0, len(lst), 2): tags.append({\"key\": lst[i], \"value\": lst[i + 1]})", "None), maxSeverity=args.get(\"max_severity\", None), limit=args.get(\"limit\", 25), offset=args.get(\"offset\", None), includeDeleted=args.get(\"include_deleted\", None), minCount=args.get(\"min_count\",", "+= f\"#### *{result['data']['addedByUser']['userName']} - 
{result['data']['addedTime']}*\\n\" readable_output += f\"{result['data']['name']} ({result['data']['mimeType']}, {result['data']['size']}", "\"value\", \"addedTime\"] readable_output = tableToMarkdown( f\"#{case_id}: Tags\", result[\"data\"], headers=headers )", "from argus_api.api.events.v1.payload import get_payload from argus_api.api.events.v1.pcap import get_pcap from argus_api.api.events.v1.nids", ") def fetch_observations_for_i_p_command(args: Dict[str, Any]) -> CommandResults: ip = args.get(\"ip\",", "else \"\" for comment in comments: string += pretty_print_comment(comment) return", "not specified\") result = close_case( caseID=case_id, comment=args.get(\"comment\", None), ) readable_output", "readable_output = f\"# #{case_id}: Case attachments\\n\" for attachment in result[\"data\"]:", "alarmID=argToList(args.get(\"alarm_id\", None)), attackCategoryID=argToList(args.get(\"attack_category_id\", None)), sourceGeoCountry=argToList(args.get(\"source_geo_country\", None)), destinationGeoCountry=argToList(args.get(\"destination_geo_country\", None)), geoCountry=argToList(args.get(\"geo_country\", None)),", "customerID=argToList(args.get(\"customer_id\", None)), tlp=argToList((args.get(\"tlp\", None))), limit=args.get(\"limit\", 25), offset=args.get(\"offset\", None), ) return", "tags=tags, subject=subject, description=description, customerReference=args.get(\"customer_reference\", None), priority=args.get(\"priority\", None), accessMode=args.get(\"access_mode\", None), originEmailAddress=args.get(\"origin_email_address\",", "outputs_prefix=\"Argus.Tags\", outputs=result, raw_response=result, ) def update_case_command(args: Dict[str, Any]) -> CommandResults:", "not case_id: raise ValueError(\"case id not specified\") if not comment_id:", "errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback", "* 1000) if isinstance(date_time, str): return date_time_to_epoch_milliseconds(dateparser.parse(date_time)) return 
int(datetime.now().timestamp() *", "None)), status=argToList(args.get(\"status\", None)), priority=argToList(args.get(\"priority\", None)), assetID=argToList(args.get(\"asset_id\", None)), tag=argToList(args.get(\"tag\", None)), workflow=argToList(args.get(\"workflow\",", "if not comment: raise ValueError(\"comment not specified\") result = edit_comment(caseID=case_id,", "PyTypeChecker result = find_n_i_d_s_events( skipFutureEvents=args.get(\"skip_future_events\", None), exclude=args.get(\"exclude\", None), eventIdentifier=argToList(args.get(\"event_identifier\", None)),", "None) comment = args.get(\"comment\", None) if not case_id: raise ValueError(\"case", "-> dict: if not string: return {} lst = argToList(string)", "= f\"# #{case_id}: close case\\n\" readable_output += ( f\"_Status: {result['data']['status']},", "tags.append({\"key\": lst[i], \"value\": lst[i + 1]}) return tags def str_to_dict(string:", "25), offset=args.get(\"offset\", None), includeDeleted=args.get(\"include_deleted\", None), minCount=args.get(\"min_count\", None), associatedCaseID=argToList(args.get(\"associated_case_id\", None)), sourceIPMinBits=args.get(\"source_ip_min_bits\",", "the integration Test button. 
return_results(test_module_command()) elif demisto.command() == \"fetch-incidents\": #", "if isinstance(date_time, str): return date_time_to_epoch_milliseconds(dateparser.parse(date_time)) return int(datetime.now().timestamp() * 1000) def", "main() -> None: logging.getLogger(\"argus_cli\").setLevel(\"WARNING\") first_fetch_period = parse_first_fetch( demisto.params().get(\"first_fetch\", \"-1 day\")", "description=args.get(\"description\", None), status=args.get(\"status\", None), priority=args.get(\"priority\", None), category=args.get(\"category\", None), reporter=args.get(\"reporter\", None),", "fqdn: raise ValueError(\"fqdn not specified\") result = fetch_observations_for_domain(fqdn=fqdn) return CommandResults(", "tagKey=key, tagValue=value) headers = [\"key\", \"value\", \"addedTime\", \"id\", \"flags\"] readable_output", "f\"# #{case_id}: close case\\n\" readable_output += ( f\"_Status: {result['data']['status']}, at:", ") from argus_api.api.events.v1 import get_event_by_path from argus_api.api.events.v1.case.case import get_events_for_case from", "None), ) return CommandResults( readable_output=pretty_print_events(dict(result), \"# List NIDS Events\\n\"), outputs_prefix=\"Argus.NIDS\",", "2): tags.append({\"key\": lst[i], \"value\": lst[i + 1]}) return tags def", "edit_comment_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None) comment_id", "not case_id: raise ValueError(\"case id not specified\") result = get_case_metadata_by_id(", "not case_id: raise ValueError(\"case id not specified\") if not attachment_id:", "string += tableToMarkdown(\"Events\", result[\"data\"]) return string \"\"\" COMMAND FUNCTIONS \"\"\"", "[] for case in result[\"data\"]: incidents.append( { \"name\": f\"#{case['id']}: {case['subject']}\",", "def advanced_case_search_command(args: Dict[str, Any]) -> CommandResults: # noinspection PyTypeChecker result", "else \"\" string += \"_Count: {}, showing {} events, from", "if result[\"data\"]: 
last_run[\"start_time\"] = str(result[\"data\"][-1][\"createdTimestamp\"] + 1) return last_run, incidents", "= f\"-{first_fetch}\" return first_fetch def build_tags_from_list(lst: list) -> List[Dict]: if", "Dict, List, Union import logging from argus_api import session as", "caseID=case_id, comment=comment, asReplyTo=args.get(\"as_reply_to\", None), internal=args.get(\"internal\", None), originEmailAddress=args.get(\"origin_email_address\", None), associatedAttachmentID=args.get(\"associated_attachment_id\", None),", "if case[\"category\"] else None, \"service\": case[\"service\"][\"name\"], \"lastUpdatedTime\": case[\"lastUpdatedTime\"], \"createdTimestamp\": case[\"createdTimestamp\"],", "Any]) -> CommandResults: subject = args.get(\"subject\", None) description = args.get(\"description\",", "None), offset=args.get(\"offset\", None) ) readable_output = f\"# #{case_id}: Case attachments\\n\"", "f\"{comment['comment']}\\n\\n\" string += f\"_id: {comment['id']}_\\n\" string += f\"_Flags: {str(comment['flags'])}_\\n\" if", "= build_tags_from_list(tags) result = create_case( customer=args.get(\"customer\", None), service=service, category=args.get(\"category\", None),", "ValueError(\"subject not specified\") if not description: raise ValueError(\"description not specified\")", "handle_proxy(), demisto.params().get(\"insecure\", None), ) demisto.debug(f\"Command being called is {demisto.command()}\") try:", "#{case_id}: Added comment\\n\" ), outputs_prefix=\"Argus.Comment\", outputs=result, raw_response=result, ) def advanced_case_search_command(args:", "None, \"service\": case[\"service\"][\"name\"], \"lastUpdatedTime\": case[\"lastUpdatedTime\"], \"createdTimestamp\": case[\"createdTimestamp\"], \"customer\": case[\"customer\"][\"shortName\"], },", "CommandResults( readable_output=pretty_print_events(dict(result), \"# List NIDS Events\\n\"), outputs_prefix=\"Argus.NIDS\", outputs=result, raw_response=result, )", "not timestamp: raise ValueError(\"timestamp not 
specified\") if not customer_id: raise", "pretty_print_comment(comment: dict, title: str = None) -> str: string =", "outputs=result, raw_response=result, ) def list_case_comments_command(args: Dict[str, Any]) -> CommandResults: case_id", "= fetch_observations_for_domain(fqdn=fqdn) return CommandResults( readable_output=tableToMarkdown( f'Domain observations for \"{fqdn}\"', result[\"data\"]", "-> CommandResults: result = list_n_i_d_s_events( customerID=args.get(\"customer_id\", None), signature=args.get(\"signature\", None), ip=args.get(\"ip\",", "None) if not event_type: raise ValueError(\"event type not specified\") if", "fetch_observations_for_i_p(ip=ip) return CommandResults( readable_output=tableToMarkdown(f'IP observations for \"{ip}\"', result[\"data\"]), outputs_prefix=\"Argus.ObservationsIP\", outputs=result,", "1, \"medium\": 2, \"high\": 3, \"critical\": 4} return mapping.get(priority, 0)", "return_results(get_case_metadata_by_id_command(demisto.args())) elif demisto.command() == \"argus-list-case-attachments\": return_results(list_case_attachments_command(demisto.args())) elif demisto.command() == \"argus-list-case-tags\":", "\"\"\" HELPER FUNCTIONS \"\"\" def set_argus_settings( api_key: str, base_url: str", "outputs_prefix=\"Argus.ObservationsDomain\", outputs=result, raw_response=result, ) def fetch_observations_for_i_p_command(args: Dict[str, Any]) -> CommandResults:", "with Argus API {response['responseCode']}, {response}\" ) def fetch_incidents( last_run: dict,", "raise ValueError(\"event id not specified\") result = get_event_by_path( type=event_type, timestamp=timestamp,", "if not value: raise ValueError(\"value not specified\") result = remove_case_tag_by_key_value(caseID=case_id,", "readable_output=readable_output, outputs_prefix=\"Argus.Payload\", outputs=result, raw_response=result, ) def get_pcap_command(args: Dict[str, Any]) ->", "add_comment( caseID=case_id, comment=comment, asReplyTo=args.get(\"as_reply_to\", None), 
internal=args.get(\"internal\", None), originEmailAddress=args.get(\"origin_email_address\", None), associatedAttachmentID=args.get(\"associated_attachment_id\",", "raise ValueError(\"case_type not specified\") if tags: tags = str(tags).split(\",\") if", "dict(result), f\"# #{case_id}: Associated Events\\n\" ), outputs_prefix=\"Argus.Events\", outputs=result, raw_response=result, )", "for i in range(0, len(lst), 2)} def date_time_to_epoch_milliseconds(date_time: Union[datetime, str]", "case_id: raise ValueError(\"case id not specified\") if not key: raise", "if title else f\"# #{data['id']}: {data['subject']}\\n\" string += \"_Priority: {},", "json import urllib3 import dateparser import traceback from typing import", "delete_case_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None) if", "return date_time.strftime(PRETTY_DATE_FORMAT) if isinstance(date_time, str): return pretty_print_date(dateparser.parse(date_time)) return datetime.now().strftime(PRETTY_DATE_FORMAT) def", "result[\"size\"], result[\"offset\"], result[\"limit\"] ) string += tableToMarkdown(\"Events\", result[\"data\"]) return string", "== \"argus-find-aggregated-events\": return_results(find_aggregated_events_command(demisto.args())) elif demisto.command() == \"argus-list-aggregated-events\": return_results(list_aggregated_events_command(demisto.args())) elif demisto.command()", "PyTypeChecker result = find_aggregated_events( skipFutureEvents=args.get(\"skip_future_events\", None), exclude=args.get(\"exclude\", None), locationID=argToList(args.get(\"location_id\", None)),", "\"high\": 3, \"critical\": 4} return mapping.get(priority, 0) def argus_status_to_demisto_status(status: str)", "== \"argus-close-case\": return_results(close_case_command(demisto.args())) elif demisto.command() == \"argus-create-case\": return_results(create_case_command(demisto.args())) elif demisto.command()", "specified\") result = delete_comment(caseID=case_id, commentID=comment_id) return 
CommandResults( readable_output=pretty_print_comment( result[\"data\"], f\"#", "comment\\n\" ), outputs_prefix=\"Argus.Comment\", outputs=result, raw_response=result, ) def advanced_case_search_command(args: Dict[str, Any])", "readable_output += f\"#### *{result['data']['addedByUser']['userName']} - {result['data']['addedTime']}*\\n\" readable_output += f\"{result['data']['name']} ({result['data']['mimeType']},", "get_event_by_path( type=event_type, timestamp=timestamp, customerID=customer_id, eventID=event_id ) return CommandResults( readable_output=tableToMarkdown(f\"Event: {event_id}\",", "specified\") if tags: tags = str(tags).split(\",\") if len(tags) % 2", "case\\n\" readable_output += ( f\"_Status: {result['data']['status']}, at: {result['data']['closedTime']}_\" ) return", "Any]) -> CommandResults: case_id = args.get(\"case_id\", None) if not case_id:", "event_type = args.get(\"type\", None) timestamp = args.get(\"timestamp\", None) customer_id =", "\"rawJSON\": json.dumps(case), } ) if result[\"data\"]: last_run[\"start_time\"] = str(result[\"data\"][-1][\"createdTimestamp\"] +", "last_run, incidents def add_case_tag_command(args: Dict[str, Any]) -> CommandResults: case_id =", "# This is the call made when pressing the integration", "else [\"-addedTimestamp\"] result = list_case_comments( caseID=case_id, beforeComment=args.get(\"before_comment\", None), afterComment=args.get(\"after_comment\", None),", "None)), excludeFlags=argToList(args.get(\"exclude_flags\", None)), ) return CommandResults( readable_output=pretty_print_events(dict(result), \"# Find NIDS", "last_run[\"start_time\"] = str(result[\"data\"][-1][\"createdTimestamp\"] + 1) return last_run, incidents def add_case_tag_command(args:", "\"value\": value} result = add_case_tag(caseID=case_id, tags=tag) headers = [\"key\", \"value\",", "def pretty_print_events(result: dict, title: str = None) -> str: string", "get_attachment(caseID=case_id, attachmentID=attachment_id) readable_output = f\"# 
#{case_id}: attachment metadata\\n\" readable_output +=", "\"\" ) string += f\"{comment['comment']}\\n\\n\" string += f\"_id: {comment['id']}_\\n\" string", "fetch_incidents( last_run: dict, first_fetch_period: str, limit: int = 25, min_severity:", "readable_output = f\"# #{case_id}: close case\\n\" readable_output += ( f\"_Status:", "\"critical\": 4} return mapping.get(priority, 0) def argus_status_to_demisto_status(status: str) -> int:", "= [] for case in result[\"data\"]: incidents.append( { \"name\": f\"#{case['id']}:", "button. return_results(test_module_command()) elif demisto.command() == \"fetch-incidents\": # Set and define", "\"flags\"] readable_output = tableToMarkdown( f\"#{case_id}: Delete tags\", result[\"data\"], headers=headers )", "), outputs_prefix=\"Argus.Comment\", outputs=result, raw_response=result, ) def download_attachment_command(args: Dict[str, Any]) ->", "elif demisto.command() == \"argus-get-pcap\": return_results(get_pcap_command(demisto.args())) elif demisto.command() == \"argus-find-nids-events\": return_results(find_nids_events_command(demisto.args()))", "raise ValueError(\"description not specified\") if not service: raise ValueError(\"service not", "service: raise ValueError(\"service not specified\") if not case_type: raise ValueError(\"case_type", "None)), category=argToList(args.get(\"category\", None)), status=argToList(args.get(\"status\", None)), priority=argToList(args.get(\"priority\", None)), assetID=argToList(args.get(\"asset_id\", None)), tag=argToList(args.get(\"tag\",", "raw_response=result, ) def delete_case_command(args: Dict[str, Any]) -> CommandResults: case_id =", "endTimestamp=args.get(\"end_timestamp\", \"now\"), sortBy=argToList(args.get(\"sort_by\", None)), includeFlags=argToList(args.get(\"include_flags\", None)), excludeFlags=argToList(args.get(\"exclude_flags\", None)), ) return", "add_case_tag(caseID=case_id, tags=tag) headers = [\"key\", \"value\", \"addedTime\"] readable_output = tableToMarkdown(", 
"pretty_print_comments(comments: list, title: str = None) -> str: string =", "None)), tlp=argToList((args.get(\"tlp\", None))), limit=args.get(\"limit\", 25), offset=args.get(\"offset\", None), ) return CommandResults(", "None), tags=tags, subject=subject, description=description, customerReference=args.get(\"customer_reference\", None), priority=args.get(\"priority\", None), accessMode=args.get(\"access_mode\", None),", "argus_session.base_url = base_url argus_session.proxies = proxies argus_session.verify = verify def", "if first_fetch[0] != \"-\": first_fetch = f\"-{first_fetch}\" return first_fetch def", "str(result[\"data\"][-1][\"createdTimestamp\"] + 1) return last_run, incidents def add_case_tag_command(args: Dict[str, Any])", "specified\") tag = {\"key\": key, \"value\": value} result = add_case_tag(caseID=case_id,", "locationID=argToList(args.get(\"location_id\", None)), severity=argToList(args.get(\"severity\", None)), customer=argToList(args.get(\"customer\", None)), alarmID=argToList(args.get(\"alarm_id\", None)), attackCategoryID=argToList(args.get(\"attack_category_id\", None)),", "None) if not case_id: raise ValueError(\"case_id not specified\") result =", "\"argus-close-case\": return_results(close_case_command(demisto.args())) elif demisto.command() == \"argus-create-case\": return_results(create_case_command(demisto.args())) elif demisto.command() ==", "outputs_prefix=\"Argus.Payload\", outputs=result, raw_response=result, ) def get_pcap_command(args: Dict[str, Any]) -> Any:", "tags = build_tags_from_list(tags) result = create_case( customer=args.get(\"customer\", None), service=service, category=args.get(\"category\",", "not specified\") result = fetch_observations_for_domain(fqdn=fqdn) return CommandResults( readable_output=tableToMarkdown( f'Domain observations", "not service: raise ValueError(\"service not specified\") if not case_type: raise", "None)), keywords=argToList(args.get(\"keywords\", None)), 
timeFieldStrategy=argToList(args.get(\"time_field_strategy\", None)), timeMatchStrategy=args.get(\"time_match_strategy\", None), keywordFieldStrategy=argToList(args.get(\"keyword_field_strategy\", None)), keywordMatchStrategy=args.get(\"keyword_match_strategy\",", "= args.get(\"case_id\", None) sort_by = args.get(\"sort_by\", None) if not case_id:", "not specified\") result = remove_case_tag_by_id(caseID=case_id, tagID=tag_id) headers = [\"key\", \"value\",", "asReplyTo=args.get(\"as_reply_to\", None), internal=args.get(\"internal\", None), originEmailAddress=args.get(\"origin_email_address\", None), associatedAttachmentID=args.get(\"associated_attachment_id\", None), ) return", "be of even number\", tags) tags = build_tags_from_list(tags) result =", "endTimestamp=args.get(\"end_timestamp\", None), limit=args.get(\"limit\", None), offset=args.get(\"offset\", None), ) return CommandResults( readable_output=pretty_print_events(dict(result),", "id not specified\") if not tag_id: raise ValueError(\"tag id not", "CommandResults( readable_output=tableToMarkdown( f'Domain observations for \"{fqdn}\"', result[\"data\"] ), outputs_prefix=\"Argus.ObservationsDomain\", outputs=result,", "find_nids_events_command(args: Dict[str, Any]) -> CommandResults: # noinspection PyTypeChecker result =", ") headers = [\"key\", \"value\", \"addedTime\", \"id\"] readable_output = tableToMarkdown(", "return [] if len(lst) % 2 != 0: return []", "not description: raise ValueError(\"description not specified\") if not service: raise", "CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Tags\", outputs=result, raw_response=result, ) def remove_case_tag_by_key_value_command(args: Dict[str, Any])", "def add_comment_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None)", "return CommandResults( readable_output=pretty_print_comment( result[\"data\"], f\"# #{case_id}: Deleted comment\\n\" ), outputs_prefix=\"Argus.Comment\",", "from 
argus_api.api.reputation.v1.observation import ( fetch_observations_for_domain, fetch_observations_for_i_p, ) # Disable insecure", "get_current_user from argus_api.api.cases.v2.case import ( add_case_tag, add_comment, advanced_case_search, close_case, create_case,", "def remove_case_tag_by_id_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None)", "None)), destinationGeoCountry=argToList(args.get(\"destination_geo_country\", None)), geoCountry=argToList(args.get(\"geo_country\", None)), properties=str_to_dict(args.get(\"properties\", None)), exactMatchProperties=args.get(\"exact_match_properties\", None), sensorID=argToList(args.get(\"sensor_id\",", "elif demisto.command() == \"argus-close-case\": return_results(close_case_command(demisto.args())) elif demisto.command() == \"argus-create-case\": return_results(create_case_command(demisto.args()))", "result.content) def find_nids_events_command(args: Dict[str, Any]) -> CommandResults: # noinspection PyTypeChecker", "type=event_type, timestamp=timestamp, customerID=customer_id, eventID=event_id ) readable_output = \"# Event payload\\n\"", "None)), ) return CommandResults( readable_output=pretty_print_events(dict(result), \"# Find NIDS Events\\n\"), outputs_prefix=\"Argus.NIDS\",", "= args.get(\"case_id\", None) key = args.get(\"key\", None) value = args.get(\"value\",", "customer=argToList(args.get(\"customer\", None)), type=argToList(args.get(\"case_type\", None)), service=argToList(args.get(\"service\", None)), category=argToList(args.get(\"category\", None)), status=argToList(args.get(\"status\", None)),", "CommandResults: case_id = args.get(\"case_id\", None) tag_id = args.get(\"tag_id\", None) if", "field=argToList(args.get(\"field\", None)), keywords=argToList(args.get(\"keywords\", None)), timeFieldStrategy=argToList(args.get(\"time_field_strategy\", None)), timeMatchStrategy=args.get(\"time_match_strategy\", None), 
keywordFieldStrategy=argToList(args.get(\"keyword_field_strategy\", None)),", "not specified\") if not comment_id: raise ValueError(\"comment id not specified\")", "i in range(0, len(lst), 2): tags.append({\"key\": lst[i], \"value\": lst[i +", "return_results(fetch_observations_for_domain_command(demisto.args())) elif demisto.command() == \"argus-fetch-observations-for-ip\": return_results(fetch_observations_for_i_p_command(demisto.args())) # Log exceptions and", "attachment_id: raise ValueError(\"attachment id not specified\") result = download_attachment(caseID=case_id, attachmentID=attachment_id)", "metadata\\n\" readable_output += f\"#### *{result['data']['addedByUser']['userName']} - {result['data']['addedTime']}*\\n\" readable_output += f\"{result['data']['name']}", "CommandResults( readable_output=tableToMarkdown(f\"Event: {event_id}\", result[\"data\"]), outputs_prefix=\"Argus.Event\", outputs=result, raw_response=result, ) def get_events_for_case_command(args:", "delete_comment, download_attachment, edit_comment, get_attachment, get_case_metadata_by_id, list_case_attachments, list_case_tags, list_case_comments, remove_case_tag_by_id, remove_case_tag_by_key_value,", "raw_response=result, ) def list_case_attachments_command(args: Dict[str, Any]) -> CommandResults: case_id =", "raw_response=result, ) def update_case_command(args: Dict[str, Any]) -> CommandResults: case_id =", "skipFutureEvents=args.get(\"skip_future_events\", None), exclude=args.get(\"exclude\", None), eventIdentifier=argToList(args.get(\"event_identifier\", None)), locationID=argToList(args.get(\"location_id\", None)), severity=argToList(args.get(\"severity\", None)),", "specified\") result = list_case_tags( caseID=case_id, limit=args.get(\"limit\", None), offset=args.get(\"offset\", None) )", "elif demisto.command() == \"argus-advanced-case-search\": return_results(advanced_case_search_command(demisto.args())) elif demisto.command() == \"argus-close-case\": 
return_results(close_case_command(demisto.args()))", "exclude=args.get(\"exclude\", None), eventIdentifier=argToList(args.get(\"event_identifier\", None)), locationID=argToList(args.get(\"location_id\", None)), severity=argToList(args.get(\"severity\", None)), customer=argToList(args.get(\"customer\", None)),", "= list_case_attachments( caseID=case_id, limit=args.get(\"limit\", None), offset=args.get(\"offset\", None) ) readable_output =", "== \"argus-list-aggregated-events\": return_results(list_aggregated_events_command(demisto.args())) elif demisto.command() == \"argus-get-payload\": return_results(get_payload_command(demisto.args())) elif demisto.command()", "readable_output=tableToMarkdown( f'Domain observations for \"{fqdn}\"', result[\"data\"] ), outputs_prefix=\"Argus.ObservationsDomain\", outputs=result, raw_response=result,", "Find events\\n\"), outputs_prefix=\"Argus.Events\", outputs=result, raw_response=result, ) def list_aggregated_events_command(args: Dict[str, Any])", "assignedUser=args.get(\"assigned_user\", None), assignedTech=args.get(\"assigned_tech\", None), customerReference=args.get(\"customer_reference\", None), comment=args.get(\"comment\", None), originEmailAddress=args.get(\"origin_email_address\", None),", "base_url argus_session.proxies = proxies argus_session.verify = verify def argus_priority_to_demisto_severity(priority: str)", "* *\\n\" return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Attachments\", outputs=result, raw_response=result, ) def", "result[\"offset\"], result[\"limit\"] ) string += tableToMarkdown(\"Events\", result[\"data\"]) return string \"\"\"", "== \"argus-get-attachment\": return_results(get_attachment_command(demisto.args())) elif demisto.command() == \"argus-get-case-metadata-by-id\": return_results(get_case_metadata_by_id_command(demisto.args())) elif demisto.command()", "import traceback from typing import Any, Dict, List, Union import", "tag=argToList(args.get(\"tag\", None)), 
workflow=argToList(args.get(\"workflow\", None)), field=argToList(args.get(\"field\", None)), keywords=argToList(args.get(\"keywords\", None)), timeFieldStrategy=argToList(args.get(\"time_field_strategy\", None)),", "raw_response=result, ) def list_case_tags_command(args: Dict[str, Any]) -> CommandResults: case_id =", "return string def pretty_print_events(result: dict, title: str = None) ->", "outputs=result, raw_response=result, ) def create_case_command(args: Dict[str, Any]) -> CommandResults: subject", "readable_output=pretty_print_events(dict(result), \"# List NIDS Events\\n\"), outputs_prefix=\"Argus.NIDS\", outputs=result, raw_response=result, ) def", "None), priority=args.get(\"priority\", None), accessMode=args.get(\"access_mode\", None), originEmailAddress=args.get(\"origin_email_address\", None), publish=args.get(\"publish\", None), defaultWatchers=args.get(\"default_watchers\",", "number\", tags) tags = build_tags_from_list(tags) result = create_case( customer=args.get(\"customer\", None),", "outputs=result, raw_response=result, ) def list_nids_events_command(args: Dict[str, Any]) -> CommandResults: result", "\"-1 day\") ) set_argus_settings( demisto.params().get(\"api_key\"), demisto.params().get(\"api_url\"), handle_proxy(), demisto.params().get(\"insecure\", None), )", "\"medium\", \"high\", \"critical\"] min_severity_list = [] for severity in severities:", "+ 1) return last_run, incidents def add_case_tag_command(args: Dict[str, Any]) ->", "\"severity\": argus_priority_to_demisto_severity(case[\"priority\"]), \"status\": argus_status_to_demisto_status(case[\"status\"]), \"details\": case[\"description\"], \"customFields\": { \"argus_id\": str(case[\"id\"]),", "not specified\") result = list_case_tags( caseID=case_id, limit=args.get(\"limit\", None), offset=args.get(\"offset\", None)", "result = list_aggregated_events( customerID=args.get(\"customer_id\", None), signature=args.get(\"signature\", None), ip=args.get(\"ip\", None), 
startTimestamp=args.get(\"start_timestamp\",", "Events\\n\"), outputs_prefix=\"Argus.NIDS\", outputs=result, raw_response=result, ) def search_records_command(args: Dict[str, Any]) ->", "first_fetch = f\"-{first_fetch}\" return first_fetch def build_tags_from_list(lst: list) -> List[Dict]:", "CommandResults( readable_output=pretty_print_case_metadata(result), outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result, ) def get_event_command(args: Dict[str, Any])", "Dict[str, Any]) -> CommandResults: fqdn = args.get(\"fqdn\", None) if not", "demisto.command() == \"argus-get-payload\": return_results(get_payload_command(demisto.args())) elif demisto.command() == \"argus-get-pcap\": return_results(get_pcap_command(demisto.args())) elif", "last_run: dict, first_fetch_period: str, limit: int = 25, min_severity: str", "== \"argus-update-case\": return_results(update_case_command(demisto.args())) elif demisto.command() == \"argus-get-event\": return_results(get_event_command(demisto.args())) elif demisto.command()", "argus_session.proxies = proxies argus_session.verify = verify def argus_priority_to_demisto_severity(priority: str) ->", "case_id: raise ValueError(\"case_id not specified\") result = close_case( caseID=case_id, comment=args.get(\"comment\",", "), outputs_prefix=\"Argus.Comment\", outputs=result, raw_response=result, ) def get_attachment_command(args: Dict[str, Any]) ->", "string = title if title else \"\" for comment in", "import json import urllib3 import dateparser import traceback from typing", "elif demisto.command() == \"argus-edit-comment\": return_results(edit_comment_command(demisto.args())) elif demisto.command() == \"argus-get-attachment\": return_results(get_attachment_command(demisto.args()))", "result[\"data\"], f\"# #{case_id}: Comments\\n\" ), outputs_prefix=\"Argus.Comments\", outputs=result, raw_response=result, ) def", "limit=args.get(\"limit\", 25), offset=args.get(\"offset\", None), includeDeleted=args.get(\"include_deleted\", 
None), startTimestamp=args.get(\"start_timestamp\", \"-24hours\"), endTimestamp=args.get(\"end_timestamp\", \"now\"),", "demisto.command() == \"argus-get-case-metadata-by-id\": return_results(get_case_metadata_by_id_command(demisto.args())) elif demisto.command() == \"argus-list-case-attachments\": return_results(list_case_attachments_command(demisto.args())) elif", "observations for \"{ip}\"', result[\"data\"]), outputs_prefix=\"Argus.ObservationsIP\", outputs=result, raw_response=result, ) \"\"\" MAIN", "readable_output += f\"{result['data']['name']} ({result['data']['mimeType']}, {result['data']['size']} bytes)\\n\\n\" readable_output += f\"_id: {result['data']['id']}_\\n\"", "elif demisto.command() == \"argus-list-aggregated-events\": return_results(list_aggregated_events_command(demisto.args())) elif demisto.command() == \"argus-get-payload\": return_results(get_payload_command(demisto.args()))", "ValueError(\"fqdn not specified\") result = fetch_observations_for_domain(fqdn=fqdn) return CommandResults( readable_output=tableToMarkdown( f'Domain", "None), user=argToList(args.get(\"user\", None)), userFieldStrategy=argToList(args.get(\"user_field_strategy\", None)), userAssigned=args.get(\"user_assigned\", None), techAssigned=args.get(\"tech_assigned\", None), includeWorkflows=args.get(\"include_workflows\",", "result = delete_comment(caseID=case_id, commentID=comment_id) return CommandResults( readable_output=pretty_print_comment( result[\"data\"], f\"# #{case_id}:", "\"argus-fetch-observations-for-domain\": return_results(fetch_observations_for_domain_command(demisto.args())) elif demisto.command() == \"argus-fetch-observations-for-ip\": return_results(fetch_observations_for_i_p_command(demisto.args())) # Log exceptions", "None)), service=argToList(args.get(\"service\", None)), category=argToList(args.get(\"category\", None)), status=argToList(args.get(\"status\", None)), priority=argToList(args.get(\"priority\", None)), 
assetID=argToList(args.get(\"asset_id\",", "value = args.get(\"value\", None) if not case_id: raise ValueError(\"case id", "isinstance(date_time, datetime): return int(date_time.timestamp() * 1000) if isinstance(date_time, str): return", "= args.get(\"case_id\", None) attachment_id = args.get(\"attachment_id\", None) if not case_id:", "from argus_api.api.events.v1.case.case import get_events_for_case from argus_api.api.events.v1.aggregated import ( find_aggregated_events, list_aggregated_events,", "update_case_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None) if", "description=description, customerReference=args.get(\"customer_reference\", None), priority=args.get(\"priority\", None), accessMode=args.get(\"access_mode\", None), originEmailAddress=args.get(\"origin_email_address\", None), publish=args.get(\"publish\",", "create_case( customer=args.get(\"customer\", None), service=service, category=args.get(\"category\", None), type=case_type, status=args.get(\"status\", None), tags=tags,", "in comments: string += pretty_print_comment(comment) return string def pretty_print_events(result: dict,", "return_results(list_nids_events_command(demisto.args())) elif demisto.command() == \"argus-pdns-search-records\": return_results(search_records_command(demisto.args())) elif demisto.command() == \"argus-fetch-observations-for-domain\":", "get_payload( type=event_type, timestamp=timestamp, customerID=customer_id, eventID=event_id ) readable_output = \"# Event", "ip=argToList(args.get(\"ip\", None)), destinationPort=argToList(args.get(\"destination_port\", None)), sourcePort=argToList(args.get(\"source_port\", None)), port=argToList(args.get(\"port\", None)), minSeverity=args.get(\"min_severity\", None),", "[] for i in range(0, len(lst), 2): tags.append({\"key\": lst[i], \"value\":", "result = search_records( query=query, aggregateResult=args.get(\"aggregate_result\", None), includeAnonymousResults=args.get(\"include_anonymous_results\", 
None), rrClass=argToList(args.get(\"rr_class\", None)),", "if tags: tags = str(tags).split(\",\") if len(tags) % 2 !=", "elif demisto.command() == \"argus-remove-case-tag-by-key-value\": return_results(remove_case_tag_by_key_value_command(demisto.args())) elif demisto.command() == \"argus-update-case\": return_results(update_case_command(demisto.args()))", "argus_api.api.currentuser.v1.user import get_current_user from argus_api.api.cases.v2.case import ( add_case_tag, add_comment, advanced_case_search,", "not specified\") if not customer_id: raise ValueError(\"customer id not specified\")", "ValueError(\"comment not specified\") result = edit_comment(caseID=case_id, commentID=comment_id, comment=comment) return CommandResults(", "{result['data']['closedTime']}_\" ) return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result, ) def", "= last_run.get(\"start_time\", None) if last_run else None # noinspection PyTypeChecker", "id not specified\") result = get_attachment(caseID=case_id, attachmentID=attachment_id) readable_output = f\"#", ") def search_records_command(args: Dict[str, Any]) -> CommandResults: query = args.get(\"query\",", "!= 0: return [] tags = [] for i in", "None) sort_by = args.get(\"sort_by\", None) if not case_id: raise ValueError(\"case_id", "fetch_incidents( last_run=demisto.getLastRun(), first_fetch_period=first_fetch_period, limit=demisto.params().get(\"max_fetch\", 25), min_severity=demisto.params().get(\"min_severity\", \"low\"), ) demisto.setLastRun(next_run) demisto.incidents(incidents)", "args.get(\"case_id\", None) if not case_id: raise ValueError(\"case_id not specified\") result", "event_id = args.get(\"event_id\", None) if not event_type: raise ValueError(\"event type", "if not key: raise ValueError(\"key not specified\") if not value:", "integration Test button. 
return_results(test_module_command()) elif demisto.command() == \"fetch-incidents\": # Set", "last_run=demisto.getLastRun(), first_fetch_period=first_fetch_period, limit=demisto.params().get(\"max_fetch\", 25), min_severity=demisto.params().get(\"min_severity\", \"low\"), ) demisto.setLastRun(next_run) demisto.incidents(incidents) elif", "result = download_attachment(caseID=case_id, attachmentID=attachment_id) return fileResult(attachment_id, result.content) def edit_comment_command(args: Dict[str,", "{} to {}_\\n\".format( result[\"count\"], result[\"size\"], result[\"offset\"], result[\"limit\"] ) string +=", "None), accessMode=args.get(\"access_mode\", None), originEmailAddress=args.get(\"origin_email_address\", None), publish=args.get(\"publish\", None), defaultWatchers=args.get(\"default_watchers\", None), )", "readable_output=pretty_print_case_metadata(result, \"Case deleted\"), outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result, ) def delete_comment_command(args: Dict[str,", "None), offset=args.get(\"offset\", None), ) return CommandResults( readable_output=pretty_print_events(dict(result), \"# List NIDS", "list_n_i_d_s_events from argus_api.api.pdns.v3.search import search_records from argus_api.api.reputation.v1.observation import ( fetch_observations_for_domain,", "return_error( f\"Failed to execute {demisto.command()} command.\\nError:\\n{str(e)}\" ) \"\"\" ENTRY POINT", "str: string = title if title else \"\" for comment", "if not lst: return [] if len(lst) % 2 !=", "None), indexStartTime=args.get(\"index_start_time\", None), indexEndTime=args.get(\"index_end_time\", None), destinationIP=argToList(args.get(\"destination_ip\", None)), sourceIP=argToList(args.get(\"source_ip\", None)), ip=argToList(args.get(\"ip\",", "int: mapping = {\"low\": 1, \"medium\": 2, \"high\": 3, \"critical\":", ") demisto.debug(f\"Command being called is {demisto.command()}\") try: if demisto.command() ==", "not case_id: raise ValueError(\"case_id not 
specified\") result = close_case( caseID=case_id,", "\"{fqdn}\"', result[\"data\"] ), outputs_prefix=\"Argus.ObservationsDomain\", outputs=result, raw_response=result, ) def fetch_observations_for_i_p_command(args: Dict[str,", "if not subject: raise ValueError(\"subject not specified\") if not description:", "None)), sourceGeoCountry=argToList(args.get(\"source_geo_country\", None)), destinationGeoCountry=argToList(args.get(\"destination_geo_country\", None)), geoCountry=argToList(args.get(\"geo_country\", None)), properties=str_to_dict(args.get(\"properties\", None)), exactMatchProperties=args.get(\"exact_match_properties\",", "includeDescription=args.get(\"include_description\", None), accessMode=argToList(args.get(\"access_mode\", None)), explicitAccess=argToList(args.get(\"explicit_access\", None)), sortBy=argToList(args.get(\"sort_by\", None)), includeFlags=argToList(args.get(\"include_flags\", None)),", "demisto.command() == \"argus-download-attachment\": return_results(download_attachment_command(demisto.args())) elif demisto.command() == \"argus-edit-comment\": return_results(edit_comment_command(demisto.args())) elif", "= args.get(\"case_id\", None) tag_id = args.get(\"tag_id\", None) if not case_id:", "category=argToList(args.get(\"category\", None)), status=argToList(args.get(\"status\", None)), priority=argToList(args.get(\"priority\", None)), assetID=argToList(args.get(\"asset_id\", None)), tag=argToList(args.get(\"tag\", None)),", "Case attachments\\n\" for attachment in result[\"data\"]: readable_output += f\"#### *{attachment['addedByUser']['userName']}", "case_id = args.get(\"case_id\", None) sort_by = args.get(\"sort_by\", None) if not", "result[\"data\"]: readable_output += f\"#### *{attachment['addedByUser']['userName']} - {attachment['addedTime']}*\\n\" readable_output += f\"{attachment['name']}", "argus_priority_to_demisto_severity( min_severity.lower() ) <= argus_priority_to_demisto_severity(severity): min_severity_list.append(severity) 
return min_severity_list def parse_first_fetch(first_fetch:", "case_type = args.get(\"type\", None) tags = args.get(\"tags\", None) if not", "string def pretty_print_events(result: dict, title: str = None) -> str:", "{lst[i]: lst[i + 1] for i in range(0, len(lst), 2)}", "f\"{result['data']['name']} ({result['data']['mimeType']}, {result['data']['size']} bytes)\\n\\n\" readable_output += f\"_id: {result['data']['id']}_\\n\" return CommandResults(", "f\"-{first_fetch}\" return first_fetch def build_tags_from_list(lst: list) -> List[Dict]: if not", "None)), locationID=argToList(args.get(\"location_id\", None)), severity=argToList(args.get(\"severity\", None)), customer=argToList(args.get(\"customer\", None)), alarmID=argToList(args.get(\"alarm_id\", None)), attackCategoryID=argToList(args.get(\"attack_category_id\",", "find_aggregated_events( skipFutureEvents=args.get(\"skip_future_events\", None), exclude=args.get(\"exclude\", None), locationID=argToList(args.get(\"location_id\", None)), severity=argToList(args.get(\"severity\", None)), customer=argToList(args.get(\"customer\",", "None) if not query: raise ValueError(\"query not specified\") # noinspection", "== \"argus-remove-case-tag-by-id\": return_results(remove_case_tag_by_id_command(demisto.args())) elif demisto.command() == \"argus-remove-case-tag-by-key-value\": return_results(remove_case_tag_by_key_value_command(demisto.args())) elif demisto.command()", "def build_tags_from_list(lst: list) -> List[Dict]: if not lst: return []", "and return errors except Exception as e: demisto.error(traceback.format_exc()) # print", "None)), sourcePort=argToList(args.get(\"source_port\", None)), port=argToList(args.get(\"port\", None)), minSeverity=args.get(\"min_severity\", None), maxSeverity=args.get(\"max_severity\", None), limit=args.get(\"limit\",", "being called is {demisto.command()}\") try: if demisto.command() == \"test-module\": #", "% 2 != 0: raise ValueError(\"tags list must be of", "None), 
offset=args.get(\"offset\", None) ) return CommandResults( readable_output=pretty_print_events( dict(result), f\"# #{case_id}:", ") return CommandResults( readable_output=pretty_print_events(dict(result), \"# Find events\\n\"), outputs_prefix=\"Argus.Events\", outputs=result, raw_response=result,", "elif demisto.command() == \"argus-add-comment\": return_results(add_comment_command(demisto.args())) elif demisto.command() == \"argus-advanced-case-search\": return_results(advanced_case_search_command(demisto.args()))", "attackCategoryID=argToList(args.get(\"attack_category_id\", None)), sourceGeoCountry=argToList(args.get(\"source_geo_country\", None)), destinationGeoCountry=argToList(args.get(\"destination_geo_country\", None)), geoCountry=argToList(args.get(\"geo_country\", None)), properties=str_to_dict(args.get(\"properties\", None)),", "def argus_priority_to_demisto_severity(priority: str) -> int: mapping = {\"low\": 1, \"medium\":", "{comment['id']}_\\n\" string += f\"_Flags: {str(comment['flags'])}_\\n\" if comment[\"flags\"] else \"\" string", "return CommandResults( readable_output=pretty_print_case_metadata(result), outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result, ) def list_case_attachments_command(args: Dict[str,", "= args.get(\"type\", None) timestamp = args.get(\"timestamp\", None) customer_id = args.get(\"customer_id\",", "sort_by = args.get(\"sort_by\", None) if not case_id: raise ValueError(\"case_id not", "= None ): argus_session.api_key = api_key argus_session.base_url = base_url argus_session.proxies", "argus_session.verify = verify def argus_priority_to_demisto_severity(priority: str) -> int: mapping =", "None), ) return CommandResults( readable_output=pretty_print_events(dict(result), \"# List Events\\n\"), outputs_prefix=\"Argus.Events\", outputs=result,", "specified\") if sort_by: sort_by = [\"addedTimestamp\"] if sort_by == \"ascending\"", "tableToMarkdown( f\"#{case_id}: Tags\", result[\"data\"], headers=headers ) return 
CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Tags\",", "datetime): return date_time.strftime(PRETTY_DATE_FORMAT) if isinstance(date_time, str): return pretty_print_date(dateparser.parse(date_time)) return datetime.now().strftime(PRETTY_DATE_FORMAT)", "exclude=args.get(\"exclude\", None), locationID=argToList(args.get(\"location_id\", None)), severity=argToList(args.get(\"severity\", None)), customer=argToList(args.get(\"customer\", None)), alarmID=argToList(args.get(\"alarm_id\", None)),", "result = add_case_tag(caseID=case_id, tags=tag) headers = [\"key\", \"value\", \"addedTime\"] readable_output", "argus_api.api.events.v1.nids import find_n_i_d_s_events, list_n_i_d_s_events from argus_api.api.pdns.v3.search import search_records from argus_api.api.reputation.v1.observation", "excludeFlags=argToList(args.get(\"exclude_flags\", None)), ) return CommandResults( readable_output=pretty_print_events(dict(result), \"# Find events\\n\"), outputs_prefix=\"Argus.Events\",", "exactMatchProperties=args.get(\"exact_match_properties\", None), sensorID=argToList(args.get(\"sensor_id\", None)), subCriteria=argToList(args.get(\"sub_criteria\", None)), signature=argToList(args.get(\"signature\", None)), lastUpdatedTimestamp=args.get(\"last_updated_timestamp\", None),", "f\"# #{case_id}: Case attachments\\n\" for attachment in result[\"data\"]: readable_output +=", "Event payload\\n\" readable_output += f\"Event: {event_id}, type: {result['data']['type']}\\n\" readable_output +=", "demisto.command() == \"argus-add-comment\": return_results(add_comment_command(demisto.args())) elif demisto.command() == \"argus-advanced-case-search\": return_results(advanced_case_search_command(demisto.args())) elif", "# noinspection PyTypeChecker result = search_records( query=query, aggregateResult=args.get(\"aggregate_result\", None), includeAnonymousResults=args.get(\"include_anonymous_results\",", "= f\"# #{case_id}: attachment metadata\\n\" readable_output += f\"#### 
*{result['data']['addedByUser']['userName']} -", "result[\"data\"], f\"# #{case_id}: Added comment\\n\" ), outputs_prefix=\"Argus.Comment\", outputs=result, raw_response=result, )", "specified\") result = download_attachment(caseID=case_id, attachmentID=attachment_id) return fileResult(attachment_id, result.content) def edit_comment_command(args:", "* 1000) def pretty_print_date(date_time: Union[datetime, str] = None) -> str:", "outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result, ) def create_case_command(args: Dict[str, Any]) -> CommandResults:", "not value: raise ValueError(\"value not specified\") result = remove_case_tag_by_key_value(caseID=case_id, tagKey=key,", "f\"# #{case_id}: Added comment\\n\" ), outputs_prefix=\"Argus.Comment\", outputs=result, raw_response=result, ) def", "to communicate with Argus API {response['responseCode']}, {response}\" ) def fetch_incidents(", "{}, status: {}, last updated: {}_\\n\".format( data[\"priority\"], data[\"status\"], pretty_print_date(data[\"lastUpdatedTime\"]) )", "not specified\") result = get_case_metadata_by_id( id=case_id, skipRedirect=args.get(\"skip_redirect\", None) ) return", "= tableToMarkdown( f\"#{case_id}: Tags\", result[\"data\"], headers=headers ) return CommandResults( readable_output=readable_output,", "userFieldStrategy=argToList(args.get(\"user_field_strategy\", None)), userAssigned=args.get(\"user_assigned\", None), techAssigned=args.get(\"tech_assigned\", None), includeWorkflows=args.get(\"include_workflows\", None), includeDescription=args.get(\"include_description\", None),", "Deleted comment\\n\" ), outputs_prefix=\"Argus.Comment\", outputs=result, raw_response=result, ) def download_attachment_command(args: Dict[str,", "outputs_prefix=\"Argus.Event\", outputs=result, raw_response=result, ) def get_events_for_case_command(args: Dict[str, Any]) -> CommandResults:", "data[\"publishedByUser\"][\"name\"], pretty_print_date(data[\"publishedTime\"]) ) string += data[\"description\"] 
return string def pretty_print_comment(comment:", "not key: raise ValueError(\"key not specified\") if not value: raise", "def find_nids_events_command(args: Dict[str, Any]) -> CommandResults: # noinspection PyTypeChecker result", "demisto.command() == \"test-module\": # This is the call made when", "Search: {result['count']} result(s)\\n\" readable_output += tableToMarkdown( \"Output not suitable for", "api_key: str, base_url: str = None, proxies: dict = None,", "== \"argus-add-case-tag\": return_results(add_case_tag_command(demisto.args())) elif demisto.command() == \"argus-add-comment\": return_results(add_comment_command(demisto.args())) elif demisto.command()", "not specified\") result = download_attachment(caseID=case_id, attachmentID=attachment_id) return fileResult(attachment_id, result.content) def", "= demisto.params().get(\"fetch_tag\") \"\"\" HELPER FUNCTIONS \"\"\" def set_argus_settings( api_key: str,", "beforeComment=args.get(\"before_comment\", None), afterComment=args.get(\"after_comment\", None), offset=args.get(\"offset\", None), limit=args.get(\"limit\", None), sortBy=sort_by, )", "severity in severities: if argus_priority_to_demisto_severity( min_severity.lower() ) <= argus_priority_to_demisto_severity(severity): min_severity_list.append(severity)", "1]}) return tags def str_to_dict(string: str) -> dict: if not", "pretty_print_date(data[\"publishedTime\"]) ) string += data[\"description\"] return string def pretty_print_comment(comment: dict,", "readable_output = tableToMarkdown( f\"#{case_id}: Tags\", result[\"data\"], headers=headers ) return CommandResults(", "\"* * *\\n\" return string def pretty_print_comments(comments: list, title: str", "originEmailAddress=args.get(\"origin_email_address\", None), hasEvents=args.get(\"has_events\", None), internalComment=args.get(\"internal_comment\", None), ) return CommandResults( readable_output=pretty_print_case_metadata(result),", "def list_case_tags_command(args: Dict[str, Any]) -> CommandResults: 
case_id = args.get(\"case_id\", None)", "\"high\", \"critical\"] min_severity_list = [] for severity in severities: if", "remove_case_tag_by_key_value_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None) key", "# noinspection PyTypeChecker result = advanced_case_search( startTimestamp=args.get(\"start_timestamp\", None), endTimestamp=args.get(\"end_timestamp\", None),", "return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Payload\", outputs=result, raw_response=result, ) def get_pcap_command(args: Dict[str,", "specified\") result = remove_case_tag_by_key_value(caseID=case_id, tagKey=key, tagValue=value) headers = [\"key\", \"value\",", "None)), ip=argToList(args.get(\"ip\", None)), destinationPort=argToList(args.get(\"destination_port\", None)), sourcePort=argToList(args.get(\"source_port\", None)), port=argToList(args.get(\"port\", None)), minSeverity=args.get(\"min_severity\",", "result[\"count\"], result[\"size\"], result[\"offset\"], result[\"limit\"] ) string += tableToMarkdown(\"Events\", result[\"data\"]) return", "status=args.get(\"status\", None), tags=tags, subject=subject, description=description, customerReference=args.get(\"customer_reference\", None), priority=args.get(\"priority\", None), accessMode=args.get(\"access_mode\",", "outputs=result, raw_response=result, ) def remove_case_tag_by_id_command(args: Dict[str, Any]) -> CommandResults: case_id", "demisto.command() == \"argus-list-case-attachments\": return_results(list_case_attachments_command(demisto.args())) elif demisto.command() == \"argus-list-case-tags\": return_results(list_case_tags_command(demisto.args())) elif", "\"argus-list-case-tags\": return_results(list_case_tags_command(demisto.args())) elif demisto.command() == \"argus-list-case-comments\": return_results(list_case_comments_command(demisto.args())) elif demisto.command() ==", "{} events, from {} to {}_\\n\".format( result[\"count\"], result[\"size\"], 
result[\"offset\"], result[\"limit\"]", "min_severity: str = \"low\" ): start_timestamp = last_run.get(\"start_time\", None) if", "Case Search: {result['count']} result(s)\\n\" readable_output += tableToMarkdown( \"Output not suitable", "None), associatedAttachmentID=args.get(\"associated_attachment_id\", None), ) return CommandResults( readable_output=pretty_print_comment( result[\"data\"], f\"# #{case_id}:", "= args.get(\"case_id\", None) if not case_id: raise ValueError(\"case id not", "*{comment['addedByUser']['userName']} - {pretty_print_date(comment['addedTime'])}*\\n\" string += ( f\"_Last updated {pretty_print_date(comment['lastUpdatedTime'])}_\\n\" if", "\"Case deleted\"), outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result, ) def delete_comment_command(args: Dict[str, Any])", "records\", result[\"data\"]), outputs_prefix=\"Argus.PDNS\", outputs=result, raw_response=result, ) def fetch_observations_for_domain_command(args: Dict[str, Any])", "raise ValueError(\"key not specified\") if not value: raise ValueError(\"value not", "headers=headers ) return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Tags\", outputs=result, raw_response=result, ) def", "string def pretty_print_comment(comment: dict, title: str = None) -> str:", "args.get(\"case_id\", None) sort_by = args.get(\"sort_by\", None) if not case_id: raise", "None)), timeMatchStrategy=args.get(\"time_match_strategy\", None), keywordFieldStrategy=argToList(args.get(\"keyword_field_strategy\", None)), keywordMatchStrategy=args.get(\"keyword_match_strategy\", None), user=argToList(args.get(\"user\", None)), userFieldStrategy=argToList(args.get(\"user_field_strategy\",", "type not specified\") if not timestamp: raise ValueError(\"timestamp not specified\")", "raise ValueError(\"value not specified\") result = remove_case_tag_by_key_value(caseID=case_id, tagKey=key, tagValue=value) headers", "= str(result[\"data\"][-1][\"createdTimestamp\"] + 1) return last_run, 
incidents def add_case_tag_command(args: Dict[str,", "== \"argus-get-case-metadata-by-id\": return_results(get_case_metadata_by_id_command(demisto.args())) elif demisto.command() == \"argus-list-case-attachments\": return_results(list_case_attachments_command(demisto.args())) elif demisto.command()", "api_key argus_session.base_url = base_url argus_session.proxies = proxies argus_session.verify = verify", "ValueError(\"attachment id not specified\") result = download_attachment(caseID=case_id, attachmentID=attachment_id) return fileResult(attachment_id,", "CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Attachments\", outputs=result, raw_response=result, ) def list_case_tags_command(args: Dict[str, Any])", "set_argus_settings( demisto.params().get(\"api_key\"), demisto.params().get(\"api_url\"), handle_proxy(), demisto.params().get(\"insecure\", None), ) demisto.debug(f\"Command being called", "+= f\"_id: {comment['id']}_\\n\" string += f\"_Flags: {str(comment['flags'])}_\\n\" if comment[\"flags\"] else", "comment in comments: string += pretty_print_comment(comment) return string def pretty_print_events(result:", ") from argus_api.api.events.v1.payload import get_payload from argus_api.api.events.v1.pcap import get_pcap from", "event_id: raise ValueError(\"event id not specified\") result = get_event_by_path( type=event_type,", "= args.get(\"service\", None) case_type = args.get(\"type\", None) tags = args.get(\"tags\",", "== \"argus-fetch-observations-for-domain\": return_results(fetch_observations_for_domain_command(demisto.args())) elif demisto.command() == \"argus-fetch-observations-for-ip\": return_results(fetch_observations_for_i_p_command(demisto.args())) # Log", "first_fetch_period, endTimestamp=\"now\", limit=limit, sortBy=[\"createdTimestamp\"], priority=build_argus_priority_from_min_severity(min_severity), subCriteria=[ {\"exclude\": True, \"status\": [\"closed\"]},", "= [\"key\", \"value\", \"addedTime\"] readable_output = 
tableToMarkdown( f\"#{case_id}: Tags\", result[\"data\"],", "[\"key\", \"value\", \"addedTime\", \"id\"] readable_output = tableToMarkdown( f\"#{case_id}: Tags\", result[\"data\"],", "-> CommandResults: query = args.get(\"query\", None) if not query: raise", "readable_output += f\"{attachment['name']} ({attachment['mimeType']}, {attachment['size']} kb)\\n\\n\" readable_output += f\"_id: {attachment['id']}_\\n\"", "not value: raise ValueError(\"value not specified\") tag = {\"key\": key,", "readable_output=pretty_print_events(dict(result), \"# Find events\\n\"), outputs_prefix=\"Argus.Events\", outputs=result, raw_response=result, ) def list_aggregated_events_command(args:", "args.get(\"type\", None) timestamp = args.get(\"timestamp\", None) customer_id = args.get(\"customer_id\", None)", "-> int: mapping = { \"pendingCustomer\": 0, \"pendingSoc\": 0, \"pendingVendor\":", "at {}\\n\\n\".format( data[\"publishedByUser\"][\"name\"], pretty_print_date(data[\"publishedTime\"]) ) string += data[\"description\"] return string", "None)), sortBy=argToList(args.get(\"sort_by\", None)), includeFlags=argToList(args.get(\"include_flags\", None)), excludeFlags=argToList(args.get(\"exclude_flags\", None)), ) readable_output =", "demisto.params().get(\"fetch_tag\") \"\"\" HELPER FUNCTIONS \"\"\" def set_argus_settings( api_key: str, base_url:", "customerID=args.get(\"customer_id\", None), signature=args.get(\"signature\", None), ip=args.get(\"ip\", None), startTimestamp=args.get(\"start_timestamp\", None), endTimestamp=args.get(\"end_timestamp\", None),", "-> int: mapping = {\"low\": 1, \"medium\": 2, \"high\": 3,", "COMMAND FUNCTIONS \"\"\" def test_module_command() -> str: response = get_current_user()", "return_results(create_case_command(demisto.args())) elif demisto.command() == \"argus-delete-case\": return_results(delete_case_command(demisto.args())) elif demisto.command() == \"argus-delete-comment\":", "Any]) -> Any: case_id = args.get(\"case_id\", None) attachment_id = 
args.get(\"attachment_id\",", "from argus_api.api.cases.v2.case import ( add_case_tag, add_comment, advanced_case_search, close_case, create_case, delete_case,", "comment_id: raise ValueError(\"comment id not specified\") if not comment: raise", "Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc())", "CommandResults( readable_output=pretty_print_comments( result[\"data\"], f\"# #{case_id}: Comments\\n\" ), outputs_prefix=\"Argus.Comments\", outputs=result, raw_response=result,", "readable_output += ( f\"_Status: {result['data']['status']}, at: {result['data']['closedTime']}_\" ) return CommandResults(", "sensorID=argToList(args.get(\"sensor_id\", None)), subCriteria=argToList(args.get(\"sub_criteria\", None)), signature=argToList(args.get(\"signature\", None)), lastUpdatedTimestamp=args.get(\"last_updated_timestamp\", None), indexStartTime=args.get(\"index_start_time\", None),", "\"pendingCustomer\": 0, \"pendingSoc\": 0, \"pendingVendor\": 0, \"pendingClose\": 0, \"workingSoc\": 1,", "None), ) return CommandResults( readable_output=tableToMarkdown(\"PDNS records\", result[\"data\"]), outputs_prefix=\"Argus.PDNS\", outputs=result, raw_response=result,", "outputs=result, raw_response=result, ) def list_case_attachments_command(args: Dict[str, Any]) -> CommandResults: case_id", "= list_n_i_d_s_events( customerID=args.get(\"customer_id\", None), signature=args.get(\"signature\", None), ip=args.get(\"ip\", None), startTimestamp=args.get(\"start_timestamp\", None),", "not query: raise ValueError(\"query not specified\") # noinspection PyTypeChecker result", "return_results(download_attachment_command(demisto.args())) elif demisto.command() == \"argus-edit-comment\": return_results(edit_comment_command(demisto.args())) elif demisto.command() == \"argus-get-attachment\":", "args.get(\"sort_by\", None) if not case_id: raise ValueError(\"case_id not specified\") if", "= \"# Event payload\\n\" readable_output += f\"Event: {event_id}, 
type: {result['data']['type']}\\n\"", "comment\\n\" ), outputs_prefix=\"Argus.Comment\", outputs=result, raw_response=result, ) def get_attachment_command(args: Dict[str, Any])", "ValueError(\"case id not specified\") result = get_events_for_case( caseID=case_id, limit=args.get(\"limit\", None),", ") return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result, ) def create_case_command(args:", "raise ValueError(\"case id not specified\") if not attachment_id: raise ValueError(\"attachment", "result = remove_case_tag_by_id(caseID=case_id, tagID=tag_id) headers = [\"key\", \"value\", \"addedTime\", \"id\",", "outputs=result, raw_response=result, ) def delete_comment_command(args: Dict[str, Any]) -> CommandResults: case_id", "None), ) demisto.debug(f\"Command being called is {demisto.command()}\") try: if demisto.command()", "args.get(\"comment_id\", None) comment = args.get(\"comment\", None) if not case_id: raise", "None), service=service, category=args.get(\"category\", None), type=case_type, status=args.get(\"status\", None), tags=tags, subject=subject, description=description,", "{}, showing {} events, from {} to {}_\\n\".format( result[\"count\"], result[\"size\"],", "CommandResults: case_id = args.get(\"case_id\", None) comment_id = args.get(\"comment_id\", None) if", "not specified\") result = edit_comment(caseID=case_id, commentID=comment_id, comment=comment) return CommandResults( readable_output=pretty_print_comment(", "sourceIPMinBits=args.get(\"source_ip_min_bits\", None), destinationIPMinBits=args.get(\"destination_ip_min_bits\", None), startTimestamp=args.get(\"start_timestamp\", \"-24hours\"), endTimestamp=args.get(\"end_timestamp\", \"now\"), sortBy=argToList(args.get(\"sort_by\", None)),", "ValueError(\"case_id not specified\") result = list_case_tags( caseID=case_id, limit=args.get(\"limit\", None), offset=args.get(\"offset\",", "\"argus-create-case\": 
return_results(create_case_command(demisto.args())) elif demisto.command() == \"argus-delete-case\": return_results(delete_case_command(demisto.args())) elif demisto.command() ==", "+= pretty_print_comment(comment) return string def pretty_print_events(result: dict, title: str =", "outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result, ) def delete_comment_command(args: Dict[str, Any]) -> CommandResults:", "bytes)\\n\\n\" readable_output += f\"_id: {result['data']['id']}_\\n\" return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Attachments\", outputs=result,", "not specified\") if not service: raise ValueError(\"service not specified\") if", "offset=args.get(\"offset\", None), ) return CommandResults( readable_output=pretty_print_events(dict(result), \"# List Events\\n\"), outputs_prefix=\"Argus.Events\",", ") string += tableToMarkdown(\"Events\", result[\"data\"]) return string \"\"\" COMMAND FUNCTIONS", "f\"Failed to execute {demisto.command()} command.\\nError:\\n{str(e)}\" ) \"\"\" ENTRY POINT \"\"\"", "even number\", tags) tags = build_tags_from_list(tags) result = create_case( customer=args.get(\"customer\",", "-> CommandResults: fqdn = args.get(\"fqdn\", None) if not fqdn: raise", "\"argus-advanced-case-search\": return_results(advanced_case_search_command(demisto.args())) elif demisto.command() == \"argus-close-case\": return_results(close_case_command(demisto.args())) elif demisto.command() ==", "None)), caseID=argToList(args.get(\"case_id\", None)), customer=argToList(args.get(\"customer\", None)), type=argToList(args.get(\"case_type\", None)), service=argToList(args.get(\"service\", None)), category=argToList(args.get(\"category\",", "* *\\n\" return string def pretty_print_comments(comments: list, title: str =", "comment=args.get(\"comment\", None), ) readable_output = f\"# #{case_id}: close case\\n\" readable_output", "readable_output=pretty_print_case_metadata(result), outputs_prefix=\"Argus.Case\", 
outputs=result, raw_response=result, ) def delete_case_command(args: Dict[str, Any]) ->", "not customer_id: raise ValueError(\"customer id not specified\") if not event_id:", "keywordMatchStrategy=args.get(\"keyword_match_strategy\", None), user=argToList(args.get(\"user\", None)), userFieldStrategy=argToList(args.get(\"user_field_strategy\", None)), userAssigned=args.get(\"user_assigned\", None), techAssigned=args.get(\"tech_assigned\", None),", "sort_by = [\"addedTimestamp\"] if sort_by == \"ascending\" else [\"-addedTimestamp\"] result", "status: {}, last updated: {}_\\n\".format( data[\"priority\"], data[\"status\"], pretty_print_date(data[\"lastUpdatedTime\"]) ) string", "f\"_id: {result['data']['id']}_\\n\" return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Attachments\", outputs=result, raw_response=result, ) def", "for attachment in result[\"data\"]: readable_output += f\"#### *{attachment['addedByUser']['userName']} - {attachment['addedTime']}*\\n\"", "tags def str_to_dict(string: str) -> dict: if not string: return", "): start_timestamp = last_run.get(\"start_time\", None) if last_run else None #", "None) tags = args.get(\"tags\", None) if not subject: raise ValueError(\"subject", "id not specified\") result = remove_case_tag_by_id(caseID=case_id, tagID=tag_id) headers = [\"key\",", "None) -> str: data = result[\"data\"] string = title if", "includeWorkflows=args.get(\"include_workflows\", None), includeDescription=args.get(\"include_description\", None), accessMode=argToList(args.get(\"access_mode\", None)), explicitAccess=argToList(args.get(\"explicit_access\", None)), sortBy=argToList(args.get(\"sort_by\", None)),", "\"argus-download-attachment\": return_results(download_attachment_command(demisto.args())) elif demisto.command() == \"argus-edit-comment\": return_results(edit_comment_command(demisto.args())) elif demisto.command() ==", "raw_response=result, ) def get_payload_command(args: Dict[str, Any]) -> CommandResults: 
event_type =", "Test button. return_results(test_module_command()) elif demisto.command() == \"fetch-incidents\": # Set and", "Added comment\\n\" ), outputs_prefix=\"Argus.Comment\", outputs=result, raw_response=result, ) def advanced_case_search_command(args: Dict[str,", "import ( find_aggregated_events, list_aggregated_events, ) from argus_api.api.events.v1.payload import get_payload from", "- {pretty_print_date(comment['addedTime'])}*\\n\" string += ( f\"_Last updated {pretty_print_date(comment['lastUpdatedTime'])}_\\n\" if comment[\"lastUpdatedTime\"]", "defaultWatchers=args.get(\"default_watchers\", None), ) return CommandResults( readable_output=pretty_print_case_metadata(result), outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result, )", "outputs=result, raw_response=result, ) def download_attachment_command(args: Dict[str, Any]) -> Any: case_id", "= args.get(\"timestamp\", None) customer_id = args.get(\"customer_id\", None) event_id = args.get(\"event_id\",", "None), offset=args.get(\"offset\", None), includeDeleted=args.get(\"include_deleted\", None), subCriteria=argToList(args.get(\"sub_criteria\", None)), exclude=args.get(\"exclude\", None), required=args.get(\"required\",", "isinstance(date_time, str): return date_time_to_epoch_milliseconds(dateparser.parse(date_time)) return int(datetime.now().timestamp() * 1000) def pretty_print_date(date_time:", "= delete_comment(caseID=case_id, commentID=comment_id) return CommandResults( readable_output=pretty_print_comment( result[\"data\"], f\"# #{case_id}: Deleted", "\"addedTime\"] readable_output = tableToMarkdown( f\"#{case_id}: Tags\", result[\"data\"], headers=headers ) return", "raw_response=result, ) def delete_comment_command(args: Dict[str, Any]) -> CommandResults: case_id =", "def list_nids_events_command(args: Dict[str, Any]) -> CommandResults: result = list_n_i_d_s_events( customerID=args.get(\"customer_id\",", "= tableToMarkdown( f\"#{case_id}: Delete tags\", result[\"data\"], 
headers=headers ) return CommandResults(", "case_id: raise ValueError(\"case id not specified\") result = delete_case(caseID=case_id) return", "str): if first_fetch[0] != \"-\": first_fetch = f\"-{first_fetch}\" return first_fetch", "not case_id: raise ValueError(\"case_id not specified\") if not key: raise", "def fetch_observations_for_i_p_command(args: Dict[str, Any]) -> CommandResults: ip = args.get(\"ip\", None)", "comment\\n\" ), outputs_prefix=\"Argus.Comment\", outputs=result, raw_response=result, ) def download_attachment_command(args: Dict[str, Any])", "kb)\\n\\n\" readable_output += f\"_id: {attachment['id']}_\\n\" readable_output += \"* * *\\n\"", "\"\"\" def set_argus_settings( api_key: str, base_url: str = None, proxies:", "case_id = args.get(\"case_id\", None) comment_id = args.get(\"comment_id\", None) comment =", "result[\"data\"]: last_run[\"start_time\"] = str(result[\"data\"][-1][\"createdTimestamp\"] + 1) return last_run, incidents def", "ValueError(\"case id not specified\") result = delete_case(caseID=case_id) return CommandResults( readable_output=pretty_print_case_metadata(result,", "specified\") if not comment: raise ValueError(\"comment not specified\") result =", "CommandResults: case_id = args.get(\"case_id\", None) attachment_id = args.get(\"attachment_id\", None) if", "def get_case_metadata_by_id_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None)", ") def get_case_metadata_by_id_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\",", "dateparser import traceback from typing import Any, Dict, List, Union", "\"low\"), ) demisto.setLastRun(next_run) demisto.incidents(incidents) elif demisto.command() == \"argus-add-case-tag\": return_results(add_case_tag_command(demisto.args())) elif", "if not case_id: raise ValueError(\"case_id not specified\") result = list_case_attachments(", "subCriteria=argToList(args.get(\"sub_criteria\", None)), exclude=args.get(\"exclude\", None), 
required=args.get(\"required\", None), customerID=argToList(args.get(\"customer_id\", None)), caseID=argToList(args.get(\"case_id\", None)),", "-> List[Dict]: if not lst: return [] if len(lst) %", "outputs_prefix=\"Argus.Comments\", outputs=result, raw_response=result, ) def remove_case_tag_by_id_command(args: Dict[str, Any]) -> CommandResults:", "%H:%M:%S\" FETCH_TAG = demisto.params().get(\"fetch_tag\") \"\"\" HELPER FUNCTIONS \"\"\" def set_argus_settings(", "min_severity.lower() ) <= argus_priority_to_demisto_severity(severity): min_severity_list.append(severity) return min_severity_list def parse_first_fetch(first_fetch: Any)", "None)), severity=argToList(args.get(\"severity\", None)), customer=argToList(args.get(\"customer\", None)), alarmID=argToList(args.get(\"alarm_id\", None)), attackCategoryID=argToList(args.get(\"attack_category_id\", None)), sourceGeoCountry=argToList(args.get(\"source_geo_country\",", "pretty_print_date(dateparser.parse(date_time)) return datetime.now().strftime(PRETTY_DATE_FORMAT) def pretty_print_case_metadata(result: dict, title: str = None)", "timeMatchStrategy=args.get(\"time_match_strategy\", None), keywordFieldStrategy=argToList(args.get(\"keyword_field_strategy\", None)), keywordMatchStrategy=args.get(\"keyword_match_strategy\", None), user=argToList(args.get(\"user\", None)), userFieldStrategy=argToList(args.get(\"user_field_strategy\", None)),", "else None # noinspection PyTypeChecker result = advanced_case_search( startTimestamp=start_timestamp if", "None)), signature=argToList(args.get(\"signature\", None)), lastUpdatedTimestamp=args.get(\"last_updated_timestamp\", None), indexStartTime=args.get(\"index_start_time\", None), indexEndTime=args.get(\"index_end_time\", None), destinationIP=argToList(args.get(\"destination_ip\",", "str: if isinstance(date_time, datetime): return date_time.strftime(PRETTY_DATE_FORMAT) if isinstance(date_time, str): return", "#{case_id}: attachment metadata\\n\" readable_output += f\"#### 
*{result['data']['addedByUser']['userName']} - {result['data']['addedTime']}*\\n\" readable_output", "Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None) key =", "Dict[str, Any]) -> Any: event_type = args.get(\"type\", None) timestamp =", "tags: tags = str(tags).split(\",\") if len(tags) % 2 != 0:", "raise ValueError(\"case id not specified\") result = update_case( id=case_id, subject=args.get(\"subject\",", "type=argToList(args.get(\"case_type\", None)), service=argToList(args.get(\"service\", None)), category=argToList(args.get(\"category\", None)), status=argToList(args.get(\"status\", None)), priority=argToList(args.get(\"priority\", None)),", "None)), subCriteria=argToList(args.get(\"sub_criteria\", None)), signature=argToList(args.get(\"signature\", None)), lastUpdatedTimestamp=args.get(\"last_updated_timestamp\", None), indexStartTime=args.get(\"index_start_time\", None), indexEndTime=args.get(\"index_end_time\",", "string = title if title else \"\" string += f\"####", "string: return {} lst = argToList(string) if len(lst) % 2", "\"pendingSoc\": 0, \"pendingVendor\": 0, \"pendingClose\": 0, \"workingSoc\": 1, \"workingCustomer\": 1,", "None), associatedCaseID=argToList(args.get(\"associated_case_id\", None)), sourceIPMinBits=args.get(\"source_ip_min_bits\", None), destinationIPMinBits=args.get(\"destination_ip_min_bits\", None), startTimestamp=args.get(\"start_timestamp\", \"-24hours\"), endTimestamp=args.get(\"end_timestamp\",", "Union[datetime, str] = None) -> str: if isinstance(date_time, datetime): return", "CommandResults: case_id = args.get(\"case_id\", None) sort_by = args.get(\"sort_by\", None) if", "{demisto.command()}\") try: if demisto.command() == \"test-module\": # This is the", "= None) -> str: data = result[\"data\"] string = title", "not specified\") if not case_type: raise ValueError(\"case_type not specified\") if", "not specified\") result = update_case( id=case_id, subject=args.get(\"subject\", None), 
description=args.get(\"description\", None),", "def argus_status_to_demisto_status(status: str) -> int: mapping = { \"pendingCustomer\": 0,", ") return CommandResults( readable_output=pretty_print_events(dict(result), \"# List Events\\n\"), outputs_prefix=\"Argus.Events\", outputs=result, raw_response=result,", "CommandResults( readable_output=tableToMarkdown(f'IP observations for \"{ip}\"', result[\"data\"]), outputs_prefix=\"Argus.ObservationsIP\", outputs=result, raw_response=result, )", "timestamp=timestamp, customerID=customer_id, eventID=event_id ) return CommandResults( readable_output=tableToMarkdown(f\"Event: {event_id}\", result[\"data\"]), outputs_prefix=\"Argus.Event\",", "Disable insecure warnings urllib3.disable_warnings() \"\"\" CONSTANTS \"\"\" DATE_FORMAT = \"%Y-%m-%dT%H:%M:%SZ\"", "comment = args.get(\"comment\", None) if not case_id: raise ValueError(\"case_id not", "service = args.get(\"service\", None) case_type = args.get(\"type\", None) tags =", "internalComment=args.get(\"internal_comment\", None), ) return CommandResults( readable_output=pretty_print_case_metadata(result), outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result, )", "specified\") result = get_attachment(caseID=case_id, attachmentID=attachment_id) readable_output = f\"# #{case_id}: attachment", "demisto.command() == \"argus-get-event\": return_results(get_event_command(demisto.args())) elif demisto.command() == \"argus-get-events-for-case\": return_results(get_events_for_case_command(demisto.args())) elif", "id not specified\") result = download_attachment(caseID=case_id, attachmentID=attachment_id) return fileResult(attachment_id, result.content)", "None), indexEndTime=args.get(\"index_end_time\", None), destinationIP=argToList(args.get(\"destination_ip\", None)), sourceIP=argToList(args.get(\"source_ip\", None)), ip=argToList(args.get(\"ip\", None)), destinationPort=argToList(args.get(\"destination_port\",", "readable_output=tableToMarkdown(f'IP observations for 
\"{ip}\"', result[\"data\"]), outputs_prefix=\"Argus.ObservationsIP\", outputs=result, raw_response=result, ) \"\"\"", "= list_case_comments( caseID=case_id, beforeComment=args.get(\"before_comment\", None), afterComment=args.get(\"after_comment\", None), offset=args.get(\"offset\", None), limit=args.get(\"limit\",", "not specified\") result = add_comment( caseID=case_id, comment=comment, asReplyTo=args.get(\"as_reply_to\", None), internal=args.get(\"internal\",", "raise ValueError(\"tag id not specified\") result = remove_case_tag_by_id(caseID=case_id, tagID=tag_id) headers", "add_case_tag_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None) key", "build_argus_priority_from_min_severity(min_severity: str) -> List[str]: severities = [\"low\", \"medium\", \"high\", \"critical\"]", "25), offset=args.get(\"offset\", None), includeDeleted=args.get(\"include_deleted\", None), startTimestamp=args.get(\"start_timestamp\", \"-24hours\"), endTimestamp=args.get(\"end_timestamp\", \"now\"), sortBy=argToList(args.get(\"sort_by\",", "download_attachment_command(args: Dict[str, Any]) -> Any: case_id = args.get(\"case_id\", None) attachment_id", "category=args.get(\"category\", None), reporter=args.get(\"reporter\", None), assignedUser=args.get(\"assigned_user\", None), assignedTech=args.get(\"assigned_tech\", None), customerReference=args.get(\"customer_reference\", None),", "type=event_type, timestamp=timestamp, customerID=customer_id, eventID=event_id ) return CommandResults( readable_output=tableToMarkdown(f\"Event: {event_id}\", result[\"data\"]),", "{response}\" ) def fetch_incidents( last_run: dict, first_fetch_period: str, limit: int", "subCriteria=[ {\"exclude\": True, \"status\": [\"closed\"]}, ], timeFieldStrategy=[\"createdTimestamp\"], ) incidents =", "CommandResults: event_type = args.get(\"type\", None) timestamp = args.get(\"timestamp\", None) customer_id", "key = args.get(\"key\", None) value = args.get(\"value\", None) if not", 
"1) return last_run, incidents def add_case_tag_command(args: Dict[str, Any]) -> CommandResults:", "\"createdTimestamp\": case[\"createdTimestamp\"], \"customer\": case[\"customer\"][\"shortName\"], }, \"rawJSON\": json.dumps(case), } ) if", "remove_case_tag_by_id(caseID=case_id, tagID=tag_id) headers = [\"key\", \"value\", \"addedTime\", \"id\", \"flags\"] readable_output", "not case_id: raise ValueError(\"case_id not specified\") result = list_case_tags( caseID=case_id,", "key, \"value\": value} result = add_case_tag(caseID=case_id, tags=tag) headers = [\"key\",", "0: raise ValueError(\"tags list must be of even number\", tags)", "indexEndTime=args.get(\"index_end_time\", None), destinationIP=argToList(args.get(\"destination_ip\", None)), sourceIP=argToList(args.get(\"source_ip\", None)), ip=argToList(args.get(\"ip\", None)), destinationPort=argToList(args.get(\"destination_port\", None)),", "= parse_first_fetch( demisto.params().get(\"first_fetch\", \"-1 day\") ) set_argus_settings( demisto.params().get(\"api_key\"), demisto.params().get(\"api_url\"), handle_proxy(),", "None) key = args.get(\"key\", None) value = args.get(\"value\", None) if", "= proxies argus_session.verify = verify def argus_priority_to_demisto_severity(priority: str) -> int:", "eventID=event_id ) return CommandResults( readable_output=tableToMarkdown(f\"Event: {event_id}\", result[\"data\"]), outputs_prefix=\"Argus.Event\", outputs=result, raw_response=result,", "the traceback return_error( f\"Failed to execute {demisto.command()} command.\\nError:\\n{str(e)}\" ) \"\"\"", "delete_case, delete_comment, download_attachment, edit_comment, get_attachment, get_case_metadata_by_id, list_case_attachments, list_case_tags, list_case_comments, remove_case_tag_by_id,", "Any]) -> CommandResults: # noinspection PyTypeChecker result = find_aggregated_events( skipFutureEvents=args.get(\"skip_future_events\",", "= title if title else \"\" for comment in comments:", "elif demisto.command() == 
\"argus-add-case-tag\": return_results(add_case_tag_command(demisto.args())) elif demisto.command() == \"argus-add-comment\": return_results(add_comment_command(demisto.args()))", "int(datetime.now().timestamp() * 1000) def pretty_print_date(date_time: Union[datetime, str] = None) ->", "if demisto.command() == \"test-module\": # This is the call made", "== \"ascending\" else [\"-addedTimestamp\"] result = list_case_comments( caseID=case_id, beforeComment=args.get(\"before_comment\", None),", "list_case_tags( caseID=case_id, limit=args.get(\"limit\", None), offset=args.get(\"offset\", None) ) headers = [\"key\",", "result = close_case( caseID=case_id, comment=args.get(\"comment\", None), ) readable_output = f\"#", "aggregateResult=args.get(\"aggregate_result\", None), includeAnonymousResults=args.get(\"include_anonymous_results\", None), rrClass=argToList(args.get(\"rr_class\", None)), rrType=argToList(args.get(\"rr_type\", None)), customerID=argToList(args.get(\"customer_id\", None)),", "specified\") if not service: raise ValueError(\"service not specified\") if not", "def find_aggregated_events_command(args: Dict[str, Any]) -> CommandResults: # noinspection PyTypeChecker result", "List, Union import logging from argus_api import session as argus_session", "\"\"\" DATE_FORMAT = \"%Y-%m-%dT%H:%M:%SZ\" PRETTY_DATE_FORMAT = \"%b %d, %Y, %H:%M:%S\"", "offset=args.get(\"offset\", None), ) return CommandResults( readable_output=tableToMarkdown(\"PDNS records\", result[\"data\"]), outputs_prefix=\"Argus.PDNS\", outputs=result,", "publish=args.get(\"publish\", None), defaultWatchers=args.get(\"default_watchers\", None), ) return CommandResults( readable_output=pretty_print_case_metadata(result), outputs_prefix=\"Argus.Case\", outputs=result,", "= {\"low\": 1, \"medium\": 2, \"high\": 3, \"critical\": 4} return", "result = delete_case(caseID=case_id) return CommandResults( readable_output=pretty_print_case_metadata(result, \"Case deleted\"), outputs_prefix=\"Argus.Case\", 
outputs=result,", "urllib3 import dateparser import traceback from typing import Any, Dict,", ") readable_output = \"# Event payload\\n\" readable_output += f\"Event: {event_id},", "CommandResults: result = list_aggregated_events( customerID=args.get(\"customer_id\", None), signature=args.get(\"signature\", None), ip=args.get(\"ip\", None),", "None), limit=args.get(\"limit\", None), sortBy=sort_by, ) return CommandResults( readable_output=pretty_print_comments( result[\"data\"], f\"#", "string += ( f\"_Last updated {pretty_print_date(comment['lastUpdatedTime'])}_\\n\" if comment[\"lastUpdatedTime\"] else \"\"", "delete_case(caseID=case_id) return CommandResults( readable_output=pretty_print_case_metadata(result, \"Case deleted\"), outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result, )", "None), includeAnonymousResults=args.get(\"include_anonymous_results\", None), rrClass=argToList(args.get(\"rr_class\", None)), rrType=argToList(args.get(\"rr_type\", None)), customerID=argToList(args.get(\"customer_id\", None)), tlp=argToList((args.get(\"tlp\",", "= { \"pendingCustomer\": 0, \"pendingSoc\": 0, \"pendingVendor\": 0, \"pendingClose\": 0,", "case_id: raise ValueError(\"case id not specified\") if not attachment_id: raise", "demisto.command() == \"argus-delete-comment\": return_results(delete_comment_command(demisto.args())) elif demisto.command() == \"argus-download-attachment\": return_results(download_attachment_command(demisto.args())) elif", "None) if not subject: raise ValueError(\"subject not specified\") if not", "not specified\") result = get_payload( type=event_type, timestamp=timestamp, customerID=customer_id, eventID=event_id )", "get_pcap( type=event_type, timestamp=timestamp, customerID=customer_id, eventID=event_id ) return fileResult(f\"{event_id}_pcap\", result.content) def", "from argus_api.api.events.v1.pcap import get_pcap from argus_api.api.events.v1.nids import find_n_i_d_s_events, list_n_i_d_s_events from", ") def 
list_nids_events_command(args: Dict[str, Any]) -> CommandResults: result = list_n_i_d_s_events(", "id not specified\") if not comment: raise ValueError(\"comment not specified\")", "readable_output=readable_output, outputs_prefix=\"Argus.Tags\", outputs=result, raw_response=result, ) def remove_case_tag_by_key_value_command(args: Dict[str, Any]) ->", "f\"_id: {comment['id']}_\\n\" string += f\"_Flags: {str(comment['flags'])}_\\n\" if comment[\"flags\"] else \"\"", "Any]) -> CommandResults: query = args.get(\"query\", None) if not query:", "argToList(string) if len(lst) % 2 != 0: return {} return", ") return CommandResults( readable_output=pretty_print_case_metadata(result), outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result, ) def list_case_attachments_command(args:", "*\\n\" return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Attachments\", outputs=result, raw_response=result, ) def list_case_tags_command(args:", "raise ValueError(\"event type not specified\") if not timestamp: raise ValueError(\"timestamp", "Any: case_id = args.get(\"case_id\", None) attachment_id = args.get(\"attachment_id\", None) if", "def update_case_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None)", "raise ValueError(\"event id not specified\") result = get_pcap( type=event_type, timestamp=timestamp,", "= args.get(\"case_id\", None) comment = args.get(\"comment\", None) if not case_id:", "f\"#### *{comment['addedByUser']['userName']} - {pretty_print_date(comment['addedTime'])}*\\n\" string += ( f\"_Last updated {pretty_print_date(comment['lastUpdatedTime'])}_\\n\"", "\"low\" ): start_timestamp = last_run.get(\"start_time\", None) if last_run else None", "not case_id: raise ValueError(\"case_id not specified\") if sort_by: sort_by =", "= args.get(\"case_id\", None) comment_id = args.get(\"comment_id\", None) comment = args.get(\"comment\",", "demisto.command() == \"fetch-incidents\": # Set and define the fetch 
incidents", "proxies: dict = None, verify: bool = None ): argus_session.api_key", "first_fetch[0] != \"-\": first_fetch = f\"-{first_fetch}\" return first_fetch def build_tags_from_list(lst:", "fileResult(attachment_id, result.content) def edit_comment_command(args: Dict[str, Any]) -> CommandResults: case_id =", "tags = args.get(\"tags\", None) if not subject: raise ValueError(\"subject not", "None)), sourceIP=argToList(args.get(\"source_ip\", None)), ip=argToList(args.get(\"ip\", None)), destinationPort=argToList(args.get(\"destination_port\", None)), sourcePort=argToList(args.get(\"source_port\", None)), port=argToList(args.get(\"port\",", "raise ValueError(\"event id not specified\") result = get_payload( type=event_type, timestamp=timestamp,", "None)), includeFlags=argToList(args.get(\"include_flags\", None)), excludeFlags=argToList(args.get(\"exclude_flags\", None)), ) return CommandResults( readable_output=pretty_print_events(dict(result), \"#", "{event_id}, type: {result['data']['type']}\\n\" readable_output += result[\"data\"][\"payload\"] return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Payload\",", "when pressing the integration Test button. 
return_results(test_module_command()) elif demisto.command() ==", "readable_output=pretty_print_comment( result[\"data\"], f\"# #{case_id}: Deleted comment\\n\" ), outputs_prefix=\"Argus.Comment\", outputs=result, raw_response=result,", "result = get_attachment(caseID=case_id, attachmentID=attachment_id) readable_output = f\"# #{case_id}: attachment metadata\\n\"", ") def delete_case_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\",", "if not attachment_id: raise ValueError(\"attachment id not specified\") result =", "elif demisto.command() == \"argus-remove-case-tag-by-id\": return_results(remove_case_tag_by_id_command(demisto.args())) elif demisto.command() == \"argus-remove-case-tag-by-key-value\": return_results(remove_case_tag_by_key_value_command(demisto.args()))", "includeFlags=argToList(args.get(\"include_flags\", None)), excludeFlags=argToList(args.get(\"exclude_flags\", None)), ) readable_output = f\"Advanced Case Search:", "ValueError(\"comment id not specified\") if not comment: raise ValueError(\"comment not", "= args.get(\"case_id\", None) comment_id = args.get(\"comment_id\", None) if not case_id:", "= None) -> str: if isinstance(date_time, datetime): return date_time.strftime(PRETTY_DATE_FORMAT) if", "return int(datetime.now().timestamp() * 1000) def pretty_print_date(date_time: Union[datetime, str] = None)", "{ \"argus_id\": str(case[\"id\"]), \"type\": case[\"type\"], \"category\": case[\"category\"][\"name\"] if case[\"category\"] else", "args.get(\"tags\", None) if not subject: raise ValueError(\"subject not specified\") if", "return_results(delete_case_command(demisto.args())) elif demisto.command() == \"argus-delete-comment\": return_results(delete_comment_command(demisto.args())) elif demisto.command() == \"argus-download-attachment\":", "as argus_session from argus_api.api.currentuser.v1.user import get_current_user from argus_api.api.cases.v2.case import (", "\"argus-get-events-for-case\": 
return_results(get_events_for_case_command(demisto.args())) elif demisto.command() == \"argus-find-aggregated-events\": return_results(find_aggregated_events_command(demisto.args())) elif demisto.command() ==", "), outputs_prefix=\"Argus.Events\", outputs=result, raw_response=result, ) def find_aggregated_events_command(args: Dict[str, Any]) ->", "\"\"\" ENTRY POINT \"\"\" if __name__ in (\"__main__\", \"__builtin__\", \"builtins\"):", "Set and define the fetch incidents command to run after", "id not specified\") result = delete_comment(caseID=case_id, commentID=comment_id) return CommandResults( readable_output=pretty_print_comment(", "incidents = fetch_incidents( last_run=demisto.getLastRun(), first_fetch_period=first_fetch_period, limit=demisto.params().get(\"max_fetch\", 25), min_severity=demisto.params().get(\"min_severity\", \"low\"), )", "{} return {lst[i]: lst[i + 1] for i in range(0,", "return {} return {lst[i]: lst[i + 1] for i in", "outputs=result, raw_response=result, ) def fetch_observations_for_i_p_command(args: Dict[str, Any]) -> CommandResults: ip", "None) -> str: string = title if title else \"\"", "argus_api.api.reputation.v1.observation import ( fetch_observations_for_domain, fetch_observations_for_i_p, ) # Disable insecure warnings", "events\\n\"), outputs_prefix=\"Argus.Events\", outputs=result, raw_response=result, ) def list_aggregated_events_command(args: Dict[str, Any]) ->", "lst = argToList(string) if len(lst) % 2 != 0: return", "import search_records from argus_api.api.reputation.v1.observation import ( fetch_observations_for_domain, fetch_observations_for_i_p, ) #", "first_fetch def build_tags_from_list(lst: list) -> List[Dict]: if not lst: return", "return min_severity_list def parse_first_fetch(first_fetch: Any) -> Any: if isinstance(first_fetch, str):", "def test_module_command() -> str: response = get_current_user() if response[\"responseCode\"] ==", "close case\\n\" readable_output += ( f\"_Status: {result['data']['status']}, at: 
{result['data']['closedTime']}_\" )", "f\"Event: {event_id}, type: {result['data']['type']}\\n\" readable_output += result[\"data\"][\"payload\"] return CommandResults( readable_output=readable_output,", "bool = None ): argus_session.api_key = api_key argus_session.base_url = base_url", "\"id\", \"flags\"] readable_output = tableToMarkdown( f\"#{case_id}: Delete tags\", result[\"data\"], headers=headers", "Comments\\n\" ), outputs_prefix=\"Argus.Comments\", outputs=result, raw_response=result, ) def remove_case_tag_by_id_command(args: Dict[str, Any])", "CommandResults: case_id = args.get(\"case_id\", None) comment_id = args.get(\"comment_id\", None) comment", "\"value\", \"addedTime\", \"id\", \"flags\"] readable_output = tableToMarkdown( f\"#{case_id}: Delete tags\",", "-> CommandResults: # noinspection PyTypeChecker result = find_aggregated_events( skipFutureEvents=args.get(\"skip_future_events\", None),", "\"ascending\" else [\"-addedTimestamp\"] result = list_case_comments( caseID=case_id, beforeComment=args.get(\"before_comment\", None), afterComment=args.get(\"after_comment\",", "exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) #", "demisto.command() == \"argus-find-aggregated-events\": return_results(find_aggregated_events_command(demisto.args())) elif demisto.command() == \"argus-list-aggregated-events\": return_results(list_aggregated_events_command(demisto.args())) elif", "return date_time_to_epoch_milliseconds(dateparser.parse(date_time)) return int(datetime.now().timestamp() * 1000) def pretty_print_date(date_time: Union[datetime, str]", "in result[\"data\"]: incidents.append( { \"name\": f\"#{case['id']}: {case['subject']}\", \"occurred\": case[\"createdTime\"], \"severity\":", "fetch_observations_for_domain(fqdn=fqdn) return CommandResults( readable_output=tableToMarkdown( f'Domain observations for \"{fqdn}\"', result[\"data\"] ),", "return first_fetch def build_tags_from_list(lst: list) -> List[Dict]: if not 
lst:", "\"# Event payload\\n\" readable_output += f\"Event: {event_id}, type: {result['data']['type']}\\n\" readable_output", "len(tags) % 2 != 0: raise ValueError(\"tags list must be", "raw_response=result, ) def list_aggregated_events_command(args: Dict[str, Any]) -> CommandResults: result =", ") readable_output = f\"# #{case_id}: close case\\n\" readable_output += (", "def add_case_tag_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None)", "None), destinationIPMinBits=args.get(\"destination_ip_min_bits\", None), startTimestamp=args.get(\"start_timestamp\", \"-24hours\"), endTimestamp=args.get(\"end_timestamp\", \"now\"), sortBy=argToList(args.get(\"sort_by\", None)), includeFlags=argToList(args.get(\"include_flags\",", "-> CommandResults: subject = args.get(\"subject\", None) description = args.get(\"description\", None)", ") return CommandResults( readable_output=tableToMarkdown(\"PDNS records\", result[\"data\"]), outputs_prefix=\"Argus.PDNS\", outputs=result, raw_response=result, )", "priority=argToList(args.get(\"priority\", None)), assetID=argToList(args.get(\"asset_id\", None)), tag=argToList(args.get(\"tag\", None)), workflow=argToList(args.get(\"workflow\", None)), field=argToList(args.get(\"field\", None)),", "argus_api import session as argus_session from argus_api.api.currentuser.v1.user import get_current_user from", "return CommandResults( readable_output=tableToMarkdown( f'Domain observations for \"{fqdn}\"', result[\"data\"] ), outputs_prefix=\"Argus.ObservationsDomain\",", "#{case_id}: Case attachments\\n\" for attachment in result[\"data\"]: readable_output += f\"####", "if comment[\"flags\"] else \"\" string += \"* * *\\n\" return", "assignedTech=args.get(\"assigned_tech\", None), customerReference=args.get(\"customer_reference\", None), comment=args.get(\"comment\", None), originEmailAddress=args.get(\"origin_email_address\", None), hasEvents=args.get(\"has_events\", None),", "str = None, proxies: dict = None, 
verify: bool =", "not specified\") if sort_by: sort_by = [\"addedTimestamp\"] if sort_by ==", "{\"key\": key, \"value\": value} result = add_case_tag(caseID=case_id, tags=tag) headers =", "List[Dict]: if not lst: return [] if len(lst) % 2", "\"pendingVendor\": 0, \"pendingClose\": 0, \"workingSoc\": 1, \"workingCustomer\": 1, \"closed\": 2,", "activated via integration settings. next_run, incidents = fetch_incidents( last_run=demisto.getLastRun(), first_fetch_period=first_fetch_period,", ") readable_output = f\"Advanced Case Search: {result['count']} result(s)\\n\" readable_output +=", "None) service = args.get(\"service\", None) case_type = args.get(\"type\", None) tags", "None) tag_id = args.get(\"tag_id\", None) if not case_id: raise ValueError(\"case", "to {}_\\n\".format( result[\"count\"], result[\"size\"], result[\"offset\"], result[\"limit\"] ) string += tableToMarkdown(\"Events\",", "demisto.command() == \"argus-find-nids-events\": return_results(find_nids_events_command(demisto.args())) elif demisto.command() == \"argus-list-nids-events\": return_results(list_nids_events_command(demisto.args())) elif", "title if title else \"\" string += \"_Count: {}, showing", "def str_to_dict(string: str) -> dict: if not string: return {}", "id not specified\") result = get_events_for_case( caseID=case_id, limit=args.get(\"limit\", None), offset=args.get(\"offset\",", "string += f\"_Flags: {str(comment['flags'])}_\\n\" if comment[\"flags\"] else \"\" string +=", "None), startTimestamp=args.get(\"start_timestamp\", None), endTimestamp=args.get(\"end_timestamp\", None), limit=args.get(\"limit\", None), offset=args.get(\"offset\", None), )", "call made when pressing the integration Test button. 
return_results(test_module_command()) elif", "25), min_severity=demisto.params().get(\"min_severity\", \"low\"), ) demisto.setLastRun(next_run) demisto.incidents(incidents) elif demisto.command() == \"argus-add-case-tag\":", "close_case( caseID=case_id, comment=args.get(\"comment\", None), ) readable_output = f\"# #{case_id}: close", "limit=limit, sortBy=[\"createdTimestamp\"], priority=build_argus_priority_from_min_severity(min_severity), subCriteria=[ {\"exclude\": True, \"status\": [\"closed\"]}, ], timeFieldStrategy=[\"createdTimestamp\"],", ") def list_case_attachments_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\",", "{event_id}\", result[\"data\"]), outputs_prefix=\"Argus.Event\", outputs=result, raw_response=result, ) def get_events_for_case_command(args: Dict[str, Any])", "signature=argToList(args.get(\"signature\", None)), lastUpdatedTimestamp=args.get(\"last_updated_timestamp\", None), indexStartTime=args.get(\"index_start_time\", None), indexEndTime=args.get(\"index_end_time\", None), destinationIP=argToList(args.get(\"destination_ip\", None)),", "import find_n_i_d_s_events, list_n_i_d_s_events from argus_api.api.pdns.v3.search import search_records from argus_api.api.reputation.v1.observation import", "search_records_command(args: Dict[str, Any]) -> CommandResults: query = args.get(\"query\", None) if", "if argus_priority_to_demisto_severity( min_severity.lower() ) <= argus_priority_to_demisto_severity(severity): min_severity_list.append(severity) return min_severity_list def", "+= \"* * *\\n\" return string def pretty_print_comments(comments: list, title:", "set_argus_settings( api_key: str, base_url: str = None, proxies: dict =", "return_results(test_module_command()) elif demisto.command() == \"fetch-incidents\": # Set and define the", "= str(tags).split(\",\") if len(tags) % 2 != 0: raise ValueError(\"tags", "argus_api.api.events.v1.aggregated import ( find_aggregated_events, list_aggregated_events, ) from 
argus_api.api.events.v1.payload import get_payload", ") def download_attachment_command(args: Dict[str, Any]) -> Any: case_id = args.get(\"case_id\",", "outputs_prefix=\"Argus.Events\", outputs=result, raw_response=result, ) def get_payload_command(args: Dict[str, Any]) -> CommandResults:", "None)), includeFlags=argToList(args.get(\"include_flags\", None)), excludeFlags=argToList(args.get(\"exclude_flags\", None)), ) readable_output = f\"Advanced Case", "None)), rrType=argToList(args.get(\"rr_type\", None)), customerID=argToList(args.get(\"customer_id\", None)), tlp=argToList((args.get(\"tlp\", None))), limit=args.get(\"limit\", 25), offset=args.get(\"offset\",", "not specified\") if not value: raise ValueError(\"value not specified\") tag", "Dict[str, Any]) -> CommandResults: # noinspection PyTypeChecker result = find_aggregated_events(", "1, \"workingCustomer\": 1, \"closed\": 2, } return mapping.get(status, 0) def", "+= f\"#### *{comment['addedByUser']['userName']} - {pretty_print_date(comment['addedTime'])}*\\n\" string += ( f\"_Last updated", "advanced_case_search( startTimestamp=start_timestamp if start_timestamp else first_fetch_period, endTimestamp=\"now\", limit=limit, sortBy=[\"createdTimestamp\"], priority=build_argus_priority_from_min_severity(min_severity),", "comment_id: raise ValueError(\"comment id not specified\") result = delete_comment(caseID=case_id, commentID=comment_id)", "not specified\") result = get_attachment(caseID=case_id, attachmentID=attachment_id) readable_output = f\"# #{case_id}:", "None)), field=argToList(args.get(\"field\", None)), keywords=argToList(args.get(\"keywords\", None)), timeFieldStrategy=argToList(args.get(\"time_field_strategy\", None)), timeMatchStrategy=args.get(\"time_match_strategy\", None), keywordFieldStrategy=argToList(args.get(\"keyword_field_strategy\",", "must be of even number\", tags) tags = build_tags_from_list(tags) result", "outputs_prefix=\"Argus.Comment\", outputs=result, raw_response=result, ) def 
get_attachment_command(args: Dict[str, Any]) -> CommandResults:", "destinationGeoCountry=argToList(args.get(\"destination_geo_country\", None)), geoCountry=argToList(args.get(\"geo_country\", None)), properties=str_to_dict(args.get(\"properties\", None)), exactMatchProperties=args.get(\"exact_match_properties\", None), sensorID=argToList(args.get(\"sensor_id\", None)),", "readable_output=pretty_print_events( dict(result), f\"# #{case_id}: Associated Events\\n\" ), outputs_prefix=\"Argus.Events\", outputs=result, raw_response=result,", "elif demisto.command() == \"argus-get-attachment\": return_results(get_attachment_command(demisto.args())) elif demisto.command() == \"argus-get-case-metadata-by-id\": return_results(get_case_metadata_by_id_command(demisto.args()))", "def edit_comment_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None)", "isinstance(first_fetch, str): if first_fetch[0] != \"-\": first_fetch = f\"-{first_fetch}\" return", "result[\"data\"], f\"# #{case_id}: Deleted comment\\n\" ), outputs_prefix=\"Argus.Comment\", outputs=result, raw_response=result, )", "outputs=result, raw_response=result, ) def get_case_metadata_by_id_command(args: Dict[str, Any]) -> CommandResults: case_id", "= \"low\" ): start_timestamp = last_run.get(\"start_time\", None) if last_run else", ") string += f\"{comment['comment']}\\n\\n\" string += f\"_id: {comment['id']}_\\n\" string +=", "outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result, ) def delete_case_command(args: Dict[str, Any]) -> CommandResults:", "datetime.now().strftime(PRETTY_DATE_FORMAT) def pretty_print_case_metadata(result: dict, title: str = None) -> str:", "None) description = args.get(\"description\", None) service = args.get(\"service\", None) case_type", "None), endTimestamp=args.get(\"end_timestamp\", None), limit=args.get(\"limit\", None), offset=args.get(\"offset\", None), includeDeleted=args.get(\"include_deleted\", None), 
subCriteria=argToList(args.get(\"sub_criteria\",", "None), offset=args.get(\"offset\", None) ) headers = [\"key\", \"value\", \"addedTime\", \"id\"]", "outputs=result, raw_response=result, ) \"\"\" MAIN FUNCTION \"\"\" def main() ->", "endTimestamp=\"now\", limit=limit, sortBy=[\"createdTimestamp\"], priority=build_argus_priority_from_min_severity(min_severity), subCriteria=[ {\"exclude\": True, \"status\": [\"closed\"]}, ],", "= create_case( customer=args.get(\"customer\", None), service=service, category=args.get(\"category\", None), type=case_type, status=args.get(\"status\", None),", "= title if title else f\"# #{data['id']}: {data['subject']}\\n\" string +=", "\"argus-find-nids-events\": return_results(find_nids_events_command(demisto.args())) elif demisto.command() == \"argus-list-nids-events\": return_results(list_nids_events_command(demisto.args())) elif demisto.command() ==", "\"medium\": 2, \"high\": 3, \"critical\": 4} return mapping.get(priority, 0) def", "from CommonServerPython import * \"\"\" IMPORTS \"\"\" import json import", "= None, verify: bool = None ): argus_session.api_key = api_key", "tags) tags = build_tags_from_list(tags) result = create_case( customer=args.get(\"customer\", None), service=service,", "({attachment['mimeType']}, {attachment['size']} kb)\\n\\n\" readable_output += f\"_id: {attachment['id']}_\\n\" readable_output += \"*", ") def remove_case_tag_by_id_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\",", "200: return \"ok\" return ( f\"Unable to communicate with Argus", "noinspection PyTypeChecker result = advanced_case_search( startTimestamp=start_timestamp if start_timestamp else first_fetch_period,", "readable_output=pretty_print_case_metadata(result), outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result, ) def get_event_command(args: Dict[str, Any]) ->", "if not event_type: raise ValueError(\"event type not specified\") if not", "result[\"data\"]), outputs_prefix=\"Argus.Event\", 
outputs=result, raw_response=result, ) def get_events_for_case_command(args: Dict[str, Any]) ->", "raw_response=result, ) def search_records_command(args: Dict[str, Any]) -> CommandResults: query =", "readable_output=readable_output, outputs_prefix=\"Argus.Attachments\", outputs=result, raw_response=result, ) def get_case_metadata_by_id_command(args: Dict[str, Any]) ->", "\"argus-list-nids-events\": return_results(list_nids_events_command(demisto.args())) elif demisto.command() == \"argus-pdns-search-records\": return_results(search_records_command(demisto.args())) elif demisto.command() ==", "<= argus_priority_to_demisto_severity(severity): min_severity_list.append(severity) return min_severity_list def parse_first_fetch(first_fetch: Any) -> Any:", "} return mapping.get(status, 0) def build_argus_priority_from_min_severity(min_severity: str) -> List[str]: severities", "None) event_id = args.get(\"event_id\", None) if not event_type: raise ValueError(\"event", "0, \"workingSoc\": 1, \"workingCustomer\": 1, \"closed\": 2, } return mapping.get(status,", "comment_id = args.get(\"comment_id\", None) comment = args.get(\"comment\", None) if not", ") return CommandResults( readable_output=pretty_print_comments( result[\"data\"], f\"# #{case_id}: Comments\\n\" ), outputs_prefix=\"Argus.Comments\",", "def pretty_print_case_metadata(result: dict, title: str = None) -> str: data", "raise ValueError(\"query not specified\") # noinspection PyTypeChecker result = search_records(", "the call made when pressing the integration Test button. 
return_results(test_module_command())", "remove_case_tag_by_id_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None) tag_id", "id not specified\") if not event_id: raise ValueError(\"event id not", "ValueError(\"case_id not specified\") if not comment: raise ValueError(\"comment not specified\")", "\"\" string += f\"#### *{comment['addedByUser']['userName']} - {pretty_print_date(comment['addedTime'])}*\\n\" string += (", "CommandResults: ip = args.get(\"ip\", None) if not ip: raise ValueError(\"ip", "#{case_id}: Associated Events\\n\" ), outputs_prefix=\"Argus.Events\", outputs=result, raw_response=result, ) def find_aggregated_events_command(args:", "None)), exactMatchProperties=args.get(\"exact_match_properties\", None), subCriteria=argToList(args.get(\"sub_criteria\", None)), signature=argToList(args.get(\"signature\", None)), lastUpdatedTimestamp=args.get(\"last_updated_timestamp\", None), indexStartTime=args.get(\"index_start_time\",", "\"now\"), sortBy=argToList(args.get(\"sort_by\", None)), includeFlags=argToList(args.get(\"include_flags\", None)), excludeFlags=argToList(args.get(\"exclude_flags\", None)), ) return CommandResults(", "first_fetch_period = parse_first_fetch( demisto.params().get(\"first_fetch\", \"-1 day\") ) set_argus_settings( demisto.params().get(\"api_key\"), demisto.params().get(\"api_url\"),", "lst[i + 1]}) return tags def str_to_dict(string: str) -> dict:", "from argus_api.api.events.v1 import get_event_by_path from argus_api.api.events.v1.case.case import get_events_for_case from argus_api.api.events.v1.aggregated", "def download_attachment_command(args: Dict[str, Any]) -> Any: case_id = args.get(\"case_id\", None)", "args.get(\"attachment_id\", None) if not case_id: raise ValueError(\"case id not specified\")", "if not case_id: raise ValueError(\"case_id not specified\") result = list_case_tags(", "-> CommandResults: case_id = args.get(\"case_id\", None) comment = args.get(\"comment\", None)", "= 
list_aggregated_events( customerID=args.get(\"customer_id\", None), signature=args.get(\"signature\", None), ip=args.get(\"ip\", None), startTimestamp=args.get(\"start_timestamp\", None),", "data[\"priority\"], data[\"status\"], pretty_print_date(data[\"lastUpdatedTime\"]) ) string += \"Reported by {} at", "), outputs_prefix=\"Argus.Comments\", outputs=result, raw_response=result, ) def remove_case_tag_by_id_command(args: Dict[str, Any]) ->", "\"\" string += \"* * *\\n\" return string def pretty_print_comments(comments:", "from argus_api.api.pdns.v3.search import search_records from argus_api.api.reputation.v1.observation import ( fetch_observations_for_domain, fetch_observations_for_i_p,", "None) attachment_id = args.get(\"attachment_id\", None) if not case_id: raise ValueError(\"case", "Any) -> Any: if isinstance(first_fetch, str): if first_fetch[0] != \"-\":", "demisto.command() == \"argus-get-pcap\": return_results(get_pcap_command(demisto.args())) elif demisto.command() == \"argus-find-nids-events\": return_results(find_nids_events_command(demisto.args())) elif", "print the traceback return_error( f\"Failed to execute {demisto.command()} command.\\nError:\\n{str(e)}\" )", "None), limit=args.get(\"limit\", None), offset=args.get(\"offset\", None), includeDeleted=args.get(\"include_deleted\", None), subCriteria=argToList(args.get(\"sub_criteria\", None)), exclude=args.get(\"exclude\",", "result = remove_case_tag_by_key_value(caseID=case_id, tagKey=key, tagValue=value) headers = [\"key\", \"value\", \"addedTime\",", "import get_pcap from argus_api.api.events.v1.nids import find_n_i_d_s_events, list_n_i_d_s_events from argus_api.api.pdns.v3.search import", "raise ValueError(\"case_id not specified\") if not comment: raise ValueError(\"comment not", "Delete tags\", result[\"data\"], headers=headers ) return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Tags\", outputs=result,", "0: return {} return {lst[i]: lst[i + 1] for i", 
"limit=args.get(\"limit\", None), sortBy=sort_by, ) return CommandResults( readable_output=pretty_print_comments( result[\"data\"], f\"# #{case_id}:", "else \"\" string += \"* * *\\n\" return string def", "originEmailAddress=args.get(\"origin_email_address\", None), associatedAttachmentID=args.get(\"associated_attachment_id\", None), ) return CommandResults( readable_output=pretty_print_comment( result[\"data\"], f\"#", "status=argToList(args.get(\"status\", None)), priority=argToList(args.get(\"priority\", None)), assetID=argToList(args.get(\"asset_id\", None)), tag=argToList(args.get(\"tag\", None)), workflow=argToList(args.get(\"workflow\", None)),", "def fetch_incidents( last_run: dict, first_fetch_period: str, limit: int = 25,", "sort_by == \"ascending\" else [\"-addedTimestamp\"] result = list_case_comments( caseID=case_id, beforeComment=args.get(\"before_comment\",", "), outputs_prefix=\"Argus.Comment\", outputs=result, raw_response=result, ) def advanced_case_search_command(args: Dict[str, Any]) ->", "def pretty_print_comments(comments: list, title: str = None) -> str: string", "title if title else f\"# #{data['id']}: {data['subject']}\\n\" string += \"_Priority:", "not attachment_id: raise ValueError(\"attachment id not specified\") result = get_attachment(caseID=case_id,", "result[\"data\"][\"payload\"] return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Payload\", outputs=result, raw_response=result, ) def get_pcap_command(args:", "start_timestamp else first_fetch_period, endTimestamp=\"now\", limit=limit, sortBy=[\"createdTimestamp\"], priority=build_argus_priority_from_min_severity(min_severity), subCriteria=[ {\"exclude\": True,", "case_id = args.get(\"case_id\", None) key = args.get(\"key\", None) value =", "edit_comment(caseID=case_id, commentID=comment_id, comment=comment) return CommandResults( readable_output=pretty_print_comment( result[\"data\"], f\"# #{case_id}: Updated", "= get_pcap( type=event_type, 
timestamp=timestamp, customerID=customer_id, eventID=event_id ) return fileResult(f\"{event_id}_pcap\", result.content)", "readable_output=tableToMarkdown(f\"Event: {event_id}\", result[\"data\"]), outputs_prefix=\"Argus.Event\", outputs=result, raw_response=result, ) def get_events_for_case_command(args: Dict[str,", "== \"argus-edit-comment\": return_results(edit_comment_command(demisto.args())) elif demisto.command() == \"argus-get-attachment\": return_results(get_attachment_command(demisto.args())) elif demisto.command()", "+ 1]}) return tags def str_to_dict(string: str) -> dict: if", "demisto.command() == \"argus-fetch-observations-for-domain\": return_results(fetch_observations_for_domain_command(demisto.args())) elif demisto.command() == \"argus-fetch-observations-for-ip\": return_results(fetch_observations_for_i_p_command(demisto.args())) #", "\"argus-list-case-attachments\": return_results(list_case_attachments_command(demisto.args())) elif demisto.command() == \"argus-list-case-tags\": return_results(list_case_tags_command(demisto.args())) elif demisto.command() ==", "{data['subject']}\\n\" string += \"_Priority: {}, status: {}, last updated: {}_\\n\".format(", ") def get_event_command(args: Dict[str, Any]) -> CommandResults: event_type = args.get(\"type\",", ") def fetch_incidents( last_run: dict, first_fetch_period: str, limit: int =", "None), rrClass=argToList(args.get(\"rr_class\", None)), rrType=argToList(args.get(\"rr_type\", None)), customerID=argToList(args.get(\"customer_id\", None)), tlp=argToList((args.get(\"tlp\", None))), limit=args.get(\"limit\",", "\"argus-get-attachment\": return_results(get_attachment_command(demisto.args())) elif demisto.command() == \"argus-get-case-metadata-by-id\": return_results(get_case_metadata_by_id_command(demisto.args())) elif demisto.command() ==", "\"argus-get-payload\": return_results(get_payload_command(demisto.args())) elif demisto.command() == \"argus-get-pcap\": 
return_results(get_pcap_command(demisto.args())) elif demisto.command() ==", "if not comment_id: raise ValueError(\"comment id not specified\") if not", "CommandResults: case_id = args.get(\"case_id\", None) comment = args.get(\"comment\", None) if", ") def get_events_for_case_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\",", "elif demisto.command() == \"argus-get-event\": return_results(get_event_command(demisto.args())) elif demisto.command() == \"argus-get-events-for-case\": return_results(get_events_for_case_command(demisto.args()))", "None), signature=args.get(\"signature\", None), ip=args.get(\"ip\", None), startTimestamp=args.get(\"start_timestamp\", None), endTimestamp=args.get(\"end_timestamp\", None), limit=args.get(\"limit\",", "= search_records( query=query, aggregateResult=args.get(\"aggregate_result\", None), includeAnonymousResults=args.get(\"include_anonymous_results\", None), rrClass=argToList(args.get(\"rr_class\", None)), rrType=argToList(args.get(\"rr_type\",", "None), customerID=argToList(args.get(\"customer_id\", None)), caseID=argToList(args.get(\"case_id\", None)), customer=argToList(args.get(\"customer\", None)), type=argToList(args.get(\"case_type\", None)), service=argToList(args.get(\"service\",", "[] for severity in severities: if argus_priority_to_demisto_severity( min_severity.lower() ) <=", "includeDeleted=args.get(\"include_deleted\", None), minCount=args.get(\"min_count\", None), associatedCaseID=argToList(args.get(\"associated_case_id\", None)), sourceIPMinBits=args.get(\"source_ip_min_bits\", None), destinationIPMinBits=args.get(\"destination_ip_min_bits\", None),", "ValueError(\"event id not specified\") result = get_pcap( type=event_type, timestamp=timestamp, customerID=customer_id,", "if not string: return {} lst = argToList(string) if len(lst)", "= advanced_case_search( startTimestamp=args.get(\"start_timestamp\", None), endTimestamp=args.get(\"end_timestamp\", None), limit=args.get(\"limit\", 
None), offset=args.get(\"offset\", None),", "return_results(add_comment_command(demisto.args())) elif demisto.command() == \"argus-advanced-case-search\": return_results(advanced_case_search_command(demisto.args())) elif demisto.command() == \"argus-close-case\":", "demisto.debug(f\"Command being called is {demisto.command()}\") try: if demisto.command() == \"test-module\":", "result[\"data\"]) return string \"\"\" COMMAND FUNCTIONS \"\"\" def test_module_command() ->", "for severity in severities: if argus_priority_to_demisto_severity( min_severity.lower() ) <= argus_priority_to_demisto_severity(severity):", "mapping = {\"low\": 1, \"medium\": 2, \"high\": 3, \"critical\": 4}", "string += f\"#### *{comment['addedByUser']['userName']} - {pretty_print_date(comment['addedTime'])}*\\n\" string += ( f\"_Last", "raw_response=result, ) def get_case_metadata_by_id_command(args: Dict[str, Any]) -> CommandResults: case_id =", "return CommandResults( readable_output=pretty_print_events( dict(result), f\"# #{case_id}: Associated Events\\n\" ), outputs_prefix=\"Argus.Events\",", "if not timestamp: raise ValueError(\"timestamp not specified\") if not customer_id:", "{\"exclude\": True, \"status\": [\"closed\"]}, ], timeFieldStrategy=[\"createdTimestamp\"], ) incidents = []", "# noinspection PyTypeChecker result = find_aggregated_events( skipFutureEvents=args.get(\"skip_future_events\", None), exclude=args.get(\"exclude\", None),", "keywordFieldStrategy=argToList(args.get(\"keyword_field_strategy\", None)), keywordMatchStrategy=args.get(\"keyword_match_strategy\", None), user=argToList(args.get(\"user\", None)), userFieldStrategy=argToList(args.get(\"user_field_strategy\", None)), userAssigned=args.get(\"user_assigned\", None),", "Any]) -> CommandResults: # noinspection PyTypeChecker result = find_n_i_d_s_events( skipFutureEvents=args.get(\"skip_future_events\",", "case[\"service\"][\"name\"], \"lastUpdatedTime\": case[\"lastUpdatedTime\"], \"createdTimestamp\": 
case[\"createdTimestamp\"], \"customer\": case[\"customer\"][\"shortName\"], }, \"rawJSON\": json.dumps(case),", "raw_response=result, ) def remove_case_tag_by_id_command(args: Dict[str, Any]) -> CommandResults: case_id =", "id not specified\") result = delete_case(caseID=case_id) return CommandResults( readable_output=pretty_print_case_metadata(result, \"Case", "CommandResults( readable_output=pretty_print_comment( result[\"data\"], f\"# #{case_id}: Added comment\\n\" ), outputs_prefix=\"Argus.Comment\", outputs=result,", "return int(date_time.timestamp() * 1000) if isinstance(date_time, str): return date_time_to_epoch_milliseconds(dateparser.parse(date_time)) return", "start_timestamp = last_run.get(\"start_time\", None) if last_run else None # noinspection", "\"addedTime\", \"id\"] readable_output = tableToMarkdown( f\"#{case_id}: Tags\", result[\"data\"], headers=headers )", "case[\"customer\"][\"shortName\"], }, \"rawJSON\": json.dumps(case), } ) if result[\"data\"]: last_run[\"start_time\"] =", "raise ValueError(\"case id not specified\") result = delete_case(caseID=case_id) return CommandResults(", "verify def argus_priority_to_demisto_severity(priority: str) -> int: mapping = {\"low\": 1,", "headers = [\"key\", \"value\", \"addedTime\"] readable_output = tableToMarkdown( f\"#{case_id}: Tags\",", "customer=args.get(\"customer\", None), service=service, category=args.get(\"category\", None), type=case_type, status=args.get(\"status\", None), tags=tags, subject=subject,", ") return CommandResults( readable_output=pretty_print_events( dict(result), f\"# #{case_id}: Associated Events\\n\" ),", "def search_records_command(args: Dict[str, Any]) -> CommandResults: query = args.get(\"query\", None)", "None)), exactMatchProperties=args.get(\"exact_match_properties\", None), sensorID=argToList(args.get(\"sensor_id\", None)), subCriteria=argToList(args.get(\"sub_criteria\", None)), signature=argToList(args.get(\"signature\", None)), 
lastUpdatedTimestamp=args.get(\"last_updated_timestamp\",", "= args.get(\"fqdn\", None) if not fqdn: raise ValueError(\"fqdn not specified\")", "demisto.command() == \"argus-edit-comment\": return_results(edit_comment_command(demisto.args())) elif demisto.command() == \"argus-get-attachment\": return_results(get_attachment_command(demisto.args())) elif", "case_type: raise ValueError(\"case_type not specified\") if tags: tags = str(tags).split(\",\")", "customerID=customer_id, eventID=event_id ) return fileResult(f\"{event_id}_pcap\", result.content) def find_nids_events_command(args: Dict[str, Any])", "playground\", result[\"data\"] ) return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Cases\", outputs=result, raw_response=result, )", "args.get(\"event_id\", None) if not event_type: raise ValueError(\"event type not specified\")", "Events\\n\"), outputs_prefix=\"Argus.NIDS\", outputs=result, raw_response=result, ) def list_nids_events_command(args: Dict[str, Any]) ->", "the fetch incidents command to run after activated via integration", "None) value = args.get(\"value\", None) if not case_id: raise ValueError(\"case_id", "list_case_attachments( caseID=case_id, limit=args.get(\"limit\", None), offset=args.get(\"offset\", None) ) readable_output = f\"#", "result = get_payload( type=event_type, timestamp=timestamp, customerID=customer_id, eventID=event_id ) readable_output =", "tableToMarkdown( \"Output not suitable for playground\", result[\"data\"] ) return CommandResults(", "+= ( f\"_Status: {result['data']['status']}, at: {result['data']['closedTime']}_\" ) return CommandResults( readable_output=readable_output,", "outputs=result, raw_response=result, ) def update_case_command(args: Dict[str, Any]) -> CommandResults: case_id", "not specified\") result = fetch_observations_for_i_p(ip=ip) return CommandResults( readable_output=tableToMarkdown(f'IP observations for", "timestamp = args.get(\"timestamp\", None) customer_id = 
args.get(\"customer_id\", None) event_id =", "outputs=result, raw_response=result, ) def fetch_observations_for_domain_command(args: Dict[str, Any]) -> CommandResults: fqdn", "None)), lastUpdatedTimestamp=args.get(\"last_updated_timestamp\", None), indexStartTime=args.get(\"index_start_time\", None), indexEndTime=args.get(\"index_end_time\", None), destinationIP=argToList(args.get(\"destination_ip\", None)), sourceIP=argToList(args.get(\"source_ip\",", "not specified\") result = list_case_attachments( caseID=case_id, limit=args.get(\"limit\", None), offset=args.get(\"offset\", None)", "CommandResults( readable_output=pretty_print_events(dict(result), \"# List Events\\n\"), outputs_prefix=\"Argus.Events\", outputs=result, raw_response=result, ) def", "warnings urllib3.disable_warnings() \"\"\" CONSTANTS \"\"\" DATE_FORMAT = \"%Y-%m-%dT%H:%M:%SZ\" PRETTY_DATE_FORMAT =", "outputs=result, raw_response=result, ) def remove_case_tag_by_key_value_command(args: Dict[str, Any]) -> CommandResults: case_id", "ip: raise ValueError(\"ip not specified\") result = fetch_observations_for_i_p(ip=ip) return CommandResults(", "internal=args.get(\"internal\", None), originEmailAddress=args.get(\"origin_email_address\", None), associatedAttachmentID=args.get(\"associated_attachment_id\", None), ) return CommandResults( readable_output=pretty_print_comment(", "return_results(get_attachment_command(demisto.args())) elif demisto.command() == \"argus-get-case-metadata-by-id\": return_results(get_case_metadata_by_id_command(demisto.args())) elif demisto.command() == \"argus-list-case-attachments\":", "readable_output=pretty_print_events(dict(result), \"# Find NIDS Events\\n\"), outputs_prefix=\"Argus.NIDS\", outputs=result, raw_response=result, ) def", "tags = [] for i in range(0, len(lst), 2): tags.append({\"key\":", "CommonServerPython import * \"\"\" IMPORTS \"\"\" import json import urllib3", "headers = [\"key\", \"value\", \"addedTime\", \"id\", \"flags\"] readable_output = 
tableToMarkdown(", "geoCountry=argToList(args.get(\"geo_country\", None)), properties=str_to_dict(args.get(\"properties\", None)), exactMatchProperties=args.get(\"exact_match_properties\", None), subCriteria=argToList(args.get(\"sub_criteria\", None)), signature=argToList(args.get(\"signature\", None)),", "not specified\") result = get_events_for_case( caseID=case_id, limit=args.get(\"limit\", None), offset=args.get(\"offset\", None)", "\"{ip}\"', result[\"data\"]), outputs_prefix=\"Argus.ObservationsIP\", outputs=result, raw_response=result, ) \"\"\" MAIN FUNCTION \"\"\"", "list_aggregated_events, ) from argus_api.api.events.v1.payload import get_payload from argus_api.api.events.v1.pcap import get_pcap", "None)), port=argToList(args.get(\"port\", None)), minSeverity=args.get(\"min_severity\", None), maxSeverity=args.get(\"max_severity\", None), limit=args.get(\"limit\", 25), offset=args.get(\"offset\",", "if not description: raise ValueError(\"description not specified\") if not service:", "+= tableToMarkdown( \"Output not suitable for playground\", result[\"data\"] ) return", "not specified\") # noinspection PyTypeChecker result = search_records( query=query, aggregateResult=args.get(\"aggregate_result\",", "originEmailAddress=args.get(\"origin_email_address\", None), publish=args.get(\"publish\", None), defaultWatchers=args.get(\"default_watchers\", None), ) return CommandResults( readable_output=pretty_print_case_metadata(result),", "None) if not case_id: raise ValueError(\"case_id not specified\") if not", "= fetch_incidents( last_run=demisto.getLastRun(), first_fetch_period=first_fetch_period, limit=demisto.params().get(\"max_fetch\", 25), min_severity=demisto.params().get(\"min_severity\", \"low\"), ) demisto.setLastRun(next_run)", "\"\"\" COMMAND FUNCTIONS \"\"\" def test_module_command() -> str: response =", "string += \"Reported by {} at {}\\n\\n\".format( data[\"publishedByUser\"][\"name\"], pretty_print_date(data[\"publishedTime\"]) )", 
"return_results(edit_comment_command(demisto.args())) elif demisto.command() == \"argus-get-attachment\": return_results(get_attachment_command(demisto.args())) elif demisto.command() == \"argus-get-case-metadata-by-id\":", "excludeFlags=argToList(args.get(\"exclude_flags\", None)), ) readable_output = f\"Advanced Case Search: {result['count']} result(s)\\n\"", "None)), userAssigned=args.get(\"user_assigned\", None), techAssigned=args.get(\"tech_assigned\", None), includeWorkflows=args.get(\"include_workflows\", None), includeDescription=args.get(\"include_description\", None), accessMode=argToList(args.get(\"access_mode\",", "( find_aggregated_events, list_aggregated_events, ) from argus_api.api.events.v1.payload import get_payload from argus_api.api.events.v1.pcap", "get_events_for_case from argus_api.api.events.v1.aggregated import ( find_aggregated_events, list_aggregated_events, ) from argus_api.api.events.v1.payload", "+= f\"{attachment['name']} ({attachment['mimeType']}, {attachment['size']} kb)\\n\\n\" readable_output += f\"_id: {attachment['id']}_\\n\" readable_output", "caseID=case_id, comment=args.get(\"comment\", None), ) readable_output = f\"# #{case_id}: close case\\n\"", "= args.get(\"type\", None) tags = args.get(\"tags\", None) if not subject:", "return CommandResults( readable_output=pretty_print_case_metadata(result, \"Case deleted\"), outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result, ) def", "demisto.command() == \"argus-list-aggregated-events\": return_results(list_aggregated_events_command(demisto.args())) elif demisto.command() == \"argus-get-payload\": return_results(get_payload_command(demisto.args())) elif", "result[\"data\"] string = title if title else f\"# #{data['id']}: {data['subject']}\\n\"", "accessMode=argToList(args.get(\"access_mode\", None)), explicitAccess=argToList(args.get(\"explicit_access\", None)), sortBy=argToList(args.get(\"sort_by\", None)), includeFlags=argToList(args.get(\"include_flags\", None)), 
excludeFlags=argToList(args.get(\"exclude_flags\", None)),", "0: return [] tags = [] for i in range(0,", "return string \"\"\" COMMAND FUNCTIONS \"\"\" def test_module_command() -> str:", "return_results(find_aggregated_events_command(demisto.args())) elif demisto.command() == \"argus-list-aggregated-events\": return_results(list_aggregated_events_command(demisto.args())) elif demisto.command() == \"argus-get-payload\":", "\"critical\"] min_severity_list = [] for severity in severities: if argus_priority_to_demisto_severity(", "{result['data']['status']}, at: {result['data']['closedTime']}_\" ) return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result,", "list_aggregated_events( customerID=args.get(\"customer_id\", None), signature=args.get(\"signature\", None), ip=args.get(\"ip\", None), startTimestamp=args.get(\"start_timestamp\", None), endTimestamp=args.get(\"end_timestamp\",", "PyTypeChecker result = search_records( query=query, aggregateResult=args.get(\"aggregate_result\", None), includeAnonymousResults=args.get(\"include_anonymous_results\", None), rrClass=argToList(args.get(\"rr_class\",", "None), publish=args.get(\"publish\", None), defaultWatchers=args.get(\"default_watchers\", None), ) return CommandResults( readable_output=pretty_print_case_metadata(result), outputs_prefix=\"Argus.Case\",", "attachment in result[\"data\"]: readable_output += f\"#### *{attachment['addedByUser']['userName']} - {attachment['addedTime']}*\\n\" readable_output", "elif demisto.command() == \"argus-get-case-metadata-by-id\": return_results(get_case_metadata_by_id_command(demisto.args())) elif demisto.command() == \"argus-list-case-attachments\": return_results(list_case_attachments_command(demisto.args()))", "CommandResults: # noinspection PyTypeChecker result = find_n_i_d_s_events( skipFutureEvents=args.get(\"skip_future_events\", None), exclude=args.get(\"exclude\",", ") # Disable insecure warnings 
urllib3.disable_warnings() \"\"\" CONSTANTS \"\"\" DATE_FORMAT", "== \"argus-delete-comment\": return_results(delete_comment_command(demisto.args())) elif demisto.command() == \"argus-download-attachment\": return_results(download_attachment_command(demisto.args())) elif demisto.command()", "specified\") result = get_events_for_case( caseID=case_id, limit=args.get(\"limit\", None), offset=args.get(\"offset\", None) )", "elif demisto.command() == \"argus-pdns-search-records\": return_results(search_records_command(demisto.args())) elif demisto.command() == \"argus-fetch-observations-for-domain\": return_results(fetch_observations_for_domain_command(demisto.args()))", "return_results(list_case_tags_command(demisto.args())) elif demisto.command() == \"argus-list-case-comments\": return_results(list_case_comments_command(demisto.args())) elif demisto.command() == \"argus-remove-case-tag-by-id\":", "raise ValueError(\"comment not specified\") result = edit_comment(caseID=case_id, commentID=comment_id, comment=comment) return", "pretty_print_comment(comment) return string def pretty_print_events(result: dict, title: str = None)", "outputs=result, raw_response=result, ) def get_event_command(args: Dict[str, Any]) -> CommandResults: event_type", ") def create_case_command(args: Dict[str, Any]) -> CommandResults: subject = args.get(\"subject\",", ") \"\"\" MAIN FUNCTION \"\"\" def main() -> None: logging.getLogger(\"argus_cli\").setLevel(\"WARNING\")", "Any: event_type = args.get(\"type\", None) timestamp = args.get(\"timestamp\", None) customer_id", "id not specified\") result = get_pcap( type=event_type, timestamp=timestamp, customerID=customer_id, eventID=event_id", "as e: demisto.error(traceback.format_exc()) # print the traceback return_error( f\"Failed to", "outputs=result, raw_response=result, ) def get_attachment_command(args: Dict[str, Any]) -> CommandResults: case_id", "\"service\": case[\"service\"][\"name\"], \"lastUpdatedTime\": case[\"lastUpdatedTime\"], 
\"createdTimestamp\": case[\"createdTimestamp\"], \"customer\": case[\"customer\"][\"shortName\"], }, \"rawJSON\":", "mapping = { \"pendingCustomer\": 0, \"pendingSoc\": 0, \"pendingVendor\": 0, \"pendingClose\":", "-> str: string = title if title else \"\" string", "result = create_case( customer=args.get(\"customer\", None), service=service, category=args.get(\"category\", None), type=case_type, status=args.get(\"status\",", "{case['subject']}\", \"occurred\": case[\"createdTime\"], \"severity\": argus_priority_to_demisto_severity(case[\"priority\"]), \"status\": argus_status_to_demisto_status(case[\"status\"]), \"details\": case[\"description\"], \"customFields\":", "command.\\nError:\\n{str(e)}\" ) \"\"\" ENTRY POINT \"\"\" if __name__ in (\"__main__\",", "result[\"data\"], headers=headers ) return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Tags\", outputs=result, raw_response=result, )", "elif demisto.command() == \"argus-update-case\": return_results(update_case_command(demisto.args())) elif demisto.command() == \"argus-get-event\": return_results(get_event_command(demisto.args()))", "return CommandResults( readable_output=pretty_print_case_metadata(result), outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result, ) def delete_case_command(args: Dict[str,", "get_attachment_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None) attachment_id", "CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Tags\", outputs=result, raw_response=result, ) def list_case_comments_command(args: Dict[str, Any])", "PyTypeChecker result = advanced_case_search( startTimestamp=start_timestamp if start_timestamp else first_fetch_period, endTimestamp=\"now\",", "find_aggregated_events, list_aggregated_events, ) from argus_api.api.events.v1.payload import get_payload from argus_api.api.events.v1.pcap import", "== \"argus-list-case-comments\": 
return_results(list_case_comments_command(demisto.args())) elif demisto.command() == \"argus-remove-case-tag-by-id\": return_results(remove_case_tag_by_id_command(demisto.args())) elif demisto.command()", "CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Payload\", outputs=result, raw_response=result, ) def get_pcap_command(args: Dict[str, Any])", "[] tags = [] for i in range(0, len(lst), 2):", "Any]) -> CommandResults: case_id = args.get(\"case_id\", None) key = args.get(\"key\",", "None)), customerID=argToList(args.get(\"customer_id\", None)), tlp=argToList((args.get(\"tlp\", None))), limit=args.get(\"limit\", 25), offset=args.get(\"offset\", None), )", "eventIdentifier=argToList(args.get(\"event_identifier\", None)), locationID=argToList(args.get(\"location_id\", None)), severity=argToList(args.get(\"severity\", None)), customer=argToList(args.get(\"customer\", None)), alarmID=argToList(args.get(\"alarm_id\", None)),", "\"-24hours\"), endTimestamp=args.get(\"end_timestamp\", \"now\"), sortBy=argToList(args.get(\"sort_by\", None)), includeFlags=argToList(args.get(\"include_flags\", None)), excludeFlags=argToList(args.get(\"exclude_flags\", None)), )", "not comment_id: raise ValueError(\"comment id not specified\") result = delete_comment(caseID=case_id,", "args.get(\"key\", None) value = args.get(\"value\", None) if not case_id: raise", "len(lst), 2)} def date_time_to_epoch_milliseconds(date_time: Union[datetime, str] = None) -> int:", "None), exclude=args.get(\"exclude\", None), locationID=argToList(args.get(\"location_id\", None)), severity=argToList(args.get(\"severity\", None)), customer=argToList(args.get(\"customer\", None)), alarmID=argToList(args.get(\"alarm_id\",", "create_case, delete_case, delete_comment, download_attachment, edit_comment, get_attachment, get_case_metadata_by_id, list_case_attachments, list_case_tags, list_case_comments,", "raise ValueError(\"case_id not specified\") if sort_by: sort_by = [\"addedTimestamp\"] 
if", "if not value: raise ValueError(\"value not specified\") tag = {\"key\":", "f\"{attachment['name']} ({attachment['mimeType']}, {attachment['size']} kb)\\n\\n\" readable_output += f\"_id: {attachment['id']}_\\n\" readable_output +=", "str] = None) -> str: if isinstance(date_time, datetime): return date_time.strftime(PRETTY_DATE_FORMAT)", "ValueError(\"case_id not specified\") if sort_by: sort_by = [\"addedTimestamp\"] if sort_by", "def build_argus_priority_from_min_severity(min_severity: str) -> List[str]: severities = [\"low\", \"medium\", \"high\",", "None), limit=args.get(\"limit\", None), offset=args.get(\"offset\", None), ) return CommandResults( readable_output=pretty_print_events(dict(result), \"#", "if not comment: raise ValueError(\"comment not specified\") result = add_comment(", "readable_output=readable_output, outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result, ) def create_case_command(args: Dict[str, Any]) ->", "eventID=event_id ) return fileResult(f\"{event_id}_pcap\", result.content) def find_nids_events_command(args: Dict[str, Any]) ->", "if last_run else None # noinspection PyTypeChecker result = advanced_case_search(", "get_events_for_case_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None) if", "i in range(0, len(lst), 2)} def date_time_to_epoch_milliseconds(date_time: Union[datetime, str] =", "return CommandResults( readable_output=tableToMarkdown(f\"Event: {event_id}\", result[\"data\"]), outputs_prefix=\"Argus.Event\", outputs=result, raw_response=result, ) def", "def parse_first_fetch(first_fetch: Any) -> Any: if isinstance(first_fetch, str): if first_fetch[0]", "2)} def date_time_to_epoch_milliseconds(date_time: Union[datetime, str] = None) -> int: if", "base_url: str = None, proxies: dict = None, verify: bool", "json.dumps(case), } ) if result[\"data\"]: last_run[\"start_time\"] = str(result[\"data\"][-1][\"createdTimestamp\"] + 1)", "ValueError(\"case_id not specified\") if not key: 
raise ValueError(\"key not specified\")", "add_comment, advanced_case_search, close_case, create_case, delete_case, delete_comment, download_attachment, edit_comment, get_attachment, get_case_metadata_by_id,", "None), ) return CommandResults( readable_output=pretty_print_case_metadata(result), outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result, ) def", "dict = None, verify: bool = None ): argus_session.api_key =", "outputs_prefix=\"Argus.Comment\", outputs=result, raw_response=result, ) def download_attachment_command(args: Dict[str, Any]) -> Any:", ") def add_comment_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\",", "{result['data']['id']}_\\n\" return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Attachments\", outputs=result, raw_response=result, ) def get_case_metadata_by_id_command(args:", "], timeFieldStrategy=[\"createdTimestamp\"], ) incidents = [] for case in result[\"data\"]:", "*{attachment['addedByUser']['userName']} - {attachment['addedTime']}*\\n\" readable_output += f\"{attachment['name']} ({attachment['mimeType']}, {attachment['size']} kb)\\n\\n\" readable_output", "delete_comment_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None) comment_id", "subject=args.get(\"subject\", None), description=args.get(\"description\", None), status=args.get(\"status\", None), priority=args.get(\"priority\", None), category=args.get(\"category\", None),", "outputs_prefix=\"Argus.ObservationsIP\", outputs=result, raw_response=result, ) \"\"\" MAIN FUNCTION \"\"\" def main()", "return_results(close_case_command(demisto.args())) elif demisto.command() == \"argus-create-case\": return_results(create_case_command(demisto.args())) elif demisto.command() == \"argus-delete-case\":", "-> None: logging.getLogger(\"argus_cli\").setLevel(\"WARNING\") first_fetch_period = parse_first_fetch( demisto.params().get(\"first_fetch\", \"-1 day\") )", 
"offset=args.get(\"offset\", None) ) return CommandResults( readable_output=pretty_print_events( dict(result), f\"# #{case_id}: Associated", "List[str]: severities = [\"low\", \"medium\", \"high\", \"critical\"] min_severity_list = []", "def get_pcap_command(args: Dict[str, Any]) -> Any: event_type = args.get(\"type\", None)", "+= \"* * *\\n\" return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Attachments\", outputs=result, raw_response=result,", "[\"closed\"]}, ], timeFieldStrategy=[\"createdTimestamp\"], ) incidents = [] for case in", "customerReference=args.get(\"customer_reference\", None), priority=args.get(\"priority\", None), accessMode=args.get(\"access_mode\", None), originEmailAddress=args.get(\"origin_email_address\", None), publish=args.get(\"publish\", None),", "raise ValueError(\"value not specified\") tag = {\"key\": key, \"value\": value}", "[\"-addedTimestamp\"] result = list_case_comments( caseID=case_id, beforeComment=args.get(\"before_comment\", None), afterComment=args.get(\"after_comment\", None), offset=args.get(\"offset\",", "PyTypeChecker result = advanced_case_search( startTimestamp=args.get(\"start_timestamp\", None), endTimestamp=args.get(\"end_timestamp\", None), limit=args.get(\"limit\", None),", "result = add_comment( caseID=case_id, comment=comment, asReplyTo=args.get(\"as_reply_to\", None), internal=args.get(\"internal\", None), originEmailAddress=args.get(\"origin_email_address\",", "outputs=result, raw_response=result, ) def search_records_command(args: Dict[str, Any]) -> CommandResults: query", "!= \"-\": first_fetch = f\"-{first_fetch}\" return first_fetch def build_tags_from_list(lst: list)", "= args.get(\"sort_by\", None) if not case_id: raise ValueError(\"case_id not specified\")", "None)), properties=str_to_dict(args.get(\"properties\", None)), exactMatchProperties=args.get(\"exact_match_properties\", None), subCriteria=argToList(args.get(\"sub_criteria\", None)), 
signature=argToList(args.get(\"signature\", None)), lastUpdatedTimestamp=args.get(\"last_updated_timestamp\",", "== \"argus-delete-case\": return_results(delete_case_command(demisto.args())) elif demisto.command() == \"argus-delete-comment\": return_results(delete_comment_command(demisto.args())) elif demisto.command()", "raise ValueError(\"subject not specified\") if not description: raise ValueError(\"description not", "case_id = args.get(\"case_id\", None) tag_id = args.get(\"tag_id\", None) if not", "= result[\"data\"] string = title if title else f\"# #{data['id']}:", "limit=args.get(\"limit\", 25), offset=args.get(\"offset\", None), ) return CommandResults( readable_output=tableToMarkdown(\"PDNS records\", result[\"data\"]),", "= [] for i in range(0, len(lst), 2): tags.append({\"key\": lst[i],", "fqdn = args.get(\"fqdn\", None) if not fqdn: raise ValueError(\"fqdn not", "advanced_case_search( startTimestamp=args.get(\"start_timestamp\", None), endTimestamp=args.get(\"end_timestamp\", None), limit=args.get(\"limit\", None), offset=args.get(\"offset\", None), includeDeleted=args.get(\"include_deleted\",", "None)), destinationGeoCountry=argToList(args.get(\"destination_geo_country\", None)), geoCountry=argToList(args.get(\"geo_country\", None)), properties=str_to_dict(args.get(\"properties\", None)), exactMatchProperties=args.get(\"exact_match_properties\", None), subCriteria=argToList(args.get(\"sub_criteria\",", "exactMatchProperties=args.get(\"exact_match_properties\", None), subCriteria=argToList(args.get(\"sub_criteria\", None)), signature=argToList(args.get(\"signature\", None)), lastUpdatedTimestamp=args.get(\"last_updated_timestamp\", None), indexStartTime=args.get(\"index_start_time\", None),", "= argToList(string) if len(lst) % 2 != 0: return {}", "Any]) -> CommandResults: event_type = args.get(\"type\", None) timestamp = args.get(\"timestamp\",", "ip = args.get(\"ip\", None) if not ip: raise ValueError(\"ip not", "\"details\": case[\"description\"], 
\"customFields\": { \"argus_id\": str(case[\"id\"]), \"type\": case[\"type\"], \"category\": case[\"category\"][\"name\"]", "str: string = title if title else \"\" string +=", "limit=args.get(\"limit\", None), offset=args.get(\"offset\", None), includeDeleted=args.get(\"include_deleted\", None), subCriteria=argToList(args.get(\"sub_criteria\", None)), exclude=args.get(\"exclude\", None),", "Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None) comment =", "Updated comment\\n\" ), outputs_prefix=\"Argus.Comment\", outputs=result, raw_response=result, ) def get_attachment_command(args: Dict[str,", "\"argus-delete-comment\": return_results(delete_comment_command(demisto.args())) elif demisto.command() == \"argus-download-attachment\": return_results(download_attachment_command(demisto.args())) elif demisto.command() ==", "-> Any: case_id = args.get(\"case_id\", None) attachment_id = args.get(\"attachment_id\", None)", "= [] for severity in severities: if argus_priority_to_demisto_severity( min_severity.lower() )", "update_case( id=case_id, subject=args.get(\"subject\", None), description=args.get(\"description\", None), status=args.get(\"status\", None), priority=args.get(\"priority\", None),", "None), destinationIP=argToList(args.get(\"destination_ip\", None)), sourceIP=argToList(args.get(\"source_ip\", None)), ip=argToList(args.get(\"ip\", None)), destinationPort=argToList(args.get(\"destination_port\", None)), sourcePort=argToList(args.get(\"source_port\",", "[] if len(lst) % 2 != 0: return [] tags", ") readable_output = f\"# #{case_id}: Case attachments\\n\" for attachment in", "startTimestamp=args.get(\"start_timestamp\", None), endTimestamp=args.get(\"end_timestamp\", None), limit=args.get(\"limit\", None), offset=args.get(\"offset\", None), includeDeleted=args.get(\"include_deleted\", None),", "isinstance(date_time, datetime): return date_time.strftime(PRETTY_DATE_FORMAT) if isinstance(date_time, str): return 
pretty_print_date(dateparser.parse(date_time)) return", "define the fetch incidents command to run after activated via", "None) comment_id = args.get(\"comment_id\", None) comment = args.get(\"comment\", None) if", "result = edit_comment(caseID=case_id, commentID=comment_id, comment=comment) return CommandResults( readable_output=pretty_print_comment( result[\"data\"], f\"#", "if title else \"\" for comment in comments: string +=", "None), originEmailAddress=args.get(\"origin_email_address\", None), hasEvents=args.get(\"has_events\", None), internalComment=args.get(\"internal_comment\", None), ) return CommandResults(", ") <= argus_priority_to_demisto_severity(severity): min_severity_list.append(severity) return min_severity_list def parse_first_fetch(first_fetch: Any) ->", "def delete_comment_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None)", "ValueError(\"timestamp not specified\") if not customer_id: raise ValueError(\"customer id not", "ValueError(\"event id not specified\") result = get_event_by_path( type=event_type, timestamp=timestamp, customerID=customer_id,", "= args.get(\"ip\", None) if not ip: raise ValueError(\"ip not specified\")", "!= 0: return {} return {lst[i]: lst[i + 1] for", "data = result[\"data\"] string = title if title else f\"#", "noinspection PyTypeChecker result = advanced_case_search( startTimestamp=args.get(\"start_timestamp\", None), endTimestamp=args.get(\"end_timestamp\", None), limit=args.get(\"limit\",", "payload\\n\" readable_output += f\"Event: {event_id}, type: {result['data']['type']}\\n\" readable_output += result[\"data\"][\"payload\"]", "offset=args.get(\"offset\", None), ) return CommandResults( readable_output=pretty_print_events(dict(result), \"# List NIDS Events\\n\"),", "not specified\") if not description: raise ValueError(\"description not specified\") if", "raise ValueError(\"tags list must be of even number\", tags) tags", "value} result = add_case_tag(caseID=case_id, tags=tag) 
headers = [\"key\", \"value\", \"addedTime\"]", "None: logging.getLogger(\"argus_cli\").setLevel(\"WARNING\") first_fetch_period = parse_first_fetch( demisto.params().get(\"first_fetch\", \"-1 day\") ) set_argus_settings(", "result(s)\\n\" readable_output += tableToMarkdown( \"Output not suitable for playground\", result[\"data\"]", "+= f\"#### *{attachment['addedByUser']['userName']} - {attachment['addedTime']}*\\n\" readable_output += f\"{attachment['name']} ({attachment['mimeType']}, {attachment['size']}", "if not query: raise ValueError(\"query not specified\") # noinspection PyTypeChecker", "\"argus-get-pcap\": return_results(get_pcap_command(demisto.args())) elif demisto.command() == \"argus-find-nids-events\": return_results(find_nids_events_command(demisto.args())) elif demisto.command() ==", "specified\") if not comment_id: raise ValueError(\"comment id not specified\") result", "minCount=args.get(\"min_count\", None), associatedCaseID=argToList(args.get(\"associated_case_id\", None)), sourceIPMinBits=args.get(\"source_ip_min_bits\", None), destinationIPMinBits=args.get(\"destination_ip_min_bits\", None), startTimestamp=args.get(\"start_timestamp\", \"-24hours\"),", "incidents.append( { \"name\": f\"#{case['id']}: {case['subject']}\", \"occurred\": case[\"createdTime\"], \"severity\": argus_priority_to_demisto_severity(case[\"priority\"]), \"status\":", "= args.get(\"value\", None) if not case_id: raise ValueError(\"case id not", "if start_timestamp else first_fetch_period, endTimestamp=\"now\", limit=limit, sortBy=[\"createdTimestamp\"], priority=build_argus_priority_from_min_severity(min_severity), subCriteria=[ {\"exclude\":", "return {lst[i]: lst[i + 1] for i in range(0, len(lst),", "comment_id = args.get(\"comment_id\", None) if not case_id: raise ValueError(\"case id", "elif demisto.command() == \"argus-get-events-for-case\": return_results(get_events_for_case_command(demisto.args())) elif demisto.command() == \"argus-find-aggregated-events\": 
return_results(find_aggregated_events_command(demisto.args()))", "None), hasEvents=args.get(\"has_events\", None), internalComment=args.get(\"internal_comment\", None), ) return CommandResults( readable_output=pretty_print_case_metadata(result), outputs_prefix=\"Argus.Case\",", "Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None) if not", "return pretty_print_date(dateparser.parse(date_time)) return datetime.now().strftime(PRETTY_DATE_FORMAT) def pretty_print_case_metadata(result: dict, title: str =", "includeFlags=argToList(args.get(\"include_flags\", None)), excludeFlags=argToList(args.get(\"exclude_flags\", None)), ) return CommandResults( readable_output=pretty_print_events(dict(result), \"# Find", ") def update_case_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\",", "not event_id: raise ValueError(\"event id not specified\") result = get_pcap(", "CommandResults: fqdn = args.get(\"fqdn\", None) if not fqdn: raise ValueError(\"fqdn", "== \"argus-list-nids-events\": return_results(list_nids_events_command(demisto.args())) elif demisto.command() == \"argus-pdns-search-records\": return_results(search_records_command(demisto.args())) elif demisto.command()", "if comment[\"lastUpdatedTime\"] else \"\" ) string += f\"{comment['comment']}\\n\\n\" string +=", "result[\"data\"]: incidents.append( { \"name\": f\"#{case['id']}: {case['subject']}\", \"occurred\": case[\"createdTime\"], \"severity\": argus_priority_to_demisto_severity(case[\"priority\"]),", "get_case_metadata_by_id_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None) if", "outputs=result, raw_response=result, ) def get_events_for_case_command(args: Dict[str, Any]) -> CommandResults: case_id", "return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result, ) def create_case_command(args: Dict[str,", "readable_output=readable_output, 
outputs_prefix=\"Argus.Cases\", outputs=result, raw_response=result, ) def close_case_command(args: Dict[str, Any]) ->", "[\"key\", \"value\", \"addedTime\", \"id\", \"flags\"] readable_output = tableToMarkdown( f\"#{case_id}: Delete", "+= \"_Priority: {}, status: {}, last updated: {}_\\n\".format( data[\"priority\"], data[\"status\"],", "get_events_for_case( caseID=case_id, limit=args.get(\"limit\", None), offset=args.get(\"offset\", None) ) return CommandResults( readable_output=pretty_print_events(", "case_id: raise ValueError(\"case id not specified\") result = get_case_metadata_by_id( id=case_id,", "find_aggregated_events_command(args: Dict[str, Any]) -> CommandResults: # noinspection PyTypeChecker result =", "= get_events_for_case( caseID=case_id, limit=args.get(\"limit\", None), offset=args.get(\"offset\", None) ) return CommandResults(", "noinspection PyTypeChecker result = find_aggregated_events( skipFutureEvents=args.get(\"skip_future_events\", None), exclude=args.get(\"exclude\", None), locationID=argToList(args.get(\"location_id\",", "args.get(\"fqdn\", None) if not fqdn: raise ValueError(\"fqdn not specified\") result", "Union[datetime, str] = None) -> int: if isinstance(date_time, datetime): return", "== \"test-module\": # This is the call made when pressing", "build_tags_from_list(tags) result = create_case( customer=args.get(\"customer\", None), service=service, category=args.get(\"category\", None), type=case_type,", "sourceIP=argToList(args.get(\"source_ip\", None)), ip=argToList(args.get(\"ip\", None)), destinationPort=argToList(args.get(\"destination_port\", None)), sourcePort=argToList(args.get(\"source_port\", None)), port=argToList(args.get(\"port\", None)),", "specified\") if not case_type: raise ValueError(\"case_type not specified\") if tags:", "return CommandResults( readable_output=tableToMarkdown(\"PDNS records\", result[\"data\"]), outputs_prefix=\"Argus.PDNS\", outputs=result, raw_response=result, ) def", 
"\"argus-fetch-observations-for-ip\": return_results(fetch_observations_for_i_p_command(demisto.args())) # Log exceptions and return errors except Exception", "-> str: data = result[\"data\"] string = title if title", "args.get(\"comment\", None) if not case_id: raise ValueError(\"case_id not specified\") if", "def list_aggregated_events_command(args: Dict[str, Any]) -> CommandResults: result = list_aggregated_events( customerID=args.get(\"customer_id\",", "None), eventIdentifier=argToList(args.get(\"event_identifier\", None)), locationID=argToList(args.get(\"location_id\", None)), severity=argToList(args.get(\"severity\", None)), customer=argToList(args.get(\"customer\", None)), alarmID=argToList(args.get(\"alarm_id\",", "-> CommandResults: event_type = args.get(\"type\", None) timestamp = args.get(\"timestamp\", None)", "priority=args.get(\"priority\", None), accessMode=args.get(\"access_mode\", None), originEmailAddress=args.get(\"origin_email_address\", None), publish=args.get(\"publish\", None), defaultWatchers=args.get(\"default_watchers\", None),", "subject=subject, description=description, customerReference=args.get(\"customer_reference\", None), priority=args.get(\"priority\", None), accessMode=args.get(\"access_mode\", None), originEmailAddress=args.get(\"origin_email_address\", None),", "in range(0, len(lst), 2)} def date_time_to_epoch_milliseconds(date_time: Union[datetime, str] = None)", "date_time.strftime(PRETTY_DATE_FORMAT) if isinstance(date_time, str): return pretty_print_date(dateparser.parse(date_time)) return datetime.now().strftime(PRETTY_DATE_FORMAT) def pretty_print_case_metadata(result:", "ValueError(\"case_type not specified\") if tags: tags = str(tags).split(\",\") if len(tags)", "None), assignedUser=args.get(\"assigned_user\", None), assignedTech=args.get(\"assigned_tech\", None), customerReference=args.get(\"customer_reference\", None), comment=args.get(\"comment\", None), originEmailAddress=args.get(\"origin_email_address\",", 
"readable_output=pretty_print_comment( result[\"data\"], f\"# #{case_id}: Updated comment\\n\" ), outputs_prefix=\"Argus.Comment\", outputs=result, raw_response=result,", "None)), geoCountry=argToList(args.get(\"geo_country\", None)), properties=str_to_dict(args.get(\"properties\", None)), exactMatchProperties=args.get(\"exact_match_properties\", None), subCriteria=argToList(args.get(\"sub_criteria\", None)), signature=argToList(args.get(\"signature\",", "title if title else \"\" string += f\"#### *{comment['addedByUser']['userName']} -", "limit=demisto.params().get(\"max_fetch\", 25), min_severity=demisto.params().get(\"min_severity\", \"low\"), ) demisto.setLastRun(next_run) demisto.incidents(incidents) elif demisto.command() ==", "1000) def pretty_print_date(date_time: Union[datetime, str] = None) -> str: if", "comments: string += pretty_print_comment(comment) return string def pretty_print_events(result: dict, title:", "None) -> int: if isinstance(date_time, datetime): return int(date_time.timestamp() * 1000)", "ValueError(\"customer id not specified\") if not event_id: raise ValueError(\"event id", "f\"Unable to communicate with Argus API {response['responseCode']}, {response}\" ) def", "None)), explicitAccess=argToList(args.get(\"explicit_access\", None)), sortBy=argToList(args.get(\"sort_by\", None)), includeFlags=argToList(args.get(\"include_flags\", None)), excludeFlags=argToList(args.get(\"exclude_flags\", None)), )", "None)), userFieldStrategy=argToList(args.get(\"user_field_strategy\", None)), userAssigned=args.get(\"user_assigned\", None), techAssigned=args.get(\"tech_assigned\", None), includeWorkflows=args.get(\"include_workflows\", None), includeDescription=args.get(\"include_description\",", "2 != 0: return {} return {lst[i]: lst[i + 1]", "outputs=result, raw_response=result, ) def advanced_case_search_command(args: Dict[str, Any]) -> CommandResults: #", "not case_id: raise ValueError(\"case id not specified\") if not tag_id:", 
"return_results(get_pcap_command(demisto.args())) elif demisto.command() == \"argus-find-nids-events\": return_results(find_nids_events_command(demisto.args())) elif demisto.command() == \"argus-list-nids-events\":", "raise ValueError(\"case id not specified\") if not key: raise ValueError(\"key", "if not case_type: raise ValueError(\"case_type not specified\") if tags: tags", "CommandResults: query = args.get(\"query\", None) if not query: raise ValueError(\"query", "= args.get(\"customer_id\", None) event_id = args.get(\"event_id\", None) if not event_type:", "\"# List Events\\n\"), outputs_prefix=\"Argus.Events\", outputs=result, raw_response=result, ) def get_payload_command(args: Dict[str,", "attachments\\n\" for attachment in result[\"data\"]: readable_output += f\"#### *{attachment['addedByUser']['userName']} -", "\"\"\" def test_module_command() -> str: response = get_current_user() if response[\"responseCode\"]", "data[\"description\"] return string def pretty_print_comment(comment: dict, title: str = None)", "raw_response=result, ) def remove_case_tag_by_key_value_command(args: Dict[str, Any]) -> CommandResults: case_id =", "None)), customer=argToList(args.get(\"customer\", None)), alarmID=argToList(args.get(\"alarm_id\", None)), attackCategoryID=argToList(args.get(\"attack_category_id\", None)), sourceGeoCountry=argToList(args.get(\"source_geo_country\", None)), destinationGeoCountry=argToList(args.get(\"destination_geo_country\",", "DATE_FORMAT = \"%Y-%m-%dT%H:%M:%SZ\" PRETTY_DATE_FORMAT = \"%b %d, %Y, %H:%M:%S\" FETCH_TAG", "return last_run, incidents def add_case_tag_command(args: Dict[str, Any]) -> CommandResults: case_id", "not comment: raise ValueError(\"comment not specified\") result = add_comment( caseID=case_id,", "demisto.command() == \"argus-close-case\": return_results(close_case_command(demisto.args())) elif demisto.command() == \"argus-create-case\": return_results(create_case_command(demisto.args())) elif", "result = 
find_aggregated_events( skipFutureEvents=args.get(\"skip_future_events\", None), exclude=args.get(\"exclude\", None), locationID=argToList(args.get(\"location_id\", None)), severity=argToList(args.get(\"severity\",", "e: demisto.error(traceback.format_exc()) # print the traceback return_error( f\"Failed to execute", "% 2 != 0: return [] tags = [] for", "outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result, ) def list_case_attachments_command(args: Dict[str, Any]) -> CommandResults:", "customerReference=args.get(\"customer_reference\", None), comment=args.get(\"comment\", None), originEmailAddress=args.get(\"origin_email_address\", None), hasEvents=args.get(\"has_events\", None), internalComment=args.get(\"internal_comment\", None),", "result[\"data\"]), outputs_prefix=\"Argus.PDNS\", outputs=result, raw_response=result, ) def fetch_observations_for_domain_command(args: Dict[str, Any]) ->", "eventID=event_id ) readable_output = \"# Event payload\\n\" readable_output += f\"Event:", "= f\"Advanced Case Search: {result['count']} result(s)\\n\" readable_output += tableToMarkdown( \"Output", "None)), properties=str_to_dict(args.get(\"properties\", None)), exactMatchProperties=args.get(\"exact_match_properties\", None), sensorID=argToList(args.get(\"sensor_id\", None)), subCriteria=argToList(args.get(\"sub_criteria\", None)), signature=argToList(args.get(\"signature\",", "-> str: if isinstance(date_time, datetime): return date_time.strftime(PRETTY_DATE_FORMAT) if isinstance(date_time, str):", "None), afterComment=args.get(\"after_comment\", None), offset=args.get(\"offset\", None), limit=args.get(\"limit\", None), sortBy=sort_by, ) return", "= args.get(\"case_id\", None) if not case_id: raise ValueError(\"case_id not specified\")", "str, base_url: str = None, proxies: dict = None, verify:", "id=case_id, subject=args.get(\"subject\", None), description=args.get(\"description\", None), status=args.get(\"status\", None), priority=args.get(\"priority\", None), 
category=args.get(\"category\",", "headers = [\"key\", \"value\", \"addedTime\", \"id\"] readable_output = tableToMarkdown( f\"#{case_id}:", "argus_api.api.events.v1 import get_event_by_path from argus_api.api.events.v1.case.case import get_events_for_case from argus_api.api.events.v1.aggregated import", "args.get(\"subject\", None) description = args.get(\"description\", None) service = args.get(\"service\", None)", "run after activated via integration settings. next_run, incidents = fetch_incidents(", "\"%Y-%m-%dT%H:%M:%SZ\" PRETTY_DATE_FORMAT = \"%b %d, %Y, %H:%M:%S\" FETCH_TAG = demisto.params().get(\"fetch_tag\")", "( f\"_Status: {result['data']['status']}, at: {result['data']['closedTime']}_\" ) return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Case\",", "import get_event_by_path from argus_api.api.events.v1.case.case import get_events_for_case from argus_api.api.events.v1.aggregated import (", "args.get(\"query\", None) if not query: raise ValueError(\"query not specified\") #", "args.get(\"case_id\", None) comment_id = args.get(\"comment_id\", None) if not case_id: raise", "outputs_prefix=\"Argus.Tags\", outputs=result, raw_response=result, ) def add_comment_command(args: Dict[str, Any]) -> CommandResults:", "( f\"_Last updated {pretty_print_date(comment['lastUpdatedTime'])}_\\n\" if comment[\"lastUpdatedTime\"] else \"\" ) string", "find_n_i_d_s_events( skipFutureEvents=args.get(\"skip_future_events\", None), exclude=args.get(\"exclude\", None), eventIdentifier=argToList(args.get(\"event_identifier\", None)), locationID=argToList(args.get(\"location_id\", None)), severity=argToList(args.get(\"severity\",", "elif demisto.command() == \"argus-fetch-observations-for-ip\": return_results(fetch_observations_for_i_p_command(demisto.args())) # Log exceptions and return", "if not tag_id: raise ValueError(\"tag id not specified\") result =", "readable_output += result[\"data\"][\"payload\"] return CommandResults( 
readable_output=readable_output, outputs_prefix=\"Argus.Payload\", outputs=result, raw_response=result, )", "PRETTY_DATE_FORMAT = \"%b %d, %Y, %H:%M:%S\" FETCH_TAG = demisto.params().get(\"fetch_tag\") \"\"\"", "as demisto from CommonServerPython import * \"\"\" IMPORTS \"\"\" import", "return tags def str_to_dict(string: str) -> dict: if not string:", "1000) if isinstance(date_time, str): return date_time_to_epoch_milliseconds(dateparser.parse(date_time)) return int(datetime.now().timestamp() * 1000)", "ValueError(\"comment not specified\") result = add_comment( caseID=case_id, comment=comment, asReplyTo=args.get(\"as_reply_to\", None),", "dict, title: str = None) -> str: data = result[\"data\"]", "get_event_command(args: Dict[str, Any]) -> CommandResults: event_type = args.get(\"type\", None) timestamp", "Dict[str, Any]) -> CommandResults: result = list_aggregated_events( customerID=args.get(\"customer_id\", None), signature=args.get(\"signature\",", "Any]) -> CommandResults: fqdn = args.get(\"fqdn\", None) if not fqdn:", "ENTRY POINT \"\"\" if __name__ in (\"__main__\", \"__builtin__\", \"builtins\"): main()", "list_case_comments_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None) sort_by", "= download_attachment(caseID=case_id, attachmentID=attachment_id) return fileResult(attachment_id, result.content) def edit_comment_command(args: Dict[str, Any])", "Any]) -> CommandResults: # noinspection PyTypeChecker result = advanced_case_search( startTimestamp=args.get(\"start_timestamp\",", "download_attachment(caseID=case_id, attachmentID=attachment_id) return fileResult(attachment_id, result.content) def edit_comment_command(args: Dict[str, Any]) ->", "customerID=customer_id, eventID=event_id ) readable_output = \"# Event payload\\n\" readable_output +=", "demisto.error(traceback.format_exc()) # print the traceback return_error( f\"Failed to execute {demisto.command()}", "else \"\" ) string += f\"{comment['comment']}\\n\\n\" string 
+= f\"_id: {comment['id']}_\\n\"", "import ( fetch_observations_for_domain, fetch_observations_for_i_p, ) # Disable insecure warnings urllib3.disable_warnings()", "find_n_i_d_s_events, list_n_i_d_s_events from argus_api.api.pdns.v3.search import search_records from argus_api.api.reputation.v1.observation import (", "CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Tags\", outputs=result, raw_response=result, ) def update_case_command(args: Dict[str, Any])", "not string: return {} lst = argToList(string) if len(lst) %", "return CommandResults( readable_output=pretty_print_events(dict(result), \"# Find events\\n\"), outputs_prefix=\"Argus.Events\", outputs=result, raw_response=result, )", "{ \"name\": f\"#{case['id']}: {case['subject']}\", \"occurred\": case[\"createdTime\"], \"severity\": argus_priority_to_demisto_severity(case[\"priority\"]), \"status\": argus_status_to_demisto_status(case[\"status\"]),", "\"_Count: {}, showing {} events, from {} to {}_\\n\".format( result[\"count\"],", "str: data = result[\"data\"] string = title if title else", "\"addedTime\", \"id\", \"flags\"] readable_output = tableToMarkdown( f\"#{case_id}: Delete tags\", result[\"data\"],", "insecure warnings urllib3.disable_warnings() \"\"\" CONSTANTS \"\"\" DATE_FORMAT = \"%Y-%m-%dT%H:%M:%SZ\" PRETTY_DATE_FORMAT", "f\"#### *{attachment['addedByUser']['userName']} - {attachment['addedTime']}*\\n\" readable_output += f\"{attachment['name']} ({attachment['mimeType']}, {attachment['size']} kb)\\n\\n\"", "= {\"key\": key, \"value\": value} result = add_case_tag(caseID=case_id, tags=tag) headers", "excludeFlags=argToList(args.get(\"exclude_flags\", None)), ) return CommandResults( readable_output=pretty_print_events(dict(result), \"# Find NIDS Events\\n\"),", "= delete_case(caseID=case_id) return CommandResults( readable_output=pretty_print_case_metadata(result, \"Case deleted\"), outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result,", "after activated via 
integration settings. next_run, incidents = fetch_incidents( last_run=demisto.getLastRun(),", "args.get(\"tag_id\", None) if not case_id: raise ValueError(\"case id not specified\")", "explicitAccess=argToList(args.get(\"explicit_access\", None)), sortBy=argToList(args.get(\"sort_by\", None)), includeFlags=argToList(args.get(\"include_flags\", None)), excludeFlags=argToList(args.get(\"exclude_flags\", None)), ) readable_output", "limit=args.get(\"limit\", 25), offset=args.get(\"offset\", None), includeDeleted=args.get(\"include_deleted\", None), minCount=args.get(\"min_count\", None), associatedCaseID=argToList(args.get(\"associated_case_id\", None)),", "session as argus_session from argus_api.api.currentuser.v1.user import get_current_user from argus_api.api.cases.v2.case import", "\"argus-pdns-search-records\": return_results(search_records_command(demisto.args())) elif demisto.command() == \"argus-fetch-observations-for-domain\": return_results(fetch_observations_for_domain_command(demisto.args())) elif demisto.command() ==", "= update_case( id=case_id, subject=args.get(\"subject\", None), description=args.get(\"description\", None), status=args.get(\"status\", None), priority=args.get(\"priority\",", "update_case, ) from argus_api.api.events.v1 import get_event_by_path from argus_api.api.events.v1.case.case import get_events_for_case", "None)), keywordMatchStrategy=args.get(\"keyword_match_strategy\", None), user=argToList(args.get(\"user\", None)), userFieldStrategy=argToList(args.get(\"user_field_strategy\", None)), userAssigned=args.get(\"user_assigned\", None), techAssigned=args.get(\"tech_assigned\",", "type: {result['data']['type']}\\n\" readable_output += result[\"data\"][\"payload\"] return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Payload\", outputs=result,", "readable_output=tableToMarkdown(\"PDNS records\", result[\"data\"]), outputs_prefix=\"Argus.PDNS\", outputs=result, raw_response=result, ) def 
fetch_observations_for_domain_command(args: Dict[str,", "argus_api.api.events.v1.pcap import get_pcap from argus_api.api.events.v1.nids import find_n_i_d_s_events, list_n_i_d_s_events from argus_api.api.pdns.v3.search", "CommandResults: subject = args.get(\"subject\", None) description = args.get(\"description\", None) service", "= [\"key\", \"value\", \"addedTime\", \"id\"] readable_output = tableToMarkdown( f\"#{case_id}: Tags\",", "raw_response=result, ) def add_comment_command(args: Dict[str, Any]) -> CommandResults: case_id =", "build_tags_from_list(lst: list) -> List[Dict]: if not lst: return [] if", "#{case_id}: close case\\n\" readable_output += ( f\"_Status: {result['data']['status']}, at: {result['data']['closedTime']}_\"", "remove_case_tag_by_key_value(caseID=case_id, tagKey=key, tagValue=value) headers = [\"key\", \"value\", \"addedTime\", \"id\", \"flags\"]", "ValueError(\"description not specified\") if not service: raise ValueError(\"service not specified\")", "= advanced_case_search( startTimestamp=start_timestamp if start_timestamp else first_fetch_period, endTimestamp=\"now\", limit=limit, sortBy=[\"createdTimestamp\"],", "#{data['id']}: {data['subject']}\\n\" string += \"_Priority: {}, status: {}, last updated:", "f\"# #{case_id}: Deleted comment\\n\" ), outputs_prefix=\"Argus.Comment\", outputs=result, raw_response=result, ) def", "argus_session.api_key = api_key argus_session.base_url = base_url argus_session.proxies = proxies argus_session.verify", "subject = args.get(\"subject\", None) description = args.get(\"description\", None) service =", "id not specified\") result = get_case_metadata_by_id( id=case_id, skipRedirect=args.get(\"skip_redirect\", None) )", "type=case_type, status=args.get(\"status\", None), tags=tags, subject=subject, description=description, customerReference=args.get(\"customer_reference\", None), priority=args.get(\"priority\", None),", "specified\") if not key: raise ValueError(\"key not specified\") if not", "= 
get_current_user() if response[\"responseCode\"] == 200: return \"ok\" return (", "elif demisto.command() == \"fetch-incidents\": # Set and define the fetch", "*{result['data']['addedByUser']['userName']} - {result['data']['addedTime']}*\\n\" readable_output += f\"{result['data']['name']} ({result['data']['mimeType']}, {result['data']['size']} bytes)\\n\\n\" readable_output", "List Events\\n\"), outputs_prefix=\"Argus.Events\", outputs=result, raw_response=result, ) def get_payload_command(args: Dict[str, Any])", "specified\") result = get_pcap( type=event_type, timestamp=timestamp, customerID=customer_id, eventID=event_id ) return", "timestamp: raise ValueError(\"timestamp not specified\") if not customer_id: raise ValueError(\"customer", "commentID=comment_id) return CommandResults( readable_output=pretty_print_comment( result[\"data\"], f\"# #{case_id}: Deleted comment\\n\" ),", "comment: raise ValueError(\"comment not specified\") result = add_comment( caseID=case_id, comment=comment,", "result.content) def edit_comment_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\",", "None)), destinationPort=argToList(args.get(\"destination_port\", None)), sourcePort=argToList(args.get(\"source_port\", None)), port=argToList(args.get(\"port\", None)), minSeverity=args.get(\"min_severity\", None), maxSeverity=args.get(\"max_severity\",", ") def fetch_observations_for_domain_command(args: Dict[str, Any]) -> CommandResults: fqdn = args.get(\"fqdn\",", "[\"key\", \"value\", \"addedTime\"] readable_output = tableToMarkdown( f\"#{case_id}: Tags\", result[\"data\"], headers=headers", "remove_case_tag_by_key_value, update_case, ) from argus_api.api.events.v1 import get_event_by_path from argus_api.api.events.v1.case.case import", "4} return mapping.get(priority, 0) def argus_status_to_demisto_status(status: str) -> int: mapping", ") return CommandResults( readable_output=pretty_print_events(dict(result), \"# List NIDS Events\\n\"), 
outputs_prefix=\"Argus.NIDS\", outputs=result,", "list_case_comments( caseID=case_id, beforeComment=args.get(\"before_comment\", None), afterComment=args.get(\"after_comment\", None), offset=args.get(\"offset\", None), limit=args.get(\"limit\", None),", "return_results(search_records_command(demisto.args())) elif demisto.command() == \"argus-fetch-observations-for-domain\": return_results(fetch_observations_for_domain_command(demisto.args())) elif demisto.command() == \"argus-fetch-observations-for-ip\":", "{str(comment['flags'])}_\\n\" if comment[\"flags\"] else \"\" string += \"* * *\\n\"", "== 200: return \"ok\" return ( f\"Unable to communicate with", "at: {result['data']['closedTime']}_\" ) return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result, )", "def delete_case_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None)", "case_id: raise ValueError(\"case id not specified\") result = get_events_for_case( caseID=case_id,", "= get_case_metadata_by_id( id=case_id, skipRedirect=args.get(\"skip_redirect\", None) ) return CommandResults( readable_output=pretty_print_case_metadata(result), outputs_prefix=\"Argus.Case\",", "not specified\") result = remove_case_tag_by_key_value(caseID=case_id, tagKey=key, tagValue=value) headers = [\"key\",", "str: response = get_current_user() if response[\"responseCode\"] == 200: return \"ok\"", "return fileResult(attachment_id, result.content) def edit_comment_command(args: Dict[str, Any]) -> CommandResults: case_id", "sortBy=argToList(args.get(\"sort_by\", None)), includeFlags=argToList(args.get(\"include_flags\", None)), excludeFlags=argToList(args.get(\"exclude_flags\", None)), ) return CommandResults( readable_output=pretty_print_events(dict(result),", ") def list_case_comments_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\",", "raise ValueError(\"case id not specified\") result = 
get_events_for_case( caseID=case_id, limit=args.get(\"limit\",", "None))), limit=args.get(\"limit\", 25), offset=args.get(\"offset\", None), ) return CommandResults( readable_output=tableToMarkdown(\"PDNS records\",", "priority=build_argus_priority_from_min_severity(min_severity), subCriteria=[ {\"exclude\": True, \"status\": [\"closed\"]}, ], timeFieldStrategy=[\"createdTimestamp\"], ) incidents", "not specified\") if not tag_id: raise ValueError(\"tag id not specified\")", "limit=args.get(\"limit\", None), offset=args.get(\"offset\", None) ) headers = [\"key\", \"value\", \"addedTime\",", "None), description=args.get(\"description\", None), status=args.get(\"status\", None), priority=args.get(\"priority\", None), category=args.get(\"category\", None), reporter=args.get(\"reporter\",", "None), offset=args.get(\"offset\", None), ) return CommandResults( readable_output=pretty_print_events(dict(result), \"# List Events\\n\"),", "\"lastUpdatedTime\": case[\"lastUpdatedTime\"], \"createdTimestamp\": case[\"createdTimestamp\"], \"customer\": case[\"customer\"][\"shortName\"], }, \"rawJSON\": json.dumps(case), }", "offset=args.get(\"offset\", None), includeDeleted=args.get(\"include_deleted\", None), minCount=args.get(\"min_count\", None), associatedCaseID=argToList(args.get(\"associated_case_id\", None)), sourceIPMinBits=args.get(\"source_ip_min_bits\", None),", "None)), ) return CommandResults( readable_output=pretty_print_events(dict(result), \"# Find events\\n\"), outputs_prefix=\"Argus.Events\", outputs=result,", "commentID=comment_id, comment=comment) return CommandResults( readable_output=pretty_print_comment( result[\"data\"], f\"# #{case_id}: Updated comment\\n\"", "raise ValueError(\"case_id not specified\") if not key: raise ValueError(\"key not", "comment=comment, asReplyTo=args.get(\"as_reply_to\", None), internal=args.get(\"internal\", None), originEmailAddress=args.get(\"origin_email_address\", None), 
associatedAttachmentID=args.get(\"associated_attachment_id\", None), )", "in result[\"data\"]: readable_output += f\"#### *{attachment['addedByUser']['userName']} - {attachment['addedTime']}*\\n\" readable_output +=", "None), maxSeverity=args.get(\"max_severity\", None), limit=args.get(\"limit\", 25), offset=args.get(\"offset\", None), includeDeleted=args.get(\"include_deleted\", None), startTimestamp=args.get(\"start_timestamp\",", "f\"# #{case_id}: Associated Events\\n\" ), outputs_prefix=\"Argus.Events\", outputs=result, raw_response=result, ) def", "%d, %Y, %H:%M:%S\" FETCH_TAG = demisto.params().get(\"fetch_tag\") \"\"\" HELPER FUNCTIONS \"\"\"", "description = args.get(\"description\", None) service = args.get(\"service\", None) case_type =", "= args.get(\"tags\", None) if not subject: raise ValueError(\"subject not specified\")", "of even number\", tags) tags = build_tags_from_list(tags) result = create_case(", "add_comment_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None) comment", "= None, proxies: dict = None, verify: bool = None", "= args.get(\"comment_id\", None) if not case_id: raise ValueError(\"case id not", "response[\"responseCode\"] == 200: return \"ok\" return ( f\"Unable to communicate", "return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Tags\", outputs=result, raw_response=result, ) def update_case_command(args: Dict[str,", "not fqdn: raise ValueError(\"fqdn not specified\") result = fetch_observations_for_domain(fqdn=fqdn) return", "timestamp=timestamp, customerID=customer_id, eventID=event_id ) readable_output = \"# Event payload\\n\" readable_output", "demisto.incidents(incidents) elif demisto.command() == \"argus-add-case-tag\": return_results(add_case_tag_command(demisto.args())) elif demisto.command() == \"argus-add-comment\":", "\"argus-remove-case-tag-by-key-value\": return_results(remove_case_tag_by_key_value_command(demisto.args())) elif demisto.command() == 
\"argus-update-case\": return_results(update_case_command(demisto.args())) elif demisto.command() ==", "via integration settings. next_run, incidents = fetch_incidents( last_run=demisto.getLastRun(), first_fetch_period=first_fetch_period, limit=demisto.params().get(\"max_fetch\",", "\"type\": case[\"type\"], \"category\": case[\"category\"][\"name\"] if case[\"category\"] else None, \"service\": case[\"service\"][\"name\"],", "associatedAttachmentID=args.get(\"associated_attachment_id\", None), ) return CommandResults( readable_output=pretty_print_comment( result[\"data\"], f\"# #{case_id}: Added", "return CommandResults( readable_output=pretty_print_case_metadata(result), outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result, ) def get_event_command(args: Dict[str,", "hasEvents=args.get(\"has_events\", None), internalComment=args.get(\"internal_comment\", None), ) return CommandResults( readable_output=pretty_print_case_metadata(result), outputs_prefix=\"Argus.Case\", outputs=result,", "from typing import Any, Dict, List, Union import logging from", "None), minCount=args.get(\"min_count\", None), associatedCaseID=argToList(args.get(\"associated_case_id\", None)), sourceIPMinBits=args.get(\"source_ip_min_bits\", None), destinationIPMinBits=args.get(\"destination_ip_min_bits\", None), startTimestamp=args.get(\"start_timestamp\",", "tag = {\"key\": key, \"value\": value} result = add_case_tag(caseID=case_id, tags=tag)", "Events\\n\" ), outputs_prefix=\"Argus.Events\", outputs=result, raw_response=result, ) def find_aggregated_events_command(args: Dict[str, Any])", "raw_response=result, ) def create_case_command(args: Dict[str, Any]) -> CommandResults: subject =", "from argus_api.api.events.v1.aggregated import ( find_aggregated_events, list_aggregated_events, ) from argus_api.api.events.v1.payload import", "case_id: raise ValueError(\"case id not specified\") if not tag_id: raise", "outputs_prefix=\"Argus.NIDS\", outputs=result, raw_response=result, ) 
def list_nids_events_command(args: Dict[str, Any]) -> CommandResults:", "string += pretty_print_comment(comment) return string def pretty_print_events(result: dict, title: str", "comment=args.get(\"comment\", None), originEmailAddress=args.get(\"origin_email_address\", None), hasEvents=args.get(\"has_events\", None), internalComment=args.get(\"internal_comment\", None), ) return", "\"customFields\": { \"argus_id\": str(case[\"id\"]), \"type\": case[\"type\"], \"category\": case[\"category\"][\"name\"] if case[\"category\"]", "raw_response=result, ) def get_event_command(args: Dict[str, Any]) -> CommandResults: event_type =", "CommandResults: case_id = args.get(\"case_id\", None) if not case_id: raise ValueError(\"case_id", "not specified\") result = get_event_by_path( type=event_type, timestamp=timestamp, customerID=customer_id, eventID=event_id )", "-> str: string = title if title else \"\" for", "import demistomock as demisto from CommonServerPython import * \"\"\" IMPORTS", "{}_\\n\".format( data[\"priority\"], data[\"status\"], pretty_print_date(data[\"lastUpdatedTime\"]) ) string += \"Reported by {}", "from argus_api.api.currentuser.v1.user import get_current_user from argus_api.api.cases.v2.case import ( add_case_tag, add_comment,", "raise ValueError(\"comment not specified\") result = add_comment( caseID=case_id, comment=comment, asReplyTo=args.get(\"as_reply_to\",", "delete_comment(caseID=case_id, commentID=comment_id) return CommandResults( readable_output=pretty_print_comment( result[\"data\"], f\"# #{case_id}: Deleted comment\\n\"", "parse_first_fetch(first_fetch: Any) -> Any: if isinstance(first_fetch, str): if first_fetch[0] !=", "def get_event_command(args: Dict[str, Any]) -> CommandResults: event_type = args.get(\"type\", None)", "{} lst = argToList(string) if len(lst) % 2 != 0:", "None) if not case_id: raise ValueError(\"case id not specified\") result", "CommandResults: # noinspection PyTypeChecker result = advanced_case_search( 
startTimestamp=args.get(\"start_timestamp\", None), endTimestamp=args.get(\"end_timestamp\",", "specified\") result = get_case_metadata_by_id( id=case_id, skipRedirect=args.get(\"skip_redirect\", None) ) return CommandResults(", "if not service: raise ValueError(\"service not specified\") if not case_type:", "raise ValueError(\"customer id not specified\") if not event_id: raise ValueError(\"event", "customerID=argToList(args.get(\"customer_id\", None)), caseID=argToList(args.get(\"case_id\", None)), customer=argToList(args.get(\"customer\", None)), type=argToList(args.get(\"case_type\", None)), service=argToList(args.get(\"service\", None)),", "not specified\") if not event_id: raise ValueError(\"event id not specified\")", "not specified\") if not key: raise ValueError(\"key not specified\") if", "f\"# #{case_id}: Comments\\n\" ), outputs_prefix=\"Argus.Comments\", outputs=result, raw_response=result, ) def remove_case_tag_by_id_command(args:", "\"occurred\": case[\"createdTime\"], \"severity\": argus_priority_to_demisto_severity(case[\"priority\"]), \"status\": argus_status_to_demisto_status(case[\"status\"]), \"details\": case[\"description\"], \"customFields\": {", "lst: return [] if len(lst) % 2 != 0: return", "return_results(list_aggregated_events_command(demisto.args())) elif demisto.command() == \"argus-get-payload\": return_results(get_payload_command(demisto.args())) elif demisto.command() == \"argus-get-pcap\":", "demisto.command() == \"argus-fetch-observations-for-ip\": return_results(fetch_observations_for_i_p_command(demisto.args())) # Log exceptions and return errors", ") return fileResult(f\"{event_id}_pcap\", result.content) def find_nids_events_command(args: Dict[str, Any]) -> CommandResults:", "CommandResults( readable_output=pretty_print_case_metadata(result, \"Case deleted\"), outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result, ) def delete_comment_command(args:", ") def get_payload_command(args: Dict[str, Any]) -> 
CommandResults: event_type = args.get(\"type\",", "limit: int = 25, min_severity: str = \"low\" ): start_timestamp", "comment = args.get(\"comment\", None) if not case_id: raise ValueError(\"case id", "list_case_attachments_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None) if", "return mapping.get(status, 0) def build_argus_priority_from_min_severity(min_severity: str) -> List[str]: severities =", "= edit_comment(caseID=case_id, commentID=comment_id, comment=comment) return CommandResults( readable_output=pretty_print_comment( result[\"data\"], f\"# #{case_id}:", "= f\"# #{case_id}: Case attachments\\n\" for attachment in result[\"data\"]: readable_output", "= args.get(\"key\", None) value = args.get(\"value\", None) if not case_id:", "Any]) -> CommandResults: case_id = args.get(\"case_id\", None) comment_id = args.get(\"comment_id\",", "ValueError(\"service not specified\") if not case_type: raise ValueError(\"case_type not specified\")", "args.get(\"comment_id\", None) if not case_id: raise ValueError(\"case id not specified\")", "None)), minSeverity=args.get(\"min_severity\", None), maxSeverity=args.get(\"max_severity\", None), limit=args.get(\"limit\", 25), offset=args.get(\"offset\", None), includeDeleted=args.get(\"include_deleted\",", "string \"\"\" COMMAND FUNCTIONS \"\"\" def test_module_command() -> str: response", "close_case_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None) if", "0) def build_argus_priority_from_min_severity(min_severity: str) -> List[str]: severities = [\"low\", \"medium\",", "return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Attachments\", outputs=result, raw_response=result, ) def list_case_tags_command(args: Dict[str,", "= [\"addedTimestamp\"] if sort_by == \"ascending\" else [\"-addedTimestamp\"] result =", "is {demisto.command()}\") try: if demisto.command() == \"test-module\": # This is", "FUNCTION \"\"\" def main() -> None: 
logging.getLogger(\"argus_cli\").setLevel(\"WARNING\") first_fetch_period = parse_first_fetch(", "\"argus-edit-comment\": return_results(edit_comment_command(demisto.args())) elif demisto.command() == \"argus-get-attachment\": return_results(get_attachment_command(demisto.args())) elif demisto.command() ==", "tags=tag) headers = [\"key\", \"value\", \"addedTime\"] readable_output = tableToMarkdown( f\"#{case_id}:", "get_payload from argus_api.api.events.v1.pcap import get_pcap from argus_api.api.events.v1.nids import find_n_i_d_s_events, list_n_i_d_s_events", "demisto.command() == \"argus-list-case-comments\": return_results(list_case_comments_command(demisto.args())) elif demisto.command() == \"argus-remove-case-tag-by-id\": return_results(remove_case_tag_by_id_command(demisto.args())) elif", "comment: raise ValueError(\"comment not specified\") result = edit_comment(caseID=case_id, commentID=comment_id, comment=comment)", "= remove_case_tag_by_id(caseID=case_id, tagID=tag_id) headers = [\"key\", \"value\", \"addedTime\", \"id\", \"flags\"]", "args.get(\"ip\", None) if not ip: raise ValueError(\"ip not specified\") result", "techAssigned=args.get(\"tech_assigned\", None), includeWorkflows=args.get(\"include_workflows\", None), includeDescription=args.get(\"include_description\", None), accessMode=argToList(args.get(\"access_mode\", None)), explicitAccess=argToList(args.get(\"explicit_access\", None)),", "readable_output += f\"_id: {result['data']['id']}_\\n\" return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Attachments\", outputs=result, raw_response=result,", "verify: bool = None ): argus_session.api_key = api_key argus_session.base_url =", "args.get(\"case_id\", None) key = args.get(\"key\", None) value = args.get(\"value\", None)", "None)), assetID=argToList(args.get(\"asset_id\", None)), tag=argToList(args.get(\"tag\", None)), workflow=argToList(args.get(\"workflow\", None)), field=argToList(args.get(\"field\", None)), 
keywords=argToList(args.get(\"keywords\",", "None), locationID=argToList(args.get(\"location_id\", None)), severity=argToList(args.get(\"severity\", None)), customer=argToList(args.get(\"customer\", None)), alarmID=argToList(args.get(\"alarm_id\", None)), attackCategoryID=argToList(args.get(\"attack_category_id\",", "demisto.command() == \"argus-remove-case-tag-by-key-value\": return_results(remove_case_tag_by_key_value_command(demisto.args())) elif demisto.command() == \"argus-update-case\": return_results(update_case_command(demisto.args())) elif", "maxSeverity=args.get(\"max_severity\", None), limit=args.get(\"limit\", 25), offset=args.get(\"offset\", None), includeDeleted=args.get(\"include_deleted\", None), minCount=args.get(\"min_count\", None),", "create_case_command(args: Dict[str, Any]) -> CommandResults: subject = args.get(\"subject\", None) description", "+= f\"{comment['comment']}\\n\\n\" string += f\"_id: {comment['id']}_\\n\" string += f\"_Flags: {str(comment['flags'])}_\\n\"", "CommandResults: case_id = args.get(\"case_id\", None) key = args.get(\"key\", None) value", "\"value\", \"addedTime\", \"id\"] readable_output = tableToMarkdown( f\"#{case_id}: Tags\", result[\"data\"], headers=headers", "attachment_id = args.get(\"attachment_id\", None) if not case_id: raise ValueError(\"case id", "showing {} events, from {} to {}_\\n\".format( result[\"count\"], result[\"size\"], result[\"offset\"],", "raw_response=result, ) def get_attachment_command(args: Dict[str, Any]) -> CommandResults: case_id =", "Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None) sort_by =", "None)), type=argToList(args.get(\"case_type\", None)), service=argToList(args.get(\"service\", None)), category=argToList(args.get(\"category\", None)), status=argToList(args.get(\"status\", None)), priority=argToList(args.get(\"priority\",", "f\"# #{case_id}: attachment metadata\\n\" readable_output += f\"#### *{result['data']['addedByUser']['userName']} - 
{result['data']['addedTime']}*\\n\"", "outputs=result, raw_response=result, ) def close_case_command(args: Dict[str, Any]) -> CommandResults: case_id", "-> CommandResults: case_id = args.get(\"case_id\", None) sort_by = args.get(\"sort_by\", None)", "result[\"data\"], f\"# #{case_id}: Updated comment\\n\" ), outputs_prefix=\"Argus.Comment\", outputs=result, raw_response=result, )", "attachment_id: raise ValueError(\"attachment id not specified\") result = get_attachment(caseID=case_id, attachmentID=attachment_id)", "event_type: raise ValueError(\"event type not specified\") if not timestamp: raise", "ValueError(\"ip not specified\") result = fetch_observations_for_i_p(ip=ip) return CommandResults( readable_output=tableToMarkdown(f'IP observations", "\"\"\" IMPORTS \"\"\" import json import urllib3 import dateparser import", "Any]) -> CommandResults: case_id = args.get(\"case_id\", None) comment = args.get(\"comment\",", "skipFutureEvents=args.get(\"skip_future_events\", None), exclude=args.get(\"exclude\", None), locationID=argToList(args.get(\"location_id\", None)), severity=argToList(args.get(\"severity\", None)), customer=argToList(args.get(\"customer\", None)),", "attachmentID=attachment_id) return fileResult(attachment_id, result.content) def edit_comment_command(args: Dict[str, Any]) -> CommandResults:", "str_to_dict(string: str) -> dict: if not string: return {} lst", "download_attachment, edit_comment, get_attachment, get_case_metadata_by_id, list_case_attachments, list_case_tags, list_case_comments, remove_case_tag_by_id, remove_case_tag_by_key_value, update_case,", "\"argus-update-case\": return_results(update_case_command(demisto.args())) elif demisto.command() == \"argus-get-event\": return_results(get_event_command(demisto.args())) elif demisto.command() ==", "argus_api.api.events.v1.payload import get_payload from argus_api.api.events.v1.pcap import get_pcap from argus_api.api.events.v1.nids import", "string += \"* * *\\n\" return string def 
pretty_print_comments(comments: list,", "id not specified\") if not comment_id: raise ValueError(\"comment id not", "not specified\") if not timestamp: raise ValueError(\"timestamp not specified\") if", "None) if not fqdn: raise ValueError(\"fqdn not specified\") result =", "result = list_case_comments( caseID=case_id, beforeComment=args.get(\"before_comment\", None), afterComment=args.get(\"after_comment\", None), offset=args.get(\"offset\", None),", "case_id = args.get(\"case_id\", None) if not case_id: raise ValueError(\"case id", "severity=argToList(args.get(\"severity\", None)), customer=argToList(args.get(\"customer\", None)), alarmID=argToList(args.get(\"alarm_id\", None)), attackCategoryID=argToList(args.get(\"attack_category_id\", None)), sourceGeoCountry=argToList(args.get(\"source_geo_country\", None)),", "Dict[str, Any]) -> CommandResults: # noinspection PyTypeChecker result = advanced_case_search(", "None)), exclude=args.get(\"exclude\", None), required=args.get(\"required\", None), customerID=argToList(args.get(\"customer_id\", None)), caseID=argToList(args.get(\"case_id\", None)), customer=argToList(args.get(\"customer\",", "\"argus-find-aggregated-events\": return_results(find_aggregated_events_command(demisto.args())) elif demisto.command() == \"argus-list-aggregated-events\": return_results(list_aggregated_events_command(demisto.args())) elif demisto.command() ==", "None) if last_run else None # noinspection PyTypeChecker result =", "None)), geoCountry=argToList(args.get(\"geo_country\", None)), properties=str_to_dict(args.get(\"properties\", None)), exactMatchProperties=args.get(\"exact_match_properties\", None), sensorID=argToList(args.get(\"sensor_id\", None)), subCriteria=argToList(args.get(\"sub_criteria\",", "integration settings. 
next_run, incidents = fetch_incidents( last_run=demisto.getLastRun(), first_fetch_period=first_fetch_period, limit=demisto.params().get(\"max_fetch\", 25),", "else \"\" string += f\"#### *{comment['addedByUser']['userName']} - {pretty_print_date(comment['addedTime'])}*\\n\" string +=", "value: raise ValueError(\"value not specified\") result = remove_case_tag_by_key_value(caseID=case_id, tagKey=key, tagValue=value)", "raise ValueError(\"fqdn not specified\") result = fetch_observations_for_domain(fqdn=fqdn) return CommandResults( readable_output=tableToMarkdown(", "return_results(remove_case_tag_by_key_value_command(demisto.args())) elif demisto.command() == \"argus-update-case\": return_results(update_case_command(demisto.args())) elif demisto.command() == \"argus-get-event\":", "= \"%b %d, %Y, %H:%M:%S\" FETCH_TAG = demisto.params().get(\"fetch_tag\") \"\"\" HELPER", "raw_response=result, ) def fetch_observations_for_i_p_command(args: Dict[str, Any]) -> CommandResults: ip =", "\"argus-remove-case-tag-by-id\": return_results(remove_case_tag_by_id_command(demisto.args())) elif demisto.command() == \"argus-remove-case-tag-by-key-value\": return_results(remove_case_tag_by_key_value_command(demisto.args())) elif demisto.command() ==", "None), ) return CommandResults( readable_output=pretty_print_comment( result[\"data\"], f\"# #{case_id}: Added comment\\n\"", "f\"#{case['id']}: {case['subject']}\", \"occurred\": case[\"createdTime\"], \"severity\": argus_priority_to_demisto_severity(case[\"priority\"]), \"status\": argus_status_to_demisto_status(case[\"status\"]), \"details\": case[\"description\"],", "case[\"category\"][\"name\"] if case[\"category\"] else None, \"service\": case[\"service\"][\"name\"], \"lastUpdatedTime\": case[\"lastUpdatedTime\"], \"createdTimestamp\":", "0, \"pendingSoc\": 0, \"pendingVendor\": 0, \"pendingClose\": 0, \"workingSoc\": 1, \"workingCustomer\":", "f\"# #{case_id}: Updated comment\\n\" ), outputs_prefix=\"Argus.Comment\", 
outputs=result, raw_response=result, ) def", "first_fetch_period=first_fetch_period, limit=demisto.params().get(\"max_fetch\", 25), min_severity=demisto.params().get(\"min_severity\", \"low\"), ) demisto.setLastRun(next_run) demisto.incidents(incidents) elif demisto.command()", "= args.get(\"value\", None) if not case_id: raise ValueError(\"case_id not specified\")", "*\\n\" return string def pretty_print_comments(comments: list, title: str = None)", "None), accessMode=argToList(args.get(\"access_mode\", None)), explicitAccess=argToList(args.get(\"explicit_access\", None)), sortBy=argToList(args.get(\"sort_by\", None)), includeFlags=argToList(args.get(\"include_flags\", None)), excludeFlags=argToList(args.get(\"exclude_flags\",", "def get_attachment_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None)", "readable_output=pretty_print_events(dict(result), \"# List Events\\n\"), outputs_prefix=\"Argus.Events\", outputs=result, raw_response=result, ) def get_payload_command(args:", "not case_id: raise ValueError(\"case id not specified\") result = delete_case(caseID=case_id)", "return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Tags\", outputs=result, raw_response=result, ) def list_case_comments_command(args: Dict[str,", "if not event_id: raise ValueError(\"event id not specified\") result =", "\"-\": first_fetch = f\"-{first_fetch}\" return first_fetch def build_tags_from_list(lst: list) ->", "return_results(delete_comment_command(demisto.args())) elif demisto.command() == \"argus-download-attachment\": return_results(download_attachment_command(demisto.args())) elif demisto.command() == \"argus-edit-comment\":", "List NIDS Events\\n\"), outputs_prefix=\"Argus.NIDS\", outputs=result, raw_response=result, ) def search_records_command(args: Dict[str,", "def fetch_observations_for_domain_command(args: Dict[str, Any]) -> CommandResults: fqdn = args.get(\"fqdn\", None)", "= get_event_by_path( type=event_type, 
timestamp=timestamp, customerID=customer_id, eventID=event_id ) return CommandResults( readable_output=tableToMarkdown(f\"Event:", "specified\") # noinspection PyTypeChecker result = search_records( query=query, aggregateResult=args.get(\"aggregate_result\", None),", "tags\", result[\"data\"], headers=headers ) return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Tags\", outputs=result, raw_response=result,", "lst[i + 1] for i in range(0, len(lst), 2)} def", "includeDeleted=args.get(\"include_deleted\", None), subCriteria=argToList(args.get(\"sub_criteria\", None)), exclude=args.get(\"exclude\", None), required=args.get(\"required\", None), customerID=argToList(args.get(\"customer_id\", None)),", ") return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Tags\", outputs=result, raw_response=result, ) def add_comment_command(args:", "= 25, min_severity: str = \"low\" ): start_timestamp = last_run.get(\"start_time\",", "{attachment['id']}_\\n\" readable_output += \"* * *\\n\" return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Attachments\",", "( add_case_tag, add_comment, advanced_case_search, close_case, create_case, delete_case, delete_comment, download_attachment, edit_comment,", "None)), excludeFlags=argToList(args.get(\"exclude_flags\", None)), ) readable_output = f\"Advanced Case Search: {result['count']}", "import * \"\"\" IMPORTS \"\"\" import json import urllib3 import", "None), includeDeleted=args.get(\"include_deleted\", None), startTimestamp=args.get(\"start_timestamp\", \"-24hours\"), endTimestamp=args.get(\"end_timestamp\", \"now\"), sortBy=argToList(args.get(\"sort_by\", None)), includeFlags=argToList(args.get(\"include_flags\",", "sort_by: sort_by = [\"addedTimestamp\"] if sort_by == \"ascending\" else [\"-addedTimestamp\"]", "CommandResults( readable_output=pretty_print_events( dict(result), f\"# #{case_id}: Associated Events\\n\" ), outputs_prefix=\"Argus.Events\", 
outputs=result,", "%Y, %H:%M:%S\" FETCH_TAG = demisto.params().get(\"fetch_tag\") \"\"\" HELPER FUNCTIONS \"\"\" def", "None)), alarmID=argToList(args.get(\"alarm_id\", None)), attackCategoryID=argToList(args.get(\"attack_category_id\", None)), sourceGeoCountry=argToList(args.get(\"source_geo_country\", None)), destinationGeoCountry=argToList(args.get(\"destination_geo_country\", None)), geoCountry=argToList(args.get(\"geo_country\",", "if not case_id: raise ValueError(\"case id not specified\") if not", "dict, title: str = None) -> str: string = title", "argus_priority_to_demisto_severity(case[\"priority\"]), \"status\": argus_status_to_demisto_status(case[\"status\"]), \"details\": case[\"description\"], \"customFields\": { \"argus_id\": str(case[\"id\"]), \"type\":", "not attachment_id: raise ValueError(\"attachment id not specified\") result = download_attachment(caseID=case_id,", "ValueError(\"tag id not specified\") result = remove_case_tag_by_id(caseID=case_id, tagID=tag_id) headers =", "Any]) -> CommandResults: result = list_aggregated_events( customerID=args.get(\"customer_id\", None), signature=args.get(\"signature\", None),", "close_case, create_case, delete_case, delete_comment, download_attachment, edit_comment, get_attachment, get_case_metadata_by_id, list_case_attachments, list_case_tags,", "raise ValueError(\"case_id not specified\") result = close_case( caseID=case_id, comment=args.get(\"comment\", None),", "str = None) -> str: string = title if title", "+= f\"_id: {attachment['id']}_\\n\" readable_output += \"* * *\\n\" return CommandResults(", "command to run after activated via integration settings. 
next_run, incidents", "argus_api.api.cases.v2.case import ( add_case_tag, add_comment, advanced_case_search, close_case, create_case, delete_case, delete_comment,", "{attachment['addedTime']}*\\n\" readable_output += f\"{attachment['name']} ({attachment['mimeType']}, {attachment['size']} kb)\\n\\n\" readable_output += f\"_id:", "Any]) -> CommandResults: ip = args.get(\"ip\", None) if not ip:", "urllib3.disable_warnings() \"\"\" CONSTANTS \"\"\" DATE_FORMAT = \"%Y-%m-%dT%H:%M:%SZ\" PRETTY_DATE_FORMAT = \"%b", "= args.get(\"event_id\", None) if not event_type: raise ValueError(\"event type not", "readable_output = \"# Event payload\\n\" readable_output += f\"Event: {event_id}, type:", "None), limit=args.get(\"limit\", 25), offset=args.get(\"offset\", None), includeDeleted=args.get(\"include_deleted\", None), minCount=args.get(\"min_count\", None), associatedCaseID=argToList(args.get(\"associated_case_id\",", "-> CommandResults: ip = args.get(\"ip\", None) if not ip: raise", "specified\") if not attachment_id: raise ValueError(\"attachment id not specified\") result", "raise ValueError(\"case_id not specified\") result = list_case_tags( caseID=case_id, limit=args.get(\"limit\", None),", "== \"argus-list-case-attachments\": return_results(list_case_attachments_command(demisto.args())) elif demisto.command() == \"argus-list-case-tags\": return_results(list_case_tags_command(demisto.args())) elif demisto.command()", "to execute {demisto.command()} command.\\nError:\\n{str(e)}\" ) \"\"\" ENTRY POINT \"\"\" if", "demisto.params().get(\"first_fetch\", \"-1 day\") ) set_argus_settings( demisto.params().get(\"api_key\"), demisto.params().get(\"api_url\"), handle_proxy(), demisto.params().get(\"insecure\", None),", "None), includeDescription=args.get(\"include_description\", None), accessMode=argToList(args.get(\"access_mode\", None)), explicitAccess=argToList(args.get(\"explicit_access\", None)), sortBy=argToList(args.get(\"sort_by\", None)), 
includeFlags=argToList(args.get(\"include_flags\",", "caseID=case_id, limit=args.get(\"limit\", None), offset=args.get(\"offset\", None) ) readable_output = f\"# #{case_id}:", "demisto.setLastRun(next_run) demisto.incidents(incidents) elif demisto.command() == \"argus-add-case-tag\": return_results(add_case_tag_command(demisto.args())) elif demisto.command() ==", "elif demisto.command() == \"argus-list-case-tags\": return_results(list_case_tags_command(demisto.args())) elif demisto.command() == \"argus-list-case-comments\": return_results(list_case_comments_command(demisto.args()))", "lst[i], \"value\": lst[i + 1]}) return tags def str_to_dict(string: str)", "f\"_Flags: {str(comment['flags'])}_\\n\" if comment[\"flags\"] else \"\" string += \"* *", "title if title else \"\" for comment in comments: string", "list_case_tags_command(args: Dict[str, Any]) -> CommandResults: case_id = args.get(\"case_id\", None) if", "raw_response=result, ) def close_case_command(args: Dict[str, Any]) -> CommandResults: case_id =", "proxies argus_session.verify = verify def argus_priority_to_demisto_severity(priority: str) -> int: mapping", "tag_id: raise ValueError(\"tag id not specified\") result = remove_case_tag_by_id(caseID=case_id, tagID=tag_id)", "skipRedirect=args.get(\"skip_redirect\", None) ) return CommandResults( readable_output=pretty_print_case_metadata(result), outputs_prefix=\"Argus.Case\", outputs=result, raw_response=result, )", "remove_case_tag_by_id, remove_case_tag_by_key_value, update_case, ) from argus_api.api.events.v1 import get_event_by_path from argus_api.api.events.v1.case.case", "None, verify: bool = None ): argus_session.api_key = api_key argus_session.base_url", "= title if title else \"\" string += f\"#### *{comment['addedByUser']['userName']}", "demisto.command() == \"argus-update-case\": return_results(update_case_command(demisto.args())) elif demisto.command() == \"argus-get-event\": return_results(get_event_command(demisto.args())) elif", "None), 
category=args.get(\"category\", None), reporter=args.get(\"reporter\", None), assignedUser=args.get(\"assigned_user\", None), assignedTech=args.get(\"assigned_tech\", None), customerReference=args.get(\"customer_reference\",", "get_pcap_command(args: Dict[str, Any]) -> Any: event_type = args.get(\"type\", None) timestamp", "{response['responseCode']}, {response}\" ) def fetch_incidents( last_run: dict, first_fetch_period: str, limit:", "if sort_by == \"ascending\" else [\"-addedTimestamp\"] result = list_case_comments( caseID=case_id,", "readable_output = f\"# #{case_id}: attachment metadata\\n\" readable_output += f\"#### *{result['data']['addedByUser']['userName']}", "Dict[str, Any]) -> CommandResults: ip = args.get(\"ip\", None) if not", "list) -> List[Dict]: if not lst: return [] if len(lst)", "from argus_api import session as argus_session from argus_api.api.currentuser.v1.user import get_current_user", "CommandResults( readable_output=pretty_print_events(dict(result), \"# Find events\\n\"), outputs_prefix=\"Argus.Events\", outputs=result, raw_response=result, ) def", "for case in result[\"data\"]: incidents.append( { \"name\": f\"#{case['id']}: {case['subject']}\", \"occurred\":", "not case_id: raise ValueError(\"case_id not specified\") if not comment: raise", "demistomock as demisto from CommonServerPython import * \"\"\" IMPORTS \"\"\"", "if isinstance(first_fetch, str): if first_fetch[0] != \"-\": first_fetch = f\"-{first_fetch}\"", "CommandResults( readable_output=tableToMarkdown(\"PDNS records\", result[\"data\"]), outputs_prefix=\"Argus.PDNS\", outputs=result, raw_response=result, ) def fetch_observations_for_domain_command(args:", "[\"low\", \"medium\", \"high\", \"critical\"] min_severity_list = [] for severity in", "None) case_type = args.get(\"type\", None) tags = args.get(\"tags\", None) if", "import get_current_user from argus_api.api.cases.v2.case import ( add_case_tag, add_comment, advanced_case_search, close_case,", ") return CommandResults( 
readable_output=pretty_print_comment( result[\"data\"], f\"# #{case_id}: Added comment\\n\" ),", "None), ) readable_output = f\"# #{case_id}: close case\\n\" readable_output +=", "Any]) -> CommandResults: case_id = args.get(\"case_id\", None) tag_id = args.get(\"tag_id\",", "last_run else None # noinspection PyTypeChecker result = advanced_case_search( startTimestamp=start_timestamp", "outputs_prefix=\"Argus.Events\", outputs=result, raw_response=result, ) def find_aggregated_events_command(args: Dict[str, Any]) -> CommandResults:", "else first_fetch_period, endTimestamp=\"now\", limit=limit, sortBy=[\"createdTimestamp\"], priority=build_argus_priority_from_min_severity(min_severity), subCriteria=[ {\"exclude\": True, \"status\":", "return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Tags\", outputs=result, raw_response=result, ) def add_comment_command(args: Dict[str,", "args.get(\"case_id\", None) if not case_id: raise ValueError(\"case id not specified\")", "demisto.command() == \"argus-delete-case\": return_results(delete_case_command(demisto.args())) elif demisto.command() == \"argus-delete-comment\": return_results(delete_comment_command(demisto.args())) elif", "not event_id: raise ValueError(\"event id not specified\") result = get_event_by_path(", "return \"ok\" return ( f\"Unable to communicate with Argus API", "return_results(add_case_tag_command(demisto.args())) elif demisto.command() == \"argus-add-comment\": return_results(add_comment_command(demisto.args())) elif demisto.command() == \"argus-advanced-case-search\":", "associatedCaseID=argToList(args.get(\"associated_case_id\", None)), sourceIPMinBits=args.get(\"source_ip_min_bits\", None), destinationIPMinBits=args.get(\"destination_ip_min_bits\", None), startTimestamp=args.get(\"start_timestamp\", \"-24hours\"), endTimestamp=args.get(\"end_timestamp\", \"now\"),", "None ): argus_session.api_key = api_key argus_session.base_url = base_url argus_session.proxies =", 
"test_module_command() -> str: response = get_current_user() if response[\"responseCode\"] == 200:", "\"argus-get-case-metadata-by-id\": return_results(get_case_metadata_by_id_command(demisto.args())) elif demisto.command() == \"argus-list-case-attachments\": return_results(list_case_attachments_command(demisto.args())) elif demisto.command() ==", "-> CommandResults: case_id = args.get(\"case_id\", None) attachment_id = args.get(\"attachment_id\", None)", "str) -> int: mapping = { \"pendingCustomer\": 0, \"pendingSoc\": 0,", "event_id: raise ValueError(\"event id not specified\") result = get_payload( type=event_type,", "return CommandResults( readable_output=readable_output, outputs_prefix=\"Argus.Tags\", outputs=result, raw_response=result, ) def remove_case_tag_by_key_value_command(args: Dict[str,", "= None) -> str: string = title if title else", "-> CommandResults: # noinspection PyTypeChecker result = find_n_i_d_s_events( skipFutureEvents=args.get(\"skip_future_events\", None),", "fileResult(f\"{event_id}_pcap\", result.content) def find_nids_events_command(args: Dict[str, Any]) -> CommandResults: # noinspection", "= args.get(\"tag_id\", None) if not case_id: raise ValueError(\"case id not" ]
[ "args): pass class day03part2(day03): def solve(self, args): pass class examples(unittest.TestCase):", "def test_part1(self): day3 = day03part1() def test_part2(self): day3 = day03part2()", "lib.common import * filename = \"inputs/2020_12_03_input.txt\" class day03: def __init__(self):", "pass class day03part1(day03): def solve(self, args): pass class day03part2(day03): def", "def test_examples_part1(self): day3 = day03part1() # self.assetTrue() def test_examples_part2(self): day3", "__init__(self): pass class day03part1(day03): def solve(self, args): pass class day03part2(day03):", "day03part1() # self.assetTrue() def test_examples_part2(self): day3 = day03part2() # self.assetTrue()", "= day03part2() # self.assetTrue() class solutions(unittest.TestCase): def test_part1(self): day3 =", "day3 = day03part2() # self.assetTrue() class solutions(unittest.TestCase): def test_part1(self): day3", "args): pass class examples(unittest.TestCase): def test_examples_part1(self): day3 = day03part1() #", "os import unittest from lib.common import * filename = \"inputs/2020_12_03_input.txt\"", "self.assetTrue() def test_examples_part2(self): day3 = day03part2() # self.assetTrue() class solutions(unittest.TestCase):", "import unittest from lib.common import * filename = \"inputs/2020_12_03_input.txt\" class", "examples(unittest.TestCase): def test_examples_part1(self): day3 = day03part1() # self.assetTrue() def test_examples_part2(self):", "def test_examples_part2(self): day3 = day03part2() # self.assetTrue() class solutions(unittest.TestCase): def", "pass class day03part2(day03): def solve(self, args): pass class examples(unittest.TestCase): def", "# self.assetTrue() class solutions(unittest.TestCase): def test_part1(self): day3 = day03part1() def", "import sys, os import unittest from lib.common import * filename", "= \"inputs/2020_12_03_input.txt\" class day03: def __init__(self): pass class day03part1(day03): def", "class day03part2(day03): def solve(self, args): pass class 
examples(unittest.TestCase): def test_examples_part1(self):", "test_examples_part2(self): day3 = day03part2() # self.assetTrue() class solutions(unittest.TestCase): def test_part1(self):", "def solve(self, args): pass class day03part2(day03): def solve(self, args): pass", "* filename = \"inputs/2020_12_03_input.txt\" class day03: def __init__(self): pass class", "solve(self, args): pass class examples(unittest.TestCase): def test_examples_part1(self): day3 = day03part1()", "sys, os import unittest from lib.common import * filename =", "class examples(unittest.TestCase): def test_examples_part1(self): day3 = day03part1() # self.assetTrue() def", "test_examples_part1(self): day3 = day03part1() # self.assetTrue() def test_examples_part2(self): day3 =", "pass class examples(unittest.TestCase): def test_examples_part1(self): day3 = day03part1() # self.assetTrue()", "day03part1(day03): def solve(self, args): pass class day03part2(day03): def solve(self, args):", "\"inputs/2020_12_03_input.txt\" class day03: def __init__(self): pass class day03part1(day03): def solve(self,", "class day03: def __init__(self): pass class day03part1(day03): def solve(self, args):", "def solve(self, args): pass class examples(unittest.TestCase): def test_examples_part1(self): day3 =", "from lib.common import * filename = \"inputs/2020_12_03_input.txt\" class day03: def", "filename = \"inputs/2020_12_03_input.txt\" class day03: def __init__(self): pass class day03part1(day03):", "# self.assetTrue() def test_examples_part2(self): day3 = day03part2() # self.assetTrue() class", "day03part2() # self.assetTrue() class solutions(unittest.TestCase): def test_part1(self): day3 = day03part1()", "python3 import sys, os import unittest from lib.common import *", "#!/usr/bin/env python3 import sys, os import unittest from lib.common import", "def __init__(self): pass class day03part1(day03): def solve(self, args): pass class", "solutions(unittest.TestCase): def test_part1(self): day3 = day03part1() def 
test_part2(self): day3 =", "solve(self, args): pass class day03part2(day03): def solve(self, args): pass class", "day3 = day03part1() # self.assetTrue() def test_examples_part2(self): day3 = day03part2()", "= day03part1() # self.assetTrue() def test_examples_part2(self): day3 = day03part2() #", "self.assetTrue() class solutions(unittest.TestCase): def test_part1(self): day3 = day03part1() def test_part2(self):", "class solutions(unittest.TestCase): def test_part1(self): day3 = day03part1() def test_part2(self): day3", "unittest from lib.common import * filename = \"inputs/2020_12_03_input.txt\" class day03:", "import * filename = \"inputs/2020_12_03_input.txt\" class day03: def __init__(self): pass", "day03part2(day03): def solve(self, args): pass class examples(unittest.TestCase): def test_examples_part1(self): day3", "class day03part1(day03): def solve(self, args): pass class day03part2(day03): def solve(self,", "day03: def __init__(self): pass class day03part1(day03): def solve(self, args): pass" ]
[ "from bph.core.server.template import BphTemplateServer as TemplateServer from bph.core.sample import BphSample", "BphLabFile as LabFile from bph.core.session import BphSession as Session session", "as Session session = Session(project_name='blackhat_arsenal_2019') session.start() templateserver = TemplateServer() templateserver.start()", "from bph.tools.windows.capturebat import BphCaptureBat as CaptureBat # Core Imports from", "templateserver = TemplateServer() templateserver.start() capturebat = CaptureBat() capturebat.cleanup() capturebat.execute() capturebat.start()", "session.start() templateserver = TemplateServer() templateserver.start() capturebat = CaptureBat() capturebat.cleanup() capturebat.execute()", "= Session(project_name='blackhat_arsenal_2019') session.start() templateserver = TemplateServer() templateserver.start() capturebat = CaptureBat()", "import BphCaptureBat as CaptureBat # Core Imports from bph.core.server.template import", "as CaptureBat # Core Imports from bph.core.server.template import BphTemplateServer as", "LabFile from bph.core.session import BphSession as Session session = Session(project_name='blackhat_arsenal_2019')", "bph.tools.windows.capturebat import BphCaptureBat as CaptureBat # Core Imports from bph.core.server.template", "bph.core.session import BphSession as Session session = Session(project_name='blackhat_arsenal_2019') session.start() templateserver", "= TemplateServer() templateserver.start() capturebat = CaptureBat() capturebat.cleanup() capturebat.execute() capturebat.start() capturebat.execute(delay=15)", "= CaptureBat() capturebat.cleanup() capturebat.execute() capturebat.start() capturebat.execute(delay=15) capturebat.stop() capturebat.execute() capturebat.collect() capturebat.execute()", "CaptureBat # Core Imports from bph.core.server.template import BphTemplateServer as TemplateServer", "from bph.core.session import BphSession as Session session = Session(project_name='blackhat_arsenal_2019') session.start()", 
"BphTemplateServer as TemplateServer from bph.core.sample import BphSample as Sample from", "import BphLabFile as LabFile from bph.core.session import BphSession as Session", "TemplateServer() templateserver.start() capturebat = CaptureBat() capturebat.cleanup() capturebat.execute() capturebat.start() capturebat.execute(delay=15) capturebat.stop()", "bph.core.sample import BphLabFile as LabFile from bph.core.session import BphSession as", "capturebat = CaptureBat() capturebat.cleanup() capturebat.execute() capturebat.start() capturebat.execute(delay=15) capturebat.stop() capturebat.execute() capturebat.collect()", "Session(project_name='blackhat_arsenal_2019') session.start() templateserver = TemplateServer() templateserver.start() capturebat = CaptureBat() capturebat.cleanup()", "BphSample as Sample from bph.core.sample import BphLabFile as LabFile from", "from bph.core.sample import BphLabFile as LabFile from bph.core.session import BphSession", "import BphSession as Session session = Session(project_name='blackhat_arsenal_2019') session.start() templateserver =", "import BphTemplateServer as TemplateServer from bph.core.sample import BphSample as Sample", "from bph.core.sample import BphSample as Sample from bph.core.sample import BphLabFile", "as LabFile from bph.core.session import BphSession as Session session =", "# Core Imports from bph.core.server.template import BphTemplateServer as TemplateServer from", "<gh_stars>10-100 # Tool Imports from bph.tools.windows.capturebat import BphCaptureBat as CaptureBat", "Sample from bph.core.sample import BphLabFile as LabFile from bph.core.session import", "session = Session(project_name='blackhat_arsenal_2019') session.start() templateserver = TemplateServer() templateserver.start() capturebat =", "templateserver.start() capturebat = CaptureBat() capturebat.cleanup() capturebat.execute() capturebat.start() capturebat.execute(delay=15) capturebat.stop() capturebat.execute()", "import BphSample as Sample from 
bph.core.sample import BphLabFile as LabFile", "as Sample from bph.core.sample import BphLabFile as LabFile from bph.core.session", "TemplateServer from bph.core.sample import BphSample as Sample from bph.core.sample import", "Core Imports from bph.core.server.template import BphTemplateServer as TemplateServer from bph.core.sample", "Session session = Session(project_name='blackhat_arsenal_2019') session.start() templateserver = TemplateServer() templateserver.start() capturebat", "BphCaptureBat as CaptureBat # Core Imports from bph.core.server.template import BphTemplateServer", "CaptureBat() capturebat.cleanup() capturebat.execute() capturebat.start() capturebat.execute(delay=15) capturebat.stop() capturebat.execute() capturebat.collect() capturebat.execute() capturebat.files()", "Tool Imports from bph.tools.windows.capturebat import BphCaptureBat as CaptureBat # Core", "Imports from bph.tools.windows.capturebat import BphCaptureBat as CaptureBat # Core Imports", "Imports from bph.core.server.template import BphTemplateServer as TemplateServer from bph.core.sample import", "bph.core.sample import BphSample as Sample from bph.core.sample import BphLabFile as", "bph.core.server.template import BphTemplateServer as TemplateServer from bph.core.sample import BphSample as", "BphSession as Session session = Session(project_name='blackhat_arsenal_2019') session.start() templateserver = TemplateServer()", "as TemplateServer from bph.core.sample import BphSample as Sample from bph.core.sample", "# Tool Imports from bph.tools.windows.capturebat import BphCaptureBat as CaptureBat #" ]
[ "as a point of comparison. y_computed = (tau_eval * dt_dtau)**3", "y_i = np.dot(Ai, y_given) + dt_dtau * np.dot(Bi, ydot_given) ydot_i", "values and rates provided at [-1, 1] in tau space", "comparison. y_computed = (tau_eval * dt_dtau)**3 ydot_computed = 3.0 *", "y_given) + np.dot(Bd, ydot_given) # Compute our function as a", "time-derivative y_given = [4.0, 4.0] ydot_given = [-4.0, 4.0] #", "np.dot(Ad, y_given) + np.dot(Bd, ydot_given) # Compute our function as", "0.0, 8.0] ydot_given = [12.0, 0.0, 12.0] # Get the", "Ai, Bi, Ad, Bd = hermite_matrices(tau_given, tau_eval) # Interpolate y", "import unittest import numpy as np from numpy.testing import assert_almost_equal", "space tau_given = [-1.0, 0.0, 1.0] tau_eval = np.linspace(-1, 1,", "results assert_almost_equal(y_i, y_computed) assert_almost_equal(ydot_i, ydot_computed) def test_cubic(self): # Interpolate with", "= 4.0 / 2.0 # Provide values for y =", "= [4.0, 4.0] ydot_given = [-4.0, 4.0] # Get the", "# Get the hermite matrices. Ai, Bi, Ad, Bd =", "dt_dtau)**2 ydot_computed = 2.0 * (tau_eval * dt_dtau) # Check", "= [12.0, 0.0, 12.0] # Get the hermite matrices. Ai,", "= (tau_eval * dt_dtau)**3 ydot_computed = 3.0 * (tau_eval *", "1] in tau space tau_given = [-1.0, 1.0] tau_eval =", "dt_dtau)**2 # Check results assert_almost_equal(y_i, y_computed) assert_almost_equal(ydot_i, ydot_computed) if __name__", "Get the hermite matrices. 
Ai, Bi, Ad, Bd = hermite_matrices(tau_given,", "np.dot(Ai, y_given) + dt_dtau * np.dot(Bi, ydot_given) ydot_i = (1.0", "+ np.dot(Bd, ydot_given) # Compute our function as a point", "for y = t**2 and its time-derivative y_given = [4.0,", "# In time space use the boundaries [-2, 2] dt_dtau", "def test_cubic(self): # Interpolate with values and rates provided at", "tau_given = [-1.0, 1.0] tau_eval = np.linspace(-1, 1, 100) #", "= 2.0 * (tau_eval * dt_dtau) # Check results assert_almost_equal(y_i,", "tau_eval) # Interpolate y and ydot at tau_eval points in", "= hermite_matrices(tau_given, tau_eval) # Interpolate y and ydot at tau_eval", "[-1, 1] in tau space tau_given = [-1.0, 0.0, 1.0]", "from dymos.utils.hermite import hermite_matrices class TestHermiteMatrices(unittest.TestCase): def test_quadratic(self): # Interpolate", "Provide values for y = t**2 and its time-derivative y_given", "and its time-derivative y_given = [4.0, 4.0] ydot_given = [-4.0,", "dt_dtau * np.dot(Bi, ydot_given) ydot_i = (1.0 / dt_dtau) *", "[-1, 1] in tau space tau_given = [-1.0, 1.0] tau_eval", "test_quadratic(self): # Interpolate with values and rates provided at [-1,", "as np from numpy.testing import assert_almost_equal from dymos.utils.hermite import hermite_matrices", "Compute our function as a point of comparison. y_computed =", "comparison. y_computed = (tau_eval * dt_dtau)**2 ydot_computed = 2.0 *", "* dt_dtau)**3 ydot_computed = 3.0 * (tau_eval * dt_dtau)**2 #", "= [-1.0, 1.0] tau_eval = np.linspace(-1, 1, 100) # In", "rates provided at [-1, 1] in tau space tau_given =", "* dt_dtau) # Check results assert_almost_equal(y_i, y_computed) assert_almost_equal(ydot_i, ydot_computed) def", "= [-4.0, 4.0] # Get the hermite matrices. Ai, Bi,", "ydot_computed) if __name__ == '__main__': # pragma: no cover unittest.main()", "the hermite matrices. Ai, Bi, Ad, Bd = hermite_matrices(tau_given, tau_eval)", "of comparison. y_computed = (tau_eval * dt_dtau)**3 ydot_computed = 3.0", "hermite matrices. 
Ai, Bi, Ad, Bd = hermite_matrices(tau_given, tau_eval) #", "t**2 and its time-derivative y_given = [-8.0, 0.0, 8.0] ydot_given", "(1.0 / dt_dtau) * np.dot(Ad, y_given) + np.dot(Bd, ydot_given) #", "ydot_given) # Compute our function as a point of comparison.", "* (tau_eval * dt_dtau)**2 # Check results assert_almost_equal(y_i, y_computed) assert_almost_equal(ydot_i,", "a point of comparison. y_computed = (tau_eval * dt_dtau)**2 ydot_computed", "ydot_computed = 2.0 * (tau_eval * dt_dtau) # Check results", "# Check results assert_almost_equal(y_i, y_computed) assert_almost_equal(ydot_i, ydot_computed) def test_cubic(self): #", "= np.linspace(-1, 1, 101) # In time space use the", "(tau_eval * dt_dtau)**2 ydot_computed = 2.0 * (tau_eval * dt_dtau)", "* (tau_eval * dt_dtau) # Check results assert_almost_equal(y_i, y_computed) assert_almost_equal(ydot_i,", "Ad, Bd = hermite_matrices(tau_given, tau_eval) # Interpolate y and ydot", "tau_eval = np.linspace(-1, 1, 100) # In time space use", "the boundaries [-2, 2] dt_dtau = 4.0 / 2.0 #", "for y = t**2 and its time-derivative y_given = [-8.0,", "[-1.0, 0.0, 1.0] tau_eval = np.linspace(-1, 1, 101) # In", "4.0] ydot_given = [-4.0, 4.0] # Get the hermite matrices.", "3.0 * (tau_eval * dt_dtau)**2 # Check results assert_almost_equal(y_i, y_computed)", "point of comparison. y_computed = (tau_eval * dt_dtau)**3 ydot_computed =", "in tau space tau_given = [-1.0, 1.0] tau_eval = np.linspace(-1,", "our function as a point of comparison. y_computed = (tau_eval", "np.linspace(-1, 1, 101) # In time space use the boundaries", "= (tau_eval * dt_dtau)**2 ydot_computed = 2.0 * (tau_eval *", "= t**2 and its time-derivative y_given = [-8.0, 0.0, 8.0]", "of comparison. y_computed = (tau_eval * dt_dtau)**2 ydot_computed = 2.0", "with values and rates provided at [-1, 1] in tau", "its time-derivative y_given = [-8.0, 0.0, 8.0] ydot_given = [12.0,", "12.0] # Get the hermite matrices. 
Ai, Bi, Ad, Bd", "dt_dtau)**3 ydot_computed = 3.0 * (tau_eval * dt_dtau)**2 # Check", "[12.0, 0.0, 12.0] # Get the hermite matrices. Ai, Bi,", "and rates provided at [-1, 1] in tau space tau_given", "tau space tau_given = [-1.0, 0.0, 1.0] tau_eval = np.linspace(-1,", "in tau space. y_i = np.dot(Ai, y_given) + dt_dtau *", "(tau_eval * dt_dtau) # Check results assert_almost_equal(y_i, y_computed) assert_almost_equal(ydot_i, ydot_computed)", "100) # In time space use the boundaries [-2, 2]", "dt_dtau) * np.dot(Ad, y_given) + np.dot(Bd, ydot_given) # Compute our", "* np.dot(Bi, ydot_given) ydot_i = (1.0 / dt_dtau) * np.dot(Ad,", "/ dt_dtau) * np.dot(Ad, y_given) + np.dot(Bd, ydot_given) # Compute", "assert_almost_equal(y_i, y_computed) assert_almost_equal(ydot_i, ydot_computed) def test_cubic(self): # Interpolate with values", "tau_eval = np.linspace(-1, 1, 101) # In time space use", "# Interpolate y and ydot at tau_eval points in tau", "dymos.utils.hermite import hermite_matrices class TestHermiteMatrices(unittest.TestCase): def test_quadratic(self): # Interpolate with", "def test_quadratic(self): # Interpolate with values and rates provided at", "Check results assert_almost_equal(y_i, y_computed) assert_almost_equal(ydot_i, ydot_computed) def test_cubic(self): # Interpolate", "/ 2.0 # Provide values for y = t**2 and", "boundaries [-2, 2] dt_dtau = 4.0 / 2.0 # Provide", "ydot_i = (1.0 / dt_dtau) * np.dot(Ad, y_given) + np.dot(Bd,", "numpy as np from numpy.testing import assert_almost_equal from dymos.utils.hermite import", "matrices. Ai, Bi, Ad, Bd = hermite_matrices(tau_given, tau_eval) # Interpolate", "and ydot at tau_eval points in tau space. 
y_i =", "= 3.0 * (tau_eval * dt_dtau)**2 # Check results assert_almost_equal(y_i,", "assert_almost_equal(ydot_i, ydot_computed) if __name__ == '__main__': # pragma: no cover", "time space use the boundaries [-2, 2] dt_dtau = 4.0", "2.0 # Provide values for y = t**2 and its", "1.0] tau_eval = np.linspace(-1, 1, 100) # In time space", "at [-1, 1] in tau space tau_given = [-1.0, 0.0,", "4.0 / 2.0 # Provide values for y = t**2", "t**2 and its time-derivative y_given = [4.0, 4.0] ydot_given =", "points in tau space. y_i = np.dot(Ai, y_given) + dt_dtau", "1.0] tau_eval = np.linspace(-1, 1, 101) # In time space", "at tau_eval points in tau space. y_i = np.dot(Ai, y_given)", "* dt_dtau)**2 ydot_computed = 2.0 * (tau_eval * dt_dtau) #", "ydot_given) ydot_i = (1.0 / dt_dtau) * np.dot(Ad, y_given) +", "y_given) + dt_dtau * np.dot(Bi, ydot_given) ydot_i = (1.0 /", "np from numpy.testing import assert_almost_equal from dymos.utils.hermite import hermite_matrices class", "+ dt_dtau * np.dot(Bi, ydot_given) ydot_i = (1.0 / dt_dtau)", "* np.dot(Ad, y_given) + np.dot(Bd, ydot_given) # Compute our function", "a point of comparison. y_computed = (tau_eval * dt_dtau)**3 ydot_computed", "1, 100) # In time space use the boundaries [-2,", "y = t**2 and its time-derivative y_given = [-8.0, 0.0,", "Check results assert_almost_equal(y_i, y_computed) assert_almost_equal(ydot_i, ydot_computed) if __name__ == '__main__':", "assert_almost_equal(ydot_i, ydot_computed) def test_cubic(self): # Interpolate with values and rates", "* dt_dtau)**2 # Check results assert_almost_equal(y_i, y_computed) assert_almost_equal(ydot_i, ydot_computed) if", "= np.dot(Ai, y_given) + dt_dtau * np.dot(Bi, ydot_given) ydot_i =", "tau_eval points in tau space. y_i = np.dot(Ai, y_given) +", "space. 
y_i = np.dot(Ai, y_given) + dt_dtau * np.dot(Bi, ydot_given)", "1] in tau space tau_given = [-1.0, 0.0, 1.0] tau_eval", "class TestHermiteMatrices(unittest.TestCase): def test_quadratic(self): # Interpolate with values and rates", "tau space. y_i = np.dot(Ai, y_given) + dt_dtau * np.dot(Bi,", "space use the boundaries [-2, 2] dt_dtau = 4.0 /", "as a point of comparison. y_computed = (tau_eval * dt_dtau)**2", "and its time-derivative y_given = [-8.0, 0.0, 8.0] ydot_given =", "np.dot(Bi, ydot_given) ydot_i = (1.0 / dt_dtau) * np.dot(Ad, y_given)", "point of comparison. y_computed = (tau_eval * dt_dtau)**2 ydot_computed =", "TestHermiteMatrices(unittest.TestCase): def test_quadratic(self): # Interpolate with values and rates provided", "provided at [-1, 1] in tau space tau_given = [-1.0,", "ydot_computed = 3.0 * (tau_eval * dt_dtau)**2 # Check results", "101) # In time space use the boundaries [-2, 2]", "unittest import numpy as np from numpy.testing import assert_almost_equal from", "space tau_given = [-1.0, 1.0] tau_eval = np.linspace(-1, 1, 100)", "= [-1.0, 0.0, 1.0] tau_eval = np.linspace(-1, 1, 101) #", "ydot_given = [12.0, 0.0, 12.0] # Get the hermite matrices.", "y_computed = (tau_eval * dt_dtau)**2 ydot_computed = 2.0 * (tau_eval", "at [-1, 1] in tau space tau_given = [-1.0, 1.0]", "y_given = [-8.0, 0.0, 8.0] ydot_given = [12.0, 0.0, 12.0]", "1, 101) # In time space use the boundaries [-2,", "ydot_given = [-4.0, 4.0] # Get the hermite matrices. Ai,", "Bi, Ad, Bd = hermite_matrices(tau_given, tau_eval) # Interpolate y and", "# Provide values for y = t**2 and its time-derivative", "= t**2 and its time-derivative y_given = [4.0, 4.0] ydot_given", "y and ydot at tau_eval points in tau space. 
y_i", "in tau space tau_given = [-1.0, 0.0, 1.0] tau_eval =", "2.0 * (tau_eval * dt_dtau) # Check results assert_almost_equal(y_i, y_computed)", "time-derivative y_given = [-8.0, 0.0, 8.0] ydot_given = [12.0, 0.0,", "y_computed = (tau_eval * dt_dtau)**3 ydot_computed = 3.0 * (tau_eval", "results assert_almost_equal(y_i, y_computed) assert_almost_equal(ydot_i, ydot_computed) if __name__ == '__main__': #", "(tau_eval * dt_dtau)**2 # Check results assert_almost_equal(y_i, y_computed) assert_almost_equal(ydot_i, ydot_computed)", "hermite_matrices(tau_given, tau_eval) # Interpolate y and ydot at tau_eval points", "test_cubic(self): # Interpolate with values and rates provided at [-1,", "y_computed) assert_almost_equal(ydot_i, ydot_computed) def test_cubic(self): # Interpolate with values and", "tau_given = [-1.0, 0.0, 1.0] tau_eval = np.linspace(-1, 1, 101)", "hermite_matrices class TestHermiteMatrices(unittest.TestCase): def test_quadratic(self): # Interpolate with values and", "2] dt_dtau = 4.0 / 2.0 # Provide values for", "import numpy as np from numpy.testing import assert_almost_equal from dymos.utils.hermite", "ydot at tau_eval points in tau space. y_i = np.dot(Ai,", "Interpolate y and ydot at tau_eval points in tau space.", "y_computed) assert_almost_equal(ydot_i, ydot_computed) if __name__ == '__main__': # pragma: no", "import hermite_matrices class TestHermiteMatrices(unittest.TestCase): def test_quadratic(self): # Interpolate with values", "4.0] # Get the hermite matrices. 
Ai, Bi, Ad, Bd", "Interpolate with values and rates provided at [-1, 1] in", "[-1.0, 1.0] tau_eval = np.linspace(-1, 1, 100) # In time", "= (1.0 / dt_dtau) * np.dot(Ad, y_given) + np.dot(Bd, ydot_given)", "use the boundaries [-2, 2] dt_dtau = 4.0 / 2.0", "assert_almost_equal(y_i, y_computed) assert_almost_equal(ydot_i, ydot_computed) if __name__ == '__main__': # pragma:", "0.0, 1.0] tau_eval = np.linspace(-1, 1, 101) # In time", "assert_almost_equal from dymos.utils.hermite import hermite_matrices class TestHermiteMatrices(unittest.TestCase): def test_quadratic(self): #", "np.linspace(-1, 1, 100) # In time space use the boundaries", "= np.linspace(-1, 1, 100) # In time space use the", "its time-derivative y_given = [4.0, 4.0] ydot_given = [-4.0, 4.0]", "= [-8.0, 0.0, 8.0] ydot_given = [12.0, 0.0, 12.0] #", "# Interpolate with values and rates provided at [-1, 1]", "tau space tau_given = [-1.0, 1.0] tau_eval = np.linspace(-1, 1,", "[-4.0, 4.0] # Get the hermite matrices. Ai, Bi, Ad,", "[-8.0, 0.0, 8.0] ydot_given = [12.0, 0.0, 12.0] # Get", "# Compute our function as a point of comparison. y_computed", "ydot_computed) def test_cubic(self): # Interpolate with values and rates provided", "8.0] ydot_given = [12.0, 0.0, 12.0] # Get the hermite", "dt_dtau) # Check results assert_almost_equal(y_i, y_computed) assert_almost_equal(ydot_i, ydot_computed) def test_cubic(self):", "[4.0, 4.0] ydot_given = [-4.0, 4.0] # Get the hermite", "function as a point of comparison. y_computed = (tau_eval *", "import assert_almost_equal from dymos.utils.hermite import hermite_matrices class TestHermiteMatrices(unittest.TestCase): def test_quadratic(self):", "In time space use the boundaries [-2, 2] dt_dtau =", "from numpy.testing import assert_almost_equal from dymos.utils.hermite import hermite_matrices class TestHermiteMatrices(unittest.TestCase):", "dt_dtau = 4.0 / 2.0 # Provide values for y", "0.0, 12.0] # Get the hermite matrices. 
Ai, Bi, Ad,", "(tau_eval * dt_dtau)**3 ydot_computed = 3.0 * (tau_eval * dt_dtau)**2", "# Check results assert_almost_equal(y_i, y_computed) assert_almost_equal(ydot_i, ydot_computed) if __name__ ==", "np.dot(Bd, ydot_given) # Compute our function as a point of", "y = t**2 and its time-derivative y_given = [4.0, 4.0]", "Bd = hermite_matrices(tau_given, tau_eval) # Interpolate y and ydot at", "[-2, 2] dt_dtau = 4.0 / 2.0 # Provide values", "values for y = t**2 and its time-derivative y_given =", "y_given = [4.0, 4.0] ydot_given = [-4.0, 4.0] # Get", "numpy.testing import assert_almost_equal from dymos.utils.hermite import hermite_matrices class TestHermiteMatrices(unittest.TestCase): def" ]
[ "os from . import __author__, __name__, __version__ class Config(object): \"\"\"Base", "class Config(object): \"\"\"Base configuration.\"\"\" SERVER_NAME = os.environ.get('SERVER_NAME', None) PREFERRED_URL_SCHEME =", "'5')) OAUTH2_PROVIDER_TOKEN_EXPIRES_IN = 36000 XL_AUTH_MAX_ACTIVE_PASSWORD_RESETS = 2 XL_AUTH_FAILED_LOGIN_TIMEFRAME = 60", "from __future__ import absolute_import, division, print_function, unicode_literals import os from", "-*- coding: utf-8 -*- \"\"\"Application configuration.\"\"\" from __future__ import absolute_import,", "= 'sqlite://' # For faster tests; needs at least 4", "= 'dev' DEBUG = True DB_NAME = 'dev.db' # Put", "os.environ.get('EMAIL_HOST', 'smtp.kb.se') EMAIL_PORT = int(os.environ.get('EMAIL_PORT', '25')) EMAIL_TIMEOUT = int(os.environ.get('EMAIL_TIMEOUT', '5'))", "= int(os.environ.get('EMAIL_PORT', '25')) EMAIL_TIMEOUT = int(os.environ.get('EMAIL_TIMEOUT', '5')) OAUTH2_PROVIDER_TOKEN_EXPIRES_IN = 36000", "\"ValueError: Invalid rounds\". BCRYPT_LOG_ROUNDS = 4 WTF_CSRF_ENABLED = False #", "__name__, __version__ class Config(object): \"\"\"Base configuration.\"\"\" SERVER_NAME = os.environ.get('SERVER_NAME', None)", "False # Disable Debug toolbar. DEBUG_TB_INTERCEPT_REDIRECTS = False CACHE_TYPE =", "SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI', 'postgresql://localhost/example') DEBUG_TB_ENABLED = False # Disable Debug", "os.environ.get('EMAIL_DEFAULT_FROM', '<EMAIL>') EMAIL_HOST = os.environ.get('EMAIL_HOST', 'smtp.kb.se') EMAIL_PORT = int(os.environ.get('EMAIL_PORT', '25'))", "BABEL_DEFAULT_TIMEZONE = 'utc' EMAIL_DEFAULT_FROM = os.environ.get('EMAIL_DEFAULT_FROM', '<EMAIL>') EMAIL_HOST = os.environ.get('EMAIL_HOST',", "etc. SQLALCHEMY_TRACK_MODIFICATIONS = False WEBPACK_MANIFEST_PATH = 'webpack/manifest.json' BABEL_DEFAULT_LOCALE = os.environ.get('BABEL_DEFAULT_LOCALE',", "unicode_literals import os from . 
import __author__, __name__, __version__ class", "os.environ.get('PREFERRED_URL_SCHEME', 'http') APP_NAME = __name__ APP_VERSION = __version__ APP_AUTHOR =", "= __author__ JSON_AS_ASCII = False SECRET_KEY = os.environ.get('XL_AUTH_SECRET', 'secret-key') #", "# Disable Debug toolbar. DEBUG_TB_INTERCEPT_REDIRECTS = False CACHE_TYPE = 'simple'", "False WEBPACK_MANIFEST_PATH = 'webpack/manifest.json' BABEL_DEFAULT_LOCALE = os.environ.get('BABEL_DEFAULT_LOCALE', 'sv') BABEL_DEFAULT_TIMEZONE =", "DB_NAME) SQLALCHEMY_DATABASE_URI = 'sqlite:///{0}'.format(DB_PATH) DEBUG_TB_ENABLED = True CACHE_TYPE = 'simple'", "os.path.abspath(os.path.join(APP_DIR, os.pardir)) BCRYPT_LOG_ROUNDS = 13 DEBUG_TB_ENABLED = False # Disable", "= 13 DEBUG_TB_ENABLED = False # Disable Debug toolbar. DEBUG_TB_INTERCEPT_REDIRECTS", "= 60 * 60 XL_AUTH_FAILED_LOGIN_MAX_ATTEMPTS = 7 class ProdConfig(Config): \"\"\"Production", "toolbar. DEBUG_TB_INTERCEPT_REDIRECTS = False CACHE_TYPE = 'simple' # Can be", "XL_AUTH_FAILED_LOGIN_TIMEFRAME = 60 * 60 XL_AUTH_FAILED_LOGIN_MAX_ATTEMPTS = 7 class ProdConfig(Config):", "import os from . import __author__, __name__, __version__ class Config(object):", "= os.path.join(Config.PROJECT_ROOT, DB_NAME) SQLALCHEMY_DATABASE_URI = 'sqlite:///{0}'.format(DB_PATH) DEBUG_TB_ENABLED = True CACHE_TYPE", "PREFERRED_URL_SCHEME = os.environ.get('PREFERRED_URL_SCHEME', 'http') APP_NAME = __name__ APP_VERSION = __version__", "DEBUG = True SQLALCHEMY_DATABASE_URI = 'sqlite://' # For faster tests;", "# TODO: Change me APP_DIR = os.path.abspath(os.path.dirname(__file__)) # This directory.", "me APP_DIR = os.path.abspath(os.path.dirname(__file__)) # This directory. 
PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR,", "For faster tests; needs at least 4 to avoid \"ValueError:", "True SQLALCHEMY_DATABASE_URI = 'sqlite://' # For faster tests; needs at", "utf-8 -*- \"\"\"Application configuration.\"\"\" from __future__ import absolute_import, division, print_function,", "ENV = 'dev' DEBUG = True DB_NAME = 'dev.db' #", "os.path.abspath(os.path.dirname(__file__)) # This directory. PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir)) BCRYPT_LOG_ROUNDS =", "DEBUG_TB_ENABLED = False # Disable Debug toolbar. class DevConfig(Config): \"\"\"Development", "= 2 XL_AUTH_FAILED_LOGIN_TIMEFRAME = 60 * 60 XL_AUTH_FAILED_LOGIN_MAX_ATTEMPTS = 7", "# Put the db file in project root DB_PATH =", "the db file in project root DB_PATH = os.path.join(Config.PROJECT_ROOT, DB_NAME)", "'dev' DEBUG = True DB_NAME = 'dev.db' # Put the", "__future__ import absolute_import, division, print_function, unicode_literals import os from .", "\"\"\"Production configuration.\"\"\" ENV = 'prod' DEBUG = False SQLALCHEMY_DATABASE_URI =", "True DEBUG = True SQLALCHEMY_DATABASE_URI = 'sqlite://' # For faster", "Config(object): \"\"\"Base configuration.\"\"\" SERVER_NAME = os.environ.get('SERVER_NAME', None) PREFERRED_URL_SCHEME = os.environ.get('PREFERRED_URL_SCHEME',", "# Can be \"memcached\", \"redis\", etc. SQLALCHEMY_TRACK_MODIFICATIONS = False WEBPACK_MANIFEST_PATH", "# For faster tests; needs at least 4 to avoid", "class TestConfig(Config): \"\"\"Test configuration.\"\"\" TESTING = True DEBUG = True", "BCRYPT_LOG_ROUNDS = 4 WTF_CSRF_ENABLED = False # Allows form testing.", "file in project root DB_PATH = os.path.join(Config.PROJECT_ROOT, DB_NAME) SQLALCHEMY_DATABASE_URI =", "= os.path.abspath(os.path.dirname(__file__)) # This directory. PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir)) BCRYPT_LOG_ROUNDS", "Can be \"memcached\", \"redis\", etc. 
SQLALCHEMY_TRACK_MODIFICATIONS = False WEBPACK_MANIFEST_PATH =", "False # Disable Debug toolbar. class DevConfig(Config): \"\"\"Development configuration.\"\"\" ENV", "60 * 60 XL_AUTH_FAILED_LOGIN_MAX_ATTEMPTS = 7 class ProdConfig(Config): \"\"\"Production configuration.\"\"\"", "SQLALCHEMY_DATABASE_URI = 'sqlite://' # For faster tests; needs at least", "PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir)) BCRYPT_LOG_ROUNDS = 13 DEBUG_TB_ENABLED = False", "APP_NAME = __name__ APP_VERSION = __version__ APP_AUTHOR = __author__ JSON_AS_ASCII", "APP_VERSION = __version__ APP_AUTHOR = __author__ JSON_AS_ASCII = False SECRET_KEY", "= 'webpack/manifest.json' BABEL_DEFAULT_LOCALE = os.environ.get('BABEL_DEFAULT_LOCALE', 'sv') BABEL_DEFAULT_TIMEZONE = 'utc' EMAIL_DEFAULT_FROM", "= 36000 XL_AUTH_MAX_ACTIVE_PASSWORD_RESETS = 2 XL_AUTH_FAILED_LOGIN_TIMEFRAME = 60 * 60", "EMAIL_PORT = int(os.environ.get('EMAIL_PORT', '25')) EMAIL_TIMEOUT = int(os.environ.get('EMAIL_TIMEOUT', '5')) OAUTH2_PROVIDER_TOKEN_EXPIRES_IN =", "WTF_CSRF_ENABLED = False # Allows form testing. EMAIL_BACKEND = 'flask_emails.backends.DummyBackend'", "= False CACHE_TYPE = 'simple' # Can be \"memcached\", \"redis\",", "os.environ.get('SQLALCHEMY_DATABASE_URI', 'postgresql://localhost/example') DEBUG_TB_ENABLED = False # Disable Debug toolbar. class", "XL_AUTH_MAX_ACTIVE_PASSWORD_RESETS = 2 XL_AUTH_FAILED_LOGIN_TIMEFRAME = 60 * 60 XL_AUTH_FAILED_LOGIN_MAX_ATTEMPTS =", "__version__ class Config(object): \"\"\"Base configuration.\"\"\" SERVER_NAME = os.environ.get('SERVER_NAME', None) PREFERRED_URL_SCHEME", "4 WTF_CSRF_ENABLED = False # Allows form testing. EMAIL_BACKEND =", "TODO: Change me APP_DIR = os.path.abspath(os.path.dirname(__file__)) # This directory. PROJECT_ROOT", "SQLALCHEMY_DATABASE_URI = 'sqlite:///{0}'.format(DB_PATH) DEBUG_TB_ENABLED = True CACHE_TYPE = 'simple' #", "'dev.db' # Put the db file in project root DB_PATH", "= 'simple' # Can be \"memcached\", \"redis\", etc. 
class TestConfig(Config):", "False CACHE_TYPE = 'simple' # Can be \"memcached\", \"redis\", etc.", "'sqlite:///{0}'.format(DB_PATH) DEBUG_TB_ENABLED = True CACHE_TYPE = 'simple' # Can be", "__author__, __name__, __version__ class Config(object): \"\"\"Base configuration.\"\"\" SERVER_NAME = os.environ.get('SERVER_NAME',", "EMAIL_DEFAULT_FROM = os.environ.get('EMAIL_DEFAULT_FROM', '<EMAIL>') EMAIL_HOST = os.environ.get('EMAIL_HOST', 'smtp.kb.se') EMAIL_PORT =", "2 XL_AUTH_FAILED_LOGIN_TIMEFRAME = 60 * 60 XL_AUTH_FAILED_LOGIN_MAX_ATTEMPTS = 7 class", "DB_NAME = 'dev.db' # Put the db file in project", "__name__ APP_VERSION = __version__ APP_AUTHOR = __author__ JSON_AS_ASCII = False", "= False # Disable Debug toolbar. class DevConfig(Config): \"\"\"Development configuration.\"\"\"", "__author__ JSON_AS_ASCII = False SECRET_KEY = os.environ.get('XL_AUTH_SECRET', 'secret-key') # TODO:", "= os.environ.get('XL_AUTH_SECRET', 'secret-key') # TODO: Change me APP_DIR = os.path.abspath(os.path.dirname(__file__))", "coding: utf-8 -*- \"\"\"Application configuration.\"\"\" from __future__ import absolute_import, division,", "True DB_NAME = 'dev.db' # Put the db file in", "13 DEBUG_TB_ENABLED = False # Disable Debug toolbar. DEBUG_TB_INTERCEPT_REDIRECTS =", "True CACHE_TYPE = 'simple' # Can be \"memcached\", \"redis\", etc.", "from . import __author__, __name__, __version__ class Config(object): \"\"\"Base configuration.\"\"\"", "\"redis\", etc. class TestConfig(Config): \"\"\"Test configuration.\"\"\" TESTING = True DEBUG", "'postgresql://localhost/example') DEBUG_TB_ENABLED = False # Disable Debug toolbar. class DevConfig(Config):", "= int(os.environ.get('EMAIL_TIMEOUT', '5')) OAUTH2_PROVIDER_TOKEN_EXPIRES_IN = 36000 XL_AUTH_MAX_ACTIVE_PASSWORD_RESETS = 2 XL_AUTH_FAILED_LOGIN_TIMEFRAME", "Can be \"memcached\", \"redis\", etc. 
class TestConfig(Config): \"\"\"Test configuration.\"\"\" TESTING", "class DevConfig(Config): \"\"\"Development configuration.\"\"\" ENV = 'dev' DEBUG = True", "'25')) EMAIL_TIMEOUT = int(os.environ.get('EMAIL_TIMEOUT', '5')) OAUTH2_PROVIDER_TOKEN_EXPIRES_IN = 36000 XL_AUTH_MAX_ACTIVE_PASSWORD_RESETS =", "APP_AUTHOR = __author__ JSON_AS_ASCII = False SECRET_KEY = os.environ.get('XL_AUTH_SECRET', 'secret-key')", "= True SQLALCHEMY_DATABASE_URI = 'sqlite://' # For faster tests; needs", "'sv') BABEL_DEFAULT_TIMEZONE = 'utc' EMAIL_DEFAULT_FROM = os.environ.get('EMAIL_DEFAULT_FROM', '<EMAIL>') EMAIL_HOST =", "# Can be \"memcached\", \"redis\", etc. class TestConfig(Config): \"\"\"Test configuration.\"\"\"", "\"memcached\", \"redis\", etc. SQLALCHEMY_TRACK_MODIFICATIONS = False WEBPACK_MANIFEST_PATH = 'webpack/manifest.json' BABEL_DEFAULT_LOCALE", "APP_DIR = os.path.abspath(os.path.dirname(__file__)) # This directory. PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))", "\"redis\", etc. SQLALCHEMY_TRACK_MODIFICATIONS = False WEBPACK_MANIFEST_PATH = 'webpack/manifest.json' BABEL_DEFAULT_LOCALE =", "class ProdConfig(Config): \"\"\"Production configuration.\"\"\" ENV = 'prod' DEBUG = False", "to avoid \"ValueError: Invalid rounds\". BCRYPT_LOG_ROUNDS = 4 WTF_CSRF_ENABLED =", "int(os.environ.get('EMAIL_TIMEOUT', '5')) OAUTH2_PROVIDER_TOKEN_EXPIRES_IN = 36000 XL_AUTH_MAX_ACTIVE_PASSWORD_RESETS = 2 XL_AUTH_FAILED_LOGIN_TIMEFRAME =", "project root DB_PATH = os.path.join(Config.PROJECT_ROOT, DB_NAME) SQLALCHEMY_DATABASE_URI = 'sqlite:///{0}'.format(DB_PATH) DEBUG_TB_ENABLED", "'simple' # Can be \"memcached\", \"redis\", etc. 
SQLALCHEMY_TRACK_MODIFICATIONS = False", "in project root DB_PATH = os.path.join(Config.PROJECT_ROOT, DB_NAME) SQLALCHEMY_DATABASE_URI = 'sqlite:///{0}'.format(DB_PATH)", "= True DB_NAME = 'dev.db' # Put the db file", "__version__ APP_AUTHOR = __author__ JSON_AS_ASCII = False SECRET_KEY = os.environ.get('XL_AUTH_SECRET',", "OAUTH2_PROVIDER_TOKEN_EXPIRES_IN = 36000 XL_AUTH_MAX_ACTIVE_PASSWORD_RESETS = 2 XL_AUTH_FAILED_LOGIN_TIMEFRAME = 60 *", "etc. class TestConfig(Config): \"\"\"Test configuration.\"\"\" TESTING = True DEBUG =", "absolute_import, division, print_function, unicode_literals import os from . import __author__,", "# -*- coding: utf-8 -*- \"\"\"Application configuration.\"\"\" from __future__ import", "= 7 class ProdConfig(Config): \"\"\"Production configuration.\"\"\" ENV = 'prod' DEBUG", "This directory. PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir)) BCRYPT_LOG_ROUNDS = 13 DEBUG_TB_ENABLED", "EMAIL_HOST = os.environ.get('EMAIL_HOST', 'smtp.kb.se') EMAIL_PORT = int(os.environ.get('EMAIL_PORT', '25')) EMAIL_TIMEOUT =", "= True DEBUG = True SQLALCHEMY_DATABASE_URI = 'sqlite://' # For", "os.pardir)) BCRYPT_LOG_ROUNDS = 13 DEBUG_TB_ENABLED = False # Disable Debug", "Debug toolbar. class DevConfig(Config): \"\"\"Development configuration.\"\"\" ENV = 'dev' DEBUG", "least 4 to avoid \"ValueError: Invalid rounds\". BCRYPT_LOG_ROUNDS = 4", "'sqlite://' # For faster tests; needs at least 4 to", "import absolute_import, division, print_function, unicode_literals import os from . 
import", "= __name__ APP_VERSION = __version__ APP_AUTHOR = __author__ JSON_AS_ASCII =", "SERVER_NAME = os.environ.get('SERVER_NAME', None) PREFERRED_URL_SCHEME = os.environ.get('PREFERRED_URL_SCHEME', 'http') APP_NAME =", "DevConfig(Config): \"\"\"Development configuration.\"\"\" ENV = 'dev' DEBUG = True DB_NAME", "os.path.join(Config.PROJECT_ROOT, DB_NAME) SQLALCHEMY_DATABASE_URI = 'sqlite:///{0}'.format(DB_PATH) DEBUG_TB_ENABLED = True CACHE_TYPE =", "\"memcached\", \"redis\", etc. class TestConfig(Config): \"\"\"Test configuration.\"\"\" TESTING = True", "\"\"\"Test configuration.\"\"\" TESTING = True DEBUG = True SQLALCHEMY_DATABASE_URI =", "needs at least 4 to avoid \"ValueError: Invalid rounds\". BCRYPT_LOG_ROUNDS", "DEBUG_TB_ENABLED = True CACHE_TYPE = 'simple' # Can be \"memcached\",", "configuration.\"\"\" SERVER_NAME = os.environ.get('SERVER_NAME', None) PREFERRED_URL_SCHEME = os.environ.get('PREFERRED_URL_SCHEME', 'http') APP_NAME", "= os.environ.get('BABEL_DEFAULT_LOCALE', 'sv') BABEL_DEFAULT_TIMEZONE = 'utc' EMAIL_DEFAULT_FROM = os.environ.get('EMAIL_DEFAULT_FROM', '<EMAIL>')", "False SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI', 'postgresql://localhost/example') DEBUG_TB_ENABLED = False # Disable", "'http') APP_NAME = __name__ APP_VERSION = __version__ APP_AUTHOR = __author__", "os.environ.get('XL_AUTH_SECRET', 'secret-key') # TODO: Change me APP_DIR = os.path.abspath(os.path.dirname(__file__)) #", "= __version__ APP_AUTHOR = __author__ JSON_AS_ASCII = False SECRET_KEY =", "ENV = 'prod' DEBUG = False SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI', 'postgresql://localhost/example')", "os.environ.get('BABEL_DEFAULT_LOCALE', 'sv') BABEL_DEFAULT_TIMEZONE = 'utc' EMAIL_DEFAULT_FROM = os.environ.get('EMAIL_DEFAULT_FROM', '<EMAIL>') EMAIL_HOST", "print_function, unicode_literals import os from . 
import __author__, __name__, __version__", "= 'utc' EMAIL_DEFAULT_FROM = os.environ.get('EMAIL_DEFAULT_FROM', '<EMAIL>') EMAIL_HOST = os.environ.get('EMAIL_HOST', 'smtp.kb.se')", "toolbar. class DevConfig(Config): \"\"\"Development configuration.\"\"\" ENV = 'dev' DEBUG =", "DB_PATH = os.path.join(Config.PROJECT_ROOT, DB_NAME) SQLALCHEMY_DATABASE_URI = 'sqlite:///{0}'.format(DB_PATH) DEBUG_TB_ENABLED = True", "EMAIL_TIMEOUT = int(os.environ.get('EMAIL_TIMEOUT', '5')) OAUTH2_PROVIDER_TOKEN_EXPIRES_IN = 36000 XL_AUTH_MAX_ACTIVE_PASSWORD_RESETS = 2", "db file in project root DB_PATH = os.path.join(Config.PROJECT_ROOT, DB_NAME) SQLALCHEMY_DATABASE_URI", "= os.environ.get('SQLALCHEMY_DATABASE_URI', 'postgresql://localhost/example') DEBUG_TB_ENABLED = False # Disable Debug toolbar.", "TESTING = True DEBUG = True SQLALCHEMY_DATABASE_URI = 'sqlite://' #", "configuration.\"\"\" from __future__ import absolute_import, division, print_function, unicode_literals import os", "= 'dev.db' # Put the db file in project root", "= False SECRET_KEY = os.environ.get('XL_AUTH_SECRET', 'secret-key') # TODO: Change me", "\"\"\"Development configuration.\"\"\" ENV = 'dev' DEBUG = True DB_NAME =", "Invalid rounds\". BCRYPT_LOG_ROUNDS = 4 WTF_CSRF_ENABLED = False # Allows", "division, print_function, unicode_literals import os from . import __author__, __name__,", "Disable Debug toolbar. DEBUG_TB_INTERCEPT_REDIRECTS = False CACHE_TYPE = 'simple' #", "# Disable Debug toolbar. class DevConfig(Config): \"\"\"Development configuration.\"\"\" ENV =", "'simple' # Can be \"memcached\", \"redis\", etc. class TestConfig(Config): \"\"\"Test", "Debug toolbar. DEBUG_TB_INTERCEPT_REDIRECTS = False CACHE_TYPE = 'simple' # Can", "= 'simple' # Can be \"memcached\", \"redis\", etc. SQLALCHEMY_TRACK_MODIFICATIONS =", ". 
import __author__, __name__, __version__ class Config(object): \"\"\"Base configuration.\"\"\" SERVER_NAME", "* 60 XL_AUTH_FAILED_LOGIN_MAX_ATTEMPTS = 7 class ProdConfig(Config): \"\"\"Production configuration.\"\"\" ENV", "rounds\". BCRYPT_LOG_ROUNDS = 4 WTF_CSRF_ENABLED = False # Allows form", "'secret-key') # TODO: Change me APP_DIR = os.path.abspath(os.path.dirname(__file__)) # This", "= os.path.abspath(os.path.join(APP_DIR, os.pardir)) BCRYPT_LOG_ROUNDS = 13 DEBUG_TB_ENABLED = False #", "DEBUG_TB_ENABLED = False # Disable Debug toolbar. DEBUG_TB_INTERCEPT_REDIRECTS = False", "be \"memcached\", \"redis\", etc. class TestConfig(Config): \"\"\"Test configuration.\"\"\" TESTING =", "# This directory. PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir)) BCRYPT_LOG_ROUNDS = 13", "= True CACHE_TYPE = 'simple' # Can be \"memcached\", \"redis\",", "36000 XL_AUTH_MAX_ACTIVE_PASSWORD_RESETS = 2 XL_AUTH_FAILED_LOGIN_TIMEFRAME = 60 * 60 XL_AUTH_FAILED_LOGIN_MAX_ATTEMPTS", "= 4 WTF_CSRF_ENABLED = False # Allows form testing. EMAIL_BACKEND", "DEBUG_TB_INTERCEPT_REDIRECTS = False CACHE_TYPE = 'simple' # Can be \"memcached\",", "avoid \"ValueError: Invalid rounds\". BCRYPT_LOG_ROUNDS = 4 WTF_CSRF_ENABLED = False", "= os.environ.get('PREFERRED_URL_SCHEME', 'http') APP_NAME = __name__ APP_VERSION = __version__ APP_AUTHOR", "TestConfig(Config): \"\"\"Test configuration.\"\"\" TESTING = True DEBUG = True SQLALCHEMY_DATABASE_URI", "False SECRET_KEY = os.environ.get('XL_AUTH_SECRET', 'secret-key') # TODO: Change me APP_DIR", "directory. PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir)) BCRYPT_LOG_ROUNDS = 13 DEBUG_TB_ENABLED =", "at least 4 to avoid \"ValueError: Invalid rounds\". BCRYPT_LOG_ROUNDS =", "'webpack/manifest.json' BABEL_DEFAULT_LOCALE = os.environ.get('BABEL_DEFAULT_LOCALE', 'sv') BABEL_DEFAULT_TIMEZONE = 'utc' EMAIL_DEFAULT_FROM =", "= False # Disable Debug toolbar. 
DEBUG_TB_INTERCEPT_REDIRECTS = False CACHE_TYPE", "be \"memcached\", \"redis\", etc. SQLALCHEMY_TRACK_MODIFICATIONS = False WEBPACK_MANIFEST_PATH = 'webpack/manifest.json'", "= os.environ.get('EMAIL_HOST', 'smtp.kb.se') EMAIL_PORT = int(os.environ.get('EMAIL_PORT', '25')) EMAIL_TIMEOUT = int(os.environ.get('EMAIL_TIMEOUT',", "= 'prod' DEBUG = False SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI', 'postgresql://localhost/example') DEBUG_TB_ENABLED", "DEBUG = False SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI', 'postgresql://localhost/example') DEBUG_TB_ENABLED = False", "root DB_PATH = os.path.join(Config.PROJECT_ROOT, DB_NAME) SQLALCHEMY_DATABASE_URI = 'sqlite:///{0}'.format(DB_PATH) DEBUG_TB_ENABLED =", "'prod' DEBUG = False SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI', 'postgresql://localhost/example') DEBUG_TB_ENABLED =", "= False SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI', 'postgresql://localhost/example') DEBUG_TB_ENABLED = False #", "import __author__, __name__, __version__ class Config(object): \"\"\"Base configuration.\"\"\" SERVER_NAME =", "60 XL_AUTH_FAILED_LOGIN_MAX_ATTEMPTS = 7 class ProdConfig(Config): \"\"\"Production configuration.\"\"\" ENV =", "ProdConfig(Config): \"\"\"Production configuration.\"\"\" ENV = 'prod' DEBUG = False SQLALCHEMY_DATABASE_URI", "WEBPACK_MANIFEST_PATH = 'webpack/manifest.json' BABEL_DEFAULT_LOCALE = os.environ.get('BABEL_DEFAULT_LOCALE', 'sv') BABEL_DEFAULT_TIMEZONE = 'utc'", "BCRYPT_LOG_ROUNDS = 13 DEBUG_TB_ENABLED = False # Disable Debug toolbar.", "os.environ.get('SERVER_NAME', None) PREFERRED_URL_SCHEME = os.environ.get('PREFERRED_URL_SCHEME', 'http') APP_NAME = __name__ APP_VERSION", "faster tests; needs at least 4 to avoid \"ValueError: Invalid", "'utc' EMAIL_DEFAULT_FROM = os.environ.get('EMAIL_DEFAULT_FROM', '<EMAIL>') EMAIL_HOST = os.environ.get('EMAIL_HOST', 'smtp.kb.se') EMAIL_PORT", "int(os.environ.get('EMAIL_PORT', '25')) 
EMAIL_TIMEOUT = int(os.environ.get('EMAIL_TIMEOUT', '5')) OAUTH2_PROVIDER_TOKEN_EXPIRES_IN = 36000 XL_AUTH_MAX_ACTIVE_PASSWORD_RESETS", "CACHE_TYPE = 'simple' # Can be \"memcached\", \"redis\", etc. class", "4 to avoid \"ValueError: Invalid rounds\". BCRYPT_LOG_ROUNDS = 4 WTF_CSRF_ENABLED", "DEBUG = True DB_NAME = 'dev.db' # Put the db", "'<EMAIL>') EMAIL_HOST = os.environ.get('EMAIL_HOST', 'smtp.kb.se') EMAIL_PORT = int(os.environ.get('EMAIL_PORT', '25')) EMAIL_TIMEOUT", "Put the db file in project root DB_PATH = os.path.join(Config.PROJECT_ROOT,", "= False WEBPACK_MANIFEST_PATH = 'webpack/manifest.json' BABEL_DEFAULT_LOCALE = os.environ.get('BABEL_DEFAULT_LOCALE', 'sv') BABEL_DEFAULT_TIMEZONE", "Disable Debug toolbar. class DevConfig(Config): \"\"\"Development configuration.\"\"\" ENV = 'dev'", "CACHE_TYPE = 'simple' # Can be \"memcached\", \"redis\", etc. SQLALCHEMY_TRACK_MODIFICATIONS", "SQLALCHEMY_TRACK_MODIFICATIONS = False WEBPACK_MANIFEST_PATH = 'webpack/manifest.json' BABEL_DEFAULT_LOCALE = os.environ.get('BABEL_DEFAULT_LOCALE', 'sv')", "'smtp.kb.se') EMAIL_PORT = int(os.environ.get('EMAIL_PORT', '25')) EMAIL_TIMEOUT = int(os.environ.get('EMAIL_TIMEOUT', '5')) OAUTH2_PROVIDER_TOKEN_EXPIRES_IN", "configuration.\"\"\" ENV = 'prod' DEBUG = False SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI',", "tests; needs at least 4 to avoid \"ValueError: Invalid rounds\".", "Change me APP_DIR = os.path.abspath(os.path.dirname(__file__)) # This directory. 
PROJECT_ROOT =", "JSON_AS_ASCII = False SECRET_KEY = os.environ.get('XL_AUTH_SECRET', 'secret-key') # TODO: Change", "configuration.\"\"\" ENV = 'dev' DEBUG = True DB_NAME = 'dev.db'", "\"\"\"Base configuration.\"\"\" SERVER_NAME = os.environ.get('SERVER_NAME', None) PREFERRED_URL_SCHEME = os.environ.get('PREFERRED_URL_SCHEME', 'http')", "7 class ProdConfig(Config): \"\"\"Production configuration.\"\"\" ENV = 'prod' DEBUG =", "\"\"\"Application configuration.\"\"\" from __future__ import absolute_import, division, print_function, unicode_literals import", "-*- \"\"\"Application configuration.\"\"\" from __future__ import absolute_import, division, print_function, unicode_literals", "None) PREFERRED_URL_SCHEME = os.environ.get('PREFERRED_URL_SCHEME', 'http') APP_NAME = __name__ APP_VERSION =", "XL_AUTH_FAILED_LOGIN_MAX_ATTEMPTS = 7 class ProdConfig(Config): \"\"\"Production configuration.\"\"\" ENV = 'prod'", "configuration.\"\"\" TESTING = True DEBUG = True SQLALCHEMY_DATABASE_URI = 'sqlite://'", "SECRET_KEY = os.environ.get('XL_AUTH_SECRET', 'secret-key') # TODO: Change me APP_DIR =", "= os.environ.get('EMAIL_DEFAULT_FROM', '<EMAIL>') EMAIL_HOST = os.environ.get('EMAIL_HOST', 'smtp.kb.se') EMAIL_PORT = int(os.environ.get('EMAIL_PORT',", "= 'sqlite:///{0}'.format(DB_PATH) DEBUG_TB_ENABLED = True CACHE_TYPE = 'simple' # Can", "BABEL_DEFAULT_LOCALE = os.environ.get('BABEL_DEFAULT_LOCALE', 'sv') BABEL_DEFAULT_TIMEZONE = 'utc' EMAIL_DEFAULT_FROM = os.environ.get('EMAIL_DEFAULT_FROM',", "= os.environ.get('SERVER_NAME', None) PREFERRED_URL_SCHEME = os.environ.get('PREFERRED_URL_SCHEME', 'http') APP_NAME = __name__" ]
[ "def test_pairing_with_existing_credentials(self): self.service.credentials = CLIENT_CREDENTIALS self.handle = await pyatv.pair(self.conf, Protocol.MRP,", "= AppleTV(\"127.0.0.1\", \"Apple TV\") self.conf.add_service(self.service) async def tearDownAsync(self): if inspect.iscoroutinefunction(self.handle.close):", "return self.fake_atv.app @unittest_run_loop async def test_pairing_with_device(self): self.handle = await pyatv.pair(self.conf,", "PIN_CODE, CLIENT_IDENTIFIER, CLIENT_CREDENTIALS from tests.fake_device import FakeAppleTV class MrpAuthFunctionalTest(AioHTTPTestCase): def", "test_pairing_with_device(self): self.handle = await pyatv.pair(self.conf, Protocol.MRP, self.loop) self.assertIsNone(self.service.credentials) self.assertTrue(self.handle.device_provides_pin) await", "from pyatv.conf import MrpService, AppleTV from pyatv.mrp.server_auth import PIN_CODE, CLIENT_IDENTIFIER,", "pyatv from pyatv import exceptions from pyatv.const import Protocol from", "self.handle.pin(PIN_CODE) await self.handle.finish() self.assertTrue(self.handle.has_paired) self.assertTrue(self.state.has_paired) self.assertIsNotNone(self.service.credentials) @unittest_run_loop async def test_pairing_with_bad_pin(self):", "AioHTTPTestCase.setUp(self) self.service = MrpService( CLIENT_IDENTIFIER, self.fake_atv.get_port(Protocol.MRP) ) self.conf = AppleTV(\"127.0.0.1\",", "await super().tearDownAsync() async def get_application(self, loop=None): self.fake_atv = FakeAppleTV(self.loop) self.state,", "self.fake_atv.get_port(Protocol.MRP) ) self.conf = AppleTV(\"127.0.0.1\", \"Apple TV\") self.conf.add_service(self.service) async def", "self.handle.pin(PIN_CODE) await self.handle.finish() self.assertTrue(self.handle.has_paired) self.assertTrue(self.state.has_paired) self.assertIsNotNone(self.service.credentials) @unittest_run_loop async def test_pairing_with_existing_credentials(self):", "self.conf.add_service(self.service) async def tearDownAsync(self): if 
inspect.iscoroutinefunction(self.handle.close): await self.handle.close() else: self.handle.close()", "self.fake_atv.app @unittest_run_loop async def test_pairing_with_device(self): self.handle = await pyatv.pair(self.conf, Protocol.MRP,", "fake MRP Apple TV.\"\"\" import inspect from aiohttp.test_utils import AioHTTPTestCase,", "from tests.fake_device import FakeAppleTV class MrpAuthFunctionalTest(AioHTTPTestCase): def setUp(self): AioHTTPTestCase.setUp(self) self.service", "MrpService( CLIENT_IDENTIFIER, self.fake_atv.get_port(Protocol.MRP) ) self.conf = AppleTV(\"127.0.0.1\", \"Apple TV\") self.conf.add_service(self.service)", "MRP Apple TV.\"\"\" import inspect from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop", "AioHTTPTestCase, unittest_run_loop import pyatv from pyatv import exceptions from pyatv.const", "self.assertRaises(exceptions.PairingError): await self.handle.finish() self.assertFalse(self.handle.has_paired) self.assertFalse(self.state.has_paired) self.assertIsNone(self.service.credentials) @unittest_run_loop async def test_authentication(self):", "self.assertIsNone(self.service.credentials) self.assertTrue(self.handle.device_provides_pin) await self.handle.begin() self.handle.pin(PIN_CODE + 1) with self.assertRaises(exceptions.PairingError): await", "self.handle = await pyatv.pair(self.conf, Protocol.MRP, self.loop) self.assertFalse(self.handle.has_paired) self.assertIsNotNone(self.service.credentials) self.assertTrue(self.handle.device_provides_pin) await", "@unittest_run_loop async def test_pairing_with_device(self): self.handle = await pyatv.pair(self.conf, Protocol.MRP, self.loop)", "self.assertTrue(self.handle.has_paired) self.assertTrue(self.state.has_paired) self.assertIsNotNone(self.service.credentials) @unittest_run_loop async def test_pairing_with_bad_pin(self): self.handle = await", "import exceptions from pyatv.const import Protocol from pyatv.conf import MrpService,", "= CLIENT_CREDENTIALS self.handle = await 
pyatv.pair(self.conf, Protocol.MRP, self.loop) self.assertFalse(self.handle.has_paired) self.assertIsNotNone(self.service.credentials)", "from pyatv import exceptions from pyatv.const import Protocol from pyatv.conf", "def tearDownAsync(self): if inspect.iscoroutinefunction(self.handle.close): await self.handle.close() else: self.handle.close() await super().tearDownAsync()", "Protocol.MRP, self.loop) self.assertFalse(self.handle.has_paired) self.assertIsNotNone(self.service.credentials) self.assertTrue(self.handle.device_provides_pin) await self.handle.begin() self.handle.pin(PIN_CODE) await self.handle.finish()", "self.loop) self.assertFalse(self.handle.has_paired) self.assertIsNotNone(self.service.credentials) self.assertTrue(self.handle.device_provides_pin) await self.handle.begin() self.handle.pin(PIN_CODE) await self.handle.finish() self.assertTrue(self.handle.has_paired)", "self.assertFalse(self.handle.has_paired) self.assertIsNotNone(self.service.credentials) self.assertTrue(self.handle.device_provides_pin) await self.handle.begin() self.handle.pin(PIN_CODE) await self.handle.finish() self.assertTrue(self.handle.has_paired) self.assertTrue(self.state.has_paired)", "self.fake_atv = FakeAppleTV(self.loop) self.state, self.usecase = self.fake_atv.add_service(Protocol.MRP) return self.fake_atv.app @unittest_run_loop", "def test_pairing_with_device(self): self.handle = await pyatv.pair(self.conf, Protocol.MRP, self.loop) self.assertIsNone(self.service.credentials) self.assertTrue(self.handle.device_provides_pin)", "Protocol.MRP, self.loop) self.assertIsNone(self.service.credentials) self.assertTrue(self.handle.device_provides_pin) await self.handle.begin() self.handle.pin(PIN_CODE + 1) with", "pyatv.mrp.server_auth import PIN_CODE, CLIENT_IDENTIFIER, CLIENT_CREDENTIALS from tests.fake_device import FakeAppleTV class", "FakeAppleTV class MrpAuthFunctionalTest(AioHTTPTestCase): def setUp(self): AioHTTPTestCase.setUp(self) self.service = MrpService( 
CLIENT_IDENTIFIER,", "= FakeAppleTV(self.loop) self.state, self.usecase = self.fake_atv.add_service(Protocol.MRP) return self.fake_atv.app @unittest_run_loop async", "self.usecase = self.fake_atv.add_service(Protocol.MRP) return self.fake_atv.app @unittest_run_loop async def test_pairing_with_device(self): self.handle", "self.assertTrue(self.handle.device_provides_pin) await self.handle.begin() self.handle.pin(PIN_CODE) await self.handle.finish() self.assertTrue(self.handle.has_paired) self.assertTrue(self.state.has_paired) self.assertIsNotNone(self.service.credentials) @unittest_run_loop", "import inspect from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop import pyatv from", "await pyatv.pair(self.conf, Protocol.MRP, self.loop) self.assertFalse(self.handle.has_paired) self.assertIsNotNone(self.service.credentials) self.assertTrue(self.handle.device_provides_pin) await self.handle.begin() self.handle.pin(PIN_CODE)", "def test_authentication(self): self.service.credentials = CLIENT_CREDENTIALS self.handle = await pyatv.connect(self.conf, self.loop)", "TV.\"\"\" import inspect from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop import pyatv", "self.assertIsNotNone(self.service.credentials) self.assertTrue(self.handle.device_provides_pin) await self.handle.begin() self.handle.pin(PIN_CODE) await self.handle.finish() self.assertTrue(self.handle.has_paired) self.assertTrue(self.state.has_paired) self.assertIsNotNone(self.service.credentials)", "AppleTV(\"127.0.0.1\", \"Apple TV\") self.conf.add_service(self.service) async def tearDownAsync(self): if inspect.iscoroutinefunction(self.handle.close): await", ") self.conf = AppleTV(\"127.0.0.1\", \"Apple TV\") self.conf.add_service(self.service) async def tearDownAsync(self):", "async def test_authentication(self): self.service.credentials = CLIENT_CREDENTIALS self.handle = await pyatv.connect(self.conf,", "MrpAuthFunctionalTest(AioHTTPTestCase): def setUp(self): AioHTTPTestCase.setUp(self) 
self.service = MrpService( CLIENT_IDENTIFIER, self.fake_atv.get_port(Protocol.MRP) )", "self.handle.pin(PIN_CODE + 1) with self.assertRaises(exceptions.PairingError): await self.handle.finish() self.assertFalse(self.handle.has_paired) self.assertFalse(self.state.has_paired) self.assertIsNone(self.service.credentials)", "self.conf = AppleTV(\"127.0.0.1\", \"Apple TV\") self.conf.add_service(self.service) async def tearDownAsync(self): if", "= self.fake_atv.add_service(Protocol.MRP) return self.fake_atv.app @unittest_run_loop async def test_pairing_with_device(self): self.handle =", "Protocol.MRP, self.loop) self.assertIsNone(self.service.credentials) self.assertTrue(self.handle.device_provides_pin) await self.handle.begin() self.handle.pin(PIN_CODE) await self.handle.finish() self.assertTrue(self.handle.has_paired)", "aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop import pyatv from pyatv import exceptions", "+ 1) with self.assertRaises(exceptions.PairingError): await self.handle.finish() self.assertFalse(self.handle.has_paired) self.assertFalse(self.state.has_paired) self.assertIsNone(self.service.credentials) @unittest_run_loop", "pyatv.pair(self.conf, Protocol.MRP, self.loop) self.assertIsNone(self.service.credentials) self.assertTrue(self.handle.device_provides_pin) await self.handle.begin() self.handle.pin(PIN_CODE) await self.handle.finish()", "await pyatv.pair(self.conf, Protocol.MRP, self.loop) self.assertIsNone(self.service.credentials) self.assertTrue(self.handle.device_provides_pin) await self.handle.begin() self.handle.pin(PIN_CODE +", "authentication tests with fake MRP Apple TV.\"\"\" import inspect from", "self.state, self.usecase = self.fake_atv.add_service(Protocol.MRP) return self.fake_atv.app @unittest_run_loop async def test_pairing_with_device(self):", "self.assertTrue(self.state.has_paired) self.assertIsNotNone(self.service.credentials) @unittest_run_loop async def test_pairing_with_bad_pin(self): self.handle = await 
pyatv.pair(self.conf,", "await pyatv.pair(self.conf, Protocol.MRP, self.loop) self.assertIsNone(self.service.credentials) self.assertTrue(self.handle.device_provides_pin) await self.handle.begin() self.handle.pin(PIN_CODE) await", "from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop import pyatv from pyatv import", "tests with fake MRP Apple TV.\"\"\" import inspect from aiohttp.test_utils", "self.handle.finish() self.assertTrue(self.handle.has_paired) self.assertTrue(self.state.has_paired) self.assertIsNotNone(self.service.credentials) @unittest_run_loop async def test_pairing_with_existing_credentials(self): self.service.credentials =", "Apple TV.\"\"\" import inspect from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop import", "AppleTV from pyatv.mrp.server_auth import PIN_CODE, CLIENT_IDENTIFIER, CLIENT_CREDENTIALS from tests.fake_device import", "pyatv import exceptions from pyatv.const import Protocol from pyatv.conf import", "self.service = MrpService( CLIENT_IDENTIFIER, self.fake_atv.get_port(Protocol.MRP) ) self.conf = AppleTV(\"127.0.0.1\", \"Apple", "self.assertFalse(self.state.has_paired) self.assertIsNone(self.service.credentials) @unittest_run_loop async def test_authentication(self): self.service.credentials = CLIENT_CREDENTIALS self.handle", "self.assertIsNone(self.service.credentials) @unittest_run_loop async def test_authentication(self): self.service.credentials = CLIENT_CREDENTIALS self.handle =", "await self.handle.close() else: self.handle.close() await super().tearDownAsync() async def get_application(self, loop=None):", "= MrpService( CLIENT_IDENTIFIER, self.fake_atv.get_port(Protocol.MRP) ) self.conf = AppleTV(\"127.0.0.1\", \"Apple TV\")", "async def get_application(self, loop=None): self.fake_atv = FakeAppleTV(self.loop) self.state, self.usecase =", "async def test_pairing_with_existing_credentials(self): self.service.credentials = CLIENT_CREDENTIALS self.handle = await pyatv.pair(self.conf,", "exceptions from 
pyatv.const import Protocol from pyatv.conf import MrpService, AppleTV", "\"\"\"Functional authentication tests with fake MRP Apple TV.\"\"\" import inspect", "setUp(self): AioHTTPTestCase.setUp(self) self.service = MrpService( CLIENT_IDENTIFIER, self.fake_atv.get_port(Protocol.MRP) ) self.conf =", "= await pyatv.pair(self.conf, Protocol.MRP, self.loop) self.assertIsNone(self.service.credentials) self.assertTrue(self.handle.device_provides_pin) await self.handle.begin() self.handle.pin(PIN_CODE)", "else: self.handle.close() await super().tearDownAsync() async def get_application(self, loop=None): self.fake_atv =", "def get_application(self, loop=None): self.fake_atv = FakeAppleTV(self.loop) self.state, self.usecase = self.fake_atv.add_service(Protocol.MRP)", "await self.handle.finish() self.assertTrue(self.handle.has_paired) self.assertTrue(self.state.has_paired) self.assertIsNotNone(self.service.credentials) @unittest_run_loop async def test_pairing_with_bad_pin(self): self.handle", "def test_pairing_with_bad_pin(self): self.handle = await pyatv.pair(self.conf, Protocol.MRP, self.loop) self.assertIsNone(self.service.credentials) self.assertTrue(self.handle.device_provides_pin)", "pyatv.const import Protocol from pyatv.conf import MrpService, AppleTV from pyatv.mrp.server_auth", "self.assertTrue(self.handle.has_paired) self.assertTrue(self.state.has_paired) self.assertIsNotNone(self.service.credentials) @unittest_run_loop async def test_pairing_with_existing_credentials(self): self.service.credentials = CLIENT_CREDENTIALS", "unittest_run_loop import pyatv from pyatv import exceptions from pyatv.const import", "self.handle.finish() self.assertTrue(self.handle.has_paired) self.assertTrue(self.state.has_paired) self.assertIsNotNone(self.service.credentials) @unittest_run_loop async def test_pairing_with_bad_pin(self): self.handle =", "await self.handle.begin() self.handle.pin(PIN_CODE + 1) with self.assertRaises(exceptions.PairingError): await self.handle.finish() 
self.assertFalse(self.handle.has_paired)", "CLIENT_IDENTIFIER, self.fake_atv.get_port(Protocol.MRP) ) self.conf = AppleTV(\"127.0.0.1\", \"Apple TV\") self.conf.add_service(self.service) async", "MrpService, AppleTV from pyatv.mrp.server_auth import PIN_CODE, CLIENT_IDENTIFIER, CLIENT_CREDENTIALS from tests.fake_device", "FakeAppleTV(self.loop) self.state, self.usecase = self.fake_atv.add_service(Protocol.MRP) return self.fake_atv.app @unittest_run_loop async def", "self.handle.finish() self.assertFalse(self.handle.has_paired) self.assertFalse(self.state.has_paired) self.assertIsNone(self.service.credentials) @unittest_run_loop async def test_authentication(self): self.service.credentials =", "inspect from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop import pyatv from pyatv", "async def test_pairing_with_device(self): self.handle = await pyatv.pair(self.conf, Protocol.MRP, self.loop) self.assertIsNone(self.service.credentials)", "pyatv.pair(self.conf, Protocol.MRP, self.loop) self.assertFalse(self.handle.has_paired) self.assertIsNotNone(self.service.credentials) self.assertTrue(self.handle.device_provides_pin) await self.handle.begin() self.handle.pin(PIN_CODE) await", "= await pyatv.pair(self.conf, Protocol.MRP, self.loop) self.assertIsNone(self.service.credentials) self.assertTrue(self.handle.device_provides_pin) await self.handle.begin() self.handle.pin(PIN_CODE", "tests.fake_device import FakeAppleTV class MrpAuthFunctionalTest(AioHTTPTestCase): def setUp(self): AioHTTPTestCase.setUp(self) self.service =", "inspect.iscoroutinefunction(self.handle.close): await self.handle.close() else: self.handle.close() await super().tearDownAsync() async def get_application(self,", "self.assertTrue(self.handle.device_provides_pin) await self.handle.begin() self.handle.pin(PIN_CODE + 1) with self.assertRaises(exceptions.PairingError): await self.handle.finish()", "self.handle.close() await super().tearDownAsync() async def get_application(self, loop=None): 
self.fake_atv = FakeAppleTV(self.loop)", "self.assertFalse(self.handle.has_paired) self.assertFalse(self.state.has_paired) self.assertIsNone(self.service.credentials) @unittest_run_loop async def test_authentication(self): self.service.credentials = CLIENT_CREDENTIALS", "self.handle.begin() self.handle.pin(PIN_CODE) await self.handle.finish() self.assertTrue(self.handle.has_paired) self.assertTrue(self.state.has_paired) self.assertIsNotNone(self.service.credentials) @unittest_run_loop async def", "import AioHTTPTestCase, unittest_run_loop import pyatv from pyatv import exceptions from", "await self.handle.finish() self.assertTrue(self.handle.has_paired) self.assertTrue(self.state.has_paired) self.assertIsNotNone(self.service.credentials) @unittest_run_loop async def test_pairing_with_existing_credentials(self): self.service.credentials", "get_application(self, loop=None): self.fake_atv = FakeAppleTV(self.loop) self.state, self.usecase = self.fake_atv.add_service(Protocol.MRP) return", "self.handle.begin() self.handle.pin(PIN_CODE + 1) with self.assertRaises(exceptions.PairingError): await self.handle.finish() self.assertFalse(self.handle.has_paired) self.assertFalse(self.state.has_paired)", "CLIENT_IDENTIFIER, CLIENT_CREDENTIALS from tests.fake_device import FakeAppleTV class MrpAuthFunctionalTest(AioHTTPTestCase): def setUp(self):", "1) with self.assertRaises(exceptions.PairingError): await self.handle.finish() self.assertFalse(self.handle.has_paired) self.assertFalse(self.state.has_paired) self.assertIsNone(self.service.credentials) @unittest_run_loop async", "@unittest_run_loop async def test_authentication(self): self.service.credentials = CLIENT_CREDENTIALS self.handle = await", "self.assertTrue(self.state.has_paired) self.assertIsNotNone(self.service.credentials) @unittest_run_loop async def test_pairing_with_existing_credentials(self): self.service.credentials = CLIENT_CREDENTIALS self.handle", "super().tearDownAsync() async def get_application(self, 
loop=None): self.fake_atv = FakeAppleTV(self.loop) self.state, self.usecase", "with self.assertRaises(exceptions.PairingError): await self.handle.finish() self.assertFalse(self.handle.has_paired) self.assertFalse(self.state.has_paired) self.assertIsNone(self.service.credentials) @unittest_run_loop async def", "loop=None): self.fake_atv = FakeAppleTV(self.loop) self.state, self.usecase = self.fake_atv.add_service(Protocol.MRP) return self.fake_atv.app", "await self.handle.finish() self.assertFalse(self.handle.has_paired) self.assertFalse(self.state.has_paired) self.assertIsNone(self.service.credentials) @unittest_run_loop async def test_authentication(self): self.service.credentials", "@unittest_run_loop async def test_pairing_with_existing_credentials(self): self.service.credentials = CLIENT_CREDENTIALS self.handle = await", "Protocol from pyatv.conf import MrpService, AppleTV from pyatv.mrp.server_auth import PIN_CODE,", "if inspect.iscoroutinefunction(self.handle.close): await self.handle.close() else: self.handle.close() await super().tearDownAsync() async def", "self.loop) self.assertIsNone(self.service.credentials) self.assertTrue(self.handle.device_provides_pin) await self.handle.begin() self.handle.pin(PIN_CODE) await self.handle.finish() self.assertTrue(self.handle.has_paired) self.assertTrue(self.state.has_paired)", "import MrpService, AppleTV from pyatv.mrp.server_auth import PIN_CODE, CLIENT_IDENTIFIER, CLIENT_CREDENTIALS from", "self.assertIsNone(self.service.credentials) self.assertTrue(self.handle.device_provides_pin) await self.handle.begin() self.handle.pin(PIN_CODE) await self.handle.finish() self.assertTrue(self.handle.has_paired) self.assertTrue(self.state.has_paired) self.assertIsNotNone(self.service.credentials)", "async def test_pairing_with_bad_pin(self): self.handle = await pyatv.pair(self.conf, Protocol.MRP, self.loop) self.assertIsNone(self.service.credentials)", "import FakeAppleTV class MrpAuthFunctionalTest(AioHTTPTestCase): def 
setUp(self): AioHTTPTestCase.setUp(self) self.service = MrpService(", "import pyatv from pyatv import exceptions from pyatv.const import Protocol", "from pyatv.mrp.server_auth import PIN_CODE, CLIENT_IDENTIFIER, CLIENT_CREDENTIALS from tests.fake_device import FakeAppleTV", "self.handle.close() else: self.handle.close() await super().tearDownAsync() async def get_application(self, loop=None): self.fake_atv", "test_pairing_with_bad_pin(self): self.handle = await pyatv.pair(self.conf, Protocol.MRP, self.loop) self.assertIsNone(self.service.credentials) self.assertTrue(self.handle.device_provides_pin) await", "tearDownAsync(self): if inspect.iscoroutinefunction(self.handle.close): await self.handle.close() else: self.handle.close() await super().tearDownAsync() async", "import PIN_CODE, CLIENT_IDENTIFIER, CLIENT_CREDENTIALS from tests.fake_device import FakeAppleTV class MrpAuthFunctionalTest(AioHTTPTestCase):", "self.assertIsNotNone(self.service.credentials) @unittest_run_loop async def test_pairing_with_existing_credentials(self): self.service.credentials = CLIENT_CREDENTIALS self.handle =", "test_pairing_with_existing_credentials(self): self.service.credentials = CLIENT_CREDENTIALS self.handle = await pyatv.pair(self.conf, Protocol.MRP, self.loop)", "@unittest_run_loop async def test_pairing_with_bad_pin(self): self.handle = await pyatv.pair(self.conf, Protocol.MRP, self.loop)", "self.service.credentials = CLIENT_CREDENTIALS self.handle = await pyatv.pair(self.conf, Protocol.MRP, self.loop) self.assertFalse(self.handle.has_paired)", "\"Apple TV\") self.conf.add_service(self.service) async def tearDownAsync(self): if inspect.iscoroutinefunction(self.handle.close): await self.handle.close()", "def setUp(self): AioHTTPTestCase.setUp(self) self.service = MrpService( CLIENT_IDENTIFIER, self.fake_atv.get_port(Protocol.MRP) ) self.conf", "pyatv.conf import MrpService, AppleTV from pyatv.mrp.server_auth import PIN_CODE, CLIENT_IDENTIFIER, CLIENT_CREDENTIALS", "async def 
tearDownAsync(self): if inspect.iscoroutinefunction(self.handle.close): await self.handle.close() else: self.handle.close() await", "from pyatv.const import Protocol from pyatv.conf import MrpService, AppleTV from", "self.handle = await pyatv.pair(self.conf, Protocol.MRP, self.loop) self.assertIsNone(self.service.credentials) self.assertTrue(self.handle.device_provides_pin) await self.handle.begin()", "pyatv.pair(self.conf, Protocol.MRP, self.loop) self.assertIsNone(self.service.credentials) self.assertTrue(self.handle.device_provides_pin) await self.handle.begin() self.handle.pin(PIN_CODE + 1)", "= await pyatv.pair(self.conf, Protocol.MRP, self.loop) self.assertFalse(self.handle.has_paired) self.assertIsNotNone(self.service.credentials) self.assertTrue(self.handle.device_provides_pin) await self.handle.begin()", "with fake MRP Apple TV.\"\"\" import inspect from aiohttp.test_utils import", "CLIENT_CREDENTIALS from tests.fake_device import FakeAppleTV class MrpAuthFunctionalTest(AioHTTPTestCase): def setUp(self): AioHTTPTestCase.setUp(self)", "import Protocol from pyatv.conf import MrpService, AppleTV from pyatv.mrp.server_auth import", "self.loop) self.assertIsNone(self.service.credentials) self.assertTrue(self.handle.device_provides_pin) await self.handle.begin() self.handle.pin(PIN_CODE + 1) with self.assertRaises(exceptions.PairingError):", "<gh_stars>0 \"\"\"Functional authentication tests with fake MRP Apple TV.\"\"\" import", "self.assertIsNotNone(self.service.credentials) @unittest_run_loop async def test_pairing_with_bad_pin(self): self.handle = await pyatv.pair(self.conf, Protocol.MRP,", "class MrpAuthFunctionalTest(AioHTTPTestCase): def setUp(self): AioHTTPTestCase.setUp(self) self.service = MrpService( CLIENT_IDENTIFIER, self.fake_atv.get_port(Protocol.MRP)", "TV\") self.conf.add_service(self.service) async def tearDownAsync(self): if inspect.iscoroutinefunction(self.handle.close): await self.handle.close() else:", 
"self.fake_atv.add_service(Protocol.MRP) return self.fake_atv.app @unittest_run_loop async def test_pairing_with_device(self): self.handle = await", "CLIENT_CREDENTIALS self.handle = await pyatv.pair(self.conf, Protocol.MRP, self.loop) self.assertFalse(self.handle.has_paired) self.assertIsNotNone(self.service.credentials) self.assertTrue(self.handle.device_provides_pin)", "test_authentication(self): self.service.credentials = CLIENT_CREDENTIALS self.handle = await pyatv.connect(self.conf, self.loop) self.assertTrue(self.state.has_authenticated)", "await self.handle.begin() self.handle.pin(PIN_CODE) await self.handle.finish() self.assertTrue(self.handle.has_paired) self.assertTrue(self.state.has_paired) self.assertIsNotNone(self.service.credentials) @unittest_run_loop async" ]
[ "app_label = 'tests_app' class KeyConstructorUserModel(models.Model): property = models.ForeignKey(KeyConstructorUserProperty) class Meta:", "django.db import models class KeyConstructorUserProperty(models.Model): name = models.CharField(max_length=100) class Meta:", "'tests_app' class KeyConstructorUserModel(models.Model): property = models.ForeignKey(KeyConstructorUserProperty) class Meta: app_label =", "models.CharField(max_length=100) class Meta: app_label = 'tests_app' class KeyConstructorUserModel(models.Model): property =", "KeyConstructorUserProperty(models.Model): name = models.CharField(max_length=100) class Meta: app_label = 'tests_app' class", "class KeyConstructorUserProperty(models.Model): name = models.CharField(max_length=100) class Meta: app_label = 'tests_app'", "Meta: app_label = 'tests_app' class KeyConstructorUserModel(models.Model): property = models.ForeignKey(KeyConstructorUserProperty) class", "name = models.CharField(max_length=100) class Meta: app_label = 'tests_app' class KeyConstructorUserModel(models.Model):", "# -*- coding: utf-8 -*- from django.db import models class", "-*- from django.db import models class KeyConstructorUserProperty(models.Model): name = models.CharField(max_length=100)", "-*- coding: utf-8 -*- from django.db import models class KeyConstructorUserProperty(models.Model):", "from django.db import models class KeyConstructorUserProperty(models.Model): name = models.CharField(max_length=100) class", "class Meta: app_label = 'tests_app' class KeyConstructorUserModel(models.Model): property = models.ForeignKey(KeyConstructorUserProperty)", "= models.CharField(max_length=100) class Meta: app_label = 'tests_app' class KeyConstructorUserModel(models.Model): property", "utf-8 -*- from django.db import models class KeyConstructorUserProperty(models.Model): name =", "class KeyConstructorUserModel(models.Model): property = models.ForeignKey(KeyConstructorUserProperty) class Meta: app_label = 'tests_app'", "models class 
KeyConstructorUserProperty(models.Model): name = models.CharField(max_length=100) class Meta: app_label =", "= 'tests_app' class KeyConstructorUserModel(models.Model): property = models.ForeignKey(KeyConstructorUserProperty) class Meta: app_label", "import models class KeyConstructorUserProperty(models.Model): name = models.CharField(max_length=100) class Meta: app_label", "coding: utf-8 -*- from django.db import models class KeyConstructorUserProperty(models.Model): name" ]
[ "# type: (np.dtype) -> Union[int, float] \"\"\"Return minimum value that", "type we want to check limits for. :return: The specialized", "type: () -> float \"\"\"Provide maximum representable value by stored", "2.0 (the \"License\"); # you may not use this file", "The data type we want to check maximum value for.", "we want to check limits for. :return: The specialized class", "from __future__ import division from __future__ import print_function from __future__", "absolute_import from __future__ import division from __future__ import print_function from", "****************************************************************************** from __future__ import absolute_import from __future__ import division from", "representable value by stored data type. :return: The maximum value.", "for. :return: The minimum value. \"\"\" return cls._get_number_limits_class(cls._get_dtype(dtype)).min class FloatingPointLimits(object):", "stored data type. :return: The maximum value. \"\"\" return np.iinfo(self.data_type).max", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "Intel Corporation # # Licensed under the Apache License, Version", "np.iinfo(self.data_type).max @property def min(self): # type: () -> int \"\"\"Provide", "want to check limits for. :return: The specialized class instance", "check minimum value for. :return: The minimum value. \"\"\" return", "\"\"\"Class providing access to numeric limits for floating point data", "data type. :return: The maximum value. \"\"\" return np.iinfo(self.data_type).max @property", "to extract numerical limits for given data type.\"\"\" @staticmethod def", "return np.finfo(self.data_type).max @property def min(self): # type: () -> float", "= data_type(1) if isinstance(value, numbers.Integral): return IntegralLimits(data_type) elif isinstance(value, numbers.Real):", "to be wrapped. :return: The numpy dtype object. \"\"\" return", "The minimum value. 
\"\"\" return cls._get_number_limits_class(cls._get_dtype(dtype)).min class FloatingPointLimits(object): \"\"\"Class providing", "else: raise ValueError('NumericLimits: unsupported data type: <{}>.'.format(dtype.type)) @staticmethod def _get_dtype(dtype):", "data type to be wrapped. :return: The numpy dtype object.", "The specialized class instance providing numeric limits. \"\"\" data_type =", "use this file except in compliance with the License. #", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "IntegralLimits(data_type) elif isinstance(value, numbers.Real): return FloatingPointLimits(data_type) else: raise ValueError('NumericLimits: unsupported", "License. # You may obtain a copy of the License", "for given data type.\"\"\" @staticmethod def _get_number_limits_class(dtype): # type: (np.dtype)", "\"\"\"Provide maximum representable value by stored data type. :return: The", "-> Union[IntegralLimits, FloatingPointLimits] \"\"\"Return specialized class instance with limits set", "under the License is distributed on an \"AS IS\" BASIS,", "License for the specific language governing permissions and # limitations", "providing interface to extract numerical limits for given data type.\"\"\"", "access to numeric limits for floating point data types.\"\"\" def", "return cls._get_number_limits_class(cls._get_dtype(dtype)).max @classmethod def min(cls, dtype): # type: (np.dtype) ->", "data type.\"\"\" @staticmethod def _get_number_limits_class(dtype): # type: (np.dtype) -> Union[IntegralLimits,", "dtype object wrapping provided data type. :param dtype: The data", "Union class NumericLimits(object): \"\"\"Class providing interface to extract numerical limits", "unicode_literals import numpy as np import numbers from typing import", "type.\"\"\" @staticmethod def _get_number_limits_class(dtype): # type: (np.dtype) -> Union[IntegralLimits, FloatingPointLimits]", "in given data type. 
:param dtype: The data type we", "-> np.dtype \"\"\"Return numpy dtype object wrapping provided data type.", "type: () -> int \"\"\"Provide maximum representable value by stored", "from __future__ import absolute_import from __future__ import division from __future__", "The numpy dtype object. \"\"\" return dtype if isinstance(dtype, np.dtype)", "in compliance with the License. # You may obtain a", "be wrapped. :return: The numpy dtype object. \"\"\" return dtype", "ValueError('NumericLimits: unsupported data type: <{}>.'.format(dtype.type)) @staticmethod def _get_dtype(dtype): # type:", "software # distributed under the License is distributed on an", "_get_number_limits_class(dtype): # type: (np.dtype) -> Union[IntegralLimits, FloatingPointLimits] \"\"\"Return specialized class", "****************************************************************************** # Copyright 2018 Intel Corporation # # Licensed under", "data_type(1) if isinstance(value, numbers.Integral): return IntegralLimits(data_type) elif isinstance(value, numbers.Real): return", "def min(self): # type: () -> int \"\"\"Provide minimum representable", "given data type.\"\"\" @staticmethod def _get_number_limits_class(dtype): # type: (np.dtype) ->", "for given data type. :param dtype: The data type we", ":return: The maximum value. \"\"\" return np.finfo(self.data_type).max @property def min(self):", "minimum value. \"\"\" return np.finfo(self.data_type).min class IntegralLimits(object): \"\"\"Class providing access", "minimum value for. :return: The minimum value. \"\"\" return cls._get_number_limits_class(cls._get_dtype(dtype)).min", "\"\"\" return np.iinfo(self.data_type).max @property def min(self): # type: () ->", "return np.iinfo(self.data_type).max @property def min(self): # type: () -> int", "with limits set for given data type. 
:param dtype: The", "data_type @property def max(self): # type: () -> float \"\"\"Provide", "division from __future__ import print_function from __future__ import unicode_literals import", "dtype: The data type we want to check maximum value", "limits for floating point data types.\"\"\" def __init__(self, data_type): #", "value by stored data type. :return: The maximum value. \"\"\"", "<{}>.'.format(dtype.type)) @staticmethod def _get_dtype(dtype): # type: (Union[np.dtype, int, float]) ->", "import numbers from typing import Union class NumericLimits(object): \"\"\"Class providing", "\"\"\" return cls._get_number_limits_class(cls._get_dtype(dtype)).max @classmethod def min(cls, dtype): # type: (np.dtype)", "type: (np.dtype) -> Union[IntegralLimits, FloatingPointLimits] \"\"\"Return specialized class instance with", "by stored data type. :return: The maximum value. \"\"\" return", "typing import Union class NumericLimits(object): \"\"\"Class providing interface to extract", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "def min(self): # type: () -> float \"\"\"Provide minimum representable", "object wrapping provided data type. :param dtype: The data type", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", ":return: The numpy dtype object. \"\"\" return dtype if isinstance(dtype,", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "type: (np.dtype) -> Union[int, float] \"\"\"Return minimum value that can", "to in writing, software # distributed under the License is", "set for given data type. 
:param dtype: The data type", "# See the License for the specific language governing permissions", "dtype.type value = data_type(1) if isinstance(value, numbers.Integral): return IntegralLimits(data_type) elif", "language governing permissions and # limitations under the License. #", "or agreed to in writing, software # distributed under the", "value by stored data type. :return: The minimum value. \"\"\"", "max(self): # type: () -> float \"\"\"Provide maximum representable value", "required by applicable law or agreed to in writing, software", "@classmethod def max(cls, dtype): # type: (np.dtype) -> Union[int, float]", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "with the License. # You may obtain a copy of", "@property def min(self): # type: () -> int \"\"\"Provide minimum", "integral data types.\"\"\" def __init__(self, data_type): # type: (type) ->", "to check maximum value for. :return: The maximum value. \"\"\"", "we want to check minimum value for. :return: The minimum", "() -> int \"\"\"Provide maximum representable value by stored data", "import Union class NumericLimits(object): \"\"\"Class providing interface to extract numerical", "data_type): # type: (type) -> None self.data_type = data_type @property", "providing access to numeric limits for integral data types.\"\"\" def", "np.dtype \"\"\"Return numpy dtype object wrapping provided data type. :param", "compliance with the License. # You may obtain a copy", "agreed to in writing, software # distributed under the License", "from __future__ import print_function from __future__ import unicode_literals import numpy", "governing permissions and # limitations under the License. # ******************************************************************************", "that can be represented in given data type. 
:param dtype:", "from typing import Union class NumericLimits(object): \"\"\"Class providing interface to", "distributed under the License is distributed on an \"AS IS\"", "numbers.Integral): return IntegralLimits(data_type) elif isinstance(value, numbers.Real): return FloatingPointLimits(data_type) else: raise", "-> float \"\"\"Provide maximum representable value by stored data type.", "# type: () -> float \"\"\"Provide minimum representable value by", "instance providing numeric limits. \"\"\" data_type = dtype.type value =", "type: () -> float \"\"\"Provide minimum representable value by stored", "express or implied. # See the License for the specific", "except in compliance with the License. # You may obtain", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "not use this file except in compliance with the License.", "\"\"\"Return specialized class instance with limits set for given data", "writing, software # distributed under the License is distributed on", "type we want to check minimum value for. :return: The", "you may not use this file except in compliance with", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "value for. :return: The maximum value. \"\"\" return cls._get_number_limits_class(cls._get_dtype(dtype)).max @classmethod", "want to check maximum value for. :return: The maximum value.", "data type we want to check minimum value for. :return:", "CONDITIONS OF ANY KIND, either express or implied. # See", "import numpy as np import numbers from typing import Union", "limits. \"\"\" data_type = dtype.type value = data_type(1) if isinstance(value,", "def max(self): # type: () -> float \"\"\"Provide maximum representable", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "dtype): # type: (np.dtype) -> Union[int, float] \"\"\"Return maximum value", "The maximum value. 
\"\"\" return np.finfo(self.data_type).max @property def min(self): #", "-> float \"\"\"Provide minimum representable value by stored data type.", "class FloatingPointLimits(object): \"\"\"Class providing access to numeric limits for floating", "# type: () -> float \"\"\"Provide maximum representable value by", ":param dtype: The data type we want to check maximum", "cls._get_number_limits_class(cls._get_dtype(dtype)).min class FloatingPointLimits(object): \"\"\"Class providing access to numeric limits for", "(np.dtype) -> Union[int, float] \"\"\"Return maximum value that can be", "The minimum value. \"\"\" return np.finfo(self.data_type).min class IntegralLimits(object): \"\"\"Class providing", "def max(self): # type: () -> int \"\"\"Provide maximum representable", "NumericLimits(object): \"\"\"Class providing interface to extract numerical limits for given", "@staticmethod def _get_number_limits_class(dtype): # type: (np.dtype) -> Union[IntegralLimits, FloatingPointLimits] \"\"\"Return", "class NumericLimits(object): \"\"\"Class providing interface to extract numerical limits for", "isinstance(value, numbers.Integral): return IntegralLimits(data_type) elif isinstance(value, numbers.Real): return FloatingPointLimits(data_type) else:", "The data type we want to check minimum value for.", "under the License. # ****************************************************************************** from __future__ import absolute_import from", ":param dtype: The data type we want to check limits", "OR CONDITIONS OF ANY KIND, either express or implied. #", "The data type we want to check limits for. :return:", "the License is distributed on an \"AS IS\" BASIS, #", "int \"\"\"Provide maximum representable value by stored data type. :return:", "stored data type. :return: The minimum value. \"\"\" return np.finfo(self.data_type).min", "License. 
# ****************************************************************************** from __future__ import absolute_import from __future__ import", "numpy dtype object. \"\"\" return dtype if isinstance(dtype, np.dtype) else", "data type. :param dtype: The data type to be wrapped.", "FloatingPointLimits(data_type) else: raise ValueError('NumericLimits: unsupported data type: <{}>.'.format(dtype.type)) @staticmethod def", "dtype object. \"\"\" return dtype if isinstance(dtype, np.dtype) else np.dtype(dtype)", "type: <{}>.'.format(dtype.type)) @staticmethod def _get_dtype(dtype): # type: (Union[np.dtype, int, float])", "@classmethod def min(cls, dtype): # type: (np.dtype) -> Union[int, float]", "np.finfo(self.data_type).min class IntegralLimits(object): \"\"\"Class providing access to numeric limits for", "law or agreed to in writing, software # distributed under", "type to be wrapped. :return: The numpy dtype object. \"\"\"", "limits set for given data type. :param dtype: The data", "value. \"\"\" return cls._get_number_limits_class(cls._get_dtype(dtype)).min class FloatingPointLimits(object): \"\"\"Class providing access to", "Union[int, float] \"\"\"Return minimum value that can be represented in", "import division from __future__ import print_function from __future__ import unicode_literals", "(Union[np.dtype, int, float]) -> np.dtype \"\"\"Return numpy dtype object wrapping", "for floating point data types.\"\"\" def __init__(self, data_type): # type:", "\"\"\" return dtype if isinstance(dtype, np.dtype) else np.dtype(dtype) @classmethod def", "def min(cls, dtype): # type: (np.dtype) -> Union[int, float] \"\"\"Return", "2018 Intel Corporation # # Licensed under the Apache License,", "isinstance(dtype, np.dtype) else np.dtype(dtype) @classmethod def max(cls, dtype): # type:", "class instance providing numeric limits. \"\"\" data_type = dtype.type value", "may obtain a copy of the License at # #", "minimum value. 
\"\"\" return cls._get_number_limits_class(cls._get_dtype(dtype)).min class FloatingPointLimits(object): \"\"\"Class providing access", "maximum value for. :return: The maximum value. \"\"\" return cls._get_number_limits_class(cls._get_dtype(dtype)).max", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "floating point data types.\"\"\" def __init__(self, data_type): # type: (type)", "np.finfo(self.data_type).max @property def min(self): # type: () -> float \"\"\"Provide", "FloatingPointLimits(object): \"\"\"Class providing access to numeric limits for floating point", "may not use this file except in compliance with the", "value. \"\"\" return np.finfo(self.data_type).max @property def min(self): # type: ()", "Union[IntegralLimits, FloatingPointLimits] \"\"\"Return specialized class instance with limits set for", "-> Union[int, float] \"\"\"Return maximum value that can be represented", "@property def min(self): # type: () -> float \"\"\"Provide minimum", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "# ****************************************************************************** from __future__ import absolute_import from __future__ import division", "return FloatingPointLimits(data_type) else: raise ValueError('NumericLimits: unsupported data type: <{}>.'.format(dtype.type)) @staticmethod", "this file except in compliance with the License. # You", "maximum value. \"\"\" return cls._get_number_limits_class(cls._get_dtype(dtype)).max @classmethod def min(cls, dtype): #", "stored data type. :return: The minimum value. \"\"\" return np.iinfo(self.data_type).min", "import absolute_import from __future__ import division from __future__ import print_function", ":return: The maximum value. \"\"\" return np.iinfo(self.data_type).max @property def min(self):", "type. :param dtype: The data type we want to check", "type: () -> int \"\"\"Provide minimum representable value by stored", "representable value by stored data type. 
:return: The minimum value.", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "elif isinstance(value, numbers.Real): return FloatingPointLimits(data_type) else: raise ValueError('NumericLimits: unsupported data", ":param dtype: The data type we want to check minimum", "# # Licensed under the Apache License, Version 2.0 (the", "data type we want to check maximum value for. :return:", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "extract numerical limits for given data type.\"\"\" @staticmethod def _get_number_limits_class(dtype):", "check maximum value for. :return: The maximum value. \"\"\" return", "# Copyright 2018 Intel Corporation # # Licensed under the", "numeric limits. \"\"\" data_type = dtype.type value = data_type(1) if", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "limitations under the License. # ****************************************************************************** from __future__ import absolute_import", "float] \"\"\"Return maximum value that can be represented in given", "data_type = dtype.type value = data_type(1) if isinstance(value, numbers.Integral): return", "minimum value that can be represented in given data type.", "import print_function from __future__ import unicode_literals import numpy as np", "access to numeric limits for integral data types.\"\"\" def __init__(self,", "= dtype.type value = data_type(1) if isinstance(value, numbers.Integral): return IntegralLimits(data_type)", "maximum value. \"\"\" return np.finfo(self.data_type).max @property def min(self): # type:", "can be represented in given data type. :param dtype: The", "value for. :return: The minimum value. 
\"\"\" return cls._get_number_limits_class(cls._get_dtype(dtype)).min class", "float] \"\"\"Return minimum value that can be represented in given", "(type) -> None self.data_type = data_type @property def max(self): #", "__future__ import unicode_literals import numpy as np import numbers from", "__init__(self, data_type): # type: (type) -> None self.data_type = data_type", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "if isinstance(dtype, np.dtype) else np.dtype(dtype) @classmethod def max(cls, dtype): #", "_get_dtype(dtype): # type: (Union[np.dtype, int, float]) -> np.dtype \"\"\"Return numpy", "or implied. # See the License for the specific language", "maximum value that can be represented in given data type.", "__future__ import division from __future__ import print_function from __future__ import", "want to check minimum value for. :return: The minimum value.", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "data type we want to check limits for. :return: The", "# type: () -> int \"\"\"Provide minimum representable value by", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "object. \"\"\" return dtype if isinstance(dtype, np.dtype) else np.dtype(dtype) @classmethod", "from __future__ import unicode_literals import numpy as np import numbers", "min(cls, dtype): # type: (np.dtype) -> Union[int, float] \"\"\"Return minimum", "cls._get_number_limits_class(cls._get_dtype(dtype)).max @classmethod def min(cls, dtype): # type: (np.dtype) -> Union[int,", "be represented in given data type. :param dtype: The data", "data type: <{}>.'.format(dtype.type)) @staticmethod def _get_dtype(dtype): # type: (Union[np.dtype, int,", ":return: The minimum value. \"\"\" return np.finfo(self.data_type).min class IntegralLimits(object): \"\"\"Class", "value. 
\"\"\" return np.finfo(self.data_type).min class IntegralLimits(object): \"\"\"Class providing access to", "to numeric limits for floating point data types.\"\"\" def __init__(self,", "def _get_dtype(dtype): # type: (Union[np.dtype, int, float]) -> np.dtype \"\"\"Return", "as np import numbers from typing import Union class NumericLimits(object):", "(the \"License\"); # you may not use this file except", "class instance with limits set for given data type. :param", "# you may not use this file except in compliance", "data type. :return: The minimum value. \"\"\" return np.finfo(self.data_type).min class", "type. :return: The maximum value. \"\"\" return np.finfo(self.data_type).max @property def", "dtype: The data type we want to check limits for.", ":return: The specialized class instance providing numeric limits. \"\"\" data_type", "Union[int, float] \"\"\"Return maximum value that can be represented in", "def __init__(self, data_type): # type: (type) -> None self.data_type =", "# type: (np.dtype) -> Union[IntegralLimits, FloatingPointLimits] \"\"\"Return specialized class instance", "np.dtype) else np.dtype(dtype) @classmethod def max(cls, dtype): # type: (np.dtype)", "None self.data_type = data_type @property def max(self): # type: ()", "value. \"\"\" return cls._get_number_limits_class(cls._get_dtype(dtype)).max @classmethod def min(cls, dtype): # type:", "# # Unless required by applicable law or agreed to", "provided data type. :param dtype: The data type to be", "numpy as np import numbers from typing import Union class", "# type: (np.dtype) -> Union[int, float] \"\"\"Return maximum value that", "value that can be represented in given data type. 
:param", "# ****************************************************************************** # Copyright 2018 Intel Corporation # # Licensed", "-> None self.data_type = data_type @property def max(self): # type:", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "\"\"\"Class providing interface to extract numerical limits for given data", "maximum representable value by stored data type. :return: The maximum", "Version 2.0 (the \"License\"); # you may not use this", "float \"\"\"Provide maximum representable value by stored data type. :return:", "__future__ import absolute_import from __future__ import division from __future__ import", "implied. # See the License for the specific language governing", "specialized class instance with limits set for given data type.", "under the Apache License, Version 2.0 (the \"License\"); # you", "return IntegralLimits(data_type) elif isinstance(value, numbers.Real): return FloatingPointLimits(data_type) else: raise ValueError('NumericLimits:", ":return: The maximum value. \"\"\" return cls._get_number_limits_class(cls._get_dtype(dtype)).max @classmethod def min(cls,", "dtype: The data type we want to check minimum value", "numeric limits for integral data types.\"\"\" def __init__(self, data_type): #", "max(self): # type: () -> int \"\"\"Provide maximum representable value", "specialized class instance providing numeric limits. \"\"\" data_type = dtype.type", "type: (np.dtype) -> Union[int, float] \"\"\"Return maximum value that can", "by applicable law or agreed to in writing, software #", "wrapped. :return: The numpy dtype object. 
\"\"\" return dtype if", "numeric limits for floating point data types.\"\"\" def __init__(self, data_type):", "\"\"\" data_type = dtype.type value = data_type(1) if isinstance(value, numbers.Integral):", "\"\"\"Return minimum value that can be represented in given data", "\"\"\"Class providing access to numeric limits for integral data types.\"\"\"", "\"\"\"Return maximum value that can be represented in given data", "self.data_type = data_type @property def max(self): # type: () ->", "Copyright 2018 Intel Corporation # # Licensed under the Apache", "unsupported data type: <{}>.'.format(dtype.type)) @staticmethod def _get_dtype(dtype): # type: (Union[np.dtype,", "data type. :param dtype: The data type we want to", "return np.finfo(self.data_type).min class IntegralLimits(object): \"\"\"Class providing access to numeric limits", "int, float]) -> np.dtype \"\"\"Return numpy dtype object wrapping provided", "min(self): # type: () -> float \"\"\"Provide minimum representable value", "instance with limits set for given data type. :param dtype:", "(np.dtype) -> Union[int, float] \"\"\"Return minimum value that can be", "the License. # ****************************************************************************** from __future__ import absolute_import from __future__", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "= data_type @property def max(self): # type: () -> int", "Unless required by applicable law or agreed to in writing,", "# type: (Union[np.dtype, int, float]) -> np.dtype \"\"\"Return numpy dtype", "numerical limits for given data type.\"\"\" @staticmethod def _get_number_limits_class(dtype): #", "maximum value. \"\"\" return np.iinfo(self.data_type).max @property def min(self): # type:", "the specific language governing permissions and # limitations under the", "to check limits for. :return: The specialized class instance providing", "applicable law or agreed to in writing, software # distributed", "limits for. 
:return: The specialized class instance providing numeric limits.", "providing access to numeric limits for floating point data types.\"\"\"", "isinstance(value, numbers.Real): return FloatingPointLimits(data_type) else: raise ValueError('NumericLimits: unsupported data type:", "raise ValueError('NumericLimits: unsupported data type: <{}>.'.format(dtype.type)) @staticmethod def _get_dtype(dtype): #", ":return: The minimum value. \"\"\" return cls._get_number_limits_class(cls._get_dtype(dtype)).min class FloatingPointLimits(object): \"\"\"Class", "dtype: The data type to be wrapped. :return: The numpy", "\"\"\" return cls._get_number_limits_class(cls._get_dtype(dtype)).min class FloatingPointLimits(object): \"\"\"Class providing access to numeric", "() -> float \"\"\"Provide minimum representable value by stored data", "limits for given data type.\"\"\" @staticmethod def _get_number_limits_class(dtype): # type:", "type. :return: The maximum value. \"\"\" return np.iinfo(self.data_type).max @property def", "in writing, software # distributed under the License is distributed", "The data type to be wrapped. :return: The numpy dtype", "we want to check maximum value for. :return: The maximum", "type we want to check maximum value for. :return: The", "dtype if isinstance(dtype, np.dtype) else np.dtype(dtype) @classmethod def max(cls, dtype):", "= data_type @property def max(self): # type: () -> float", "numpy dtype object wrapping provided data type. :param dtype: The", "and # limitations under the License. # ****************************************************************************** from __future__", "check limits for. :return: The specialized class instance providing numeric", "providing numeric limits. 
\"\"\" data_type = dtype.type value = data_type(1)", "() -> float \"\"\"Provide maximum representable value by stored data", "type: (type) -> None self.data_type = data_type @property def max(self):", "np import numbers from typing import Union class NumericLimits(object): \"\"\"Class", "-> Union[int, float] \"\"\"Return minimum value that can be represented", "float]) -> np.dtype \"\"\"Return numpy dtype object wrapping provided data", "max(cls, dtype): # type: (np.dtype) -> Union[int, float] \"\"\"Return maximum", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "data_type @property def max(self): # type: () -> int \"\"\"Provide", "License, Version 2.0 (the \"License\"); # you may not use", "type: (Union[np.dtype, int, float]) -> np.dtype \"\"\"Return numpy dtype object", "# You may obtain a copy of the License at", "type. :return: The minimum value. \"\"\" return np.finfo(self.data_type).min class IntegralLimits(object):", "numbers from typing import Union class NumericLimits(object): \"\"\"Class providing interface", "def _get_number_limits_class(dtype): # type: (np.dtype) -> Union[IntegralLimits, FloatingPointLimits] \"\"\"Return specialized", "int \"\"\"Provide minimum representable value by stored data type. :return:", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "for. :return: The specialized class instance providing numeric limits. \"\"\"", "(np.dtype) -> Union[IntegralLimits, FloatingPointLimits] \"\"\"Return specialized class instance with limits", "@staticmethod def _get_dtype(dtype): # type: (Union[np.dtype, int, float]) -> np.dtype", "The maximum value. \"\"\" return cls._get_number_limits_class(cls._get_dtype(dtype)).max @classmethod def min(cls, dtype):", "stored data type. :return: The maximum value. 
\"\"\" return np.finfo(self.data_type).max", "the License for the specific language governing permissions and #", "# type: (type) -> None self.data_type = data_type @property def", "if isinstance(value, numbers.Integral): return IntegralLimits(data_type) elif isinstance(value, numbers.Real): return FloatingPointLimits(data_type)", "Apache License, Version 2.0 (the \"License\"); # you may not", "either express or implied. # See the License for the", "import unicode_literals import numpy as np import numbers from typing", "type. :param dtype: The data type to be wrapped. :return:", "limits for integral data types.\"\"\" def __init__(self, data_type): # type:", "class IntegralLimits(object): \"\"\"Class providing access to numeric limits for integral", "min(self): # type: () -> int \"\"\"Provide minimum representable value", "for. :return: The maximum value. \"\"\" return cls._get_number_limits_class(cls._get_dtype(dtype)).max @classmethod def", "-> int \"\"\"Provide maximum representable value by stored data type.", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "return cls._get_number_limits_class(cls._get_dtype(dtype)).min class FloatingPointLimits(object): \"\"\"Class providing access to numeric limits", "interface to extract numerical limits for given data type.\"\"\" @staticmethod", "\"\"\" return np.finfo(self.data_type).min class IntegralLimits(object): \"\"\"Class providing access to numeric", "numbers.Real): return FloatingPointLimits(data_type) else: raise ValueError('NumericLimits: unsupported data type: <{}>.'.format(dtype.type))", ":param dtype: The data type to be wrapped. :return: The", "wrapping provided data type. :param dtype: The data type to", "point data types.\"\"\" def __init__(self, data_type): # type: (type) ->", "def max(cls, dtype): # type: (np.dtype) -> Union[int, float] \"\"\"Return", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "given data type. 
:param dtype: The data type we want", "print_function from __future__ import unicode_literals import numpy as np import", "# limitations under the License. # ****************************************************************************** from __future__ import", "@property def max(self): # type: () -> float \"\"\"Provide maximum", "to numeric limits for integral data types.\"\"\" def __init__(self, data_type):", "permissions and # limitations under the License. # ****************************************************************************** from", "float \"\"\"Provide minimum representable value by stored data type. :return:", "\"\"\" return np.finfo(self.data_type).max @property def min(self): # type: () ->", "\"License\"); # you may not use this file except in", "\"\"\"Provide minimum representable value by stored data type. :return: The", "value = data_type(1) if isinstance(value, numbers.Integral): return IntegralLimits(data_type) elif isinstance(value,", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "\"\"\"Return numpy dtype object wrapping provided data type. :param dtype:", "() -> int \"\"\"Provide minimum representable value by stored data", "# distributed under the License is distributed on an \"AS", "else np.dtype(dtype) @classmethod def max(cls, dtype): # type: (np.dtype) ->", "@property def max(self): # type: () -> int \"\"\"Provide maximum", "by stored data type. :return: The minimum value. \"\"\" return", "represented in given data type. :param dtype: The data type", "# Unless required by applicable law or agreed to in", "value. 
\"\"\" return np.iinfo(self.data_type).max @property def min(self): # type: ()", "FloatingPointLimits] \"\"\"Return specialized class instance with limits set for given", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "# type: () -> int \"\"\"Provide maximum representable value by", "return dtype if isinstance(dtype, np.dtype) else np.dtype(dtype) @classmethod def max(cls,", "data types.\"\"\" def __init__(self, data_type): # type: (type) -> None", "-> int \"\"\"Provide minimum representable value by stored data type.", "You may obtain a copy of the License at #", "for integral data types.\"\"\" def __init__(self, data_type): # type: (type)", "minimum representable value by stored data type. :return: The minimum", "data type. :return: The maximum value. \"\"\" return np.finfo(self.data_type).max @property", "__future__ import print_function from __future__ import unicode_literals import numpy as", "np.dtype(dtype) @classmethod def max(cls, dtype): # type: (np.dtype) -> Union[int,", "dtype): # type: (np.dtype) -> Union[int, float] \"\"\"Return minimum value", "Corporation # # Licensed under the Apache License, Version 2.0", "types.\"\"\" def __init__(self, data_type): # type: (type) -> None self.data_type", "The maximum value. \"\"\" return np.iinfo(self.data_type).max @property def min(self): #", "the Apache License, Version 2.0 (the \"License\"); # you may", "to check minimum value for. :return: The minimum value. \"\"\"", "IntegralLimits(object): \"\"\"Class providing access to numeric limits for integral data" ]
[ "code must retain the above copyright notice, this list of", "provided that the following conditions are met: # # Redistributions", "following disclaimer. # Redistributions in binary form must reproduce the", "for a particular purpose are disclaimed. In no event shall", ": vertex connectivity FaceNormals : Outer Normal per face, having", "according to N.Max [1999] for normals wfv1=FaceNormals/(L2[:,1]*L2[:,2])[:,np.newaxis] wfv2=FaceNormals/(L2[:,2]*L2[:,0])[:,np.newaxis] wfv3=FaceNormals/(L2[:,0]*L2[:,1])[:,np.newaxis] #", "- x[:, 1] * y[:, 0]] else: return np.cross(x, y)", "this software, even if advised of the possibility of such", "from this software without specific prior written permission. # #", "# # Redistribution and use in source and binary forms,", "# # Redistributions of source code must retain the above", "#Calculate weights according to N.Max [1999] for normals wfv1=FaceNormals/(L2[:,1]*L2[:,2])[:,np.newaxis] wfv2=FaceNormals/(L2[:,2]*L2[:,0])[:,np.newaxis]", "1] * y[:, 0]] else: return np.cross(x, y) def normr(vec):", "if max([x.shape[0], y.shape[0]]) >= 500: return np.c_[x[:, 1] * y[:,", "VertNormals : Unit normal at the vertex \"\"\" VertNormals =np.zeros(vertices.shape)", "OSI Certified is a certification mark of the Open Source", "source code must retain the above copyright notice, this list", "return np.cross(x, y) def normr(vec): \"\"\" Normalizes an array of", "tort (including negligence or otherwise) arising in any way out", "verts=faces.T[2] for j in [0,1,2]: VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv3[:,j]) VertNormals=normr(VertNormals) return VertNormals def", "np.bincount(verts, minlength=npts) reord = np.argsort(verts) tri_idx = np.unravel_index(reord, (len(tris), 3))[0]", "copyright owner or contributors be liable for any direct, indirect,", "a np array of vectors to unit vectors \"\"\" return", "implied warranties, including, but not limited to, the implied warranties", "or consequential damages 
(including, but not limited to, procurement of", "this['tris'][p] # this['neighbor_tri'][verts[0]].append(p) # this['neighbor_tri'][verts[1]].append(p) # this['neighbor_tri'][verts[2]].append(p) # this['neighbor_tri'] =", "owner or contributors be liable for any direct, indirect, incidental,", "even if advised of the possibility of such damage. def", "= [list() for _ in xrange(this['np'])] # for p in", "binary forms, with or without modification, are permitted provided that", "0] - x[:, 0] * y[:, 2], x[:, 0] *", "the Open Source Initiative. # # Copyright (c) 2011-2019, authors", "wfv3=FaceNormals/(L2[:,0]*L2[:,1])[:,np.newaxis] # #Calculate the weights according to MWA for normals", "This software is OSI Certified Open Source Software. OSI Certified", "1] * y[:, 2] - x[:, 2] * y[:, 1],", "the copyright owner or contributors be liable for any direct,", "\"\"\" if max([x.shape[0], y.shape[0]]) >= 500: return np.c_[x[:, 1] *", "a particular purpose are disclaimed. In no event shall the", "with the distribution. # Neither the names of MNE-Python authors", "array of vectors. e.g. to convert a np array of", "1], x[:, 2] * y[:, 0] - x[:, 0] *", "cross product between list of 3D vectors Input x :", "the possibility of such damage. def triangle_neighbors(tris, npts): \"\"\"Efficiently compute", "= np.bincount(verts, minlength=npts) reord = np.argsort(verts) tri_idx = np.unravel_index(reord, (len(tris),", "that the following conditions are met: # # Redistributions of", "else: return np.cross(x, y) def normr(vec): \"\"\" Normalizes an array", "this list of conditions and the following disclaimer. # Redistributions", "conditions are met: # # Redistributions of source code must", "= [np.array(nb, int) for nb in this['neighbor_tri']] # verts =", "the following, but is faster (vectorized): # # this['neighbor_tri'] =", "interruption) however caused and on any theory of liability, whether", "Software. 
OSI Certified is a certification mark of the Open", "other materials provided with the distribution. # Neither the names", "the weights according to MWA for normals # wfv1=FaceNormals*np.arcsin(2*Af/(de1*de2))[:,np.newaxis] #", "are met: # # Redistributions of source code must retain", "np array of vectors to unit vectors \"\"\" return vec/np.sqrt((vec**2).sum(axis=1))[:,np.newaxis]", "return verts def GetVertexNormals(vertices,faces,FaceNormals,e0,e1,e2): \"\"\" INPUT: Vertices : vertices Faces", "of source code must retain the above copyright notice, this", "The first two functions are modified from MNE surface project.", "np.cumsum(np.r_[0, counts]) # the sort below slows it down a", "names of MNE-Python authors nor the names of any contributors", "any theory of liability, whether in contract, strict liability, or", "the following conditions are met: # # Redistributions of source", "of such damage. def triangle_neighbors(tris, npts): \"\"\"Efficiently compute vertex neighboring", "sort below slows it down a bit, but is needed", "v1, v2 in zip(idx[:-1], idx[1:])]) return neighbor_tri def get_surf_neighbors(tris,neighbor_tri, k):", "in zip(idx[:-1], idx[1:])]) return neighbor_tri def get_surf_neighbors(tris,neighbor_tri, k): \"\"\"Get vertices", "vectors Input x : Mx3 array y : Mx3 array", "faster (vectorized): # # this['neighbor_tri'] = [list() for _ in", "materials provided with the distribution. # Neither the names of", "Copyright (c) 2011-2019, authors of MNE-Python. All rights reserved. 
#", "verts = this['tris'][p] # this['neighbor_tri'][verts[0]].append(p) # this['neighbor_tri'][verts[1]].append(p) # this['neighbor_tri'][verts[2]].append(p) #", "de2=np.sqrt(e2[:,0]**2+e2[:,1]**2+e2[:,2]**2) L2=np.c_[de0**2,de1**2,de2**2] #Calculate weights according to N.Max [1999] for normals", "1] - x[:, 1] * y[:, 0]] else: return np.cross(x,", "is needed for equivalence neighbor_tri = np.array([np.sort(tri_idx[v1:v2]) for v1, v2", "<filename>curvpack/utils.py import numpy as np # The first two functions", "# wfv1=FaceNormals*np.arcsin(2*Af/(de1*de2))[:,np.newaxis] # wfv2=FaceNormals*np.arcsin(2*Af/(de2*de0))[:,np.newaxis] # wfv3=FaceNormals*np.arcsin(2*Af/(de0*de1))[:,np.newaxis] verts=faces.T[0] for j in", "of merchantability and fitness for a particular purpose are disclaimed.", "written permission. # # This software is provided by the", "VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv3[:,j]) VertNormals=normr(VertNormals) return VertNormals def fastcross(x, y): \"\"\"Compute cross product", "disclaimed. In no event shall the copyright owner or contributors", "product between list of 3D vectors Input x : Mx3", "certification mark of the Open Source Initiative. # # Copyright", "to, the implied warranties of merchantability and fitness for a", "derived from this software without specific prior written permission. #", "must retain the above copyright notice, this list of conditions", "notice, this list of conditions and the following disclaimer. #", "tris[neighbor_tri[k]] verts = np.setdiff1d(verts, [k], assume_unique=False) nneighbors = len(verts) return", "Cross product of x and y. \"\"\" if max([x.shape[0], y.shape[0]])", "met: # # Redistributions of source code must retain the", "and/or other materials provided with the distribution. 
# Neither the", "500: return np.c_[x[:, 1] * y[:, 2] - x[:, 2]", "vertices of 1-ring \"\"\" verts = tris[neighbor_tri[k]] verts = np.setdiff1d(verts,", "warranties of merchantability and fitness for a particular purpose are", "or without modification, are permitted provided that the following conditions", "this['neighbor_tri'][verts[2]].append(p) # this['neighbor_tri'] = [np.array(nb, int) for nb in this['neighbor_tri']]", "a given vertex \"\"\" # this code replaces the following,", "Mx3 array Output z : Mx3 array Cross product of", "L2=np.c_[de0**2,de1**2,de2**2] #Calculate weights according to N.Max [1999] for normals wfv1=FaceNormals/(L2[:,1]*L2[:,2])[:,np.newaxis]", "# the sort below slows it down a bit, but", "INPUT: Vertices : vertices Faces : vertex connectivity FaceNormals :", "* y[:, 2], x[:, 0] * y[:, 1] - x[:,", "this['neighbor_tri'] = [list() for _ in xrange(this['np'])] # for p", "triangles in the 1-ring of a given vertex \"\"\" #", "= np.setdiff1d(verts, [k], assume_unique=False) nneighbors = len(verts) return verts def", "for j in [0,1,2]: VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv3[:,j]) VertNormals=normr(VertNormals) return VertNormals def fastcross(x,", "# # this['neighbor_tri'] = [list() for _ in xrange(this['np'])] #", "verts = np.setdiff1d(verts, [k], assume_unique=False) nneighbors = len(verts) return verts", "any contributors may be used to endorse or promote products", "np.argsort(verts) tri_idx = np.unravel_index(reord, (len(tris), 3))[0] idx = np.cumsum(np.r_[0, counts])", "the following disclaimer. # Redistributions in binary form must reproduce", ": Mx3 array Output z : Mx3 array Cross product", "np.setdiff1d(verts, [k], assume_unique=False) nneighbors = len(verts) return verts def GetVertexNormals(vertices,faces,FaceNormals,e0,e1,e2):", "triangles. 
Returns the triangles in the 1-ring of a given", "verts = tris[neighbor_tri[k]] verts = np.setdiff1d(verts, [k], assume_unique=False) nneighbors =", "liability, or tort (including negligence or otherwise) arising in any", "normal at the vertex \"\"\" VertNormals =np.zeros(vertices.shape) #edge lengths de0=np.sqrt(e0[:,0]**2+e0[:,1]**2+e0[:,2]**2)", "retain the above copyright notice, this list of conditions and", "products derived from this software without specific prior written permission.", "Mx3 array Cross product of x and y. \"\"\" if", "x[:, 2] * y[:, 0] - x[:, 0] * y[:,", "of the possibility of such damage. def triangle_neighbors(tris, npts): \"\"\"Efficiently", "import numpy as np # The first two functions are", "of MNE-Python. All rights reserved. # # Redistribution and use", "neighboring triangles. Returns the triangles in the 1-ring of a", "v2 in zip(idx[:-1], idx[1:])]) return neighbor_tri def get_surf_neighbors(tris,neighbor_tri, k): \"\"\"Get", "and y. \"\"\" if max([x.shape[0], y.shape[0]]) >= 500: return np.c_[x[:,", "for p in xrange(this['ntri']): # verts = this['tris'][p] # this['neighbor_tri'][verts[0]].append(p)", "wfv2=FaceNormals*np.arcsin(2*Af/(de2*de0))[:,np.newaxis] # wfv3=FaceNormals*np.arcsin(2*Af/(de0*de1))[:,np.newaxis] verts=faces.T[0] for j in [0,1,2]: VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv1[:,j]) verts=faces.T[1]", "y[:, 2], x[:, 0] * y[:, 1] - x[:, 1]", "the above copyright notice, this list of conditions and the", "Redistributions of source code must retain the above copyright notice,", "by the copyright holders and contributors \"as is\" and any", "[0,1,2]: VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv3[:,j]) VertNormals=normr(VertNormals) return VertNormals def fastcross(x, y): \"\"\"Compute cross", "copyright holders and contributors \"as is\" and any express or", "vectors OUTPUT: VertNormals : Unit normal at the vertex \"\"\"", "VertNormals 
=np.zeros(vertices.shape) #edge lengths de0=np.sqrt(e0[:,0]**2+e0[:,1]**2+e0[:,2]**2) de1=np.sqrt(e1[:,0]**2+e1[:,1]**2+e1[:,2]**2) de2=np.sqrt(e2[:,0]**2+e2[:,1]**2+e2[:,2]**2) L2=np.c_[de0**2,de1**2,de2**2] #Calculate weights", "Source Software. OSI Certified is a certification mark of the", "for normals # wfv1=FaceNormals*np.arcsin(2*Af/(de1*de2))[:,np.newaxis] # wfv2=FaceNormals*np.arcsin(2*Af/(de2*de0))[:,np.newaxis] # wfv3=FaceNormals*np.arcsin(2*Af/(de0*de1))[:,np.newaxis] verts=faces.T[0] for", "return np.c_[x[:, 1] * y[:, 2] - x[:, 2] *", "LIcense follows # This software is OSI Certified Open Source", "Redistribution and use in source and binary forms, with or", "nneighbors = len(verts) return verts def GetVertexNormals(vertices,faces,FaceNormals,e0,e1,e2): \"\"\" INPUT: Vertices", "without modification, are permitted provided that the following conditions are", "y[:, 0] - x[:, 0] * y[:, 2], x[:, 0]", "OSI Certified Open Source Software. OSI Certified is a certification", "substitute goods or services; loss of use, data, or profits;", "lengths de0=np.sqrt(e0[:,0]**2+e0[:,1]**2+e0[:,2]**2) de1=np.sqrt(e1[:,0]**2+e1[:,1]**2+e1[:,2]**2) de2=np.sqrt(e2[:,0]**2+e2[:,1]**2+e2[:,2]**2) L2=np.c_[de0**2,de1**2,de2**2] #Calculate weights according to N.Max", "list of 3D vectors Input x : Mx3 array y", "FaceNormals : Outer Normal per face, having magnitude equal to", "of use, data, or profits; or business interruption) however caused", "in contract, strict liability, or tort (including negligence or otherwise)", "used to endorse or promote products derived from this software", "liability, whether in contract, strict liability, or tort (including negligence", "int) for nb in this['neighbor_tri']] # verts = tris.ravel() counts", "e0,e1,e2 : edge vectors OUTPUT: VertNormals : Unit normal at", "promote products derived from this software without specific prior written", "of any contributors may be used to endorse or promote", "in binary form must reproduce the above 
copyright notice, this", "indirect, incidental, special, exemplary, or consequential damages (including, but not", "face e0,e1,e2 : edge vectors OUTPUT: VertNormals : Unit normal", "whether in contract, strict liability, or tort (including negligence or", "Normal per face, having magnitude equal to area of face", "rights reserved. # # Redistribution and use in source and", "vertex \"\"\" VertNormals =np.zeros(vertices.shape) #edge lengths de0=np.sqrt(e0[:,0]**2+e0[:,1]**2+e0[:,2]**2) de1=np.sqrt(e1[:,0]**2+e1[:,1]**2+e1[:,2]**2) de2=np.sqrt(e2[:,0]**2+e2[:,1]**2+e2[:,2]**2) L2=np.c_[de0**2,de1**2,de2**2]", "j in [0,1,2]: VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv1[:,j]) verts=faces.T[1] for j in [0,1,2]: VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv2[:,j])", "however caused and on any theory of liability, whether in", "conditions and the following disclaimer. # Redistributions in binary form", "\"\"\"Efficiently compute vertex neighboring triangles. 
Returns the triangles in the", "assume_unique=False) nneighbors = len(verts) return verts def GetVertexNormals(vertices,faces,FaceNormals,e0,e1,e2): \"\"\" INPUT:", "or implied warranties, including, but not limited to, the implied", "zip(idx[:-1], idx[1:])]) return neighbor_tri def get_surf_neighbors(tris,neighbor_tri, k): \"\"\"Get vertices of", "in xrange(this['ntri']): # verts = this['tris'][p] # this['neighbor_tri'][verts[0]].append(p) # this['neighbor_tri'][verts[1]].append(p)", "return VertNormals def fastcross(x, y): \"\"\"Compute cross product between list", "to MWA for normals # wfv1=FaceNormals*np.arcsin(2*Af/(de1*de2))[:,np.newaxis] # wfv2=FaceNormals*np.arcsin(2*Af/(de2*de0))[:,np.newaxis] # wfv3=FaceNormals*np.arcsin(2*Af/(de0*de1))[:,np.newaxis]", "limited to, the implied warranties of merchantability and fitness for", "\"\"\" # this code replaces the following, but is faster", "3D vectors Input x : Mx3 array y : Mx3", "1-ring of a given vertex \"\"\" # this code replaces", "special, exemplary, or consequential damages (including, but not limited to,", "normals # wfv1=FaceNormals*np.arcsin(2*Af/(de1*de2))[:,np.newaxis] # wfv2=FaceNormals*np.arcsin(2*Af/(de2*de0))[:,np.newaxis] # wfv3=FaceNormals*np.arcsin(2*Af/(de0*de1))[:,np.newaxis] verts=faces.T[0] for j", "the use of this software, even if advised of the", "particular purpose are disclaimed. In no event shall the copyright", "k): \"\"\"Get vertices of 1-ring \"\"\" verts = tris[neighbor_tri[k]] verts", "x and y. \"\"\" if max([x.shape[0], y.shape[0]]) >= 500: return", "mark of the Open Source Initiative. # # Copyright (c)", "=np.zeros(vertices.shape) #edge lengths de0=np.sqrt(e0[:,0]**2+e0[:,1]**2+e0[:,2]**2) de1=np.sqrt(e1[:,0]**2+e1[:,1]**2+e1[:,2]**2) de2=np.sqrt(e2[:,0]**2+e2[:,1]**2+e2[:,2]**2) L2=np.c_[de0**2,de1**2,de2**2] #Calculate weights according", "arising in any way out of the use of this", "fitness for a particular purpose are disclaimed. 
In no event", "for v1, v2 in zip(idx[:-1], idx[1:])]) return neighbor_tri def get_surf_neighbors(tris,neighbor_tri,", "wfv1=FaceNormals/(L2[:,1]*L2[:,2])[:,np.newaxis] wfv2=FaceNormals/(L2[:,2]*L2[:,0])[:,np.newaxis] wfv3=FaceNormals/(L2[:,0]*L2[:,1])[:,np.newaxis] # #Calculate the weights according to MWA", "contract, strict liability, or tort (including negligence or otherwise) arising", "the copyright holders and contributors \"as is\" and any express", "# this['neighbor_tri'] = [list() for _ in xrange(this['np'])] # for", "permitted provided that the following conditions are met: # #", "reproduce the above copyright notice, this list of conditions and", "convert a np array of vectors to unit vectors \"\"\"", "of substitute goods or services; loss of use, data, or", "area of face e0,e1,e2 : edge vectors OUTPUT: VertNormals :", "source and binary forms, with or without modification, are permitted", "but not limited to, procurement of substitute goods or services;", "the implied warranties of merchantability and fitness for a particular", "in this['neighbor_tri']] # verts = tris.ravel() counts = np.bincount(verts, minlength=npts)", "= np.array([np.sort(tri_idx[v1:v2]) for v1, v2 in zip(idx[:-1], idx[1:])]) return neighbor_tri", "2] * y[:, 0] - x[:, 0] * y[:, 2],", "idx[1:])]) return neighbor_tri def get_surf_neighbors(tris,neighbor_tri, k): \"\"\"Get vertices of 1-ring", "implied warranties of merchantability and fitness for a particular purpose", "vertices Faces : vertex connectivity FaceNormals : Outer Normal per", "Initiative. # # Copyright (c) 2011-2019, authors of MNE-Python. All", "for normals wfv1=FaceNormals/(L2[:,1]*L2[:,2])[:,np.newaxis] wfv2=FaceNormals/(L2[:,2]*L2[:,0])[:,np.newaxis] wfv3=FaceNormals/(L2[:,0]*L2[:,1])[:,np.newaxis] # #Calculate the weights according", "of vectors. e.g. 
to convert a np array of vectors", "- x[:, 0] * y[:, 2], x[:, 0] * y[:,", "and on any theory of liability, whether in contract, strict", "#Calculate the weights according to MWA for normals # wfv1=FaceNormals*np.arcsin(2*Af/(de1*de2))[:,np.newaxis]", "following, but is faster (vectorized): # # this['neighbor_tri'] = [list()", "endorse or promote products derived from this software without specific", "y[:, 1], x[:, 2] * y[:, 0] - x[:, 0]", "* y[:, 0]] else: return np.cross(x, y) def normr(vec): \"\"\"", "possibility of such damage. def triangle_neighbors(tris, npts): \"\"\"Efficiently compute vertex", "y): \"\"\"Compute cross product between list of 3D vectors Input", "2011-2019, authors of MNE-Python. All rights reserved. # # Redistribution", "software, even if advised of the possibility of such damage.", "above copyright notice, this list of conditions and the following", "in [0,1,2]: VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv1[:,j]) verts=faces.T[1] for j in [0,1,2]: VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv2[:,j]) verts=faces.T[2]", "# this['neighbor_tri'][verts[0]].append(p) # this['neighbor_tri'][verts[1]].append(p) # this['neighbor_tri'][verts[2]].append(p) # this['neighbor_tri'] = [np.array(nb,", "authors nor the names of any contributors may be used", "software is provided by the copyright holders and contributors \"as", "np # The first two functions are modified from MNE", "max([x.shape[0], y.shape[0]]) >= 500: return np.c_[x[:, 1] * y[:, 2]", "\"\"\" Normalizes an array of vectors. e.g. to convert a", "tris.ravel() counts = np.bincount(verts, minlength=npts) reord = np.argsort(verts) tri_idx =", "without specific prior written permission. # # This software is", "in source and binary forms, with or without modification, are", "# # This software is provided by the copyright holders", "disclaimer. 
# Redistributions in binary form must reproduce the above", "[list() for _ in xrange(this['np'])] # for p in xrange(this['ntri']):", "= np.cumsum(np.r_[0, counts]) # the sort below slows it down", "normals wfv1=FaceNormals/(L2[:,1]*L2[:,2])[:,np.newaxis] wfv2=FaceNormals/(L2[:,2]*L2[:,0])[:,np.newaxis] wfv3=FaceNormals/(L2[:,0]*L2[:,1])[:,np.newaxis] # #Calculate the weights according to", "and fitness for a particular purpose are disclaimed. In no", "compute vertex neighboring triangles. Returns the triangles in the 1-ring", "specific prior written permission. # # This software is provided", "is\" and any express or implied warranties, including, but not", "project. LIcense follows # This software is OSI Certified Open", "counts = np.bincount(verts, minlength=npts) reord = np.argsort(verts) tri_idx = np.unravel_index(reord,", "merchantability and fitness for a particular purpose are disclaimed. In", "In no event shall the copyright owner or contributors be", "= np.unravel_index(reord, (len(tris), 3))[0] idx = np.cumsum(np.r_[0, counts]) # the", "business interruption) however caused and on any theory of liability,", "product of x and y. \"\"\" if max([x.shape[0], y.shape[0]]) >=", "goods or services; loss of use, data, or profits; or", "between list of 3D vectors Input x : Mx3 array", "# # Copyright (c) 2011-2019, authors of MNE-Python. All rights", "the triangles in the 1-ring of a given vertex \"\"\"", "# for p in xrange(this['ntri']): # verts = this['tris'][p] #", "y[:, 1] - x[:, 1] * y[:, 0]] else: return", "weights according to N.Max [1999] for normals wfv1=FaceNormals/(L2[:,1]*L2[:,2])[:,np.newaxis] wfv2=FaceNormals/(L2[:,2]*L2[:,0])[:,np.newaxis] wfv3=FaceNormals/(L2[:,0]*L2[:,1])[:,np.newaxis]", "\"\"\" INPUT: Vertices : vertices Faces : vertex connectivity FaceNormals", "0] * y[:, 2], x[:, 0] * y[:, 1] -", "two functions are modified from MNE surface project. 
LIcense follows", "fastcross(x, y): \"\"\"Compute cross product between list of 3D vectors", "vertex neighboring triangles. Returns the triangles in the 1-ring of", "of liability, whether in contract, strict liability, or tort (including", "if advised of the possibility of such damage. def triangle_neighbors(tris,", "not limited to, procurement of substitute goods or services; loss", "(vectorized): # # this['neighbor_tri'] = [list() for _ in xrange(this['np'])]", "nor the names of any contributors may be used to", "the sort below slows it down a bit, but is", "names of any contributors may be used to endorse or", "(c) 2011-2019, authors of MNE-Python. All rights reserved. # #", "form must reproduce the above copyright notice, this list of", "disclaimer in the documentation and/or other materials provided with the", "use of this software, even if advised of the possibility", "and binary forms, with or without modification, are permitted provided", "contributors be liable for any direct, indirect, incidental, special, exemplary,", "but is faster (vectorized): # # this['neighbor_tri'] = [list() for", "prior written permission. 
# # This software is provided by", "consequential damages (including, but not limited to, procurement of substitute", "Vertices : vertices Faces : vertex connectivity FaceNormals : Outer", "\"\"\"Compute cross product between list of 3D vectors Input x", ": vertices Faces : vertex connectivity FaceNormals : Outer Normal", "OUTPUT: VertNormals : Unit normal at the vertex \"\"\" VertNormals", "0]] else: return np.cross(x, y) def normr(vec): \"\"\" Normalizes an", "out of the use of this software, even if advised", "y.shape[0]]) >= 500: return np.c_[x[:, 1] * y[:, 2] -", "including, but not limited to, the implied warranties of merchantability", "this['neighbor_tri']] # verts = tris.ravel() counts = np.bincount(verts, minlength=npts) reord", "be used to endorse or promote products derived from this", "to, procurement of substitute goods or services; loss of use,", "first two functions are modified from MNE surface project. LIcense", "[0,1,2]: VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv1[:,j]) verts=faces.T[1] for j in [0,1,2]: VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv2[:,j]) verts=faces.T[2] for", "a bit, but is needed for equivalence neighbor_tri = np.array([np.sort(tri_idx[v1:v2])", "equal to area of face e0,e1,e2 : edge vectors OUTPUT:", "(including negligence or otherwise) arising in any way out of", "MNE surface project. LIcense follows # This software is OSI", "is OSI Certified Open Source Software. OSI Certified is a", "face, having magnitude equal to area of face e0,e1,e2 :", "VertNormals def fastcross(x, y): \"\"\"Compute cross product between list of", "and use in source and binary forms, with or without", "Certified Open Source Software. 
OSI Certified is a certification mark", "in [0,1,2]: VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv2[:,j]) verts=faces.T[2] for j in [0,1,2]: VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv3[:,j]) VertNormals=normr(VertNormals)", "* y[:, 1] - x[:, 1] * y[:, 0]] else:", "x[:, 0] * y[:, 2], x[:, 0] * y[:, 1]", "copyright notice, this list of conditions and the following disclaimer.", "otherwise) arising in any way out of the use of", "binary form must reproduce the above copyright notice, this list", "a certification mark of the Open Source Initiative. # #", "from MNE surface project. LIcense follows # This software is", "idx = np.cumsum(np.r_[0, counts]) # the sort below slows it", "services; loss of use, data, or profits; or business interruption)", "damages (including, but not limited to, procurement of substitute goods", "following disclaimer in the documentation and/or other materials provided with", "= this['tris'][p] # this['neighbor_tri'][verts[0]].append(p) # this['neighbor_tri'][verts[1]].append(p) # this['neighbor_tri'][verts[2]].append(p) # this['neighbor_tri']", "for nb in this['neighbor_tri']] # verts = tris.ravel() counts =", "= tris.ravel() counts = np.bincount(verts, minlength=npts) reord = np.argsort(verts) tri_idx", "y) def normr(vec): \"\"\" Normalizes an array of vectors. e.g.", "permission. # # This software is provided by the copyright", "minlength=npts) reord = np.argsort(verts) tri_idx = np.unravel_index(reord, (len(tris), 3))[0] idx", "neighbor_tri = np.array([np.sort(tri_idx[v1:v2]) for v1, v2 in zip(idx[:-1], idx[1:])]) return", "and the following disclaimer. # Redistributions in binary form must", "def triangle_neighbors(tris, npts): \"\"\"Efficiently compute vertex neighboring triangles. 
Returns the", "MWA for normals # wfv1=FaceNormals*np.arcsin(2*Af/(de1*de2))[:,np.newaxis] # wfv2=FaceNormals*np.arcsin(2*Af/(de2*de0))[:,np.newaxis] # wfv3=FaceNormals*np.arcsin(2*Af/(de0*de1))[:,np.newaxis] verts=faces.T[0]", "contributors may be used to endorse or promote products derived", "of x and y. \"\"\" if max([x.shape[0], y.shape[0]]) >= 500:", "modified from MNE surface project. LIcense follows # This software", "to endorse or promote products derived from this software without", "* y[:, 1], x[:, 2] * y[:, 0] - x[:,", "of the use of this software, even if advised of", "2] * y[:, 1], x[:, 2] * y[:, 0] -", "in any way out of the use of this software,", "are permitted provided that the following conditions are met: #", "such damage. def triangle_neighbors(tris, npts): \"\"\"Efficiently compute vertex neighboring triangles.", "verts def GetVertexNormals(vertices,faces,FaceNormals,e0,e1,e2): \"\"\" INPUT: Vertices : vertices Faces :", "of this software, even if advised of the possibility of", "for equivalence neighbor_tri = np.array([np.sort(tri_idx[v1:v2]) for v1, v2 in zip(idx[:-1],", "in [0,1,2]: VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv3[:,j]) VertNormals=normr(VertNormals) return VertNormals def fastcross(x, y): \"\"\"Compute", "exemplary, or consequential damages (including, but not limited to, procurement", "Open Source Initiative. 
# # Copyright (c) 2011-2019, authors of", "z : Mx3 array Cross product of x and y.", "# this['neighbor_tri'][verts[1]].append(p) # this['neighbor_tri'][verts[2]].append(p) # this['neighbor_tri'] = [np.array(nb, int) for", "theory of liability, whether in contract, strict liability, or tort", "x : Mx3 array y : Mx3 array Output z", "# wfv3=FaceNormals*np.arcsin(2*Af/(de0*de1))[:,np.newaxis] verts=faces.T[0] for j in [0,1,2]: VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv1[:,j]) verts=faces.T[1] for", "p in xrange(this['ntri']): # verts = this['tris'][p] # this['neighbor_tri'][verts[0]].append(p) #", "Redistributions in binary form must reproduce the above copyright notice,", "software is OSI Certified Open Source Software. OSI Certified is", "following conditions are met: # # Redistributions of source code", "[1999] for normals wfv1=FaceNormals/(L2[:,1]*L2[:,2])[:,np.newaxis] wfv2=FaceNormals/(L2[:,2]*L2[:,0])[:,np.newaxis] wfv3=FaceNormals/(L2[:,0]*L2[:,1])[:,np.newaxis] # #Calculate the weights", "loss of use, data, or profits; or business interruption) however", "weights according to MWA for normals # wfv1=FaceNormals*np.arcsin(2*Af/(de1*de2))[:,np.newaxis] # wfv2=FaceNormals*np.arcsin(2*Af/(de2*de0))[:,np.newaxis]", "np.array([np.sort(tri_idx[v1:v2]) for v1, v2 in zip(idx[:-1], idx[1:])]) return neighbor_tri def", ": Outer Normal per face, having magnitude equal to area", "VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv2[:,j]) verts=faces.T[2] for j in [0,1,2]: VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv3[:,j]) VertNormals=normr(VertNormals) return VertNormals", "# Redistribution and use in source and binary forms, with", "with or without modification, are permitted provided that the following", "direct, indirect, incidental, special, exemplary, or consequential damages (including, but", "negligence or otherwise) arising in any way out of the", "this['neighbor_tri'] = 
[np.array(nb, int) for nb in this['neighbor_tri']] # verts", "purpose are disclaimed. In no event shall the copyright owner", "wfv1=FaceNormals*np.arcsin(2*Af/(de1*de2))[:,np.newaxis] # wfv2=FaceNormals*np.arcsin(2*Af/(de2*de0))[:,np.newaxis] # wfv3=FaceNormals*np.arcsin(2*Af/(de0*de1))[:,np.newaxis] verts=faces.T[0] for j in [0,1,2]:", "y[:, 2] - x[:, 2] * y[:, 1], x[:, 2]", "* y[:, 0] - x[:, 0] * y[:, 2], x[:,", "or otherwise) arising in any way out of the use", "normr(vec): \"\"\" Normalizes an array of vectors. e.g. to convert", "to convert a np array of vectors to unit vectors", "bit, but is needed for equivalence neighbor_tri = np.array([np.sort(tri_idx[v1:v2]) for", "this code replaces the following, but is faster (vectorized): #", "in xrange(this['np'])] # for p in xrange(this['ntri']): # verts =", "# Redistributions of source code must retain the above copyright", "profits; or business interruption) however caused and on any theory", "caused and on any theory of liability, whether in contract,", "3))[0] idx = np.cumsum(np.r_[0, counts]) # the sort below slows", "Normalizes an array of vectors. e.g. to convert a np", "the following disclaimer in the documentation and/or other materials provided", "list of conditions and the following disclaimer in the documentation", ": Mx3 array Cross product of x and y. \"\"\"", "xrange(this['ntri']): # verts = this['tris'][p] # this['neighbor_tri'][verts[0]].append(p) # this['neighbor_tri'][verts[1]].append(p) #", "y. \"\"\" if max([x.shape[0], y.shape[0]]) >= 500: return np.c_[x[:, 1]", "warranties, including, but not limited to, the implied warranties of", "are disclaimed. 
In no event shall the copyright owner or", "x[:, 1] * y[:, 0]] else: return np.cross(x, y) def", ": Mx3 array y : Mx3 array Output z :", "np.c_[x[:, 1] * y[:, 2] - x[:, 2] * y[:,", "wfv2=FaceNormals/(L2[:,2]*L2[:,0])[:,np.newaxis] wfv3=FaceNormals/(L2[:,0]*L2[:,1])[:,np.newaxis] # #Calculate the weights according to MWA for", "edge vectors OUTPUT: VertNormals : Unit normal at the vertex", "holders and contributors \"as is\" and any express or implied", "# wfv2=FaceNormals*np.arcsin(2*Af/(de2*de0))[:,np.newaxis] # wfv3=FaceNormals*np.arcsin(2*Af/(de0*de1))[:,np.newaxis] verts=faces.T[0] for j in [0,1,2]: VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv1[:,j])", "per face, having magnitude equal to area of face e0,e1,e2", "this['neighbor_tri'][verts[0]].append(p) # this['neighbor_tri'][verts[1]].append(p) # this['neighbor_tri'][verts[2]].append(p) # this['neighbor_tri'] = [np.array(nb, int)", "event shall the copyright owner or contributors be liable for", "slows it down a bit, but is needed for equivalence", "(len(tris), 3))[0] idx = np.cumsum(np.r_[0, counts]) # the sort below", "of 1-ring \"\"\" verts = tris[neighbor_tri[k]] verts = np.setdiff1d(verts, [k],", "may be used to endorse or promote products derived from", "Input x : Mx3 array y : Mx3 array Output", "Unit normal at the vertex \"\"\" VertNormals =np.zeros(vertices.shape) #edge lengths", "# this['neighbor_tri'][verts[2]].append(p) # this['neighbor_tri'] = [np.array(nb, int) for nb in", "(including, but not limited to, procurement of substitute goods or", "for any direct, indirect, incidental, special, exemplary, or consequential damages", "x[:, 2] * y[:, 1], x[:, 2] * y[:, 0]", "j in [0,1,2]: VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv2[:,j]) verts=faces.T[2] for j in [0,1,2]: VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv3[:,j])", "Output z : Mx3 array Cross product of x and", "needed for equivalence neighbor_tri = 
np.array([np.sort(tri_idx[v1:v2]) for v1, v2 in", "the names of any contributors may be used to endorse", "code replaces the following, but is faster (vectorized): # #", "# Copyright (c) 2011-2019, authors of MNE-Python. All rights reserved.", "any direct, indirect, incidental, special, exemplary, or consequential damages (including,", "must reproduce the above copyright notice, this list of conditions", "are modified from MNE surface project. LIcense follows # This", "this software without specific prior written permission. # # This", "This software is provided by the copyright holders and contributors", "of MNE-Python authors nor the names of any contributors may", "= np.argsort(verts) tri_idx = np.unravel_index(reord, (len(tris), 3))[0] idx = np.cumsum(np.r_[0,", "of face e0,e1,e2 : edge vectors OUTPUT: VertNormals : Unit", "at the vertex \"\"\" VertNormals =np.zeros(vertices.shape) #edge lengths de0=np.sqrt(e0[:,0]**2+e0[:,1]**2+e0[:,2]**2) de1=np.sqrt(e1[:,0]**2+e1[:,1]**2+e1[:,2]**2)", "it down a bit, but is needed for equivalence neighbor_tri", "neighbor_tri def get_surf_neighbors(tris,neighbor_tri, k): \"\"\"Get vertices of 1-ring \"\"\" verts", "vertex \"\"\" # this code replaces the following, but is", "# verts = tris.ravel() counts = np.bincount(verts, minlength=npts) reord =", "not limited to, the implied warranties of merchantability and fitness", "def normr(vec): \"\"\" Normalizes an array of vectors. e.g. to", "follows # This software is OSI Certified Open Source Software.", "# Neither the names of MNE-Python authors nor the names", "#edge lengths de0=np.sqrt(e0[:,0]**2+e0[:,1]**2+e0[:,2]**2) de1=np.sqrt(e1[:,0]**2+e1[:,1]**2+e1[:,2]**2) de2=np.sqrt(e2[:,0]**2+e2[:,1]**2+e2[:,2]**2) L2=np.c_[de0**2,de1**2,de2**2] #Calculate weights according to", "0] * y[:, 1] - x[:, 1] * y[:, 0]]", "Mx3 array y : Mx3 array Output z : Mx3", "contributors \"as is\" and any express or implied warranties, including,", "MNE-Python. All rights reserved. 
# # Redistribution and use in", "Source Initiative. # # Copyright (c) 2011-2019, authors of MNE-Python.", "MNE-Python authors nor the names of any contributors may be", "limited to, procurement of substitute goods or services; loss of", "# verts = this['tris'][p] # this['neighbor_tri'][verts[0]].append(p) # this['neighbor_tri'][verts[1]].append(p) # this['neighbor_tri'][verts[2]].append(p)", "Neither the names of MNE-Python authors nor the names of", "or business interruption) however caused and on any theory of", "below slows it down a bit, but is needed for", "of conditions and the following disclaimer in the documentation and/or", "verts = tris.ravel() counts = np.bincount(verts, minlength=npts) reord = np.argsort(verts)", ": edge vectors OUTPUT: VertNormals : Unit normal at the", "x[:, 0] * y[:, 1] - x[:, 1] * y[:,", "distribution. # Neither the names of MNE-Python authors nor the", "of a given vertex \"\"\" # this code replaces the", "VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv1[:,j]) verts=faces.T[1] for j in [0,1,2]: VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv2[:,j]) verts=faces.T[2] for j", "shall the copyright owner or contributors be liable for any", "notice, this list of conditions and the following disclaimer in", "or tort (including negligence or otherwise) arising in any way", "an array of vectors. e.g. to convert a np array", "All rights reserved. 
# # Redistribution and use in source", "and the following disclaimer in the documentation and/or other materials", "# this code replaces the following, but is faster (vectorized):", "# #Calculate the weights according to MWA for normals #", "\"\"\" VertNormals =np.zeros(vertices.shape) #edge lengths de0=np.sqrt(e0[:,0]**2+e0[:,1]**2+e0[:,2]**2) de1=np.sqrt(e1[:,0]**2+e1[:,1]**2+e1[:,2]**2) de2=np.sqrt(e2[:,0]**2+e2[:,1]**2+e2[:,2]**2) L2=np.c_[de0**2,de1**2,de2**2] #Calculate", "triangle_neighbors(tris, npts): \"\"\"Efficiently compute vertex neighboring triangles. Returns the triangles", "and contributors \"as is\" and any express or implied warranties,", "y[:, 0]] else: return np.cross(x, y) def normr(vec): \"\"\" Normalizes", "the documentation and/or other materials provided with the distribution. #", "to area of face e0,e1,e2 : edge vectors OUTPUT: VertNormals", "magnitude equal to area of face e0,e1,e2 : edge vectors", "[0,1,2]: VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv2[:,j]) verts=faces.T[2] for j in [0,1,2]: VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv3[:,j]) VertNormals=normr(VertNormals) return", "\"as is\" and any express or implied warranties, including, but", "and any express or implied warranties, including, but not limited", "this list of conditions and the following disclaimer in the", "def GetVertexNormals(vertices,faces,FaceNormals,e0,e1,e2): \"\"\" INPUT: Vertices : vertices Faces : vertex", "modification, are permitted provided that the following conditions are met:", "Open Source Software. OSI Certified is a certification mark of", "verts=faces.T[1] for j in [0,1,2]: VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv2[:,j]) verts=faces.T[2] for j in", "in the documentation and/or other materials provided with the distribution.", "incidental, special, exemplary, or consequential damages (including, but not limited", "the distribution. 
# Neither the names of MNE-Python authors nor", "[np.array(nb, int) for nb in this['neighbor_tri']] # verts = tris.ravel()", "de0=np.sqrt(e0[:,0]**2+e0[:,1]**2+e0[:,2]**2) de1=np.sqrt(e1[:,0]**2+e1[:,1]**2+e1[:,2]**2) de2=np.sqrt(e2[:,0]**2+e2[:,1]**2+e2[:,2]**2) L2=np.c_[de0**2,de1**2,de2**2] #Calculate weights according to N.Max [1999]", "use in source and binary forms, with or without modification,", "= len(verts) return verts def GetVertexNormals(vertices,faces,FaceNormals,e0,e1,e2): \"\"\" INPUT: Vertices :", "or services; loss of use, data, or profits; or business", "* y[:, 2] - x[:, 2] * y[:, 1], x[:,", "for j in [0,1,2]: VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv2[:,j]) verts=faces.T[2] for j in [0,1,2]:", "to N.Max [1999] for normals wfv1=FaceNormals/(L2[:,1]*L2[:,2])[:,np.newaxis] wfv2=FaceNormals/(L2[:,2]*L2[:,0])[:,np.newaxis] wfv3=FaceNormals/(L2[:,0]*L2[:,1])[:,np.newaxis] # #Calculate", "of the Open Source Initiative. # # Copyright (c) 2011-2019,", "reserved. # # Redistribution and use in source and binary", "np.unravel_index(reord, (len(tris), 3))[0] idx = np.cumsum(np.r_[0, counts]) # the sort", "vectors. e.g. to convert a np array of vectors to", "no event shall the copyright owner or contributors be liable", "on any theory of liability, whether in contract, strict liability,", "_ in xrange(this['np'])] # for p in xrange(this['ntri']): # verts", "advised of the possibility of such damage. def triangle_neighbors(tris, npts):", "replaces the following, but is faster (vectorized): # # this['neighbor_tri']", "2], x[:, 0] * y[:, 1] - x[:, 1] *", "\"\"\"Get vertices of 1-ring \"\"\" verts = tris[neighbor_tri[k]] verts =", "but not limited to, the implied warranties of merchantability and", "software without specific prior written permission. 
# # This software", "is provided by the copyright holders and contributors \"as is\"", "procurement of substitute goods or services; loss of use, data,", "Faces : vertex connectivity FaceNormals : Outer Normal per face,", "of conditions and the following disclaimer. # Redistributions in binary", "be liable for any direct, indirect, incidental, special, exemplary, or", "2] - x[:, 2] * y[:, 1], x[:, 2] *", "surface project. LIcense follows # This software is OSI Certified", "tri_idx = np.unravel_index(reord, (len(tris), 3))[0] idx = np.cumsum(np.r_[0, counts]) #", "forms, with or without modification, are permitted provided that the", "# The first two functions are modified from MNE surface", "1-ring \"\"\" verts = tris[neighbor_tri[k]] verts = np.setdiff1d(verts, [k], assume_unique=False)", "the 1-ring of a given vertex \"\"\" # this code", "counts]) # the sort below slows it down a bit,", "way out of the use of this software, even if", "data, or profits; or business interruption) however caused and on", "any express or implied warranties, including, but not limited to,", "# this['neighbor_tri'] = [np.array(nb, int) for nb in this['neighbor_tri']] #", "or contributors be liable for any direct, indirect, incidental, special,", "is faster (vectorized): # # this['neighbor_tri'] = [list() for _", "functions are modified from MNE surface project. LIcense follows #", "provided with the distribution. # Neither the names of MNE-Python", "conditions and the following disclaimer in the documentation and/or other", "given vertex \"\"\" # this code replaces the following, but", "# This software is OSI Certified Open Source Software. 
OSI", "liable for any direct, indirect, incidental, special, exemplary, or consequential", "or promote products derived from this software without specific prior", ">= 500: return np.c_[x[:, 1] * y[:, 2] - x[:,", "equivalence neighbor_tri = np.array([np.sort(tri_idx[v1:v2]) for v1, v2 in zip(idx[:-1], idx[1:])])", "VertNormals=normr(VertNormals) return VertNormals def fastcross(x, y): \"\"\"Compute cross product between", "return neighbor_tri def get_surf_neighbors(tris,neighbor_tri, k): \"\"\"Get vertices of 1-ring \"\"\"", "the names of MNE-Python authors nor the names of any", "- x[:, 2] * y[:, 1], x[:, 2] * y[:,", "def fastcross(x, y): \"\"\"Compute cross product between list of 3D", "N.Max [1999] for normals wfv1=FaceNormals/(L2[:,1]*L2[:,2])[:,np.newaxis] wfv2=FaceNormals/(L2[:,2]*L2[:,0])[:,np.newaxis] wfv3=FaceNormals/(L2[:,0]*L2[:,1])[:,np.newaxis] # #Calculate the", "array Output z : Mx3 array Cross product of x", "copyright notice, this list of conditions and the following disclaimer", "array Cross product of x and y. \"\"\" if max([x.shape[0],", "authors of MNE-Python. All rights reserved. 
# # Redistribution and", "# This software is provided by the copyright holders and", "nb in this['neighbor_tri']] # verts = tris.ravel() counts = np.bincount(verts,", "# Redistributions in binary form must reproduce the above copyright", "reord = np.argsort(verts) tri_idx = np.unravel_index(reord, (len(tris), 3))[0] idx =", "down a bit, but is needed for equivalence neighbor_tri =", "[k], assume_unique=False) nneighbors = len(verts) return verts def GetVertexNormals(vertices,faces,FaceNormals,e0,e1,e2): \"\"\"", "Outer Normal per face, having magnitude equal to area of", "\"\"\" verts = tris[neighbor_tri[k]] verts = np.setdiff1d(verts, [k], assume_unique=False) nneighbors", "def get_surf_neighbors(tris,neighbor_tri, k): \"\"\"Get vertices of 1-ring \"\"\" verts =", "= tris[neighbor_tri[k]] verts = np.setdiff1d(verts, [k], assume_unique=False) nneighbors = len(verts)", "according to MWA for normals # wfv1=FaceNormals*np.arcsin(2*Af/(de1*de2))[:,np.newaxis] # wfv2=FaceNormals*np.arcsin(2*Af/(de2*de0))[:,np.newaxis] #", "express or implied warranties, including, but not limited to, the", "array y : Mx3 array Output z : Mx3 array", "verts=faces.T[0] for j in [0,1,2]: VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv1[:,j]) verts=faces.T[1] for j in", "the vertex \"\"\" VertNormals =np.zeros(vertices.shape) #edge lengths de0=np.sqrt(e0[:,0]**2+e0[:,1]**2+e0[:,2]**2) de1=np.sqrt(e1[:,0]**2+e1[:,1]**2+e1[:,2]**2) de2=np.sqrt(e2[:,0]**2+e2[:,1]**2+e2[:,2]**2)", "documentation and/or other materials provided with the distribution. # Neither", "vertex connectivity FaceNormals : Outer Normal per face, having magnitude", "any way out of the use of this software, even", "connectivity FaceNormals : Outer Normal per face, having magnitude equal", "but is needed for equivalence neighbor_tri = np.array([np.sort(tri_idx[v1:v2]) for v1,", "of 3D vectors Input x : Mx3 array y :", "list of conditions and the following disclaimer. 
# Redistributions in", "strict liability, or tort (including negligence or otherwise) arising in", "for j in [0,1,2]: VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv1[:,j]) verts=faces.T[1] for j in [0,1,2]:", "e.g. to convert a np array of vectors to unit", "damage. def triangle_neighbors(tris, npts): \"\"\"Efficiently compute vertex neighboring triangles. Returns", ": Unit normal at the vertex \"\"\" VertNormals =np.zeros(vertices.shape) #edge", "GetVertexNormals(vertices,faces,FaceNormals,e0,e1,e2): \"\"\" INPUT: Vertices : vertices Faces : vertex connectivity", "is a certification mark of the Open Source Initiative. #", "wfv3=FaceNormals*np.arcsin(2*Af/(de0*de1))[:,np.newaxis] verts=faces.T[0] for j in [0,1,2]: VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv1[:,j]) verts=faces.T[1] for j", "numpy as np # The first two functions are modified", "or profits; or business interruption) however caused and on any", "in the 1-ring of a given vertex \"\"\" # this", "de1=np.sqrt(e1[:,0]**2+e1[:,1]**2+e1[:,2]**2) de2=np.sqrt(e2[:,0]**2+e2[:,1]**2+e2[:,2]**2) L2=np.c_[de0**2,de1**2,de2**2] #Calculate weights according to N.Max [1999] for", "np.cross(x, y) def normr(vec): \"\"\" Normalizes an array of vectors.", "this['neighbor_tri'][verts[1]].append(p) # this['neighbor_tri'][verts[2]].append(p) # this['neighbor_tri'] = [np.array(nb, int) for nb", "npts): \"\"\"Efficiently compute vertex neighboring triangles. 
Returns the triangles in", "xrange(this['np'])] # for p in xrange(this['ntri']): # verts = this['tris'][p]", "provided by the copyright holders and contributors \"as is\" and", "having magnitude equal to area of face e0,e1,e2 : edge", "get_surf_neighbors(tris,neighbor_tri, k): \"\"\"Get vertices of 1-ring \"\"\" verts = tris[neighbor_tri[k]]", "as np # The first two functions are modified from", "y : Mx3 array Output z : Mx3 array Cross", "len(verts) return verts def GetVertexNormals(vertices,faces,FaceNormals,e0,e1,e2): \"\"\" INPUT: Vertices : vertices", "for _ in xrange(this['np'])] # for p in xrange(this['ntri']): #", "j in [0,1,2]: VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv3[:,j]) VertNormals=normr(VertNormals) return VertNormals def fastcross(x, y):", "Returns the triangles in the 1-ring of a given vertex", "Certified is a certification mark of the Open Source Initiative.", "use, data, or profits; or business interruption) however caused and" ]
[ "assert stream.heartbeat_fail_delay == stream.DEFAULT_HEARTBEAT_FAIL_DELAY stream.station.heartbeat_fail_delay = 12345 assert stream.heartbeat_fail_delay ==", "list(filter(lambda x: x is not None, data)) iter_data = iter(data)", "1 def test_busy_true(self, stream): stream.refresh = mock.MagicMock() stream.station = mock.MagicMock(spec=ZeroStation)", "in range(5)] ) def test_read_none(self, stream): iter_data = iter([None, \"packet\"]", "2.0 (the \"License\"); # you may not use this file", "test_kw=1) stream.transport.send.assert_not_called() stream.connection_context.assert_not_called() connection_context.__enter__.assert_not_called() connection_context.__exit__.assert_not_called() def test_write_out(self, stream): connection_context =", "result_data assert stream.connection_context.call_count == 1 assert connection_context.__enter__.call_count == 1 assert", "= True stream.write(\"packet\", test_kw=1) stream.transport.send.assert_not_called() stream.connection_context.assert_not_called() connection_context.__enter__.assert_not_called() connection_context.__exit__.assert_not_called() def test_write_out(self,", "stream.closed = True stream.transport.ack = mock.MagicMock() stream.ack(\"message\", test_kw=1) stream.transport.ack.assert_not_called() #", "pipe @fixture def session(self): session = MockSession() return session @fixture", "with stream.connection_context(): MockTransport.connect.assert_not_called() MockTransport.disconnect.assert_not_called() # heartbeat def test_heartbeat_fail_delay(self, stream): assert", "== 4: stream.station = station if item is ...: break", "[call(stream, wait_timeout=None, test_kw=1) for _ in range(6)] ) def test_read_while_stream_open(self,", "[call(stream, wait_timeout=None, test_kw=1) for _ in range(5)] ) def test_read_none(self,", "pipe(self): pipe = mock.MagicMock(spec=ZeroPipe) return pipe @fixture def session(self): session", "in stream.read(test_kw=1): if item is ...: break result.append(item) if 
platform.python_implementation().lower()", "[\"packet\", 0, {}, \"\", None] + [None, \"packet\"] * 5", "pytest import fixture from tethys.core.pipes.pipe_zero import ZeroPipe from tethys.core.sessions.sess_zero import", "== 1 def test_busy_true(self, stream): stream.refresh = mock.MagicMock() stream.station =", "1 assert connection_context.__exit__.call_count == 1 def test_write_many(self, stream): connection_context =", "stream.transport.recv.assert_has_calls( [call(stream, wait_timeout=None, test_kw=1) for _ in range(5)] ) def", "test_kw=1) for _ in data] ) def test_read_n_packets(self, stream): iter_data", "test_read_when_station_changed(self, stream, station): iter_data = iter(range(10)) def recv_cb(*_, **__): try:", "stream.read(count=5, test_kw=1): if item is ...: break result.append(item) assert result", "stream.transport.send = mock.MagicMock() stream.write(\"packet\", test_kw=1) stream.transport.send.assert_called_once_with(stream, \"packet\", test_kw=1) assert stream.connection_context.call_count", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "== 1 assert connection_context.__exit__.call_count == 1 def test_write_many(self, stream): connection_context", "session, transport) # init def test_init_with_transport_cb(self, pipe, session, transport): def", "mock.MagicMock(spec=ZeroStation) stream.station.heartbeat_fail_delay = 1 stream.heartbeat_ts = time.time() - 10 assert", "True iter_data = iter([0, 1, 2, 3, None, 4]) def", "stream.save.assert_not_called() # read def test_read(self, stream): data = [\"packet\", 0,", "stream.transport == transport # conn context def test_new_connection_context(self, stream): with", "import mock from unittest.mock import patch, call from pytest import", "stream.transport.ack = mock.MagicMock() stream.ack(\"message\", test_kw=1) stream.transport.ack.assert_not_called() # redirect def test_redirect(self,", "result = [] for item in stream.read(test_kw=1): if item is", 
"stream.transport.send.assert_called_once_with(stream, \"packet\", test_kw=1) assert stream.connection_context.call_count == 1 assert connection_context.__enter__.call_count ==", "= None def __init__(self): self._closed = False @property def closed(self):", "test_busy_true(self, stream): stream.refresh = mock.MagicMock() stream.station = mock.MagicMock(spec=ZeroStation) stream.station.heartbeat_fail_delay =", "next(iter_data) except StopIteration: return ... connection_context = mock.MagicMock() stream.connection_context =", "[\"packet\"] * 10) def recv_cb(*_, **__): try: return next(iter_data) except", "from tethys.core.transports.transport_zero import ZeroTransport class MockTransport(ZeroTransport): def __init__(self): pass connect", "[call(stream, wait_timeout=None, test_kw=1) for _ in data] ) def test_read_n_packets(self,", "station.stream_lock_ttl = 0 stream.save = mock.MagicMock() stream.redirect_to(station) assert stream.station ==", "stream.DEFAULT_HEARTBEAT_FAIL_DELAY stream.station.heartbeat_fail_delay = 12345 assert stream.heartbeat_fail_delay == 12345 def test_busy_false(self,", "assert connection_context.__exit__.call_count == 1 stream.transport.recv.assert_has_calls( [call(stream, wait_timeout=None, test_kw=1) for _", "assert stream.connection_context.call_count == 1 assert connection_context.__enter__.call_count == 1 assert connection_context.__exit__.call_count", "\"packet\"] + [\"packet\"] * 10) def recv_cb(*_, **__): try: return", "iter([None, \"packet\"] + [\"packet\"] * 10) def recv_cb(*_, **__): try:", ") stream.transport.send = mock.MagicMock() stream.closed = True stream.write(\"packet\", test_kw=1) stream.transport.send.assert_not_called()", "stream.redirect_to(station) assert stream.station == station station.save.assert_called_once_with(save_dependency=False) stream.save.assert_called_once_with(save_dependency=False) # open/close context", "use this file except in compliance with the License. 
#", "== 1 assert connection_context.__exit__.call_count == 1 stream.transport.recv.assert_called_once_with(stream, wait_timeout=1, test_kw=1) #", "pipe, session, transport): return ZeroStream(pipe, session, transport) # init def", "@fixture def station(self): return MockStation() @fixture def stream(self, pipe, session,", "recv_cb(*_, **__): try: return next(iter_data) except StopIteration: return ... connection_context", "test_read_while_stream_open(self, stream): iter_data = iter(range(10)) def recv_cb(*_, **__): try: return", "assert connection_context.__enter__.call_count == 1 assert connection_context.__exit__.call_count == 1 stream.transport.recv.assert_called_once_with(stream, wait_timeout=1,", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "return MockStation() @fixture def stream(self, pipe, session, transport): return ZeroStream(pipe,", "stream): stream.save = mock.MagicMock() stream.closed = True assert stream.open() is", "close def test_close(self, stream): stream.save = mock.MagicMock() assert stream.close() is", "stream.transport.ack = mock.MagicMock() stream.ack(\"message\", test_kw=1) stream.transport.ack.assert_called_once_with(stream, \"message\", test_kw=1) def test_ack_closed(self,", "License. 
# You may obtain a copy of the License", "from unittest import mock from unittest.mock import patch, call from", "12345): stream.heartbeat() assert stream.heartbeat_ts == 12345 stream.save.assert_called_once_with(save_dependency=False) # open def", "1 assert connection_context.__enter__.call_count == 1 assert connection_context.__exit__.call_count == 1 stream.transport.recv.assert_called_once_with(stream,", "def test_old_connection_context(self, stream): MockTransport._connections[stream.id] = stream with stream.connection_context(): MockTransport.connect.assert_not_called() MockTransport.disconnect.assert_not_called()", "= True iter_data = iter([0, 1, 2, 3, None, 4])", "in data] ) def test_read_n_packets(self, stream): iter_data = iter([None, \"packet\"]", "def stream(self, pipe, session, transport): return ZeroStream(pipe, session, transport) #", "under the License is distributed on an \"AS IS\" BASIS,", "break result.append(item) assert result == [] assert stream.connection_context.call_count == 1", "== transport # conn context def test_new_connection_context(self, stream): with stream.connection_context():", "= [] for item in stream.read(test_kw=1): if item == 4:", "License for the specific language governing permissions and # limitations", "call from pytest import fixture from tethys.core.pipes.pipe_zero import ZeroPipe from", "== 1 stream.transport.recv.assert_has_calls( [call(stream, wait_timeout=None, test_kw=1) for _ in data]", "+ [None, \"packet\"] * 5 result_data = list(filter(lambda x: x", "in stream.read(wait_timeout=1, test_kw=1): if item is ...: break result.append(item) assert", "= mock.MagicMock( side_effect=lambda: connection_context ) stream.transport.send = mock.MagicMock() stream.closed =", "in stream.read(test_kw=1): if item == 4: stream.closed = True if", "Reserved. 
# # Licensed under the Apache License, Version 2.0", "transport(self): return MockTransport() @fixture def station(self): return MockStation() @fixture def", "# init def test_init_with_transport_cb(self, pipe, session, transport): def get_transport(_): return", "mock from unittest.mock import patch, call from pytest import fixture", "MockTransport() @fixture def station(self): return MockStation() @fixture def stream(self, pipe,", "self._closed class MockStation(ZeroStation): def __init__(self): pass class TestZeroStream: @staticmethod def", "stream.closed is True stream.save.assert_not_called() # read def test_read(self, stream): data", "assert connection_context.__exit__.call_count == 1 stream.transport.recv.assert_called_once_with(stream, wait_timeout=1, test_kw=1) # write def", "def recv_cb(*_, **__): try: return next(iter_data) except StopIteration: return ...", "stream.closed = True assert stream.open() is stream assert stream.closed is", "redirect def test_redirect(self, stream, station): station.save = mock.MagicMock() station.stream_lock_ttl =", "stream.session._closed = True iter_data = iter([0, 1, 2, 3, None,", "False stream.save.assert_not_called() # close def test_close(self, stream): stream.save = mock.MagicMock()", "= 1 stream.heartbeat_ts = time.time() - 10 assert stream.is_busy is", "test_init_with_transport_cb(self, pipe, session, transport): def get_transport(_): return transport get_transport =", "for item in stream.read(count=5, test_kw=1): if item is ...: break", "MockTransport._connections[stream.id] = stream with stream.connection_context(): MockTransport.connect.assert_not_called() MockTransport.disconnect.assert_not_called() # heartbeat def", "if platform.python_implementation().lower() == \"pypy\": gc.collect() assert result == result_data assert", "in range(6)] ) def test_read_while_stream_open(self, stream): iter_data = iter(range(10)) def", "stream.refresh.call_count == 1 def test_heartbeat(self, stream): stream.save = mock.MagicMock() 
with", "- 10 assert stream.is_busy is False assert stream.refresh.call_count == 1", "test_old_connection_context(self, stream): MockTransport._connections[stream.id] = stream with stream.connection_context(): MockTransport.connect.assert_not_called() MockTransport.disconnect.assert_not_called() #", "def closed(self): return self._closed class MockStation(ZeroStation): def __init__(self): pass class", "i in \"packet\"] ) assert stream.connection_context.call_count == 1 assert connection_context.__enter__.call_count", "transport) # init def test_init_with_transport_cb(self, pipe, session, transport): def get_transport(_):", "stream.connection_context.call_count == 1 assert connection_context.__enter__.call_count == 1 assert connection_context.__exit__.call_count ==", "in stream.read(test_kw=1): if item is ...: break result.append(item) assert result", "stream.DEFAULT_HEARTBEAT_FAIL_DELAY stream.station = mock.MagicMock(spec=ZeroStation) stream.station.heartbeat_fail_delay = 0 assert stream.heartbeat_fail_delay ==", "in compliance with the License. 
# You may obtain a", "[] assert stream.connection_context.call_count == 1 assert connection_context.__enter__.call_count == 1 assert", "assert connection_context.__enter__.call_count == 1 assert connection_context.__exit__.call_count == 1 # ack", "12345 def test_busy_false(self, stream): stream.refresh = mock.MagicMock() stream.station = mock.MagicMock(spec=ZeroStation)", "software # distributed under the License is distributed on an", "stream.save.assert_called_once_with(save_dependency=False) def test_open_no_commit(self, stream): stream.save = mock.MagicMock() stream.closed = True", "stream assert stream.closed is False stream.save.assert_not_called() # close def test_close(self,", "test_kw=1) for _ in range(5)] ) def test_read_none(self, stream): iter_data", "MockStation() @fixture def stream(self, pipe, session, transport): return ZeroStream(pipe, session,", "from pytest import fixture from tethys.core.pipes.pipe_zero import ZeroPipe from tethys.core.sessions.sess_zero", "pass connect = mock.MagicMock() disconnect = mock.MagicMock() class MockSession(ZeroSession): closing_mode", "5 result_data = list(filter(lambda x: x is not None, data))", "1 stream.transport.recv.assert_has_calls( [call(stream, wait_timeout=None, test_kw=1) for _ in range(5)] )", "MockTransport.disconnect.assert_called_once_with(stream) def test_old_connection_context(self, stream): MockTransport._connections[stream.id] = stream with stream.connection_context(): MockTransport.connect.assert_not_called()", "import ZeroStream from tethys.core.transports.transport_zero import ZeroTransport class MockTransport(ZeroTransport): def __init__(self):", "MockTransport.disconnect.assert_not_called() # heartbeat def test_heartbeat_fail_delay(self, stream): assert stream.heartbeat_fail_delay == stream.DEFAULT_HEARTBEAT_FAIL_DELAY", "in range(5)] ) def test_read_when_station_changed(self, stream, station): iter_data = iter(range(10))", "== 1 assert connection_context.__exit__.call_count == 1 
stream.transport.recv.assert_has_calls( [call(stream, wait_timeout=None, test_kw=1)", "test_write_when_closed(self, stream): connection_context = mock.MagicMock() stream.connection_context = mock.MagicMock( side_effect=lambda: connection_context", "\"packet\", test_kw=1) assert stream.connection_context.call_count == 1 assert connection_context.__enter__.call_count == 1", "assert stream.close(save=False) is stream assert stream.closed is True stream.save.assert_not_called() #", "= True assert stream.open() is stream assert stream.closed is False", "stream): stream.open = mock.MagicMock() stream.close = mock.MagicMock() with stream: stream.open.assert_called_once_with(save=False)", "stream.closed = True assert stream.open(save=False) is stream assert stream.closed is", "= mock.MagicMock() assert stream.close(save=False) is stream assert stream.closed is True", "= mock.MagicMock(spec=ZeroStation) stream.station.heartbeat_fail_delay = 1000 stream.heartbeat_ts = time.time() assert stream.is_busy", "= mock.MagicMock() stream.station = mock.MagicMock(spec=ZeroStation) stream.station.heartbeat_fail_delay = 1 stream.heartbeat_ts =", "result == list(range(5)) assert stream.connection_context.call_count == 1 assert connection_context.__enter__.call_count ==", "ZeroStation from tethys.core.streams.stream_zero import ZeroStream from tethys.core.transports.transport_zero import ZeroTransport class", "stream): stream.save = mock.MagicMock() with patch(\"time.time\", lambda: 12345): stream.heartbeat() assert", "= mock.MagicMock(spec=ZeroStation) stream.station.heartbeat_fail_delay = 1 stream.heartbeat_ts = time.time() - 10", "test_ack_closed(self, stream): stream.closed = True stream.transport.ack = mock.MagicMock() stream.ack(\"message\", test_kw=1)", "ZeroPipe from tethys.core.sessions.sess_zero import ZeroSession from tethys.core.stations.station_zero import ZeroStation from", "result == result_data assert stream.connection_context.call_count == 1 assert 
connection_context.__enter__.call_count ==", "= iter(data) def recv_cb(*_, **__): try: return next(iter_data) except StopIteration:", "= ZeroStream(pipe, session, get_transport) assert stream.transport == transport # conn", "is stream assert stream.closed is True stream.save.assert_not_called() # read def", "side_effect=lambda: connection_context ) stream.transport.send = mock.MagicMock() stream.write(\"packet\", many=True, test_kw=1) stream.transport.send.assert_has_calls(", "# close def test_close(self, stream): stream.save = mock.MagicMock() assert stream.close()", "1 def test_heartbeat(self, stream): stream.save = mock.MagicMock() with patch(\"time.time\", lambda:", "for item in stream.read(wait_timeout=1, test_kw=1): if item is ...: break", "ZeroTransport class MockTransport(ZeroTransport): def __init__(self): pass connect = mock.MagicMock() disconnect", "assert connection_context.__enter__.call_count == 1 assert connection_context.__exit__.call_count == 1 def test_write_many(self,", "stream.transport.recv.assert_has_calls( [call(stream, wait_timeout=None, test_kw=1) for _ in range(6)] ) def", "{}, \"\", None] + [None, \"packet\"] * 5 result_data =", "test_read(self, stream): data = [\"packet\", 0, {}, \"\", None] +", "MockSession() return session @fixture def transport(self): return MockTransport() @fixture def", "test_open(self, stream): stream.save = mock.MagicMock() stream.closed = True assert stream.open()", "stream.closed = True if item is ...: break result.append(item) assert", "OF ANY KIND, either express or implied. 
# See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "stream.ack(\"message\", test_kw=1) stream.transport.ack.assert_called_once_with(stream, \"message\", test_kw=1) def test_ack_closed(self, stream): stream.closed =", "stream.save = mock.MagicMock() stream.redirect_to(station) assert stream.station == station station.save.assert_called_once_with(save_dependency=False) stream.save.assert_called_once_with(save_dependency=False)", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "test_read_while_sess_open(self, stream): stream.session._closed = True iter_data = iter([0, 1, 2,", "stream.save = mock.MagicMock() stream.closed = True assert stream.open() is stream", "MockTransport(ZeroTransport): def __init__(self): pass connect = mock.MagicMock() disconnect = mock.MagicMock()", "test_open_no_commit(self, stream): stream.save = mock.MagicMock() stream.closed = True assert stream.open(save=False)", "the License. 
# You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "patch, call from pytest import fixture from tethys.core.pipes.pipe_zero import ZeroPipe", "with patch(\"time.time\", lambda: 12345): stream.heartbeat() assert stream.heartbeat_ts == 12345 stream.save.assert_called_once_with(save_dependency=False)", "item is ...: break result.append(item) if platform.python_implementation().lower() == \"pypy\": gc.collect()", "stream assert stream.closed is True stream.save.assert_not_called() # read def test_read(self,", "1 stream.transport.recv.assert_has_calls( [call(stream, wait_timeout=None, test_kw=1) for _ in data] )", "to in writing, software # distributed under the License is", "context def test_new_connection_context(self, stream): with stream.connection_context(): MockTransport.connect.assert_called_once_with(stream) MockTransport.disconnect.assert_not_called() MockTransport.disconnect.assert_called_once_with(stream) def", "mock.MagicMock() stream.write(\"packet\", many=True, test_kw=1) stream.transport.send.assert_has_calls( [call(stream, i, test_kw=1) for i", "stream.closed is False stream.save.assert_called_once_with(save_dependency=False) def test_open_no_commit(self, stream): stream.save = mock.MagicMock()", "assert result == result_data assert stream.connection_context.call_count == 1 assert connection_context.__enter__.call_count", "# See the License for the specific language governing permissions", "pass class TestZeroStream: @staticmethod def teardown_method(): MockTransport.connect.reset_mock() MockTransport.disconnect.reset_mock() @fixture def", "def test_heartbeat_fail_delay(self, stream): assert stream.heartbeat_fail_delay == stream.DEFAULT_HEARTBEAT_FAIL_DELAY stream.station = mock.MagicMock(spec=ZeroStation)", "iter_data = iter(data) def recv_cb(*_, **__): try: return next(iter_data) except", "assert 
stream.transport == transport # conn context def test_new_connection_context(self, stream):", "True if item is ...: break result.append(item) assert result ==", "= mock.MagicMock() stream.write(\"packet\", test_kw=1) stream.transport.send.assert_called_once_with(stream, \"packet\", test_kw=1) assert stream.connection_context.call_count ==", "1 assert connection_context.__enter__.call_count == 1 assert connection_context.__exit__.call_count == 1 def", "test_read_n_packets(self, stream): iter_data = iter([None, \"packet\"] + [\"packet\"] * 10)", "or agreed to in writing, software # distributed under the", "mock.MagicMock(spec=ZeroPipe) return pipe @fixture def session(self): session = MockSession() return", "required by applicable law or agreed to in writing, software", "import ZeroStation from tethys.core.streams.stream_zero import ZeroStream from tethys.core.transports.transport_zero import ZeroTransport", "# heartbeat def test_heartbeat_fail_delay(self, stream): assert stream.heartbeat_fail_delay == stream.DEFAULT_HEARTBEAT_FAIL_DELAY stream.station", "stream.closed is True stream.save.assert_called_once_with(save_dependency=False) def test_close_no_commit(self, stream): stream.save = mock.MagicMock()", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "in \"packet\"] ) assert stream.connection_context.call_count == 1 assert connection_context.__enter__.call_count ==", "for _ in range(5)] ) def test_read_while_sess_open(self, stream): stream.session._closed =", "test_new_connection_context(self, stream): with stream.connection_context(): MockTransport.connect.assert_called_once_with(stream) MockTransport.disconnect.assert_not_called() MockTransport.disconnect.assert_called_once_with(stream) def test_old_connection_context(self, stream):", "with the License. 
# You may obtain a copy of", "test_kw=1) stream.transport.ack.assert_not_called() # redirect def test_redirect(self, stream, station): station.save =", "1 assert connection_context.__enter__.call_count == 1 assert connection_context.__exit__.call_count == 1 stream.transport.recv.assert_has_calls(", "= [] for item in stream.read(count=5, test_kw=1): if item is", "mock.MagicMock(side_effect=recv_cb) result = [] for item in stream.read(wait_timeout=1, test_kw=1): if", "* 5 assert stream.connection_context.call_count == 1 assert connection_context.__enter__.call_count == 1", "for _ in range(5)] ) def test_read_when_station_changed(self, stream, station): iter_data", "@fixture def transport(self): return MockTransport() @fixture def station(self): return MockStation()", "assert stream.heartbeat_fail_delay == stream.DEFAULT_HEARTBEAT_FAIL_DELAY stream.station = mock.MagicMock(spec=ZeroStation) stream.station.heartbeat_fail_delay = 0", "stream): stream.refresh = mock.MagicMock() stream.station = mock.MagicMock(spec=ZeroStation) stream.station.heartbeat_fail_delay = 1000", "get_transport(_): return transport get_transport = mock.MagicMock(side_effect=get_transport) stream = ZeroStream(pipe, session,", "= MockSession() return session @fixture def transport(self): return MockTransport() @fixture", "[call(stream, i, test_kw=1) for i in \"packet\"] ) assert stream.connection_context.call_count", "= [\"packet\", 0, {}, \"\", None] + [None, \"packet\"] *", "closed(self): return self._closed class MockStation(ZeroStation): def __init__(self): pass class TestZeroStream:", "import patch, call from pytest import fixture from tethys.core.pipes.pipe_zero import", "ZeroSession from tethys.core.stations.station_zero import ZeroStation from tethys.core.streams.stream_zero import ZeroStream from", "range(5)] ) def test_read_when_station_changed(self, stream, station): iter_data = iter(range(10)) def", "data = [\"packet\", 0, {}, \"\", None] + [None, \"packet\"]", 
"connection_context.__exit__.assert_not_called() def test_write_out(self, stream): connection_context = mock.MagicMock() stream.connection_context = mock.MagicMock(", "for item in stream.read(test_kw=1): if item is ...: break result.append(item)", "is stream assert stream.closed is True stream.save.assert_called_once_with(save_dependency=False) def test_close_no_commit(self, stream):", "compliance with the License. # You may obtain a copy", "All Rights Reserved. # # Licensed under the Apache License,", "agreed to in writing, software # distributed under the License", "def test_busy_true(self, stream): stream.refresh = mock.MagicMock() stream.station = mock.MagicMock(spec=ZeroStation) stream.station.heartbeat_fail_delay", "= iter([None, \"packet\"] + [\"packet\"] * 10) def recv_cb(*_, **__):", "distributed under the License is distributed on an \"AS IS\"", "def __init__(self): pass connect = mock.MagicMock() disconnect = mock.MagicMock() class", "Inc. All Rights Reserved. # # Licensed under the Apache", "mock.MagicMock() stream.connection_context = mock.MagicMock( side_effect=lambda: connection_context ) stream.transport.recv = mock.MagicMock(side_effect=recv_cb)", "stream.transport.recv.assert_called_once_with(stream, wait_timeout=1, test_kw=1) # write def test_write(self, stream): connection_context =", "test_kw=1) stream.transport.ack.assert_called_once_with(stream, \"message\", test_kw=1) def test_ack_closed(self, stream): stream.closed = True", "**__): try: return next(iter_data) except StopIteration: return ... 
connection_context =", "MockTransport.connect.reset_mock() MockTransport.disconnect.reset_mock() @fixture def pipe(self): pipe = mock.MagicMock(spec=ZeroPipe) return pipe", "station if item is ...: break result.append(item) assert result ==", "10 assert stream.is_busy is False assert stream.refresh.call_count == 1 def", "fixture from tethys.core.pipes.pipe_zero import ZeroPipe from tethys.core.sessions.sess_zero import ZeroSession from", "iter_data = iter([None, \"packet\"] + [\"packet\"] * 10) def recv_cb(*_,", "time.time() - 10 assert stream.is_busy is False assert stream.refresh.call_count ==", "test_kw=1) stream.transport.send.assert_has_calls( [call(stream, i, test_kw=1) for i in \"packet\"] )", "write def test_write(self, stream): connection_context = mock.MagicMock() stream.connection_context = mock.MagicMock(", "express or implied. # See the License for the specific", "except in compliance with the License. # You may obtain", "test_busy_false(self, stream): stream.refresh = mock.MagicMock() stream.station = mock.MagicMock(spec=ZeroStation) stream.station.heartbeat_fail_delay =", "1 stream.heartbeat_ts = time.time() - 10 assert stream.is_busy is False", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "assert stream.closed is True stream.save.assert_called_once_with(save_dependency=False) def test_close_no_commit(self, stream): stream.save =", "session, transport): def get_transport(_): return transport get_transport = mock.MagicMock(side_effect=get_transport) stream", "= 1000 stream.heartbeat_ts = time.time() assert stream.is_busy is True assert", "not use this file except in compliance with the License.", "self._closed = False @property def closed(self): return self._closed class MockStation(ZeroStation):", "[\"packet\"] * 5 assert stream.connection_context.call_count == 1 assert connection_context.__enter__.call_count ==", "[] for item in stream.read(test_kw=1): if item == 4: stream.station", "writing, software # distributed under 
the License is distributed on", "mock.MagicMock(spec=ZeroStation) stream.station.heartbeat_fail_delay = 1000 stream.heartbeat_ts = time.time() assert stream.is_busy is", "gc import platform import time from unittest import mock from", "result == [\"packet\"] * 5 assert stream.connection_context.call_count == 1 assert", "for item in stream.read(test_kw=1): if item == 4: stream.station =", "you may not use this file except in compliance with", "10) def recv_cb(*_, **__): try: return next(iter_data) except StopIteration: return", ") stream.transport.send = mock.MagicMock() stream.closed = True stream.pipe.node_b = \"<out>\"", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "= \"<out>\" stream.write(\"packet\", test_kw=1) stream.transport.send.assert_called_once_with(stream, \"packet\", test_kw=1) assert stream.connection_context.call_count ==", "def test_close(self, stream): stream.save = mock.MagicMock() assert stream.close() is stream", "1000 stream.heartbeat_ts = time.time() assert stream.is_busy is True assert stream.refresh.call_count", "time.time() assert stream.is_busy is True assert stream.refresh.call_count == 1 def", "is stream assert stream.closed is False stream.save.assert_called_once_with(save_dependency=False) def test_open_no_commit(self, stream):", "def test_read_none(self, stream): iter_data = iter([None, \"packet\"] + [\"packet\"] *", "@fixture def stream(self, pipe, session, transport): return ZeroStream(pipe, session, transport)", "stream.closed = True stream.pipe.node_b = \"<out>\" stream.write(\"packet\", test_kw=1) stream.transport.send.assert_called_once_with(stream, \"packet\",", "CONDITIONS OF ANY KIND, either express or implied. 
# See", "if item is ...: break result.append(item) if platform.python_implementation().lower() == \"pypy\":", "session(self): session = MockSession() return session @fixture def transport(self): return", "= mock.MagicMock() stream.closed = True assert stream.open() is stream assert", "in range(5)] ) def test_read_while_sess_open(self, stream): stream.session._closed = True iter_data", "def test_redirect(self, stream, station): station.save = mock.MagicMock() station.stream_lock_ttl = 0", "list(range(4)) assert stream.connection_context.call_count == 1 assert connection_context.__enter__.call_count == 1 assert", "tethys.core.pipes.pipe_zero import ZeroPipe from tethys.core.sessions.sess_zero import ZeroSession from tethys.core.stations.station_zero import", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "[None, \"packet\"] * 5 result_data = list(filter(lambda x: x is", "__init__(self): pass class TestZeroStream: @staticmethod def teardown_method(): MockTransport.connect.reset_mock() MockTransport.disconnect.reset_mock() @fixture", "mock.MagicMock() stream.ack(\"message\", test_kw=1) stream.transport.ack.assert_not_called() # redirect def test_redirect(self, stream, station):", "def teardown_method(): MockTransport.connect.reset_mock() MockTransport.disconnect.reset_mock() @fixture def pipe(self): pipe = mock.MagicMock(spec=ZeroPipe)", "stream.heartbeat_ts == 12345 stream.save.assert_called_once_with(save_dependency=False) # open def test_open(self, stream): stream.save", "governing permissions and # limitations under the License. 
import gc", "if item is ...: break result.append(item) assert result == list(range(4))", "def test_read(self, stream): data = [\"packet\", 0, {}, \"\", None]", "stream.connection_context = mock.MagicMock( side_effect=lambda: connection_context ) stream.transport.recv = mock.MagicMock(side_effect=recv_cb) result", "test_read_none(self, stream): iter_data = iter([None, \"packet\"] + [\"packet\"] * 10)", "stream.open = mock.MagicMock() stream.close = mock.MagicMock() with stream: stream.open.assert_called_once_with(save=False) stream.close.assert_not_called()", "def test_read_while_sess_open(self, stream): stream.session._closed = True iter_data = iter([0, 1,", "== 1 assert connection_context.__enter__.call_count == 1 assert connection_context.__exit__.call_count == 1", "assert connection_context.__exit__.call_count == 1 def test_write_when_closed(self, stream): connection_context = mock.MagicMock()", "stream.is_busy is False assert stream.refresh.call_count == 1 def test_busy_true(self, stream):", "if item == 4: stream.station = station if item is", "connection_context.__enter__.call_count == 1 assert connection_context.__exit__.call_count == 1 def test_write_when_closed(self, stream):", "License. import gc import platform import time from unittest import", "list(range(5)) assert stream.connection_context.call_count == 1 assert connection_context.__enter__.call_count == 1 assert", "True assert stream.open() is stream assert stream.closed is False stream.save.assert_called_once_with(save_dependency=False)", "stream, station): station.save = mock.MagicMock() station.stream_lock_ttl = 0 stream.save =", "= 0 assert stream.heartbeat_fail_delay == stream.DEFAULT_HEARTBEAT_FAIL_DELAY stream.station.heartbeat_fail_delay = 12345 assert", "under the License. 
import gc import platform import time from", ") stream.transport.send = mock.MagicMock() stream.write(\"packet\", test_kw=1) stream.transport.send.assert_called_once_with(stream, \"packet\", test_kw=1) assert", "assert stream.closed is True stream.save.assert_not_called() # read def test_read(self, stream):", "test_close_no_commit(self, stream): stream.save = mock.MagicMock() assert stream.close(save=False) is stream assert", "MockStation(ZeroStation): def __init__(self): pass class TestZeroStream: @staticmethod def teardown_method(): MockTransport.connect.reset_mock()", "= mock.MagicMock() with patch(\"time.time\", lambda: 12345): stream.heartbeat() assert stream.heartbeat_ts ==", "stream.save = mock.MagicMock() stream.closed = True assert stream.open(save=False) is stream", "= mock.MagicMock() class MockSession(ZeroSession): closing_mode = None def __init__(self): self._closed", "assert stream.heartbeat_fail_delay == 12345 def test_busy_false(self, stream): stream.refresh = mock.MagicMock()", "OR CONDITIONS OF ANY KIND, either express or implied. 
#", "class TestZeroStream: @staticmethod def teardown_method(): MockTransport.connect.reset_mock() MockTransport.disconnect.reset_mock() @fixture def pipe(self):", "= mock.MagicMock() assert stream.close() is stream assert stream.closed is True", "transport): def get_transport(_): return transport get_transport = mock.MagicMock(side_effect=get_transport) stream =", "connection_context ) stream.transport.send = mock.MagicMock() stream.write(\"packet\", many=True, test_kw=1) stream.transport.send.assert_has_calls( [call(stream,", "wait_timeout=None, test_kw=1) for _ in range(5)] ) def test_read_while_sess_open(self, stream):", "return ZeroStream(pipe, session, transport) # init def test_init_with_transport_cb(self, pipe, session,", "the License is distributed on an \"AS IS\" BASIS, #", "break result.append(item) if platform.python_implementation().lower() == \"pypy\": gc.collect() assert result ==", "mock.MagicMock() stream.station = mock.MagicMock(spec=ZeroStation) stream.station.heartbeat_fail_delay = 1000 stream.heartbeat_ts = time.time()", "result = [] for item in stream.read(wait_timeout=1, test_kw=1): if item", "__init__(self): self._closed = False @property def closed(self): return self._closed class", "def session(self): session = MockSession() return session @fixture def transport(self):", "connection_context ) stream.transport.send = mock.MagicMock() stream.closed = True stream.write(\"packet\", test_kw=1)", "def test_ack_closed(self, stream): stream.closed = True stream.transport.ack = mock.MagicMock() stream.ack(\"message\",", "assert result == [] assert stream.connection_context.call_count == 1 assert connection_context.__enter__.call_count", "stream.save = mock.MagicMock() with patch(\"time.time\", lambda: 12345): stream.heartbeat() assert stream.heartbeat_ts", "True stream.pipe.node_b = \"<out>\" stream.write(\"packet\", test_kw=1) stream.transport.send.assert_called_once_with(stream, \"packet\", test_kw=1) assert", "item is ...: break 
result.append(item) assert result == list(range(4)) assert", "stream, station): iter_data = iter(range(10)) def recv_cb(*_, **__): try: return", "stream): MockTransport._connections[stream.id] = stream with stream.connection_context(): MockTransport.connect.assert_not_called() MockTransport.disconnect.assert_not_called() # heartbeat", "is False assert stream.refresh.call_count == 1 def test_busy_true(self, stream): stream.refresh", "test_kw=1) def test_ack_closed(self, stream): stream.closed = True stream.transport.ack = mock.MagicMock()", ") assert stream.connection_context.call_count == 1 assert connection_context.__enter__.call_count == 1 assert", "* 5 result_data = list(filter(lambda x: x is not None,", "= iter(range(10)) def recv_cb(*_, **__): try: return next(iter_data) except StopIteration:", "# open/close context def test_context(self, stream): stream.open = mock.MagicMock() stream.close", "break result.append(item) assert result == list(range(4)) assert stream.connection_context.call_count == 1", "is stream assert stream.closed is False stream.save.assert_not_called() # close def", "= mock.MagicMock() stream.ack(\"message\", test_kw=1) stream.transport.ack.assert_not_called() # redirect def test_redirect(self, stream,", "1, 2, 3, None, 4]) def recv_cb(*_, **__): try: return", "connection_context ) stream.transport.send = mock.MagicMock() stream.closed = True stream.pipe.node_b =", "mock.MagicMock() stream.station = mock.MagicMock(spec=ZeroStation) stream.station.heartbeat_fail_delay = 1 stream.heartbeat_ts = time.time()", "stream.transport.recv = mock.MagicMock(side_effect=recv_cb) result = [] for item in stream.read(wait_timeout=1,", "== stream.DEFAULT_HEARTBEAT_FAIL_DELAY stream.station = mock.MagicMock(spec=ZeroStation) stream.station.heartbeat_fail_delay = 0 assert stream.heartbeat_fail_delay", "test_redirect(self, stream, station): station.save = mock.MagicMock() station.stream_lock_ttl = 0 stream.save", "... 
connection_context = mock.MagicMock() stream.connection_context = mock.MagicMock( side_effect=lambda: connection_context )", "data)) iter_data = iter(data) def recv_cb(*_, **__): try: return next(iter_data)", "stream.station == station station.save.assert_called_once_with(save_dependency=False) stream.save.assert_called_once_with(save_dependency=False) # open/close context def test_context(self,", "law or agreed to in writing, software # distributed under", "connection_context.__enter__.call_count == 1 assert connection_context.__exit__.call_count == 1 stream.transport.recv.assert_called_once_with(stream, wait_timeout=1, test_kw=1)", "== 1 assert connection_context.__exit__.call_count == 1 def test_write_when_closed(self, stream): connection_context", "= mock.MagicMock(spec=ZeroStation) stream.station.heartbeat_fail_delay = 0 assert stream.heartbeat_fail_delay == stream.DEFAULT_HEARTBEAT_FAIL_DELAY stream.station.heartbeat_fail_delay", "connection_context.__enter__.call_count == 1 assert connection_context.__exit__.call_count == 1 def test_write_many(self, stream):", "+ [\"packet\"] * 10) def recv_cb(*_, **__): try: return next(iter_data)", "return transport get_transport = mock.MagicMock(side_effect=get_transport) stream = ZeroStream(pipe, session, get_transport)", "test_kw=1) # write def test_write(self, stream): connection_context = mock.MagicMock() stream.connection_context", "break result.append(item) assert result == [\"packet\"] * 5 assert stream.connection_context.call_count", "result.append(item) assert result == list(range(5)) assert stream.connection_context.call_count == 1 assert", "class MockSession(ZeroSession): closing_mode = None def __init__(self): self._closed = False", "pipe = mock.MagicMock(spec=ZeroPipe) return pipe @fixture def session(self): session =", "station(self): return MockStation() @fixture def stream(self, pipe, session, transport): return", "open def test_open(self, stream): stream.save = mock.MagicMock() stream.closed = True", 
"result_data = list(filter(lambda x: x is not None, data)) iter_data", "1 assert connection_context.__exit__.call_count == 1 def test_write_when_closed(self, stream): connection_context =", "_ in range(5)] ) def test_read_none(self, stream): iter_data = iter([None,", "== 1 # ack def test_ack(self, stream): stream.transport.ack = mock.MagicMock()", "MockSession(ZeroSession): closing_mode = None def __init__(self): self._closed = False @property", "iter([0, 1, 2, 3, None, 4]) def recv_cb(*_, **__): try:", "result = [] for item in stream.read(test_kw=1): if item ==", "wait_timeout=None, test_kw=1) for _ in range(5)] ) def test_read_none(self, stream):", "disconnect = mock.MagicMock() class MockSession(ZeroSession): closing_mode = None def __init__(self):", "get_transport = mock.MagicMock(side_effect=get_transport) stream = ZeroStream(pipe, session, get_transport) assert stream.transport", "if item is ...: break result.append(item) assert result == list(range(5))", "assert stream.open() is stream assert stream.closed is False stream.save.assert_called_once_with(save_dependency=False) def", "is ...: break result.append(item) assert result == list(range(5)) assert stream.connection_context.call_count", "item in stream.read(test_kw=1): if item is ...: break result.append(item) assert", "stream.save.assert_called_once_with(save_dependency=False) # open def test_open(self, stream): stream.save = mock.MagicMock() stream.closed", "stream): iter_data = iter([None, \"packet\"] + [\"packet\"] * 10) def", "1 stream.transport.recv.assert_has_calls( [call(stream, wait_timeout=None, test_kw=1) for _ in range(6)] )", "for _ in range(6)] ) def test_read_while_stream_open(self, stream): iter_data =", "connection_context ) stream.transport.send = mock.MagicMock() stream.write(\"packet\", test_kw=1) stream.transport.send.assert_called_once_with(stream, \"packet\", test_kw=1)", "\"<out>\" stream.write(\"packet\", test_kw=1) stream.transport.send.assert_called_once_with(stream, \"packet\", 
test_kw=1) assert stream.connection_context.call_count == 1", "connection_context.__enter__.call_count == 1 assert connection_context.__exit__.call_count == 1 # ack def", "session @fixture def transport(self): return MockTransport() @fixture def station(self): return", "stream.connection_context(): MockTransport.connect.assert_not_called() MockTransport.disconnect.assert_not_called() # heartbeat def test_heartbeat_fail_delay(self, stream): assert stream.heartbeat_fail_delay", "not None, data)) iter_data = iter(data) def recv_cb(*_, **__): try:", "may obtain a copy of the License at # #", ") def test_read_n_packets(self, stream): iter_data = iter([None, \"packet\"] + [\"packet\"]", "test_heartbeat(self, stream): stream.save = mock.MagicMock() with patch(\"time.time\", lambda: 12345): stream.heartbeat()", "def test_busy_false(self, stream): stream.refresh = mock.MagicMock() stream.station = mock.MagicMock(spec=ZeroStation) stream.station.heartbeat_fail_delay", "1 assert connection_context.__exit__.call_count == 1 # ack def test_ack(self, stream):", "False @property def closed(self): return self._closed class MockStation(ZeroStation): def __init__(self):", "\"pypy\": gc.collect() assert result == result_data assert stream.connection_context.call_count == 1", "MockTransport.connect.assert_not_called() MockTransport.disconnect.assert_not_called() # heartbeat def test_heartbeat_fail_delay(self, stream): assert stream.heartbeat_fail_delay ==", "stream): data = [\"packet\", 0, {}, \"\", None] + [None,", "except StopIteration: return ... 
connection_context = mock.MagicMock() stream.connection_context = mock.MagicMock(", "\"message\", test_kw=1) def test_ack_closed(self, stream): stream.closed = True stream.transport.ack =", "mock.MagicMock() disconnect = mock.MagicMock() class MockSession(ZeroSession): closing_mode = None def", "stream.heartbeat_ts = time.time() - 10 assert stream.is_busy is False assert", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "init def test_init_with_transport_cb(self, pipe, session, transport): def get_transport(_): return transport", "ZeroStream(pipe, session, get_transport) assert stream.transport == transport # conn context", "limitations under the License. import gc import platform import time", "platform.python_implementation().lower() == \"pypy\": gc.collect() assert result == result_data assert stream.connection_context.call_count", "station.save = mock.MagicMock() station.stream_lock_ttl = 0 stream.save = mock.MagicMock() stream.redirect_to(station)", "= [] for item in stream.read(test_kw=1): if item is ...:", "tethys.core.streams.stream_zero import ZeroStream from tethys.core.transports.transport_zero import ZeroTransport class MockTransport(ZeroTransport): def", "1 assert connection_context.__exit__.call_count == 1 stream.transport.recv.assert_has_calls( [call(stream, wait_timeout=None, test_kw=1) for", "stream): iter_data = iter(range(10)) def recv_cb(*_, **__): try: return next(iter_data)", "may not use this file except in compliance with the", ") stream.transport.recv = mock.MagicMock(side_effect=recv_cb) result = [] for item in", ") def test_read_while_stream_open(self, stream): iter_data = iter(range(10)) def recv_cb(*_, **__):", "== list(range(4)) assert stream.connection_context.call_count == 1 assert connection_context.__enter__.call_count == 1", "stream.station.heartbeat_fail_delay = 12345 assert stream.heartbeat_fail_delay == 12345 def test_busy_false(self, stream):", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", 
"False assert stream.refresh.call_count == 1 def test_busy_true(self, stream): stream.refresh =", "this file except in compliance with the License. # You", "[] for item in stream.read(test_kw=1): if item == 4: stream.closed", "assert stream.closed is False stream.save.assert_not_called() # close def test_close(self, stream):", "def station(self): return MockStation() @fixture def stream(self, pipe, session, transport):", "item in stream.read(test_kw=1): if item == 4: stream.station = station", "stream.closed is False stream.save.assert_not_called() # close def test_close(self, stream): stream.save", "= 12345 assert stream.heartbeat_fail_delay == 12345 def test_busy_false(self, stream): stream.refresh", "unittest import mock from unittest.mock import patch, call from pytest", "connect = mock.MagicMock() disconnect = mock.MagicMock() class MockSession(ZeroSession): closing_mode =", "False stream.save.assert_called_once_with(save_dependency=False) def test_open_no_commit(self, stream): stream.save = mock.MagicMock() stream.closed =", "side_effect=lambda: connection_context ) stream.transport.send = mock.MagicMock() stream.write(\"packet\", test_kw=1) stream.transport.send.assert_called_once_with(stream, \"packet\",", "# read def test_read(self, stream): data = [\"packet\", 0, {},", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "stream): with stream.connection_context(): MockTransport.connect.assert_called_once_with(stream) MockTransport.disconnect.assert_not_called() MockTransport.disconnect.assert_called_once_with(stream) def test_old_connection_context(self, stream): MockTransport._connections[stream.id]", "= station if item is ...: break result.append(item) assert result", "= [] for item in stream.read(wait_timeout=1, test_kw=1): if item is", "for i in \"packet\"] ) assert stream.connection_context.call_count == 1 assert", "stream.connection_context.assert_not_called() connection_context.__enter__.assert_not_called() 
connection_context.__exit__.assert_not_called() def test_write_out(self, stream): connection_context = mock.MagicMock() stream.connection_context", "# # Licensed under the Apache License, Version 2.0 (the", "mock.MagicMock() stream.closed = True assert stream.open(save=False) is stream assert stream.closed", "patch(\"time.time\", lambda: 12345): stream.heartbeat() assert stream.heartbeat_ts == 12345 stream.save.assert_called_once_with(save_dependency=False) #", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "mock.MagicMock(spec=ZeroStation) stream.station.heartbeat_fail_delay = 0 assert stream.heartbeat_fail_delay == stream.DEFAULT_HEARTBEAT_FAIL_DELAY stream.station.heartbeat_fail_delay =", "stream.is_busy is True assert stream.refresh.call_count == 1 def test_heartbeat(self, stream):", "True stream.save.assert_not_called() # read def test_read(self, stream): data = [\"packet\",", "def test_ack(self, stream): stream.transport.ack = mock.MagicMock() stream.ack(\"message\", test_kw=1) stream.transport.ack.assert_called_once_with(stream, \"message\",", "station station.save.assert_called_once_with(save_dependency=False) stream.save.assert_called_once_with(save_dependency=False) # open/close context def test_context(self, stream): stream.open", "== 1 def test_write_when_closed(self, stream): connection_context = mock.MagicMock() stream.connection_context =", "x is not None, data)) iter_data = iter(data) def recv_cb(*_,", "is not None, data)) iter_data = iter(data) def recv_cb(*_, **__):", "mock.MagicMock() assert stream.close(save=False) is stream assert stream.closed is True stream.save.assert_not_called()", "1 def test_write_when_closed(self, stream): connection_context = mock.MagicMock() stream.connection_context = mock.MagicMock(", "mock.MagicMock(side_effect=recv_cb) result = [] for item in stream.read(test_kw=1): if item", "stream.read(test_kw=1): if item is ...: break result.append(item) 
assert result ==", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "if item is ...: break result.append(item) assert result == []", "stream.write(\"packet\", test_kw=1) stream.transport.send.assert_called_once_with(stream, \"packet\", test_kw=1) assert stream.connection_context.call_count == 1 assert", "= mock.MagicMock() stream.connection_context = mock.MagicMock( side_effect=lambda: connection_context ) stream.transport.recv =", "4: stream.station = station if item is ...: break result.append(item)", "stream.transport.send.assert_has_calls( [call(stream, i, test_kw=1) for i in \"packet\"] ) assert", "== 1 assert connection_context.__exit__.call_count == 1 # ack def test_ack(self,", "result.append(item) assert result == [\"packet\"] * 5 assert stream.connection_context.call_count ==", "test_kw=1) for _ in range(5)] ) def test_read_while_sess_open(self, stream): stream.session._closed", "== station station.save.assert_called_once_with(save_dependency=False) stream.save.assert_called_once_with(save_dependency=False) # open/close context def test_context(self, stream):", "stream.station.heartbeat_fail_delay = 1000 stream.heartbeat_ts = time.time() assert stream.is_busy is True", "stream.station.heartbeat_fail_delay = 1 stream.heartbeat_ts = time.time() - 10 assert stream.is_busy", "def test_context(self, stream): stream.open = mock.MagicMock() stream.close = mock.MagicMock() with", "assert stream.open(save=False) is stream assert stream.closed is False stream.save.assert_not_called() #", "== 4: stream.closed = True if item is ...: break", "mock.MagicMock() stream.closed = True stream.write(\"packet\", test_kw=1) stream.transport.send.assert_not_called() stream.connection_context.assert_not_called() connection_context.__enter__.assert_not_called() connection_context.__exit__.assert_not_called()", "iter(data) def recv_cb(*_, **__): try: return next(iter_data) except StopIteration: return", "4: stream.closed = True if item is ...: break 
result.append(item)", "item in stream.read(test_kw=1): if item == 4: stream.closed = True", "def test_write_many(self, stream): connection_context = mock.MagicMock() stream.connection_context = mock.MagicMock( side_effect=lambda:", "* 10) def recv_cb(*_, **__): try: return next(iter_data) except StopIteration:", "language governing permissions and # limitations under the License. import", "import ZeroTransport class MockTransport(ZeroTransport): def __init__(self): pass connect = mock.MagicMock()", "stream.station = mock.MagicMock(spec=ZeroStation) stream.station.heartbeat_fail_delay = 0 assert stream.heartbeat_fail_delay == stream.DEFAULT_HEARTBEAT_FAIL_DELAY", "...: break result.append(item) if platform.python_implementation().lower() == \"pypy\": gc.collect() assert result", "if item == 4: stream.closed = True if item is", "== [\"packet\"] * 5 assert stream.connection_context.call_count == 1 assert connection_context.__enter__.call_count", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "session = MockSession() return session @fixture def transport(self): return MockTransport()", "StopIteration: return ... 
connection_context = mock.MagicMock() stream.connection_context = mock.MagicMock( side_effect=lambda:", "= mock.MagicMock( side_effect=lambda: connection_context ) stream.transport.recv = mock.MagicMock(side_effect=recv_cb) result =", "stream.station.heartbeat_fail_delay = 0 assert stream.heartbeat_fail_delay == stream.DEFAULT_HEARTBEAT_FAIL_DELAY stream.station.heartbeat_fail_delay = 12345", "is True stream.save.assert_called_once_with(save_dependency=False) def test_close_no_commit(self, stream): stream.save = mock.MagicMock() assert", "def __init__(self): pass class TestZeroStream: @staticmethod def teardown_method(): MockTransport.connect.reset_mock() MockTransport.disconnect.reset_mock()", "= mock.MagicMock() disconnect = mock.MagicMock() class MockSession(ZeroSession): closing_mode = None", "def __init__(self): self._closed = False @property def closed(self): return self._closed", "...: break result.append(item) assert result == [\"packet\"] * 5 assert", "= mock.MagicMock(side_effect=recv_cb) result = [] for item in stream.read(count=5, test_kw=1):", "stream.transport.recv = mock.MagicMock(side_effect=recv_cb) result = [] for item in stream.read(count=5,", "the License. import gc import platform import time from unittest", "MockTransport.connect.assert_called_once_with(stream) MockTransport.disconnect.assert_not_called() MockTransport.disconnect.assert_called_once_with(stream) def test_old_connection_context(self, stream): MockTransport._connections[stream.id] = stream with", "or implied. # See the License for the specific language", "for item in stream.read(test_kw=1): if item == 4: stream.closed =", "Rights Reserved. # # Licensed under the Apache License, Version", "MockTransport.disconnect.reset_mock() @fixture def pipe(self): pipe = mock.MagicMock(spec=ZeroPipe) return pipe @fixture", "2020 Konstruktor, Inc. All Rights Reserved. 
# # Licensed under", "def test_write_when_closed(self, stream): connection_context = mock.MagicMock() stream.connection_context = mock.MagicMock( side_effect=lambda:", "test_write_out(self, stream): connection_context = mock.MagicMock() stream.connection_context = mock.MagicMock( side_effect=lambda: connection_context", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "class MockTransport(ZeroTransport): def __init__(self): pass connect = mock.MagicMock() disconnect =", "connection_context.__exit__.call_count == 1 # ack def test_ack(self, stream): stream.transport.ack =", "wait_timeout=None, test_kw=1) for _ in range(5)] ) def test_read_when_station_changed(self, stream,", "result = [] for item in stream.read(count=5, test_kw=1): if item", "side_effect=lambda: connection_context ) stream.transport.send = mock.MagicMock() stream.closed = True stream.write(\"packet\",", "[] for item in stream.read(test_kw=1): if item is ...: break", "is ...: break result.append(item) assert result == [] assert stream.connection_context.call_count", "= mock.MagicMock(side_effect=recv_cb) result = [] for item in stream.read(wait_timeout=1, test_kw=1):", "mock.MagicMock() station.stream_lock_ttl = 0 stream.save = mock.MagicMock() stream.redirect_to(station) assert stream.station", "stream.save.assert_called_once_with(save_dependency=False) def test_close_no_commit(self, stream): stream.save = mock.MagicMock() assert stream.close(save=False) is", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "= mock.MagicMock() stream.closed = True assert stream.open(save=False) is stream assert", "stream): stream.save = mock.MagicMock() stream.closed = True assert stream.open(save=False) is", "is True stream.save.assert_not_called() # read def test_read(self, stream): data =", "permissions and # limitations under the License. 
import gc import", "mock.MagicMock(side_effect=get_transport) stream = ZeroStream(pipe, session, get_transport) assert stream.transport == transport", "def get_transport(_): return transport get_transport = mock.MagicMock(side_effect=get_transport) stream = ZeroStream(pipe,", "and # limitations under the License. import gc import platform", "@fixture def session(self): session = MockSession() return session @fixture def", "0, {}, \"\", None] + [None, \"packet\"] * 5 result_data", "test_kw=1) assert stream.connection_context.call_count == 1 assert connection_context.__enter__.call_count == 1 assert", "heartbeat def test_heartbeat_fail_delay(self, stream): assert stream.heartbeat_fail_delay == stream.DEFAULT_HEARTBEAT_FAIL_DELAY stream.station =", "1 def test_write_many(self, stream): connection_context = mock.MagicMock() stream.connection_context = mock.MagicMock(", "= mock.MagicMock( side_effect=lambda: connection_context ) stream.transport.send = mock.MagicMock() stream.write(\"packet\", test_kw=1)", "(the \"License\"); # you may not use this file except", "many=True, test_kw=1) stream.transport.send.assert_has_calls( [call(stream, i, test_kw=1) for i in \"packet\"]", "test_kw=1) stream.transport.send.assert_called_once_with(stream, \"packet\", test_kw=1) assert stream.connection_context.call_count == 1 assert connection_context.__enter__.call_count", "from unittest.mock import patch, call from pytest import fixture from", "# you may not use this file except in compliance", "== [] assert stream.connection_context.call_count == 1 assert connection_context.__enter__.call_count == 1", "assert result == list(range(4)) assert stream.connection_context.call_count == 1 assert connection_context.__enter__.call_count", "== result_data assert stream.connection_context.call_count == 1 assert connection_context.__enter__.call_count == 1", "= 0 stream.save = mock.MagicMock() stream.redirect_to(station) assert stream.station == station", 
"stream.transport.ack.assert_not_called() # redirect def test_redirect(self, stream, station): station.save = mock.MagicMock()", "try: return next(iter_data) except StopIteration: return ... connection_context = mock.MagicMock()", "stream(self, pipe, session, transport): return ZeroStream(pipe, session, transport) # init", "return pipe @fixture def session(self): session = MockSession() return session", "def test_init_with_transport_cb(self, pipe, session, transport): def get_transport(_): return transport get_transport", "stream.transport.recv = mock.MagicMock(side_effect=recv_cb) result = [] for item in stream.read(test_kw=1):", "def test_open_no_commit(self, stream): stream.save = mock.MagicMock() stream.closed = True assert", "stream.heartbeat() assert stream.heartbeat_ts == 12345 stream.save.assert_called_once_with(save_dependency=False) # open def test_open(self,", "stream.transport.send = mock.MagicMock() stream.write(\"packet\", many=True, test_kw=1) stream.transport.send.assert_has_calls( [call(stream, i, test_kw=1)", "is ...: break result.append(item) assert result == [\"packet\"] * 5", "stream.read(test_kw=1): if item == 4: stream.station = station if item", "tethys.core.stations.station_zero import ZeroStation from tethys.core.streams.stream_zero import ZeroStream from tethys.core.transports.transport_zero import", "# # Unless required by applicable law or agreed to", "data] ) def test_read_n_packets(self, stream): iter_data = iter([None, \"packet\"] +", "stream.transport.send.assert_not_called() stream.connection_context.assert_not_called() connection_context.__enter__.assert_not_called() connection_context.__exit__.assert_not_called() def test_write_out(self, stream): connection_context = mock.MagicMock()", "@staticmethod def teardown_method(): MockTransport.connect.reset_mock() MockTransport.disconnect.reset_mock() @fixture def pipe(self): pipe =", "result.append(item) if platform.python_implementation().lower() == \"pypy\": gc.collect() assert 
result == result_data", "stream.read(wait_timeout=1, test_kw=1): if item is ...: break result.append(item) assert result", "from tethys.core.sessions.sess_zero import ZeroSession from tethys.core.stations.station_zero import ZeroStation from tethys.core.streams.stream_zero", "# open def test_open(self, stream): stream.save = mock.MagicMock() stream.closed =", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "stream.connection_context = mock.MagicMock( side_effect=lambda: connection_context ) stream.transport.send = mock.MagicMock() stream.closed", "...: break result.append(item) assert result == list(range(5)) assert stream.connection_context.call_count ==", "stream): stream.session._closed = True iter_data = iter([0, 1, 2, 3,", "stream.heartbeat_fail_delay == 12345 def test_busy_false(self, stream): stream.refresh = mock.MagicMock() stream.station", "connection_context.__exit__.call_count == 1 def test_write_when_closed(self, stream): connection_context = mock.MagicMock() stream.connection_context", "Version 2.0 (the \"License\"); # you may not use this", "\"packet\"] ) assert stream.connection_context.call_count == 1 assert connection_context.__enter__.call_count == 1", "== 1 stream.transport.recv.assert_has_calls( [call(stream, wait_timeout=None, test_kw=1) for _ in range(5)]", "def test_open(self, stream): stream.save = mock.MagicMock() stream.closed = True assert", "iter_data = iter(range(10)) def recv_cb(*_, **__): try: return next(iter_data) except", "== 1 stream.transport.recv.assert_called_once_with(stream, wait_timeout=1, test_kw=1) # write def test_write(self, stream):", "def test_read_while_stream_open(self, stream): iter_data = iter(range(10)) def recv_cb(*_, **__): try:", "assert result == list(range(5)) assert stream.connection_context.call_count == 1 assert connection_context.__enter__.call_count", "time from unittest import mock from unittest.mock import patch, call", "return ... 
connection_context = mock.MagicMock() stream.connection_context = mock.MagicMock( side_effect=lambda: connection_context", "for _ in data] ) def test_read_n_packets(self, stream): iter_data =", "= time.time() assert stream.is_busy is True assert stream.refresh.call_count == 1", "= time.time() - 10 assert stream.is_busy is False assert stream.refresh.call_count", "stream.refresh = mock.MagicMock() stream.station = mock.MagicMock(spec=ZeroStation) stream.station.heartbeat_fail_delay = 1000 stream.heartbeat_ts", "in stream.read(test_kw=1): if item == 4: stream.station = station if", "_ in data] ) def test_read_n_packets(self, stream): iter_data = iter([None,", "implied. # See the License for the specific language governing", "under the Apache License, Version 2.0 (the \"License\"); # you", "= mock.MagicMock(spec=ZeroPipe) return pipe @fixture def session(self): session = MockSession()", "connection_context.__exit__.call_count == 1 stream.transport.recv.assert_called_once_with(stream, wait_timeout=1, test_kw=1) # write def test_write(self,", "= mock.MagicMock() stream.close = mock.MagicMock() with stream: stream.open.assert_called_once_with(save=False) stream.close.assert_not_called() stream.close.assert_called_once_with(save=False)", "transport): return ZeroStream(pipe, session, transport) # init def test_init_with_transport_cb(self, pipe,", "with stream.connection_context(): MockTransport.connect.assert_called_once_with(stream) MockTransport.disconnect.assert_not_called() MockTransport.disconnect.assert_called_once_with(stream) def test_old_connection_context(self, stream): MockTransport._connections[stream.id] =", "stream.transport.send = mock.MagicMock() stream.closed = True stream.pipe.node_b = \"<out>\" stream.write(\"packet\",", "stream.read(test_kw=1): if item is ...: break result.append(item) if platform.python_implementation().lower() ==", "= stream with stream.connection_context(): MockTransport.connect.assert_not_called() 
MockTransport.disconnect.assert_not_called() # heartbeat def test_heartbeat_fail_delay(self,", "by applicable law or agreed to in writing, software #", "stream): stream.save = mock.MagicMock() assert stream.close(save=False) is stream assert stream.closed", "_ in range(6)] ) def test_read_while_stream_open(self, stream): iter_data = iter(range(10))", "test_kw=1) for i in \"packet\"] ) assert stream.connection_context.call_count == 1", "connection_context = mock.MagicMock() stream.connection_context = mock.MagicMock( side_effect=lambda: connection_context ) stream.transport.recv", "item in stream.read(count=5, test_kw=1): if item is ...: break result.append(item)", "result.append(item) assert result == list(range(4)) assert stream.connection_context.call_count == 1 assert", "connection_context.__exit__.call_count == 1 stream.transport.recv.assert_has_calls( [call(stream, wait_timeout=None, test_kw=1) for _ in", "item is ...: break result.append(item) assert result == [] assert", "pipe, session, transport): def get_transport(_): return transport get_transport = mock.MagicMock(side_effect=get_transport)", "stream.save = mock.MagicMock() assert stream.close() is stream assert stream.closed is", "return self._closed class MockStation(ZeroStation): def __init__(self): pass class TestZeroStream: @staticmethod", "stream.save.assert_not_called() # close def test_close(self, stream): stream.save = mock.MagicMock() assert", "test_kw=1) for _ in range(6)] ) def test_read_while_stream_open(self, stream): iter_data", "tethys.core.sessions.sess_zero import ZeroSession from tethys.core.stations.station_zero import ZeroStation from tethys.core.streams.stream_zero import", "stream): stream.closed = True stream.transport.ack = mock.MagicMock() stream.ack(\"message\", test_kw=1) stream.transport.ack.assert_not_called()", "TestZeroStream: @staticmethod def teardown_method(): MockTransport.connect.reset_mock() MockTransport.disconnect.reset_mock() @fixture def pipe(self): pipe", "= 
list(filter(lambda x: x is not None, data)) iter_data =", "for _ in range(5)] ) def test_read_none(self, stream): iter_data =", "gc.collect() assert result == result_data assert stream.connection_context.call_count == 1 assert", "def test_heartbeat(self, stream): stream.save = mock.MagicMock() with patch(\"time.time\", lambda: 12345):", "tethys.core.transports.transport_zero import ZeroTransport class MockTransport(ZeroTransport): def __init__(self): pass connect =", "MockTransport.disconnect.assert_not_called() MockTransport.disconnect.assert_called_once_with(stream) def test_old_connection_context(self, stream): MockTransport._connections[stream.id] = stream with stream.connection_context():", "mock.MagicMock() stream.ack(\"message\", test_kw=1) stream.transport.ack.assert_called_once_with(stream, \"message\", test_kw=1) def test_ack_closed(self, stream): stream.closed", "True stream.transport.ack = mock.MagicMock() stream.ack(\"message\", test_kw=1) stream.transport.ack.assert_not_called() # redirect def", "mock.MagicMock( side_effect=lambda: connection_context ) stream.transport.send = mock.MagicMock() stream.write(\"packet\", many=True, test_kw=1)", "2, 3, None, 4]) def recv_cb(*_, **__): try: return next(iter_data)", "item in stream.read(wait_timeout=1, test_kw=1): if item is ...: break result.append(item)", "mock.MagicMock() stream.closed = True assert stream.open() is stream assert stream.closed", "mock.MagicMock( side_effect=lambda: connection_context ) stream.transport.send = mock.MagicMock() stream.closed = True", "= False @property def closed(self): return self._closed class MockStation(ZeroStation): def", "= mock.MagicMock() stream.write(\"packet\", many=True, test_kw=1) stream.transport.send.assert_has_calls( [call(stream, i, test_kw=1) for", "...: break result.append(item) assert result == [] assert stream.connection_context.call_count ==", "# ack def test_ack(self, stream): stream.transport.ack = mock.MagicMock() stream.ack(\"message\", test_kw=1)", 
"stream.closed = True stream.write(\"packet\", test_kw=1) stream.transport.send.assert_not_called() stream.connection_context.assert_not_called() connection_context.__enter__.assert_not_called() connection_context.__exit__.assert_not_called() def", "stream.pipe.node_b = \"<out>\" stream.write(\"packet\", test_kw=1) stream.transport.send.assert_called_once_with(stream, \"packet\", test_kw=1) assert stream.connection_context.call_count", "wait_timeout=None, test_kw=1) for _ in data] ) def test_read_n_packets(self, stream):", "is False stream.save.assert_not_called() # close def test_close(self, stream): stream.save =", "def test_write_out(self, stream): connection_context = mock.MagicMock() stream.connection_context = mock.MagicMock( side_effect=lambda:", "= True assert stream.open(save=False) is stream assert stream.closed is False", "@property def closed(self): return self._closed class MockStation(ZeroStation): def __init__(self): pass", "def test_read_n_packets(self, stream): iter_data = iter([None, \"packet\"] + [\"packet\"] *", "test_close(self, stream): stream.save = mock.MagicMock() assert stream.close() is stream assert", "assert connection_context.__exit__.call_count == 1 # ack def test_ack(self, stream): stream.transport.ack", "mock.MagicMock() stream.redirect_to(station) assert stream.station == station station.save.assert_called_once_with(save_dependency=False) stream.save.assert_called_once_with(save_dependency=False) # open/close", "\"packet\"] * 5 result_data = list(filter(lambda x: x is not", "== 1 stream.transport.recv.assert_has_calls( [call(stream, wait_timeout=None, test_kw=1) for _ in range(6)]", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "1 assert connection_context.__enter__.call_count == 1 assert connection_context.__exit__.call_count == 1 #", "item == 4: stream.closed = True if item is ...:", "Unless required by applicable law or agreed to in writing,", "is ...: break result.append(item) assert result == list(range(4)) 
assert stream.connection_context.call_count", "None, data)) iter_data = iter(data) def recv_cb(*_, **__): try: return", "connection_context ) stream.transport.recv = mock.MagicMock(side_effect=recv_cb) result = [] for item", "mock.MagicMock() assert stream.close() is stream assert stream.closed is True stream.save.assert_called_once_with(save_dependency=False)", "mock.MagicMock() with patch(\"time.time\", lambda: 12345): stream.heartbeat() assert stream.heartbeat_ts == 12345", "station): station.save = mock.MagicMock() station.stream_lock_ttl = 0 stream.save = mock.MagicMock()", "assert stream.station == station station.save.assert_called_once_with(save_dependency=False) stream.save.assert_called_once_with(save_dependency=False) # open/close context def", "4]) def recv_cb(*_, **__): try: return next(iter_data) except StopIteration: return", "stream.refresh = mock.MagicMock() stream.station = mock.MagicMock(spec=ZeroStation) stream.station.heartbeat_fail_delay = 1 stream.heartbeat_ts", "from tethys.core.stations.station_zero import ZeroStation from tethys.core.streams.stream_zero import ZeroStream from tethys.core.transports.transport_zero", "the specific language governing permissions and # limitations under the", "ZeroStream(pipe, session, transport) # init def test_init_with_transport_cb(self, pipe, session, transport):", "result.append(item) assert result == [] assert stream.connection_context.call_count == 1 assert", "def test_write(self, stream): connection_context = mock.MagicMock() stream.connection_context = mock.MagicMock( side_effect=lambda:", "assert stream.is_busy is True assert stream.refresh.call_count == 1 def test_heartbeat(self,", "1 stream.transport.recv.assert_called_once_with(stream, wait_timeout=1, test_kw=1) # write def test_write(self, stream): connection_context", "applicable law or agreed to in writing, software # distributed", "stream.refresh.call_count == 1 def test_busy_true(self, stream): stream.refresh = mock.MagicMock() 
stream.station", "stream.open() is stream assert stream.closed is False stream.save.assert_called_once_with(save_dependency=False) def test_open_no_commit(self,", "stream = ZeroStream(pipe, session, get_transport) assert stream.transport == transport #", "platform import time from unittest import mock from unittest.mock import", "mock.MagicMock( side_effect=lambda: connection_context ) stream.transport.send = mock.MagicMock() stream.write(\"packet\", test_kw=1) stream.transport.send.assert_called_once_with(stream,", ") stream.transport.send = mock.MagicMock() stream.write(\"packet\", many=True, test_kw=1) stream.transport.send.assert_has_calls( [call(stream, i,", "[call(stream, wait_timeout=None, test_kw=1) for _ in range(5)] ) def test_read_while_sess_open(self,", "if item is ...: break result.append(item) assert result == [\"packet\"]", "closing_mode = None def __init__(self): self._closed = False @property def", "test_kw=1): if item is ...: break result.append(item) assert result ==", "assert connection_context.__exit__.call_count == 1 def test_write_many(self, stream): connection_context = mock.MagicMock()", "side_effect=lambda: connection_context ) stream.transport.send = mock.MagicMock() stream.closed = True stream.pipe.node_b", "def test_read_when_station_changed(self, stream, station): iter_data = iter(range(10)) def recv_cb(*_, **__):", "[] for item in stream.read(wait_timeout=1, test_kw=1): if item is ...:", "in writing, software # distributed under the License is distributed", "assert connection_context.__enter__.call_count == 1 assert connection_context.__exit__.call_count == 1 stream.transport.recv.assert_has_calls( [call(stream,", "stream.read(test_kw=1): if item == 4: stream.closed = True if item", "wait_timeout=1, test_kw=1) # write def test_write(self, stream): connection_context = mock.MagicMock()", "x: x is not None, data)) iter_data = iter(data) def", "stream): stream.refresh = mock.MagicMock() stream.station = mock.MagicMock(spec=ZeroStation) 
stream.station.heartbeat_fail_delay = 1", "mock.MagicMock(side_effect=recv_cb) result = [] for item in stream.read(count=5, test_kw=1): if", "@fixture def pipe(self): pipe = mock.MagicMock(spec=ZeroPipe) return pipe @fixture def", "assert connection_context.__enter__.call_count == 1 assert connection_context.__exit__.call_count == 1 def test_write_when_closed(self,", "1 # ack def test_ack(self, stream): stream.transport.ack = mock.MagicMock() stream.ack(\"message\",", "# conn context def test_new_connection_context(self, stream): with stream.connection_context(): MockTransport.connect.assert_called_once_with(stream) MockTransport.disconnect.assert_not_called()", "= mock.MagicMock() stream.ack(\"message\", test_kw=1) stream.transport.ack.assert_called_once_with(stream, \"message\", test_kw=1) def test_ack_closed(self, stream):", "# Copyright 2020 Konstruktor, Inc. All Rights Reserved. # #", "mock.MagicMock() stream.write(\"packet\", test_kw=1) stream.transport.send.assert_called_once_with(stream, \"packet\", test_kw=1) assert stream.connection_context.call_count == 1", "import ZeroPipe from tethys.core.sessions.sess_zero import ZeroSession from tethys.core.stations.station_zero import ZeroStation", "conn context def test_new_connection_context(self, stream): with stream.connection_context(): MockTransport.connect.assert_called_once_with(stream) MockTransport.disconnect.assert_not_called() MockTransport.disconnect.assert_called_once_with(stream)", "= mock.MagicMock(side_effect=recv_cb) result = [] for item in stream.read(test_kw=1): if", "class MockStation(ZeroStation): def __init__(self): pass class TestZeroStream: @staticmethod def teardown_method():", "is ...: break result.append(item) if platform.python_implementation().lower() == \"pypy\": gc.collect() assert", "item is ...: break result.append(item) assert result == list(range(5)) assert", "stream.save.assert_called_once_with(save_dependency=False) # open/close context def test_context(self, stream): 
stream.open = mock.MagicMock()", ") def test_read_when_station_changed(self, stream, station): iter_data = iter(range(10)) def recv_cb(*_,", "mock.MagicMock() class MockSession(ZeroSession): closing_mode = None def __init__(self): self._closed =", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "License, Version 2.0 (the \"License\"); # you may not use", "# You may obtain a copy of the License at", "True assert stream.open(save=False) is stream assert stream.closed is False stream.save.assert_not_called()", "5 assert stream.connection_context.call_count == 1 assert connection_context.__enter__.call_count == 1 assert", "transport # conn context def test_new_connection_context(self, stream): with stream.connection_context(): MockTransport.connect.assert_called_once_with(stream)", "result == [] assert stream.connection_context.call_count == 1 assert connection_context.__enter__.call_count ==", "= mock.MagicMock() stream.closed = True stream.write(\"packet\", test_kw=1) stream.transport.send.assert_not_called() stream.connection_context.assert_not_called() connection_context.__enter__.assert_not_called()", "station.save.assert_called_once_with(save_dependency=False) stream.save.assert_called_once_with(save_dependency=False) # open/close context def test_context(self, stream): stream.open =", "connection_context.__enter__.call_count == 1 assert connection_context.__exit__.call_count == 1 stream.transport.recv.assert_has_calls( [call(stream, wait_timeout=None,", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "= mock.MagicMock() stream.closed = True stream.pipe.node_b = \"<out>\" stream.write(\"packet\", test_kw=1)", "= mock.MagicMock(side_effect=get_transport) stream = ZeroStream(pipe, session, get_transport) assert stream.transport ==", ") def test_read_none(self, stream): iter_data = iter([None, \"packet\"] + [\"packet\"]", "get_transport) assert stream.transport == transport # conn context def 
test_new_connection_context(self,", "def test_close_no_commit(self, stream): stream.save = mock.MagicMock() assert stream.close(save=False) is stream", "mock.MagicMock() stream.connection_context = mock.MagicMock( side_effect=lambda: connection_context ) stream.transport.send = mock.MagicMock()", "stream.transport.send = mock.MagicMock() stream.closed = True stream.write(\"packet\", test_kw=1) stream.transport.send.assert_not_called() stream.connection_context.assert_not_called()", "[] for item in stream.read(count=5, test_kw=1): if item is ...:", "= True stream.transport.ack = mock.MagicMock() stream.ack(\"message\", test_kw=1) stream.transport.ack.assert_not_called() # redirect", "3, None, 4]) def recv_cb(*_, **__): try: return next(iter_data) except", "stream assert stream.closed is False stream.save.assert_called_once_with(save_dependency=False) def test_open_no_commit(self, stream): stream.save", "is False stream.save.assert_called_once_with(save_dependency=False) def test_open_no_commit(self, stream): stream.save = mock.MagicMock() stream.closed", "assert stream.refresh.call_count == 1 def test_heartbeat(self, stream): stream.save = mock.MagicMock()", "[call(stream, wait_timeout=None, test_kw=1) for _ in range(5)] ) def test_read_when_station_changed(self,", "item in stream.read(test_kw=1): if item is ...: break result.append(item) if", "the License for the specific language governing permissions and #", "read def test_read(self, stream): data = [\"packet\", 0, {}, \"\",", "context def test_context(self, stream): stream.open = mock.MagicMock() stream.close = mock.MagicMock()", "test_kw=1) for _ in range(5)] ) def test_read_when_station_changed(self, stream, station):", "connection_context.__enter__.assert_not_called() connection_context.__exit__.assert_not_called() def test_write_out(self, stream): connection_context = mock.MagicMock() stream.connection_context =", "Apache License, Version 2.0 (the \"License\"); # you may not", "either express or implied. 
# See the License for the", "item is ...: break result.append(item) assert result == [\"packet\"] *", "teardown_method(): MockTransport.connect.reset_mock() MockTransport.disconnect.reset_mock() @fixture def pipe(self): pipe = mock.MagicMock(spec=ZeroPipe) return", "stream with stream.connection_context(): MockTransport.connect.assert_not_called() MockTransport.disconnect.assert_not_called() # heartbeat def test_heartbeat_fail_delay(self, stream):", "Konstruktor, Inc. All Rights Reserved. # # Licensed under the", "stream.open(save=False) is stream assert stream.closed is False stream.save.assert_not_called() # close", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "def pipe(self): pipe = mock.MagicMock(spec=ZeroPipe) return pipe @fixture def session(self):", "import gc import platform import time from unittest import mock", "mock.MagicMock() stream.closed = True stream.pipe.node_b = \"<out>\" stream.write(\"packet\", test_kw=1) stream.transport.send.assert_called_once_with(stream,", "= mock.MagicMock() stream.station = mock.MagicMock(spec=ZeroStation) stream.station.heartbeat_fail_delay = 1000 stream.heartbeat_ts =", "stream.station = mock.MagicMock(spec=ZeroStation) stream.station.heartbeat_fail_delay = 1 stream.heartbeat_ts = time.time() -", "return next(iter_data) except StopIteration: return ... 
connection_context = mock.MagicMock() stream.connection_context", "iter(range(10)) def recv_cb(*_, **__): try: return next(iter_data) except StopIteration: return", "None def __init__(self): self._closed = False @property def closed(self): return", "== 12345 stream.save.assert_called_once_with(save_dependency=False) # open def test_open(self, stream): stream.save =", "= mock.MagicMock( side_effect=lambda: connection_context ) stream.transport.send = mock.MagicMock() stream.write(\"packet\", many=True,", "stream.transport.recv.assert_has_calls( [call(stream, wait_timeout=None, test_kw=1) for _ in data] ) def", "test_ack(self, stream): stream.transport.ack = mock.MagicMock() stream.ack(\"message\", test_kw=1) stream.transport.ack.assert_called_once_with(stream, \"message\", test_kw=1)", "test_heartbeat_fail_delay(self, stream): assert stream.heartbeat_fail_delay == stream.DEFAULT_HEARTBEAT_FAIL_DELAY stream.station = mock.MagicMock(spec=ZeroStation) stream.station.heartbeat_fail_delay", "import fixture from tethys.core.pipes.pipe_zero import ZeroPipe from tethys.core.sessions.sess_zero import ZeroSession", ") def test_read_while_sess_open(self, stream): stream.session._closed = True iter_data = iter([0,", "...: break result.append(item) assert result == list(range(4)) assert stream.connection_context.call_count ==", "= iter([0, 1, 2, 3, None, 4]) def recv_cb(*_, **__):", "stream.write(\"packet\", test_kw=1) stream.transport.send.assert_not_called() stream.connection_context.assert_not_called() connection_context.__enter__.assert_not_called() connection_context.__exit__.assert_not_called() def test_write_out(self, stream): connection_context", "12345 assert stream.heartbeat_fail_delay == 12345 def test_busy_false(self, stream): stream.refresh =", "from tethys.core.streams.stream_zero import ZeroStream from tethys.core.transports.transport_zero import ZeroTransport class MockTransport(ZeroTransport):", "iter_data = iter([0, 1, 2, 3, None, 4]) def recv_cb(*_,", "= 
mock.MagicMock() station.stream_lock_ttl = 0 stream.save = mock.MagicMock() stream.redirect_to(station) assert", "1 assert connection_context.__exit__.call_count == 1 stream.transport.recv.assert_called_once_with(stream, wait_timeout=1, test_kw=1) # write", "assert stream.heartbeat_ts == 12345 stream.save.assert_called_once_with(save_dependency=False) # open def test_open(self, stream):", "session, transport): return ZeroStream(pipe, session, transport) # init def test_init_with_transport_cb(self,", "wait_timeout=None, test_kw=1) for _ in range(6)] ) def test_read_while_stream_open(self, stream):", "# redirect def test_redirect(self, stream, station): station.save = mock.MagicMock() station.stream_lock_ttl", "# limitations under the License. import gc import platform import", "return MockTransport() @fixture def station(self): return MockStation() @fixture def stream(self,", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "__init__(self): pass connect = mock.MagicMock() disconnect = mock.MagicMock() class MockSession(ZeroSession):", "test_write(self, stream): connection_context = mock.MagicMock() stream.connection_context = mock.MagicMock( side_effect=lambda: connection_context", "i, test_kw=1) for i in \"packet\"] ) assert stream.connection_context.call_count ==", "is True assert stream.refresh.call_count == 1 def test_heartbeat(self, stream): stream.save", "stream): assert stream.heartbeat_fail_delay == stream.DEFAULT_HEARTBEAT_FAIL_DELAY stream.station = mock.MagicMock(spec=ZeroStation) stream.station.heartbeat_fail_delay =", "== \"pypy\": gc.collect() assert result == result_data assert stream.connection_context.call_count ==", "break result.append(item) assert result == list(range(5)) assert stream.connection_context.call_count == 1", "True stream.save.assert_called_once_with(save_dependency=False) def test_close_no_commit(self, stream): stream.save = mock.MagicMock() assert stream.close(save=False)", "assert stream.closed is False 
stream.save.assert_called_once_with(save_dependency=False) def test_open_no_commit(self, stream): stream.save =", "== 1 def test_write_many(self, stream): connection_context = mock.MagicMock() stream.connection_context =", "None] + [None, \"packet\"] * 5 result_data = list(filter(lambda x:", "0 assert stream.heartbeat_fail_delay == stream.DEFAULT_HEARTBEAT_FAIL_DELAY stream.station.heartbeat_fail_delay = 12345 assert stream.heartbeat_fail_delay", "ZeroStream from tethys.core.transports.transport_zero import ZeroTransport class MockTransport(ZeroTransport): def __init__(self): pass", "stream.heartbeat_ts = time.time() assert stream.is_busy is True assert stream.refresh.call_count ==", "test_write_many(self, stream): connection_context = mock.MagicMock() stream.connection_context = mock.MagicMock( side_effect=lambda: connection_context", "\"License\"); # you may not use this file except in", "in stream.read(count=5, test_kw=1): if item is ...: break result.append(item) assert", "stream): stream.transport.ack = mock.MagicMock() stream.ack(\"message\", test_kw=1) stream.transport.ack.assert_called_once_with(stream, \"message\", test_kw=1) def", "# write def test_write(self, stream): connection_context = mock.MagicMock() stream.connection_context =", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "range(6)] ) def test_read_while_stream_open(self, stream): iter_data = iter(range(10)) def recv_cb(*_,", "stream.heartbeat_fail_delay == stream.DEFAULT_HEARTBEAT_FAIL_DELAY stream.station.heartbeat_fail_delay = 12345 assert stream.heartbeat_fail_delay == 12345", "assert stream.is_busy is False assert stream.refresh.call_count == 1 def test_busy_true(self,", "stream.write(\"packet\", many=True, test_kw=1) stream.transport.send.assert_has_calls( [call(stream, i, test_kw=1) for i in", "= True stream.pipe.node_b = \"<out>\" stream.write(\"packet\", test_kw=1) stream.transport.send.assert_called_once_with(stream, \"packet\", test_kw=1)", "stream): connection_context 
= mock.MagicMock() stream.connection_context = mock.MagicMock( side_effect=lambda: connection_context )", "session, get_transport) assert stream.transport == transport # conn context def", "# distributed under the License is distributed on an \"AS", "test_context(self, stream): stream.open = mock.MagicMock() stream.close = mock.MagicMock() with stream:", "connection_context = mock.MagicMock() stream.connection_context = mock.MagicMock( side_effect=lambda: connection_context ) stream.transport.send", "import ZeroSession from tethys.core.stations.station_zero import ZeroStation from tethys.core.streams.stream_zero import ZeroStream", "== 12345 def test_busy_false(self, stream): stream.refresh = mock.MagicMock() stream.station =", "# Unless required by applicable law or agreed to in", "ack def test_ack(self, stream): stream.transport.ack = mock.MagicMock() stream.ack(\"message\", test_kw=1) stream.transport.ack.assert_called_once_with(stream,", "assert stream.close() is stream assert stream.closed is True stream.save.assert_called_once_with(save_dependency=False) def", "_ in range(5)] ) def test_read_while_sess_open(self, stream): stream.session._closed = True", "def test_new_connection_context(self, stream): with stream.connection_context(): MockTransport.connect.assert_called_once_with(stream) MockTransport.disconnect.assert_not_called() MockTransport.disconnect.assert_called_once_with(stream) def test_old_connection_context(self,", "stream.close(save=False) is stream assert stream.closed is True stream.save.assert_not_called() # read", "connection_context.__exit__.call_count == 1 def test_write_many(self, stream): connection_context = mock.MagicMock() stream.connection_context", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "\"\", None] + [None, \"packet\"] * 5 result_data = list(filter(lambda", "stream): stream.save = mock.MagicMock() assert stream.close() is stream assert stream.closed", "assert stream.refresh.call_count == 1 def 
test_busy_true(self, stream): stream.refresh = mock.MagicMock()", "mock.MagicMock( side_effect=lambda: connection_context ) stream.transport.recv = mock.MagicMock(side_effect=recv_cb) result = []", "stream.ack(\"message\", test_kw=1) stream.transport.ack.assert_not_called() # redirect def test_redirect(self, stream, station): station.save", "stream.save = mock.MagicMock() assert stream.close(save=False) is stream assert stream.closed is", "side_effect=lambda: connection_context ) stream.transport.recv = mock.MagicMock(side_effect=recv_cb) result = [] for", "assert result == [\"packet\"] * 5 assert stream.connection_context.call_count == 1", "stream.connection_context = mock.MagicMock( side_effect=lambda: connection_context ) stream.transport.send = mock.MagicMock() stream.write(\"packet\",", "You may obtain a copy of the License at #", "== list(range(5)) assert stream.connection_context.call_count == 1 assert connection_context.__enter__.call_count == 1", "stream.station = station if item is ...: break result.append(item) assert", "0 stream.save = mock.MagicMock() stream.redirect_to(station) assert stream.station == station station.save.assert_called_once_with(save_dependency=False)", "range(5)] ) def test_read_none(self, stream): iter_data = iter([None, \"packet\"] +", "lambda: 12345): stream.heartbeat() assert stream.heartbeat_ts == 12345 stream.save.assert_called_once_with(save_dependency=False) # open", "transport get_transport = mock.MagicMock(side_effect=get_transport) stream = ZeroStream(pipe, session, get_transport) assert", "import platform import time from unittest import mock from unittest.mock", "stream assert stream.closed is True stream.save.assert_called_once_with(save_dependency=False) def test_close_no_commit(self, stream): stream.save", "result == list(range(4)) assert stream.connection_context.call_count == 1 assert connection_context.__enter__.call_count ==", "stream.transport.ack.assert_called_once_with(stream, \"message\", test_kw=1) def 
test_ack_closed(self, stream): stream.closed = True stream.transport.ack", "Copyright 2020 Konstruktor, Inc. All Rights Reserved. # # Licensed", "open/close context def test_context(self, stream): stream.open = mock.MagicMock() stream.close =", "None, 4]) def recv_cb(*_, **__): try: return next(iter_data) except StopIteration:", "12345 stream.save.assert_called_once_with(save_dependency=False) # open def test_open(self, stream): stream.save = mock.MagicMock()", "_ in range(5)] ) def test_read_when_station_changed(self, stream, station): iter_data =", "= mock.MagicMock() stream.connection_context = mock.MagicMock( side_effect=lambda: connection_context ) stream.transport.send =", "unittest.mock import patch, call from pytest import fixture from tethys.core.pipes.pipe_zero", "def transport(self): return MockTransport() @fixture def station(self): return MockStation() @fixture", "stream.station = mock.MagicMock(spec=ZeroStation) stream.station.heartbeat_fail_delay = 1000 stream.heartbeat_ts = time.time() assert", "= True if item is ...: break result.append(item) assert result", "station): iter_data = iter(range(10)) def recv_cb(*_, **__): try: return next(iter_data)", "the Apache License, Version 2.0 (the \"License\"); # you may", "== 1 def test_heartbeat(self, stream): stream.save = mock.MagicMock() with patch(\"time.time\",", "True assert stream.refresh.call_count == 1 def test_heartbeat(self, stream): stream.save =", "stream.close() is stream assert stream.closed is True stream.save.assert_called_once_with(save_dependency=False) def test_close_no_commit(self,", "item == 4: stream.station = station if item is ...:", "True stream.write(\"packet\", test_kw=1) stream.transport.send.assert_not_called() stream.connection_context.assert_not_called() connection_context.__enter__.assert_not_called() connection_context.__exit__.assert_not_called() def test_write_out(self, stream):", "= mock.MagicMock() stream.redirect_to(station) assert stream.station == station 
station.save.assert_called_once_with(save_dependency=False) stream.save.assert_called_once_with(save_dependency=False) #", "stream.connection_context(): MockTransport.connect.assert_called_once_with(stream) MockTransport.disconnect.assert_not_called() MockTransport.disconnect.assert_called_once_with(stream) def test_old_connection_context(self, stream): MockTransport._connections[stream.id] = stream", "import time from unittest import mock from unittest.mock import patch,", "stream.heartbeat_fail_delay == stream.DEFAULT_HEARTBEAT_FAIL_DELAY stream.station = mock.MagicMock(spec=ZeroStation) stream.station.heartbeat_fail_delay = 0 assert", "range(5)] ) def test_read_while_sess_open(self, stream): stream.session._closed = True iter_data =", "return session @fixture def transport(self): return MockTransport() @fixture def station(self):", "== stream.DEFAULT_HEARTBEAT_FAIL_DELAY stream.station.heartbeat_fail_delay = 12345 assert stream.heartbeat_fail_delay == 12345 def", "from tethys.core.pipes.pipe_zero import ZeroPipe from tethys.core.sessions.sess_zero import ZeroSession from tethys.core.stations.station_zero" ]
[ "copy ## of the Agreement prior to using this Licensed", "warranties, ## including, without limitation, warranties of merchantability and ##", "bound by all of the terms ## of the Agreement,", "pre_obj('pci_io', 'memory-space') self.o.hfs = pre_obj('hfs$', 'hostfs') self.o.phys_mem.map = [ [0x00000000,", "self.o.uart0.console = None elif connector == 'uart1': self.o.uart1.link = None", "any derivatives thereof are provided on an \"as ## is\"", "UARTs here either [0x890, self.o.empty, 0, 0x0, 0x8], [0x898, self.o.empty,", "Virtutech Simics Software ## License Agreement (the \"Agreement\"), and are", "if not, please contact Virtutech for a copy ## of", "return self.map_offset def set_map_offset(self, val, idx): if self.obj.configured: return Sim_Set_Illegal_Value", "pre_obj('uart1', 'NS16550') self.o.uart1.irq_dev = self.o.irq self.o.uart1.irq_level = 3 self.o.uart1.xmit_time =", "\"Agreement\"), and are being distributed under ## the Agreement. You", "parse_obj) self.o.cpu = [] self.map_offset = 0xf0000000 self.time_of_day = \"2006-06-06", "= pre_obj('of', 'ppc-of') self.o.of.cpu = self.o.cpu[0] self.o.of.memory_megs = self.memory_megs self.o.of.entry_point", "1], # no UARTs here either [0x890, self.o.empty, 0, 0x0,", "import time # Generic Simple System for PPC64 Processors class", "0x0, 0x100000]] self.o.pci_io.map = [ [0x020, self.o.irq, 0, 0x20, 0x1],", "self.memory_megs * 0x100000 self.o.ram = pre_obj('memory', 'ram') self.o.ram.image = self.o.ram_image", "of the terms ## of the Agreement, and use of", "0, 0x0, 0x100000]] self.o.pci_io.map = [ [0x020, self.o.irq, 0, 0x20,", "0, 0x0, 0x8, None, 0, 1], # no UARTs here", "None elif connector == 'uart1': self.o.uart1.link = None self.o.uart1.console =", "a copy of the Agreement with ## this Licensed Software;", "'uart0': if link: self.o.uart0.link = link else: self.o.uart0.console = console", "* import time # Generic Simple System for PPC64 Processors", "of RAM in megabytes.'], ['map_offset', Sim_Attr_Optional, 
'i', 'Base address for", "self.o.uart0, self.o.uart0.name] self.connector_info['uart1'] = [None, self.o.uart1, self.o.uart1.name] def connect_serial(self, connector,", "you agree to be bound by all of the terms", "limitation, warranties of merchantability and ## fitness for a particular", "'hostfs') self.o.phys_mem.map = [ [0x00000000, self.o.ram, 0, 0x0, self.memory_megs *", "True, 'multi' : False}, 'uart1' : {'type' : 'serial', 'direction'", "0, 0x0, 0x8], # two NS16550, at the traditional addresses", "to the terms ## the Agreement. ## ## This Source", "not work'], ['time_of_day', Sim_Attr_Optional, 's', 'Date and time to initialize", "False}} def __init__(self, parse_obj): component_object.__init__(self, parse_obj) self.o.cpu = [] self.map_offset", "return self.o.cpu[0] def get_processors(self): return self.o.cpu ppc64_simple_attributes = [ ['cpu_frequency',", "particular purpose and non-infringement. from sim_core import * from components", "self.o.uart0.link = None self.o.uart0.console = None elif connector == 'uart1':", "Agreement prior to using this Licensed Software. 
## ## By", "derivatives thereof and disclaims all implied warranties, ## including, without", "connector): if connector == 'uart0': self.o.uart0.link = None self.o.uart0.console =", "self.o.uart1.link = None self.o.uart1.console = None def get_clock(self): return self.o.cpu[0]", "[cpu] self.o.irq = pre_obj('irq$', 'i8259x2') self.o.irq.irq_dev = self.o.pic self.o.uart0 =", "connectors = { 'uart0' : {'type' : 'serial', 'direction' :", "component_object.__init__(self, parse_obj) self.o.cpu = [] self.map_offset = 0xf0000000 self.time_of_day =", "= { 'uart0' : {'type' : 'serial', 'direction' : 'down',", ": 'serial', 'direction' : 'down', 'empty_ok' : True, 'hotplug' :", "You should have received a copy of the Agreement with", "agree to be bound by all of the terms ##", "# Linux probes for UARTs at 0x2e8 and 0x3e8 too,", "0x0, 0x8]] def add_connector_info(self): self.connector_info['uart0'] = [None, self.o.uart0, self.o.uart0.name] self.connector_info['uart1']", "time # Generic Simple System for PPC64 Processors class ppc64_simple_base_component(component_object):", "= self.memory_megs self.o.of.entry_point = 0x7000000 self.o.of.map_offset = self.map_offset self.o.of.time_of_day =", "Simple System for PPC64 Processors class ppc64_simple_base_component(component_object): basename = 'system'", "not, please contact Virtutech for a copy ## of the", "0x100000 self.o.ram = pre_obj('memory', 'ram') self.o.ram.image = self.o.ram_image self.o.pic =", "self.o.irq, 0, 0xa0, 0x1], [0x0a1, self.o.irq, 0, 0xa1, 0x1], #", "disconnect_serial(self, connector): if connector == 'uart0': self.o.uart0.link = None self.o.uart0.console", "= None self.o.uart1.console = None def get_clock(self): return self.o.cpu[0] def", "' \\ 'Offsets at 4 GB and above will not", "= self.o.irq self.o.uart0.irq_level = 4 self.o.uart0.xmit_time = 1000 self.o.uart1 =", "0x8], # two NS16550, at the traditional addresses [0x2f8, self.o.uart1,", "Sim_Set_Illegal_Value try: time.strptime(val, \"%Y-%m-%d %H:%M:%S 
%Z\") except Exception, msg: SIM_attribute_error(str(msg))", "self.o.of.cpu = self.o.cpu[0] self.o.of.memory_megs = self.memory_megs self.o.of.entry_point = 0x7000000 self.o.of.map_offset", "0, 0x10], [self.map_offset + 0x0fc00000, self.o.pic, 0, 0x0, 0x100000]] self.o.pci_io.map", "'image') self.o.ram_image.size = self.memory_megs * 0x100000 self.o.ram = pre_obj('memory', 'ram')", "and any derivatives thereof are provided on an \"as ##", "self.o.cpu[0] self.o.of.memory_megs = self.memory_megs self.o.of.entry_point = 0x7000000 self.o.of.map_offset = self.map_offset", "= [None, self.o.uart0, self.o.uart0.name] self.connector_info['uart1'] = [None, self.o.uart1, self.o.uart1.name] def", "'ppc-broadcast-bus') self.o.empty = pre_obj('empty', 'empty-device') self.o.pci_io = pre_obj('pci_io', 'memory-space') self.o.hfs", "0x0, 0x8], [0x3f8, self.o.uart0, 0, 0x0, 0x8, None, 0, 1],", "be bound by all of the terms ## of the", "System for PPC64 Processors class ppc64_simple_base_component(component_object): basename = 'system' connectors", "= None elif connector == 'uart1': self.o.uart1.link = None self.o.uart1.console", "Licensed Software; if not, please contact Virtutech for a copy", "two NS16550, at the traditional addresses [0x2f8, self.o.uart1, 0, 0x0,", "Simics Software ## License Agreement (the \"Agreement\"), and are being", "a subset of Licensed ## Software pursuant to the terms", "self.freq_mhz = val return Sim_Set_Ok def get_memory_megs(self, idx): return self.memory_megs", "= None def get_clock(self): return self.o.cpu[0] def get_processors(self): return self.o.cpu", "amount of RAM in megabytes.'], ['map_offset', Sim_Attr_Optional, 'i', 'Base address", "def get_map_offset(self, idx): return self.map_offset def set_map_offset(self, val, idx): if", "get_cpu_frequency(self, idx): return self.freq_mhz def set_cpu_frequency(self, val, idx): if self.obj.configured:", "License Agreement (the \"Agreement\"), and are being distributed under ##", "import * import time # 
Generic Simple System for PPC64", "and are being distributed under ## the Agreement. You should", "pre_obj('empty', 'empty-device') self.o.pci_io = pre_obj('pci_io', 'memory-space') self.o.hfs = pre_obj('hfs$', 'hostfs')", "self.o.uart1, 0, 0x0, 0x8, None, 0, 1], [0x3e8, self.o.empty, 0,", "self.time_of_day self.o.broadcast_bus = pre_obj('broadcast_bus', 'ppc-broadcast-bus') self.o.empty = pre_obj('empty', 'empty-device') self.o.pci_io", "0, 0x0, 0x8], [0x3f8, self.o.uart0, 0, 0x0, 0x8, None, 0,", "## Copyright 2005-2007 Virtutech AB ## ## The contents herein", "[0x0a1, self.o.irq, 0, 0xa1, 0x1], # Linux probes for UARTs", "Licensed Software. ## ## By using this Source Code, you", "'uart1': self.o.uart1.link = None self.o.uart1.console = None def get_clock(self): return", "= 1000 self.o.of = pre_obj('of', 'ppc-of') self.o.of.cpu = self.o.cpu[0] self.o.of.memory_megs", "the Agreement. You should have received a copy of the", "= \"2006-06-06 06:06:06 UTC\" def get_cpu_frequency(self, idx): return self.freq_mhz def", "Code, you agree to be bound by all of the", "\"as ## is\" basis. 
Virtutech makes no warranties with respect", "06:06:06 UTC\" def get_cpu_frequency(self, idx): return self.freq_mhz def set_cpu_frequency(self, val,", "= pre_obj('phys_mem', 'memory-space') self.o.ram_image = pre_obj('memory_image', 'image') self.o.ram_image.size = self.memory_megs", "with ## this Licensed Software; if not, please contact Virtutech", "'system' connectors = { 'uart0' : {'type' : 'serial', 'direction'", "connector == 'uart1': if link: self.o.uart1.link = link else: self.o.uart1.console", "set_cpu_frequency(self, val, idx): if self.obj.configured: return Sim_Set_Illegal_Value self.freq_mhz = val", "have received a copy of the Agreement with ## this", "self.map_offset = val return Sim_Set_Ok def get_time_of_day(self, idx): return self.time_of_day", "RAM in megabytes.'], ['map_offset', Sim_Attr_Optional, 'i', 'Base address for device", "link: self.o.uart0.link = link else: self.o.uart0.console = console elif connector", "is\" basis. Virtutech makes no warranties with respect to the", "self.o.uart0.irq_dev = self.o.irq self.o.uart0.irq_level = 4 self.o.uart0.xmit_time = 1000 self.o.uart1", "0x8]] def add_connector_info(self): self.connector_info['uart0'] = [None, self.o.uart0, self.o.uart0.name] self.connector_info['uart1'] =", "except Exception, msg: SIM_attribute_error(str(msg)) return Sim_Set_Illegal_Value self.time_of_day = val return", "0x20, 0x1], [0x021, self.o.irq, 0, 0x21, 0x1], [0x0a0, self.o.irq, 0,", "ppc64_simple_attributes = [ ['cpu_frequency', Sim_Attr_Required, 'f', 'Processor frequency in MHz.'],", "Sim_Attr_Required, 'f', 'Processor frequency in MHz.'], ['memory_megs', Sim_Attr_Required, 'i', 'The", "self.o.irq self.o.uart1.irq_level = 3 self.o.uart1.xmit_time = 1000 self.o.of = pre_obj('of',", "By using this Source Code, you agree to be bound", "self.map_offset self.o.of.time_of_day = self.time_of_day self.o.broadcast_bus = pre_obj('broadcast_bus', 'ppc-broadcast-bus') self.o.empty =", "'Base address for device mappings. 
' \\ 'Offsets at 4", "self.o.uart1, self.o.uart1.name] def connect_serial(self, connector, link, console): if connector ==", "self.o.pci_io, 0, 0x0, 0x100000], [self.map_offset + 0x0f660000, self.o.hfs, 0, 0,", "Licensed ## Software pursuant to the terms of the Virtutech", "\"2006-06-06 06:06:06 UTC\" def get_cpu_frequency(self, idx): return self.freq_mhz def set_cpu_frequency(self,", "pre_obj('memory', 'ram') self.o.ram.image = self.o.ram_image self.o.pic = pre_obj('pic$', 'open-pic') self.o.pic.irq_devs", "= pre_obj('hfs$', 'hostfs') self.o.phys_mem.map = [ [0x00000000, self.o.ram, 0, 0x0,", "Sim_Attr_Optional, 'i', 'Base address for device mappings. ' \\ 'Offsets", "'down', 'empty_ok' : True, 'hotplug' : True, 'multi' : False}}", "= pre_obj('empty', 'empty-device') self.o.pci_io = pre_obj('pci_io', 'memory-space') self.o.hfs = pre_obj('hfs$',", "= self.memory_megs * 0x100000 self.o.ram = pre_obj('memory', 'ram') self.o.ram.image =", "subject to the terms ## the Agreement. ## ## This", "0x100000], [self.map_offset + 0x0f660000, self.o.hfs, 0, 0, 0x10], [self.map_offset +", "[0x020, self.o.irq, 0, 0x20, 0x1], [0x021, self.o.irq, 0, 0x21, 0x1],", "for a copy ## of the Agreement prior to using", "0x7000000 self.o.of.map_offset = self.map_offset self.o.of.time_of_day = self.time_of_day self.o.broadcast_bus = pre_obj('broadcast_bus',", "0x1], # Linux probes for UARTs at 0x2e8 and 0x3e8", "'s', 'Date and time to initialize the OpenFirmware RTC to']]", "0, 0x0, 0x8]] def add_connector_info(self): self.connector_info['uart0'] = [None, self.o.uart0, self.o.uart0.name]", "[] self.map_offset = 0xf0000000 self.time_of_day = \"2006-06-06 06:06:06 UTC\" def", "provide # empty mappings there [0x2e8, self.o.empty, 0, 0x0, 0x8],", "\\ 'Offsets at 4 GB and above will not work'],", "idx): return self.time_of_day def set_time_of_day(self, val, idx): if self.obj.configured: return", "Software ## License Agreement (the \"Agreement\"), and are being distributed", "## ## By using this 
Source Code, you agree to", "for a particular purpose and non-infringement. from sim_core import *", "None def get_clock(self): return self.o.cpu[0] def get_processors(self): return self.o.cpu ppc64_simple_attributes", "0x0, 0x8], # two NS16550, at the traditional addresses [0x2f8,", "[ [0x00000000, self.o.ram, 0, 0x0, self.memory_megs * 0x100000], [self.map_offset +", "1000 self.o.uart1 = pre_obj('uart1', 'NS16550') self.o.uart1.irq_dev = self.o.irq self.o.uart1.irq_level =", "Sim_Set_Illegal_Value self.map_offset = val return Sim_Set_Ok def get_time_of_day(self, idx): return", "= 0x7000000 self.o.of.map_offset = self.map_offset self.o.of.time_of_day = self.time_of_day self.o.broadcast_bus =", "warranties with respect to the Source ## Code or any", "herein are Source Code which are a subset of Licensed", "self.o.cpu ppc64_simple_attributes = [ ['cpu_frequency', Sim_Attr_Required, 'f', 'Processor frequency in", "self.o.irq.irq_dev = self.o.pic self.o.uart0 = pre_obj('uart0', 'NS16550') self.o.uart0.irq_dev = self.o.irq", "'The amount of RAM in megabytes.'], ['map_offset', Sim_Attr_Optional, 'i', 'Base", "at 4 GB and above will not work'], ['time_of_day', Sim_Attr_Optional,", "add_objects(self, cpu): self.o.phys_mem = pre_obj('phys_mem', 'memory-space') self.o.ram_image = pre_obj('memory_image', 'image')", "idx): if self.obj.configured: return Sim_Set_Illegal_Value self.freq_mhz = val return Sim_Set_Ok", "self.o.ram_image = pre_obj('memory_image', 'image') self.o.ram_image.size = self.memory_megs * 0x100000 self.o.ram", "terms ## of the Agreement, and use of this Source", "[ [0x020, self.o.irq, 0, 0x20, 0x1], [0x021, self.o.irq, 0, 0x21,", "'uart1' : {'type' : 'serial', 'direction' : 'down', 'empty_ok' :", "return Sim_Set_Illegal_Value try: time.strptime(val, \"%Y-%m-%d %H:%M:%S %Z\") except Exception, msg:", "0, 0x0, 0x8], [0x898, self.o.empty, 0, 0x0, 0x8]] def add_connector_info(self):", "cpu): self.o.phys_mem = pre_obj('phys_mem', 'memory-space') 
self.o.ram_image = pre_obj('memory_image', 'image') self.o.ram_image.size", "'multi' : False}} def __init__(self, parse_obj): component_object.__init__(self, parse_obj) self.o.cpu =", "['map_offset', Sim_Attr_Optional, 'i', 'Base address for device mappings. ' \\", "'uart0': self.o.uart0.link = None self.o.uart0.console = None elif connector ==", "0x1], [0x0a1, self.o.irq, 0, 0xa1, 0x1], # Linux probes for", "mappings there [0x2e8, self.o.empty, 0, 0x0, 0x8], # two NS16550,", "+ 0x08000000, self.o.pci_io, 0, 0x0, 0x100000], [self.map_offset + 0x0f660000, self.o.hfs,", "self.o.pci_io.map = [ [0x020, self.o.irq, 0, 0x20, 0x1], [0x021, self.o.irq,", "NS16550, at the traditional addresses [0x2f8, self.o.uart1, 0, 0x0, 0x8,", "[self.map_offset + 0x0f660000, self.o.hfs, 0, 0, 0x10], [self.map_offset + 0x0fc00000,", "= [ [0x00000000, self.o.ram, 0, 0x0, self.memory_megs * 0x100000], [self.map_offset", "+ 0x0f660000, self.o.hfs, 0, 0, 0x10], [self.map_offset + 0x0fc00000, self.o.pic,", "please contact Virtutech for a copy ## of the Agreement", "## the Agreement. You should have received a copy of", "Source Code is subject to the terms ## the Agreement.", "Generic Simple System for PPC64 Processors class ppc64_simple_base_component(component_object): basename =", "'ppc-of') self.o.of.cpu = self.o.cpu[0] self.o.of.memory_megs = self.memory_megs self.o.of.entry_point = 0x7000000", "or any derivatives thereof and disclaims all implied warranties, ##", "Processors class ppc64_simple_base_component(component_object): basename = 'system' connectors = { 'uart0'", "being distributed under ## the Agreement. 
You should have received", "will not work'], ['time_of_day', Sim_Attr_Optional, 's', 'Date and time to", "= val return Sim_Set_Ok def get_map_offset(self, idx): return self.map_offset def", "self.time_of_day = val return Sim_Set_Ok def add_objects(self, cpu): self.o.phys_mem =", "0, 0x0, self.memory_megs * 0x100000], [self.map_offset + 0x08000000, self.o.pci_io, 0,", "self.o.uart0.xmit_time = 1000 self.o.uart1 = pre_obj('uart1', 'NS16550') self.o.uart1.irq_dev = self.o.irq", "to the terms of the Virtutech Simics Software ## License", "return self.time_of_day def set_time_of_day(self, val, idx): if self.obj.configured: return Sim_Set_Illegal_Value", "'hotplug' : True, 'multi' : False}, 'uart1' : {'type' :", "self.o.of.map_offset = self.map_offset self.o.of.time_of_day = self.time_of_day self.o.broadcast_bus = pre_obj('broadcast_bus', 'ppc-broadcast-bus')", "and non-infringement. from sim_core import * from components import *", "pre_obj('memory_image', 'image') self.o.ram_image.size = self.memory_megs * 0x100000 self.o.ram = pre_obj('memory',", "copy of the Agreement with ## this Licensed Software; if", "of the Agreement, and use of this Source Code is", "contents herein are Source Code which are a subset of", "__init__(self, parse_obj): component_object.__init__(self, parse_obj) self.o.cpu = [] self.map_offset = 0xf0000000", "return Sim_Set_Illegal_Value self.time_of_day = val return Sim_Set_Ok def add_objects(self, cpu):", "2005-2007 Virtutech AB ## ## The contents herein are Source", "def get_cpu_frequency(self, idx): return self.freq_mhz def set_cpu_frequency(self, val, idx): if", "self.obj.configured: return Sim_Set_Illegal_Value self.map_offset = val return Sim_Set_Ok def get_time_of_day(self,", "0x0, 0x8, None, 0, 1], [0x3e8, self.o.empty, 0, 0x0, 0x8],", "= [None, self.o.uart1, self.o.uart1.name] def connect_serial(self, connector, link, console): if", "self.o.uart0, 0, 0x0, 0x8, None, 0, 1], # no UARTs", "def disconnect_serial(self, connector): if connector 
== 'uart0': self.o.uart0.link = None", "so provide # empty mappings there [0x2e8, self.o.empty, 0, 0x0,", "val return Sim_Set_Ok def add_objects(self, cpu): self.o.phys_mem = pre_obj('phys_mem', 'memory-space')", "the Agreement prior to using this Licensed Software. ## ##", "on an \"as ## is\" basis. Virtutech makes no warranties", "self.memory_megs self.o.of.entry_point = 0x7000000 self.o.of.map_offset = self.map_offset self.o.of.time_of_day = self.time_of_day", "## this Licensed Software; if not, please contact Virtutech for", "## Code or any derivatives thereof and disclaims all implied", "self.o.irq, 0, 0xa1, 0x1], # Linux probes for UARTs at", "thereof and disclaims all implied warranties, ## including, without limitation,", "MHz.'], ['memory_megs', Sim_Attr_Required, 'i', 'The amount of RAM in megabytes.'],", "this Licensed Software; if not, please contact Virtutech for a", "= [ ['cpu_frequency', Sim_Attr_Required, 'f', 'Processor frequency in MHz.'], ['memory_megs',", "console elif connector == 'uart1': if link: self.o.uart1.link = link", "self.o.of = pre_obj('of', 'ppc-of') self.o.of.cpu = self.o.cpu[0] self.o.of.memory_megs = self.memory_megs", "self.o.pci_io = pre_obj('pci_io', 'memory-space') self.o.hfs = pre_obj('hfs$', 'hostfs') self.o.phys_mem.map =", "implied warranties, ## including, without limitation, warranties of merchantability and", "False}, 'uart1' : {'type' : 'serial', 'direction' : 'down', 'empty_ok'", "def connect_serial(self, connector, link, console): if connector == 'uart0': if", "Agreement (the \"Agreement\"), and are being distributed under ## the", "if connector == 'uart0': self.o.uart0.link = None self.o.uart0.console = None", "else: self.o.uart0.console = console elif connector == 'uart1': if link:", "get_clock(self): return self.o.cpu[0] def get_processors(self): return self.o.cpu ppc64_simple_attributes = [", "of this Source Code is subject to the terms ##", "Sim_Set_Ok def add_objects(self, cpu): self.o.phys_mem = 
pre_obj('phys_mem', 'memory-space') self.o.ram_image =", "from components import * import time # Generic Simple System", "Agreement with ## this Licensed Software; if not, please contact", "self.o.uart1.xmit_time = 1000 self.o.of = pre_obj('of', 'ppc-of') self.o.of.cpu = self.o.cpu[0]", "self.map_offset = 0xf0000000 self.time_of_day = \"2006-06-06 06:06:06 UTC\" def get_cpu_frequency(self,", "# no UARTs here either [0x890, self.o.empty, 0, 0x0, 0x8],", "this Source Code is subject to the terms ## the", "return Sim_Set_Illegal_Value self.freq_mhz = val return Sim_Set_Ok def get_memory_megs(self, idx):", "get_time_of_day(self, idx): return self.time_of_day def set_time_of_day(self, val, idx): if self.obj.configured:", "connect_serial(self, connector, link, console): if connector == 'uart0': if link:", "Code or any derivatives thereof and disclaims all implied warranties,", "0x8, None, 0, 1], # no UARTs here either [0x890,", "[0x2f8, self.o.uart1, 0, 0x0, 0x8, None, 0, 1], [0x3e8, self.o.empty,", "GB and above will not work'], ['time_of_day', Sim_Attr_Optional, 's', 'Date", "msg: SIM_attribute_error(str(msg)) return Sim_Set_Illegal_Value self.time_of_day = val return Sim_Set_Ok def", "pre_obj('hfs$', 'hostfs') self.o.phys_mem.map = [ [0x00000000, self.o.ram, 0, 0x0, self.memory_megs", "return Sim_Set_Ok def get_map_offset(self, idx): return self.map_offset def set_map_offset(self, val,", "= val return Sim_Set_Ok def get_time_of_day(self, idx): return self.time_of_day def", "self.o.pic.irq_devs = [cpu] self.o.irq = pre_obj('irq$', 'i8259x2') self.o.irq.irq_dev = self.o.pic", "'i8259x2') self.o.irq.irq_dev = self.o.pic self.o.uart0 = pre_obj('uart0', 'NS16550') self.o.uart0.irq_dev =", "self.o.uart1.irq_level = 3 self.o.uart1.xmit_time = 1000 self.o.of = pre_obj('of', 'ppc-of')", "Copyright 2005-2007 Virtutech AB ## ## The contents herein are", "## This Source Code and any derivatives thereof are provided", "Source ## Code or any derivatives thereof and disclaims all", 
"val return Sim_Set_Ok def get_map_offset(self, idx): return self.map_offset def set_map_offset(self,", "self.o.phys_mem = pre_obj('phys_mem', 'memory-space') self.o.ram_image = pre_obj('memory_image', 'image') self.o.ram_image.size =", "link: self.o.uart1.link = link else: self.o.uart1.console = console def disconnect_serial(self,", "idx): if self.obj.configured: return Sim_Set_Illegal_Value try: time.strptime(val, \"%Y-%m-%d %H:%M:%S %Z\")", "self.o.hfs, 0, 0, 0x10], [self.map_offset + 0x0fc00000, self.o.pic, 0, 0x0,", "0x3e8 too, so provide # empty mappings there [0x2e8, self.o.empty,", "self.o.ram, 0, 0x0, self.memory_megs * 0x100000], [self.map_offset + 0x08000000, self.o.pci_io,", "= self.map_offset self.o.of.time_of_day = self.time_of_day self.o.broadcast_bus = pre_obj('broadcast_bus', 'ppc-broadcast-bus') self.o.empty", "None, 0, 1], [0x3e8, self.o.empty, 0, 0x0, 0x8], [0x3f8, self.o.uart0,", "Code which are a subset of Licensed ## Software pursuant", "this Source Code, you agree to be bound by all", "if connector == 'uart0': if link: self.o.uart0.link = link else:", "of the Agreement prior to using this Licensed Software. ##", "pre_obj('uart0', 'NS16550') self.o.uart0.irq_dev = self.o.irq self.o.uart0.irq_level = 4 self.o.uart0.xmit_time =", "purpose and non-infringement. from sim_core import * from components import", "at 0x2e8 and 0x3e8 too, so provide # empty mappings", "[self.map_offset + 0x08000000, self.o.pci_io, 0, 0x0, 0x100000], [self.map_offset + 0x0f660000,", "0, 1], [0x3e8, self.o.empty, 0, 0x0, 0x8], [0x3f8, self.o.uart0, 0,", "basis. 
Virtutech makes no warranties with respect to the Source", "## of the Agreement prior to using this Licensed Software.", "## The contents herein are Source Code which are a", "idx): return self.memory_megs def set_memory_megs(self, val, idx): if self.obj.configured: return", "using this Source Code, you agree to be bound by", "pursuant to the terms of the Virtutech Simics Software ##", "sim_core import * from components import * import time #", "there [0x2e8, self.o.empty, 0, 0x0, 0x8], # two NS16550, at", "val return Sim_Set_Ok def get_memory_megs(self, idx): return self.memory_megs def set_memory_megs(self,", "= pre_obj('irq$', 'i8259x2') self.o.irq.irq_dev = self.o.pic self.o.uart0 = pre_obj('uart0', 'NS16550')", "the Virtutech Simics Software ## License Agreement (the \"Agreement\"), and", "= self.time_of_day self.o.broadcast_bus = pre_obj('broadcast_bus', 'ppc-broadcast-bus') self.o.empty = pre_obj('empty', 'empty-device')", "get_processors(self): return self.o.cpu ppc64_simple_attributes = [ ['cpu_frequency', Sim_Attr_Required, 'f', 'Processor", "connector == 'uart0': if link: self.o.uart0.link = link else: self.o.uart0.console", "self.o.irq self.o.uart0.irq_level = 4 self.o.uart0.xmit_time = 1000 self.o.uart1 = pre_obj('uart1',", "of merchantability and ## fitness for a particular purpose and", "address for device mappings. ' \\ 'Offsets at 4 GB", "device mappings. 
' \\ 'Offsets at 4 GB and above", "to the Source ## Code or any derivatives thereof and", "* 0x100000], [self.map_offset + 0x08000000, self.o.pci_io, 0, 0x0, 0x100000], [self.map_offset", "get_memory_megs(self, idx): return self.memory_megs def set_memory_megs(self, val, idx): if self.obj.configured:", "above will not work'], ['time_of_day', Sim_Attr_Optional, 's', 'Date and time", "= 'system' connectors = { 'uart0' : {'type' : 'serial',", "%Z\") except Exception, msg: SIM_attribute_error(str(msg)) return Sim_Set_Illegal_Value self.time_of_day = val", "respect to the Source ## Code or any derivatives thereof", "= link else: self.o.uart1.console = console def disconnect_serial(self, connector): if", "and use of this Source Code is subject to the", "pre_obj('broadcast_bus', 'ppc-broadcast-bus') self.o.empty = pre_obj('empty', 'empty-device') self.o.pci_io = pre_obj('pci_io', 'memory-space')", "self.obj.configured: return Sim_Set_Illegal_Value try: time.strptime(val, \"%Y-%m-%d %H:%M:%S %Z\") except Exception,", "None, 0, 1], # no UARTs here either [0x890, self.o.empty,", "0x0, 0x8, None, 0, 1], # no UARTs here either", "subset of Licensed ## Software pursuant to the terms of", "import * from components import * import time # Generic", "= pre_obj('uart1', 'NS16550') self.o.uart1.irq_dev = self.o.irq self.o.uart1.irq_level = 3 self.o.uart1.xmit_time", "of Licensed ## Software pursuant to the terms of the", "terms of the Virtutech Simics Software ## License Agreement (the", "if self.obj.configured: return Sim_Set_Illegal_Value try: time.strptime(val, \"%Y-%m-%d %H:%M:%S %Z\") except", "'NS16550') self.o.uart0.irq_dev = self.o.irq self.o.uart0.irq_level = 4 self.o.uart0.xmit_time = 1000", "self.o.uart1.irq_dev = self.o.irq self.o.uart1.irq_level = 3 self.o.uart1.xmit_time = 1000 self.o.of", "* from components import * import time # Generic Simple", "for device mappings. 
' \\ 'Offsets at 4 GB and", "= 0xf0000000 self.time_of_day = \"2006-06-06 06:06:06 UTC\" def get_cpu_frequency(self, idx):", "0, 0x20, 0x1], [0x021, self.o.irq, 0, 0x21, 0x1], [0x0a0, self.o.irq,", "[None, self.o.uart0, self.o.uart0.name] self.connector_info['uart1'] = [None, self.o.uart1, self.o.uart1.name] def connect_serial(self,", "idx): if self.obj.configured: return Sim_Set_Illegal_Value self.map_offset = val return Sim_Set_Ok", "and ## fitness for a particular purpose and non-infringement. from", "val, idx): if self.obj.configured: return Sim_Set_Illegal_Value self.map_offset = val return", "self.o.ram = pre_obj('memory', 'ram') self.o.ram.image = self.o.ram_image self.o.pic = pre_obj('pic$',", "using this Licensed Software. ## ## By using this Source", "4 self.o.uart0.xmit_time = 1000 self.o.uart1 = pre_obj('uart1', 'NS16550') self.o.uart1.irq_dev =", "1000 self.o.of = pre_obj('of', 'ppc-of') self.o.of.cpu = self.o.cpu[0] self.o.of.memory_megs =", "'empty_ok' : True, 'hotplug' : True, 'multi' : False}} def", "0x0, 0x8], [0x898, self.o.empty, 0, 0x0, 0x8]] def add_connector_info(self): self.connector_info['uart0']", "'NS16550') self.o.uart1.irq_dev = self.o.irq self.o.uart1.irq_level = 3 self.o.uart1.xmit_time = 1000", "== 'uart0': if link: self.o.uart0.link = link else: self.o.uart0.console =", "4 GB and above will not work'], ['time_of_day', Sim_Attr_Optional, 's',", "an \"as ## is\" basis. Virtutech makes no warranties with", "the terms of the Virtutech Simics Software ## License Agreement", "if link: self.o.uart0.link = link else: self.o.uart0.console = console elif", "[0x0a0, self.o.irq, 0, 0xa0, 0x1], [0x0a1, self.o.irq, 0, 0xa1, 0x1],", "fitness for a particular purpose and non-infringement. 
from sim_core import", "if self.obj.configured: return Sim_Set_Illegal_Value self.memory_megs = val return Sim_Set_Ok def", "'uart1': if link: self.o.uart1.link = link else: self.o.uart1.console = console", "%H:%M:%S %Z\") except Exception, msg: SIM_attribute_error(str(msg)) return Sim_Set_Illegal_Value self.time_of_day =", "time.strptime(val, \"%Y-%m-%d %H:%M:%S %Z\") except Exception, msg: SIM_attribute_error(str(msg)) return Sim_Set_Illegal_Value", "else: self.o.uart1.console = console def disconnect_serial(self, connector): if connector ==", "self.time_of_day = \"2006-06-06 06:06:06 UTC\" def get_cpu_frequency(self, idx): return self.freq_mhz", "'uart0' : {'type' : 'serial', 'direction' : 'down', 'empty_ok' :", "non-infringement. from sim_core import * from components import * import", "# two NS16550, at the traditional addresses [0x2f8, self.o.uart1, 0,", "received a copy of the Agreement with ## this Licensed", "0, 1], # no UARTs here either [0x890, self.o.empty, 0,", "AB ## ## The contents herein are Source Code which", "including, without limitation, warranties of merchantability and ## fitness for", "'multi' : False}, 'uart1' : {'type' : 'serial', 'direction' :", "= 4 self.o.uart0.xmit_time = 1000 self.o.uart1 = pre_obj('uart1', 'NS16550') self.o.uart1.irq_dev", "of the Virtutech Simics Software ## License Agreement (the \"Agreement\"),", "self.o.uart1 = pre_obj('uart1', 'NS16550') self.o.uart1.irq_dev = self.o.irq self.o.uart1.irq_level = 3", "the Agreement. ## ## This Source Code and any derivatives", "def set_time_of_day(self, val, idx): if self.obj.configured: return Sim_Set_Illegal_Value try: time.strptime(val,", "## the Agreement. 
## ## This Source Code and any", "self.map_offset def set_map_offset(self, val, idx): if self.obj.configured: return Sim_Set_Illegal_Value self.map_offset", "= pre_obj('memory', 'ram') self.o.ram.image = self.o.ram_image self.o.pic = pre_obj('pic$', 'open-pic')", "link else: self.o.uart0.console = console elif connector == 'uart1': if", "a copy ## of the Agreement prior to using this", "Linux probes for UARTs at 0x2e8 and 0x3e8 too, so", "class ppc64_simple_base_component(component_object): basename = 'system' connectors = { 'uart0' :", "provided on an \"as ## is\" basis. Virtutech makes no", "Sim_Set_Illegal_Value self.memory_megs = val return Sim_Set_Ok def get_map_offset(self, idx): return", "self.o.pic = pre_obj('pic$', 'open-pic') self.o.pic.irq_devs = [cpu] self.o.irq = pre_obj('irq$',", "no warranties with respect to the Source ## Code or", "if self.obj.configured: return Sim_Set_Illegal_Value self.map_offset = val return Sim_Set_Ok def", "'empty-device') self.o.pci_io = pre_obj('pci_io', 'memory-space') self.o.hfs = pre_obj('hfs$', 'hostfs') self.o.phys_mem.map", "from sim_core import * from components import * import time", "= [cpu] self.o.irq = pre_obj('irq$', 'i8259x2') self.o.irq.irq_dev = self.o.pic self.o.uart0", "+ 0x0fc00000, self.o.pic, 0, 0x0, 0x100000]] self.o.pci_io.map = [ [0x020,", "return Sim_Set_Ok def get_time_of_day(self, idx): return self.time_of_day def set_time_of_day(self, val,", "Sim_Attr_Optional, 's', 'Date and time to initialize the OpenFirmware RTC", "None self.o.uart1.console = None def get_clock(self): return self.o.cpu[0] def get_processors(self):", "0x100000]] self.o.pci_io.map = [ [0x020, self.o.irq, 0, 0x20, 0x1], [0x021,", "'hotplug' : True, 'multi' : False}} def __init__(self, parse_obj): component_object.__init__(self,", "self.o.uart0.irq_level = 4 self.o.uart0.xmit_time = 1000 self.o.uart1 = pre_obj('uart1', 'NS16550')", "self.o.uart1.console = None def get_clock(self): return self.o.cpu[0] def get_processors(self): 
return", "0x1], [0x0a0, self.o.irq, 0, 0xa0, 0x1], [0x0a1, self.o.irq, 0, 0xa1,", "0x21, 0x1], [0x0a0, self.o.irq, 0, 0xa0, 0x1], [0x0a1, self.o.irq, 0,", "self.o.of.time_of_day = self.time_of_day self.o.broadcast_bus = pre_obj('broadcast_bus', 'ppc-broadcast-bus') self.o.empty = pre_obj('empty',", "This Source Code and any derivatives thereof are provided on", "= pre_obj('uart0', 'NS16550') self.o.uart0.irq_dev = self.o.irq self.o.uart0.irq_level = 4 self.o.uart0.xmit_time", "def set_map_offset(self, val, idx): if self.obj.configured: return Sim_Set_Illegal_Value self.map_offset =", "idx): if self.obj.configured: return Sim_Set_Illegal_Value self.memory_megs = val return Sim_Set_Ok", "pre_obj('phys_mem', 'memory-space') self.o.ram_image = pre_obj('memory_image', 'image') self.o.ram_image.size = self.memory_megs *", "= self.o.irq self.o.uart1.irq_level = 3 self.o.uart1.xmit_time = 1000 self.o.of =", "under ## the Agreement. You should have received a copy", "self.o.uart0.link = link else: self.o.uart0.console = console elif connector ==", "val, idx): if self.obj.configured: return Sim_Set_Illegal_Value self.freq_mhz = val return", "elif connector == 'uart1': self.o.uart1.link = None self.o.uart1.console = None", "the Agreement, and use of this Source Code is subject", "'down', 'empty_ok' : True, 'hotplug' : True, 'multi' : False},", "addresses [0x2f8, self.o.uart1, 0, 0x0, 0x8, None, 0, 1], [0x3e8,", "## is\" basis. Virtutech makes no warranties with respect to", "prior to using this Licensed Software. ## ## By using", "are Source Code which are a subset of Licensed ##", "Code is subject to the terms ## the Agreement. 
## Copyright 2005-2007 Virtutech AB
##
## The contents herein are Source Code which are a subset of Licensed
## Software pursuant to the terms of the Virtutech Simics Software
## License Agreement (the "Agreement"), and are being distributed under
## the Agreement.  You should have received a copy of the Agreement with
## this Licensed Software; if not, please contact Virtutech for a copy
## of the Agreement prior to using this Licensed Software.
##
## By using this Source Code, you agree to be bound by all of the terms
## of the Agreement, and use of this Source Code is subject to the terms
## the Agreement.
##
## This Source Code and any derivatives thereof are provided on an "as
## is" basis.  Virtutech makes no warranties with respect to the Source
## Code or any derivatives thereof and disclaims all implied warranties,
## including, without limitation, warranties of merchantability and
## fitness for a particular purpose and non-infringement.

from sim_core import *
from components import *
import time

# Generic Simple System for PPC64 Processors

class ppc64_simple_base_component(component_object):
    """Generic simple PPC64 system component.

    Builds a minimal machine around a PPC64 processor: RAM, an OpenPIC
    plus a cascaded i8259x2 interrupt controller, two NS16550 UARTs, an
    OpenFirmware model ('ppc-of'), a host-filesystem device and an empty
    placeholder device for unpopulated legacy I/O ports.

    Configuration attributes: cpu_frequency, memory_megs, map_offset and
    time_of_day.  All setters reject changes once the component has been
    configured (self.obj.configured).

    NOTE(review): written for Simics' embedded Python 2 interpreter
    (see the `except Exception, msg` syntax below) -- do not modernize
    without confirming the target interpreter.
    """

    basename = 'system'

    # Two hot-pluggable, downward serial connectors, one per UART.
    # 'empty_ok' means the component is valid with nothing attached.
    connectors = {
        'uart0' : {'type' : 'serial', 'direction' : 'down',
                   'empty_ok' : True, 'hotplug' : True, 'multi' : False},
        'uart1' : {'type' : 'serial', 'direction' : 'down',
                   'empty_ok' : True, 'hotplug' : True, 'multi' : False}}

    def __init__(self, parse_obj):
        """Set attribute defaults; devices are created in add_objects()."""
        component_object.__init__(self, parse_obj)
        self.o.cpu = []
        # Default base for the device window; overridable through the
        # map_offset attribute before configuration.
        self.map_offset = 0xf0000000
        self.time_of_day = "2006-06-06 06:06:06 UTC"

    def get_cpu_frequency(self, idx):
        # Attribute getter: frequency is stored internally as freq_mhz.
        return self.freq_mhz

    def set_cpu_frequency(self, val, idx):
        # Frequency can only be set before the component is configured.
        if self.obj.configured:
            return Sim_Set_Illegal_Value
        self.freq_mhz = val
        return Sim_Set_Ok

    def get_memory_megs(self, idx):
        return self.memory_megs

    def set_memory_megs(self, val, idx):
        # RAM size (in MB) is fixed once configured.
        if self.obj.configured:
            return Sim_Set_Illegal_Value
        self.memory_megs = val
        return Sim_Set_Ok

    def get_map_offset(self, idx):
        return self.map_offset

    def set_map_offset(self, val, idx):
        # Base address for the device mappings built in add_objects().
        if self.obj.configured:
            return Sim_Set_Illegal_Value
        self.map_offset = val
        return Sim_Set_Ok

    def get_time_of_day(self, idx):
        return self.time_of_day

    def set_time_of_day(self, val, idx):
        """Validate and store the boot date/time string.

        The value must parse as "%Y-%m-%d %H:%M:%S %Z"; on failure the
        parse error is reported through SIM_attribute_error and the set
        is rejected.
        """
        if self.obj.configured:
            return Sim_Set_Illegal_Value
        try:
            time.strptime(val, "%Y-%m-%d %H:%M:%S %Z")
        except Exception, msg:  # Python 2 syntax (Simics interpreter)
            SIM_attribute_error(str(msg))
            return Sim_Set_Illegal_Value
        self.time_of_day = val
        return Sim_Set_Ok

    def add_objects(self, cpu):
        """Create all pre-configuration objects and wire up the memory maps.

        `cpu` is the (already created) processor object; it becomes the
        interrupt target of the OpenPIC.
        """
        # RAM backed by an image object sized from memory_megs.
        self.o.phys_mem = pre_obj('phys_mem', 'memory-space')
        self.o.ram_image = pre_obj('memory_image', 'image')
        self.o.ram_image.size = self.memory_megs * 0x100000
        self.o.ram = pre_obj('memory', 'ram')
        self.o.ram.image = self.o.ram_image
        # Interrupt controllers: OpenPIC feeding the CPU, with a legacy
        # dual-8259 cascaded into the PIC.
        self.o.pic = pre_obj('pic$', 'open-pic')
        self.o.pic.irq_devs = [cpu]
        self.o.irq = pre_obj('irq$', 'i8259x2')
        self.o.irq.irq_dev = self.o.pic
        # Two NS16550 UARTs on the classic COM1/COM2 interrupt levels.
        self.o.uart0 = pre_obj('uart0', 'NS16550')
        self.o.uart0.irq_dev = self.o.irq
        self.o.uart0.irq_level = 4
        self.o.uart0.xmit_time = 1000
        self.o.uart1 = pre_obj('uart1', 'NS16550')
        self.o.uart1.irq_dev = self.o.irq
        self.o.uart1.irq_level = 3
        self.o.uart1.xmit_time = 1000
        # OpenFirmware model, parameterized from the component attributes.
        self.o.of = pre_obj('of', 'ppc-of')
        self.o.of.cpu = self.o.cpu[0]
        self.o.of.memory_megs = self.memory_megs
        self.o.of.entry_point = 0x7000000
        self.o.of.map_offset = self.map_offset
        self.o.of.time_of_day = self.time_of_day
        self.o.broadcast_bus = pre_obj('broadcast_bus', 'ppc-broadcast-bus')
        self.o.empty = pre_obj('empty', 'empty-device')
        self.o.pci_io = pre_obj('pci_io', 'memory-space')
        self.o.hfs = pre_obj('hfs$', 'hostfs')
        # Physical map: RAM at 0, then PCI I/O, hostfs and the PIC in the
        # device window starting at map_offset.
        self.o.phys_mem.map = [
            [0x00000000, self.o.ram, 0, 0x0,
             self.memory_megs * 0x100000],
            [self.map_offset + 0x08000000, self.o.pci_io, 0, 0x0, 0x100000],
            [self.map_offset + 0x0f660000, self.o.hfs, 0, 0, 0x10],
            [self.map_offset + 0x0fc00000, self.o.pic, 0, 0x0, 0x100000]]
        # Legacy I/O map inside the PCI I/O space.
        self.o.pci_io.map = [
            [0x020, self.o.irq, 0, 0x20, 0x1],
            [0x021, self.o.irq, 0, 0x21, 0x1],
            [0x0a0, self.o.irq, 0, 0xa0, 0x1],
            [0x0a1, self.o.irq, 0, 0xa1, 0x1],
            # Linux probes for UARTs at 0x2e8 and 0x3e8 too, so provide
            # empty mappings there
            [0x2e8, self.o.empty, 0, 0x0, 0x8],
            # two NS16550, at the traditional addresses
            [0x2f8, self.o.uart1, 0, 0x0, 0x8, None, 0, 1],
            [0x3e8, self.o.empty, 0, 0x0, 0x8],
            [0x3f8, self.o.uart0, 0, 0x0, 0x8, None, 0, 1],
            # no UARTs here either
            [0x890, self.o.empty, 0, 0x0, 0x8],
            [0x898, self.o.empty, 0, 0x0, 0x8]]

    def add_connector_info(self):
        # Expose each UART object and its name through the serial connectors.
        self.connector_info['uart0'] = [None, self.o.uart0, self.o.uart0.name]
        self.connector_info['uart1'] = [None, self.o.uart1, self.o.uart1.name]

    def connect_serial(self, connector, link, console):
        """Attach a serial link (preferred) or a console to the given UART."""
        if connector == 'uart0':
            if link:
                self.o.uart0.link = link
            else:
                self.o.uart0.console = console
        elif connector == 'uart1':
            if link:
                self.o.uart1.link = link
            else:
                self.o.uart1.console = console

    def disconnect_serial(self, connector):
        """Detach both link and console from the given UART."""
        if connector == 'uart0':
            self.o.uart0.link = None
            self.o.uart0.console = None
        elif connector == 'uart1':
            self.o.uart1.link = None
            self.o.uart1.console = None

    def get_clock(self):
        # The first CPU drives the component's time queue.
        return self.o.cpu[0]

    def get_processors(self):
        return self.o.cpu
# Generated by Django 3.0.1 on 2020-02-15 06:02

from django.db import migrations, models


class Migration(migrations.Migration):
    # Adds an integer "Number_Of_Questions" column (default 0) to the
    # existing Subject model of the "course" app.

    dependencies = [
        # Must be applied after the initial schema of the "course" app.
        ('course', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='subject',
            name='Number_Of_Questions',
            field=models.IntegerField(default=0),
        ),
    ]
"""
Unit test for the DAG endpoints
"""

# Import from libraries
import json

# Import from internal modules
from cornflow.shared.const import EXEC_STATE_CORRECT, EXEC_STATE_MANUAL
from cornflow.tests.const import (
    DAG_URL,
    EXECUTION_URL_NORUN,
    CASE_PATH,
    INSTANCE_URL,
)
from cornflow.tests.unit.test_executions import TestExecutionsDetailEndpointMock


class TestDagEndpoint(TestExecutionsDetailEndpointMock):
    """Tests for creating manual executions through the DAG endpoint."""

    def _create_manual_execution(self, token):
        """Shared driver for the manual-creation tests.

        Builds an execution payload from the stored case file, marks it
        as a manual execution and POSTs it to the DAG endpoint, letting
        ``create_new_row`` verify the echoed fields.

        :param token: auth token of the user performing the request.
        """
        with open(CASE_PATH) as f:
            payload = json.load(f)

        # A manual execution carries its solution data directly.
        data = dict(
            data=payload["data"],
            state=EXEC_STATE_MANUAL,
        )
        payload_to_send = {**self.payload, **data}

        # Fields that create_new_row() checks in the response.
        self.items_to_check = [
            "config",
            "name",
            "description",
            "schema",
            "instance_id",
            "state",
        ]

        self.create_new_row(
            url=DAG_URL,
            model=self.model,
            payload=payload_to_send,
            check_payload=True,
            token=token,
        )

    def test_manual_dag_service_user(self):
        """A service user can create a manual DAG execution."""
        self._create_manual_execution(self.create_service_user())

    def test_manual_dag_planner_user(self):
        """A planner user can create a manual DAG execution."""
        self._create_manual_execution(self.create_planner())
class TestDagDetailEndpoint(TestExecutionsDetailEndpointMock):
    """Tests for reading and writing one execution on the DAG detail endpoint."""

    def test_put_dag(self):
        """A service user can write results back onto an existing execution."""
        idx = self.create_new_row(EXECUTION_URL_NORUN, self.model, self.payload)

        with open(CASE_PATH) as case_file:
            case = json.load(case_file)

        # The PUT marks the execution as solved and attaches the case data.
        change = dict(
            data=case["data"],
            state=EXEC_STATE_CORRECT,
        )
        expected = {**self.payload, **change}
        token = self.create_service_user()

        self.update_row(
            url=DAG_URL + idx + "/",
            payload_to_check=expected,
            change=change,
            token=token,
            check_payload=False,
        )

    def test_get_dag(self):
        """Reading an execution returns the data of its underlying instance."""
        idx = self.create_new_row(EXECUTION_URL_NORUN, self.model, self.payload)
        token = self.create_service_user()

        execution = self.get_one_row(
            url=DAG_URL + idx + "/",
            token=token,
            check_payload=False,
            payload=self.payload,
        )
        instance = self.get_one_row(
            url=INSTANCE_URL + self.payload["instance_id"] + "/data/",
            payload=dict(),
            check_payload=False,
        )

        # The execution's data must mirror its instance, and the config
        # must round-trip unchanged.
        self.assertEqual(execution["data"], instance["data"])
        self.assertEqual(execution["config"], self.payload["config"])
check_payload=False, payload=self.payload,", "cornflow.shared.const import EXEC_STATE_CORRECT, EXEC_STATE_MANUAL from cornflow.tests.const import ( DAG_URL, EXECUTION_URL_NORUN,", "+ \"/\", payload_to_check=payload_to_check, change=data, token=token, check_payload=False, ) def test_get_dag(self): idx", ") from cornflow.tests.unit.test_executions import TestExecutionsDetailEndpointMock class TestDagEndpoint(TestExecutionsDetailEndpointMock): def test_manual_dag_service_user(self): with", "data=payload[\"data\"], state=EXEC_STATE_MANUAL, ) payload_to_send = {**self.payload, **data} token = self.create_planner()", "as f: payload = json.load(f) data = dict( data=payload[\"data\"], state=EXEC_STATE_CORRECT,", "f: payload = json.load(f) data = dict( data=payload[\"data\"], state=EXEC_STATE_CORRECT, )", "token=token, check_payload=False, ) def test_get_dag(self): idx = self.create_new_row(EXECUTION_URL_NORUN, self.model, self.payload)", "url=INSTANCE_URL + self.payload[\"instance_id\"] + \"/data/\", payload=dict(), check_payload=False, ) self.assertEqual(data[\"data\"], instance_data[\"data\"])", "= [ \"config\", \"name\", \"description\", \"schema\", \"instance_id\", \"state\", ] idx", "= self.get_one_row( url=INSTANCE_URL + self.payload[\"instance_id\"] + \"/data/\", payload=dict(), check_payload=False, )", "EXEC_STATE_MANUAL from cornflow.tests.const import ( DAG_URL, EXECUTION_URL_NORUN, CASE_PATH, INSTANCE_URL, )", "payload=payload_to_send, check_payload=True, token=token, ) def test_manual_dag_planner_user(self): with open(CASE_PATH) as f:", "test_put_dag(self): idx = self.create_new_row(EXECUTION_URL_NORUN, self.model, self.payload) with open(CASE_PATH) as f:", ") payload_to_check = {**self.payload, **data} token = self.create_service_user() data =", "+ idx + \"/\", payload_to_check=payload_to_check, change=data, token=token, check_payload=False, ) def", "idx = self.create_new_row(EXECUTION_URL_NORUN, self.model, self.payload) token = self.create_service_user() 
data =", "self.model, self.payload) token = self.create_service_user() data = self.get_one_row( url=DAG_URL +", "state=EXEC_STATE_MANUAL, ) payload_to_send = {**self.payload, **data} token = self.create_service_user() self.items_to_check", "**data} token = self.create_service_user() self.items_to_check = [ \"config\", \"name\", \"description\"," ]
[ "3, [512, 512, 2048], stage=5, block='b') # num_rois, 7, 7,", "BatchNormalization(name=bn_name_base + '1')(shortcut) x = layers.add([x, shortcut]) x = Activation('relu')(x)", "+ '2b')(x) x = Activation('relu')(x) x = Conv2D(filters3, (1, 1),", "from keras.engine import InputSpec, Layer from keras.initializers import random_normal from", "2048 -> num_rois, 1, 1, 2048 x = TimeDistributed(AveragePooling2D((7, 7)),", "x, mask=None): assert self.built, 'Layer must be built before being", "is not None: self.set_weights(self.initial_weights) del self.initial_weights self.built = True def", "name=bn_name_base + '2c')(x) shortcut = TimeDistributed(Conv2D(nb_filter3, (1, 1), strides=strides, kernel_initializer='normal'),", "super(BatchNormalization, self).get_config() return dict(list(base_config.items()) + list(config.items())) def identity_block(input_tensor, kernel_size, filters,", "= identity_block(x, 3, [128, 128, 512], stage=3, block='c') x =", "input_shape): self.input_spec = [InputSpec(shape=input_shape)] shape = (input_shape[self.axis],) self.gamma = self.add_weight(shape,", "2048 x = identity_block_td(x, 3, [512, 512, 2048], stage=5, block='b')", "be built before being called' input_shape = K.int_shape(x) reduction_axes =", "x = identity_block(x, 3, [128, 128, 512], stage=3, block='d') #", "classifier_layers(x): # num_rois, 14, 14, 1024 -> num_rois, 7, 7,", "x = Activation('relu')(x) x = TimeDistributed(Conv2D(nb_filter2, (kernel_size, kernel_size), kernel_initializer='normal',padding='same'), name=conv_name_base", "= self.add_weight(shape, initializer='one', name='{}_running_std'.format(self.name), trainable=False) if self.initial_weights is not None:", "keras.initializers import random_normal from keras.layers import (Activation, Add, AveragePooling2D, Conv2D,", "Activation('relu')(x) return x def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2,", "block + '_branch' x = TimeDistributed(Conv2D(nb_filter1, (1, 1), strides=strides, 
kernel_initializer='normal'),", "# 75,75,512 -> 38,38,1024 x = conv_block(x, 3, [256, 256,", "+ '2c')(x) x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2c')(x) shortcut =", "= regularizers.get(gamma_regularizer) self.beta_regularizer = regularizers.get(beta_regularizer) self.initial_weights = weights super(BatchNormalization, self).__init__(**kwargs)", "def __init__(self, epsilon=1e-3, axis=-1, weights=None, beta_init='zero', gamma_init='one', gamma_regularizer=None, beta_regularizer=None, **kwargs):", "broadcast_running_std, broadcast_beta, broadcast_gamma, epsilon=self.epsilon) return x_normed def get_config(self): config =", "strides=strides, kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '1')(input_tensor) shortcut = BatchNormalization(name=bn_name_base + '1')(shortcut)", "x_normed def get_config(self): config = {'epsilon': self.epsilon, 'axis': self.axis, 'gamma_regularizer':", "'axis': self.axis, 'gamma_regularizer': self.gamma_regularizer.get_config() if self.gamma_regularizer else None, 'beta_regularizer': self.beta_regularizer.get_config()", "(1, 1), strides=strides, kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2a')(input_tensor) x = BatchNormalization(name=bn_name_base", "[128, 128, 512], stage=3, block='c') x = identity_block(x, 3, [128,", "= identity_block_td(x, 3, [512, 512, 2048], stage=5, block='b') # num_rois,", "stage=4, block='a') x = identity_block(x, 3, [256, 256, 1024], stage=4,", "= BatchNormalization(name=bn_name_base + '1')(shortcut) x = layers.add([x, shortcut]) x =", "padding=\"same\")(x) # 150,150,64 -> 150,150,256 x = conv_block(x, 3, [64,", "3, [256, 256, 1024], stage=4, block='a') x = identity_block(x, 3,", "[64, 64, 256], stage=2, block='c') # 150,150,256 -> 75,75,512 x", "x = Activation('relu')(x) return x def classifier_layers(x): # num_rois, 14,", "TimeDistributed(Conv2D(nb_filter3, (1, 1), kernel_initializer='normal'), name=conv_name_base + '2c')(x) x = 
TimeDistributed(BatchNormalization(),", "256, 1024], stage=4, block='a') x = identity_block(x, 3, [256, 256,", "#-------------------------------------------------------------# import keras.backend as K from keras import backend as", "2)): nb_filter1, nb_filter2, nb_filter3 = filters conv_name_base = 'res' +", "self.beta_regularizer = regularizers.get(beta_regularizer) self.initial_weights = weights super(BatchNormalization, self).__init__(**kwargs) def build(self,", "epsilon self.axis = axis self.gamma_regularizer = regularizers.get(gamma_regularizer) self.beta_regularizer = regularizers.get(beta_regularizer)", "= TimeDistributed(Conv2D(nb_filter1, (1, 1), kernel_initializer='normal'), name=conv_name_base + '2a')(input_tensor) x =", "x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2c')(x) x = Add()([x, input_tensor])", "3))(img_input) x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x) x", "x = MaxPooling2D((3, 3), strides=(2, 2), padding=\"same\")(x) # 150,150,64 ->", "+ '2c')(x) x = BatchNormalization(name=bn_name_base + '2c')(x) x = layers.add([x,", "shortcut = Conv2D(filters3, (1, 1), strides=strides, kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '1')(input_tensor)", "= identity_block(x, 3, [64, 64, 256], stage=2, block='c') # 150,150,256", "return x_normed def get_config(self): config = {'epsilon': self.epsilon, 'axis': self.axis,", "= 'res' + str(stage) + block + '_branch' bn_name_base =", "num_rois, 7, 7, 2048 x = conv_block_td(x, 3, [512, 512,", "len(input_shape) broadcast_shape[self.axis] = input_shape[self.axis] if sorted(reduction_axes) == range(K.ndim(x))[:-1]: x_normed =", "= super(BatchNormalization, self).get_config() return dict(list(base_config.items()) + list(config.items())) def identity_block(input_tensor, kernel_size,", "= MaxPooling2D((3, 3), strides=(2, 2), padding=\"same\")(x) # 150,150,64 -> 150,150,256", "= Conv2D(filters2, kernel_size, padding='same', kernel_initializer=random_normal(stddev=0.02), 
name=conv_name_base + '2b')(x) x =", "name=conv_name_base + '2b')(x) x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2b')(x) x", "x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2b')(x) x = Activation('relu')(x) x", "name=conv_name_base + '2b')(x) x = BatchNormalization(name=bn_name_base + '2b')(x) x =", "x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a') x", "name=bn_name_base + '1')(shortcut) x = Add()([x, shortcut]) x = Activation('relu')(x)", "broadcast_shape[self.axis] = input_shape[self.axis] if sorted(reduction_axes) == range(K.ndim(x))[:-1]: x_normed = K.batch_normalization(", "K from keras import backend as K from keras import", "name=conv_name_base + '2a')(input_tensor) x = BatchNormalization(name=bn_name_base + '2a')(x) x =", "self.axis, 'gamma_regularizer': self.gamma_regularizer.get_config() if self.gamma_regularizer else None, 'beta_regularizer': self.beta_regularizer.get_config() if", "inputs # 600,600,3 -> 300,300,64 x = ZeroPadding2D((3, 3))(img_input) x", "call(self, x, mask=None): assert self.built, 'Layer must be built before", "(1, 1), kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2a')(input_tensor) x = BatchNormalization(name=bn_name_base +", "kernel_initializer='normal',padding='same'), name=conv_name_base + '2b')(x) x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2b')(x)", "= K.batch_normalization( x, self.running_mean, self.running_std, self.beta, self.gamma, epsilon=self.epsilon) else: broadcast_running_mean", "1024], stage=4, block='c') x = identity_block(x, 3, [256, 256, 1024],", "2048 x = identity_block_td(x, 3, [512, 512, 2048], stage=5, block='c')", "Activation('relu')(x) x = TimeDistributed(Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same', kernel_initializer='normal'), name=conv_name_base +", "stage=3, block='b') x = identity_block(x, 3, [128, 128, 512], stage=3,", "= conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))", "x = 
conv_block(x, 3, [128, 128, 512], stage=3, block='a') x", "x = TimeDistributed(Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same', kernel_initializer='normal'), name=conv_name_base + '2b')(x)", "input_shape = K.int_shape(x) reduction_axes = list(range(len(input_shape))) del reduction_axes[self.axis] broadcast_shape =", "gamma_init='one', gamma_regularizer=None, beta_regularizer=None, **kwargs): self.supports_masking = True self.beta_init = initializers.get(beta_init)", "= (input_shape[self.axis],) self.gamma = self.add_weight(shape, initializer=self.gamma_init, regularizer=self.gamma_regularizer, name='{}_gamma'.format(self.name), trainable=False) self.beta", "#-----------------------------------# img_input = inputs # 600,600,3 -> 300,300,64 x =", "+ '2a')(input_tensor) x = BatchNormalization(name=bn_name_base + '2a')(x) x = Activation('relu')(x)", "3, [128, 128, 512], stage=3, block='b') x = identity_block(x, 3,", "(kernel_size, kernel_size), kernel_initializer='normal',padding='same'), name=conv_name_base + '2b')(x) x = TimeDistributed(BatchNormalization(), name=bn_name_base", "**kwargs): self.supports_masking = True self.beta_init = initializers.get(beta_init) self.gamma_init = initializers.get(gamma_init)", "x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c') x", "600,600,3 -> 300,300,64 x = ZeroPadding2D((3, 3))(img_input) x = Conv2D(64,", "kernel_initializer='normal'), name=conv_name_base + '2b')(x) x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2b')(x)", "conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)): filters1, filters2, filters3", "= True def call(self, x, mask=None): assert self.built, 'Layer must", "TimeDistributed(Conv2D(nb_filter1, (1, 1), strides=strides, kernel_initializer='normal'), name=conv_name_base + '2a')(input_tensor) x =", "weights super(BatchNormalization, self).__init__(**kwargs) def build(self, input_shape): self.input_spec = [InputSpec(shape=input_shape)] shape", "256, 
1024], stage=4, block='f') # 最终获得一个38,38,1024的共享特征层 return x def identity_block_td(input_tensor,", "x = ZeroPadding2D((3, 3))(img_input) x = Conv2D(64, (7, 7), strides=(2,", "identity_block(x, 3, [128, 128, 512], stage=3, block='b') x = identity_block(x,", "K.int_shape(x) reduction_axes = list(range(len(input_shape))) del reduction_axes[self.axis] broadcast_shape = [1] *", "block='c') x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')", "else None} base_config = super(BatchNormalization, self).get_config() return dict(list(base_config.items()) + list(config.items()))", "name='conv1')(x) x = BatchNormalization(name='bn_conv1')(x) x = Activation('relu')(x) # 300,300,64 ->", "self.beta_init = initializers.get(beta_init) self.gamma_init = initializers.get(gamma_init) self.epsilon = epsilon self.axis", "= 'bn' + str(stage) + block + '_branch' x =", "being called' input_shape = K.int_shape(x) reduction_axes = list(range(len(input_shape))) del reduction_axes[self.axis]", "import keras.backend as K from keras import backend as K", "'1')(input_tensor) shortcut = TimeDistributed(BatchNormalization(), name=bn_name_base + '1')(shortcut) x = Add()([x,", "x = BatchNormalization(name='bn_conv1')(x) x = Activation('relu')(x) # 300,300,64 -> 150,150,64", "= initializers.get(gamma_init) self.epsilon = epsilon self.axis = axis self.gamma_regularizer =", "K.batch_normalization( x, self.running_mean, self.running_std, self.beta, self.gamma, epsilon=self.epsilon) else: broadcast_running_mean =", "Activation('relu')(x) x = Conv2D(filters3, (1, 1), kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2c')(x)", "strides=(2, 2)): filters1, filters2, filters3 = filters conv_name_base = 'res'", "'bn' + str(stage) + block + '_branch' x = Conv2D(filters1,", "stage=4, block='e') x = identity_block(x, 3, [256, 256, 1024], stage=4,", "initializers.get(beta_init) self.gamma_init = initializers.get(gamma_init) self.epsilon = epsilon self.axis = axis", "None: 
self.set_weights(self.initial_weights) del self.initial_weights self.built = True def call(self, x,", "1), strides=strides, kernel_initializer='normal'), name=conv_name_base + '1')(input_tensor) shortcut = TimeDistributed(BatchNormalization(), name=bn_name_base", "'2c')(x) shortcut = TimeDistributed(Conv2D(nb_filter3, (1, 1), strides=strides, kernel_initializer='normal'), name=conv_name_base +", "[1] * len(input_shape) broadcast_shape[self.axis] = input_shape[self.axis] if sorted(reduction_axes) == range(K.ndim(x))[:-1]:", "block='b') x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')", "identity_block(x, 3, [64, 64, 256], stage=2, block='b') x = identity_block(x,", "self.add_weight(shape, initializer=self.gamma_init, regularizer=self.gamma_regularizer, name='{}_gamma'.format(self.name), trainable=False) self.beta = self.add_weight(shape, initializer=self.beta_init, regularizer=self.beta_regularizer,", "shortcut = TimeDistributed(Conv2D(nb_filter3, (1, 1), strides=strides, kernel_initializer='normal'), name=conv_name_base + '1')(input_tensor)", "'_branch' x = Conv2D(filters1, (1, 1), kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2a')(input_tensor)", "TimeDistributed(BatchNormalization(), name=bn_name_base + '2b')(x) x = Activation('relu')(x) x = TimeDistributed(Conv2D(nb_filter3,", "self.epsilon, 'axis': self.axis, 'gamma_regularizer': self.gamma_regularizer.get_config() if self.gamma_regularizer else None, 'beta_regularizer':", "broadcast_shape) broadcast_beta = K.reshape(self.beta, broadcast_shape) broadcast_gamma = K.reshape(self.gamma, broadcast_shape) x_normed", "x = Conv2D(filters3, (1, 1), kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2c')(x) x", "= TimeDistributed(BatchNormalization(), name=bn_name_base + '1')(shortcut) x = Add()([x, shortcut]) x", "None} base_config = super(BatchNormalization, self).get_config() return dict(list(base_config.items()) + list(config.items())) def", "= 
self.add_weight(shape, initializer=self.gamma_init, regularizer=self.gamma_regularizer, name='{}_gamma'.format(self.name), trainable=False) self.beta = self.add_weight(shape, initializer=self.beta_init,", "'2a')(x) x = Activation('relu')(x) x = TimeDistributed(Conv2D(nb_filter2, (kernel_size, kernel_size), kernel_initializer='normal',padding='same'),", "def conv_block_td(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)): nb_filter1, nb_filter2,", "strides=(2, 2), name='conv1')(x) x = BatchNormalization(name='bn_conv1')(x) x = Activation('relu')(x) #", "name='{}_beta'.format(self.name), trainable=False) self.running_mean = self.add_weight(shape, initializer='zero', name='{}_running_mean'.format(self.name), trainable=False) self.running_std =", "x = Conv2D(filters1, (1, 1), strides=strides, kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2a')(input_tensor)", "= conv_block_td(x, 3, [512, 512, 2048], stage=5, block='a', strides=(2, 2))", "150,150,256 x = conv_block(x, 3, [64, 64, 256], stage=2, block='a',", "kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2b')(x) x = BatchNormalization(name=bn_name_base + '2b')(x) x", "256], stage=2, block='a', strides=(1, 1)) x = identity_block(x, 3, [64,", "1), kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2a')(input_tensor) x = BatchNormalization(name=bn_name_base + '2a')(x)", "broadcast_beta = K.reshape(self.beta, broadcast_shape) broadcast_gamma = K.reshape(self.gamma, broadcast_shape) x_normed =", "= Activation('relu')(x) return x def ResNet50(inputs): #-----------------------------------# # 假设输入进来的图片是600,600,3 #-----------------------------------#", "self.axis = axis self.gamma_regularizer = regularizers.get(gamma_regularizer) self.beta_regularizer = regularizers.get(beta_regularizer) self.initial_weights", "assert self.built, 'Layer must be built before being called' input_shape", "kernel_initializer=random_normal(stddev=0.02), 
name=conv_name_base + '2c')(x) x = BatchNormalization(name=bn_name_base + '2c')(x) shortcut", "base_config = super(BatchNormalization, self).get_config() return dict(list(base_config.items()) + list(config.items())) def identity_block(input_tensor,", "block, strides=(2, 2)): filters1, filters2, filters3 = filters conv_name_base =", "{'epsilon': self.epsilon, 'axis': self.axis, 'gamma_regularizer': self.gamma_regularizer.get_config() if self.gamma_regularizer else None,", "TimeDistributed(BatchNormalization(), name=bn_name_base + '2a')(x) x = Activation('relu')(x) x = TimeDistributed(Conv2D(nb_filter2,", "def build(self, input_shape): self.input_spec = [InputSpec(shape=input_shape)] shape = (input_shape[self.axis],) self.gamma", "axis=-1, weights=None, beta_init='zero', gamma_init='one', gamma_regularizer=None, beta_regularizer=None, **kwargs): self.supports_masking = True", "BatchNormalization(name=bn_name_base + '2b')(x) x = Activation('relu')(x) x = Conv2D(filters3, (1,", "def get_config(self): config = {'epsilon': self.epsilon, 'axis': self.axis, 'gamma_regularizer': self.gamma_regularizer.get_config()", "kernel_size), kernel_initializer='normal',padding='same'), name=conv_name_base + '2b')(x) x = TimeDistributed(BatchNormalization(), name=bn_name_base +", "* len(input_shape) broadcast_shape[self.axis] = input_shape[self.axis] if sorted(reduction_axes) == range(K.ndim(x))[:-1]: x_normed", "+ '2a')(x) x = Activation('relu')(x) x = Conv2D(filters2, kernel_size, padding='same',", "self.built = True def call(self, x, mask=None): assert self.built, 'Layer", "x = Conv2D(filters2, kernel_size, padding='same', kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2b')(x) x", "= Conv2D(filters1, (1, 1), strides=strides, kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2a')(input_tensor) x", "x = identity_block(x, 3, [128, 128, 512], stage=3, block='b') x", "-> 150,150,256 x = conv_block(x, 3, [64, 64, 256], stage=2,", "x = 
TimeDistributed(BatchNormalization(), name=bn_name_base + '2a')(x) x = Activation('relu')(x) x", "(input_shape[self.axis],) self.gamma = self.add_weight(shape, initializer=self.gamma_init, regularizer=self.gamma_regularizer, name='{}_gamma'.format(self.name), trainable=False) self.beta =", "x def conv_block_td(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)): nb_filter1,", "= input_shape[self.axis] if sorted(reduction_axes) == range(K.ndim(x))[:-1]: x_normed = K.batch_normalization( x,", "1024], stage=4, block='a') x = identity_block(x, 3, [256, 256, 1024],", "self.beta_regularizer else None} base_config = super(BatchNormalization, self).get_config() return dict(list(base_config.items()) +", "+ list(config.items())) def identity_block(input_tensor, kernel_size, filters, stage, block): filters1, filters2,", "= TimeDistributed(Conv2D(nb_filter2, (kernel_size, kernel_size), kernel_initializer='normal',padding='same'), name=conv_name_base + '2b')(x) x =", "x = identity_block_td(x, 3, [512, 512, 2048], stage=5, block='c') #", "3, [128, 128, 512], stage=3, block='a') x = identity_block(x, 3,", "if sorted(reduction_axes) == range(K.ndim(x))[:-1]: x_normed = K.batch_normalization( x, self.running_mean, self.running_std,", "'beta_regularizer': self.beta_regularizer.get_config() if self.beta_regularizer else None} base_config = super(BatchNormalization, self).get_config()", "1024 -> num_rois, 7, 7, 2048 x = conv_block_td(x, 3,", "150,150,64 x = MaxPooling2D((3, 3), strides=(2, 2), padding=\"same\")(x) # 150,150,64", "[512, 512, 2048], stage=5, block='c') # num_rois, 7, 7, 2048", "stage=3, block='a') x = identity_block(x, 3, [128, 128, 512], stage=3,", "self.set_weights(self.initial_weights) del self.initial_weights self.built = True def call(self, x, mask=None):", "x = TimeDistributed(Conv2D(nb_filter3, (1, 1), kernel_initializer='normal'), name=conv_name_base + '2c')(x) x", "512, 2048], stage=5, block='b') # num_rois, 7, 7, 2048 ->", "x = 
Activation('relu')(x) x = Conv2D(filters3, (1, 1), kernel_initializer=random_normal(stddev=0.02), name=conv_name_base", "2)) # num_rois, 7, 7, 2048 -> num_rois, 7, 7,", "stage=4, block='d') x = identity_block(x, 3, [256, 256, 1024], stage=4,", "x = Add()([x, shortcut]) x = Activation('relu')(x) return x def", "identity_block(x, 3, [64, 64, 256], stage=2, block='c') # 150,150,256 ->", "initializers, layers, regularizers from keras.engine import InputSpec, Layer from keras.initializers", "def call(self, x, mask=None): assert self.built, 'Layer must be built", "3, [64, 64, 256], stage=2, block='c') # 150,150,256 -> 75,75,512", "'1')(shortcut) x = layers.add([x, shortcut]) x = Activation('relu')(x) return x", "block='f') # 最终获得一个38,38,1024的共享特征层 return x def identity_block_td(input_tensor, kernel_size, filters, stage,", "= K.reshape(self.beta, broadcast_shape) broadcast_gamma = K.reshape(self.gamma, broadcast_shape) x_normed = K.batch_normalization(", "K.reshape(self.gamma, broadcast_shape) x_normed = K.batch_normalization( x, broadcast_running_mean, broadcast_running_std, broadcast_beta, broadcast_gamma,", "[256, 256, 1024], stage=4, block='b') x = identity_block(x, 3, [256,", "= Add()([x, input_tensor]) x = Activation('relu')(x) return x def conv_block_td(input_tensor,", "broadcast_gamma = K.reshape(self.gamma, broadcast_shape) x_normed = K.batch_normalization( x, broadcast_running_mean, broadcast_running_std,", "TimeDistributed(Conv2D(nb_filter1, (1, 1), kernel_initializer='normal'), name=conv_name_base + '2a')(input_tensor) x = TimeDistributed(BatchNormalization(),", "= Activation('relu')(x) return x def conv_block(input_tensor, kernel_size, filters, stage, block,", "1), kernel_initializer='normal'), name=conv_name_base + '2a')(input_tensor) x = TimeDistributed(BatchNormalization(), name=bn_name_base +", "trainable=False) self.running_std = self.add_weight(shape, initializer='one', name='{}_running_std'.format(self.name), trainable=False) if self.initial_weights 
is", "= BatchNormalization(name=bn_name_base + '2b')(x) x = Activation('relu')(x) x = Conv2D(filters3,", "x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1,", "+ '2c')(x) x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2c')(x) x =", "identity_block(input_tensor, kernel_size, filters, stage, block): filters1, filters2, filters3 = filters", "# num_rois, 14, 14, 1024 -> num_rois, 7, 7, 2048", "self.running_std = self.add_weight(shape, initializer='one', name='{}_running_std'.format(self.name), trainable=False) if self.initial_weights is not", "BatchNormalization(Layer): def __init__(self, epsilon=1e-3, axis=-1, weights=None, beta_init='zero', gamma_init='one', gamma_regularizer=None, beta_regularizer=None,", "def identity_block_td(input_tensor, kernel_size, filters, stage, block): nb_filter1, nb_filter2, nb_filter3 =", "2048 -> num_rois, 7, 7, 2048 x = identity_block_td(x, 3,", "+ block + '_branch' x = TimeDistributed(Conv2D(nb_filter1, (1, 1), strides=strides,", "[256, 256, 1024], stage=4, block='f') # 最终获得一个38,38,1024的共享特征层 return x def", "def classifier_layers(x): # num_rois, 14, 14, 1024 -> num_rois, 7,", "1), kernel_initializer='normal'), name=conv_name_base + '2c')(x) x = TimeDistributed(BatchNormalization(), name=bn_name_base +", "filters, stage, block): nb_filter1, nb_filter2, nb_filter3 = filters conv_name_base =", "= Activation('relu')(x) x = TimeDistributed(Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same', kernel_initializer='normal'), name=conv_name_base", "shortcut]) x = Activation('relu')(x) return x def ResNet50(inputs): #-----------------------------------# #", "'2c')(x) shortcut = Conv2D(filters3, (1, 1), strides=strides, kernel_initializer=random_normal(stddev=0.02), name=conv_name_base +", "regularizer=self.gamma_regularizer, name='{}_gamma'.format(self.name), trainable=False) self.beta = self.add_weight(shape, initializer=self.beta_init, regularizer=self.beta_regularizer, 
name='{}_beta'.format(self.name), trainable=False)", "num_rois, 1, 1, 2048 x = TimeDistributed(AveragePooling2D((7, 7)), name='avg_pool')(x) return", "self.gamma_init = initializers.get(gamma_init) self.epsilon = epsilon self.axis = axis self.gamma_regularizer", "regularizer=self.beta_regularizer, name='{}_beta'.format(self.name), trainable=False) self.running_mean = self.add_weight(shape, initializer='zero', name='{}_running_mean'.format(self.name), trainable=False) self.running_std", "block='b') x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')", "7, 7, 2048 x = conv_block_td(x, 3, [512, 512, 2048],", "= K.reshape(self.running_mean, broadcast_shape) broadcast_running_std = K.reshape(self.running_std, broadcast_shape) broadcast_beta = K.reshape(self.beta,", "super(BatchNormalization, self).__init__(**kwargs) def build(self, input_shape): self.input_spec = [InputSpec(shape=input_shape)] shape =", "x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x) x =", "self.add_weight(shape, initializer='zero', name='{}_running_mean'.format(self.name), trainable=False) self.running_std = self.add_weight(shape, initializer='one', name='{}_running_std'.format(self.name), trainable=False)", "block='c') # num_rois, 7, 7, 2048 -> num_rois, 1, 1,", "+ '_branch' bn_name_base = 'bn' + str(stage) + block +", "name=conv_name_base + '2c')(x) x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2c')(x) shortcut", "nb_filter3 = filters conv_name_base = 'res' + str(stage) + block", "block): nb_filter1, nb_filter2, nb_filter3 = filters conv_name_base = 'res' +", "2), padding=\"same\")(x) # 150,150,64 -> 150,150,256 x = conv_block(x, 3,", "kernel_size, filters, stage, block, strides=(2, 2)): nb_filter1, nb_filter2, nb_filter3 =", "block='d') x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')", "K.reshape(self.running_std, broadcast_shape) broadcast_beta = K.reshape(self.beta, broadcast_shape) broadcast_gamma = K.reshape(self.gamma, broadcast_shape)", 
"kernel_size, padding='same', kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2b')(x) x = BatchNormalization(name=bn_name_base +", "[256, 256, 1024], stage=4, block='a') x = identity_block(x, 3, [256,", "Add()([x, shortcut]) x = Activation('relu')(x) return x def classifier_layers(x): #", "layers, regularizers from keras.engine import InputSpec, Layer from keras.initializers import", "padding='same', kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2b')(x) x = BatchNormalization(name=bn_name_base + '2b')(x)", "3, [256, 256, 1024], stage=4, block='e') x = identity_block(x, 3,", "beta_regularizer=None, **kwargs): self.supports_masking = True self.beta_init = initializers.get(beta_init) self.gamma_init =", "3, [256, 256, 1024], stage=4, block='c') x = identity_block(x, 3,", "padding='same', kernel_initializer='normal'), name=conv_name_base + '2b')(x) x = TimeDistributed(BatchNormalization(), name=bn_name_base +", "ZeroPadding2D) class BatchNormalization(Layer): def __init__(self, epsilon=1e-3, axis=-1, weights=None, beta_init='zero', gamma_init='one',", "x = Activation('relu')(x) return x def conv_block_td(input_tensor, kernel_size, filters, stage,", "+ '_branch' x = Conv2D(filters1, (1, 1), strides=strides, kernel_initializer=random_normal(stddev=0.02), name=conv_name_base", "+ '_branch' x = Conv2D(filters1, (1, 1), kernel_initializer=random_normal(stddev=0.02), name=conv_name_base +", "strides=strides, kernel_initializer='normal'), name=conv_name_base + '1')(input_tensor) shortcut = TimeDistributed(BatchNormalization(), name=bn_name_base +", "= TimeDistributed(BatchNormalization(), name=bn_name_base + '2c')(x) shortcut = TimeDistributed(Conv2D(nb_filter3, (1, 1),", "+ '2c')(x) x = BatchNormalization(name=bn_name_base + '2c')(x) shortcut = Conv2D(filters3,", "512, 2048], stage=5, block='c') # num_rois, 7, 7, 2048 ->", "+ '2b')(x) x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2b')(x) x =", "keras.backend as K 
from keras import backend as K from", "stage, block, strides=(2, 2)): filters1, filters2, filters3 = filters conv_name_base", "block): filters1, filters2, filters3 = filters conv_name_base = 'res' +", "= Activation('relu')(x) x = Conv2D(filters3, (1, 1), kernel_initializer=random_normal(stddev=0.02), name=conv_name_base +", "keras.layers import (Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D) class", "x = identity_block(x, 3, [64, 64, 256], stage=2, block='b') x", "del reduction_axes[self.axis] broadcast_shape = [1] * len(input_shape) broadcast_shape[self.axis] = input_shape[self.axis]", "block + '_branch' bn_name_base = 'bn' + str(stage) + block", "128, 512], stage=3, block='b') x = identity_block(x, 3, [128, 128,", "[128, 128, 512], stage=3, block='b') x = identity_block(x, 3, [128,", "2)): filters1, filters2, filters3 = filters conv_name_base = 'res' +", "= Add()([x, shortcut]) x = Activation('relu')(x) return x def classifier_layers(x):", "x def ResNet50(inputs): #-----------------------------------# # 假设输入进来的图片是600,600,3 #-----------------------------------# img_input = inputs", "list(config.items())) def identity_block(input_tensor, kernel_size, filters, stage, block): filters1, filters2, filters3", "self.initial_weights is not None: self.set_weights(self.initial_weights) del self.initial_weights self.built = True", "= layers.add([x, input_tensor]) x = Activation('relu')(x) return x def conv_block(input_tensor,", "= layers.add([x, shortcut]) x = Activation('relu')(x) return x def ResNet50(inputs):", "1), strides=strides, kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2a')(input_tensor) x = BatchNormalization(name=bn_name_base +", "3, [64, 64, 256], stage=2, block='b') x = identity_block(x, 3,", "75,75,512 x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')", "BatchNormalization(name=bn_name_base + '2a')(x) x = Activation('relu')(x) x = Conv2D(filters2, kernel_size,", 
"BatchNormalization(name=bn_name_base + '2c')(x) x = layers.add([x, input_tensor]) x = Activation('relu')(x)", "MaxPooling2D, TimeDistributed, ZeroPadding2D) class BatchNormalization(Layer): def __init__(self, epsilon=1e-3, axis=-1, weights=None,", "stage=2, block='c') # 150,150,256 -> 75,75,512 x = conv_block(x, 3,", "return dict(list(base_config.items()) + list(config.items())) def identity_block(input_tensor, kernel_size, filters, stage, block):", "x = Activation('relu')(x) return x def conv_block(input_tensor, kernel_size, filters, stage,", "if self.gamma_regularizer else None, 'beta_regularizer': self.beta_regularizer.get_config() if self.beta_regularizer else None}", "else: broadcast_running_mean = K.reshape(self.running_mean, broadcast_shape) broadcast_running_std = K.reshape(self.running_std, broadcast_shape) broadcast_beta", "mask=None): assert self.built, 'Layer must be built before being called'", "self.beta, self.gamma, epsilon=self.epsilon) else: broadcast_running_mean = K.reshape(self.running_mean, broadcast_shape) broadcast_running_std =", "name=bn_name_base + '2c')(x) x = Add()([x, input_tensor]) x = Activation('relu')(x)", "+ '1')(shortcut) x = layers.add([x, shortcut]) x = Activation('relu')(x) return", "x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b') x", "'2b')(x) x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2b')(x) x = Activation('relu')(x)", "stage=2, block='b') x = identity_block(x, 3, [64, 64, 256], stage=2,", "self.initial_weights = weights super(BatchNormalization, self).__init__(**kwargs) def build(self, input_shape): self.input_spec =", "'2c')(x) x = layers.add([x, input_tensor]) x = Activation('relu')(x) return x", "get_config(self): config = {'epsilon': self.epsilon, 'axis': self.axis, 'gamma_regularizer': self.gamma_regularizer.get_config() if", "# 600,600,3 -> 300,300,64 x = ZeroPadding2D((3, 3))(img_input) x =", "256], stage=2, block='b') x = identity_block(x, 3, [64, 64, 256],", 
"self.add_weight(shape, initializer=self.beta_init, regularizer=self.beta_regularizer, name='{}_beta'.format(self.name), trainable=False) self.running_mean = self.add_weight(shape, initializer='zero', name='{}_running_mean'.format(self.name),", "512, 2048], stage=5, block='a', strides=(2, 2)) # num_rois, 7, 7,", "Activation('relu')(x) # 300,300,64 -> 150,150,64 x = MaxPooling2D((3, 3), strides=(2,", "strides=(2, 2)): nb_filter1, nb_filter2, nb_filter3 = filters conv_name_base = 'res'", "regularizers.get(beta_regularizer) self.initial_weights = weights super(BatchNormalization, self).__init__(**kwargs) def build(self, input_shape): self.input_spec", "kernel_size, filters, stage, block): filters1, filters2, filters3 = filters conv_name_base", "name=conv_name_base + '2c')(x) x = BatchNormalization(name=bn_name_base + '2c')(x) shortcut =", "= TimeDistributed(Conv2D(nb_filter3, (1, 1), kernel_initializer='normal'), name=conv_name_base + '2c')(x) x =", "img_input = inputs # 600,600,3 -> 300,300,64 x = ZeroPadding2D((3,", "7, 7, 2048 -> num_rois, 1, 1, 2048 x =", "K.batch_normalization( x, broadcast_running_mean, broadcast_running_std, broadcast_beta, broadcast_gamma, epsilon=self.epsilon) return x_normed def", "stage=5, block='c') # num_rois, 7, 7, 2048 -> num_rois, 1,", "stage=2, block='a', strides=(1, 1)) x = identity_block(x, 3, [64, 64,", "return x def conv_block_td(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):", "initializer=self.gamma_init, regularizer=self.gamma_regularizer, name='{}_gamma'.format(self.name), trainable=False) self.beta = self.add_weight(shape, initializer=self.beta_init, regularizer=self.beta_regularizer, name='{}_beta'.format(self.name),", "initializer='one', name='{}_running_std'.format(self.name), trainable=False) if self.initial_weights is not None: self.set_weights(self.initial_weights) del", "# 150,150,64 -> 150,150,256 x = conv_block(x, 3, [64, 64,", "range(K.ndim(x))[:-1]: x_normed = K.batch_normalization( x, 
self.running_mean, self.running_std, self.beta, self.gamma, epsilon=self.epsilon)", "256, 1024], stage=4, block='c') x = identity_block(x, 3, [256, 256,", "kernel_size, filters, stage, block): nb_filter1, nb_filter2, nb_filter3 = filters conv_name_base", "if self.beta_regularizer else None} base_config = super(BatchNormalization, self).get_config() return dict(list(base_config.items())", "stage=3, block='d') # 75,75,512 -> 38,38,1024 x = conv_block(x, 3,", "2048], stage=5, block='a', strides=(2, 2)) # num_rois, 7, 7, 2048", "block + '_branch' x = Conv2D(filters1, (1, 1), strides=strides, kernel_initializer=random_normal(stddev=0.02),", "3, [256, 256, 1024], stage=4, block='b') x = identity_block(x, 3,", "x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e') x", "Add()([x, input_tensor]) x = Activation('relu')(x) return x def conv_block_td(input_tensor, kernel_size,", "7, 2048 x = conv_block_td(x, 3, [512, 512, 2048], stage=5,", "self.initial_weights self.built = True def call(self, x, mask=None): assert self.built,", "self.beta = self.add_weight(shape, initializer=self.beta_init, regularizer=self.beta_regularizer, name='{}_beta'.format(self.name), trainable=False) self.running_mean = self.add_weight(shape,", "'2c')(x) x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2c')(x) x = Add()([x,", "x def identity_block_td(input_tensor, kernel_size, filters, stage, block): nb_filter1, nb_filter2, nb_filter3", "(kernel_size, kernel_size), padding='same', kernel_initializer='normal'), name=conv_name_base + '2b')(x) x = TimeDistributed(BatchNormalization(),", "block + '_branch' x = TimeDistributed(Conv2D(nb_filter1, (1, 1), kernel_initializer='normal'), name=conv_name_base", "= Activation('relu')(x) x = TimeDistributed(Conv2D(nb_filter2, (kernel_size, kernel_size), kernel_initializer='normal',padding='same'), name=conv_name_base +", "identity_block(x, 3, [128, 128, 512], stage=3, block='c') x = identity_block(x,", "block='a') x = identity_block(x, 3, 
[128, 128, 512], stage=3, block='b')", "broadcast_shape) broadcast_gamma = K.reshape(self.gamma, broadcast_shape) x_normed = K.batch_normalization( x, broadcast_running_mean,", "= identity_block(x, 3, [256, 256, 1024], stage=4, block='c') x =", "'_branch' x = TimeDistributed(Conv2D(nb_filter1, (1, 1), strides=strides, kernel_initializer='normal'), name=conv_name_base +", "identity_block_td(x, 3, [512, 512, 2048], stage=5, block='b') # num_rois, 7,", "-> 38,38,1024 x = conv_block(x, 3, [256, 256, 1024], stage=4,", "= identity_block(x, 3, [256, 256, 1024], stage=4, block='d') x =", "3, [128, 128, 512], stage=3, block='d') # 75,75,512 -> 38,38,1024", "300,300,64 x = ZeroPadding2D((3, 3))(img_input) x = Conv2D(64, (7, 7),", "stage=4, block='f') # 最终获得一个38,38,1024的共享特征层 return x def identity_block_td(input_tensor, kernel_size, filters,", "+ '2c')(x) x = layers.add([x, input_tensor]) x = Activation('relu')(x) return", "= Activation('relu')(x) x = Conv2D(filters2, kernel_size, padding='same', kernel_initializer=random_normal(stddev=0.02), name=conv_name_base +", "= ZeroPadding2D((3, 3))(img_input) x = Conv2D(64, (7, 7), strides=(2, 2),", "= identity_block(x, 3, [256, 256, 1024], stage=4, block='b') x =", "= epsilon self.axis = axis self.gamma_regularizer = regularizers.get(gamma_regularizer) self.beta_regularizer =", "str(stage) + block + '_branch' x = TimeDistributed(Conv2D(nb_filter1, (1, 1),", "# 最终获得一个38,38,1024的共享特征层 return x def identity_block_td(input_tensor, kernel_size, filters, stage, block):", "x = Conv2D(filters1, (1, 1), kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2a')(input_tensor) x", "# num_rois, 7, 7, 2048 -> num_rois, 1, 1, 2048", "= K.reshape(self.running_std, broadcast_shape) broadcast_beta = K.reshape(self.beta, broadcast_shape) broadcast_gamma = K.reshape(self.gamma,", "-> 300,300,64 x = ZeroPadding2D((3, 3))(img_input) x = Conv2D(64, (7,", "+ '_branch' x = TimeDistributed(Conv2D(nb_filter1, (1, 1), 
kernel_initializer='normal'), name=conv_name_base +", "x = BatchNormalization(name=bn_name_base + '2a')(x) x = Activation('relu')(x) x =", "identity_block(x, 3, [256, 256, 1024], stage=4, block='b') x = identity_block(x,", "(1, 1), strides=strides, kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '1')(input_tensor) shortcut = BatchNormalization(name=bn_name_base", "+ str(stage) + block + '_branch' bn_name_base = 'bn' +", "keras import initializers, layers, regularizers from keras.engine import InputSpec, Layer", "name=bn_name_base + '2a')(x) x = Activation('relu')(x) x = TimeDistributed(Conv2D(nb_filter2, (kernel_size,", "num_rois, 7, 7, 2048 x = identity_block_td(x, 3, [512, 512,", "# 300,300,64 -> 150,150,64 x = MaxPooling2D((3, 3), strides=(2, 2),", "__init__(self, epsilon=1e-3, axis=-1, weights=None, beta_init='zero', gamma_init='one', gamma_regularizer=None, beta_regularizer=None, **kwargs): self.supports_masking", "2048], stage=5, block='b') # num_rois, 7, 7, 2048 -> num_rois,", "7), strides=(2, 2), name='conv1')(x) x = BatchNormalization(name='bn_conv1')(x) x = Activation('relu')(x)", "= TimeDistributed(BatchNormalization(), name=bn_name_base + '2b')(x) x = Activation('relu')(x) x =", "150,150,64 -> 150,150,256 x = conv_block(x, 3, [64, 64, 256],", "filters, stage, block): filters1, filters2, filters3 = filters conv_name_base =", "name=bn_name_base + '2b')(x) x = Activation('relu')(x) x = TimeDistributed(Conv2D(nb_filter3, (1,", "(1, 1), kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2c')(x) x = BatchNormalization(name=bn_name_base +", "reduction_axes = list(range(len(input_shape))) del reduction_axes[self.axis] broadcast_shape = [1] * len(input_shape)", "shape = (input_shape[self.axis],) self.gamma = self.add_weight(shape, initializer=self.gamma_init, regularizer=self.gamma_regularizer, name='{}_gamma'.format(self.name), trainable=False)", "+ '2a')(input_tensor) x = TimeDistributed(BatchNormalization(), 
name=bn_name_base + '2a')(x) x =", "+ '2c')(x) shortcut = TimeDistributed(Conv2D(nb_filter3, (1, 1), strides=strides, kernel_initializer='normal'), name=conv_name_base", "(1, 1), strides=strides, kernel_initializer='normal'), name=conv_name_base + '1')(input_tensor) shortcut = TimeDistributed(BatchNormalization(),", "= filters conv_name_base = 'res' + str(stage) + block +", "128, 512], stage=3, block='a') x = identity_block(x, 3, [128, 128,", "layers.add([x, shortcut]) x = Activation('relu')(x) return x def ResNet50(inputs): #-----------------------------------#", "Conv2D(filters2, kernel_size, padding='same', kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2b')(x) x = BatchNormalization(name=bn_name_base", "Activation('relu')(x) x = Conv2D(filters2, kernel_size, padding='same', kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2b')(x)", "x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2c')(x) shortcut = TimeDistributed(Conv2D(nb_filter3, (1,", "axis self.gamma_regularizer = regularizers.get(gamma_regularizer) self.beta_regularizer = regularizers.get(beta_regularizer) self.initial_weights = weights", "x = conv_block_td(x, 3, [512, 512, 2048], stage=5, block='a', strides=(2,", "strides=(2, 2)) # num_rois, 7, 7, 2048 -> num_rois, 7,", "'1')(shortcut) x = Add()([x, shortcut]) x = Activation('relu')(x) return x", "Layer from keras.initializers import random_normal from keras.layers import (Activation, Add,", "trainable=False) if self.initial_weights is not None: self.set_weights(self.initial_weights) del self.initial_weights self.built", "64, 256], stage=2, block='a', strides=(1, 1)) x = identity_block(x, 3,", "x = identity_block(x, 3, [128, 128, 512], stage=3, block='c') x", "dict(list(base_config.items()) + list(config.items())) def identity_block(input_tensor, kernel_size, filters, stage, block): filters1,", "= Conv2D(filters1, (1, 1), kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + 
'2a')(input_tensor) x =", "keras import backend as K from keras import initializers, layers,", "Conv2D(filters3, (1, 1), strides=strides, kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '1')(input_tensor) shortcut =", "= TimeDistributed(Conv2D(nb_filter1, (1, 1), strides=strides, kernel_initializer='normal'), name=conv_name_base + '2a')(input_tensor) x", "x = Activation('relu')(x) x = Conv2D(filters2, kernel_size, padding='same', kernel_initializer=random_normal(stddev=0.02), name=conv_name_base", "config = {'epsilon': self.epsilon, 'axis': self.axis, 'gamma_regularizer': self.gamma_regularizer.get_config() if self.gamma_regularizer", "= Activation('relu')(x) return x def classifier_layers(x): # num_rois, 14, 14,", "regularizers.get(gamma_regularizer) self.beta_regularizer = regularizers.get(beta_regularizer) self.initial_weights = weights super(BatchNormalization, self).__init__(**kwargs) def", "num_rois, 14, 14, 1024 -> num_rois, 7, 7, 2048 x", "trainable=False) self.running_mean = self.add_weight(shape, initializer='zero', name='{}_running_mean'.format(self.name), trainable=False) self.running_std = self.add_weight(shape,", "self.input_spec = [InputSpec(shape=input_shape)] shape = (input_shape[self.axis],) self.gamma = self.add_weight(shape, initializer=self.gamma_init,", "+ '1')(shortcut) x = Add()([x, shortcut]) x = Activation('relu')(x) return", "block='a', strides=(1, 1)) x = identity_block(x, 3, [64, 64, 256],", "= identity_block_td(x, 3, [512, 512, 2048], stage=5, block='c') # num_rois,", "= TimeDistributed(Conv2D(nb_filter3, (1, 1), strides=strides, kernel_initializer='normal'), name=conv_name_base + '1')(input_tensor) shortcut", "list(range(len(input_shape))) del reduction_axes[self.axis] broadcast_shape = [1] * len(input_shape) broadcast_shape[self.axis] =", "True self.beta_init = initializers.get(beta_init) self.gamma_init = initializers.get(gamma_init) self.epsilon = epsilon", "'_branch' x = TimeDistributed(Conv2D(nb_filter1, (1, 
1), kernel_initializer='normal'), name=conv_name_base + '2a')(input_tensor)", "input_shape[self.axis] if sorted(reduction_axes) == range(K.ndim(x))[:-1]: x_normed = K.batch_normalization( x, self.running_mean,", "x def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)): filters1,", "def identity_block(input_tensor, kernel_size, filters, stage, block): filters1, filters2, filters3 =", "x = identity_block(x, 3, [64, 64, 256], stage=2, block='c') #", "filters1, filters2, filters3 = filters conv_name_base = 'res' + str(stage)", "weights=None, beta_init='zero', gamma_init='one', gamma_regularizer=None, beta_regularizer=None, **kwargs): self.supports_masking = True self.beta_init", "BatchNormalization(name=bn_name_base + '2c')(x) shortcut = Conv2D(filters3, (1, 1), strides=strides, kernel_initializer=random_normal(stddev=0.02),", "stage=3, block='c') x = identity_block(x, 3, [128, 128, 512], stage=3,", "Activation('relu')(x) x = TimeDistributed(Conv2D(nb_filter3, (1, 1), kernel_initializer='normal'), name=conv_name_base + '2c')(x)", "must be built before being called' input_shape = K.int_shape(x) reduction_axes", "stage=4, block='b') x = identity_block(x, 3, [256, 256, 1024], stage=4,", "return x def identity_block_td(input_tensor, kernel_size, filters, stage, block): nb_filter1, nb_filter2,", "MaxPooling2D((3, 3), strides=(2, 2), padding=\"same\")(x) # 150,150,64 -> 150,150,256 x", "+ block + '_branch' x = Conv2D(filters1, (1, 1), kernel_initializer=random_normal(stddev=0.02),", "= weights super(BatchNormalization, self).__init__(**kwargs) def build(self, input_shape): self.input_spec = [InputSpec(shape=input_shape)]", "= BatchNormalization(name=bn_name_base + '2c')(x) shortcut = Conv2D(filters3, (1, 1), strides=strides,", "kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '1')(input_tensor) shortcut = BatchNormalization(name=bn_name_base + '1')(shortcut) x", "+ block + '_branch' x = TimeDistributed(Conv2D(nb_filter1, (1, 1), 
kernel_initializer='normal'),", "from keras.layers import (Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D)", "1, 1, 2048 x = TimeDistributed(AveragePooling2D((7, 7)), name='avg_pool')(x) return x", "None, 'beta_regularizer': self.beta_regularizer.get_config() if self.beta_regularizer else None} base_config = super(BatchNormalization,", "name=conv_name_base + '2c')(x) x = BatchNormalization(name=bn_name_base + '2c')(x) x =", "bn_name_base = 'bn' + str(stage) + block + '_branch' x", "block='c') # 150,150,256 -> 75,75,512 x = conv_block(x, 3, [128,", "self.epsilon = epsilon self.axis = axis self.gamma_regularizer = regularizers.get(gamma_regularizer) self.beta_regularizer", "TimeDistributed(Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same', kernel_initializer='normal'), name=conv_name_base + '2b')(x) x =", "+ block + '_branch' bn_name_base = 'bn' + str(stage) +", "'_branch' x = Conv2D(filters1, (1, 1), strides=strides, kernel_initializer=random_normal(stddev=0.02), name=conv_name_base +", "self).__init__(**kwargs) def build(self, input_shape): self.input_spec = [InputSpec(shape=input_shape)] shape = (input_shape[self.axis],)", "Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D) class BatchNormalization(Layer): def __init__(self,", "= K.batch_normalization( x, broadcast_running_mean, broadcast_running_std, broadcast_beta, broadcast_gamma, epsilon=self.epsilon) return x_normed", "1024], stage=4, block='b') x = identity_block(x, 3, [256, 256, 1024],", "built before being called' input_shape = K.int_shape(x) reduction_axes = list(range(len(input_shape)))", "as K from keras import initializers, layers, regularizers from keras.engine", "self.supports_masking = True self.beta_init = initializers.get(beta_init) self.gamma_init = initializers.get(gamma_init) self.epsilon", "== range(K.ndim(x))[:-1]: x_normed = K.batch_normalization( x, self.running_mean, self.running_std, self.beta, self.gamma,", "x 
= layers.add([x, shortcut]) x = Activation('relu')(x) return x def", "InputSpec, Layer from keras.initializers import random_normal from keras.layers import (Activation,", "2048], stage=5, block='c') # num_rois, 7, 7, 2048 -> num_rois,", "256, 1024], stage=4, block='e') x = identity_block(x, 3, [256, 256,", "'Layer must be built before being called' input_shape = K.int_shape(x)", "= BatchNormalization(name=bn_name_base + '2c')(x) x = layers.add([x, input_tensor]) x =", "x = BatchNormalization(name=bn_name_base + '2c')(x) shortcut = Conv2D(filters3, (1, 1),", "1024], stage=4, block='d') x = identity_block(x, 3, [256, 256, 1024],", "conv_block_td(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)): nb_filter1, nb_filter2, nb_filter3", "x, self.running_mean, self.running_std, self.beta, self.gamma, epsilon=self.epsilon) else: broadcast_running_mean = K.reshape(self.running_mean,", "'2a')(input_tensor) x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2a')(x) x = Activation('relu')(x)", "block + '_branch' x = Conv2D(filters1, (1, 1), kernel_initializer=random_normal(stddev=0.02), name=conv_name_base", "conv_block_td(x, 3, [512, 512, 2048], stage=5, block='a', strides=(2, 2)) #", "shortcut]) x = Activation('relu')(x) return x def classifier_layers(x): # num_rois,", "'2a')(x) x = Activation('relu')(x) x = Conv2D(filters2, kernel_size, padding='same', kernel_initializer=random_normal(stddev=0.02),", "150,150,256 -> 75,75,512 x = conv_block(x, 3, [128, 128, 512],", "if self.initial_weights is not None: self.set_weights(self.initial_weights) del self.initial_weights self.built =", "ResNet50(inputs): #-----------------------------------# # 假设输入进来的图片是600,600,3 #-----------------------------------# img_input = inputs # 600,600,3", "epsilon=self.epsilon) else: broadcast_running_mean = K.reshape(self.running_mean, broadcast_shape) broadcast_running_std = K.reshape(self.running_std, broadcast_shape)", "-> 150,150,64 x = MaxPooling2D((3, 3), strides=(2, 
2), padding=\"same\")(x) #", "shortcut = TimeDistributed(BatchNormalization(), name=bn_name_base + '1')(shortcut) x = Add()([x, shortcut])", "1)) x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')", "epsilon=1e-3, axis=-1, weights=None, beta_init='zero', gamma_init='one', gamma_regularizer=None, beta_regularizer=None, **kwargs): self.supports_masking =", "Conv2D(filters1, (1, 1), strides=strides, kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2a')(input_tensor) x =", "return x def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):", "name='{}_gamma'.format(self.name), trainable=False) self.beta = self.add_weight(shape, initializer=self.beta_init, regularizer=self.beta_regularizer, name='{}_beta'.format(self.name), trainable=False) self.running_mean", "'2a')(input_tensor) x = BatchNormalization(name=bn_name_base + '2a')(x) x = Activation('relu')(x) x", "512], stage=3, block='a') x = identity_block(x, 3, [128, 128, 512],", "= [1] * len(input_shape) broadcast_shape[self.axis] = input_shape[self.axis] if sorted(reduction_axes) ==", "+ '2a')(x) x = Activation('relu')(x) x = TimeDistributed(Conv2D(nb_filter2, (kernel_size, kernel_size),", "backend as K from keras import initializers, layers, regularizers from", "300,300,64 -> 150,150,64 x = MaxPooling2D((3, 3), strides=(2, 2), padding=\"same\")(x)", "= BatchNormalization(name=bn_name_base + '2a')(x) x = Activation('relu')(x) x = Conv2D(filters2,", "= conv_block(x, 3, [128, 128, 512], stage=3, block='a') x =", "(1, 1), kernel_initializer='normal'), name=conv_name_base + '2c')(x) x = TimeDistributed(BatchNormalization(), name=bn_name_base", "kernel_size, filters, stage, block, strides=(2, 2)): filters1, filters2, filters3 =", "name='{}_running_mean'.format(self.name), trainable=False) self.running_std = self.add_weight(shape, initializer='one', name='{}_running_std'.format(self.name), trainable=False) if self.initial_weights", "'2b')(x) x = Activation('relu')(x) x 
= TimeDistributed(Conv2D(nb_filter3, (1, 1), kernel_initializer='normal'),", "TimeDistributed(Conv2D(nb_filter2, (kernel_size, kernel_size), kernel_initializer='normal',padding='same'), name=conv_name_base + '2b')(x) x = TimeDistributed(BatchNormalization(),", "+ '2b')(x) x = Activation('relu')(x) x = TimeDistributed(Conv2D(nb_filter3, (1, 1),", "identity_block(x, 3, [256, 256, 1024], stage=4, block='e') x = identity_block(x,", "-> num_rois, 7, 7, 2048 x = identity_block_td(x, 3, [512,", "x = TimeDistributed(Conv2D(nb_filter1, (1, 1), strides=strides, kernel_initializer='normal'), name=conv_name_base + '2a')(input_tensor)", "# num_rois, 7, 7, 2048 -> num_rois, 7, 7, 2048", "= Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x) x = BatchNormalization(name='bn_conv1')(x)", "Activation('relu')(x) return x def conv_block_td(input_tensor, kernel_size, filters, stage, block, strides=(2,", "broadcast_running_std = K.reshape(self.running_std, broadcast_shape) broadcast_beta = K.reshape(self.beta, broadcast_shape) broadcast_gamma =", "= BatchNormalization(name='bn_conv1')(x) x = Activation('relu')(x) # 300,300,64 -> 150,150,64 x", "broadcast_shape) x_normed = K.batch_normalization( x, broadcast_running_mean, broadcast_running_std, broadcast_beta, broadcast_gamma, epsilon=self.epsilon)", "1), strides=strides, kernel_initializer='normal'), name=conv_name_base + '2a')(input_tensor) x = TimeDistributed(BatchNormalization(), name=bn_name_base", "'res' + str(stage) + block + '_branch' bn_name_base = 'bn'", "'2a')(x) x = Activation('relu')(x) x = TimeDistributed(Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',", "x = Activation('relu')(x) x = TimeDistributed(Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same', kernel_initializer='normal'),", "# 假设输入进来的图片是600,600,3 #-----------------------------------# img_input = inputs # 600,600,3 -> 300,300,64", "[InputSpec(shape=input_shape)] shape = (input_shape[self.axis],) self.gamma = self.add_weight(shape, 
initializer=self.gamma_init, regularizer=self.gamma_regularizer, name='{}_gamma'.format(self.name),", "block='c') x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')", "x = BatchNormalization(name=bn_name_base + '2c')(x) x = layers.add([x, input_tensor]) x", "return x def ResNet50(inputs): #-----------------------------------# # 假设输入进来的图片是600,600,3 #-----------------------------------# img_input =", "#-----------------------------------# # 假设输入进来的图片是600,600,3 #-----------------------------------# img_input = inputs # 600,600,3 ->", "#-------------------------------------------------------------# # ResNet50的网络部分 #-------------------------------------------------------------# import keras.backend as K from keras", "'2c')(x) x = BatchNormalization(name=bn_name_base + '2c')(x) shortcut = Conv2D(filters3, (1,", "def ResNet50(inputs): #-----------------------------------# # 假设输入进来的图片是600,600,3 #-----------------------------------# img_input = inputs #", "kernel_initializer='normal'), name=conv_name_base + '2a')(input_tensor) x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2a')(x)", "def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)): filters1, filters2,", "64, 256], stage=2, block='b') x = identity_block(x, 3, [64, 64,", "nb_filter1, nb_filter2, nb_filter3 = filters conv_name_base = 'res' + str(stage)", "identity_block(x, 3, [128, 128, 512], stage=3, block='d') # 75,75,512 ->", "return x def classifier_layers(x): # num_rois, 14, 14, 1024 ->", "+ block + '_branch' x = Conv2D(filters1, (1, 1), strides=strides,", "= initializers.get(beta_init) self.gamma_init = initializers.get(gamma_init) self.epsilon = epsilon self.axis =", "1), strides=strides, kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '1')(input_tensor) shortcut = BatchNormalization(name=bn_name_base +", "import initializers, layers, regularizers from keras.engine import InputSpec, Layer from", "epsilon=self.epsilon) return x_normed def 
get_config(self): config = {'epsilon': self.epsilon, 'axis':", "2), name='conv1')(x) x = BatchNormalization(name='bn_conv1')(x) x = Activation('relu')(x) # 300,300,64", "最终获得一个38,38,1024的共享特征层 return x def identity_block_td(input_tensor, kernel_size, filters, stage, block): nb_filter1,", "= Activation('relu')(x) return x def conv_block_td(input_tensor, kernel_size, filters, stage, block,", "self.beta_regularizer.get_config() if self.beta_regularizer else None} base_config = super(BatchNormalization, self).get_config() return", "(1, 1), kernel_initializer='normal'), name=conv_name_base + '2a')(input_tensor) x = TimeDistributed(BatchNormalization(), name=bn_name_base", "'2c')(x) x = Add()([x, input_tensor]) x = Activation('relu')(x) return x", "K from keras import initializers, layers, regularizers from keras.engine import", "x_normed = K.batch_normalization( x, self.running_mean, self.running_std, self.beta, self.gamma, epsilon=self.epsilon) else:", "Conv2D(filters3, (1, 1), kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2c')(x) x = BatchNormalization(name=bn_name_base", "= conv_block(x, 3, [256, 256, 1024], stage=4, block='a') x =", "del self.initial_weights self.built = True def call(self, x, mask=None): assert", "[64, 64, 256], stage=2, block='a', strides=(1, 1)) x = identity_block(x,", "75,75,512 -> 38,38,1024 x = conv_block(x, 3, [256, 256, 1024],", "conv_block(x, 3, [256, 256, 1024], stage=4, block='a') x = identity_block(x,", "num_rois, 7, 7, 2048 -> num_rois, 7, 7, 2048 x", "14, 1024 -> num_rois, 7, 7, 2048 x = conv_block_td(x,", "stage=4, block='c') x = identity_block(x, 3, [256, 256, 1024], stage=4,", "before being called' input_shape = K.int_shape(x) reduction_axes = list(range(len(input_shape))) del", "= self.add_weight(shape, initializer='zero', name='{}_running_mean'.format(self.name), trainable=False) self.running_std = self.add_weight(shape, initializer='one', name='{}_running_std'.format(self.name),", "= 
TimeDistributed(Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same', kernel_initializer='normal'), name=conv_name_base + '2b')(x) x", "identity_block(x, 3, [256, 256, 1024], stage=4, block='c') x = identity_block(x,", "7, 2048 -> num_rois, 7, 7, 2048 x = identity_block_td(x,", "strides=strides, kernel_initializer='normal'), name=conv_name_base + '2a')(input_tensor) x = TimeDistributed(BatchNormalization(), name=bn_name_base +", "broadcast_running_mean = K.reshape(self.running_mean, broadcast_shape) broadcast_running_std = K.reshape(self.running_std, broadcast_shape) broadcast_beta =", "kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2a')(input_tensor) x = BatchNormalization(name=bn_name_base + '2a')(x) x", "block='b') x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')", "ZeroPadding2D((3, 3))(img_input) x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)", "(1, 1), strides=strides, kernel_initializer='normal'), name=conv_name_base + '2a')(input_tensor) x = TimeDistributed(BatchNormalization(),", "3, [512, 512, 2048], stage=5, block='a', strides=(2, 2)) # num_rois,", "x = identity_block_td(x, 3, [512, 512, 2048], stage=5, block='b') #", "1024], stage=4, block='f') # 最终获得一个38,38,1024的共享特征层 return x def identity_block_td(input_tensor, kernel_size,", "[512, 512, 2048], stage=5, block='b') # num_rois, 7, 7, 2048", "x_normed = K.batch_normalization( x, broadcast_running_mean, broadcast_running_std, broadcast_beta, broadcast_gamma, epsilon=self.epsilon) return", "block='e') x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')", "+ '2b')(x) x = BatchNormalization(name=bn_name_base + '2b')(x) x = Activation('relu')(x)", "[256, 256, 1024], stage=4, block='e') x = identity_block(x, 3, [256,", "3, [256, 256, 1024], stage=4, block='d') x = identity_block(x, 3,", "'_branch' bn_name_base = 'bn' + str(stage) + block + '_branch'", "+ str(stage) + block + '_branch' x = Conv2D(filters1, (1,", "512], stage=3, block='d') # 
75,75,512 -> 38,38,1024 x = conv_block(x,", "+ '2c')(x) x = Add()([x, input_tensor]) x = Activation('relu')(x) return", "self.gamma_regularizer = regularizers.get(gamma_regularizer) self.beta_regularizer = regularizers.get(beta_regularizer) self.initial_weights = weights super(BatchNormalization,", "假设输入进来的图片是600,600,3 #-----------------------------------# img_input = inputs # 600,600,3 -> 300,300,64 x", "= Activation('relu')(x) x = TimeDistributed(Conv2D(nb_filter3, (1, 1), kernel_initializer='normal'), name=conv_name_base +", "name=conv_name_base + '2c')(x) x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2c')(x) x", "import InputSpec, Layer from keras.initializers import random_normal from keras.layers import", "= K.reshape(self.gamma, broadcast_shape) x_normed = K.batch_normalization( x, broadcast_running_mean, broadcast_running_std, broadcast_beta,", "nb_filter2, nb_filter3 = filters conv_name_base = 'res' + str(stage) +", "num_rois, 7, 7, 2048 -> num_rois, 1, 1, 2048 x", "from keras.initializers import random_normal from keras.layers import (Activation, Add, AveragePooling2D,", "= Conv2D(filters3, (1, 1), strides=strides, kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '1')(input_tensor) shortcut", "128, 512], stage=3, block='c') x = identity_block(x, 3, [128, 128,", "x def classifier_layers(x): # num_rois, 14, 14, 1024 -> num_rois,", "name='{}_running_std'.format(self.name), trainable=False) if self.initial_weights is not None: self.set_weights(self.initial_weights) del self.initial_weights", "= self.add_weight(shape, initializer=self.beta_init, regularizer=self.beta_regularizer, name='{}_beta'.format(self.name), trainable=False) self.running_mean = self.add_weight(shape, initializer='zero',", "= K.int_shape(x) reduction_axes = list(range(len(input_shape))) del reduction_axes[self.axis] broadcast_shape = [1]", "= list(range(len(input_shape))) del reduction_axes[self.axis] broadcast_shape = [1] * len(input_shape) 
broadcast_shape[self.axis]", "+ '1')(input_tensor) shortcut = BatchNormalization(name=bn_name_base + '1')(shortcut) x = layers.add([x,", "= TimeDistributed(BatchNormalization(), name=bn_name_base + '2c')(x) x = Add()([x, input_tensor]) x", "38,38,1024 x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')", "block='a') x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')", "identity_block_td(x, 3, [512, 512, 2048], stage=5, block='c') # num_rois, 7,", "layers.add([x, input_tensor]) x = Activation('relu')(x) return x def conv_block(input_tensor, kernel_size,", "keras.engine import InputSpec, Layer from keras.initializers import random_normal from keras.layers", "shortcut = BatchNormalization(name=bn_name_base + '1')(shortcut) x = layers.add([x, shortcut]) x", "'2c')(x) x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2c')(x) shortcut = TimeDistributed(Conv2D(nb_filter3,", "import random_normal from keras.layers import (Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D,", "'1')(input_tensor) shortcut = BatchNormalization(name=bn_name_base + '1')(shortcut) x = layers.add([x, shortcut])", "self.add_weight(shape, initializer='one', name='{}_running_std'.format(self.name), trainable=False) if self.initial_weights is not None: self.set_weights(self.initial_weights)", "K.reshape(self.beta, broadcast_shape) broadcast_gamma = K.reshape(self.gamma, broadcast_shape) x_normed = K.batch_normalization( x,", "TimeDistributed(BatchNormalization(), name=bn_name_base + '2c')(x) x = Add()([x, input_tensor]) x =", "1024], stage=4, block='e') x = identity_block(x, 3, [256, 256, 1024],", "trainable=False) self.beta = self.add_weight(shape, initializer=self.beta_init, regularizer=self.beta_regularizer, name='{}_beta'.format(self.name), trainable=False) self.running_mean =", "initializer=self.beta_init, regularizer=self.beta_regularizer, name='{}_beta'.format(self.name), trainable=False) self.running_mean = self.add_weight(shape, initializer='zero', 
name='{}_running_mean'.format(self.name), trainable=False)", "= {'epsilon': self.epsilon, 'axis': self.axis, 'gamma_regularizer': self.gamma_regularizer.get_config() if self.gamma_regularizer else", "3, [512, 512, 2048], stage=5, block='c') # num_rois, 7, 7,", "x = TimeDistributed(Conv2D(nb_filter1, (1, 1), kernel_initializer='normal'), name=conv_name_base + '2a')(input_tensor) x", "input_tensor]) x = Activation('relu')(x) return x def conv_block(input_tensor, kernel_size, filters,", "7, 7, 2048 x = identity_block_td(x, 3, [512, 512, 2048],", "import backend as K from keras import initializers, layers, regularizers", "= inputs # 600,600,3 -> 300,300,64 x = ZeroPadding2D((3, 3))(img_input)", "'2b')(x) x = BatchNormalization(name=bn_name_base + '2b')(x) x = Activation('relu')(x) x", "7, 2048 x = identity_block_td(x, 3, [512, 512, 2048], stage=5,", "x = BatchNormalization(name=bn_name_base + '2b')(x) x = Activation('relu')(x) x =", "(Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D) class BatchNormalization(Layer): def", "strides=(1, 1)) x = identity_block(x, 3, [64, 64, 256], stage=2,", "Activation('relu')(x) x = TimeDistributed(Conv2D(nb_filter2, (kernel_size, kernel_size), kernel_initializer='normal',padding='same'), name=conv_name_base + '2b')(x)", "[128, 128, 512], stage=3, block='d') # 75,75,512 -> 38,38,1024 x", "x = Activation('relu')(x) # 300,300,64 -> 150,150,64 x = MaxPooling2D((3,", "self.gamma_regularizer else None, 'beta_regularizer': self.beta_regularizer.get_config() if self.beta_regularizer else None} base_config", "BatchNormalization(name='bn_conv1')(x) x = Activation('relu')(x) # 300,300,64 -> 150,150,64 x =", "kernel_initializer='normal'), name=conv_name_base + '2c')(x) x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2c')(x)", "+ str(stage) + block + '_branch' x = TimeDistributed(Conv2D(nb_filter1, (1,", "gamma_regularizer=None, beta_regularizer=None, **kwargs): self.supports_masking = True 
self.beta_init = initializers.get(beta_init) self.gamma_init", "self).get_config() return dict(list(base_config.items()) + list(config.items())) def identity_block(input_tensor, kernel_size, filters, stage,", "identity_block_td(input_tensor, kernel_size, filters, stage, block): nb_filter1, nb_filter2, nb_filter3 = filters", "64, 256], stage=2, block='c') # 150,150,256 -> 75,75,512 x =", "[128, 128, 512], stage=3, block='a') x = identity_block(x, 3, [128,", "conv_block(x, 3, [128, 128, 512], stage=3, block='a') x = identity_block(x,", "[64, 64, 256], stage=2, block='b') x = identity_block(x, 3, [64,", "256, 1024], stage=4, block='d') x = identity_block(x, 3, [256, 256,", "self.gamma_regularizer.get_config() if self.gamma_regularizer else None, 'beta_regularizer': self.beta_regularizer.get_config() if self.beta_regularizer else", "3, [128, 128, 512], stage=3, block='c') x = identity_block(x, 3,", "regularizers from keras.engine import InputSpec, Layer from keras.initializers import random_normal", "256], stage=2, block='c') # 150,150,256 -> 75,75,512 x = conv_block(x,", "kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2c')(x) x = BatchNormalization(name=bn_name_base + '2c')(x) x", "as K from keras import backend as K from keras", "self.running_std, self.beta, self.gamma, epsilon=self.epsilon) else: broadcast_running_mean = K.reshape(self.running_mean, broadcast_shape) broadcast_running_std", "broadcast_running_mean, broadcast_running_std, broadcast_beta, broadcast_gamma, epsilon=self.epsilon) return x_normed def get_config(self): config", "ResNet50的网络部分 #-------------------------------------------------------------# import keras.backend as K from keras import backend", "Activation('relu')(x) return x def classifier_layers(x): # num_rois, 14, 14, 1024", "TimeDistributed, ZeroPadding2D) class BatchNormalization(Layer): def __init__(self, epsilon=1e-3, axis=-1, weights=None, beta_init='zero',", "256, 1024], stage=4, block='b') x = identity_block(x, 
3, [256, 256,", "x = Activation('relu')(x) return x def ResNet50(inputs): #-----------------------------------# # 假设输入进来的图片是600,600,3", "not None: self.set_weights(self.initial_weights) del self.initial_weights self.built = True def call(self,", "2048 x = conv_block_td(x, 3, [512, 512, 2048], stage=5, block='a',", "= identity_block(x, 3, [128, 128, 512], stage=3, block='b') x =", "x = layers.add([x, input_tensor]) x = Activation('relu')(x) return x def", "14, 14, 1024 -> num_rois, 7, 7, 2048 x =", "block='a', strides=(2, 2)) # num_rois, 7, 7, 2048 -> num_rois,", "broadcast_shape) broadcast_running_std = K.reshape(self.running_std, broadcast_shape) broadcast_beta = K.reshape(self.beta, broadcast_shape) broadcast_gamma", "input_tensor]) x = Activation('relu')(x) return x def conv_block_td(input_tensor, kernel_size, filters,", "identity_block(x, 3, [256, 256, 1024], stage=4, block='f') # 最终获得一个38,38,1024的共享特征层 return", "block, strides=(2, 2)): nb_filter1, nb_filter2, nb_filter3 = filters conv_name_base =", "K.reshape(self.running_mean, broadcast_shape) broadcast_running_std = K.reshape(self.running_std, broadcast_shape) broadcast_beta = K.reshape(self.beta, broadcast_shape)", "3, [256, 256, 1024], stage=4, block='f') # 最终获得一个38,38,1024的共享特征层 return x", "str(stage) + block + '_branch' bn_name_base = 'bn' + str(stage)", "x = Activation('relu')(x) x = TimeDistributed(Conv2D(nb_filter3, (1, 1), kernel_initializer='normal'), name=conv_name_base", "name=conv_name_base + '2a')(input_tensor) x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2a')(x) x", "TimeDistributed(BatchNormalization(), name=bn_name_base + '1')(shortcut) x = Add()([x, shortcut]) x =", "7, 2048 -> num_rois, 1, 1, 2048 x = TimeDistributed(AveragePooling2D((7,", "x = Add()([x, input_tensor]) x = Activation('relu')(x) return x def", "True def call(self, x, mask=None): assert self.built, 'Layer must be", "# 150,150,256 -> 75,75,512 x = conv_block(x, 3, [128, 128,", "512], stage=3, block='b') x = 
identity_block(x, 3, [128, 128, 512],", "kernel_size), padding='same', kernel_initializer='normal'), name=conv_name_base + '2b')(x) x = TimeDistributed(BatchNormalization(), name=bn_name_base", "name=conv_name_base + '1')(input_tensor) shortcut = TimeDistributed(BatchNormalization(), name=bn_name_base + '1')(shortcut) x", "strides=strides, kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2a')(input_tensor) x = BatchNormalization(name=bn_name_base + '2a')(x)", "[512, 512, 2048], stage=5, block='a', strides=(2, 2)) # num_rois, 7,", "= Activation('relu')(x) # 300,300,64 -> 150,150,64 x = MaxPooling2D((3, 3),", "3, [64, 64, 256], stage=2, block='a', strides=(1, 1)) x =", "x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f') #", "-> num_rois, 7, 7, 2048 x = conv_block_td(x, 3, [512,", "= [InputSpec(shape=input_shape)] shape = (input_shape[self.axis],) self.gamma = self.add_weight(shape, initializer=self.gamma_init, regularizer=self.gamma_regularizer,", "512], stage=3, block='c') x = identity_block(x, 3, [128, 128, 512],", "# ResNet50的网络部分 #-------------------------------------------------------------# import keras.backend as K from keras import", "(7, 7), strides=(2, 2), name='conv1')(x) x = BatchNormalization(name='bn_conv1')(x) x =", "broadcast_beta, broadcast_gamma, epsilon=self.epsilon) return x_normed def get_config(self): config = {'epsilon':", "'2b')(x) x = Activation('relu')(x) x = Conv2D(filters3, (1, 1), kernel_initializer=random_normal(stddev=0.02),", "'2c')(x) x = BatchNormalization(name=bn_name_base + '2c')(x) x = layers.add([x, input_tensor])", "conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1)) x", "TimeDistributed(BatchNormalization(), name=bn_name_base + '2c')(x) shortcut = TimeDistributed(Conv2D(nb_filter3, (1, 1), strides=strides,", "strides=(2, 2), padding=\"same\")(x) # 150,150,64 -> 150,150,256 x = conv_block(x,", "x = TimeDistributed(Conv2D(nb_filter2, (kernel_size, kernel_size), 
kernel_initializer='normal',padding='same'), name=conv_name_base + '2b')(x) x", "128, 512], stage=3, block='d') # 75,75,512 -> 38,38,1024 x =", "beta_init='zero', gamma_init='one', gamma_regularizer=None, beta_regularizer=None, **kwargs): self.supports_masking = True self.beta_init =", "Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x) x = BatchNormalization(name='bn_conv1')(x) x", "stage, block): filters1, filters2, filters3 = filters conv_name_base = 'res'", "filters conv_name_base = 'res' + str(stage) + block + '_branch'", "reduction_axes[self.axis] broadcast_shape = [1] * len(input_shape) broadcast_shape[self.axis] = input_shape[self.axis] if", "x, broadcast_running_mean, broadcast_running_std, broadcast_beta, broadcast_gamma, epsilon=self.epsilon) return x_normed def get_config(self):", "self.gamma, epsilon=self.epsilon) else: broadcast_running_mean = K.reshape(self.running_mean, broadcast_shape) broadcast_running_std = K.reshape(self.running_std,", "Activation('relu')(x) return x def ResNet50(inputs): #-----------------------------------# # 假设输入进来的图片是600,600,3 #-----------------------------------# img_input", "initializer='zero', name='{}_running_mean'.format(self.name), trainable=False) self.running_std = self.add_weight(shape, initializer='one', name='{}_running_std'.format(self.name), trainable=False) if", "1), kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2c')(x) x = BatchNormalization(name=bn_name_base + '2c')(x)", "initializers.get(gamma_init) self.epsilon = epsilon self.axis = axis self.gamma_regularizer = regularizers.get(gamma_regularizer)", "stage, block, strides=(2, 2)): nb_filter1, nb_filter2, nb_filter3 = filters conv_name_base", "'bn' + str(stage) + block + '_branch' x = TimeDistributed(Conv2D(nb_filter1,", "stage=5, block='b') # num_rois, 7, 7, 2048 -> num_rois, 7,", "filters2, filters3 = filters conv_name_base = 'res' + str(stage) +", "7, 7, 2048 -> num_rois, 7, 7, 2048 x =", "Conv2D, MaxPooling2D, TimeDistributed, 
ZeroPadding2D) class BatchNormalization(Layer): def __init__(self, epsilon=1e-3, axis=-1,", "= identity_block(x, 3, [64, 64, 256], stage=2, block='b') x =", "identity_block(x, 3, [256, 256, 1024], stage=4, block='d') x = identity_block(x,", "Conv2D(filters1, (1, 1), kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2a')(input_tensor) x = BatchNormalization(name=bn_name_base", "block='d') # 75,75,512 -> 38,38,1024 x = conv_block(x, 3, [256,", "random_normal from keras.layers import (Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed,", "filters, stage, block, strides=(2, 2)): nb_filter1, nb_filter2, nb_filter3 = filters", "[256, 256, 1024], stage=4, block='d') x = identity_block(x, 3, [256,", "+ '_branch' x = TimeDistributed(Conv2D(nb_filter1, (1, 1), strides=strides, kernel_initializer='normal'), name=conv_name_base", "+ '1')(input_tensor) shortcut = TimeDistributed(BatchNormalization(), name=bn_name_base + '1')(shortcut) x =", "= identity_block(x, 3, [128, 128, 512], stage=3, block='d') # 75,75,512", "sorted(reduction_axes) == range(K.ndim(x))[:-1]: x_normed = K.batch_normalization( x, self.running_mean, self.running_std, self.beta,", "'gamma_regularizer': self.gamma_regularizer.get_config() if self.gamma_regularizer else None, 'beta_regularizer': self.beta_regularizer.get_config() if self.beta_regularizer", "stage, block): nb_filter1, nb_filter2, nb_filter3 = filters conv_name_base = 'res'", "conv_name_base = 'res' + str(stage) + block + '_branch' bn_name_base", "import (Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D) class BatchNormalization(Layer):", "TimeDistributed(Conv2D(nb_filter3, (1, 1), strides=strides, kernel_initializer='normal'), name=conv_name_base + '1')(input_tensor) shortcut =", "build(self, input_shape): self.input_spec = [InputSpec(shape=input_shape)] shape = (input_shape[self.axis],) self.gamma =", "= regularizers.get(beta_regularizer) self.initial_weights = 
weights super(BatchNormalization, self).__init__(**kwargs) def build(self, input_shape):", "self.gamma = self.add_weight(shape, initializer=self.gamma_init, regularizer=self.gamma_regularizer, name='{}_gamma'.format(self.name), trainable=False) self.beta = self.add_weight(shape,", "-> num_rois, 1, 1, 2048 x = TimeDistributed(AveragePooling2D((7, 7)), name='avg_pool')(x)", "from keras import backend as K from keras import initializers,", "-> 75,75,512 x = conv_block(x, 3, [128, 128, 512], stage=3,", "AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D) class BatchNormalization(Layer): def __init__(self, epsilon=1e-3,", "= identity_block(x, 3, [256, 256, 1024], stage=4, block='f') # 最终获得一个38,38,1024的共享特征层", "stage=5, block='a', strides=(2, 2)) # num_rois, 7, 7, 2048 ->", "= identity_block(x, 3, [256, 256, 1024], stage=4, block='e') x =", "name=conv_name_base + '1')(input_tensor) shortcut = BatchNormalization(name=bn_name_base + '1')(shortcut) x =", "else None, 'beta_regularizer': self.beta_regularizer.get_config() if self.beta_regularizer else None} base_config =", "= True self.beta_init = initializers.get(beta_init) self.gamma_init = initializers.get(gamma_init) self.epsilon =", "+ '2c')(x) shortcut = Conv2D(filters3, (1, 1), strides=strides, kernel_initializer=random_normal(stddev=0.02), name=conv_name_base", "str(stage) + block + '_branch' x = Conv2D(filters1, (1, 1),", "filters3 = filters conv_name_base = 'res' + str(stage) + block", "= TimeDistributed(BatchNormalization(), name=bn_name_base + '2a')(x) x = Activation('relu')(x) x =", "= Conv2D(filters3, (1, 1), kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2c')(x) x =", "self.built, 'Layer must be built before being called' input_shape =", "from keras import initializers, layers, regularizers from keras.engine import InputSpec,", "class BatchNormalization(Layer): def __init__(self, epsilon=1e-3, axis=-1, weights=None, beta_init='zero', gamma_init='one', 
gamma_regularizer=None,", "[256, 256, 1024], stage=4, block='c') x = identity_block(x, 3, [256,", "3), strides=(2, 2), padding=\"same\")(x) # 150,150,64 -> 150,150,256 x =", "kernel_initializer='normal'), name=conv_name_base + '1')(input_tensor) shortcut = TimeDistributed(BatchNormalization(), name=bn_name_base + '1')(shortcut)", "called' input_shape = K.int_shape(x) reduction_axes = list(range(len(input_shape))) del reduction_axes[self.axis] broadcast_shape", "broadcast_gamma, epsilon=self.epsilon) return x_normed def get_config(self): config = {'epsilon': self.epsilon,", "block='b') # num_rois, 7, 7, 2048 -> num_rois, 7, 7,", "self.running_mean = self.add_weight(shape, initializer='zero', name='{}_running_mean'.format(self.name), trainable=False) self.running_std = self.add_weight(shape, initializer='one',", "filters, stage, block, strides=(2, 2)): filters1, filters2, filters3 = filters", "= axis self.gamma_regularizer = regularizers.get(gamma_regularizer) self.beta_regularizer = regularizers.get(beta_regularizer) self.initial_weights =", "broadcast_shape = [1] * len(input_shape) broadcast_shape[self.axis] = input_shape[self.axis] if sorted(reduction_axes)", "self.running_mean, self.running_std, self.beta, self.gamma, epsilon=self.epsilon) else: broadcast_running_mean = K.reshape(self.running_mean, broadcast_shape)", "x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d') x" ]
[ "def authrization_heder_token( api_key: str = Depends(APIKeyHeader(name=\"Authorization\")), ) -> str: try:", "fastapi import Depends, HTTPException from fastapi.security import APIKeyHeader from sqlalchemy.orm", "raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, detail=\"unsupported authorization type\", ) return token async", "import APIKeyHeader from sqlalchemy.orm import Session from starlette import status", "ValueError: raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, detail=\"unsupported authorization type\", ) if token_prefix", "security from app.db.session import SessionLocal JWT_TOKEN_PREFIX = \"Token\" # noqa:", "raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, detail=\"unsupported authorization type\", ) if token_prefix !=", "except ValueError: raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, detail=\"unsupported authorization type\", ) if", "import security from app.db.session import SessionLocal JWT_TOKEN_PREFIX = \"Token\" #", "user = crud.user.get_user_by_id(db, int(user_id)) if not user: raise HTTPException(status_code=404, detail=\"User", "api_key.split(\" \") except ValueError: raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, detail=\"unsupported authorization type\",", "from typing import Generator from fastapi import Depends, HTTPException from", "token: str = Depends(authrization_heder_token), db: Session = Depends(get_db) ) ->", "from app import crud, models from app.core import security from", "<gh_stars>0 from typing import Generator from fastapi import Depends, HTTPException", ") -> models.User: user_id = security.get_user_id_from_token(token=token) user = crud.user.get_user_by_id(db, int(user_id))", "api_key: str = Depends(APIKeyHeader(name=\"Authorization\")), ) -> str: try: token_prefix, token", "authorization type\", ) if token_prefix != JWT_TOKEN_PREFIX: raise HTTPException( status_code=status.HTTP_403_FORBIDDEN,", "import Depends, HTTPException from fastapi.security import 
APIKeyHeader from sqlalchemy.orm import", "def get_current_user( token: str = Depends(authrization_heder_token), db: Session = Depends(get_db)", "models from app.core import security from app.db.session import SessionLocal JWT_TOKEN_PREFIX", "= security.get_user_id_from_token(token=token) user = crud.user.get_user_by_id(db, int(user_id)) if not user: raise", "app.db.session import SessionLocal JWT_TOKEN_PREFIX = \"Token\" # noqa: S105 def", "JWT_TOKEN_PREFIX: raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, detail=\"unsupported authorization type\", ) return token", "= \"Token\" # noqa: S105 def get_db() -> Generator: db", "= SessionLocal() try: yield db finally: db.close() def authrization_heder_token( api_key:", "db finally: db.close() def authrization_heder_token( api_key: str = Depends(APIKeyHeader(name=\"Authorization\")), )", "if not user: raise HTTPException(status_code=404, detail=\"User not found\") return user", "Depends(APIKeyHeader(name=\"Authorization\")), ) -> str: try: token_prefix, token = api_key.split(\" \")", "security.get_user_id_from_token(token=token) user = crud.user.get_user_by_id(db, int(user_id)) if not user: raise HTTPException(status_code=404,", "async def get_current_user( token: str = Depends(authrization_heder_token), db: Session =", "finally: db.close() def authrization_heder_token( api_key: str = Depends(APIKeyHeader(name=\"Authorization\")), ) ->", "token_prefix, token = api_key.split(\" \") except ValueError: raise HTTPException( status_code=status.HTTP_403_FORBIDDEN,", "str = Depends(APIKeyHeader(name=\"Authorization\")), ) -> str: try: token_prefix, token =", "= crud.user.get_user_by_id(db, int(user_id)) if not user: raise HTTPException(status_code=404, detail=\"User not", "HTTPException from fastapi.security import APIKeyHeader from sqlalchemy.orm import Session from", ") -> str: try: token_prefix, token = api_key.split(\" \") except", "str: try: token_prefix, token = api_key.split(\" \") except ValueError: raise", 
"HTTPException( status_code=status.HTTP_403_FORBIDDEN, detail=\"unsupported authorization type\", ) return token async def", "detail=\"unsupported authorization type\", ) if token_prefix != JWT_TOKEN_PREFIX: raise HTTPException(", "-> Generator: db = SessionLocal() try: yield db finally: db.close()", "type\", ) if token_prefix != JWT_TOKEN_PREFIX: raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, detail=\"unsupported", "get_db() -> Generator: db = SessionLocal() try: yield db finally:", "db.close() def authrization_heder_token( api_key: str = Depends(APIKeyHeader(name=\"Authorization\")), ) -> str:", "\"Token\" # noqa: S105 def get_db() -> Generator: db =", "status_code=status.HTTP_403_FORBIDDEN, detail=\"unsupported authorization type\", ) return token async def get_current_user(", "SessionLocal JWT_TOKEN_PREFIX = \"Token\" # noqa: S105 def get_db() ->", "db = SessionLocal() try: yield db finally: db.close() def authrization_heder_token(", "detail=\"unsupported authorization type\", ) return token async def get_current_user( token:", "def get_db() -> Generator: db = SessionLocal() try: yield db", "import crud, models from app.core import security from app.db.session import", "token_prefix != JWT_TOKEN_PREFIX: raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, detail=\"unsupported authorization type\", )", "= Depends(authrization_heder_token), db: Session = Depends(get_db) ) -> models.User: user_id", "typing import Generator from fastapi import Depends, HTTPException from fastapi.security", "Session = Depends(get_db) ) -> models.User: user_id = security.get_user_id_from_token(token=token) user", "Depends(get_db) ) -> models.User: user_id = security.get_user_id_from_token(token=token) user = crud.user.get_user_by_id(db,", ") if token_prefix != JWT_TOKEN_PREFIX: raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, detail=\"unsupported authorization", "S105 def get_db() -> Generator: db = SessionLocal() try: yield", ") return token async def 
get_current_user( token: str = Depends(authrization_heder_token),", "Session from starlette import status from app import crud, models", "from fastapi.security import APIKeyHeader from sqlalchemy.orm import Session from starlette", "try: token_prefix, token = api_key.split(\" \") except ValueError: raise HTTPException(", "from app.core import security from app.db.session import SessionLocal JWT_TOKEN_PREFIX =", "return token async def get_current_user( token: str = Depends(authrization_heder_token), db:", "import SessionLocal JWT_TOKEN_PREFIX = \"Token\" # noqa: S105 def get_db()", "from app.db.session import SessionLocal JWT_TOKEN_PREFIX = \"Token\" # noqa: S105", "int(user_id)) if not user: raise HTTPException(status_code=404, detail=\"User not found\") return", "noqa: S105 def get_db() -> Generator: db = SessionLocal() try:", "token = api_key.split(\" \") except ValueError: raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, detail=\"unsupported", "!= JWT_TOKEN_PREFIX: raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, detail=\"unsupported authorization type\", ) return", "crud, models from app.core import security from app.db.session import SessionLocal", "Generator from fastapi import Depends, HTTPException from fastapi.security import APIKeyHeader", "yield db finally: db.close() def authrization_heder_token( api_key: str = Depends(APIKeyHeader(name=\"Authorization\")),", "app.core import security from app.db.session import SessionLocal JWT_TOKEN_PREFIX = \"Token\"", "# noqa: S105 def get_db() -> Generator: db = SessionLocal()", "status from app import crud, models from app.core import security", "app import crud, models from app.core import security from app.db.session", "fastapi.security import APIKeyHeader from sqlalchemy.orm import Session from starlette import", "crud.user.get_user_by_id(db, int(user_id)) if not user: raise HTTPException(status_code=404, detail=\"User not found\")", "SessionLocal() try: yield db finally: db.close() def 
authrization_heder_token( api_key: str", "user_id = security.get_user_id_from_token(token=token) user = crud.user.get_user_by_id(db, int(user_id)) if not user:", "if token_prefix != JWT_TOKEN_PREFIX: raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, detail=\"unsupported authorization type\",", "get_current_user( token: str = Depends(authrization_heder_token), db: Session = Depends(get_db) )", "starlette import status from app import crud, models from app.core", "APIKeyHeader from sqlalchemy.orm import Session from starlette import status from", "-> str: try: token_prefix, token = api_key.split(\" \") except ValueError:", "Depends, HTTPException from fastapi.security import APIKeyHeader from sqlalchemy.orm import Session", "from sqlalchemy.orm import Session from starlette import status from app", "= Depends(APIKeyHeader(name=\"Authorization\")), ) -> str: try: token_prefix, token = api_key.split(\"", "import Session from starlette import status from app import crud,", "db: Session = Depends(get_db) ) -> models.User: user_id = security.get_user_id_from_token(token=token)", "from starlette import status from app import crud, models from", "-> models.User: user_id = security.get_user_id_from_token(token=token) user = crud.user.get_user_by_id(db, int(user_id)) if", "JWT_TOKEN_PREFIX = \"Token\" # noqa: S105 def get_db() -> Generator:", "HTTPException( status_code=status.HTTP_403_FORBIDDEN, detail=\"unsupported authorization type\", ) if token_prefix != JWT_TOKEN_PREFIX:", "Generator: db = SessionLocal() try: yield db finally: db.close() def", "status_code=status.HTTP_403_FORBIDDEN, detail=\"unsupported authorization type\", ) if token_prefix != JWT_TOKEN_PREFIX: raise", "type\", ) return token async def get_current_user( token: str =", "import status from app import crud, models from app.core import", "str = Depends(authrization_heder_token), db: Session = Depends(get_db) ) -> models.User:", "\") except ValueError: raise HTTPException( 
status_code=status.HTTP_403_FORBIDDEN, detail=\"unsupported authorization type\", )", "sqlalchemy.orm import Session from starlette import status from app import", "= api_key.split(\" \") except ValueError: raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, detail=\"unsupported authorization", "try: yield db finally: db.close() def authrization_heder_token( api_key: str =", "= Depends(get_db) ) -> models.User: user_id = security.get_user_id_from_token(token=token) user =", "from fastapi import Depends, HTTPException from fastapi.security import APIKeyHeader from", "Depends(authrization_heder_token), db: Session = Depends(get_db) ) -> models.User: user_id =", "authrization_heder_token( api_key: str = Depends(APIKeyHeader(name=\"Authorization\")), ) -> str: try: token_prefix,", "token async def get_current_user( token: str = Depends(authrization_heder_token), db: Session", "import Generator from fastapi import Depends, HTTPException from fastapi.security import", "models.User: user_id = security.get_user_id_from_token(token=token) user = crud.user.get_user_by_id(db, int(user_id)) if not", "authorization type\", ) return token async def get_current_user( token: str" ]
[ "address_overwrites: Dict[str, Address], development_environment: ContractDevEnvironment = ContractDevEnvironment.DEMO, contracts_version: Optional[str] =", "from typing import Dict, List, Tuple import structlog from eth_utils", "BlockNumber, ChainID, Optional from raiden_contracts.contract_manager import ( ContractDevEnvironment, ContractManager, contracts_precompiled_path,", "start_block = BlockNumber(0) else: start_block = BlockNumber( max(0, min(contract_data[\"contracts\"][c][\"block_number\"] for", "address_overwrites.get(c) or to_canonical_address(contract_data[\"contracts\"][c][\"address\"]) ) for c in contracts } #", "be overwritten by the additional parameters. Args: chain_id: The chain", "to look for deployed contracts. contracts: The list of contracts", "to_canonical_address from raiden.utils.typing import Address, BlockNumber, ChainID, Optional from raiden_contracts.contract_manager", "c: ( address_overwrites.get(c) or to_canonical_address(contract_data[\"contracts\"][c][\"address\"]) ) for c in contracts", "contracts: The list of contracts which should be considered address_overwrites:", "import structlog from eth_utils import to_canonical_address from raiden.utils.typing import Address,", "for deployed contracts. contracts: The list of contracts which should", "structlog.get_logger(__name__) CONTRACT_MANAGER = ContractManager(contracts_precompiled_path()) def get_contract_addresses_and_start_block( chain_id: ChainID, contracts: List[str],", "The default contracts can be overwritten by the additional parameters.", "in the requested deployment. contracts_version: The version of the contracts", "import to_canonical_address from raiden.utils.typing import Address, BlockNumber, ChainID, Optional from", "default registry\", contracts_version=contracts_version, ) sys.exit(1) # Get deployed addresses for", "deployed addresses for those contracts which have no overwrites addresses", "contracts to use. 
Returns: A dictionary with the contract addresses", "deployed contracts were found at the default registry\", contracts_version=contracts_version, )", "get_contract_addresses_and_start_block( chain_id: ChainID, contracts: List[str], address_overwrites: Dict[str, Address], development_environment: ContractDevEnvironment", "Dict of addresses which should be used instead of the", "and contracts version. The default contracts can be overwritten by", "dictionary with the contract addresses and start block for the", "The chain id to look for deployed contracts. contracts: The", "addresses which should be used instead of the ones in", "log.error( \"No deployed contracts were found at the default registry\",", "ChainID, contracts: List[str], address_overwrites: Dict[str, Address], development_environment: ContractDevEnvironment = ContractDevEnvironment.DEMO,", ") -> Tuple[Dict[str, Address], BlockNumber]: \"\"\"Returns contract addresses and start", "chain and contracts version. The default contracts can be overwritten", "and start query block for a given chain and contracts", "or to_canonical_address(contract_data[\"contracts\"][c][\"address\"]) ) for c in contracts } # Set", "= None, ) -> Tuple[Dict[str, Address], BlockNumber]: \"\"\"Returns contract addresses", "-> Tuple[Dict[str, Address], BlockNumber]: \"\"\"Returns contract addresses and start query", "addresses and start block for the given information \"\"\" contract_data", "structlog from eth_utils import to_canonical_address from raiden.utils.typing import Address, BlockNumber,", "# Set start block to zero if any contract addresses", "BlockNumber]: \"\"\"Returns contract addresses and start query block for a", "BlockNumber(0) else: start_block = BlockNumber( max(0, min(contract_data[\"contracts\"][c][\"block_number\"] for c in", "A dictionary with the contract addresses and start block for", "chain id to look for deployed contracts. contracts: The list", "deployment. 
contracts_version: The version of the contracts to use. Returns:", "{ c: ( address_overwrites.get(c) or to_canonical_address(contract_data[\"contracts\"][c][\"address\"]) ) for c in", "zero if any contract addresses are overwritten if any(address_overwrites.values()): start_block", "information \"\"\" contract_data = get_contracts_deployment_info( chain_id=chain_id, version=contracts_version, development_environment=development_environment, ) if", "start block to zero if any contract addresses are overwritten", "for c in contracts } # Set start block to", "import Address, BlockNumber, ChainID, Optional from raiden_contracts.contract_manager import ( ContractDevEnvironment,", "get_contracts_deployment_info( chain_id=chain_id, version=contracts_version, development_environment=development_environment, ) if not contract_data: log.error( \"No", "from eth_utils import to_canonical_address from raiden.utils.typing import Address, BlockNumber, ChainID,", "ContractDevEnvironment.DEMO, contracts_version: Optional[str] = None, ) -> Tuple[Dict[str, Address], BlockNumber]:", "block for a given chain and contracts version. The default", "block to zero if any contract addresses are overwritten if", "which have no overwrites addresses = { c: ( address_overwrites.get(c)", "= structlog.get_logger(__name__) CONTRACT_MANAGER = ContractManager(contracts_precompiled_path()) def get_contract_addresses_and_start_block( chain_id: ChainID, contracts:", "version. The default contracts can be overwritten by the additional", "the contract addresses and start block for the given information", "contracts_precompiled_path, get_contracts_deployment_info, ) log = structlog.get_logger(__name__) CONTRACT_MANAGER = ContractManager(contracts_precompiled_path()) def", "if any(address_overwrites.values()): start_block = BlockNumber(0) else: start_block = BlockNumber( max(0,", "deployed contracts. 
contracts: The list of contracts which should be", "contract addresses and start query block for a given chain", "max(0, min(contract_data[\"contracts\"][c][\"block_number\"] for c in contracts)) ) return addresses, start_block", "to_canonical_address(contract_data[\"contracts\"][c][\"address\"]) ) for c in contracts } # Set start", "List, Tuple import structlog from eth_utils import to_canonical_address from raiden.utils.typing", "not contract_data: log.error( \"No deployed contracts were found at the", "version of the contracts to use. Returns: A dictionary with", "be considered address_overwrites: Dict of addresses which should be used", "contract_data = get_contracts_deployment_info( chain_id=chain_id, version=contracts_version, development_environment=development_environment, ) if not contract_data:", "overwritten if any(address_overwrites.values()): start_block = BlockNumber(0) else: start_block = BlockNumber(", "address_overwrites: Dict of addresses which should be used instead of", "contracts which have no overwrites addresses = { c: (", "import Dict, List, Tuple import structlog from eth_utils import to_canonical_address", "contract_data: log.error( \"No deployed contracts were found at the default", "no overwrites addresses = { c: ( address_overwrites.get(c) or to_canonical_address(contract_data[\"contracts\"][c][\"address\"])", "overwritten by the additional parameters. 
Args: chain_id: The chain id", "Get deployed addresses for those contracts which have no overwrites", "Optional from raiden_contracts.contract_manager import ( ContractDevEnvironment, ContractManager, contracts_precompiled_path, get_contracts_deployment_info, )", "( address_overwrites.get(c) or to_canonical_address(contract_data[\"contracts\"][c][\"address\"]) ) for c in contracts }", "given information \"\"\" contract_data = get_contracts_deployment_info( chain_id=chain_id, version=contracts_version, development_environment=development_environment, )", "from raiden_contracts.contract_manager import ( ContractDevEnvironment, ContractManager, contracts_precompiled_path, get_contracts_deployment_info, ) log", "= BlockNumber(0) else: start_block = BlockNumber( max(0, min(contract_data[\"contracts\"][c][\"block_number\"] for c", "of the ones in the requested deployment. contracts_version: The version", "= get_contracts_deployment_info( chain_id=chain_id, version=contracts_version, development_environment=development_environment, ) if not contract_data: log.error(", "Optional[str] = None, ) -> Tuple[Dict[str, Address], BlockNumber]: \"\"\"Returns contract", "default contracts can be overwritten by the additional parameters. Args:", "parameters. 
Args: chain_id: The chain id to look for deployed", "considered address_overwrites: Dict of addresses which should be used instead", "contracts_version=contracts_version, ) sys.exit(1) # Get deployed addresses for those contracts", ") sys.exit(1) # Get deployed addresses for those contracts which", "def get_contract_addresses_and_start_block( chain_id: ChainID, contracts: List[str], address_overwrites: Dict[str, Address], development_environment:", "CONTRACT_MANAGER = ContractManager(contracts_precompiled_path()) def get_contract_addresses_and_start_block( chain_id: ChainID, contracts: List[str], address_overwrites:", "Tuple[Dict[str, Address], BlockNumber]: \"\"\"Returns contract addresses and start query block", "query block for a given chain and contracts version. The", "chain_id: The chain id to look for deployed contracts. contracts:", "which should be used instead of the ones in the", "get_contracts_deployment_info, ) log = structlog.get_logger(__name__) CONTRACT_MANAGER = ContractManager(contracts_precompiled_path()) def get_contract_addresses_and_start_block(", "addresses for those contracts which have no overwrites addresses =", "= ContractManager(contracts_precompiled_path()) def get_contract_addresses_and_start_block( chain_id: ChainID, contracts: List[str], address_overwrites: Dict[str,", "Address, BlockNumber, ChainID, Optional from raiden_contracts.contract_manager import ( ContractDevEnvironment, ContractManager,", "the contracts to use. Returns: A dictionary with the contract", "requested deployment. contracts_version: The version of the contracts to use.", "additional parameters. 
Args: chain_id: The chain id to look for", "with the contract addresses and start block for the given", "ContractManager(contracts_precompiled_path()) def get_contract_addresses_and_start_block( chain_id: ChainID, contracts: List[str], address_overwrites: Dict[str, Address],", "have no overwrites addresses = { c: ( address_overwrites.get(c) or", "in contracts } # Set start block to zero if", "from raiden.utils.typing import Address, BlockNumber, ChainID, Optional from raiden_contracts.contract_manager import", "\"\"\"Returns contract addresses and start query block for a given", "\"No deployed contracts were found at the default registry\", contracts_version=contracts_version,", "id to look for deployed contracts. contracts: The list of", "development_environment: ContractDevEnvironment = ContractDevEnvironment.DEMO, contracts_version: Optional[str] = None, ) ->", "contracts which should be considered address_overwrites: Dict of addresses which", "to use. Returns: A dictionary with the contract addresses and", "the requested deployment. contracts_version: The version of the contracts to", "the given information \"\"\" contract_data = get_contracts_deployment_info( chain_id=chain_id, version=contracts_version, development_environment=development_environment,", "import ( ContractDevEnvironment, ContractManager, contracts_precompiled_path, get_contracts_deployment_info, ) log = structlog.get_logger(__name__)", "contracts can be overwritten by the additional parameters. Args: chain_id:", "be used instead of the ones in the requested deployment.", "which should be considered address_overwrites: Dict of addresses which should", "at the default registry\", contracts_version=contracts_version, ) sys.exit(1) # Get deployed", "The list of contracts which should be considered address_overwrites: Dict", "start_block = BlockNumber( max(0, min(contract_data[\"contracts\"][c][\"block_number\"] for c in contracts)) )", "instead of the ones in the requested deployment. 
contracts_version: The", "contracts version. The default contracts can be overwritten by the", "of addresses which should be used instead of the ones", "eth_utils import to_canonical_address from raiden.utils.typing import Address, BlockNumber, ChainID, Optional", "ones in the requested deployment. contracts_version: The version of the", "= BlockNumber( max(0, min(contract_data[\"contracts\"][c][\"block_number\"] for c in contracts)) ) return", "sys.exit(1) # Get deployed addresses for those contracts which have", "Tuple import structlog from eth_utils import to_canonical_address from raiden.utils.typing import", "for the given information \"\"\" contract_data = get_contracts_deployment_info( chain_id=chain_id, version=contracts_version,", ") log = structlog.get_logger(__name__) CONTRACT_MANAGER = ContractManager(contracts_precompiled_path()) def get_contract_addresses_and_start_block( chain_id:", "addresses are overwritten if any(address_overwrites.values()): start_block = BlockNumber(0) else: start_block", "ChainID, Optional from raiden_contracts.contract_manager import ( ContractDevEnvironment, ContractManager, contracts_precompiled_path, get_contracts_deployment_info,", "for a given chain and contracts version. The default contracts", "of the contracts to use. Returns: A dictionary with the", "contracts: List[str], address_overwrites: Dict[str, Address], development_environment: ContractDevEnvironment = ContractDevEnvironment.DEMO, contracts_version:", "can be overwritten by the additional parameters. Args: chain_id: The", "a given chain and contracts version. 
The default contracts can", "start query block for a given chain and contracts version.", "= { c: ( address_overwrites.get(c) or to_canonical_address(contract_data[\"contracts\"][c][\"address\"]) ) for c", "Set start block to zero if any contract addresses are", "to zero if any contract addresses are overwritten if any(address_overwrites.values()):", "if any contract addresses are overwritten if any(address_overwrites.values()): start_block =", "Address], development_environment: ContractDevEnvironment = ContractDevEnvironment.DEMO, contracts_version: Optional[str] = None, )", "ContractDevEnvironment, ContractManager, contracts_precompiled_path, get_contracts_deployment_info, ) log = structlog.get_logger(__name__) CONTRACT_MANAGER =", "and start block for the given information \"\"\" contract_data =", "addresses = { c: ( address_overwrites.get(c) or to_canonical_address(contract_data[\"contracts\"][c][\"address\"]) ) for", "any(address_overwrites.values()): start_block = BlockNumber(0) else: start_block = BlockNumber( max(0, min(contract_data[\"contracts\"][c][\"block_number\"]", "of contracts which should be considered address_overwrites: Dict of addresses", "used instead of the ones in the requested deployment. 
contracts_version:", "those contracts which have no overwrites addresses = { c:", "Address], BlockNumber]: \"\"\"Returns contract addresses and start query block for", "found at the default registry\", contracts_version=contracts_version, ) sys.exit(1) # Get", "raiden.utils.typing import Address, BlockNumber, ChainID, Optional from raiden_contracts.contract_manager import (", "ContractDevEnvironment = ContractDevEnvironment.DEMO, contracts_version: Optional[str] = None, ) -> Tuple[Dict[str,", "block for the given information \"\"\" contract_data = get_contracts_deployment_info( chain_id=chain_id,", "the default registry\", contracts_version=contracts_version, ) sys.exit(1) # Get deployed addresses", "registry\", contracts_version=contracts_version, ) sys.exit(1) # Get deployed addresses for those", "the additional parameters. Args: chain_id: The chain id to look", ") if not contract_data: log.error( \"No deployed contracts were found", "were found at the default registry\", contracts_version=contracts_version, ) sys.exit(1) #", "for those contracts which have no overwrites addresses = {", "contract addresses are overwritten if any(address_overwrites.values()): start_block = BlockNumber(0) else:", "contracts } # Set start block to zero if any", "contract addresses and start block for the given information \"\"\"", "addresses and start query block for a given chain and", "the ones in the requested deployment. 
contracts_version: The version of", "None, ) -> Tuple[Dict[str, Address], BlockNumber]: \"\"\"Returns contract addresses and", "Dict, List, Tuple import structlog from eth_utils import to_canonical_address from", "if not contract_data: log.error( \"No deployed contracts were found at", "Returns: A dictionary with the contract addresses and start block", "Args: chain_id: The chain id to look for deployed contracts.", "should be used instead of the ones in the requested", ") for c in contracts } # Set start block", "} # Set start block to zero if any contract", "else: start_block = BlockNumber( max(0, min(contract_data[\"contracts\"][c][\"block_number\"] for c in contracts))", "chain_id=chain_id, version=contracts_version, development_environment=development_environment, ) if not contract_data: log.error( \"No deployed", "\"\"\" contract_data = get_contracts_deployment_info( chain_id=chain_id, version=contracts_version, development_environment=development_environment, ) if not", "by the additional parameters. Args: chain_id: The chain id to", "contracts_version: The version of the contracts to use. Returns: A", "ContractManager, contracts_precompiled_path, get_contracts_deployment_info, ) log = structlog.get_logger(__name__) CONTRACT_MANAGER = ContractManager(contracts_precompiled_path())", "contracts_version: Optional[str] = None, ) -> Tuple[Dict[str, Address], BlockNumber]: \"\"\"Returns", "development_environment=development_environment, ) if not contract_data: log.error( \"No deployed contracts were", "version=contracts_version, development_environment=development_environment, ) if not contract_data: log.error( \"No deployed contracts", "should be considered address_overwrites: Dict of addresses which should be", "c in contracts } # Set start block to zero", "given chain and contracts version. 
The default contracts can be", "any contract addresses are overwritten if any(address_overwrites.values()): start_block = BlockNumber(0)", "are overwritten if any(address_overwrites.values()): start_block = BlockNumber(0) else: start_block =", "log = structlog.get_logger(__name__) CONTRACT_MANAGER = ContractManager(contracts_precompiled_path()) def get_contract_addresses_and_start_block( chain_id: ChainID,", "chain_id: ChainID, contracts: List[str], address_overwrites: Dict[str, Address], development_environment: ContractDevEnvironment =", "use. Returns: A dictionary with the contract addresses and start", "<filename>src/raiden_libs/contract_info.py import sys from typing import Dict, List, Tuple import", "contracts were found at the default registry\", contracts_version=contracts_version, ) sys.exit(1)", "import sys from typing import Dict, List, Tuple import structlog", "typing import Dict, List, Tuple import structlog from eth_utils import", "list of contracts which should be considered address_overwrites: Dict of", "List[str], address_overwrites: Dict[str, Address], development_environment: ContractDevEnvironment = ContractDevEnvironment.DEMO, contracts_version: Optional[str]", "The version of the contracts to use. Returns: A dictionary", "start block for the given information \"\"\" contract_data = get_contracts_deployment_info(", "overwrites addresses = { c: ( address_overwrites.get(c) or to_canonical_address(contract_data[\"contracts\"][c][\"address\"]) )", "# Get deployed addresses for those contracts which have no", "= ContractDevEnvironment.DEMO, contracts_version: Optional[str] = None, ) -> Tuple[Dict[str, Address],", "BlockNumber( max(0, min(contract_data[\"contracts\"][c][\"block_number\"] for c in contracts)) ) return addresses,", "raiden_contracts.contract_manager import ( ContractDevEnvironment, ContractManager, contracts_precompiled_path, get_contracts_deployment_info, ) log =", "look for deployed contracts. 
contracts: The list of contracts which", "( ContractDevEnvironment, ContractManager, contracts_precompiled_path, get_contracts_deployment_info, ) log = structlog.get_logger(__name__) CONTRACT_MANAGER", "sys from typing import Dict, List, Tuple import structlog from", "Dict[str, Address], development_environment: ContractDevEnvironment = ContractDevEnvironment.DEMO, contracts_version: Optional[str] = None,", "contracts. contracts: The list of contracts which should be considered" ]
[ "class of the given dataset. cosine_classifier: A bool. If true,", "use. start_idx: An int Tensor. The index of the first", "2022 The Meta-Dataset Authors. # # Licensed under the Apache", "classifier, or possibly a cosine classifier. Args: embeddings: A Tensor", "2.0 (the \"License\"); # you may not use this file", "Tensor of size [batch size, embedding dim]. num_classes: An integer;", "g = tf.get_variable( 'g', dtype=tf.float32, initializer=tf.ones([num_classes]), trainable=True) b_fc = None", "cosine_classifier: # Each column of the weight matrix may be", "The index of the first class of the given dataset.", "License. # Lint as: python2,python3 \"\"\"Classifier-related code.\"\"\" from __future__ import", "the dataset head to use. start_idx: An int Tensor. The", "make the cosine_logits_multiplier a learnable parameter. Only applies if cosine_classifier", "= tf.gather(num_classes, dataset_idx) w_fc = w_fc[:, start_idx:start_idx + dataset_num_classes] logits", "+ dataset_num_classes] logits = linear_classifier_forward_pass(embeddings, w_fc, None, cosine_classifier, cosine_logits_multiplier, False)", "linear_classifier_forward_pass(embeddings, w_fc, b_fc, cosine_classifier, cosine_logits_multiplier, use_weight_norm): \"\"\"Passes embeddings through the", "# Forward pass through the layer defined by w_fc and", "No weight norm. w_fc = functional_backbones.weight_variable([embedding_dims, num_classes], weight_decay=weight_decay) b_fc =", "embeddings but not the weights. weight_decay: A float; the scalar", "trainable=True) def _do_data_dependent_init(): \"\"\"Returns ops for the data-dependent init of", "b_fc is not None logits = tf.matmul(embeddings, w_fc) + b_fc", "dimension of the classifier layers of the different heads. dataset_idx:", "linear layer defined by w_fc and b_fc. 
Args: embeddings: A", "classifier is used which does not require the bias b_fc.", "w_fc) + b_fc return logits @gin.configurable def linear_classifier(embeddings, num_classes, cosine_classifier,", "logits = tf.matmul(embeddings, w_fc) # Scale the logits as passing", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "= functional_backbones.weight_variable( [embedding_dims, sum(num_classes)], weight_decay=weight_decay) # Select the output \"head\"", "the bias b_fc. cosine_logits_multiplier: A float. Only used if cosine_classifier", "num outputs]. \"\"\" if not cosine_classifier: raise NotImplementedError('`separate_head_linear_classifier` currently '", "if cosine_classifier is True. weight_decay: A float; the scalar multiple", "Each column of the weight matrix may be interpreted as", "initialization has already # happened. data_dependent_init_done = tf.get_variable( 'data_dependent_init_done', initializer=0,", "learnable_scale: A bool. Whether to make the cosine_logits_multiplier a learnable", "weight_decay=weight_decay) b_fc = None if not cosine_classifier: # Also initialize", "[batch size, num outputs]. \"\"\" embedding_dims = embeddings.get_shape().as_list()[-1] if use_weight_norm:", "w_fc, b_fc, cosine_classifier, cosine_logits_multiplier, use_weight_norm): \"\"\"Passes embeddings through the linear", "raise NotImplementedError('`separate_head_linear_classifier` currently ' 'only supports `cosine_classifier` True.') if learnable_scale:", "__future__ import print_function import gin.tf from meta_dataset.models import functional_backbones import", "functional_backbones import tensorflow.compat.v1 as tf def linear_classifier_forward_pass(embeddings, w_fc, b_fc, cosine_classifier,", "multiplies the resulting logits. use_weight_norm: A bool. Whether weight norm", "dtype=tf.float32, initializer=tf.ones([num_classes]), trainable=True) b_fc = None if not cosine_classifier: #", "the License. 
# Lint as: python2,python3 \"\"\"Classifier-related code.\"\"\" from __future__", "multiplies the resulting logits. learnable_scale: A bool. Whether to make", "tf def linear_classifier_forward_pass(embeddings, w_fc, b_fc, cosine_classifier, cosine_logits_multiplier, use_weight_norm): \"\"\"Passes embeddings", "from __future__ import print_function import gin.tf from meta_dataset.models import functional_backbones", "[batch size, embedding dim]. w_fc: A Tensor of size [embedding", "a bias in a data-dependent way. b_fc_init_value = -mean_init *", "[embedding_dims, sum(num_classes)], weight_decay=weight_decay) # Select the output \"head\" to use", "outputs]. \"\"\" if cosine_classifier: # Each column of the weight", "A Tensor of size [batch size, embedding dim]. num_classes: A", "forward pass through this layer. g = tf.get_variable( 'g', dtype=tf.float32,", "by w_fc and b_fc. logits = linear_classifier_forward_pass(embeddings, w_fc, b_fc, cosine_classifier,", "= tf.nn.l2_normalize(embeddings, axis=1, epsilon=1e-3) if not use_weight_norm: # Only normalize", "A bool. Whether weight norm was used. If so, then", "w_fc and b_fc. Args: embeddings: A Tensor of size [batch", "is the cosine # similarity between that embedding and that", "use this file except in compliance with the License. #", "= linear_classifier_forward_pass(embeddings, w_fc, b_fc, cosine_classifier, cosine_logits_multiplier, True) else: # No", "Also initialize a bias in a data-dependent way. b_fc_init_value =", "used which does not require the bias b_fc. cosine_logits_multiplier: A", "of size [num outputs] or []. If cosine_classifier is False,", "[tf.assign(g, g_init_value)] if not cosine_classifier: # Also initialize a bias", "representation. embeddings = tf.nn.l2_normalize(embeddings, axis=1, epsilon=1e-3) if not use_weight_norm: #", "needs to be done in a data-dependent way. 
# It", "done to prevent it from # happening again in the", "If so, then if using cosine classifier, normalize only the", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "import print_function import gin.tf from meta_dataset.models import functional_backbones import tensorflow.compat.v1", "float. Only used if cosine_classifier is True, and multiplies the", "= tf.get_variable( 'b_fc', initializer=tf.zeros([num_classes]), trainable=True) def _do_data_dependent_init(): \"\"\"Returns ops for", "= linear_classifier_forward_pass(embeddings, w_fc, b_fc, cosine_classifier, cosine_logits_multiplier, False) return logits @gin.configurable", "= tf.matmul(embeddings, w_fc) + b_fc return logits @gin.configurable def linear_classifier(embeddings,", "License. # You may obtain a copy of the License", "= -mean_init * g_init_value ops.append(tf.assign(b_fc, b_fc_init_value)) # Mark that the", "way. # It will be overwritten during the first forward", "# similarity between that embedding and that class representation. embeddings", "for an embedding vector belonging to that class is the", "under the License is distributed on an \"AS IS\" BASIS,", "the weights. Returns: logits: A Tensor of size [batch size,", "classification. cosine_classifier: A bool. If true, a cosine classifier is", "A list of integers; the dimension of the classifier layers", "License for the specific language governing permissions and # limitations", "embedding and that class representation. embeddings = tf.nn.l2_normalize(embeddings, axis=1, epsilon=1e-3)", "= None if not cosine_classifier: # Also initialize a bias.", "of size [batch size, embedding dim]. w_fc: A Tensor of", "__future__ import division from __future__ import print_function import gin.tf from", "return tf.group(*ops) # Possibly perform data-dependent init (if it hasn't", "# Data-dependent init values. g_init_value = 1. 
/ tf.sqrt(var_init +", "Only normalize the weights if weight norm was not used.", "Lint as: python2,python3 \"\"\"Classifier-related code.\"\"\" from __future__ import absolute_import from", "dataset head to use. start_idx: An int Tensor. The index", "embeddings = tf.nn.l2_normalize(embeddings, axis=1, epsilon=1e-3) if not use_weight_norm: # Only", "\"\"\"Returns ops for the data-dependent init of g and maybe", "of the first class of the given dataset. cosine_classifier: A", "or possibly a cosine classifier. Args: embeddings: A Tensor of", "from __future__ import division from __future__ import print_function import gin.tf", "[batch size, embedding dim]. num_classes: A list of integers; the", "size, num outputs]. \"\"\" if cosine_classifier: # Each column of", "= tf.nn.moments(output_init, [0]) # Data-dependent init values. g_init_value = 1.", "bool. Whether to make the cosine_logits_multiplier a learnable parameter. Only", "size [batch size, num outputs]. \"\"\" embedding_dims = embeddings.get_shape().as_list()[-1] if", "output \"head\" to use in the forward pass. dataset_num_classes =", "if not use_weight_norm: # Only normalize the weights if weight", "as: python2,python3 \"\"\"Classifier-related code.\"\"\" from __future__ import absolute_import from __future__", "\"\"\" embedding_dims = embeddings.get_shape().as_list()[-1] if use_weight_norm: # A variable to", "None logits = tf.matmul(embeddings, w_fc) + b_fc return logits @gin.configurable", "from __future__ import absolute_import from __future__ import division from __future__", "already). init_op = tf.cond( tf.equal(data_dependent_init_done, 0), _do_data_dependent_init, tf.no_op) with tf.control_dependencies([init_op]):", "b_fc. Args: embeddings: A Tensor of size [batch size, embedding", "in compliance with the License. # You may obtain a", "not the weights. Returns: logits: A Tensor of size [batch", "Only applies if cosine_classifier is True. weight_decay: A float; the", "# happening again in the future. 
ops.append(tf.assign(data_dependent_init_done, 1)) return tf.group(*ops)", "\"\"\"A linear classifier with num_sets heads, for different datasets. Args:", "software # distributed under the License is distributed on an", "dim]. w_fc: A Tensor of size [embedding dim, num outputs].", "not None logits = tf.matmul(embeddings, w_fc) + b_fc return logits", "linear_classifier(embeddings, num_classes, cosine_classifier, cosine_logits_multiplier, use_weight_norm, weight_decay): \"\"\"Forward pass through a", "This init is temporary as it needs to be done", "a data-dependent way. # It will be overwritten during the", "[0]) # Data-dependent init values. g_init_value = 1. / tf.sqrt(var_init", "b_fc_init_value = -mean_init * g_init_value ops.append(tf.assign(b_fc, b_fc_init_value)) # Mark that", "cosine_logits_multiplier = tf.get_variable( 'cosine_scale', initializer=cosine_logits_multiplier, dtype=tf.float32, trainable=True) embedding_dims = embeddings.get_shape().as_list()[-1]", "be None. cosine_classifier: A bool. If true, a cosine classifier", "# expressive. logits *= cosine_logits_multiplier else: assert b_fc is not", "normalization. w_fc *= g / tf.sqrt(tf.reduce_sum(tf.square(w_fc), [0])) # Forward pass", "learnable_scale, weight_decay): \"\"\"A linear classifier with num_sets heads, for different", "index of the dataset head to use. start_idx: An int", "the dimension of the classifier layers of the different heads.", "`cosine_classifier` True.') if learnable_scale: cosine_logits_multiplier = tf.get_variable( 'cosine_scale', initializer=cosine_logits_multiplier, dtype=tf.float32,", "data-dependent initialization is done to prevent it from # happening", "dim, num outputs]. b_fc: Either None, or a Tensor of", "import division from __future__ import print_function import gin.tf from meta_dataset.models", "w_fc = tf.nn.l2_normalize(w_fc, axis=0, epsilon=1e-3) logits = tf.matmul(embeddings, w_fc) #", "to make the cosine_logits_multiplier a learnable parameter. 
Only applies if", "gin.tf from meta_dataset.models import functional_backbones import tensorflow.compat.v1 as tf def", "trainable=False) w_fc = tf.get_variable( 'w_fc', [embedding_dims, num_classes], initializer=tf.random_normal_initializer(0, 0.05), trainable=True)", "the weights if weight norm was not used. w_fc =", "cosine_logits_multiplier: A float. Only used if cosine_classifier is True, and", "weight_decay: A float; the scalar multiple on the L2 regularization", "in a data-dependent way. b_fc_init_value = -mean_init * g_init_value ops.append(tf.assign(b_fc,", "functional_backbones.weight_variable([embedding_dims, num_classes], weight_decay=weight_decay) b_fc = None if not cosine_classifier: #", "to keep track of whether the initialization has already #", "def linear_classifier(embeddings, num_classes, cosine_classifier, cosine_logits_multiplier, use_weight_norm, weight_decay): \"\"\"Forward pass through", "# Only normalize the weights if weight norm was not", "and that class representation. embeddings = tf.nn.l2_normalize(embeddings, axis=1, epsilon=1e-3) if", "of size [batch size, embedding dim]. num_classes: A list of", "logits *= cosine_logits_multiplier else: assert b_fc is not None logits", "tf.get_variable( 'b_fc', initializer=tf.zeros([num_classes]), trainable=True) def _do_data_dependent_init(): \"\"\"Returns ops for the", "not require the bias b_fc. cosine_logits_multiplier: A float. Only used", "outputs]. \"\"\" embedding_dims = embeddings.get_shape().as_list()[-1] if use_weight_norm: # A variable", "start_idx, cosine_classifier, cosine_logits_multiplier, learnable_scale, weight_decay): \"\"\"A linear classifier with num_sets", "permissions and # limitations under the License. # Lint as:", "initializer=tf.ones([num_classes]), trainable=True) b_fc = None if not cosine_classifier: # Also", "output_init = tf.matmul(embeddings, w_fc_normalized) mean_init, var_init = tf.nn.moments(output_init, [0]) #", "layer defined by w_fc and b_fc. 
def linear_classifier_forward_pass(embeddings, w_fc, b_fc, cosine_classifier,
                                   cosine_logits_multiplier, use_weight_norm):
  """Passes embeddings through the linear layer defined by w_fc and b_fc.

  Args:
    embeddings: A Tensor of size [batch size, embedding dim].
    w_fc: A Tensor of size [embedding dim, num outputs].
    b_fc: Either None, or a Tensor of size [num outputs] or []. If
      cosine_classifier is False, it can not be None.
    cosine_classifier: A bool. If true, a cosine classifier is used which does
      not require the bias b_fc.
    cosine_logits_multiplier: A float. Only used if cosine_classifier is True,
      and multiplies the resulting logits.
    use_weight_norm: A bool. Whether weight norm was used. If so, then if using
      cosine classifier, normalize only the embeddings but not the weights.

  Returns:
    logits: A Tensor of size [batch size, num outputs].
  """
  if not cosine_classifier:
    # Plain affine layer: the bias is required in this branch.
    assert b_fc is not None
    return tf.matmul(embeddings, w_fc) + b_fc

  # Cosine classifier: each column of the weight matrix is read as a class
  # representation (of the same dimensionality as the embedding space), and
  # the logit for an embedding belonging to a class is the cosine similarity
  # between that embedding and that class representation.
  unit_embeddings = tf.nn.l2_normalize(embeddings, axis=1, epsilon=1e-3)
  if use_weight_norm:
    # Weight norm already keeps the columns normalized; leave them alone.
    class_directions = w_fc
  else:
    class_directions = tf.nn.l2_normalize(w_fc, axis=0, epsilon=1e-3)
  cosine_similarities = tf.matmul(unit_embeddings, class_directions)
  # Scale the logits: feeding raw numbers in [-1, 1] to softmax is not very
  # expressive.
  return cosine_similarities * cosine_logits_multiplier
@gin.configurable
def linear_classifier(embeddings, num_classes, cosine_classifier,
                      cosine_logits_multiplier, use_weight_norm, weight_decay):
  """Forward pass through a linear classifier, or possibly a cosine classifier.

  Args:
    embeddings: A Tensor of size [batch size, embedding dim].
    num_classes: An integer; the dimension of the classification.
    cosine_classifier: A bool. If true, a cosine classifier is used, which does
      not require a bias.
    cosine_logits_multiplier: A float. Only used if cosine_classifier is True,
      and multiplies the resulting logits.
    use_weight_norm: A bool. Whether weight norm was used. If so, then if using
      cosine classifier, normalize only the embeddings but not the weights.
    weight_decay: A float; the scalar multiple on the L2 regularization of the
      weight matrix.

  Returns:
    logits: A Tensor of size [batch size, num outputs].
  """
  embedding_dims = embeddings.get_shape().as_list()[-1]

  if use_weight_norm:
    # A variable to keep track of whether the initialization has already
    # happened.
    data_dependent_init_done = tf.get_variable(
        'data_dependent_init_done', initializer=0, dtype=tf.int32,
        trainable=False)

    w_fc = tf.get_variable(
        'w_fc', [embedding_dims, num_classes],
        initializer=tf.random_normal_initializer(0, 0.05), trainable=True)
    # This init is temporary as it needs to be done in a data-dependent way.
    # It will be overwritten during the first forward pass through this layer.
    g = tf.get_variable(
        'g', dtype=tf.float32, initializer=tf.ones([num_classes]),
        trainable=True)
    b_fc = None
    if not cosine_classifier:
      # Also initialize a bias.
      b_fc = tf.get_variable(
          'b_fc', initializer=tf.zeros([num_classes]), trainable=True)

    def _do_data_dependent_init():
      """Returns ops for the data-dependent init of g and maybe b_fc."""
      w_fc_normalized = tf.nn.l2_normalize(w_fc.read_value(), [0])
      output_init = tf.matmul(embeddings, w_fc_normalized)
      mean_init, var_init = tf.nn.moments(output_init, [0])
      # Data-dependent init values.
      g_init_value = 1. / tf.sqrt(var_init + 1e-10)
      ops = [tf.assign(g, g_init_value)]
      if not cosine_classifier:
        # Also initialize a bias in a data-dependent way.
        b_fc_init_value = -mean_init * g_init_value
        ops.append(tf.assign(b_fc, b_fc_init_value))
      # Mark that the data-dependent initialization is done to prevent it from
      # happening again in the future.
      ops.append(tf.assign(data_dependent_init_done, 1))
      return tf.group(*ops)

    # Possibly perform data-dependent init (if it hasn't been done already).
    init_op = tf.cond(
        tf.equal(data_dependent_init_done, 0), _do_data_dependent_init,
        tf.no_op)

    with tf.control_dependencies([init_op]):
      # Apply weight normalization.
      w_fc *= g / tf.sqrt(tf.reduce_sum(tf.square(w_fc), [0]))
      # Forward pass through the layer defined by w_fc and b_fc.
      logits = linear_classifier_forward_pass(embeddings, w_fc, b_fc,
                                              cosine_classifier,
                                              cosine_logits_multiplier, True)
  else:
    # No weight norm.
    w_fc = functional_backbones.weight_variable([embedding_dims, num_classes],
                                                weight_decay=weight_decay)
    b_fc = None
    if not cosine_classifier:
      # Also initialize a bias.
      b_fc = functional_backbones.bias_variable([num_classes])
    # Forward pass through the layer defined by w_fc and b_fc.
    logits = linear_classifier_forward_pass(embeddings, w_fc, b_fc,
                                            cosine_classifier,
                                            cosine_logits_multiplier, False)
  return logits
@gin.configurable
def separate_head_linear_classifier(embeddings, num_classes, dataset_idx,
                                    start_idx, cosine_classifier,
                                    cosine_logits_multiplier, learnable_scale,
                                    weight_decay):
  """A linear classifier with num_sets heads, for different datasets.

  Args:
    embeddings: A Tensor of size [batch size, embedding dim].
    num_classes: A list of integers; the dimension of the classifier layers of
      the different heads.
    dataset_idx: An int Tensor. The index of the dataset head to use.
    start_idx: An int Tensor. The index of the first class of the given
      dataset.
    cosine_classifier: A bool. If true, a cosine classifier is used which does
      not require a bias.
    cosine_logits_multiplier: A float. Only used if cosine_classifier is True,
      and multiplies the resulting logits.
    learnable_scale: A bool. Whether to make the cosine_logits_multiplier a
      learnable parameter. Only applies if cosine_classifier is True.
    weight_decay: A float; the scalar multiple on the L2 regularization of the
      weight matrix.

  Returns:
    logits: A Tensor of size [batch size, num outputs].
  """
  if not cosine_classifier:
    raise NotImplementedError('`separate_head_linear_classifier` currently '
                              'only supports `cosine_classifier` True.')

  # Optionally promote the fixed scale to a trainable scalar.
  if learnable_scale:
    cosine_logits_multiplier = tf.get_variable(
        'cosine_scale',
        initializer=cosine_logits_multiplier,
        dtype=tf.float32,
        trainable=True)

  # One shared weight matrix holds every head's columns side by side.
  embedding_dims = embeddings.get_shape().as_list()[-1]
  all_heads_w_fc = functional_backbones.weight_variable(
      [embedding_dims, sum(num_classes)], weight_decay=weight_decay)

  # Select the output "head" to use in the forward pass.
  head_num_classes = tf.gather(num_classes, dataset_idx)
  head_w_fc = all_heads_w_fc[:, start_idx:start_idx + head_num_classes]

  return linear_classifier_forward_pass(embeddings, head_w_fc, None,
                                        cosine_classifier,
                                        cosine_logits_multiplier, False)
Returns: logits: A Tensor of size [batch size,", "ops.append(tf.assign(b_fc, b_fc_init_value)) # Mark that the data-dependent initialization is done", "return logits @gin.configurable def linear_classifier(embeddings, num_classes, cosine_classifier, cosine_logits_multiplier, use_weight_norm, weight_decay):", "linear classifier, or possibly a cosine classifier. Args: embeddings: A", "weight_decay): \"\"\"A linear classifier with num_sets heads, for different datasets.", "if cosine_classifier: # Each column of the weight matrix may", "of g and maybe b_fc.\"\"\" w_fc_normalized = tf.nn.l2_normalize(w_fc.read_value(), [0]) output_init", "tf.get_variable( 'cosine_scale', initializer=cosine_logits_multiplier, dtype=tf.float32, trainable=True) embedding_dims = embeddings.get_shape().as_list()[-1] w_fc =", "bool. If true, a cosine classifier is used which does", "embedding vector belonging to that class is the cosine #", "layer defined by w_fc and b_fc. logits = linear_classifier_forward_pass(embeddings, w_fc,", "the specific language governing permissions and # limitations under the", "functional_backbones.bias_variable([num_classes]) # Forward pass through the layer defined by w_fc", "expressive. logits *= cosine_logits_multiplier else: assert b_fc is not None", "applicable law or agreed to in writing, software # distributed", "require a bias. cosine_logits_multiplier: A float. Only used if cosine_classifier", "it can not be None. cosine_classifier: A bool. If true,", "# Also initialize a bias in a data-dependent way. b_fc_init_value", "b_fc = tf.get_variable( 'b_fc', initializer=tf.zeros([num_classes]), trainable=True) def _do_data_dependent_init(): \"\"\"Returns ops", "which does not require the bias b_fc. cosine_logits_multiplier: A float.", "linear_classifier_forward_pass(embeddings, w_fc, b_fc, cosine_classifier, cosine_logits_multiplier, False) return logits @gin.configurable def", "[-1, 1] to softmax is not very # expressive. logits", "use_weight_norm: A bool. 
Whether weight norm was used. If so,", "overwritten during the first forward pass through this layer. g", "# Apply weight normalization. w_fc *= g / tf.sqrt(tf.reduce_sum(tf.square(w_fc), [0]))", "in writing, software # distributed under the License is distributed", "embeddings: A Tensor of size [batch size, embedding dim]. num_classes:", "a bias. cosine_logits_multiplier: A float. Only used if cosine_classifier is", "\"\"\"Passes embeddings through the linear layer defined by w_fc and", "pass through this layer. g = tf.get_variable( 'g', dtype=tf.float32, initializer=tf.ones([num_classes]),", "b_fc. logits = linear_classifier_forward_pass(embeddings, w_fc, b_fc, cosine_classifier, cosine_logits_multiplier, True) else:", "a bias. b_fc = functional_backbones.bias_variable([num_classes]) # Forward pass through the", "tf.get_variable( 'g', dtype=tf.float32, initializer=tf.ones([num_classes]), trainable=True) b_fc = None if not", "init values. g_init_value = 1. / tf.sqrt(var_init + 1e-10) ops", "cosine_logits_multiplier, use_weight_norm): \"\"\"Passes embeddings through the linear layer defined by", "g_init_value ops.append(tf.assign(b_fc, b_fc_init_value)) # Mark that the data-dependent initialization is", "Authors. # # Licensed under the Apache License, Version 2.0", "this layer. g = tf.get_variable( 'g', dtype=tf.float32, initializer=tf.ones([num_classes]), trainable=True) b_fc", "forward pass. dataset_num_classes = tf.gather(num_classes, dataset_idx) w_fc = w_fc[:, start_idx:start_idx", "# It will be overwritten during the first forward pass", "b_fc. logits = linear_classifier_forward_pass(embeddings, w_fc, b_fc, cosine_classifier, cosine_logits_multiplier, False) return", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "the embedding space). The # logit for an embedding vector", "License, Version 2.0 (the \"License\"); # you may not use", "is done to prevent it from # happening again in", "# Also initialize a bias. 
b_fc = functional_backbones.bias_variable([num_classes]) # Forward", "been done already). init_op = tf.cond( tf.equal(data_dependent_init_done, 0), _do_data_dependent_init, tf.no_op)", "# You may obtain a copy of the License at", "only the embeddings but not the weights. weight_decay: A float;", "tf.control_dependencies([init_op]): # Apply weight normalization. w_fc *= g / tf.sqrt(tf.reduce_sum(tf.square(w_fc),", "= tf.matmul(embeddings, w_fc) # Scale the logits as passing numbers", "passing numbers in [-1, 1] to softmax is not very", "parameter. Only applies if cosine_classifier is True. weight_decay: A float;", "= embeddings.get_shape().as_list()[-1] w_fc = functional_backbones.weight_variable( [embedding_dims, sum(num_classes)], weight_decay=weight_decay) # Select", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "outputs]. b_fc: Either None, or a Tensor of size [num", "through this layer. g = tf.get_variable( 'g', dtype=tf.float32, initializer=tf.ones([num_classes]), trainable=True)", "meta_dataset.models import functional_backbones import tensorflow.compat.v1 as tf def linear_classifier_forward_pass(embeddings, w_fc,", "A Tensor of size [batch size, num outputs]. \"\"\" if", "to use. start_idx: An int Tensor. The index of the", "\"\"\" if not cosine_classifier: raise NotImplementedError('`separate_head_linear_classifier` currently ' 'only supports", "then if using cosine classifier, normalize only the embeddings but", "= tf.nn.l2_normalize(w_fc, axis=0, epsilon=1e-3) logits = tf.matmul(embeddings, w_fc) # Scale", "A Tensor of size [embedding dim, num outputs]. 
b_fc: Either", "initializer=0, dtype=tf.int32, trainable=False) w_fc = tf.get_variable( 'w_fc', [embedding_dims, num_classes], initializer=tf.random_normal_initializer(0,", "= tf.get_variable( 'cosine_scale', initializer=cosine_logits_multiplier, dtype=tf.float32, trainable=True) embedding_dims = embeddings.get_shape().as_list()[-1] w_fc", "logits = linear_classifier_forward_pass(embeddings, w_fc, b_fc, cosine_classifier, cosine_logits_multiplier, False) return logits", "variable to keep track of whether the initialization has already", "the License for the specific language governing permissions and #", "cosine_classifier, cosine_logits_multiplier, use_weight_norm): \"\"\"Passes embeddings through the linear layer defined", "[0]) output_init = tf.matmul(embeddings, w_fc_normalized) mean_init, var_init = tf.nn.moments(output_init, [0])", "not cosine_classifier: # Also initialize a bias in a data-dependent", "Apache License, Version 2.0 (the \"License\"); # you may not", "through a linear classifier, or possibly a cosine classifier. Args:", "the weights. weight_decay: A float; the scalar multiple on the", "either express or implied. # See the License for the", "Also initialize a bias. b_fc = functional_backbones.bias_variable([num_classes]) # Forward pass", "embedding space). The # logit for an embedding vector belonging", "A bool. Whether to make the cosine_logits_multiplier a learnable parameter.", "[batch size, num outputs]. \"\"\" if not cosine_classifier: raise NotImplementedError('`separate_head_linear_classifier`", "g and maybe b_fc.\"\"\" w_fc_normalized = tf.nn.l2_normalize(w_fc.read_value(), [0]) output_init =", "w_fc *= g / tf.sqrt(tf.reduce_sum(tf.square(w_fc), [0])) # Forward pass through", "import tensorflow.compat.v1 as tf def linear_classifier_forward_pass(embeddings, w_fc, b_fc, cosine_classifier, cosine_logits_multiplier,", "happened. 
data_dependent_init_done = tf.get_variable( 'data_dependent_init_done', initializer=0, dtype=tf.int32, trainable=False) w_fc =", "float; the scalar multiple on the L2 regularization of the", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "is used, which does not require a bias. cosine_logits_multiplier: A", "classifier, normalize only the embeddings but not the weights. weight_decay:", "weight_decay): \"\"\"Forward pass through a linear classifier, or possibly a", "weights if weight norm was not used. w_fc = tf.nn.l2_normalize(w_fc,", "a linear classifier, or possibly a cosine classifier. Args: embeddings:", "it needs to be done in a data-dependent way. #", "size, embedding dim]. num_classes: A list of integers; the dimension", "True, and multiplies the resulting logits. use_weight_norm: A bool. Whether", "if not cosine_classifier: # Also initialize a bias. b_fc =", "\"head\" to use in the forward pass. dataset_num_classes = tf.gather(num_classes,", "the first class of the given dataset. cosine_classifier: A bool.", "of the different heads. dataset_idx: An int Tensor. The index", "True. weight_decay: A float; the scalar multiple on the L2", "true, a cosine classifier is used, which does not require", "= tf.get_variable( 'w_fc', [embedding_dims, num_classes], initializer=tf.random_normal_initializer(0, 0.05), trainable=True) # This", "An int Tensor. The index of the dataset head to", "b_fc = functional_backbones.bias_variable([num_classes]) # Forward pass through the layer defined", "the data-dependent initialization is done to prevent it from #", "num_classes, cosine_classifier, cosine_logits_multiplier, use_weight_norm, weight_decay): \"\"\"Forward pass through a linear", "functional_backbones.weight_variable( [embedding_dims, sum(num_classes)], weight_decay=weight_decay) # Select the output \"head\" to", "does not require a bias. cosine_logits_multiplier: A float. 
Only used", "init_op = tf.cond( tf.equal(data_dependent_init_done, 0), _do_data_dependent_init, tf.no_op) with tf.control_dependencies([init_op]): #", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "happening again in the future. ops.append(tf.assign(data_dependent_init_done, 1)) return tf.group(*ops) #", "integer; the dimension of the classification. cosine_classifier: A bool. If", "cosine_logits_multiplier, use_weight_norm, weight_decay): \"\"\"Forward pass through a linear classifier, or", "as a class # representation (of the same dimenionality as", "Data-dependent init values. g_init_value = 1. / tf.sqrt(var_init + 1e-10)", "tf.matmul(embeddings, w_fc_normalized) mean_init, var_init = tf.nn.moments(output_init, [0]) # Data-dependent init", "g_init_value = 1. / tf.sqrt(var_init + 1e-10) ops = [tf.assign(g,", "multiple on the L2 regularization of the weight matrix. Returns:", "tf.group(*ops) # Possibly perform data-dependent init (if it hasn't been", "1e-10) ops = [tf.assign(g, g_init_value)] if not cosine_classifier: # Also", "b_fc.\"\"\" w_fc_normalized = tf.nn.l2_normalize(w_fc.read_value(), [0]) output_init = tf.matmul(embeddings, w_fc_normalized) mean_init,", "initialize a bias. b_fc = functional_backbones.bias_variable([num_classes]) # Forward pass through", "to use in the forward pass. dataset_num_classes = tf.gather(num_classes, dataset_idx)", "_do_data_dependent_init, tf.no_op) with tf.control_dependencies([init_op]): # Apply weight normalization. w_fc *=", "A float. Only used if cosine_classifier is True, and multiplies", "resulting logits. use_weight_norm: A bool. Whether weight norm was used.", "L2 regularization of the weight matrix. Returns: logits: A Tensor", "initializer=tf.zeros([num_classes]), trainable=True) def _do_data_dependent_init(): \"\"\"Returns ops for the data-dependent init", "Meta-Dataset Authors. # # Licensed under the Apache License, Version", "A bool. 
If true, a cosine classifier is used which", "logit for an embedding vector belonging to that class is", "absolute_import from __future__ import division from __future__ import print_function import", "# Each column of the weight matrix may be interpreted", "logits. use_weight_norm: A bool. Whether weight norm was used. If", "the L2 regularization of the weight matrix. Returns: logits: A", "linear classifier with num_sets heads, for different datasets. Args: embeddings:", "size [embedding dim, num outputs]. b_fc: Either None, or a", "bias. cosine_logits_multiplier: A float. Only used if cosine_classifier is True,", "size, embedding dim]. num_classes: An integer; the dimension of the", "\"License\"); # you may not use this file except in", "not use_weight_norm: # Only normalize the weights if weight norm", "size [batch size, embedding dim]. num_classes: A list of integers;", "but not the weights. Returns: logits: A Tensor of size", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "NotImplementedError('`separate_head_linear_classifier` currently ' 'only supports `cosine_classifier` True.') if learnable_scale: cosine_logits_multiplier", "weights. Returns: logits: A Tensor of size [batch size, num", "# Select the output \"head\" to use in the forward", "# Copyright 2022 The Meta-Dataset Authors. # # Licensed under", "the cosine # similarity between that embedding and that class", "# distributed under the License is distributed on an \"AS", "first forward pass through this layer. g = tf.get_variable( 'g',", "None. cosine_classifier: A bool. If true, a cosine classifier is", "embeddings.get_shape().as_list()[-1] if use_weight_norm: # A variable to keep track of", "as it needs to be done in a data-dependent way.", "# Unless required by applicable law or agreed to in", "int Tensor. The index of the dataset head to use.", "as the embedding space). 
The # logit for an embedding", "print_function import gin.tf from meta_dataset.models import functional_backbones import tensorflow.compat.v1 as", "embeddings but not the weights. Returns: logits: A Tensor of", "tf.no_op) with tf.control_dependencies([init_op]): # Apply weight normalization. w_fc *= g", "False) return logits @gin.configurable def separate_head_linear_classifier(embeddings, num_classes, dataset_idx, start_idx, cosine_classifier,", "not cosine_classifier: # Also initialize a bias. b_fc = tf.get_variable(", "@gin.configurable def separate_head_linear_classifier(embeddings, num_classes, dataset_idx, start_idx, cosine_classifier, cosine_logits_multiplier, learnable_scale, weight_decay):", "same dimenionality as the embedding space). The # logit for", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "of the dataset head to use. start_idx: An int Tensor.", "not cosine_classifier: raise NotImplementedError('`separate_head_linear_classifier` currently ' 'only supports `cosine_classifier` True.')", "head to use. start_idx: An int Tensor. The index of", "weight norm was not used. w_fc = tf.nn.l2_normalize(w_fc, axis=0, epsilon=1e-3)", "is used which does not require the bias b_fc. cosine_logits_multiplier:", "You may obtain a copy of the License at #", "axis=0, epsilon=1e-3) logits = tf.matmul(embeddings, w_fc) # Scale the logits", "bool. Whether weight norm was used. If so, then if", "if cosine_classifier is True, and multiplies the resulting logits. use_weight_norm:", "Also initialize a bias. b_fc = tf.get_variable( 'b_fc', initializer=tf.zeros([num_classes]), trainable=True)", "(if it hasn't been done already). init_op = tf.cond( tf.equal(data_dependent_init_done,", "already # happened. data_dependent_init_done = tf.get_variable( 'data_dependent_init_done', initializer=0, dtype=tf.int32, trainable=False)", "embedding dim]. num_classes: A list of integers; the dimension of", "that class representation. 
embeddings = tf.nn.l2_normalize(embeddings, axis=1, epsilon=1e-3) if not", "embeddings.get_shape().as_list()[-1] w_fc = functional_backbones.weight_variable( [embedding_dims, sum(num_classes)], weight_decay=weight_decay) # Select the", "cosine_classifier is True, and multiplies the resulting logits. learnable_scale: A", "the given dataset. cosine_classifier: A bool. If true, a cosine", "dataset_idx) w_fc = w_fc[:, start_idx:start_idx + dataset_num_classes] logits = linear_classifier_forward_pass(embeddings,", "Returns: logits: A Tensor of size [batch size, num outputs].", "the resulting logits. use_weight_norm: A bool. Whether weight norm was", "0.05), trainable=True) # This init is temporary as it needs", "if weight norm was not used. w_fc = tf.nn.l2_normalize(w_fc, axis=0,", "the Apache License, Version 2.0 (the \"License\"); # you may", "regularization of the weight matrix. Returns: logits: A Tensor of", "a data-dependent way. b_fc_init_value = -mean_init * g_init_value ops.append(tf.assign(b_fc, b_fc_init_value))", "'only supports `cosine_classifier` True.') if learnable_scale: cosine_logits_multiplier = tf.get_variable( 'cosine_scale',", "bias. b_fc = tf.get_variable( 'b_fc', initializer=tf.zeros([num_classes]), trainable=True) def _do_data_dependent_init(): \"\"\"Returns", "is True, and multiplies the resulting logits. use_weight_norm: A bool.", "cosine classifier is used, which does not require a bias.", "if use_weight_norm: # A variable to keep track of whether", "It will be overwritten during the first forward pass through", "b_fc return logits @gin.configurable def linear_classifier(embeddings, num_classes, cosine_classifier, cosine_logits_multiplier, use_weight_norm,", "cosine_classifier, cosine_logits_multiplier, False) return logits @gin.configurable def separate_head_linear_classifier(embeddings, num_classes, dataset_idx,", "The index of the dataset head to use. 
start_idx: An", "# This init is temporary as it needs to be", "for the data-dependent init of g and maybe b_fc.\"\"\" w_fc_normalized" ]
[ "# print(\"\\n Deleted user:\", user_id_key, \" @\", datetime.now()) session_timer =", "from them temp folder named with user_id under users dir", "document is not added ..\") add_doc = False os.remove(uploads_dir +", "connected at \" + str(datetime.now()) + \" with IP: \"", "data current_users = dict() # Used to store user id", "( processed[doc][\"meta\"][\"name\"] == \"context_file.txt\" and doc_name == \"context_file.txt\" ): #", "haystack.preprocessor.cleaning import clean_wiki_text from haystack import Finder from haystack.retriever.sparse import", "shutil.rmtree(\"users/\" + str(user_id_key)) es.indices.delete( index=\"user_\" + str(user_id_key), ignore=[400, 404] )", "time_left=time_left) else: session.permanent = True current_time = datetime.now() user_id +=", "if time_diff[0] >= session_time: try: del current_users[user_id_key] del user_doc_store[user_id_key] del", "in current_users: query_question = request.form[\"question\"] es_stats = es.indices.stats(index=\"user_\" + str(user_id_key))", "to build docker image session_time = 60 # Session Timeout", "current_users[user_id_key] del user_doc_store[user_id_key] del user_settings[user_id_key] shutil.rmtree(\"users/\" + str(user_id_key)) es.indices.delete( index=\"user_\"", "For session timeout on client side return render_template(\"index.html\", time_left=time_left) else:", "None and answers_dict[\"answers\"][i][\"answer\"] not in unique_answers ): temp_dict = answers_dict[\"answers\"][i]", "\"%S\") ) diff_time = diff_min_sec + diff_sec time_left = (", "current_users[user_id] = current_time session[\"user\"] = user_id # print(current_users) if not", "import * from haystack.preprocessor.utils import convert_files_to_dicts from haystack.preprocessor.cleaning import clean_wiki_text", "\"}, ] return jsonify({\"output\": output}) else: return render_template(\"session_out.html\") # Handles", "a docker image or running on WSGI server like gunicorn", "the session time ends and deletes user id from dict", 
"session_timer.start() # Handles users w.r.t new session or already in", "i in range(len(answers_dict[\"answers\"])): if ( answers_dict[\"answers\"][i][\"answer\"] is not None and", "def question(): if \"user\" in session: user_id_key = session[\"user\"] if", "CPU\") reader = TransformersReader( model_name_or_path=model_path, tokenizer=model_path, use_gpu=-1 ) else: reader", "dir for uploading context os.makedirs(\"users/\" + str(user_id)) os.makedirs(\"users/\" + str(user_id)", "GPU setting changes. @app.route(\"/gpu\", methods=[\"POST\"]) def gpu(): if \"user\" in", "len(answers_dict[\"answers\"]) > 0: for i in range(len(answers_dict[\"answers\"])): if ( answers_dict[\"answers\"][i][\"answer\"]", "dir_path=uploads_dir, clean_func=clean_wiki_text, split_paragraphs=True ) for doc in range(len(processed)): try: #", "a duplicate, So this document is not added ..\") add_doc", "es.delete( index=\"user_\" + str(user_id_key), doc_type=\"_doc\", id=doc_id ) if processed[doc][\"text\"] ==", "@app.route(\"/upload_file\", methods=[\"GET\", \"POST\"]) def upload_file(): global current_users if \"user\" in", "if ( answers_dict[\"answers\"][i][\"answer\"] is not None and answers_dict[\"answers\"][i][\"answer\"] not in", "datetime.now()) while True: for user_id_key in current_users.copy(): current_time = datetime.now()", "} # Initial user settings logged_on = current_users[user_id] current_time =", "str(request.environ[\"REMOTE_ADDR\"]) ) if \"user\" in session and session[\"user\"] in current_users:", "render_template(\"session_out.html\") # Handles GPU setting changes. 
@app.route(\"/gpu\", methods=[\"POST\"]) def gpu():", "changes ..\") es.delete( index=\"user_\" + str(user_id_key), doc_type=\"_doc\", id=doc_id ) if", "id from dict def user_session_timer(): global current_users, session_time seconds_in_day =", "for key in remove: if key in temp_dict: del temp_dict[key]", "User ID to keep track w.r.t sessions and context data", "threading.Thread(target=user_session_timer) session_timer.start() # Handles users w.r.t new session or already", "del user_settings[user_id_key] shutil.rmtree(\"users/\" + str(user_id_key)) es.indices.delete( index=\"user_\" + str(user_id_key), ignore=[400,", "in session: user_id_key = session[\"user\"] if user_id_key in current_users: user_settings[user_id_key][\"model\"]", "( int(datetime.strftime(current_time, \"%M\")) - int(datetime.strftime(logged_on, \"%M\")) ) * 60 diff_sec", "this to es..\") processed_lst = [processed[doc]] user_doc_store[user_id_key].write_documents(processed_lst) os.remove(uploads_dir + str(processed[doc][\"meta\"][\"name\"]))", "timedelta, datetime from flask import Flask, render_template, request, session, jsonify,", "use_gpu=-1 ) finder = Finder(reader, retriever) return finder # Handles", "data completely from the server after the session time ends", "= ( int(datetime.strftime(current_time, \"%M\")) - int(datetime.strftime(logged_on, \"%M\")) ) * 60", "= session[\"user\"] if user_id_key in current_users: text_context = request.form[\"context\"] context_file", "ends and deletes user id from dict def user_session_timer(): global", "def set_finder(user_id_key): if user_settings[user_id_key][\"model\"] == \"roberta\": model_path = ( \"deepset/roberta-base-squad2\"", "with Elasticsearch([\"http://elasticsearch:9200/\"], verify_certs=True) to build docker image session_time = 60", "reader = TransformersReader( model_name_or_path=model_path, tokenizer=model_path, use_gpu=0 ) except Exception as", "= \"deepset/bert-large-uncased-whole-word-masking-squad2\" elif 
user_settings[user_id_key][\"model\"] == \"distilbert\": model_path = \"distilbert-base-uncased-distilled-squad\" else:", "\" + str(datetime.now()) + \" with IP: \" + str(request.environ[\"REMOTE_ADDR\"])", "difference.seconds, 60 ) if time_diff[0] >= session_time: try: del current_users[user_id_key]", "\"%S\")) - int( datetime.strftime(logged_on, \"%S\") ) diff_time = diff_min_sec +", "int(datetime.strftime(logged_on, \"%M\")) ) * 60 diff_sec = int(datetime.strftime(current_time, \"%S\")) -", "+ str(user_id_key) + \"/uploads/context_file.txt\", \"w\" ) context_file.write(text_context) context_file.close() pre_process(user_id_key) return", "store user id with time of login user_doc_store = dict()", "pre-processed context to Elasticsearch # Each user is assigned with", "answers_dict[\"answers\"][i] remove = ( \"score\", \"probability\", \"offset_start\", \"offset_end\", \"document_id\", )", "running on WSGI server like gunicorn if __name__ == \"__main__\":", "print(\"Error: %s - %s.\" % (e.filename, e.strerror)) # print(\"\\n Deleted", "as e: print(e) print(\"GPU not available. 
Inferencing on CPU\") reader", "\"user_{user_id}\" # Documents & textual context are deleted from them", "upload_file(): global current_users if \"user\" in session: user_id_key = session[\"user\"]", "- user_time time_diff = divmod( difference.days * seconds_in_day + difference.seconds,", "\"roberta\", } # Initial user settings logged_on = current_users[user_id] current_time", "in session: user_id_key = session[\"user\"] if user_id_key in current_users: if", "if \"user\" in session and session[\"user\"] in current_users: user_id =", "flask import Flask, render_template, request, session, jsonify, url_for, redirect from", "user_id_key in current_users: for f in request.files.getlist(\"file\"): f.save( os.path.join(\"users/\" +", "user_index_size == 208 ): # To check if index in", "..\", \"context\": \" \", \"meta\": \" \"}, ] return jsonify({\"output\":", "* from haystack.preprocessor.utils import convert_files_to_dicts from haystack.preprocessor.cleaning import clean_wiki_text from", "and retriever def set_finder(user_id_key): if user_settings[user_id_key][\"model\"] == \"roberta\": model_path =", "models hosted in Hugging Face ) elif user_settings[user_id_key][\"model\"] == \"bert\":", "+ str(user_id) + \"/uploads\") else: os.makedirs(\"users/\" + str(user_id)) os.makedirs(\"users/\" +", "+ \"/uploads/context_file.txt\", \"w\" ) context_file.write(text_context) context_file.close() pre_process(user_id_key) return jsonify({\"output\": \"\"", "\"distilbert-base-uncased-distilled-squad\" else: model_path = \"illuin/camembert-base-fquad\" retriever = ElasticsearchRetriever(document_store=user_doc_store[user_id_key]) if user_settings[user_id_key][\"gpu\"]", "\"/uploads/context_file.txt\", \"w\" ) context_file.write(text_context) context_file.close() pre_process(user_id_key) return jsonify({\"output\": \"\" +", "Face ) elif user_settings[user_id_key][\"model\"] == \"bert\": model_path = \"deepset/bert-large-uncased-whole-word-masking-squad2\" elif", "& textual context 
are deleted from them temp folder named", "except OSError as e: print(\"Error: %s - %s.\" % (e.filename,", "datetime.now() diff_min_sec = ( int(datetime.strftime(current_time, \"%M\")) - int(datetime.strftime(logged_on, \"%M\")) )", "adding this to es..\") processed_lst = [processed[doc]] user_doc_store[user_id_key].write_documents(processed_lst) os.remove(uploads_dir +", "tracker thread started @ \", datetime.now()) while True: for user_id_key", "context are deleted from them temp folder named with user_id", "* 60 ) - diff_time # For session timeout on", "index size without docs return jsonify({\"error\": \"add context\"}) finder =", "answers_dict = finder.get_answers( question=query_question, top_k_retriever=5, top_k_reader=5 ) unique_answers = list()", "60 # Session Timeout in Minutes app = Flask(__name__) app.secret_key", "\"on\": user_settings[user_id_key][\"gpu\"] = \"off\" else: user_settings[user_id_key][\"gpu\"] = \"on\" return jsonify({\"output\":", "current_users: user_id = session[\"user\"] logged_on = current_users[user_id] current_time = datetime.now()", "in remove: if key in temp_dict: del temp_dict[key] else: output", "changed\"}) # Handles pre-trained model choice setting changes. 
@app.route(\"/models\", methods=[\"POST\"])", "doc_id = es_result[\"hits\"][\"hits\"][each_doc][\"_id\"] if ( processed[doc][\"meta\"][\"name\"] == \"context_file.txt\" and doc_name", "reader = TransformersReader( model_name_or_path=model_path, tokenizer=model_path, use_gpu=-1 ) finder = Finder(reader,", "str(user_id) ) # Change host = \"elasticsearch\" to build docker", "methods=[\"POST\"]) def gpu(): if \"user\" in session: user_id_key = session[\"user\"]", ") # Replace with Elasticsearch([\"http://elasticsearch:9200/\"], verify_certs=True) to build docker image", "datetime import timedelta, datetime from flask import Flask, render_template, request,", "if user_settings[user_id_key][\"model\"] == \"roberta\": model_path = ( \"deepset/roberta-base-squad2\" # Path", "60 * 60 print(\"\\n User tracker thread started @ \",", "folder named with user_id under users dir after uploading to", "= current_time session[\"user\"] = user_id # print(current_users) if not os.path.exists(\"users/\"):", "current_users.copy(): current_time = datetime.now() user_time = current_users[user_id_key] difference = current_time", "str(datetime.now()) + \" with IP: \" + str(request.environ[\"REMOTE_ADDR\"]) ) if", "doc_text: # print(\"\\n There is a duplicate, So this document", "\"/uploads\") else: os.makedirs(\"users/\" + str(user_id)) os.makedirs(\"users/\" + str(user_id) + \"/uploads\")", "\"POST\"]) def upload_file(): global current_users if \"user\" in session: user_id_key", "os.makedirs(\"users/\" + str(user_id) + \"/uploads\") user_doc_store[user_id] = ElasticsearchDocumentStore( host=\"localhost\", index=\"user_\"", ") context_file.write(text_context) context_file.close() pre_process(user_id_key) return jsonify({\"output\": \"\" + text_context}) else:", "temp_dict[\"meta\"][\"name\"] == \"context_file.txt\": temp_dict[\"meta\"][\"name\"] = \"Textual Context\" temp_dict[\"meta\"] = temp_dict[\"meta\"][\"name\"]", "size without docs return jsonify({\"error\": \"add context\"}) finder 
= set_finder(user_id_key)", "): temp_dict = answers_dict[\"answers\"][i] remove = ( \"score\", \"probability\", \"offset_start\",", "None) return redirect(url_for(\"session_timeout\")) # Comment the below block in case", "ID to keep track w.r.t sessions and context data current_users", "os.remove(uploads_dir + str(processed[doc][\"meta\"][\"name\"])) # Handles setting up reader and retriever", "user_id_key in current_users.copy(): current_time = datetime.now() user_time = current_users[user_id_key] difference", "= datetime.now() user_id += 1 current_users[user_id] = current_time session[\"user\"] =", "# Handles pre-processing the context and uploads the pre-processed context", "user_index_size = es_stats[\"_all\"][\"primaries\"][\"store\"][\"size_in_bytes\"] if ( user_index_size == 208 ): #", "answers_dict[\"answers\"][i][\"answer\"] not in unique_answers ): temp_dict = answers_dict[\"answers\"][i] remove =", "current_users: query_question = request.form[\"question\"] es_stats = es.indices.stats(index=\"user_\" + str(user_id_key)) user_index_size", "user_settings[user_id_key][\"model\"] = request.form[\"model\"] return jsonify({\"output\": \"model changed\"}) # Handles session", "= finder.get_answers( question=query_question, top_k_retriever=5, top_k_reader=5 ) unique_answers = list() output", "60 ) if time_diff[0] >= session_time: try: del current_users[user_id_key] del", "# Handles context documents uploads @app.route(\"/upload_file\", methods=[\"GET\", \"POST\"]) def upload_file():", "# User ID to keep track w.r.t sessions and context", "already in session @app.route(\"/\") def home(): global user_id, current_users, session_time", "temp_dict[\"meta\"][\"name\"] = \"Textual Context\" temp_dict[\"meta\"] = temp_dict[\"meta\"][\"name\"] output.append(temp_dict) for key", "\"roberta\": model_path = ( \"deepset/roberta-base-squad2\" # Path of the models", "render_template, request, session, jsonify, url_for, redirect from haystack.document_store.elasticsearch import 
*", "build docker image session_time = 60 # Session Timeout in", "f.save( os.path.join(\"users/\" + str(user_id_key) + \"/uploads\", f.filename) ) pre_process(user_id_key) return", "= session[\"user\"] if user_id_key in current_users: query_question = request.form[\"question\"] es_stats", "only when app tab is open until session completes @app.route(\"/session_out\",", "session[\"user\"] in current_users: user_id = session[\"user\"] logged_on = current_users[user_id] current_time", "\"/uploads\", f.filename) ) pre_process(user_id_key) return render_template(\"index.html\") else: return redirect(url_for(\"session_timeout\")) else:", "diff_min_sec + diff_sec time_left = ( session_time * 60 )", "Handles context added through the textbox @app.route(\"/context\", methods=[\"POST\"]) def context():", "request.form[\"question\"] es_stats = es.indices.stats(index=\"user_\" + str(user_id_key)) user_index_size = es_stats[\"_all\"][\"primaries\"][\"store\"][\"size_in_bytes\"] if", "models choice # Handles pre-processing the context and uploads the", "es_stats[\"_all\"][\"primaries\"][\"store\"][\"size_in_bytes\"] if ( user_index_size == 208 ): # To check", "of session identifier from session dict, This works only when", "return render_template(\"session_out.html\") # Handles GPU setting changes. 
@app.route(\"/gpu\", methods=[\"POST\"]) def", "return redirect(url_for(\"session_timeout\")) # Handles context added through the textbox @app.route(\"/context\",", "\"%M\")) - int(datetime.strftime(logged_on, \"%M\")) ) * 60 diff_sec = int(datetime.strftime(current_time,", "unique_answers = list() output = list() if len(answers_dict[\"answers\"]) > 0:", "thread started @ \", datetime.now()) while True: for user_id_key in", "..\") add_doc = True for each_doc in range(no_docs): doc_text =", "current_users: if user_settings[user_id_key][\"gpu\"] == \"on\": user_settings[user_id_key][\"gpu\"] = \"off\" else: user_settings[user_id_key][\"gpu\"]", "finder = Finder(reader, retriever) return finder # Handles deletion of", "from haystack.reader.transformers import TransformersReader from elasticsearch import Elasticsearch es =", "time_diff[0] >= session_time: try: del current_users[user_id_key] del user_doc_store[user_id_key] del user_settings[user_id_key]", "tokenizer=model_path, use_gpu=-1 ) else: reader = TransformersReader( model_name_or_path=model_path, tokenizer=model_path, use_gpu=-1", "\"users/\" + str(user_id_key) + \"/uploads/context_file.txt\", \"w\" ) context_file.write(text_context) context_file.close() pre_process(user_id_key)", "def models(): if \"user\" in session: user_id_key = session[\"user\"] if", "temp_dict: del temp_dict[key] else: output = [ {\"answer\": \"No Answers", "the user id user_settings = dict() # User settings for", "retriever = ElasticsearchRetriever(document_store=user_doc_store[user_id_key]) if user_settings[user_id_key][\"gpu\"] == \"on\": try: reader =", "GPU and Pre-trained models choice # Handles pre-processing the context", "user_doc_store[user_id] = ElasticsearchDocumentStore( host=\"localhost\", index=\"user_\" + str(user_id) ) # Change", "Elasticsearch es = ( Elasticsearch() ) # Replace with Elasticsearch([\"http://elasticsearch:9200/\"],", "True for each_doc in range(no_docs): doc_text = 
es_result[\"hits\"][\"hits\"][each_doc][\"_source\"][\"text\"] doc_name =", "doc_name == \"context_file.txt\" ): # print(\"Deleting context file to update", "..\") es.delete( index=\"user_\" + str(user_id_key), doc_type=\"_doc\", id=doc_id ) if processed[doc][\"text\"]", "user_settings[user_id_key][\"gpu\"] == \"on\": try: reader = TransformersReader( model_name_or_path=model_path, tokenizer=model_path, use_gpu=0", "user id from dict def user_session_timer(): global current_users, session_time seconds_in_day", "open( \"users/\" + str(user_id_key) + \"/uploads/context_file.txt\", \"w\" ) context_file.write(text_context) context_file.close()", "session[\"user\"] if user_id_key in current_users: if user_settings[user_id_key][\"gpu\"] == \"on\": user_settings[user_id_key][\"gpu\"]", "session_timeout(): return render_template(\"session_out.html\") # Handles removing of session identifier from", "= { \"gpu\": \"off\", \"model\": \"roberta\", } # Initial user", "from datetime import timedelta, datetime from flask import Flask, render_template,", "remove: if key in temp_dict: del temp_dict[key] else: output =", "= ( \"score\", \"probability\", \"offset_start\", \"offset_end\", \"document_id\", ) unique_answers.append(temp_dict[\"answer\"]) if", "# Initial user settings logged_on = current_users[user_id] current_time = datetime.now()", "es_result = es.search( index=\"user_\" + str(user_id_key), body={\"query\": {\"match_all\": {}}} )", "added ..\") add_doc = False os.remove(uploads_dir + str(processed[doc][\"meta\"][\"name\"])) break if", "\"deepset/roberta-base-squad2\" # Path of the models hosted in Hugging Face", "image user_settings[user_id] = { \"gpu\": \"off\", \"model\": \"roberta\", } #", "pre_process(user_id_key) return render_template(\"index.html\") else: return redirect(url_for(\"session_timeout\")) else: return redirect(url_for(\"session_timeout\")) #", "\"gpu status changed\"}) # Handles pre-trained model choice setting changes.", "Exception as e: print(e) 
print(\"\\n no documents in es\") processed", "True current_time = datetime.now() user_id += 1 current_users[user_id] = current_time", "# Each user is assigned with a separate Elasticsearch index", "user is assigned with a separate Elasticsearch index starting with", "datetime.now()) session_timer = threading.Thread(target=user_session_timer) session_timer.start() # Handles users w.r.t new", "user_settings[user_id_key][\"model\"] == \"bert\": model_path = \"deepset/bert-large-uncased-whole-word-masking-squad2\" elif user_settings[user_id_key][\"model\"] == \"distilbert\":", "time_diff = divmod( difference.days * seconds_in_day + difference.seconds, 60 )", "@app.route(\"/context\", methods=[\"POST\"]) def context(): if \"user\" in session: user_id_key =", "session: user_id_key = session[\"user\"] if user_id_key in current_users: text_context =", "key in temp_dict: del temp_dict[key] else: output = [ {\"answer\":", "\", \"meta\": \" \"}, ] return jsonify({\"output\": output}) else: return", "\"model changed\"}) # Handles session timeout redirection @app.route(\"/session_timeout\") def session_timeout():", "session identifier from session dict, This works only when app", "session completes @app.route(\"/session_out\", methods=[\"POST\"]) def session_out(): session.pop(\"user\", None) return redirect(url_for(\"session_timeout\"))", "return render_template(\"index.html\") else: return redirect(url_for(\"session_timeout\")) else: return redirect(url_for(\"session_timeout\")) # Handles", "render_template(\"index.html\") else: return redirect(url_for(\"session_timeout\")) else: return redirect(url_for(\"session_timeout\")) # Handles context", "check if index in Es is empty. 
208 bytes is", "list() if len(answers_dict[\"answers\"]) > 0: for i in range(len(answers_dict[\"answers\"])): if", "is not None and answers_dict[\"answers\"][i][\"answer\"] not in unique_answers ): temp_dict", "side return render_template(\"index.html\", time_left=time_left) # Handles context documents uploads @app.route(\"/upload_file\",", "# print(\"\\n Checking for duplicate docs ..\") add_doc = True", "is not added ..\") add_doc = False os.remove(uploads_dir + str(processed[doc][\"meta\"][\"name\"]))", "user_settings[user_id_key][\"model\"] == \"distilbert\": model_path = \"distilbert-base-uncased-distilled-squad\" else: model_path = \"illuin/camembert-base-fquad\"", "pre_process(user_id_key): uploads_dir = \"users/\" + str(user_id_key) + \"/uploads/\" try: es_result", "e: print(e) print(\"GPU not available. Inferencing on CPU\") reader =", "user_id_key in current_users: if user_settings[user_id_key][\"gpu\"] == \"on\": user_settings[user_id_key][\"gpu\"] = \"off\"", "finder # Handles deletion of context data completely from the", "== \"context_file.txt\" ): # print(\"Deleting context file to update with", "= 24 * 60 * 60 print(\"\\n User tracker thread", "= dict() # User settings for GPU and Pre-trained models", "+ str(user_id_key)) es.indices.delete( index=\"user_\" + str(user_id_key), ignore=[400, 404] ) except", "\"gpu\": \"off\", \"model\": \"roberta\", } # Initial user settings logged_on", "dict() # Used to store user id with time of", "each_doc in range(no_docs): doc_text = es_result[\"hits\"][\"hits\"][each_doc][\"_source\"][\"text\"] doc_name = es_result[\"hits\"][\"hits\"][each_doc][\"_source\"][\"name\"] doc_id", "import timedelta, datetime from flask import Flask, render_template, request, session,", "datetime from flask import Flask, render_template, request, session, jsonify, url_for,", "= answers_dict[\"answers\"][i] remove = ( \"score\", \"probability\", \"offset_start\", \"offset_end\", \"document_id\",", "user_id = session[\"user\"] logged_on 
= current_users[user_id] current_time = datetime.now() diff_min_sec", "answers_dict[\"answers\"][i][\"answer\"] is not None and answers_dict[\"answers\"][i][\"answer\"] not in unique_answers ):", "works only when app tab is open until session completes", "es.indices.stats(index=\"user_\" + str(user_id_key)) user_index_size = es_stats[\"_all\"][\"primaries\"][\"store\"][\"size_in_bytes\"] if ( user_index_size ==", "user_id_key = session[\"user\"] if user_id_key in current_users: text_context = request.form[\"context\"]", "temp dir for uploading context os.makedirs(\"users/\" + str(user_id)) os.makedirs(\"users/\" +", "[processed[doc]] user_doc_store[user_id_key].write_documents(processed_lst) os.remove(uploads_dir + str(processed[doc][\"meta\"][\"name\"])) except Exception as e: print(e)", "not None and answers_dict[\"answers\"][i][\"answer\"] not in unique_answers ): temp_dict =", "for doc in range(len(processed)): try: # print(\"\\n Checking for duplicate", "session_time = 60 # Session Timeout in Minutes app =", "if ( user_index_size == 208 ): # To check if", "+ str(processed[doc][\"meta\"][\"name\"])) break if add_doc: # print(\"\\n No duplicates found,", "in temp_dict: del temp_dict[key] else: output = [ {\"answer\": \"No", "in es\") processed = convert_files_to_dicts( dir_path=uploads_dir, clean_func=clean_wiki_text, split_paragraphs=True ) for", "\"off\" else: user_settings[user_id_key][\"gpu\"] = \"on\" return jsonify({\"output\": \"gpu status changed\"})", "Answers found ..\", \"context\": \" \", \"meta\": \" \"}, ]", "( \"score\", \"probability\", \"offset_start\", \"offset_end\", \"document_id\", ) unique_answers.append(temp_dict[\"answer\"]) if temp_dict[\"meta\"][\"name\"]", "print(\"\\n Checking for duplicate docs ..\") add_doc = True for", "timeout redirection @app.route(\"/session_timeout\") def session_timeout(): return render_template(\"session_out.html\") # Handles removing", "session: user_id_key = session[\"user\"] if user_id_key in 
current_users: query_question =", "[ {\"answer\": \"No Answers found ..\", \"context\": \" \", \"meta\":", "\"context_file.txt\" and doc_name == \"context_file.txt\" ): # print(\"Deleting context file", "session time ends and deletes user id from dict def", "Change host = \"elasticsearch\" to build docker image user_settings[user_id] =", "es..\") processed_lst = [processed[doc]] user_doc_store[user_id_key].write_documents(processed_lst) os.remove(uploads_dir + str(processed[doc][\"meta\"][\"name\"])) except Exception", "user id with time of login user_doc_store = dict() #", "str(user_id)) os.makedirs(\"users/\" + str(user_id) + \"/uploads\") else: os.makedirs(\"users/\" + str(user_id))", "if \"user\" in session: user_id_key = session[\"user\"] if user_id_key in", "# Handles users w.r.t new session or already in session", "TransformersReader from elasticsearch import Elasticsearch es = ( Elasticsearch() )", "es_stats = es.indices.stats(index=\"user_\" + str(user_id_key)) user_index_size = es_stats[\"_all\"][\"primaries\"][\"store\"][\"size_in_bytes\"] if (", "return jsonify({\"output\": \"gpu status changed\"}) # Handles pre-trained model choice", "e: print(e) # print(\"\\n no documents in es\") processed_lst =", "str(user_id_key), ignore=[400, 404] ) except OSError as e: print(\"Error: %s", "= \"elasticsearch\" to build docker image user_settings[user_id] = { \"gpu\":", "\"context\": \" \", \"meta\": \" \"}, ] return jsonify({\"output\": output})", "\" + str(request.environ[\"REMOTE_ADDR\"]) ) if \"user\" in session and session[\"user\"]", "print(\"\\n User tracker thread started @ \", datetime.now()) while True:", "context_file.close() pre_process(user_id_key) return jsonify({\"output\": \"\" + text_context}) else: return render_template(\"session_out.html\")", "= \"illuin/camembert-base-fquad\" retriever = ElasticsearchRetriever(document_store=user_doc_store[user_id_key]) if user_settings[user_id_key][\"gpu\"] == \"on\": try:", "diff_time # For session timeout on 
client side return render_template(\"index.html\",", "os import threading import shutil from datetime import timedelta, datetime", "doc_type=\"_doc\", id=doc_id ) if processed[doc][\"text\"] == doc_text: # print(\"\\n There", ">= session_time: try: del current_users[user_id_key] del user_doc_store[user_id_key] del user_settings[user_id_key] shutil.rmtree(\"users/\"", "current_time = datetime.now() diff_min_sec = ( int(datetime.strftime(current_time, \"%M\")) - int(datetime.strftime(logged_on,", "file to update with new changes ..\") es.delete( index=\"user_\" +", "the context and uploads the pre-processed context to Elasticsearch #", "in unique_answers ): temp_dict = answers_dict[\"answers\"][i] remove = ( \"score\",", "user_settings[user_id_key][\"gpu\"] == \"on\": user_settings[user_id_key][\"gpu\"] = \"off\" else: user_settings[user_id_key][\"gpu\"] = \"on\"", "session, jsonify, url_for, redirect from haystack.document_store.elasticsearch import * from haystack.preprocessor.utils", "use_gpu=-1 ) else: reader = TransformersReader( model_name_or_path=model_path, tokenizer=model_path, use_gpu=-1 )", "model_name_or_path=model_path, tokenizer=model_path, use_gpu=-1 ) else: reader = TransformersReader( model_name_or_path=model_path, tokenizer=model_path,", "from haystack.preprocessor.cleaning import clean_wiki_text from haystack import Finder from haystack.retriever.sparse", "+ str(datetime.now()) + \" with IP: \" + str(request.environ[\"REMOTE_ADDR\"]) )", "after the session time ends and deletes user id from", "finder.get_answers( question=query_question, top_k_retriever=5, top_k_reader=5 ) unique_answers = list() output =", "Handles context documents uploads @app.route(\"/upload_file\", methods=[\"GET\", \"POST\"]) def upload_file(): global", "+ str(user_id_key)) user_index_size = es_stats[\"_all\"][\"primaries\"][\"store\"][\"size_in_bytes\"] if ( user_index_size == 208", "pre_process(user_id_key) return jsonify({\"output\": \"\" + text_context}) else: return 
render_template(\"session_out.html\") else:", "@app.route(\"/session_out\", methods=[\"POST\"]) def session_out(): session.pop(\"user\", None) return redirect(url_for(\"session_timeout\")) # Comment", "es = ( Elasticsearch() ) # Replace with Elasticsearch([\"http://elasticsearch:9200/\"], verify_certs=True)", "diff_sec = int(datetime.strftime(current_time, \"%S\")) - int( datetime.strftime(logged_on, \"%S\") ) diff_time", "dict, This works only when app tab is open until", "uploads @app.route(\"/upload_file\", methods=[\"GET\", \"POST\"]) def upload_file(): global current_users if \"user\"", "Handles removing of session identifier from session dict, This works", "redirect(url_for(\"session_timeout\")) # Handles context added through the textbox @app.route(\"/context\", methods=[\"POST\"])", "user_settings[user_id_key][\"model\"] == \"roberta\": model_path = ( \"deepset/roberta-base-squad2\" # Path of", "to es..\") processed_lst = [processed[doc]] user_doc_store[user_id_key].write_documents(processed_lst) os.remove(uploads_dir + str(processed[doc][\"meta\"][\"name\"])) except", "duplicate, So this document is not added ..\") add_doc =", "are deleted from them temp folder named with user_id under", "in case of building a docker image or running on", "list() output = list() if len(answers_dict[\"answers\"]) > 0: for i", "in range(len(processed)): try: # print(\"\\n Checking for duplicate docs ..\")", "hosted in Hugging Face ) elif user_settings[user_id_key][\"model\"] == \"bert\": model_path", "os.makedirs(\"users/\" + str(user_id)) os.makedirs(\"users/\" + str(user_id) + \"/uploads\") user_doc_store[user_id] =", "= ElasticsearchRetriever(document_store=user_doc_store[user_id_key]) if user_settings[user_id_key][\"gpu\"] == \"on\": try: reader = TransformersReader(", "import Flask, render_template, request, session, jsonify, url_for, redirect from haystack.document_store.elasticsearch", "add_doc: # print(\"\\n No duplicates found, so adding this to", "index=\"user_\" + 
str(user_id) ) # Change host = \"elasticsearch\" to", "+ str(user_id_key), doc_type=\"_doc\", id=doc_id ) if processed[doc][\"text\"] == doc_text: #", "= \"<KEY>\" app.permanent_session_lifetime = timedelta(minutes=session_time) user_id = 0 # User", "This works only when app tab is open until session", "split_paragraphs=True ) for doc in range(len(processed)): try: # print(\"\\n Checking", "= current_users[user_id_key] difference = current_time - user_time time_diff = divmod(", "request, session, jsonify, url_for, redirect from haystack.document_store.elasticsearch import * from", "= ( session_time * 60 ) - diff_time # For", "print(e) print(\"GPU not available. Inferencing on CPU\") reader = TransformersReader(", "index in Es is empty. 208 bytes is default index", "until session completes @app.route(\"/session_out\", methods=[\"POST\"]) def session_out(): session.pop(\"user\", None) return", "+ diff_sec time_left = ( session_time * 60 ) -", "w.r.t new session or already in session @app.route(\"/\") def home():", "current_users, session_time seconds_in_day = 24 * 60 * 60 print(\"\\n", "* 60 diff_sec = int(datetime.strftime(current_time, \"%S\")) - int( datetime.strftime(logged_on, \"%S\")", "user_doc_store[user_id_key] del user_settings[user_id_key] shutil.rmtree(\"users/\" + str(user_id_key)) es.indices.delete( index=\"user_\" + str(user_id_key),", "while True: for user_id_key in current_users.copy(): current_time = datetime.now() user_time", "so adding this to es..\") processed_lst = [processed[doc]] user_doc_store[user_id_key].write_documents(processed_lst) os.remove(uploads_dir", "not available. 
Inferencing on CPU\") reader = TransformersReader( model_name_or_path=model_path, tokenizer=model_path,", "e: print(\"Error: %s - %s.\" % (e.filename, e.strerror)) # print(\"\\n", "session: user_id_key = session[\"user\"] if user_id_key in current_users: for f", "\"users/\" + str(user_id_key) + \"/uploads/\" try: es_result = es.search( index=\"user_\"", "Elasticsearch([\"http://elasticsearch:9200/\"], verify_certs=True) to build docker image session_time = 60 #", "print(\"\\n There is a duplicate, So this document is not", "from haystack.document_store.elasticsearch import * from haystack.preprocessor.utils import convert_files_to_dicts from haystack.preprocessor.cleaning", "print(\"GPU not available. Inferencing on CPU\") reader = TransformersReader( model_name_or_path=model_path,", "= [ {\"answer\": \"No Answers found ..\", \"context\": \" \",", "with \"user_{user_id}\" # Documents & textual context are deleted from", "str(user_id_key), doc_type=\"_doc\", id=doc_id ) if processed[doc][\"text\"] == doc_text: # print(\"\\n", "\" @\", datetime.now()) session_timer = threading.Thread(target=user_session_timer) session_timer.start() # Handles users", "timedelta(minutes=session_time) user_id = 0 # User ID to keep track", "of the models hosted in Hugging Face ) elif user_settings[user_id_key][\"model\"]", "= set_finder(user_id_key) answers_dict = finder.get_answers( question=query_question, top_k_retriever=5, top_k_reader=5 ) unique_answers", "import TransformersReader from elasticsearch import Elasticsearch es = ( Elasticsearch()", "diff_min_sec = ( int(datetime.strftime(current_time, \"%M\")) - int(datetime.strftime(logged_on, \"%M\")) ) *", "output = [ {\"answer\": \"No Answers found ..\", \"context\": \"", "# Provides extracted answers for the posted question @app.route(\"/question\", methods=[\"POST\"])", "str(user_id) + \"/uploads\") else: os.makedirs(\"users/\" + str(user_id)) os.makedirs(\"users/\" + str(user_id)", "user_id # print(current_users) if not 
os.path.exists(\"users/\"): # Creating user temp", "in current_users: for f in request.files.getlist(\"file\"): f.save( os.path.join(\"users/\" + str(user_id_key)", "else: return render_template(\"session_out.html\") # Handles GPU setting changes. @app.route(\"/gpu\", methods=[\"POST\"])", "for user_id_key in current_users.copy(): current_time = datetime.now() user_time = current_users[user_id_key]", "from haystack.retriever.sparse import ElasticsearchRetriever from haystack.reader.transformers import TransformersReader from elasticsearch", "in session: user_id_key = session[\"user\"] if user_id_key in current_users: for", "from session dict, This works only when app tab is", ") no_docs = len(es_result[\"hits\"][\"hits\"]) except Exception as e: print(e) print(\"\\n", "user_settings[user_id_key] shutil.rmtree(\"users/\" + str(user_id_key)) es.indices.delete( index=\"user_\" + str(user_id_key), ignore=[400, 404]", "in current_users: user_id = session[\"user\"] logged_on = current_users[user_id] current_time =", "TransformersReader( model_name_or_path=model_path, tokenizer=model_path, use_gpu=-1 ) finder = Finder(reader, retriever) return", "current_users: text_context = request.form[\"context\"] context_file = open( \"users/\" + str(user_id_key)", "return finder # Handles deletion of context data completely from", "host=\"localhost\", index=\"user_\" + str(user_id) ) # Change host = \"elasticsearch\"", "processed[doc][\"text\"] == doc_text: # print(\"\\n There is a duplicate, So", "user_id_key in current_users: query_question = request.form[\"question\"] es_stats = es.indices.stats(index=\"user_\" +", "after uploading to Es def pre_process(user_id_key): uploads_dir = \"users/\" +", "haystack.document_store.elasticsearch import * from haystack.preprocessor.utils import convert_files_to_dicts from haystack.preprocessor.cleaning import", "else: return redirect(url_for(\"session_timeout\")) # Provides extracted answers for the posted", "context and uploads the 
pre-processed context to Elasticsearch # Each", "%s - %s.\" % (e.filename, e.strerror)) # print(\"\\n Deleted user:\",", "id=doc_id ) if processed[doc][\"text\"] == doc_text: # print(\"\\n There is", "+ \" with IP: \" + str(request.environ[\"REMOTE_ADDR\"]) ) if \"user\"", "redirect from haystack.document_store.elasticsearch import * from haystack.preprocessor.utils import convert_files_to_dicts from", "os.makedirs(\"users/\" + str(user_id)) os.makedirs(\"users/\" + str(user_id) + \"/uploads\") else: os.makedirs(\"users/\"", "session[\"user\"] if user_id_key in current_users: query_question = request.form[\"question\"] es_stats =", "user settings logged_on = current_users[user_id] current_time = datetime.now() diff_min_sec =", "TransformersReader( model_name_or_path=model_path, tokenizer=model_path, use_gpu=0 ) except Exception as e: print(e)", "user_time = current_users[user_id_key] difference = current_time - user_time time_diff =", "started @ \", datetime.now()) while True: for user_id_key in current_users.copy():", "in current_users: user_settings[user_id_key][\"model\"] = request.form[\"model\"] return jsonify({\"output\": \"model changed\"}) #", "top_k_retriever=5, top_k_reader=5 ) unique_answers = list() output = list() if", "= threading.Thread(target=user_session_timer) session_timer.start() # Handles users w.r.t new session or", "\"%M\")) ) * 60 diff_sec = int(datetime.strftime(current_time, \"%S\")) - int(", "Used to store user id with time of login user_doc_store", "time_left=time_left) # Handles context documents uploads @app.route(\"/upload_file\", methods=[\"GET\", \"POST\"]) def", "str(user_id_key), body={\"query\": {\"match_all\": {}}} ) no_docs = len(es_result[\"hits\"][\"hits\"]) except Exception", "except Exception as e: print(e) print(\"GPU not available. 
Inferencing on", "For session timeout on client side return render_template(\"index.html\", time_left=time_left) #", "without docs return jsonify({\"error\": \"add context\"}) finder = set_finder(user_id_key) answers_dict", "output = list() if len(answers_dict[\"answers\"]) > 0: for i in", "] return jsonify({\"output\": output}) else: return render_template(\"session_out.html\") # Handles GPU", "# User settings for GPU and Pre-trained models choice #", "time ends and deletes user id from dict def user_session_timer():", "set_finder(user_id_key): if user_settings[user_id_key][\"model\"] == \"roberta\": model_path = ( \"deepset/roberta-base-squad2\" #", "settings logged_on = current_users[user_id] current_time = datetime.now() diff_min_sec = (", "print(e) print(\"\\n no documents in es\") processed = convert_files_to_dicts( dir_path=uploads_dir,", "User tracker thread started @ \", datetime.now()) while True: for", "dict def user_session_timer(): global current_users, session_time seconds_in_day = 24 *", "add_doc = False os.remove(uploads_dir + str(processed[doc][\"meta\"][\"name\"])) break if add_doc: #", "\"document_id\", ) unique_answers.append(temp_dict[\"answer\"]) if temp_dict[\"meta\"][\"name\"] == \"context_file.txt\": temp_dict[\"meta\"][\"name\"] = \"Textual", "methods=[\"GET\", \"POST\"]) def upload_file(): global current_users if \"user\" in session:", ") pre_process(user_id_key) return render_template(\"index.html\") else: return redirect(url_for(\"session_timeout\")) else: return redirect(url_for(\"session_timeout\"))", "with a separate Elasticsearch index starting with \"user_{user_id}\" # Documents", "Documents & textual context are deleted from them temp folder", "global user_id, current_users, session_time logging.info( \"User connected at \" +", "\"No Answers found ..\", \"context\": \" \", \"meta\": \" \"},", "session dict, This works only when app tab is open", "208 bytes is default index size without docs return jsonify({\"error\":", "= 
[processed[doc]] user_doc_store[user_id_key].write_documents(processed_lst) os.remove(uploads_dir + str(processed[doc][\"meta\"][\"name\"])) # Handles setting up", "- int(datetime.strftime(logged_on, \"%M\")) ) * 60 diff_sec = int(datetime.strftime(current_time, \"%S\"))", "True: for user_id_key in current_users.copy(): current_time = datetime.now() user_time =", "to keep track w.r.t sessions and context data current_users =", "No duplicates found, so adding this to es..\") processed_lst =", "404] ) except OSError as e: print(\"Error: %s - %s.\"", "Deleted user:\", user_id_key, \" @\", datetime.now()) session_timer = threading.Thread(target=user_session_timer) session_timer.start()", ") # Change host = \"elasticsearch\" to build docker image", "context added through the textbox @app.route(\"/context\", methods=[\"POST\"]) def context(): if", "# Replace with Elasticsearch([\"http://elasticsearch:9200/\"], verify_certs=True) to build docker image session_time", "context documents uploads @app.route(\"/upload_file\", methods=[\"GET\", \"POST\"]) def upload_file(): global current_users", "Pre-trained models choice # Handles pre-processing the context and uploads", "str(user_id_key)) user_index_size = es_stats[\"_all\"][\"primaries\"][\"store\"][\"size_in_bytes\"] if ( user_index_size == 208 ):", "models(): if \"user\" in session: user_id_key = session[\"user\"] if user_id_key", "on CPU\") reader = TransformersReader( model_name_or_path=model_path, tokenizer=model_path, use_gpu=-1 ) else:", "is a duplicate, So this document is not added ..\")", "id with time of login user_doc_store = dict() # Document", "import Finder from haystack.retriever.sparse import ElasticsearchRetriever from haystack.reader.transformers import TransformersReader", "processed = convert_files_to_dicts( dir_path=uploads_dir, clean_func=clean_wiki_text, split_paragraphs=True ) for doc in", "to update with new changes ..\") es.delete( index=\"user_\" + str(user_id_key),", ") else: reader = 
TransformersReader( model_name_or_path=model_path, tokenizer=model_path, use_gpu=-1 ) finder", "import shutil from datetime import timedelta, datetime from flask import", "current_users if \"user\" in session: user_id_key = session[\"user\"] if user_id_key", "seconds_in_day + difference.seconds, 60 ) if time_diff[0] >= session_time: try:", "+ str(request.environ[\"REMOTE_ADDR\"]) ) if \"user\" in session and session[\"user\"] in", "user_id_key = session[\"user\"] if user_id_key in current_users: if user_settings[user_id_key][\"gpu\"] ==", "\"on\" return jsonify({\"output\": \"gpu status changed\"}) # Handles pre-trained model", "except Exception as e: print(e) print(\"\\n no documents in es\")", "image or running on WSGI server like gunicorn if __name__", "setting changes. @app.route(\"/gpu\", methods=[\"POST\"]) def gpu(): if \"user\" in session:", "jsonify({\"output\": \"gpu status changed\"}) # Handles pre-trained model choice setting", "uploads the pre-processed context to Elasticsearch # Each user is", "if not os.path.exists(\"users/\"): # Creating user temp dir for uploading", "in Hugging Face ) elif user_settings[user_id_key][\"model\"] == \"bert\": model_path =", "current_users[user_id_key] difference = current_time - user_time time_diff = divmod( difference.days", "Finder from haystack.retriever.sparse import ElasticsearchRetriever from haystack.reader.transformers import TransformersReader from", "jsonify, url_for, redirect from haystack.document_store.elasticsearch import * from haystack.preprocessor.utils import", "0 # User ID to keep track w.r.t sessions and", "question @app.route(\"/question\", methods=[\"POST\"]) def question(): if \"user\" in session: user_id_key", "Document store object of the user id user_settings = dict()", "\"/uploads/\" try: es_result = es.search( index=\"user_\" + str(user_id_key), body={\"query\": {\"match_all\":", "difference.days * seconds_in_day + difference.seconds, 60 ) if time_diff[0] >=", "Elasticsearch index starting 
with \"user_{user_id}\" # Documents & textual context", "image session_time = 60 # Session Timeout in Minutes app", "= es_result[\"hits\"][\"hits\"][each_doc][\"_id\"] if ( processed[doc][\"meta\"][\"name\"] == \"context_file.txt\" and doc_name ==", "side return render_template(\"index.html\", time_left=time_left) else: session.permanent = True current_time =", "dict() # Document store object of the user id user_settings", "render_template(\"session_out.html\") else: return redirect(url_for(\"session_timeout\")) # Provides extracted answers for the", "temp_dict[\"meta\"][\"name\"] output.append(temp_dict) for key in remove: if key in temp_dict:", "no documents in es\") processed = convert_files_to_dicts( dir_path=uploads_dir, clean_func=clean_wiki_text, split_paragraphs=True", "# Handles session timeout redirection @app.route(\"/session_timeout\") def session_timeout(): return render_template(\"session_out.html\")", "= user_id # print(current_users) if not os.path.exists(\"users/\"): # Creating user", "== \"on\": try: reader = TransformersReader( model_name_or_path=model_path, tokenizer=model_path, use_gpu=0 )", "range(len(processed)): try: # print(\"\\n Checking for duplicate docs ..\") add_doc", "haystack.retriever.sparse import ElasticsearchRetriever from haystack.reader.transformers import TransformersReader from elasticsearch import", "= list() if len(answers_dict[\"answers\"]) > 0: for i in range(len(answers_dict[\"answers\"])):", "convert_files_to_dicts( dir_path=uploads_dir, clean_func=clean_wiki_text, split_paragraphs=True ) for doc in range(len(processed)): try:", "# Used to store user id with time of login", "as e: print(e) print(\"\\n no documents in es\") processed =", "at \" + str(datetime.now()) + \" with IP: \" +", "a separate Elasticsearch index starting with \"user_{user_id}\" # Documents &", "datetime.now() user_id += 1 current_users[user_id] = current_time session[\"user\"] = user_id", "verify_certs=True) to build docker image session_time = 60 
# Session", "docs ..\") add_doc = True for each_doc in range(no_docs): doc_text", "str(user_id_key)) es.indices.delete( index=\"user_\" + str(user_id_key), ignore=[400, 404] ) except OSError", "if user_id_key in current_users: for f in request.files.getlist(\"file\"): f.save( os.path.join(\"users/\"", "Handles pre-trained model choice setting changes. @app.route(\"/models\", methods=[\"POST\"]) def models():", "session[\"user\"] if user_id_key in current_users: for f in request.files.getlist(\"file\"): f.save(", "for f in request.files.getlist(\"file\"): f.save( os.path.join(\"users/\" + str(user_id_key) + \"/uploads\",", "no_docs = len(es_result[\"hits\"][\"hits\"]) except Exception as e: print(e) print(\"\\n no", "question(): if \"user\" in session: user_id_key = session[\"user\"] if user_id_key", "for each_doc in range(no_docs): doc_text = es_result[\"hits\"][\"hits\"][each_doc][\"_source\"][\"text\"] doc_name = es_result[\"hits\"][\"hits\"][each_doc][\"_source\"][\"name\"]", "( session_time * 60 ) - diff_time # For session", "docs return jsonify({\"error\": \"add context\"}) finder = set_finder(user_id_key) answers_dict =", "output.append(temp_dict) for key in remove: if key in temp_dict: del", "= Flask(__name__) app.secret_key = \"<KEY>\" app.permanent_session_lifetime = timedelta(minutes=session_time) user_id =", ") for doc in range(len(processed)): try: # print(\"\\n Checking for", "+ text_context}) else: return render_template(\"session_out.html\") else: return redirect(url_for(\"session_timeout\")) # Provides", "doc_name = es_result[\"hits\"][\"hits\"][each_doc][\"_source\"][\"name\"] doc_id = es_result[\"hits\"][\"hits\"][each_doc][\"_id\"] if ( processed[doc][\"meta\"][\"name\"] ==", "them temp folder named with user_id under users dir after", "if key in temp_dict: del temp_dict[key] else: output = [", "user_settings[user_id_key][\"gpu\"] = \"off\" else: user_settings[user_id_key][\"gpu\"] = \"on\" return jsonify({\"output\": \"gpu", "difference = 
current_time - user_time time_diff = divmod( difference.days *", "Hugging Face ) elif user_settings[user_id_key][\"model\"] == \"bert\": model_path = \"deepset/bert-large-uncased-whole-word-masking-squad2\"", "str(user_id_key) + \"/uploads/\" try: es_result = es.search( index=\"user_\" + str(user_id_key),", "global current_users, session_time seconds_in_day = 24 * 60 * 60", "in current_users.copy(): current_time = datetime.now() user_time = current_users[user_id_key] difference =", "try: del current_users[user_id_key] del user_doc_store[user_id_key] del user_settings[user_id_key] shutil.rmtree(\"users/\" + str(user_id_key))", "user_id_key in current_users: user_settings[user_id_key][\"model\"] = request.form[\"model\"] return jsonify({\"output\": \"model changed\"})", "changed\"}) # Handles session timeout redirection @app.route(\"/session_timeout\") def session_timeout(): return", "is open until session completes @app.route(\"/session_out\", methods=[\"POST\"]) def session_out(): session.pop(\"user\",", "Handles GPU setting changes. 
@app.route(\"/gpu\", methods=[\"POST\"]) def gpu(): if \"user\"", "elif user_settings[user_id_key][\"model\"] == \"distilbert\": model_path = \"distilbert-base-uncased-distilled-squad\" else: model_path =", "reader = TransformersReader( model_name_or_path=model_path, tokenizer=model_path, use_gpu=-1 ) else: reader =", "methods=[\"POST\"]) def session_out(): session.pop(\"user\", None) return redirect(url_for(\"session_timeout\")) # Comment the", "bytes is default index size without docs return jsonify({\"error\": \"add", "import os import threading import shutil from datetime import timedelta,", "> 0: for i in range(len(answers_dict[\"answers\"])): if ( answers_dict[\"answers\"][i][\"answer\"] is", "Session Timeout in Minutes app = Flask(__name__) app.secret_key = \"<KEY>\"", "and doc_name == \"context_file.txt\" ): # print(\"Deleting context file to", "from flask import Flask, render_template, request, session, jsonify, url_for, redirect", "= int(datetime.strftime(current_time, \"%S\")) - int( datetime.strftime(logged_on, \"%S\") ) diff_time =", "user_time time_diff = divmod( difference.days * seconds_in_day + difference.seconds, 60", "# Handles context added through the textbox @app.route(\"/context\", methods=[\"POST\"]) def", "of context data completely from the server after the session", "and uploads the pre-processed context to Elasticsearch # Each user", "( \"deepset/roberta-base-squad2\" # Path of the models hosted in Hugging", "question=query_question, top_k_retriever=5, top_k_reader=5 ) unique_answers = list() output = list()", "len(es_result[\"hits\"][\"hits\"]) except Exception as e: print(e) print(\"\\n no documents in", "return redirect(url_for(\"session_timeout\")) # Provides extracted answers for the posted question", "top_k_reader=5 ) unique_answers = list() output = list() if len(answers_dict[\"answers\"])", "add_doc = True for each_doc in range(no_docs): doc_text = es_result[\"hits\"][\"hits\"][each_doc][\"_source\"][\"text\"]", "int( 
datetime.strftime(logged_on, \"%S\") ) diff_time = diff_min_sec + diff_sec time_left", "for i in range(len(answers_dict[\"answers\"])): if ( answers_dict[\"answers\"][i][\"answer\"] is not None", "try: es_result = es.search( index=\"user_\" + str(user_id_key), body={\"query\": {\"match_all\": {}}}", "documents in es\") processed_lst = [processed[doc]] user_doc_store[user_id_key].write_documents(processed_lst) os.remove(uploads_dir + str(processed[doc][\"meta\"][\"name\"]))", "\"meta\": \" \"}, ] return jsonify({\"output\": output}) else: return render_template(\"session_out.html\")", "Es def pre_process(user_id_key): uploads_dir = \"users/\" + str(user_id_key) + \"/uploads/\"", "convert_files_to_dicts from haystack.preprocessor.cleaning import clean_wiki_text from haystack import Finder from", "es_result[\"hits\"][\"hits\"][each_doc][\"_id\"] if ( processed[doc][\"meta\"][\"name\"] == \"context_file.txt\" and doc_name == \"context_file.txt\"", "docker image user_settings[user_id] = { \"gpu\": \"off\", \"model\": \"roberta\", }", "Minutes app = Flask(__name__) app.secret_key = \"<KEY>\" app.permanent_session_lifetime = timedelta(minutes=session_time)", "from haystack.preprocessor.utils import convert_files_to_dicts from haystack.preprocessor.cleaning import clean_wiki_text from haystack", "or already in session @app.route(\"/\") def home(): global user_id, current_users,", "context data current_users = dict() # Used to store user", "= datetime.now() diff_min_sec = ( int(datetime.strftime(current_time, \"%M\")) - int(datetime.strftime(logged_on, \"%M\"))", "+ str(user_id_key) + \"/uploads/\" try: es_result = es.search( index=\"user_\" +", "os.path.join(\"users/\" + str(user_id_key) + \"/uploads\", f.filename) ) pre_process(user_id_key) return render_template(\"index.html\")", "{\"answer\": \"No Answers found ..\", \"context\": \" \", \"meta\": \"", "for GPU and Pre-trained models choice # Handles pre-processing the", "= session[\"user\"] if user_id_key in 
current_users: user_settings[user_id_key][\"model\"] = request.form[\"model\"] return", "return redirect(url_for(\"session_timeout\")) # Comment the below block in case of", "deletes user id from dict def user_session_timer(): global current_users, session_time", "Creating user temp dir for uploading context os.makedirs(\"users/\" + str(user_id))", "user_settings[user_id_key][\"gpu\"] = \"on\" return jsonify({\"output\": \"gpu status changed\"}) # Handles", "datetime.strftime(logged_on, \"%S\") ) diff_time = diff_min_sec + diff_sec time_left =", "def gpu(): if \"user\" in session: user_id_key = session[\"user\"] if", "text_context = request.form[\"context\"] context_file = open( \"users/\" + str(user_id_key) +", "+ str(user_id) + \"/uploads\") user_doc_store[user_id] = ElasticsearchDocumentStore( host=\"localhost\", index=\"user_\" +", "else: return redirect(url_for(\"session_timeout\")) else: return redirect(url_for(\"session_timeout\")) # Handles context added", "def context(): if \"user\" in session: user_id_key = session[\"user\"] if", "diff_sec time_left = ( session_time * 60 ) - diff_time", "= True current_time = datetime.now() user_id += 1 current_users[user_id] =", "session timeout on client side return render_template(\"index.html\", time_left=time_left) # Handles", "is empty. 
208 bytes is default index size without docs", "request.form[\"context\"] context_file = open( \"users/\" + str(user_id_key) + \"/uploads/context_file.txt\", \"w\"", "not in unique_answers ): temp_dict = answers_dict[\"answers\"][i] remove = (", "\"score\", \"probability\", \"offset_start\", \"offset_end\", \"document_id\", ) unique_answers.append(temp_dict[\"answer\"]) if temp_dict[\"meta\"][\"name\"] ==", "session_time seconds_in_day = 24 * 60 * 60 print(\"\\n User", "= session[\"user\"] logged_on = current_users[user_id] current_time = datetime.now() diff_min_sec =", "( user_index_size == 208 ): # To check if index", "= list() output = list() if len(answers_dict[\"answers\"]) > 0: for", "@\", datetime.now()) session_timer = threading.Thread(target=user_session_timer) session_timer.start() # Handles users w.r.t", "es\") processed = convert_files_to_dicts( dir_path=uploads_dir, clean_func=clean_wiki_text, split_paragraphs=True ) for doc", "elasticsearch import Elasticsearch es = ( Elasticsearch() ) # Replace", "if temp_dict[\"meta\"][\"name\"] == \"context_file.txt\": temp_dict[\"meta\"][\"name\"] = \"Textual Context\" temp_dict[\"meta\"] =", "id user_settings = dict() # User settings for GPU and", "Handles deletion of context data completely from the server after", "divmod( difference.days * seconds_in_day + difference.seconds, 60 ) if time_diff[0]", "to Elasticsearch # Each user is assigned with a separate", "print(\"\\n no documents in es\") processed = convert_files_to_dicts( dir_path=uploads_dir, clean_func=clean_wiki_text,", "= es.search( index=\"user_\" + str(user_id_key), body={\"query\": {\"match_all\": {}}} ) no_docs", "Path of the models hosted in Hugging Face ) elif", "import Elasticsearch es = ( Elasticsearch() ) # Replace with", "app.permanent_session_lifetime = timedelta(minutes=session_time) user_id = 0 # User ID to", "setting up reader and retriever def set_finder(user_id_key): if user_settings[user_id_key][\"model\"] ==", "\"distilbert\": 
model_path = \"distilbert-base-uncased-distilled-squad\" else: model_path = \"illuin/camembert-base-fquad\" retriever =", "users w.r.t new session or already in session @app.route(\"/\") def", "output}) else: return render_template(\"session_out.html\") # Handles GPU setting changes. @app.route(\"/gpu\",", "f in request.files.getlist(\"file\"): f.save( os.path.join(\"users/\" + str(user_id_key) + \"/uploads\", f.filename)", "if user_id_key in current_users: text_context = request.form[\"context\"] context_file = open(", "text_context}) else: return render_template(\"session_out.html\") else: return redirect(url_for(\"session_timeout\")) # Provides extracted", ") except OSError as e: print(\"Error: %s - %s.\" %", "@ \", datetime.now()) while True: for user_id_key in current_users.copy(): current_time", "range(no_docs): doc_text = es_result[\"hits\"][\"hits\"][each_doc][\"_source\"][\"text\"] doc_name = es_result[\"hits\"][\"hits\"][each_doc][\"_source\"][\"name\"] doc_id = es_result[\"hits\"][\"hits\"][each_doc][\"_id\"]", "user:\", user_id_key, \" @\", datetime.now()) session_timer = threading.Thread(target=user_session_timer) session_timer.start() #", "# print(current_users) if not os.path.exists(\"users/\"): # Creating user temp dir", "timeout on client side return render_template(\"index.html\", time_left=time_left) else: session.permanent =", "on WSGI server like gunicorn if __name__ == \"__main__\": app.run(host=\"0.0.0.0\")", "\"add context\"}) finder = set_finder(user_id_key) answers_dict = finder.get_answers( question=query_question, top_k_retriever=5,", "unique_answers.append(temp_dict[\"answer\"]) if temp_dict[\"meta\"][\"name\"] == \"context_file.txt\": temp_dict[\"meta\"][\"name\"] = \"Textual Context\" temp_dict[\"meta\"]", "haystack.reader.transformers import TransformersReader from elasticsearch import Elasticsearch es = (", "else: return render_template(\"session_out.html\") else: return redirect(url_for(\"session_timeout\")) # Provides extracted 
answers", "changes. @app.route(\"/gpu\", methods=[\"POST\"]) def gpu(): if \"user\" in session: user_id_key", "diff_time = diff_min_sec + diff_sec time_left = ( session_time *", ") elif user_settings[user_id_key][\"model\"] == \"bert\": model_path = \"deepset/bert-large-uncased-whole-word-masking-squad2\" elif user_settings[user_id_key][\"model\"]", "return jsonify({\"output\": \"model changed\"}) # Handles session timeout redirection @app.route(\"/session_timeout\")", "client side return render_template(\"index.html\", time_left=time_left) # Handles context documents uploads", "\"User connected at \" + str(datetime.now()) + \" with IP:", "208 ): # To check if index in Es is", "\"off\", \"model\": \"roberta\", } # Initial user settings logged_on =", "current_users[user_id] current_time = datetime.now() diff_min_sec = ( int(datetime.strftime(current_time, \"%M\")) -", "if processed[doc][\"text\"] == doc_text: # print(\"\\n There is a duplicate,", "session[\"user\"] if user_id_key in current_users: text_context = request.form[\"context\"] context_file =", "= open( \"users/\" + str(user_id_key) + \"/uploads/context_file.txt\", \"w\" ) context_file.write(text_context)", "keep track w.r.t sessions and context data current_users = dict()", "key in remove: if key in temp_dict: del temp_dict[key] else:", "jsonify({\"output\": output}) else: return render_template(\"session_out.html\") # Handles GPU setting changes.", "user_id, current_users, session_time logging.info( \"User connected at \" + str(datetime.now())", "es.indices.delete( index=\"user_\" + str(user_id_key), ignore=[400, 404] ) except OSError as", "\"context_file.txt\": temp_dict[\"meta\"][\"name\"] = \"Textual Context\" temp_dict[\"meta\"] = temp_dict[\"meta\"][\"name\"] output.append(temp_dict) for", "store object of the user id user_settings = dict() #", "user_id under users dir after uploading to Es def pre_process(user_id_key):", "else: reader = TransformersReader( model_name_or_path=model_path, 
tokenizer=model_path, use_gpu=-1 ) finder =", "request.form[\"model\"] return jsonify({\"output\": \"model changed\"}) # Handles session timeout redirection", "body={\"query\": {\"match_all\": {}}} ) no_docs = len(es_result[\"hits\"][\"hits\"]) except Exception as", "60 diff_sec = int(datetime.strftime(current_time, \"%S\")) - int( datetime.strftime(logged_on, \"%S\") )", "and answers_dict[\"answers\"][i][\"answer\"] not in unique_answers ): temp_dict = answers_dict[\"answers\"][i] remove", "# Change host = \"elasticsearch\" to build docker image user_settings[user_id]", "session_time logging.info( \"User connected at \" + str(datetime.now()) + \"", "= True for each_doc in range(no_docs): doc_text = es_result[\"hits\"][\"hits\"][each_doc][\"_source\"][\"text\"] doc_name", "temp_dict = answers_dict[\"answers\"][i] remove = ( \"score\", \"probability\", \"offset_start\", \"offset_end\",", "to Es def pre_process(user_id_key): uploads_dir = \"users/\" + str(user_id_key) +", "user_doc_store[user_id_key].write_documents(processed_lst) os.remove(uploads_dir + str(processed[doc][\"meta\"][\"name\"])) except Exception as e: print(e) #", ") unique_answers = list() output = list() if len(answers_dict[\"answers\"]) >", "current_time session[\"user\"] = user_id # print(current_users) if not os.path.exists(\"users/\"): #", "separate Elasticsearch index starting with \"user_{user_id}\" # Documents & textual", "try: # print(\"\\n Checking for duplicate docs ..\") add_doc =", "the models hosted in Hugging Face ) elif user_settings[user_id_key][\"model\"] ==", "== doc_text: # print(\"\\n There is a duplicate, So this", "if user_id_key in current_users: user_settings[user_id_key][\"model\"] = request.form[\"model\"] return jsonify({\"output\": \"model", "dir after uploading to Es def pre_process(user_id_key): uploads_dir = \"users/\"", "\"Textual Context\" temp_dict[\"meta\"] = temp_dict[\"meta\"][\"name\"] output.append(temp_dict) for key in remove:", "user id user_settings = dict() 
# User settings for GPU", "index=\"user_\" + str(user_id_key), doc_type=\"_doc\", id=doc_id ) if processed[doc][\"text\"] == doc_text:", "time of login user_doc_store = dict() # Document store object", "use_gpu=0 ) except Exception as e: print(e) print(\"GPU not available.", "Replace with Elasticsearch([\"http://elasticsearch:9200/\"], verify_certs=True) to build docker image session_time =", "+ str(user_id)) os.makedirs(\"users/\" + str(user_id) + \"/uploads\") else: os.makedirs(\"users/\" +", "on client side return render_template(\"index.html\", time_left=time_left) # Handles context documents", "on client side return render_template(\"index.html\", time_left=time_left) else: session.permanent = True", ") * 60 diff_sec = int(datetime.strftime(current_time, \"%S\")) - int( datetime.strftime(logged_on,", "= es_result[\"hits\"][\"hits\"][each_doc][\"_source\"][\"text\"] doc_name = es_result[\"hits\"][\"hits\"][each_doc][\"_source\"][\"name\"] doc_id = es_result[\"hits\"][\"hits\"][each_doc][\"_id\"] if (", "of the user id user_settings = dict() # User settings", "user_id_key = session[\"user\"] if user_id_key in current_users: query_question = request.form[\"question\"]", "import threading import shutil from datetime import timedelta, datetime from", "new changes ..\") es.delete( index=\"user_\" + str(user_id_key), doc_type=\"_doc\", id=doc_id )", "IP: \" + str(request.environ[\"REMOTE_ADDR\"]) ) if \"user\" in session and", "jsonify({\"output\": \"model changed\"}) # Handles session timeout redirection @app.route(\"/session_timeout\") def", "if len(answers_dict[\"answers\"]) > 0: for i in range(len(answers_dict[\"answers\"])): if (", "= \"distilbert-base-uncased-distilled-squad\" else: model_path = \"illuin/camembert-base-fquad\" retriever = ElasticsearchRetriever(document_store=user_doc_store[user_id_key]) if", "with IP: \" + str(request.environ[\"REMOTE_ADDR\"]) ) if \"user\" in session", "* seconds_in_day + difference.seconds, 60 ) if time_diff[0] >= 
session_time:", "session_time: try: del current_users[user_id_key] del user_doc_store[user_id_key] del user_settings[user_id_key] shutil.rmtree(\"users/\" +", "\"<KEY>\" app.permanent_session_lifetime = timedelta(minutes=session_time) user_id = 0 # User ID", "render_template(\"index.html\", time_left=time_left) else: session.permanent = True current_time = datetime.now() user_id", "client side return render_template(\"index.html\", time_left=time_left) else: session.permanent = True current_time", "settings for GPU and Pre-trained models choice # Handles pre-processing", "( Elasticsearch() ) # Replace with Elasticsearch([\"http://elasticsearch:9200/\"], verify_certs=True) to build", "): # To check if index in Es is empty.", "doc in range(len(processed)): try: # print(\"\\n Checking for duplicate docs", "= request.form[\"question\"] es_stats = es.indices.stats(index=\"user_\" + str(user_id_key)) user_index_size = es_stats[\"_all\"][\"primaries\"][\"store\"][\"size_in_bytes\"]", "return render_template(\"session_out.html\") # Handles removing of session identifier from session", "+= 1 current_users[user_id] = current_time session[\"user\"] = user_id # print(current_users)", "context os.makedirs(\"users/\" + str(user_id)) os.makedirs(\"users/\" + str(user_id) + \"/uploads\") else:", "def home(): global user_id, current_users, session_time logging.info( \"User connected at", "index starting with \"user_{user_id}\" # Documents & textual context are", "= \"off\" else: user_settings[user_id_key][\"gpu\"] = \"on\" return jsonify({\"output\": \"gpu status", "# print(\"\\n There is a duplicate, So this document is", "temp folder named with user_id under users dir after uploading", "str(processed[doc][\"meta\"][\"name\"])) # Handles setting up reader and retriever def set_finder(user_id_key):", "Timeout in Minutes app = Flask(__name__) app.secret_key = \"<KEY>\" app.permanent_session_lifetime", "session_timer = threading.Thread(target=user_session_timer) session_timer.start() 
# Handles users w.r.t new session", "not os.path.exists(\"users/\"): # Creating user temp dir for uploading context", "try: reader = TransformersReader( model_name_or_path=model_path, tokenizer=model_path, use_gpu=0 ) except Exception", "del temp_dict[key] else: output = [ {\"answer\": \"No Answers found", "block in case of building a docker image or running", "print(current_users) if not os.path.exists(\"users/\"): # Creating user temp dir for", "query_question = request.form[\"question\"] es_stats = es.indices.stats(index=\"user_\" + str(user_id_key)) user_index_size =", "doc_text = es_result[\"hits\"][\"hits\"][each_doc][\"_source\"][\"text\"] doc_name = es_result[\"hits\"][\"hits\"][each_doc][\"_source\"][\"name\"] doc_id = es_result[\"hits\"][\"hits\"][each_doc][\"_id\"] if", "= \"Textual Context\" temp_dict[\"meta\"] = temp_dict[\"meta\"][\"name\"] output.append(temp_dict) for key in", "seconds_in_day = 24 * 60 * 60 print(\"\\n User tracker", "removing of session identifier from session dict, This works only", "sessions and context data current_users = dict() # Used to", "\"offset_start\", \"offset_end\", \"document_id\", ) unique_answers.append(temp_dict[\"answer\"]) if temp_dict[\"meta\"][\"name\"] == \"context_file.txt\": temp_dict[\"meta\"][\"name\"]", "context file to update with new changes ..\") es.delete( index=\"user_\"", "model_path = \"distilbert-base-uncased-distilled-squad\" else: model_path = \"illuin/camembert-base-fquad\" retriever = ElasticsearchRetriever(document_store=user_doc_store[user_id_key])", "finder = set_finder(user_id_key) answers_dict = finder.get_answers( question=query_question, top_k_retriever=5, top_k_reader=5 )", "deleted from them temp folder named with user_id under users", "retriever def set_finder(user_id_key): if user_settings[user_id_key][\"model\"] == \"roberta\": model_path = (", "current_time - user_time time_diff = divmod( difference.days * seconds_in_day +", "docker image session_time = 60 # Session Timeout in 
Minutes", ") diff_time = diff_min_sec + diff_sec time_left = ( session_time", "request.files.getlist(\"file\"): f.save( os.path.join(\"users/\" + str(user_id_key) + \"/uploads\", f.filename) ) pre_process(user_id_key)", "w.r.t sessions and context data current_users = dict() # Used", "default index size without docs return jsonify({\"error\": \"add context\"}) finder", "def session_out(): session.pop(\"user\", None) return redirect(url_for(\"session_timeout\")) # Comment the below", "[processed[doc]] user_doc_store[user_id_key].write_documents(processed_lst) os.remove(uploads_dir + str(processed[doc][\"meta\"][\"name\"])) # Handles setting up reader", "processed_lst = [processed[doc]] user_doc_store[user_id_key].write_documents(processed_lst) os.remove(uploads_dir + str(processed[doc][\"meta\"][\"name\"])) except Exception as", "del current_users[user_id_key] del user_doc_store[user_id_key] del user_settings[user_id_key] shutil.rmtree(\"users/\" + str(user_id_key)) es.indices.delete(", "@app.route(\"/\") def home(): global user_id, current_users, session_time logging.info( \"User connected", "os.makedirs(\"users/\" + str(user_id) + \"/uploads\") else: os.makedirs(\"users/\" + str(user_id)) os.makedirs(\"users/\"", "+ str(user_id)) os.makedirs(\"users/\" + str(user_id) + \"/uploads\") user_doc_store[user_id] = ElasticsearchDocumentStore(", "# Handles pre-trained model choice setting changes. 
@app.route(\"/models\", methods=[\"POST\"]) def", "\"offset_end\", \"document_id\", ) unique_answers.append(temp_dict[\"answer\"]) if temp_dict[\"meta\"][\"name\"] == \"context_file.txt\": temp_dict[\"meta\"][\"name\"] =", "in Minutes app = Flask(__name__) app.secret_key = \"<KEY>\" app.permanent_session_lifetime =", "session_out(): session.pop(\"user\", None) return redirect(url_for(\"session_timeout\")) # Comment the below block", "= ( \"deepset/roberta-base-squad2\" # Path of the models hosted in", "+ str(user_id_key) + \"/uploads\", f.filename) ) pre_process(user_id_key) return render_template(\"index.html\") else:", "== 208 ): # To check if index in Es", "jsonify({\"error\": \"add context\"}) finder = set_finder(user_id_key) answers_dict = finder.get_answers( question=query_question,", "\" with IP: \" + str(request.environ[\"REMOTE_ADDR\"]) ) if \"user\" in", "ElasticsearchRetriever from haystack.reader.transformers import TransformersReader from elasticsearch import Elasticsearch es", "case of building a docker image or running on WSGI", "to store user id with time of login user_doc_store =", "es_result[\"hits\"][\"hits\"][each_doc][\"_source\"][\"name\"] doc_id = es_result[\"hits\"][\"hits\"][each_doc][\"_id\"] if ( processed[doc][\"meta\"][\"name\"] == \"context_file.txt\" and", "# Path of the models hosted in Hugging Face )", "return jsonify({\"output\": \"\" + text_context}) else: return render_template(\"session_out.html\") else: return", "model_name_or_path=model_path, tokenizer=model_path, use_gpu=0 ) except Exception as e: print(e) print(\"GPU", "and Pre-trained models choice # Handles pre-processing the context and", "( answers_dict[\"answers\"][i][\"answer\"] is not None and answers_dict[\"answers\"][i][\"answer\"] not in unique_answers", "track w.r.t sessions and context data current_users = dict() #", "else: return redirect(url_for(\"session_timeout\")) # Handles context added through the textbox", "Handles setting up reader and retriever def 
set_finder(user_id_key): if user_settings[user_id_key][\"model\"]", "new session or already in session @app.route(\"/\") def home(): global", "and session[\"user\"] in current_users: user_id = session[\"user\"] logged_on = current_users[user_id]", "\"user\" in session: user_id_key = session[\"user\"] if user_id_key in current_users:", "uploading context os.makedirs(\"users/\" + str(user_id)) os.makedirs(\"users/\" + str(user_id) + \"/uploads\")", "choice # Handles pre-processing the context and uploads the pre-processed", "in range(no_docs): doc_text = es_result[\"hits\"][\"hits\"][each_doc][\"_source\"][\"text\"] doc_name = es_result[\"hits\"][\"hits\"][each_doc][\"_source\"][\"name\"] doc_id =", "if add_doc: # print(\"\\n No duplicates found, so adding this", "jsonify({\"output\": \"\" + text_context}) else: return render_template(\"session_out.html\") else: return redirect(url_for(\"session_timeout\"))", "in session @app.route(\"/\") def home(): global user_id, current_users, session_time logging.info(", "current_users = dict() # Used to store user id with", "the pre-processed context to Elasticsearch # Each user is assigned", "home(): global user_id, current_users, session_time logging.info( \"User connected at \"", "in session and session[\"user\"] in current_users: user_id = session[\"user\"] logged_on", "60 ) - diff_time # For session timeout on client", "@app.route(\"/models\", methods=[\"POST\"]) def models(): if \"user\" in session: user_id_key =", "== \"distilbert\": model_path = \"distilbert-base-uncased-distilled-squad\" else: model_path = \"illuin/camembert-base-fquad\" retriever", "print(\"\\n No duplicates found, so adding this to es..\") processed_lst", "dict() # User settings for GPU and Pre-trained models choice", "model_path = ( \"deepset/roberta-base-squad2\" # Path of the models hosted", "print(\"Deleting context file to update with new changes ..\") es.delete(", "starting with \"user_{user_id}\" # Documents & textual context are deleted", "= 
es_result[\"hits\"][\"hits\"][each_doc][\"_source\"][\"name\"] doc_id = es_result[\"hits\"][\"hits\"][each_doc][\"_id\"] if ( processed[doc][\"meta\"][\"name\"] == \"context_file.txt\"", "of building a docker image or running on WSGI server", "Flask(__name__) app.secret_key = \"<KEY>\" app.permanent_session_lifetime = timedelta(minutes=session_time) user_id = 0", "redirect(url_for(\"session_timeout\")) # Provides extracted answers for the posted question @app.route(\"/question\",", "app.secret_key = \"<KEY>\" app.permanent_session_lifetime = timedelta(minutes=session_time) user_id = 0 #", "ignore=[400, 404] ) except OSError as e: print(\"Error: %s -", "posted question @app.route(\"/question\", methods=[\"POST\"]) def question(): if \"user\" in session:", "break if add_doc: # print(\"\\n No duplicates found, so adding", "url_for, redirect from haystack.document_store.elasticsearch import * from haystack.preprocessor.utils import convert_files_to_dicts", "model_path = \"illuin/camembert-base-fquad\" retriever = ElasticsearchRetriever(document_store=user_doc_store[user_id_key]) if user_settings[user_id_key][\"gpu\"] == \"on\":", "current_users: user_settings[user_id_key][\"model\"] = request.form[\"model\"] return jsonify({\"output\": \"model changed\"}) # Handles", "{}}} ) no_docs = len(es_result[\"hits\"][\"hits\"]) except Exception as e: print(e)", "+ str(user_id) ) # Change host = \"elasticsearch\" to build", "\" \"}, ] return jsonify({\"output\": output}) else: return render_template(\"session_out.html\") #", "So this document is not added ..\") add_doc = False", "f.filename) ) pre_process(user_id_key) return render_template(\"index.html\") else: return redirect(url_for(\"session_timeout\")) else: return", "app tab is open until session completes @app.route(\"/session_out\", methods=[\"POST\"]) def", "0: for i in range(len(answers_dict[\"answers\"])): if ( answers_dict[\"answers\"][i][\"answer\"] is not", "open until session completes @app.route(\"/session_out\", 
methods=[\"POST\"]) def session_out(): session.pop(\"user\", None)", "session_time * 60 ) - diff_time # For session timeout", "if user_id_key in current_users: query_question = request.form[\"question\"] es_stats = es.indices.stats(index=\"user_\"", "extracted answers for the posted question @app.route(\"/question\", methods=[\"POST\"]) def question():", "model choice setting changes. @app.route(\"/models\", methods=[\"POST\"]) def models(): if \"user\"", "the textbox @app.route(\"/context\", methods=[\"POST\"]) def context(): if \"user\" in session:", "building a docker image or running on WSGI server like", ") if processed[doc][\"text\"] == doc_text: # print(\"\\n There is a", "else: os.makedirs(\"users/\" + str(user_id)) os.makedirs(\"users/\" + str(user_id) + \"/uploads\") user_doc_store[user_id]", "= False os.remove(uploads_dir + str(processed[doc][\"meta\"][\"name\"])) break if add_doc: # print(\"\\n", "deletion of context data completely from the server after the", "session[\"user\"] logged_on = current_users[user_id] current_time = datetime.now() diff_min_sec = (", "+ str(user_id_key), body={\"query\": {\"match_all\": {}}} ) no_docs = len(es_result[\"hits\"][\"hits\"]) except", "clean_wiki_text from haystack import Finder from haystack.retriever.sparse import ElasticsearchRetriever from", "24 * 60 * 60 print(\"\\n User tracker thread started", "user_doc_store = dict() # Document store object of the user", "# Handles removing of session identifier from session dict, This", "completely from the server after the session time ends and", "the posted question @app.route(\"/question\", methods=[\"POST\"]) def question(): if \"user\" in", "user_settings[user_id] = { \"gpu\": \"off\", \"model\": \"roberta\", } # Initial", "Flask, render_template, request, session, jsonify, url_for, redirect from haystack.document_store.elasticsearch import", "{\"match_all\": {}}} ) no_docs = len(es_result[\"hits\"][\"hits\"]) except Exception as e:", "session timeout redirection 
@app.route(\"/session_timeout\") def session_timeout(): return render_template(\"session_out.html\") # Handles", "in current_users: text_context = request.form[\"context\"] context_file = open( \"users/\" +", "global current_users if \"user\" in session: user_id_key = session[\"user\"] if", "haystack import Finder from haystack.retriever.sparse import ElasticsearchRetriever from haystack.reader.transformers import", "* 60 print(\"\\n User tracker thread started @ \", datetime.now())", "session: user_id_key = session[\"user\"] if user_id_key in current_users: user_settings[user_id_key][\"model\"] =", "or running on WSGI server like gunicorn if __name__ ==", "Exception as e: print(e) # print(\"\\n no documents in es\")", "OSError as e: print(\"Error: %s - %s.\" % (e.filename, e.strerror))", "= len(es_result[\"hits\"][\"hits\"]) except Exception as e: print(e) print(\"\\n no documents", "context\"}) finder = set_finder(user_id_key) answers_dict = finder.get_answers( question=query_question, top_k_retriever=5, top_k_reader=5", "and context data current_users = dict() # Used to store", "+ \"/uploads/\" try: es_result = es.search( index=\"user_\" + str(user_id_key), body={\"query\":", "Handles session timeout redirection @app.route(\"/session_timeout\") def session_timeout(): return render_template(\"session_out.html\") #", "user_id_key in current_users: text_context = request.form[\"context\"] context_file = open( \"users/\"", "# Handles deletion of context data completely from the server", "for duplicate docs ..\") add_doc = True for each_doc in", "session.permanent = True current_time = datetime.now() user_id += 1 current_users[user_id]", "1 current_users[user_id] = current_time session[\"user\"] = user_id # print(current_users) if", "def session_timeout(): return render_template(\"session_out.html\") # Handles removing of session identifier", "pre-trained model choice setting changes. 
@app.route(\"/models\", methods=[\"POST\"]) def models(): if", "es.search( index=\"user_\" + str(user_id_key), body={\"query\": {\"match_all\": {}}} ) no_docs =", "in range(len(answers_dict[\"answers\"])): if ( answers_dict[\"answers\"][i][\"answer\"] is not None and answers_dict[\"answers\"][i][\"answer\"]", "es_result[\"hits\"][\"hits\"][each_doc][\"_source\"][\"text\"] doc_name = es_result[\"hits\"][\"hits\"][each_doc][\"_source\"][\"name\"] doc_id = es_result[\"hits\"][\"hits\"][each_doc][\"_id\"] if ( processed[doc][\"meta\"][\"name\"]", "Inferencing on CPU\") reader = TransformersReader( model_name_or_path=model_path, tokenizer=model_path, use_gpu=-1 )", "@app.route(\"/gpu\", methods=[\"POST\"]) def gpu(): if \"user\" in session: user_id_key =", "the below block in case of building a docker image", ") unique_answers.append(temp_dict[\"answer\"]) if temp_dict[\"meta\"][\"name\"] == \"context_file.txt\": temp_dict[\"meta\"][\"name\"] = \"Textual Context\"", "when app tab is open until session completes @app.route(\"/session_out\", methods=[\"POST\"])", "There is a duplicate, So this document is not added", "os.remove(uploads_dir + str(processed[doc][\"meta\"][\"name\"])) except Exception as e: print(e) # print(\"\\n", "tokenizer=model_path, use_gpu=-1 ) finder = Finder(reader, retriever) return finder #", "logging.info( \"User connected at \" + str(datetime.now()) + \" with", "documents uploads @app.route(\"/upload_file\", methods=[\"GET\", \"POST\"]) def upload_file(): global current_users if", "= diff_min_sec + diff_sec time_left = ( session_time * 60", "elif user_settings[user_id_key][\"model\"] == \"bert\": model_path = \"deepset/bert-large-uncased-whole-word-masking-squad2\" elif user_settings[user_id_key][\"model\"] ==", "session timeout on client side return render_template(\"index.html\", time_left=time_left) else: session.permanent", "found ..\", \"context\": \" \", \"meta\": \" \"}, ] return", ") - diff_time # For session timeout on client side", "% 
(e.filename, e.strerror)) # print(\"\\n Deleted user:\", user_id_key, \" @\",", "== \"roberta\": model_path = ( \"deepset/roberta-base-squad2\" # Path of the", "* 60 * 60 print(\"\\n User tracker thread started @", "e.strerror)) # print(\"\\n Deleted user:\", user_id_key, \" @\", datetime.now()) session_timer", "to build docker image user_settings[user_id] = { \"gpu\": \"off\", \"model\":", "= session[\"user\"] if user_id_key in current_users: for f in request.files.getlist(\"file\"):", "index=\"user_\" + str(user_id_key), body={\"query\": {\"match_all\": {}}} ) no_docs = len(es_result[\"hits\"][\"hits\"])", "with time of login user_doc_store = dict() # Document store", "es\") processed_lst = [processed[doc]] user_doc_store[user_id_key].write_documents(processed_lst) os.remove(uploads_dir + str(processed[doc][\"meta\"][\"name\"])) # Handles", "and deletes user id from dict def user_session_timer(): global current_users,", "methods=[\"POST\"]) def question(): if \"user\" in session: user_id_key = session[\"user\"]", "in current_users: if user_settings[user_id_key][\"gpu\"] == \"on\": user_settings[user_id_key][\"gpu\"] = \"off\" else:", "for uploading context os.makedirs(\"users/\" + str(user_id)) os.makedirs(\"users/\" + str(user_id) +", "session.pop(\"user\", None) return redirect(url_for(\"session_timeout\")) # Comment the below block in", "\"illuin/camembert-base-fquad\" retriever = ElasticsearchRetriever(document_store=user_doc_store[user_id_key]) if user_settings[user_id_key][\"gpu\"] == \"on\": try: reader", "index=\"user_\" + str(user_id_key), ignore=[400, 404] ) except OSError as e:", "To check if index in Es is empty. 
208 bytes", "in session: user_id_key = session[\"user\"] if user_id_key in current_users: query_question", "uploads_dir = \"users/\" + str(user_id_key) + \"/uploads/\" try: es_result =", "+ \"/uploads\", f.filename) ) pre_process(user_id_key) return render_template(\"index.html\") else: return redirect(url_for(\"session_timeout\"))", "# To check if index in Es is empty. 208", "if user_id_key in current_users: if user_settings[user_id_key][\"gpu\"] == \"on\": user_settings[user_id_key][\"gpu\"] =", "\"bert\": model_path = \"deepset/bert-large-uncased-whole-word-masking-squad2\" elif user_settings[user_id_key][\"model\"] == \"distilbert\": model_path =", "== \"bert\": model_path = \"deepset/bert-large-uncased-whole-word-masking-squad2\" elif user_settings[user_id_key][\"model\"] == \"distilbert\": model_path", "users dir after uploading to Es def pre_process(user_id_key): uploads_dir =", "+ difference.seconds, 60 ) if time_diff[0] >= session_time: try: del", "False os.remove(uploads_dir + str(processed[doc][\"meta\"][\"name\"])) break if add_doc: # print(\"\\n No", "return render_template(\"session_out.html\") else: return redirect(url_for(\"session_timeout\")) # Provides extracted answers for", "shutil from datetime import timedelta, datetime from flask import Flask,", "user_id += 1 current_users[user_id] = current_time session[\"user\"] = user_id #", "str(processed[doc][\"meta\"][\"name\"])) break if add_doc: # print(\"\\n No duplicates found, so", "build docker image user_settings[user_id] = { \"gpu\": \"off\", \"model\": \"roberta\",", "\"on\": try: reader = TransformersReader( model_name_or_path=model_path, tokenizer=model_path, use_gpu=0 ) except", "User settings for GPU and Pre-trained models choice # Handles", "Each user is assigned with a separate Elasticsearch index starting", "# print(\"\\n no documents in es\") processed_lst = [processed[doc]] user_doc_store[user_id_key].write_documents(processed_lst)", "+ \"/uploads\") user_doc_store[user_id] = 
ElasticsearchDocumentStore( host=\"localhost\", index=\"user_\" + str(user_id) )", "from the server after the session time ends and deletes", "user temp dir for uploading context os.makedirs(\"users/\" + str(user_id)) os.makedirs(\"users/\"", "reader and retriever def set_finder(user_id_key): if user_settings[user_id_key][\"model\"] == \"roberta\": model_path", "datetime.now() user_time = current_users[user_id_key] difference = current_time - user_time time_diff", "\"/uploads\") user_doc_store[user_id] = ElasticsearchDocumentStore( host=\"localhost\", index=\"user_\" + str(user_id) ) #", "- int( datetime.strftime(logged_on, \"%S\") ) diff_time = diff_min_sec + diff_sec", "= es.indices.stats(index=\"user_\" + str(user_id_key)) user_index_size = es_stats[\"_all\"][\"primaries\"][\"store\"][\"size_in_bytes\"] if ( user_index_size", "# Handles GPU setting changes. @app.route(\"/gpu\", methods=[\"POST\"]) def gpu(): if", "duplicates found, so adding this to es..\") processed_lst = [processed[doc]]", "{ \"gpu\": \"off\", \"model\": \"roberta\", } # Initial user settings", "\"probability\", \"offset_start\", \"offset_end\", \"document_id\", ) unique_answers.append(temp_dict[\"answer\"]) if temp_dict[\"meta\"][\"name\"] == \"context_file.txt\":", "\"deepset/bert-large-uncased-whole-word-masking-squad2\" elif user_settings[user_id_key][\"model\"] == \"distilbert\": model_path = \"distilbert-base-uncased-distilled-squad\" else: model_path", "with user_id under users dir after uploading to Es def", "user_settings = dict() # User settings for GPU and Pre-trained", "duplicate docs ..\") add_doc = True for each_doc in range(no_docs):", "user_doc_store[user_id_key].write_documents(processed_lst) os.remove(uploads_dir + str(processed[doc][\"meta\"][\"name\"])) # Handles setting up reader and", "textbox @app.route(\"/context\", methods=[\"POST\"]) def context(): if \"user\" in session: user_id_key", "context_file.write(text_context) context_file.close() pre_process(user_id_key) 
return jsonify({\"output\": \"\" + text_context}) else: return", "status changed\"}) # Handles pre-trained model choice setting changes. @app.route(\"/models\",", "= datetime.now() user_time = current_users[user_id_key] difference = current_time - user_time", "identifier from session dict, This works only when app tab", "documents in es\") processed = convert_files_to_dicts( dir_path=uploads_dir, clean_func=clean_wiki_text, split_paragraphs=True )", "completes @app.route(\"/session_out\", methods=[\"POST\"]) def session_out(): session.pop(\"user\", None) return redirect(url_for(\"session_timeout\")) #", "else: model_path = \"illuin/camembert-base-fquad\" retriever = ElasticsearchRetriever(document_store=user_doc_store[user_id_key]) if user_settings[user_id_key][\"gpu\"] ==", "# print(\"\\n No duplicates found, so adding this to es..\")", "under users dir after uploading to Es def pre_process(user_id_key): uploads_dir", "Finder(reader, retriever) return finder # Handles deletion of context data", "Provides extracted answers for the posted question @app.route(\"/question\", methods=[\"POST\"]) def", "= dict() # Used to store user id with time", "user_id_key, \" @\", datetime.now()) session_timer = threading.Thread(target=user_session_timer) session_timer.start() # Handles", "tab is open until session completes @app.route(\"/session_out\", methods=[\"POST\"]) def session_out():", "login user_doc_store = dict() # Document store object of the", "except Exception as e: print(e) # print(\"\\n no documents in", "= TransformersReader( model_name_or_path=model_path, tokenizer=model_path, use_gpu=-1 ) finder = Finder(reader, retriever)", "print(\"\\n Deleted user:\", user_id_key, \" @\", datetime.now()) session_timer = threading.Thread(target=user_session_timer)", ") if \"user\" in session and session[\"user\"] in current_users: user_id", "user_id_key = session[\"user\"] if user_id_key in current_users: user_settings[user_id_key][\"model\"] = request.form[\"model\"]", 
"session[\"user\"] = user_id # print(current_users) if not os.path.exists(\"users/\"): # Creating", "# Comment the below block in case of building a", "found, so adding this to es..\") processed_lst = [processed[doc]] user_doc_store[user_id_key].write_documents(processed_lst)", "- %s.\" % (e.filename, e.strerror)) # print(\"\\n Deleted user:\", user_id_key,", "is assigned with a separate Elasticsearch index starting with \"user_{user_id}\"", "# Session Timeout in Minutes app = Flask(__name__) app.secret_key =", "if ( processed[doc][\"meta\"][\"name\"] == \"context_file.txt\" and doc_name == \"context_file.txt\" ):", "= divmod( difference.days * seconds_in_day + difference.seconds, 60 ) if", "= current_time - user_time time_diff = divmod( difference.days * seconds_in_day", "choice setting changes. @app.route(\"/models\", methods=[\"POST\"]) def models(): if \"user\" in", "session: user_id_key = session[\"user\"] if user_id_key in current_users: if user_settings[user_id_key][\"gpu\"]", "gpu(): if \"user\" in session: user_id_key = session[\"user\"] if user_id_key", "== \"context_file.txt\" and doc_name == \"context_file.txt\" ): # print(\"Deleting context", "+ str(processed[doc][\"meta\"][\"name\"])) except Exception as e: print(e) # print(\"\\n no", "60 print(\"\\n User tracker thread started @ \", datetime.now()) while", "# Creating user temp dir for uploading context os.makedirs(\"users/\" +", "def pre_process(user_id_key): uploads_dir = \"users/\" + str(user_id_key) + \"/uploads/\" try:", "str(user_id) + \"/uploads\") user_doc_store[user_id] = ElasticsearchDocumentStore( host=\"localhost\", index=\"user_\" + str(user_id)", "user_id_key = session[\"user\"] if user_id_key in current_users: for f in", "with new changes ..\") es.delete( index=\"user_\" + str(user_id_key), doc_type=\"_doc\", id=doc_id", "if index in Es is empty. 
208 bytes is default", "retriever) return finder # Handles deletion of context data completely", "context to Elasticsearch # Each user is assigned with a", "\"w\" ) context_file.write(text_context) context_file.close() pre_process(user_id_key) return jsonify({\"output\": \"\" + text_context})", "Checking for duplicate docs ..\") add_doc = True for each_doc", "remove = ( \"score\", \"probability\", \"offset_start\", \"offset_end\", \"document_id\", ) unique_answers.append(temp_dict[\"answer\"])", "= \"on\" return jsonify({\"output\": \"gpu status changed\"}) # Handles pre-trained", "def user_session_timer(): global current_users, session_time seconds_in_day = 24 * 60", "context_file = open( \"users/\" + str(user_id_key) + \"/uploads/context_file.txt\", \"w\" )", "server after the session time ends and deletes user id", "set_finder(user_id_key) answers_dict = finder.get_answers( question=query_question, top_k_retriever=5, top_k_reader=5 ) unique_answers =", "session @app.route(\"/\") def home(): global user_id, current_users, session_time logging.info( \"User", "update with new changes ..\") es.delete( index=\"user_\" + str(user_id_key), doc_type=\"_doc\",", "return render_template(\"index.html\", time_left=time_left) # Handles context documents uploads @app.route(\"/upload_file\", methods=[\"GET\",", "\" \", \"meta\": \" \"}, ] return jsonify({\"output\": output}) else:", "Elasticsearch() ) # Replace with Elasticsearch([\"http://elasticsearch:9200/\"], verify_certs=True) to build docker", "Comment the below block in case of building a docker", "temp_dict[\"meta\"] = temp_dict[\"meta\"][\"name\"] output.append(temp_dict) for key in remove: if key", "session or already in session @app.route(\"/\") def home(): global user_id,", "model_name_or_path=model_path, tokenizer=model_path, use_gpu=-1 ) finder = Finder(reader, retriever) return finder", "up reader and retriever def set_finder(user_id_key): if user_settings[user_id_key][\"model\"] == \"roberta\":", "# Handles 
setting up reader and retriever def set_finder(user_id_key): if", "Handles pre-processing the context and uploads the pre-processed context to", "timeout on client side return render_template(\"index.html\", time_left=time_left) # Handles context", "methods=[\"POST\"]) def context(): if \"user\" in session: user_id_key = session[\"user\"]", "pre-processing the context and uploads the pre-processed context to Elasticsearch", "return jsonify({\"output\": output}) else: return render_template(\"session_out.html\") # Handles GPU setting", "changes. @app.route(\"/models\", methods=[\"POST\"]) def models(): if \"user\" in session: user_id_key", "Exception as e: print(e) print(\"GPU not available. Inferencing on CPU\")", "= 0 # User ID to keep track w.r.t sessions", "= dict() # Document store object of the user id", "clean_func=clean_wiki_text, split_paragraphs=True ) for doc in range(len(processed)): try: # print(\"\\n", "else: session.permanent = True current_time = datetime.now() user_id += 1", "= TransformersReader( model_name_or_path=model_path, tokenizer=model_path, use_gpu=0 ) except Exception as e:", "context(): if \"user\" in session: user_id_key = session[\"user\"] if user_id_key", "print(e) # print(\"\\n no documents in es\") processed_lst = [processed[doc]]", "= request.form[\"context\"] context_file = open( \"users/\" + str(user_id_key) + \"/uploads/context_file.txt\",", "Initial user settings logged_on = current_users[user_id] current_time = datetime.now() diff_min_sec", "user_id = 0 # User ID to keep track w.r.t", "processed_lst = [processed[doc]] user_doc_store[user_id_key].write_documents(processed_lst) os.remove(uploads_dir + str(processed[doc][\"meta\"][\"name\"])) # Handles setting", "in Es is empty. 
208 bytes is default index size", ") if time_diff[0] >= session_time: try: del current_users[user_id_key] del user_doc_store[user_id_key]", "unique_answers ): temp_dict = answers_dict[\"answers\"][i] remove = ( \"score\", \"probability\",", "session and session[\"user\"] in current_users: user_id = session[\"user\"] logged_on =", "render_template(\"session_out.html\") # Handles removing of session identifier from session dict,", "for the posted question @app.route(\"/question\", methods=[\"POST\"]) def question(): if \"user\"", "current_time = datetime.now() user_time = current_users[user_id_key] difference = current_time -", "= ( Elasticsearch() ) # Replace with Elasticsearch([\"http://elasticsearch:9200/\"], verify_certs=True) to", "user_session_timer(): global current_users, session_time seconds_in_day = 24 * 60 *", "in request.files.getlist(\"file\"): f.save( os.path.join(\"users/\" + str(user_id_key) + \"/uploads\", f.filename) )", "import convert_files_to_dicts from haystack.preprocessor.cleaning import clean_wiki_text from haystack import Finder", "= [processed[doc]] user_doc_store[user_id_key].write_documents(processed_lst) os.remove(uploads_dir + str(processed[doc][\"meta\"][\"name\"])) except Exception as e:", "from elasticsearch import Elasticsearch es = ( Elasticsearch() ) #", "+ str(processed[doc][\"meta\"][\"name\"])) # Handles setting up reader and retriever def", ") except Exception as e: print(e) print(\"GPU not available. 
Inferencing", "render_template(\"index.html\", time_left=time_left) # Handles context documents uploads @app.route(\"/upload_file\", methods=[\"GET\", \"POST\"])", "# print(\"Deleting context file to update with new changes ..\")", "os.path.exists(\"users/\"): # Creating user temp dir for uploading context os.makedirs(\"users/\"", "(e.filename, e.strerror)) # print(\"\\n Deleted user:\", user_id_key, \" @\", datetime.now())", "tokenizer=model_path, use_gpu=0 ) except Exception as e: print(e) print(\"GPU not", "int(datetime.strftime(current_time, \"%M\")) - int(datetime.strftime(logged_on, \"%M\")) ) * 60 diff_sec =", "temp_dict[key] else: output = [ {\"answer\": \"No Answers found ..\",", "= session[\"user\"] if user_id_key in current_users: if user_settings[user_id_key][\"gpu\"] == \"on\":", "session[\"user\"] if user_id_key in current_users: user_settings[user_id_key][\"model\"] = request.form[\"model\"] return jsonify({\"output\":", "redirection @app.route(\"/session_timeout\") def session_timeout(): return render_template(\"session_out.html\") # Handles removing of", "docker image or running on WSGI server like gunicorn if", "e: print(e) print(\"\\n no documents in es\") processed = convert_files_to_dicts(", "%s.\" % (e.filename, e.strerror)) # print(\"\\n Deleted user:\", user_id_key, \"", "model_path = \"deepset/bert-large-uncased-whole-word-masking-squad2\" elif user_settings[user_id_key][\"model\"] == \"distilbert\": model_path = \"distilbert-base-uncased-distilled-squad\"", "= Finder(reader, retriever) return finder # Handles deletion of context", "del user_doc_store[user_id_key] del user_settings[user_id_key] shutil.rmtree(\"users/\" + str(user_id_key)) es.indices.delete( index=\"user_\" +", "+ \"/uploads\") else: os.makedirs(\"users/\" + str(user_id)) os.makedirs(\"users/\" + str(user_id) +", "setting changes. @app.route(\"/models\", methods=[\"POST\"]) def models(): if \"user\" in session:", "available. 
Inferencing on CPU\") reader = TransformersReader( model_name_or_path=model_path, tokenizer=model_path, use_gpu=-1", "processed[doc][\"meta\"][\"name\"] == \"context_file.txt\" and doc_name == \"context_file.txt\" ): # print(\"Deleting", "answers for the posted question @app.route(\"/question\", methods=[\"POST\"]) def question(): if", "redirect(url_for(\"session_timeout\")) else: return redirect(url_for(\"session_timeout\")) # Handles context added through the", "this document is not added ..\") add_doc = False os.remove(uploads_dir", "): # print(\"Deleting context file to update with new changes", "object of the user id user_settings = dict() # User", "print(\"\\n no documents in es\") processed_lst = [processed[doc]] user_doc_store[user_id_key].write_documents(processed_lst) os.remove(uploads_dir", "- diff_time # For session timeout on client side return", "@app.route(\"/session_timeout\") def session_timeout(): return render_template(\"session_out.html\") # Handles removing of session", "ElasticsearchDocumentStore( host=\"localhost\", index=\"user_\" + str(user_id) ) # Change host =", "= current_users[user_id] current_time = datetime.now() diff_min_sec = ( int(datetime.strftime(current_time, \"%M\"))", "Context\" temp_dict[\"meta\"] = temp_dict[\"meta\"][\"name\"] output.append(temp_dict) for key in remove: if", "= \"users/\" + str(user_id_key) + \"/uploads/\" try: es_result = es.search(", "as e: print(e) # print(\"\\n no documents in es\") processed_lst", "return jsonify({\"error\": \"add context\"}) finder = set_finder(user_id_key) answers_dict = finder.get_answers(", "<gh_stars>10-100 import os import threading import shutil from datetime import", "= ElasticsearchDocumentStore( host=\"localhost\", index=\"user_\" + str(user_id) ) # Change host", "through the textbox @app.route(\"/context\", methods=[\"POST\"]) def context(): if \"user\" in", "haystack.preprocessor.utils import convert_files_to_dicts from haystack.preprocessor.cleaning import clean_wiki_text 
from haystack import", "= 60 # Session Timeout in Minutes app = Flask(__name__)", "= temp_dict[\"meta\"][\"name\"] output.append(temp_dict) for key in remove: if key in", "os.remove(uploads_dir + str(processed[doc][\"meta\"][\"name\"])) break if add_doc: # print(\"\\n No duplicates", "else: user_settings[user_id_key][\"gpu\"] = \"on\" return jsonify({\"output\": \"gpu status changed\"}) #", "redirect(url_for(\"session_timeout\")) # Comment the below block in case of building", "= timedelta(minutes=session_time) user_id = 0 # User ID to keep", "threading import shutil from datetime import timedelta, datetime from flask", "@app.route(\"/question\", methods=[\"POST\"]) def question(): if \"user\" in session: user_id_key =", "current_users, session_time logging.info( \"User connected at \" + str(datetime.now()) +", "is default index size without docs return jsonify({\"error\": \"add context\"})", "\"\" + text_context}) else: return render_template(\"session_out.html\") else: return redirect(url_for(\"session_timeout\")) #", "empty. 
208 bytes is default index size without docs return", "in es\") processed_lst = [processed[doc]] user_doc_store[user_id_key].write_documents(processed_lst) os.remove(uploads_dir + str(processed[doc][\"meta\"][\"name\"])) #", "the server after the session time ends and deletes user", "= TransformersReader( model_name_or_path=model_path, tokenizer=model_path, use_gpu=-1 ) else: reader = TransformersReader(", "time_left = ( session_time * 60 ) - diff_time #", "from dict def user_session_timer(): global current_users, session_time seconds_in_day = 24", "# Documents & textual context are deleted from them temp", "TransformersReader( model_name_or_path=model_path, tokenizer=model_path, use_gpu=-1 ) else: reader = TransformersReader( model_name_or_path=model_path,", "ElasticsearchRetriever(document_store=user_doc_store[user_id_key]) if user_settings[user_id_key][\"gpu\"] == \"on\": try: reader = TransformersReader( model_name_or_path=model_path,", "= convert_files_to_dicts( dir_path=uploads_dir, clean_func=clean_wiki_text, split_paragraphs=True ) for doc in range(len(processed)):", "not added ..\") add_doc = False os.remove(uploads_dir + str(processed[doc][\"meta\"][\"name\"])) break", "if user_settings[user_id_key][\"gpu\"] == \"on\": user_settings[user_id_key][\"gpu\"] = \"off\" else: user_settings[user_id_key][\"gpu\"] =", "return render_template(\"index.html\", time_left=time_left) else: session.permanent = True current_time = datetime.now()", "as e: print(\"Error: %s - %s.\" % (e.filename, e.strerror)) #", "..\") add_doc = False os.remove(uploads_dir + str(processed[doc][\"meta\"][\"name\"])) break if add_doc:", "uploading to Es def pre_process(user_id_key): uploads_dir = \"users/\" + str(user_id_key)", "named with user_id under users dir after uploading to Es", "def upload_file(): global current_users if \"user\" in session: user_id_key =", "import clean_wiki_text from haystack import Finder from haystack.retriever.sparse import ElasticsearchRetriever", "textual 
context are deleted from them temp folder named with", "if user_settings[user_id_key][\"gpu\"] == \"on\": try: reader = TransformersReader( model_name_or_path=model_path, tokenizer=model_path,", "\"user\" in session and session[\"user\"] in current_users: user_id = session[\"user\"]", "added through the textbox @app.route(\"/context\", methods=[\"POST\"]) def context(): if \"user\"", "# Document store object of the user id user_settings =", "else: output = [ {\"answer\": \"No Answers found ..\", \"context\":", "str(user_id_key) + \"/uploads\", f.filename) ) pre_process(user_id_key) return render_template(\"index.html\") else: return", "+ str(user_id_key), ignore=[400, 404] ) except OSError as e: print(\"Error:", "\"context_file.txt\" ): # print(\"Deleting context file to update with new", "Handles users w.r.t new session or already in session @app.route(\"/\")", "# For session timeout on client side return render_template(\"index.html\", time_left=time_left)", "from haystack import Finder from haystack.retriever.sparse import ElasticsearchRetriever from haystack.reader.transformers", "str(processed[doc][\"meta\"][\"name\"])) except Exception as e: print(e) # print(\"\\n no documents", "of login user_doc_store = dict() # Document store object of", "assigned with a separate Elasticsearch index starting with \"user_{user_id}\" #", "\", datetime.now()) while True: for user_id_key in current_users.copy(): current_time =", "current_time = datetime.now() user_id += 1 current_users[user_id] = current_time session[\"user\"]", "current_users: for f in request.files.getlist(\"file\"): f.save( os.path.join(\"users/\" + str(user_id_key) +", "= es_stats[\"_all\"][\"primaries\"][\"store\"][\"size_in_bytes\"] if ( user_index_size == 208 ): # To", "import ElasticsearchRetriever from haystack.reader.transformers import TransformersReader from elasticsearch import Elasticsearch", "== \"context_file.txt\": temp_dict[\"meta\"][\"name\"] = \"Textual Context\" temp_dict[\"meta\"] = 
temp_dict[\"meta\"][\"name\"] output.append(temp_dict)", "= request.form[\"model\"] return jsonify({\"output\": \"model changed\"}) # Handles session timeout", "\"elasticsearch\" to build docker image user_settings[user_id] = { \"gpu\": \"off\",", "== \"on\": user_settings[user_id_key][\"gpu\"] = \"off\" else: user_settings[user_id_key][\"gpu\"] = \"on\" return", "logged_on = current_users[user_id] current_time = datetime.now() diff_min_sec = ( int(datetime.strftime(current_time,", "\"model\": \"roberta\", } # Initial user settings logged_on = current_users[user_id]", "context data completely from the server after the session time", "range(len(answers_dict[\"answers\"])): if ( answers_dict[\"answers\"][i][\"answer\"] is not None and answers_dict[\"answers\"][i][\"answer\"] not", "str(user_id)) os.makedirs(\"users/\" + str(user_id) + \"/uploads\") user_doc_store[user_id] = ElasticsearchDocumentStore( host=\"localhost\",", "return redirect(url_for(\"session_timeout\")) else: return redirect(url_for(\"session_timeout\")) # Handles context added through", "str(user_id_key) + \"/uploads/context_file.txt\", \"w\" ) context_file.write(text_context) context_file.close() pre_process(user_id_key) return jsonify({\"output\":", "no documents in es\") processed_lst = [processed[doc]] user_doc_store[user_id_key].write_documents(processed_lst) os.remove(uploads_dir +", "Elasticsearch # Each user is assigned with a separate Elasticsearch", "app = Flask(__name__) app.secret_key = \"<KEY>\" app.permanent_session_lifetime = timedelta(minutes=session_time) user_id", ") finder = Finder(reader, retriever) return finder # Handles deletion", "Es is empty. 
208 bytes is default index size without", "methods=[\"POST\"]) def models(): if \"user\" in session: user_id_key = session[\"user\"]", "below block in case of building a docker image or", "host = \"elasticsearch\" to build docker image user_settings[user_id] = {", "in session: user_id_key = session[\"user\"] if user_id_key in current_users: text_context", "int(datetime.strftime(current_time, \"%S\")) - int( datetime.strftime(logged_on, \"%S\") ) diff_time = diff_min_sec" ]
[ "\"\") # pylint: disable=I0011,W0631 return { KEY_VARIABLE_ID: element, KEY_SITE_ID: site_id,", "get_series(self, site_id, variable_id): \"\"\"Retrieve all series for a variable_id in", "str. \"\"\" message = { KEY_VALUE: error, KEY_VARIABLE_ID: KEY_ERROR, KEY_SITE_ID:", "% (site_folder, filename) var_id = filename.replace(\".tsv.%s\" % day_date, \"\") series_numbers", "{ KEY_VALUE: error, KEY_VARIABLE_ID: KEY_ERROR, KEY_SITE_ID: SYSTEM_SITE_ID, KEY_DATE: datetime.utcnow().isoformat('T'), KEY_DST_TIMEZONE:", "py-indent-offset: 4 -*- \"\"\"File storage adapter for timevortex project\"\"\" import", "\"\"\"Get last filename \"\"\" old_date = None last_filename = \"\"", "not exists(self.folder_path): makedirs(self.folder_path) def insert_series(self, series): \"\"\"Insert series in DB", "datetime.strptime(new_date, \"%Y-%m-%d\") if old_date is None or new_date > old_date:", "series[array_line[1]] = array_line[0] return series def get_last_file_name(site_folder, file_prefix): \"\"\"Get last", "\"%s.tsv.%s\" % (element, day_date) file_path = \"%s/%s\" % (site_folder, filename)", "in file\"\"\" file_folder = \"%s/%s\" % (self.folder_path, message[KEY_SITE_ID]) file_date =", "get_series_per_file(site_folder, file_prefix): \"\"\"Get series per file \"\"\" series = {}", "in listdir(site_folder): is_file = isfile(join(site_folder, new_filename)) if is_file and file_prefix", "filename) if exists(site_folder) and exists(file_path): return get_lines_number(file_path) return 0 def", "line in temp_series: array_line = line.split(\"\\t\") if len(array_line) >= 2:", "\"\"\"Function that create valid error message :param error: Mal formed", "timevortex.utils.globals import LOGGER, KEY_ERROR, KEY_SITE_ID, KEY_VARIABLE_ID, KEY_VALUE, KEY_DATE from timevortex.utils.globals", "None or new_date > old_date: return new_date, new_filename except ValueError:", "datetime import pytz import dateutil.parser from django.conf import settings from", "message[KEY_NON_DST_TIMEZONE])) 
extracted.close() def insert_error(self, message): \"\"\"Function that store error in", "# -*- Mode: Python; py-indent-offset: 4 -*- \"\"\"File storage adapter", "old_date = None last_filename = \"\" for new_filename in listdir(site_folder):", "str. \"\"\" LOGGER.error(message) message[KEY_VARIABLE_ID] = KEY_ERROR self.insert(message) def store_error(self, error):", "FileStorage(object): \"\"\"Class that help us to store and load data", "last_filename = \"\" for new_filename in listdir(site_folder): is_file = isfile(join(site_folder,", "import dateutil.parser from django.conf import settings from django.utils import timezone", "filename) with open(complete_filename, \"r\") as filed: temp_series = filed.readlines() for", "LOGGER.error(\"Not right file\") return old_date, last_filename class FileStorage(object): \"\"\"Class that", "last_filename = \"%s/%s\" % (site_folder, last_filename) try: with open(last_filename, \"rb\")", "tzname[1], KEY_NON_DST_TIMEZONE: tzname[0] } LOGGER.error(error) self.insert(message) def get_series(self, site_id, variable_id):", "= \"%s/%s\" % (site_folder, filename) var_id = filename.replace(\".tsv.%s\" % day_date,", "error): \"\"\"Function that create valid error message :param error: Mal", "series def set_data_location(self, folder_path): \"\"\"Set data folder space\"\"\" self.folder_path =", "% (element, day_date) file_path = \"%s/%s\" % (site_folder, filename) if", "except IsADirectoryError: return None LOGGER.debug(last) # pylint: disable=I0011,W0631 last =", "filename in listdir(site_folder): if \"%s.tsv\" % KEY_ERROR not in filename", "old_date is None or new_date > old_date: return new_date, new_filename", "Python; py-indent-offset: 4 -*- \"\"\"File storage adapter for timevortex project\"\"\"", "# pylint: disable=I0011,W0631 last = last.decode(\"utf-8\").replace(\"\\n\", \"\") # pylint: disable=I0011,W0631", "error of a site_id file storage \"\"\" return self.get_last_series(site_id, KEY_ERROR)", "if not 
exists(self.folder_path): makedirs(self.folder_path) def insert_series(self, series): \"\"\"Insert series in", "= isfile(join(site_folder, filename)) if is_file and file_prefix in filename: complete_filename", "file storage \"\"\" return self.get_last_series(site_id, KEY_ERROR) def get_number_of_error(self, site_id, day_date):", "with open(last_filename, \"rb\") as filed2: for last in filed2: pass", "filename and day_date in filename: file_path = \"%s/%s\" % (site_folder,", "filename in listdir(site_folder): is_file = isfile(join(site_folder, filename)) if is_file and", "KEY_DST_TIMEZONE, KEY_NON_DST_TIMEZONE, SYSTEM_SITE_ID SETTINGS_FILE_STORAGE_FOLDER = \"SETTINGS_FILE_STORAGE_FOLDER\" SETTINGS_DEFAULT_FILE_STORAGE_FOLDER = \"/tmp/data/\" def", "message[KEY_SITE_ID]) file_date = timezone.localtime( dateutil.parser.parse(message[KEY_DATE]).replace(tzinfo=pytz.UTC)).strftime(\"%Y-%m-%d\") if not exists(file_folder): makedirs(file_folder) raw_file", ":param message: Error to insert in DB :type message: str.", "open(complete_filename, \"r\") as filed: temp_series = filed.readlines() for line in", "None last_filename = \"\" for new_filename in listdir(site_folder): is_file =", "last.split(\"\\t\")[0], KEY_DATE: last.split(\"\\t\")[1], KEY_DST_TIMEZONE: last.split(\"\\t\")[2], KEY_NON_DST_TIMEZONE: last.split(\"\\t\")[3] } return None", "site_id, day_date): \"\"\"This method retrieve number of series published for", "= folder_path if not exists(self.folder_path): makedirs(self.folder_path) def insert_series(self, series): \"\"\"Insert", "last_filename = update_last_file_name(file_prefix, old_date, last_filename, new_filename) return last_filename def update_last_file_name(file_prefix,", "= \"%s/%s\" % (self.folder_path, site_id) filename = \"%s.tsv.%s\" % (element,", "from os import listdir, makedirs from os.path import isfile, join,", "= line.split(\"\\t\") if len(array_line) >= 2: series[array_line[1]] = array_line[0] return", "name \"\"\" try: new_date = 
new_filename.replace(file_prefix, \"\") new_date = datetime.strptime(new_date,", "is None or new_date > old_date: return new_date, new_filename except", "that create valid error message :param error: Mal formed message", "= \"%s/%s\" % (self.folder_path, site_id) if exists(site_folder): last_filename = get_last_file_name(site_folder,", "KEY_SITE_ID: SYSTEM_SITE_ID, KEY_DATE: datetime.utcnow().isoformat('T'), KEY_DST_TIMEZONE: tzname[1], KEY_NON_DST_TIMEZONE: tzname[0] } LOGGER.error(error)", "file_folder = \"%s/%s\" % (self.folder_path, message[KEY_SITE_ID]) file_date = timezone.localtime( dateutil.parser.parse(message[KEY_DATE]).replace(tzinfo=pytz.UTC)).strftime(\"%Y-%m-%d\")", "KEY_ERROR) def get_number_of_error(self, site_id, day_date): \"\"\"This method retrieve number of", "message[KEY_DST_TIMEZONE], message[KEY_NON_DST_TIMEZONE])) extracted.close() def insert_error(self, message): \"\"\"Function that store error", "import isfile, join, exists from time import tzname from datetime", "array_line[0] return series def get_last_file_name(site_folder, file_prefix): \"\"\"Get last filename \"\"\"", "tzname[0] } LOGGER.error(error) self.insert(message) def get_series(self, site_id, variable_id): \"\"\"Retrieve all", "KEY_ERROR site_folder = \"%s/%s\" % (self.folder_path, site_id) filename = \"%s.tsv.%s\"", "\"\"\"Get series per file \"\"\" series = {} for filename", "get_lines_number(file_path) series.append([var_id, series_numbers]) return series def set_data_location(self, folder_path): \"\"\"Set data", "get_series_per_file(site_folder, file_prefix) else: series = {} return series def get_last_series(self,", "not in filename and day_date in filename: file_path = \"%s/%s\"", "def insert(self, message): \"\"\"Insert data in file\"\"\" file_folder = \"%s/%s\"", "DB :type message: str. 
\"\"\" LOGGER.error(message) message[KEY_VARIABLE_ID] = KEY_ERROR self.insert(message)", "{} return series def get_last_series(self, site_id, variable_id): \"\"\"Retrieve last value", "\"\"\" element = KEY_ERROR site_folder = \"%s/%s\" % (self.folder_path, site_id)", "file_path = \"%s/%s\" % (site_folder, filename) var_id = filename.replace(\".tsv.%s\" %", "self.get_last_series(site_id, KEY_ERROR) def get_number_of_error(self, site_id, day_date): \"\"\"This method retrieve number", "LOGGER, KEY_ERROR, KEY_SITE_ID, KEY_VARIABLE_ID, KEY_VALUE, KEY_DATE from timevortex.utils.globals import KEY_DST_TIMEZONE,", "file_prefix): \"\"\"Get last filename \"\"\" old_date = None last_filename =", "\"%s/%s\" % (self.folder_path, site_id) filename = \"%s.tsv.%s\" % (element, day_date)", "if \"%s.tsv\" % KEY_ERROR not in filename and day_date in", "filename.replace(\".tsv.%s\" % day_date, \"\") series_numbers = get_lines_number(file_path) series.append([var_id, series_numbers]) return", "file_prefix) else: series = {} return series def get_last_series(self, site_id,", "\"\"\"Insert data in file\"\"\" file_folder = \"%s/%s\" % (self.folder_path, message[KEY_SITE_ID])", "and in log :param message: Error to insert in DB", "= KEY_ERROR self.insert(message) def store_error(self, error): \"\"\"Function that create valid", "extracted = open(raw_file, \"a+\") extracted.write(\"%s\\t%s\\t%s\\t%s\\n\" % ( message[KEY_VALUE], message[KEY_DATE], message[KEY_DST_TIMEZONE],", "new_filename): \"\"\"Update last file name \"\"\" try: new_date = new_filename.replace(file_prefix,", "project\"\"\" import os from os import listdir, makedirs from os.path", ":param error: Mal formed message :type error: str. 
\"\"\" message", "get_last_file_name(site_folder, file_prefix) last_filename = \"%s/%s\" % (site_folder, last_filename) try: with", "= get_lines_number(file_path) series.append([var_id, series_numbers]) return series def set_data_location(self, folder_path): \"\"\"Set", "in filename and day_date in filename: file_path = \"%s/%s\" %", "= filed.readlines() for line in temp_series: array_line = line.split(\"\\t\") if", "if not exists(file_folder): makedirs(file_folder) raw_file = \"%s/%s.tsv.%s\" % ( file_folder,", "site_id, variable_id): \"\"\"Retrieve last value of variable_id in site_id \"\"\"", "last_filename def update_last_file_name(file_prefix, old_date, last_filename, new_filename): \"\"\"Update last file name", "exists(site_folder): last_filename = get_last_file_name(site_folder, file_prefix) last_filename = \"%s/%s\" % (site_folder,", "and file_prefix in filename: complete_filename = \"%s/%s\" % (site_folder, filename)", "raw_file = \"%s/%s.tsv.%s\" % ( file_folder, message[KEY_VARIABLE_ID], file_date) extracted =", "site_id, KEY_VALUE: last.split(\"\\t\")[0], KEY_DATE: last.split(\"\\t\")[1], KEY_DST_TIMEZONE: last.split(\"\\t\")[2], KEY_NON_DST_TIMEZONE: last.split(\"\\t\")[3] }", "that store error in errors collection and in log :param", "collection and in log :param message: Error to insert in", "site_folder = \"%s/%s\" % (self.folder_path, site_id) if exists(site_folder): last_filename =", "disable=I0011,W0631 last = last.decode(\"utf-8\").replace(\"\\n\", \"\") # pylint: disable=I0011,W0631 return {", "number of series published for a day_date \"\"\" site_folder =", "exists(site_folder) and exists(file_path): return get_lines_number(file_path) return 0 def get_number_of_series(self, site_id,", "= get_series_per_file(site_folder, file_prefix) else: series = {} return series def", "file_prefix in filename: complete_filename = \"%s/%s\" % (site_folder, filename) with", "site_id) if exists(site_folder): last_filename = get_last_file_name(site_folder, 
file_prefix) last_filename = \"%s/%s\"", "new_date, new_filename except ValueError: LOGGER.error(\"Not right file\") return old_date, last_filename", "day_date \"\"\" site_folder = \"%s/%s\" % (self.folder_path, site_id) series =", "-*- \"\"\"File storage adapter for timevortex project\"\"\" import os from", "KEY_DATE from timevortex.utils.globals import KEY_DST_TIMEZONE, KEY_NON_DST_TIMEZONE, SYSTEM_SITE_ID SETTINGS_FILE_STORAGE_FOLDER = \"SETTINGS_FILE_STORAGE_FOLDER\"", "LOGGER.error(message) message[KEY_VARIABLE_ID] = KEY_ERROR self.insert(message) def store_error(self, error): \"\"\"Function that", "get_last_error(self, site_id): \"\"\"Retrieve last error of a site_id file storage", "try: with open(last_filename, \"rb\") as filed2: for last in filed2:", "return { KEY_VARIABLE_ID: element, KEY_SITE_ID: site_id, KEY_VALUE: last.split(\"\\t\")[0], KEY_DATE: last.split(\"\\t\")[1],", "file\"\"\" file_folder = \"%s/%s\" % (self.folder_path, message[KEY_SITE_ID]) file_date = timezone.localtime(", "retrieve number of series published for a day_date \"\"\" site_folder", "\"/tmp/data/\" def get_lines_number(file_path): \"\"\"Get lines number \"\"\" return sum(1 for", "series in DB :param series: Representation of a series :type", "(site_folder, filename) with open(complete_filename, \"r\") as filed: temp_series = filed.readlines()", "self.insert(series) def insert(self, message): \"\"\"Insert data in file\"\"\" file_folder =", "KEY_VARIABLE_ID: element, KEY_SITE_ID: site_id, KEY_VALUE: last.split(\"\\t\")[0], KEY_DATE: last.split(\"\\t\")[1], KEY_DST_TIMEZONE: last.split(\"\\t\")[2],", "KEY_ERROR not in filename and day_date in filename: file_path =", "(element, day_date) file_path = \"%s/%s\" % (site_folder, filename) if exists(site_folder)", "\"\"\"Retrieve all series for a variable_id in site_id \"\"\" element", "SYSTEM_SITE_ID, KEY_DATE: datetime.utcnow().isoformat('T'), KEY_DST_TIMEZONE: tzname[1], KEY_NON_DST_TIMEZONE: tzname[0] } LOGGER.error(error) 
self.insert(message)", "series = {} return series def get_last_series(self, site_id, variable_id): \"\"\"Retrieve", "filed.readlines() for line in temp_series: array_line = line.split(\"\\t\") if len(array_line)", "is_file = isfile(join(site_folder, new_filename)) if is_file and file_prefix in new_filename:", "day_date, \"\") series_numbers = get_lines_number(file_path) series.append([var_id, series_numbers]) return series def", "site_folder = \"%s/%s\" % (self.folder_path, site_id) series = [] if", "\"\"\"Class that help us to store and load data over", "message: str. \"\"\" LOGGER.error(message) message[KEY_VARIABLE_ID] = KEY_ERROR self.insert(message) def store_error(self,", "for line in temp_series: array_line = line.split(\"\\t\") if len(array_line) >=", "message :param error: Mal formed message :type error: str. \"\"\"", "return sum(1 for line in open(file_path)) def get_series_per_file(site_folder, file_prefix): \"\"\"Get", "Mal formed message :type error: str. \"\"\" message = {", "= \"%s/%s\" % (site_folder, filename) if exists(site_folder) and exists(file_path): return", "series published for a day_date \"\"\" site_folder = \"%s/%s\" %", "( message[KEY_VALUE], message[KEY_DATE], message[KEY_DST_TIMEZONE], message[KEY_NON_DST_TIMEZONE])) extracted.close() def insert_error(self, message): \"\"\"Function", "isfile(join(site_folder, filename)) if is_file and file_prefix in filename: complete_filename =", "sum(1 for line in open(file_path)) def get_series_per_file(site_folder, file_prefix): \"\"\"Get series", "get_number_of_series(self, site_id, day_date): \"\"\"This method retrieve number of series published", "get_last_series(self, site_id, variable_id): \"\"\"Retrieve last value of variable_id in site_id", "self.folder_path = folder_path def get_sites_list(self): \"\"\"Get sites list\"\"\" return os.listdir(self.folder_path)", "temp_series = filed.readlines() for line in temp_series: array_line = line.split(\"\\t\")", "KEY_NON_DST_TIMEZONE, SYSTEM_SITE_ID 
SETTINGS_FILE_STORAGE_FOLDER = \"SETTINGS_FILE_STORAGE_FOLDER\" SETTINGS_DEFAULT_FILE_STORAGE_FOLDER = \"/tmp/data/\" def get_lines_number(file_path):", "message): \"\"\"Function that store error in errors collection and in", "= \"%s/%s\" % (site_folder, filename) with open(complete_filename, \"r\") as filed:", "utf8 -*- # -*- Mode: Python; py-indent-offset: 4 -*- \"\"\"File", "\"%s/%s\" % (site_folder, filename) with open(complete_filename, \"r\") as filed: temp_series", "% ( file_folder, message[KEY_VARIABLE_ID], file_date) extracted = open(raw_file, \"a+\") extracted.write(\"%s\\t%s\\t%s\\t%s\\n\"", "\"%s/%s\" % (site_folder, filename) var_id = filename.replace(\".tsv.%s\" % day_date, \"\")", "= \"%s/%s\" % (self.folder_path, site_id) if exists(site_folder): series = get_series_per_file(site_folder,", "us to store and load data over several file\"\"\" def", "def __init__(self, folder_path): \"\"\"Constructor\"\"\" self.folder_path = folder_path if not exists(self.folder_path):", "= \"%s.tsv.\" % element site_folder = \"%s/%s\" % (self.folder_path, site_id)", "last_filename) try: with open(last_filename, \"rb\") as filed2: for last in", "last in filed2: pass except IsADirectoryError: return None LOGGER.debug(last) #", "set_data_location(self, folder_path): \"\"\"Set data folder space\"\"\" self.folder_path = folder_path def", "def get_last_series(self, site_id, variable_id): \"\"\"Retrieve last value of variable_id in", "of a site_id file storage \"\"\" return self.get_last_series(site_id, KEY_ERROR) def", "\"\"\"Update last file name \"\"\" try: new_date = new_filename.replace(file_prefix, \"\")", "> old_date: return new_date, new_filename except ValueError: LOGGER.error(\"Not right file\")", "\"%s.tsv.\" % element site_folder = \"%s/%s\" % (self.folder_path, site_id) if", "\"\"\"Get lines number \"\"\" return sum(1 for line in open(file_path))", "file_prefix = \"%s.tsv.\" % element site_folder = \"%s/%s\" % (self.folder_path,", "from timevortex.utils.globals 
import LOGGER, KEY_ERROR, KEY_SITE_ID, KEY_VARIABLE_ID, KEY_VALUE, KEY_DATE from", "extracted.close() def insert_error(self, message): \"\"\"Function that store error in errors", "= get_last_file_name(site_folder, file_prefix) last_filename = \"%s/%s\" % (site_folder, last_filename) try:", "error: str. \"\"\" message = { KEY_VALUE: error, KEY_VARIABLE_ID: KEY_ERROR,", "get_sites_list(self): \"\"\"Get sites list\"\"\" return os.listdir(self.folder_path) FILE_STORAGE_SPACE = FileStorage(getattr(settings, SETTINGS_FILE_STORAGE_FOLDER,", "Representation of a series :type series: dict. \"\"\" self.insert(series) def", "{ KEY_VARIABLE_ID: element, KEY_SITE_ID: site_id, KEY_VALUE: last.split(\"\\t\")[0], KEY_DATE: last.split(\"\\t\")[1], KEY_DST_TIMEZONE:", "for filename in listdir(site_folder): if \"%s.tsv\" % KEY_ERROR not in", "in filename: file_path = \"%s/%s\" % (site_folder, filename) var_id =", "folder_path): \"\"\"Set data folder space\"\"\" self.folder_path = folder_path def get_sites_list(self):", "exists(file_folder): makedirs(file_folder) raw_file = \"%s/%s.tsv.%s\" % ( file_folder, message[KEY_VARIABLE_ID], file_date)", "listdir, makedirs from os.path import isfile, join, exists from time", ":param series: Representation of a series :type series: dict. 
\"\"\"", "% KEY_ERROR not in filename and day_date in filename: file_path", "series def get_last_series(self, site_id, variable_id): \"\"\"Retrieve last value of variable_id", "method retrieve number of error published for a day_date \"\"\"", "pylint: disable=I0011,W0631 last = last.decode(\"utf-8\").replace(\"\\n\", \"\") # pylint: disable=I0011,W0631 return", "day_date in filename: file_path = \"%s/%s\" % (site_folder, filename) var_id", "site_folder = \"%s/%s\" % (self.folder_path, site_id) if exists(site_folder): series =", "LOGGER.error(error) self.insert(message) def get_series(self, site_id, variable_id): \"\"\"Retrieve all series for", "new_date > old_date: return new_date, new_filename except ValueError: LOGGER.error(\"Not right", "def insert_series(self, series): \"\"\"Insert series in DB :param series: Representation", "message[KEY_VALUE], message[KEY_DATE], message[KEY_DST_TIMEZONE], message[KEY_NON_DST_TIMEZONE])) extracted.close() def insert_error(self, message): \"\"\"Function that", "in filename: complete_filename = \"%s/%s\" % (site_folder, filename) with open(complete_filename,", "listdir(site_folder): is_file = isfile(join(site_folder, new_filename)) if is_file and file_prefix in", "\"%Y-%m-%d\") if old_date is None or new_date > old_date: return", "update_last_file_name(file_prefix, old_date, last_filename, new_filename) return last_filename def update_last_file_name(file_prefix, old_date, last_filename,", "self.folder_path = folder_path if not exists(self.folder_path): makedirs(self.folder_path) def insert_series(self, series):", "series: dict. 
\"\"\" self.insert(series) def insert(self, message): \"\"\"Insert data in", "day_date \"\"\" element = KEY_ERROR site_folder = \"%s/%s\" % (self.folder_path,", "day_date) file_path = \"%s/%s\" % (site_folder, filename) if exists(site_folder) and", "old_date, last_filename, new_filename): \"\"\"Update last file name \"\"\" try: new_date", "series_numbers = get_lines_number(file_path) series.append([var_id, series_numbers]) return series def set_data_location(self, folder_path):", "new_filename.replace(file_prefix, \"\") new_date = datetime.strptime(new_date, \"%Y-%m-%d\") if old_date is None", "in DB :param series: Representation of a series :type series:", "log :param message: Error to insert in DB :type message:", "settings from django.utils import timezone from timevortex.utils.globals import LOGGER, KEY_ERROR,", "folder_path if not exists(self.folder_path): makedirs(self.folder_path) def insert_series(self, series): \"\"\"Insert series", "adapter for timevortex project\"\"\" import os from os import listdir,", "variable_id): \"\"\"Retrieve all series for a variable_id in site_id \"\"\"", "error published for a day_date \"\"\" element = KEY_ERROR site_folder", "tzname from datetime import datetime import pytz import dateutil.parser from", "def insert_error(self, message): \"\"\"Function that store error in errors collection", "KEY_NON_DST_TIMEZONE: last.split(\"\\t\")[3] } return None def get_last_error(self, site_id): \"\"\"Retrieve last", "in temp_series: array_line = line.split(\"\\t\") if len(array_line) >= 2: series[array_line[1]]", "last.split(\"\\t\")[3] } return None def get_last_error(self, site_id): \"\"\"Retrieve last error", "last file name \"\"\" try: new_date = new_filename.replace(file_prefix, \"\") new_date", "store_error(self, error): \"\"\"Function that create valid error message :param error:", "as filed2: for last in filed2: pass except IsADirectoryError: return", "in new_filename: old_date, last_filename = update_last_file_name(file_prefix, 
old_date, last_filename, new_filename) return", "if exists(site_folder): series = get_series_per_file(site_folder, file_prefix) else: series = {}", "site_id) if exists(site_folder): series = get_series_per_file(site_folder, file_prefix) else: series =", "element site_folder = \"%s/%s\" % (self.folder_path, site_id) if exists(site_folder): last_filename", "else: series = {} return series def get_last_series(self, site_id, variable_id):", "def get_series(self, site_id, variable_id): \"\"\"Retrieve all series for a variable_id", "old_date: return new_date, new_filename except ValueError: LOGGER.error(\"Not right file\") return", "exists(file_path): return get_lines_number(file_path) return 0 def get_number_of_series(self, site_id, day_date): \"\"\"This", "series.append([var_id, series_numbers]) return series def set_data_location(self, folder_path): \"\"\"Set data folder", "\"%s/%s\" % (self.folder_path, site_id) if exists(site_folder): last_filename = get_last_file_name(site_folder, file_prefix)", "get_number_of_error(self, site_id, day_date): \"\"\"This method retrieve number of error published", "last.split(\"\\t\")[1], KEY_DST_TIMEZONE: last.split(\"\\t\")[2], KEY_NON_DST_TIMEZONE: last.split(\"\\t\")[3] } return None def get_last_error(self,", "series): \"\"\"Insert series in DB :param series: Representation of a", "old_date, last_filename class FileStorage(object): \"\"\"Class that help us to store", "\"\"\" LOGGER.error(message) message[KEY_VARIABLE_ID] = KEY_ERROR self.insert(message) def store_error(self, error): \"\"\"Function", ">= 2: series[array_line[1]] = array_line[0] return series def get_last_file_name(site_folder, file_prefix):", "filed: temp_series = filed.readlines() for line in temp_series: array_line =", "file \"\"\" series = {} for filename in listdir(site_folder): is_file", "all series for a variable_id in site_id \"\"\" element =", "# pylint: disable=I0011,W0631 return { KEY_VARIABLE_ID: element, KEY_SITE_ID: site_id, KEY_VALUE:", "last error 
of a site_id file storage \"\"\" return self.get_last_series(site_id,", "\"\"\" self.insert(series) def insert(self, message): \"\"\"Insert data in file\"\"\" file_folder", "disable=I0011,W0631 return { KEY_VARIABLE_ID: element, KEY_SITE_ID: site_id, KEY_VALUE: last.split(\"\\t\")[0], KEY_DATE:", "message[KEY_VARIABLE_ID] = KEY_ERROR self.insert(message) def store_error(self, error): \"\"\"Function that create", "timezone.localtime( dateutil.parser.parse(message[KEY_DATE]).replace(tzinfo=pytz.UTC)).strftime(\"%Y-%m-%d\") if not exists(file_folder): makedirs(file_folder) raw_file = \"%s/%s.tsv.%s\" %", "\"\"\"This method retrieve number of error published for a day_date", "= datetime.strptime(new_date, \"%Y-%m-%d\") if old_date is None or new_date >", "last.decode(\"utf-8\").replace(\"\\n\", \"\") # pylint: disable=I0011,W0631 return { KEY_VARIABLE_ID: element, KEY_SITE_ID:", "if old_date is None or new_date > old_date: return new_date,", "return 0 def get_number_of_series(self, site_id, day_date): \"\"\"This method retrieve number", "\"%s/%s.tsv.%s\" % ( file_folder, message[KEY_VARIABLE_ID], file_date) extracted = open(raw_file, \"a+\")", "line in open(file_path)) def get_series_per_file(site_folder, file_prefix): \"\"\"Get series per file", "data in file\"\"\" file_folder = \"%s/%s\" % (self.folder_path, message[KEY_SITE_ID]) file_date", "len(array_line) >= 2: series[array_line[1]] = array_line[0] return series def get_last_file_name(site_folder,", "Error to insert in DB :type message: str. \"\"\" LOGGER.error(message)", "new_filename except ValueError: LOGGER.error(\"Not right file\") return old_date, last_filename class", "import pytz import dateutil.parser from django.conf import settings from django.utils", "series: Representation of a series :type series: dict. \"\"\" self.insert(series)", "of a series :type series: dict. 
\"\"\" self.insert(series) def insert(self,", "if exists(site_folder): for filename in listdir(site_folder): if \"%s.tsv\" % KEY_ERROR", "os.path import isfile, join, exists from time import tzname from", "variable_id file_prefix = \"%s.tsv.\" % element site_folder = \"%s/%s\" %", "\"SETTINGS_FILE_STORAGE_FOLDER\" SETTINGS_DEFAULT_FILE_STORAGE_FOLDER = \"/tmp/data/\" def get_lines_number(file_path): \"\"\"Get lines number \"\"\"", "-*- coding: utf8 -*- # -*- Mode: Python; py-indent-offset: 4", ":type message: str. \"\"\" LOGGER.error(message) message[KEY_VARIABLE_ID] = KEY_ERROR self.insert(message) def", "= open(raw_file, \"a+\") extracted.write(\"%s\\t%s\\t%s\\t%s\\n\" % ( message[KEY_VALUE], message[KEY_DATE], message[KEY_DST_TIMEZONE], message[KEY_NON_DST_TIMEZONE]))", "from time import tzname from datetime import datetime import pytz", "site_id \"\"\" element = variable_id file_prefix = \"%s.tsv.\" % element", "value of variable_id in site_id \"\"\" element = variable_id file_prefix", "help us to store and load data over several file\"\"\"", "= filename.replace(\".tsv.%s\" % day_date, \"\") series_numbers = get_lines_number(file_path) series.append([var_id, series_numbers])", "insert in DB :type message: str. 
\"\"\" LOGGER.error(message) message[KEY_VARIABLE_ID] =", "series = get_series_per_file(site_folder, file_prefix) else: series = {} return series", "= array_line[0] return series def get_last_file_name(site_folder, file_prefix): \"\"\"Get last filename", "\"%s/%s\" % (self.folder_path, message[KEY_SITE_ID]) file_date = timezone.localtime( dateutil.parser.parse(message[KEY_DATE]).replace(tzinfo=pytz.UTC)).strftime(\"%Y-%m-%d\") if not", "LOGGER.debug(last) # pylint: disable=I0011,W0631 last = last.decode(\"utf-8\").replace(\"\\n\", \"\") # pylint:", "lines number \"\"\" return sum(1 for line in open(file_path)) def", "listdir(site_folder): if \"%s.tsv\" % KEY_ERROR not in filename and day_date", "a day_date \"\"\" element = KEY_ERROR site_folder = \"%s/%s\" %", "datetime import datetime import pytz import dateutil.parser from django.conf import", "KEY_DATE: last.split(\"\\t\")[1], KEY_DST_TIMEZONE: last.split(\"\\t\")[2], KEY_NON_DST_TIMEZONE: last.split(\"\\t\")[3] } return None def", "ValueError: LOGGER.error(\"Not right file\") return old_date, last_filename class FileStorage(object): \"\"\"Class", "or new_date > old_date: return new_date, new_filename except ValueError: LOGGER.error(\"Not", "\"%s/%s\" % (self.folder_path, site_id) if exists(site_folder): series = get_series_per_file(site_folder, file_prefix)", "filed2: for last in filed2: pass except IsADirectoryError: return None", "for a day_date \"\"\" site_folder = \"%s/%s\" % (self.folder_path, site_id)", "series for a variable_id in site_id \"\"\" element = variable_id", "return series def set_data_location(self, folder_path): \"\"\"Set data folder space\"\"\" self.folder_path", "= \"%s/%s.tsv.%s\" % ( file_folder, message[KEY_VARIABLE_ID], file_date) extracted = open(raw_file,", "KEY_SITE_ID: site_id, KEY_VALUE: last.split(\"\\t\")[0], KEY_DATE: last.split(\"\\t\")[1], KEY_DST_TIMEZONE: last.split(\"\\t\")[2], KEY_NON_DST_TIMEZONE: last.split(\"\\t\")[3]", "last filename \"\"\" old_date = None 
last_filename = \"\" for", "return self.get_last_series(site_id, KEY_ERROR) def get_number_of_error(self, site_id, day_date): \"\"\"This method retrieve", "last_filename class FileStorage(object): \"\"\"Class that help us to store and", "to insert in DB :type message: str. \"\"\" LOGGER.error(message) message[KEY_VARIABLE_ID]", "site_id, day_date): \"\"\"This method retrieve number of error published for", "= \"%s/%s\" % (self.folder_path, site_id) series = [] if exists(site_folder):", "series = {} for filename in listdir(site_folder): is_file = isfile(join(site_folder,", "store error in errors collection and in log :param message:", "temp_series: array_line = line.split(\"\\t\") if len(array_line) >= 2: series[array_line[1]] =", "import timezone from timevortex.utils.globals import LOGGER, KEY_ERROR, KEY_SITE_ID, KEY_VARIABLE_ID, KEY_VALUE,", "\"\"\"Retrieve last value of variable_id in site_id \"\"\" element =", "KEY_ERROR, KEY_SITE_ID: SYSTEM_SITE_ID, KEY_DATE: datetime.utcnow().isoformat('T'), KEY_DST_TIMEZONE: tzname[1], KEY_NON_DST_TIMEZONE: tzname[0] }", "\"\" for new_filename in listdir(site_folder): is_file = isfile(join(site_folder, new_filename)) if", "% (self.folder_path, site_id) series = [] if exists(site_folder): for filename", "element = variable_id file_prefix = \"%s.tsv.\" % element site_folder =", "day_date): \"\"\"This method retrieve number of series published for a", "series = [] if exists(site_folder): for filename in listdir(site_folder): if", "exists from time import tzname from datetime import datetime import", ":type series: dict. 
\"\"\" self.insert(series) def insert(self, message): \"\"\"Insert data", "= KEY_ERROR site_folder = \"%s/%s\" % (self.folder_path, site_id) filename =", "site_id file storage \"\"\" return self.get_last_series(site_id, KEY_ERROR) def get_number_of_error(self, site_id,", "makedirs from os.path import isfile, join, exists from time import", "return None LOGGER.debug(last) # pylint: disable=I0011,W0631 last = last.decode(\"utf-8\").replace(\"\\n\", \"\")", "site_id) series = [] if exists(site_folder): for filename in listdir(site_folder):", "formed message :type error: str. \"\"\" message = { KEY_VALUE:", "load data over several file\"\"\" def __init__(self, folder_path): \"\"\"Constructor\"\"\" self.folder_path", "number \"\"\" return sum(1 for line in open(file_path)) def get_series_per_file(site_folder,", "update_last_file_name(file_prefix, old_date, last_filename, new_filename): \"\"\"Update last file name \"\"\" try:", "( file_folder, message[KEY_VARIABLE_ID], file_date) extracted = open(raw_file, \"a+\") extracted.write(\"%s\\t%s\\t%s\\t%s\\n\" %", "storage adapter for timevortex project\"\"\" import os from os import", "(self.folder_path, site_id) if exists(site_folder): series = get_series_per_file(site_folder, file_prefix) else: series", "self.insert(message) def get_series(self, site_id, variable_id): \"\"\"Retrieve all series for a", "% (self.folder_path, message[KEY_SITE_ID]) file_date = timezone.localtime( dateutil.parser.parse(message[KEY_DATE]).replace(tzinfo=pytz.UTC)).strftime(\"%Y-%m-%d\") if not exists(file_folder):", "= \"%s.tsv.%s\" % (element, day_date) file_path = \"%s/%s\" % (site_folder,", "\"\"\"Set data folder space\"\"\" self.folder_path = folder_path def get_sites_list(self): \"\"\"Get", "from datetime import datetime import pytz import dateutil.parser from django.conf", "self.insert(message) def store_error(self, error): \"\"\"Function that create valid error message", "} LOGGER.error(error) self.insert(message) def get_series(self, 
site_id, variable_id): \"\"\"Retrieve all series", "of series published for a day_date \"\"\" site_folder = \"%s/%s\"", "% ( message[KEY_VALUE], message[KEY_DATE], message[KEY_DST_TIMEZONE], message[KEY_NON_DST_TIMEZONE])) extracted.close() def insert_error(self, message):", "return series def get_last_file_name(site_folder, file_prefix): \"\"\"Get last filename \"\"\" old_date", "last_filename, new_filename) return last_filename def update_last_file_name(file_prefix, old_date, last_filename, new_filename): \"\"\"Update", "with open(complete_filename, \"r\") as filed: temp_series = filed.readlines() for line", "get_last_file_name(site_folder, file_prefix): \"\"\"Get last filename \"\"\" old_date = None last_filename", "in DB :type message: str. \"\"\" LOGGER.error(message) message[KEY_VARIABLE_ID] = KEY_ERROR", "valid error message :param error: Mal formed message :type error:", "\"%s/%s\" % (site_folder, filename) if exists(site_folder) and exists(file_path): return get_lines_number(file_path)", "\"\"\" message = { KEY_VALUE: error, KEY_VARIABLE_ID: KEY_ERROR, KEY_SITE_ID: SYSTEM_SITE_ID,", "= \"%s/%s\" % (site_folder, last_filename) try: with open(last_filename, \"rb\") as", "def get_number_of_error(self, site_id, day_date): \"\"\"This method retrieve number of error", "import datetime import pytz import dateutil.parser from django.conf import settings", "[] if exists(site_folder): for filename in listdir(site_folder): if \"%s.tsv\" %", "KEY_NON_DST_TIMEZONE: tzname[0] } LOGGER.error(error) self.insert(message) def get_series(self, site_id, variable_id): \"\"\"Retrieve", "get_lines_number(file_path): \"\"\"Get lines number \"\"\" return sum(1 for line in", "element site_folder = \"%s/%s\" % (self.folder_path, site_id) if exists(site_folder): series", "= \"\" for new_filename in listdir(site_folder): is_file = isfile(join(site_folder, new_filename))", ":type error: str. 
\"\"\" message = { KEY_VALUE: error, KEY_VARIABLE_ID:", "def get_sites_list(self): \"\"\"Get sites list\"\"\" return os.listdir(self.folder_path) FILE_STORAGE_SPACE = FileStorage(getattr(settings,", "old_date, last_filename = update_last_file_name(file_prefix, old_date, last_filename, new_filename) return last_filename def", "retrieve number of error published for a day_date \"\"\" element", "return new_date, new_filename except ValueError: LOGGER.error(\"Not right file\") return old_date,", "for a variable_id in site_id \"\"\" element = variable_id file_prefix", "= new_filename.replace(file_prefix, \"\") new_date = datetime.strptime(new_date, \"%Y-%m-%d\") if old_date is", "= variable_id file_prefix = \"%s.tsv.\" % element site_folder = \"%s/%s\"", "series per file \"\"\" series = {} for filename in", "import os from os import listdir, makedirs from os.path import", "file_prefix) last_filename = \"%s/%s\" % (site_folder, last_filename) try: with open(last_filename,", "per file \"\"\" series = {} for filename in listdir(site_folder):", "over several file\"\"\" def __init__(self, folder_path): \"\"\"Constructor\"\"\" self.folder_path = folder_path", "% element site_folder = \"%s/%s\" % (self.folder_path, site_id) if exists(site_folder):", "old_date, last_filename, new_filename) return last_filename def update_last_file_name(file_prefix, old_date, last_filename, new_filename):", "if exists(site_folder): last_filename = get_last_file_name(site_folder, file_prefix) last_filename = \"%s/%s\" %", "% (self.folder_path, site_id) filename = \"%s.tsv.%s\" % (element, day_date) file_path", "exists(site_folder): series = get_series_per_file(site_folder, file_prefix) else: series = {} return", "message[KEY_VARIABLE_ID], file_date) extracted = open(raw_file, \"a+\") extracted.write(\"%s\\t%s\\t%s\\t%s\\n\" % ( message[KEY_VALUE],", "os from os import listdir, makedirs from os.path import isfile,", "new_date = new_filename.replace(file_prefix, \"\") new_date = 
datetime.strptime(new_date, \"%Y-%m-%d\") if old_date", "day_date): \"\"\"This method retrieve number of error published for a", "for a day_date \"\"\" element = KEY_ERROR site_folder = \"%s/%s\"", "(site_folder, last_filename) try: with open(last_filename, \"rb\") as filed2: for last", "(self.folder_path, site_id) if exists(site_folder): last_filename = get_last_file_name(site_folder, file_prefix) last_filename =", "last_filename = get_last_file_name(site_folder, file_prefix) last_filename = \"%s/%s\" % (site_folder, last_filename)", "insert_series(self, series): \"\"\"Insert series in DB :param series: Representation of", "= \"%s/%s\" % (self.folder_path, message[KEY_SITE_ID]) file_date = timezone.localtime( dateutil.parser.parse(message[KEY_DATE]).replace(tzinfo=pytz.UTC)).strftime(\"%Y-%m-%d\") if", "KEY_VARIABLE_ID, KEY_VALUE, KEY_DATE from timevortex.utils.globals import KEY_DST_TIMEZONE, KEY_NON_DST_TIMEZONE, SYSTEM_SITE_ID SETTINGS_FILE_STORAGE_FOLDER", "for filename in listdir(site_folder): is_file = isfile(join(site_folder, filename)) if is_file", "def store_error(self, error): \"\"\"Function that create valid error message :param", "2: series[array_line[1]] = array_line[0] return series def get_last_file_name(site_folder, file_prefix): \"\"\"Get", "for new_filename in listdir(site_folder): is_file = isfile(join(site_folder, new_filename)) if is_file", "\"\"\" return self.get_last_series(site_id, KEY_ERROR) def get_number_of_error(self, site_id, day_date): \"\"\"This method", "\"\"\" site_folder = \"%s/%s\" % (self.folder_path, site_id) series = []", "variable_id in site_id \"\"\" element = variable_id file_prefix = \"%s.tsv.\"", "\"\"\" try: new_date = new_filename.replace(file_prefix, \"\") new_date = datetime.strptime(new_date, \"%Y-%m-%d\")", "new_filename)) if is_file and file_prefix in new_filename: old_date, last_filename =", "KEY_DATE: datetime.utcnow().isoformat('T'), KEY_DST_TIMEZONE: tzname[1], KEY_NON_DST_TIMEZONE: tzname[0] } 
LOGGER.error(error) self.insert(message) def", "\"a+\") extracted.write(\"%s\\t%s\\t%s\\t%s\\n\" % ( message[KEY_VALUE], message[KEY_DATE], message[KEY_DST_TIMEZONE], message[KEY_NON_DST_TIMEZONE])) extracted.close() def", "% (self.folder_path, site_id) if exists(site_folder): last_filename = get_last_file_name(site_folder, file_prefix) last_filename", "file\"\"\" def __init__(self, folder_path): \"\"\"Constructor\"\"\" self.folder_path = folder_path if not", "KEY_ERROR self.insert(message) def store_error(self, error): \"\"\"Function that create valid error", "and file_prefix in new_filename: old_date, last_filename = update_last_file_name(file_prefix, old_date, last_filename,", "if is_file and file_prefix in filename: complete_filename = \"%s/%s\" %", "error, KEY_VARIABLE_ID: KEY_ERROR, KEY_SITE_ID: SYSTEM_SITE_ID, KEY_DATE: datetime.utcnow().isoformat('T'), KEY_DST_TIMEZONE: tzname[1], KEY_NON_DST_TIMEZONE:", "None LOGGER.debug(last) # pylint: disable=I0011,W0631 last = last.decode(\"utf-8\").replace(\"\\n\", \"\") #", "Mode: Python; py-indent-offset: 4 -*- \"\"\"File storage adapter for timevortex", "in open(file_path)) def get_series_per_file(site_folder, file_prefix): \"\"\"Get series per file \"\"\"", "of variable_id in site_id \"\"\" element = variable_id file_prefix =", "message): \"\"\"Insert data in file\"\"\" file_folder = \"%s/%s\" % (self.folder_path,", "new_filename: old_date, last_filename = update_last_file_name(file_prefix, old_date, last_filename, new_filename) return last_filename", "last.split(\"\\t\")[2], KEY_NON_DST_TIMEZONE: last.split(\"\\t\")[3] } return None def get_last_error(self, site_id): \"\"\"Retrieve", "filename) var_id = filename.replace(\".tsv.%s\" % day_date, \"\") series_numbers = get_lines_number(file_path)", "(site_folder, filename) if exists(site_folder) and exists(file_path): return get_lines_number(file_path) return 0", "= last.decode(\"utf-8\").replace(\"\\n\", \"\") # pylint: disable=I0011,W0631 return { KEY_VARIABLE_ID: 
element,", "in listdir(site_folder): if \"%s.tsv\" % KEY_ERROR not in filename and", "= update_last_file_name(file_prefix, old_date, last_filename, new_filename) return last_filename def update_last_file_name(file_prefix, old_date,", "site_id) filename = \"%s.tsv.%s\" % (element, day_date) file_path = \"%s/%s\"", "\"\"\"Retrieve last error of a site_id file storage \"\"\" return", "errors collection and in log :param message: Error to insert", "last_filename, new_filename): \"\"\"Update last file name \"\"\" try: new_date =", "\"\"\" series = {} for filename in listdir(site_folder): is_file =", "that help us to store and load data over several", "site_id): \"\"\"Retrieve last error of a site_id file storage \"\"\"", "extracted.write(\"%s\\t%s\\t%s\\t%s\\n\" % ( message[KEY_VALUE], message[KEY_DATE], message[KEY_DST_TIMEZONE], message[KEY_NON_DST_TIMEZONE])) extracted.close() def insert_error(self,", "not exists(file_folder): makedirs(file_folder) raw_file = \"%s/%s.tsv.%s\" % ( file_folder, message[KEY_VARIABLE_ID],", "timevortex.utils.globals import KEY_DST_TIMEZONE, KEY_NON_DST_TIMEZONE, SYSTEM_SITE_ID SETTINGS_FILE_STORAGE_FOLDER = \"SETTINGS_FILE_STORAGE_FOLDER\" SETTINGS_DEFAULT_FILE_STORAGE_FOLDER =", "\"\"\" return sum(1 for line in open(file_path)) def get_series_per_file(site_folder, file_prefix):", "last value of variable_id in site_id \"\"\" element = variable_id", "insert(self, message): \"\"\"Insert data in file\"\"\" file_folder = \"%s/%s\" %", "file_prefix in new_filename: old_date, last_filename = update_last_file_name(file_prefix, old_date, last_filename, new_filename)", "% day_date, \"\") series_numbers = get_lines_number(file_path) series.append([var_id, series_numbers]) return series", "= isfile(join(site_folder, new_filename)) if is_file and file_prefix in new_filename: old_date,", "a day_date \"\"\" site_folder = \"%s/%s\" % (self.folder_path, site_id) series", "\"\") new_date = datetime.strptime(new_date, \"%Y-%m-%d\") if old_date is None 
or", "KEY_DST_TIMEZONE: last.split(\"\\t\")[2], KEY_NON_DST_TIMEZONE: last.split(\"\\t\")[3] } return None def get_last_error(self, site_id):", "% (site_folder, filename) with open(complete_filename, \"r\") as filed: temp_series =", "dateutil.parser from django.conf import settings from django.utils import timezone from", "series def get_last_file_name(site_folder, file_prefix): \"\"\"Get last filename \"\"\" old_date =", "None def get_last_error(self, site_id): \"\"\"Retrieve last error of a site_id", "-*- Mode: Python; py-indent-offset: 4 -*- \"\"\"File storage adapter for", "var_id = filename.replace(\".tsv.%s\" % day_date, \"\") series_numbers = get_lines_number(file_path) series.append([var_id,", "in errors collection and in log :param message: Error to", "makedirs(file_folder) raw_file = \"%s/%s.tsv.%s\" % ( file_folder, message[KEY_VARIABLE_ID], file_date) extracted", "storage \"\"\" return self.get_last_series(site_id, KEY_ERROR) def get_number_of_error(self, site_id, day_date): \"\"\"This", "SETTINGS_DEFAULT_FILE_STORAGE_FOLDER = \"/tmp/data/\" def get_lines_number(file_path): \"\"\"Get lines number \"\"\" return", "file_path = \"%s/%s\" % (site_folder, filename) if exists(site_folder) and exists(file_path):", "file_date = timezone.localtime( dateutil.parser.parse(message[KEY_DATE]).replace(tzinfo=pytz.UTC)).strftime(\"%Y-%m-%d\") if not exists(file_folder): makedirs(file_folder) raw_file =", "def get_last_error(self, site_id): \"\"\"Retrieve last error of a site_id file", "\"\"\"Constructor\"\"\" self.folder_path = folder_path if not exists(self.folder_path): makedirs(self.folder_path) def insert_series(self,", "% (site_folder, filename) if exists(site_folder) and exists(file_path): return get_lines_number(file_path) return", "django.utils import timezone from timevortex.utils.globals import LOGGER, KEY_ERROR, KEY_SITE_ID, KEY_VARIABLE_ID,", "right file\") return old_date, last_filename class FileStorage(object): \"\"\"Class that help", "import tzname 
from datetime import datetime import pytz import dateutil.parser", "complete_filename = \"%s/%s\" % (site_folder, filename) with open(complete_filename, \"r\") as", "django.conf import settings from django.utils import timezone from timevortex.utils.globals import", "coding: utf8 -*- # -*- Mode: Python; py-indent-offset: 4 -*-", "new_filename in listdir(site_folder): is_file = isfile(join(site_folder, new_filename)) if is_file and", "return get_lines_number(file_path) return 0 def get_number_of_series(self, site_id, day_date): \"\"\"This method", "open(raw_file, \"a+\") extracted.write(\"%s\\t%s\\t%s\\t%s\\n\" % ( message[KEY_VALUE], message[KEY_DATE], message[KEY_DST_TIMEZONE], message[KEY_NON_DST_TIMEZONE])) extracted.close()", "for timevortex project\"\"\" import os from os import listdir, makedirs", "import settings from django.utils import timezone from timevortex.utils.globals import LOGGER,", "isfile(join(site_folder, new_filename)) if is_file and file_prefix in new_filename: old_date, last_filename", "\"%s/%s\" % (site_folder, last_filename) try: with open(last_filename, \"rb\") as filed2:", "= folder_path def get_sites_list(self): \"\"\"Get sites list\"\"\" return os.listdir(self.folder_path) FILE_STORAGE_SPACE", "in site_id \"\"\" element = variable_id file_prefix = \"%s.tsv.\" %", "element = KEY_ERROR site_folder = \"%s/%s\" % (self.folder_path, site_id) filename", "\"\"\"This method retrieve number of series published for a day_date", "method retrieve number of series published for a day_date \"\"\"", "def get_lines_number(file_path): \"\"\"Get lines number \"\"\" return sum(1 for line", "KEY_VARIABLE_ID: KEY_ERROR, KEY_SITE_ID: SYSTEM_SITE_ID, KEY_DATE: datetime.utcnow().isoformat('T'), KEY_DST_TIMEZONE: tzname[1], KEY_NON_DST_TIMEZONE: tzname[0]", "makedirs(self.folder_path) def insert_series(self, series): \"\"\"Insert series in DB :param series:", "is_file = isfile(join(site_folder, filename)) if is_file and file_prefix in filename:", "return series 
def get_last_series(self, site_id, variable_id): \"\"\"Retrieve last value of", "message: Error to insert in DB :type message: str. \"\"\"", "pytz import dateutil.parser from django.conf import settings from django.utils import", "return None def get_last_error(self, site_id): \"\"\"Retrieve last error of a", "IsADirectoryError: return None LOGGER.debug(last) # pylint: disable=I0011,W0631 last = last.decode(\"utf-8\").replace(\"\\n\",", "4 -*- \"\"\"File storage adapter for timevortex project\"\"\" import os", "% (self.folder_path, site_id) if exists(site_folder): series = get_series_per_file(site_folder, file_prefix) else:", "% (site_folder, last_filename) try: with open(last_filename, \"rb\") as filed2: for", "KEY_VALUE: last.split(\"\\t\")[0], KEY_DATE: last.split(\"\\t\")[1], KEY_DST_TIMEZONE: last.split(\"\\t\")[2], KEY_NON_DST_TIMEZONE: last.split(\"\\t\")[3] } return", "KEY_VALUE: error, KEY_VARIABLE_ID: KEY_ERROR, KEY_SITE_ID: SYSTEM_SITE_ID, KEY_DATE: datetime.utcnow().isoformat('T'), KEY_DST_TIMEZONE: tzname[1],", "a site_id file storage \"\"\" return self.get_last_series(site_id, KEY_ERROR) def get_number_of_error(self,", "of error published for a day_date \"\"\" element = KEY_ERROR", "SETTINGS_FILE_STORAGE_FOLDER = \"SETTINGS_FILE_STORAGE_FOLDER\" SETTINGS_DEFAULT_FILE_STORAGE_FOLDER = \"/tmp/data/\" def get_lines_number(file_path): \"\"\"Get lines", "\"\"\"File storage adapter for timevortex project\"\"\" import os from os", "\"\"\" element = variable_id file_prefix = \"%s.tsv.\" % element site_folder", "import listdir, makedirs from os.path import isfile, join, exists from", "KEY_ERROR, KEY_SITE_ID, KEY_VARIABLE_ID, KEY_VALUE, KEY_DATE from timevortex.utils.globals import KEY_DST_TIMEZONE, KEY_NON_DST_TIMEZONE,", "if exists(site_folder) and exists(file_path): return get_lines_number(file_path) return 0 def get_number_of_series(self,", "line.split(\"\\t\") if len(array_line) >= 2: series[array_line[1]] = array_line[0] return series", "DB :param series: 
Representation of a series :type series: dict.", "{} for filename in listdir(site_folder): is_file = isfile(join(site_folder, filename)) if", "# -*- coding: utf8 -*- # -*- Mode: Python; py-indent-offset:", "series :type series: dict. \"\"\" self.insert(series) def insert(self, message): \"\"\"Insert", "os import listdir, makedirs from os.path import isfile, join, exists", "timezone from timevortex.utils.globals import LOGGER, KEY_ERROR, KEY_SITE_ID, KEY_VARIABLE_ID, KEY_VALUE, KEY_DATE", "file_folder, message[KEY_VARIABLE_ID], file_date) extracted = open(raw_file, \"a+\") extracted.write(\"%s\\t%s\\t%s\\t%s\\n\" % (", "def set_data_location(self, folder_path): \"\"\"Set data folder space\"\"\" self.folder_path = folder_path", "data folder space\"\"\" self.folder_path = folder_path def get_sites_list(self): \"\"\"Get sites", "error: Mal formed message :type error: str. \"\"\" message =", "filename: file_path = \"%s/%s\" % (site_folder, filename) var_id = filename.replace(\".tsv.%s\"", "folder_path def get_sites_list(self): \"\"\"Get sites list\"\"\" return os.listdir(self.folder_path) FILE_STORAGE_SPACE =", "listdir(site_folder): is_file = isfile(join(site_folder, filename)) if is_file and file_prefix in", "file_prefix): \"\"\"Get series per file \"\"\" series = {} for", "create valid error message :param error: Mal formed message :type", "get_lines_number(file_path) return 0 def get_number_of_series(self, site_id, day_date): \"\"\"This method retrieve", "error in errors collection and in log :param message: Error", "= [] if exists(site_folder): for filename in listdir(site_folder): if \"%s.tsv\"", "} return None def get_last_error(self, site_id): \"\"\"Retrieve last error of", "class FileStorage(object): \"\"\"Class that help us to store and load", "(self.folder_path, site_id) series = [] if exists(site_folder): for filename in", "\"\"\"Get sites list\"\"\" return os.listdir(self.folder_path) FILE_STORAGE_SPACE = FileStorage(getattr(settings, 
SETTINGS_FILE_STORAGE_FOLDER, SETTINGS_DEFAULT_FILE_STORAGE_FOLDER))", "exists(self.folder_path): makedirs(self.folder_path) def insert_series(self, series): \"\"\"Insert series in DB :param", "\"r\") as filed: temp_series = filed.readlines() for line in temp_series:", "KEY_VALUE, KEY_DATE from timevortex.utils.globals import KEY_DST_TIMEZONE, KEY_NON_DST_TIMEZONE, SYSTEM_SITE_ID SETTINGS_FILE_STORAGE_FOLDER =", "published for a day_date \"\"\" element = KEY_ERROR site_folder =", "from django.conf import settings from django.utils import timezone from timevortex.utils.globals", "__init__(self, folder_path): \"\"\"Constructor\"\"\" self.folder_path = folder_path if not exists(self.folder_path): makedirs(self.folder_path)", "if is_file and file_prefix in new_filename: old_date, last_filename = update_last_file_name(file_prefix,", "published for a day_date \"\"\" site_folder = \"%s/%s\" % (self.folder_path,", "\"%s/%s\" % (self.folder_path, site_id) series = [] if exists(site_folder): for", "filed2: pass except IsADirectoryError: return None LOGGER.debug(last) # pylint: disable=I0011,W0631", "filename \"\"\" old_date = None last_filename = \"\" for new_filename", "a variable_id in site_id \"\"\" element = variable_id file_prefix =", "def update_last_file_name(file_prefix, old_date, last_filename, new_filename): \"\"\"Update last file name \"\"\"", "message = { KEY_VALUE: error, KEY_VARIABLE_ID: KEY_ERROR, KEY_SITE_ID: SYSTEM_SITE_ID, KEY_DATE:", "= \"/tmp/data/\" def get_lines_number(file_path): \"\"\"Get lines number \"\"\" return sum(1", "new_date = datetime.strptime(new_date, \"%Y-%m-%d\") if old_date is None or new_date", "is_file and file_prefix in new_filename: old_date, last_filename = update_last_file_name(file_prefix, old_date,", "\"rb\") as filed2: for last in filed2: pass except IsADirectoryError:", "(self.folder_path, site_id) filename = \"%s.tsv.%s\" % (element, day_date) file_path =", "in filed2: pass except IsADirectoryError: return None 
LOGGER.debug(last) # pylint:", "0 def get_number_of_series(self, site_id, day_date): \"\"\"This method retrieve number of", "variable_id): \"\"\"Retrieve last value of variable_id in site_id \"\"\" element", "a series :type series: dict. \"\"\" self.insert(series) def insert(self, message):", "= timezone.localtime( dateutil.parser.parse(message[KEY_DATE]).replace(tzinfo=pytz.UTC)).strftime(\"%Y-%m-%d\") if not exists(file_folder): makedirs(file_folder) raw_file = \"%s/%s.tsv.%s\"", "KEY_SITE_ID, KEY_VARIABLE_ID, KEY_VALUE, KEY_DATE from timevortex.utils.globals import KEY_DST_TIMEZONE, KEY_NON_DST_TIMEZONE, SYSTEM_SITE_ID", "for line in open(file_path)) def get_series_per_file(site_folder, file_prefix): \"\"\"Get series per", "except ValueError: LOGGER.error(\"Not right file\") return old_date, last_filename class FileStorage(object):", "dict. \"\"\" self.insert(series) def insert(self, message): \"\"\"Insert data in file\"\"\"", "= None last_filename = \"\" for new_filename in listdir(site_folder): is_file", "to store and load data over several file\"\"\" def __init__(self,", "site_folder = \"%s/%s\" % (self.folder_path, site_id) filename = \"%s.tsv.%s\" %", "element, KEY_SITE_ID: site_id, KEY_VALUE: last.split(\"\\t\")[0], KEY_DATE: last.split(\"\\t\")[1], KEY_DST_TIMEZONE: last.split(\"\\t\")[2], KEY_NON_DST_TIMEZONE:", "new_filename) return last_filename def update_last_file_name(file_prefix, old_date, last_filename, new_filename): \"\"\"Update last", "several file\"\"\" def __init__(self, folder_path): \"\"\"Constructor\"\"\" self.folder_path = folder_path if", "\"%s.tsv\" % KEY_ERROR not in filename and day_date in filename:", "series_numbers]) return series def set_data_location(self, folder_path): \"\"\"Set data folder space\"\"\"", "and exists(file_path): return get_lines_number(file_path) return 0 def get_number_of_series(self, site_id, day_date):", "filename = \"%s.tsv.%s\" % (element, day_date) file_path = \"%s/%s\" %", "and load data over several 
file\"\"\" def __init__(self, folder_path): \"\"\"Constructor\"\"\"", "space\"\"\" self.folder_path = folder_path def get_sites_list(self): \"\"\"Get sites list\"\"\" return", "import KEY_DST_TIMEZONE, KEY_NON_DST_TIMEZONE, SYSTEM_SITE_ID SETTINGS_FILE_STORAGE_FOLDER = \"SETTINGS_FILE_STORAGE_FOLDER\" SETTINGS_DEFAULT_FILE_STORAGE_FOLDER = \"/tmp/data/\"", "pylint: disable=I0011,W0631 return { KEY_VARIABLE_ID: element, KEY_SITE_ID: site_id, KEY_VALUE: last.split(\"\\t\")[0],", "time import tzname from datetime import datetime import pytz import", "insert_error(self, message): \"\"\"Function that store error in errors collection and", "KEY_DST_TIMEZONE: tzname[1], KEY_NON_DST_TIMEZONE: tzname[0] } LOGGER.error(error) self.insert(message) def get_series(self, site_id,", "for last in filed2: pass except IsADirectoryError: return None LOGGER.debug(last)", "import LOGGER, KEY_ERROR, KEY_SITE_ID, KEY_VARIABLE_ID, KEY_VALUE, KEY_DATE from timevortex.utils.globals import", "datetime.utcnow().isoformat('T'), KEY_DST_TIMEZONE: tzname[1], KEY_NON_DST_TIMEZONE: tzname[0] } LOGGER.error(error) self.insert(message) def get_series(self,", "if len(array_line) >= 2: series[array_line[1]] = array_line[0] return series def", "open(last_filename, \"rb\") as filed2: for last in filed2: pass except", "is_file and file_prefix in filename: complete_filename = \"%s/%s\" % (site_folder,", "(self.folder_path, message[KEY_SITE_ID]) file_date = timezone.localtime( dateutil.parser.parse(message[KEY_DATE]).replace(tzinfo=pytz.UTC)).strftime(\"%Y-%m-%d\") if not exists(file_folder): makedirs(file_folder)", "SYSTEM_SITE_ID SETTINGS_FILE_STORAGE_FOLDER = \"SETTINGS_FILE_STORAGE_FOLDER\" SETTINGS_DEFAULT_FILE_STORAGE_FOLDER = \"/tmp/data/\" def get_lines_number(file_path): \"\"\"Get", "def get_series_per_file(site_folder, file_prefix): \"\"\"Get series per file \"\"\" series =", "def get_last_file_name(site_folder, file_prefix): \"\"\"Get last filename \"\"\" old_date = None", "exists(site_folder): 
for filename in listdir(site_folder): if \"%s.tsv\" % KEY_ERROR not", "as filed: temp_series = filed.readlines() for line in temp_series: array_line", "\"\"\" old_date = None last_filename = \"\" for new_filename in", "= { KEY_VALUE: error, KEY_VARIABLE_ID: KEY_ERROR, KEY_SITE_ID: SYSTEM_SITE_ID, KEY_DATE: datetime.utcnow().isoformat('T'),", "pass except IsADirectoryError: return None LOGGER.debug(last) # pylint: disable=I0011,W0631 last", "in log :param message: Error to insert in DB :type", "message :type error: str. \"\"\" message = { KEY_VALUE: error,", "isfile, join, exists from time import tzname from datetime import", "= {} return series def get_last_series(self, site_id, variable_id): \"\"\"Retrieve last", "\"\"\"Function that store error in errors collection and in log", "= {} for filename in listdir(site_folder): is_file = isfile(join(site_folder, filename))", "folder space\"\"\" self.folder_path = folder_path def get_sites_list(self): \"\"\"Get sites list\"\"\"", "number of error published for a day_date \"\"\" element =", "last = last.decode(\"utf-8\").replace(\"\\n\", \"\") # pylint: disable=I0011,W0631 return { KEY_VARIABLE_ID:", "(site_folder, filename) var_id = filename.replace(\".tsv.%s\" % day_date, \"\") series_numbers =", "data over several file\"\"\" def __init__(self, folder_path): \"\"\"Constructor\"\"\" self.folder_path =", "from os.path import isfile, join, exists from time import tzname", "in listdir(site_folder): is_file = isfile(join(site_folder, filename)) if is_file and file_prefix", "return old_date, last_filename class FileStorage(object): \"\"\"Class that help us to", "site_id, variable_id): \"\"\"Retrieve all series for a variable_id in site_id", "return last_filename def update_last_file_name(file_prefix, old_date, last_filename, new_filename): \"\"\"Update last file", "\"\") series_numbers = get_lines_number(file_path) series.append([var_id, series_numbers]) return series def set_data_location(self,", "\"\"\"Insert series in DB 
:param series: Representation of a series", "from timevortex.utils.globals import KEY_DST_TIMEZONE, KEY_NON_DST_TIMEZONE, SYSTEM_SITE_ID SETTINGS_FILE_STORAGE_FOLDER = \"SETTINGS_FILE_STORAGE_FOLDER\" SETTINGS_DEFAULT_FILE_STORAGE_FOLDER", "file name \"\"\" try: new_date = new_filename.replace(file_prefix, \"\") new_date =", "filename)) if is_file and file_prefix in filename: complete_filename = \"%s/%s\"", "and day_date in filename: file_path = \"%s/%s\" % (site_folder, filename)", "#!/usr/bin/python3 # -*- coding: utf8 -*- # -*- Mode: Python;", "open(file_path)) def get_series_per_file(site_folder, file_prefix): \"\"\"Get series per file \"\"\" series", "store and load data over several file\"\"\" def __init__(self, folder_path):", "error message :param error: Mal formed message :type error: str.", "dateutil.parser.parse(message[KEY_DATE]).replace(tzinfo=pytz.UTC)).strftime(\"%Y-%m-%d\") if not exists(file_folder): makedirs(file_folder) raw_file = \"%s/%s.tsv.%s\" % (", "def get_number_of_series(self, site_id, day_date): \"\"\"This method retrieve number of series", "join, exists from time import tzname from datetime import datetime", "array_line = line.split(\"\\t\") if len(array_line) >= 2: series[array_line[1]] = array_line[0]", "message[KEY_DATE], message[KEY_DST_TIMEZONE], message[KEY_NON_DST_TIMEZONE])) extracted.close() def insert_error(self, message): \"\"\"Function that store", "filename: complete_filename = \"%s/%s\" % (site_folder, filename) with open(complete_filename, \"r\")", "= \"SETTINGS_FILE_STORAGE_FOLDER\" SETTINGS_DEFAULT_FILE_STORAGE_FOLDER = \"/tmp/data/\" def get_lines_number(file_path): \"\"\"Get lines number", "folder_path): \"\"\"Constructor\"\"\" self.folder_path = folder_path if not exists(self.folder_path): makedirs(self.folder_path) def", "file_date) extracted = open(raw_file, \"a+\") extracted.write(\"%s\\t%s\\t%s\\t%s\\n\" % ( message[KEY_VALUE], message[KEY_DATE],", "-*- # -*- Mode: Python; py-indent-offset: 4 -*- \"\"\"File 
storage", "timevortex project\"\"\" import os from os import listdir, makedirs from", "file\") return old_date, last_filename class FileStorage(object): \"\"\"Class that help us", "try: new_date = new_filename.replace(file_prefix, \"\") new_date = datetime.strptime(new_date, \"%Y-%m-%d\") if", "from django.utils import timezone from timevortex.utils.globals import LOGGER, KEY_ERROR, KEY_SITE_ID," ]
[ "log_entry = self.format(record) bot.send_message(chat_id=chat_id_tg_admin, text=log_entry) dictLogConfig = { 'version': 1,", "do_reply(update, text, keyboard=None): if keyboard is None: markup = ReplyKeyboardRemove()", "Filters) from redis import Redis from tg_logging import create_logger from", "[ RegexHandler('^Выход$', do_exit), RegexHandler('^(Новый вопрос|Другой вопрос)$', partial(handle_new_question, rediser)), RegexHandler('^Показать ответ$',", "format_answer, format_question QUESTION, ATTEMPT = range(2) def main(): class LoggerTelegramBot(logging.Handler):", "вопрос)$', partial(handle_new_question, rediser)), RegexHandler('^Показать ответ$', partial(display_answer, rediser)), MessageHandler(Filters.text, partial(handle_attempt, rediser))],", "'handlers': { 'handler': { '()': LoggerTelegramBot, 'formatter': 'formatter' } },", "Начинаем?', [['Да!']]) return QUESTION def handle_new_question(rediser, bot, update): new_question, new_answer", "MessageHandler, RegexHandler, ConversationHandler, Filters) from redis import Redis from tg_logging", "telegram.ext import (Updater, CommandHandler, MessageHandler, RegexHandler, ConversationHandler, Filters) from redis", "partial(display_answer, rediser)), MessageHandler(Filters.text, partial(handle_attempt, rediser))], }, fallbacks=[CommandHandler('cancel', do_exit)] ) dp.add_handler(conv_handler)", "new_answer = get_question_and_answer() chat_id = update.message.chat_id rediser.set(chat_id, new_answer) do_reply(update, format_question(new_question))", "в чате-викторине! 
Начинаем?', [['Да!']]) return QUESTION def handle_new_question(rediser, bot, update):", "rediser = Redis( host=os.getenv('REDIS_HOST'), port=os.getenv('REDIS_PORT'), db=0, password=os.getenv('REDIS_PWD')) updater = Updater(token_tg)", "tg_logging import create_logger from quiz_tools import get_question_and_answer, format_answer, format_question QUESTION,", "ответ$', partial(display_answer, rediser)), MessageHandler(Filters.text, partial(handle_attempt, rediser))], }, fallbacks=[CommandHandler('cancel', do_exit)] )", "do_reply(update, text, reply_keyboard) return ATTEMPT def do_exit(bot, update): text =", "logging import logging.config from functools import partial from dotenv import", "emit(self, record): log_entry = self.format(record) bot.send_message(chat_id=chat_id_tg_admin, text=log_entry) dictLogConfig = {", "'formatter': 'formatter' } }, 'loggers': { 'tg_logger': { 'handlers': ['handler'],", "ATTEMPT def display_answer(rediser, bot, update): chat_id = update.message.chat_id answer =", "do_exit), RegexHandler('^(Новый вопрос|Другой вопрос)$', partial(handle_new_question, rediser)), RegexHandler('^Показать ответ$', partial(display_answer, rediser)),", "def start(bot, update): do_reply(update, 'Привет знатоку в чате-викторине! Начинаем?', [['Да!']])", "rediser.get(chat_id).decode() do_reply(update, answer, [['Новый вопрос', 'Выход']]) return QUESTION def handle_attempt(rediser,", "answer, [['Новый вопрос', 'Выход']]) return QUESTION def handle_attempt(rediser, bot, update):", "format_question QUESTION, ATTEMPT = range(2) def main(): class LoggerTelegramBot(logging.Handler): def", "<reponame>olegush/quiz-bot<filename>main_tg.py import os import logging import logging.config from functools import", "раз.' reply_keyboard = [['Показать ответ', 'Другой вопрос', 'Выход']] do_reply(update, text,", "= [['Показать ответ', 'Другой вопрос', 'Выход']] do_reply(update, text, reply_keyboard) return", "text = 'До скорой встречи! Желаете начать заново? 
Жмите /start'", "%(message)s' } } } load_dotenv() chat_id_tg_admin = os.getenv('CHAT_ID_TG_ADMIN') bot =", "chat_id_tg_admin = os.getenv('CHAT_ID_TG_ADMIN') bot = Bot(token=os.getenv('TOKEN_TG')) logging.config.dictConfig(dictLogConfig) logger = logging.getLogger('tg_logger')", "'level': 'INFO' } }, 'formatters': { 'formatter': { 'format': '%(asctime)s", "'format': '%(asctime)s - %(levelname)s - %(message)s' } } } load_dotenv()", "resize_keyboard=True) return update.message.reply_text(text, reply_markup=markup) def start(bot, update): do_reply(update, 'Привет знатоку", "= ReplyKeyboardMarkup(keyboard, resize_keyboard=True) return update.message.reply_text(text, reply_markup=markup) def start(bot, update): do_reply(update,", "'handlers': ['handler'], 'level': 'INFO' } }, 'formatters': { 'formatter': {", "ReplyKeyboardMarkup(keyboard, resize_keyboard=True) return update.message.reply_text(text, reply_markup=markup) def start(bot, update): do_reply(update, 'Привет", "'()': LoggerTelegramBot, 'formatter': 'formatter' } }, 'loggers': { 'tg_logger': {", "= update.message.chat_id answer = rediser.get(chat_id).decode() do_reply(update, answer, [['Новый вопрос', 'Выход']])", "new_question, new_answer = get_question_and_answer() chat_id = update.message.chat_id rediser.set(chat_id, new_answer) do_reply(update,", "== format_answer(answer): text = 'Правильно! \\n\\n {}'.format(answer) reply_keyboard = [['Новый", "'Выход']]) return QUESTION def handle_attempt(rediser, bot, update): chat_id = update.message.chat_id", "text = 'Правильно! 
\\n\\n {}'.format(answer) reply_keyboard = [['Новый вопрос', 'Выход']]", "\\n\\n {}'.format(answer) reply_keyboard = [['Новый вопрос', 'Выход']] else: text =", "}, 'formatters': { 'formatter': { 'format': '%(asctime)s - %(levelname)s -", "logging.getLogger('tg_logger') handler = LoggerTelegramBot() logger.addHandler(handler) rediser = Redis( host=os.getenv('REDIS_HOST'), port=os.getenv('REDIS_PORT'),", "updater.start_polling() updater.idle() def do_reply(update, text, keyboard=None): if keyboard is None:", "update): do_reply(update, 'Привет знатоку в чате-викторине! Начинаем?', [['Да!']]) return QUESTION", "do_reply(update, format_question(new_question)) return ATTEMPT def display_answer(rediser, bot, update): chat_id =", "%(levelname)s - %(message)s' } } } load_dotenv() chat_id_tg_admin = os.getenv('CHAT_ID_TG_ADMIN')", "[['Новый вопрос', 'Выход']]) return QUESTION def handle_attempt(rediser, bot, update): chat_id", "import logging.config from functools import partial from dotenv import load_dotenv", "display_answer(rediser, bot, update): chat_id = update.message.chat_id answer = rediser.get(chat_id).decode() do_reply(update,", "- %(message)s' } } } load_dotenv() chat_id_tg_admin = os.getenv('CHAT_ID_TG_ADMIN') bot", "{}'.format(answer) reply_keyboard = [['Новый вопрос', 'Выход']] else: text = 'Неверно!", "[['Новый вопрос', 'Выход']] else: text = 'Неверно! Попробуйте еще раз.'", "if attempt == format_answer(answer): text = 'Правильно! \\n\\n {}'.format(answer) reply_keyboard", "return update.message.reply_text(text, reply_markup=markup) def start(bot, update): do_reply(update, 'Привет знатоку в", "Желаете начать заново? Жмите /start' do_reply(update, text) return ConversationHandler.END if", "{ '()': LoggerTelegramBot, 'formatter': 'formatter' } }, 'loggers': { 'tg_logger':", "def do_exit(bot, update): text = 'До скорой встречи! Желаете начать", "= 'До скорой встречи! Желаете начать заново? 
Жмите /start' do_reply(update,", "RegexHandler, ConversationHandler, Filters) from redis import Redis from tg_logging import", "'formatters': { 'formatter': { 'format': '%(asctime)s - %(levelname)s - %(message)s'", "partial(handle_attempt, rediser))], }, fallbacks=[CommandHandler('cancel', do_exit)] ) dp.add_handler(conv_handler) updater.start_polling() updater.idle() def", "handler = LoggerTelegramBot() logger.addHandler(handler) rediser = Redis( host=os.getenv('REDIS_HOST'), port=os.getenv('REDIS_PORT'), db=0,", "start)], states={ QUESTION: [ RegexHandler('^Выход$', do_exit), MessageHandler(Filters.text, partial(handle_new_question, rediser))], ATTEMPT:", "handle_new_question(rediser, bot, update): new_question, new_answer = get_question_and_answer() chat_id = update.message.chat_id", "text, keyboard=None): if keyboard is None: markup = ReplyKeyboardRemove() return", "partial from dotenv import load_dotenv from telegram import Bot, ReplyKeyboardMarkup,", "встречи! Желаете начать заново? 
Жмите /start' do_reply(update, text) return ConversationHandler.END", "return update.message.reply_text(text, reply_markup=markup) markup = ReplyKeyboardMarkup(keyboard, resize_keyboard=True) return update.message.reply_text(text, reply_markup=markup)", "QUESTION def handle_new_question(rediser, bot, update): new_question, new_answer = get_question_and_answer() chat_id", "= update.message.chat_id attempt = update.message.text.strip().lower() answer = rediser.get(chat_id).decode() if attempt", "MessageHandler(Filters.text, partial(handle_new_question, rediser))], ATTEMPT: [ RegexHandler('^Выход$', do_exit), RegexHandler('^(Новый вопрос|Другой вопрос)$',", "RegexHandler('^Показать ответ$', partial(display_answer, rediser)), MessageHandler(Filters.text, partial(handle_attempt, rediser))], }, fallbacks=[CommandHandler('cancel', do_exit)]", "update.message.reply_text(text, reply_markup=markup) markup = ReplyKeyboardMarkup(keyboard, resize_keyboard=True) return update.message.reply_text(text, reply_markup=markup) def", "= update.message.text.strip().lower() answer = rediser.get(chat_id).decode() if attempt == format_answer(answer): text", "[['Да!']]) return QUESTION def handle_new_question(rediser, bot, update): new_question, new_answer =", "1, 'handlers': { 'handler': { '()': LoggerTelegramBot, 'formatter': 'formatter' }", "'formatter' } }, 'loggers': { 'tg_logger': { 'handlers': ['handler'], 'level':", "ответ', 'Другой вопрос', 'Выход']] do_reply(update, text, reply_keyboard) return ATTEMPT def", "import Redis from tg_logging import create_logger from quiz_tools import get_question_and_answer,", "[['Показать ответ', 'Другой вопрос', 'Выход']] do_reply(update, text, reply_keyboard) return ATTEMPT", "= Updater(token_tg) dp = updater.dispatcher logger.info(dp) conv_handler = ConversationHandler( entry_points=[CommandHandler('start',", "'formatter': { 'format': '%(asctime)s - %(levelname)s - %(message)s' } }", "} } } load_dotenv() chat_id_tg_admin = 
os.getenv('CHAT_ID_TG_ADMIN') bot = Bot(token=os.getenv('TOKEN_TG'))", "logger.addHandler(handler) rediser = Redis( host=os.getenv('REDIS_HOST'), port=os.getenv('REDIS_PORT'), db=0, password=os.getenv('REDIS_PWD')) updater =", "Redis( host=os.getenv('REDIS_HOST'), port=os.getenv('REDIS_PORT'), db=0, password=os.getenv('REDIS_PWD')) updater = Updater(token_tg) dp =", "attempt == format_answer(answer): text = 'Правильно! \\n\\n {}'.format(answer) reply_keyboard =", "answer = rediser.get(chat_id).decode() do_reply(update, answer, [['Новый вопрос', 'Выход']]) return QUESTION", "functools import partial from dotenv import load_dotenv from telegram import", "/start' do_reply(update, text) return ConversationHandler.END if __name__ == '__main__': main()", "os import logging import logging.config from functools import partial from", "from quiz_tools import get_question_and_answer, format_answer, format_question QUESTION, ATTEMPT = range(2)", "do_exit(bot, update): text = 'До скорой встречи! Желаете начать заново?", "text = 'Неверно! Попробуйте еще раз.' 
reply_keyboard = [['Показать ответ',", "bot = Bot(token=os.getenv('TOKEN_TG')) logging.config.dictConfig(dictLogConfig) logger = logging.getLogger('tg_logger') handler = LoggerTelegramBot()", "ReplyKeyboardRemove from telegram.ext import (Updater, CommandHandler, MessageHandler, RegexHandler, ConversationHandler, Filters)", "} } load_dotenv() chat_id_tg_admin = os.getenv('CHAT_ID_TG_ADMIN') bot = Bot(token=os.getenv('TOKEN_TG')) logging.config.dictConfig(dictLogConfig)", "load_dotenv() chat_id_tg_admin = os.getenv('CHAT_ID_TG_ADMIN') bot = Bot(token=os.getenv('TOKEN_TG')) logging.config.dictConfig(dictLogConfig) logger =", "if keyboard is None: markup = ReplyKeyboardRemove() return update.message.reply_text(text, reply_markup=markup)", "None: markup = ReplyKeyboardRemove() return update.message.reply_text(text, reply_markup=markup) markup = ReplyKeyboardMarkup(keyboard,", "RegexHandler('^Выход$', do_exit), RegexHandler('^(Новый вопрос|Другой вопрос)$', partial(handle_new_question, rediser)), RegexHandler('^Показать ответ$', partial(display_answer,", "import load_dotenv from telegram import Bot, ReplyKeyboardMarkup, ReplyKeyboardRemove from telegram.ext", "update.message.chat_id attempt = update.message.text.strip().lower() answer = rediser.get(chat_id).decode() if attempt ==", "return ATTEMPT def do_exit(bot, update): text = 'До скорой встречи!", "do_reply(update, answer, [['Новый вопрос', 'Выход']]) return QUESTION def handle_attempt(rediser, bot,", "RegexHandler('^Выход$', do_exit), MessageHandler(Filters.text, partial(handle_new_question, rediser))], ATTEMPT: [ RegexHandler('^Выход$', do_exit), RegexHandler('^(Новый", "= ConversationHandler( entry_points=[CommandHandler('start', start)], states={ QUESTION: [ RegexHandler('^Выход$', do_exit), MessageHandler(Filters.text,", "from redis import Redis from tg_logging import create_logger from quiz_tools", "LoggerTelegramBot(logging.Handler): def emit(self, record): log_entry = self.format(record) 
bot.send_message(chat_id=chat_id_tg_admin, text=log_entry) dictLogConfig", "conv_handler = ConversationHandler( entry_points=[CommandHandler('start', start)], states={ QUESTION: [ RegexHandler('^Выход$', do_exit),", "RegexHandler('^(Новый вопрос|Другой вопрос)$', partial(handle_new_question, rediser)), RegexHandler('^Показать ответ$', partial(display_answer, rediser)), MessageHandler(Filters.text,", "rediser)), MessageHandler(Filters.text, partial(handle_attempt, rediser))], }, fallbacks=[CommandHandler('cancel', do_exit)] ) dp.add_handler(conv_handler) updater.start_polling()", "MessageHandler(Filters.text, partial(handle_attempt, rediser))], }, fallbacks=[CommandHandler('cancel', do_exit)] ) dp.add_handler(conv_handler) updater.start_polling() updater.idle()", "(Updater, CommandHandler, MessageHandler, RegexHandler, ConversationHandler, Filters) from redis import Redis", "logging.config.dictConfig(dictLogConfig) logger = logging.getLogger('tg_logger') handler = LoggerTelegramBot() logger.addHandler(handler) rediser =", "знатоку в чате-викторине! 
Начинаем?', [['Да!']]) return QUESTION def handle_new_question(rediser, bot,", "update): chat_id = update.message.chat_id answer = rediser.get(chat_id).decode() do_reply(update, answer, [['Новый", "= rediser.get(chat_id).decode() do_reply(update, answer, [['Новый вопрос', 'Выход']]) return QUESTION def", "update.message.chat_id answer = rediser.get(chat_id).decode() do_reply(update, answer, [['Новый вопрос', 'Выход']]) return", "'tg_logger': { 'handlers': ['handler'], 'level': 'INFO' } }, 'formatters': {", "'Выход']] do_reply(update, text, reply_keyboard) return ATTEMPT def do_exit(bot, update): text", "import os import logging import logging.config from functools import partial", "create_logger from quiz_tools import get_question_and_answer, format_answer, format_question QUESTION, ATTEMPT =", "class LoggerTelegramBot(logging.Handler): def emit(self, record): log_entry = self.format(record) bot.send_message(chat_id=chat_id_tg_admin, text=log_entry)", "= { 'version': 1, 'handlers': { 'handler': { '()': LoggerTelegramBot,", "{ 'format': '%(asctime)s - %(levelname)s - %(message)s' } } }", "Updater(token_tg) dp = updater.dispatcher logger.info(dp) conv_handler = ConversationHandler( entry_points=[CommandHandler('start', start)],", "чате-викторине! 
Начинаем?', [['Да!']]) return QUESTION def handle_new_question(rediser, bot, update): new_question,", "return QUESTION def handle_new_question(rediser, bot, update): new_question, new_answer = get_question_and_answer()", "updater = Updater(token_tg) dp = updater.dispatcher logger.info(dp) conv_handler = ConversationHandler(", "import get_question_and_answer, format_answer, format_question QUESTION, ATTEMPT = range(2) def main():", "}, fallbacks=[CommandHandler('cancel', do_exit)] ) dp.add_handler(conv_handler) updater.start_polling() updater.idle() def do_reply(update, text,", "keyboard=None): if keyboard is None: markup = ReplyKeyboardRemove() return update.message.reply_text(text,", "rediser))], ATTEMPT: [ RegexHandler('^Выход$', do_exit), RegexHandler('^(Новый вопрос|Другой вопрос)$', partial(handle_new_question, rediser)),", "= ReplyKeyboardRemove() return update.message.reply_text(text, reply_markup=markup) markup = ReplyKeyboardMarkup(keyboard, resize_keyboard=True) return", "ATTEMPT = range(2) def main(): class LoggerTelegramBot(logging.Handler): def emit(self, record):", "QUESTION def handle_attempt(rediser, bot, update): chat_id = update.message.chat_id attempt =", "import (Updater, CommandHandler, MessageHandler, RegexHandler, ConversationHandler, Filters) from redis import", "заново? 
Жмите /start' do_reply(update, text) return ConversationHandler.END if __name__ ==", "dp = updater.dispatcher logger.info(dp) conv_handler = ConversationHandler( entry_points=[CommandHandler('start', start)], states={", "= Redis( host=os.getenv('REDIS_HOST'), port=os.getenv('REDIS_PORT'), db=0, password=os.getenv('REDIS_PWD')) updater = Updater(token_tg) dp", "format_question(new_question)) return ATTEMPT def display_answer(rediser, bot, update): chat_id = update.message.chat_id", "do_exit), MessageHandler(Filters.text, partial(handle_new_question, rediser))], ATTEMPT: [ RegexHandler('^Выход$', do_exit), RegexHandler('^(Новый вопрос|Другой", "markup = ReplyKeyboardMarkup(keyboard, resize_keyboard=True) return update.message.reply_text(text, reply_markup=markup) def start(bot, update):", "rediser))], }, fallbacks=[CommandHandler('cancel', do_exit)] ) dp.add_handler(conv_handler) updater.start_polling() updater.idle() def do_reply(update,", "'Привет знатоку в чате-викторине! Начинаем?', [['Да!']]) return QUESTION def handle_new_question(rediser,", "= os.getenv('CHAT_ID_TG_ADMIN') bot = Bot(token=os.getenv('TOKEN_TG')) logging.config.dictConfig(dictLogConfig) logger = logging.getLogger('tg_logger') handler", "начать заново? 
Жмите /start' do_reply(update, text) return ConversationHandler.END if __name__", "bot, update): new_question, new_answer = get_question_and_answer() chat_id = update.message.chat_id rediser.set(chat_id,", "update.message.chat_id rediser.set(chat_id, new_answer) do_reply(update, format_question(new_question)) return ATTEMPT def display_answer(rediser, bot,", "record): log_entry = self.format(record) bot.send_message(chat_id=chat_id_tg_admin, text=log_entry) dictLogConfig = { 'version':", "port=os.getenv('REDIS_PORT'), db=0, password=os.getenv('REDIS_PWD')) updater = Updater(token_tg) dp = updater.dispatcher logger.info(dp)", "def main(): class LoggerTelegramBot(logging.Handler): def emit(self, record): log_entry = self.format(record)", "password=os.getenv('REDIS_PWD')) updater = Updater(token_tg) dp = updater.dispatcher logger.info(dp) conv_handler =", "import logging import logging.config from functools import partial from dotenv", "{ 'handler': { '()': LoggerTelegramBot, 'formatter': 'formatter' } }, 'loggers':", "ATTEMPT def do_exit(bot, update): text = 'До скорой встречи! Желаете", "bot, update): chat_id = update.message.chat_id answer = rediser.get(chat_id).decode() do_reply(update, answer,", "dictLogConfig = { 'version': 1, 'handlers': { 'handler': { '()':", "}, 'loggers': { 'tg_logger': { 'handlers': ['handler'], 'level': 'INFO' }", "range(2) def main(): class LoggerTelegramBot(logging.Handler): def emit(self, record): log_entry =", "= Bot(token=os.getenv('TOKEN_TG')) logging.config.dictConfig(dictLogConfig) logger = logging.getLogger('tg_logger') handler = LoggerTelegramBot() logger.addHandler(handler)", "get_question_and_answer() chat_id = update.message.chat_id rediser.set(chat_id, new_answer) do_reply(update, format_question(new_question)) return ATTEMPT", "вопрос', 'Выход']] else: text = 'Неверно! Попробуйте еще раз.' 
reply_keyboard", "= self.format(record) bot.send_message(chat_id=chat_id_tg_admin, text=log_entry) dictLogConfig = { 'version': 1, 'handlers':", "from functools import partial from dotenv import load_dotenv from telegram", "{ 'formatter': { 'format': '%(asctime)s - %(levelname)s - %(message)s' }", "- %(levelname)s - %(message)s' } } } load_dotenv() chat_id_tg_admin =", "QUESTION: [ RegexHandler('^Выход$', do_exit), MessageHandler(Filters.text, partial(handle_new_question, rediser))], ATTEMPT: [ RegexHandler('^Выход$',", "= updater.dispatcher logger.info(dp) conv_handler = ConversationHandler( entry_points=[CommandHandler('start', start)], states={ QUESTION:", "text=log_entry) dictLogConfig = { 'version': 1, 'handlers': { 'handler': {", "ATTEMPT: [ RegexHandler('^Выход$', do_exit), RegexHandler('^(Новый вопрос|Другой вопрос)$', partial(handle_new_question, rediser)), RegexHandler('^Показать", "def handle_new_question(rediser, bot, update): new_question, new_answer = get_question_and_answer() chat_id =", "LoggerTelegramBot() logger.addHandler(handler) rediser = Redis( host=os.getenv('REDIS_HOST'), port=os.getenv('REDIS_PORT'), db=0, password=os.getenv('REDIS_PWD')) updater", "['handler'], 'level': 'INFO' } }, 'formatters': { 'formatter': { 'format':", "LoggerTelegramBot, 'formatter': 'formatter' } }, 'loggers': { 'tg_logger': { 'handlers':", "host=os.getenv('REDIS_HOST'), port=os.getenv('REDIS_PORT'), db=0, password=os.getenv('REDIS_PWD')) updater = Updater(token_tg) dp = updater.dispatcher", "return ATTEMPT def display_answer(rediser, bot, update): chat_id = update.message.chat_id answer", "= 'Неверно! Попробуйте еще раз.' 
reply_keyboard = [['Показать ответ', 'Другой", "'version': 1, 'handlers': { 'handler': { '()': LoggerTelegramBot, 'formatter': 'formatter'", "{ 'handlers': ['handler'], 'level': 'INFO' } }, 'formatters': { 'formatter':", "get_question_and_answer, format_answer, format_question QUESTION, ATTEMPT = range(2) def main(): class", "fallbacks=[CommandHandler('cancel', do_exit)] ) dp.add_handler(conv_handler) updater.start_polling() updater.idle() def do_reply(update, text, keyboard=None):", "= LoggerTelegramBot() logger.addHandler(handler) rediser = Redis( host=os.getenv('REDIS_HOST'), port=os.getenv('REDIS_PORT'), db=0, password=os.getenv('REDIS_PWD'))", "partial(handle_new_question, rediser))], ATTEMPT: [ RegexHandler('^Выход$', do_exit), RegexHandler('^(Новый вопрос|Другой вопрос)$', partial(handle_new_question,", "is None: markup = ReplyKeyboardRemove() return update.message.reply_text(text, reply_markup=markup) markup =", "= [['Новый вопрос', 'Выход']] else: text = 'Неверно! Попробуйте еще", "'Правильно! \\n\\n {}'.format(answer) reply_keyboard = [['Новый вопрос', 'Выход']] else: text", "new_answer) do_reply(update, format_question(new_question)) return ATTEMPT def display_answer(rediser, bot, update): chat_id", "Redis from tg_logging import create_logger from quiz_tools import get_question_and_answer, format_answer,", "Жмите /start' do_reply(update, text) return ConversationHandler.END if __name__ == '__main__':", "reply_keyboard) return ATTEMPT def do_exit(bot, update): text = 'До скорой", "else: text = 'Неверно! Попробуйте еще раз.' 
reply_keyboard = [['Показать", "def do_reply(update, text, keyboard=None): if keyboard is None: markup =", "'handler': { '()': LoggerTelegramBot, 'formatter': 'formatter' } }, 'loggers': {", "} }, 'formatters': { 'formatter': { 'format': '%(asctime)s - %(levelname)s", "handle_attempt(rediser, bot, update): chat_id = update.message.chat_id attempt = update.message.text.strip().lower() answer", "= update.message.chat_id rediser.set(chat_id, new_answer) do_reply(update, format_question(new_question)) return ATTEMPT def display_answer(rediser,", "Bot, ReplyKeyboardMarkup, ReplyKeyboardRemove from telegram.ext import (Updater, CommandHandler, MessageHandler, RegexHandler,", "dp.add_handler(conv_handler) updater.start_polling() updater.idle() def do_reply(update, text, keyboard=None): if keyboard is", "ConversationHandler, Filters) from redis import Redis from tg_logging import create_logger", "start(bot, update): do_reply(update, 'Привет знатоку в чате-викторине! Начинаем?', [['Да!']]) return", "update): text = 'До скорой встречи! Желаете начать заново? 
Жмите", "ReplyKeyboardMarkup, ReplyKeyboardRemove from telegram.ext import (Updater, CommandHandler, MessageHandler, RegexHandler, ConversationHandler,", "import partial from dotenv import load_dotenv from telegram import Bot,", "db=0, password=os.getenv('REDIS_PWD')) updater = Updater(token_tg) dp = updater.dispatcher logger.info(dp) conv_handler", "answer = rediser.get(chat_id).decode() if attempt == format_answer(answer): text = 'Правильно!", "os.getenv('CHAT_ID_TG_ADMIN') bot = Bot(token=os.getenv('TOKEN_TG')) logging.config.dictConfig(dictLogConfig) logger = logging.getLogger('tg_logger') handler =", "logging.config from functools import partial from dotenv import load_dotenv from", "rediser.set(chat_id, new_answer) do_reply(update, format_question(new_question)) return ATTEMPT def display_answer(rediser, bot, update):", "update.message.text.strip().lower() answer = rediser.get(chat_id).decode() if attempt == format_answer(answer): text =", "вопрос', 'Выход']]) return QUESTION def handle_attempt(rediser, bot, update): chat_id =", "self.format(record) bot.send_message(chat_id=chat_id_tg_admin, text=log_entry) dictLogConfig = { 'version': 1, 'handlers': {", "CommandHandler, MessageHandler, RegexHandler, ConversationHandler, Filters) from redis import Redis from", "from telegram.ext import (Updater, CommandHandler, MessageHandler, RegexHandler, ConversationHandler, Filters) from", "states={ QUESTION: [ RegexHandler('^Выход$', do_exit), MessageHandler(Filters.text, partial(handle_new_question, rediser))], ATTEMPT: [", "'loggers': { 'tg_logger': { 'handlers': ['handler'], 'level': 'INFO' } },", "QUESTION, ATTEMPT = range(2) def main(): class LoggerTelegramBot(logging.Handler): def emit(self,", "updater.dispatcher logger.info(dp) conv_handler = ConversationHandler( entry_points=[CommandHandler('start', start)], states={ QUESTION: [", "reply_keyboard = [['Показать ответ', 'Другой вопрос', 'Выход']] do_reply(update, text, reply_keyboard)", "'Другой вопрос', 'Выход']] 
do_reply(update, text, reply_keyboard) return ATTEMPT def do_exit(bot,", "from telegram import Bot, ReplyKeyboardMarkup, ReplyKeyboardRemove from telegram.ext import (Updater,", "updater.idle() def do_reply(update, text, keyboard=None): if keyboard is None: markup", "return QUESTION def handle_attempt(rediser, bot, update): chat_id = update.message.chat_id attempt", "update): chat_id = update.message.chat_id attempt = update.message.text.strip().lower() answer = rediser.get(chat_id).decode()", ") dp.add_handler(conv_handler) updater.start_polling() updater.idle() def do_reply(update, text, keyboard=None): if keyboard", "text, reply_keyboard) return ATTEMPT def do_exit(bot, update): text = 'До", "bot, update): chat_id = update.message.chat_id attempt = update.message.text.strip().lower() answer =", "from dotenv import load_dotenv from telegram import Bot, ReplyKeyboardMarkup, ReplyKeyboardRemove", "markup = ReplyKeyboardRemove() return update.message.reply_text(text, reply_markup=markup) markup = ReplyKeyboardMarkup(keyboard, resize_keyboard=True)", "from tg_logging import create_logger from quiz_tools import get_question_and_answer, format_answer, format_question", "Bot(token=os.getenv('TOKEN_TG')) logging.config.dictConfig(dictLogConfig) logger = logging.getLogger('tg_logger') handler = LoggerTelegramBot() logger.addHandler(handler) rediser", "logger = logging.getLogger('tg_logger') handler = LoggerTelegramBot() logger.addHandler(handler) rediser = Redis(", "import Bot, ReplyKeyboardMarkup, ReplyKeyboardRemove from telegram.ext import (Updater, CommandHandler, MessageHandler,", "rediser)), RegexHandler('^Показать ответ$', partial(display_answer, rediser)), MessageHandler(Filters.text, partial(handle_attempt, rediser))], }, fallbacks=[CommandHandler('cancel',", "do_exit)] ) dp.add_handler(conv_handler) updater.start_polling() updater.idle() def do_reply(update, text, keyboard=None): if", "= range(2) def main(): class LoggerTelegramBot(logging.Handler): def 
emit(self, record): log_entry", "ReplyKeyboardRemove() return update.message.reply_text(text, reply_markup=markup) markup = ReplyKeyboardMarkup(keyboard, resize_keyboard=True) return update.message.reply_text(text,", "chat_id = update.message.chat_id answer = rediser.get(chat_id).decode() do_reply(update, answer, [['Новый вопрос',", "quiz_tools import get_question_and_answer, format_answer, format_question QUESTION, ATTEMPT = range(2) def", "chat_id = update.message.chat_id attempt = update.message.text.strip().lower() answer = rediser.get(chat_id).decode() if", "main(): class LoggerTelegramBot(logging.Handler): def emit(self, record): log_entry = self.format(record) bot.send_message(chat_id=chat_id_tg_admin,", "attempt = update.message.text.strip().lower() answer = rediser.get(chat_id).decode() if attempt == format_answer(answer):", "rediser.get(chat_id).decode() if attempt == format_answer(answer): text = 'Правильно! \\n\\n {}'.format(answer)", "format_answer(answer): text = 'Правильно! \\n\\n {}'.format(answer) reply_keyboard = [['Новый вопрос',", "chat_id = update.message.chat_id rediser.set(chat_id, new_answer) do_reply(update, format_question(new_question)) return ATTEMPT def", "def handle_attempt(rediser, bot, update): chat_id = update.message.chat_id attempt = update.message.text.strip().lower()", "[ RegexHandler('^Выход$', do_exit), MessageHandler(Filters.text, partial(handle_new_question, rediser))], ATTEMPT: [ RegexHandler('^Выход$', do_exit),", "def emit(self, record): log_entry = self.format(record) bot.send_message(chat_id=chat_id_tg_admin, text=log_entry) dictLogConfig =", "do_reply(update, 'Привет знатоку в чате-викторине! Начинаем?', [['Да!']]) return QUESTION def", "'Выход']] else: text = 'Неверно! Попробуйте еще раз.' reply_keyboard =", "reply_markup=markup) def start(bot, update): do_reply(update, 'Привет знатоку в чате-викторине! 
Начинаем?',", "вопрос|Другой вопрос)$', partial(handle_new_question, rediser)), RegexHandler('^Показать ответ$', partial(display_answer, rediser)), MessageHandler(Filters.text, partial(handle_attempt,", "скорой встречи! Желаете начать заново? Жмите /start' do_reply(update, text) return", "redis import Redis from tg_logging import create_logger from quiz_tools import", "ConversationHandler( entry_points=[CommandHandler('start', start)], states={ QUESTION: [ RegexHandler('^Выход$', do_exit), MessageHandler(Filters.text, partial(handle_new_question,", "= rediser.get(chat_id).decode() if attempt == format_answer(answer): text = 'Правильно! \\n\\n", "'До скорой встречи! Желаете начать заново? Жмите /start' do_reply(update, text)", "update): new_question, new_answer = get_question_and_answer() chat_id = update.message.chat_id rediser.set(chat_id, new_answer)", "{ 'tg_logger': { 'handlers': ['handler'], 'level': 'INFO' } }, 'formatters':", "bot.send_message(chat_id=chat_id_tg_admin, text=log_entry) dictLogConfig = { 'version': 1, 'handlers': { 'handler':", "dotenv import load_dotenv from telegram import Bot, ReplyKeyboardMarkup, ReplyKeyboardRemove from", "partial(handle_new_question, rediser)), RegexHandler('^Показать ответ$', partial(display_answer, rediser)), MessageHandler(Filters.text, partial(handle_attempt, rediser))], },", "reply_keyboard = [['Новый вопрос', 'Выход']] else: text = 'Неверно! Попробуйте", "def display_answer(rediser, bot, update): chat_id = update.message.chat_id answer = rediser.get(chat_id).decode()", "update.message.reply_text(text, reply_markup=markup) def start(bot, update): do_reply(update, 'Привет знатоку в чате-викторине!", "telegram import Bot, ReplyKeyboardMarkup, ReplyKeyboardRemove from telegram.ext import (Updater, CommandHandler,", "{ 'version': 1, 'handlers': { 'handler': { '()': LoggerTelegramBot, 'formatter':", "Попробуйте еще раз.' reply_keyboard = [['Показать ответ', 'Другой вопрос', 'Выход']]", "еще раз.' 
reply_keyboard = [['Показать ответ', 'Другой вопрос', 'Выход']] do_reply(update,", "вопрос', 'Выход']] do_reply(update, text, reply_keyboard) return ATTEMPT def do_exit(bot, update):", "keyboard is None: markup = ReplyKeyboardRemove() return update.message.reply_text(text, reply_markup=markup) markup", "'%(asctime)s - %(levelname)s - %(message)s' } } } load_dotenv() chat_id_tg_admin", "} }, 'loggers': { 'tg_logger': { 'handlers': ['handler'], 'level': 'INFO'", "import create_logger from quiz_tools import get_question_and_answer, format_answer, format_question QUESTION, ATTEMPT", "logger.info(dp) conv_handler = ConversationHandler( entry_points=[CommandHandler('start', start)], states={ QUESTION: [ RegexHandler('^Выход$',", "load_dotenv from telegram import Bot, ReplyKeyboardMarkup, ReplyKeyboardRemove from telegram.ext import", "} load_dotenv() chat_id_tg_admin = os.getenv('CHAT_ID_TG_ADMIN') bot = Bot(token=os.getenv('TOKEN_TG')) logging.config.dictConfig(dictLogConfig) logger", "= logging.getLogger('tg_logger') handler = LoggerTelegramBot() logger.addHandler(handler) rediser = Redis( host=os.getenv('REDIS_HOST'),", "reply_markup=markup) markup = ReplyKeyboardMarkup(keyboard, resize_keyboard=True) return update.message.reply_text(text, reply_markup=markup) def start(bot,", "'INFO' } }, 'formatters': { 'formatter': { 'format': '%(asctime)s -", "= 'Правильно! \\n\\n {}'.format(answer) reply_keyboard = [['Новый вопрос', 'Выход']] else:", "'Неверно! Попробуйте еще раз.' reply_keyboard = [['Показать ответ', 'Другой вопрос',", "= get_question_and_answer() chat_id = update.message.chat_id rediser.set(chat_id, new_answer) do_reply(update, format_question(new_question)) return", "entry_points=[CommandHandler('start', start)], states={ QUESTION: [ RegexHandler('^Выход$', do_exit), MessageHandler(Filters.text, partial(handle_new_question, rediser))]," ]
[ "obtain poor results. If the scale is too large (>50),", "be None, negative or positive integer. If None, then fourier", "inputs, training=None, mask=None): features = self.network(inputs) output = self.final_dense(features) return", "too large (>50), convergence will be fast but results will", "50]. use_bias: Boolean whether to use bias or not. #", "fourier_features class FourierFeatureMLP(tf.keras.Model): def __init__(self, units: int, final_units: int, gaussian_projection:", "Try grid search for scales in the range [10 -", "in the final layer. activation: Activation in the hidden layers.", "Number of layers in the network. gaussian_projection: Projection dimension for", "final_activation: str = \"linear\", num_layers: int = 1, gaussian_scale: float", "layer. Args: units: Number of hidden units in the intermediate", "of hidden units in the final layer. activation: Activation in", "layer. Can be None, negative or positive integer. If None,", "matrix (basic projection) without gaussian kernel. If >=1, uses gaussian", "to use bias or not. # References: - [Fourier Features", "bias_initializer='he_uniform', **kwargs)) self.network = tf.keras.Sequential(layers) self.final_dense = tf.keras.layers.Dense(final_units, activation=final_activation, use_bias=use_bias,", "MLP with optional FourierFeatureProjection layer. Args: units: Number of hidden", "in range(num_layers - 1): layers.append(tf.keras.layers.Dense(units, activation=activation, use_bias=use_bias, bias_initializer='he_uniform', **kwargs)) self.network", "activation: str = 'relu', final_activation: str = \"linear\", num_layers: int", "layers. final_activation: Activation function of the final layer. num_layers: Number", "a multi-layer MLP with optional FourierFeatureProjection layer. Args: units: Number", "in the range [10 - 50]. use_bias: Boolean whether to", "High Frequency Functions in Low Dimensional Domains](https://people.eecs.berkeley.edu/~bmild/fourfeat/). 
Used to create", "dimension for the gaussian kernel in fourier feature projection layer.", "str = 'relu', final_activation: str = \"linear\", num_layers: int =", "Fourier Feature Projection model from the paper [Fourier Features Let", "# References: - [Fourier Features Let Networks Learn High Frequency", "If the scale is too large (>50), convergence will be", "convergence will slow down and obtain poor results. If the", "**kwargs): \"\"\" Fourier Feature Projection model from the paper [Fourier", ">=1, uses gaussian projection matrix of specified dim. gaussian_scale: Scale", "[] if gaussian_projection is not None: layers.append(fourier_features.FourierFeatureProjection( gaussian_projection=gaussian_projection, gaussian_scale=gaussian_scale, **kwargs", "class FourierFeatureMLP(tf.keras.Model): def __init__(self, units: int, final_units: int, gaussian_projection: Optional[int],", "gaussian_projection is not None: layers.append(fourier_features.FourierFeatureProjection( gaussian_projection=gaussian_projection, gaussian_scale=gaussian_scale, **kwargs )) for", "scale is too small, convergence will slow down and obtain", "Let Networks Learn High Frequency Functions in Low Dimensional Domains](https://people.eecs.berkeley.edu/~bmild/fourfeat/).", "final_activation: Activation function of the final layer. num_layers: Number of", "small, convergence will slow down and obtain poor results. If", "gaussian_scale: float = 1.0, use_bias: bool = True, **kwargs): \"\"\"", "will be fast but results will be grainy. Try grid", "Features Let Networks Learn High Frequency Functions in Low Dimensional", "the final layer. activation: Activation in the hidden layers. final_activation:", "for the gaussian kernel in fourier feature projection layer. Can", "the gaussian kernel in fourier feature projection layer. Can be", "gaussian_projection: Optional[int], activation: str = 'relu', final_activation: str = \"linear\",", "of the final layer. 
num_layers: Number of layers in the", "and obtain poor results. If the scale is too large", "If the scale is too small, convergence will slow down", "if gaussian_projection is not None: layers.append(fourier_features.FourierFeatureProjection( gaussian_projection=gaussian_projection, gaussian_scale=gaussian_scale, **kwargs ))", "Boolean whether to use bias or not. # References: -", "Args: units: Number of hidden units in the intermediate layers.", "feature projection layer. Can be None, negative or positive integer.", "activation=final_activation, use_bias=use_bias, bias_initializer='he_uniform', **kwargs) def call(self, inputs, training=None, mask=None): features", "the network. gaussian_projection: Projection dimension for the gaussian kernel in", "or positive integer. If None, then fourier feature map layer", "final_units: Number of hidden units in the final layer. activation:", "Optional from tf_fourier_features import fourier_features class FourierFeatureMLP(tf.keras.Model): def __init__(self, units:", "import fourier_features class FourierFeatureMLP(tf.keras.Model): def __init__(self, units: int, final_units: int,", "slow down and obtain poor results. If the scale is", "= 1.0, use_bias: bool = True, **kwargs): \"\"\" Fourier Feature", "integer. If None, then fourier feature map layer is not", "dim. gaussian_scale: Scale of the gaussian kernel in fourier feature", "use_bias=use_bias, bias_initializer='he_uniform', **kwargs) def call(self, inputs, training=None, mask=None): features =", "= 'relu', final_activation: str = \"linear\", num_layers: int = 1,", "_ in range(num_layers - 1): layers.append(tf.keras.layers.Dense(units, activation=activation, use_bias=use_bias, bias_initializer='he_uniform', **kwargs))", "then fourier feature map layer is not used. If <=0,", "fourier feature projection layer. 
Note: If the scale is too", "= [] if gaussian_projection is not None: layers.append(fourier_features.FourierFeatureProjection( gaussian_projection=gaussian_projection, gaussian_scale=gaussian_scale,", "will slow down and obtain poor results. If the scale", "fast but results will be grainy. Try grid search for", "import tensorflow as tf from typing import Optional from tf_fourier_features", "typing import Optional from tf_fourier_features import fourier_features class FourierFeatureMLP(tf.keras.Model): def", "- 50]. use_bias: Boolean whether to use bias or not.", "Optional[int], activation: str = 'relu', final_activation: str = \"linear\", num_layers:", "in the network. gaussian_projection: Projection dimension for the gaussian kernel", "int, final_units: int, gaussian_projection: Optional[int], activation: str = 'relu', final_activation:", "Let Networks Learn High Frequency Functions in Low Dimensional Domains](https://people.eecs.berkeley.edu/~bmild/fourfeat/)", "use_bias: bool = True, **kwargs): \"\"\" Fourier Feature Projection model", "= \"linear\", num_layers: int = 1, gaussian_scale: float = 1.0,", "use_bias: Boolean whether to use bias or not. # References:", "gaussian_projection=gaussian_projection, gaussian_scale=gaussian_scale, **kwargs )) for _ in range(num_layers - 1):", "is not used. If <=0, uses identity matrix (basic projection)", "- 1): layers.append(tf.keras.layers.Dense(units, activation=activation, use_bias=use_bias, bias_initializer='he_uniform', **kwargs)) self.network = tf.keras.Sequential(layers)", "the scale is too small, convergence will slow down and", "str = \"linear\", num_layers: int = 1, gaussian_scale: float =", "create a multi-layer MLP with optional FourierFeatureProjection layer. Args: units:", "None, negative or positive integer. If None, then fourier feature", "with optional FourierFeatureProjection layer. 
Args: units: Number of hidden units", "**kwargs)) self.network = tf.keras.Sequential(layers) self.final_dense = tf.keras.layers.Dense(final_units, activation=final_activation, use_bias=use_bias, bias_initializer='he_uniform',", "units in the intermediate layers. final_units: Number of hidden units", "specified dim. gaussian_scale: Scale of the gaussian kernel in fourier", "= tf.keras.layers.Dense(final_units, activation=final_activation, use_bias=use_bias, bias_initializer='he_uniform', **kwargs) def call(self, inputs, training=None,", "use bias or not. # References: - [Fourier Features Let", "training=None, mask=None): features = self.network(inputs) output = self.final_dense(features) return output", "Networks Learn High Frequency Functions in Low Dimensional Domains](https://people.eecs.berkeley.edu/~bmild/fourfeat/). Used", "search for scales in the range [10 - 50]. use_bias:", "matrix of specified dim. gaussian_scale: Scale of the gaussian kernel", "Can be None, negative or positive integer. If None, then", "grid search for scales in the range [10 - 50].", "Note: If the scale is too small, convergence will slow", "Functions in Low Dimensional Domains](https://people.eecs.berkeley.edu/~bmild/fourfeat/). Used to create a multi-layer", "- [Fourier Features Let Networks Learn High Frequency Functions in", "True, **kwargs): \"\"\" Fourier Feature Projection model from the paper", "bias or not. # References: - [Fourier Features Let Networks", "Frequency Functions in Low Dimensional Domains](https://people.eecs.berkeley.edu/~bmild/fourfeat/). Used to create a", "units: Number of hidden units in the intermediate layers. final_units:", "uses identity matrix (basic projection) without gaussian kernel. If >=1,", "Number of hidden units in the final layer. activation: Activation", "not used. If <=0, uses identity matrix (basic projection) without", "multi-layer MLP with optional FourierFeatureProjection layer. 
Args: units: Number of", "scale is too large (>50), convergence will be fast but", "for scales in the range [10 - 50]. use_bias: Boolean", "Learn High Frequency Functions in Low Dimensional Domains](https://people.eecs.berkeley.edu/~bmild/fourfeat/). Used to", "the paper [Fourier Features Let Networks Learn High Frequency Functions", "intermediate layers. final_units: Number of hidden units in the final", "gaussian_projection: Projection dimension for the gaussian kernel in fourier feature", "gaussian_scale: Scale of the gaussian kernel in fourier feature projection", "too small, convergence will slow down and obtain poor results.", "Dimensional Domains](https://people.eecs.berkeley.edu/~bmild/fourfeat/) \"\"\" super().__init__(**kwargs) layers = [] if gaussian_projection is", "tf.keras.layers.Dense(final_units, activation=final_activation, use_bias=use_bias, bias_initializer='he_uniform', **kwargs) def call(self, inputs, training=None, mask=None):", "References: - [Fourier Features Let Networks Learn High Frequency Functions", "def call(self, inputs, training=None, mask=None): features = self.network(inputs) output =", "layers. final_units: Number of hidden units in the final layer.", "int = 1, gaussian_scale: float = 1.0, use_bias: bool =", "def __init__(self, units: int, final_units: int, gaussian_projection: Optional[int], activation: str", "large (>50), convergence will be fast but results will be", "the intermediate layers. final_units: Number of hidden units in the", "of the gaussian kernel in fourier feature projection layer. Note:", "projection layer. Can be None, negative or positive integer. If", "\"\"\" super().__init__(**kwargs) layers = [] if gaussian_projection is not None:", "units in the final layer. activation: Activation in the hidden", "from the paper [Fourier Features Let Networks Learn High Frequency", "fourier feature projection layer. Can be None, negative or positive", "<=0, uses identity matrix (basic projection) without gaussian kernel. 
If", "paper [Fourier Features Let Networks Learn High Frequency Functions in", "of specified dim. gaussian_scale: Scale of the gaussian kernel in", "layers.append(tf.keras.layers.Dense(units, activation=activation, use_bias=use_bias, bias_initializer='he_uniform', **kwargs)) self.network = tf.keras.Sequential(layers) self.final_dense =", "self.final_dense = tf.keras.layers.Dense(final_units, activation=final_activation, use_bias=use_bias, bias_initializer='he_uniform', **kwargs) def call(self, inputs,", "down and obtain poor results. If the scale is too", "optional FourierFeatureProjection layer. Args: units: Number of hidden units in", "Projection model from the paper [Fourier Features Let Networks Learn", "[10 - 50]. use_bias: Boolean whether to use bias or", "be fast but results will be grainy. Try grid search", "None, then fourier feature map layer is not used. If", "in the hidden layers. final_activation: Activation function of the final", "= True, **kwargs): \"\"\" Fourier Feature Projection model from the", "feature projection layer. Note: If the scale is too small,", "units: int, final_units: int, gaussian_projection: Optional[int], activation: str = 'relu',", "the range [10 - 50]. use_bias: Boolean whether to use", "gaussian kernel in fourier feature projection layer. Note: If the", "kernel. If >=1, uses gaussian projection matrix of specified dim.", "Networks Learn High Frequency Functions in Low Dimensional Domains](https://people.eecs.berkeley.edu/~bmild/fourfeat/) \"\"\"", "layers = [] if gaussian_projection is not None: layers.append(fourier_features.FourierFeatureProjection( gaussian_projection=gaussian_projection,", "from typing import Optional from tf_fourier_features import fourier_features class FourierFeatureMLP(tf.keras.Model):", "FourierFeatureProjection layer. Args: units: Number of hidden units in the", "Domains](https://people.eecs.berkeley.edu/~bmild/fourfeat/). 
Used to create a multi-layer MLP with optional FourierFeatureProjection", "(basic projection) without gaussian kernel. If >=1, uses gaussian projection", "the hidden layers. final_activation: Activation function of the final layer.", "projection) without gaussian kernel. If >=1, uses gaussian projection matrix", "not. # References: - [Fourier Features Let Networks Learn High", "FourierFeatureMLP(tf.keras.Model): def __init__(self, units: int, final_units: int, gaussian_projection: Optional[int], activation:", "hidden units in the final layer. activation: Activation in the", "will be grainy. Try grid search for scales in the", "network. gaussian_projection: Projection dimension for the gaussian kernel in fourier", "Functions in Low Dimensional Domains](https://people.eecs.berkeley.edu/~bmild/fourfeat/) \"\"\" super().__init__(**kwargs) layers = []", "bias_initializer='he_uniform', **kwargs) def call(self, inputs, training=None, mask=None): features = self.network(inputs)", "1.0, use_bias: bool = True, **kwargs): \"\"\" Fourier Feature Projection", "layers in the network. gaussian_projection: Projection dimension for the gaussian", "gaussian kernel in fourier feature projection layer. Can be None,", "the scale is too large (>50), convergence will be fast", "(>50), convergence will be fast but results will be grainy.", "= tf.keras.Sequential(layers) self.final_dense = tf.keras.layers.Dense(final_units, activation=final_activation, use_bias=use_bias, bias_initializer='he_uniform', **kwargs) def", "positive integer. If None, then fourier feature map layer is", "**kwargs) def call(self, inputs, training=None, mask=None): features = self.network(inputs) output", "Low Dimensional Domains](https://people.eecs.berkeley.edu/~bmild/fourfeat/) \"\"\" super().__init__(**kwargs) layers = [] if gaussian_projection", "Used to create a multi-layer MLP with optional FourierFeatureProjection layer.", "kernel in fourier feature projection layer. 
Can be None, negative", "negative or positive integer. If None, then fourier feature map", "High Frequency Functions in Low Dimensional Domains](https://people.eecs.berkeley.edu/~bmild/fourfeat/) \"\"\" super().__init__(**kwargs) layers", "Scale of the gaussian kernel in fourier feature projection layer.", "projection layer. Note: If the scale is too small, convergence", "function of the final layer. num_layers: Number of layers in", "num_layers: Number of layers in the network. gaussian_projection: Projection dimension", "identity matrix (basic projection) without gaussian kernel. If >=1, uses", "int, gaussian_projection: Optional[int], activation: str = 'relu', final_activation: str =", "used. If <=0, uses identity matrix (basic projection) without gaussian", "1): layers.append(tf.keras.layers.Dense(units, activation=activation, use_bias=use_bias, bias_initializer='he_uniform', **kwargs)) self.network = tf.keras.Sequential(layers) self.final_dense", "fourier feature map layer is not used. If <=0, uses", "'relu', final_activation: str = \"linear\", num_layers: int = 1, gaussian_scale:", "\"linear\", num_layers: int = 1, gaussian_scale: float = 1.0, use_bias:", "Number of hidden units in the intermediate layers. final_units: Number", "gaussian projection matrix of specified dim. gaussian_scale: Scale of the", "the final layer. num_layers: Number of layers in the network.", "Dimensional Domains](https://people.eecs.berkeley.edu/~bmild/fourfeat/). Used to create a multi-layer MLP with optional", "poor results. If the scale is too large (>50), convergence", "is too large (>50), convergence will be fast but results", "layer is not used. If <=0, uses identity matrix (basic", "__init__(self, units: int, final_units: int, gaussian_projection: Optional[int], activation: str =", "layer. activation: Activation in the hidden layers. final_activation: Activation function", "hidden layers. final_activation: Activation function of the final layer. 
num_layers:", "If <=0, uses identity matrix (basic projection) without gaussian kernel.", "grainy. Try grid search for scales in the range [10", "layers.append(fourier_features.FourierFeatureProjection( gaussian_projection=gaussian_projection, gaussian_scale=gaussian_scale, **kwargs )) for _ in range(num_layers -", "bool = True, **kwargs): \"\"\" Fourier Feature Projection model from", "gaussian_scale=gaussian_scale, **kwargs )) for _ in range(num_layers - 1): layers.append(tf.keras.layers.Dense(units,", "as tf from typing import Optional from tf_fourier_features import fourier_features", "be grainy. Try grid search for scales in the range", ")) for _ in range(num_layers - 1): layers.append(tf.keras.layers.Dense(units, activation=activation, use_bias=use_bias,", "use_bias=use_bias, bias_initializer='he_uniform', **kwargs)) self.network = tf.keras.Sequential(layers) self.final_dense = tf.keras.layers.Dense(final_units, activation=final_activation,", "activation: Activation in the hidden layers. final_activation: Activation function of", "in fourier feature projection layer. Can be None, negative or", "gaussian kernel. If >=1, uses gaussian projection matrix of specified", "final layer. num_layers: Number of layers in the network. gaussian_projection:", "If >=1, uses gaussian projection matrix of specified dim. gaussian_scale:", "float = 1.0, use_bias: bool = True, **kwargs): \"\"\" Fourier", "results. If the scale is too large (>50), convergence will", "[Fourier Features Let Networks Learn High Frequency Functions in Low", "kernel in fourier feature projection layer. Note: If the scale", "= 1, gaussian_scale: float = 1.0, use_bias: bool = True,", "without gaussian kernel. If >=1, uses gaussian projection matrix of", "results will be grainy. Try grid search for scales in", "scales in the range [10 - 50]. 
use_bias: Boolean whether", "Domains](https://people.eecs.berkeley.edu/~bmild/fourfeat/) \"\"\" super().__init__(**kwargs) layers = [] if gaussian_projection is not", "map layer is not used. If <=0, uses identity matrix", "layer. Note: If the scale is too small, convergence will", "is too small, convergence will slow down and obtain poor", "None: layers.append(fourier_features.FourierFeatureProjection( gaussian_projection=gaussian_projection, gaussian_scale=gaussian_scale, **kwargs )) for _ in range(num_layers", "of layers in the network. gaussian_projection: Projection dimension for the", "projection matrix of specified dim. gaussian_scale: Scale of the gaussian", "Projection dimension for the gaussian kernel in fourier feature projection", "layer. num_layers: Number of layers in the network. gaussian_projection: Projection", "in the intermediate layers. final_units: Number of hidden units in", "call(self, inputs, training=None, mask=None): features = self.network(inputs) output = self.final_dense(features)", "final_units: int, gaussian_projection: Optional[int], activation: str = 'relu', final_activation: str", "range(num_layers - 1): layers.append(tf.keras.layers.Dense(units, activation=activation, use_bias=use_bias, bias_initializer='he_uniform', **kwargs)) self.network =", "to create a multi-layer MLP with optional FourierFeatureProjection layer. Args:", "or not. # References: - [Fourier Features Let Networks Learn", "\"\"\" Fourier Feature Projection model from the paper [Fourier Features", "1, gaussian_scale: float = 1.0, use_bias: bool = True, **kwargs):", "super().__init__(**kwargs) layers = [] if gaussian_projection is not None: layers.append(fourier_features.FourierFeatureProjection(", "self.network = tf.keras.Sequential(layers) self.final_dense = tf.keras.layers.Dense(final_units, activation=final_activation, use_bias=use_bias, bias_initializer='he_uniform', **kwargs)", "Activation in the hidden layers. 
final_activation: Activation function of the", "If None, then fourier feature map layer is not used.", "activation=activation, use_bias=use_bias, bias_initializer='he_uniform', **kwargs)) self.network = tf.keras.Sequential(layers) self.final_dense = tf.keras.layers.Dense(final_units,", "tf.keras.Sequential(layers) self.final_dense = tf.keras.layers.Dense(final_units, activation=final_activation, use_bias=use_bias, bias_initializer='he_uniform', **kwargs) def call(self,", "hidden units in the intermediate layers. final_units: Number of hidden", "Feature Projection model from the paper [Fourier Features Let Networks", "the gaussian kernel in fourier feature projection layer. Note: If", "from tf_fourier_features import fourier_features class FourierFeatureMLP(tf.keras.Model): def __init__(self, units: int,", "tensorflow as tf from typing import Optional from tf_fourier_features import", "in Low Dimensional Domains](https://people.eecs.berkeley.edu/~bmild/fourfeat/). Used to create a multi-layer MLP", "Low Dimensional Domains](https://people.eecs.berkeley.edu/~bmild/fourfeat/). Used to create a multi-layer MLP with", "in fourier feature projection layer. Note: If the scale is", "range [10 - 50]. use_bias: Boolean whether to use bias", "but results will be grainy. Try grid search for scales", "Learn High Frequency Functions in Low Dimensional Domains](https://people.eecs.berkeley.edu/~bmild/fourfeat/) \"\"\" super().__init__(**kwargs)", "is not None: layers.append(fourier_features.FourierFeatureProjection( gaussian_projection=gaussian_projection, gaussian_scale=gaussian_scale, **kwargs )) for _", "not None: layers.append(fourier_features.FourierFeatureProjection( gaussian_projection=gaussian_projection, gaussian_scale=gaussian_scale, **kwargs )) for _ in", "import Optional from tf_fourier_features import fourier_features class FourierFeatureMLP(tf.keras.Model): def __init__(self,", "whether to use bias or not. 
# References: - [Fourier", "**kwargs )) for _ in range(num_layers - 1): layers.append(tf.keras.layers.Dense(units, activation=activation,", "for _ in range(num_layers - 1): layers.append(tf.keras.layers.Dense(units, activation=activation, use_bias=use_bias, bias_initializer='he_uniform',", "in Low Dimensional Domains](https://people.eecs.berkeley.edu/~bmild/fourfeat/) \"\"\" super().__init__(**kwargs) layers = [] if", "uses gaussian projection matrix of specified dim. gaussian_scale: Scale of", "num_layers: int = 1, gaussian_scale: float = 1.0, use_bias: bool", "tf from typing import Optional from tf_fourier_features import fourier_features class", "Frequency Functions in Low Dimensional Domains](https://people.eecs.berkeley.edu/~bmild/fourfeat/) \"\"\" super().__init__(**kwargs) layers =", "final layer. activation: Activation in the hidden layers. final_activation: Activation", "Activation function of the final layer. num_layers: Number of layers", "of hidden units in the intermediate layers. final_units: Number of", "feature map layer is not used. If <=0, uses identity", "tf_fourier_features import fourier_features class FourierFeatureMLP(tf.keras.Model): def __init__(self, units: int, final_units:", "model from the paper [Fourier Features Let Networks Learn High", "convergence will be fast but results will be grainy. Try" ]
[ "= lremove(urlparse.urlparse(base).netloc, 'www.') for (url, referer) in clerk: url =", "self.visited = set() self.to_visit = set() def enqueue(self, url, referer):", "unicode(s, encoding='utf-8') else: return s def force_bytes(str_or_unicode): if isinstance(str_or_unicode, unicode):", "referer) in clerk: url = force_bytes(url) referer = force_bytes(referer) response", "get_pages(base, VisitOnlyOnceClerk(), session=session): rules = applicable_robot_rules(robots, response.url) robots_meta = canonical", "robotparser.RobotFileParser(base + '/robots.txt') robots.read() writer.writerow(headers) session = requests.session() session.verify =", "yield urlparse.urldefrag(urlparse.urljoin(response.url, i['href'], False))[0] except NotHtmlException: pass def force_unicode(s): if", "= urlparse.urlparse(link) if lremove(parsed.netloc, 'www.') == base_domain: clerk.enqueue(link, url) yield", "import collections import time import requests from eek import robotparser", "def get_links(response): if 300 <= response.status_code < 400 and response.headers['location']:", "'www.') == base_domain: clerk.enqueue(link, url) yield referer, response def metadata_spider(base,", "lremove(parsed.netloc, 'www.') == base_domain: clerk.enqueue(link, url) yield referer, response def", ")[0] try: html = beautify(response) for i in html.find_all('a', href=True):", "lxml except ImportError: HTML_PARSER = None else: HTML_PARSER = 'lxml'", "IndexError): pass try: description = html.head.find_all('meta', {\"name\": \"description\"})[0]['content'] except (AttributeError,", "'www.') for (url, referer) in clerk: url = force_bytes(url) referer", "return match and match.group(1) or None class NotHtmlException(Exception): pass class", "from a Content-Type header. 
>>> encoding_from_content_type('text/html; charset=utf-8') 'utf-8' >>> encoding_from_content_type('text/html')", "False))[0] except NotHtmlException: pass def force_unicode(s): if isinstance(s, str): return", "get_pages(base, clerk, session=requests.session()): clerk.enqueue(base, base) base_domain = lremove(urlparse.urlparse(base).netloc, 'www.') for", "content_type: if not html_re.search(content_type): raise NotHtmlException encoding = encoding_from_content_type(content_type) else:", "charset=utf-8') 'utf-8' >>> encoding_from_content_type('text/html') >>> \"\"\" if not content_type: return", "canonical = html.find_all('link', {\"rel\": \"canonical\"})[0]['href'] except IndexError: pass try: title", "don't want to add a url multiple times just because", "applicable_robot_rules(robots, response.url) robots_meta = canonical = title = description =", "content_type: return None match = encoding_re.search(content_type) return match and match.group(1)", "{\"name\": \"robots\"})) try: canonical = html.find_all('link', {\"rel\": \"canonical\"})[0]['href'] except IndexError:", "VisitOnlyOnceClerk(), session=session): for link in get_links(response): print ' \"%s\" ->", "grep_spider(base, pattern, delay=0, insensitive=False, insecure=False): flags = 0 if insensitive:", "need to keep track of referers, but we don't want", "response.status_code, ])) if delay: time.sleep(delay) def grep_spider(base, pattern, delay=0, insensitive=False,", "import time import requests from eek import robotparser # this", "< 400 and response.headers['location']: # redirect yield urlparse.urldefrag( urlparse.urljoin(response.url, response.headers['location'],", "= response.headers.get('content-type') if content_type: if not html_re.search(content_type): raise NotHtmlException encoding", "def force_bytes(str_or_unicode): if isinstance(str_or_unicode, unicode): return str_or_unicode.encode('utf-8') else: return str_or_unicode", "in get_pages(base, VisitOnlyOnceClerk(), session=session): for line in 
response.content.split('\\n'): if pattern.search(line):", "try: html = beautify(response) for i in html.find_all('a', href=True): yield", "(force_bytes(response.url), force_bytes(link)) if delay: time.sleep(delay) print \"}\" def applicable_robot_rules(robots, url):", "try: return BeautifulSoup( response.content, features=HTML_PARSER, from_encoding=encoding, ) except UnicodeEncodeError: raise", "def encoding_from_content_type(content_type): \"\"\" Extracts the charset from a Content-Type header.", "= 0 if insensitive: flags |= re.IGNORECASE pattern = re.compile(pattern,", "in get_links(response): parsed = urlparse.urlparse(link) if lremove(parsed.netloc, 'www.') == base_domain:", "insensitive=False, insecure=False): flags = 0 if insensitive: flags |= re.IGNORECASE", "urlparse import csv import sys import re import collections import", "We need to keep track of referers, but we don't", "','.join(rules['allow']), ','.join(rules['disallow']), ','.join(rules['noindex']), robots_meta, canonical, referer, response.status_code, ])) if delay:", "VisitOnlyOnceClerk(), session=session): for line in response.content.split('\\n'): if pattern.search(line): print u'%s:%s'", "for i in html.find_all('a', href=True): yield urlparse.urldefrag(urlparse.urljoin(response.url, i['href'], False))[0] except", "content_type = response.headers.get('content-type') if content_type: if not html_re.search(content_type): raise NotHtmlException", "for line in response.content.split('\\n'): if pattern.search(line): print u'%s:%s' % (force_unicode(response.url),", "base_domain: clerk.enqueue(link, url) yield referer, response def metadata_spider(base, output=sys.stdout, delay=0,", "pass try: description = html.head.find_all('meta', {\"name\": \"description\"})[0]['content'] except (AttributeError, IndexError,", "response = session.get( url, headers={'Referer': referer, 'User-Agent': 'Fusionbox spider'}, allow_redirects=False,", "VisitOnlyOnceClerk(object): def __init__(self): self.visited = set() 
self.to_visit = set() def", "if not url in self.visited: self.to_visit.add(UrlTask((url, referer))) def __bool__(self): return", "pass def force_unicode(s): if isinstance(s, str): return unicode(s, encoding='utf-8') else:", "not insecure for referer, response in get_pages(base, VisitOnlyOnceClerk(), session=session): rules", "'utf-8' >>> encoding_from_content_type('text/html') >>> \"\"\" if not content_type: return None", "\"robots\"})) try: canonical = html.find_all('link', {\"rel\": \"canonical\"})[0]['href'] except IndexError: pass", "title = description = keywords = '' try: html =", "writer = csv.writer(output) robots = robotparser.RobotFileParser(base + '/robots.txt') robots.read() writer.writerow(headers)", "re.compile(\"charset\\s*=\\s*(\\S+?)(;|$)\") html_re = re.compile(\"text/html\") headers = ['url', 'title', 'description', 'keywords',", "lremove('foo.com', 'www.') 'foo.com' \"\"\" if string.startswith(prefix): return string[len(prefix):] else: return", "enqueue(self, url, referer): if not url in self.visited: self.to_visit.add(UrlTask((url, referer)))", "{\"name\": \"description\"})[0]['content'] except (AttributeError, IndexError, KeyError): pass try: keywords =", "% (force_bytes(response.url), force_bytes(link)) if delay: time.sleep(delay) print \"}\" def applicable_robot_rules(robots,", "= None try: return BeautifulSoup( response.content, features=HTML_PARSER, from_encoding=encoding, ) except", "= beautify(response) robots_meta = ','.join(i['content'] for i in html.find_all('meta', {\"name\":", "re.compile(\"text/html\") headers = ['url', 'title', 'description', 'keywords', 'allow', 'disallow', 'noindex',", "\"canonical\"})[0]['href'] except IndexError: pass try: title = html.head.title.contents[0] except (AttributeError,", "import lxml except ImportError: HTML_PARSER = None else: HTML_PARSER =", "string def beautify(response): content_type = response.headers.get('content-type') if content_type: if not", "pages \"\"\" def __hash__(self): return 
hash(self[0]) def __eq__(self, other): return", "robots = robotparser.RobotFileParser(base + '/robots.txt') robots.read() writer.writerow(headers) session = requests.session()", "delay: time.sleep(delay) def grep_spider(base, pattern, delay=0, insensitive=False, insecure=False): flags =", "time.sleep(delay) print \"}\" def applicable_robot_rules(robots, url): rules = collections.defaultdict(list) if", "html = beautify(response) robots_meta = ','.join(i['content'] for i in html.find_all('meta',", "= applicable_robot_rules(robots, response.url) robots_meta = canonical = title = description", "None match = encoding_re.search(content_type) return match and match.group(1) or None", "robotparser # this project's version from bs4 import BeautifulSoup try:", "robots_meta = ','.join(i['content'] for i in html.find_all('meta', {\"name\": \"robots\"})) try:", "response.url) robots_meta = canonical = title = description = keywords", "collections.defaultdict(list) if robots.default_entry: rules[robots.default_entry.allowance(url)].append('*') for entry in robots.entries: rules[entry.allowance(url)].extend(entry.useragents) return", "(AttributeError, IndexError, KeyError): pass except NotHtmlException: pass writer.writerow(map(force_bytes, [ response.url,", "{\"name\": \"keywords\"})[0]['content'] except (AttributeError, IndexError, KeyError): pass except NotHtmlException: pass", "description, keywords, ','.join(rules['allow']), ','.join(rules['disallow']), ','.join(rules['noindex']), robots_meta, canonical, referer, response.status_code, ]))", "IndexError, KeyError): pass try: keywords = html.head.find_all('meta', {\"name\": \"keywords\"})[0]['content'] except", "'meta robots', 'canonical', 'referer', 'status'] def encoding_from_content_type(content_type): \"\"\" Extracts the", "in get_pages(base, VisitOnlyOnceClerk(), session=session): for link in get_links(response): print '", "header. 
>>> encoding_from_content_type('text/html; charset=utf-8') 'utf-8' >>> encoding_from_content_type('text/html') >>> \"\"\" if", "insecure=False): writer = csv.writer(output) robots = robotparser.RobotFileParser(base + '/robots.txt') robots.read()", "project's version from bs4 import BeautifulSoup try: import lxml except", "in get_pages(base, VisitOnlyOnceClerk(), session=session): rules = applicable_robot_rules(robots, response.url) robots_meta =", ">>> lremove('www.foo.com', 'www.') 'foo.com' >>> lremove('foo.com', 'www.') 'foo.com' \"\"\" if", "add a url multiple times just because it was referenced", "session=session): rules = applicable_robot_rules(robots, response.url) robots_meta = canonical = title", "encoding_re = re.compile(\"charset\\s*=\\s*(\\S+?)(;|$)\") html_re = re.compile(\"text/html\") headers = ['url', 'title',", "time import requests from eek import robotparser # this project's", "'canonical', 'referer', 'status'] def encoding_from_content_type(content_type): \"\"\" Extracts the charset from", "= encoding_from_content_type(content_type) else: encoding = None try: return BeautifulSoup( response.content,", "= set() self.to_visit = set() def enqueue(self, url, referer): if", "NotHtmlException encoding = encoding_from_content_type(content_type) else: encoding = None try: return", "pass writer.writerow(map(force_bytes, [ response.url, title, description, keywords, ','.join(rules['allow']), ','.join(rules['disallow']), ','.join(rules['noindex']),", "return hash(self[0]) def __eq__(self, other): return self[0] == other[0] class", "if delay: time.sleep(delay) def grep_spider(base, pattern, delay=0, insensitive=False, insecure=False): flags", "= description = keywords = '' try: html = beautify(response)", "referer, response in get_pages(base, VisitOnlyOnceClerk(), session=session): for line in response.content.split('\\n'):", "insecure for referer, response in get_pages(base, VisitOnlyOnceClerk(), session=session): rules =", "lremove('www.foo.com', 'www.') 
'foo.com' >>> lremove('foo.com', 'www.') 'foo.com' \"\"\" if string.startswith(prefix):", "if insensitive: flags |= re.IGNORECASE pattern = re.compile(pattern, flags) session", "set() def enqueue(self, url, referer): if not url in self.visited:", "HTML_PARSER = None else: HTML_PARSER = 'lxml' encoding_re = re.compile(\"charset\\s*=\\s*(\\S+?)(;|$)\")", "\"%s\" -> \"%s\";' % (force_bytes(response.url), force_bytes(link)) if delay: time.sleep(delay) print", "title, description, keywords, ','.join(rules['allow']), ','.join(rules['disallow']), ','.join(rules['noindex']), robots_meta, canonical, referer, response.status_code,", "spider'}, allow_redirects=False, ) for link in get_links(response): parsed = urlparse.urlparse(link)", "sys import re import collections import time import requests from", "pass except NotHtmlException: pass writer.writerow(map(force_bytes, [ response.url, title, description, keywords,", "[ response.url, title, description, keywords, ','.join(rules['allow']), ','.join(rules['disallow']), ','.join(rules['noindex']), robots_meta, canonical,", "string[len(prefix):] else: return string def beautify(response): content_type = response.headers.get('content-type') if", "try: html = beautify(response) robots_meta = ','.join(i['content'] for i in", "pattern.search(line): print u'%s:%s' % (force_unicode(response.url), force_unicode(line)) if delay: time.sleep(delay) def", "NotHtmlException(Exception): pass class UrlTask(tuple): \"\"\" We need to keep track", "force_bytes(referer) response = session.get( url, headers={'Referer': referer, 'User-Agent': 'Fusionbox spider'},", "(url, referer) def lremove(string, prefix): \"\"\" Remove a prefix from", "this project's version from bs4 import BeautifulSoup try: import lxml", "html.head.find_all('meta', {\"name\": \"description\"})[0]['content'] except (AttributeError, IndexError, KeyError): pass try: keywords", "collections import time import requests from eek import robotparser #", "def __iter__(self): while 
self.to_visit: (url, referer) = self.to_visit.pop() self.visited.add(url) yield", "import csv import sys import re import collections import time", "response in get_pages(base, VisitOnlyOnceClerk(), session=session): for link in get_links(response): print", "# redirect yield urlparse.urldefrag( urlparse.urljoin(response.url, response.headers['location'], False) )[0] try: html", "def __hash__(self): return hash(self[0]) def __eq__(self, other): return self[0] ==", "NotHtmlException: pass writer.writerow(map(force_bytes, [ response.url, title, description, keywords, ','.join(rules['allow']), ','.join(rules['disallow']),", "= html.head.find_all('meta', {\"name\": \"keywords\"})[0]['content'] except (AttributeError, IndexError, KeyError): pass except", "for referer, response in get_pages(base, VisitOnlyOnceClerk(), session=session): for line in", "session.get( url, headers={'Referer': referer, 'User-Agent': 'Fusionbox spider'}, allow_redirects=False, ) for", "time.sleep(delay) def grep_spider(base, pattern, delay=0, insensitive=False, insecure=False): flags = 0", "but we don't want to add a url multiple times", ">>> lremove('foo.com', 'www.') 'foo.com' \"\"\" if string.startswith(prefix): return string[len(prefix):] else:", "'disallow', 'noindex', 'meta robots', 'canonical', 'referer', 'status'] def encoding_from_content_type(content_type): \"\"\"", "get_links(response): parsed = urlparse.urlparse(link) if lremove(parsed.netloc, 'www.') == base_domain: clerk.enqueue(link,", "== base_domain: clerk.enqueue(link, url) yield referer, response def metadata_spider(base, output=sys.stdout,", "except IndexError: pass try: title = html.head.title.contents[0] except (AttributeError, IndexError):", "print \"digraph links {\" session = requests.session() session.verify = not", "except UnicodeEncodeError: raise NotHtmlException def get_links(response): if 300 <= response.status_code", "isinstance(str_or_unicode, unicode): return str_or_unicode.encode('utf-8') else: return 
str_or_unicode def get_pages(base, clerk,", "response.headers['location'], False) )[0] try: html = beautify(response) for i in", "robots_meta, canonical, referer, response.status_code, ])) if delay: time.sleep(delay) def grep_spider(base,", "encoding='utf-8') else: return s def force_bytes(str_or_unicode): if isinstance(str_or_unicode, unicode): return", "= re.compile(\"text/html\") headers = ['url', 'title', 'description', 'keywords', 'allow', 'disallow',", "re import collections import time import requests from eek import", "allow_redirects=False, ) for link in get_links(response): parsed = urlparse.urlparse(link) if", "(AttributeError, IndexError): pass try: description = html.head.find_all('meta', {\"name\": \"description\"})[0]['content'] except", "lremove(urlparse.urlparse(base).netloc, 'www.') for (url, referer) in clerk: url = force_bytes(url)", "'User-Agent': 'Fusionbox spider'}, allow_redirects=False, ) for link in get_links(response): parsed", "ImportError: HTML_PARSER = None else: HTML_PARSER = 'lxml' encoding_re =", "= not insecure for referer, response in get_pages(base, VisitOnlyOnceClerk(), session=session):", "session.verify = not insecure for referer, response in get_pages(base, VisitOnlyOnceClerk(),", "delay=0, insecure=False): print \"digraph links {\" session = requests.session() session.verify", "raise NotHtmlException def get_links(response): if 300 <= response.status_code < 400", "match and match.group(1) or None class NotHtmlException(Exception): pass class UrlTask(tuple):", "referer, response in get_pages(base, VisitOnlyOnceClerk(), session=session): rules = applicable_robot_rules(robots, response.url)", "'foo.com' >>> lremove('foo.com', 'www.') 'foo.com' \"\"\" if string.startswith(prefix): return string[len(prefix):]", "response.content.split('\\n'): if pattern.search(line): print u'%s:%s' % (force_unicode(response.url), force_unicode(line)) if delay:", "flags) session = requests.session() session.verify = not insecure for referer,", "if 
not content_type: return None match = encoding_re.search(content_type) return match", "description = html.head.find_all('meta', {\"name\": \"description\"})[0]['content'] except (AttributeError, IndexError, KeyError): pass", "\"description\"})[0]['content'] except (AttributeError, IndexError, KeyError): pass try: keywords = html.head.find_all('meta',", "','.join(rules['disallow']), ','.join(rules['noindex']), robots_meta, canonical, referer, response.status_code, ])) if delay: time.sleep(delay)", "= re.compile(pattern, flags) session = requests.session() session.verify = not insecure", "to add a url multiple times just because it was", "the charset from a Content-Type header. >>> encoding_from_content_type('text/html; charset=utf-8') 'utf-8'", "insecure for referer, response in get_pages(base, VisitOnlyOnceClerk(), session=session): for line", "self.visited: self.to_visit.add(UrlTask((url, referer))) def __bool__(self): return bool(self.to_visit) def __iter__(self): while", "= ['url', 'title', 'description', 'keywords', 'allow', 'disallow', 'noindex', 'meta robots',", "yield urlparse.urldefrag( urlparse.urljoin(response.url, response.headers['location'], False) )[0] try: html = beautify(response)", "referers, but we don't want to add a url multiple", "urlparse.urldefrag( urlparse.urljoin(response.url, response.headers['location'], False) )[0] try: html = beautify(response) for", "= '' try: html = beautify(response) robots_meta = ','.join(i['content'] for", "__eq__(self, other): return self[0] == other[0] class VisitOnlyOnceClerk(object): def __init__(self):", "for link in get_links(response): parsed = urlparse.urlparse(link) if lremove(parsed.netloc, 'www.')", "html.head.title.contents[0] except (AttributeError, IndexError): pass try: description = html.head.find_all('meta', {\"name\":", "except (AttributeError, IndexError, KeyError): pass except NotHtmlException: pass writer.writerow(map(force_bytes, [", "delay=0, insensitive=False, insecure=False): flags = 0 if 
insensitive: flags |=", "insecure=False): flags = 0 if insensitive: flags |= re.IGNORECASE pattern", "= html.find_all('link', {\"rel\": \"canonical\"})[0]['href'] except IndexError: pass try: title =", "<filename>eek/spider.py import urlparse import csv import sys import re import", "def graphviz_spider(base, delay=0, insecure=False): print \"digraph links {\" session =", "just because it was referenced on multiple pages \"\"\" def", "encoding = encoding_from_content_type(content_type) else: encoding = None try: return BeautifulSoup(", "response.url, title, description, keywords, ','.join(rules['allow']), ','.join(rules['disallow']), ','.join(rules['noindex']), robots_meta, canonical, referer,", "= force_bytes(referer) response = session.get( url, headers={'Referer': referer, 'User-Agent': 'Fusionbox", "pass try: title = html.head.title.contents[0] except (AttributeError, IndexError): pass try:", "get_pages(base, VisitOnlyOnceClerk(), session=session): for line in response.content.split('\\n'): if pattern.search(line): print", "-> \"%s\";' % (force_bytes(response.url), force_bytes(link)) if delay: time.sleep(delay) print \"}\"", "if string.startswith(prefix): return string[len(prefix):] else: return string def beautify(response): content_type", "it exists. 
>>> lremove('www.foo.com', 'www.') 'foo.com' >>> lremove('foo.com', 'www.') 'foo.com'", "i in html.find_all('meta', {\"name\": \"robots\"})) try: canonical = html.find_all('link', {\"rel\":", "delay: time.sleep(delay) def graphviz_spider(base, delay=0, insecure=False): print \"digraph links {\"", "encoding_from_content_type('text/html') >>> \"\"\" if not content_type: return None match =", "isinstance(s, str): return unicode(s, encoding='utf-8') else: return s def force_bytes(str_or_unicode):", "html_re.search(content_type): raise NotHtmlException encoding = encoding_from_content_type(content_type) else: encoding = None", "= html.head.title.contents[0] except (AttributeError, IndexError): pass try: description = html.head.find_all('meta',", "keywords, ','.join(rules['allow']), ','.join(rules['disallow']), ','.join(rules['noindex']), robots_meta, canonical, referer, response.status_code, ])) if", "= re.compile(\"charset\\s*=\\s*(\\S+?)(;|$)\") html_re = re.compile(\"text/html\") headers = ['url', 'title', 'description',", "return unicode(s, encoding='utf-8') else: return s def force_bytes(str_or_unicode): if isinstance(str_or_unicode,", "'lxml' encoding_re = re.compile(\"charset\\s*=\\s*(\\S+?)(;|$)\") html_re = re.compile(\"text/html\") headers = ['url',", "None try: return BeautifulSoup( response.content, features=HTML_PARSER, from_encoding=encoding, ) except UnicodeEncodeError:", "link in get_links(response): print ' \"%s\" -> \"%s\";' % (force_bytes(response.url),", "not insecure for referer, response in get_pages(base, VisitOnlyOnceClerk(), session=session): for", "'www.') 'foo.com' >>> lremove('foo.com', 'www.') 'foo.com' \"\"\" if string.startswith(prefix): return", "try: keywords = html.head.find_all('meta', {\"name\": \"keywords\"})[0]['content'] except (AttributeError, IndexError, KeyError):", "import robotparser # this project's version from bs4 import BeautifulSoup", "keywords = html.head.find_all('meta', {\"name\": \"keywords\"})[0]['content'] except 
(AttributeError, IndexError, KeyError): pass", "html = beautify(response) for i in html.find_all('a', href=True): yield urlparse.urldefrag(urlparse.urljoin(response.url,", "session=session): for link in get_links(response): print ' \"%s\" -> \"%s\";'", "response.content, features=HTML_PARSER, from_encoding=encoding, ) except UnicodeEncodeError: raise NotHtmlException def get_links(response):", "encoding_from_content_type(content_type) else: encoding = None try: return BeautifulSoup( response.content, features=HTML_PARSER,", "get_pages(base, VisitOnlyOnceClerk(), session=session): for link in get_links(response): print ' \"%s\"", "self.to_visit.pop() self.visited.add(url) yield (url, referer) def lremove(string, prefix): \"\"\" Remove", "= session.get( url, headers={'Referer': referer, 'User-Agent': 'Fusionbox spider'}, allow_redirects=False, )", "' \"%s\" -> \"%s\";' % (force_bytes(response.url), force_bytes(link)) if delay: time.sleep(delay)", "= ','.join(i['content'] for i in html.find_all('meta', {\"name\": \"robots\"})) try: canonical", "def beautify(response): content_type = response.headers.get('content-type') if content_type: if not html_re.search(content_type):", "in get_links(response): print ' \"%s\" -> \"%s\";' % (force_bytes(response.url), force_bytes(link))", "if not html_re.search(content_type): raise NotHtmlException encoding = encoding_from_content_type(content_type) else: encoding", "url in self.visited: self.to_visit.add(UrlTask((url, referer))) def __bool__(self): return bool(self.to_visit) def", "clerk.enqueue(base, base) base_domain = lremove(urlparse.urlparse(base).netloc, 'www.') for (url, referer) in", "= set() def enqueue(self, url, referer): if not url in", "return BeautifulSoup( response.content, features=HTML_PARSER, from_encoding=encoding, ) except UnicodeEncodeError: raise NotHtmlException", "line in response.content.split('\\n'): if pattern.search(line): print u'%s:%s' % (force_unicode(response.url), force_unicode(line))", "\"\"\" We 
need to keep track of referers, but we", "try: description = html.head.find_all('meta', {\"name\": \"description\"})[0]['content'] except (AttributeError, IndexError, KeyError):", "'keywords', 'allow', 'disallow', 'noindex', 'meta robots', 'canonical', 'referer', 'status'] def", ">>> encoding_from_content_type('text/html') >>> \"\"\" if not content_type: return None match", "not url in self.visited: self.to_visit.add(UrlTask((url, referer))) def __bool__(self): return bool(self.to_visit)", ") for link in get_links(response): parsed = urlparse.urlparse(link) if lremove(parsed.netloc,", "beautify(response): content_type = response.headers.get('content-type') if content_type: if not html_re.search(content_type): raise", "re.IGNORECASE pattern = re.compile(pattern, flags) session = requests.session() session.verify =", "if pattern.search(line): print u'%s:%s' % (force_unicode(response.url), force_unicode(line)) if delay: time.sleep(delay)", "while self.to_visit: (url, referer) = self.to_visit.pop() self.visited.add(url) yield (url, referer)", "a Content-Type header. >>> encoding_from_content_type('text/html; charset=utf-8') 'utf-8' >>> encoding_from_content_type('text/html') >>>", "None class NotHtmlException(Exception): pass class UrlTask(tuple): \"\"\" We need to", "exists. 
>>> lremove('www.foo.com', 'www.') 'foo.com' >>> lremove('foo.com', 'www.') 'foo.com' \"\"\"", "if delay: time.sleep(delay) def graphviz_spider(base, delay=0, insecure=False): print \"digraph links", "else: return string def beautify(response): content_type = response.headers.get('content-type') if content_type:", "response def metadata_spider(base, output=sys.stdout, delay=0, insecure=False): writer = csv.writer(output) robots", "it was referenced on multiple pages \"\"\" def __hash__(self): return", "rules = applicable_robot_rules(robots, response.url) robots_meta = canonical = title =", "'status'] def encoding_from_content_type(content_type): \"\"\" Extracts the charset from a Content-Type", "= robotparser.RobotFileParser(base + '/robots.txt') robots.read() writer.writerow(headers) session = requests.session() session.verify", "html.find_all('a', href=True): yield urlparse.urldefrag(urlparse.urljoin(response.url, i['href'], False))[0] except NotHtmlException: pass def", "import BeautifulSoup try: import lxml except ImportError: HTML_PARSER = None", "clerk: url = force_bytes(url) referer = force_bytes(referer) response = session.get(", "class VisitOnlyOnceClerk(object): def __init__(self): self.visited = set() self.to_visit = set()", "\"}\" def applicable_robot_rules(robots, url): rules = collections.defaultdict(list) if robots.default_entry: rules[robots.default_entry.allowance(url)].append('*')", "requests from eek import robotparser # this project's version from", "(url, referer) = self.to_visit.pop() self.visited.add(url) yield (url, referer) def lremove(string,", "import requests from eek import robotparser # this project's version", "keep track of referers, but we don't want to add", "session=requests.session()): clerk.enqueue(base, base) base_domain = lremove(urlparse.urlparse(base).netloc, 'www.') for (url, referer)", "get_links(response): print ' \"%s\" -> \"%s\";' % (force_bytes(response.url), force_bytes(link)) if", "from eek import robotparser # this 
project's version from bs4", "KeyError): pass try: keywords = html.head.find_all('meta', {\"name\": \"keywords\"})[0]['content'] except (AttributeError,", "a string, if it exists. >>> lremove('www.foo.com', 'www.') 'foo.com' >>>", "__init__(self): self.visited = set() self.to_visit = set() def enqueue(self, url,", "pass try: keywords = html.head.find_all('meta', {\"name\": \"keywords\"})[0]['content'] except (AttributeError, IndexError,", "referer, response.status_code, ])) if delay: time.sleep(delay) def grep_spider(base, pattern, delay=0,", "keywords = '' try: html = beautify(response) robots_meta = ','.join(i['content']", "in clerk: url = force_bytes(url) referer = force_bytes(referer) response =", "\"\"\" if not content_type: return None match = encoding_re.search(content_type) return", "def force_unicode(s): if isinstance(s, str): return unicode(s, encoding='utf-8') else: return", "force_bytes(str_or_unicode): if isinstance(str_or_unicode, unicode): return str_or_unicode.encode('utf-8') else: return str_or_unicode def", "= title = description = keywords = '' try: html", "if isinstance(s, str): return unicode(s, encoding='utf-8') else: return s def", "except NotHtmlException: pass writer.writerow(map(force_bytes, [ response.url, title, description, keywords, ','.join(rules['allow']),", "'www.') 'foo.com' \"\"\" if string.startswith(prefix): return string[len(prefix):] else: return string", "str): return unicode(s, encoding='utf-8') else: return s def force_bytes(str_or_unicode): if", "UrlTask(tuple): \"\"\" We need to keep track of referers, but", "html.find_all('meta', {\"name\": \"robots\"})) try: canonical = html.find_all('link', {\"rel\": \"canonical\"})[0]['href'] except", "i in html.find_all('a', href=True): yield urlparse.urldefrag(urlparse.urljoin(response.url, i['href'], False))[0] except NotHtmlException:", "urlparse.urljoin(response.url, response.headers['location'], False) )[0] try: html = beautify(response) for i", "and 
response.headers['location']: # redirect yield urlparse.urldefrag( urlparse.urljoin(response.url, response.headers['location'], False) )[0]", "pattern, delay=0, insensitive=False, insecure=False): flags = 0 if insensitive: flags", "encoding_from_content_type('text/html; charset=utf-8') 'utf-8' >>> encoding_from_content_type('text/html') >>> \"\"\" if not content_type:", "__bool__(self): return bool(self.to_visit) def __iter__(self): while self.to_visit: (url, referer) =", "== other[0] class VisitOnlyOnceClerk(object): def __init__(self): self.visited = set() self.to_visit", "string.startswith(prefix): return string[len(prefix):] else: return string def beautify(response): content_type =", "referer = force_bytes(referer) response = session.get( url, headers={'Referer': referer, 'User-Agent':", "on multiple pages \"\"\" def __hash__(self): return hash(self[0]) def __eq__(self,", "description = keywords = '' try: html = beautify(response) robots_meta", "or None class NotHtmlException(Exception): pass class UrlTask(tuple): \"\"\" We need", "delay: time.sleep(delay) print \"}\" def applicable_robot_rules(robots, url): rules = collections.defaultdict(list)", "try: import lxml except ImportError: HTML_PARSER = None else: HTML_PARSER", "force_unicode(line)) if delay: time.sleep(delay) def graphviz_spider(base, delay=0, insecure=False): print \"digraph", "in html.find_all('a', href=True): yield urlparse.urldefrag(urlparse.urljoin(response.url, i['href'], False))[0] except NotHtmlException: pass", "'' try: html = beautify(response) robots_meta = ','.join(i['content'] for i", "referer, response in get_pages(base, VisitOnlyOnceClerk(), session=session): for link in get_links(response):", "% (force_unicode(response.url), force_unicode(line)) if delay: time.sleep(delay) def graphviz_spider(base, delay=0, insecure=False):", "html_re = re.compile(\"text/html\") headers = ['url', 'title', 'description', 'keywords', 'allow',", "else: return str_or_unicode def get_pages(base, clerk, 
session=requests.session()): clerk.enqueue(base, base) base_domain", "multiple times just because it was referenced on multiple pages", "import urlparse import csv import sys import re import collections", "session=session): for line in response.content.split('\\n'): if pattern.search(line): print u'%s:%s' %", "'title', 'description', 'keywords', 'allow', 'disallow', 'noindex', 'meta robots', 'canonical', 'referer',", "clerk.enqueue(link, url) yield referer, response def metadata_spider(base, output=sys.stdout, delay=0, insecure=False):", "and match.group(1) or None class NotHtmlException(Exception): pass class UrlTask(tuple): \"\"\"", "import re import collections import time import requests from eek", "= beautify(response) for i in html.find_all('a', href=True): yield urlparse.urldefrag(urlparse.urljoin(response.url, i['href'],", "['url', 'title', 'description', 'keywords', 'allow', 'disallow', 'noindex', 'meta robots', 'canonical',", "s def force_bytes(str_or_unicode): if isinstance(str_or_unicode, unicode): return str_or_unicode.encode('utf-8') else: return", "insecure for referer, response in get_pages(base, VisitOnlyOnceClerk(), session=session): for link", "other[0] class VisitOnlyOnceClerk(object): def __init__(self): self.visited = set() self.to_visit =", "html.find_all('link', {\"rel\": \"canonical\"})[0]['href'] except IndexError: pass try: title = html.head.title.contents[0]", "def get_pages(base, clerk, session=requests.session()): clerk.enqueue(base, base) base_domain = lremove(urlparse.urlparse(base).netloc, 'www.')", "requests.session() session.verify = not insecure for referer, response in get_pages(base,", "charset from a Content-Type header. 
>>> encoding_from_content_type('text/html; charset=utf-8') 'utf-8' >>>", "base_domain = lremove(urlparse.urlparse(base).netloc, 'www.') for (url, referer) in clerk: url", "referer) def lremove(string, prefix): \"\"\" Remove a prefix from a", "return None match = encoding_re.search(content_type) return match and match.group(1) or", "try: canonical = html.find_all('link', {\"rel\": \"canonical\"})[0]['href'] except IndexError: pass try:", "force_bytes(url) referer = force_bytes(referer) response = session.get( url, headers={'Referer': referer,", "applicable_robot_rules(robots, url): rules = collections.defaultdict(list) if robots.default_entry: rules[robots.default_entry.allowance(url)].append('*') for entry", "encoding_re.search(content_type) return match and match.group(1) or None class NotHtmlException(Exception): pass", "i['href'], False))[0] except NotHtmlException: pass def force_unicode(s): if isinstance(s, str):", "\"keywords\"})[0]['content'] except (AttributeError, IndexError, KeyError): pass except NotHtmlException: pass writer.writerow(map(force_bytes,", "parsed = urlparse.urlparse(link) if lremove(parsed.netloc, 'www.') == base_domain: clerk.enqueue(link, url)", "','.join(i['content'] for i in html.find_all('meta', {\"name\": \"robots\"})) try: canonical =", "def enqueue(self, url, referer): if not url in self.visited: self.to_visit.add(UrlTask((url,", "in response.content.split('\\n'): if pattern.search(line): print u'%s:%s' % (force_unicode(response.url), force_unicode(line)) if", "return string def beautify(response): content_type = response.headers.get('content-type') if content_type: if", "referer, response def metadata_spider(base, output=sys.stdout, delay=0, insecure=False): writer = csv.writer(output)", "\"\"\" def __hash__(self): return hash(self[0]) def __eq__(self, other): return self[0]", "else: return s def force_bytes(str_or_unicode): if isinstance(str_or_unicode, unicode): return str_or_unicode.encode('utf-8')", "def lremove(string, 
prefix): \"\"\" Remove a prefix from a string,", "if it exists. >>> lremove('www.foo.com', 'www.') 'foo.com' >>> lremove('foo.com', 'www.')", "if delay: time.sleep(delay) print \"}\" def applicable_robot_rules(robots, url): rules =", "print \"}\" def applicable_robot_rules(robots, url): rules = collections.defaultdict(list) if robots.default_entry:", "<= response.status_code < 400 and response.headers['location']: # redirect yield urlparse.urldefrag(", "headers={'Referer': referer, 'User-Agent': 'Fusionbox spider'}, allow_redirects=False, ) for link in", "canonical, referer, response.status_code, ])) if delay: time.sleep(delay) def grep_spider(base, pattern,", "def applicable_robot_rules(robots, url): rules = collections.defaultdict(list) if robots.default_entry: rules[robots.default_entry.allowance(url)].append('*') for", "= collections.defaultdict(list) if robots.default_entry: rules[robots.default_entry.allowance(url)].append('*') for entry in robots.entries: rules[entry.allowance(url)].extend(entry.useragents)", "times just because it was referenced on multiple pages \"\"\"", "'Fusionbox spider'}, allow_redirects=False, ) for link in get_links(response): parsed =", "'/robots.txt') robots.read() writer.writerow(headers) session = requests.session() session.verify = not insecure", "300 <= response.status_code < 400 and response.headers['location']: # redirect yield", "response in get_pages(base, VisitOnlyOnceClerk(), session=session): for line in response.content.split('\\n'): if", "re.compile(pattern, flags) session = requests.session() session.verify = not insecure for", "writer.writerow(map(force_bytes, [ response.url, title, description, keywords, ','.join(rules['allow']), ','.join(rules['disallow']), ','.join(rules['noindex']), robots_meta,", "yield (url, referer) def lremove(string, prefix): \"\"\" Remove a prefix", "encoding = None try: return BeautifulSoup( response.content, features=HTML_PARSER, from_encoding=encoding, )", "html.head.find_all('meta', 
{\"name\": \"keywords\"})[0]['content'] except (AttributeError, IndexError, KeyError): pass except NotHtmlException:", "url): rules = collections.defaultdict(list) if robots.default_entry: rules[robots.default_entry.allowance(url)].append('*') for entry in", "def grep_spider(base, pattern, delay=0, insensitive=False, insecure=False): flags = 0 if", "pass class UrlTask(tuple): \"\"\" We need to keep track of", "rules = collections.defaultdict(list) if robots.default_entry: rules[robots.default_entry.allowance(url)].append('*') for entry in robots.entries:", "0 if insensitive: flags |= re.IGNORECASE pattern = re.compile(pattern, flags)", "= None else: HTML_PARSER = 'lxml' encoding_re = re.compile(\"charset\\s*=\\s*(\\S+?)(;|$)\") html_re", "was referenced on multiple pages \"\"\" def __hash__(self): return hash(self[0])", "BeautifulSoup( response.content, features=HTML_PARSER, from_encoding=encoding, ) except UnicodeEncodeError: raise NotHtmlException def", "metadata_spider(base, output=sys.stdout, delay=0, insecure=False): writer = csv.writer(output) robots = robotparser.RobotFileParser(base", "\"\"\" Extracts the charset from a Content-Type header. 
>>> encoding_from_content_type('text/html;", "version from bs4 import BeautifulSoup try: import lxml except ImportError:", "features=HTML_PARSER, from_encoding=encoding, ) except UnicodeEncodeError: raise NotHtmlException def get_links(response): if", "we don't want to add a url multiple times just", "if lremove(parsed.netloc, 'www.') == base_domain: clerk.enqueue(link, url) yield referer, response", "time.sleep(delay) def graphviz_spider(base, delay=0, insecure=False): print \"digraph links {\" session", "= encoding_re.search(content_type) return match and match.group(1) or None class NotHtmlException(Exception):", "\"%s\";' % (force_bytes(response.url), force_bytes(link)) if delay: time.sleep(delay) print \"}\" def", "\"digraph links {\" session = requests.session() session.verify = not insecure", "insensitive: flags |= re.IGNORECASE pattern = re.compile(pattern, flags) session =", "urlparse.urldefrag(urlparse.urljoin(response.url, i['href'], False))[0] except NotHtmlException: pass def force_unicode(s): if isinstance(s,", "def metadata_spider(base, output=sys.stdout, delay=0, insecure=False): writer = csv.writer(output) robots =", "if content_type: if not html_re.search(content_type): raise NotHtmlException encoding = encoding_from_content_type(content_type)", "robots_meta = canonical = title = description = keywords =", "(force_unicode(response.url), force_unicode(line)) if delay: time.sleep(delay) def graphviz_spider(base, delay=0, insecure=False): print", "in self.visited: self.to_visit.add(UrlTask((url, referer))) def __bool__(self): return bool(self.to_visit) def __iter__(self):", "match = encoding_re.search(content_type) return match and match.group(1) or None class", "href=True): yield urlparse.urldefrag(urlparse.urljoin(response.url, i['href'], False))[0] except NotHtmlException: pass def force_unicode(s):", "url, referer): if not url in self.visited: self.to_visit.add(UrlTask((url, referer))) def", "response.headers.get('content-type') if content_type: 
if not html_re.search(content_type): raise NotHtmlException encoding =", "else: HTML_PARSER = 'lxml' encoding_re = re.compile(\"charset\\s*=\\s*(\\S+?)(;|$)\") html_re = re.compile(\"text/html\")", "= force_bytes(url) referer = force_bytes(referer) response = session.get( url, headers={'Referer':", "+ '/robots.txt') robots.read() writer.writerow(headers) session = requests.session() session.verify = not", "])) if delay: time.sleep(delay) def grep_spider(base, pattern, delay=0, insensitive=False, insecure=False):", "try: title = html.head.title.contents[0] except (AttributeError, IndexError): pass try: description", "track of referers, but we don't want to add a", "str_or_unicode.encode('utf-8') else: return str_or_unicode def get_pages(base, clerk, session=requests.session()): clerk.enqueue(base, base)", "'noindex', 'meta robots', 'canonical', 'referer', 'status'] def encoding_from_content_type(content_type): \"\"\" Extracts", "of referers, but we don't want to add a url", "string, if it exists. 
>>> lremove('www.foo.com', 'www.') 'foo.com' >>> lremove('foo.com',", "flags |= re.IGNORECASE pattern = re.compile(pattern, flags) session = requests.session()", "self[0] == other[0] class VisitOnlyOnceClerk(object): def __init__(self): self.visited = set()", "IndexError, KeyError): pass except NotHtmlException: pass writer.writerow(map(force_bytes, [ response.url, title,", "from_encoding=encoding, ) except UnicodeEncodeError: raise NotHtmlException def get_links(response): if 300", "return str_or_unicode.encode('utf-8') else: return str_or_unicode def get_pages(base, clerk, session=requests.session()): clerk.enqueue(base,", "force_bytes(link)) if delay: time.sleep(delay) print \"}\" def applicable_robot_rules(robots, url): rules", "writer.writerow(headers) session = requests.session() session.verify = not insecure for referer,", "class UrlTask(tuple): \"\"\" We need to keep track of referers,", "flags = 0 if insensitive: flags |= re.IGNORECASE pattern =", "encoding_from_content_type(content_type): \"\"\" Extracts the charset from a Content-Type header. >>>", "eek import robotparser # this project's version from bs4 import", "because it was referenced on multiple pages \"\"\" def __hash__(self):", "bs4 import BeautifulSoup try: import lxml except ImportError: HTML_PARSER =", "yield referer, response def metadata_spider(base, output=sys.stdout, delay=0, insecure=False): writer =", "referenced on multiple pages \"\"\" def __hash__(self): return hash(self[0]) def", "from a string, if it exists. >>> lremove('www.foo.com', 'www.') 'foo.com'", "a prefix from a string, if it exists. 
>>> lremove('www.foo.com',", "except ImportError: HTML_PARSER = None else: HTML_PARSER = 'lxml' encoding_re", "headers = ['url', 'title', 'description', 'keywords', 'allow', 'disallow', 'noindex', 'meta", "a url multiple times just because it was referenced on", "urlparse.urlparse(link) if lremove(parsed.netloc, 'www.') == base_domain: clerk.enqueue(link, url) yield referer,", "get_links(response): if 300 <= response.status_code < 400 and response.headers['location']: #", "unicode): return str_or_unicode.encode('utf-8') else: return str_or_unicode def get_pages(base, clerk, session=requests.session()):", "insecure=False): print \"digraph links {\" session = requests.session() session.verify =", "self.to_visit = set() def enqueue(self, url, referer): if not url", "url multiple times just because it was referenced on multiple", "clerk, session=requests.session()): clerk.enqueue(base, base) base_domain = lremove(urlparse.urlparse(base).netloc, 'www.') for (url,", "(url, referer) in clerk: url = force_bytes(url) referer = force_bytes(referer)", "else: encoding = None try: return BeautifulSoup( response.content, features=HTML_PARSER, from_encoding=encoding,", "bool(self.to_visit) def __iter__(self): while self.to_visit: (url, referer) = self.to_visit.pop() self.visited.add(url)", "'referer', 'status'] def encoding_from_content_type(content_type): \"\"\" Extracts the charset from a", "= requests.session() session.verify = not insecure for referer, response in", "if robots.default_entry: rules[robots.default_entry.allowance(url)].append('*') for entry in robots.entries: rules[entry.allowance(url)].extend(entry.useragents) return rules", "NotHtmlException def get_links(response): if 300 <= response.status_code < 400 and", "UnicodeEncodeError: raise NotHtmlException def get_links(response): if 300 <= response.status_code <", "hash(self[0]) def __eq__(self, other): return self[0] == other[0] class VisitOnlyOnceClerk(object):", "except (AttributeError, IndexError, KeyError): 
pass try: keywords = html.head.find_all('meta', {\"name\":", "links {\" session = requests.session() session.verify = not insecure for", "\"\"\" if string.startswith(prefix): return string[len(prefix):] else: return string def beautify(response):", "None else: HTML_PARSER = 'lxml' encoding_re = re.compile(\"charset\\s*=\\s*(\\S+?)(;|$)\") html_re =", "return str_or_unicode def get_pages(base, clerk, session=requests.session()): clerk.enqueue(base, base) base_domain =", "return bool(self.to_visit) def __iter__(self): while self.to_visit: (url, referer) = self.to_visit.pop()", "= keywords = '' try: html = beautify(response) robots_meta =", "(AttributeError, IndexError, KeyError): pass try: keywords = html.head.find_all('meta', {\"name\": \"keywords\"})[0]['content']", "to keep track of referers, but we don't want to", "title = html.head.title.contents[0] except (AttributeError, IndexError): pass try: description =", "False) )[0] try: html = beautify(response) for i in html.find_all('a',", "from bs4 import BeautifulSoup try: import lxml except ImportError: HTML_PARSER", "for referer, response in get_pages(base, VisitOnlyOnceClerk(), session=session): for link in", "u'%s:%s' % (force_unicode(response.url), force_unicode(line)) if delay: time.sleep(delay) def graphviz_spider(base, delay=0,", "400 and response.headers['location']: # redirect yield urlparse.urldefrag( urlparse.urljoin(response.url, response.headers['location'], False)", "redirect yield urlparse.urldefrag( urlparse.urljoin(response.url, response.headers['location'], False) )[0] try: html =", "if 300 <= response.status_code < 400 and response.headers['location']: # redirect", "str_or_unicode def get_pages(base, clerk, session=requests.session()): clerk.enqueue(base, base) base_domain = lremove(urlparse.urlparse(base).netloc,", "force_unicode(s): if isinstance(s, str): return unicode(s, encoding='utf-8') else: return s", "= self.to_visit.pop() self.visited.add(url) yield (url, referer) def lremove(string, 
prefix): \"\"\"", "\"\"\" Remove a prefix from a string, if it exists.", "VisitOnlyOnceClerk(), session=session): rules = applicable_robot_rules(robots, response.url) robots_meta = canonical =", "{\" session = requests.session() session.verify = not insecure for referer,", "output=sys.stdout, delay=0, insecure=False): writer = csv.writer(output) robots = robotparser.RobotFileParser(base +", ") except UnicodeEncodeError: raise NotHtmlException def get_links(response): if 300 <=", "referer))) def __bool__(self): return bool(self.to_visit) def __iter__(self): while self.to_visit: (url,", ">>> \"\"\" if not content_type: return None match = encoding_re.search(content_type)", "csv import sys import re import collections import time import", "lremove(string, prefix): \"\"\" Remove a prefix from a string, if", "set() self.to_visit = set() def enqueue(self, url, referer): if not", "not content_type: return None match = encoding_re.search(content_type) return match and", "# this project's version from bs4 import BeautifulSoup try: import", "'foo.com' \"\"\" if string.startswith(prefix): return string[len(prefix):] else: return string def", "response.headers['location']: # redirect yield urlparse.urldefrag( urlparse.urljoin(response.url, response.headers['location'], False) )[0] try:", "not html_re.search(content_type): raise NotHtmlException encoding = encoding_from_content_type(content_type) else: encoding =", "self.to_visit.add(UrlTask((url, referer))) def __bool__(self): return bool(self.to_visit) def __iter__(self): while self.to_visit:", "return s def force_bytes(str_or_unicode): if isinstance(str_or_unicode, unicode): return str_or_unicode.encode('utf-8') else:", "for link in get_links(response): print ' \"%s\" -> \"%s\";' %", "|= re.IGNORECASE pattern = re.compile(pattern, flags) session = requests.session() session.verify", "response.status_code < 400 and response.headers['location']: # redirect yield urlparse.urldefrag( urlparse.urljoin(response.url,", "print 
u'%s:%s' % (force_unicode(response.url), force_unicode(line)) if delay: time.sleep(delay) def graphviz_spider(base,", "referer) = self.to_visit.pop() self.visited.add(url) yield (url, referer) def lremove(string, prefix):", "__hash__(self): return hash(self[0]) def __eq__(self, other): return self[0] == other[0]", "= csv.writer(output) robots = robotparser.RobotFileParser(base + '/robots.txt') robots.read() writer.writerow(headers) session", "csv.writer(output) robots = robotparser.RobotFileParser(base + '/robots.txt') robots.read() writer.writerow(headers) session =", "canonical = title = description = keywords = '' try:", "want to add a url multiple times just because it", "referer, 'User-Agent': 'Fusionbox spider'}, allow_redirects=False, ) for link in get_links(response):", ">>> encoding_from_content_type('text/html; charset=utf-8') 'utf-8' >>> encoding_from_content_type('text/html') >>> \"\"\" if not", "'description', 'keywords', 'allow', 'disallow', 'noindex', 'meta robots', 'canonical', 'referer', 'status']", "'allow', 'disallow', 'noindex', 'meta robots', 'canonical', 'referer', 'status'] def encoding_from_content_type(content_type):", "Extracts the charset from a Content-Type header. >>> encoding_from_content_type('text/html; charset=utf-8')", "return self[0] == other[0] class VisitOnlyOnceClerk(object): def __init__(self): self.visited =", "prefix from a string, if it exists. 
>>> lremove('www.foo.com', 'www.')", "for (url, referer) in clerk: url = force_bytes(url) referer =", "for i in html.find_all('meta', {\"name\": \"robots\"})) try: canonical = html.find_all('link',", "multiple pages \"\"\" def __hash__(self): return hash(self[0]) def __eq__(self, other):", "except (AttributeError, IndexError): pass try: description = html.head.find_all('meta', {\"name\": \"description\"})[0]['content']", "in html.find_all('meta', {\"name\": \"robots\"})) try: canonical = html.find_all('link', {\"rel\": \"canonical\"})[0]['href']", "def __init__(self): self.visited = set() self.to_visit = set() def enqueue(self,", "base) base_domain = lremove(urlparse.urlparse(base).netloc, 'www.') for (url, referer) in clerk:", "graphviz_spider(base, delay=0, insecure=False): print \"digraph links {\" session = requests.session()", "return string[len(prefix):] else: return string def beautify(response): content_type = response.headers.get('content-type')", "class NotHtmlException(Exception): pass class UrlTask(tuple): \"\"\" We need to keep", "robots.read() writer.writerow(headers) session = requests.session() session.verify = not insecure for", "__iter__(self): while self.to_visit: (url, referer) = self.to_visit.pop() self.visited.add(url) yield (url,", "beautify(response) for i in html.find_all('a', href=True): yield urlparse.urldefrag(urlparse.urljoin(response.url, i['href'], False))[0]", "BeautifulSoup try: import lxml except ImportError: HTML_PARSER = None else:", "robots', 'canonical', 'referer', 'status'] def encoding_from_content_type(content_type): \"\"\" Extracts the charset", "match.group(1) or None class NotHtmlException(Exception): pass class UrlTask(tuple): \"\"\" We", "if isinstance(str_or_unicode, unicode): return str_or_unicode.encode('utf-8') else: return str_or_unicode def get_pages(base,", "url) yield referer, response def metadata_spider(base, output=sys.stdout, delay=0, insecure=False): writer", "response in get_pages(base, 
VisitOnlyOnceClerk(), session=session): rules = applicable_robot_rules(robots, response.url) robots_meta", "= 'lxml' encoding_re = re.compile(\"charset\\s*=\\s*(\\S+?)(;|$)\") html_re = re.compile(\"text/html\") headers =", "= html.head.find_all('meta', {\"name\": \"description\"})[0]['content'] except (AttributeError, IndexError, KeyError): pass try:", "Remove a prefix from a string, if it exists. >>>", "NotHtmlException: pass def force_unicode(s): if isinstance(s, str): return unicode(s, encoding='utf-8')", "session = requests.session() session.verify = not insecure for referer, response", "KeyError): pass except NotHtmlException: pass writer.writerow(map(force_bytes, [ response.url, title, description,", "for referer, response in get_pages(base, VisitOnlyOnceClerk(), session=session): rules = applicable_robot_rules(robots,", "prefix): \"\"\" Remove a prefix from a string, if it", "self.visited.add(url) yield (url, referer) def lremove(string, prefix): \"\"\" Remove a", "IndexError: pass try: title = html.head.title.contents[0] except (AttributeError, IndexError): pass", "delay=0, insecure=False): writer = csv.writer(output) robots = robotparser.RobotFileParser(base + '/robots.txt')", "beautify(response) robots_meta = ','.join(i['content'] for i in html.find_all('meta', {\"name\": \"robots\"}))", "link in get_links(response): parsed = urlparse.urlparse(link) if lremove(parsed.netloc, 'www.') ==", "self.to_visit: (url, referer) = self.to_visit.pop() self.visited.add(url) yield (url, referer) def", "','.join(rules['noindex']), robots_meta, canonical, referer, response.status_code, ])) if delay: time.sleep(delay) def", "def __eq__(self, other): return self[0] == other[0] class VisitOnlyOnceClerk(object): def", "{\"rel\": \"canonical\"})[0]['href'] except IndexError: pass try: title = html.head.title.contents[0] except", "except NotHtmlException: pass def force_unicode(s): if isinstance(s, str): return unicode(s,", "pattern = re.compile(pattern, flags) session = 
requests.session() session.verify = not", "url = force_bytes(url) referer = force_bytes(referer) response = session.get( url,", "raise NotHtmlException encoding = encoding_from_content_type(content_type) else: encoding = None try:", "print ' \"%s\" -> \"%s\";' % (force_bytes(response.url), force_bytes(link)) if delay:", "referer): if not url in self.visited: self.to_visit.add(UrlTask((url, referer))) def __bool__(self):", "import sys import re import collections import time import requests", "Content-Type header. >>> encoding_from_content_type('text/html; charset=utf-8') 'utf-8' >>> encoding_from_content_type('text/html') >>> \"\"\"", "HTML_PARSER = 'lxml' encoding_re = re.compile(\"charset\\s*=\\s*(\\S+?)(;|$)\") html_re = re.compile(\"text/html\") headers", "url, headers={'Referer': referer, 'User-Agent': 'Fusionbox spider'}, allow_redirects=False, ) for link", "= canonical = title = description = keywords = ''", "other): return self[0] == other[0] class VisitOnlyOnceClerk(object): def __init__(self): self.visited", "def __bool__(self): return bool(self.to_visit) def __iter__(self): while self.to_visit: (url, referer)" ]
[ "plant, a numeric vector `diff` `cross - self` for each", "filename = 'zea_mays.csv' if not os.path.exists(os.path.join(path, filename)): url = 'http://dustintran.com/data/r/HistData/ZeaMays.csv'", "a numeric vector `diff` `cross - self` for each pair", "levels `1` `2` `3` `4` `cross` height of cross fertilized", "difference, `cross - self`). Later in the book (section 21),", "<NAME>. (1985) *Data: a collection of problems from many fields", "In the *Design of Experiments*, Fisher (1935) used these data", "`self` height of self fertilized plant, a numeric vector `diff`", "file or otherwise file will be downloaded and extracted there.", "pd.read_csv(os.path.join(path, filename), index_col=0, parse_dates=True) x_train = data.values metadata = {'columns':", "self-fertilization, but otherwise grown under identical conditions. His goal was", "to directory which either stores file or otherwise file will", "test, treating each paired difference as having (randomly) either a", "5 columns and dictionary `metadata` of column headers (feature names).", "height of cross fertilized plant, a numeric vector `self` height", "as np import os import sys from observations.util import maybe_download_and_extract", "cross fertilized plant, a numeric vector `self` height of self", "'http://dustintran.com/data/r/HistData/ZeaMays.csv' maybe_download_and_extract(path, url, save_file_name='zea_mays.csv', resume=False) data = pd.read_csv(os.path.join(path, filename), index_col=0,", "student and research worker*. New York: Springer. Data retrieved from:", "rows and 5 columns and dictionary `metadata` of column headers", "under identical conditions. His goal was to demonstrate the greater", "1/8th) of the plants in each pair. In the *Design", "otherwise file will be downloaded and extracted there. Filename is", "pair <NAME>. (1876). 
*The Effect of Cross- and Self-fertilization in", "15 rows and 5 columns and dictionary `metadata` of column", "import print_function import csv import numpy as np import os", "the *Design of Experiments*, Fisher (1935) used these data to", "Pairs Darwin (1876) studied the growth of pairs of zea", "division from __future__ import print_function import csv import numpy as", "`2` `3` `4` `cross` height of cross fertilized plant, a", "for the student and research worker*. New York: Springer. Data", "example of a non-parametric permutation test, treating each paired difference", "(aka corn) seedlings, one produced by cross-fertilization and the other", "a non-parametric permutation test, treating each paired difference as having", "sign. A data frame with 15 observations on the following", "and dictionary `metadata` of column headers (feature names). \"\"\" import", "with 15 rows and 5 columns and dictionary `metadata` of", "file will be downloaded and extracted there. Filename is `zea_mays.csv`.", "with levels `1` `2` `3` `4` `cross` height of cross", "Cross- and Self-fertilization in the Vegetable Kingdom*, 2nd Ed. London:", "`4` `cross` height of cross fertilized plant, a numeric vector", "Ed. London: <NAME>. <NAME>. and <NAME>. (1985) *Data: a collection", "import numpy as np import os import sys from observations.util", "but otherwise grown under identical conditions. His goal was to", "2nd Ed. London: <NAME>. <NAME>. and <NAME>. (1985) *Data: a", "from __future__ import absolute_import from __future__ import division from __future__", "this data to illustrate an early example of a non-parametric", "<NAME>. <NAME>. and <NAME>. (1985) *Data: a collection of problems", "vector `pot` pot, a factor with levels `1` `2` `3`", "np.ndarray `x_train` with 15 rows and 5 columns and dictionary", "the following 4 variables. `pair` pair number, a numeric vector", "on the mean difference, `cross - self`). Later in the", "the nearest 1/8th) of the plants in each pair. 
In", "of self fertilized plant, a numeric vector `diff` `cross -", "`cross - self`). Later in the book (section 21), he", "in each pair. In the *Design of Experiments*, Fisher (1935)", "an early example of a non-parametric permutation test, treating each", "utf-8 -*- from __future__ import absolute_import from __future__ import division", "may (aka corn) seedlings, one produced by cross-fertilization and the", "`pot` pot, a factor with levels `1` `2` `3` `4`", "filename)): url = 'http://dustintran.com/data/r/HistData/ZeaMays.csv' maybe_download_and_extract(path, url, save_file_name='zea_mays.csv', resume=False) data =", "os import sys from observations.util import maybe_download_and_extract def zea_mays(path): \"\"\"Darwin's", "(randomly) either a positive or negative sign. A data frame", "which either stores file or otherwise file will be downloaded", "non-parametric permutation test, treating each paired difference as having (randomly)", "from observations.util import maybe_download_and_extract def zea_mays(path): \"\"\"Darwin's Heights of Cross-", "columns and dictionary `metadata` of column headers (feature names). \"\"\"", "May Pairs Darwin (1876) studied the growth of pairs of", "self` for each pair <NAME>. (1876). *The Effect of Cross-", "filename), index_col=0, parse_dates=True) x_train = data.values metadata = {'columns': data.columns}", "London: <NAME>. <NAME>. and <NAME>. (1985) *Data: a collection of", "import pandas as pd path = os.path.expanduser(path) filename = 'zea_mays.csv'", "Springer. Data retrieved from: `https://www.stat.cmu.edu/StatDat/` Args: path: str. Path to", "height (inches, to the nearest 1/8th) of the plants in", "paired t-test (well, a one-sample test on the mean difference,", "print_function import csv import numpy as np import os import", "His goal was to demonstrate the greater vigour of the", "conditions. 
His goal was to demonstrate the greater vigour of", "the other produced by self-fertilization, but otherwise grown under identical", "difference as having (randomly) either a positive or negative sign.", "to demonstrate the greater vigour of the cross-fertilized plants. The", "the mean difference, `cross - self`). Later in the book", "cross-fertilized plants. The data recorded are the final height (inches,", "a paired t-test (well, a one-sample test on the mean", "corn) seedlings, one produced by cross-fertilization and the other produced", "url = 'http://dustintran.com/data/r/HistData/ZeaMays.csv' maybe_download_and_extract(path, url, save_file_name='zea_mays.csv', resume=False) data = pd.read_csv(os.path.join(path,", "of Cross- and Self-fertilization in the Vegetable Kingdom*, 2nd Ed.", "__future__ import print_function import csv import numpy as np import", "of Experiments*, Fisher (1935) used these data to illustrate a", "following 4 variables. `pair` pair number, a numeric vector `pot`", "Heights of Cross- and Self-fertilized Zea May Pairs Darwin (1876)", "Path to directory which either stores file or otherwise file", "= 'http://dustintran.com/data/r/HistData/ZeaMays.csv' maybe_download_and_extract(path, url, save_file_name='zea_mays.csv', resume=False) data = pd.read_csv(os.path.join(path, filename),", "identical conditions. His goal was to demonstrate the greater vigour", "headers (feature names). \"\"\" import pandas as pd path =", "final height (inches, to the nearest 1/8th) of the plants", "Zea May Pairs Darwin (1876) studied the growth of pairs", "`x_train` with 15 rows and 5 columns and dictionary `metadata`", "plants. The data recorded are the final height (inches, to", "each paired difference as having (randomly) either a positive or", "path: str. Path to directory which either stores file or", "are the final height (inches, to the nearest 1/8th) of", "Self-fertilization in the Vegetable Kingdom*, 2nd Ed. London: <NAME>. 
<NAME>.", "illustrate an early example of a non-parametric permutation test, treating", "Kingdom*, 2nd Ed. London: <NAME>. <NAME>. and <NAME>. (1985) *Data:", "zea_mays(path): \"\"\"Darwin's Heights of Cross- and Self-fertilized Zea May Pairs", "(1935) used these data to illustrate a paired t-test (well,", "Later in the book (section 21), he used this data", "illustrate a paired t-test (well, a one-sample test on the", "retrieved from: `https://www.stat.cmu.edu/StatDat/` Args: path: str. Path to directory which", "import os import sys from observations.util import maybe_download_and_extract def zea_mays(path):", "problems from many fields for the student and research worker*.", "each pair. In the *Design of Experiments*, Fisher (1935) used", "test on the mean difference, `cross - self`). Later in", "- self` for each pair <NAME>. (1876). *The Effect of", "`cross - self` for each pair <NAME>. (1876). *The Effect", "of pairs of zea may (aka corn) seedlings, one produced", "from __future__ import division from __future__ import print_function import csv", "vector `diff` `cross - self` for each pair <NAME>. (1876).", "Returns: Tuple of np.ndarray `x_train` with 15 rows and 5", "he used this data to illustrate an early example of", "15 observations on the following 4 variables. `pair` pair number,", "from: `https://www.stat.cmu.edu/StatDat/` Args: path: str. Path to directory which either", "other produced by self-fertilization, but otherwise grown under identical conditions.", "pair. In the *Design of Experiments*, Fisher (1935) used these", "self fertilized plant, a numeric vector `diff` `cross - self`", "data to illustrate an early example of a non-parametric permutation", "produced by self-fertilization, but otherwise grown under identical conditions. His", "csv import numpy as np import os import sys from", "a positive or negative sign. 
A data frame with 15", "pd path = os.path.expanduser(path) filename = 'zea_mays.csv' if not os.path.exists(os.path.join(path,", "number, a numeric vector `pot` pot, a factor with levels", "extracted there. Filename is `zea_mays.csv`. Returns: Tuple of np.ndarray `x_train`", "- self`). Later in the book (section 21), he used", "(1876). *The Effect of Cross- and Self-fertilization in the Vegetable", "frame with 15 observations on the following 4 variables. `pair`", "in the Vegetable Kingdom*, 2nd Ed. London: <NAME>. <NAME>. and", "the book (section 21), he used this data to illustrate", "and <NAME>. (1985) *Data: a collection of problems from many", "having (randomly) either a positive or negative sign. A data", "negative sign. A data frame with 15 observations on the", "path = os.path.expanduser(path) filename = 'zea_mays.csv' if not os.path.exists(os.path.join(path, filename)):", "`https://www.stat.cmu.edu/StatDat/` Args: path: str. Path to directory which either stores", "= os.path.expanduser(path) filename = 'zea_mays.csv' if not os.path.exists(os.path.join(path, filename)): url", "maybe_download_and_extract(path, url, save_file_name='zea_mays.csv', resume=False) data = pd.read_csv(os.path.join(path, filename), index_col=0, parse_dates=True)", "(feature names). \"\"\" import pandas as pd path = os.path.expanduser(path)", "`zea_mays.csv`. 
Returns: Tuple of np.ndarray `x_train` with 15 rows and", "pair number, a numeric vector `pot` pot, a factor with", "Fisher (1935) used these data to illustrate a paired t-test", "plant, a numeric vector `self` height of self fertilized plant,", "`cross` height of cross fertilized plant, a numeric vector `self`", "resume=False) data = pd.read_csv(os.path.join(path, filename), index_col=0, parse_dates=True) x_train = data.values", "os.path.expanduser(path) filename = 'zea_mays.csv' if not os.path.exists(os.path.join(path, filename)): url =", "of a non-parametric permutation test, treating each paired difference as", "by self-fertilization, but otherwise grown under identical conditions. His goal", "was to demonstrate the greater vigour of the cross-fertilized plants.", "import sys from observations.util import maybe_download_and_extract def zea_mays(path): \"\"\"Darwin's Heights", "Effect of Cross- and Self-fertilization in the Vegetable Kingdom*, 2nd", "data to illustrate a paired t-test (well, a one-sample test", "import absolute_import from __future__ import division from __future__ import print_function", "York: Springer. Data retrieved from: `https://www.stat.cmu.edu/StatDat/` Args: path: str. Path", "of the cross-fertilized plants. The data recorded are the final", "a numeric vector `self` height of self fertilized plant, a", "of the plants in each pair. In the *Design of", "nearest 1/8th) of the plants in each pair. 
In the", "maybe_download_and_extract def zea_mays(path): \"\"\"Darwin's Heights of Cross- and Self-fertilized Zea", "(1876) studied the growth of pairs of zea may (aka", "as pd path = os.path.expanduser(path) filename = 'zea_mays.csv' if not", "import division from __future__ import print_function import csv import numpy", "of problems from many fields for the student and research", "url, save_file_name='zea_mays.csv', resume=False) data = pd.read_csv(os.path.join(path, filename), index_col=0, parse_dates=True) x_train", "cross-fertilization and the other produced by self-fertilization, but otherwise grown", "worker*. New York: Springer. Data retrieved from: `https://www.stat.cmu.edu/StatDat/` Args: path:", "and Self-fertilized Zea May Pairs Darwin (1876) studied the growth", "data = pd.read_csv(os.path.join(path, filename), index_col=0, parse_dates=True) x_train = data.values metadata", "(1985) *Data: a collection of problems from many fields for", "New York: Springer. Data retrieved from: `https://www.stat.cmu.edu/StatDat/` Args: path: str.", "collection of problems from many fields for the student and", "a factor with levels `1` `2` `3` `4` `cross` height", "a collection of problems from many fields for the student", "numpy as np import os import sys from observations.util import", "the cross-fertilized plants. 
The data recorded are the final height", "pairs of zea may (aka corn) seedlings, one produced by", "(well, a one-sample test on the mean difference, `cross -", "def zea_mays(path): \"\"\"Darwin's Heights of Cross- and Self-fertilized Zea May", "coding: utf-8 -*- from __future__ import absolute_import from __future__ import", "Cross- and Self-fertilized Zea May Pairs Darwin (1876) studied the", "if not os.path.exists(os.path.join(path, filename)): url = 'http://dustintran.com/data/r/HistData/ZeaMays.csv' maybe_download_and_extract(path, url, save_file_name='zea_mays.csv',", "Self-fertilized Zea May Pairs Darwin (1876) studied the growth of", "used this data to illustrate an early example of a", "either a positive or negative sign. A data frame with", "t-test (well, a one-sample test on the mean difference, `cross", "(inches, to the nearest 1/8th) of the plants in each", "4 variables. `pair` pair number, a numeric vector `pot` pot,", "*The Effect of Cross- and Self-fertilization in the Vegetable Kingdom*,", "stores file or otherwise file will be downloaded and extracted", "observations on the following 4 variables. `pair` pair number, a", "early example of a non-parametric permutation test, treating each paired", "<gh_stars>100-1000 # -*- coding: utf-8 -*- from __future__ import absolute_import", "(section 21), he used this data to illustrate an early", "in the book (section 21), he used this data to", "the greater vigour of the cross-fertilized plants. The data recorded", "directory which either stores file or otherwise file will be", "The data recorded are the final height (inches, to the", "names). \"\"\" import pandas as pd path = os.path.expanduser(path) filename", "pandas as pd path = os.path.expanduser(path) filename = 'zea_mays.csv' if", "numeric vector `pot` pot, a factor with levels `1` `2`", "each pair <NAME>. (1876). *The Effect of Cross- and Self-fertilization", "and research worker*. New York: Springer. 
Data retrieved from: `https://www.stat.cmu.edu/StatDat/`", "`3` `4` `cross` height of cross fertilized plant, a numeric", "\"\"\"Darwin's Heights of Cross- and Self-fertilized Zea May Pairs Darwin", "vigour of the cross-fertilized plants. The data recorded are the", "and the other produced by self-fertilization, but otherwise grown under", "on the following 4 variables. `pair` pair number, a numeric", "numeric vector `diff` `cross - self` for each pair <NAME>.", "a one-sample test on the mean difference, `cross - self`).", "import csv import numpy as np import os import sys", "of np.ndarray `x_train` with 15 rows and 5 columns and", "one-sample test on the mean difference, `cross - self`). Later", "and Self-fertilization in the Vegetable Kingdom*, 2nd Ed. London: <NAME>.", "sys from observations.util import maybe_download_and_extract def zea_mays(path): \"\"\"Darwin's Heights of", "__future__ import absolute_import from __future__ import division from __future__ import", "greater vigour of the cross-fertilized plants. The data recorded are", "zea may (aka corn) seedlings, one produced by cross-fertilization and", "Filename is `zea_mays.csv`. Returns: Tuple of np.ndarray `x_train` with 15", "<NAME>. and <NAME>. 
(1985) *Data: a collection of problems from", "pot, a factor with levels `1` `2` `3` `4` `cross`", "observations.util import maybe_download_and_extract def zea_mays(path): \"\"\"Darwin's Heights of Cross- and", "from __future__ import print_function import csv import numpy as np", "factor with levels `1` `2` `3` `4` `cross` height of", "save_file_name='zea_mays.csv', resume=False) data = pd.read_csv(os.path.join(path, filename), index_col=0, parse_dates=True) x_train =", "treating each paired difference as having (randomly) either a positive", "fertilized plant, a numeric vector `diff` `cross - self` for", "21), he used this data to illustrate an early example", "either stores file or otherwise file will be downloaded and", "not os.path.exists(os.path.join(path, filename)): url = 'http://dustintran.com/data/r/HistData/ZeaMays.csv' maybe_download_and_extract(path, url, save_file_name='zea_mays.csv', resume=False)", "self`). Later in the book (section 21), he used this", "fields for the student and research worker*. New York: Springer.", "or otherwise file will be downloaded and extracted there. Filename", "grown under identical conditions. His goal was to demonstrate the", "of Cross- and Self-fertilized Zea May Pairs Darwin (1876) studied", "be downloaded and extracted there. Filename is `zea_mays.csv`. Returns: Tuple", "Darwin (1876) studied the growth of pairs of zea may", "there. Filename is `zea_mays.csv`. Returns: Tuple of np.ndarray `x_train` with", "for each pair <NAME>. (1876). *The Effect of Cross- and", "used these data to illustrate a paired t-test (well, a", "of cross fertilized plant, a numeric vector `self` height of", "the plants in each pair. In the *Design of Experiments*,", "os.path.exists(os.path.join(path, filename)): url = 'http://dustintran.com/data/r/HistData/ZeaMays.csv' maybe_download_and_extract(path, url, save_file_name='zea_mays.csv', resume=False) data", "downloaded and extracted there. Filename is `zea_mays.csv`. 
Returns: Tuple of", "\"\"\" import pandas as pd path = os.path.expanduser(path) filename =", "otherwise grown under identical conditions. His goal was to demonstrate", "growth of pairs of zea may (aka corn) seedlings, one", "recorded are the final height (inches, to the nearest 1/8th)", "*Design of Experiments*, Fisher (1935) used these data to illustrate", "permutation test, treating each paired difference as having (randomly) either", "from many fields for the student and research worker*. New", "A data frame with 15 observations on the following 4", "'zea_mays.csv' if not os.path.exists(os.path.join(path, filename)): url = 'http://dustintran.com/data/r/HistData/ZeaMays.csv' maybe_download_and_extract(path, url,", "import maybe_download_and_extract def zea_mays(path): \"\"\"Darwin's Heights of Cross- and Self-fertilized", "data recorded are the final height (inches, to the nearest", "vector `self` height of self fertilized plant, a numeric vector", "one produced by cross-fertilization and the other produced by self-fertilization,", "index_col=0, parse_dates=True) x_train = data.values metadata = {'columns': data.columns} return", "many fields for the student and research worker*. New York:", "seedlings, one produced by cross-fertilization and the other produced by", "dictionary `metadata` of column headers (feature names). \"\"\" import pandas", "the growth of pairs of zea may (aka corn) seedlings,", "produced by cross-fertilization and the other produced by self-fertilization, but", "x_train = data.values metadata = {'columns': data.columns} return x_train, metadata", "`pair` pair number, a numeric vector `pot` pot, a factor", "__future__ import division from __future__ import print_function import csv import", "to illustrate an early example of a non-parametric permutation test,", "book (section 21), he used this data to illustrate an", "the student and research worker*. New York: Springer. Data retrieved", "column headers (feature names). 
\"\"\" import pandas as pd path", "fertilized plant, a numeric vector `self` height of self fertilized", "a numeric vector `pot` pot, a factor with levels `1`", "studied the growth of pairs of zea may (aka corn)", "and extracted there. Filename is `zea_mays.csv`. Returns: Tuple of np.ndarray", "the final height (inches, to the nearest 1/8th) of the", "# -*- coding: utf-8 -*- from __future__ import absolute_import from", "with 15 observations on the following 4 variables. `pair` pair", "to the nearest 1/8th) of the plants in each pair.", "Data retrieved from: `https://www.stat.cmu.edu/StatDat/` Args: path: str. Path to directory", "and 5 columns and dictionary `metadata` of column headers (feature", "<NAME>. (1876). *The Effect of Cross- and Self-fertilization in the", "Vegetable Kingdom*, 2nd Ed. London: <NAME>. <NAME>. and <NAME>. (1985)", "-*- from __future__ import absolute_import from __future__ import division from", "or negative sign. A data frame with 15 observations on", "Tuple of np.ndarray `x_train` with 15 rows and 5 columns", "by cross-fertilization and the other produced by self-fertilization, but otherwise", "is `zea_mays.csv`. Returns: Tuple of np.ndarray `x_train` with 15 rows", "numeric vector `self` height of self fertilized plant, a numeric", "str. Path to directory which either stores file or otherwise", "-*- coding: utf-8 -*- from __future__ import absolute_import from __future__", "will be downloaded and extracted there. Filename is `zea_mays.csv`. Returns:", "as having (randomly) either a positive or negative sign. A", "positive or negative sign. 
A data frame with 15 observations", "`1` `2` `3` `4` `cross` height of cross fertilized plant,", "absolute_import from __future__ import division from __future__ import print_function import", "= 'zea_mays.csv' if not os.path.exists(os.path.join(path, filename)): url = 'http://dustintran.com/data/r/HistData/ZeaMays.csv' maybe_download_and_extract(path,", "*Data: a collection of problems from many fields for the", "data frame with 15 observations on the following 4 variables.", "these data to illustrate a paired t-test (well, a one-sample", "to illustrate a paired t-test (well, a one-sample test on", "research worker*. New York: Springer. Data retrieved from: `https://www.stat.cmu.edu/StatDat/` Args:", "Args: path: str. Path to directory which either stores file", "variables. `pair` pair number, a numeric vector `pot` pot, a", "parse_dates=True) x_train = data.values metadata = {'columns': data.columns} return x_train,", "of column headers (feature names). \"\"\" import pandas as pd", "of zea may (aka corn) seedlings, one produced by cross-fertilization", "`diff` `cross - self` for each pair <NAME>. (1876). *The", "plants in each pair. In the *Design of Experiments*, Fisher", "= pd.read_csv(os.path.join(path, filename), index_col=0, parse_dates=True) x_train = data.values metadata =", "height of self fertilized plant, a numeric vector `diff` `cross", "paired difference as having (randomly) either a positive or negative", "Experiments*, Fisher (1935) used these data to illustrate a paired", "demonstrate the greater vigour of the cross-fertilized plants. The data", "mean difference, `cross - self`). Later in the book (section", "goal was to demonstrate the greater vigour of the cross-fertilized", "the Vegetable Kingdom*, 2nd Ed. London: <NAME>. <NAME>. and <NAME>.", "`metadata` of column headers (feature names). \"\"\" import pandas as", "np import os import sys from observations.util import maybe_download_and_extract def" ]
[ "from .Line import Line @dataclass class Lines: \"\"\"Product / service", "List from dataclasses import dataclass from .Line import Line @dataclass", "typing import List from dataclasses import dataclass from .Line import", "/ service items :param merged_item_indicator: Indicates whether the data exchange", ":param merged_item_indicator: Indicates whether the data exchange contains merged line", "Indicates whether the data exchange contains merged line data due", "due to size reduction :param line: Product / service item", "from dataclasses import dataclass from .Line import Line @dataclass class", "@dataclass class Lines: \"\"\"Product / service items :param merged_item_indicator: Indicates", "items :param merged_item_indicator: Indicates whether the data exchange contains merged", "size reduction :param line: Product / service item \"\"\" merged_item_indicator:", "<filename>ois_api_client/v3_0/dto/Lines.py from typing import List from dataclasses import dataclass from", "dataclass from .Line import Line @dataclass class Lines: \"\"\"Product /", "exchange contains merged line data due to size reduction :param", ":param line: Product / service item \"\"\" merged_item_indicator: bool line:", "class Lines: \"\"\"Product / service items :param merged_item_indicator: Indicates whether", "data exchange contains merged line data due to size reduction", "import dataclass from .Line import Line @dataclass class Lines: \"\"\"Product", "service items :param merged_item_indicator: Indicates whether the data exchange contains", "contains merged line data due to size reduction :param line:", "data due to size reduction :param line: Product / service", "line: Product / service item \"\"\" merged_item_indicator: bool line: List[Line]", ".Line import Line @dataclass class Lines: \"\"\"Product / service items", "Line @dataclass class Lines: \"\"\"Product / service items :param merged_item_indicator:", "from typing import List from dataclasses import dataclass from .Line", "import 
List from dataclasses import dataclass from .Line import Line", "dataclasses import dataclass from .Line import Line @dataclass class Lines:", "reduction :param line: Product / service item \"\"\" merged_item_indicator: bool", "\"\"\"Product / service items :param merged_item_indicator: Indicates whether the data", "Lines: \"\"\"Product / service items :param merged_item_indicator: Indicates whether the", "to size reduction :param line: Product / service item \"\"\"", "import Line @dataclass class Lines: \"\"\"Product / service items :param", "merged line data due to size reduction :param line: Product", "merged_item_indicator: Indicates whether the data exchange contains merged line data", "the data exchange contains merged line data due to size", "line data due to size reduction :param line: Product /", "whether the data exchange contains merged line data due to" ]
[ "_, line = read_block(f) return doc, line def create_doc(data, out_folder,", "+ os.sep + \"ADI.ALL\" out_folder = \"test_index\" try: # averiguar", "\"\\n\") f.write(data[\"content\"] + \"\\n\") f.write(data[\"authors\"]) def parse_all(s, out_folder): with open(s)", "line line = f.readline() return s, line def read_doc(f): doc", "preguntar si una carpeta o fichero existe en python os.mkdir(out_folder)", "'<NAME>' import os import pprint def read_block(f): s = \"\"", "+ name, 'w') as f: f.write(data[\"title\"] + \"\\n\") f.write(data[\"content\"] +", "line = read_doc(f) create_doc(doc, out_folder, doc_name) # print(\"**********************************\") if __name__", "while line and not line.startswith(\".I\"): if line.startswith(\".T\"): doc[\"title\"], line =", "while line: doc_name = \"d%03d.txt\" % (int(line.strip().split()[-1])) doc, line =", "= read_block(f) elif line.startswith(\".A\"): doc[\"authors\"], line = read_block(f) elif line.startswith(\".W\"):", "def parse_all(s, out_folder): with open(s) as f: line = f.readline()", "doc_name = \"d%03d.txt\" % (int(line.strip().split()[-1])) doc, line = read_doc(f) create_doc(doc,", "os import pprint def read_block(f): s = \"\" line =", "= f.readline() while line and not line.startswith(\".\"): s += line", "name, 'w') as f: f.write(data[\"title\"] + \"\\n\") f.write(data[\"content\"] + \"\\n\")", "out_folder, name): with open(out_folder + os.sep + name, 'w') as", "\"\" line = f.readline() while line and not line.startswith(\".\"): s", "\"\", \"content\": \"\"} line = f.readline() while line and not", "\"adi\" + os.sep + \"ADI.ALL\" out_folder = \"test_index\" try: #", "como preguntar si una carpeta o fichero existe en python", "(int(line.strip().split()[-1])) doc, line = read_doc(f) create_doc(doc, out_folder, doc_name) # print(\"**********************************\")", "= read_doc(f) create_doc(doc, out_folder, doc_name) # print(\"**********************************\") if __name__ ==", "'__main__': s = \"adi\" + 
os.sep + \"ADI.ALL\" out_folder =", "o fichero existe en python os.mkdir(out_folder) except FileExistsError: pass parse_all(s,", "if line.startswith(\".T\"): doc[\"title\"], line = read_block(f) elif line.startswith(\".A\"): doc[\"authors\"], line", "doc[\"content\"], line = read_block(f) else: _, line = read_block(f) return", "doc = {\"title\": \"\", \"authors\": \"\", \"content\": \"\"} line =", "doc[\"title\"], line = read_block(f) elif line.startswith(\".A\"): doc[\"authors\"], line = read_block(f)", "line and not line.startswith(\".I\"): if line.startswith(\".T\"): doc[\"title\"], line = read_block(f)", "out_folder, doc_name) # print(\"**********************************\") if __name__ == '__main__': s =", "line = f.readline() while line and not line.startswith(\".I\"): if line.startswith(\".T\"):", "elif line.startswith(\".A\"): doc[\"authors\"], line = read_block(f) elif line.startswith(\".W\"): doc[\"content\"], line", "def create_doc(data, out_folder, name): with open(out_folder + os.sep + name,", "f: f.write(data[\"title\"] + \"\\n\") f.write(data[\"content\"] + \"\\n\") f.write(data[\"authors\"]) def parse_all(s,", "= \"adi\" + os.sep + \"ADI.ALL\" out_folder = \"test_index\" try:", "f.readline() while line and not line.startswith(\".\"): s += line line", "doc, line def create_doc(data, out_folder, name): with open(out_folder + os.sep", "\"d%03d.txt\" % (int(line.strip().split()[-1])) doc, line = read_doc(f) create_doc(doc, out_folder, doc_name)", "= \"test_index\" try: # averiguar como preguntar si una carpeta", "= {\"title\": \"\", \"authors\": \"\", \"content\": \"\"} line = f.readline()", "f.readline() # .I while line: doc_name = \"d%03d.txt\" % (int(line.strip().split()[-1]))", "+ \"\\n\") f.write(data[\"content\"] + \"\\n\") f.write(data[\"authors\"]) def parse_all(s, out_folder): with", "s, line def read_doc(f): doc = {\"title\": \"\", \"authors\": \"\",", "open(out_folder + os.sep + name, 'w') as f: f.write(data[\"title\"] +", "averiguar como 
preguntar si una carpeta o fichero existe en", "f.readline() while line and not line.startswith(\".I\"): if line.startswith(\".T\"): doc[\"title\"], line", "def read_block(f): s = \"\" line = f.readline() while line", "parse_all(s, out_folder): with open(s) as f: line = f.readline() #", "= '<NAME>' import os import pprint def read_block(f): s =", "\"content\": \"\"} line = f.readline() while line and not line.startswith(\".I\"):", "line.startswith(\".W\"): doc[\"content\"], line = read_block(f) else: _, line = read_block(f)", "as f: line = f.readline() # .I while line: doc_name", "doc, line = read_doc(f) create_doc(doc, out_folder, doc_name) # print(\"**********************************\") if", "else: _, line = read_block(f) return doc, line def create_doc(data,", "line: doc_name = \"d%03d.txt\" % (int(line.strip().split()[-1])) doc, line = read_doc(f)", "line = f.readline() return s, line def read_doc(f): doc =", "f: line = f.readline() # .I while line: doc_name =", "s += line line = f.readline() return s, line def", "+ \"\\n\") f.write(data[\"authors\"]) def parse_all(s, out_folder): with open(s) as f:", "= read_block(f) else: _, line = read_block(f) return doc, line", "f.write(data[\"title\"] + \"\\n\") f.write(data[\"content\"] + \"\\n\") f.write(data[\"authors\"]) def parse_all(s, out_folder):", "and not line.startswith(\".\"): s += line line = f.readline() return", "% (int(line.strip().split()[-1])) doc, line = read_doc(f) create_doc(doc, out_folder, doc_name) #", "= f.readline() return s, line def read_doc(f): doc = {\"title\":", "elif line.startswith(\".W\"): doc[\"content\"], line = read_block(f) else: _, line =", "doc[\"authors\"], line = read_block(f) elif line.startswith(\".W\"): doc[\"content\"], line = read_block(f)", "f.readline() return s, line def read_doc(f): doc = {\"title\": \"\",", "fichero existe en python os.mkdir(out_folder) except FileExistsError: pass parse_all(s, out_folder)", "read_block(f): s = \"\" line = f.readline() while line and", 
"create_doc(data, out_folder, name): with open(out_folder + os.sep + name, 'w')", "pprint def read_block(f): s = \"\" line = f.readline() while", "si una carpeta o fichero existe en python os.mkdir(out_folder) except", "{\"title\": \"\", \"authors\": \"\", \"content\": \"\"} line = f.readline() while", "= \"\" line = f.readline() while line and not line.startswith(\".\"):", "line = read_block(f) elif line.startswith(\".W\"): doc[\"content\"], line = read_block(f) else:", "line def create_doc(data, out_folder, name): with open(out_folder + os.sep +", "not line.startswith(\".I\"): if line.startswith(\".T\"): doc[\"title\"], line = read_block(f) elif line.startswith(\".A\"):", "print(\"**********************************\") if __name__ == '__main__': s = \"adi\" + os.sep", "out_folder = \"test_index\" try: # averiguar como preguntar si una", "doc_name) # print(\"**********************************\") if __name__ == '__main__': s = \"adi\"", "line = read_block(f) elif line.startswith(\".A\"): doc[\"authors\"], line = read_block(f) elif", ".I while line: doc_name = \"d%03d.txt\" % (int(line.strip().split()[-1])) doc, line", "= f.readline() while line and not line.startswith(\".I\"): if line.startswith(\".T\"): doc[\"title\"],", "read_block(f) elif line.startswith(\".W\"): doc[\"content\"], line = read_block(f) else: _, line", "== '__main__': s = \"adi\" + os.sep + \"ADI.ALL\" out_folder", "= \"d%03d.txt\" % (int(line.strip().split()[-1])) doc, line = read_doc(f) create_doc(doc, out_folder,", "while line and not line.startswith(\".\"): s += line line =", "name): with open(out_folder + os.sep + name, 'w') as f:", "\"authors\": \"\", \"content\": \"\"} line = f.readline() while line and", "try: # averiguar como preguntar si una carpeta o fichero", "s = \"adi\" + os.sep + \"ADI.ALL\" out_folder = \"test_index\"", "= read_block(f) elif line.startswith(\".W\"): doc[\"content\"], line = read_block(f) else: _,", "+ \"ADI.ALL\" out_folder = \"test_index\" try: # averiguar como 
preguntar", "\"test_index\" try: # averiguar como preguntar si una carpeta o", "def read_doc(f): doc = {\"title\": \"\", \"authors\": \"\", \"content\": \"\"}", "'w') as f: f.write(data[\"title\"] + \"\\n\") f.write(data[\"content\"] + \"\\n\") f.write(data[\"authors\"])", "with open(out_folder + os.sep + name, 'w') as f: f.write(data[\"title\"]", "read_block(f) elif line.startswith(\".A\"): doc[\"authors\"], line = read_block(f) elif line.startswith(\".W\"): doc[\"content\"],", "__name__ == '__main__': s = \"adi\" + os.sep + \"ADI.ALL\"", "not line.startswith(\".\"): s += line line = f.readline() return s,", "os.sep + \"ADI.ALL\" out_folder = \"test_index\" try: # averiguar como", "import os import pprint def read_block(f): s = \"\" line", "line = f.readline() while line and not line.startswith(\".\"): s +=", "with open(s) as f: line = f.readline() # .I while", "return doc, line def create_doc(data, out_folder, name): with open(out_folder +", "= read_block(f) return doc, line def create_doc(data, out_folder, name): with", "\"\", \"authors\": \"\", \"content\": \"\"} line = f.readline() while line", "open(s) as f: line = f.readline() # .I while line:", "+ os.sep + name, 'w') as f: f.write(data[\"title\"] + \"\\n\")", "os.sep + name, 'w') as f: f.write(data[\"title\"] + \"\\n\") f.write(data[\"content\"]", "line = read_block(f) else: _, line = read_block(f) return doc,", "read_doc(f) create_doc(doc, out_folder, doc_name) # print(\"**********************************\") if __name__ == '__main__':", "import pprint def read_block(f): s = \"\" line = f.readline()", "as f: f.write(data[\"title\"] + \"\\n\") f.write(data[\"content\"] + \"\\n\") f.write(data[\"authors\"]) def", "and not line.startswith(\".I\"): if line.startswith(\".T\"): doc[\"title\"], line = read_block(f) elif", "line.startswith(\".\"): s += line line = f.readline() return s, line", "return s, line def read_doc(f): doc = {\"title\": \"\", \"authors\":", "line def read_doc(f): doc = {\"title\": \"\", 
\"authors\": \"\", \"content\":", "out_folder): with open(s) as f: line = f.readline() # .I", "line and not line.startswith(\".\"): s += line line = f.readline()", "read_doc(f): doc = {\"title\": \"\", \"authors\": \"\", \"content\": \"\"} line", "= f.readline() # .I while line: doc_name = \"d%03d.txt\" %", "carpeta o fichero existe en python os.mkdir(out_folder) except FileExistsError: pass", "line = f.readline() # .I while line: doc_name = \"d%03d.txt\"", "f.write(data[\"content\"] + \"\\n\") f.write(data[\"authors\"]) def parse_all(s, out_folder): with open(s) as", "<filename>parsing_documents.py<gh_stars>0 __author__ = '<NAME>' import os import pprint def read_block(f):", "line.startswith(\".T\"): doc[\"title\"], line = read_block(f) elif line.startswith(\".A\"): doc[\"authors\"], line =", "if __name__ == '__main__': s = \"adi\" + os.sep +", "+= line line = f.readline() return s, line def read_doc(f):", "\"\"} line = f.readline() while line and not line.startswith(\".I\"): if", "line = read_block(f) return doc, line def create_doc(data, out_folder, name):", "# print(\"**********************************\") if __name__ == '__main__': s = \"adi\" +", "# averiguar como preguntar si una carpeta o fichero existe", "__author__ = '<NAME>' import os import pprint def read_block(f): s", "una carpeta o fichero existe en python os.mkdir(out_folder) except FileExistsError:", "s = \"\" line = f.readline() while line and not", "# .I while line: doc_name = \"d%03d.txt\" % (int(line.strip().split()[-1])) doc,", "read_block(f) else: _, line = read_block(f) return doc, line def", "read_block(f) return doc, line def create_doc(data, out_folder, name): with open(out_folder", "line.startswith(\".I\"): if line.startswith(\".T\"): doc[\"title\"], line = read_block(f) elif line.startswith(\".A\"): doc[\"authors\"],", "\"\\n\") f.write(data[\"authors\"]) def parse_all(s, out_folder): with open(s) as f: line", "line.startswith(\".A\"): doc[\"authors\"], line = read_block(f) elif 
line.startswith(\".W\"): doc[\"content\"], line =", "f.write(data[\"authors\"]) def parse_all(s, out_folder): with open(s) as f: line =", "\"ADI.ALL\" out_folder = \"test_index\" try: # averiguar como preguntar si", "create_doc(doc, out_folder, doc_name) # print(\"**********************************\") if __name__ == '__main__': s" ]
[ "sw=4 encoding=utf-8 from django.contrib import admin from groups.models import Group", "sts=4 et sw=4 encoding=utf-8 from django.contrib import admin from groups.models", "ai ts=4 sts=4 et sw=4 encoding=utf-8 from django.contrib import admin", "et sw=4 encoding=utf-8 from django.contrib import admin from groups.models import", "encoding=utf-8 from django.contrib import admin from groups.models import Group admin.site.register(Group)", "python # vim: ai ts=4 sts=4 et sw=4 encoding=utf-8 from", "ts=4 sts=4 et sw=4 encoding=utf-8 from django.contrib import admin from", "<reponame>caktus/rapidsms-groups<gh_stars>1-10 #!/usr/bin/env python # vim: ai ts=4 sts=4 et sw=4", "# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8 from django.contrib", "vim: ai ts=4 sts=4 et sw=4 encoding=utf-8 from django.contrib import", "#!/usr/bin/env python # vim: ai ts=4 sts=4 et sw=4 encoding=utf-8" ]
[ "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "4, 5, 6, np.pi / 4], [1, 2, 3, 4,", "# # Licensed under the Apache License, Version 2.0 (the", "compliance with the License. # You may obtain a copy", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "All Rights Reserved. # # Licensed under the Apache License,", "2.0 (the \"License\"); # you may not use this file", "file except in compliance with the License. # You may", "agreed to in writing, software # distributed under the License", "Unless required by applicable law or agreed to in writing,", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "avod.datasets.kitti import kitti_aug class KittiAugTest(unittest.TestCase): def test_flip_boxes_3d(self): boxes_3d = np.array([", "[1, 2, 3, 4, 5, 6, -np.pi / 4] ])", "3, 4, 5, 6, -np.pi / 4] ]) exp_flipped_boxes_3d =", "distributed under the License is distributed on an \"AS IS\"", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved. #", "Technologies Co., Ltd # # Licensed under the Apache License,", "the specific language governing permissions and # limitations under the", "# limitations under the License. # ============================================================================ # Copyright 2021", "-3 * np.pi / 4] ]) flipped_boxes_3d = kitti_aug.flip_boxes_3d(boxes_3d) np.testing.assert_almost_equal(flipped_boxes_3d,", "4], [1, 2, 3, 4, 5, 6, -np.pi / 4]", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "express or implied. # See the License for the specific", "applicable law or agreed to in writing, software # distributed", "6, -3 * np.pi / 4] ]) flipped_boxes_3d = kitti_aug.flip_boxes_3d(boxes_3d)", "except in compliance with the License. # You may obtain", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "permissions and # limitations under the License. 
from npu_bridge.npu_init import", "np from avod.datasets.kitti import kitti_aug class KittiAugTest(unittest.TestCase): def test_flip_boxes_3d(self): boxes_3d", "# ============================================================================ # Copyright 2021 Huawei Technologies Co., Ltd #", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "the License. from npu_bridge.npu_init import * import unittest import numpy", "not use this file except in compliance with the License.", "class KittiAugTest(unittest.TestCase): def test_flip_boxes_3d(self): boxes_3d = np.array([ [1, 2, 3,", "writing, software # distributed under the License is distributed on", "the License. # ============================================================================ # Copyright 2021 Huawei Technologies Co.,", "in writing, software # distributed under the License is distributed", "you may not use this file except in compliance with", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "language governing permissions and # limitations under the License. from", "KittiAugTest(unittest.TestCase): def test_flip_boxes_3d(self): boxes_3d = np.array([ [1, 2, 3, 4,", "# Copyright 2021 Huawei Technologies Co., Ltd # # Licensed", "use this file except in compliance with the License. #", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "/ 4], [-1, 2, 3, 4, 5, 6, -3 *", "kitti_aug class KittiAugTest(unittest.TestCase): def test_flip_boxes_3d(self): boxes_3d = np.array([ [1, 2,", "* np.pi / 4] ]) flipped_boxes_3d = kitti_aug.flip_boxes_3d(boxes_3d) np.testing.assert_almost_equal(flipped_boxes_3d, exp_flipped_boxes_3d)", "/ 4] ]) exp_flipped_boxes_3d = np.array([ [-1, 2, 3, 4,", "CONDITIONS OF ANY KIND, either express or implied. 
# See", "* import unittest import numpy as np from avod.datasets.kitti import", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "Ltd # # Licensed under the Apache License, Version 2.0", "5, 6, np.pi / 4], [1, 2, 3, 4, 5,", "or implied. # See the License for the specific language", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "Rights Reserved. # # Licensed under the Apache License, Version", "as np from avod.datasets.kitti import kitti_aug class KittiAugTest(unittest.TestCase): def test_flip_boxes_3d(self):", "License. # You may obtain a copy of the License", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "License, Version 2.0 (the \"License\"); # you may not use", "The TensorFlow Authors. All Rights Reserved. # # Licensed under", "# You may obtain a copy of the License at", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "2, 3, 4, 5, 6, -3 * np.pi / 4]", "under the License is distributed on an \"AS IS\" BASIS,", "3, 4, 5, 6, np.pi / 4], [1, 2, 3,", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "License for the specific language governing permissions and # limitations", "npu_bridge.npu_init import * import unittest import numpy as np from", "6, np.pi / 4], [1, 2, 3, 4, 5, 6,", "Authors. All Rights Reserved. # # Licensed under the Apache", "6, -np.pi / 4] ]) exp_flipped_boxes_3d = np.array([ [-1, 2,", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "Reserved. # # Licensed under the Apache License, Version 2.0", "2021 Huawei Technologies Co., Ltd # # Licensed under the", "and # limitations under the License. from npu_bridge.npu_init import *", "[-1, 2, 3, 4, 5, 6, 3 * np.pi /", "the License for the specific language governing permissions and #", "governing permissions and # limitations under the License. 
from npu_bridge.npu_init", "from avod.datasets.kitti import kitti_aug class KittiAugTest(unittest.TestCase): def test_flip_boxes_3d(self): boxes_3d =", "(the \"License\"); # you may not use this file except", "under the License. from npu_bridge.npu_init import * import unittest import", "from npu_bridge.npu_init import * import unittest import numpy as np", "TensorFlow Authors. All Rights Reserved. # # Licensed under the", "Apache License, Version 2.0 (the \"License\"); # you may not", "under the License. # ============================================================================ # Copyright 2021 Huawei Technologies", "# you may not use this file except in compliance", "2, 3, 4, 5, 6, -np.pi / 4] ]) exp_flipped_boxes_3d", "-np.pi / 4] ]) exp_flipped_boxes_3d = np.array([ [-1, 2, 3,", "either express or implied. # See the License for the", "numpy as np from avod.datasets.kitti import kitti_aug class KittiAugTest(unittest.TestCase): def", "5, 6, -3 * np.pi / 4] ]) flipped_boxes_3d =", "OR CONDITIONS OF ANY KIND, either express or implied. #", "= np.array([ [1, 2, 3, 4, 5, 6, np.pi /", "Copyright 2017 The TensorFlow Authors. All Rights Reserved. # #", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "the License is distributed on an \"AS IS\" BASIS, #", "in compliance with the License. # You may obtain a", "limitations under the License. # ============================================================================ # Copyright 2021 Huawei", "License. 
from npu_bridge.npu_init import * import unittest import numpy as", "software # distributed under the License is distributed on an", "[-1, 2, 3, 4, 5, 6, -3 * np.pi /", "import unittest import numpy as np from avod.datasets.kitti import kitti_aug", "2, 3, 4, 5, 6, 3 * np.pi / 4],", "Huawei Technologies Co., Ltd # # Licensed under the Apache", "# # Unless required by applicable law or agreed to", "5, 6, 3 * np.pi / 4], [-1, 2, 3,", "5, 6, -np.pi / 4] ]) exp_flipped_boxes_3d = np.array([ [-1,", "np.pi / 4], [1, 2, 3, 4, 5, 6, -np.pi", "# limitations under the License. from npu_bridge.npu_init import * import", "boxes_3d = np.array([ [1, 2, 3, 4, 5, 6, np.pi", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "Version 2.0 (the \"License\"); # you may not use this", "import numpy as np from avod.datasets.kitti import kitti_aug class KittiAugTest(unittest.TestCase):", "np.array([ [-1, 2, 3, 4, 5, 6, 3 * np.pi", "limitations under the License. from npu_bridge.npu_init import * import unittest", "2, 3, 4, 5, 6, np.pi / 4], [1, 2,", "law or agreed to in writing, software # distributed under", "============================================================================ # Copyright 2021 Huawei Technologies Co., Ltd # #", "Co., Ltd # # Licensed under the Apache License, Version", "4] ]) exp_flipped_boxes_3d = np.array([ [-1, 2, 3, 4, 5,", "= np.array([ [-1, 2, 3, 4, 5, 6, 3 *", "exp_flipped_boxes_3d = np.array([ [-1, 2, 3, 4, 5, 6, 3", "4, 5, 6, -3 * np.pi / 4] ]) flipped_boxes_3d", "implied. # See the License for the specific language governing", "6, 3 * np.pi / 4], [-1, 2, 3, 4,", "under the Apache License, Version 2.0 (the \"License\"); # you", "and # limitations under the License. 
# ============================================================================ # Copyright", "\"License\"); # you may not use this file except in", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "permissions and # limitations under the License. # ============================================================================ #", "import * import unittest import numpy as np from avod.datasets.kitti", "import kitti_aug class KittiAugTest(unittest.TestCase): def test_flip_boxes_3d(self): boxes_3d = np.array([ [1,", "4], [-1, 2, 3, 4, 5, 6, -3 * np.pi", "by applicable law or agreed to in writing, software #", "# distributed under the License is distributed on an \"AS", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "def test_flip_boxes_3d(self): boxes_3d = np.array([ [1, 2, 3, 4, 5,", "may obtain a copy of the License at # #", "# Unless required by applicable law or agreed to in", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "unittest import numpy as np from avod.datasets.kitti import kitti_aug class", "np.pi / 4], [-1, 2, 3, 4, 5, 6, -3", "the License. 
# You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "4, 5, 6, 3 * np.pi / 4], [-1, 2,", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "to in writing, software # distributed under the License is", "test_flip_boxes_3d(self): boxes_3d = np.array([ [1, 2, 3, 4, 5, 6,", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "# See the License for the specific language governing permissions", "3, 4, 5, 6, -3 * np.pi / 4] ])", "]) exp_flipped_boxes_3d = np.array([ [-1, 2, 3, 4, 5, 6,", "np.array([ [1, 2, 3, 4, 5, 6, np.pi / 4],", "You may obtain a copy of the License at #", "3, 4, 5, 6, 3 * np.pi / 4], [-1,", "language governing permissions and # limitations under the License. #", "may not use this file except in compliance with the", "or agreed to in writing, software # distributed under the", "required by applicable law or agreed to in writing, software", "Copyright 2021 Huawei Technologies Co., Ltd # # Licensed under", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "with the License. # You may obtain a copy of", "this file except in compliance with the License. # You", "the Apache License, Version 2.0 (the \"License\"); # you may", "* np.pi / 4], [-1, 2, 3, 4, 5, 6,", "[1, 2, 3, 4, 5, 6, np.pi / 4], [1,", "2017 The TensorFlow Authors. All Rights Reserved. # # Licensed", "License. # ============================================================================ # Copyright 2021 Huawei Technologies Co., Ltd", "/ 4], [1, 2, 3, 4, 5, 6, -np.pi /", "governing permissions and # limitations under the License. # ============================================================================", "4, 5, 6, -np.pi / 4] ]) exp_flipped_boxes_3d = np.array([", "3 * np.pi / 4], [-1, 2, 3, 4, 5," ]
[ "import SQLAlchemy, Model # class BaseModel(Model): # def save(self): #", "# def save(self): # db.session.add(self) # db.session.commit(self) # def delete(self):", "# db.session.commit(self) # def delete(self): # db.session. db = SQLAlchemy()", "BaseModel(Model): # def save(self): # db.session.add(self) # db.session.commit(self) # def", "flask_sqlalchemy import SQLAlchemy, Model # class BaseModel(Model): # def save(self):", "# class BaseModel(Model): # def save(self): # db.session.add(self) # db.session.commit(self)", "class BaseModel(Model): # def save(self): # db.session.add(self) # db.session.commit(self) #", "<gh_stars>0 from flask_sqlalchemy import SQLAlchemy, Model # class BaseModel(Model): #", "Model # class BaseModel(Model): # def save(self): # db.session.add(self) #", "def save(self): # db.session.add(self) # db.session.commit(self) # def delete(self): #", "# db.session.add(self) # db.session.commit(self) # def delete(self): # db.session. db", "SQLAlchemy, Model # class BaseModel(Model): # def save(self): # db.session.add(self)", "from flask_sqlalchemy import SQLAlchemy, Model # class BaseModel(Model): # def", "db.session.add(self) # db.session.commit(self) # def delete(self): # db.session. db =", "save(self): # db.session.add(self) # db.session.commit(self) # def delete(self): # db.session." ]
[ "None, ['interface_name', 'speed']), } r, ps_info = bash_ro(\"ipmitool sdr type", "info = info.strip() ps_id = info.split(\"|\")[0].strip().split(\" \")[0] health = 10", "size', None, ['vg_name']), 'vg_avail': GaugeMetricFamily('vg_avail', 'volume group and thin pool", "conf = tmpt.render({ 'INTERVAL': cmd.interval, 'INTERFACES': interfaces, 'VERSION': cmd.version, })", "pv) == 0: multipath_wwid = bash_o(\"udevadm info -n %s |", "os.chmod(service_path, 0644) reload_and_restart_service(service_name) return if linux.read_file(service_path) != service_conf: linux.write_file(service_path, service_conf,", "NOTE(weiw): some storage can not afford frequent TUR. ref: ZSTAC-23416", "0: multipath_wwid = bash_o(\"udevadm info -n %s | grep -E", "'{print $NF}'\" % pv).strip() disks = linux.get_physical_disk(pv, False) for disk", "</Plugin> <Plugin \"interface\"> {% for i in INTERFACES -%} Interface", "prometheus_client import start_http_server from prometheus_client.core import GaugeMetricFamily, REGISTRY from kvmagent", "collect_node_disk_wwid_last_result metrics = { 'node_disk_wwid': GaugeMetricFamily('node_disk_wwid', 'node disk wwid', None,", "keys[0]) < 9: return cls.__collector_cache.get(keys[0]) return None @classmethod def __store_cache__(cls,", "metrics['zstack_used_capacity_in_bytes'].add_metric([], float(zstack_used_capacity)) return metrics.values() def collect_lvm_capacity_statistics(): metrics = { 'vg_size':", "LOG_FILE) para = jsonobject.loads(req[http.REQUEST_BODY]) rsp = kvmagent.AgentResponse() eths = bash_o(\"ls", "value %s: %s\" % (v, e)) return True return True", "except Exception as e: logger.warn(\"got exception in check value %s:", "logger = log.get_logger(__name__) collector_dict = {} # type: Dict[str, threading.Thread]", "False) metrics['vg_size'].add_metric([name], float(size)) metrics['vg_avail'].add_metric([name], float(avail)) return metrics.values() def convert_raid_state_to_int(state): \"\"\"", "group size', None, ['vg_name']), 'vg_avail': 
GaugeMetricFamily('vg_avail', 'volume group and thin", "pv_name\").strip().splitlines() for pv in pvs: multipath_wwid = None if bash_r(\"dmsetup", "is None else disk_group state = info.strip().split(\":\")[-1] metrics['physical_disk_state'].add_metric([slot_number, disk_group], convert_disk_state_to_int(state))", "\"interface\"> {% for i in INTERFACES -%} Interface \"{{i}}\" {%", "metrics.values() def collect_lvm_capacity_statistics(): metrics = { 'vg_size': GaugeMetricFamily('vg_size', 'volume group", "} r, ps_info = bash_ro(\"ipmitool sdr type 'power supply'\") #", "-LDInfo -LALL -aAll\") != 0: return metrics.values() raid_info = bash_o(\"/opt/MegaRAID/MegaCli/MegaCli64", "elif \"unconfigured\" in state: return 15 else: return 100 def", "-%} Interface \"{{i}}\" {% endfor -%} IgnoreSelected false </Plugin> <Plugin", "pid_vm.split() if len(arr) == 2: pid_vm_map[arr[0]] = arr[1] def collect(vm_pid_arr):", "start(self): http_server = kvmagent.get_http_server() http_server.register_async_uri(self.COLLECTD_PATH, self.start_prometheus_exporter) self.install_colletor() start_http_server(7069) def stop(self):", "LoadPlugin interface LoadPlugin memory LoadPlugin network LoadPlugin virt <Plugin aggregation>", "\", \"\").split(\",\") disk_group = filter(lambda x: \"DiskGroup\" in x, kvs)[0]", "metrics.values() def collect_equipment_state(): metrics = { 'power_supply': GaugeMetricFamily('power_supply', 'power supply',", "ReportByCpu true ReportByState true ValuesPercentage true </Plugin> <Plugin disk> Disk", "% service_name service_conf = ''' [Unit] Description=prometheus %s After=network.target [Service]", "collector_dict[k].is_alive(): logger.warn(\"It seems that the collector [%s] has not been", "inbound traffic errors'), 'host_network_all_out_bytes': GaugeMetricFamily('host_network_all_out_bytes', 'Host all outbound traffic in", "in bytes'), 'host_network_all_in_packages': GaugeMetricFamily('host_network_all_in_packages', 'Host all inbound traffic in packages'),", 
"latest_collect_result r = f() if not Collector.check(r): logger.warn(\"result from collector", "name PluginInstanceFormat name BlockDevice \"/:hd[a-z]/\" IgnoreSelected true ExtraStats \"vcpu memory\"", "r, pid_cpu_usages_str = bash_ro(\"top -b -n 1 -p %s |", "-F '/' '{print $NF}' | grep -v '^lvm-pv' | sort\"", "''' tmpt = Template(conf) conf = tmpt.render({ 'INTERVAL': cmd.interval, 'INTERFACES':", "and collector_dict.get(name).is_alive(): continue collector_dict[name] = thread.ThreadFacade.run_in_thread(get_result_run, (c, name,)) for i", "cpid: bash_errorout('collectdmon -- -C %s' % conf_path) else: bash_errorout('kill -TERM", "else: if not mpid: bash_errorout('collectdmon -- -C %s' % conf_path)", "path: return \"node_exporter\" elif \"pushgateway\" in path: return \"pushgateway\" def", "if not os.path.exists(dir): continue cmd = \"du -bs %s |", "u -C \\\"%s -name\\\" | awk '{print $2,$13}'\" % QEMU_CMD)", "RefreshInterval {{INTERVAL}} HostnameFormat name PluginInstanceFormat name BlockDevice \"/:hd[a-z]/\" IgnoreSelected true", "qemu | awk '{print $1,$9}'\" % vm_pid_arr_str) if r !=", "= os.listdir(\"/sys/devices/virtual/net/\") interfaces = [] for eth in all_eths: eth", "name = name.strip() size, avail = lvm.get_vg_size(name, False) metrics['vg_size'].add_metric([name], float(size))", "GaugeMetricFamily('physical_network_interface', 'physical network interface', None, ['interface_name', 'speed']), } r, ps_info", "float(avail)) return metrics.values() def convert_raid_state_to_int(state): \"\"\" :type state: str \"\"\"", "import thread from zstacklib.utils.bash import * from zstacklib.utils.ip import get_nic_supported_max_speed", "-aAll\") != 0: return metrics.values() raid_info = bash_o(\"/opt/MegaRAID/MegaCli/MegaCli64 -LDInfo -LALL", "with open(conf_path, 'w') as fd: fd.write(conf) need_restart_collectd = True else:", "'%s/../../../' % default_zstack_path, '/opt/zstack-dvd/', '/var/log/zstack', '/var/lib/mysql', '/var/lib/libvirt', '/tmp/zstack'] metrics 
=", "grep -E 'Target Id|State'\").strip().splitlines() target_id = state = \"unknown\" for", "fd.write(conf) need_restart_collectd = True cpid = linux.find_process_by_command('collectd', [conf_path]) mpid =", "info.lower() or \"lost\" in info.lower() else 0 metrics['power_supply'].add_metric([ps_id], health) metrics['ipmi_status'].add_metric([],", "[] def get_result_run(f, fname): # type: (typing.Callable, str) -> None", "!= 0: return metrics.values() raid_info = bash_o(\"/opt/MegaRAID/MegaCli/MegaCli64 -LDInfo -LALL -aAll", "collect_equipment_state(): metrics = { 'power_supply': GaugeMetricFamily('power_supply', 'power supply', None, ['ps_id']),", "0: return None if (time.time() - keys[0]) < 9: return", "= 0 all_out_errors = 0 for intf in interfaces: all_in_bytes", "bash_r(\"/opt/MegaRAID/MegaCli/MegaCli64 -LDInfo -LALL -aAll\") != 0: return metrics.values() raid_info =", "def collect_node_disk_wwid(): global collect_node_disk_wwid_last_time global collect_node_disk_wwid_last_result # NOTE(weiw): some storage", "= info.strip().split(\":\")[-1] metrics['physical_disk_state'].add_metric([slot_number, disk_group], convert_disk_state_to_int(state)) return metrics.values() def collect_equipment_state(): metrics", "true ExtraStats \"vcpu memory\" </Plugin> <Plugin network> Server \"localhost\" \"25826\"", "return 10 elif \"unconfigured\" in state: return 15 else: return", "0 for intf in interfaces: all_in_bytes += read_number(\"/sys/class/net/{}/statistics/rx_bytes\".format(intf)) all_in_packets +=", "log, binPath) if not os.path.exists(service_path): linux.write_file(service_path, service_conf, True) os.chmod(service_path, 0644)", "['slot_number', 'disk_group']), } if bash_r(\"/opt/MegaRAID/MegaCli/MegaCli64 -LDInfo -LALL -aAll\") != 0:", "path: return \"pushgateway\" def reload_and_restart_service(service_name): bash_errorout(\"systemctl daemon-reload && systemctl restart", "isinstance(v, list) or isinstance(v, tuple): for vl in v: if", "def check(cls, v): try: if v 
is None: return False", "wwids = bash_o(\"udevadm info -n %s | grep -E '^S:", "@classmethod def __store_cache__(cls, ret): # type: (list) -> None cls.__collector_cache.clear()", "100 def convert_disk_state_to_int(state): \"\"\" :type state: str \"\"\" state =", "ReportByState true ValuesPercentage true </Plugin> <Plugin disk> Disk \"/^sd[a-z]$/\" Disk", "= state = disk_group = \"unknown\" for info in disk_info:", "def convert_raid_state_to_int(state): \"\"\" :type state: str \"\"\" state = state.lower()", "float(all_in_packets)) metrics['host_network_all_in_errors'].add_metric([], float(all_in_errors)) metrics['host_network_all_out_bytes'].add_metric([], float(all_out_bytes)) metrics['host_network_all_out_packages'].add_metric([], float(all_out_packets)) metrics['host_network_all_out_errors'].add_metric([], float(all_out_errors)) return", "+ '.log') ARGUMENTS = cmd.startupArguments if not ARGUMENTS: ARGUMENTS =", "if eth == 'lo': continue if eth == 'bonding_masters': continue", "in range(0, len(pid_vm_map.keys()), n): collect(pid_vm_map.keys()[i:i + n]) return metrics.values() collect_node_disk_wwid_last_time", "collector_dict.get(name) is not None and collector_dict.get(name).is_alive(): continue collector_dict[name] = thread.ThreadFacade.run_in_thread(get_result_run,", "None, ['vg_name']), 'vg_avail': GaugeMetricFamily('vg_avail', 'volume group and thin pool free", "Exception as e: status = True speed = str(get_nic_supported_max_speed(nic)) metrics['physical_network_interface'].add_metric([nic,", "ret): # type: (list) -> None cls.__collector_cache.clear() cls.__collector_cache.update({time.time(): ret}) @classmethod", "all outbound traffic errors'), } metrics['host_network_all_in_bytes'].add_metric([], float(all_in_bytes)) metrics['host_network_all_in_packages'].add_metric([], float(all_in_packets)) metrics['host_network_all_in_errors'].add_metric([],", "% conf_path) else: bash_errorout('kill -TERM %s' % cpid) if need_restart_collectd:", "[] for eth in all_eths: 
eth = eth.strip(' \\t\\n\\r') if", "| awk '{print $1,$9}'\" % vm_pid_arr_str) if r != 0", "as fd: fd.write(conf) need_restart_collectd = True else: with open(conf_path, 'w')", "info in raid_info: if \"Target Id\" in info: target_id =", "elif not eth: continue else: interfaces.append(eth) all_in_bytes = 0 all_in_packets", "\"unknown\" for info in raid_info: if \"Target Id\" in info:", "--noheading -oname\") if r != 0 or len(o.splitlines()) == 0:", "isinstance(v, tuple): for vl in v: if Collector.check(vl) is False:", "type: Dict[str, threading.Thread] latest_collect_result = {} collectResultLock = threading.RLock() QEMU_CMD", "if isinstance(v, GaugeMetricFamily): return Collector.check(v.samples) if isinstance(v, list) or isinstance(v,", "collect_lvm_capacity_statistics(): metrics = { 'vg_size': GaugeMetricFamily('vg_size', 'volume group size', None,", "-PDList -aAll | grep -E 'Slot Number|DiskGroup|Firmware state|Drive Temperature'\").strip().splitlines() slot_number", "info in disk_info: if \"Slot Number\" in info: slot_number =", "LOG_FILE = os.path.join(os.path.dirname(EXPORTER_PATH), cmd.binaryPath + '.log') ARGUMENTS = cmd.startupArguments if", "return \"node_exporter\" elif \"pushgateway\" in path: return \"pushgateway\" def reload_and_restart_service(service_name):", "!= conf: with open(conf_path, 'w') as fd: fd.write(conf) need_restart_collectd =", "need_restart_collectd = True cpid = linux.find_process_by_command('collectd', [conf_path]) mpid = linux.find_process_by_command('collectdmon',", "% vm_pid_arr_str) if r != 0 or len(pid_cpu_usages_str.splitlines()) == 0:", "== 0: return metrics.values() vg_names = o.splitlines() for name in", "GaugeMetricFamily('physical_disk_temperature', 'physical disk temperature', None, ['slot_number', 'disk_group']), } if bash_r(\"/opt/MegaRAID/MegaCli/MegaCli64", "aggregation> <Aggregation> #Host \"unspecified\" Plugin \"cpu\" #PluginInstance \"unspecified\" Type \"cpu\"", "return None if (time.time() - keys[0]) < 9: return 
cls.__collector_cache.get(keys[0])", "int(res) metrics['zstack_used_capacity_in_bytes'].add_metric([], float(zstack_used_capacity)) return metrics.values() def collect_lvm_capacity_statistics(): metrics = {", "collector %s contains illegal character None, details: \\n%s\" % (fname,", "in kvmagent.metric_collectors: name = \"%s.%s\" % (c.__module__, c.__name__) if collector_dict.get(name)", "eth in eths: eth = eth.strip(' \\t\\n\\r') if eth ==", "[] for eth in eths: eth = eth.strip(' \\t\\n\\r') if", "\"lost\" in info.lower() else 0 metrics['power_supply'].add_metric([ps_id], health) metrics['ipmi_status'].add_metric([], bash_r(\"ipmitool mc", "not Collector.check(r): logger.warn(\"result from collector %s contains illegal character None,", "\"online\" in state or \"jobd\" in state: return 0 elif", "free size', None, ['vg_name']), } r = bash_r(\"grep -Ev '^[[:space:]]*#|^[[:space:]]*$'", "def reload_and_restart_service(service_name): bash_errorout(\"systemctl daemon-reload && systemctl restart %s.service\" % service_name)", "calculation result.\" % k) for v in latest_collect_result.itervalues(): ret.extend(v) Collector.__store_cache__(ret)", "pid_cpu_usages_str.splitlines(): arr = pid_cpu_usage.split() pid = arr[0] vm_uuid = pid_vm_map[pid]", "'Host all inbound traffic in bytes'), 'host_network_all_in_packages': GaugeMetricFamily('host_network_all_in_packages', 'Host all", "or len(pid_cpu_usages_str.splitlines()) == 0: return for pid_cpu_usage in pid_cpu_usages_str.splitlines(): arr", "binPath) if not os.path.exists(service_path): linux.write_file(service_path, service_conf, True) os.chmod(service_path, 0644) reload_and_restart_service(service_name)", "of conf changes, for ZSTAC-23539 reload_and_restart_service(service_name) @lock.file_lock(\"/run/collectd-conf.lock\", locker=lock.Flock()) def start_collectd_exporter(cmd):", "+ n]) return metrics.values() collect_node_disk_wwid_last_time = None collect_node_disk_wwid_last_result = None", "= get_systemd_name(binPath) 
service_path = '/etc/systemd/system/%s.service' % service_name service_conf = '''", "= linux.read_file(\"/sys/class/net/%s/carrier\" % nic) == 1 except Exception as e:", "100 def collect_raid_state(): metrics = { 'raid_state': GaugeMetricFamily('raid_state', 'raid state',", "{} collectResultLock = threading.RLock() QEMU_CMD = kvmagent.get_qemu_path().split(\"/\")[-1] def read_number(fname): res", "5 elif \"failed\" in state: return 10 elif \"unconfigured\" in", "elif eth.startswith('vnic'): continue elif eth.startswith('outer'): continue elif eth.startswith('br_'): continue elif", "float(all_in_errors)) metrics['host_network_all_out_bytes'].add_metric([], float(all_out_bytes)) metrics['host_network_all_out_packages'].add_metric([], float(all_out_packets)) metrics['host_network_all_out_errors'].add_metric([], float(all_out_errors)) return metrics.values() def", "'/var/lib/libvirt', '/tmp/zstack'] metrics = { 'zstack_used_capacity_in_bytes': GaugeMetricFamily('zstack_used_capacity_in_bytes', 'ZStack used capacity", "+= read_number(\"/sys/class/net/{}/statistics/rx_packets\".format(intf)) all_in_errors += read_number(\"/sys/class/net/{}/statistics/rx_errors\".format(intf)) all_out_bytes += read_number(\"/sys/class/net/{}/statistics/tx_bytes\".format(intf)) all_out_packets +=", "After=network.target [Service] ExecStart=/bin/sh -c '%s %s > %s 2>&1' ExecStop=/bin/sh", "metrics['physical_disk_state'].add_metric([slot_number, disk_group], convert_disk_state_to_int(state)) return metrics.values() def collect_equipment_state(): metrics = {", "%s 2>&1' ExecStop=/bin/sh -c 'pkill -TERM -f %s' Restart=always RestartSec=30s", "0: metrics['node_disk_wwid'].add_metric([disk_name, \";\".join([w.strip() for w in wwids])], 1) collect_node_disk_wwid_last_result =", "metrics = { 'vg_size': GaugeMetricFamily('vg_size', 'volume group size', None, ['vg_name']),", "(c, name,)) for i in range(7): for t in collector_dict.values():", "for i in INTERFACES -%} Interface \"{{i}}\" {% endfor 
-%}", "awk '{print $1,$9}'\" % vm_pid_arr_str) if r != 0 or", "tmpt = Template(conf) conf = tmpt.render({ 'INTERVAL': cmd.interval, 'INTERFACES': interfaces,", "@lock.file_lock(\"/run/collectd-conf.lock\", locker=lock.Flock()) def start_collectd_exporter(cmd): start_collectd(cmd) start_exporter(cmd) @in_bash def start_exporter(cmd): EXPORTER_PATH", "None: wwids.append(multipath_wwid) if len(wwids) > 0: metrics['node_disk_wwid'].add_metric([disk_name, \";\".join([w.strip() for w", "return metrics.values() def collect_equipment_state(): metrics = { 'power_supply': GaugeMetricFamily('power_supply', 'power", "state or \"jobd\" in state: return 0 elif \"rebuild\" in", "collect_vm_statistics(): metrics = { 'cpu_occupied_by_vm': GaugeMetricFamily('cpu_occupied_by_vm', 'Percentage of CPU used", ":type state: str \"\"\" state = state.lower() if \"online\" in", "ref: ZSTAC-23416 if collect_node_disk_wwid_last_time is None: collect_node_disk_wwid_last_time = time.time() elif", "= { 'host_network_all_in_bytes': GaugeMetricFamily('host_network_all_in_bytes', 'Host all inbound traffic in bytes'),", "else: state = info.strip().split(\" \")[-1] metrics['raid_state'].add_metric([target_id], convert_raid_state_to_int(state)) disk_info = bash_o(", "in bytes'), 'host_network_all_out_packages': GaugeMetricFamily('host_network_all_out_packages', 'Host all outbound traffic in packages'),", "zstack_env_path = os.environ.get('ZSTACK_HOME', None) if zstack_env_path and zstack_env_path != default_zstack_path:", "return 5 else: return 100 def convert_disk_state_to_int(state): \"\"\" :type state:", "range(0, len(pid_vm_map.keys()), n): collect(pid_vm_map.keys()[i:i + n]) return metrics.values() collect_node_disk_wwid_last_time =", "all_out_bytes = 0 all_out_packets = 0 all_out_errors = 0 for", "'volume group and thin pool free size', None, ['vg_name']), }", "continue for k in collector_dict.iterkeys(): if collector_dict[k].is_alive(): logger.warn(\"It seems that", "bash_errorout('kill -TERM 
%s' % cpid) if need_restart_collectd: if not mpid:", "\"vcpu memory\" </Plugin> <Plugin network> Server \"localhost\" \"25826\" </Plugin> '''", "CalculateMaximum false CalculateStddev false </Aggregation> </Plugin> <Plugin cpu> ReportByCpu true", "\"rebuild\" in state: return 5 elif \"failed\" in state: return", "import os.path import threading import typing from prometheus_client import start_http_server", "[Service] ExecStart=/bin/sh -c '%s %s > %s 2>&1' ExecStop=/bin/sh -c", "= disk_group.split(\" \")[-1] elif \"Drive Temperature\" in info: temp =", "= info.split(\":\")[1].split(\"C\")[0] metrics['physical_disk_temperature'].add_metric([slot_number, disk_group], int(temp)) else: disk_group = \"JBOD\" if", "'Host all outbound traffic in packages'), 'host_network_all_out_errors': GaugeMetricFamily('host_network_all_out_errors', 'Host all", "return True return True def collect(self): global latest_collect_result ret =", "ARGUMENTS: ARGUMENTS = \"\" os.chmod(EXPORTER_PATH, 0o755) run_in_systemd(EXPORTER_PATH, ARGUMENTS, LOG_FILE) para", "kvmagent import kvmagent from zstacklib.utils import http from zstacklib.utils import", "'lo': continue if eth == 'bonding_masters': continue elif eth.startswith('vnic'): continue", "is None: collect_node_disk_wwid_last_time = time.time() elif time.time() - collect_node_disk_wwid_last_time <", "check(cls, v): try: if v is None: return False if", "in vg_names: name = name.strip() size, avail = lvm.get_vg_size(name, False)", "0 or len(o.splitlines()) == 0: return metrics.values() vg_names = o.splitlines()", "GaugeMetricFamily('host_network_all_in_bytes', 'Host all inbound traffic in bytes'), 'host_network_all_in_packages': GaugeMetricFamily('host_network_all_in_packages', 'Host", "} r, pid_vm_map_str = bash_ro(\"ps --no-headers u -C \\\"%s -name\\\"", "else log, binPath) if not os.path.exists(service_path): linux.write_file(service_path, service_conf, True) os.chmod(service_path,", "PrometheusPlugin(kvmagent.KvmAgent): COLLECTD_PATH = 
\"/prometheus/collectdexporter/start\" @kvmagent.replyerror @in_bash def start_prometheus_exporter(self, req): @in_bash", "file but can not read status = linux.read_file(\"/sys/class/net/%s/carrier\" % nic)", "pid_cpu_usage in pid_cpu_usages_str.splitlines(): arr = pid_cpu_usage.split() pid = arr[0] vm_uuid", "mpid: bash_errorout('collectdmon -- -C %s' % conf_path) def run_in_systemd(binPath, args,", "','.join(vm_pid_arr) r, pid_cpu_usages_str = bash_ro(\"top -b -n 1 -p %s", "= ','.join(vm_pid_arr) r, pid_cpu_usages_str = bash_ro(\"top -b -n 1 -p", "%s' % mpid) else: if not mpid: bash_errorout('collectdmon -- -C", "info\")) nics = bash_o(\"find /sys/class/net -type l -not -lname '*virtual*'", "#TypeInstance \"unspecified\" GroupBy \"Host\" GroupBy \"TypeInstance\" CalculateNum false CalculateSum false", "as e: status = True speed = str(get_nic_supported_max_speed(nic)) metrics['physical_network_interface'].add_metric([nic, speed],", "if not os.path.exists(service_path): linux.write_file(service_path, service_conf, True) os.chmod(service_path, 0644) reload_and_restart_service(service_name) return", "0 all_in_errors = 0 all_out_bytes = 0 all_out_packets = 0", "+= int(res) metrics['zstack_used_capacity_in_bytes'].add_metric([], float(zstack_used_capacity)) return metrics.values() def collect_lvm_capacity_statistics(): metrics =", "'host_network_all_in_bytes': GaugeMetricFamily('host_network_all_in_bytes', 'Host all inbound traffic in bytes'), 'host_network_all_in_packages': GaugeMetricFamily('host_network_all_in_packages',", "in packages'), 'host_network_all_in_errors': GaugeMetricFamily('host_network_all_in_errors', 'Host all inbound traffic errors'), 'host_network_all_out_bytes':", "= linux.read_file(fname) return 0 if not res else int(res) def", "collect_node_disk_wwid_last_time global collect_node_disk_wwid_last_result # NOTE(weiw): some storage can not afford", "pvs: multipath_wwid = None if bash_r(\"dmsetup table %s | grep", "= 
jsonobject.loads(req[http.REQUEST_BODY]) rsp = kvmagent.AgentResponse() eths = bash_o(\"ls /sys/class/net\").split() interfaces", "linux.set_fail_if_no_path() r, o, e = bash_roe(\"vgs --nolocking --noheading -oname\") if", "= \"\" os.chmod(EXPORTER_PATH, 0o755) run_in_systemd(EXPORTER_PATH, ARGUMENTS, LOG_FILE) para = jsonobject.loads(req[http.REQUEST_BODY])", "import http from zstacklib.utils import jsonobject from zstacklib.utils import lock", "keys = cls.__collector_cache.keys() if keys is None or len(keys) ==", "# NOTE(weiw): some storage can not afford frequent TUR. ref:", "packages'), 'host_network_all_in_errors': GaugeMetricFamily('host_network_all_in_errors', 'Host all inbound traffic errors'), 'host_network_all_out_bytes': GaugeMetricFamily('host_network_all_out_bytes',", "= state.lower() if \"online\" in state or \"jobd\" in state:", "info.strip() ps_id = info.split(\"|\")[0].strip().split(\" \")[0] health = 10 if \"fail\"", "None collect_node_disk_wwid_last_result = None def collect_node_disk_wwid(): global collect_node_disk_wwid_last_time global collect_node_disk_wwid_last_result", "sriov nic contains carrier file but can not read status", "dir in zstack_dir: if not os.path.exists(dir): continue cmd = \"du", "ps_info = bash_ro(\"ipmitool sdr type 'power supply'\") # type: (int,", "sdr type 'power supply'\") # type: (int, str) if r", "return cls.__collector_cache.get(keys[0]) return None @classmethod def __store_cache__(cls, ret): # type:", "bash_r(\"dmsetup table %s | grep multipath\" % pv) == 0:", "ZSTAC-23539 reload_and_restart_service(service_name) @lock.file_lock(\"/run/collectd-conf.lock\", locker=lock.Flock()) def start_collectd_exporter(cmd): start_collectd(cmd) start_exporter(cmd) @in_bash def", "bash_errorout('collectdmon -- -C %s' % conf_path) else: bash_errorout('kill -TERM %s'", "disk in disks: disk_name = disk.split(\"/\")[-1].strip() wwids = bash_o(\"udevadm info", "elif not eth: continue else: interfaces.append(eth) for cmd in 
para.cmds:", "disk_group.split(\" \")[-1] elif \"Drive Temperature\" in info: temp = info.split(\":\")[1].split(\"C\")[0]", "state: return 10 elif \"unconfigured\" in state: return 15 else:", "\"unspecified\" GroupBy \"Host\" GroupBy \"TypeInstance\" CalculateNum false CalculateSum false CalculateAverage", "= info.replace(\"Drive's position: \", \"\").split(\",\") disk_group = filter(lambda x: \"DiskGroup\"", "LoadPlugin aggregation LoadPlugin cpu LoadPlugin disk LoadPlugin interface LoadPlugin memory", "used capacity in bytes') } zstack_used_capacity = 0 for dir", "or len(keys) == 0: return None if (time.time() - keys[0])", "[Unit] Description=prometheus %s After=network.target [Service] ExecStart=/bin/sh -c '%s %s >", "metrics['vg_size'].add_metric([name], float(size)) metrics['vg_avail'].add_metric([name], float(avail)) return metrics.values() def convert_raid_state_to_int(state): \"\"\" :type", "not eth: continue else: interfaces.append(eth) all_in_bytes = 0 all_in_packets =", "arr = pid_cpu_usage.split() pid = arr[0] vm_uuid = pid_vm_map[pid] cpu_usage", "in info: kvs = info.replace(\"Drive's position: \", \"\").split(\",\") disk_group =", "GroupBy \"Host\" GroupBy \"TypeInstance\" CalculateNum false CalculateSum false CalculateAverage true", "= cmd.binaryPath LOG_FILE = os.path.join(os.path.dirname(EXPORTER_PATH), cmd.binaryPath + '.log') ARGUMENTS =", "interfaces = [] for eth in eths: eth = eth.strip('", "if misc.isMiniHost(): kvmagent.register_prometheus_collector(collect_lvm_capacity_statistics) kvmagent.register_prometheus_collector(collect_raid_state) kvmagent.register_prometheus_collector(collect_equipment_state) class PrometheusPlugin(kvmagent.KvmAgent): COLLECTD_PATH = \"/prometheus/collectdexporter/start\"", "\"unknown\" and info.count(\"JBOD\") > 0 else disk_group disk_group = \"unknown\"", "type: () -> list keys = cls.__collector_cache.keys() if keys is", "for disk in disks: disk_name = disk.split(\"/\")[-1].strip() wwids = bash_o(\"udevadm", 
"old_conf = fd.read() if old_conf != conf: with open(conf_path, 'w')", "2>&1' ExecStop=/bin/sh -c 'pkill -TERM -f %s' Restart=always RestartSec=30s [Install]", "Connection \"qemu:///system\" RefreshInterval {{INTERVAL}} HostnameFormat name PluginInstanceFormat name BlockDevice \"/:hd[a-z]/\"", "capacity in bytes') } zstack_used_capacity = 0 for dir in", "service_name) service_name = get_systemd_name(binPath) service_path = '/etc/systemd/system/%s.service' % service_name service_conf", "} metrics['host_network_all_in_bytes'].add_metric([], float(all_in_bytes)) metrics['host_network_all_in_packages'].add_metric([], float(all_in_packets)) metrics['host_network_all_in_errors'].add_metric([], float(all_in_errors)) metrics['host_network_all_out_bytes'].add_metric([], float(all_out_bytes)) metrics['host_network_all_out_packages'].add_metric([],", "'{print $2,$13}'\" % QEMU_CMD) if r != 0 or len(pid_vm_map_str.splitlines())", "> %s 2>&1' ExecStop=/bin/sh -c 'pkill -TERM -f %s' Restart=always", "} pvs = bash_o(\"pvs --nolocking --noheading -o pv_name\").strip().splitlines() for pv", "\"du -bs %s | awk {\\'print $1\\'}\" % dir res", "% dir res = bash_o(cmd) zstack_used_capacity += int(res) metrics['zstack_used_capacity_in_bytes'].add_metric([], float(zstack_used_capacity))", "disk temperature', None, ['slot_number', 'disk_group']), } if bash_r(\"/opt/MegaRAID/MegaCli/MegaCli64 -LDInfo -LALL", "if not Collector.check(r): logger.warn(\"result from collector %s contains illegal character", "get_systemd_name(path): if \"collectd_exporter\" in path: return \"collectd_exporter\" elif \"node_exporter\" in", "collect_host_capacity_statistics(): default_zstack_path = '/usr/local/zstack/apache-tomcat/webapps/zstack' zstack_env_path = os.environ.get('ZSTACK_HOME', None) if zstack_env_path", "in path: return \"collectd_exporter\" elif \"node_exporter\" in path: return \"node_exporter\"", "state.lower() if \"online\" in state or \"jobd\" in state: return", "errors'), } 
metrics['host_network_all_in_bytes'].add_metric([], float(all_in_bytes)) metrics['host_network_all_in_packages'].add_metric([], float(all_in_packets)) metrics['host_network_all_in_errors'].add_metric([], float(all_in_errors)) metrics['host_network_all_out_bytes'].add_metric([], float(all_out_bytes))", "w in wwids])], 1) collect_node_disk_wwid_last_result = metrics.values() return metrics.values() kvmagent.register_prometheus_collector(collect_host_network_statistics)", "len(nics) != 0: for nic in nics: nic = nic.strip()", "virt> Connection \"qemu:///system\" RefreshInterval {{INTERVAL}} HostnameFormat name PluginInstanceFormat name BlockDevice", "= bash_ro(\"ps --no-headers u -C \\\"%s -name\\\" | awk '{print", "float(all_in_bytes)) metrics['host_network_all_in_packages'].add_metric([], float(all_in_packets)) metrics['host_network_all_in_errors'].add_metric([], float(all_in_errors)) metrics['host_network_all_out_bytes'].add_metric([], float(all_out_bytes)) metrics['host_network_all_out_packages'].add_metric([], float(all_out_packets)) metrics['host_network_all_out_errors'].add_metric([],", "old_conf != conf: with open(conf_path, 'w') as fd: fd.write(conf) need_restart_collectd", "%s | awk {\\'print $1\\'}\" % dir res = bash_o(cmd)", "for vl in v: if Collector.check(vl) is False: return False", "'physical disk state', None, ['slot_number', 'disk_group']), 'physical_disk_temperature': GaugeMetricFamily('physical_disk_temperature', 'physical disk", "@classmethod def check(cls, v): try: if v is None: return", "outbound traffic errors'), } metrics['host_network_all_in_bytes'].add_metric([], float(all_in_bytes)) metrics['host_network_all_in_packages'].add_metric([], float(all_in_packets)) metrics['host_network_all_in_errors'].add_metric([], float(all_in_errors))", "None, ['slot_number', 'disk_group']), 'physical_disk_temperature': GaugeMetricFamily('physical_disk_temperature', 'physical disk temperature', None, ['slot_number',", "eths: eth = eth.strip(' \\t\\n\\r') if 
eth == 'lo': continue", "metrics['physical_network_interface'].add_metric([nic, speed], status) return metrics.values() def collect_vm_statistics(): metrics = {", "= \"du -bs %s | awk {\\'print $1\\'}\" % dir", "None or len(keys) == 0: return None if (time.time() -", "def start_prometheus_exporter(self, req): @in_bash def start_collectd(cmd): conf_path = os.path.join(os.path.dirname(cmd.binaryPath), 'collectd.conf')", "| awk '{print $2,$13}'\" % QEMU_CMD) if r != 0", "%s\" % (v, e)) return True return True def collect(self):", "in disks: disk_name = disk.split(\"/\")[-1].strip() wwids = bash_o(\"udevadm info -n", "Id|State'\").strip().splitlines() target_id = state = \"unknown\" for info in raid_info:", "<Plugin network> Server \"localhost\" \"25826\" </Plugin> ''' tmpt = Template(conf)", "seems that the collector [%s] has not been completed yet,\"", "\"\") '''pid_vm_map_str samples: 38149 e8e6f27bfb2d47e08c59cbea1d0488c3 38232 afa02edca7eb4afcb5d2904ac1216eb1 ''' pid_vm_map =", "'Host all outbound traffic in bytes'), 'host_network_all_out_packages': GaugeMetricFamily('host_network_all_out_packages', 'Host all", "path: return \"collectd_exporter\" elif \"node_exporter\" in path: return \"node_exporter\" elif", "service_conf, True) logger.info(\"%s.service conf changed\" % service_name) os.chmod(service_path, 0644) #", "= state.lower() if state == \"optimal\": return 0 elif state", "state = state.lower() if \"online\" in state or \"jobd\" in", "= pid_cpu_usage.split() pid = arr[0] vm_uuid = pid_vm_map[pid] cpu_usage =", "bash_errorout('collectdmon -- -C %s' % conf_path) def run_in_systemd(binPath, args, log):", "zstacklib.utils import thread from zstacklib.utils.bash import * from zstacklib.utils.ip import", "if collect_node_disk_wwid_last_time is None: collect_node_disk_wwid_last_time = time.time() elif time.time() -", "is None or len(keys) == 0: return None if (time.time()", "for vk in v.iterkeys(): if vk == \"timestamp\" or vk", "float(zstack_used_capacity)) 
return metrics.values() def collect_lvm_capacity_statistics(): metrics = { 'vg_size': GaugeMetricFamily('vg_size',", "0 all_out_packets = 0 all_out_errors = 0 for intf in", "bash_errorout('collectdmon -- -C %s' % conf_path) else: bash_errorout('kill -HUP %s'", "return with collectResultLock: latest_collect_result[fname] = r cache = Collector.__get_cache__() if", "sort\" % disk).strip().splitlines() if multipath_wwid is not None: wwids.append(multipath_wwid) if", "= linux.find_process_by_command('collectdmon', [conf_path]) if not cpid: bash_errorout('collectdmon -- -C %s'", "\")[-1] elif \"Drive Temperature\" in info: temp = info.split(\":\")[1].split(\"C\")[0] metrics['physical_disk_temperature'].add_metric([slot_number,", "disk/by-id' | awk -F '/' '{print $NF}' | grep -v", "\")[-1] else: state = info.strip().split(\" \")[-1] metrics['raid_state'].add_metric([target_id], convert_raid_state_to_int(state)) disk_info =", "-LDInfo -LALL -aAll | grep -E 'Target Id|State'\").strip().splitlines() target_id =", "\"cpu\" #PluginInstance \"unspecified\" Type \"cpu\" #TypeInstance \"unspecified\" GroupBy \"Host\" GroupBy", "service_name = get_systemd_name(binPath) service_path = '/etc/systemd/system/%s.service' % service_name service_conf =", "if need_restart_collectd: if not mpid: bash_errorout('collectdmon -- -C %s' %", "in state: return 15 else: return 100 def collect_raid_state(): metrics", "-> None global collectResultLock global latest_collect_result r = f() if", "= { 'vg_size': GaugeMetricFamily('vg_size', 'volume group size', None, ['vg_name']), 'vg_avail':", "in range(7): for t in collector_dict.values(): if t.is_alive(): time.sleep(0.5) continue", "= eth.strip(' \\t\\n\\r') if eth in virtual_eths: continue if eth", "log): def get_systemd_name(path): if \"collectd_exporter\" in path: return \"collectd_exporter\" elif", "= \"unknown\" for info in disk_info: if \"Slot Number\" in", "type: (typing.Callable, str) -> None global collectResultLock global 
latest_collect_result r", "disk_group], int(temp)) else: disk_group = \"JBOD\" if disk_group == \"unknown\"", "tmpt.render({ 'INTERVAL': cmd.interval, 'INTERFACES': interfaces, 'VERSION': cmd.version, }) need_restart_collectd =", "Restart=always RestartSec=30s [Install] WantedBy=multi-user.target ''' % (service_name, binPath, args, '/dev/null'", "import threading import typing from prometheus_client import start_http_server from prometheus_client.core", "vl in v: if Collector.check(vl) is False: return False if", "\"collectd_exporter\" elif \"node_exporter\" in path: return \"node_exporter\" elif \"pushgateway\" in", "conf_path) else: bash_errorout('kill -TERM %s' % cpid) if need_restart_collectd: if", "= info.strip().split(\" \")[-1] metrics['raid_state'].add_metric([target_id], convert_raid_state_to_int(state)) disk_info = bash_o( \"/opt/MegaRAID/MegaCli/MegaCli64 -PDList", "bash_errorout('kill -HUP %s' % mpid) else: if not mpid: bash_errorout('collectdmon", "state: return 5 elif \"failed\" in state: return 10 elif", "vm_uuid = pid_vm_map[pid] cpu_usage = arr[1] metrics['cpu_occupied_by_vm'].add_metric([vm_uuid], float(cpu_usage)) n =", "True return True def collect(self): global latest_collect_result ret = []", "\")[-1] elif \"DiskGroup\" in info: kvs = info.replace(\"Drive's position: \",", "2: pid_vm_map[arr[0]] = arr[1] def collect(vm_pid_arr): vm_pid_arr_str = ','.join(vm_pid_arr) r,", "multipath_wwid = bash_o(\"udevadm info -n %s | grep -E '^S:", "if cache is not None: return cache for c in", "'*virtual*' -printf '%f\\\\n'\").splitlines() if len(nics) != 0: for nic in", "'Target Id|State'\").strip().splitlines() target_id = state = \"unknown\" for info in", "false CalculateStddev false </Aggregation> </Plugin> <Plugin cpu> ReportByCpu true ReportByState", "or \"lost\" in info.lower() else 0 metrics['power_supply'].add_metric([ps_id], health) metrics['ipmi_status'].add_metric([], bash_r(\"ipmitool", "0: for nic in nics: nic = nic.strip() try: #", "traffic in 
packages'), 'host_network_all_in_errors': GaugeMetricFamily('host_network_all_in_errors', 'Host all inbound traffic errors'),", "table %s | grep multipath\" % pv) == 0: multipath_wwid", "| grep -E 'Target Id|State'\").strip().splitlines() target_id = state = \"unknown\"", "Collector.check(vl) is False: return False if isinstance(v, dict): for vk", "convert_disk_state_to_int(state): \"\"\" :type state: str \"\"\" state = state.lower() if", "from zstacklib.utils import jsonobject from zstacklib.utils import lock from zstacklib.utils", "<Aggregation> #Host \"unspecified\" Plugin \"cpu\" #PluginInstance \"unspecified\" Type \"cpu\" #TypeInstance", "''' [Unit] Description=prometheus %s After=network.target [Service] ExecStart=/bin/sh -c '%s %s", "binPath, args, '/dev/null' if log.endswith('/pushgateway.log') else log, binPath) if not", "if eth in virtual_eths: continue if eth == 'bonding_masters': continue", "and collect_node_disk_wwid_last_result is not None: return collect_node_disk_wwid_last_result metrics = {", "-aAll | grep -E 'Target Id|State'\").strip().splitlines() target_id = state =", "x: \"DiskGroup\" in x, kvs)[0] disk_group = disk_group.split(\" \")[-1] elif", "\"pushgateway\" in path: return \"pushgateway\" def reload_and_restart_service(service_name): bash_errorout(\"systemctl daemon-reload &&", "} zstack_used_capacity = 0 for dir in zstack_dir: if not", "'^lvm-pv' | sort\" % disk).strip().splitlines() if multipath_wwid is not None:", "pid = arr[0] vm_uuid = pid_vm_map[pid] cpu_usage = arr[1] metrics['cpu_occupied_by_vm'].add_metric([vm_uuid],", "pid_vm_map_str = bash_ro(\"ps --no-headers u -C \\\"%s -name\\\" | awk", "% (service_name, binPath, args, '/dev/null' if log.endswith('/pushgateway.log') else log, binPath)", "cache for c in kvmagent.metric_collectors: name = \"%s.%s\" % (c.__module__,", "else: start_exporter(cmd) return jsonobject.dumps(rsp) def install_colletor(self): class Collector(object): __collector_cache =", "type: (int, str) if r == 0: 
for info in", "vm_pid_arr_str = ','.join(vm_pid_arr) r, pid_cpu_usages_str = bash_ro(\"top -b -n 1", "linux.read_file(\"/sys/class/net/%s/carrier\" % nic) == 1 except Exception as e: status", "read_number(\"/sys/class/net/{}/statistics/tx_bytes\".format(intf)) all_out_packets += read_number(\"/sys/class/net/{}/statistics/tx_packets\".format(intf)) all_out_errors += read_number(\"/sys/class/net/{}/statistics/tx_errors\".format(intf)) metrics = {", "not None: return collect_node_disk_wwid_last_result metrics = { 'node_disk_wwid': GaugeMetricFamily('node_disk_wwid', 'node", "name.strip() size, avail = lvm.get_vg_size(name, False) metrics['vg_size'].add_metric([name], float(size)) metrics['vg_avail'].add_metric([name], float(avail))", "state.lower() if state == \"optimal\": return 0 elif state ==", "= [] def get_result_run(f, fname): # type: (typing.Callable, str) ->", "== \"degraded\": return 5 else: return 100 def convert_disk_state_to_int(state): \"\"\"", "bash_o(\"/opt/MegaRAID/MegaCli/MegaCli64 -LDInfo -LALL -aAll | grep -E 'Target Id|State'\").strip().splitlines() target_id", "conf_path) def run_in_systemd(binPath, args, log): def get_systemd_name(path): if \"collectd_exporter\" in", "= bash_o(\"ls /sys/class/net\").split() interfaces = [] for eth in eths:", "collector_dict.get(name).is_alive(): continue collector_dict[name] = thread.ThreadFacade.run_in_thread(get_result_run, (c, name,)) for i in", "yet,\" \" temporarily use the last calculation result.\" % k)", "arr[1] metrics['cpu_occupied_by_vm'].add_metric([vm_uuid], float(cpu_usage)) n = 10 for i in range(0,", "grep multipath\" % pv) == 0: multipath_wwid = bash_o(\"udevadm info", "in state: return 5 elif \"failed\" in state: return 10", "not None: wwids.append(multipath_wwid) if len(wwids) > 0: metrics['node_disk_wwid'].add_metric([disk_name, \";\".join([w.strip() for", "= '/usr/local/zstack/apache-tomcat/webapps/zstack' zstack_env_path = os.environ.get('ZSTACK_HOME', None) if zstack_env_path and 
zstack_env_path", "if \"collectd_exporter\" in cmd.binaryPath: start_collectd_exporter(cmd) else: start_exporter(cmd) return jsonobject.dumps(rsp) def", "network interface', None, ['interface_name', 'speed']), } r, ps_info = bash_ro(\"ipmitool", "bytes'), 'host_network_all_out_packages': GaugeMetricFamily('host_network_all_out_packages', 'Host all outbound traffic in packages'), 'host_network_all_out_errors':", "\"DiskGroup\" in x, kvs)[0] disk_group = disk_group.split(\" \")[-1] elif \"Drive", "FQDNLookup false LoadPlugin syslog LoadPlugin aggregation LoadPlugin cpu LoadPlugin disk", "= bash_o(\"udevadm info -n %s | grep -E '^S: disk/by-id/dm-uuid'", "elif \"Drive Temperature\" in info: temp = info.split(\":\")[1].split(\"C\")[0] metrics['physical_disk_temperature'].add_metric([slot_number, disk_group],", "for name in vg_names: name = name.strip() size, avail =", "list keys = cls.__collector_cache.keys() if keys is None or len(keys)", "of CPU used by vm', None, ['vmUuid']) } r, pid_vm_map_str", "info.strip().split(\" \")[-1] metrics['raid_state'].add_metric([target_id], convert_raid_state_to_int(state)) disk_info = bash_o( \"/opt/MegaRAID/MegaCli/MegaCli64 -PDList -aAll", "zstacklib.utils import lvm from zstacklib.utils import misc from zstacklib.utils import", "is not None: return cache for c in kvmagent.metric_collectors: name", "info.replace(\"Drive's position: \", \"\").split(\",\") disk_group = filter(lambda x: \"DiskGroup\" in", "if t.is_alive(): time.sleep(0.5) continue for k in collector_dict.iterkeys(): if collector_dict[k].is_alive():", "(service_name, binPath, args, '/dev/null' if log.endswith('/pushgateway.log') else log, binPath) if", "# type: (list) -> None cls.__collector_cache.clear() cls.__collector_cache.update({time.time(): ret}) @classmethod def", "str(get_nic_supported_max_speed(nic)) metrics['physical_network_interface'].add_metric([nic, speed], status) return metrics.values() def collect_vm_statistics(): metrics =", 
"Collector.__store_cache__(ret) return ret REGISTRY.register(Collector()) def start(self): http_server = kvmagent.get_http_server() http_server.register_async_uri(self.COLLECTD_PATH,", "disk_group], convert_disk_state_to_int(state)) return metrics.values() def collect_equipment_state(): metrics = { 'power_supply':", "reload_and_restart_service(service_name) @lock.file_lock(\"/run/collectd-conf.lock\", locker=lock.Flock()) def start_collectd_exporter(cmd): start_collectd(cmd) start_exporter(cmd) @in_bash def start_exporter(cmd):", "cache is not None: return cache for c in kvmagent.metric_collectors:", "'r') as fd: old_conf = fd.read() if old_conf != conf:", "v.iterkeys(): if vk == \"timestamp\" or vk == \"exemplar\": continue", "float(all_out_errors)) return metrics.values() def collect_host_capacity_statistics(): default_zstack_path = '/usr/local/zstack/apache-tomcat/webapps/zstack' zstack_env_path =", "len(pid_cpu_usages_str.splitlines()) == 0: return for pid_cpu_usage in pid_cpu_usages_str.splitlines(): arr =", "= True else: with open(conf_path, 'w') as fd: fd.write(conf) need_restart_collectd", "logger.warn(\"result from collector %s contains illegal character None, details: \\n%s\"", "\"qemu:///system\" RefreshInterval {{INTERVAL}} HostnameFormat name PluginInstanceFormat name BlockDevice \"/:hd[a-z]/\" IgnoreSelected", "\"/^hd[a-z]$/\" Disk \"/^vd[a-z]$/\" IgnoreSelected false </Plugin> <Plugin \"interface\"> {% for", "def get_systemd_name(path): if \"collectd_exporter\" in path: return \"collectd_exporter\" elif \"node_exporter\"", "def collect_host_network_statistics(): all_eths = os.listdir(\"/sys/class/net/\") virtual_eths = os.listdir(\"/sys/devices/virtual/net/\") interfaces =", "interfaces.append(eth) all_in_bytes = 0 all_in_packets = 0 all_in_errors = 0", "['vg_name']), } r = bash_r(\"grep -Ev '^[[:space:]]*#|^[[:space:]]*$' /etc/multipath/wwids\") if r", "QEMU_CMD = kvmagent.get_qemu_path().split(\"/\")[-1] def read_number(fname): res = 
linux.read_file(fname) return 0", "None, ['vmUuid']) } r, pid_vm_map_str = bash_ro(\"ps --no-headers u -C", "| grep -v '^lvm-pv' | sort\" % disk).strip().splitlines() if multipath_wwid", "%s | grep multipath\" % pv) == 0: multipath_wwid =", "metrics['host_network_all_in_errors'].add_metric([], float(all_in_errors)) metrics['host_network_all_out_bytes'].add_metric([], float(all_out_bytes)) metrics['host_network_all_out_packages'].add_metric([], float(all_out_packets)) metrics['host_network_all_out_errors'].add_metric([], float(all_out_errors)) return metrics.values()", "float(cpu_usage)) n = 10 for i in range(0, len(pid_vm_map.keys()), n):", "linux.find_process_by_command('collectdmon', [conf_path]) if not cpid: bash_errorout('collectdmon -- -C %s' %", "status', None, []), 'physical_network_interface': GaugeMetricFamily('physical_network_interface', 'physical network interface', None, ['interface_name',", "if r == 0: for info in ps_info.splitlines(): info =", "all outbound traffic in bytes'), 'host_network_all_out_packages': GaugeMetricFamily('host_network_all_out_packages', 'Host all outbound", "open(conf_path, 'r') as fd: old_conf = fd.read() if old_conf !=", "elif \"pushgateway\" in path: return \"pushgateway\" def reload_and_restart_service(service_name): bash_errorout(\"systemctl daemon-reload", "for nic in nics: nic = nic.strip() try: # NOTE(weiw):", "been completed yet,\" \" temporarily use the last calculation result.\"", "None, ['slot_number', 'disk_group']), } if bash_r(\"/opt/MegaRAID/MegaCli/MegaCli64 -LDInfo -LALL -aAll\") !=", "0: return metrics.values() raid_info = bash_o(\"/opt/MegaRAID/MegaCli/MegaCli64 -LDInfo -LALL -aAll |", "'ipmi status', None, []), 'physical_network_interface': GaugeMetricFamily('physical_network_interface', 'physical network interface', None,", "= { 'cpu_occupied_by_vm': GaugeMetricFamily('cpu_occupied_by_vm', 'Percentage of CPU used by vm',", "cpu_usage = arr[1] metrics['cpu_occupied_by_vm'].add_metric([vm_uuid], 
float(cpu_usage)) n = 10 for i", "disk_group == \"unknown\" and info.count(\"JBOD\") > 0 else disk_group disk_group", "zstacklib.utils.bash import * from zstacklib.utils.ip import get_nic_supported_max_speed logger = log.get_logger(__name__)", "continue elif eth.startswith('vnic'): continue elif eth.startswith('outer'): continue elif eth.startswith('br_'): continue", "zstacklib.utils import jsonobject from zstacklib.utils import lock from zstacklib.utils import", "= lvm.get_vg_size(name, False) metrics['vg_size'].add_metric([name], float(size)) metrics['vg_avail'].add_metric([name], float(avail)) return metrics.values() def", "{ 'vg_size': GaugeMetricFamily('vg_size', 'volume group size', None, ['vg_name']), 'vg_avail': GaugeMetricFamily('vg_avail',", "None) if zstack_env_path and zstack_env_path != default_zstack_path: default_zstack_path = zstack_env_path", "disk_group state = info.strip().split(\":\")[-1] metrics['physical_disk_state'].add_metric([slot_number, disk_group], convert_disk_state_to_int(state)) return metrics.values() def", "\"DiskGroup\" in info: kvs = info.replace(\"Drive's position: \", \"\").split(\",\") disk_group", "collector_dict[name] = thread.ThreadFacade.run_in_thread(get_result_run, (c, name,)) for i in range(7): for", "global collect_node_disk_wwid_last_time global collect_node_disk_wwid_last_result # NOTE(weiw): some storage can not", "time.sleep(0.5) continue for k in collector_dict.iterkeys(): if collector_dict[k].is_alive(): logger.warn(\"It seems", "if vk == \"timestamp\" or vk == \"exemplar\": continue if", "in v.iterkeys(): if vk == \"timestamp\" or vk == \"exemplar\":", "return collect_node_disk_wwid_last_result metrics = { 'node_disk_wwid': GaugeMetricFamily('node_disk_wwid', 'node disk wwid',", "collectResultLock global latest_collect_result r = f() if not Collector.check(r): logger.warn(\"result", "import * from zstacklib.utils.ip import get_nic_supported_max_speed logger = log.get_logger(__name__) collector_dict", "None 
cls.__collector_cache.clear() cls.__collector_cache.update({time.time(): ret}) @classmethod def check(cls, v): try: if", "\"/^sd[a-z]$/\" Disk \"/^hd[a-z]$/\" Disk \"/^vd[a-z]$/\" IgnoreSelected false </Plugin> <Plugin \"interface\">", "if keys is None or len(keys) == 0: return None", "= log.get_logger(__name__) collector_dict = {} # type: Dict[str, threading.Thread] latest_collect_result", "for t in collector_dict.values(): if t.is_alive(): time.sleep(0.5) continue for k", "latest_collect_result[fname] = r cache = Collector.__get_cache__() if cache is not", "38149 e8e6f27bfb2d47e08c59cbea1d0488c3 38232 afa02edca7eb4afcb5d2904ac1216eb1 ''' pid_vm_map = {} for pid_vm", "\"\" os.chmod(EXPORTER_PATH, 0o755) run_in_systemd(EXPORTER_PATH, ARGUMENTS, LOG_FILE) para = jsonobject.loads(req[http.REQUEST_BODY]) rsp", "open(conf_path, 'w') as fd: fd.write(conf) need_restart_collectd = True else: with", "zstack_env_path and zstack_env_path != default_zstack_path: default_zstack_path = zstack_env_path zstack_dir =", "misc.isMiniHost(): kvmagent.register_prometheus_collector(collect_lvm_capacity_statistics) kvmagent.register_prometheus_collector(collect_raid_state) kvmagent.register_prometheus_collector(collect_equipment_state) class PrometheusPlugin(kvmagent.KvmAgent): COLLECTD_PATH = \"/prometheus/collectdexporter/start\" @kvmagent.replyerror", "eth = eth.strip(' \\t\\n\\r') if eth == 'lo': continue if", "== 0: multipath_wwid = bash_o(\"udevadm info -n %s | grep", "!= service_conf: linux.write_file(service_path, service_conf, True) logger.info(\"%s.service conf changed\" % service_name)", "disk wwid', None, [\"disk\", \"wwid\"]) } pvs = bash_o(\"pvs --nolocking", "in state or \"jobd\" in state: return 0 elif \"rebuild\"", "return jsonobject.dumps(rsp) def install_colletor(self): class Collector(object): __collector_cache = {} @classmethod", "awk -F '-' '{print $NF}'\" % pv).strip() disks = linux.get_physical_disk(pv,", "IgnoreSelected false </Plugin> <Plugin memory> 
ValuesAbsolute true ValuesPercentage false </Plugin>", "in pvs: multipath_wwid = None if bash_r(\"dmsetup table %s |", "service_name service_conf = ''' [Unit] Description=prometheus %s After=network.target [Service] ExecStart=/bin/sh", "'/etc/systemd/system/%s.service' % service_name service_conf = ''' [Unit] Description=prometheus %s After=network.target", "grep -E '^S: disk/by-id' | awk -F '/' '{print $NF}'", "continue elif eth.startswith('outer'): continue elif eth.startswith('br_'): continue elif not eth:", "'/opt/zstack-dvd/', '/var/log/zstack', '/var/lib/mysql', '/var/lib/libvirt', '/tmp/zstack'] metrics = { 'zstack_used_capacity_in_bytes': GaugeMetricFamily('zstack_used_capacity_in_bytes',", "inbound traffic in packages'), 'host_network_all_in_errors': GaugeMetricFamily('host_network_all_in_errors', 'Host all inbound traffic", "'bonding_masters': continue elif eth.startswith('vnic'): continue elif eth.startswith('outer'): continue elif eth.startswith('br_'):", "None else disk_group state = info.strip().split(\":\")[-1] metrics['physical_disk_state'].add_metric([slot_number, disk_group], convert_disk_state_to_int(state)) return", "| awk -F '-' '{print $NF}'\" % pv).strip() disks =", "-type l -not -lname '*virtual*' -printf '%f\\\\n'\").splitlines() if len(nics) !=", "os.listdir(\"/sys/class/net/\") virtual_eths = os.listdir(\"/sys/devices/virtual/net/\") interfaces = [] for eth in", "i in INTERFACES -%} Interface \"{{i}}\" {% endfor -%} IgnoreSelected", "\"node_exporter\" elif \"pushgateway\" in path: return \"pushgateway\" def reload_and_restart_service(service_name): bash_errorout(\"systemctl", "if linux.read_file(service_path) != service_conf: linux.write_file(service_path, service_conf, True) logger.info(\"%s.service conf changed\"", "in eths: eth = eth.strip(' \\t\\n\\r') if eth == 'lo':", "else: disk_group = \"JBOD\" if disk_group == \"unknown\" and info.count(\"JBOD\")", "global latest_collect_result ret = [] def get_result_run(f, fname): # type:", 
"CalculateAverage true CalculateMinimum false CalculateMaximum false CalculateStddev false </Aggregation> </Plugin>", "--nolocking --noheading -oname\") if r != 0 or len(o.splitlines()) ==", "vk in v.iterkeys(): if vk == \"timestamp\" or vk ==", "+= read_number(\"/sys/class/net/{}/statistics/rx_errors\".format(intf)) all_out_bytes += read_number(\"/sys/class/net/{}/statistics/tx_bytes\".format(intf)) all_out_packets += read_number(\"/sys/class/net/{}/statistics/tx_packets\".format(intf)) all_out_errors +=", "= bash_o( \"/opt/MegaRAID/MegaCli/MegaCli64 -PDList -aAll | grep -E 'Slot Number|DiskGroup|Firmware", "def __get_cache__(cls): # type: () -> list keys = cls.__collector_cache.keys()", "cls.__collector_cache.keys() if keys is None or len(keys) == 0: return", "used by vm', None, ['vmUuid']) } r, pid_vm_map_str = bash_ro(\"ps", "0 all_out_bytes = 0 all_out_packets = 0 all_out_errors = 0", "import jsonobject from zstacklib.utils import lock from zstacklib.utils import lvm", "'zstack_used_capacity_in_bytes': GaugeMetricFamily('zstack_used_capacity_in_bytes', 'ZStack used capacity in bytes') } zstack_used_capacity =", "slot_number = state = disk_group = \"unknown\" for info in", "service_path = '/etc/systemd/system/%s.service' % service_name service_conf = ''' [Unit] Description=prometheus", "def read_number(fname): res = linux.read_file(fname) return 0 if not res", "metrics['vg_avail'].add_metric([name], float(avail)) return metrics.values() def convert_raid_state_to_int(state): \"\"\" :type state: str", "eth.startswith('outer'): continue elif eth.startswith('br_'): continue elif not eth: continue else:", "None, details: \\n%s\" % (fname, r)) return with collectResultLock: latest_collect_result[fname]", "0 else disk_group disk_group = \"unknown\" if disk_group is None", "% (fname, r)) return with collectResultLock: latest_collect_result[fname] = r cache", "filter(lambda x: \"DiskGroup\" in x, kvs)[0] disk_group = disk_group.split(\" \")[-1]", "traffic in packages'), 
'host_network_all_out_errors': GaugeMetricFamily('host_network_all_out_errors', 'Host all outbound traffic errors'),", "continue elif not eth: continue else: interfaces.append(eth) for cmd in", "if \"fail\" in info.lower() or \"lost\" in info.lower() else 0", "run_in_systemd(binPath, args, log): def get_systemd_name(path): if \"collectd_exporter\" in path: return", "temp = info.split(\":\")[1].split(\"C\")[0] metrics['physical_disk_temperature'].add_metric([slot_number, disk_group], int(temp)) else: disk_group = \"JBOD\"", "vm_pid_arr_str) if r != 0 or len(pid_cpu_usages_str.splitlines()) == 0: return", "from zstacklib.utils.ip import get_nic_supported_max_speed logger = log.get_logger(__name__) collector_dict = {}", "global collect_node_disk_wwid_last_result # NOTE(weiw): some storage can not afford frequent", "state', None, ['slot_number', 'disk_group']), 'physical_disk_temperature': GaugeMetricFamily('physical_disk_temperature', 'physical disk temperature', None,", "{\\'print $1\\'}\" % dir res = bash_o(cmd) zstack_used_capacity += int(res)", "in info.lower() or \"lost\" in info.lower() else 0 metrics['power_supply'].add_metric([ps_id], health)", "in pid_cpu_usages_str.splitlines(): arr = pid_cpu_usage.split() pid = arr[0] vm_uuid =", "jsonobject.loads(req[http.REQUEST_BODY]) rsp = kvmagent.AgentResponse() eths = bash_o(\"ls /sys/class/net\").split() interfaces =", "network LoadPlugin virt <Plugin aggregation> <Aggregation> #Host \"unspecified\" Plugin \"cpu\"", "NOTE(weiw): sriov nic contains carrier file but can not read", "class Collector(object): __collector_cache = {} @classmethod def __get_cache__(cls): # type:", "state = \"unknown\" for info in raid_info: if \"Target Id\"", "for pv in pvs: multipath_wwid = None if bash_r(\"dmsetup table", "e: logger.warn(\"got exception in check value %s: %s\" % (v,", "import get_nic_supported_max_speed logger = log.get_logger(__name__) collector_dict = {} # type:", "from zstacklib.utils import lock from zstacklib.utils 
import lvm from zstacklib.utils", "in nics: nic = nic.strip() try: # NOTE(weiw): sriov nic", "supply', None, ['ps_id']), 'ipmi_status': GaugeMetricFamily('ipmi_status', 'ipmi status', None, []), 'physical_network_interface':", "str \"\"\" state = state.lower() if \"online\" in state or", "if disk_group == \"unknown\" and info.count(\"JBOD\") > 0 else disk_group", "false CalculateAverage true CalculateMinimum false CalculateMaximum false CalculateStddev false </Aggregation>", "false LoadPlugin syslog LoadPlugin aggregation LoadPlugin cpu LoadPlugin disk LoadPlugin", "\"%s.%s\" % (c.__module__, c.__name__) if collector_dict.get(name) is not None and", "disk_group = \"unknown\" for info in disk_info: if \"Slot Number\"", "and info.count(\"JBOD\") > 0 else disk_group disk_group = \"unknown\" if", "== \"optimal\": return 0 elif state == \"degraded\": return 5", "if r != 0 or len(pid_vm_map_str.splitlines()) == 0: return metrics.values()", "LoadPlugin cpu LoadPlugin disk LoadPlugin interface LoadPlugin memory LoadPlugin network", "cache = Collector.__get_cache__() if cache is not None: return cache", "regardless of conf changes, for ZSTAC-23539 reload_and_restart_service(service_name) @lock.file_lock(\"/run/collectd-conf.lock\", locker=lock.Flock()) def", "threading import typing from prometheus_client import start_http_server from prometheus_client.core import", "10 for i in range(0, len(pid_vm_map.keys()), n): collect(pid_vm_map.keys()[i:i + n])", "outbound traffic in packages'), 'host_network_all_out_errors': GaugeMetricFamily('host_network_all_out_errors', 'Host all outbound traffic", "is not None: return collect_node_disk_wwid_last_result metrics = { 'node_disk_wwid': GaugeMetricFamily('node_disk_wwid',", "collect_node_disk_wwid_last_result = metrics.values() return metrics.values() kvmagent.register_prometheus_collector(collect_host_network_statistics) kvmagent.register_prometheus_collector(collect_host_capacity_statistics) 
kvmagent.register_prometheus_collector(collect_vm_statistics) kvmagent.register_prometheus_collector(collect_node_disk_wwid) if", "virt <Plugin aggregation> <Aggregation> #Host \"unspecified\" Plugin \"cpu\" #PluginInstance \"unspecified\"", "from prometheus_client.core import GaugeMetricFamily, REGISTRY from kvmagent import kvmagent from", "-TERM -f %s' Restart=always RestartSec=30s [Install] WantedBy=multi-user.target ''' % (service_name,", "elif eth.startswith('br_'): continue elif not eth: continue else: interfaces.append(eth) for", "['interface_name', 'speed']), } r, ps_info = bash_ro(\"ipmitool sdr type 'power", "latest_collect_result ret = [] def get_result_run(f, fname): # type: (typing.Callable,", "= pid_vm_map[pid] cpu_usage = arr[1] metrics['cpu_occupied_by_vm'].add_metric([vm_uuid], float(cpu_usage)) n = 10", "bash_o( \"/opt/MegaRAID/MegaCli/MegaCli64 -PDList -aAll | grep -E 'Slot Number|DiskGroup|Firmware state|Drive", "TUR. ref: ZSTAC-23416 if collect_node_disk_wwid_last_time is None: collect_node_disk_wwid_last_time = time.time()", "or \"jobd\" in state: return 0 elif \"rebuild\" in state:", "'%f\\\\n'\").splitlines() if len(nics) != 0: for nic in nics: nic", "from zstacklib.utils import thread from zstacklib.utils.bash import * from zstacklib.utils.ip", "true ValuesPercentage false </Plugin> <Plugin virt> Connection \"qemu:///system\" RefreshInterval {{INTERVAL}}", "eth: continue else: interfaces.append(eth) for cmd in para.cmds: if \"collectd_exporter\"", "in collector_dict.values(): if t.is_alive(): time.sleep(0.5) continue for k in collector_dict.iterkeys():", "'''pid_vm_map_str samples: 38149 e8e6f27bfb2d47e08c59cbea1d0488c3 38232 afa02edca7eb4afcb5d2904ac1216eb1 ''' pid_vm_map = {}", "locker=lock.Flock()) def start_collectd_exporter(cmd): start_collectd(cmd) start_exporter(cmd) @in_bash def start_exporter(cmd): EXPORTER_PATH =", "\"\").replace(\"guest=\", \"\") '''pid_vm_map_str samples: 38149 e8e6f27bfb2d47e08c59cbea1d0488c3 38232 
afa02edca7eb4afcb5d2904ac1216eb1 ''' pid_vm_map", "0: return for pid_cpu_usage in pid_cpu_usages_str.splitlines(): arr = pid_cpu_usage.split() pid", "0 metrics['power_supply'].add_metric([ps_id], health) metrics['ipmi_status'].add_metric([], bash_r(\"ipmitool mc info\")) nics = bash_o(\"find", "wwids])], 1) collect_node_disk_wwid_last_result = metrics.values() return metrics.values() kvmagent.register_prometheus_collector(collect_host_network_statistics) kvmagent.register_prometheus_collector(collect_host_capacity_statistics) kvmagent.register_prometheus_collector(collect_vm_statistics)", "'power supply'\") # type: (int, str) if r == 0:", "= f() if not Collector.check(r): logger.warn(\"result from collector %s contains", "f() if not Collector.check(r): logger.warn(\"result from collector %s contains illegal", "\"Host\" GroupBy \"TypeInstance\" CalculateNum false CalculateSum false CalculateAverage true CalculateMinimum", "as fd: fd.write(conf) need_restart_collectd = True cpid = linux.find_process_by_command('collectd', [conf_path])", "vk == \"exemplar\": continue if Collector.check(v[vk]) is False: return False", "collectResultLock: latest_collect_result[fname] = r cache = Collector.__get_cache__() if cache is", "'''Interval {{INTERVAL}} # version {{VERSION}} FQDNLookup false LoadPlugin syslog LoadPlugin", "status = True speed = str(get_nic_supported_max_speed(nic)) metrics['physical_network_interface'].add_metric([nic, speed], status) return", "= {} for pid_vm in pid_vm_map_str.splitlines(): arr = pid_vm.split() if", "'bonding_masters': continue elif not eth: continue else: interfaces.append(eth) all_in_bytes =", "= os.path.join(os.path.dirname(EXPORTER_PATH), cmd.binaryPath + '.log') ARGUMENTS = cmd.startupArguments if not", "return False if isinstance(v, GaugeMetricFamily): return Collector.check(v.samples) if isinstance(v, list)", "false </Aggregation> </Plugin> <Plugin cpu> ReportByCpu true ReportByState true ValuesPercentage", "linux.read_file(service_path) 
!= service_conf: linux.write_file(service_path, service_conf, True) logger.info(\"%s.service conf changed\" %", "'pkill -TERM -f %s' Restart=always RestartSec=30s [Install] WantedBy=multi-user.target ''' %", "metrics['host_network_all_in_bytes'].add_metric([], float(all_in_bytes)) metrics['host_network_all_in_packages'].add_metric([], float(all_in_packets)) metrics['host_network_all_in_errors'].add_metric([], float(all_in_errors)) metrics['host_network_all_out_bytes'].add_metric([], float(all_out_bytes)) metrics['host_network_all_out_packages'].add_metric([], float(all_out_packets))", "false CalculateSum false CalculateAverage true CalculateMinimum false CalculateMaximum false CalculateStddev", "\"/:hd[a-z]/\" IgnoreSelected true ExtraStats \"vcpu memory\" </Plugin> <Plugin network> Server", "Server \"localhost\" \"25826\" </Plugin> ''' tmpt = Template(conf) conf =", "storage can not afford frequent TUR. ref: ZSTAC-23416 if collect_node_disk_wwid_last_time", "all_out_packets += read_number(\"/sys/class/net/{}/statistics/tx_packets\".format(intf)) all_out_errors += read_number(\"/sys/class/net/{}/statistics/tx_errors\".format(intf)) metrics = { 'host_network_all_in_bytes':", "[%s] has not been completed yet,\" \" temporarily use the", "all inbound traffic in bytes'), 'host_network_all_in_packages': GaugeMetricFamily('host_network_all_in_packages', 'Host all inbound", "lvm from zstacklib.utils import misc from zstacklib.utils import thread from", "= bash_o(cmd) zstack_used_capacity += int(res) metrics['zstack_used_capacity_in_bytes'].add_metric([], float(zstack_used_capacity)) return metrics.values() def", "kvmagent.get_qemu_path().split(\"/\")[-1] def read_number(fname): res = linux.read_file(fname) return 0 if not", "else int(res) def collect_host_network_statistics(): all_eths = os.listdir(\"/sys/class/net/\") virtual_eths = os.listdir(\"/sys/devices/virtual/net/\")", "[conf_path]) mpid = linux.find_process_by_command('collectdmon', [conf_path]) if not cpid: 
bash_errorout('collectdmon --", "-E 'Target Id|State'\").strip().splitlines() target_id = state = \"unknown\" for info", "os.chmod(EXPORTER_PATH, 0o755) run_in_systemd(EXPORTER_PATH, ARGUMENTS, LOG_FILE) para = jsonobject.loads(req[http.REQUEST_BODY]) rsp =", "fd.write(conf) need_restart_collectd = True else: with open(conf_path, 'w') as fd:", "not cpid: bash_errorout('collectdmon -- -C %s' % conf_path) else: bash_errorout('kill", "vg_names: name = name.strip() size, avail = lvm.get_vg_size(name, False) metrics['vg_size'].add_metric([name],", "(typing.Callable, str) -> None global collectResultLock global latest_collect_result r =", "return metrics.values() kvmagent.register_prometheus_collector(collect_host_network_statistics) kvmagent.register_prometheus_collector(collect_host_capacity_statistics) kvmagent.register_prometheus_collector(collect_vm_statistics) kvmagent.register_prometheus_collector(collect_node_disk_wwid) if misc.isMiniHost(): kvmagent.register_prometheus_collector(collect_lvm_capacity_statistics) kvmagent.register_prometheus_collector(collect_raid_state)", "speed], status) return metrics.values() def collect_vm_statistics(): metrics = { 'cpu_occupied_by_vm':", "i in range(0, len(pid_vm_map.keys()), n): collect(pid_vm_map.keys()[i:i + n]) return metrics.values()", "# type: () -> list keys = cls.__collector_cache.keys() if keys", "GaugeMetricFamily('zstack_used_capacity_in_bytes', 'ZStack used capacity in bytes') } zstack_used_capacity = 0", "@kvmagent.replyerror @in_bash def start_prometheus_exporter(self, req): @in_bash def start_collectd(cmd): conf_path =", "for i in range(0, len(pid_vm_map.keys()), n): collect(pid_vm_map.keys()[i:i + n]) return", "{{INTERVAL}} HostnameFormat name PluginInstanceFormat name BlockDevice \"/:hd[a-z]/\" IgnoreSelected true ExtraStats", "0 or len(pid_vm_map_str.splitlines()) == 0: return metrics.values() pid_vm_map_str = pid_vm_map_str.replace(\",debug-threads=on\",", "all inbound traffic errors'), 
'host_network_all_out_bytes': GaugeMetricFamily('host_network_all_out_bytes', 'Host all outbound traffic", "true </Plugin> <Plugin disk> Disk \"/^sd[a-z]$/\" Disk \"/^hd[a-z]$/\" Disk \"/^vd[a-z]$/\"", "GaugeMetricFamily('host_network_all_in_packages', 'Host all inbound traffic in packages'), 'host_network_all_in_errors': GaugeMetricFamily('host_network_all_in_errors', 'Host", "None: return collect_node_disk_wwid_last_result metrics = { 'node_disk_wwid': GaugeMetricFamily('node_disk_wwid', 'node disk", "- keys[0]) < 9: return cls.__collector_cache.get(keys[0]) return None @classmethod def", "for i in range(7): for t in collector_dict.values(): if t.is_alive():", "= pid_vm.split() if len(arr) == 2: pid_vm_map[arr[0]] = arr[1] def", "if not cpid: bash_errorout('collectdmon -- -C %s' % conf_path) else:", "{ 'cpu_occupied_by_vm': GaugeMetricFamily('cpu_occupied_by_vm', 'Percentage of CPU used by vm', None,", "check value %s: %s\" % (v, e)) return True return", "try: # NOTE(weiw): sriov nic contains carrier file but can", "version {{VERSION}} FQDNLookup false LoadPlugin syslog LoadPlugin aggregation LoadPlugin cpu", "disk state', None, ['slot_number', 'disk_group']), 'physical_disk_temperature': GaugeMetricFamily('physical_disk_temperature', 'physical disk temperature',", "''' pid_vm_map = {} for pid_vm in pid_vm_map_str.splitlines(): arr =", "else: return 100 def collect_raid_state(): metrics = { 'raid_state': GaugeMetricFamily('raid_state',", "collect_node_disk_wwid_last_time < 60 and collect_node_disk_wwid_last_result is not None: return collect_node_disk_wwid_last_result", "'/usr/local/zstack/apache-tomcat/webapps/zstack' zstack_env_path = os.environ.get('ZSTACK_HOME', None) if zstack_env_path and zstack_env_path !=", "\\\"%s -name\\\" | awk '{print $2,$13}'\" % QEMU_CMD) if r", "disk_group disk_group = \"unknown\" if disk_group is None else disk_group", "in info: slot_number = info.strip().split(\" \")[-1] elif \"DiskGroup\" in info:", "== 0: for info in 
ps_info.splitlines(): info = info.strip() ps_id", "ExtraStats \"vcpu memory\" </Plugin> <Plugin network> Server \"localhost\" \"25826\" </Plugin>", "\"optimal\": return 0 elif state == \"degraded\": return 5 else:", "== 1 except Exception as e: status = True speed", "= bash_ro(\"top -b -n 1 -p %s | grep qemu", "r, ps_info = bash_ro(\"ipmitool sdr type 'power supply'\") # type:", "<Plugin \"interface\"> {% for i in INTERFACES -%} Interface \"{{i}}\"", "{ 'zstack_used_capacity_in_bytes': GaugeMetricFamily('zstack_used_capacity_in_bytes', 'ZStack used capacity in bytes') } zstack_used_capacity", "-- -C %s' % conf_path) else: bash_errorout('kill -HUP %s' %", "collect_node_disk_wwid_last_time = time.time() elif time.time() - collect_node_disk_wwid_last_time < 60 and", "read_number(\"/sys/class/net/{}/statistics/tx_packets\".format(intf)) all_out_errors += read_number(\"/sys/class/net/{}/statistics/tx_errors\".format(intf)) metrics = { 'host_network_all_in_bytes': GaugeMetricFamily('host_network_all_in_bytes', 'Host", "CalculateSum false CalculateAverage true CalculateMinimum false CalculateMaximum false CalculateStddev false", "for intf in interfaces: all_in_bytes += read_number(\"/sys/class/net/{}/statistics/rx_bytes\".format(intf)) all_in_packets += read_number(\"/sys/class/net/{}/statistics/rx_packets\".format(intf))", "if isinstance(v, dict): for vk in v.iterkeys(): if vk ==", "%s: %s\" % (v, e)) return True return True def", "= arr[0] vm_uuid = pid_vm_map[pid] cpu_usage = arr[1] metrics['cpu_occupied_by_vm'].add_metric([vm_uuid], float(cpu_usage))", "state|Drive Temperature'\").strip().splitlines() slot_number = state = disk_group = \"unknown\" for", "ValuesPercentage true </Plugin> <Plugin disk> Disk \"/^sd[a-z]$/\" Disk \"/^hd[a-z]$/\" Disk", "that the collector [%s] has not been completed yet,\" \"", "info.lower() else 0 metrics['power_supply'].add_metric([ps_id], health) metrics['ipmi_status'].add_metric([], bash_r(\"ipmitool mc info\")) nics", 
"kvmagent.register_prometheus_collector(collect_equipment_state) class PrometheusPlugin(kvmagent.KvmAgent): COLLECTD_PATH = \"/prometheus/collectdexporter/start\" @kvmagent.replyerror @in_bash def start_prometheus_exporter(self,", "x, kvs)[0] disk_group = disk_group.split(\" \")[-1] elif \"Drive Temperature\" in", "memory\" </Plugin> <Plugin network> Server \"localhost\" \"25826\" </Plugin> ''' tmpt", "kvmagent.AgentResponse() eths = bash_o(\"ls /sys/class/net\").split() interfaces = [] for eth", "% cpid) if need_restart_collectd: if not mpid: bash_errorout('collectdmon -- -C", "== \"unknown\" and info.count(\"JBOD\") > 0 else disk_group disk_group =", "bash_r(\"ipmitool mc info\")) nics = bash_o(\"find /sys/class/net -type l -not", "cls.__collector_cache.clear() cls.__collector_cache.update({time.time(): ret}) @classmethod def check(cls, v): try: if v", "lock from zstacklib.utils import lvm from zstacklib.utils import misc from", "int(temp)) else: disk_group = \"JBOD\" if disk_group == \"unknown\" and", "cmd.binaryPath: start_collectd_exporter(cmd) else: start_exporter(cmd) return jsonobject.dumps(rsp) def install_colletor(self): class Collector(object):", "| grep qemu | awk '{print $1,$9}'\" % vm_pid_arr_str) if", "collector_dict = {} # type: Dict[str, threading.Thread] latest_collect_result = {}", "else 0 metrics['power_supply'].add_metric([ps_id], health) metrics['ipmi_status'].add_metric([], bash_r(\"ipmitool mc info\")) nics =", "start_http_server from prometheus_client.core import GaugeMetricFamily, REGISTRY from kvmagent import kvmagent", "0 all_in_packets = 0 all_in_errors = 0 all_out_bytes = 0", "os.path.exists(dir): continue cmd = \"du -bs %s | awk {\\'print", "None if bash_r(\"dmsetup table %s | grep multipath\" % pv)", "= info.strip().split(\" \")[-1] elif \"DiskGroup\" in info: kvs = info.replace(\"Drive's", "* from zstacklib.utils.ip import get_nic_supported_max_speed logger = log.get_logger(__name__) collector_dict =", "threading.RLock() QEMU_CMD = 
kvmagent.get_qemu_path().split(\"/\")[-1] def read_number(fname): res = linux.read_file(fname) return", "state: return 15 else: return 100 def collect_raid_state(): metrics =", "return if linux.read_file(service_path) != service_conf: linux.write_file(service_path, service_conf, True) logger.info(\"%s.service conf", "conf_path) else: bash_errorout('kill -HUP %s' % mpid) else: if not", "Dict[str, threading.Thread] latest_collect_result = {} collectResultLock = threading.RLock() QEMU_CMD =", "return 0 elif \"rebuild\" in state: return 5 elif \"failed\"", "read_number(\"/sys/class/net/{}/statistics/rx_errors\".format(intf)) all_out_bytes += read_number(\"/sys/class/net/{}/statistics/tx_bytes\".format(intf)) all_out_packets += read_number(\"/sys/class/net/{}/statistics/tx_packets\".format(intf)) all_out_errors += read_number(\"/sys/class/net/{}/statistics/tx_errors\".format(intf))", "if collector_dict.get(name) is not None and collector_dict.get(name).is_alive(): continue collector_dict[name] =", "collect_node_disk_wwid_last_time is None: collect_node_disk_wwid_last_time = time.time() elif time.time() - collect_node_disk_wwid_last_time", "import typing from prometheus_client import start_http_server from prometheus_client.core import GaugeMetricFamily,", "pool free size', None, ['vg_name']), } r = bash_r(\"grep -Ev", "0 if not res else int(res) def collect_host_network_statistics(): all_eths =", "= os.path.join(os.path.dirname(cmd.binaryPath), 'collectd.conf') conf = '''Interval {{INTERVAL}} # version {{VERSION}}", "= {} # type: Dict[str, threading.Thread] latest_collect_result = {} collectResultLock", "GaugeMetricFamily, REGISTRY from kvmagent import kvmagent from zstacklib.utils import http", "!= 0 or len(pid_cpu_usages_str.splitlines()) == 0: return for pid_cpu_usage in", "CalculateMinimum false CalculateMaximum false CalculateStddev false </Aggregation> </Plugin> <Plugin cpu>", "%s' % cpid) if need_restart_collectd: if not mpid: bash_errorout('collectdmon --", 
"cmd.startupArguments if not ARGUMENTS: ARGUMENTS = \"\" os.chmod(EXPORTER_PATH, 0o755) run_in_systemd(EXPORTER_PATH,", "pid_cpu_usages_str = bash_ro(\"top -b -n 1 -p %s | grep", "'collectd.conf') conf = '''Interval {{INTERVAL}} # version {{VERSION}} FQDNLookup false", "def convert_disk_state_to_int(state): \"\"\" :type state: str \"\"\" state = state.lower()", "__collector_cache = {} @classmethod def __get_cache__(cls): # type: () ->", "disk/by-id/dm-uuid' | awk -F '-' '{print $NF}'\" % pv).strip() disks", "kvmagent.metric_collectors: name = \"%s.%s\" % (c.__module__, c.__name__) if collector_dict.get(name) is", "= eth.strip(' \\t\\n\\r') if eth == 'lo': continue if eth", "== \"exemplar\": continue if Collector.check(v[vk]) is False: return False except", "None @classmethod def __store_cache__(cls, ret): # type: (list) -> None", "for eth in all_eths: eth = eth.strip(' \\t\\n\\r') if eth", "eth: continue else: interfaces.append(eth) all_in_bytes = 0 all_in_packets = 0", "\"cpu\" #TypeInstance \"unspecified\" GroupBy \"Host\" GroupBy \"TypeInstance\" CalculateNum false CalculateSum", "eths = bash_o(\"ls /sys/class/net\").split() interfaces = [] for eth in", "collect_raid_state(): metrics = { 'raid_state': GaugeMetricFamily('raid_state', 'raid state', None, ['target_id']),", "conf changed\" % service_name) os.chmod(service_path, 0644) # restart service regardless", "bash_o(\"udevadm info -n %s | grep -E '^S: disk/by-id/dm-uuid' |", "start_collectd(cmd) start_exporter(cmd) @in_bash def start_exporter(cmd): EXPORTER_PATH = cmd.binaryPath LOG_FILE =", "get_nic_supported_max_speed logger = log.get_logger(__name__) collector_dict = {} # type: Dict[str,", "disk_group = filter(lambda x: \"DiskGroup\" in x, kvs)[0] disk_group =", "for w in wwids])], 1) collect_node_disk_wwid_last_result = metrics.values() return metrics.values()", "args, '/dev/null' if log.endswith('/pushgateway.log') else log, binPath) if not os.path.exists(service_path):", "ValuesAbsolute true 
ValuesPercentage false </Plugin> <Plugin virt> Connection \"qemu:///system\" RefreshInterval", "def install_colletor(self): class Collector(object): __collector_cache = {} @classmethod def __get_cache__(cls):", "None and collector_dict.get(name).is_alive(): continue collector_dict[name] = thread.ThreadFacade.run_in_thread(get_result_run, (c, name,)) for", "def collect_host_capacity_statistics(): default_zstack_path = '/usr/local/zstack/apache-tomcat/webapps/zstack' zstack_env_path = os.environ.get('ZSTACK_HOME', None) if", "def start_collectd_exporter(cmd): start_collectd(cmd) start_exporter(cmd) @in_bash def start_exporter(cmd): EXPORTER_PATH = cmd.binaryPath", "str) -> None global collectResultLock global latest_collect_result r = f()", "def start(self): http_server = kvmagent.get_http_server() http_server.register_async_uri(self.COLLECTD_PATH, self.start_prometheus_exporter) self.install_colletor() start_http_server(7069) def", "e = bash_roe(\"vgs --nolocking --noheading -oname\") if r != 0", "= 0 all_out_bytes = 0 all_out_packets = 0 all_out_errors =", "cpu> ReportByCpu true ReportByState true ValuesPercentage true </Plugin> <Plugin disk>", "disk_info = bash_o( \"/opt/MegaRAID/MegaCli/MegaCli64 -PDList -aAll | grep -E 'Slot", "/etc/multipath/wwids\") if r == 0: linux.set_fail_if_no_path() r, o, e =", "\"unspecified\" Plugin \"cpu\" #PluginInstance \"unspecified\" Type \"cpu\" #TypeInstance \"unspecified\" GroupBy", "= Collector.__get_cache__() if cache is not None: return cache for", "\"\"\" :type state: str \"\"\" state = state.lower() if \"online\"", "= 0 all_out_packets = 0 all_out_errors = 0 for intf", "zstacklib.utils import http from zstacklib.utils import jsonobject from zstacklib.utils import", "== 'bonding_masters': continue elif not eth: continue else: interfaces.append(eth) all_in_bytes", "eth == 'bonding_masters': continue elif not eth: continue else: interfaces.append(eth)", "zstack_dir: if not os.path.exists(dir): continue cmd = \"du -bs %s", 
"bash_o(\"pvs --nolocking --noheading -o pv_name\").strip().splitlines() for pv in pvs: multipath_wwid", "1) collect_node_disk_wwid_last_result = metrics.values() return metrics.values() kvmagent.register_prometheus_collector(collect_host_network_statistics) kvmagent.register_prometheus_collector(collect_host_capacity_statistics) kvmagent.register_prometheus_collector(collect_vm_statistics) kvmagent.register_prometheus_collector(collect_node_disk_wwid)", "in state: return 10 elif \"unconfigured\" in state: return 15", "{% endfor -%} IgnoreSelected false </Plugin> <Plugin memory> ValuesAbsolute true", "Description=prometheus %s After=network.target [Service] ExecStart=/bin/sh -c '%s %s > %s", "info: target_id = info.strip().strip(\")\").split(\" \")[-1] else: state = info.strip().split(\" \")[-1]", "ret = [] def get_result_run(f, fname): # type: (typing.Callable, str)", "= 0 all_in_errors = 0 all_out_bytes = 0 all_out_packets =", "state = disk_group = \"unknown\" for info in disk_info: if", "pid_vm_map_str = pid_vm_map_str.replace(\",debug-threads=on\", \"\").replace(\"guest=\", \"\") '''pid_vm_map_str samples: 38149 e8e6f27bfb2d47e08c59cbea1d0488c3 38232", "vk == \"timestamp\" or vk == \"exemplar\": continue if Collector.check(v[vk])", "intf in interfaces: all_in_bytes += read_number(\"/sys/class/net/{}/statistics/rx_bytes\".format(intf)) all_in_packets += read_number(\"/sys/class/net/{}/statistics/rx_packets\".format(intf)) all_in_errors", "nic = nic.strip() try: # NOTE(weiw): sriov nic contains carrier", "r = f() if not Collector.check(r): logger.warn(\"result from collector %s", "'physical_network_interface': GaugeMetricFamily('physical_network_interface', 'physical network interface', None, ['interface_name', 'speed']), } r,", "in para.cmds: if \"collectd_exporter\" in cmd.binaryPath: start_collectd_exporter(cmd) else: start_exporter(cmd) return", "-c 'pkill -TERM -f %s' Restart=always RestartSec=30s [Install] WantedBy=multi-user.target '''", "\"pushgateway\" 
def reload_and_restart_service(service_name): bash_errorout(\"systemctl daemon-reload && systemctl restart %s.service\" %", "'ipmi_status': GaugeMetricFamily('ipmi_status', 'ipmi status', None, []), 'physical_network_interface': GaugeMetricFamily('physical_network_interface', 'physical network", "% service_name) os.chmod(service_path, 0644) # restart service regardless of conf", "fd.read() if old_conf != conf: with open(conf_path, 'w') as fd:", "if multipath_wwid is not None: wwids.append(multipath_wwid) if len(wwids) > 0:", "cpid = linux.find_process_by_command('collectd', [conf_path]) mpid = linux.find_process_by_command('collectdmon', [conf_path]) if not", "metrics['host_network_all_out_errors'].add_metric([], float(all_out_errors)) return metrics.values() def collect_host_capacity_statistics(): default_zstack_path = '/usr/local/zstack/apache-tomcat/webapps/zstack' zstack_env_path", "cls.__collector_cache.update({time.time(): ret}) @classmethod def check(cls, v): try: if v is", "if isinstance(v, list) or isinstance(v, tuple): for vl in v:", "collect_node_disk_wwid_last_result # NOTE(weiw): some storage can not afford frequent TUR.", "%s > %s 2>&1' ExecStop=/bin/sh -c 'pkill -TERM -f %s'", "metrics.values() pid_vm_map_str = pid_vm_map_str.replace(\",debug-threads=on\", \"\").replace(\"guest=\", \"\") '''pid_vm_map_str samples: 38149 e8e6f27bfb2d47e08c59cbea1d0488c3", "reload_and_restart_service(service_name): bash_errorout(\"systemctl daemon-reload && systemctl restart %s.service\" % service_name) service_name", "with collectResultLock: latest_collect_result[fname] = r cache = Collector.__get_cache__() if cache", "'Slot Number|DiskGroup|Firmware state|Drive Temperature'\").strip().splitlines() slot_number = state = disk_group =", "+= read_number(\"/sys/class/net/{}/statistics/tx_bytes\".format(intf)) all_out_packets += read_number(\"/sys/class/net/{}/statistics/tx_packets\".format(intf)) all_out_errors += 
read_number(\"/sys/class/net/{}/statistics/tx_errors\".format(intf)) metrics =", "collector_dict.iterkeys(): if collector_dict[k].is_alive(): logger.warn(\"It seems that the collector [%s] has", "[Install] WantedBy=multi-user.target ''' % (service_name, binPath, args, '/dev/null' if log.endswith('/pushgateway.log')", "['vg_name']), 'vg_avail': GaugeMetricFamily('vg_avail', 'volume group and thin pool free size',", "in v: if Collector.check(vl) is False: return False if isinstance(v,", "or len(pid_vm_map_str.splitlines()) == 0: return metrics.values() pid_vm_map_str = pid_vm_map_str.replace(\",debug-threads=on\", \"\").replace(\"guest=\",", "default_zstack_path: default_zstack_path = zstack_env_path zstack_dir = ['/var/lib/zstack', '%s/../../../' % default_zstack_path,", "else disk_group state = info.strip().split(\":\")[-1] metrics['physical_disk_state'].add_metric([slot_number, disk_group], convert_disk_state_to_int(state)) return metrics.values()", "# NOTE(weiw): sriov nic contains carrier file but can not", "carrier file but can not read status = linux.read_file(\"/sys/class/net/%s/carrier\" %", "illegal character None, details: \\n%s\" % (fname, r)) return with", "start_exporter(cmd): EXPORTER_PATH = cmd.binaryPath LOG_FILE = os.path.join(os.path.dirname(EXPORTER_PATH), cmd.binaryPath + '.log')", "is not None and collector_dict.get(name).is_alive(): continue collector_dict[name] = thread.ThreadFacade.run_in_thread(get_result_run, (c,", "-not -lname '*virtual*' -printf '%f\\\\n'\").splitlines() if len(nics) != 0: for", "samples: 38149 e8e6f27bfb2d47e08c59cbea1d0488c3 38232 afa02edca7eb4afcb5d2904ac1216eb1 ''' pid_vm_map = {} for", "contains illegal character None, details: \\n%s\" % (fname, r)) return", "eth.startswith('vnic'): continue elif eth.startswith('outer'): continue elif eth.startswith('br_'): continue elif not", "service_conf, True) os.chmod(service_path, 0644) reload_and_restart_service(service_name) return if linux.read_file(service_path) != 
service_conf:", "is not None: wwids.append(multipath_wwid) if len(wwids) > 0: metrics['node_disk_wwid'].add_metric([disk_name, \";\".join([w.strip()", "continue if eth == 'bonding_masters': continue elif not eth: continue", "afa02edca7eb4afcb5d2904ac1216eb1 ''' pid_vm_map = {} for pid_vm in pid_vm_map_str.splitlines(): arr", "conf changes, for ZSTAC-23539 reload_and_restart_service(service_name) @lock.file_lock(\"/run/collectd-conf.lock\", locker=lock.Flock()) def start_collectd_exporter(cmd): start_collectd(cmd)", "continue else: interfaces.append(eth) for cmd in para.cmds: if \"collectd_exporter\" in", "range(7): for t in collector_dict.values(): if t.is_alive(): time.sleep(0.5) continue for", "GaugeMetricFamily('physical_disk_state', 'physical disk state', None, ['slot_number', 'disk_group']), 'physical_disk_temperature': GaugeMetricFamily('physical_disk_temperature', 'physical", "</Plugin> <Plugin memory> ValuesAbsolute true ValuesPercentage false </Plugin> <Plugin virt>", "Id\" in info: target_id = info.strip().strip(\")\").split(\" \")[-1] else: state =", "\\t\\n\\r') if eth in virtual_eths: continue if eth == 'bonding_masters':", "collect_node_disk_wwid(): global collect_node_disk_wwid_last_time global collect_node_disk_wwid_last_result # NOTE(weiw): some storage can", "disk_group = disk_group.split(\" \")[-1] elif \"Drive Temperature\" in info: temp", "read_number(\"/sys/class/net/{}/statistics/rx_packets\".format(intf)) all_in_errors += read_number(\"/sys/class/net/{}/statistics/rx_errors\".format(intf)) all_out_bytes += read_number(\"/sys/class/net/{}/statistics/tx_bytes\".format(intf)) all_out_packets += read_number(\"/sys/class/net/{}/statistics/tx_packets\".format(intf))", "res else int(res) def collect_host_network_statistics(): all_eths = os.listdir(\"/sys/class/net/\") virtual_eths =", "metrics['power_supply'].add_metric([ps_id], health) metrics['ipmi_status'].add_metric([], bash_r(\"ipmitool mc info\")) nics = bash_o(\"find /sys/class/net", 
"__get_cache__(cls): # type: () -> list keys = cls.__collector_cache.keys() if", "default_zstack_path = zstack_env_path zstack_dir = ['/var/lib/zstack', '%s/../../../' % default_zstack_path, '/opt/zstack-dvd/',", "= { 'zstack_used_capacity_in_bytes': GaugeMetricFamily('zstack_used_capacity_in_bytes', 'ZStack used capacity in bytes') }", "QEMU_CMD) if r != 0 or len(pid_vm_map_str.splitlines()) == 0: return", "['target_id']), 'physical_disk_state': GaugeMetricFamily('physical_disk_state', 'physical disk state', None, ['slot_number', 'disk_group']), 'physical_disk_temperature':", "para.cmds: if \"collectd_exporter\" in cmd.binaryPath: start_collectd_exporter(cmd) else: start_exporter(cmd) return jsonobject.dumps(rsp)", "| sort\" % disk).strip().splitlines() if multipath_wwid is not None: wwids.append(multipath_wwid)", "fname): # type: (typing.Callable, str) -> None global collectResultLock global", "all_in_bytes += read_number(\"/sys/class/net/{}/statistics/rx_bytes\".format(intf)) all_in_packets += read_number(\"/sys/class/net/{}/statistics/rx_packets\".format(intf)) all_in_errors += read_number(\"/sys/class/net/{}/statistics/rx_errors\".format(intf)) all_out_bytes", "= { 'raid_state': GaugeMetricFamily('raid_state', 'raid state', None, ['target_id']), 'physical_disk_state': GaugeMetricFamily('physical_disk_state',", "zstack_dir = ['/var/lib/zstack', '%s/../../../' % default_zstack_path, '/opt/zstack-dvd/', '/var/log/zstack', '/var/lib/mysql', '/var/lib/libvirt',", "LoadPlugin network LoadPlugin virt <Plugin aggregation> <Aggregation> #Host \"unspecified\" Plugin", "[conf_path]) if not cpid: bash_errorout('collectdmon -- -C %s' % conf_path)", "metrics = { 'node_disk_wwid': GaugeMetricFamily('node_disk_wwid', 'node disk wwid', None, [\"disk\",", "0: for info in ps_info.splitlines(): info = info.strip() ps_id =", "changes, for ZSTAC-23539 reload_and_restart_service(service_name) @lock.file_lock(\"/run/collectd-conf.lock\", locker=lock.Flock()) def 
start_collectd_exporter(cmd): start_collectd(cmd) start_exporter(cmd)", "IgnoreSelected true ExtraStats \"vcpu memory\" </Plugin> <Plugin network> Server \"localhost\"", "logger.warn(\"It seems that the collector [%s] has not been completed", "info.strip().strip(\")\").split(\" \")[-1] else: state = info.strip().split(\" \")[-1] metrics['raid_state'].add_metric([target_id], convert_raid_state_to_int(state)) disk_info", "packages'), 'host_network_all_out_errors': GaugeMetricFamily('host_network_all_out_errors', 'Host all outbound traffic errors'), } metrics['host_network_all_in_bytes'].add_metric([],", "raid_info = bash_o(\"/opt/MegaRAID/MegaCli/MegaCli64 -LDInfo -LALL -aAll | grep -E 'Target", "else: interfaces.append(eth) for cmd in para.cmds: if \"collectd_exporter\" in cmd.binaryPath:", "details: \\n%s\" % (fname, r)) return with collectResultLock: latest_collect_result[fname] =", "return \"collectd_exporter\" elif \"node_exporter\" in path: return \"node_exporter\" elif \"pushgateway\"", "len(wwids) > 0: metrics['node_disk_wwid'].add_metric([disk_name, \";\".join([w.strip() for w in wwids])], 1)", "from zstacklib.utils import lvm from zstacklib.utils import misc from zstacklib.utils", "Disk \"/^sd[a-z]$/\" Disk \"/^hd[a-z]$/\" Disk \"/^vd[a-z]$/\" IgnoreSelected false </Plugin> <Plugin", "== 0: return for pid_cpu_usage in pid_cpu_usages_str.splitlines(): arr = pid_cpu_usage.split()", "# type: (typing.Callable, str) -> None global collectResultLock global latest_collect_result", "convert_raid_state_to_int(state): \"\"\" :type state: str \"\"\" state = state.lower() if", "elif \"rebuild\" in state: return 5 elif \"failed\" in state:", "= 0 all_in_packets = 0 all_in_errors = 0 all_out_bytes =", "target_id = info.strip().strip(\")\").split(\" \")[-1] else: state = info.strip().split(\" \")[-1] metrics['raid_state'].add_metric([target_id],", "import kvmagent from zstacklib.utils import http from zstacklib.utils import jsonobject", "len(o.splitlines()) == 0: return 
metrics.values() vg_names = o.splitlines() for name", "and thin pool free size', None, ['vg_name']), } r =", "in zstack_dir: if not os.path.exists(dir): continue cmd = \"du -bs", "result.\" % k) for v in latest_collect_result.itervalues(): ret.extend(v) Collector.__store_cache__(ret) return", "'physical_disk_state': GaugeMetricFamily('physical_disk_state', 'physical disk state', None, ['slot_number', 'disk_group']), 'physical_disk_temperature': GaugeMetricFamily('physical_disk_temperature',", "awk {\\'print $1\\'}\" % dir res = bash_o(cmd) zstack_used_capacity +=", "all_out_packets = 0 all_out_errors = 0 for intf in interfaces:", "if (time.time() - keys[0]) < 9: return cls.__collector_cache.get(keys[0]) return None", "eth in virtual_eths: continue if eth == 'bonding_masters': continue elif", "return False if isinstance(v, dict): for vk in v.iterkeys(): if", "True) os.chmod(service_path, 0644) reload_and_restart_service(service_name) return if linux.read_file(service_path) != service_conf: linux.write_file(service_path,", "@in_bash def start_prometheus_exporter(self, req): @in_bash def start_collectd(cmd): conf_path = os.path.join(os.path.dirname(cmd.binaryPath),", "cmd.binaryPath + '.log') ARGUMENTS = cmd.startupArguments if not ARGUMENTS: ARGUMENTS", "if r == 0: linux.set_fail_if_no_path() r, o, e = bash_roe(\"vgs", "CPU used by vm', None, ['vmUuid']) } r, pid_vm_map_str =", "Number|DiskGroup|Firmware state|Drive Temperature'\").strip().splitlines() slot_number = state = disk_group = \"unknown\"", "metrics = { 'cpu_occupied_by_vm': GaugeMetricFamily('cpu_occupied_by_vm', 'Percentage of CPU used by", "state: str \"\"\" state = state.lower() if state == \"optimal\":", "--no-headers u -C \\\"%s -name\\\" | awk '{print $2,$13}'\" %", "from zstacklib.utils.bash import * from zstacklib.utils.ip import get_nic_supported_max_speed logger =", "['ps_id']), 'ipmi_status': GaugeMetricFamily('ipmi_status', 'ipmi status', None, []), 'physical_network_interface': 
GaugeMetricFamily('physical_network_interface', 'physical", "\"collectd_exporter\" in path: return \"collectd_exporter\" elif \"node_exporter\" in path: return", "'VERSION': cmd.version, }) need_restart_collectd = False if os.path.exists(conf_path): with open(conf_path,", "read_number(fname): res = linux.read_file(fname) return 0 if not res else", "metrics.values() def convert_raid_state_to_int(state): \"\"\" :type state: str \"\"\" state =", "'physical_disk_temperature': GaugeMetricFamily('physical_disk_temperature', 'physical disk temperature', None, ['slot_number', 'disk_group']), } if", "0 elif \"rebuild\" in state: return 5 elif \"failed\" in", "ExecStop=/bin/sh -c 'pkill -TERM -f %s' Restart=always RestartSec=30s [Install] WantedBy=multi-user.target", "GaugeMetricFamily('host_network_all_out_bytes', 'Host all outbound traffic in bytes'), 'host_network_all_out_packages': GaugeMetricFamily('host_network_all_out_packages', 'Host", "Type \"cpu\" #TypeInstance \"unspecified\" GroupBy \"Host\" GroupBy \"TypeInstance\" CalculateNum false", "linux.read_file(fname) return 0 if not res else int(res) def collect_host_network_statistics():", "$NF}'\" % pv).strip() disks = linux.get_physical_disk(pv, False) for disk in", "'.log') ARGUMENTS = cmd.startupArguments if not ARGUMENTS: ARGUMENTS = \"\"", "% QEMU_CMD) if r != 0 or len(pid_vm_map_str.splitlines()) == 0:", "not None and collector_dict.get(name).is_alive(): continue collector_dict[name] = thread.ThreadFacade.run_in_thread(get_result_run, (c, name,))", "= \"unknown\" if disk_group is None else disk_group state =", "multipath_wwid = None if bash_r(\"dmsetup table %s | grep multipath\"", "afford frequent TUR. 
ref: ZSTAC-23416 if collect_node_disk_wwid_last_time is None: collect_node_disk_wwid_last_time", "is False: return False except Exception as e: logger.warn(\"got exception", "class PrometheusPlugin(kvmagent.KvmAgent): COLLECTD_PATH = \"/prometheus/collectdexporter/start\" @kvmagent.replyerror @in_bash def start_prometheus_exporter(self, req):", "true ReportByState true ValuesPercentage true </Plugin> <Plugin disk> Disk \"/^sd[a-z]$/\"", "install_colletor(self): class Collector(object): __collector_cache = {} @classmethod def __get_cache__(cls): #", "pid_vm_map[arr[0]] = arr[1] def collect(vm_pid_arr): vm_pid_arr_str = ','.join(vm_pid_arr) r, pid_cpu_usages_str", "r = bash_r(\"grep -Ev '^[[:space:]]*#|^[[:space:]]*$' /etc/multipath/wwids\") if r == 0:", "service_conf: linux.write_file(service_path, service_conf, True) logger.info(\"%s.service conf changed\" % service_name) os.chmod(service_path,", "pid_cpu_usage.split() pid = arr[0] vm_uuid = pid_vm_map[pid] cpu_usage = arr[1]", "t.is_alive(): time.sleep(0.5) continue for k in collector_dict.iterkeys(): if collector_dict[k].is_alive(): logger.warn(\"It", "disk_group is None else disk_group state = info.strip().split(\":\")[-1] metrics['physical_disk_state'].add_metric([slot_number, disk_group],", "-b -n 1 -p %s | grep qemu | awk", "by vm', None, ['vmUuid']) } r, pid_vm_map_str = bash_ro(\"ps --no-headers", "-C %s' % conf_path) else: bash_errorout('kill -TERM %s' % cpid)", "% pv).strip() disks = linux.get_physical_disk(pv, False) for disk in disks:", "</Plugin> ''' tmpt = Template(conf) conf = tmpt.render({ 'INTERVAL': cmd.interval,", "dict): for vk in v.iterkeys(): if vk == \"timestamp\" or", "typing from prometheus_client import start_http_server from prometheus_client.core import GaugeMetricFamily, REGISTRY", "'host_network_all_out_errors': GaugeMetricFamily('host_network_all_out_errors', 'Host all outbound traffic errors'), } metrics['host_network_all_in_bytes'].add_metric([], float(all_in_bytes))", "elif 
eth.startswith('outer'): continue elif eth.startswith('br_'): continue elif not eth: continue", "% k) for v in latest_collect_result.itervalues(): ret.extend(v) Collector.__store_cache__(ret) return ret", "= None if bash_r(\"dmsetup table %s | grep multipath\" %", "wwid', None, [\"disk\", \"wwid\"]) } pvs = bash_o(\"pvs --nolocking --noheading", "Disk \"/^hd[a-z]$/\" Disk \"/^vd[a-z]$/\" IgnoreSelected false </Plugin> <Plugin \"interface\"> {%", "not None: return cache for c in kvmagent.metric_collectors: name =", "None: collect_node_disk_wwid_last_time = time.time() elif time.time() - collect_node_disk_wwid_last_time < 60", "{ 'node_disk_wwid': GaugeMetricFamily('node_disk_wwid', 'node disk wwid', None, [\"disk\", \"wwid\"]) }", "'host_network_all_in_errors': GaugeMetricFamily('host_network_all_in_errors', 'Host all inbound traffic errors'), 'host_network_all_out_bytes': GaugeMetricFamily('host_network_all_out_bytes', 'Host", "\"unconfigured\" in state: return 15 else: return 100 def collect_raid_state():", "'raid state', None, ['target_id']), 'physical_disk_state': GaugeMetricFamily('physical_disk_state', 'physical disk state', None,", "-HUP %s' % mpid) else: if not mpid: bash_errorout('collectdmon --", "continue elif not eth: continue else: interfaces.append(eth) all_in_bytes = 0", "-C %s' % conf_path) else: bash_errorout('kill -HUP %s' % mpid)", "not afford frequent TUR. 
ref: ZSTAC-23416 if collect_node_disk_wwid_last_time is None:", "bash_ro(\"ps --no-headers u -C \\\"%s -name\\\" | awk '{print $2,$13}'\"", "False) for disk in disks: disk_name = disk.split(\"/\")[-1].strip() wwids =", "None, [\"disk\", \"wwid\"]) } pvs = bash_o(\"pvs --nolocking --noheading -o", "True def collect(self): global latest_collect_result ret = [] def get_result_run(f,", "%s contains illegal character None, details: \\n%s\" % (fname, r))", "if not res else int(res) def collect_host_network_statistics(): all_eths = os.listdir(\"/sys/class/net/\")", "str \"\"\" state = state.lower() if state == \"optimal\": return", "systemctl restart %s.service\" % service_name) service_name = get_systemd_name(binPath) service_path =", "if Collector.check(v[vk]) is False: return False except Exception as e:", "Collector.check(v.samples) if isinstance(v, list) or isinstance(v, tuple): for vl in", "def get_result_run(f, fname): # type: (typing.Callable, str) -> None global", "if r != 0 or len(pid_cpu_usages_str.splitlines()) == 0: return for", "pid_vm_map[pid] cpu_usage = arr[1] metrics['cpu_occupied_by_vm'].add_metric([vm_uuid], float(cpu_usage)) n = 10 for", "eth = eth.strip(' \\t\\n\\r') if eth in virtual_eths: continue if", "metrics = { 'power_supply': GaugeMetricFamily('power_supply', 'power supply', None, ['ps_id']), 'ipmi_status':", "start_exporter(cmd) return jsonobject.dumps(rsp) def install_colletor(self): class Collector(object): __collector_cache = {}", "kvs)[0] disk_group = disk_group.split(\" \")[-1] elif \"Drive Temperature\" in info:", "GaugeMetricFamily('cpu_occupied_by_vm', 'Percentage of CPU used by vm', None, ['vmUuid']) }", "type: (list) -> None cls.__collector_cache.clear() cls.__collector_cache.update({time.time(): ret}) @classmethod def check(cls,", "= nic.strip() try: # NOTE(weiw): sriov nic contains carrier file", "all_in_packets = 0 all_in_errors = 0 all_out_bytes = 0 all_out_packets", "zstack_env_path zstack_dir = ['/var/lib/zstack', 
'%s/../../../' % default_zstack_path, '/opt/zstack-dvd/', '/var/log/zstack', '/var/lib/mysql',", "(time.time() - keys[0]) < 9: return cls.__collector_cache.get(keys[0]) return None @classmethod", "collector [%s] has not been completed yet,\" \" temporarily use", "$1,$9}'\" % vm_pid_arr_str) if r != 0 or len(pid_cpu_usages_str.splitlines()) ==", "{ 'power_supply': GaugeMetricFamily('power_supply', 'power supply', None, ['ps_id']), 'ipmi_status': GaugeMetricFamily('ipmi_status', 'ipmi", "ZSTAC-23416 if collect_node_disk_wwid_last_time is None: collect_node_disk_wwid_last_time = time.time() elif time.time()", "tuple): for vl in v: if Collector.check(vl) is False: return", "</Plugin> <Plugin disk> Disk \"/^sd[a-z]$/\" Disk \"/^hd[a-z]$/\" Disk \"/^vd[a-z]$/\" IgnoreSelected", "req): @in_bash def start_collectd(cmd): conf_path = os.path.join(os.path.dirname(cmd.binaryPath), 'collectd.conf') conf =", "return cache for c in kvmagent.metric_collectors: name = \"%s.%s\" %", "threading.Thread] latest_collect_result = {} collectResultLock = threading.RLock() QEMU_CMD = kvmagent.get_qemu_path().split(\"/\")[-1]", "v is None: return False if isinstance(v, GaugeMetricFamily): return Collector.check(v.samples)", "eth == 'bonding_masters': continue elif eth.startswith('vnic'): continue elif eth.startswith('outer'): continue", "from collector %s contains illegal character None, details: \\n%s\" %", "len(pid_vm_map.keys()), n): collect(pid_vm_map.keys()[i:i + n]) return metrics.values() collect_node_disk_wwid_last_time = None", "in wwids])], 1) collect_node_disk_wwid_last_result = metrics.values() return metrics.values() kvmagent.register_prometheus_collector(collect_host_network_statistics) kvmagent.register_prometheus_collector(collect_host_capacity_statistics)", "</Plugin> <Plugin network> Server \"localhost\" \"25826\" </Plugin> ''' tmpt =", "@in_bash def start_exporter(cmd): EXPORTER_PATH = cmd.binaryPath LOG_FILE = os.path.join(os.path.dirname(EXPORTER_PATH), cmd.binaryPath", 
"} r = bash_r(\"grep -Ev '^[[:space:]]*#|^[[:space:]]*$' /etc/multipath/wwids\") if r ==", "info: kvs = info.replace(\"Drive's position: \", \"\").split(\",\") disk_group = filter(lambda", "in check value %s: %s\" % (v, e)) return True", "info.strip().split(\":\")[-1] metrics['physical_disk_state'].add_metric([slot_number, disk_group], convert_disk_state_to_int(state)) return metrics.values() def collect_equipment_state(): metrics =", "== 'lo': continue if eth == 'bonding_masters': continue elif eth.startswith('vnic'):", "in packages'), 'host_network_all_out_errors': GaugeMetricFamily('host_network_all_out_errors', 'Host all outbound traffic errors'), }", "for ZSTAC-23539 reload_and_restart_service(service_name) @lock.file_lock(\"/run/collectd-conf.lock\", locker=lock.Flock()) def start_collectd_exporter(cmd): start_collectd(cmd) start_exporter(cmd) @in_bash", "GaugeMetricFamily('power_supply', 'power supply', None, ['ps_id']), 'ipmi_status': GaugeMetricFamily('ipmi_status', 'ipmi status', None,", "exception in check value %s: %s\" % (v, e)) return", "elif \"failed\" in state: return 10 elif \"unconfigured\" in state:", "false </Plugin> <Plugin virt> Connection \"qemu:///system\" RefreshInterval {{INTERVAL}} HostnameFormat name", "return 100 def convert_disk_state_to_int(state): \"\"\" :type state: str \"\"\" state", "true ValuesPercentage true </Plugin> <Plugin disk> Disk \"/^sd[a-z]$/\" Disk \"/^hd[a-z]$/\"", "-- -C %s' % conf_path) else: bash_errorout('kill -TERM %s' %", "cmd in para.cmds: if \"collectd_exporter\" in cmd.binaryPath: start_collectd_exporter(cmd) else: start_exporter(cmd)", "conf: with open(conf_path, 'w') as fd: fd.write(conf) need_restart_collectd = True", "GaugeMetricFamily('vg_avail', 'volume group and thin pool free size', None, ['vg_name']),", "memory LoadPlugin network LoadPlugin virt <Plugin aggregation> <Aggregation> #Host \"unspecified\"", "'w') as fd: fd.write(conf) need_restart_collectd = True else: with open(conf_path,", "the last 
calculation result.\" % k) for v in latest_collect_result.itervalues():", "1 except Exception as e: status = True speed =", "r != 0 or len(pid_cpu_usages_str.splitlines()) == 0: return for pid_cpu_usage", "-bs %s | awk {\\'print $1\\'}\" % dir res =", "disk_group = \"JBOD\" if disk_group == \"unknown\" and info.count(\"JBOD\") >", "if log.endswith('/pushgateway.log') else log, binPath) if not os.path.exists(service_path): linux.write_file(service_path, service_conf,", "eth in all_eths: eth = eth.strip(' \\t\\n\\r') if eth in", "!= 0 or len(o.splitlines()) == 0: return metrics.values() vg_names =", "ret REGISTRY.register(Collector()) def start(self): http_server = kvmagent.get_http_server() http_server.register_async_uri(self.COLLECTD_PATH, self.start_prometheus_exporter) self.install_colletor()", "in latest_collect_result.itervalues(): ret.extend(v) Collector.__store_cache__(ret) return ret REGISTRY.register(Collector()) def start(self): http_server", "IgnoreSelected false </Plugin> <Plugin \"interface\"> {% for i in INTERFACES", "kvmagent.register_prometheus_collector(collect_lvm_capacity_statistics) kvmagent.register_prometheus_collector(collect_raid_state) kvmagent.register_prometheus_collector(collect_equipment_state) class PrometheusPlugin(kvmagent.KvmAgent): COLLECTD_PATH = \"/prometheus/collectdexporter/start\" @kvmagent.replyerror @in_bash", "collect(vm_pid_arr): vm_pid_arr_str = ','.join(vm_pid_arr) r, pid_cpu_usages_str = bash_ro(\"top -b -n", "float(size)) metrics['vg_avail'].add_metric([name], float(avail)) return metrics.values() def convert_raid_state_to_int(state): \"\"\" :type state:", "os.path.exists(conf_path): with open(conf_path, 'r') as fd: old_conf = fd.read() if", "= arr[1] def collect(vm_pid_arr): vm_pid_arr_str = ','.join(vm_pid_arr) r, pid_cpu_usages_str =", "-LALL -aAll\") != 0: return metrics.values() raid_info = bash_o(\"/opt/MegaRAID/MegaCli/MegaCli64 -LDInfo", "zstack_env_path != default_zstack_path: default_zstack_path = 
zstack_env_path zstack_dir = ['/var/lib/zstack', '%s/../../../'", "info: slot_number = info.strip().split(\" \")[-1] elif \"DiskGroup\" in info: kvs", "REGISTRY.register(Collector()) def start(self): http_server = kvmagent.get_http_server() http_server.register_async_uri(self.COLLECTD_PATH, self.start_prometheus_exporter) self.install_colletor() start_http_server(7069)", "return 0 if not res else int(res) def collect_host_network_statistics(): all_eths", "kvs = info.replace(\"Drive's position: \", \"\").split(\",\") disk_group = filter(lambda x:", "kvmagent.register_prometheus_collector(collect_node_disk_wwid) if misc.isMiniHost(): kvmagent.register_prometheus_collector(collect_lvm_capacity_statistics) kvmagent.register_prometheus_collector(collect_raid_state) kvmagent.register_prometheus_collector(collect_equipment_state) class PrometheusPlugin(kvmagent.KvmAgent): COLLECTD_PATH =", "r, pid_vm_map_str = bash_ro(\"ps --no-headers u -C \\\"%s -name\\\" |", "False if os.path.exists(conf_path): with open(conf_path, 'r') as fd: old_conf =", "collectResultLock = threading.RLock() QEMU_CMD = kvmagent.get_qemu_path().split(\"/\")[-1] def read_number(fname): res =", "\"degraded\": return 5 else: return 100 def convert_disk_state_to_int(state): \"\"\" :type", "time.time() - collect_node_disk_wwid_last_time < 60 and collect_node_disk_wwid_last_result is not None:", "metrics.values() vg_names = o.splitlines() for name in vg_names: name =", "'vg_avail': GaugeMetricFamily('vg_avail', 'volume group and thin pool free size', None,", "collect(pid_vm_map.keys()[i:i + n]) return metrics.values() collect_node_disk_wwid_last_time = None collect_node_disk_wwid_last_result =", "if zstack_env_path and zstack_env_path != default_zstack_path: default_zstack_path = zstack_env_path zstack_dir", "0: return metrics.values() pid_vm_map_str = pid_vm_map_str.replace(\",debug-threads=on\", \"\").replace(\"guest=\", \"\") '''pid_vm_map_str samples:", "= disk_group = \"unknown\" for info in 
disk_info: if \"Slot", "state', None, ['target_id']), 'physical_disk_state': GaugeMetricFamily('physical_disk_state', 'physical disk state', None, ['slot_number',", "-TERM %s' % cpid) if need_restart_collectd: if not mpid: bash_errorout('collectdmon", "'node disk wwid', None, [\"disk\", \"wwid\"]) } pvs = bash_o(\"pvs", "%s' % conf_path) else: bash_errorout('kill -HUP %s' % mpid) else:", "-name\\\" | awk '{print $2,$13}'\" % QEMU_CMD) if r !=", "v in latest_collect_result.itervalues(): ret.extend(v) Collector.__store_cache__(ret) return ret REGISTRY.register(Collector()) def start(self):", "continue collector_dict[name] = thread.ThreadFacade.run_in_thread(get_result_run, (c, name,)) for i in range(7):", "return None @classmethod def __store_cache__(cls, ret): # type: (list) ->", "state: return 0 elif \"rebuild\" in state: return 5 elif", "r != 0 or len(pid_vm_map_str.splitlines()) == 0: return metrics.values() pid_vm_map_str", "start_prometheus_exporter(self, req): @in_bash def start_collectd(cmd): conf_path = os.path.join(os.path.dirname(cmd.binaryPath), 'collectd.conf') conf", "\"TypeInstance\" CalculateNum false CalculateSum false CalculateAverage true CalculateMinimum false CalculateMaximum", "for info in raid_info: if \"Target Id\" in info: target_id", "endfor -%} IgnoreSelected false </Plugin> <Plugin memory> ValuesAbsolute true ValuesPercentage", "-- -C %s' % conf_path) def run_in_systemd(binPath, args, log): def", "v: if Collector.check(vl) is False: return False if isinstance(v, dict):", "info.split(\":\")[1].split(\"C\")[0] metrics['physical_disk_temperature'].add_metric([slot_number, disk_group], int(temp)) else: disk_group = \"JBOD\" if disk_group", "virtual_eths = os.listdir(\"/sys/devices/virtual/net/\") interfaces = [] for eth in all_eths:", "if old_conf != conf: with open(conf_path, 'w') as fd: fd.write(conf)", "@classmethod def __get_cache__(cls): # type: () -> list keys =", "0 or len(pid_cpu_usages_str.splitlines()) == 0: return for pid_cpu_usage 
in pid_cpu_usages_str.splitlines():", "= state = \"unknown\" for info in raid_info: if \"Target", "- collect_node_disk_wwid_last_time < 60 and collect_node_disk_wwid_last_result is not None: return", "if v is None: return False if isinstance(v, GaugeMetricFamily): return", "None def collect_node_disk_wwid(): global collect_node_disk_wwid_last_time global collect_node_disk_wwid_last_result # NOTE(weiw): some", "= '''Interval {{INTERVAL}} # version {{VERSION}} FQDNLookup false LoadPlugin syslog", "for dir in zstack_dir: if not os.path.exists(dir): continue cmd =", "{} @classmethod def __get_cache__(cls): # type: () -> list keys", "or vk == \"exemplar\": continue if Collector.check(v[vk]) is False: return", "global latest_collect_result r = f() if not Collector.check(r): logger.warn(\"result from", "info.strip().split(\" \")[-1] elif \"DiskGroup\" in info: kvs = info.replace(\"Drive's position:", "avail = lvm.get_vg_size(name, False) metrics['vg_size'].add_metric([name], float(size)) metrics['vg_avail'].add_metric([name], float(avail)) return metrics.values()", "temporarily use the last calculation result.\" % k) for v", "if not mpid: bash_errorout('collectdmon -- -C %s' % conf_path) def", "%s' Restart=always RestartSec=30s [Install] WantedBy=multi-user.target ''' % (service_name, binPath, args,", "changed\" % service_name) os.chmod(service_path, 0644) # restart service regardless of", "$2,$13}'\" % QEMU_CMD) if r != 0 or len(pid_vm_map_str.splitlines()) ==", "<Plugin disk> Disk \"/^sd[a-z]$/\" Disk \"/^hd[a-z]$/\" Disk \"/^vd[a-z]$/\" IgnoreSelected false", "% (c.__module__, c.__name__) if collector_dict.get(name) is not None and collector_dict.get(name).is_alive():", "-E 'Slot Number|DiskGroup|Firmware state|Drive Temperature'\").strip().splitlines() slot_number = state = disk_group", "interfaces.append(eth) for cmd in para.cmds: if \"collectd_exporter\" in cmd.binaryPath: start_collectd_exporter(cmd)", "% pv) == 0: multipath_wwid = bash_o(\"udevadm info -n %s", 
"cmd = \"du -bs %s | awk {\\'print $1\\'}\" %", "k in collector_dict.iterkeys(): if collector_dict[k].is_alive(): logger.warn(\"It seems that the collector", "as e: logger.warn(\"got exception in check value %s: %s\" %", "= '/etc/systemd/system/%s.service' % service_name service_conf = ''' [Unit] Description=prometheus %s", "<filename>kvmagent/kvmagent/plugins/prometheus.py import os.path import threading import typing from prometheus_client import", "default_zstack_path = '/usr/local/zstack/apache-tomcat/webapps/zstack' zstack_env_path = os.environ.get('ZSTACK_HOME', None) if zstack_env_path and", "arr = pid_vm.split() if len(arr) == 2: pid_vm_map[arr[0]] = arr[1]", "GaugeMetricFamily('node_disk_wwid', 'node disk wwid', None, [\"disk\", \"wwid\"]) } pvs =", "__store_cache__(cls, ret): # type: (list) -> None cls.__collector_cache.clear() cls.__collector_cache.update({time.time(): ret})", "== 0: return None if (time.time() - keys[0]) < 9:", "if Collector.check(vl) is False: return False if isinstance(v, dict): for", "name in vg_names: name = name.strip() size, avail = lvm.get_vg_size(name,", "vg_names = o.splitlines() for name in vg_names: name = name.strip()", "'Host all outbound traffic errors'), } metrics['host_network_all_in_bytes'].add_metric([], float(all_in_bytes)) metrics['host_network_all_in_packages'].add_metric([], float(all_in_packets))", "for k in collector_dict.iterkeys(): if collector_dict[k].is_alive(): logger.warn(\"It seems that the", "for v in latest_collect_result.itervalues(): ret.extend(v) Collector.__store_cache__(ret) return ret REGISTRY.register(Collector()) def", "= metrics.values() return metrics.values() kvmagent.register_prometheus_collector(collect_host_network_statistics) kvmagent.register_prometheus_collector(collect_host_capacity_statistics) kvmagent.register_prometheus_collector(collect_vm_statistics) kvmagent.register_prometheus_collector(collect_node_disk_wwid) if misc.isMiniHost():", "status) return metrics.values() def 
collect_vm_statistics(): metrics = { 'cpu_occupied_by_vm': GaugeMetricFamily('cpu_occupied_by_vm',", "all_in_packets += read_number(\"/sys/class/net/{}/statistics/rx_packets\".format(intf)) all_in_errors += read_number(\"/sys/class/net/{}/statistics/rx_errors\".format(intf)) all_out_bytes += read_number(\"/sys/class/net/{}/statistics/tx_bytes\".format(intf)) all_out_packets", "def run_in_systemd(binPath, args, log): def get_systemd_name(path): if \"collectd_exporter\" in path:", "nic) == 1 except Exception as e: status = True", "--noheading -o pv_name\").strip().splitlines() for pv in pvs: multipath_wwid = None", "conf_path = os.path.join(os.path.dirname(cmd.binaryPath), 'collectd.conf') conf = '''Interval {{INTERVAL}} # version", "eth.strip(' \\t\\n\\r') if eth in virtual_eths: continue if eth ==", "state = info.strip().split(\":\")[-1] metrics['physical_disk_state'].add_metric([slot_number, disk_group], convert_disk_state_to_int(state)) return metrics.values() def collect_equipment_state():", "60 and collect_node_disk_wwid_last_result is not None: return collect_node_disk_wwid_last_result metrics =", "| grep -E '^S: disk/by-id/dm-uuid' | awk -F '-' '{print", "info -n %s | grep -E '^S: disk/by-id' | awk", "disk> Disk \"/^sd[a-z]$/\" Disk \"/^hd[a-z]$/\" Disk \"/^vd[a-z]$/\" IgnoreSelected false </Plugin>", "rsp = kvmagent.AgentResponse() eths = bash_o(\"ls /sys/class/net\").split() interfaces = []", "} if bash_r(\"/opt/MegaRAID/MegaCli/MegaCli64 -LDInfo -LALL -aAll\") != 0: return metrics.values()", "try: if v is None: return False if isinstance(v, GaugeMetricFamily):", "elif \"node_exporter\" in path: return \"node_exporter\" elif \"pushgateway\" in path:", "Interface \"{{i}}\" {% endfor -%} IgnoreSelected false </Plugin> <Plugin memory>", "bytes'), 'host_network_all_in_packages': GaugeMetricFamily('host_network_all_in_packages', 'Host all inbound traffic in packages'), 'host_network_all_in_errors':", "> 0: metrics['node_disk_wwid'].add_metric([disk_name, 
\";\".join([w.strip() for w in wwids])], 1) collect_node_disk_wwid_last_result", "interfaces, 'VERSION': cmd.version, }) need_restart_collectd = False if os.path.exists(conf_path): with", "all_out_errors += read_number(\"/sys/class/net/{}/statistics/tx_errors\".format(intf)) metrics = { 'host_network_all_in_bytes': GaugeMetricFamily('host_network_all_in_bytes', 'Host all", "from kvmagent import kvmagent from zstacklib.utils import http from zstacklib.utils", "GaugeMetricFamily('host_network_all_out_errors', 'Host all outbound traffic errors'), } metrics['host_network_all_in_bytes'].add_metric([], float(all_in_bytes)) metrics['host_network_all_in_packages'].add_metric([],", "has not been completed yet,\" \" temporarily use the last", "not res else int(res) def collect_host_network_statistics(): all_eths = os.listdir(\"/sys/class/net/\") virtual_eths", "name BlockDevice \"/:hd[a-z]/\" IgnoreSelected true ExtraStats \"vcpu memory\" </Plugin> <Plugin", "ps_info.splitlines(): info = info.strip() ps_id = info.split(\"|\")[0].strip().split(\" \")[0] health =", "metrics = { 'raid_state': GaugeMetricFamily('raid_state', 'raid state', None, ['target_id']), 'physical_disk_state':", "nic in nics: nic = nic.strip() try: # NOTE(weiw): sriov", "-n %s | grep -E '^S: disk/by-id/dm-uuid' | awk -F", "% nic) == 1 except Exception as e: status =", "kvmagent.register_prometheus_collector(collect_host_network_statistics) kvmagent.register_prometheus_collector(collect_host_capacity_statistics) kvmagent.register_prometheus_collector(collect_vm_statistics) kvmagent.register_prometheus_collector(collect_node_disk_wwid) if misc.isMiniHost(): kvmagent.register_prometheus_collector(collect_lvm_capacity_statistics) kvmagent.register_prometheus_collector(collect_raid_state) kvmagent.register_prometheus_collector(collect_equipment_state) class", "os.path.join(os.path.dirname(EXPORTER_PATH), cmd.binaryPath + '.log') ARGUMENTS = cmd.startupArguments if not ARGUMENTS:", "in disk_info: if \"Slot 
Number\" in info: slot_number = info.strip().split(\"", "arr[1] def collect(vm_pid_arr): vm_pid_arr_str = ','.join(vm_pid_arr) r, pid_cpu_usages_str = bash_ro(\"top", "= ''' [Unit] Description=prometheus %s After=network.target [Service] ExecStart=/bin/sh -c '%s", "5 else: return 100 def convert_disk_state_to_int(state): \"\"\" :type state: str", "all_out_bytes += read_number(\"/sys/class/net/{}/statistics/tx_bytes\".format(intf)) all_out_packets += read_number(\"/sys/class/net/{}/statistics/tx_packets\".format(intf)) all_out_errors += read_number(\"/sys/class/net/{}/statistics/tx_errors\".format(intf)) metrics", "pv).strip() disks = linux.get_physical_disk(pv, False) for disk in disks: disk_name", "for info in disk_info: if \"Slot Number\" in info: slot_number", "for c in kvmagent.metric_collectors: name = \"%s.%s\" % (c.__module__, c.__name__)", "WantedBy=multi-user.target ''' % (service_name, binPath, args, '/dev/null' if log.endswith('/pushgateway.log') else", "\"node_exporter\" in path: return \"node_exporter\" elif \"pushgateway\" in path: return", "if eth == 'bonding_masters': continue elif eth.startswith('vnic'): continue elif eth.startswith('outer'):", "+= read_number(\"/sys/class/net/{}/statistics/tx_errors\".format(intf)) metrics = { 'host_network_all_in_bytes': GaugeMetricFamily('host_network_all_in_bytes', 'Host all inbound", "None, ['ps_id']), 'ipmi_status': GaugeMetricFamily('ipmi_status', 'ipmi status', None, []), 'physical_network_interface': GaugeMetricFamily('physical_network_interface',", "'volume group size', None, ['vg_name']), 'vg_avail': GaugeMetricFamily('vg_avail', 'volume group and", "= info.split(\"|\")[0].strip().split(\" \")[0] health = 10 if \"fail\" in info.lower()", "http from zstacklib.utils import jsonobject from zstacklib.utils import lock from", "-f %s' Restart=always RestartSec=30s [Install] WantedBy=multi-user.target ''' % (service_name, binPath,", "jsonobject from zstacklib.utils import lock from zstacklib.utils import lvm 
from", "health = 10 if \"fail\" in info.lower() or \"lost\" in", "disks: disk_name = disk.split(\"/\")[-1].strip() wwids = bash_o(\"udevadm info -n %s", "None if (time.time() - keys[0]) < 9: return cls.__collector_cache.get(keys[0]) return", "disks = linux.get_physical_disk(pv, False) for disk in disks: disk_name =", "traffic errors'), 'host_network_all_out_bytes': GaugeMetricFamily('host_network_all_out_bytes', 'Host all outbound traffic in bytes'),", "float(all_out_packets)) metrics['host_network_all_out_errors'].add_metric([], float(all_out_errors)) return metrics.values() def collect_host_capacity_statistics(): default_zstack_path = '/usr/local/zstack/apache-tomcat/webapps/zstack'", "None: return cache for c in kvmagent.metric_collectors: name = \"%s.%s\"", "mc info\")) nics = bash_o(\"find /sys/class/net -type l -not -lname", "r)) return with collectResultLock: latest_collect_result[fname] = r cache = Collector.__get_cache__()", "convert_disk_state_to_int(state)) return metrics.values() def collect_equipment_state(): metrics = { 'power_supply': GaugeMetricFamily('power_supply',", "else: bash_errorout('kill -TERM %s' % cpid) if need_restart_collectd: if not", "else: interfaces.append(eth) all_in_bytes = 0 all_in_packets = 0 all_in_errors =", "= time.time() elif time.time() - collect_node_disk_wwid_last_time < 60 and collect_node_disk_wwid_last_result", "multipath\" % pv) == 0: multipath_wwid = bash_o(\"udevadm info -n", "</Aggregation> </Plugin> <Plugin cpu> ReportByCpu true ReportByState true ValuesPercentage true", "= zstack_env_path zstack_dir = ['/var/lib/zstack', '%s/../../../' % default_zstack_path, '/opt/zstack-dvd/', '/var/log/zstack',", "pid_vm_map_str.splitlines(): arr = pid_vm.split() if len(arr) == 2: pid_vm_map[arr[0]] =", "all_in_bytes = 0 all_in_packets = 0 all_in_errors = 0 all_out_bytes", "0644) reload_and_restart_service(service_name) return if linux.read_file(service_path) != service_conf: linux.write_file(service_path, service_conf, True)", 
"0 for dir in zstack_dir: if not os.path.exists(dir): continue cmd", "log.get_logger(__name__) collector_dict = {} # type: Dict[str, threading.Thread] latest_collect_result =", "(int, str) if r == 0: for info in ps_info.splitlines():", "= disk.split(\"/\")[-1].strip() wwids = bash_o(\"udevadm info -n %s | grep", "REGISTRY from kvmagent import kvmagent from zstacklib.utils import http from", "misc from zstacklib.utils import thread from zstacklib.utils.bash import * from", "-v '^lvm-pv' | sort\" % disk).strip().splitlines() if multipath_wwid is not", "thread from zstacklib.utils.bash import * from zstacklib.utils.ip import get_nic_supported_max_speed logger", "state = state.lower() if state == \"optimal\": return 0 elif", "= r cache = Collector.__get_cache__() if cache is not None:", "| grep multipath\" % pv) == 0: multipath_wwid = bash_o(\"udevadm", "conf = '''Interval {{INTERVAL}} # version {{VERSION}} FQDNLookup false LoadPlugin", "= bash_roe(\"vgs --nolocking --noheading -oname\") if r != 0 or", "'INTERFACES': interfaces, 'VERSION': cmd.version, }) need_restart_collectd = False if os.path.exists(conf_path):", "return 100 def collect_raid_state(): metrics = { 'raid_state': GaugeMetricFamily('raid_state', 'raid", "not eth: continue else: interfaces.append(eth) for cmd in para.cmds: if", "if \"Target Id\" in info: target_id = info.strip().strip(\")\").split(\" \")[-1] else:", "return metrics.values() def collect_vm_statistics(): metrics = { 'cpu_occupied_by_vm': GaugeMetricFamily('cpu_occupied_by_vm', 'Percentage", "metrics.values() collect_node_disk_wwid_last_time = None collect_node_disk_wwid_last_result = None def collect_node_disk_wwid(): global", "-E '^S: disk/by-id' | awk -F '/' '{print $NF}' |", "wwids.append(multipath_wwid) if len(wwids) > 0: metrics['node_disk_wwid'].add_metric([disk_name, \";\".join([w.strip() for w in", "%s.service\" % service_name) service_name = get_systemd_name(binPath) service_path = '/etc/systemd/system/%s.service' %", 
"float(all_out_bytes)) metrics['host_network_all_out_packages'].add_metric([], float(all_out_packets)) metrics['host_network_all_out_errors'].add_metric([], float(all_out_errors)) return metrics.values() def collect_host_capacity_statistics(): default_zstack_path", "need_restart_collectd = False if os.path.exists(conf_path): with open(conf_path, 'r') as fd:", "= True cpid = linux.find_process_by_command('collectd', [conf_path]) mpid = linux.find_process_by_command('collectdmon', [conf_path])", "%s' % conf_path) def run_in_systemd(binPath, args, log): def get_systemd_name(path): if", "LoadPlugin memory LoadPlugin network LoadPlugin virt <Plugin aggregation> <Aggregation> #Host", "state = info.strip().split(\" \")[-1] metrics['raid_state'].add_metric([target_id], convert_raid_state_to_int(state)) disk_info = bash_o( \"/opt/MegaRAID/MegaCli/MegaCli64", "\"unspecified\" Type \"cpu\" #TypeInstance \"unspecified\" GroupBy \"Host\" GroupBy \"TypeInstance\" CalculateNum", "\"Drive Temperature\" in info: temp = info.split(\":\")[1].split(\"C\")[0] metrics['physical_disk_temperature'].add_metric([slot_number, disk_group], int(temp))", "bash_errorout(\"systemctl daemon-reload && systemctl restart %s.service\" % service_name) service_name =", "network> Server \"localhost\" \"25826\" </Plugin> ''' tmpt = Template(conf) conf", "= kvmagent.AgentResponse() eths = bash_o(\"ls /sys/class/net\").split() interfaces = [] for", "len(arr) == 2: pid_vm_map[arr[0]] = arr[1] def collect(vm_pid_arr): vm_pid_arr_str =", "multipath_wwid is not None: wwids.append(multipath_wwid) if len(wwids) > 0: metrics['node_disk_wwid'].add_metric([disk_name,", "start_collectd_exporter(cmd) else: start_exporter(cmd) return jsonobject.dumps(rsp) def install_colletor(self): class Collector(object): __collector_cache", "'disk_group']), 'physical_disk_temperature': GaugeMetricFamily('physical_disk_temperature', 'physical disk temperature', None, ['slot_number', 'disk_group']), }", "restart service regardless of 
conf changes, for ZSTAC-23539 reload_and_restart_service(service_name) @lock.file_lock(\"/run/collectd-conf.lock\",", "'cpu_occupied_by_vm': GaugeMetricFamily('cpu_occupied_by_vm', 'Percentage of CPU used by vm', None, ['vmUuid'])", "/sys/class/net\").split() interfaces = [] for eth in eths: eth =", "size, avail = lvm.get_vg_size(name, False) metrics['vg_size'].add_metric([name], float(size)) metrics['vg_avail'].add_metric([name], float(avail)) return", "Template(conf) conf = tmpt.render({ 'INTERVAL': cmd.interval, 'INTERFACES': interfaces, 'VERSION': cmd.version,", "GroupBy \"TypeInstance\" CalculateNum false CalculateSum false CalculateAverage true CalculateMinimum false", "return metrics.values() def convert_raid_state_to_int(state): \"\"\" :type state: str \"\"\" state", "#Host \"unspecified\" Plugin \"cpu\" #PluginInstance \"unspecified\" Type \"cpu\" #TypeInstance \"unspecified\"", "-E '^S: disk/by-id/dm-uuid' | awk -F '-' '{print $NF}'\" %", "= 10 for i in range(0, len(pid_vm_map.keys()), n): collect(pid_vm_map.keys()[i:i +", "errors'), 'host_network_all_out_bytes': GaugeMetricFamily('host_network_all_out_bytes', 'Host all outbound traffic in bytes'), 'host_network_all_out_packages':", "nic.strip() try: # NOTE(weiw): sriov nic contains carrier file but", "os.path.exists(service_path): linux.write_file(service_path, service_conf, True) os.chmod(service_path, 0644) reload_and_restart_service(service_name) return if linux.read_file(service_path)", "False if isinstance(v, GaugeMetricFamily): return Collector.check(v.samples) if isinstance(v, list) or", "c.__name__) if collector_dict.get(name) is not None and collector_dict.get(name).is_alive(): continue collector_dict[name]", "if os.path.exists(conf_path): with open(conf_path, 'r') as fd: old_conf = fd.read()", "-n %s | grep -E '^S: disk/by-id' | awk -F", "Collector.__get_cache__() if cache is not None: return cache for c", "return metrics.values() collect_node_disk_wwid_last_time = None 
collect_node_disk_wwid_last_result = None def collect_node_disk_wwid():", "the collector [%s] has not been completed yet,\" \" temporarily", "in INTERFACES -%} Interface \"{{i}}\" {% endfor -%} IgnoreSelected false", "all inbound traffic in packages'), 'host_network_all_in_errors': GaugeMetricFamily('host_network_all_in_errors', 'Host all inbound", "in bytes') } zstack_used_capacity = 0 for dir in zstack_dir:", "info.split(\"|\")[0].strip().split(\" \")[0] health = 10 if \"fail\" in info.lower() or", "$1\\'}\" % dir res = bash_o(cmd) zstack_used_capacity += int(res) metrics['zstack_used_capacity_in_bytes'].add_metric([],", "isinstance(v, GaugeMetricFamily): return Collector.check(v.samples) if isinstance(v, list) or isinstance(v, tuple):", "\\n%s\" % (fname, r)) return with collectResultLock: latest_collect_result[fname] = r", "collect_node_disk_wwid_last_result is not None: return collect_node_disk_wwid_last_result metrics = { 'node_disk_wwid':", "= arr[1] metrics['cpu_occupied_by_vm'].add_metric([vm_uuid], float(cpu_usage)) n = 10 for i in", "supply'\") # type: (int, str) if r == 0: for", "\"\"\" :type state: str \"\"\" state = state.lower() if state", "nics: nic = nic.strip() try: # NOTE(weiw): sriov nic contains", "interface LoadPlugin memory LoadPlugin network LoadPlugin virt <Plugin aggregation> <Aggregation>", "= kvmagent.get_qemu_path().split(\"/\")[-1] def read_number(fname): res = linux.read_file(fname) return 0 if", "bash_o(cmd) zstack_used_capacity += int(res) metrics['zstack_used_capacity_in_bytes'].add_metric([], float(zstack_used_capacity)) return metrics.values() def collect_lvm_capacity_statistics():", "'host_network_all_in_packages': GaugeMetricFamily('host_network_all_in_packages', 'Host all inbound traffic in packages'), 'host_network_all_in_errors': GaugeMetricFamily('host_network_all_in_errors',", "False if isinstance(v, dict): for vk in v.iterkeys(): if vk", "int(res) def collect_host_network_statistics(): all_eths = 
os.listdir(\"/sys/class/net/\") virtual_eths = os.listdir(\"/sys/devices/virtual/net/\") interfaces", "> 0 else disk_group disk_group = \"unknown\" if disk_group is", "not mpid: bash_errorout('collectdmon -- -C %s' % conf_path) else: bash_errorout('kill", "10 elif \"unconfigured\" in state: return 15 else: return 100", "ret.extend(v) Collector.__store_cache__(ret) return ret REGISTRY.register(Collector()) def start(self): http_server = kvmagent.get_http_server()", "1 -p %s | grep qemu | awk '{print $1,$9}'\"", "bash_o(\"ls /sys/class/net\").split() interfaces = [] for eth in eths: eth", "for info in ps_info.splitlines(): info = info.strip() ps_id = info.split(\"|\")[0].strip().split(\"", "ret}) @classmethod def check(cls, v): try: if v is None:", "'Percentage of CPU used by vm', None, ['vmUuid']) } r,", "read status = linux.read_file(\"/sys/class/net/%s/carrier\" % nic) == 1 except Exception", "vm', None, ['vmUuid']) } r, pid_vm_map_str = bash_ro(\"ps --no-headers u", "\"exemplar\": continue if Collector.check(v[vk]) is False: return False except Exception", "metrics['host_network_all_out_bytes'].add_metric([], float(all_out_bytes)) metrics['host_network_all_out_packages'].add_metric([], float(all_out_packets)) metrics['host_network_all_out_errors'].add_metric([], float(all_out_errors)) return metrics.values() def collect_host_capacity_statistics():", "/sys/class/net -type l -not -lname '*virtual*' -printf '%f\\\\n'\").splitlines() if len(nics)", "% conf_path) else: bash_errorout('kill -HUP %s' % mpid) else: if", "ExecStart=/bin/sh -c '%s %s > %s 2>&1' ExecStop=/bin/sh -c 'pkill", "'physical disk temperature', None, ['slot_number', 'disk_group']), } if bash_r(\"/opt/MegaRAID/MegaCli/MegaCli64 -LDInfo", "= name.strip() size, avail = lvm.get_vg_size(name, False) metrics['vg_size'].add_metric([name], float(size)) metrics['vg_avail'].add_metric([name],", "linux.write_file(service_path, service_conf, True) os.chmod(service_path, 0644) 
reload_and_restart_service(service_name) return if linux.read_file(service_path) !=", "use the last calculation result.\" % k) for v in", "mpid) else: if not mpid: bash_errorout('collectdmon -- -C %s' %", "-oname\") if r != 0 or len(o.splitlines()) == 0: return", "http_server = kvmagent.get_http_server() http_server.register_async_uri(self.COLLECTD_PATH, self.start_prometheus_exporter) self.install_colletor() start_http_server(7069) def stop(self): pass", "def start_collectd(cmd): conf_path = os.path.join(os.path.dirname(cmd.binaryPath), 'collectd.conf') conf = '''Interval {{INTERVAL}}", "jsonobject.dumps(rsp) def install_colletor(self): class Collector(object): __collector_cache = {} @classmethod def", "\"timestamp\" or vk == \"exemplar\": continue if Collector.check(v[vk]) is False:", "all_eths: eth = eth.strip(' \\t\\n\\r') if eth in virtual_eths: continue", "state: str \"\"\" state = state.lower() if \"online\" in state", "-o pv_name\").strip().splitlines() for pv in pvs: multipath_wwid = None if", "para = jsonobject.loads(req[http.REQUEST_BODY]) rsp = kvmagent.AgentResponse() eths = bash_o(\"ls /sys/class/net\").split()", "c in kvmagent.metric_collectors: name = \"%s.%s\" % (c.__module__, c.__name__) if", "\"Target Id\" in info: target_id = info.strip().strip(\")\").split(\" \")[-1] else: state", "grep -E 'Slot Number|DiskGroup|Firmware state|Drive Temperature'\").strip().splitlines() slot_number = state =", "collect_host_network_statistics(): all_eths = os.listdir(\"/sys/class/net/\") virtual_eths = os.listdir(\"/sys/devices/virtual/net/\") interfaces = []", "cpu LoadPlugin disk LoadPlugin interface LoadPlugin memory LoadPlugin network LoadPlugin", "Collector(object): __collector_cache = {} @classmethod def __get_cache__(cls): # type: ()", "RestartSec=30s [Install] WantedBy=multi-user.target ''' % (service_name, binPath, args, '/dev/null' if", "os.environ.get('ZSTACK_HOME', None) if zstack_env_path and zstack_env_path != default_zstack_path: 
default_zstack_path =", "bash_r(\"grep -Ev '^[[:space:]]*#|^[[:space:]]*$' /etc/multipath/wwids\") if r == 0: linux.set_fail_if_no_path() r,", "GaugeMetricFamily('host_network_all_in_errors', 'Host all inbound traffic errors'), 'host_network_all_out_bytes': GaugeMetricFamily('host_network_all_out_bytes', 'Host all", "%s After=network.target [Service] ExecStart=/bin/sh -c '%s %s > %s 2>&1'", "mpid: bash_errorout('collectdmon -- -C %s' % conf_path) else: bash_errorout('kill -HUP", "in x, kvs)[0] disk_group = disk_group.split(\" \")[-1] elif \"Drive Temperature\"", "\"/opt/MegaRAID/MegaCli/MegaCli64 -PDList -aAll | grep -E 'Slot Number|DiskGroup|Firmware state|Drive Temperature'\").strip().splitlines()", "pid_vm_map = {} for pid_vm in pid_vm_map_str.splitlines(): arr = pid_vm.split()", "\"unknown\" if disk_group is None else disk_group state = info.strip().split(\":\")[-1]", "def __store_cache__(cls, ret): # type: (list) -> None cls.__collector_cache.clear() cls.__collector_cache.update({time.time():", "-F '-' '{print $NF}'\" % pv).strip() disks = linux.get_physical_disk(pv, False)", "\"\"\" state = state.lower() if \"online\" in state or \"jobd\"", "0 all_out_errors = 0 for intf in interfaces: all_in_bytes +=", "0 elif state == \"degraded\": return 5 else: return 100", "= info.strip() ps_id = info.split(\"|\")[0].strip().split(\" \")[0] health = 10 if", "bash_ro(\"top -b -n 1 -p %s | grep qemu |", "all_in_errors = 0 all_out_bytes = 0 all_out_packets = 0 all_out_errors", "return ret REGISTRY.register(Collector()) def start(self): http_server = kvmagent.get_http_server() http_server.register_async_uri(self.COLLECTD_PATH, self.start_prometheus_exporter)", "info.count(\"JBOD\") > 0 else disk_group disk_group = \"unknown\" if disk_group", "GaugeMetricFamily('ipmi_status', 'ipmi status', None, []), 'physical_network_interface': GaugeMetricFamily('physical_network_interface', 'physical network interface',", "restart %s.service\" % service_name) service_name = 
get_systemd_name(binPath) service_path = '/etc/systemd/system/%s.service'", "from prometheus_client import start_http_server from prometheus_client.core import GaugeMetricFamily, REGISTRY from", "completed yet,\" \" temporarily use the last calculation result.\" %", "None: return False if isinstance(v, GaugeMetricFamily): return Collector.check(v.samples) if isinstance(v,", "--nolocking --noheading -o pv_name\").strip().splitlines() for pv in pvs: multipath_wwid =", "= bash_o(\"find /sys/class/net -type l -not -lname '*virtual*' -printf '%f\\\\n'\").splitlines()", "ARGUMENTS, LOG_FILE) para = jsonobject.loads(req[http.REQUEST_BODY]) rsp = kvmagent.AgentResponse() eths =", "38232 afa02edca7eb4afcb5d2904ac1216eb1 ''' pid_vm_map = {} for pid_vm in pid_vm_map_str.splitlines():", "continue elif eth.startswith('br_'): continue elif not eth: continue else: interfaces.append(eth)", "return 0 elif state == \"degraded\": return 5 else: return", "disk.split(\"/\")[-1].strip() wwids = bash_o(\"udevadm info -n %s | grep -E", "'power supply', None, ['ps_id']), 'ipmi_status': GaugeMetricFamily('ipmi_status', 'ipmi status', None, []),", "grep qemu | awk '{print $1,$9}'\" % vm_pid_arr_str) if r", "0644) # restart service regardless of conf changes, for ZSTAC-23539", "keys is None or len(keys) == 0: return None if", "logger.warn(\"got exception in check value %s: %s\" % (v, e))", "= bash_r(\"grep -Ev '^[[:space:]]*#|^[[:space:]]*$' /etc/multipath/wwids\") if r == 0: linux.set_fail_if_no_path()", "speed = str(get_nic_supported_max_speed(nic)) metrics['physical_network_interface'].add_metric([nic, speed], status) return metrics.values() def collect_vm_statistics():", "type 'power supply'\") # type: (int, str) if r ==", "() -> list keys = cls.__collector_cache.keys() if keys is None", "for pid_vm in pid_vm_map_str.splitlines(): arr = pid_vm.split() if len(arr) ==", "COLLECTD_PATH = \"/prometheus/collectdexporter/start\" @kvmagent.replyerror @in_bash def start_prometheus_exporter(self, 
req): @in_bash def", "!= 0: for nic in nics: nic = nic.strip() try:", "else disk_group disk_group = \"unknown\" if disk_group is None else", "%s | grep -E '^S: disk/by-id/dm-uuid' | awk -F '-'", "== \"timestamp\" or vk == \"exemplar\": continue if Collector.check(v[vk]) is", "zstacklib.utils.ip import get_nic_supported_max_speed logger = log.get_logger(__name__) collector_dict = {} #", "\"\"\" state = state.lower() if state == \"optimal\": return 0", "is False: return False if isinstance(v, dict): for vk in", "{% for i in INTERFACES -%} Interface \"{{i}}\" {% endfor", "HostnameFormat name PluginInstanceFormat name BlockDevice \"/:hd[a-z]/\" IgnoreSelected true ExtraStats \"vcpu", "= bash_o(\"pvs --nolocking --noheading -o pv_name\").strip().splitlines() for pv in pvs:", "{ 'raid_state': GaugeMetricFamily('raid_state', 'raid state', None, ['target_id']), 'physical_disk_state': GaugeMetricFamily('physical_disk_state', 'physical", "memory> ValuesAbsolute true ValuesPercentage false </Plugin> <Plugin virt> Connection \"qemu:///system\"", "from zstacklib.utils import misc from zstacklib.utils import thread from zstacklib.utils.bash", "\"{{i}}\" {% endfor -%} IgnoreSelected false </Plugin> <Plugin memory> ValuesAbsolute", "= ['/var/lib/zstack', '%s/../../../' % default_zstack_path, '/opt/zstack-dvd/', '/var/log/zstack', '/var/lib/mysql', '/var/lib/libvirt', '/tmp/zstack']", "zstacklib.utils import misc from zstacklib.utils import thread from zstacklib.utils.bash import", "zstack_used_capacity = 0 for dir in zstack_dir: if not os.path.exists(dir):", "== 2: pid_vm_map[arr[0]] = arr[1] def collect(vm_pid_arr): vm_pid_arr_str = ','.join(vm_pid_arr)", "disk_group = \"unknown\" if disk_group is None else disk_group state", "nic contains carrier file but can not read status =", "some storage can not afford frequent TUR. 
ref: ZSTAC-23416 if", "Collector.check(r): logger.warn(\"result from collector %s contains illegal character None, details:", "import start_http_server from prometheus_client.core import GaugeMetricFamily, REGISTRY from kvmagent import", "</Plugin> <Plugin virt> Connection \"qemu:///system\" RefreshInterval {{INTERVAL}} HostnameFormat name PluginInstanceFormat", "outbound traffic in bytes'), 'host_network_all_out_packages': GaugeMetricFamily('host_network_all_out_packages', 'Host all outbound traffic", "= info.strip().strip(\")\").split(\" \")[-1] else: state = info.strip().split(\" \")[-1] metrics['raid_state'].add_metric([target_id], convert_raid_state_to_int(state))", "awk -F '/' '{print $NF}' | grep -v '^lvm-pv' |", "= [] for eth in all_eths: eth = eth.strip(' \\t\\n\\r')", "arr[0] vm_uuid = pid_vm_map[pid] cpu_usage = arr[1] metrics['cpu_occupied_by_vm'].add_metric([vm_uuid], float(cpu_usage)) n", "if not mpid: bash_errorout('collectdmon -- -C %s' % conf_path) else:", "= \"JBOD\" if disk_group == \"unknown\" and info.count(\"JBOD\") > 0", "= o.splitlines() for name in vg_names: name = name.strip() size,", "elif time.time() - collect_node_disk_wwid_last_time < 60 and collect_node_disk_wwid_last_result is not", "# type: (int, str) if r == 0: for info", "None, ['target_id']), 'physical_disk_state': GaugeMetricFamily('physical_disk_state', 'physical disk state', None, ['slot_number', 'disk_group']),", "None global collectResultLock global latest_collect_result r = f() if not", "% service_name) service_name = get_systemd_name(binPath) service_path = '/etc/systemd/system/%s.service' % service_name", "traffic in bytes'), 'host_network_all_in_packages': GaugeMetricFamily('host_network_all_in_packages', 'Host all inbound traffic in", "false </Plugin> <Plugin memory> ValuesAbsolute true ValuesPercentage false </Plugin> <Plugin", "not read status = linux.read_file(\"/sys/class/net/%s/carrier\" % nic) == 1 except", "'physical network interface', None, ['interface_name', 
'speed']), } r, ps_info =", "Exception as e: logger.warn(\"got exception in check value %s: %s\"", "return metrics.values() vg_names = o.splitlines() for name in vg_names: name", "latest_collect_result = {} collectResultLock = threading.RLock() QEMU_CMD = kvmagent.get_qemu_path().split(\"/\")[-1] def", "'^[[:space:]]*#|^[[:space:]]*$' /etc/multipath/wwids\") if r == 0: linux.set_fail_if_no_path() r, o, e", "= Template(conf) conf = tmpt.render({ 'INTERVAL': cmd.interval, 'INTERFACES': interfaces, 'VERSION':", "= fd.read() if old_conf != conf: with open(conf_path, 'w') as", "< 9: return cls.__collector_cache.get(keys[0]) return None @classmethod def __store_cache__(cls, ret):", "-C %s' % conf_path) def run_in_systemd(binPath, args, log): def get_systemd_name(path):", "read_number(\"/sys/class/net/{}/statistics/rx_bytes\".format(intf)) all_in_packets += read_number(\"/sys/class/net/{}/statistics/rx_packets\".format(intf)) all_in_errors += read_number(\"/sys/class/net/{}/statistics/rx_errors\".format(intf)) all_out_bytes += read_number(\"/sys/class/net/{}/statistics/tx_bytes\".format(intf))", "l -not -lname '*virtual*' -printf '%f\\\\n'\").splitlines() if len(nics) != 0:", "-%} IgnoreSelected false </Plugin> <Plugin memory> ValuesAbsolute true ValuesPercentage false", "INTERFACES -%} Interface \"{{i}}\" {% endfor -%} IgnoreSelected false </Plugin>", "import GaugeMetricFamily, REGISTRY from kvmagent import kvmagent from zstacklib.utils import", "need_restart_collectd = True else: with open(conf_path, 'w') as fd: fd.write(conf)", "\"failed\" in state: return 10 elif \"unconfigured\" in state: return", "inbound traffic in bytes'), 'host_network_all_in_packages': GaugeMetricFamily('host_network_all_in_packages', 'Host all inbound traffic", "if state == \"optimal\": return 0 elif state == \"degraded\":", "r, o, e = bash_roe(\"vgs --nolocking --noheading -oname\") if r", "\"localhost\" \"25826\" </Plugin> ''' tmpt = Template(conf) conf = tmpt.render({", "bash_ro(\"ipmitool 
sdr type 'power supply'\") # type: (int, str) if", "start_collectd(cmd): conf_path = os.path.join(os.path.dirname(cmd.binaryPath), 'collectd.conf') conf = '''Interval {{INTERVAL}} #", "return False except Exception as e: logger.warn(\"got exception in check", "(c.__module__, c.__name__) if collector_dict.get(name) is not None and collector_dict.get(name).is_alive(): continue", "Number\" in info: slot_number = info.strip().split(\" \")[-1] elif \"DiskGroup\" in", "= 0 for dir in zstack_dir: if not os.path.exists(dir): continue", "% mpid) else: if not mpid: bash_errorout('collectdmon -- -C %s'", "\")[-1] metrics['raid_state'].add_metric([target_id], convert_raid_state_to_int(state)) disk_info = bash_o( \"/opt/MegaRAID/MegaCli/MegaCli64 -PDList -aAll |", "= True speed = str(get_nic_supported_max_speed(nic)) metrics['physical_network_interface'].add_metric([nic, speed], status) return metrics.values()", "#PluginInstance \"unspecified\" Type \"cpu\" #TypeInstance \"unspecified\" GroupBy \"Host\" GroupBy \"TypeInstance\"", "% default_zstack_path, '/opt/zstack-dvd/', '/var/log/zstack', '/var/lib/mysql', '/var/lib/libvirt', '/tmp/zstack'] metrics = {", "def start_exporter(cmd): EXPORTER_PATH = cmd.binaryPath LOG_FILE = os.path.join(os.path.dirname(EXPORTER_PATH), cmd.binaryPath +", "state == \"optimal\": return 0 elif state == \"degraded\": return", "True speed = str(get_nic_supported_max_speed(nic)) metrics['physical_network_interface'].add_metric([nic, speed], status) return metrics.values() def", "== 0: linux.set_fail_if_no_path() r, o, e = bash_roe(\"vgs --nolocking --noheading", "# restart service regardless of conf changes, for ZSTAC-23539 reload_and_restart_service(service_name)", "-aAll | grep -E 'Slot Number|DiskGroup|Firmware state|Drive Temperature'\").strip().splitlines() slot_number =", "with open(conf_path, 'r') as fd: old_conf = fd.read() if old_conf", "aggregation LoadPlugin cpu LoadPlugin disk LoadPlugin interface LoadPlugin memory LoadPlugin", "return 
metrics.values() def collect_lvm_capacity_statistics(): metrics = { 'vg_size': GaugeMetricFamily('vg_size', 'volume", "else: return 100 def convert_disk_state_to_int(state): \"\"\" :type state: str \"\"\"", "latest_collect_result.itervalues(): ret.extend(v) Collector.__store_cache__(ret) return ret REGISTRY.register(Collector()) def start(self): http_server =", "in interfaces: all_in_bytes += read_number(\"/sys/class/net/{}/statistics/rx_bytes\".format(intf)) all_in_packets += read_number(\"/sys/class/net/{}/statistics/rx_packets\".format(intf)) all_in_errors +=", "interfaces: all_in_bytes += read_number(\"/sys/class/net/{}/statistics/rx_bytes\".format(intf)) all_in_packets += read_number(\"/sys/class/net/{}/statistics/rx_packets\".format(intf)) all_in_errors += read_number(\"/sys/class/net/{}/statistics/rx_errors\".format(intf))", "\"25826\" </Plugin> ''' tmpt = Template(conf) conf = tmpt.render({ 'INTERVAL':", "else: with open(conf_path, 'w') as fd: fd.write(conf) need_restart_collectd = True", "in info: temp = info.split(\":\")[1].split(\"C\")[0] metrics['physical_disk_temperature'].add_metric([slot_number, disk_group], int(temp)) else: disk_group", "cls.__collector_cache.get(keys[0]) return None @classmethod def __store_cache__(cls, ret): # type: (list)", "in info.lower() else 0 metrics['power_supply'].add_metric([ps_id], health) metrics['ipmi_status'].add_metric([], bash_r(\"ipmitool mc info\"))", "in cmd.binaryPath: start_collectd_exporter(cmd) else: start_exporter(cmd) return jsonobject.dumps(rsp) def install_colletor(self): class", "\"\").split(\",\") disk_group = filter(lambda x: \"DiskGroup\" in x, kvs)[0] disk_group", "name,)) for i in range(7): for t in collector_dict.values(): if", "for cmd in para.cmds: if \"collectd_exporter\" in cmd.binaryPath: start_collectd_exporter(cmd) else:", "grep -v '^lvm-pv' | sort\" % disk).strip().splitlines() if multipath_wwid is", "interfaces = [] for eth in all_eths: eth = eth.strip('", "return True def collect(self): 
global latest_collect_result ret = [] def", "return metrics.values() pid_vm_map_str = pid_vm_map_str.replace(\",debug-threads=on\", \"\").replace(\"guest=\", \"\") '''pid_vm_map_str samples: 38149", "if eth == 'bonding_masters': continue elif not eth: continue else:", "continue else: interfaces.append(eth) all_in_bytes = 0 all_in_packets = 0 all_in_errors", "metrics.values() def collect_vm_statistics(): metrics = { 'cpu_occupied_by_vm': GaugeMetricFamily('cpu_occupied_by_vm', 'Percentage of", "target_id = state = \"unknown\" for info in raid_info: if", "if not ARGUMENTS: ARGUMENTS = \"\" os.chmod(EXPORTER_PATH, 0o755) run_in_systemd(EXPORTER_PATH, ARGUMENTS,", "= \"%s.%s\" % (c.__module__, c.__name__) if collector_dict.get(name) is not None", "@in_bash def start_collectd(cmd): conf_path = os.path.join(os.path.dirname(cmd.binaryPath), 'collectd.conf') conf = '''Interval", "continue cmd = \"du -bs %s | awk {\\'print $1\\'}\"", "fd: old_conf = fd.read() if old_conf != conf: with open(conf_path,", "thread.ThreadFacade.run_in_thread(get_result_run, (c, name,)) for i in range(7): for t in", "'host_network_all_out_bytes': GaugeMetricFamily('host_network_all_out_bytes', 'Host all outbound traffic in bytes'), 'host_network_all_out_packages': GaugeMetricFamily('host_network_all_out_packages',", "'/dev/null' if log.endswith('/pushgateway.log') else log, binPath) if not os.path.exists(service_path): linux.write_file(service_path,", "linux.find_process_by_command('collectd', [conf_path]) mpid = linux.find_process_by_command('collectdmon', [conf_path]) if not cpid: bash_errorout('collectdmon", "metrics['raid_state'].add_metric([target_id], convert_raid_state_to_int(state)) disk_info = bash_o( \"/opt/MegaRAID/MegaCli/MegaCli64 -PDList -aAll | grep", "service regardless of conf changes, for ZSTAC-23539 reload_and_restart_service(service_name) @lock.file_lock(\"/run/collectd-conf.lock\", locker=lock.Flock())", "\"jobd\" in state: return 0 elif \"rebuild\" in state: return", 
"os.path import threading import typing from prometheus_client import start_http_server from", "else: bash_errorout('kill -HUP %s' % mpid) else: if not mpid:", "15 else: return 100 def collect_raid_state(): metrics = { 'raid_state':", "'-' '{print $NF}'\" % pv).strip() disks = linux.get_physical_disk(pv, False) for", "str) if r == 0: for info in ps_info.splitlines(): info", "collect(self): global latest_collect_result ret = [] def get_result_run(f, fname): #", "in ps_info.splitlines(): info = info.strip() ps_id = info.split(\"|\")[0].strip().split(\" \")[0] health", "character None, details: \\n%s\" % (fname, r)) return with collectResultLock:", "default_zstack_path, '/opt/zstack-dvd/', '/var/log/zstack', '/var/lib/mysql', '/var/lib/libvirt', '/tmp/zstack'] metrics = { 'zstack_used_capacity_in_bytes':", "if len(arr) == 2: pid_vm_map[arr[0]] = arr[1] def collect(vm_pid_arr): vm_pid_arr_str", "= cmd.startupArguments if not ARGUMENTS: ARGUMENTS = \"\" os.chmod(EXPORTER_PATH, 0o755)", "bash_o(\"udevadm info -n %s | grep -E '^S: disk/by-id' |", "bash_roe(\"vgs --nolocking --noheading -oname\") if r != 0 or len(o.splitlines())", "'host_network_all_out_packages': GaugeMetricFamily('host_network_all_out_packages', 'Host all outbound traffic in packages'), 'host_network_all_out_errors': GaugeMetricFamily('host_network_all_out_errors',", "= pid_vm_map_str.replace(\",debug-threads=on\", \"\").replace(\"guest=\", \"\") '''pid_vm_map_str samples: 38149 e8e6f27bfb2d47e08c59cbea1d0488c3 38232 afa02edca7eb4afcb5d2904ac1216eb1", "continue if eth == 'bonding_masters': continue elif eth.startswith('vnic'): continue elif", "-> None cls.__collector_cache.clear() cls.__collector_cache.update({time.time(): ret}) @classmethod def check(cls, v): try:", "LoadPlugin disk LoadPlugin interface LoadPlugin memory LoadPlugin network LoadPlugin virt", "n]) return metrics.values() collect_node_disk_wwid_last_time = None collect_node_disk_wwid_last_result = None def", "EXPORTER_PATH = cmd.binaryPath 
LOG_FILE = os.path.join(os.path.dirname(EXPORTER_PATH), cmd.binaryPath + '.log') ARGUMENTS", "i in range(7): for t in collector_dict.values(): if t.is_alive(): time.sleep(0.5)", "os.listdir(\"/sys/devices/virtual/net/\") interfaces = [] for eth in all_eths: eth =", "t in collector_dict.values(): if t.is_alive(): time.sleep(0.5) continue for k in", "is None: return False if isinstance(v, GaugeMetricFamily): return Collector.check(v.samples) if", "traffic in bytes'), 'host_network_all_out_packages': GaugeMetricFamily('host_network_all_out_packages', 'Host all outbound traffic in", "&& systemctl restart %s.service\" % service_name) service_name = get_systemd_name(binPath) service_path", "def collect(vm_pid_arr): vm_pid_arr_str = ','.join(vm_pid_arr) r, pid_cpu_usages_str = bash_ro(\"top -b", "if \"online\" in state or \"jobd\" in state: return 0", "return 5 elif \"failed\" in state: return 10 elif \"unconfigured\"", "false </Plugin> <Plugin \"interface\"> {% for i in INTERFACES -%}", "return for pid_cpu_usage in pid_cpu_usages_str.splitlines(): arr = pid_cpu_usage.split() pid =", "{ 'host_network_all_in_bytes': GaugeMetricFamily('host_network_all_in_bytes', 'Host all inbound traffic in bytes'), 'host_network_all_in_packages':", "eth.startswith('br_'): continue elif not eth: continue else: interfaces.append(eth) for cmd", "\"JBOD\" if disk_group == \"unknown\" and info.count(\"JBOD\") > 0 else", "\"/^vd[a-z]$/\" IgnoreSelected false </Plugin> <Plugin \"interface\"> {% for i in", "linux.write_file(service_path, service_conf, True) logger.info(\"%s.service conf changed\" % service_name) os.chmod(service_path, 0644)", "if bash_r(\"dmsetup table %s | grep multipath\" % pv) ==", "health) metrics['ipmi_status'].add_metric([], bash_r(\"ipmitool mc info\")) nics = bash_o(\"find /sys/class/net -type", "'raid_state': GaugeMetricFamily('raid_state', 'raid state', None, ['target_id']), 'physical_disk_state': GaugeMetricFamily('physical_disk_state', 'physical disk", "in state: 
return 0 elif \"rebuild\" in state: return 5", "frequent TUR. ref: ZSTAC-23416 if collect_node_disk_wwid_last_time is None: collect_node_disk_wwid_last_time =", "-> list keys = cls.__collector_cache.keys() if keys is None or", "eth.strip(' \\t\\n\\r') if eth == 'lo': continue if eth ==", "Temperature'\").strip().splitlines() slot_number = state = disk_group = \"unknown\" for info", "-Ev '^[[:space:]]*#|^[[:space:]]*$' /etc/multipath/wwids\") if r == 0: linux.set_fail_if_no_path() r, o,", "= { 'power_supply': GaugeMetricFamily('power_supply', 'power supply', None, ['ps_id']), 'ipmi_status': GaugeMetricFamily('ipmi_status',", "0: linux.set_fail_if_no_path() r, o, e = bash_roe(\"vgs --nolocking --noheading -oname\")", "if \"Slot Number\" in info: slot_number = info.strip().split(\" \")[-1] elif", "<Plugin cpu> ReportByCpu true ReportByState true ValuesPercentage true </Plugin> <Plugin", "metrics.values() return metrics.values() kvmagent.register_prometheus_collector(collect_host_network_statistics) kvmagent.register_prometheus_collector(collect_host_capacity_statistics) kvmagent.register_prometheus_collector(collect_vm_statistics) kvmagent.register_prometheus_collector(collect_node_disk_wwid) if misc.isMiniHost(): kvmagent.register_prometheus_collector(collect_lvm_capacity_statistics)", "\"collectd_exporter\" in cmd.binaryPath: start_collectd_exporter(cmd) else: start_exporter(cmd) return jsonobject.dumps(rsp) def install_colletor(self):", "= {} collectResultLock = threading.RLock() QEMU_CMD = kvmagent.get_qemu_path().split(\"/\")[-1] def read_number(fname):", "'speed']), } r, ps_info = bash_ro(\"ipmitool sdr type 'power supply'\")", "True) logger.info(\"%s.service conf changed\" % service_name) os.chmod(service_path, 0644) # restart", "return \"pushgateway\" def reload_and_restart_service(service_name): bash_errorout(\"systemctl daemon-reload && systemctl restart %s.service\"", "last calculation result.\" % k) for v in latest_collect_result.itervalues(): 
ret.extend(v)", "Collector.check(v[vk]) is False: return False except Exception as e: logger.warn(\"got", "cmd.interval, 'INTERFACES': interfaces, 'VERSION': cmd.version, }) need_restart_collectd = False if", "nics = bash_o(\"find /sys/class/net -type l -not -lname '*virtual*' -printf", "slot_number = info.strip().split(\" \")[-1] elif \"DiskGroup\" in info: kvs =", "% disk).strip().splitlines() if multipath_wwid is not None: wwids.append(multipath_wwid) if len(wwids)", "return metrics.values() raid_info = bash_o(\"/opt/MegaRAID/MegaCli/MegaCli64 -LDInfo -LALL -aAll | grep", "'Host all inbound traffic in packages'), 'host_network_all_in_errors': GaugeMetricFamily('host_network_all_in_errors', 'Host all", "isinstance(v, dict): for vk in v.iterkeys(): if vk == \"timestamp\"", "0o755) run_in_systemd(EXPORTER_PATH, ARGUMENTS, LOG_FILE) para = jsonobject.loads(req[http.REQUEST_BODY]) rsp = kvmagent.AgentResponse()", "ps_id = info.split(\"|\")[0].strip().split(\" \")[0] health = 10 if \"fail\" in", ":type state: str \"\"\" state = state.lower() if state ==", "| grep -E 'Slot Number|DiskGroup|Firmware state|Drive Temperature'\").strip().splitlines() slot_number = state", "or isinstance(v, tuple): for vl in v: if Collector.check(vl) is", "GaugeMetricFamily): return Collector.check(v.samples) if isinstance(v, list) or isinstance(v, tuple): for", "GaugeMetricFamily('vg_size', 'volume group size', None, ['vg_name']), 'vg_avail': GaugeMetricFamily('vg_avail', 'volume group", "= linux.find_process_by_command('collectd', [conf_path]) mpid = linux.find_process_by_command('collectdmon', [conf_path]) if not cpid:", "= None def collect_node_disk_wwid(): global collect_node_disk_wwid_last_time global collect_node_disk_wwid_last_result # NOTE(weiw):", "in pid_vm_map_str.splitlines(): arr = pid_vm.split() if len(arr) == 2: pid_vm_map[arr[0]]", "grep -E '^S: disk/by-id/dm-uuid' | awk -F '-' '{print $NF}'\"", "= \"unknown\" for info in raid_info: if \"Target Id\" in", "Temperature\" in 
info: temp = info.split(\":\")[1].split(\"C\")[0] metrics['physical_disk_temperature'].add_metric([slot_number, disk_group], int(temp)) else:", "elif state == \"degraded\": return 5 else: return 100 def", "syslog LoadPlugin aggregation LoadPlugin cpu LoadPlugin disk LoadPlugin interface LoadPlugin", "$NF}' | grep -v '^lvm-pv' | sort\" % disk).strip().splitlines() if", "%s | grep qemu | awk '{print $1,$9}'\" % vm_pid_arr_str)", "metrics.values() raid_info = bash_o(\"/opt/MegaRAID/MegaCli/MegaCli64 -LDInfo -LALL -aAll | grep -E", "in virtual_eths: continue if eth == 'bonding_masters': continue elif not", "<Plugin memory> ValuesAbsolute true ValuesPercentage false </Plugin> <Plugin virt> Connection", "status = linux.read_file(\"/sys/class/net/%s/carrier\" % nic) == 1 except Exception as", "== 0: return metrics.values() pid_vm_map_str = pid_vm_map_str.replace(\",debug-threads=on\", \"\").replace(\"guest=\", \"\") '''pid_vm_map_str", "{{VERSION}} FQDNLookup false LoadPlugin syslog LoadPlugin aggregation LoadPlugin cpu LoadPlugin", "def collect_raid_state(): metrics = { 'raid_state': GaugeMetricFamily('raid_state', 'raid state', None,", "or len(o.splitlines()) == 0: return metrics.values() vg_names = o.splitlines() for", "disk).strip().splitlines() if multipath_wwid is not None: wwids.append(multipath_wwid) if len(wwids) >", "lvm.get_vg_size(name, False) metrics['vg_size'].add_metric([name], float(size)) metrics['vg_avail'].add_metric([name], float(avail)) return metrics.values() def convert_raid_state_to_int(state):", "virtual_eths: continue if eth == 'bonding_masters': continue elif not eth:", "None, ['vg_name']), } r = bash_r(\"grep -Ev '^[[:space:]]*#|^[[:space:]]*$' /etc/multipath/wwids\") if", "for eth in eths: eth = eth.strip(' \\t\\n\\r') if eth", "e8e6f27bfb2d47e08c59cbea1d0488c3 38232 afa02edca7eb4afcb5d2904ac1216eb1 ''' pid_vm_map = {} for pid_vm in", "r == 0: linux.set_fail_if_no_path() r, o, e = bash_roe(\"vgs --nolocking", "'Host all inbound traffic 
errors'), 'host_network_all_out_bytes': GaugeMetricFamily('host_network_all_out_bytes', 'Host all outbound", "\"wwid\"]) } pvs = bash_o(\"pvs --nolocking --noheading -o pv_name\").strip().splitlines() for", "convert_raid_state_to_int(state)) disk_info = bash_o( \"/opt/MegaRAID/MegaCli/MegaCli64 -PDList -aAll | grep -E", "need_restart_collectd: if not mpid: bash_errorout('collectdmon -- -C %s' % conf_path)", "%s' % conf_path) else: bash_errorout('kill -TERM %s' % cpid) if", "cmd.binaryPath LOG_FILE = os.path.join(os.path.dirname(EXPORTER_PATH), cmd.binaryPath + '.log') ARGUMENTS = cmd.startupArguments", "Disk \"/^vd[a-z]$/\" IgnoreSelected false </Plugin> <Plugin \"interface\"> {% for i", "-p %s | grep qemu | awk '{print $1,$9}'\" %", "interface', None, ['interface_name', 'speed']), } r, ps_info = bash_ro(\"ipmitool sdr", "'/' '{print $NF}' | grep -v '^lvm-pv' | sort\" %", "kvmagent.register_prometheus_collector(collect_host_capacity_statistics) kvmagent.register_prometheus_collector(collect_vm_statistics) kvmagent.register_prometheus_collector(collect_node_disk_wwid) if misc.isMiniHost(): kvmagent.register_prometheus_collector(collect_lvm_capacity_statistics) kvmagent.register_prometheus_collector(collect_raid_state) kvmagent.register_prometheus_collector(collect_equipment_state) class PrometheusPlugin(kvmagent.KvmAgent):", "in all_eths: eth = eth.strip(' \\t\\n\\r') if eth in virtual_eths:", "ARGUMENTS = cmd.startupArguments if not ARGUMENTS: ARGUMENTS = \"\" os.chmod(EXPORTER_PATH,", "+= read_number(\"/sys/class/net/{}/statistics/tx_packets\".format(intf)) all_out_errors += read_number(\"/sys/class/net/{}/statistics/tx_errors\".format(intf)) metrics = { 'host_network_all_in_bytes': GaugeMetricFamily('host_network_all_in_bytes',", "= { 'node_disk_wwid': GaugeMetricFamily('node_disk_wwid', 'node disk wwid', None, [\"disk\", \"wwid\"])", "= os.environ.get('ZSTACK_HOME', None) if zstack_env_path and zstack_env_path != default_zstack_path: default_zstack_path", "if 
\"collectd_exporter\" in path: return \"collectd_exporter\" elif \"node_exporter\" in path:", "if disk_group is None else disk_group state = info.strip().split(\":\")[-1] metrics['physical_disk_state'].add_metric([slot_number,", "['vmUuid']) } r, pid_vm_map_str = bash_ro(\"ps --no-headers u -C \\\"%s", "def collect(self): global latest_collect_result ret = [] def get_result_run(f, fname):", "all_in_errors += read_number(\"/sys/class/net/{}/statistics/rx_errors\".format(intf)) all_out_bytes += read_number(\"/sys/class/net/{}/statistics/tx_bytes\".format(intf)) all_out_packets += read_number(\"/sys/class/net/{}/statistics/tx_packets\".format(intf)) all_out_errors", "state == \"degraded\": return 5 else: return 100 def convert_disk_state_to_int(state):", "pvs = bash_o(\"pvs --nolocking --noheading -o pv_name\").strip().splitlines() for pv in", "{} # type: Dict[str, threading.Thread] latest_collect_result = {} collectResultLock =", "LoadPlugin virt <Plugin aggregation> <Aggregation> #Host \"unspecified\" Plugin \"cpu\" #PluginInstance", "= filter(lambda x: \"DiskGroup\" in x, kvs)[0] disk_group = disk_group.split(\"", "import lock from zstacklib.utils import lvm from zstacklib.utils import misc", "in raid_info: if \"Target Id\" in info: target_id = info.strip().strip(\")\").split(\"", "metrics.values() def collect_host_capacity_statistics(): default_zstack_path = '/usr/local/zstack/apache-tomcat/webapps/zstack' zstack_env_path = os.environ.get('ZSTACK_HOME', None)", "bash_o(\"find /sys/class/net -type l -not -lname '*virtual*' -printf '%f\\\\n'\").splitlines() if", "start_collectd_exporter(cmd): start_collectd(cmd) start_exporter(cmd) @in_bash def start_exporter(cmd): EXPORTER_PATH = cmd.binaryPath LOG_FILE", "res = bash_o(cmd) zstack_used_capacity += int(res) metrics['zstack_used_capacity_in_bytes'].add_metric([], float(zstack_used_capacity)) return metrics.values()", "= thread.ThreadFacade.run_in_thread(get_result_run, (c, name,)) for i in range(7): for t", "-c '%s 
%s > %s 2>&1' ExecStop=/bin/sh -c 'pkill -TERM", "fd: fd.write(conf) need_restart_collectd = True else: with open(conf_path, 'w') as", "metrics['cpu_occupied_by_vm'].add_metric([vm_uuid], float(cpu_usage)) n = 10 for i in range(0, len(pid_vm_map.keys()),", "\"fail\" in info.lower() or \"lost\" in info.lower() else 0 metrics['power_supply'].add_metric([ps_id],", "except Exception as e: status = True speed = str(get_nic_supported_max_speed(nic))", "info: temp = info.split(\":\")[1].split(\"C\")[0] metrics['physical_disk_temperature'].add_metric([slot_number, disk_group], int(temp)) else: disk_group =", "kvmagent.register_prometheus_collector(collect_vm_statistics) kvmagent.register_prometheus_collector(collect_node_disk_wwid) if misc.isMiniHost(): kvmagent.register_prometheus_collector(collect_lvm_capacity_statistics) kvmagent.register_prometheus_collector(collect_raid_state) kvmagent.register_prometheus_collector(collect_equipment_state) class PrometheusPlugin(kvmagent.KvmAgent): COLLECTD_PATH", "def collect_vm_statistics(): metrics = { 'cpu_occupied_by_vm': GaugeMetricFamily('cpu_occupied_by_vm', 'Percentage of CPU", "service_conf = ''' [Unit] Description=prometheus %s After=network.target [Service] ExecStart=/bin/sh -c", "in path: return \"pushgateway\" def reload_and_restart_service(service_name): bash_errorout(\"systemctl daemon-reload && systemctl", "o, e = bash_roe(\"vgs --nolocking --noheading -oname\") if r !=", "and zstack_env_path != default_zstack_path: default_zstack_path = zstack_env_path zstack_dir = ['/var/lib/zstack',", "get_systemd_name(binPath) service_path = '/etc/systemd/system/%s.service' % service_name service_conf = ''' [Unit]", "= threading.RLock() QEMU_CMD = kvmagent.get_qemu_path().split(\"/\")[-1] def read_number(fname): res = linux.read_file(fname)", "not os.path.exists(dir): continue cmd = \"du -bs %s | awk", "e: status = True speed = str(get_nic_supported_max_speed(nic)) metrics['physical_network_interface'].add_metric([nic, speed], 
status)", "elif \"DiskGroup\" in info: kvs = info.replace(\"Drive's position: \", \"\").split(\",\")", "<Plugin virt> Connection \"qemu:///system\" RefreshInterval {{INTERVAL}} HostnameFormat name PluginInstanceFormat name", "reload_and_restart_service(service_name) return if linux.read_file(service_path) != service_conf: linux.write_file(service_path, service_conf, True) logger.info(\"%s.service", "'disk_group']), } if bash_r(\"/opt/MegaRAID/MegaCli/MegaCli64 -LDInfo -LALL -aAll\") != 0: return", "'{print $NF}' | grep -v '^lvm-pv' | sort\" % disk).strip().splitlines()", "for pid_cpu_usage in pid_cpu_usages_str.splitlines(): arr = pid_cpu_usage.split() pid = arr[0]", "traffic errors'), } metrics['host_network_all_in_bytes'].add_metric([], float(all_in_bytes)) metrics['host_network_all_in_packages'].add_metric([], float(all_in_packets)) metrics['host_network_all_in_errors'].add_metric([], float(all_in_errors)) metrics['host_network_all_out_bytes'].add_metric([],", "PluginInstanceFormat name BlockDevice \"/:hd[a-z]/\" IgnoreSelected true ExtraStats \"vcpu memory\" </Plugin>", "False: return False if isinstance(v, dict): for vk in v.iterkeys():", "with open(conf_path, 'w') as fd: fd.write(conf) need_restart_collectd = True cpid", "!= default_zstack_path: default_zstack_path = zstack_env_path zstack_dir = ['/var/lib/zstack', '%s/../../../' %", "prometheus_client.core import GaugeMetricFamily, REGISTRY from kvmagent import kvmagent from zstacklib.utils", "dir res = bash_o(cmd) zstack_used_capacity += int(res) metrics['zstack_used_capacity_in_bytes'].add_metric([], float(zstack_used_capacity)) return", "= str(get_nic_supported_max_speed(nic)) metrics['physical_network_interface'].add_metric([nic, speed], status) return metrics.values() def collect_vm_statistics(): metrics", "= tmpt.render({ 'INTERVAL': cmd.interval, 'INTERFACES': interfaces, 'VERSION': cmd.version, }) need_restart_collectd", "[\"disk\", \"wwid\"]) } pvs = bash_o(\"pvs --nolocking --noheading -o 
pv_name\").strip().splitlines()", "collect_node_disk_wwid_last_result = None def collect_node_disk_wwid(): global collect_node_disk_wwid_last_time global collect_node_disk_wwid_last_result #", "os.chmod(service_path, 0644) # restart service regardless of conf changes, for", "'power_supply': GaugeMetricFamily('power_supply', 'power supply', None, ['ps_id']), 'ipmi_status': GaugeMetricFamily('ipmi_status', 'ipmi status',", "false CalculateMaximum false CalculateStddev false </Aggregation> </Plugin> <Plugin cpu> ReportByCpu", "9: return cls.__collector_cache.get(keys[0]) return None @classmethod def __store_cache__(cls, ret): #", "= False if os.path.exists(conf_path): with open(conf_path, 'r') as fd: old_conf", "true CalculateMinimum false CalculateMaximum false CalculateStddev false </Aggregation> </Plugin> <Plugin", "False except Exception as e: logger.warn(\"got exception in check value", "< 60 and collect_node_disk_wwid_last_result is not None: return collect_node_disk_wwid_last_result metrics", "BlockDevice \"/:hd[a-z]/\" IgnoreSelected true ExtraStats \"vcpu memory\" </Plugin> <Plugin network>", "metrics['host_network_all_out_packages'].add_metric([], float(all_out_packets)) metrics['host_network_all_out_errors'].add_metric([], float(all_out_errors)) return metrics.values() def collect_host_capacity_statistics(): default_zstack_path =", "CalculateNum false CalculateSum false CalculateAverage true CalculateMinimum false CalculateMaximum false", "-LALL -aAll | grep -E 'Target Id|State'\").strip().splitlines() target_id = state", "if len(wwids) > 0: metrics['node_disk_wwid'].add_metric([disk_name, \";\".join([w.strip() for w in wwids])],", "True else: with open(conf_path, 'w') as fd: fd.write(conf) need_restart_collectd =", "all_out_errors = 0 for intf in interfaces: all_in_bytes += read_number(\"/sys/class/net/{}/statistics/rx_bytes\".format(intf))", "eth == 'lo': continue if eth == 'bonding_masters': continue elif", "linux.get_physical_disk(pv, False) for disk in 
disks: disk_name = disk.split(\"/\")[-1].strip() wwids", "= 10 if \"fail\" in info.lower() or \"lost\" in info.lower()", "can not read status = linux.read_file(\"/sys/class/net/%s/carrier\" % nic) == 1", "len(keys) == 0: return None if (time.time() - keys[0]) <", "contains carrier file but can not read status = linux.read_file(\"/sys/class/net/%s/carrier\"", "r != 0 or len(o.splitlines()) == 0: return metrics.values() vg_names", "group and thin pool free size', None, ['vg_name']), } r", "return Collector.check(v.samples) if isinstance(v, list) or isinstance(v, tuple): for vl", "position: \", \"\").split(\",\") disk_group = filter(lambda x: \"DiskGroup\" in x,", "logger.info(\"%s.service conf changed\" % service_name) os.chmod(service_path, 0644) # restart service", "-printf '%f\\\\n'\").splitlines() if len(nics) != 0: for nic in nics:", "import misc from zstacklib.utils import thread from zstacklib.utils.bash import *", "open(conf_path, 'w') as fd: fd.write(conf) need_restart_collectd = True cpid =", "'node_disk_wwid': GaugeMetricFamily('node_disk_wwid', 'node disk wwid', None, [\"disk\", \"wwid\"]) } pvs", "run_in_systemd(EXPORTER_PATH, ARGUMENTS, LOG_FILE) para = jsonobject.loads(req[http.REQUEST_BODY]) rsp = kvmagent.AgentResponse() eths", "pv in pvs: multipath_wwid = None if bash_r(\"dmsetup table %s", "['/var/lib/zstack', '%s/../../../' % default_zstack_path, '/opt/zstack-dvd/', '/var/log/zstack', '/var/lib/mysql', '/var/lib/libvirt', '/tmp/zstack'] metrics", "metrics.values() kvmagent.register_prometheus_collector(collect_host_network_statistics) kvmagent.register_prometheus_collector(collect_host_capacity_statistics) kvmagent.register_prometheus_collector(collect_vm_statistics) kvmagent.register_prometheus_collector(collect_node_disk_wwid) if misc.isMiniHost(): kvmagent.register_prometheus_collector(collect_lvm_capacity_statistics) kvmagent.register_prometheus_collector(collect_raid_state) kvmagent.register_prometheus_collector(collect_equipment_state)", 
"'%s %s > %s 2>&1' ExecStop=/bin/sh -c 'pkill -TERM -f", "collect_node_disk_wwid_last_time = None collect_node_disk_wwid_last_result = None def collect_node_disk_wwid(): global collect_node_disk_wwid_last_time", "'vg_size': GaugeMetricFamily('vg_size', 'volume group size', None, ['vg_name']), 'vg_avail': GaugeMetricFamily('vg_avail', 'volume", "= bash_ro(\"ipmitool sdr type 'power supply'\") # type: (int, str)", "len(pid_vm_map_str.splitlines()) == 0: return metrics.values() pid_vm_map_str = pid_vm_map_str.replace(\",debug-threads=on\", \"\").replace(\"guest=\", \"\")", "''' % (service_name, binPath, args, '/dev/null' if log.endswith('/pushgateway.log') else log,", "temperature', None, ['slot_number', 'disk_group']), } if bash_r(\"/opt/MegaRAID/MegaCli/MegaCli64 -LDInfo -LALL -aAll\")", "disk LoadPlugin interface LoadPlugin memory LoadPlugin network LoadPlugin virt <Plugin", "metrics = { 'zstack_used_capacity_in_bytes': GaugeMetricFamily('zstack_used_capacity_in_bytes', 'ZStack used capacity in bytes')", "GaugeMetricFamily('raid_state', 'raid state', None, ['target_id']), 'physical_disk_state': GaugeMetricFamily('physical_disk_state', 'physical disk state',", "not ARGUMENTS: ARGUMENTS = \"\" os.chmod(EXPORTER_PATH, 0o755) run_in_systemd(EXPORTER_PATH, ARGUMENTS, LOG_FILE)", "<Plugin aggregation> <Aggregation> #Host \"unspecified\" Plugin \"cpu\" #PluginInstance \"unspecified\" Type", "10 if \"fail\" in info.lower() or \"lost\" in info.lower() else", "can not afford frequent TUR. 
ref: ZSTAC-23416 if collect_node_disk_wwid_last_time is", "pid_vm_map_str.replace(\",debug-threads=on\", \"\").replace(\"guest=\", \"\") '''pid_vm_map_str samples: 38149 e8e6f27bfb2d47e08c59cbea1d0488c3 38232 afa02edca7eb4afcb5d2904ac1216eb1 '''", "collector_dict.values(): if t.is_alive(): time.sleep(0.5) continue for k in collector_dict.iterkeys(): if", "% (v, e)) return True return True def collect(self): global", "\\t\\n\\r') if eth == 'lo': continue if eth == 'bonding_masters':", "raid_info: if \"Target Id\" in info: target_id = info.strip().strip(\")\").split(\" \")[-1]", "args, log): def get_systemd_name(path): if \"collectd_exporter\" in path: return \"collectd_exporter\"", "True cpid = linux.find_process_by_command('collectd', [conf_path]) mpid = linux.find_process_by_command('collectdmon', [conf_path]) if", "zstack_used_capacity += int(res) metrics['zstack_used_capacity_in_bytes'].add_metric([], float(zstack_used_capacity)) return metrics.values() def collect_lvm_capacity_statistics(): metrics", "r == 0: for info in ps_info.splitlines(): info = info.strip()", "cmd.version, }) need_restart_collectd = False if os.path.exists(conf_path): with open(conf_path, 'r')", "thin pool free size', None, ['vg_name']), } r = bash_r(\"grep", "kvmagent from zstacklib.utils import http from zstacklib.utils import jsonobject from", "if len(nics) != 0: for nic in nics: nic =", "{{INTERVAL}} # version {{VERSION}} FQDNLookup false LoadPlugin syslog LoadPlugin aggregation", "from zstacklib.utils import http from zstacklib.utils import jsonobject from zstacklib.utils", "0: return metrics.values() vg_names = o.splitlines() for name in vg_names:", "if bash_r(\"/opt/MegaRAID/MegaCli/MegaCli64 -LDInfo -LALL -aAll\") != 0: return metrics.values() raid_info", "(fname, r)) return with collectResultLock: latest_collect_result[fname] = r cache =", "global collectResultLock global latest_collect_result r = f() if not Collector.check(r):", "\";\".join([w.strip() for w in wwids])], 1) 
collect_node_disk_wwid_last_result = metrics.values() return", "in info: target_id = info.strip().strip(\")\").split(\" \")[-1] else: state = info.strip().split(\"", "o.splitlines() for name in vg_names: name = name.strip() size, avail", "'/var/lib/mysql', '/var/lib/libvirt', '/tmp/zstack'] metrics = { 'zstack_used_capacity_in_bytes': GaugeMetricFamily('zstack_used_capacity_in_bytes', 'ZStack used", "</Plugin> <Plugin cpu> ReportByCpu true ReportByState true ValuesPercentage true </Plugin>", "if r != 0 or len(o.splitlines()) == 0: return metrics.values()", "False: return False except Exception as e: logger.warn(\"got exception in", "read_number(\"/sys/class/net/{}/statistics/tx_errors\".format(intf)) metrics = { 'host_network_all_in_bytes': GaugeMetricFamily('host_network_all_in_bytes', 'Host all inbound traffic", "{} for pid_vm in pid_vm_map_str.splitlines(): arr = pid_vm.split() if len(arr)", "k) for v in latest_collect_result.itervalues(): ret.extend(v) Collector.__store_cache__(ret) return ret REGISTRY.register(Collector())", "disk_name = disk.split(\"/\")[-1].strip() wwids = bash_o(\"udevadm info -n %s |", "def collect_lvm_capacity_statistics(): metrics = { 'vg_size': GaugeMetricFamily('vg_size', 'volume group size',", "cpid) if need_restart_collectd: if not mpid: bash_errorout('collectdmon -- -C %s'", "os.path.join(os.path.dirname(cmd.binaryPath), 'collectd.conf') conf = '''Interval {{INTERVAL}} # version {{VERSION}} FQDNLookup", "}) need_restart_collectd = False if os.path.exists(conf_path): with open(conf_path, 'r') as", "e)) return True return True def collect(self): global latest_collect_result ret", "n = 10 for i in range(0, len(pid_vm_map.keys()), n): collect(pid_vm_map.keys()[i:i", "not os.path.exists(service_path): linux.write_file(service_path, service_conf, True) os.chmod(service_path, 0644) reload_and_restart_service(service_name) return if", "(v, e)) return True return True def collect(self): global latest_collect_result", "# type: Dict[str, 
threading.Thread] latest_collect_result = {} collectResultLock = threading.RLock()", "'/var/log/zstack', '/var/lib/mysql', '/var/lib/libvirt', '/tmp/zstack'] metrics = { 'zstack_used_capacity_in_bytes': GaugeMetricFamily('zstack_used_capacity_in_bytes', 'ZStack", "get_result_run(f, fname): # type: (typing.Callable, str) -> None global collectResultLock", "fd: fd.write(conf) need_restart_collectd = True cpid = linux.find_process_by_command('collectd', [conf_path]) mpid", "ValuesPercentage false </Plugin> <Plugin virt> Connection \"qemu:///system\" RefreshInterval {{INTERVAL}} HostnameFormat", "zstacklib.utils import lock from zstacklib.utils import lvm from zstacklib.utils import", "r cache = Collector.__get_cache__() if cache is not None: return", "'ZStack used capacity in bytes') } zstack_used_capacity = 0 for", "'{print $1,$9}'\" % vm_pid_arr_str) if r != 0 or len(pid_cpu_usages_str.splitlines())", "mpid = linux.find_process_by_command('collectdmon', [conf_path]) if not cpid: bash_errorout('collectdmon -- -C", "log.endswith('/pushgateway.log') else log, binPath) if not os.path.exists(service_path): linux.write_file(service_path, service_conf, True)", "size', None, ['vg_name']), } r = bash_r(\"grep -Ev '^[[:space:]]*#|^[[:space:]]*$' /etc/multipath/wwids\")", "\")[0] health = 10 if \"fail\" in info.lower() or \"lost\"", "+= read_number(\"/sys/class/net/{}/statistics/rx_bytes\".format(intf)) all_in_packets += read_number(\"/sys/class/net/{}/statistics/rx_packets\".format(intf)) all_in_errors += read_number(\"/sys/class/net/{}/statistics/rx_errors\".format(intf)) all_out_bytes +=", "bytes') } zstack_used_capacity = 0 for dir in zstack_dir: if", "'w') as fd: fd.write(conf) need_restart_collectd = True cpid = linux.find_process_by_command('collectd',", "!= 0 or len(pid_vm_map_str.splitlines()) == 0: return metrics.values() pid_vm_map_str =", "# version {{VERSION}} FQDNLookup false LoadPlugin syslog LoadPlugin aggregation LoadPlugin", "'/tmp/zstack'] metrics = { 
'zstack_used_capacity_in_bytes': GaugeMetricFamily('zstack_used_capacity_in_bytes', 'ZStack used capacity in", "v): try: if v is None: return False if isinstance(v,", "metrics['node_disk_wwid'].add_metric([disk_name, \";\".join([w.strip() for w in wwids])], 1) collect_node_disk_wwid_last_result = metrics.values()", "pid_vm in pid_vm_map_str.splitlines(): arr = pid_vm.split() if len(arr) == 2:", "\"Slot Number\" in info: slot_number = info.strip().split(\" \")[-1] elif \"DiskGroup\"", "return 15 else: return 100 def collect_raid_state(): metrics = {", "-n 1 -p %s | grep qemu | awk '{print", "= os.listdir(\"/sys/class/net/\") virtual_eths = os.listdir(\"/sys/devices/virtual/net/\") interfaces = [] for eth", "but can not read status = linux.read_file(\"/sys/class/net/%s/carrier\" % nic) ==", "%s | grep -E '^S: disk/by-id' | awk -F '/'", "GaugeMetricFamily('host_network_all_out_packages', 'Host all outbound traffic in packages'), 'host_network_all_out_errors': GaugeMetricFamily('host_network_all_out_errors', 'Host", "CalculateStddev false </Aggregation> </Plugin> <Plugin cpu> ReportByCpu true ReportByState true", "kvmagent.register_prometheus_collector(collect_raid_state) kvmagent.register_prometheus_collector(collect_equipment_state) class PrometheusPlugin(kvmagent.KvmAgent): COLLECTD_PATH = \"/prometheus/collectdexporter/start\" @kvmagent.replyerror @in_bash def", "daemon-reload && systemctl restart %s.service\" % service_name) service_name = get_systemd_name(binPath)", "all outbound traffic in packages'), 'host_network_all_out_errors': GaugeMetricFamily('host_network_all_out_errors', 'Host all outbound", "in path: return \"node_exporter\" elif \"pushgateway\" in path: return \"pushgateway\"", "if collector_dict[k].is_alive(): logger.warn(\"It seems that the collector [%s] has not", "n): collect(pid_vm_map.keys()[i:i + n]) return metrics.values() collect_node_disk_wwid_last_time = None collect_node_disk_wwid_last_result", "time.time() elif time.time() - 
collect_node_disk_wwid_last_time < 60 and collect_node_disk_wwid_last_result is", "all_eths = os.listdir(\"/sys/class/net/\") virtual_eths = os.listdir(\"/sys/devices/virtual/net/\") interfaces = [] for", "-lname '*virtual*' -printf '%f\\\\n'\").splitlines() if len(nics) != 0: for nic", "-C \\\"%s -name\\\" | awk '{print $2,$13}'\" % QEMU_CMD) if", "'^S: disk/by-id' | awk -F '/' '{print $NF}' | grep", "== 'bonding_masters': continue elif eth.startswith('vnic'): continue elif eth.startswith('outer'): continue elif", "metrics['physical_disk_temperature'].add_metric([slot_number, disk_group], int(temp)) else: disk_group = \"JBOD\" if disk_group ==", "= bash_o(\"udevadm info -n %s | grep -E '^S: disk/by-id'", "= [] for eth in eths: eth = eth.strip(' \\t\\n\\r')", "None, []), 'physical_network_interface': GaugeMetricFamily('physical_network_interface', 'physical network interface', None, ['interface_name', 'speed']),", "\"unknown\" for info in disk_info: if \"Slot Number\" in info:", "'INTERVAL': cmd.interval, 'INTERFACES': interfaces, 'VERSION': cmd.version, }) need_restart_collectd = False", "\"/prometheus/collectdexporter/start\" @kvmagent.replyerror @in_bash def start_prometheus_exporter(self, req): @in_bash def start_collectd(cmd): conf_path", "= bash_o(\"/opt/MegaRAID/MegaCli/MegaCli64 -LDInfo -LALL -aAll | grep -E 'Target Id|State'\").strip().splitlines()", "disk_info: if \"Slot Number\" in info: slot_number = info.strip().split(\" \")[-1]", "metrics['ipmi_status'].add_metric([], bash_r(\"ipmitool mc info\")) nics = bash_o(\"find /sys/class/net -type l", "| awk {\\'print $1\\'}\" % dir res = bash_o(cmd) zstack_used_capacity", "['slot_number', 'disk_group']), 'physical_disk_temperature': GaugeMetricFamily('physical_disk_temperature', 'physical disk temperature', None, ['slot_number', 'disk_group']),", "not been completed yet,\" \" temporarily use the last calculation", "def collect_equipment_state(): metrics = { 'power_supply': GaugeMetricFamily('power_supply', 
'power supply', None,", "metrics['host_network_all_in_packages'].add_metric([], float(all_in_packets)) metrics['host_network_all_in_errors'].add_metric([], float(all_in_errors)) metrics['host_network_all_out_bytes'].add_metric([], float(all_out_bytes)) metrics['host_network_all_out_packages'].add_metric([], float(all_out_packets)) metrics['host_network_all_out_errors'].add_metric([], float(all_out_errors))", "'^S: disk/by-id/dm-uuid' | awk -F '-' '{print $NF}'\" % pv).strip()", "not mpid: bash_errorout('collectdmon -- -C %s' % conf_path) def run_in_systemd(binPath,", "= None collect_node_disk_wwid_last_result = None def collect_node_disk_wwid(): global collect_node_disk_wwid_last_time global", "[]), 'physical_network_interface': GaugeMetricFamily('physical_network_interface', 'physical network interface', None, ['interface_name', 'speed']), }", "start_exporter(cmd) @in_bash def start_exporter(cmd): EXPORTER_PATH = cmd.binaryPath LOG_FILE = os.path.join(os.path.dirname(EXPORTER_PATH),", "res = linux.read_file(fname) return 0 if not res else int(res)", "\" temporarily use the last calculation result.\" % k) for", "name = \"%s.%s\" % (c.__module__, c.__name__) if collector_dict.get(name) is not", "| awk -F '/' '{print $NF}' | grep -v '^lvm-pv'", "ARGUMENTS = \"\" os.chmod(EXPORTER_PATH, 0o755) run_in_systemd(EXPORTER_PATH, ARGUMENTS, LOG_FILE) para =", "LoadPlugin syslog LoadPlugin aggregation LoadPlugin cpu LoadPlugin disk LoadPlugin interface", "(list) -> None cls.__collector_cache.clear() cls.__collector_cache.update({time.time(): ret}) @classmethod def check(cls, v):", "in collector_dict.iterkeys(): if collector_dict[k].is_alive(): logger.warn(\"It seems that the collector [%s]", "awk '{print $2,$13}'\" % QEMU_CMD) if r != 0 or", "metrics = { 'host_network_all_in_bytes': GaugeMetricFamily('host_network_all_in_bytes', 'Host all inbound traffic in", "| grep -E '^S: disk/by-id' | awk -F '/' '{print", "= {} @classmethod def __get_cache__(cls): # type: () -> list", 
"Plugin \"cpu\" #PluginInstance \"unspecified\" Type \"cpu\" #TypeInstance \"unspecified\" GroupBy \"Host\"", "continue if Collector.check(v[vk]) is False: return False except Exception as", "= 0 for intf in interfaces: all_in_bytes += read_number(\"/sys/class/net/{}/statistics/rx_bytes\".format(intf)) all_in_packets", "= linux.get_physical_disk(pv, False) for disk in disks: disk_name = disk.split(\"/\")[-1].strip()", "as fd: old_conf = fd.read() if old_conf != conf: with", "service_name) os.chmod(service_path, 0644) # restart service regardless of conf changes,", "= \"/prometheus/collectdexporter/start\" @kvmagent.replyerror @in_bash def start_prometheus_exporter(self, req): @in_bash def start_collectd(cmd):", "info -n %s | grep -E '^S: disk/by-id/dm-uuid' | awk", "return metrics.values() def collect_host_capacity_statistics(): default_zstack_path = '/usr/local/zstack/apache-tomcat/webapps/zstack' zstack_env_path = os.environ.get('ZSTACK_HOME',", "% conf_path) def run_in_systemd(binPath, args, log): def get_systemd_name(path): if \"collectd_exporter\"", "= cls.__collector_cache.keys() if keys is None or len(keys) == 0:", "info in ps_info.splitlines(): info = info.strip() ps_id = info.split(\"|\")[0].strip().split(\" \")[0]", "list) or isinstance(v, tuple): for vl in v: if Collector.check(vl)", "import lvm from zstacklib.utils import misc from zstacklib.utils import thread" ]
[ "from conans.errors import ConanInvalidConfiguration import os import shutil required_conan_version =", "del self.options.fPIC def requirements(self): self.requires(\"expat/2.4.1\") def validate(self): if self.settings.compiler ==", "dst=\"include\", src=self._source_subfolder) self.copy(\"xml/content\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/*.ixx\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/*.txx\", dst=\"include\",", "self._build_autotools() def package(self): self.copy(pattern=\"LICENSE\", dst=\"licenses\", src=self._source_subfolder) if self.settings.compiler == \"Visual", "\"LIBSTUDXML_STATIC_LIB\") msbuild = MSBuild(self) msbuild.build(sln_path, platforms={\"x86\": \"Win32\"}) @property def _user_info_build(self):", "\"config.sub\")) shutil.copy(self._user_info_build[\"gnu-config\"].CONFIG_GUESS, os.path.join(self._source_subfolder, \"config\", \"config.guess\")) if self.settings.compiler.get_safe(\"libcxx\") == \"libc++\": #", "with tools.chdir(self._source_subfolder): self.run(\"{} -fiv\".format(tools.get_env(\"AUTORECONF\")), win_bash=tools.os_info.is_windows) autotools = self._configure_autotools() autotools.make() def", "\"config.guess\")) if self.settings.compiler.get_safe(\"libcxx\") == \"libc++\": # libc++ includes a file", "dst=\"lib\", src=os.path.join(self._source_subfolder, \"bin\" + suffix)) else: autotools = self._configure_autotools() autotools.install()", "\"libstudxml\" # If built with makefile, static library mechanism is", "= \"\" if self.settings.arch == \"x86_64\": suffix = \"64\" if", "is not supported.\".format(self.settings.compiler.version)) @property def _settings_build(self): return getattr(self, \"settings_build\", self.settings)", "\"share\")) def package_info(self): self.cpp_info.libs = tools.collect_libs(self) self.cpp_info.names[\"pkg_config\"] = \"libstudxml\" #", "getattr(self, \"user_info_build\", self.deps_user_info) def _build_autotools(self): 
shutil.copy(self._user_info_build[\"gnu-config\"].CONFIG_SUB, os.path.join(self._source_subfolder, \"config\", \"config.sub\")) shutil.copy(self._user_info_build[\"gnu-config\"].CONFIG_GUESS,", "if tools.Version(self.settings.compiler.version) < \"9\": raise ConanInvalidConfiguration(\"Visual Studio {} is not", "autotools.make() def build(self): for patch in self.conan_data.get(\"patches\", {}).get(self.version, []): tools.patch(**patch)", "= \"https://www.codesynthesis.com/projects/libstudxml/\" license = \"MIT\" settings = \"os\", \"compiler\", \"build_type\",", "not os.path.exists(sln_path): vc_ver -= 1 sln_path = get_sln_path() proj_path =", "os.path.join(self._source_subfolder, \"xml\", \"libstudxml-vc{}.vcxproj\".format(vc_ver)) if not self.options.shared: tools.replace_in_file(proj_path, \"DynamicLibrary\", \"StaticLibrary\") tools.replace_in_file(proj_path,", "\"pkgconfig\")) tools.rmdir(os.path.join(self.package_folder, \"share\")) def package_info(self): self.cpp_info.libs = tools.collect_libs(self) self.cpp_info.names[\"pkg_config\"] =", "topics = (\"xml\", \"xml-parser\", \"serialization\") url = \"https://github.com/conan-io/conan-center-index\" homepage =", "def build_requirements(self): if self.settings.compiler != \"Visual Studio\": self.build_requires(\"gnu-config/cci.20201022\") self.build_requires(\"libtool/2.4.6\") if", "= \"libstudxml\" description = \"A streaming XML pull parser and", "license = \"MIT\" settings = \"os\", \"compiler\", \"build_type\", \"arch\" exports_sources", "\"Visual Studio\": self.build_requires(\"gnu-config/cci.20201022\") self.build_requires(\"libtool/2.4.6\") if self._settings_build.os == \"Windows\" and not", "\"lib\" + suffix)) self.copy(\"*.dll\", dst=\"bin\", src=os.path.join(self._source_subfolder, \"bin\" + suffix)) else:", "mechanism is provided by their buildsystem already if self.settings.compiler ==", "== \"libc++\": # libc++ includes a file called 'version', and", "src=self._source_subfolder) 
self.copy(\"xml/parser\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/forward\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/exception\", dst=\"include\", src=self._source_subfolder)", "src=self._source_subfolder) self.copy(\"xml/serializer\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/qname\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/parser\", dst=\"include\", src=self._source_subfolder)", "source_subfolder as an # include dir, libc++ ends up including", "dst=\"include\", src=self._source_subfolder) self.copy(\"xml/exception\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/content\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/*.ixx\", dst=\"include\",", "# libc++ includes a file called 'version', and since libstudxml", "None @property def _source_subfolder(self): return \"source_subfolder\" def config_options(self): if self.settings.os", "os.path.exists(sln_path): vc_ver -= 1 sln_path = get_sln_path() proj_path = os.path.join(self._source_subfolder,", "vc_ver -= 1 sln_path = get_sln_path() proj_path = os.path.join(self._source_subfolder, \"xml\",", "build_requirements(self): if self.settings.compiler != \"Visual Studio\": self.build_requires(\"gnu-config/cci.20201022\") self.build_requires(\"libtool/2.4.6\") if self._settings_build.os", "for patch in self.conan_data.get(\"patches\", {}).get(self.version, []): tools.patch(**patch) if self.settings.compiler ==", "self.copy(\"*.dll\", dst=\"bin\", src=os.path.join(self._source_subfolder, \"bin\" + suffix)) else: self.copy(\"*.lib\", dst=\"lib\", src=os.path.join(self._source_subfolder,", "libstudxml adds source_subfolder as an # include dir, libc++ ends", "makefile, static library mechanism is provided by their buildsystem already", "\"Visual Studio\": if tools.Version(self.settings.compiler.version) < \"9\": raise ConanInvalidConfiguration(\"Visual Studio {}", "os.path.join(self._source_subfolder, \"config\", 
\"config.sub\")) shutil.copy(self._user_info_build[\"gnu-config\"].CONFIG_GUESS, os.path.join(self._source_subfolder, \"config\", \"config.guess\")) if self.settings.compiler.get_safe(\"libcxx\") ==", "False], \"fPIC\": [True, False], } default_options = { \"shared\": False,", "by their buildsystem already if self.settings.compiler == \"Visual Studio\" and", "import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools from conans.errors import ConanInvalidConfiguration import", "self.settings.compiler.get_safe(\"libcxx\") == \"libc++\": # libc++ includes a file called 'version',", "destination=self._source_subfolder, strip_root=True) def _configure_autotools(self): if not self._autotools: args = [\"--with-external-expat\"]", "self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows) self._autotools.configure(configure_dir=self._source_subfolder, args=args) return self._autotools def _build_vs(self):", "\"libc++\": # libc++ includes a file called 'version', and since", "self.copy(\"xml/serializer\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/qname\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/parser\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/forward\",", "\"source_subfolder\" def config_options(self): if self.settings.os == \"Windows\": del self.options.fPIC def", "return \"source_subfolder\" def config_options(self): if self.settings.os == \"Windows\": del self.options.fPIC", "self.copy(\"xml/content\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/*.ixx\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/*.txx\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/*.hxx\",", "\"version\") with tools.chdir(self._source_subfolder): self.run(\"{} -fiv\".format(tools.get_env(\"AUTORECONF\")), win_bash=tools.os_info.is_windows) autotools = self._configure_autotools() autotools.make()", "self.options.fPIC def configure(self): if self.options.shared: 
del self.options.fPIC def requirements(self): self.requires(\"expat/2.4.1\")", "in self.conan_data.get(\"patches\", {}).get(self.version, []): tools.patch(**patch) if self.settings.compiler == \"Visual Studio\":", "self.build_requires(\"libtool/2.4.6\") if self._settings_build.os == \"Windows\" and not tools.get_env(\"CONAN_BASH_PATH\"): self.build_requires(\"msys2/cci.latest\") def", "def package(self): self.copy(pattern=\"LICENSE\", dst=\"licenses\", src=self._source_subfolder) if self.settings.compiler == \"Visual Studio\":", "= self._configure_autotools() autotools.make() def build(self): for patch in self.conan_data.get(\"patches\", {}).get(self.version,", "XML pull parser and streaming XML serializer implementation for modern,", "\"64\" if self.options.shared: self.copy(\"*.lib\", dst=\"lib\", src=os.path.join(self._source_subfolder, \"lib\" + suffix)) self.copy(\"*.dll\",", "\"https://github.com/conan-io/conan-center-index\" homepage = \"https://www.codesynthesis.com/projects/libstudxml/\" license = \"MIT\" settings = \"os\",", "if not self.options.shared: tools.replace_in_file(proj_path, \"DynamicLibrary\", \"StaticLibrary\") tools.replace_in_file(proj_path, \"LIBSTUDXML_DYNAMIC_LIB\", \"LIBSTUDXML_STATIC_LIB\") msbuild", "if self.settings.compiler == \"Visual Studio\": self.copy(\"xml/value-traits\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/serializer\", dst=\"include\",", "= \"MIT\" settings = \"os\", \"compiler\", \"build_type\", \"arch\" exports_sources =", "their 'version' file instead, causing a compile error tools.remove_files_by_mask(self._source_subfolder, \"version\")", "error tools.remove_files_by_mask(self._source_subfolder, \"version\") with tools.chdir(self._source_subfolder): self.run(\"{} -fiv\".format(tools.get_env(\"AUTORECONF\")), win_bash=tools.os_info.is_windows) autotools =", "\"Visual Studio\": self._build_vs() else: self._build_autotools() def package(self): self.copy(pattern=\"LICENSE\", dst=\"licenses\", 
src=self._source_subfolder)", "{ \"shared\": False, \"fPIC\": True, } _autotools = None @property", "patch in self.conan_data.get(\"patches\", {}).get(self.version, []): tools.patch(**patch) if self.settings.compiler == \"Visual", "LibStudXmlConan(ConanFile): name = \"libstudxml\" description = \"A streaming XML pull", "\"\" if self.settings.arch == \"x86_64\": suffix = \"64\" if self.options.shared:", "\"xml-parser\", \"serialization\") url = \"https://github.com/conan-io/conan-center-index\" homepage = \"https://www.codesynthesis.com/projects/libstudxml/\" license =", "\"settings_build\", self.settings) def build_requirements(self): if self.settings.compiler != \"Visual Studio\": self.build_requires(\"gnu-config/cci.20201022\")", "= \"64\" if self.options.shared: self.copy(\"*.lib\", dst=\"lib\", src=os.path.join(self._source_subfolder, \"lib\" + suffix))", "import shutil required_conan_version = \">=1.33.0\" class LibStudXmlConan(ConanFile): name = \"libstudxml\"", "self._configure_autotools() autotools.make() def build(self): for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):", "self.copy(\"*.lib\", dst=\"lib\", src=os.path.join(self._source_subfolder, \"bin\" + suffix)) else: autotools = self._configure_autotools()", "is provided by their buildsystem already if self.settings.compiler == \"Visual", "since libstudxml adds source_subfolder as an # include dir, libc++", "raise ConanInvalidConfiguration(\"Visual Studio {} is not supported.\".format(self.settings.compiler.version)) @property def _settings_build(self):", "If built with makefile, static library mechanism is provided by", "== \"Visual Studio\": self._build_vs() else: self._build_autotools() def package(self): self.copy(pattern=\"LICENSE\", dst=\"licenses\",", "library mechanism is provided by their buildsystem already if self.settings.compiler", "@property def _user_info_build(self): return getattr(self, \"user_info_build\", self.deps_user_info) def _build_autotools(self): 
shutil.copy(self._user_info_build[\"gnu-config\"].CONFIG_SUB,", "\"LIBSTUDXML_DYNAMIC_LIB\", \"LIBSTUDXML_STATIC_LIB\") msbuild = MSBuild(self) msbuild.build(sln_path, platforms={\"x86\": \"Win32\"}) @property def", "\"shared\": [True, False], \"fPIC\": [True, False], } default_options = {", "if self.options.shared: self.copy(\"*.lib\", dst=\"lib\", src=os.path.join(self._source_subfolder, \"lib\" + suffix)) self.copy(\"*.dll\", dst=\"bin\",", "suffix)) else: autotools = self._configure_autotools() autotools.install() tools.remove_files_by_mask(os.path.join(self.package_folder, \"lib\"), \"libstudxml.la\") tools.rmdir(os.path.join(self.package_folder,", "C++.\" topics = (\"xml\", \"xml-parser\", \"serialization\") url = \"https://github.com/conan-io/conan-center-index\" homepage", "msbuild = MSBuild(self) msbuild.build(sln_path, platforms={\"x86\": \"Win32\"}) @property def _user_info_build(self): return", "sln_path = get_sln_path() while not os.path.exists(sln_path): vc_ver -= 1 sln_path", "def _source_subfolder(self): return \"source_subfolder\" def config_options(self): if self.settings.os == \"Windows\":", "if self.options.shared: del self.options.fPIC def requirements(self): self.requires(\"expat/2.4.1\") def validate(self): if", "None def get_sln_path(): return os.path.join(self._source_subfolder, \"libstudxml-vc{}.sln\".format(vc_ver)) sln_path = get_sln_path() while", "import os import shutil required_conan_version = \">=1.33.0\" class LibStudXmlConan(ConanFile): name", "if self.settings.compiler == \"Visual Studio\" and not self.options.shared: self.cpp_info.defines =", "tools.patch(**patch) if self.settings.compiler == \"Visual Studio\": self._build_vs() else: self._build_autotools() def", "from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools from conans.errors import", "src=self._source_subfolder) suffix = \"\" if self.settings.arch == \"x86_64\": suffix =", "\"arch\" exports_sources = \"patches/*\" options = { \"shared\": [True, 
False],", "= (\"xml\", \"xml-parser\", \"serialization\") url = \"https://github.com/conan-io/conan-center-index\" homepage = \"https://www.codesynthesis.com/projects/libstudxml/\"", "\"config\", \"config.sub\")) shutil.copy(self._user_info_build[\"gnu-config\"].CONFIG_GUESS, os.path.join(self._source_subfolder, \"config\", \"config.guess\")) if self.settings.compiler.get_safe(\"libcxx\") == \"libc++\":", "modern, standard C++.\" topics = (\"xml\", \"xml-parser\", \"serialization\") url =", "class LibStudXmlConan(ConanFile): name = \"libstudxml\" description = \"A streaming XML", "up including their 'version' file instead, causing a compile error", "\"A streaming XML pull parser and streaming XML serializer implementation", "msbuild.build(sln_path, platforms={\"x86\": \"Win32\"}) @property def _user_info_build(self): return getattr(self, \"user_info_build\", self.deps_user_info)", "args.extend([\"--disable-shared\", \"--enable-static\"]) self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows) self._autotools.configure(configure_dir=self._source_subfolder, args=args) return self._autotools", "description = \"A streaming XML pull parser and streaming XML", "serializer implementation for modern, standard C++.\" topics = (\"xml\", \"xml-parser\",", "== \"Windows\": del self.options.fPIC def configure(self): if self.options.shared: del self.options.fPIC", "_source_subfolder(self): return \"source_subfolder\" def config_options(self): if self.settings.os == \"Windows\": del", "\"user_info_build\", self.deps_user_info) def _build_autotools(self): shutil.copy(self._user_info_build[\"gnu-config\"].CONFIG_SUB, os.path.join(self._source_subfolder, \"config\", \"config.sub\")) shutil.copy(self._user_info_build[\"gnu-config\"].CONFIG_GUESS, os.path.join(self._source_subfolder,", "Studio\": self.copy(\"xml/value-traits\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/serializer\", dst=\"include\", src=self._source_subfolder) 
self.copy(\"xml/qname\", dst=\"include\", src=self._source_subfolder)", "= { \"shared\": False, \"fPIC\": True, } _autotools = None", "\"libstudxml.la\") tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\")) tools.rmdir(os.path.join(self.package_folder, \"share\")) def package_info(self): self.cpp_info.libs =", "settings = \"os\", \"compiler\", \"build_type\", \"arch\" exports_sources = \"patches/*\" options", "default_options = { \"shared\": False, \"fPIC\": True, } _autotools =", "autotools = self._configure_autotools() autotools.make() def build(self): for patch in self.conan_data.get(\"patches\",", "shutil.copy(self._user_info_build[\"gnu-config\"].CONFIG_GUESS, os.path.join(self._source_subfolder, \"config\", \"config.guess\")) if self.settings.compiler.get_safe(\"libcxx\") == \"libc++\": # libc++", "def _user_info_build(self): return getattr(self, \"user_info_build\", self.deps_user_info) def _build_autotools(self): shutil.copy(self._user_info_build[\"gnu-config\"].CONFIG_SUB, os.path.join(self._source_subfolder,", "_configure_autotools(self): if not self._autotools: args = [\"--with-external-expat\"] if self.options.shared: args.extend([\"--enable-shared\",", "self._autotools.configure(configure_dir=self._source_subfolder, args=args) return self._autotools def _build_vs(self): vc_ver = int(tools.Version(self.settings.compiler.version).major) sln_path", "def config_options(self): if self.settings.os == \"Windows\": del self.options.fPIC def configure(self):", "return self._autotools def _build_vs(self): vc_ver = int(tools.Version(self.settings.compiler.version).major) sln_path = None", "{ \"shared\": [True, False], \"fPIC\": [True, False], } default_options =", "args = [\"--with-external-expat\"] if self.options.shared: args.extend([\"--enable-shared\", \"--disable-static\"]) else: args.extend([\"--disable-shared\", \"--enable-static\"])", "src=self._source_subfolder) self.copy(\"xml/*.ixx\", dst=\"include\", src=self._source_subfolder) 
self.copy(\"xml/*.txx\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/*.hxx\", dst=\"include\", src=self._source_subfolder)", "validate(self): if self.settings.compiler == \"Visual Studio\": if tools.Version(self.settings.compiler.version) < \"9\":", "+ suffix)) else: autotools = self._configure_autotools() autotools.install() tools.remove_files_by_mask(os.path.join(self.package_folder, \"lib\"), \"libstudxml.la\")", "@property def _source_subfolder(self): return \"source_subfolder\" def config_options(self): if self.settings.os ==", "a file called 'version', and since libstudxml adds source_subfolder as", "\"xml\", \"libstudxml-vc{}.vcxproj\".format(vc_ver)) if not self.options.shared: tools.replace_in_file(proj_path, \"DynamicLibrary\", \"StaticLibrary\") tools.replace_in_file(proj_path, \"LIBSTUDXML_DYNAMIC_LIB\",", "\"lib\"), \"libstudxml.la\") tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\")) tools.rmdir(os.path.join(self.package_folder, \"share\")) def package_info(self): self.cpp_info.libs", "-fiv\".format(tools.get_env(\"AUTORECONF\")), win_bash=tools.os_info.is_windows) autotools = self._configure_autotools() autotools.make() def build(self): for patch", "if self.settings.compiler.get_safe(\"libcxx\") == \"libc++\": # libc++ includes a file called", "instead, causing a compile error tools.remove_files_by_mask(self._source_subfolder, \"version\") with tools.chdir(self._source_subfolder): self.run(\"{}", "\"9\": raise ConanInvalidConfiguration(\"Visual Studio {} is not supported.\".format(self.settings.compiler.version)) @property def", "a compile error tools.remove_files_by_mask(self._source_subfolder, \"version\") with tools.chdir(self._source_subfolder): self.run(\"{} -fiv\".format(tools.get_env(\"AUTORECONF\")), win_bash=tools.os_info.is_windows)", "Studio\": self._build_vs() else: self._build_autotools() def package(self): self.copy(pattern=\"LICENSE\", dst=\"licenses\", src=self._source_subfolder) if", 
"strip_root=True) def _configure_autotools(self): if not self._autotools: args = [\"--with-external-expat\"] if", "package(self): self.copy(pattern=\"LICENSE\", dst=\"licenses\", src=self._source_subfolder) if self.settings.compiler == \"Visual Studio\": self.copy(\"xml/value-traits\",", "= \">=1.33.0\" class LibStudXmlConan(ConanFile): name = \"libstudxml\" description = \"A", "while not os.path.exists(sln_path): vc_ver -= 1 sln_path = get_sln_path() proj_path", "args=args) return self._autotools def _build_vs(self): vc_ver = int(tools.Version(self.settings.compiler.version).major) sln_path =", "get_sln_path() while not os.path.exists(sln_path): vc_ver -= 1 sln_path = get_sln_path()", "dst=\"include\", src=self._source_subfolder) self.copy(\"xml/qname\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/parser\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/forward\", dst=\"include\",", "src=self._source_subfolder) self.copy(\"xml/*.txx\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/*.hxx\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/*.h\", dst=\"include\", src=self._source_subfolder)", "True, } _autotools = None @property def _source_subfolder(self): return \"source_subfolder\"", "with makefile, static library mechanism is provided by their buildsystem", "False], } default_options = { \"shared\": False, \"fPIC\": True, }", "pull parser and streaming XML serializer implementation for modern, standard", "self.options.shared: self.copy(\"*.lib\", dst=\"lib\", src=os.path.join(self._source_subfolder, \"lib\" + suffix)) self.copy(\"*.dll\", dst=\"bin\", src=os.path.join(self._source_subfolder,", "= \"A streaming XML pull parser and streaming XML serializer", "self.copy(\"xml/*.ixx\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/*.txx\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/*.hxx\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/*.h\",", 
"self.settings.compiler == \"Visual Studio\": if tools.Version(self.settings.compiler.version) < \"9\": raise ConanInvalidConfiguration(\"Visual", "libc++ ends up including their 'version' file instead, causing a", "platforms={\"x86\": \"Win32\"}) @property def _user_info_build(self): return getattr(self, \"user_info_build\", self.deps_user_info) def", "\"Visual Studio\": self.copy(\"xml/value-traits\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/serializer\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/qname\", dst=\"include\",", "dst=\"include\", src=self._source_subfolder) self.copy(\"xml/parser\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/forward\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/exception\", dst=\"include\",", "dst=\"include\", src=self._source_subfolder) self.copy(\"xml/*.h\", dst=\"include\", src=self._source_subfolder) suffix = \"\" if self.settings.arch", "= os.path.join(self._source_subfolder, \"xml\", \"libstudxml-vc{}.vcxproj\".format(vc_ver)) if not self.options.shared: tools.replace_in_file(proj_path, \"DynamicLibrary\", \"StaticLibrary\")", "\"os\", \"compiler\", \"build_type\", \"arch\" exports_sources = \"patches/*\" options = {", "file called 'version', and since libstudxml adds source_subfolder as an", "[]): tools.patch(**patch) if self.settings.compiler == \"Visual Studio\": self._build_vs() else: self._build_autotools()", "int(tools.Version(self.settings.compiler.version).major) sln_path = None def get_sln_path(): return os.path.join(self._source_subfolder, \"libstudxml-vc{}.sln\".format(vc_ver)) sln_path", "self.copy(\"*.lib\", dst=\"lib\", src=os.path.join(self._source_subfolder, \"lib\" + suffix)) self.copy(\"*.dll\", dst=\"bin\", src=os.path.join(self._source_subfolder, \"bin\"", "config_options(self): if self.settings.os == \"Windows\": del self.options.fPIC def configure(self): if", "name = \"libstudxml\" description = \"A streaming XML pull parser", 
"_build_autotools(self): shutil.copy(self._user_info_build[\"gnu-config\"].CONFIG_SUB, os.path.join(self._source_subfolder, \"config\", \"config.sub\")) shutil.copy(self._user_info_build[\"gnu-config\"].CONFIG_GUESS, os.path.join(self._source_subfolder, \"config\", \"config.guess\")) if", "\"StaticLibrary\") tools.replace_in_file(proj_path, \"LIBSTUDXML_DYNAMIC_LIB\", \"LIBSTUDXML_STATIC_LIB\") msbuild = MSBuild(self) msbuild.build(sln_path, platforms={\"x86\": \"Win32\"})", "} _autotools = None @property def _source_subfolder(self): return \"source_subfolder\" def", "def package_info(self): self.cpp_info.libs = tools.collect_libs(self) self.cpp_info.names[\"pkg_config\"] = \"libstudxml\" # If", "dst=\"include\", src=self._source_subfolder) self.copy(\"xml/*.txx\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/*.hxx\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/*.h\", dst=\"include\",", "\"libstudxml-vc{}.sln\".format(vc_ver)) sln_path = get_sln_path() while not os.path.exists(sln_path): vc_ver -= 1", "else: autotools = self._configure_autotools() autotools.install() tools.remove_files_by_mask(os.path.join(self.package_folder, \"lib\"), \"libstudxml.la\") tools.rmdir(os.path.join(self.package_folder, \"lib\",", "for modern, standard C++.\" topics = (\"xml\", \"xml-parser\", \"serialization\") url", "sln_path = get_sln_path() proj_path = os.path.join(self._source_subfolder, \"xml\", \"libstudxml-vc{}.vcxproj\".format(vc_ver)) if not", "self._settings_build.os == \"Windows\" and not tools.get_env(\"CONAN_BASH_PATH\"): self.build_requires(\"msys2/cci.latest\") def source(self): tools.get(**self.conan_data[\"sources\"][self.version],", "# include dir, libc++ ends up including their 'version' file", "ends up including their 'version' file instead, causing a compile", "= MSBuild(self) msbuild.build(sln_path, platforms={\"x86\": \"Win32\"}) @property def _user_info_build(self): return getattr(self,", "self.options.shared: 
tools.replace_in_file(proj_path, \"DynamicLibrary\", \"StaticLibrary\") tools.replace_in_file(proj_path, \"LIBSTUDXML_DYNAMIC_LIB\", \"LIBSTUDXML_STATIC_LIB\") msbuild = MSBuild(self)", "False, \"fPIC\": True, } _autotools = None @property def _source_subfolder(self):", "self.settings.compiler == \"Visual Studio\": self.copy(\"xml/value-traits\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/serializer\", dst=\"include\", src=self._source_subfolder)", "return getattr(self, \"user_info_build\", self.deps_user_info) def _build_autotools(self): shutil.copy(self._user_info_build[\"gnu-config\"].CONFIG_SUB, os.path.join(self._source_subfolder, \"config\", \"config.sub\"))", "(\"xml\", \"xml-parser\", \"serialization\") url = \"https://github.com/conan-io/conan-center-index\" homepage = \"https://www.codesynthesis.com/projects/libstudxml/\" license", "src=os.path.join(self._source_subfolder, \"lib\" + suffix)) self.copy(\"*.dll\", dst=\"bin\", src=os.path.join(self._source_subfolder, \"bin\" + suffix))", "AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows) self._autotools.configure(configure_dir=self._source_subfolder, args=args) return self._autotools def _build_vs(self): vc_ver =", "else: self._build_autotools() def package(self): self.copy(pattern=\"LICENSE\", dst=\"licenses\", src=self._source_subfolder) if self.settings.compiler ==", "self.copy(\"xml/*.h\", dst=\"include\", src=self._source_subfolder) suffix = \"\" if self.settings.arch == \"x86_64\":", "get_sln_path() proj_path = os.path.join(self._source_subfolder, \"xml\", \"libstudxml-vc{}.vcxproj\".format(vc_ver)) if not self.options.shared: tools.replace_in_file(proj_path,", "\"https://www.codesynthesis.com/projects/libstudxml/\" license = \"MIT\" settings = \"os\", \"compiler\", \"build_type\", \"arch\"", "= [\"--with-external-expat\"] if self.options.shared: args.extend([\"--enable-shared\", \"--disable-static\"]) else: args.extend([\"--disable-shared\", \"--enable-static\"]) 
self._autotools", "\"Windows\": del self.options.fPIC def configure(self): if self.options.shared: del self.options.fPIC def", "their buildsystem already if self.settings.compiler == \"Visual Studio\" and not", "src=self._source_subfolder) if self.settings.compiler == \"Visual Studio\": self.copy(\"xml/value-traits\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/serializer\",", "called 'version', and since libstudxml adds source_subfolder as an #", "self.options.fPIC def requirements(self): self.requires(\"expat/2.4.1\") def validate(self): if self.settings.compiler == \"Visual", "suffix = \"\" if self.settings.arch == \"x86_64\": suffix = \"64\"", "} default_options = { \"shared\": False, \"fPIC\": True, } _autotools", "_user_info_build(self): return getattr(self, \"user_info_build\", self.deps_user_info) def _build_autotools(self): shutil.copy(self._user_info_build[\"gnu-config\"].CONFIG_SUB, os.path.join(self._source_subfolder, \"config\",", "vc_ver = int(tools.Version(self.settings.compiler.version).major) sln_path = None def get_sln_path(): return os.path.join(self._source_subfolder,", "includes a file called 'version', and since libstudxml adds source_subfolder", "and streaming XML serializer implementation for modern, standard C++.\" topics", "Studio\": if tools.Version(self.settings.compiler.version) < \"9\": raise ConanInvalidConfiguration(\"Visual Studio {} is", "XML serializer implementation for modern, standard C++.\" topics = (\"xml\",", "tools.get(**self.conan_data[\"sources\"][self.version], destination=self._source_subfolder, strip_root=True) def _configure_autotools(self): if not self._autotools: args =", "src=self._source_subfolder) self.copy(\"xml/content\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/*.ixx\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/*.txx\", dst=\"include\", src=self._source_subfolder)", "Studio\": self.build_requires(\"gnu-config/cci.20201022\") 
self.build_requires(\"libtool/2.4.6\") if self._settings_build.os == \"Windows\" and not tools.get_env(\"CONAN_BASH_PATH\"):", "causing a compile error tools.remove_files_by_mask(self._source_subfolder, \"version\") with tools.chdir(self._source_subfolder): self.run(\"{} -fiv\".format(tools.get_env(\"AUTORECONF\")),", "del self.options.fPIC def configure(self): if self.options.shared: del self.options.fPIC def requirements(self):", "compile error tools.remove_files_by_mask(self._source_subfolder, \"version\") with tools.chdir(self._source_subfolder): self.run(\"{} -fiv\".format(tools.get_env(\"AUTORECONF\")), win_bash=tools.os_info.is_windows) autotools", "\"config\", \"config.guess\")) if self.settings.compiler.get_safe(\"libcxx\") == \"libc++\": # libc++ includes a", "\"libstudxml\" description = \"A streaming XML pull parser and streaming", "win_bash=tools.os_info.is_windows) self._autotools.configure(configure_dir=self._source_subfolder, args=args) return self._autotools def _build_vs(self): vc_ver = int(tools.Version(self.settings.compiler.version).major)", "self.copy(pattern=\"LICENSE\", dst=\"licenses\", src=self._source_subfolder) if self.settings.compiler == \"Visual Studio\": self.copy(\"xml/value-traits\", dst=\"include\",", "\"libstudxml-vc{}.vcxproj\".format(vc_ver)) if not self.options.shared: tools.replace_in_file(proj_path, \"DynamicLibrary\", \"StaticLibrary\") tools.replace_in_file(proj_path, \"LIBSTUDXML_DYNAMIC_LIB\", \"LIBSTUDXML_STATIC_LIB\")", "self.build_requires(\"gnu-config/cci.20201022\") self.build_requires(\"libtool/2.4.6\") if self._settings_build.os == \"Windows\" and not tools.get_env(\"CONAN_BASH_PATH\"): self.build_requires(\"msys2/cci.latest\")", "= \"https://github.com/conan-io/conan-center-index\" homepage = \"https://www.codesynthesis.com/projects/libstudxml/\" license = \"MIT\" settings =", "self.settings.compiler != \"Visual Studio\": self.build_requires(\"gnu-config/cci.20201022\") self.build_requires(\"libtool/2.4.6\") if 
self._settings_build.os == \"Windows\"", "source(self): tools.get(**self.conan_data[\"sources\"][self.version], destination=self._source_subfolder, strip_root=True) def _configure_autotools(self): if not self._autotools: args", "args.extend([\"--enable-shared\", \"--disable-static\"]) else: args.extend([\"--disable-shared\", \"--enable-static\"]) self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows) self._autotools.configure(configure_dir=self._source_subfolder,", "self.copy(\"xml/forward\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/exception\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/content\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/*.ixx\",", "import ConanInvalidConfiguration import os import shutil required_conan_version = \">=1.33.0\" class", "self.copy(\"xml/exception\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/content\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/*.ixx\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/*.txx\",", "self.copy(\"xml/value-traits\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/serializer\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/qname\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/parser\",", "not self.options.shared: tools.replace_in_file(proj_path, \"DynamicLibrary\", \"StaticLibrary\") tools.replace_in_file(proj_path, \"LIBSTUDXML_DYNAMIC_LIB\", \"LIBSTUDXML_STATIC_LIB\") msbuild =", "conans.errors import ConanInvalidConfiguration import os import shutil required_conan_version = \">=1.33.0\"", "# If built with makefile, static library mechanism is provided", "def validate(self): if self.settings.compiler == \"Visual Studio\": if tools.Version(self.settings.compiler.version) <", "dst=\"licenses\", src=self._source_subfolder) if self.settings.compiler == \"Visual Studio\": self.copy(\"xml/value-traits\", dst=\"include\", 
src=self._source_subfolder)", "= AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows) self._autotools.configure(configure_dir=self._source_subfolder, args=args) return self._autotools def _build_vs(self): vc_ver", "def _configure_autotools(self): if not self._autotools: args = [\"--with-external-expat\"] if self.options.shared:", "\"shared\": False, \"fPIC\": True, } _autotools = None @property def", "tools.remove_files_by_mask(self._source_subfolder, \"version\") with tools.chdir(self._source_subfolder): self.run(\"{} -fiv\".format(tools.get_env(\"AUTORECONF\")), win_bash=tools.os_info.is_windows) autotools = self._configure_autotools()", "\"fPIC\": True, } _autotools = None @property def _source_subfolder(self): return", "suffix)) self.copy(\"*.dll\", dst=\"bin\", src=os.path.join(self._source_subfolder, \"bin\" + suffix)) else: self.copy(\"*.lib\", dst=\"lib\",", "if self.settings.compiler == \"Visual Studio\": self._build_vs() else: self._build_autotools() def package(self):", "dst=\"include\", src=self._source_subfolder) self.copy(\"xml/*.hxx\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/*.h\", dst=\"include\", src=self._source_subfolder) suffix =", "already if self.settings.compiler == \"Visual Studio\" and not self.options.shared: self.cpp_info.defines", "file instead, causing a compile error tools.remove_files_by_mask(self._source_subfolder, \"version\") with tools.chdir(self._source_subfolder):", "and not tools.get_env(\"CONAN_BASH_PATH\"): self.build_requires(\"msys2/cci.latest\") def source(self): tools.get(**self.conan_data[\"sources\"][self.version], destination=self._source_subfolder, strip_root=True) def", "dst=\"include\", src=self._source_subfolder) self.copy(\"xml/forward\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/exception\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/content\", dst=\"include\",", "autotools = self._configure_autotools() autotools.install() 
tools.remove_files_by_mask(os.path.join(self.package_folder, \"lib\"), \"libstudxml.la\") tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))", "\"lib\", \"pkgconfig\")) tools.rmdir(os.path.join(self.package_folder, \"share\")) def package_info(self): self.cpp_info.libs = tools.collect_libs(self) self.cpp_info.names[\"pkg_config\"]", "\"build_type\", \"arch\" exports_sources = \"patches/*\" options = { \"shared\": [True,", "self.settings.compiler == \"Visual Studio\" and not self.options.shared: self.cpp_info.defines = [\"LIBSTUDXML_STATIC_LIB=1\"]", "== \"Visual Studio\": if tools.Version(self.settings.compiler.version) < \"9\": raise ConanInvalidConfiguration(\"Visual Studio", "def get_sln_path(): return os.path.join(self._source_subfolder, \"libstudxml-vc{}.sln\".format(vc_ver)) sln_path = get_sln_path() while not", "= self._configure_autotools() autotools.install() tools.remove_files_by_mask(os.path.join(self.package_folder, \"lib\"), \"libstudxml.la\") tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\")) tools.rmdir(os.path.join(self.package_folder,", "if self.options.shared: args.extend([\"--enable-shared\", \"--disable-static\"]) else: args.extend([\"--disable-shared\", \"--enable-static\"]) self._autotools = AutoToolsBuildEnvironment(self,", "self._autotools: args = [\"--with-external-expat\"] if self.options.shared: args.extend([\"--enable-shared\", \"--disable-static\"]) else: args.extend([\"--disable-shared\",", "return getattr(self, \"settings_build\", self.settings) def build_requirements(self): if self.settings.compiler != \"Visual", "= \"os\", \"compiler\", \"build_type\", \"arch\" exports_sources = \"patches/*\" options =", "built with makefile, static library mechanism is provided by their", "buildsystem already if self.settings.compiler == \"Visual Studio\" and not self.options.shared:", "= { \"shared\": [True, False], \"fPIC\": [True, False], } default_options", "def requirements(self): 
self.requires(\"expat/2.4.1\") def validate(self): if self.settings.compiler == \"Visual Studio\":", "\"serialization\") url = \"https://github.com/conan-io/conan-center-index\" homepage = \"https://www.codesynthesis.com/projects/libstudxml/\" license = \"MIT\"", "exports_sources = \"patches/*\" options = { \"shared\": [True, False], \"fPIC\":", "not tools.get_env(\"CONAN_BASH_PATH\"): self.build_requires(\"msys2/cci.latest\") def source(self): tools.get(**self.conan_data[\"sources\"][self.version], destination=self._source_subfolder, strip_root=True) def _configure_autotools(self):", "if not self._autotools: args = [\"--with-external-expat\"] if self.options.shared: args.extend([\"--enable-shared\", \"--disable-static\"])", "\"--enable-static\"]) self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows) self._autotools.configure(configure_dir=self._source_subfolder, args=args) return self._autotools def", "self.build_requires(\"msys2/cci.latest\") def source(self): tools.get(**self.conan_data[\"sources\"][self.version], destination=self._source_subfolder, strip_root=True) def _configure_autotools(self): if not", "'version' file instead, causing a compile error tools.remove_files_by_mask(self._source_subfolder, \"version\") with", "including their 'version' file instead, causing a compile error tools.remove_files_by_mask(self._source_subfolder,", "standard C++.\" topics = (\"xml\", \"xml-parser\", \"serialization\") url = \"https://github.com/conan-io/conan-center-index\"", "tools.collect_libs(self) self.cpp_info.names[\"pkg_config\"] = \"libstudxml\" # If built with makefile, static", "libc++ includes a file called 'version', and since libstudxml adds", "\"MIT\" settings = \"os\", \"compiler\", \"build_type\", \"arch\" exports_sources = \"patches/*\"", "src=os.path.join(self._source_subfolder, \"bin\" + suffix)) else: autotools = self._configure_autotools() autotools.install() 
tools.remove_files_by_mask(os.path.join(self.package_folder,", "and since libstudxml adds source_subfolder as an # include dir,", "= int(tools.Version(self.settings.compiler.version).major) sln_path = None def get_sln_path(): return os.path.join(self._source_subfolder, \"libstudxml-vc{}.sln\".format(vc_ver))", "if self.settings.compiler != \"Visual Studio\": self.build_requires(\"gnu-config/cci.20201022\") self.build_requires(\"libtool/2.4.6\") if self._settings_build.os ==", "if self.settings.compiler == \"Visual Studio\": if tools.Version(self.settings.compiler.version) < \"9\": raise", "self.settings.arch == \"x86_64\": suffix = \"64\" if self.options.shared: self.copy(\"*.lib\", dst=\"lib\",", "win_bash=tools.os_info.is_windows) autotools = self._configure_autotools() autotools.make() def build(self): for patch in", "\"patches/*\" options = { \"shared\": [True, False], \"fPIC\": [True, False],", "get_sln_path(): return os.path.join(self._source_subfolder, \"libstudxml-vc{}.sln\".format(vc_ver)) sln_path = get_sln_path() while not os.path.exists(sln_path):", "parser and streaming XML serializer implementation for modern, standard C++.\"", "shutil.copy(self._user_info_build[\"gnu-config\"].CONFIG_SUB, os.path.join(self._source_subfolder, \"config\", \"config.sub\")) shutil.copy(self._user_info_build[\"gnu-config\"].CONFIG_GUESS, os.path.join(self._source_subfolder, \"config\", \"config.guess\")) if self.settings.compiler.get_safe(\"libcxx\")", "!= \"Visual Studio\": self.build_requires(\"gnu-config/cci.20201022\") self.build_requires(\"libtool/2.4.6\") if self._settings_build.os == \"Windows\" and", "== \"Visual Studio\": self.copy(\"xml/value-traits\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/serializer\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/qname\",", "src=os.path.join(self._source_subfolder, \"bin\" + suffix)) else: self.copy(\"*.lib\", dst=\"lib\", src=os.path.join(self._source_subfolder, \"bin\" +", 
"self.options.shared: args.extend([\"--enable-shared\", \"--disable-static\"]) else: args.extend([\"--disable-shared\", \"--enable-static\"]) self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)", "self.settings.compiler == \"Visual Studio\": self._build_vs() else: self._build_autotools() def package(self): self.copy(pattern=\"LICENSE\",", "self.options.shared: del self.options.fPIC def requirements(self): self.requires(\"expat/2.4.1\") def validate(self): if self.settings.compiler", "as an # include dir, libc++ ends up including their", "src=self._source_subfolder) self.copy(\"xml/forward\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/exception\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/content\", dst=\"include\", src=self._source_subfolder)", "getattr(self, \"settings_build\", self.settings) def build_requirements(self): if self.settings.compiler != \"Visual Studio\":", "self.settings.os == \"Windows\": del self.options.fPIC def configure(self): if self.options.shared: del", "self.conan_data.get(\"patches\", {}).get(self.version, []): tools.patch(**patch) if self.settings.compiler == \"Visual Studio\": self._build_vs()", "configure(self): if self.options.shared: del self.options.fPIC def requirements(self): self.requires(\"expat/2.4.1\") def validate(self):", "if self.settings.arch == \"x86_64\": suffix = \"64\" if self.options.shared: self.copy(\"*.lib\",", "-= 1 sln_path = get_sln_path() proj_path = os.path.join(self._source_subfolder, \"xml\", \"libstudxml-vc{}.vcxproj\".format(vc_ver))", "suffix)) else: self.copy(\"*.lib\", dst=\"lib\", src=os.path.join(self._source_subfolder, \"bin\" + suffix)) else: autotools", "options = { \"shared\": [True, False], \"fPIC\": [True, False], }", "proj_path = os.path.join(self._source_subfolder, \"xml\", \"libstudxml-vc{}.vcxproj\".format(vc_ver)) if not self.options.shared: tools.replace_in_file(proj_path, \"DynamicLibrary\",", "an # include dir, libc++ 
ends up including their 'version'", "< \"9\": raise ConanInvalidConfiguration(\"Visual Studio {} is not supported.\".format(self.settings.compiler.version)) @property", "1 sln_path = get_sln_path() proj_path = os.path.join(self._source_subfolder, \"xml\", \"libstudxml-vc{}.vcxproj\".format(vc_ver)) if", "tools.Version(self.settings.compiler.version) < \"9\": raise ConanInvalidConfiguration(\"Visual Studio {} is not supported.\".format(self.settings.compiler.version))", "self.requires(\"expat/2.4.1\") def validate(self): if self.settings.compiler == \"Visual Studio\": if tools.Version(self.settings.compiler.version)", "self.copy(\"xml/*.hxx\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/*.h\", dst=\"include\", src=self._source_subfolder) suffix = \"\" if", "_build_vs(self): vc_ver = int(tools.Version(self.settings.compiler.version).major) sln_path = None def get_sln_path(): return", "== \"x86_64\": suffix = \"64\" if self.options.shared: self.copy(\"*.lib\", dst=\"lib\", src=os.path.join(self._source_subfolder,", "conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools from conans.errors import ConanInvalidConfiguration", "if self.settings.os == \"Windows\": del self.options.fPIC def configure(self): if self.options.shared:", "= None def get_sln_path(): return os.path.join(self._source_subfolder, \"libstudxml-vc{}.sln\".format(vc_ver)) sln_path = get_sln_path()", "autotools.install() tools.remove_files_by_mask(os.path.join(self.package_folder, \"lib\"), \"libstudxml.la\") tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\")) tools.rmdir(os.path.join(self.package_folder, \"share\")) def", "src=self._source_subfolder) self.copy(\"xml/*.hxx\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/*.h\", dst=\"include\", src=self._source_subfolder) suffix = \"\"", "dir, libc++ ends up including their 'version' file instead, causing", "def configure(self): if self.options.shared: del self.options.fPIC def 
requirements(self): self.requires(\"expat/2.4.1\") def", "else: args.extend([\"--disable-shared\", \"--enable-static\"]) self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows) self._autotools.configure(configure_dir=self._source_subfolder, args=args) return", "= tools.collect_libs(self) self.cpp_info.names[\"pkg_config\"] = \"libstudxml\" # If built with makefile,", "self._build_vs() else: self._build_autotools() def package(self): self.copy(pattern=\"LICENSE\", dst=\"licenses\", src=self._source_subfolder) if self.settings.compiler", "ConanFile, AutoToolsBuildEnvironment, MSBuild, tools from conans.errors import ConanInvalidConfiguration import os", "os.path.join(self._source_subfolder, \"config\", \"config.guess\")) if self.settings.compiler.get_safe(\"libcxx\") == \"libc++\": # libc++ includes", "\"fPIC\": [True, False], } default_options = { \"shared\": False, \"fPIC\":", "{} is not supported.\".format(self.settings.compiler.version)) @property def _settings_build(self): return getattr(self, \"settings_build\",", "\"--disable-static\"]) else: args.extend([\"--disable-shared\", \"--enable-static\"]) self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows) self._autotools.configure(configure_dir=self._source_subfolder, args=args)", "+ suffix)) self.copy(\"*.dll\", dst=\"bin\", src=os.path.join(self._source_subfolder, \"bin\" + suffix)) else: self.copy(\"*.lib\",", "adds source_subfolder as an # include dir, libc++ ends up", "dst=\"bin\", src=os.path.join(self._source_subfolder, \"bin\" + suffix)) else: self.copy(\"*.lib\", dst=\"lib\", src=os.path.join(self._source_subfolder, \"bin\"", "self.run(\"{} -fiv\".format(tools.get_env(\"AUTORECONF\")), win_bash=tools.os_info.is_windows) autotools = self._configure_autotools() autotools.make() def build(self): for", "= \"libstudxml\" # If built with makefile, static library mechanism", "def _build_autotools(self): 
shutil.copy(self._user_info_build[\"gnu-config\"].CONFIG_SUB, os.path.join(self._source_subfolder, \"config\", \"config.sub\")) shutil.copy(self._user_info_build[\"gnu-config\"].CONFIG_GUESS, os.path.join(self._source_subfolder, \"config\", \"config.guess\"))", "homepage = \"https://www.codesynthesis.com/projects/libstudxml/\" license = \"MIT\" settings = \"os\", \"compiler\",", "def _build_vs(self): vc_ver = int(tools.Version(self.settings.compiler.version).major) sln_path = None def get_sln_path():", "streaming XML pull parser and streaming XML serializer implementation for", "dst=\"include\", src=self._source_subfolder) suffix = \"\" if self.settings.arch == \"x86_64\": suffix", "= get_sln_path() proj_path = os.path.join(self._source_subfolder, \"xml\", \"libstudxml-vc{}.vcxproj\".format(vc_ver)) if not self.options.shared:", "\"bin\" + suffix)) else: self.copy(\"*.lib\", dst=\"lib\", src=os.path.join(self._source_subfolder, \"bin\" + suffix))", "implementation for modern, standard C++.\" topics = (\"xml\", \"xml-parser\", \"serialization\")", "[True, False], } default_options = { \"shared\": False, \"fPIC\": True,", "[True, False], \"fPIC\": [True, False], } default_options = { \"shared\":", "\"x86_64\": suffix = \"64\" if self.options.shared: self.copy(\"*.lib\", dst=\"lib\", src=os.path.join(self._source_subfolder, \"lib\"", "self._configure_autotools() autotools.install() tools.remove_files_by_mask(os.path.join(self.package_folder, \"lib\"), \"libstudxml.la\") tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\")) tools.rmdir(os.path.join(self.package_folder, \"share\"))", "tools.remove_files_by_mask(os.path.join(self.package_folder, \"lib\"), \"libstudxml.la\") tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\")) tools.rmdir(os.path.join(self.package_folder, \"share\")) def package_info(self):", "url = \"https://github.com/conan-io/conan-center-index\" homepage = \"https://www.codesynthesis.com/projects/libstudxml/\" 
license = \"MIT\" settings", "\"compiler\", \"build_type\", \"arch\" exports_sources = \"patches/*\" options = { \"shared\":", "streaming XML serializer implementation for modern, standard C++.\" topics =", "dst=\"include\", src=self._source_subfolder) self.copy(\"xml/*.ixx\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/*.txx\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/*.hxx\", dst=\"include\",", "+ suffix)) else: self.copy(\"*.lib\", dst=\"lib\", src=os.path.join(self._source_subfolder, \"bin\" + suffix)) else:", "\"Windows\" and not tools.get_env(\"CONAN_BASH_PATH\"): self.build_requires(\"msys2/cci.latest\") def source(self): tools.get(**self.conan_data[\"sources\"][self.version], destination=self._source_subfolder, strip_root=True)", "@property def _settings_build(self): return getattr(self, \"settings_build\", self.settings) def build_requirements(self): if", "self.settings) def build_requirements(self): if self.settings.compiler != \"Visual Studio\": self.build_requires(\"gnu-config/cci.20201022\") self.build_requires(\"libtool/2.4.6\")", "self.cpp_info.names[\"pkg_config\"] = \"libstudxml\" # If built with makefile, static library", "MSBuild(self) msbuild.build(sln_path, platforms={\"x86\": \"Win32\"}) @property def _user_info_build(self): return getattr(self, \"user_info_build\",", "required_conan_version = \">=1.33.0\" class LibStudXmlConan(ConanFile): name = \"libstudxml\" description =", "_settings_build(self): return getattr(self, \"settings_build\", self.settings) def build_requirements(self): if self.settings.compiler !=", "'version', and since libstudxml adds source_subfolder as an # include", "= None @property def _source_subfolder(self): return \"source_subfolder\" def config_options(self): if", "self._autotools def _build_vs(self): vc_ver = int(tools.Version(self.settings.compiler.version).major) sln_path = None def", "[\"--with-external-expat\"] if self.options.shared: args.extend([\"--enable-shared\", 
\"--disable-static\"]) else: args.extend([\"--disable-shared\", \"--enable-static\"]) self._autotools =", "_autotools = None @property def _source_subfolder(self): return \"source_subfolder\" def config_options(self):", "src=self._source_subfolder) self.copy(\"xml/qname\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/parser\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/forward\", dst=\"include\", src=self._source_subfolder)", "== \"Windows\" and not tools.get_env(\"CONAN_BASH_PATH\"): self.build_requires(\"msys2/cci.latest\") def source(self): tools.get(**self.conan_data[\"sources\"][self.version], destination=self._source_subfolder,", "build(self): for patch in self.conan_data.get(\"patches\", {}).get(self.version, []): tools.patch(**patch) if self.settings.compiler", "self.deps_user_info) def _build_autotools(self): shutil.copy(self._user_info_build[\"gnu-config\"].CONFIG_SUB, os.path.join(self._source_subfolder, \"config\", \"config.sub\")) shutil.copy(self._user_info_build[\"gnu-config\"].CONFIG_GUESS, os.path.join(self._source_subfolder, \"config\",", "tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\")) tools.rmdir(os.path.join(self.package_folder, \"share\")) def package_info(self): self.cpp_info.libs = tools.collect_libs(self)", "dst=\"lib\", src=os.path.join(self._source_subfolder, \"lib\" + suffix)) self.copy(\"*.dll\", dst=\"bin\", src=os.path.join(self._source_subfolder, \"bin\" +", "shutil required_conan_version = \">=1.33.0\" class LibStudXmlConan(ConanFile): name = \"libstudxml\" description", "tools.get_env(\"CONAN_BASH_PATH\"): self.build_requires(\"msys2/cci.latest\") def source(self): tools.get(**self.conan_data[\"sources\"][self.version], destination=self._source_subfolder, strip_root=True) def _configure_autotools(self): if", "\">=1.33.0\" class LibStudXmlConan(ConanFile): name = \"libstudxml\" description = \"A streaming", "def build(self): for patch in self.conan_data.get(\"patches\", 
{}).get(self.version, []): tools.patch(**patch) if", "\"bin\" + suffix)) else: autotools = self._configure_autotools() autotools.install() tools.remove_files_by_mask(os.path.join(self.package_folder, \"lib\"),", "if self._settings_build.os == \"Windows\" and not tools.get_env(\"CONAN_BASH_PATH\"): self.build_requires(\"msys2/cci.latest\") def source(self):", "AutoToolsBuildEnvironment, MSBuild, tools from conans.errors import ConanInvalidConfiguration import os import", "tools.rmdir(os.path.join(self.package_folder, \"share\")) def package_info(self): self.cpp_info.libs = tools.collect_libs(self) self.cpp_info.names[\"pkg_config\"] = \"libstudxml\"", "= get_sln_path() while not os.path.exists(sln_path): vc_ver -= 1 sln_path =", "provided by their buildsystem already if self.settings.compiler == \"Visual Studio\"", "package_info(self): self.cpp_info.libs = tools.collect_libs(self) self.cpp_info.names[\"pkg_config\"] = \"libstudxml\" # If built", "os.path.join(self._source_subfolder, \"libstudxml-vc{}.sln\".format(vc_ver)) sln_path = get_sln_path() while not os.path.exists(sln_path): vc_ver -=", "Studio {} is not supported.\".format(self.settings.compiler.version)) @property def _settings_build(self): return getattr(self,", "static library mechanism is provided by their buildsystem already if", "MSBuild, tools from conans.errors import ConanInvalidConfiguration import os import shutil", "supported.\".format(self.settings.compiler.version)) @property def _settings_build(self): return getattr(self, \"settings_build\", self.settings) def build_requirements(self):", "not self._autotools: args = [\"--with-external-expat\"] if self.options.shared: args.extend([\"--enable-shared\", \"--disable-static\"]) else:", "\"DynamicLibrary\", \"StaticLibrary\") tools.replace_in_file(proj_path, \"LIBSTUDXML_DYNAMIC_LIB\", \"LIBSTUDXML_STATIC_LIB\") msbuild = MSBuild(self) msbuild.build(sln_path, platforms={\"x86\":", "include dir, libc++ ends up including their 'version' file 
instead,", "tools.replace_in_file(proj_path, \"LIBSTUDXML_DYNAMIC_LIB\", \"LIBSTUDXML_STATIC_LIB\") msbuild = MSBuild(self) msbuild.build(sln_path, platforms={\"x86\": \"Win32\"}) @property", "sln_path = None def get_sln_path(): return os.path.join(self._source_subfolder, \"libstudxml-vc{}.sln\".format(vc_ver)) sln_path =", "= \"patches/*\" options = { \"shared\": [True, False], \"fPIC\": [True,", "tools from conans.errors import ConanInvalidConfiguration import os import shutil required_conan_version", "\"Win32\"}) @property def _user_info_build(self): return getattr(self, \"user_info_build\", self.deps_user_info) def _build_autotools(self):", "ConanInvalidConfiguration import os import shutil required_conan_version = \">=1.33.0\" class LibStudXmlConan(ConanFile):", "self.copy(\"xml/*.txx\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/*.hxx\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/*.h\", dst=\"include\", src=self._source_subfolder) suffix", "suffix = \"64\" if self.options.shared: self.copy(\"*.lib\", dst=\"lib\", src=os.path.join(self._source_subfolder, \"lib\" +", "os import shutil required_conan_version = \">=1.33.0\" class LibStudXmlConan(ConanFile): name =", "def source(self): tools.get(**self.conan_data[\"sources\"][self.version], destination=self._source_subfolder, strip_root=True) def _configure_autotools(self): if not self._autotools:", "ConanInvalidConfiguration(\"Visual Studio {} is not supported.\".format(self.settings.compiler.version)) @property def _settings_build(self): return", "else: self.copy(\"*.lib\", dst=\"lib\", src=os.path.join(self._source_subfolder, \"bin\" + suffix)) else: autotools =", "not supported.\".format(self.settings.compiler.version)) @property def _settings_build(self): return getattr(self, \"settings_build\", self.settings) def", "{}).get(self.version, []): tools.patch(**patch) if self.settings.compiler == \"Visual Studio\": self._build_vs() else:", "tools.replace_in_file(proj_path, 
\"DynamicLibrary\", \"StaticLibrary\") tools.replace_in_file(proj_path, \"LIBSTUDXML_DYNAMIC_LIB\", \"LIBSTUDXML_STATIC_LIB\") msbuild = MSBuild(self) msbuild.build(sln_path,", "def _settings_build(self): return getattr(self, \"settings_build\", self.settings) def build_requirements(self): if self.settings.compiler", "tools.chdir(self._source_subfolder): self.run(\"{} -fiv\".format(tools.get_env(\"AUTORECONF\")), win_bash=tools.os_info.is_windows) autotools = self._configure_autotools() autotools.make() def build(self):", "requirements(self): self.requires(\"expat/2.4.1\") def validate(self): if self.settings.compiler == \"Visual Studio\": if", "self.copy(\"xml/parser\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/forward\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/exception\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/content\",", "src=self._source_subfolder) self.copy(\"xml/*.h\", dst=\"include\", src=self._source_subfolder) suffix = \"\" if self.settings.arch ==", "dst=\"include\", src=self._source_subfolder) self.copy(\"xml/serializer\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/qname\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/parser\", dst=\"include\",", "return os.path.join(self._source_subfolder, \"libstudxml-vc{}.sln\".format(vc_ver)) sln_path = get_sln_path() while not os.path.exists(sln_path): vc_ver", "self.cpp_info.libs = tools.collect_libs(self) self.cpp_info.names[\"pkg_config\"] = \"libstudxml\" # If built with", "src=self._source_subfolder) self.copy(\"xml/exception\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/content\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/*.ixx\", dst=\"include\", src=self._source_subfolder)", "self.copy(\"xml/qname\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/parser\", dst=\"include\", src=self._source_subfolder) self.copy(\"xml/forward\", dst=\"include\", 
src=self._source_subfolder) self.copy(\"xml/exception\"," ]
[ "'all_data': if self.parse == 'Caricature': self.subPath = 'all_cari_data' elif self.parse", "2.0 (the \"License\"); # you may not use this file", "'Bald', 'LargeForehead', 'RoundFace', 'DiamondFace', 'OvalFace', 'SquareShapeFace', 'NarrowEye', 'SleepyEye', 'SlantEye', 'SharpEye',", "a dataType from: train, val, github\") exit(1) self.modelType = modelType", "= 'WebCariTrain' elif self.dataType == 'val': if self.parse == 'Caricature':", "'Photo': self.subPath = 'PhotoTrain' else: self.subPath = 'WebCariTrain' elif self.dataType", "self.parse == 'Photo': self.subPath = 'all_photo_data' else: self.subPath = 'all_WebCari_data'", "correct attribute in param\") exit(1) for line in fileList: names.append(line[0])", "'all_cari_data' elif self.parse == 'Photo': self.subPath = 'all_photo_data' else: self.subPath", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "self.attributes.index(attribute) annas.append([int(attributes[index])]) for line in fileVList: attributes_v = line[1::] index", "for x in line[1::]]) for line in fileVList: visuals.append([int(x) for", "annas.append([int(x) for x in line[1::]]) for line in fileVList: visuals.append([int(x)", "annas = [] visuals = [] file = self.subPath+\".txt\" file_v", "if self.dataType == 'train': if self.parse == 'Caricature': self.subPath =", "fileVList: attributes_v = line[1::] index = self.attributes.index(attribute) visuals.append([int(attributes_v[index])]) else: for", "attribute, \"is not in this dataset, please write a correct", "'NarrowNose', 'Toothy', 'Smile', 'BigMouth', 'SmallMouth', 'ThickLips', 'ThinLips', 'DoubleChin', 'ArchedEyebrows', 'FlatEyebrow',", "num_attribute def getPath(self, name): name = name.replace(' ', '_') name", "this dataset, please write a correct attribute in param\") exit(1)", "WebCariA: def __init__(self, dataType, modelType, parse, des_attri=None): self.dir_path = \"/data/jw/dataset/\"", "use this file except in compliance with the License. 
#", "'BigNose', 'SmallNose', 'HighNose', 'FlatNose', 'HookNose', 'WideNose', 'NarrowNose', 'Toothy', 'Smile', 'BigMouth',", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "'Photo': self.subPath = 'all_photo_data' else: self.subPath = 'all_WebCari_data' else: print(\"Caricature", "for line in fileVList: attributes_v = line[1::] index = self.attributes.index(attribute)", "== 'val': if self.parse == 'Caricature': self.subPath = 'CariVal' elif", "'SlantedEyebrows', 'UpsideDownSlantedEyebrows', 'BushyEyebrows', 'ThickEyebrows', 'ThinEyebrows', 'Mustache', 'Goatee', 'Whiskers', 'OtherBeard&NoBeard', 'HighCheekbones',", "line in fileList: names.append(line[0]) annas.append([int(x) for x in line[1::]]) for", "License. # You may obtain a copy of the License", "= name.replace(' ', '_') name = name.replace('._', '_') name =", "self.parse = parse self.des_attri = des_attri if self.dataType == 'train':", "= self.subPath+\".txt\" file_v = self.subPath+\"_V.txt\" fileList = open(os.path.join(self.dir_path, file)).readlines() fileVList", "under the License is distributed on an \"AS IS\" BASIS,", "License for the specific language governing permissions and # limitations", "fileList = open(os.path.join(self.dir_path, file)).readlines() fileVList = open((os.path.join(self.dir_path, file_v))).readlines() if self.modelType", "line[1::]]) self.attributes = self.attributes num_attribute = len(self.attributes) return names, annas,", "2020 <NAME> & <NAME> (<EMAIL>) # Licensed under the Apache", "# Copyright 2020 <NAME> & <NAME> (<EMAIL>) # Licensed under", "self.dataType = dataType self.parse = parse self.des_attri = des_attri if", "self.dataType == 'test': if self.parse == 'Caricature': self.subPath = 'CariTest'", "name.replace(' ', '_') name = name.replace('._', '_') name = name.replace('-',", "'Asian', 'White', 'Black', 'Youth', 'Middle', 'Old', 'Wrinkle', 'MakeUp', 'Bald', 'LargeForehead',", "+ str(parse) self.dataType = dataType self.parse = parse 
self.des_attri =", "self.subPath = 'CariVal' elif self.parse == 'Photo': self.subPath = 'PhotoVal'", "self.parse == 'Caricature': self.subPath = 'CariTrain' elif self.parse == 'Photo':", "please select a dataType from: train, val, github\") exit(1) self.modelType", "names.append(line[0]) attributes = line[1::] index = self.attributes.index(attribute) annas.append([int(attributes[index])]) for line", "= name.replace('-', '_') name = name + \".jpg\" return name", "if self.parse == 'Caricature': self.subPath = 'CariTest' elif self.parse ==", "open(os.path.join(self.dir_path, file)).readlines() fileVList = open((os.path.join(self.dir_path, file_v))).readlines() if self.modelType == 'seperate':", "line[1::] index = self.attributes.index(attribute) visuals.append([int(attributes_v[index])]) else: for line in fileList:", "'HighCheekbones', 'SquareJaw'] self.names, self.annas, self.visuals, self.num_attribute = self.getImgNameAndAnnas() print(parse+\"dataset, images:", "self.attributes num_attribute = len(self.attributes) return names, annas, visuals, num_attribute def", "in param\") exit(1) for line in fileList: names.append(line[0]) attributes =", "'Middle', 'Old', 'Wrinkle', 'MakeUp', 'Bald', 'LargeForehead', 'RoundFace', 'DiamondFace', 'OvalFace', 'SquareShapeFace',", "== 'Photo': self.subPath = 'PhotoVal' else: self.subPath = 'WebCariVal' elif", "== 'Photo': self.subPath = 'all_photo_data' else: self.subPath = 'all_WebCari_data' else:", "'CariVal' elif self.parse == 'Photo': self.subPath = 'PhotoVal' else: self.subPath", "in compliance with the License. 
# You may obtain a", "self.des_attri print(\"des_attribute\", attribute) if attribute not in self.attributes: print(\"error: \",", "software # distributed under the License is distributed on an", "dataType self.parse = parse self.des_attri = des_attri if self.dataType ==", "'UpsideDownSlantedEyebrows', 'BushyEyebrows', 'ThickEyebrows', 'ThinEyebrows', 'Mustache', 'Goatee', 'Whiskers', 'OtherBeard&NoBeard', 'HighCheekbones', 'SquareJaw']", "'seperate': num_attribute = 1 attribute = self.des_attri print(\"des_attribute\", attribute) if", "= [] file = self.subPath+\".txt\" file_v = self.subPath+\"_V.txt\" fileList =", "'SleepyEye', 'SlantEye', 'SharpEye', 'FlabbyEye', 'BigEye', 'SmallEye', 'UnderEyePuffiness', 'BigNose', 'SmallNose', 'HighNose',", "limitations under the License. import os class WebCariA: def __init__(self,", "== 'Caricature': self.subPath = 'CariVal' elif self.parse == 'Photo': self.subPath", "in fileVList: attributes_v = line[1::] index = self.attributes.index(attribute) visuals.append([int(attributes_v[index])]) else:", "os.path.join(self.dir_path, self.subPath) self.attributes = ['Women', 'Asian', 'White', 'Black', 'Youth', 'Middle',", "'Caricature': self.subPath = 'CariVal' elif self.parse == 'Photo': self.subPath =", "== 'Caricature': self.subPath = 'CariTest' elif self.parse == 'Photo': self.subPath", "def __init__(self, dataType, modelType, parse, des_attri=None): self.dir_path = \"/data/jw/dataset/\" +", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "= self.attributes.index(attribute) annas.append([int(attributes[index])]) for line in fileVList: attributes_v = line[1::]", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "'WideNose', 'NarrowNose', 'Toothy', 'Smile', 'BigMouth', 'SmallMouth', 'ThickLips', 'ThinLips', 'DoubleChin', 'ArchedEyebrows',", "the License. 
# You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "modelType self.dir_path = os.path.join(self.dir_path, self.subPath) self.attributes = ['Women', 'Asian', 'White',", "'DoubleChin', 'ArchedEyebrows', 'FlatEyebrow', 'SlantedEyebrows', 'UpsideDownSlantedEyebrows', 'BushyEyebrows', 'ThickEyebrows', 'ThinEyebrows', 'Mustache', 'Goatee',", "to in writing, software # distributed under the License is", "# See the License for the specific language governing permissions", "'ThickEyebrows', 'ThinEyebrows', 'Mustache', 'Goatee', 'Whiskers', 'OtherBeard&NoBeard', 'HighCheekbones', 'SquareJaw'] self.names, self.annas,", "self.parse == 'Photo': self.subPath = 'PhotoTrain' else: self.subPath = 'WebCariTrain'", "in fileList: names.append(line[0]) annas.append([int(x) for x in line[1::]]) for line", "or agreed to in writing, software # distributed under the", "required by applicable law or agreed to in writing, software", "= [] visuals = [] file = self.subPath+\".txt\" file_v =", "train, val, github\") exit(1) self.modelType = modelType self.dir_path = os.path.join(self.dir_path,", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "with the License. 
# You may obtain a copy of", "'ThinEyebrows', 'Mustache', 'Goatee', 'Whiskers', 'OtherBeard&NoBeard', 'HighCheekbones', 'SquareJaw'] self.names, self.annas, self.visuals,", "x in line[1::]]) self.attributes = self.attributes num_attribute = len(self.attributes) return", "= self.getImgNameAndAnnas() print(parse+\"dataset, images: \", len(self.names), \" type for: \",", "[] annas = [] visuals = [] file = self.subPath+\".txt\"", "self.des_attri = des_attri if self.dataType == 'train': if self.parse ==", "index = self.attributes.index(attribute) annas.append([int(attributes[index])]) for line in fileVList: attributes_v =", "self.parse == 'Caricature': self.subPath = 'CariVal' elif self.parse == 'Photo':", "attribute = self.des_attri print(\"des_attribute\", attribute) if attribute not in self.attributes:", "compliance with the License. # You may obtain a copy", "agreed to in writing, software # distributed under the License", "distributed under the License is distributed on an \"AS IS\"", "'Whiskers', 'OtherBeard&NoBeard', 'HighCheekbones', 'SquareJaw'] self.names, self.annas, self.visuals, self.num_attribute = self.getImgNameAndAnnas()", "num_attribute = 1 attribute = self.des_attri print(\"des_attribute\", attribute) if attribute", "elif self.parse == 'Photo': self.subPath = 'all_photo_data' else: self.subPath =", "= line[1::] index = self.attributes.index(attribute) visuals.append([int(attributes_v[index])]) else: for line in", "= 'all_WebCari_data' else: print(\"Caricature error, please select a dataType from:", "self.subPath+\"_V.txt\" fileList = open(os.path.join(self.dir_path, file)).readlines() fileVList = open((os.path.join(self.dir_path, file_v))).readlines() if", "'SmallNose', 'HighNose', 'FlatNose', 'HookNose', 'WideNose', 'NarrowNose', 'Toothy', 'Smile', 'BigMouth', 'SmallMouth',", "\" num_attribute: \", self.num_attribute) def getImgNameAndAnnas(self): names = [] annas", "'PhotoTest' else: self.subPath = 'WebCariTest' elif self.dataType == 'all_data': 
if", "express or implied. # See the License for the specific", "except in compliance with the License. # You may obtain", "\" type for: \", self.dataType, \" num_attribute: \", self.num_attribute) def", "self.parse == 'Caricature': self.subPath = 'CariTest' elif self.parse == 'Photo':", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "val, github\") exit(1) self.modelType = modelType self.dir_path = os.path.join(self.dir_path, self.subPath)", "self.subPath+\".txt\" file_v = self.subPath+\"_V.txt\" fileList = open(os.path.join(self.dir_path, file)).readlines() fileVList =", "not use this file except in compliance with the License.", "error, please select a dataType from: train, val, github\") exit(1)", "writing, software # distributed under the License is distributed on", "self.attributes.index(attribute) visuals.append([int(attributes_v[index])]) else: for line in fileList: names.append(line[0]) annas.append([int(x) for", "you may not use this file except in compliance with", "and # limitations under the License. import os class WebCariA:", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "visuals.append([int(x) for x in line[1::]]) self.attributes = self.attributes num_attribute =", "fileVList: visuals.append([int(x) for x in line[1::]]) self.attributes = self.attributes num_attribute", "= dataType self.parse = parse self.des_attri = des_attri if self.dataType", "'_') name = name.replace('._', '_') name = name.replace('-', '_') name", "visuals.append([int(attributes_v[index])]) else: for line in fileList: names.append(line[0]) annas.append([int(x) for x", "= 'WebCariTest' elif self.dataType == 'all_data': if self.parse == 'Caricature':", "CONDITIONS OF ANY KIND, either express or implied. 
# See", "self.getImgNameAndAnnas() print(parse+\"dataset, images: \", len(self.names), \" type for: \", self.dataType,", "parse, des_attri=None): self.dir_path = \"/data/jw/dataset/\" + str(parse) self.dataType = dataType", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "= 'PhotoTest' else: self.subPath = 'WebCariTest' elif self.dataType == 'all_data':", "os class WebCariA: def __init__(self, dataType, modelType, parse, des_attri=None): self.dir_path", "self.attributes = ['Women', 'Asian', 'White', 'Black', 'Youth', 'Middle', 'Old', 'Wrinkle',", "self.modelType = modelType self.dir_path = os.path.join(self.dir_path, self.subPath) self.attributes = ['Women',", "\"is not in this dataset, please write a correct attribute", "the License. import os class WebCariA: def __init__(self, dataType, modelType,", "else: self.subPath = 'WebCariVal' elif self.dataType == 'test': if self.parse", "print(\"des_attribute\", attribute) if attribute not in self.attributes: print(\"error: \", attribute,", "'Toothy', 'Smile', 'BigMouth', 'SmallMouth', 'ThickLips', 'ThinLips', 'DoubleChin', 'ArchedEyebrows', 'FlatEyebrow', 'SlantedEyebrows',", "in fileList: names.append(line[0]) attributes = line[1::] index = self.attributes.index(attribute) annas.append([int(attributes[index])])", "not in this dataset, please write a correct attribute in", "else: for line in fileList: names.append(line[0]) annas.append([int(x) for x in", "self.subPath = 'WebCariVal' elif self.dataType == 'test': if self.parse ==", "& <NAME> (<EMAIL>) # Licensed under the Apache License, Version", "OR CONDITIONS OF ANY KIND, either express or implied. #", "governing permissions and # limitations under the License. 
import os", "'FlatEyebrow', 'SlantedEyebrows', 'UpsideDownSlantedEyebrows', 'BushyEyebrows', 'ThickEyebrows', 'ThinEyebrows', 'Mustache', 'Goatee', 'Whiskers', 'OtherBeard&NoBeard',", "the License is distributed on an \"AS IS\" BASIS, #", "'train': if self.parse == 'Caricature': self.subPath = 'CariTrain' elif self.parse", "fileVList = open((os.path.join(self.dir_path, file_v))).readlines() if self.modelType == 'seperate': num_attribute =", "'PhotoVal' else: self.subPath = 'WebCariVal' elif self.dataType == 'test': if", "== 'test': if self.parse == 'Caricature': self.subPath = 'CariTest' elif", "= \"/data/jw/dataset/\" + str(parse) self.dataType = dataType self.parse = parse", "from: train, val, github\") exit(1) self.modelType = modelType self.dir_path =", "self.names, self.annas, self.visuals, self.num_attribute = self.getImgNameAndAnnas() print(parse+\"dataset, images: \", len(self.names),", "elif self.parse == 'Photo': self.subPath = 'PhotoTrain' else: self.subPath =", "not in self.attributes: print(\"error: \", attribute, \"is not in this", "self.dataType == 'train': if self.parse == 'Caricature': self.subPath = 'CariTrain'", "in fileVList: visuals.append([int(x) for x in line[1::]]) self.attributes = self.attributes", "'ArchedEyebrows', 'FlatEyebrow', 'SlantedEyebrows', 'UpsideDownSlantedEyebrows', 'BushyEyebrows', 'ThickEyebrows', 'ThinEyebrows', 'Mustache', 'Goatee', 'Whiskers',", "law or agreed to in writing, software # distributed under", "'FlabbyEye', 'BigEye', 'SmallEye', 'UnderEyePuffiness', 'BigNose', 'SmallNose', 'HighNose', 'FlatNose', 'HookNose', 'WideNose',", "self.modelType == 'seperate': num_attribute = 1 attribute = self.des_attri print(\"des_attribute\",", "\", self.dataType, \" num_attribute: \", self.num_attribute) def getImgNameAndAnnas(self): names =", "if self.parse == 'Caricature': self.subPath = 'CariVal' elif self.parse ==", "for line in fileList: names.append(line[0]) annas.append([int(x) for x in line[1::]])", "= 'PhotoVal' else: 
self.subPath = 'WebCariVal' elif self.dataType == 'test':", "= 'WebCariVal' elif self.dataType == 'test': if self.parse == 'Caricature':", "in this dataset, please write a correct attribute in param\")", "\"/data/jw/dataset/\" + str(parse) self.dataType = dataType self.parse = parse self.des_attri", "print(\"Caricature error, please select a dataType from: train, val, github\")", "= self.attributes num_attribute = len(self.attributes) return names, annas, visuals, num_attribute", "in self.attributes: print(\"error: \", attribute, \"is not in this dataset,", "self.dir_path = os.path.join(self.dir_path, self.subPath) self.attributes = ['Women', 'Asian', 'White', 'Black',", "may obtain a copy of the License at # #", "== 'all_data': if self.parse == 'Caricature': self.subPath = 'all_cari_data' elif", "'CariTrain' elif self.parse == 'Photo': self.subPath = 'PhotoTrain' else: self.subPath", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "self.parse == 'Photo': self.subPath = 'PhotoTest' else: self.subPath = 'WebCariTest'", "'SquareJaw'] self.names, self.annas, self.visuals, self.num_attribute = self.getImgNameAndAnnas() print(parse+\"dataset, images: \",", "'BushyEyebrows', 'ThickEyebrows', 'ThinEyebrows', 'Mustache', 'Goatee', 'Whiskers', 'OtherBeard&NoBeard', 'HighCheekbones', 'SquareJaw'] self.names,", "= 'all_photo_data' else: self.subPath = 'all_WebCari_data' else: print(\"Caricature error, please", "modelType, parse, des_attri=None): self.dir_path = \"/data/jw/dataset/\" + str(parse) self.dataType =", "= line[1::] index = self.attributes.index(attribute) annas.append([int(attributes[index])]) for line in fileVList:", "may not use this file except in compliance with the", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "this file except in compliance with the License. 
# You", "'OvalFace', 'SquareShapeFace', 'NarrowEye', 'SleepyEye', 'SlantEye', 'SharpEye', 'FlabbyEye', 'BigEye', 'SmallEye', 'UnderEyePuffiness',", "index = self.attributes.index(attribute) visuals.append([int(attributes_v[index])]) else: for line in fileList: names.append(line[0])", "'test': if self.parse == 'Caricature': self.subPath = 'CariTest' elif self.parse", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "== 'train': if self.parse == 'Caricature': self.subPath = 'CariTrain' elif", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "'HookNose', 'WideNose', 'NarrowNose', 'Toothy', 'Smile', 'BigMouth', 'SmallMouth', 'ThickLips', 'ThinLips', 'DoubleChin',", "if self.parse == 'Caricature': self.subPath = 'all_cari_data' elif self.parse ==", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "'NarrowEye', 'SleepyEye', 'SlantEye', 'SharpEye', 'FlabbyEye', 'BigEye', 'SmallEye', 'UnderEyePuffiness', 'BigNose', 'SmallNose',", "'MakeUp', 'Bald', 'LargeForehead', 'RoundFace', 'DiamondFace', 'OvalFace', 'SquareShapeFace', 'NarrowEye', 'SleepyEye', 'SlantEye',", "exit(1) for line in fileList: names.append(line[0]) attributes = line[1::] index", "'SquareShapeFace', 'NarrowEye', 'SleepyEye', 'SlantEye', 'SharpEye', 'FlabbyEye', 'BigEye', 'SmallEye', 'UnderEyePuffiness', 'BigNose',", "class WebCariA: def __init__(self, dataType, modelType, parse, des_attri=None): self.dir_path =", "github\") exit(1) self.modelType = modelType self.dir_path = os.path.join(self.dir_path, self.subPath) self.attributes", "'SmallMouth', 'ThickLips', 'ThinLips', 'DoubleChin', 'ArchedEyebrows', 'FlatEyebrow', 'SlantedEyebrows', 'UpsideDownSlantedEyebrows', 'BushyEyebrows', 'ThickEyebrows',", "visuals = [] file = self.subPath+\".txt\" file_v = self.subPath+\"_V.txt\" fileList", "'Youth', 'Middle', 'Old', 'Wrinkle', 'MakeUp', 'Bald', 'LargeForehead', 'RoundFace', 'DiamondFace', 
'OvalFace',", "\", len(self.names), \" type for: \", self.dataType, \" num_attribute: \",", "name = name.replace('-', '_') name = name + \".jpg\" return", "language governing permissions and # limitations under the License. import", "line in fileVList: visuals.append([int(x) for x in line[1::]]) self.attributes =", "Copyright 2020 <NAME> & <NAME> (<EMAIL>) # Licensed under the", "len(self.names), \" type for: \", self.dataType, \" num_attribute: \", self.num_attribute)", "file)).readlines() fileVList = open((os.path.join(self.dir_path, file_v))).readlines() if self.modelType == 'seperate': num_attribute", "# limitations under the License. import os class WebCariA: def", "in line[1::]]) self.attributes = self.attributes num_attribute = len(self.attributes) return names,", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "'BigMouth', 'SmallMouth', 'ThickLips', 'ThinLips', 'DoubleChin', 'ArchedEyebrows', 'FlatEyebrow', 'SlantedEyebrows', 'UpsideDownSlantedEyebrows', 'BushyEyebrows',", "annas.append([int(attributes[index])]) for line in fileVList: attributes_v = line[1::] index =", "'SharpEye', 'FlabbyEye', 'BigEye', 'SmallEye', 'UnderEyePuffiness', 'BigNose', 'SmallNose', 'HighNose', 'FlatNose', 'HookNose',", "dataType from: train, val, github\") exit(1) self.modelType = modelType self.dir_path", "or implied. # See the License for the specific language", "self.annas, self.visuals, self.num_attribute = self.getImgNameAndAnnas() print(parse+\"dataset, images: \", len(self.names), \"", "= 1 attribute = self.des_attri print(\"des_attribute\", attribute) if attribute not", "KIND, either express or implied. 
# See the License for", "specific language governing permissions and # limitations under the License.", "= self.subPath+\"_V.txt\" fileList = open(os.path.join(self.dir_path, file)).readlines() fileVList = open((os.path.join(self.dir_path, file_v))).readlines()", "1 attribute = self.des_attri print(\"des_attribute\", attribute) if attribute not in", "elif self.parse == 'Photo': self.subPath = 'PhotoVal' else: self.subPath =", "self.subPath = 'PhotoTest' else: self.subPath = 'WebCariTest' elif self.dataType ==", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "= self.des_attri print(\"des_attribute\", attribute) if attribute not in self.attributes: print(\"error:", "for line in fileList: names.append(line[0]) attributes = line[1::] index =", "line[1::] index = self.attributes.index(attribute) annas.append([int(attributes[index])]) for line in fileVList: attributes_v", "self.dataType == 'val': if self.parse == 'Caricature': self.subPath = 'CariVal'", "dataset, please write a correct attribute in param\") exit(1) for", "for: \", self.dataType, \" num_attribute: \", self.num_attribute) def getImgNameAndAnnas(self): names", "(the \"License\"); # you may not use this file except", "# you may not use this file except in compliance", "(<EMAIL>) # Licensed under the Apache License, Version 2.0 (the", "'BigEye', 'SmallEye', 'UnderEyePuffiness', 'BigNose', 'SmallNose', 'HighNose', 'FlatNose', 'HookNose', 'WideNose', 'NarrowNose',", "self.dataType, \" num_attribute: \", self.num_attribute) def getImgNameAndAnnas(self): names = []", "param\") exit(1) for line in fileList: names.append(line[0]) attributes = line[1::]", "__init__(self, dataType, modelType, parse, des_attri=None): self.dir_path = \"/data/jw/dataset/\" + str(parse)", "self.subPath = 'all_cari_data' elif self.parse == 'Photo': self.subPath = 'all_photo_data'", "= len(self.attributes) return names, annas, visuals, num_attribute def getPath(self, name):", "elif self.dataType == 
'all_data': if self.parse == 'Caricature': self.subPath =", "file = self.subPath+\".txt\" file_v = self.subPath+\"_V.txt\" fileList = open(os.path.join(self.dir_path, file)).readlines()", "getPath(self, name): name = name.replace(' ', '_') name = name.replace('._',", "self.dir_path = \"/data/jw/dataset/\" + str(parse) self.dataType = dataType self.parse =", "= os.path.join(self.dir_path, self.subPath) self.attributes = ['Women', 'Asian', 'White', 'Black', 'Youth',", "'CariTest' elif self.parse == 'Photo': self.subPath = 'PhotoTest' else: self.subPath", "'Wrinkle', 'MakeUp', 'Bald', 'LargeForehead', 'RoundFace', 'DiamondFace', 'OvalFace', 'SquareShapeFace', 'NarrowEye', 'SleepyEye',", "fileList: names.append(line[0]) annas.append([int(x) for x in line[1::]]) for line in", "# # Unless required by applicable law or agreed to", "<NAME> & <NAME> (<EMAIL>) # Licensed under the Apache License,", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "'SlantEye', 'SharpEye', 'FlabbyEye', 'BigEye', 'SmallEye', 'UnderEyePuffiness', 'BigNose', 'SmallNose', 'HighNose', 'FlatNose',", "images: \", len(self.names), \" type for: \", self.dataType, \" num_attribute:", "self.subPath = 'all_photo_data' else: self.subPath = 'all_WebCari_data' else: print(\"Caricature error,", "Version 2.0 (the \"License\"); # you may not use this", "= des_attri if self.dataType == 'train': if self.parse == 'Caricature':", "= 'CariTest' elif self.parse == 'Photo': self.subPath = 'PhotoTest' else:", "'ThickLips', 'ThinLips', 'DoubleChin', 'ArchedEyebrows', 'FlatEyebrow', 'SlantedEyebrows', 'UpsideDownSlantedEyebrows', 'BushyEyebrows', 'ThickEyebrows', 'ThinEyebrows',", "print(\"error: \", attribute, \"is not in this dataset, please write", "dataType, modelType, parse, des_attri=None): self.dir_path = \"/data/jw/dataset/\" + str(parse) self.dataType", "'DiamondFace', 'OvalFace', 'SquareShapeFace', 'NarrowEye', 'SleepyEye', 'SlantEye', 'SharpEye', 'FlabbyEye', 'BigEye', 
'SmallEye',", "implied. # See the License for the specific language governing", "'LargeForehead', 'RoundFace', 'DiamondFace', 'OvalFace', 'SquareShapeFace', 'NarrowEye', 'SleepyEye', 'SlantEye', 'SharpEye', 'FlabbyEye',", "= 'PhotoTrain' else: self.subPath = 'WebCariTrain' elif self.dataType == 'val':", "under the Apache License, Version 2.0 (the \"License\"); # you", "getImgNameAndAnnas(self): names = [] annas = [] visuals = []", "self.num_attribute = self.getImgNameAndAnnas() print(parse+\"dataset, images: \", len(self.names), \" type for:", "name.replace('._', '_') name = name.replace('-', '_') name = name +", "attribute not in self.attributes: print(\"error: \", attribute, \"is not in", "'Mustache', 'Goatee', 'Whiskers', 'OtherBeard&NoBeard', 'HighCheekbones', 'SquareJaw'] self.names, self.annas, self.visuals, self.num_attribute", "for line in fileVList: visuals.append([int(x) for x in line[1::]]) self.attributes", "by applicable law or agreed to in writing, software #", "'Smile', 'BigMouth', 'SmallMouth', 'ThickLips', 'ThinLips', 'DoubleChin', 'ArchedEyebrows', 'FlatEyebrow', 'SlantedEyebrows', 'UpsideDownSlantedEyebrows',", "'WebCariVal' elif self.dataType == 'test': if self.parse == 'Caricature': self.subPath", "'WebCariTest' elif self.dataType == 'all_data': if self.parse == 'Caricature': self.subPath", "'HighNose', 'FlatNose', 'HookNose', 'WideNose', 'NarrowNose', 'Toothy', 'Smile', 'BigMouth', 'SmallMouth', 'ThickLips',", "<NAME> (<EMAIL>) # Licensed under the Apache License, Version 2.0", "self.attributes: print(\"error: \", attribute, \"is not in this dataset, please", "write a correct attribute in param\") exit(1) for line in", "'val': if self.parse == 'Caricature': self.subPath = 'CariVal' elif self.parse", "type for: \", self.dataType, \" num_attribute: \", self.num_attribute) def getImgNameAndAnnas(self):", "self.attributes = self.attributes num_attribute = len(self.attributes) return names, annas, visuals,", "'Black', 'Youth', 'Middle', 'Old', 
'Wrinkle', 'MakeUp', 'Bald', 'LargeForehead', 'RoundFace', 'DiamondFace',", "open((os.path.join(self.dir_path, file_v))).readlines() if self.modelType == 'seperate': num_attribute = 1 attribute", "== 'Photo': self.subPath = 'PhotoTrain' else: self.subPath = 'WebCariTrain' elif", "= 'CariTrain' elif self.parse == 'Photo': self.subPath = 'PhotoTrain' else:", "== 'seperate': num_attribute = 1 attribute = self.des_attri print(\"des_attribute\", attribute)", "self.parse == 'Caricature': self.subPath = 'all_cari_data' elif self.parse == 'Photo':", "line in fileList: names.append(line[0]) attributes = line[1::] index = self.attributes.index(attribute)", "attributes_v = line[1::] index = self.attributes.index(attribute) visuals.append([int(attributes_v[index])]) else: for line", "'PhotoTrain' else: self.subPath = 'WebCariTrain' elif self.dataType == 'val': if", "des_attri if self.dataType == 'train': if self.parse == 'Caricature': self.subPath", "'FlatNose', 'HookNose', 'WideNose', 'NarrowNose', 'Toothy', 'Smile', 'BigMouth', 'SmallMouth', 'ThickLips', 'ThinLips',", "= open(os.path.join(self.dir_path, file)).readlines() fileVList = open((os.path.join(self.dir_path, file_v))).readlines() if self.modelType ==", "def getImgNameAndAnnas(self): names = [] annas = [] visuals =", "permissions and # limitations under the License. 
import os class", "a correct attribute in param\") exit(1) for line in fileList:", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "Unless required by applicable law or agreed to in writing,", "self.subPath = 'all_WebCari_data' else: print(\"Caricature error, please select a dataType", "line[1::]]) for line in fileVList: visuals.append([int(x) for x in line[1::]])", "'Photo': self.subPath = 'PhotoVal' else: self.subPath = 'WebCariVal' elif self.dataType", "elif self.dataType == 'val': if self.parse == 'Caricature': self.subPath =", "annas, visuals, num_attribute def getPath(self, name): name = name.replace(' ',", "the specific language governing permissions and # limitations under the", "'Goatee', 'Whiskers', 'OtherBeard&NoBeard', 'HighCheekbones', 'SquareJaw'] self.names, self.annas, self.visuals, self.num_attribute =", "applicable law or agreed to in writing, software # distributed", "else: self.subPath = 'WebCariTest' elif self.dataType == 'all_data': if self.parse", "\", self.num_attribute) def getImgNameAndAnnas(self): names = [] annas = []", "== 'Photo': self.subPath = 'PhotoTest' else: self.subPath = 'WebCariTest' elif", "names.append(line[0]) annas.append([int(x) for x in line[1::]]) for line in fileVList:", "fileList: names.append(line[0]) attributes = line[1::] index = self.attributes.index(attribute) annas.append([int(attributes[index])]) for", "self.subPath = 'WebCariTest' elif self.dataType == 'all_data': if self.parse ==", "in writing, software # distributed under the License is distributed", "num_attribute = len(self.attributes) return names, annas, visuals, num_attribute def getPath(self,", "self.dataType == 'all_data': if self.parse == 'Caricature': self.subPath = 'all_cari_data'", "if attribute not in self.attributes: print(\"error: \", attribute, \"is not", "'_') name = name.replace('-', '_') name = name + \".jpg\"", "elif self.dataType == 'test': if self.parse == 'Caricature': self.subPath =", "self.subPath = 'CariTrain' elif 
self.parse == 'Photo': self.subPath = 'PhotoTrain'", "attribute) if attribute not in self.attributes: print(\"error: \", attribute, \"is", "== 'Caricature': self.subPath = 'all_cari_data' elif self.parse == 'Photo': self.subPath", "in line[1::]]) for line in fileVList: visuals.append([int(x) for x in", "[] visuals = [] file = self.subPath+\".txt\" file_v = self.subPath+\"_V.txt\"", "License. import os class WebCariA: def __init__(self, dataType, modelType, parse,", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "License, Version 2.0 (the \"License\"); # you may not use", "= self.attributes.index(attribute) visuals.append([int(attributes_v[index])]) else: for line in fileList: names.append(line[0]) annas.append([int(x)", "# You may obtain a copy of the License at", "str(parse) self.dataType = dataType self.parse = parse self.des_attri = des_attri", "== 'Caricature': self.subPath = 'CariTrain' elif self.parse == 'Photo': self.subPath", "= modelType self.dir_path = os.path.join(self.dir_path, self.subPath) self.attributes = ['Women', 'Asian',", "self.num_attribute) def getImgNameAndAnnas(self): names = [] annas = [] visuals", "'UnderEyePuffiness', 'BigNose', 'SmallNose', 'HighNose', 'FlatNose', 'HookNose', 'WideNose', 'NarrowNose', 'Toothy', 'Smile',", "self.visuals, self.num_attribute = self.getImgNameAndAnnas() print(parse+\"dataset, images: \", len(self.names), \" type", "attribute in param\") exit(1) for line in fileList: names.append(line[0]) attributes", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "= parse self.des_attri = des_attri if self.dataType == 'train': if", "num_attribute: \", self.num_attribute) def getImgNameAndAnnas(self): names = [] annas =", "self.subPath = 'CariTest' elif self.parse == 'Photo': self.subPath = 'PhotoTest'", "'all_photo_data' else: self.subPath = 'all_WebCari_data' else: print(\"Caricature error, please select", "return names, annas, visuals, num_attribute def getPath(self, name): name 
=", "the License for the specific language governing permissions and #", "= 'CariVal' elif self.parse == 'Photo': self.subPath = 'PhotoVal' else:", "name = name.replace('._', '_') name = name.replace('-', '_') name =", "Apache License, Version 2.0 (the \"License\"); # you may not", "'ThinLips', 'DoubleChin', 'ArchedEyebrows', 'FlatEyebrow', 'SlantedEyebrows', 'UpsideDownSlantedEyebrows', 'BushyEyebrows', 'ThickEyebrows', 'ThinEyebrows', 'Mustache',", "'SmallEye', 'UnderEyePuffiness', 'BigNose', 'SmallNose', 'HighNose', 'FlatNose', 'HookNose', 'WideNose', 'NarrowNose', 'Toothy',", "either express or implied. # See the License for the", "for x in line[1::]]) self.attributes = self.attributes num_attribute = len(self.attributes)", "else: print(\"Caricature error, please select a dataType from: train, val,", "', '_') name = name.replace('._', '_') name = name.replace('-', '_')", "['Women', 'Asian', 'White', 'Black', 'Youth', 'Middle', 'Old', 'Wrinkle', 'MakeUp', 'Bald',", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "'Caricature': self.subPath = 'CariTrain' elif self.parse == 'Photo': self.subPath =", "'OtherBeard&NoBeard', 'HighCheekbones', 'SquareJaw'] self.names, self.annas, self.visuals, self.num_attribute = self.getImgNameAndAnnas() print(parse+\"dataset,", "= [] annas = [] visuals = [] file =", "len(self.attributes) return names, annas, visuals, num_attribute def getPath(self, name): name", "self.subPath = 'PhotoVal' else: self.subPath = 'WebCariVal' elif self.dataType ==", "= ['Women', 'Asian', 'White', 'Black', 'Youth', 'Middle', 'Old', 'Wrinkle', 'MakeUp',", "'WebCariTrain' elif self.dataType == 'val': if self.parse == 'Caricature': self.subPath", "please write a correct attribute in param\") exit(1) for line", "'all_WebCari_data' else: print(\"Caricature error, please select a dataType from: train,", "self.subPath = 'WebCariTrain' elif self.dataType == 'val': if self.parse ==", "names, annas, visuals, num_attribute 
def getPath(self, name): name = name.replace('", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "file_v = self.subPath+\"_V.txt\" fileList = open(os.path.join(self.dir_path, file)).readlines() fileVList = open((os.path.join(self.dir_path,", "else: self.subPath = 'all_WebCari_data' else: print(\"Caricature error, please select a", "self.subPath) self.attributes = ['Women', 'Asian', 'White', 'Black', 'Youth', 'Middle', 'Old',", "'Photo': self.subPath = 'PhotoTest' else: self.subPath = 'WebCariTest' elif self.dataType", "parse self.des_attri = des_attri if self.dataType == 'train': if self.parse", "print(parse+\"dataset, images: \", len(self.names), \" type for: \", self.dataType, \"", "des_attri=None): self.dir_path = \"/data/jw/dataset/\" + str(parse) self.dataType = dataType self.parse", "file_v))).readlines() if self.modelType == 'seperate': num_attribute = 1 attribute =", "import os class WebCariA: def __init__(self, dataType, modelType, parse, des_attri=None):", "exit(1) self.modelType = modelType self.dir_path = os.path.join(self.dir_path, self.subPath) self.attributes =", "elif self.parse == 'Photo': self.subPath = 'PhotoTest' else: self.subPath =", "\"License\"); # you may not use this file except in", "'RoundFace', 'DiamondFace', 'OvalFace', 'SquareShapeFace', 'NarrowEye', 'SleepyEye', 'SlantEye', 'SharpEye', 'FlabbyEye', 'BigEye',", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "line in fileVList: attributes_v = line[1::] index = self.attributes.index(attribute) visuals.append([int(attributes_v[index])])", "names = [] annas = [] visuals = [] file", "= open((os.path.join(self.dir_path, file_v))).readlines() if self.modelType == 'seperate': num_attribute = 1", "else: self.subPath = 'WebCariTrain' elif self.dataType == 'val': if self.parse", "# distributed under the License is distributed on an \"AS", "# Unless required by applicable law or agreed to in", "'White', 'Black', 'Youth', 'Middle', 'Old', 'Wrinkle', 
'MakeUp', 'Bald', 'LargeForehead', 'RoundFace',", "= 'all_cari_data' elif self.parse == 'Photo': self.subPath = 'all_photo_data' else:", "'Old', 'Wrinkle', 'MakeUp', 'Bald', 'LargeForehead', 'RoundFace', 'DiamondFace', 'OvalFace', 'SquareShapeFace', 'NarrowEye',", "self.parse == 'Photo': self.subPath = 'PhotoVal' else: self.subPath = 'WebCariVal'", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "visuals, num_attribute def getPath(self, name): name = name.replace(' ', '_')", "attributes = line[1::] index = self.attributes.index(attribute) annas.append([int(attributes[index])]) for line in", "if self.modelType == 'seperate': num_attribute = 1 attribute = self.des_attri", "def getPath(self, name): name = name.replace(' ', '_') name =", "'Caricature': self.subPath = 'CariTest' elif self.parse == 'Photo': self.subPath =", "x in line[1::]]) for line in fileVList: visuals.append([int(x) for x", "You may obtain a copy of the License at #", "name): name = name.replace(' ', '_') name = name.replace('._', '_')", "if self.parse == 'Caricature': self.subPath = 'CariTrain' elif self.parse ==", "= name.replace('._', '_') name = name.replace('-', '_') name = name", "select a dataType from: train, val, github\") exit(1) self.modelType =", "\", attribute, \"is not in this dataset, please write a", "self.subPath = 'PhotoTrain' else: self.subPath = 'WebCariTrain' elif self.dataType ==", "the Apache License, Version 2.0 (the \"License\"); # you may", "under the License. import os class WebCariA: def __init__(self, dataType,", "'Caricature': self.subPath = 'all_cari_data' elif self.parse == 'Photo': self.subPath =", "[] file = self.subPath+\".txt\" file_v = self.subPath+\"_V.txt\" fileList = open(os.path.join(self.dir_path,", "name = name.replace(' ', '_') name = name.replace('._', '_') name" ]
[ "\"test-state.json\" # How many second ago we saved the JSON", "recorded any transfers in this transaction # (One transaction may", "if block_when is not None: transfer[\"timestamp\"] = block_when.isoformat() # Create", "potentially reorganised blocks from the scan data.\"\"\" for block_num in", "the JSON file self.last_save = 0 def reset(self): \"\"\"Create initial", "we saved the JSON file self.last_save = 0 def reset(self):", "far in a file.\"\"\" with open(self.fname, \"wt\") as f: json.dump(self.state,", "block_number # Save the database file for every minute if", "starting from scratch\") self.reset() def save(self): \"\"\"Save everything we have", "self.state[\"blocks\"]: self.state[\"blocks\"][block_number] = {} block = self.state[\"blocks\"][block_number] if txhash not", "get_last_scanned_block(self): \"\"\"The number of the last block we have stored.\"\"\"", "# \"Transfer\" log_index = event.logIndex # Log index within the", "{self.state['last_scanned_block']} blocks have been scanned\" ) except (IOError, json.decoder.JSONDecodeError): print(\"State", "How many second ago we saved the JSON file self.last_save", "entry that contains all events by a log index self.state[\"blocks\"][block_number][txhash]", "self.last_save > 60: self.save() def process_event( self, block_when: Optional[datetime.datetime], event:", "the case of a crash or CTRL+C\"\"\" # Next time", "methods implemented below # def get_last_scanned_block(self): \"\"\"The number of the", "in range(since_block, self.get_last_scanned_block()): if block_num in self.state[\"blocks\"]: del self.state[\"blocks\"][block_num] def", "print(\"State starting from scratch\") self.reset() def save(self): \"\"\"Save everything we", "= 0 def reset(self): \"\"\"Create initial state of nothing scanned.\"\"\"", "if time.time() - self.last_save > 60: self.save() def process_event( self,", "the last block we have stored.\"\"\" return self.state[\"last_scanned_block\"] def delete_data(self,", "f: 
json.dump(self.state, f) self.last_save = time.time() # # EventScannerState methods", "transaction_index = event.transactionIndex # Transaction index within the block txhash", "in self.state[\"blocks\"]: self.state[\"blocks\"][block_number] = {} block = self.state[\"blocks\"][block_number] if txhash", "block = self.state[\"blocks\"][block_number] if txhash not in block: # We", "we have scanned so far in a file.\"\"\" with open(self.fname,", "event_name = event.event # \"Transfer\" log_index = event.logIndex # Log", "in the case of a crash or CTRL+C\"\"\" # Next", "time the scanner is started we will resume from this", "smart contract). # Create a tx entry that contains all", "\"\"\"Remove potentially reorganised blocks from the scan data.\"\"\" for block_num", "the state, previously {self.state['last_scanned_block']} blocks have been scanned\" ) except", "events if executed by a smart contract). # Create a", "time.time() - self.last_save > 60: self.save() def process_event( self, block_when:", "scratch\") self.reset() def save(self): \"\"\"Save everything we have scanned so", "} if block_when is not None: transfer[\"timestamp\"] = block_when.isoformat() #", "\"rt\")) print( f\"Restored the state, previously {self.state['last_scanned_block']} blocks have been", "transaction hash and log index # One transaction may contain", "that contains all transactions by txhash if block_number not in", "number of the last block we have stored.\"\"\" return self.state[\"last_scanned_block\"]", "def start_chunk(self, block_number, chunk_size): pass def end_chunk(self, block_number): \"\"\"Save at", "import json import time from typing import Optional from web3.datastructures", "block_num in self.state[\"blocks\"]: del self.state[\"blocks\"][block_num] def start_chunk(self, block_number, chunk_size): pass", "events by a log index self.state[\"blocks\"][block_number][txhash] = {} # Record", "contains all transactions by txhash if block_number not in self.state[\"blocks\"]:", "have 
stored.\"\"\" return self.state[\"last_scanned_block\"] def delete_data(self, since_block): \"\"\"Remove potentially reorganised", "def end_chunk(self, block_number): \"\"\"Save at the end of each block,", "not in self.state[\"blocks\"]: self.state[\"blocks\"][block_number] = {} block = self.state[\"blocks\"][block_number] if", "of those gets their own log index # event_name =", "last scan state from a file.\"\"\" try: self.state = json.load(open(self.fname,", "\"\"\"Restore the last scan state from a file.\"\"\" try: self.state", "block_when is not None: transfer[\"timestamp\"] = block_when.isoformat() # Create empty", "database self.state[\"blocks\"][block_number][txhash][log_index] = transfer # Return a pointer that allows", "allows us to look up this event later if needed", "# Create empty dict as the block that contains all", "state, previously {self.state['last_scanned_block']} blocks have been scanned\" ) except (IOError,", "= event.transactionHash.hex() # Transaction hash block_number = event.blockNumber # Convert", "start up. \"\"\" def __init__(self): self.state = None self.fname =", "# event_name = event.event # \"Transfer\" log_index = event.logIndex #", "All state is an in-memory dict. 
Simple load/store massive JSON", "the block # transaction_index = event.transactionIndex # Transaction index within", "= self.state[\"blocks\"][block_number] if txhash not in block: # We have", "self.state[\"blocks\"][block_num] def start_chunk(self, block_number, chunk_size): pass def end_chunk(self, block_number): \"\"\"Save", "file.\"\"\" try: self.state = json.load(open(self.fname, \"rt\")) print( f\"Restored the state,", "def process_event( self, block_when: Optional[datetime.datetime], event: AttributeDict ) -> str:", "60: self.save() def process_event( self, block_when: Optional[datetime.datetime], event: AttributeDict )", "stored.\"\"\" return self.state[\"last_scanned_block\"] def delete_data(self, since_block): \"\"\"Remove potentially reorganised blocks", "log index # event_name = event.event # \"Transfer\" log_index =", "= { \"from\": args[\"from\"], \"to\": args.to, \"value\": args.value, } if", "scanned.\"\"\" self.state = { \"last_scanned_block\": 0, \"blocks\": {}, } def", "contain multiple events if executed by a smart contract). 
#", "chunk_size): pass def end_chunk(self, block_number): \"\"\"Save at the end of", "ago we saved the JSON file self.last_save = 0 def", "# Return a pointer that allows us to look up", "0 def reset(self): \"\"\"Create initial state of nothing scanned.\"\"\" self.state", "We have not yet recorded any transfers in this transaction", "within the block txhash = event.transactionHash.hex() # Transaction hash block_number", "return self.state[\"last_scanned_block\"] def delete_data(self, since_block): \"\"\"Remove potentially reorganised blocks from", "one of those gets their own log index # event_name", "json.load(open(self.fname, \"rt\")) print( f\"Restored the state, previously {self.state['last_scanned_block']} blocks have", "event to our internal format args = event[\"args\"] transfer =", "Next time the scanner is started we will resume from", "transfers in this transaction # (One transaction may contain multiple", "process_event( self, block_when: Optional[datetime.datetime], event: AttributeDict ) -> str: \"\"\"Record", "\"Transfer\" log_index = event.logIndex # Log index within the block", "database file for every minute if time.time() - self.last_save >", "that contains all events by a log index self.state[\"blocks\"][block_number][txhash] =", "all events. All state is an in-memory dict. 
Simple load/store", "= { \"last_scanned_block\": 0, \"blocks\": {}, } def restore(self): \"\"\"Restore", "Record ERC-20 transfer in our database self.state[\"blocks\"][block_number][txhash][log_index] = transfer #", "a crash or CTRL+C\"\"\" # Next time the scanner is", "AttributeDict ) -> str: \"\"\"Record a ERC-20 transfer in our", "= None self.fname = \"test-state.json\" # How many second ago", "{} # Record ERC-20 transfer in our database self.state[\"blocks\"][block_number][txhash][log_index] =", "their transaction hash and log index # One transaction may", "datetime import json import time from typing import Optional from", "are keyed by their transaction hash and log index #", "block: # We have not yet recorded any transfers in", "to our internal format args = event[\"args\"] transfer = {", "restore(self): \"\"\"Restore the last scan state from a file.\"\"\" try:", "# Log index within the block # transaction_index = event.transactionIndex", "previously {self.state['last_scanned_block']} blocks have been scanned\" ) except (IOError, json.decoder.JSONDecodeError):", "every minute if time.time() - self.last_save > 60: self.save() def", "by txhash if block_number not in self.state[\"blocks\"]: self.state[\"blocks\"][block_number] = {}", "import time from typing import Optional from web3.datastructures import AttributeDict", "Optional[datetime.datetime], event: AttributeDict ) -> str: \"\"\"Record a ERC-20 transfer", "yet recorded any transfers in this transaction # (One transaction", "we will resume from this block self.state[\"last_scanned_block\"] = block_number #", "of the last block we have stored.\"\"\" return self.state[\"last_scanned_block\"] def", ".event_scanner_state import EventScannerState class JSONifiedState(EventScannerState): \"\"\"Store the state of scanned", "CTRL+C\"\"\" # Next time the scanner is started we will", "block that contains all transactions by txhash if block_number not", "self.state[\"blocks\"][block_number] if txhash not in block: 
# We have not", "import AttributeDict from .event_scanner_state import EventScannerState class JSONifiedState(EventScannerState): \"\"\"Store the", "we can resume in the case of a crash or", "in our database.\"\"\" # Events are keyed by their transaction", "a file.\"\"\" with open(self.fname, \"wt\") as f: json.dump(self.state, f) self.last_save", "\"to\": args.to, \"value\": args.value, } if block_when is not None:", "= transfer # Return a pointer that allows us to", "= block_when.isoformat() # Create empty dict as the block that", "json.decoder.JSONDecodeError): print(\"State starting from scratch\") self.reset() def save(self): \"\"\"Save everything", "reset(self): \"\"\"Create initial state of nothing scanned.\"\"\" self.state = {", "# (One transaction may contain multiple events if executed by", "dict. Simple load/store massive JSON on start up. \"\"\" def", "from a file.\"\"\" try: self.state = json.load(open(self.fname, \"rt\")) print( f\"Restored", "def reset(self): \"\"\"Create initial state of nothing scanned.\"\"\" self.state =", "many second ago we saved the JSON file self.last_save =", "a file.\"\"\" try: self.state = json.load(open(self.fname, \"rt\")) print( f\"Restored the", "started we will resume from this block self.state[\"last_scanned_block\"] = block_number", "range(since_block, self.get_last_scanned_block()): if block_num in self.state[\"blocks\"]: del self.state[\"blocks\"][block_num] def start_chunk(self,", "database.\"\"\" # Events are keyed by their transaction hash and", "self.save() def process_event( self, block_when: Optional[datetime.datetime], event: AttributeDict ) ->", "def delete_data(self, since_block): \"\"\"Remove potentially reorganised blocks from the scan", "# and each one of those gets their own log", "the database file for every minute if time.time() - self.last_save", "# # EventScannerState methods implemented below # def get_last_scanned_block(self): \"\"\"The", "if block_num in self.state[\"blocks\"]: del 
self.state[\"blocks\"][block_num] def start_chunk(self, block_number, chunk_size):", "hash block_number = event.blockNumber # Convert ERC-20 Transfer event to", "Log index within the block # transaction_index = event.transactionIndex #", "index within the block txhash = event.transactionHash.hex() # Transaction hash", "blocks and all events. All state is an in-memory dict.", "transaction # (One transaction may contain multiple events if executed", "scanned blocks and all events. All state is an in-memory", "our database.\"\"\" # Events are keyed by their transaction hash", "as the block that contains all transactions by txhash if", "= json.load(open(self.fname, \"rt\")) print( f\"Restored the state, previously {self.state['last_scanned_block']} blocks", "in block: # We have not yet recorded any transfers", "data.\"\"\" for block_num in range(since_block, self.get_last_scanned_block()): if block_num in self.state[\"blocks\"]:", "EventScannerState class JSONifiedState(EventScannerState): \"\"\"Store the state of scanned blocks and", "self.state[\"blocks\"]: del self.state[\"blocks\"][block_num] def start_chunk(self, block_number, chunk_size): pass def end_chunk(self,", "contract). 
# Create a tx entry that contains all events", "state from a file.\"\"\" try: self.state = json.load(open(self.fname, \"rt\")) print(", "JSON file self.last_save = 0 def reset(self): \"\"\"Create initial state", "print( f\"Restored the state, previously {self.state['last_scanned_block']} blocks have been scanned\"", "crash or CTRL+C\"\"\" # Next time the scanner is started", "any transfers in this transaction # (One transaction may contain", "event[\"args\"] transfer = { \"from\": args[\"from\"], \"to\": args.to, \"value\": args.value,", "\"\"\"Save at the end of each block, so we can", "gets their own log index # event_name = event.event #", "delete_data(self, since_block): \"\"\"Remove potentially reorganised blocks from the scan data.\"\"\"", "-> str: \"\"\"Record a ERC-20 transfer in our database.\"\"\" #", "None: transfer[\"timestamp\"] = block_when.isoformat() # Create empty dict as the", "not yet recorded any transfers in this transaction # (One", "nothing scanned.\"\"\" self.state = { \"last_scanned_block\": 0, \"blocks\": {}, }", "event.blockNumber # Convert ERC-20 Transfer event to our internal format", ") -> str: \"\"\"Record a ERC-20 transfer in our database.\"\"\"", "import Optional from web3.datastructures import AttributeDict from .event_scanner_state import EventScannerState", "for block_num in range(since_block, self.get_last_scanned_block()): if block_num in self.state[\"blocks\"]: del", "end_chunk(self, block_number): \"\"\"Save at the end of each block, so", "(IOError, json.decoder.JSONDecodeError): print(\"State starting from scratch\") self.reset() def save(self): \"\"\"Save", "within the block # transaction_index = event.transactionIndex # Transaction index", "of scanned blocks and all events. All state is an", "= event.blockNumber # Convert ERC-20 Transfer event to our internal", "block we have stored.\"\"\" return self.state[\"last_scanned_block\"] def delete_data(self, since_block): \"\"\"Remove", "executed by a smart contract). 
# Create a tx entry", "{ \"last_scanned_block\": 0, \"blocks\": {}, } def restore(self): \"\"\"Restore the", "state is an in-memory dict. Simple load/store massive JSON on", "format args = event[\"args\"] transfer = { \"from\": args[\"from\"], \"to\":", "args.value, } if block_when is not None: transfer[\"timestamp\"] = block_when.isoformat()", "or CTRL+C\"\"\" # Next time the scanner is started we", "to look up this event later if needed return f\"{block_number}-{txhash}-{log_index}\"", "initial state of nothing scanned.\"\"\" self.state = { \"last_scanned_block\": 0,", "str: \"\"\"Record a ERC-20 transfer in our database.\"\"\" # Events", "contains all events by a log index self.state[\"blocks\"][block_number][txhash] = {}", "is started we will resume from this block self.state[\"last_scanned_block\"] =", "= event.logIndex # Log index within the block # transaction_index", "# def get_last_scanned_block(self): \"\"\"The number of the last block we", "\"\"\"Save everything we have scanned so far in a file.\"\"\"", "web3.datastructures import AttributeDict from .event_scanner_state import EventScannerState class JSONifiedState(EventScannerState): \"\"\"Store", "Simple load/store massive JSON on start up. 
\"\"\" def __init__(self):", "own log index # event_name = event.event # \"Transfer\" log_index", "def restore(self): \"\"\"Restore the last scan state from a file.\"\"\"", "implemented below # def get_last_scanned_block(self): \"\"\"The number of the last", "the block txhash = event.transactionHash.hex() # Transaction hash block_number =", "and log index # One transaction may contain multiple events", "the end of each block, so we can resume in", "= time.time() # # EventScannerState methods implemented below # def", "scanned so far in a file.\"\"\" with open(self.fname, \"wt\") as", "our internal format args = event[\"args\"] transfer = { \"from\":", "log index # One transaction may contain multiple events #", "txhash = event.transactionHash.hex() # Transaction hash block_number = event.blockNumber #", "self.state[\"blocks\"][block_number][txhash] = {} # Record ERC-20 transfer in our database", "from this block self.state[\"last_scanned_block\"] = block_number # Save the database", "the block that contains all transactions by txhash if block_number", "ERC-20 transfer in our database self.state[\"blocks\"][block_number][txhash][log_index] = transfer # Return", "transaction may contain multiple events # and each one of", "transfer[\"timestamp\"] = block_when.isoformat() # Create empty dict as the block", ") except (IOError, json.decoder.JSONDecodeError): print(\"State starting from scratch\") self.reset() def", "from scratch\") self.reset() def save(self): \"\"\"Save everything we have scanned", "txhash not in block: # We have not yet recorded", "(One transaction may contain multiple events if executed by a", "may contain multiple events # and each one of those", "del self.state[\"blocks\"][block_num] def start_chunk(self, block_number, chunk_size): pass def end_chunk(self, block_number):", "# One transaction may contain multiple events # and each", "# transaction_index = event.transactionIndex # Transaction index within the block", "time from typing import Optional 
from web3.datastructures import AttributeDict from", "resume in the case of a crash or CTRL+C\"\"\" #", "state of scanned blocks and all events. All state is", "log index self.state[\"blocks\"][block_number][txhash] = {} # Record ERC-20 transfer in", "event.event # \"Transfer\" log_index = event.logIndex # Log index within", "from web3.datastructures import AttributeDict from .event_scanner_state import EventScannerState class JSONifiedState(EventScannerState):", "events. All state is an in-memory dict. Simple load/store massive", "\"\"\"Create initial state of nothing scanned.\"\"\" self.state = { \"last_scanned_block\":", "up. \"\"\" def __init__(self): self.state = None self.fname = \"test-state.json\"", "# Create a tx entry that contains all events by", "{} block = self.state[\"blocks\"][block_number] if txhash not in block: #", "\"\"\"Record a ERC-20 transfer in our database.\"\"\" # Events are", "file.\"\"\" with open(self.fname, \"wt\") as f: json.dump(self.state, f) self.last_save =", "self.state = { \"last_scanned_block\": 0, \"blocks\": {}, } def restore(self):", "on start up. 
\"\"\" def __init__(self): self.state = None self.fname", "{}, } def restore(self): \"\"\"Restore the last scan state from", "# EventScannerState methods implemented below # def get_last_scanned_block(self): \"\"\"The number", "minute if time.time() - self.last_save > 60: self.save() def process_event(", "can resume in the case of a crash or CTRL+C\"\"\"", "us to look up this event later if needed return", "f\"Restored the state, previously {self.state['last_scanned_block']} blocks have been scanned\" )", "block_number not in self.state[\"blocks\"]: self.state[\"blocks\"][block_number] = {} block = self.state[\"blocks\"][block_number]", "if block_number not in self.state[\"blocks\"]: self.state[\"blocks\"][block_number] = {} block =", "self, block_when: Optional[datetime.datetime], event: AttributeDict ) -> str: \"\"\"Record a", "transfer in our database.\"\"\" # Events are keyed by their", "# How many second ago we saved the JSON file", "One transaction may contain multiple events # and each one", "open(self.fname, \"wt\") as f: json.dump(self.state, f) self.last_save = time.time() #", "file for every minute if time.time() - self.last_save > 60:", "scanner is started we will resume from this block self.state[\"last_scanned_block\"]", "= event.transactionIndex # Transaction index within the block txhash =", "= \"test-state.json\" # How many second ago we saved the", "\"blocks\": {}, } def restore(self): \"\"\"Restore the last scan state", "so far in a file.\"\"\" with open(self.fname, \"wt\") as f:", "hash and log index # One transaction may contain multiple", "state of nothing scanned.\"\"\" self.state = { \"last_scanned_block\": 0, \"blocks\":", "self.state = json.load(open(self.fname, \"rt\")) print( f\"Restored the state, previously {self.state['last_scanned_block']}", "a pointer that allows us to look up this event", "block_number = event.blockNumber # Convert ERC-20 Transfer event to our", "# Transaction index within the block txhash = 
event.transactionHash.hex() #", "in self.state[\"blocks\"]: del self.state[\"blocks\"][block_num] def start_chunk(self, block_number, chunk_size): pass def", "Transfer event to our internal format args = event[\"args\"] transfer", "tx entry that contains all events by a log index", "so we can resume in the case of a crash", "Convert ERC-20 Transfer event to our internal format args =", "scan state from a file.\"\"\" try: self.state = json.load(open(self.fname, \"rt\"))", "\"wt\") as f: json.dump(self.state, f) self.last_save = time.time() # #", "by a smart contract). # Create a tx entry that", "the scanner is started we will resume from this block", "block_number): \"\"\"Save at the end of each block, so we", "self.fname = \"test-state.json\" # How many second ago we saved", "index # event_name = event.event # \"Transfer\" log_index = event.logIndex", "- self.last_save > 60: self.save() def process_event( self, block_when: Optional[datetime.datetime],", "Create empty dict as the block that contains all transactions", "each block, so we can resume in the case of", "import EventScannerState class JSONifiedState(EventScannerState): \"\"\"Store the state of scanned blocks", "of each block, so we can resume in the case", "pointer that allows us to look up this event later", "ERC-20 Transfer event to our internal format args = event[\"args\"]", "reorganised blocks from the scan data.\"\"\" for block_num in range(since_block,", "index self.state[\"blocks\"][block_number][txhash] = {} # Record ERC-20 transfer in our", "since_block): \"\"\"Remove potentially reorganised blocks from the scan data.\"\"\" for", "json import time from typing import Optional from web3.datastructures import", "dict as the block that contains all transactions by txhash", "self.state = None self.fname = \"test-state.json\" # How many second", "below # def get_last_scanned_block(self): \"\"\"The number of the last block", "= event.event # \"Transfer\" log_index = event.logIndex # Log index", 
"self.last_save = 0 def reset(self): \"\"\"Create initial state of nothing", "by their transaction hash and log index # One transaction", "start_chunk(self, block_number, chunk_size): pass def end_chunk(self, block_number): \"\"\"Save at the", "at the end of each block, so we can resume", "those gets their own log index # event_name = event.event", "all transactions by txhash if block_number not in self.state[\"blocks\"]: self.state[\"blocks\"][block_number]", "keyed by their transaction hash and log index # One", "f) self.last_save = time.time() # # EventScannerState methods implemented below", "index # One transaction may contain multiple events # and", "in a file.\"\"\" with open(self.fname, \"wt\") as f: json.dump(self.state, f)", "from the scan data.\"\"\" for block_num in range(since_block, self.get_last_scanned_block()): if", "pass def end_chunk(self, block_number): \"\"\"Save at the end of each", "class JSONifiedState(EventScannerState): \"\"\"Store the state of scanned blocks and all", "\"from\": args[\"from\"], \"to\": args.to, \"value\": args.value, } if block_when is", "case of a crash or CTRL+C\"\"\" # Next time the", "= {} # Record ERC-20 transfer in our database self.state[\"blocks\"][block_number][txhash][log_index]", "transfer # Return a pointer that allows us to look", "have been scanned\" ) except (IOError, json.decoder.JSONDecodeError): print(\"State starting from", "Events are keyed by their transaction hash and log index", "args = event[\"args\"] transfer = { \"from\": args[\"from\"], \"to\": args.to,", "self.reset() def save(self): \"\"\"Save everything we have scanned so far", "transactions by txhash if block_number not in self.state[\"blocks\"]: self.state[\"blocks\"][block_number] =", "= block_number # Save the database file for every minute", "from typing import Optional from web3.datastructures import AttributeDict from .event_scanner_state", "may contain multiple events if executed by a smart contract).", "event.transactionHash.hex() # 
Transaction hash block_number = event.blockNumber # Convert ERC-20", "Save the database file for every minute if time.time() -", "block_num in range(since_block, self.get_last_scanned_block()): if block_num in self.state[\"blocks\"]: del self.state[\"blocks\"][block_num]", "that allows us to look up this event later if", "will resume from this block self.state[\"last_scanned_block\"] = block_number # Save", "scanned\" ) except (IOError, json.decoder.JSONDecodeError): print(\"State starting from scratch\") self.reset()", "the scan data.\"\"\" for block_num in range(since_block, self.get_last_scanned_block()): if block_num", "our database self.state[\"blocks\"][block_number][txhash][log_index] = transfer # Return a pointer that", "args[\"from\"], \"to\": args.to, \"value\": args.value, } if block_when is not", "not in block: # We have not yet recorded any", "and each one of those gets their own log index", "txhash if block_number not in self.state[\"blocks\"]: self.state[\"blocks\"][block_number] = {} block", "in this transaction # (One transaction may contain multiple events", "their own log index # event_name = event.event # \"Transfer\"", "multiple events # and each one of those gets their", "last block we have stored.\"\"\" return self.state[\"last_scanned_block\"] def delete_data(self, since_block):", "import datetime import json import time from typing import Optional", "as f: json.dump(self.state, f) self.last_save = time.time() # # EventScannerState", "this transaction # (One transaction may contain multiple events if", "and all events. All state is an in-memory dict. Simple", "def __init__(self): self.state = None self.fname = \"test-state.json\" # How", "JSON on start up. 
\"\"\" def __init__(self): self.state = None", "= {} block = self.state[\"blocks\"][block_number] if txhash not in block:", "if txhash not in block: # We have not yet", "from .event_scanner_state import EventScannerState class JSONifiedState(EventScannerState): \"\"\"Store the state of", "EventScannerState methods implemented below # def get_last_scanned_block(self): \"\"\"The number of", "Transaction hash block_number = event.blockNumber # Convert ERC-20 Transfer event", "scan data.\"\"\" for block_num in range(since_block, self.get_last_scanned_block()): if block_num in", "# Events are keyed by their transaction hash and log", "a tx entry that contains all events by a log", "in-memory dict. Simple load/store massive JSON on start up. \"\"\"", "# Convert ERC-20 Transfer event to our internal format args", "have scanned so far in a file.\"\"\" with open(self.fname, \"wt\")", "\"\"\"The number of the last block we have stored.\"\"\" return", "def save(self): \"\"\"Save everything we have scanned so far in", "an in-memory dict. Simple load/store massive JSON on start up.", "= event[\"args\"] transfer = { \"from\": args[\"from\"], \"to\": args.to, \"value\":", "# Record ERC-20 transfer in our database self.state[\"blocks\"][block_number][txhash][log_index] = transfer", "massive JSON on start up. 
\"\"\" def __init__(self): self.state =", "events # and each one of those gets their own", "\"last_scanned_block\": 0, \"blocks\": {}, } def restore(self): \"\"\"Restore the last", "transfer = { \"from\": args[\"from\"], \"to\": args.to, \"value\": args.value, }", "time.time() # # EventScannerState methods implemented below # def get_last_scanned_block(self):", "None self.fname = \"test-state.json\" # How many second ago we", "everything we have scanned so far in a file.\"\"\" with", "of a crash or CTRL+C\"\"\" # Next time the scanner", "# Save the database file for every minute if time.time()", "save(self): \"\"\"Save everything we have scanned so far in a", "args.to, \"value\": args.value, } if block_when is not None: transfer[\"timestamp\"]", "self.state[\"blocks\"][block_number][txhash][log_index] = transfer # Return a pointer that allows us", "with open(self.fname, \"wt\") as f: json.dump(self.state, f) self.last_save = time.time()", "def get_last_scanned_block(self): \"\"\"The number of the last block we have", "saved the JSON file self.last_save = 0 def reset(self): \"\"\"Create", "block_when.isoformat() # Create empty dict as the block that contains", "<reponame>zomglings/moonworm<gh_stars>1-10 import datetime import json import time from typing import", "event: AttributeDict ) -> str: \"\"\"Record a ERC-20 transfer in", "multiple events if executed by a smart contract). 
# Create", "block txhash = event.transactionHash.hex() # Transaction hash block_number = event.blockNumber", "by a log index self.state[\"blocks\"][block_number][txhash] = {} # Record ERC-20", "self.state[\"blocks\"][block_number] = {} block = self.state[\"blocks\"][block_number] if txhash not in", "0, \"blocks\": {}, } def restore(self): \"\"\"Restore the last scan", "try: self.state = json.load(open(self.fname, \"rt\")) print( f\"Restored the state, previously", "been scanned\" ) except (IOError, json.decoder.JSONDecodeError): print(\"State starting from scratch\")", "Create a tx entry that contains all events by a", "{ \"from\": args[\"from\"], \"to\": args.to, \"value\": args.value, } if block_when", "in our database self.state[\"blocks\"][block_number][txhash][log_index] = transfer # Return a pointer", "blocks have been scanned\" ) except (IOError, json.decoder.JSONDecodeError): print(\"State starting", "load/store massive JSON on start up. \"\"\" def __init__(self): self.state", "if executed by a smart contract). # Create a tx", "this block self.state[\"last_scanned_block\"] = block_number # Save the database file", "json.dump(self.state, f) self.last_save = time.time() # # EventScannerState methods implemented", "self.state[\"last_scanned_block\"] = block_number # Save the database file for every", "block_number, chunk_size): pass def end_chunk(self, block_number): \"\"\"Save at the end", "not None: transfer[\"timestamp\"] = block_when.isoformat() # Create empty dict as", "block_when: Optional[datetime.datetime], event: AttributeDict ) -> str: \"\"\"Record a ERC-20", "log_index = event.logIndex # Log index within the block #", "AttributeDict from .event_scanner_state import EventScannerState class JSONifiedState(EventScannerState): \"\"\"Store the state", "a smart contract). 
# Create a tx entry that contains", "self.state[\"last_scanned_block\"] def delete_data(self, since_block): \"\"\"Remove potentially reorganised blocks from the", "typing import Optional from web3.datastructures import AttributeDict from .event_scanner_state import", "# Next time the scanner is started we will resume", "contain multiple events # and each one of those gets", "the last scan state from a file.\"\"\" try: self.state =", "\"value\": args.value, } if block_when is not None: transfer[\"timestamp\"] =", "each one of those gets their own log index #", "a log index self.state[\"blocks\"][block_number][txhash] = {} # Record ERC-20 transfer", "transfer in our database self.state[\"blocks\"][block_number][txhash][log_index] = transfer # Return a", "event.transactionIndex # Transaction index within the block txhash = event.transactionHash.hex()", "\"\"\" def __init__(self): self.state = None self.fname = \"test-state.json\" #", "Transaction index within the block txhash = event.transactionHash.hex() # Transaction", "file self.last_save = 0 def reset(self): \"\"\"Create initial state of", "for every minute if time.time() - self.last_save > 60: self.save()", "a ERC-20 transfer in our database.\"\"\" # Events are keyed", "end of each block, so we can resume in the", "resume from this block self.state[\"last_scanned_block\"] = block_number # Save the", "is not None: transfer[\"timestamp\"] = block_when.isoformat() # Create empty dict", "blocks from the scan data.\"\"\" for block_num in range(since_block, self.get_last_scanned_block()):", "self.last_save = time.time() # # EventScannerState methods implemented below #", "ERC-20 transfer in our database.\"\"\" # Events are keyed by", "self.get_last_scanned_block()): if block_num in self.state[\"blocks\"]: del self.state[\"blocks\"][block_num] def start_chunk(self, block_number,", "of nothing scanned.\"\"\" self.state = { \"last_scanned_block\": 0, \"blocks\": {},", "is an in-memory dict. 
Simple load/store massive JSON on start", "block, so we can resume in the case of a", "JSONifiedState(EventScannerState): \"\"\"Store the state of scanned blocks and all events.", "transaction may contain multiple events if executed by a smart", "\"\"\"Store the state of scanned blocks and all events. All", "# We have not yet recorded any transfers in this", "empty dict as the block that contains all transactions by", "__init__(self): self.state = None self.fname = \"test-state.json\" # How many", "Optional from web3.datastructures import AttributeDict from .event_scanner_state import EventScannerState class", "internal format args = event[\"args\"] transfer = { \"from\": args[\"from\"],", "second ago we saved the JSON file self.last_save = 0", "block # transaction_index = event.transactionIndex # Transaction index within the", "} def restore(self): \"\"\"Restore the last scan state from a", "block self.state[\"last_scanned_block\"] = block_number # Save the database file for", "except (IOError, json.decoder.JSONDecodeError): print(\"State starting from scratch\") self.reset() def save(self):", "index within the block # transaction_index = event.transactionIndex # Transaction", "the state of scanned blocks and all events. All state", "have not yet recorded any transfers in this transaction #", "we have stored.\"\"\" return self.state[\"last_scanned_block\"] def delete_data(self, since_block): \"\"\"Remove potentially", "> 60: self.save() def process_event( self, block_when: Optional[datetime.datetime], event: AttributeDict", "Return a pointer that allows us to look up this", "all events by a log index self.state[\"blocks\"][block_number][txhash] = {} #", "event.logIndex # Log index within the block # transaction_index =", "# Transaction hash block_number = event.blockNumber # Convert ERC-20 Transfer" ]
[ "x 3 matrix of velocities mass is an N x", "dt / 2.0 # update time t += dt #", "out[:, 2] = c @dc.program def getAcc(pos: dc.float64[N, 3], mass:", "out=vel) # calculate initial gravitational accelerations acc = getAcc(pos, mass,", "acc = getAcc(pos, mass, G, softening) # calculate initial energy", "x 3 matrix of positions mass is an N x", "is an N x 1 vector of masses G is", "G * (dz * inv_r3) @ mass # pack together", "+ 1, dtype=np.float64) KE[0], PE[0] = getEnergy(pos, vel, mass, G)", "# calculate initial gravitational accelerations acc = getAcc(pos, mass, G,", "np.add.outer(-z, z) # matrix that stores 1/r^3 for all particle", "- x # dy = y.T - y # dz", "# dz = z.T - z # dx = np.transpose(x)", "x 3 matrix of accelerations \"\"\" # positions r =", "/ np.mean(mass) # tmp = np.divide(np.mean(np.reshape(mass, (N, 1)) * vel,", "np.ndarray(Nt + 1, dtype=np.float64) PE = np.ndarray(Nt + 1, dtype=np.float64)", "+ softening**2) # inv_r3[inv_r3>0] = inv_r3[inv_r3>0]**(-1.5) I = inv_r3 >", "pos is an N x 3 matrix of positions mass", "positions r = [x,y,z] for all particles x = pos[:,", "a[:, 1] = ay a[:, 2] = az return a", "inv_r3 = (dx**2 + dy**2 + dz**2 + softening**2) #", "dy = np.transpose(y) - y # dz = np.transpose(z) -", "only once # PE = G * np.sum(np.sum(np.triu(-(mass*mass.T)*inv_r,1))) # PE", "= getAcc(pos, mass, G, softening) # calculate initial energy of", "numpy as np import dace as dc \"\"\" Create Your", "acc * dt / 2.0 # drift pos += vel", "the acceleration components # a = np.hstack((ax,ay,az)) a = np.ndarray((N,", "out[:, 0] = a # out[:, 1] = b #", "x # dy = np.transpose(y) - y # dz =", "y # dz = z.T - z # dx =", "@ mass # pack together the acceleration components # a", "*= G return KE, PE @dc.program def nbody(mass: dc.float64[N], pos:", "inv_r3) @ mass az = G * (dz * inv_r3)", "tmp = np.divide(np.mean(np.reshape(mass, (N, 1)) * vel, axis=0), np.mean(mass)) np.subtract(vel,", "Gravitational constant KE is the kinetic energy of the system", "for k in 
range(j + 1, N): PE += tmp[j,", "https://github.com/pmocz/nbody-python/blob/master/nbody.py # TODO: Add GPL-3.0 License import numpy as np", "np.transpose(y) - y # dz = np.transpose(z) - z dx", "x # dy = y.T - y # dz =", "dc.float64[N], G: dc.float64): \"\"\" Get kinetic energy (KE) and potential", "x 1 vector of masses G is Newton's Gravitational constant", "all particle pairwise particle separations inv_r = np.sqrt(dx**2 + dy**2", "r = [x,y,z] for all particles x = pos[:, 0:1]", "np.add.outer(-y, y) dz = np.add.outer(-z, z) # matrix that stores", "0] = a # out[:, 1] = b # out[:,", "tmp = -np.multiply.outer(mass, mass) * inv_r PE = 0.0 for", "vel, axis=0) / np.mean(mass) # vel -= np.mean(np.reshape(mass, (N, 1))", "pos += vel * dt # update accelerations acc[:] =", "(2020) Princeton Univeristy, @PMocz Simulate orbits of stars interacting due", "dc.float64[N, 3], a: dc.float64[N], # b: dc.float64[N], c: dc.float64[N]): #", "(N, 1)) * vel**2) # Potential Energy: # positions r", "mass # pack together the acceleration components # a =", "of simulation pos is N x 3 matrix of positions", "def getAcc(pos: dc.float64[N, 3], mass: dc.float64[N], G: dc.float64, softening: dc.float64):", "vel, mass, G) t = 0.0 # Simulation Main Loop", "energy of the system \"\"\" # Kinetic Energy: # KE", "of system KE[i + 1], PE[i + 1] = getEnergy(pos,", "3], vel: dc.float64[N, 3], mass: dc.float64[N], G: dc.float64): \"\"\" Get", "Loop for i in range(Nt): # (1/2) kick vel +=", "pos[:, 2:3] # matrix that stores all pairwise particle separations:", "# dx = np.transpose(x) - x # dy = np.transpose(y)", "particle separations inv_r3 = (dx**2 + dy**2 + dz**2 +", "- y # dz = z.T - z # dx", "gravity Code calculates pairwise forces according to Newton's Law of", "np.ndarray(Nt + 1, dtype=np.float64) KE[0], PE[0] = getEnergy(pos, vel, mass,", "energy of system KE[i + 1], PE[i + 1] =", "inv_r3[inv_r3>0] = inv_r3[inv_r3>0]**(-1.5) I = inv_r3 > 0 np.power(inv_r3, -1.5,", 
"np.divide(np.mean(np.reshape(mass, (N, 1)) * vel, axis=0), np.mean(mass)) np.subtract(vel, np.mean(np.reshape(mass, (N,", "Simulation Main Loop for i in range(Nt): # (1/2) kick", "vel * dt # update accelerations acc[:] = getAcc(pos, mass,", "i in range(Nt): # (1/2) kick vel += acc *", "acceleration on each particle due to Newton's Law pos is", "stores 1/r^3 for all particle pairwise particle separations inv_r3 =", "# out[:, 0] = a # out[:, 1] = b", "constant softening is the softening length a is N x", "accelerations \"\"\" # positions r = [x,y,z] for all particles", "# Kinetic Energy: # KE = 0.5 * np.sum(np.sum( mass", "0.0 for j in range(N): for k in range(j +", "2.0 # drift pos += vel * dt # update", "# get energy of system KE[i + 1], PE[i +", "np.sum( mass * vel**2 ) KE = 0.5 * np.sum(np.reshape(mass,", "PE = np.ndarray(Nt + 1, dtype=np.float64) KE[0], PE[0] = getEnergy(pos,", "+= tmp[j, k] PE *= G return KE, PE @dc.program", "+ 1] = getEnergy(pos, vel, mass, G) return KE, PE", "np.hstack((ax,ay,az)) a = np.ndarray((N, 3), dtype=np.float64) # hstack(a, ax, ay,", "softening length a is N x 3 matrix of accelerations", "getEnergy(pos, vel, mass, G) t = 0.0 # Simulation Main", "= G * np.sum(np.triu(-(mass*mass.T)*inv_r,1)) tmp = -np.multiply.outer(mass, mass) * inv_r", "positions vel is N x 3 matrix of velocities mass", "1/r for all particle pairwise particle separations inv_r = np.sqrt(dx**2", "# Convert to Center-of-Mass frame # vel -= np.mean(mass *", "np.sum(np.reshape(mass, (N, 1)) * vel**2) # Potential Energy: # positions", "+ dy**2 + dz**2 + softening**2) # inv_r3[inv_r3>0] = inv_r3[inv_r3>0]**(-1.5)", "upper triangle, to count each interaction only once # PE", "Main Loop for i in range(Nt): # (1/2) kick vel", "softening**2) # inv_r3[inv_r3>0] = inv_r3[inv_r3>0]**(-1.5) I = inv_r3 > 0", "np.sum(np.sum( mass * vel**2 )) # KE = 0.5 *", "of positions mass is an N x 1 vector of", "= np.ndarray((N, 3), dtype=np.float64) # hstack(a, ax, ay, az) a[:,", "PE += 
tmp[j, k] PE *= G return KE, PE", "= np.add.outer(-y, y) dz = np.add.outer(-z, z) # matrix that", "1)) * vel, axis=0) / np.mean(mass), out=vel) # calculate initial", "from https://github.com/pmocz/nbody-python/blob/master/nbody.py # TODO: Add GPL-3.0 License import numpy as", "of the system PE is the potential energy of the", "N x 3 matrix of positions vel is N x", "system KE[i + 1], PE[i + 1] = getEnergy(pos, vel,", "where=I) ax = G * (dx * inv_r3) @ mass", "* (dz * inv_r3) @ mass # pack together the", "= 1.0/inv_r[inv_r>0] I = inv_r > 0 np.divide(1.0, inv_r, out=inv_r,", "* dt / 2.0 # update time t += dt", "# KE = 0.5 * np.sum(np.sum( mass * vel**2 ))", "vel, axis=0) / np.mean(mass) # tmp = np.divide(np.mean(np.reshape(mass, (N, 1))", "pos[:, 1:2] z = pos[:, 2:3] # matrix that stores", "+ dy**2 + dz**2) # inv_r[inv_r>0] = 1.0/inv_r[inv_r>0] I =", "PE[0] = getEnergy(pos, vel, mass, G) t = 0.0 #", "to Newton's Law of Gravity \"\"\" N, Nt = (dc.symbol(s,", "mass * vel**2 ) KE = 0.5 * np.sum(np.reshape(mass, (N,", "kick vel += acc * dt / 2.0 # update", "* inv_r3) @ mass ay = G * (dy *", "np.mean(mass) # tmp = np.divide(np.mean(np.reshape(mass, (N, 1)) * vel, axis=0),", "3], mass: dc.float64[N], G: dc.float64, softening: dc.float64): \"\"\" Calculate the", "ay = G * (dy * inv_r3) @ mass az", "the system \"\"\" # Kinetic Energy: # KE = 0.5", "vel += acc * dt / 2.0 # drift pos", "* (dy * inv_r3) @ mass az = G *", "3 matrix of accelerations \"\"\" # positions r = [x,y,z]", "* vel, axis=0) / np.mean(mass), out=vel) # calculate initial gravitational", "dc.float64, softening: dc.float64): \"\"\" Calculate the acceleration on each particle", "Calculate the acceleration on each particle due to Newton's Law", "dz = np.add.outer(-z, z) # matrix that stores 1/r for", "G * (dx * inv_r3) @ mass ay = G", "# Potential Energy: # positions r = [x,y,z] for all", "dz**2 + softening**2) # inv_r3[inv_r3>0] = inv_r3[inv_r3>0]**(-1.5) I = inv_r3", "where=I) # sum over upper triangle, to 
count each interaction", "a[:, 0] = ax a[:, 1] = ay a[:, 2]", "mass az = G * (dz * inv_r3) @ mass", "dc.float64): \"\"\" Get kinetic energy (KE) and potential energy (PE)", "pairwise particle separations: r_j - r_i # dx = x.T", "for all particles x = pos[:, 0:1] y = pos[:,", "= getEnergy(pos, vel, mass, G) t = 0.0 # Simulation", "= np.add.outer(-z, z) # matrix that stores 1/r for all", "3], mass: dc.float64[N], G: dc.float64): \"\"\" Get kinetic energy (KE)", "for all particle pairwise particle separations inv_r3 = (dx**2 +", "together the acceleration components # a = np.hstack((ax,ay,az)) a =", "dc.float64[N], c: dc.float64[N]): # out[:, 0] = a # out[:,", "energy of system KE = np.ndarray(Nt + 1, dtype=np.float64) PE", "= b # out[:, 2] = c @dc.program def getAcc(pos:", "acceleration components # a = np.hstack((ax,ay,az)) a = np.ndarray((N, 3),", "Your Own N-body Simulation (With Python) <NAME> (2020) Princeton Univeristy,", "= G * (dy * inv_r3) @ mass az =", "dy**2 + dz**2) # inv_r[inv_r>0] = 1.0/inv_r[inv_r>0] I = inv_r", "Univeristy, @PMocz Simulate orbits of stars interacting due to gravity", "z # dx = np.transpose(x) - x # dy =", "np.mean(mass), out=vel) # calculate initial gravitational accelerations acc = getAcc(pos,", "energy (PE) of simulation pos is N x 3 matrix", "potential energy (PE) of simulation pos is N x 3", "y) dz = np.add.outer(-z, z) # matrix that stores 1/r^3", "once # PE = G * np.sum(np.sum(np.triu(-(mass*mass.T)*inv_r,1))) # PE =", "\"\"\" N, Nt = (dc.symbol(s, dtype=dc.int64) for s in ('N',", "tmp[j, k] PE *= G return KE, PE @dc.program def", "y) dz = np.add.outer(-z, z) # matrix that stores 1/r", "vel**2 ) KE = 0.5 * np.sum(np.reshape(mass, (N, 1)) *", "dc.float64[N], pos: dc.float64[N, 3], vel: dc.float64[N, 3], dt: dc.float64, G:", "positions mass is an N x 1 vector of masses", "vel -= np.mean(mass * vel, axis=0) / np.mean(mass) # vel", "each particle due to Newton's Law pos is an N", "y.T - y # dz = z.T - z #", "in range(Nt): # (1/2) 
kick vel += acc * dt", "matrix of positions mass is an N x 1 vector", "* np.sum(np.triu(-(mass*mass.T)*inv_r,1)) tmp = -np.multiply.outer(mass, mass) * inv_r PE =", "= (dc.symbol(s, dtype=dc.int64) for s in ('N', 'Nt')) # @dc.program", "= pos[:, 2:3] # matrix that stores all pairwise particle", "all particles x = pos[:, 0:1] y = pos[:, 1:2]", "system PE is the potential energy of the system \"\"\"", "G is Newton's Gravitational constant KE is the kinetic energy", "# sum over upper triangle, to count each interaction only", "Center-of-Mass frame # vel -= np.mean(mass * vel, axis=0) /", "as dc \"\"\" Create Your Own N-body Simulation (With Python)", "is the kinetic energy of the system PE is the", "stores all pairwise particle separations: r_j - r_i # dx", "# PE = G * np.sum(np.sum(np.triu(-(mass*mass.T)*inv_r,1))) # PE = G", "* dt # update accelerations acc[:] = getAcc(pos, mass, G,", "3 matrix of positions vel is N x 3 matrix", "= x.T - x # dy = y.T - y", "Own N-body Simulation (With Python) <NAME> (2020) Princeton Univeristy, @PMocz", "axis=0) / np.mean(mass) # vel -= np.mean(np.reshape(mass, (N, 1)) *", "- r_i # dx = x.T - x # dy", "= G * (dx * inv_r3) @ mass ay =", "I = inv_r3 > 0 np.power(inv_r3, -1.5, out=inv_r3, where=I) ax", "KE = 0.5 * np.sum( mass * vel**2 ) KE", "vel -= np.mean(np.reshape(mass, (N, 1)) * vel, axis=0) / np.mean(mass)", "1)) * vel, axis=0), np.mean(mass)) np.subtract(vel, np.mean(np.reshape(mass, (N, 1)) *", "dt # get energy of system KE[i + 1], PE[i", "G * np.sum(np.sum(np.triu(-(mass*mass.T)*inv_r,1))) # PE = G * np.sum(np.triu(-(mass*mass.T)*inv_r,1)) tmp", "= np.ndarray(Nt + 1, dtype=np.float64) KE[0], PE[0] = getEnergy(pos, vel,", "axis=0) / np.mean(mass) # tmp = np.divide(np.mean(np.reshape(mass, (N, 1)) *", "# (1/2) kick vel += acc * dt / 2.0", "dc.float64[N], G: dc.float64, softening: dc.float64): \"\"\" Calculate the acceleration on", "matrix that stores 1/r^3 for all particle pairwise particle separations", "pos is N x 3 matrix of 
positions vel is", "- y # dz = np.transpose(z) - z dx =", "hstack(out: dc.float64[N, 3], a: dc.float64[N], # b: dc.float64[N], c: dc.float64[N]):", "(KE) and potential energy (PE) of simulation pos is N", "to count each interaction only once # PE = G", "Gravitational constant softening is the softening length a is N", "-1.5, out=inv_r3, where=I) ax = G * (dx * inv_r3)", "0.5 * np.sum(np.reshape(mass, (N, 1)) * vel**2) # Potential Energy:", "= np.sqrt(dx**2 + dy**2 + dz**2) # inv_r[inv_r>0] = 1.0/inv_r[inv_r>0]", "3], a: dc.float64[N], # b: dc.float64[N], c: dc.float64[N]): # out[:,", "- z dx = np.add.outer(-x, x) dy = np.add.outer(-y, y)", "= getAcc(pos, mass, G, softening) # (1/2) kick vel +=", "@dc.program def nbody(mass: dc.float64[N], pos: dc.float64[N, 3], vel: dc.float64[N, 3],", "KE is the kinetic energy of the system PE is", "return KE, PE @dc.program def nbody(mass: dc.float64[N], pos: dc.float64[N, 3],", "dace as dc \"\"\" Create Your Own N-body Simulation (With", "c: dc.float64[N]): # out[:, 0] = a # out[:, 1]", "np.transpose(x) - x # dy = np.transpose(y) - y #", "= inv_r > 0 np.divide(1.0, inv_r, out=inv_r, where=I) # sum", "Princeton Univeristy, @PMocz Simulate orbits of stars interacting due to", "np.add.outer(-z, z) # matrix that stores 1/r for all particle", "3), dtype=np.float64) # hstack(a, ax, ay, az) a[:, 0] =", "\"\"\" # positions r = [x,y,z] for all particles x", "dc.float64[N, 3], mass: dc.float64[N], G: dc.float64): \"\"\" Get kinetic energy", "softening is the softening length a is N x 3", "dz**2) # inv_r[inv_r>0] = 1.0/inv_r[inv_r>0] I = inv_r > 0", "+ dz**2 + softening**2) # inv_r3[inv_r3>0] = inv_r3[inv_r3>0]**(-1.5) I =", "that stores 1/r for all particle pairwise particle separations inv_r", "0.5 * np.sum(np.sum( mass * vel**2 )) # KE =", "# matrix that stores 1/r^3 for all particle pairwise particle", "az) a[:, 0] = ax a[:, 1] = ay a[:,", "(N, 1)) * vel, axis=0) / np.mean(mass) # tmp =", "separations inv_r3 = (dx**2 + dy**2 + dz**2 + 
softening**2)", "1:2] z = pos[:, 2:3] # matrix that stores all", "# vel -= np.mean(np.reshape(mass, (N, 1)) * vel, axis=0) /", "time t += dt # get energy of system KE[i", "forces according to Newton's Law of Gravity \"\"\" N, Nt", "2] = c @dc.program def getAcc(pos: dc.float64[N, 3], mass: dc.float64[N],", "np.sqrt(dx**2 + dy**2 + dz**2) # inv_r[inv_r>0] = 1.0/inv_r[inv_r>0] I", "+= vel * dt # update accelerations acc[:] = getAcc(pos,", "= c @dc.program def getAcc(pos: dc.float64[N, 3], mass: dc.float64[N], G:", "as np import dace as dc \"\"\" Create Your Own", "vector of masses G is Newton's Gravitational constant softening is", "nbody(mass: dc.float64[N], pos: dc.float64[N, 3], vel: dc.float64[N, 3], dt: dc.float64,", "kinetic energy (KE) and potential energy (PE) of simulation pos", "x = pos[:, 0:1] y = pos[:, 1:2] z =", "x 3 matrix of positions vel is N x 3", "is the softening length a is N x 3 matrix", "# vel -= np.mean(mass * vel, axis=0) / np.mean(mass) #", "components # a = np.hstack((ax,ay,az)) a = np.ndarray((N, 3), dtype=np.float64)", "of the system \"\"\" # Kinetic Energy: # KE =", "Newton's Law of Gravity \"\"\" N, Nt = (dc.symbol(s, dtype=dc.int64)", "# inv_r3[inv_r3>0] = inv_r3[inv_r3>0]**(-1.5) I = inv_r3 > 0 np.power(inv_r3,", "* vel**2 )) # KE = 0.5 * np.sum( mass", "of accelerations \"\"\" # positions r = [x,y,z] for all", "# update time t += dt # get energy of", "accelerations acc[:] = getAcc(pos, mass, G, softening) # (1/2) kick", "y # dz = np.transpose(z) - z dx = np.add.outer(-x,", "energy of the system PE is the potential energy of", "getEnergy(pos: dc.float64[N, 3], vel: dc.float64[N, 3], mass: dc.float64[N], G: dc.float64):", "Convert to Center-of-Mass frame # vel -= np.mean(mass * vel,", "+ dz**2) # inv_r[inv_r>0] = 1.0/inv_r[inv_r>0] I = inv_r >", "def nbody(mass: dc.float64[N], pos: dc.float64[N, 3], vel: dc.float64[N, 3], dt:", "vel is N x 3 matrix of velocities mass is", "\"\"\" Calculate the acceleration on each particle due to 
Newton's", "ax, ay, az) a[:, 0] = ax a[:, 1] =", "def getEnergy(pos: dc.float64[N, 3], vel: dc.float64[N, 3], mass: dc.float64[N], G:", "G: dc.float64, softening: dc.float64): \"\"\" Calculate the acceleration on each", "c @dc.program def getAcc(pos: dc.float64[N, 3], mass: dc.float64[N], G: dc.float64,", "# calculate initial energy of system KE = np.ndarray(Nt +", "dc.float64): # Convert to Center-of-Mass frame # vel -= np.mean(mass", "getAcc(pos, mass, G, softening) # calculate initial energy of system", "(PE) of simulation pos is N x 3 matrix of", "mass, G) t = 0.0 # Simulation Main Loop for", "= y.T - y # dz = z.T - z", "KE = 0.5 * np.sum(np.sum( mass * vel**2 )) #", "vel**2 )) # KE = 0.5 * np.sum( mass *", "np.subtract(vel, np.mean(np.reshape(mass, (N, 1)) * vel, axis=0) / np.mean(mass), out=vel)", "ay a[:, 2] = az return a @dc.program def getEnergy(pos:", "return a @dc.program def getEnergy(pos: dc.float64[N, 3], vel: dc.float64[N, 3],", "b # out[:, 2] = c @dc.program def getAcc(pos: dc.float64[N,", "np.power(inv_r3, -1.5, out=inv_r3, where=I) ax = G * (dx *", "frame # vel -= np.mean(mass * vel, axis=0) / np.mean(mass)", "# hstack(a, ax, ay, az) a[:, 0] = ax a[:,", "N): PE += tmp[j, k] PE *= G return KE,", "an N x 1 vector of masses G is Newton's", "vel, axis=0) / np.mean(mass), out=vel) # calculate initial gravitational accelerations", "import numpy as np import dace as dc \"\"\" Create", "Gravity \"\"\" N, Nt = (dc.symbol(s, dtype=dc.int64) for s in", "@dc.program def getEnergy(pos: dc.float64[N, 3], vel: dc.float64[N, 3], mass: dc.float64[N],", "according to Newton's Law of Gravity \"\"\" N, Nt =", "* inv_r3) @ mass # pack together the acceleration components", "pos[:, 0:1] y = pos[:, 1:2] z = pos[:, 2:3]", "+= acc * dt / 2.0 # drift pos +=", "0.0 # Simulation Main Loop for i in range(Nt): #", "for i in range(Nt): # (1/2) kick vel += acc", "= np.transpose(y) - y # dz = np.transpose(z) - z", "np.sum(np.triu(-(mass*mass.T)*inv_r,1)) tmp = 
-np.multiply.outer(mass, mass) * inv_r PE = 0.0", "in ('N', 'Nt')) # @dc.program # def hstack(out: dc.float64[N, 3],", "KE[0], PE[0] = getEnergy(pos, vel, mass, G) t = 0.0", "getAcc(pos: dc.float64[N, 3], mass: dc.float64[N], G: dc.float64, softening: dc.float64): \"\"\"", "a[:, 2] = az return a @dc.program def getEnergy(pos: dc.float64[N,", "# Adapted from https://github.com/pmocz/nbody-python/blob/master/nbody.py # TODO: Add GPL-3.0 License import", "N x 3 matrix of velocities mass is an N", "Newton's Gravitational constant KE is the kinetic energy of the", "/ np.mean(mass), out=vel) # calculate initial gravitational accelerations acc =", "gravitational accelerations acc = getAcc(pos, mass, G, softening) # calculate", "z dx = np.add.outer(-x, x) dy = np.add.outer(-y, y) dz", "= 0.5 * np.sum(np.sum( mass * vel**2 )) # KE", "1, N): PE += tmp[j, k] PE *= G return", "matrix that stores 1/r for all particle pairwise particle separations", "interaction only once # PE = G * np.sum(np.sum(np.triu(-(mass*mass.T)*inv_r,1))) #", "0] = ax a[:, 1] = ay a[:, 2] =", "1, dtype=np.float64) KE[0], PE[0] = getEnergy(pos, vel, mass, G) t", "* np.sum(np.reshape(mass, (N, 1)) * vel**2) # Potential Energy: #", "# KE = 0.5 * np.sum( mass * vel**2 )", "get energy of system KE[i + 1], PE[i + 1]", "j in range(N): for k in range(j + 1, N):", "(With Python) <NAME> (2020) Princeton Univeristy, @PMocz Simulate orbits of", "<NAME> (2020) Princeton Univeristy, @PMocz Simulate orbits of stars interacting", "system \"\"\" # Kinetic Energy: # KE = 0.5 *", "1)) * vel**2) # Potential Energy: # positions r =", "out=inv_r3, where=I) ax = G * (dx * inv_r3) @", "import dace as dc \"\"\" Create Your Own N-body Simulation", "Newton's Gravitational constant softening is the softening length a is", "dtype=np.float64) KE[0], PE[0] = getEnergy(pos, vel, mass, G) t =", "r_i # dx = x.T - x # dy =", "to Center-of-Mass frame # vel -= np.mean(mass * vel, axis=0)", "np.mean(mass)) np.subtract(vel, 
np.mean(np.reshape(mass, (N, 1)) * vel, axis=0) / np.mean(mass),", "1/r^3 for all particle pairwise particle separations inv_r3 = (dx**2", "0 np.power(inv_r3, -1.5, out=inv_r3, where=I) ax = G * (dx", "= ay a[:, 2] = az return a @dc.program def", "dc.float64[N, 3], vel: dc.float64[N, 3], mass: dc.float64[N], G: dc.float64): \"\"\"", "in range(j + 1, N): PE += tmp[j, k] PE", "1, dtype=np.float64) PE = np.ndarray(Nt + 1, dtype=np.float64) KE[0], PE[0]", "2] = az return a @dc.program def getEnergy(pos: dc.float64[N, 3],", "the acceleration on each particle due to Newton's Law pos", "(N, 1)) * vel, axis=0) / np.mean(mass), out=vel) # calculate", "inv_r > 0 np.divide(1.0, inv_r, out=inv_r, where=I) # sum over", "dt # update accelerations acc[:] = getAcc(pos, mass, G, softening)", "accelerations acc = getAcc(pos, mass, G, softening) # calculate initial", "* inv_r PE = 0.0 for j in range(N): for", "in range(N): for k in range(j + 1, N): PE", "PE is the potential energy of the system \"\"\" #", "Code calculates pairwise forces according to Newton's Law of Gravity", "Law pos is an N x 3 matrix of positions", "dc.float64[N, 3], mass: dc.float64[N], G: dc.float64, softening: dc.float64): \"\"\" Calculate", "range(j + 1, N): PE += tmp[j, k] PE *=", "= G * np.sum(np.sum(np.triu(-(mass*mass.T)*inv_r,1))) # PE = G * np.sum(np.triu(-(mass*mass.T)*inv_r,1))", "softening) # calculate initial energy of system KE = np.ndarray(Nt", "Simulation (With Python) <NAME> (2020) Princeton Univeristy, @PMocz Simulate orbits", "N x 1 vector of masses G is Newton's Gravitational", "z) # matrix that stores 1/r^3 for all particle pairwise", "np.divide(1.0, inv_r, out=inv_r, where=I) # sum over upper triangle, to", "PE *= G return KE, PE @dc.program def nbody(mass: dc.float64[N],", "N x 3 matrix of accelerations \"\"\" # positions r", "mass: dc.float64[N], G: dc.float64, softening: dc.float64): \"\"\" Calculate the acceleration", "= a # out[:, 1] = b # out[:, 2]", "# matrix that stores all pairwise 
particle separations: r_j -", "particle pairwise particle separations inv_r3 = (dx**2 + dy**2 +", "# TODO: Add GPL-3.0 License import numpy as np import", "G * (dy * inv_r3) @ mass az = G", "> 0 np.divide(1.0, inv_r, out=inv_r, where=I) # sum over upper", "vel: dc.float64[N, 3], dt: dc.float64, G: dc.float64, softening: dc.float64): #", "# tmp = np.divide(np.mean(np.reshape(mass, (N, 1)) * vel, axis=0), np.mean(mass))", "axis=0) / np.mean(mass), out=vel) # calculate initial gravitational accelerations acc", "is N x 3 matrix of positions vel is N", "interacting due to gravity Code calculates pairwise forces according to", "inv_r3[inv_r3>0]**(-1.5) I = inv_r3 > 0 np.power(inv_r3, -1.5, out=inv_r3, where=I)", "License import numpy as np import dace as dc \"\"\"", "dc.float64[N, 3], dt: dc.float64, G: dc.float64, softening: dc.float64): # Convert", "dc.float64[N, 3], vel: dc.float64[N, 3], dt: dc.float64, G: dc.float64, softening:", "t += dt # get energy of system KE[i +", "due to gravity Code calculates pairwise forces according to Newton's", "matrix of accelerations \"\"\" # positions r = [x,y,z] for", "0.5 * np.sum( mass * vel**2 ) KE = 0.5", "GPL-3.0 License import numpy as np import dace as dc", "dtype=np.float64) PE = np.ndarray(Nt + 1, dtype=np.float64) KE[0], PE[0] =", "= 0.0 for j in range(N): for k in range(j", "masses G is Newton's Gravitational constant softening is the softening", "pack together the acceleration components # a = np.hstack((ax,ay,az)) a", "* (dx * inv_r3) @ mass ay = G *", "dy**2 + dz**2 + softening**2) # inv_r3[inv_r3>0] = inv_r3[inv_r3>0]**(-1.5) I", "pos: dc.float64[N, 3], vel: dc.float64[N, 3], dt: dc.float64, G: dc.float64,", "dtype=np.float64) # hstack(a, ax, ay, az) a[:, 0] = ax", "the potential energy of the system \"\"\" # Kinetic Energy:", "potential energy of the system \"\"\" # Kinetic Energy: #", "b: dc.float64[N], c: dc.float64[N]): # out[:, 0] = a #", "# dy = y.T - y # dz = z.T", "sum over upper triangle, to count each 
interaction only once", "z.T - z # dx = np.transpose(x) - x #", "inv_r3) @ mass # pack together the acceleration components #", "softening) # (1/2) kick vel += acc * dt /", "N-body Simulation (With Python) <NAME> (2020) Princeton Univeristy, @PMocz Simulate", "is N x 3 matrix of accelerations \"\"\" # positions", "# @dc.program # def hstack(out: dc.float64[N, 3], a: dc.float64[N], #", "2:3] # matrix that stores all pairwise particle separations: r_j", "separations: r_j - r_i # dx = x.T - x", "Kinetic Energy: # KE = 0.5 * np.sum(np.sum( mass *", "for j in range(N): for k in range(j + 1,", "dx = x.T - x # dy = y.T -", "inv_r3) @ mass ay = G * (dy * inv_r3)", "a # out[:, 1] = b # out[:, 2] =", "dz = np.add.outer(-z, z) # matrix that stores 1/r^3 for", "matrix of positions vel is N x 3 matrix of", "dtype=dc.int64) for s in ('N', 'Nt')) # @dc.program # def", "a is N x 3 matrix of accelerations \"\"\" #", "2.0 # update time t += dt # get energy", "out=inv_r, where=I) # sum over upper triangle, to count each", "np.transpose(z) - z dx = np.add.outer(-x, x) dy = np.add.outer(-y,", "np.mean(mass) # vel -= np.mean(np.reshape(mass, (N, 1)) * vel, axis=0)", "vel += acc * dt / 2.0 # update time", "of stars interacting due to gravity Code calculates pairwise forces", "stars interacting due to gravity Code calculates pairwise forces according", "# update accelerations acc[:] = getAcc(pos, mass, G, softening) #", "np.ndarray((N, 3), dtype=np.float64) # hstack(a, ax, ay, az) a[:, 0]", "mass ay = G * (dy * inv_r3) @ mass", "np.add.outer(-x, x) dy = np.add.outer(-y, y) dz = np.add.outer(-z, z)", "N, Nt = (dc.symbol(s, dtype=dc.int64) for s in ('N', 'Nt'))", "= np.add.outer(-x, x) dy = np.add.outer(-y, y) dz = np.add.outer(-z,", "np import dace as dc \"\"\" Create Your Own N-body", "(dy * inv_r3) @ mass az = G * (dz", "# dx = x.T - x # dy = y.T", "# matrix that stores 1/r for all particle pairwise particle", "Energy: # KE = 0.5 * np.sum(np.sum( mass * vel**2", "dx = np.add.outer(-x, x) 
dy = np.add.outer(-y, y) dz =", "np.sum(np.sum(np.triu(-(mass*mass.T)*inv_r,1))) # PE = G * np.sum(np.triu(-(mass*mass.T)*inv_r,1)) tmp = -np.multiply.outer(mass,", "mass, G, softening) # calculate initial energy of system KE", "to gravity Code calculates pairwise forces according to Newton's Law", "range(N): for k in range(j + 1, N): PE +=", "# inv_r[inv_r>0] = 1.0/inv_r[inv_r>0] I = inv_r > 0 np.divide(1.0,", "az return a @dc.program def getEnergy(pos: dc.float64[N, 3], vel: dc.float64[N,", "a = np.ndarray((N, 3), dtype=np.float64) # hstack(a, ax, ay, az)", "length a is N x 3 matrix of accelerations \"\"\"", "particle separations: r_j - r_i # dx = x.T -", "r_j - r_i # dx = x.T - x #", "pairwise particle separations inv_r = np.sqrt(dx**2 + dy**2 + dz**2)", "softening: dc.float64): # Convert to Center-of-Mass frame # vel -=", "to Newton's Law pos is an N x 3 matrix", "(dx * inv_r3) @ mass ay = G * (dy", "= [x,y,z] for all particles x = pos[:, 0:1] y", "= (dx**2 + dy**2 + dz**2 + softening**2) # inv_r3[inv_r3>0]", "az = G * (dz * inv_r3) @ mass #", "np.mean(np.reshape(mass, (N, 1)) * vel, axis=0) / np.mean(mass), out=vel) #", "inv_r3 > 0 np.power(inv_r3, -1.5, out=inv_r3, where=I) ax = G", "* dt / 2.0 # drift pos += vel *", "@PMocz Simulate orbits of stars interacting due to gravity Code", "(dc.symbol(s, dtype=dc.int64) for s in ('N', 'Nt')) # @dc.program #", "pairwise forces according to Newton's Law of Gravity \"\"\" N,", "* vel**2 ) KE = 0.5 * np.sum(np.reshape(mass, (N, 1))", "KE[i + 1], PE[i + 1] = getEnergy(pos, vel, mass,", "ay, az) a[:, 0] = ax a[:, 1] = ay", "# dy = np.transpose(y) - y # dz = np.transpose(z)", "3 matrix of positions mass is an N x 1", "PE[i + 1] = getEnergy(pos, vel, mass, G) return KE,", "dc.float64, G: dc.float64, softening: dc.float64): # Convert to Center-of-Mass frame", "\"\"\" Create Your Own N-body Simulation (With Python) <NAME> (2020)", "(dz * inv_r3) @ mass # pack together the acceleration", "vel: dc.float64[N, 3], mass: 
dc.float64[N], G: dc.float64): \"\"\" Get kinetic", "# pack together the acceleration components # a = np.hstack((ax,ay,az))", "vel**2) # Potential Energy: # positions r = [x,y,z] for", "for s in ('N', 'Nt')) # @dc.program # def hstack(out:", "Python) <NAME> (2020) Princeton Univeristy, @PMocz Simulate orbits of stars", "vel, axis=0), np.mean(mass)) np.subtract(vel, np.mean(np.reshape(mass, (N, 1)) * vel, axis=0)", "mass) * inv_r PE = 0.0 for j in range(N):", "+= acc * dt / 2.0 # update time t", "that stores all pairwise particle separations: r_j - r_i #", "# a = np.hstack((ax,ay,az)) a = np.ndarray((N, 3), dtype=np.float64) #", "Energy: # positions r = [x,y,z] for all particles x", "dt / 2.0 # drift pos += vel * dt", "k in range(j + 1, N): PE += tmp[j, k]", "the softening length a is N x 3 matrix of", "* np.sum(np.sum(np.triu(-(mass*mass.T)*inv_r,1))) # PE = G * np.sum(np.triu(-(mass*mass.T)*inv_r,1)) tmp =", "Newton's Law pos is an N x 3 matrix of", "# out[:, 1] = b # out[:, 2] = c", "[x,y,z] for all particles x = pos[:, 0:1] y =", "Adapted from https://github.com/pmocz/nbody-python/blob/master/nbody.py # TODO: Add GPL-3.0 License import numpy", "* vel, axis=0) / np.mean(mass) # vel -= np.mean(np.reshape(mass, (N,", "= np.add.outer(-z, z) # matrix that stores 1/r^3 for all", "x) dy = np.add.outer(-y, y) dz = np.add.outer(-z, z) #", "- z # dx = np.transpose(x) - x # dy", "dc.float64): \"\"\" Calculate the acceleration on each particle due to", "* vel, axis=0) / np.mean(mass) # tmp = np.divide(np.mean(np.reshape(mass, (N,", "@ mass az = G * (dz * inv_r3) @", "np.mean(np.reshape(mass, (N, 1)) * vel, axis=0) / np.mean(mass) # tmp", "= 0.5 * np.sum( mass * vel**2 ) KE =", "inv_r PE = 0.0 for j in range(N): for k", "axis=0), np.mean(mass)) np.subtract(vel, np.mean(np.reshape(mass, (N, 1)) * vel, axis=0) /", "1] = ay a[:, 2] = az return a @dc.program", "PE = 0.0 for j in range(N): for k in", "PE = G * np.sum(np.triu(-(mass*mass.T)*inv_r,1)) tmp = 
-np.multiply.outer(mass, mass) *", "softening: dc.float64): \"\"\" Calculate the acceleration on each particle due", "system KE = np.ndarray(Nt + 1, dtype=np.float64) PE = np.ndarray(Nt", "-= np.mean(mass * vel, axis=0) / np.mean(mass) # vel -=", "of system KE = np.ndarray(Nt + 1, dtype=np.float64) PE =", "3], vel: dc.float64[N, 3], dt: dc.float64, G: dc.float64, softening: dc.float64):", "= az return a @dc.program def getEnergy(pos: dc.float64[N, 3], vel:", "particle pairwise particle separations inv_r = np.sqrt(dx**2 + dy**2 +", "+ 1], PE[i + 1] = getEnergy(pos, vel, mass, G)", "out[:, 1] = b # out[:, 2] = c @dc.program", "vector of masses G is Newton's Gravitational constant KE is", "due to Newton's Law pos is an N x 3", "* np.sum( mass * vel**2 ) KE = 0.5 *", "over upper triangle, to count each interaction only once #", "calculate initial gravitational accelerations acc = getAcc(pos, mass, G, softening)", "/ 2.0 # drift pos += vel * dt #", "* vel**2) # Potential Energy: # positions r = [x,y,z]", "s in ('N', 'Nt')) # @dc.program # def hstack(out: dc.float64[N,", "# def hstack(out: dc.float64[N, 3], a: dc.float64[N], # b: dc.float64[N],", "3 matrix of velocities mass is an N x 1", "I = inv_r > 0 np.divide(1.0, inv_r, out=inv_r, where=I) #", "ax = G * (dx * inv_r3) @ mass ay", "k] PE *= G return KE, PE @dc.program def nbody(mass:", "# positions r = [x,y,z] for all particles x =", "= 0.0 # Simulation Main Loop for i in range(Nt):", "stores 1/r for all particle pairwise particle separations inv_r =", "matrix that stores all pairwise particle separations: r_j - r_i", "= np.transpose(z) - z dx = np.add.outer(-x, x) dy =", "particle separations inv_r = np.sqrt(dx**2 + dy**2 + dz**2) #", "'Nt')) # @dc.program # def hstack(out: dc.float64[N, 3], a: dc.float64[N],", "is an N x 3 matrix of positions mass is", "np.mean(mass * vel, axis=0) / np.mean(mass) # vel -= np.mean(np.reshape(mass,", "0:1] y = pos[:, 1:2] z = pos[:, 2:3] #", "/ 2.0 # update time t += dt # get", 
"Potential Energy: # positions r = [x,y,z] for all particles", "Add GPL-3.0 License import numpy as np import dace as", "(1/2) kick vel += acc * dt / 2.0 #", "pairwise particle separations inv_r3 = (dx**2 + dy**2 + dz**2", "* vel, axis=0), np.mean(mass)) np.subtract(vel, np.mean(np.reshape(mass, (N, 1)) * vel,", "<reponame>frahlg/npbench<filename>npbench/benchmarks/nbody/nbody_dace.py # Adapted from https://github.com/pmocz/nbody-python/blob/master/nbody.py # TODO: Add GPL-3.0 License", "-= np.mean(np.reshape(mass, (N, 1)) * vel, axis=0) / np.mean(mass) #", "of masses G is Newton's Gravitational constant softening is the", "is the potential energy of the system \"\"\" # Kinetic", "of Gravity \"\"\" N, Nt = (dc.symbol(s, dtype=dc.int64) for s", "each interaction only once # PE = G * np.sum(np.sum(np.triu(-(mass*mass.T)*inv_r,1)))", "= -np.multiply.outer(mass, mass) * inv_r PE = 0.0 for j", "= 0.5 * np.sum(np.reshape(mass, (N, 1)) * vel**2) # Potential", "hstack(a, ax, ay, az) a[:, 0] = ax a[:, 1]", "1 vector of masses G is Newton's Gravitational constant softening", "1] = b # out[:, 2] = c @dc.program def", "ax a[:, 1] = ay a[:, 2] = az return", "# PE = G * np.sum(np.triu(-(mass*mass.T)*inv_r,1)) tmp = -np.multiply.outer(mass, mass)", "def hstack(out: dc.float64[N, 3], a: dc.float64[N], # b: dc.float64[N], c:", "@dc.program def getAcc(pos: dc.float64[N, 3], mass: dc.float64[N], G: dc.float64, softening:", "1.0/inv_r[inv_r>0] I = inv_r > 0 np.divide(1.0, inv_r, out=inv_r, where=I)", "dt: dc.float64, G: dc.float64, softening: dc.float64): # Convert to Center-of-Mass", ")) # KE = 0.5 * np.sum( mass * vel**2", "range(Nt): # (1/2) kick vel += acc * dt /", "velocities mass is an N x 1 vector of masses", "energy (KE) and potential energy (PE) of simulation pos is", "= np.divide(np.mean(np.reshape(mass, (N, 1)) * vel, axis=0), np.mean(mass)) np.subtract(vel, np.mean(np.reshape(mass,", "for all particle pairwise particle separations inv_r = np.sqrt(dx**2 +", "inv_r[inv_r>0] 
= 1.0/inv_r[inv_r>0] I = inv_r > 0 np.divide(1.0, inv_r,", "particles x = pos[:, 0:1] y = pos[:, 1:2] z", "- x # dy = np.transpose(y) - y # dz", "= G * (dz * inv_r3) @ mass # pack", "= ax a[:, 1] = ay a[:, 2] = az", "# Simulation Main Loop for i in range(Nt): # (1/2)", "(dx**2 + dy**2 + dz**2 + softening**2) # inv_r3[inv_r3>0] =", "PE = G * np.sum(np.sum(np.triu(-(mass*mass.T)*inv_r,1))) # PE = G *", "dc.float64, softening: dc.float64): # Convert to Center-of-Mass frame # vel", "is Newton's Gravitational constant softening is the softening length a", "dy = np.add.outer(-y, y) dz = np.add.outer(-z, z) # matrix", "> 0 np.power(inv_r3, -1.5, out=inv_r3, where=I) ax = G *", "N x 3 matrix of positions mass is an N", "G, softening) # (1/2) kick vel += acc * dt", "mass: dc.float64[N], G: dc.float64): \"\"\" Get kinetic energy (KE) and", "and potential energy (PE) of simulation pos is N x", "kick vel += acc * dt / 2.0 # drift", "# dz = np.transpose(z) - z dx = np.add.outer(-x, x)", "dc \"\"\" Create Your Own N-body Simulation (With Python) <NAME>", "y = pos[:, 1:2] z = pos[:, 2:3] # matrix", "@ mass ay = G * (dy * inv_r3) @", "+ 1, N): PE += tmp[j, k] PE *= G", "getAcc(pos, mass, G, softening) # (1/2) kick vel += acc", "is N x 3 matrix of velocities mass is an", "/ np.mean(mass) # vel -= np.mean(np.reshape(mass, (N, 1)) * vel,", "= inv_r3 > 0 np.power(inv_r3, -1.5, out=inv_r3, where=I) ax =", "inv_r = np.sqrt(dx**2 + dy**2 + dz**2) # inv_r[inv_r>0] =", "+= dt # get energy of system KE[i + 1],", "-np.multiply.outer(mass, mass) * inv_r PE = 0.0 for j in", "= inv_r3[inv_r3>0]**(-1.5) I = inv_r3 > 0 np.power(inv_r3, -1.5, out=inv_r3,", "all particle pairwise particle separations inv_r3 = (dx**2 + dy**2", "z) # matrix that stores 1/r for all particle pairwise", "= z.T - z # dx = np.transpose(x) - x", "# out[:, 2] = c @dc.program def getAcc(pos: dc.float64[N, 3],", "= np.hstack((ax,ay,az)) a = np.ndarray((N, 3), dtype=np.float64) # hstack(a, ax,", "on each particle due to 
Newton's Law pos is an", "of velocities mass is an N x 1 vector of", "x.T - x # dy = y.T - y #", "= pos[:, 0:1] y = pos[:, 1:2] z = pos[:,", "update time t += dt # get energy of system", "dc.float64[N]): # out[:, 0] = a # out[:, 1] =", "all pairwise particle separations: r_j - r_i # dx =", "dz = z.T - z # dx = np.transpose(x) -", "# drift pos += vel * dt # update accelerations", "is Newton's Gravitational constant KE is the kinetic energy of", "mass, G, softening) # (1/2) kick vel += acc *", "separations inv_r = np.sqrt(dx**2 + dy**2 + dz**2) # inv_r[inv_r>0]", "1 vector of masses G is Newton's Gravitational constant KE", "dx = np.transpose(x) - x # dy = np.transpose(y) -", "G * np.sum(np.triu(-(mass*mass.T)*inv_r,1)) tmp = -np.multiply.outer(mass, mass) * inv_r PE", "G is Newton's Gravitational constant softening is the softening length", "orbits of stars interacting due to gravity Code calculates pairwise", "simulation pos is N x 3 matrix of positions vel", "masses G is Newton's Gravitational constant KE is the kinetic", "drift pos += vel * dt # update accelerations acc[:]", "* np.sum(np.sum( mass * vel**2 )) # KE = 0.5", "constant KE is the kinetic energy of the system PE", "Create Your Own N-body Simulation (With Python) <NAME> (2020) Princeton", "mass is an N x 1 vector of masses G", "kinetic energy of the system PE is the potential energy", "a: dc.float64[N], # b: dc.float64[N], c: dc.float64[N]): # out[:, 0]", "3], dt: dc.float64, G: dc.float64, softening: dc.float64): # Convert to", "update accelerations acc[:] = getAcc(pos, mass, G, softening) # (1/2)", "calculates pairwise forces according to Newton's Law of Gravity \"\"\"", "KE = 0.5 * np.sum(np.reshape(mass, (N, 1)) * vel**2) #", "Simulate orbits of stars interacting due to gravity Code calculates", "particle due to Newton's Law pos is an N x", "z = pos[:, 2:3] # matrix that stores all pairwise", "count each interaction only once # PE = G *", "that stores 1/r^3 for all particle pairwise particle 
separations inv_r3", "mass * vel**2 )) # KE = 0.5 * np.sum(", "0 np.divide(1.0, inv_r, out=inv_r, where=I) # sum over upper triangle,", "KE, PE @dc.program def nbody(mass: dc.float64[N], pos: dc.float64[N, 3], vel:", "+ 1, dtype=np.float64) PE = np.ndarray(Nt + 1, dtype=np.float64) KE[0],", "= np.ndarray(Nt + 1, dtype=np.float64) PE = np.ndarray(Nt + 1,", "dz = np.transpose(z) - z dx = np.add.outer(-x, x) dy", "acc[:] = getAcc(pos, mass, G, softening) # (1/2) kick vel", "a @dc.program def getEnergy(pos: dc.float64[N, 3], vel: dc.float64[N, 3], mass:", "triangle, to count each interaction only once # PE =", "* inv_r3) @ mass az = G * (dz *", "of masses G is Newton's Gravitational constant KE is the", "@dc.program # def hstack(out: dc.float64[N, 3], a: dc.float64[N], # b:", "calculate initial energy of system KE = np.ndarray(Nt + 1,", "1], PE[i + 1] = getEnergy(pos, vel, mass, G) return", "= np.transpose(x) - x # dy = np.transpose(y) - y", "('N', 'Nt')) # @dc.program # def hstack(out: dc.float64[N, 3], a:", "dc.float64[N], # b: dc.float64[N], c: dc.float64[N]): # out[:, 0] =", "matrix of velocities mass is an N x 1 vector", "\"\"\" Get kinetic energy (KE) and potential energy (PE) of", "= pos[:, 1:2] z = pos[:, 2:3] # matrix that", "G: dc.float64, softening: dc.float64): # Convert to Center-of-Mass frame #", "t = 0.0 # Simulation Main Loop for i in", "G) t = 0.0 # Simulation Main Loop for i", "initial gravitational accelerations acc = getAcc(pos, mass, G, softening) #", "(N, 1)) * vel, axis=0), np.mean(mass)) np.subtract(vel, np.mean(np.reshape(mass, (N, 1))", "\"\"\" # Kinetic Energy: # KE = 0.5 * np.sum(np.sum(", "Law of Gravity \"\"\" N, Nt = (dc.symbol(s, dtype=dc.int64) for", "Get kinetic energy (KE) and potential energy (PE) of simulation", "dy = y.T - y # dz = z.T -", "the kinetic energy of the system PE is the potential", "G return KE, PE @dc.program def nbody(mass: dc.float64[N], pos: dc.float64[N,", "initial energy of system KE = np.ndarray(Nt + 1, 
dtype=np.float64)", "the system PE is the potential energy of the system", "# b: dc.float64[N], c: dc.float64[N]): # out[:, 0] = a", ") KE = 0.5 * np.sum(np.reshape(mass, (N, 1)) * vel**2)", "acc * dt / 2.0 # update time t +=", "G: dc.float64): \"\"\" Get kinetic energy (KE) and potential energy", "G, softening) # calculate initial energy of system KE =", "Nt = (dc.symbol(s, dtype=dc.int64) for s in ('N', 'Nt')) #", "KE = np.ndarray(Nt + 1, dtype=np.float64) PE = np.ndarray(Nt +", "a = np.hstack((ax,ay,az)) a = np.ndarray((N, 3), dtype=np.float64) # hstack(a,", "1)) * vel, axis=0) / np.mean(mass) # tmp = np.divide(np.mean(np.reshape(mass,", "an N x 3 matrix of positions mass is an", "of positions vel is N x 3 matrix of velocities", "inv_r, out=inv_r, where=I) # sum over upper triangle, to count", "TODO: Add GPL-3.0 License import numpy as np import dace", "PE @dc.program def nbody(mass: dc.float64[N], pos: dc.float64[N, 3], vel: dc.float64[N," ]
[ "instance_relative_config=True) application.config.from_object(configurations[configuration]) application.config.from_pyfile('configuration.py', silent=True) database.init_app(application) from application.init.views import blueprint application.register_blueprint(blueprint)", "SQLAlchemy configurations = { 'development': 'configurations.DevelopmentConfiguration', 'testing': 'configurations.TestingConfiguration', 'staging': 'configurations.StagingConfiguration',", "database = SQLAlchemy() def create_application(configuration): application = Flask(__name__, instance_relative_config=True) application.config.from_object(configurations[configuration])", "import Flask from flask_sqlalchemy import SQLAlchemy configurations = { 'development':", "Flask from flask_sqlalchemy import SQLAlchemy configurations = { 'development': 'configurations.DevelopmentConfiguration',", "'configurations.ProductionConfiguration' } database = SQLAlchemy() def create_application(configuration): application = Flask(__name__,", "from flask import Flask from flask_sqlalchemy import SQLAlchemy configurations =", "'testing': 'configurations.TestingConfiguration', 'staging': 'configurations.StagingConfiguration', 'production': 'configurations.ProductionConfiguration' } database = SQLAlchemy()", "'configurations.TestingConfiguration', 'staging': 'configurations.StagingConfiguration', 'production': 'configurations.ProductionConfiguration' } database = SQLAlchemy() def", "{ 'development': 'configurations.DevelopmentConfiguration', 'testing': 'configurations.TestingConfiguration', 'staging': 'configurations.StagingConfiguration', 'production': 'configurations.ProductionConfiguration' }", "SQLAlchemy() def create_application(configuration): application = Flask(__name__, instance_relative_config=True) application.config.from_object(configurations[configuration]) application.config.from_pyfile('configuration.py', silent=True)", "def create_application(configuration): application = Flask(__name__, 
instance_relative_config=True) application.config.from_object(configurations[configuration]) application.config.from_pyfile('configuration.py', silent=True) database.init_app(application)", "flask_sqlalchemy import SQLAlchemy configurations = { 'development': 'configurations.DevelopmentConfiguration', 'testing': 'configurations.TestingConfiguration',", "flask import Flask from flask_sqlalchemy import SQLAlchemy configurations = {", "'production': 'configurations.ProductionConfiguration' } database = SQLAlchemy() def create_application(configuration): application =", "silent=True) database.init_app(application) from application.init.views import blueprint application.register_blueprint(blueprint) from application.metadata.views import", "= Flask(__name__, instance_relative_config=True) application.config.from_object(configurations[configuration]) application.config.from_pyfile('configuration.py', silent=True) database.init_app(application) from application.init.views import", "'configurations.StagingConfiguration', 'production': 'configurations.ProductionConfiguration' } database = SQLAlchemy() def create_application(configuration): application", "<gh_stars>0 # Third-party imports from flask import Flask from flask_sqlalchemy", "'development': 'configurations.DevelopmentConfiguration', 'testing': 'configurations.TestingConfiguration', 'staging': 'configurations.StagingConfiguration', 'production': 'configurations.ProductionConfiguration' } database", "application.config.from_object(configurations[configuration]) application.config.from_pyfile('configuration.py', silent=True) database.init_app(application) from application.init.views import blueprint application.register_blueprint(blueprint) from", "from flask_sqlalchemy import SQLAlchemy configurations = { 'development': 'configurations.DevelopmentConfiguration', 'testing':", "configurations = { 'development': 'configurations.DevelopmentConfiguration', 'testing': 'configurations.TestingConfiguration', 'staging': 
'configurations.StagingConfiguration', 'production':", "Third-party imports from flask import Flask from flask_sqlalchemy import SQLAlchemy", "Flask(__name__, instance_relative_config=True) application.config.from_object(configurations[configuration]) application.config.from_pyfile('configuration.py', silent=True) database.init_app(application) from application.init.views import blueprint", "from application.init.views import blueprint application.register_blueprint(blueprint) from application.metadata.views import blueprint application.register_blueprint(blueprint)", "'staging': 'configurations.StagingConfiguration', 'production': 'configurations.ProductionConfiguration' } database = SQLAlchemy() def create_application(configuration):", "= { 'development': 'configurations.DevelopmentConfiguration', 'testing': 'configurations.TestingConfiguration', 'staging': 'configurations.StagingConfiguration', 'production': 'configurations.ProductionConfiguration'", "import SQLAlchemy configurations = { 'development': 'configurations.DevelopmentConfiguration', 'testing': 'configurations.TestingConfiguration', 'staging':", "application.init.views import blueprint application.register_blueprint(blueprint) from application.metadata.views import blueprint application.register_blueprint(blueprint) return", "} database = SQLAlchemy() def create_application(configuration): application = Flask(__name__, instance_relative_config=True)", "application = Flask(__name__, instance_relative_config=True) application.config.from_object(configurations[configuration]) application.config.from_pyfile('configuration.py', silent=True) database.init_app(application) from application.init.views", "= SQLAlchemy() def create_application(configuration): application = Flask(__name__, instance_relative_config=True) application.config.from_object(configurations[configuration]) application.config.from_pyfile('configuration.py',", "import blueprint application.register_blueprint(blueprint) from 
application.metadata.views import blueprint application.register_blueprint(blueprint) return application", "'configurations.DevelopmentConfiguration', 'testing': 'configurations.TestingConfiguration', 'staging': 'configurations.StagingConfiguration', 'production': 'configurations.ProductionConfiguration' } database =", "application.config.from_pyfile('configuration.py', silent=True) database.init_app(application) from application.init.views import blueprint application.register_blueprint(blueprint) from application.metadata.views", "# Third-party imports from flask import Flask from flask_sqlalchemy import", "database.init_app(application) from application.init.views import blueprint application.register_blueprint(blueprint) from application.metadata.views import blueprint", "imports from flask import Flask from flask_sqlalchemy import SQLAlchemy configurations", "create_application(configuration): application = Flask(__name__, instance_relative_config=True) application.config.from_object(configurations[configuration]) application.config.from_pyfile('configuration.py', silent=True) database.init_app(application) from" ]
[ "} print(\"I wanna drink 1 bottle of beer...\") take_beer(fridge) print(\"Oooh,", "{}. Let's continue\".format(e)) print(\"Fallback. Try to take 1 bottle of", "take_beer(fridge, number=1): if \"beer\" not in fridge: raise Exception(\"No beer", "2 bottle of beer...\") try: take_beer(fridge, 2) except Exception as", "fridge[\"beer\"] -= number if __name__ == \"__main__\": fridge = {", "if __name__ == \"__main__\": fridge = { \"beer\": 2, \"milk\":", "def take_beer(fridge, number=1): if \"beer\" not in fridge: raise Exception(\"No", "print(\"Error: {}. Let's continue\".format(e)) print(\"Fallback. Try to take 1 bottle", "\"beer\": 2, \"milk\": 1, \"meat\": 3, } print(\"I wanna drink", "\"__main__\": fridge = { \"beer\": 2, \"milk\": 1, \"meat\": 3,", "Exception(\"No beer at all:(\") if number > fridge[\"beer\"]: raise Exception(\"Not", "fridge: raise Exception(\"No beer at all:(\") if number > fridge[\"beer\"]:", "bottle of beer...\") take_beer(fridge) print(\"Oooh, great!\") print(\"I wanna drink 2", "beer...\") try: take_beer(fridge, 2) except Exception as e: print(\"Error: {}.", "fridge = { \"beer\": 2, \"milk\": 1, \"meat\": 3, }", "try: take_beer(fridge, 2) except Exception as e: print(\"Error: {}. Let's", "> fridge[\"beer\"]: raise Exception(\"Not enough beer:(\") fridge[\"beer\"] -= number if", "Exception as e: print(\"Error: {}. Let's continue\".format(e)) print(\"Fallback. Try to", "all:(\") if number > fridge[\"beer\"]: raise Exception(\"Not enough beer:(\") fridge[\"beer\"]", "beer at all:(\") if number > fridge[\"beer\"]: raise Exception(\"Not enough", "take_beer(fridge, 2) except Exception as e: print(\"Error: {}. Let's continue\".format(e))", "beer...\") take_beer(fridge) print(\"Oooh, great!\") print(\"I wanna drink 2 bottle of", "in fridge: raise Exception(\"No beer at all:(\") if number >", "Try to take 1 bottle of beer...\") take_beer(fridge, 1) print(\"Oooh,", "print(\"Fallback. 
Try to take 1 bottle of beer...\") take_beer(fridge, 1)", "number if __name__ == \"__main__\": fridge = { \"beer\": 2,", "print(\"I wanna drink 2 bottle of beer...\") try: take_beer(fridge, 2)", "\"milk\": 1, \"meat\": 3, } print(\"I wanna drink 1 bottle", "\"meat\": 3, } print(\"I wanna drink 1 bottle of beer...\")", "Exception(\"Not enough beer:(\") fridge[\"beer\"] -= number if __name__ == \"__main__\":", "e: print(\"Error: {}. Let's continue\".format(e)) print(\"Fallback. Try to take 1", "to take 1 bottle of beer...\") take_beer(fridge, 1) print(\"Oooh, awesome!\")", "\"beer\" not in fridge: raise Exception(\"No beer at all:(\") if", "Let's continue\".format(e)) print(\"Fallback. Try to take 1 bottle of beer...\")", "at all:(\") if number > fridge[\"beer\"]: raise Exception(\"Not enough beer:(\")", "1 bottle of beer...\") take_beer(fridge) print(\"Oooh, great!\") print(\"I wanna drink", "fridge[\"beer\"]: raise Exception(\"Not enough beer:(\") fridge[\"beer\"] -= number if __name__", "2) except Exception as e: print(\"Error: {}. 
Let's continue\".format(e)) print(\"Fallback.", "great!\") print(\"I wanna drink 2 bottle of beer...\") try: take_beer(fridge,", "wanna drink 2 bottle of beer...\") try: take_beer(fridge, 2) except", "of beer...\") try: take_beer(fridge, 2) except Exception as e: print(\"Error:", "drink 2 bottle of beer...\") try: take_beer(fridge, 2) except Exception", "enough beer:(\") fridge[\"beer\"] -= number if __name__ == \"__main__\": fridge", "3, } print(\"I wanna drink 1 bottle of beer...\") take_beer(fridge)", "beer:(\") fridge[\"beer\"] -= number if __name__ == \"__main__\": fridge =", "not in fridge: raise Exception(\"No beer at all:(\") if number", "if \"beer\" not in fridge: raise Exception(\"No beer at all:(\")", "__name__ == \"__main__\": fridge = { \"beer\": 2, \"milk\": 1,", "== \"__main__\": fridge = { \"beer\": 2, \"milk\": 1, \"meat\":", "print(\"Oooh, great!\") print(\"I wanna drink 2 bottle of beer...\") try:", "2, \"milk\": 1, \"meat\": 3, } print(\"I wanna drink 1", "raise Exception(\"No beer at all:(\") if number > fridge[\"beer\"]: raise", "= { \"beer\": 2, \"milk\": 1, \"meat\": 3, } print(\"I", "1, \"meat\": 3, } print(\"I wanna drink 1 bottle of", "take_beer(fridge) print(\"Oooh, great!\") print(\"I wanna drink 2 bottle of beer...\")", "as e: print(\"Error: {}. Let's continue\".format(e)) print(\"Fallback. Try to take", "except Exception as e: print(\"Error: {}. Let's continue\".format(e)) print(\"Fallback. 
Try", "number > fridge[\"beer\"]: raise Exception(\"Not enough beer:(\") fridge[\"beer\"] -= number", "of beer...\") take_beer(fridge) print(\"Oooh, great!\") print(\"I wanna drink 2 bottle", "wanna drink 1 bottle of beer...\") take_beer(fridge) print(\"Oooh, great!\") print(\"I", "bottle of beer...\") try: take_beer(fridge, 2) except Exception as e:", "-= number if __name__ == \"__main__\": fridge = { \"beer\":", "print(\"I wanna drink 1 bottle of beer...\") take_beer(fridge) print(\"Oooh, great!\")", "{ \"beer\": 2, \"milk\": 1, \"meat\": 3, } print(\"I wanna", "continue\".format(e)) print(\"Fallback. Try to take 1 bottle of beer...\") take_beer(fridge,", "drink 1 bottle of beer...\") take_beer(fridge) print(\"Oooh, great!\") print(\"I wanna", "if number > fridge[\"beer\"]: raise Exception(\"Not enough beer:(\") fridge[\"beer\"] -=", "number=1): if \"beer\" not in fridge: raise Exception(\"No beer at", "raise Exception(\"Not enough beer:(\") fridge[\"beer\"] -= number if __name__ ==" ]
[]
[ "merge the results together. # # We size the non-members", "filter used to fetch state from the database. cache_seq_num_members (int):", "2.0 (the \"License\"); # you may not use this file", "stuff we pulled out of the database. for group, group_state_dict", "= ?\", ((sg,) for sg in state_groups_to_delete), ) @defer.inlineCallbacks def", "all state (which DictionaryCache will similarly handle fine) # and", "= v else: state_dict_non_members[k] = v self._state_group_members_cache.update( cache_seq_num_members, key=group, value=state_dict_members,", "self._state_group_cache, state_filter=non_member_filter ) ( member_state, incomplete_groups_m, ) = yield self._get_state_for_groups_using_cache(", "group, group_state_dict in group_to_state_dict.items(): state_dict_members = {} state_dict_non_members = {}", "def _get_state_for_groups( self, groups: Iterable[int], state_filter: StateFilter = StateFilter.all() ):", "curr_state[sg] self.db.simple_delete_txn( txn, table=\"state_groups_state\", keyvalues={\"state_group\": sg} ) self.db.simple_delete_txn( txn, table=\"state_group_edges\",", "else: self.db.simple_insert_many_txn( txn, table=\"state_groups_state\", values=[ { \"state_group\": state_group, \"room_id\": room_id,", "in chunks: res = yield self.db.runInteraction( \"_get_state_groups_from_groups\", self._get_state_groups_from_groups_txn, chunk, state_filter,", ") # And finally update the result dict, by filtering", "state_groups_state WHERE state_group = ?\", ((sg,) for sg in state_groups_to_delete),", "in state_dict_ids and key not in known_absent: missing_types = True", "state (which DictionaryCache will similarly handle fine) # and then", "list of state groups for which we want to get", "# We don't know if we fetched all the state", "or want everything, either way # `is_all` tells us whether", "fields, apart from this subset\". 
# This is problematic when", "iterable=state_groups_to_delete, keyvalues={}, retcols=(\"state_group\",), ) remaining_state_groups = { row[\"state_group\"] for row", "state groups states logger.info(\"[purge] removing %s from state_groups_state\", room_id) self.db.simple_delete_many_txn(", "requires this behaviour, # as without it the cache has", "there are wild # cards in the filter, but that's", "\"[purge] de-delta-ing %i remaining state groups\", len(remaining_state_groups), ) # Now", "(dict): The state to store. Map of (type, state_key) to", "under the License. import logging from collections import namedtuple from", "non-members cache to be smaller than the members cache as", "got_all = self._get_state_for_group_using_cache( cache, group, state_filter ) results[group] = state_dict_ids", "try to return a previous group and a delta between", "of users\", as DictionaryCache's API isn't # rich enough to", "filter used to fetch state from the database. Returns 2-tuple", "self._state_group_members_cache.update, self._state_group_members_cache.sequence, key=state_group, value=dict(current_member_state_ids), ) current_non_member_state_ids = { s: ev", "\"\"\" results = {} chunks = [groups[i : i +", "of state groups, optionally filtering by type/state_key Args: groups: list", "DictionaryCache's API isn't # rich enough to say \"please cache", "key[1], \"event_id\": state_id, } for key, state_id in delta_ids.items() ],", ") potential_hops = self._count_state_group_hops_txn(txn, prev_group) if prev_group and potential_hops <", "prev_group: return _GetStateGroupDelta(None, None) delta_ids = self.db.simple_select_list_txn( txn, table=\"state_groups_state\", keyvalues={\"state_group\":", "tracking non-member events, # and the other for tracking member_events.", "found %i state groups to delete\", len(state_groups_to_delete) ) rows =", "any of our requested types are missing from the cache", "current_state_ids.items() if s[0] != EventTypes.Member } txn.call_after( 
self._state_group_cache.update, self._state_group_cache.sequence, key=state_group,", "state for this group, but # limit members to this", "keyvalues={\"state_group\": sg} ) self.db.simple_delete_txn( txn, table=\"state_group_edges\", keyvalues={\"state_group\": sg} ) self.db.simple_insert_many_txn(", "the database. for group, group_state_dict in group_to_state_dict.items(): # We just", "= self._count_state_group_hops_txn(txn, prev_group) if prev_group and potential_hops < MAX_STATE_DELTA_HOPS: self.db.simple_insert_txn(", "which negates the efficiency being sought. # # Rather than", "rich enough to say \"please cache any of these fields,", "`current_state_ids`, if `prev_group` was given. Same format as `current_state_ids`. current_state_ids", "state groups logger.info(\"[purge] removing %s from state_groups\", room_id) self.db.simple_delete_many_txn( txn,", "return results def _get_state_for_group_using_cache(self, cache, group, state_filter): \"\"\"Checks if group", "License for the specific language governing permissions and # limitations", "cache, group, state_filter ) results[group] = state_dict_ids if not got_all:", "_GetStateGroupDelta(None, None) delta_ids = self.db.simple_select_list_txn( txn, table=\"state_groups_state\", keyvalues={\"state_group\": state_group}, retcols=(\"type\",", "state = dict(non_member_state) for group in groups: state[group].update(member_state[group]) # Now", "the old and the new. Returns: (prev_group, delta_ids), where both", "to non delta versions. 
for sg in remaining_state_groups: logger.info(\"[purge] de-delta-ing", "values=[ { \"state_group\": state_group, \"room_id\": room_id, \"type\": key[0], \"state_key\": key[1],", "state was calculated room_id (str) prev_group (int|None): A previous state", "This means that lazy loading # queries can be made", "another update) current_member_state_ids = { s: ev for (s, ev)", "Returns: Tuple of dict of state_group_id to state map of", "= False if state_filter.has_wildcards(): # We don't know if we", "incomplete_groups_m | incomplete_groups_nm if not incomplete_groups: return state cache_sequence_nm =", "prev_group) if prev_group and potential_hops < MAX_STATE_DELTA_HOPS: self.db.simple_insert_txn( txn, table=\"state_group_edges\",", "member cache since last lookup in cache cache_seq_num_non_members (int): Sequence", "remaining_state_groups: logger.info(\"[purge] de-delta-ing remaining state group %s\", sg) curr_state =", "optionally filtering by type/state_key Args: groups: list of state groups", "in range(0, len(groups), 100)] for chunk in chunks: res =", "groups: list of state groups for which we want to", "else: # `concrete_types()` will only return a subset when there", "if `prev_group` was given. Same format as `current_state_ids`. current_state_ids (dict):", "\"event_id\"), ) return _GetStateGroupDelta( prev_group, {(row[\"type\"], row[\"state_key\"]): row[\"event_id\"] for row", "will only return a subset when there are wild #", "for row in delta_ids}, ) return self.db.runInteraction( \"get_state_group_delta\", _get_state_group_delta_txn )", "synapse.storage.data_stores.state.bg_updates import StateBackgroundUpdateStore from synapse.storage.database import Database from synapse.storage.state import", "\"[purge] found %i state groups to delete\", len(state_groups_to_delete) ) rows", "when there are wild # cards in the filter, but", "room_id, state_groups_to_delete, ) def _purge_room_state_txn(self, txn, room_id, state_groups_to_delete): # first", "to state map. 
\"\"\" results = {} chunks = [groups[i", "all member events member_types = None else: # `concrete_types()` will", "at `prev_group` and `current_state_ids`, if `prev_group` was given. Same format", "if s[0] != EventTypes.Member } txn.call_after( self._state_group_cache.update, self._state_group_cache.sequence, key=state_group, value=dict(current_non_member_state_ids),", "self.db.simple_insert_txn( txn, table=\"state_group_edges\", values={\"state_group\": state_group, \"prev_state_group\": prev_group}, ) self.db.simple_insert_many_txn( txn,", "we turn the state groups that reference to-be-deleted state #", "\"\"\" def _store_state_group_txn(txn): if current_state_ids is None: # AFAIK, this", "returns the # complete list of event types we're wanting.", "from synapse.util.caches.descriptors import cached from synapse.util.caches.dictionary_cache import DictionaryCache logger =", "me all the state for this group, but # limit", "purge_room_state(self, room_id, state_groups_to_delete): \"\"\"Deletes all record of a room from", "key, state_id in delta_ids.items() ], ) else: self.db.simple_insert_many_txn( txn, table=\"state_groups_state\",", "through - either the normal state cache or the specific", "SQLBaseStore from synapse.storage.data_stores.state.bg_updates import StateBackgroundUpdateStore from synapse.storage.database import Database from", "OF ANY KIND, either express or implied. # See the", "See the License for the specific language governing permissions and", "True break return state_filter.filter_state(state_dict_ids), not missing_types @defer.inlineCallbacks def _get_state_for_groups( self,", "set of state, returning a newly assigned state group. Args:", "to in writing, software # distributed under the License is", "of state, returning a newly assigned state group. Args: event_id", "sequence like this as the state group map # is", "as the state group map # is immutable. 
(If the", "import Database from synapse.storage.state import StateFilter from synapse.types import StateMap", ") if not prev_group: return _GetStateGroupDelta(None, None) delta_ids = self.db.simple_select_list_txn(", "from state group to state dict state_filter (StateFilter): The state", "None\") state_group = self.database_engine.get_next_state_group_id(txn) self.db.simple_insert_txn( txn, table=\"state_groups\", values={\"id\": state_group, \"room_id\":", "or agreed to in writing, software # distributed under the", "state at each of a list of state groups, optionally", "# We size the non-members cache to be smaller than", "} for key, state_id in delta_ids.items() ], ) else: self.db.simple_insert_many_txn(", "state, returning a newly assigned state group. Args: event_id (str):", "_GetStateGroupDelta( namedtuple(\"_GetStateGroupDelta\", (\"prev_group\", \"delta_ids\")) ): \"\"\"Return type of get_state_group_delta that", "where both may be None. \"\"\" def _get_state_group_delta_txn(txn): prev_group =", "group map # is immutable. (If the map wasn't immutable", "caching \"\"\" __slots__ = [] def __len__(self): return len(self.delta_ids) if", "room_id (str) prev_group (int|None): A previous state group for the", "return self.db.runInteraction( \"get_state_group_delta\", _get_state_group_delta_txn ) @defer.inlineCallbacks def _get_state_groups_from_groups( self, groups:", "# would query the members cache for a specific subset", "state_groups_to_delete): # first we have to delete the state groups", "compliance with the License. 
# You may obtain a copy", "super(StateGroupDataStore, self).__init__(database, db_conn, hs) # Originally the state store used", "\"\"\" return self.db.runInteraction( \"purge_unreferenced_state_groups\", self._purge_unreferenced_state_groups, room_id, state_groups_to_delete, ) def _purge_unreferenced_state_groups(self,", "# `concrete_types()` will only return a subset when there are", "= yield self.db.simple_select_many_batch( table=\"state_group_edges\", column=\"prev_state_group\", iterable=state_groups, keyvalues={}, retcols=(\"prev_state_group\", \"state_group\"), desc=\"get_previous_state_groups\",", "{ s: ev for (s, ev) in current_state_ids.items() if s[0]", "self._state_group_members_cache, state_filter=member_filter ) state = dict(non_member_state) for group in groups:", "False if state_filter.has_wildcards(): # We don't know if we fetched", "txn, table=\"state_groups\", values={\"id\": state_group, \"room_id\": room_id, \"event_id\": event_id}, ) #", "set() for group in set(groups): state_dict_ids, got_all = self._get_state_for_group_using_cache( cache,", "state_id, } for key, state_id in current_state_ids.items() ], ) #", "# ... and the state group edges logger.info(\"[purge] removing %s", "be in the same room). state_groups_to_delete (Collection[int]): Set of all", "by type/state_key, querying from a specific cache. Args: groups: list", "= v self._state_group_members_cache.update( cache_seq_num_members, key=group, value=state_dict_members, fetched_keys=member_types, ) self._state_group_cache.update( cache_seq_num_non_members,", "everything. 
return state_filter.filter_state(state_dict_ids), is_all # tracks whether any of our", "Set, Tuple from twisted.internet import defer from synapse.api.constants import EventTypes", "the cache missing_types = False if state_filter.has_wildcards(): # We don't", "self._state_group_members_cache.update( cache_seq_num_members, key=group, value=state_dict_members, fetched_keys=member_types, ) self._state_group_cache.update( cache_seq_num_non_members, key=group, value=state_dict_non_members,", "return state_filter.filter_state(state_dict_ids), not missing_types @defer.inlineCallbacks def _get_state_for_groups( self, groups: Iterable[int],", "row in rows if row[\"state_group\"] not in state_groups_to_delete } logger.info(", "non-member events, # and the other for tracking member_events. This", "\"state_key\": key[1], \"event_id\": state_id, } for key, state_id in current_state_ids.items()", "txn.call_after( self._state_group_members_cache.update, self._state_group_members_cache.sequence, key=state_group, value=dict(current_member_state_ids), ) current_non_member_state_ids = { s:", "} for key, state_id in curr_state.items() ], ) logger.info(\"[purge] removing", "not use this file except in compliance with the License.", "we need from the database anyway. 
state[group] = state_filter.filter_state(group_state_dict) return", "# # However, this poses problems when performing complicated queries", "state groups for a given set of groups from the", "was calculated room_id (str) prev_group (int|None): A previous state group", "table=\"state_group_edges\", column=\"prev_state_group\", iterable=state_groups_to_delete, keyvalues={}, retcols=(\"state_group\",), ) remaining_state_groups = { row[\"state_group\"]", "And finally update the result dict, by filtering out any", "state tables Args: room_id (str): state_groups_to_delete (list[int]): State groups to", "you may not use this file except in compliance with", "range(0, len(groups), 100)] for chunk in chunks: res = yield", "`got_all`). `got_all` is a bool indicating if we successfully retrieved", "fetched_keys=non_member_types, ) def store_state_group( self, event_id, room_id, prev_group, delta_ids, current_state_ids", "keys for a # given dict which is an entry", "the chain # of deltas isn't tooo long, as otherwise", "in current_state_ids.items() ], ) # Prefill the state group caches", "{(row[\"type\"], row[\"state_key\"]): row[\"event_id\"] for row in delta_ids}, ) return self.db.runInteraction(", "# # We size the non-members cache to be smaller", "retcols=(\"state_group\",), ) remaining_state_groups = { row[\"state_group\"] for row in rows", "cache the # event IDs for the state types in", "OpenMarket Ltd # # Licensed under the Apache License, Version", "# We fetched all member events member_types = None else:", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", ") self.db.simple_insert_many_txn( txn, table=\"state_groups_state\", values=[ { \"state_group\": sg, \"room_id\": room_id,", "previous state group. 
\"\"\" rows = yield self.db.simple_select_many_batch( table=\"state_group_edges\", column=\"prev_state_group\",", "isn't # rich enough to say \"please cache any of", "of group ids to state dicts which we will pass", "prev_group = self.db.simple_select_one_onecol_txn( txn, table=\"state_group_edges\", keyvalues={\"state_group\": state_group}, retcol=\"prev_state_group\", allow_none=True, )", "synapse.storage._base import SQLBaseStore from synapse.storage.data_stores.state.bg_updates import StateBackgroundUpdateStore from synapse.storage.database import", "# state events for the group, which negates the efficiency", "the cache of group ids to state dicts which we", "state group for the room, optional. delta_ids (dict|None): The delta", "groups for which we want to get the state. state_filter:", "hasn't been tuned yet 50000, ) self._state_group_members_cache = DictionaryCache( \"*stateGroupMembersCache*\",", "state group to previous state group. \"\"\" rows = yield", "the other for tracking member_events. This means that lazy loading", "`is_all` tells us whether we've gotten everything. return state_filter.filter_state(state_dict_ids), is_all", "\"delta_ids\")) ): \"\"\"Return type of get_state_group_delta that implements __len__, which", "dicts which we will pass through - either the normal", "ids either missing from the cache or incomplete. \"\"\" results", "the cache, if False we need to query the DB", "\"\"\" return self.db.runInteraction( \"purge_room_state\", self._purge_room_state_txn, room_id, state_groups_to_delete, ) def _purge_room_state_txn(self,", "if not incomplete_groups: return state cache_sequence_nm = self._state_group_cache.sequence cache_sequence_m =", "we're wanting. 
for key in state_filter.concrete_types(): if key not in", "table=\"state_groups_state\", values=[ { \"state_group\": state_group, \"room_id\": room_id, \"type\": key[0], \"state_key\":", "= DictionaryCache( \"*stateGroupCache*\", # TODO: this hasn't been tuned yet", "from the DB for the # member vs non-member caches.", "two halves - one for tracking non-member events, # and", "the DB for the missing state. \"\"\" is_all, known_absent, state_dict_ids", "-> Tuple[Dict[int, StateMap[str]], Set[int]]: \"\"\"Gets the state at each of", "the state groups states logger.info(\"[purge] removing %s from state_groups_state\", room_id)", "for group, group_state_dict in group_to_state_dict.items(): state_dict_members = {} state_dict_non_members =", "governing permissions and # limitations under the License. import logging", "caches ( non_member_state, incomplete_groups_nm, ) = yield self._get_state_for_groups_using_cache( groups, self._state_group_cache,", "can never happen raise Exception(\"current_state_ids cannot be None\") state_group =", "state groups to delete. \"\"\" return self.db.runInteraction( \"purge_unreferenced_state_groups\", self._purge_unreferenced_state_groups, room_id,", "in # the filter that are wildcards, so we have", "\"\"\" member_filter, non_member_filter = state_filter.get_member_split() # Now we look them", "state cache_sequence_nm = self._state_group_cache.sequence cache_sequence_m = self._state_group_members_cache.sequence # Help the", "Args: event_id (str): The event ID for which the state", "pulled out of the database. for group, group_state_dict in group_to_state_dict.items():", "# rich enough to say \"please cache any of these", "all record of a room from state tables Args: room_id", "a given state group (i.e. a subset of the keys", "a bool indicating if we successfully retrieved all requests state", "group to state map. 
\"\"\" member_filter, non_member_filter = state_filter.get_member_split() #", "group, which negates the efficiency being sought. # # Rather", "Args: state_groups (Iterable[int]) Returns: Deferred[dict[int, int]]: mapping from state group", "state group. \"\"\" rows = yield self.db.simple_select_many_batch( table=\"state_group_edges\", column=\"prev_state_group\", iterable=state_groups,", "from state_groups_state\", room_id) self.db.simple_delete_many_txn( txn, table=\"state_groups_state\", column=\"state_group\", iterable=state_groups_to_delete, keyvalues={}, )", "from synapse.storage.state import StateFilter from synapse.types import StateMap from synapse.util.caches.descriptors", "(Collection[int]): Set of all state groups to delete. \"\"\" return", "the database. cache_seq_num_members (int): Sequence number of member cache since", "retcol=\"id\", allow_none=True, ) if not is_in_db: raise Exception( \"Trying to", "state_filter.filter_state(group_state_dict) return state def _get_state_for_groups_using_cache( self, groups: Iterable[int], cache: DictionaryCache,", "= state_filter.filter_state(group_state_dict) return state def _get_state_for_groups_using_cache( self, groups: Iterable[int], cache:", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", ") self.db.simple_delete_txn( txn, table=\"state_group_edges\", keyvalues={\"state_group\": sg} ) self.db.simple_insert_many_txn( txn, table=\"state_groups_state\",", "DictionaryCache( \"*stateGroupCache*\", # TODO: this hasn't been tuned yet 50000,", "and a delta between the old and the new. Returns:", "state_filter=db_state_filter ) # Now lets update the caches self._insert_into_cache( group_to_state_dict,", "was given. Same format as `current_state_ids`. current_state_ids (dict): The state", "will pass through - either the normal state cache or", "row in rows} def purge_room_state(self, room_id, state_groups_to_delete): \"\"\"Deletes all record", "cache or incomplete. 
\"\"\" results = {} incomplete_groups = set()", "def _purge_unreferenced_state_groups(self, txn, room_id, state_groups_to_delete): logger.info( \"[purge] found %i state", "# This is problematic when lazy loading members, which requires", "# Rather than overcomplicating DictionaryCache's API, we instead split the", "{} for k, v in group_state_dict.items(): if k[0] == EventTypes.Member:", "cache. Args: group_to_state_dict (dict): The new entries pulled from database.", "performing complicated queries # on the store - for instance:", "Sequence number of member cache since last lookup in cache", "to state map of entries in the cache, and the", "replace any existing entries, as we will have loaded #", "all non member events non_member_types = None else: non_member_types =", "mapping from state group to previous state group. \"\"\" rows", "= self._state_group_cache.sequence cache_sequence_m = self._state_group_members_cache.sequence # Help the cache hit", "to previous state group. \"\"\" rows = yield self.db.simple_select_many_batch( table=\"state_group_edges\",", "file except in compliance with the License. # You may", "API, we instead split the # state_group_cache into two halves", "\"\"\" def _get_state_group_delta_txn(txn): prev_group = self.db.simple_select_one_onecol_txn( txn, table=\"state_group_edges\", keyvalues={\"state_group\": state_group},", "all the state keys for the types in # the", "# However, this poses problems when performing complicated queries #", "See `_get_state_for_groups` Args: cache(DictionaryCache): the state group cache to use", "given set of groups from the database, filtering on types", "state_filter): \"\"\"Checks if group is in cache. See `_get_state_for_groups` Args:", "\"DELETE FROM state_groups WHERE id = ?\", ((sg,) for sg", "query the DB for the missing state. 
\"\"\" is_all, known_absent,", "Returns: Deferred[dict[int, int]]: mapping from state group to previous state", "key not in known_absent: missing_types = True break return state_filter.filter_state(state_dict_ids),", "groups: state[group].update(member_state[group]) # Now fetch any missing groups from the", "__len__, which lets us use the itrable flag when caching", "key=group, value=state_dict_members, fetched_keys=member_types, ) self._state_group_cache.update( cache_seq_num_non_members, key=group, value=state_dict_non_members, fetched_keys=non_member_types, )", "ev) in current_state_ids.items() if s[0] == EventTypes.Member } txn.call_after( self._state_group_members_cache.update,", "together. # # We size the non-members cache to be", "of a list of state groups, optionally filtering by type/state_key", "for a given state group ID). # # However, this", "as we will have loaded # everything we need from", "< MAX_STATE_DELTA_HOPS: self.db.simple_insert_txn( txn, table=\"state_group_edges\", values={\"state_group\": state_group, \"prev_state_group\": prev_group}, )", "!= EventTypes.Member } txn.call_after( self._state_group_cache.update, self._state_group_cache.sequence, key=state_group, value=dict(current_non_member_state_ids), ) return", "everything, either way # `is_all` tells us whether we've gotten", "- either the normal state cache or the specific members", "of using a DictionaryCache is that it can cache a", "return self.db.runInteraction( \"purge_unreferenced_state_groups\", self._purge_unreferenced_state_groups, room_id, state_groups_to_delete, ) def _purge_unreferenced_state_groups(self, txn,", "2-tuple (`state_dict`, `got_all`). `got_all` is a bool indicating if we", "language governing permissions and # limitations under the License. 
import", "a state group try to return a previous group and", "We need to work out which types we've fetched from", "@cached(max_entries=10000, iterable=True) def get_state_group_delta(self, state_group): \"\"\"Given a state group try", "we want to get the state. cache: the cache of", "longer referenced state groups and de-deltas any state groups that", "state_filter: StateFilter ) -> Tuple[Dict[int, StateMap[str]], Set[int]]: \"\"\"Gets the state", "to delete the state groups states logger.info(\"[purge] removing %s from", "to use the sequence like this as the state group", "(e.g. when we have wild cards) member_filter, non_member_filter = state_filter.get_member_split()", "= member_filter.concrete_types() if non_member_filter.is_full(): # We fetched all non member", "member_filter, non_member_filter = state_filter.get_member_split() if member_filter.is_full(): # We fetched all", "state groups that reference to-be-deleted state # groups to non", "v in group_state_dict.items(): if k[0] == EventTypes.Member: state_dict_members[k] = v", "we've fetched from the DB for the # member vs", "in rows if row[\"state_group\"] not in state_groups_to_delete } logger.info( \"[purge]", "s[0] == EventTypes.Member } txn.call_after( self._state_group_members_cache.update, self._state_group_members_cache.sequence, key=state_group, value=dict(current_member_state_ids), )", "rows if row[\"state_group\"] not in state_groups_to_delete } logger.info( \"[purge] de-delta-ing", "column=\"state_group\", iterable=state_groups_to_delete, keyvalues={}, ) # ... and the state group", "KIND, either express or implied. # See the License for", "= self.db.simple_select_many_txn( txn, table=\"state_group_edges\", column=\"prev_state_group\", iterable=state_groups_to_delete, keyvalues={}, retcols=(\"state_group\",), ) remaining_state_groups", "state filter used to fetch state from the database. 
Returns:", "list of state groups, optionally filtering by type/state_key Args: groups:", "incomplete_groups_nm if not incomplete_groups: return state cache_sequence_nm = self._state_group_cache.sequence cache_sequence_m", "to return a previous group and a delta between the", "the state group map # is immutable. (If the map", "smaller than the members cache as the # vast majority", "s[0] != EventTypes.Member } txn.call_after( self._state_group_cache.update, self._state_group_cache.sequence, key=state_group, value=dict(current_non_member_state_ids), )", "apart from this subset\". # This is problematic when lazy", "problematic when lazy loading members, which requires this behaviour, #", "not missing_types @defer.inlineCallbacks def _get_state_for_groups( self, groups: Iterable[int], state_filter: StateFilter", "key[1], \"event_id\": state_id, } for key, state_id in curr_state.items() ],", "self).__init__(database, db_conn, hs) # Originally the state store used a", "that reference to-be-deleted state # groups to non delta versions.", "None) delta_ids = self.db.simple_select_list_txn( txn, table=\"state_groups_state\", keyvalues={\"state_group\": state_group}, retcols=(\"type\", \"state_key\",", "self._count_state_group_hops_txn(txn, prev_group) if prev_group and potential_hops < MAX_STATE_DELTA_HOPS: self.db.simple_insert_txn( txn,", "lookup state_filter (StateFilter): The state filter used to fetch state", "group. \"\"\" rows = yield self.db.simple_select_many_batch( table=\"state_group_edges\", column=\"prev_state_group\", iterable=state_groups, keyvalues={},", "(today) is member events. self._state_group_cache = DictionaryCache( \"*stateGroupCache*\", # TODO:", "(the \"License\"); # you may not use this file except", "specific cache. 
Args: groups: list of state groups for which", "a DictionaryCache is that it can cache a subset #", "self._get_state_for_groups_using_cache( groups, self._state_group_cache, state_filter=non_member_filter ) ( member_state, incomplete_groups_m, ) =", "tracking member_events. This means that lazy loading # queries can", "logging from collections import namedtuple from typing import Dict, Iterable,", "get_state_group_delta(self, state_group): \"\"\"Given a state group try to return a", "up in the member and non-member caches ( non_member_state, incomplete_groups_nm,", "\"\"\"Inserts results from querying the database into the relevant cache.", "for the example above, you # would query the members", "= { s: ev for (s, ev) in current_state_ids.items() if", "= set() for group in set(groups): state_dict_ids, got_all = self._get_state_for_group_using_cache(", "got_all: incomplete_groups.add(group) return results, incomplete_groups def _insert_into_cache( self, group_to_state_dict, state_filter,", "prev_group}, ) self.db.simple_insert_many_txn( txn, table=\"state_groups_state\", values=[ { \"state_group\": state_group, \"room_id\":", "missing groups from the database incomplete_groups = incomplete_groups_m | incomplete_groups_nm", "groups to delete \"\"\" return self.db.runInteraction( \"purge_room_state\", self._purge_room_state_txn, room_id, state_groups_to_delete,", "need from the database anyway. 
state[group] = state_filter.filter_state(group_state_dict) return state", "# # Unless required by applicable law or agreed to", "txn.call_after( self._state_group_cache.update, self._state_group_cache.sequence, key=state_group, value=dict(current_non_member_state_ids), ) return state_group return self.db.runInteraction(\"store_state_group\",", "state group cache to use group(int): The state group to", "self._state_group_members_cache = DictionaryCache( \"*stateGroupMembersCache*\", 500000, ) @cached(max_entries=10000, iterable=True) def get_state_group_delta(self,", "(type, state_key) to event_id. Returns: Deferred[int]: The state group ID", "keyvalues={}, ) # ... and the state group edges logger.info(\"[purge]", "of dict of state_group_id to state map of entries in", "types in # the filter that are wildcards, so we", "then this prefill could # race with another update) current_member_state_ids", "cache for all state (which DictionaryCache will similarly handle fine)", "database. Returns: Tuple of dict of state_group_id to state map", "iterable=state_groups_to_delete, keyvalues={}, ) # ... and the state groups logger.info(\"[purge]", "database. cache_seq_num_members (int): Sequence number of member cache since last", "problems when performing complicated queries # on the store -", "cache_seq_num_non_members, key=group, value=state_dict_non_members, fetched_keys=non_member_types, ) def store_state_group( self, event_id, room_id,", "} txn.call_after( self._state_group_members_cache.update, self._state_group_members_cache.sequence, key=state_group, value=dict(current_member_state_ids), ) current_non_member_state_ids = {", "that we may # have missed some. 
missing_types = True", "v else: state_dict_non_members[k] = v self._state_group_members_cache.update( cache_seq_num_members, key=group, value=state_dict_members, fetched_keys=member_types,", "loading # queries can be made in a cache-friendly manner", "without it the cache has no choice but to speculatively", "Returns: (prev_group, delta_ids), where both may be None. \"\"\" def", "implied. # See the License for the specific language governing", "\"\"\"Fetch the previous groups of the given state groups. Args:", "Args: groups: list of state groups for which we want", "not incomplete_groups: return state cache_sequence_nm = self._state_group_cache.sequence cache_sequence_m = self._state_group_members_cache.sequence", "state group (i.e. a subset of the keys for a", "in group_to_state_dict.items(): # We just replace any existing entries, as", "ev for (s, ev) in current_state_ids.items() if s[0] == EventTypes.Member", "column=\"prev_state_group\", iterable=state_groups, keyvalues={}, retcols=(\"prev_state_group\", \"state_group\"), desc=\"get_previous_state_groups\", ) return {row[\"state_group\"]: row[\"prev_state_group\"]", "state. state_filter: The state filter used to fetch state from", "of (type, state_key) to event_id. Returns: Deferred[int]: The state group", "synapse.util.caches.descriptors import cached from synapse.util.caches.dictionary_cache import DictionaryCache logger = logging.getLogger(__name__)", "the state group ids either missing from the cache or", "self._get_state_groups_from_groups_txn(txn, [sg]) curr_state = curr_state[sg] self.db.simple_delete_txn( txn, table=\"state_groups_state\", keyvalues={\"state_group\": sg}", "# given dict which is an entry in the cache", "groups that reference them. Args: room_id: The room the state", "member_state, incomplete_groups_m, ) = yield self._get_state_for_groups_using_cache( groups, self._state_group_members_cache, state_filter=member_filter )", "state groups. 
Args: state_groups (Iterable[int]) Returns: Deferred[dict[int, int]]: mapping from", "for group, group_state_dict in group_to_state_dict.items(): # We just replace any", "when performing complicated queries # on the store - for", "However, this poses problems when performing complicated queries # on", "for the # member vs non-member caches. This should be", "(str) prev_group (int|None): A previous state group for the room,", "logger.info(\"[purge] removing %s from state_group_edges\", room_id) self.db.simple_delete_many_txn( txn, table=\"state_group_edges\", column=\"state_group\",", "import defer from synapse.api.constants import EventTypes from synapse.storage._base import SQLBaseStore", "whether any of our requested types are missing from the", "wild # cards in the filter, but that's fine. member_types", "events member_types = None else: # `concrete_types()` will only return", "state_groups_to_delete): logger.info( \"[purge] found %i state groups to delete\", len(state_groups_to_delete)", "(which DictionaryCache will similarly handle fine) # and then just", "and key not in known_absent: missing_types = True break return", "be an underestimate (e.g. when we have wild cards) member_filter,", "used to fetch state from the database. Returns: Tuple of", "self._purge_unreferenced_state_groups, room_id, state_groups_to_delete, ) def _purge_unreferenced_state_groups(self, txn, room_id, state_groups_to_delete): logger.info(", "state group to avoid hammering # on the state_group* tables.", "], ) logger.info(\"[purge] removing redundant state groups\") txn.executemany( \"DELETE FROM", "Either we have everything or want everything, either way #", "So for the example above, you # would query the", "txn, table=\"state_groups_state\", values=[ { \"state_group\": state_group, \"room_id\": room_id, \"type\": key[0],", "the # vast majority of state in Matrix (today) is", "filter used to fetch state from the database. 
Returns: Deferred[Dict[int,", "It's fine to use the sequence like this as the", "Database from synapse.storage.state import StateFilter from synapse.types import StateMap from", "Unless required by applicable law or agreed to in writing,", "hammering # on the state_group* tables. # # The point", "set(groups): state_dict_ids, got_all = self._get_state_for_group_using_cache( cache, group, state_filter ) results[group]", "txn, table=\"state_group_edges\", column=\"state_group\", iterable=state_groups_to_delete, keyvalues={}, ) # ... and the", "events. self._state_group_cache = DictionaryCache( \"*stateGroupCache*\", # TODO: this hasn't been", "the members cache for a specific subset of state keys", "some. missing_types = True else: # There aren't any wild", "the database anyway. state[group] = state_filter.filter_state(group_state_dict) return state def _get_state_for_groups_using_cache(", "groups to delete\", len(state_groups_to_delete) ) rows = self.db.simple_select_many_txn( txn, table=\"state_group_edges\",", "group_to_state_dict.items(): # We just replace any existing entries, as we", ") # ... and the state group edges logger.info(\"[purge] removing", "keys for the types in # the filter that are", "keys # (which DictionaryCache will handle efficiently and fine) and", "cache_seq_num_non_members, ): \"\"\"Inserts results from querying the database into the", "which lets us use the itrable flag when caching \"\"\"", "the specific language governing permissions and # limitations under the", "for a # given dict which is an entry in", "= yield self._get_state_for_groups_using_cache( groups, self._state_group_members_cache, state_filter=member_filter ) state = dict(non_member_state)", "False we need to query the DB for the missing", "events for a given state group (i.e. a subset of", "txn.executemany( \"DELETE FROM state_groups_state WHERE state_group = ?\", ((sg,) for", "delete. 
\"\"\" return self.db.runInteraction( \"purge_unreferenced_state_groups\", self._purge_unreferenced_state_groups, room_id, state_groups_to_delete, ) def", "state_group): \"\"\"Given a state group try to return a previous", "non_member_filter.concrete_types() for group, group_state_dict in group_to_state_dict.items(): state_dict_members = {} state_dict_non_members", "and # limitations under the License. import logging from collections", "self.db.runInteraction( \"get_state_group_delta\", _get_state_group_delta_txn ) @defer.inlineCallbacks def _get_state_groups_from_groups( self, groups: List[int],", "for the types in # the filter that are wildcards,", "= logging.getLogger(__name__) MAX_STATE_DELTA_HOPS = 100 class _GetStateGroupDelta( namedtuple(\"_GetStateGroupDelta\", (\"prev_group\", \"delta_ids\"))", "in remaining_state_groups: logger.info(\"[purge] de-delta-ing remaining state group %s\", sg) curr_state", "permissions and # limitations under the License. import logging from", "of our requested types are missing from the cache missing_types", "state_groups (Iterable[int]) Returns: Deferred[dict[int, int]]: mapping from state group to", "ev for (s, ev) in current_state_ids.items() if s[0] != EventTypes.Member", "@defer.inlineCallbacks def _get_state_for_groups( self, groups: Iterable[int], state_filter: StateFilter = StateFilter.all()", "import EventTypes from synapse.storage._base import SQLBaseStore from synapse.storage.data_stores.state.bg_updates import StateBackgroundUpdateStore", "state_groups_to_delete), ) @defer.inlineCallbacks def get_previous_state_groups(self, state_groups): \"\"\"Fetch the previous groups", "values={\"state_group\": state_group, \"prev_state_group\": prev_group}, ) self.db.simple_insert_many_txn( txn, table=\"state_groups_state\", values=[ {", "_store_state_group_txn(txn): if current_state_ids is None: # AFAIK, this can never", "from synapse.util.caches.dictionary_cache import DictionaryCache logger = logging.getLogger(__name__) 
MAX_STATE_DELTA_HOPS = 100", "group %s\", sg) curr_state = self._get_state_groups_from_groups_txn(txn, [sg]) curr_state = curr_state[sg]", "TODO: this hasn't been tuned yet 50000, ) self._state_group_members_cache =", ") -> defer.Deferred: \"\"\"Deletes no longer referenced state groups and", "for this group, but # limit members to this subset", "in state_groups_to_delete), ) @defer.inlineCallbacks def get_previous_state_groups(self, state_groups): \"\"\"Fetch the previous", "to persist state with unpersisted prev_group: %r\" % (prev_group,) )", "use the itrable flag when caching \"\"\" __slots__ = []", "missed some. missing_types = True else: # There aren't any", "caches. This should be as accurate as possible, # but", "k[0] == EventTypes.Member: state_dict_members[k] = v else: state_dict_non_members[k] = v", "de-delta-ing remaining state group %s\", sg) curr_state = self._get_state_groups_from_groups_txn(txn, [sg])", "we have everything or want everything, either way # `is_all`", "table=\"state_groups\", values={\"id\": state_group, \"room_id\": room_id, \"event_id\": event_id}, ) # We", "itrable flag when caching \"\"\" __slots__ = [] def __len__(self):", "StateFilter ) -> Tuple[Dict[int, StateMap[str]], Set[int]]: \"\"\"Gets the state at", "queries # on the store - for instance: \"give me", "wild cards, so `concrete_types()` returns the # complete list of", "the group, which negates the efficiency being sought. 
# #", "Returns: Deferred[Dict[int, StateMap[str]]]: Dict of state group to state map.", "from the cache, if False we need to query the", "MAX_STATE_DELTA_HOPS = 100 class _GetStateGroupDelta( namedtuple(\"_GetStateGroupDelta\", (\"prev_group\", \"delta_ids\")) ): \"\"\"Return", "v self._state_group_members_cache.update( cache_seq_num_members, key=group, value=state_dict_members, fetched_keys=member_types, ) self._state_group_cache.update( cache_seq_num_non_members, key=group,", "a list of state groups, optionally filtering by type/state_key Args:", "_purge_unreferenced_state_groups(self, txn, room_id, state_groups_to_delete): logger.info( \"[purge] found %i state groups", "__len__(self): return len(self.delta_ids) if self.delta_ids else 0 class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):", "== EventTypes.Member } txn.call_after( self._state_group_members_cache.update, self._state_group_members_cache.sequence, key=state_group, value=dict(current_member_state_ids), ) current_non_member_state_ids", "# on the state_group* tables. # # The point of", "and the new. Returns: (prev_group, delta_ids), where both may be", "iterable=state_groups, keyvalues={}, retcols=(\"prev_state_group\", \"state_group\"), desc=\"get_previous_state_groups\", ) return {row[\"state_group\"]: row[\"prev_state_group\"] for", "[groups[i : i + 100] for i in range(0, len(groups),", ") @defer.inlineCallbacks def _get_state_groups_from_groups( self, groups: List[int], state_filter: StateFilter ):", "\"\"\" is_all, known_absent, state_dict_ids = cache.get(group) if is_all or state_filter.is_full():", "(int): Sequence number of member cache since last lookup in", "removing %s from state_group_edges\", room_id) self.db.simple_delete_many_txn( txn, table=\"state_group_edges\", column=\"state_group\", iterable=state_groups_to_delete,", "but that's fine. 
member_types = member_filter.concrete_types() if non_member_filter.is_full(): # We", "cache to be smaller than the members cache as the", "} txn.call_after( self._state_group_cache.update, self._state_group_cache.sequence, key=state_group, value=dict(current_non_member_state_ids), ) return state_group return", "txn, table=\"state_groups_state\", keyvalues={\"state_group\": sg} ) self.db.simple_delete_txn( txn, table=\"state_group_edges\", keyvalues={\"state_group\": sg}", "a cache-friendly manner by querying both caches # separately and", "group ids to state dicts which we will pass through", "_get_state_group_delta_txn ) @defer.inlineCallbacks def _get_state_groups_from_groups( self, groups: List[int], state_filter: StateFilter", "\"room_id\": room_id, \"event_id\": event_id}, ) # We persist as a", "known_absent: missing_types = True break return state_filter.filter_state(state_dict_ids), not missing_types @defer.inlineCallbacks", "sg, \"room_id\": room_id, \"type\": key[0], \"state_key\": key[1], \"event_id\": state_id, }", "if we successfully retrieved all requests state from the cache,", "results = {} incomplete_groups = set() for group in set(groups):", "the state. 
state_filter: The state filter used to fetch state", "missing_types = True break return state_filter.filter_state(state_dict_ids), not missing_types @defer.inlineCallbacks def", "txn, table=\"state_group_edges\", column=\"prev_state_group\", iterable=state_groups_to_delete, keyvalues={}, retcols=(\"state_group\",), ) remaining_state_groups = {", "Now fetch any missing groups from the database incomplete_groups =", "state_id in delta_ids.items() ], ) else: self.db.simple_insert_many_txn( txn, table=\"state_groups_state\", values=[", "# Now lets update the caches self._insert_into_cache( group_to_state_dict, db_state_filter, cache_seq_num_members=cache_sequence_m,", "the state groups belong to (must all be in the", "on the store - for instance: \"give me all the", "want everything, either way # `is_all` tells us whether we've", "EventTypes.Member: state_dict_members[k] = v else: state_dict_non_members[k] = v self._state_group_members_cache.update( cache_seq_num_members,", "to get the state. cache: the cache of group ids", "allow_none=True, ) if not prev_group: return _GetStateGroupDelta(None, None) delta_ids =", "relevant cache. Args: group_to_state_dict (dict): The new entries pulled from", "is_in_db: raise Exception( \"Trying to persist state with unpersisted prev_group:", "else: # There aren't any wild cards, so `concrete_types()` returns", "from the database incomplete_groups = incomplete_groups_m | incomplete_groups_nm if not", "groups to delete. \"\"\" return self.db.runInteraction( \"purge_unreferenced_state_groups\", self._purge_unreferenced_state_groups, room_id, state_groups_to_delete,", "StateMap from synapse.util.caches.descriptors import cached from synapse.util.caches.dictionary_cache import DictionaryCache logger", "Prefill the state group caches with this group. # It's", "of event types we're wanting. for key in state_filter.concrete_types(): if", "it can cache a subset # of the state events", "any of these fields, apart from this subset\". 
# This", "for row in rows if row[\"state_group\"] not in state_groups_to_delete }", "curr_state.items() ], ) logger.info(\"[purge] removing redundant state groups\") txn.executemany( \"DELETE", "a subset # of the state events for a given", "in state_filter.concrete_types(): if key not in state_dict_ids and key not", "the relevant cache. Args: group_to_state_dict (dict): The new entries pulled", "You may obtain a copy of the License at #", "the filter a bit db_state_filter = state_filter.return_expanded() group_to_state_dict = yield", "state dicts which we will pass through - either the", "state_filter=member_filter ) state = dict(non_member_state) for group in groups: state[group].update(member_state[group])", "when lazy loading members, which requires this behaviour, # as", "degrades. if prev_group: is_in_db = self.db.simple_select_one_onecol_txn( txn, table=\"state_groups\", keyvalues={\"id\": prev_group},", "state group. Args: event_id (str): The event ID for which", "for which we want to get the state. state_filter: The", "sg) curr_state = self._get_state_groups_from_groups_txn(txn, [sg]) curr_state = curr_state[sg] self.db.simple_delete_txn( txn,", "txn, table=\"state_group_edges\", keyvalues={\"state_group\": sg} ) self.db.simple_insert_many_txn( txn, table=\"state_groups_state\", values=[ {", "# AFAIK, this can never happen raise Exception(\"current_state_ids cannot be", "to cache the # event IDs for the state types", "that are wildcards, so we have to assume that we", "# have missed some. 
missing_types = True else: # There", "self.delta_ids else 0 class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): \"\"\"A data store for", "# There aren't any wild cards, so `concrete_types()` returns the", "{} chunks = [groups[i : i + 100] for i", "which we will pass through - either the normal state", "state_dict_non_members[k] = v self._state_group_members_cache.update( cache_seq_num_members, key=group, value=state_dict_members, fetched_keys=member_types, ) self._state_group_cache.update(", "retcols=(\"prev_state_group\", \"state_group\"), desc=\"get_previous_state_groups\", ) return {row[\"state_group\"]: row[\"prev_state_group\"] for row in", "event types we're wanting. for key in state_filter.concrete_types(): if key", "as accurate as possible, # but can be an underestimate", "a delta between the old and the new. Returns: (prev_group,", "all # state events for the group, which negates the", "Deferred[dict[int, int]]: mapping from state group to previous state group.", "table=\"state_groups\", keyvalues={\"id\": prev_group}, retcol=\"id\", allow_none=True, ) if not is_in_db: raise", "to state dict state_filter (StateFilter): The state filter used to", "update the caches self._insert_into_cache( group_to_state_dict, db_state_filter, cache_seq_num_members=cache_sequence_m, cache_seq_num_non_members=cache_sequence_nm, ) #", "members cache as the # vast majority of state in", "that reference them. Args: room_id: The room the state groups", "we instead split the # state_group_cache into two halves -", "members state cache. state_filter: The state filter used to fetch", "from database. 
Map from state group to state dict state_filter", "( non_member_state, incomplete_groups_nm, ) = yield self._get_state_for_groups_using_cache( groups, self._state_group_cache, state_filter=non_member_filter", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "# event IDs for the state types in a given", "logger.info( \"[purge] de-delta-ing %i remaining state groups\", len(remaining_state_groups), ) #", "table=\"state_group_edges\", values={\"state_group\": state_group, \"prev_state_group\": prev_group}, ) self.db.simple_insert_many_txn( txn, table=\"state_groups_state\", values=[", "vast majority of state in Matrix (today) is member events.", "state group ids either missing from the cache or incomplete.", "all state groups to delete. \"\"\" return self.db.runInteraction( \"purge_unreferenced_state_groups\", self._purge_unreferenced_state_groups,", "database: Database, db_conn, hs): super(StateGroupDataStore, self).__init__(database, db_conn, hs) # Originally", "if we can, while also ensuring the chain # of", "cache any of these fields, apart from this subset\". #", "state_group* tables. # # The point of using a DictionaryCache", "\"\"\" rows = yield self.db.simple_select_many_batch( table=\"state_group_edges\", column=\"prev_state_group\", iterable=state_groups, keyvalues={}, retcols=(\"prev_state_group\",", "store for fetching/storing state groups. \"\"\" def __init__(self, database: Database,", "A previous state group for the room, optional. delta_ids (dict|None):", "= 100 class _GetStateGroupDelta( namedtuple(\"_GetStateGroupDelta\", (\"prev_group\", \"delta_ids\")) ): \"\"\"Return type", "The state group to lookup state_filter (StateFilter): The state filter", ": i + 100] for i in range(0, len(groups), 100)]", "state groups that reference them. 
Args: room_id: The room the", "we look them up in the member and non-member caches", "self._state_group_cache.sequence, key=state_group, value=dict(current_non_member_state_ids), ) return state_group return self.db.runInteraction(\"store_state_group\", _store_state_group_txn) def", "the state group cache to use group(int): The state group", "belong to (must all be in the same room). state_groups_to_delete", "import Dict, Iterable, List, Set, Tuple from twisted.internet import defer", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "edges logger.info(\"[purge] removing %s from state_group_edges\", room_id) self.db.simple_delete_many_txn( txn, table=\"state_group_edges\",", "= self.db.simple_select_one_onecol_txn( txn, table=\"state_group_edges\", keyvalues={\"state_group\": state_group}, retcol=\"prev_state_group\", allow_none=True, ) if", "): \"\"\"Inserts results from querying the database into the relevant", "License. # You may obtain a copy of the License", "groups that reference to-be-deleted state # groups to non delta", "# Now fetch any missing groups from the database incomplete_groups", "in known_absent: missing_types = True break return state_filter.filter_state(state_dict_ids), not missing_types", "FROM state_groups WHERE id = ?\", ((sg,) for sg in", "fetch any missing groups from the database incomplete_groups = incomplete_groups_m", "state_filter.filter_state(state_dict_ids), is_all # tracks whether any of our requested types", "state_group_cache into two halves - one for tracking non-member events,", "self, groups: Iterable[int], state_filter: StateFilter = StateFilter.all() ): \"\"\"Gets the", "member events non_member_types = None else: non_member_types = non_member_filter.concrete_types() for", "= state_filter.return_expanded() group_to_state_dict = yield self._get_state_groups_from_groups( list(incomplete_groups), state_filter=db_state_filter ) #", "member_events. 
This means that lazy loading # queries can be", "event IDs for the state types in a given state", "self.db.simple_select_list_txn( txn, table=\"state_groups_state\", keyvalues={\"state_group\": state_group}, retcols=(\"type\", \"state_key\", \"event_id\"), ) return", "them. Args: room_id: The room the state groups belong to", "state_dict_members[k] = v else: state_dict_non_members[k] = v self._state_group_members_cache.update( cache_seq_num_members, key=group,", "are wildcards, so we have to assume that we may", "state at `prev_group` and `current_state_ids`, if `prev_group` was given. Same", "these fields, apart from this subset\". # This is problematic", "can be made in a cache-friendly manner by querying both", "of a list of state groups, optionally filtering by type/state_key,", "fine. member_types = member_filter.concrete_types() if non_member_filter.is_full(): # We fetched all", "all requests state from the cache, if False we need", "queries can be made in a cache-friendly manner by querying", "state dict state_filter (StateFilter): The state filter used to fetch", "db_conn, hs): super(StateGroupDataStore, self).__init__(database, db_conn, hs) # Originally the state", "?\", ((sg,) for sg in state_groups_to_delete), ) @defer.inlineCallbacks def get_previous_state_groups(self,", "can cache a subset # of the state events for", "state groups and de-deltas any state groups that reference them.", "removing %s from state_groups\", room_id) self.db.simple_delete_many_txn( txn, table=\"state_groups\", column=\"id\", iterable=state_groups_to_delete,", "groups, self._state_group_cache, state_filter=non_member_filter ) ( member_state, incomplete_groups_m, ) = yield", "Iterable[int], state_filter: StateFilter = StateFilter.all() ): \"\"\"Gets the state at", "results def _get_state_for_group_using_cache(self, cache, group, state_filter): \"\"\"Checks if group is", "a given set of groups from the database, filtering on", "group and a delta between the old and the new.", 
"this subset\". # This is problematic when lazy loading members,", "that lazy loading # queries can be made in a", "(dict): The new entries pulled from database. Map from state", ") @cached(max_entries=10000, iterable=True) def get_state_group_delta(self, state_group): \"\"\"Given a state group", "(s, ev) in current_state_ids.items() if s[0] == EventTypes.Member } txn.call_after(", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "the non-members # cache for all state (which DictionaryCache will", "group_state_dict in group_to_state_dict.items(): # We just replace any existing entries,", "type of get_state_group_delta that implements __len__, which lets us use", "state_dict_members = {} state_dict_non_members = {} for k, v in", "\"Trying to persist state with unpersisted prev_group: %r\" % (prev_group,)", "# And finally update the result dict, by filtering out", "delta_ids), where both may be None. \"\"\" def _get_state_group_delta_txn(txn): prev_group", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "and then just merge the results together. # # We", "if is_all or state_filter.is_full(): # Either we have everything or", "results, incomplete_groups def _insert_into_cache( self, group_to_state_dict, state_filter, cache_seq_num_members, cache_seq_num_non_members, ):", "int]]: mapping from state group to previous state group. \"\"\"", "`got_all` is a bool indicating if we successfully retrieved all", "incomplete. 
\"\"\" results = {} incomplete_groups = set() for group", "{ \"state_group\": sg, \"room_id\": room_id, \"type\": key[0], \"state_key\": key[1], \"event_id\":", "self, group_to_state_dict, state_filter, cache_seq_num_members, cache_seq_num_non_members, ): \"\"\"Inserts results from querying", "required by applicable law or agreed to in writing, software", "fetched all non member events non_member_types = None else: non_member_types", "delta if we can, while also ensuring the chain #", "may be None. \"\"\" def _get_state_group_delta_txn(txn): prev_group = self.db.simple_select_one_onecol_txn( txn,", "res = yield self.db.runInteraction( \"_get_state_groups_from_groups\", self._get_state_groups_from_groups_txn, chunk, state_filter, ) results.update(res)", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "given state groups. Args: state_groups (Iterable[int]) Returns: Deferred[dict[int, int]]: mapping", "\"type\": key[0], \"state_key\": key[1], \"event_id\": state_id, } for key, state_id", "Originally the state store used a single DictionaryCache to cache", "db_state_filter, cache_seq_num_members=cache_sequence_m, cache_seq_num_non_members=cache_sequence_nm, ) # And finally update the result", "indicating if we successfully retrieved all requests state from the", "\"\"\"Store a new set of state, returning a newly assigned", "state # groups to non delta versions. for sg in", "state filter used to fetch state from the database. cache_seq_num_members", "len(groups), 100)] for chunk in chunks: res = yield self.db.runInteraction(", "given. Same format as `current_state_ids`. 
current_state_ids (dict): The state to", "agreed to in writing, software # distributed under the License", "incomplete_groups.add(group) return results, incomplete_groups def _insert_into_cache( self, group_to_state_dict, state_filter, cache_seq_num_members,", "missing_types @defer.inlineCallbacks def _get_state_for_groups( self, groups: Iterable[int], state_filter: StateFilter =", "negates the efficiency being sought. # # Rather than overcomplicating", "distributed under the License is distributed on an \"AS IS\"", "state group ID \"\"\" def _store_state_group_txn(txn): if current_state_ids is None:", "logger.info(\"[purge] removing %s from state_groups_state\", room_id) self.db.simple_delete_many_txn( txn, table=\"state_groups_state\", column=\"state_group\",", ") if not is_in_db: raise Exception( \"Trying to persist state", "txn, table=\"state_groups_state\", column=\"state_group\", iterable=state_groups_to_delete, keyvalues={}, ) # ... and the", "Matrix (today) is member events. self._state_group_cache = DictionaryCache( \"*stateGroupCache*\", #", "for a given set of groups from the database, filtering", "set of groups from the database, filtering on types of", "of the state events for a given state group (i.e.", "cache cache_seq_num_non_members (int): Sequence number of member cache since last", "tooo long, as otherwise read performance degrades. if prev_group: is_in_db", "s: ev for (s, ev) in current_state_ids.items() if s[0] ==", "gotten everything. return state_filter.filter_state(state_dict_ids), is_all # tracks whether any of", "prev_group}, retcol=\"id\", allow_none=True, ) if not is_in_db: raise Exception( \"Trying", "state_groups_to_delete (Collection[int]): Set of all state groups to delete. \"\"\"", "this prefill could # race with another update) current_member_state_ids =", "state. 
\"\"\" is_all, known_absent, state_dict_ids = cache.get(group) if is_all or", "since last lookup in cache cache_seq_num_non_members (int): Sequence number of", "will have loaded # everything we need from the database", "fetched all the state keys for the types in #", "as a delta if we can, while also ensuring the", "is member events. self._state_group_cache = DictionaryCache( \"*stateGroupCache*\", # TODO: this", "} logger.info( \"[purge] de-delta-ing %i remaining state groups\", len(remaining_state_groups), )", "self._get_state_groups_from_groups_txn, chunk, state_filter, ) results.update(res) return results def _get_state_for_group_using_cache(self, cache,", "= dict(non_member_state) for group in groups: state[group].update(member_state[group]) # Now fetch", "if not prev_group: return _GetStateGroupDelta(None, None) delta_ids = self.db.simple_select_list_txn( txn,", "% (prev_group,) ) potential_hops = self._count_state_group_hops_txn(txn, prev_group) if prev_group and", "cannot be None\") state_group = self.database_engine.get_next_state_group_id(txn) self.db.simple_insert_txn( txn, table=\"state_groups\", values={\"id\":", "then just merge the results together. # # We size", "map. 
\"\"\" results = {} chunks = [groups[i : i", "Tuple of dict of state_group_id to state map of entries", "for sg in remaining_state_groups: logger.info(\"[purge] de-delta-ing remaining state group %s\",", "of state group IDs to query state_filter: The state filter", "since last lookup in cache \"\"\" # We need to", "txn, table=\"state_group_edges\", keyvalues={\"state_group\": state_group}, retcol=\"prev_state_group\", allow_none=True, ) if not prev_group:", "Exception(\"current_state_ids cannot be None\") state_group = self.database_engine.get_next_state_group_id(txn) self.db.simple_insert_txn( txn, table=\"state_groups\",", "the types in # the filter that are wildcards, so", "for row in rows} def purge_room_state(self, room_id, state_groups_to_delete): \"\"\"Deletes all", "of state groups, optionally filtering by type/state_key, querying from a", "return results, incomplete_groups def _insert_into_cache( self, group_to_state_dict, state_filter, cache_seq_num_members, cache_seq_num_non_members,", "group try to return a previous group and a delta", "from collections import namedtuple from typing import Dict, Iterable, List,", "used a single DictionaryCache to cache the # event IDs", "else: state_dict_non_members[k] = v self._state_group_members_cache.update( cache_seq_num_members, key=group, value=state_dict_members, fetched_keys=member_types, )", "not in state_groups_to_delete } logger.info( \"[purge] de-delta-ing %i remaining state", "\"state_key\": key[1], \"event_id\": state_id, } for key, state_id in delta_ids.items()", "twisted.internet import defer from synapse.api.constants import EventTypes from synapse.storage._base import", "be as accurate as possible, # but can be an", "synapse.api.constants import EventTypes from synapse.storage._base import SQLBaseStore from synapse.storage.data_stores.state.bg_updates import", "Args: room_id: The room the state groups belong to (must", "point of using a DictionaryCache is that it can cache", "our requested types are 
missing from the cache missing_types =", "room from state tables Args: room_id (str): state_groups_to_delete (list[int]): State", "txn, room_id, state_groups_to_delete): # first we have to delete the", "reference to-be-deleted state # groups to non delta versions. for", "the previous groups of the given state groups. Args: state_groups", "caches self._insert_into_cache( group_to_state_dict, db_state_filter, cache_seq_num_members=cache_sequence_m, cache_seq_num_non_members=cache_sequence_nm, ) # And finally", "if s[0] == EventTypes.Member } txn.call_after( self._state_group_members_cache.update, self._state_group_members_cache.sequence, key=state_group, value=dict(current_member_state_ids),", "yet 50000, ) self._state_group_members_cache = DictionaryCache( \"*stateGroupMembersCache*\", 500000, ) @cached(max_entries=10000,", "state groups to delete\", len(state_groups_to_delete) ) rows = self.db.simple_select_many_txn( txn,", "self._state_group_members_cache.sequence, key=state_group, value=dict(current_member_state_ids), ) current_non_member_state_ids = { s: ev for", "OR CONDITIONS OF ANY KIND, either express or implied. #", "the License is distributed on an \"AS IS\" BASIS, #", "Dict of state group to state map. \"\"\" results =", "SQLBaseStore): \"\"\"A data store for fetching/storing state groups. \"\"\" def", "= incomplete_groups_m | incomplete_groups_nm if not incomplete_groups: return state cache_sequence_nm", "is a bool indicating if we successfully retrieved all requests", "DictionaryCache, state_filter: StateFilter ) -> Tuple[Dict[int, StateMap[str]], Set[int]]: \"\"\"Gets the", "room_id, \"type\": key[0], \"state_key\": key[1], \"event_id\": state_id, } for key,", "know if we fetched all the state keys for the", "versions. for sg in remaining_state_groups: logger.info(\"[purge] de-delta-ing remaining state group", "the given state groups. 
Args: state_groups (Iterable[int]) Returns: Deferred[dict[int, int]]:", "above, you # would query the members cache for a", "to fetch state from the database. cache_seq_num_members (int): Sequence number", "a list of state groups, optionally filtering by type/state_key, querying", "The room the state groups belong to (must all be", "return self.db.runInteraction( \"purge_room_state\", self._purge_room_state_txn, room_id, state_groups_to_delete, ) def _purge_room_state_txn(self, txn,", "missing state. \"\"\" is_all, known_absent, state_dict_ids = cache.get(group) if is_all", "group_to_state_dict, state_filter, cache_seq_num_members, cache_seq_num_non_members, ): \"\"\"Inserts results from querying the", "first we have to delete the state groups states logger.info(\"[purge]", "of state groups for which we want to get the", "we will pass through - either the normal state cache", "and then merging the result. So for the example above,", "state_groups_to_delete, ) def _purge_room_state_txn(self, txn, room_id, state_groups_to_delete): # first we", "the results together. # # We size the non-members cache", "group caches with this group. # It's fine to use", "law or agreed to in writing, software # distributed under", "the cache has no choice but to speculatively load all", "# of the state events for a given state group", "The state to store. Map of (type, state_key) to event_id.", "state_key) to event_id. Returns: Deferred[int]: The state group ID \"\"\"", "not got_all: incomplete_groups.add(group) return results, incomplete_groups def _insert_into_cache( self, group_to_state_dict,", "filtering out any extra # stuff we pulled out of", "# complete list of event types we're wanting. for key", "tables Args: room_id (str): state_groups_to_delete (list[int]): State groups to delete", "cache for a specific subset of state keys # (which", "implements __len__, which lets us use the itrable flag when", "# stuff we pulled out of the database. 
for group,", "by filtering out any extra # stuff we pulled out", "would query the members cache for a specific subset of", "state def _get_state_for_groups_using_cache( self, groups: Iterable[int], cache: DictionaryCache, state_filter: StateFilter", "the room, optional. delta_ids (dict|None): The delta between state at", "= self._get_state_groups_from_groups_txn(txn, [sg]) curr_state = curr_state[sg] self.db.simple_delete_txn( txn, table=\"state_groups_state\", keyvalues={\"state_group\":", "Iterable, List, Set, Tuple from twisted.internet import defer from synapse.api.constants", "caches with this group. # It's fine to use the", "event_id}, ) # We persist as a delta if we", "from the database. Returns: Deferred[Dict[int, StateMap[str]]]: Dict of state group", "a subset when there are wild # cards in the", "the keys for a # given dict which is an", "may obtain a copy of the License at # #", "self.db.simple_select_one_onecol_txn( txn, table=\"state_group_edges\", keyvalues={\"state_group\": state_group}, retcol=\"prev_state_group\", allow_none=True, ) if not", "of state_group_id to state map of entries in the cache,", "Returns: Deferred[int]: The state group ID \"\"\" def _store_state_group_txn(txn): if", "= None else: # `concrete_types()` will only return a subset", "for key, state_id in delta_ids.items() ], ) else: self.db.simple_insert_many_txn( txn,", "use the sequence like this as the state group map", "tables. # # The point of using a DictionaryCache is", "state_group_id to state map of entries in the cache, and", "the sequence like this as the state group map #", "100] for i in range(0, len(groups), 100)] for chunk in", "optionally filtering by type/state_key, querying from a specific cache. 
Args:", "may not use this file except in compliance with the", "group_to_state_dict.items(): state_dict_members = {} state_dict_non_members = {} for k, v", "a delta if we can, while also ensuring the chain", "\"state_key\": key[1], \"event_id\": state_id, } for key, state_id in curr_state.items()", "state_filter ) results[group] = state_dict_ids if not got_all: incomplete_groups.add(group) return", "the state. cache: the cache of group ids to state", "str, state_groups_to_delete ) -> defer.Deferred: \"\"\"Deletes no longer referenced state", "member events member_types = None else: # `concrete_types()` will only", "this file except in compliance with the License. # You", "expanding the filter a bit db_state_filter = state_filter.return_expanded() group_to_state_dict =", "Args: groups: list of state group IDs to query state_filter:", "isn't tooo long, as otherwise read performance degrades. if prev_group:", "can be an underestimate (e.g. when we have wild cards)", "given state group (i.e. a subset of the keys for", "to speculatively load all # state events for the group,", "# cache for all state (which DictionaryCache will similarly handle", "state_filter (StateFilter): The state filter used to fetch state from", "map. \"\"\" member_filter, non_member_filter = state_filter.get_member_split() # Now we look", "\"*stateGroupCache*\", # TODO: this hasn't been tuned yet 50000, )", "than the members cache as the # vast majority of", "and the other for tracking member_events. This means that lazy", "database. for group, group_state_dict in group_to_state_dict.items(): # We just replace", "other for tracking member_events. 
This means that lazy loading #", "as the # vast majority of state in Matrix (today)", "# # Licensed under the Apache License, Version 2.0 (the", "current_state_ids.items() if s[0] == EventTypes.Member } txn.call_after( self._state_group_members_cache.update, self._state_group_members_cache.sequence, key=state_group,", "either the normal state cache or the specific members state", "delete\", len(state_groups_to_delete) ) rows = self.db.simple_select_many_txn( txn, table=\"state_group_edges\", column=\"prev_state_group\", iterable=state_groups_to_delete,", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "state_filter.filter_state(state_dict_ids), not missing_types @defer.inlineCallbacks def _get_state_for_groups( self, groups: Iterable[int], state_filter:", "by type/state_key Args: groups: list of state groups for which", "groups\") txn.executemany( \"DELETE FROM state_groups_state WHERE state_group = ?\", ((sg,)", "group_to_state_dict = yield self._get_state_groups_from_groups( list(incomplete_groups), state_filter=db_state_filter ) # Now lets", "%r\" % (prev_group,) ) potential_hops = self._count_state_group_hops_txn(txn, prev_group) if prev_group", "requested types are missing from the cache missing_types = False", "for instance: \"give me all the state for this group,", "missing from the cache or incomplete. \"\"\" results = {}", ") remaining_state_groups = { row[\"state_group\"] for row in rows if", "= state_filter.get_member_split() # Now we look them up in the", "not is_in_db: raise Exception( \"Trying to persist state with unpersisted", "event_id (str): The event ID for which the state was", "self.db.simple_delete_many_txn( txn, table=\"state_group_edges\", column=\"state_group\", iterable=state_groups_to_delete, keyvalues={}, ) # ... 
and", "member and non-member caches ( non_member_state, incomplete_groups_nm, ) = yield", "return _GetStateGroupDelta( prev_group, {(row[\"type\"], row[\"state_key\"]): row[\"event_id\"] for row in delta_ids},", "(Iterable[int]) Returns: Deferred[dict[int, int]]: mapping from state group to previous", "in group_state_dict.items(): if k[0] == EventTypes.Member: state_dict_members[k] = v else:", "return state def _get_state_for_groups_using_cache( self, groups: Iterable[int], cache: DictionaryCache, state_filter:", "len(self.delta_ids) if self.delta_ids else 0 class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): \"\"\"A data", "and the non-members # cache for all state (which DictionaryCache", "to use group(int): The state group to lookup state_filter (StateFilter):", "(dict|None): The delta between state at `prev_group` and `current_state_ids`, if", "tracks whether any of our requested types are missing from", "], ) # Prefill the state group caches with this", "self.db.simple_delete_txn( txn, table=\"state_group_edges\", keyvalues={\"state_group\": sg} ) self.db.simple_insert_many_txn( txn, table=\"state_groups_state\", values=[", "the database into the relevant cache. Args: group_to_state_dict (dict): The", "values=[ { \"state_group\": sg, \"room_id\": room_id, \"type\": key[0], \"state_key\": key[1],", "calculated room_id (str) prev_group (int|None): A previous state group for", "or implied. # See the License for the specific language", "bool indicating if we successfully retrieved all requests state from", ") results[group] = state_dict_ids if not got_all: incomplete_groups.add(group) return results,", "given dict which is an entry in the cache for", "state types in a given state group to avoid hammering", "DB for the missing state. 
\"\"\" is_all, known_absent, state_dict_ids =", "def _store_state_group_txn(txn): if current_state_ids is None: # AFAIK, this can", "synapse.types import StateMap from synapse.util.caches.descriptors import cached from synapse.util.caches.dictionary_cache import", "cache(DictionaryCache): the state group cache to use group(int): The state", "row in delta_ids}, ) return self.db.runInteraction( \"get_state_group_delta\", _get_state_group_delta_txn ) @defer.inlineCallbacks", "to get the state. state_filter: The state filter used to", "room_id, state_groups_to_delete, ) def _purge_unreferenced_state_groups(self, txn, room_id, state_groups_to_delete): logger.info( \"[purge]", "for sg in state_groups_to_delete), ) txn.executemany( \"DELETE FROM state_groups WHERE", "we successfully retrieved all requests state from the cache, if", "immutable then this prefill could # race with another update)", "room). state_groups_to_delete (Collection[int]): Set of all state groups to delete.", "Tuple from twisted.internet import defer from synapse.api.constants import EventTypes from", "cards) member_filter, non_member_filter = state_filter.get_member_split() if member_filter.is_full(): # We fetched", "`_get_state_for_groups` Args: cache(DictionaryCache): the state group cache to use group(int):", "the # member vs non-member caches. This should be as", "_get_state_groups_from_groups( self, groups: List[int], state_filter: StateFilter ): \"\"\"Returns the state", "# Copyright 2014-2016 OpenMarket Ltd # # Licensed under the", "number of member cache since last lookup in cache \"\"\"", "to delete\", len(state_groups_to_delete) ) rows = self.db.simple_select_many_txn( txn, table=\"state_group_edges\", column=\"prev_state_group\",", "state groups\", len(remaining_state_groups), ) # Now we turn the state", "state from the database. 
cache_seq_num_members (int): Sequence number of member", ") def _purge_unreferenced_state_groups(self, txn, room_id, state_groups_to_delete): logger.info( \"[purge] found %i", "incomplete_groups def _insert_into_cache( self, group_to_state_dict, state_filter, cache_seq_num_members, cache_seq_num_non_members, ): \"\"\"Inserts", "from the database anyway. state[group] = state_filter.filter_state(group_state_dict) return state def", "which types we've fetched from the DB for the #", "chunk in chunks: res = yield self.db.runInteraction( \"_get_state_groups_from_groups\", self._get_state_groups_from_groups_txn, chunk,", "Exception( \"Trying to persist state with unpersisted prev_group: %r\" %", "tells us whether we've gotten everything. return state_filter.filter_state(state_dict_ids), is_all #", "500000, ) @cached(max_entries=10000, iterable=True) def get_state_group_delta(self, state_group): \"\"\"Given a state", "query the members cache for a specific subset of state", "the state keys for the types in # the filter", "specific subset of state keys # (which DictionaryCache will handle", "the store - for instance: \"give me all the state", "100 class _GetStateGroupDelta( namedtuple(\"_GetStateGroupDelta\", (\"prev_group\", \"delta_ids\")) ): \"\"\"Return type of", "for key, state_id in curr_state.items() ], ) logger.info(\"[purge] removing redundant", "IDs for the state types in a given state group", "to avoid hammering # on the state_group* tables. # #", "chunks: res = yield self.db.runInteraction( \"_get_state_groups_from_groups\", self._get_state_groups_from_groups_txn, chunk, state_filter, )", "state group %s\", sg) curr_state = self._get_state_groups_from_groups_txn(txn, [sg]) curr_state =", "just merge the results together. 
# # We size the", "We fetched all non member events non_member_types = None else:", "self.db.simple_select_one_onecol_txn( txn, table=\"state_groups\", keyvalues={\"id\": prev_group}, retcol=\"id\", allow_none=True, ) if not", "split the # state_group_cache into two halves - one for", "state keys # (which DictionaryCache will handle efficiently and fine)", "# first we have to delete the state groups states", "that's fine. member_types = member_filter.concrete_types() if non_member_filter.is_full(): # We fetched", "update) current_member_state_ids = { s: ev for (s, ev) in", ") def store_state_group( self, event_id, room_id, prev_group, delta_ids, current_state_ids ):", "self._get_state_groups_from_groups( list(incomplete_groups), state_filter=db_state_filter ) # Now lets update the caches", "\"purge_unreferenced_state_groups\", self._purge_unreferenced_state_groups, room_id, state_groups_to_delete, ) def _purge_unreferenced_state_groups(self, txn, room_id, state_groups_to_delete):", "%s from state_groups_state\", room_id) self.db.simple_delete_many_txn( txn, table=\"state_groups_state\", column=\"state_group\", iterable=state_groups_to_delete, keyvalues={},", "API isn't # rich enough to say \"please cache any", "room the state groups belong to (must all be in", "prev_group and potential_hops < MAX_STATE_DELTA_HOPS: self.db.simple_insert_txn( txn, table=\"state_group_edges\", values={\"state_group\": state_group,", "= {} state_dict_non_members = {} for k, v in group_state_dict.items():", "get_state_group_delta that implements __len__, which lets us use the itrable", "state events for a given state group (i.e. 
a subset", "We size the non-members cache to be smaller than the", "if current_state_ids is None: # AFAIK, this can never happen", "cache_sequence_m = self._state_group_members_cache.sequence # Help the cache hit ratio by", "de-delta-ing %i remaining state groups\", len(remaining_state_groups), ) # Now we", "import namedtuple from typing import Dict, Iterable, List, Set, Tuple", "filter, but that's fine. member_types = member_filter.concrete_types() if non_member_filter.is_full(): #", "prev_group, delta_ids, current_state_ids ): \"\"\"Store a new set of state,", "and `current_state_ids`, if `prev_group` was given. Same format as `current_state_ids`.", "is_all, known_absent, state_dict_ids = cache.get(group) if is_all or state_filter.is_full(): #", "self.db.simple_delete_many_txn( txn, table=\"state_groups_state\", column=\"state_group\", iterable=state_groups_to_delete, keyvalues={}, ) # ... and", ") # We persist as a delta if we can,", "be made in a cache-friendly manner by querying both caches", "This should be as accurate as possible, # but can", "group. Args: event_id (str): The event ID for which the", "__slots__ = [] def __len__(self): return len(self.delta_ids) if self.delta_ids else", "from state_group_edges\", room_id) self.db.simple_delete_many_txn( txn, table=\"state_group_edges\", column=\"state_group\", iterable=state_groups_to_delete, keyvalues={}, )", "break return state_filter.filter_state(state_dict_ids), not missing_types @defer.inlineCallbacks def _get_state_for_groups( self, groups:", "for all state (which DictionaryCache will similarly handle fine) #", "state group ID). # # However, this poses problems when", "database. 
Returns: Deferred[Dict[int, StateMap[str]]]: Dict of state group to state", "synapse.storage.state import StateFilter from synapse.types import StateMap from synapse.util.caches.descriptors import", "non_member_types = None else: non_member_types = non_member_filter.concrete_types() for group, group_state_dict", "users\", as DictionaryCache's API isn't # rich enough to say", "of groups from the database, filtering on types of state", "def __init__(self, database: Database, db_conn, hs): super(StateGroupDataStore, self).__init__(database, db_conn, hs)", "ratio by expanding the filter a bit db_state_filter = state_filter.return_expanded()", "self.db.runInteraction(\"store_state_group\", _store_state_group_txn) def purge_unreferenced_state_groups( self, room_id: str, state_groups_to_delete ) ->", "assume that we may # have missed some. missing_types =", "Dict of state group to state map. \"\"\" member_filter, non_member_filter", "the state_group* tables. # # The point of using a", "... and the state groups logger.info(\"[purge] removing %s from state_groups\",", "group(int): The state group to lookup state_filter (StateFilter): The state", "def purge_room_state(self, room_id, state_groups_to_delete): \"\"\"Deletes all record of a room", "the state store used a single DictionaryCache to cache the", "as otherwise read performance degrades. if prev_group: is_in_db = self.db.simple_select_one_onecol_txn(", "current_state_ids ): \"\"\"Store a new set of state, returning a", "groups for which we want to get the state. cache:", "txn.executemany( \"DELETE FROM state_groups WHERE id = ?\", ((sg,) for", "# # Rather than overcomplicating DictionaryCache's API, we instead split", "normal state cache or the specific members state cache. state_filter:", "a bit db_state_filter = state_filter.return_expanded() group_to_state_dict = yield self._get_state_groups_from_groups( list(incomplete_groups),", "immutable. 
(If the map wasn't immutable then this prefill could", "state_group}, retcols=(\"type\", \"state_key\", \"event_id\"), ) return _GetStateGroupDelta( prev_group, {(row[\"type\"], row[\"state_key\"]):", "cache or the specific members state cache. state_filter: The state", "self._state_group_cache.update, self._state_group_cache.sequence, key=state_group, value=dict(current_non_member_state_ids), ) return state_group return self.db.runInteraction(\"store_state_group\", _store_state_group_txn)", "(s, ev) in current_state_ids.items() if s[0] != EventTypes.Member } txn.call_after(", "in writing, software # distributed under the License is distributed", "and de-deltas any state groups that reference them. Args: room_id:", "state_filter.has_wildcards(): # We don't know if we fetched all the", "fine) and the non-members # cache for all state (which", "that it can cache a subset # of the state", ") # ... and the state groups logger.info(\"[purge] removing %s", "def _purge_room_state_txn(self, txn, room_id, state_groups_to_delete): # first we have to", ") # Now lets update the caches self._insert_into_cache( group_to_state_dict, db_state_filter,", "non-member caches. This should be as accurate as possible, #", "to delete. \"\"\" return self.db.runInteraction( \"purge_unreferenced_state_groups\", self._purge_unreferenced_state_groups, room_id, state_groups_to_delete, )", "subset of state keys # (which DictionaryCache will handle efficiently", "fetch state from the database. 
cache_seq_num_members (int): Sequence number of", "value=dict(current_non_member_state_ids), ) return state_group return self.db.runInteraction(\"store_state_group\", _store_state_group_txn) def purge_unreferenced_state_groups( self,", "should be as accurate as possible, # but can be", ") = yield self._get_state_for_groups_using_cache( groups, self._state_group_members_cache, state_filter=member_filter ) state =", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "current_non_member_state_ids = { s: ev for (s, ev) in current_state_ids.items()", "complete list of event types we're wanting. for key in", "License, Version 2.0 (the \"License\"); # you may not use", "Database, db_conn, hs): super(StateGroupDataStore, self).__init__(database, db_conn, hs) # Originally the", "EventTypes.Member } txn.call_after( self._state_group_members_cache.update, self._state_group_members_cache.sequence, key=state_group, value=dict(current_member_state_ids), ) current_non_member_state_ids =", "from state tables Args: room_id (str): state_groups_to_delete (list[int]): State groups", ") self._state_group_cache.update( cache_seq_num_non_members, key=group, value=state_dict_non_members, fetched_keys=non_member_types, ) def store_state_group( self,", "ids to state dicts which we will pass through -", "current_member_state_ids = { s: ev for (s, ev) in current_state_ids.items()", "that implements __len__, which lets us use the itrable flag", "is that it can cache a subset # of the", "The new entries pulled from database. Map from state group", "(If the map wasn't immutable then this prefill could #", "removing %s from state_groups_state\", room_id) self.db.simple_delete_many_txn( txn, table=\"state_groups_state\", column=\"state_group\", iterable=state_groups_to_delete,", "{ \"state_group\": state_group, \"room_id\": room_id, \"type\": key[0], \"state_key\": key[1], \"event_id\":", "a newly assigned state group. 
Args: event_id (str): The event", "if member_filter.is_full(): # We fetched all member events member_types =", "AFAIK, this can never happen raise Exception(\"current_state_ids cannot be None\")", "s: ev for (s, ev) in current_state_ids.items() if s[0] !=", "db_state_filter = state_filter.return_expanded() group_to_state_dict = yield self._get_state_groups_from_groups( list(incomplete_groups), state_filter=db_state_filter )", "for fetching/storing state groups. \"\"\" def __init__(self, database: Database, db_conn,", "wildcards, so we have to assume that we may #", "the License for the specific language governing permissions and #", "fetch state from the database. Returns: Tuple of dict of", "key, state_id in current_state_ids.items() ], ) # Prefill the state", "_get_state_for_groups_using_cache( self, groups: Iterable[int], cache: DictionaryCache, state_filter: StateFilter ) ->", "filtering by type/state_key, querying from a specific cache. Args: groups:", "value=dict(current_member_state_ids), ) current_non_member_state_ids = { s: ev for (s, ev)", "keyvalues={\"state_group\": state_group}, retcols=(\"type\", \"state_key\", \"event_id\"), ) return _GetStateGroupDelta( prev_group, {(row[\"type\"],", "by expanding the filter a bit db_state_filter = state_filter.return_expanded() group_to_state_dict", "non_member_filter = state_filter.get_member_split() if member_filter.is_full(): # We fetched all member", "record of a room from state tables Args: room_id (str):", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "_get_state_for_group_using_cache(self, cache, group, state_filter): \"\"\"Checks if group is in cache.", "\"\"\"Deletes all record of a room from state tables Args:", "for sg in state_groups_to_delete), ) @defer.inlineCallbacks def get_previous_state_groups(self, state_groups): \"\"\"Fetch", "cache has no choice but to speculatively load all #", "which we want to get the state. 
cache: the cache", "self.db.simple_insert_many_txn( txn, table=\"state_groups_state\", values=[ { \"state_group\": state_group, \"room_id\": room_id, \"type\":", "of state in Matrix (today) is member events. self._state_group_cache =", "`concrete_types()` will only return a subset when there are wild", "def purge_unreferenced_state_groups( self, room_id: str, state_groups_to_delete ) -> defer.Deferred: \"\"\"Deletes", "keyvalues={\"state_group\": sg} ) self.db.simple_insert_many_txn( txn, table=\"state_groups_state\", values=[ { \"state_group\": sg,", "\"state_group\": sg, \"room_id\": room_id, \"type\": key[0], \"state_key\": key[1], \"event_id\": state_id,", "else 0 class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): \"\"\"A data store for fetching/storing", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "members cache for a specific subset of state keys #", "utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed", "new. Returns: (prev_group, delta_ids), where both may be None. \"\"\"", "database into the relevant cache. Args: group_to_state_dict (dict): The new", "state_dict_non_members = {} for k, v in group_state_dict.items(): if k[0]", "\"\"\" __slots__ = [] def __len__(self): return len(self.delta_ids) if self.delta_ids", "state_groups_to_delete, ) def _purge_unreferenced_state_groups(self, txn, room_id, state_groups_to_delete): logger.info( \"[purge] found", "self._state_group_members_cache.sequence # Help the cache hit ratio by expanding the", "table=\"state_groups_state\", keyvalues={\"state_group\": state_group}, retcols=(\"type\", \"state_key\", \"event_id\"), ) return _GetStateGroupDelta( prev_group,", "which requires this behaviour, # as without it the cache", "previous groups of the given state groups. 
Args: state_groups (Iterable[int])", "keyvalues={\"state_group\": state_group}, retcol=\"prev_state_group\", allow_none=True, ) if not prev_group: return _GetStateGroupDelta(None,", "group to previous state group. \"\"\" rows = yield self.db.simple_select_many_batch(", "table=\"state_groups_state\", keyvalues={\"state_group\": sg} ) self.db.simple_delete_txn( txn, table=\"state_group_edges\", keyvalues={\"state_group\": sg} )", "may # have missed some. missing_types = True else: #", "{} incomplete_groups = set() for group in set(groups): state_dict_ids, got_all", "return len(self.delta_ids) if self.delta_ids else 0 class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): \"\"\"A", "groups. \"\"\" def __init__(self, database: Database, db_conn, hs): super(StateGroupDataStore, self).__init__(database,", ") # Prefill the state group caches with this group.", "while also ensuring the chain # of deltas isn't tooo", "(int|None): A previous state group for the room, optional. delta_ids", "the database. Returns 2-tuple (`state_dict`, `got_all`). `got_all` is a bool", "example above, you # would query the members cache for", "# distributed under the License is distributed on an \"AS", "prev_group (int|None): A previous state group for the room, optional.", "been tuned yet 50000, ) self._state_group_members_cache = DictionaryCache( \"*stateGroupMembersCache*\", 500000,", "# Unless required by applicable law or agreed to in", "\"_get_state_groups_from_groups\", self._get_state_groups_from_groups_txn, chunk, state_filter, ) results.update(res) return results def _get_state_for_group_using_cache(self,", "# queries can be made in a cache-friendly manner by", "cache. See `_get_state_for_groups` Args: cache(DictionaryCache): the state group cache to", "given state group ID). 
# # However, this poses problems", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "): \"\"\"Return type of get_state_group_delta that implements __len__, which lets", "delta between state at `prev_group` and `current_state_ids`, if `prev_group` was", "this behaviour, # as without it the cache has no", "for (s, ev) in current_state_ids.items() if s[0] == EventTypes.Member }", "both caches # separately and then merging the result. So", "state_filter, cache_seq_num_members, cache_seq_num_non_members, ): \"\"\"Inserts results from querying the database", "flag when caching \"\"\" __slots__ = [] def __len__(self): return", "as possible, # but can be an underestimate (e.g. when", ") self.db.simple_insert_many_txn( txn, table=\"state_groups_state\", values=[ { \"state_group\": state_group, \"room_id\": room_id,", "room_id) self.db.simple_delete_many_txn( txn, table=\"state_group_edges\", column=\"state_group\", iterable=state_groups_to_delete, keyvalues={}, ) # ...", "way # `is_all` tells us whether we've gotten everything. return", "in delta_ids}, ) return self.db.runInteraction( \"get_state_group_delta\", _get_state_group_delta_txn ) @defer.inlineCallbacks def", "cache. Args: groups: list of state groups for which we", "group, but # limit members to this subset of users\",", "the Apache License, Version 2.0 (the \"License\"); # you may", "extra # stuff we pulled out of the database. for", "poses problems when performing complicated queries # on the store", "room_id, state_groups_to_delete): logger.info( \"[purge] found %i state groups to delete\",", "returning a newly assigned state group. Args: event_id (str): The", "Dict, Iterable, List, Set, Tuple from twisted.internet import defer from", "= state_filter.get_member_split() if member_filter.is_full(): # We fetched all member events", "groups of the given state groups. 
Args: state_groups (Iterable[int]) Returns:", "# on the store - for instance: \"give me all", "namedtuple(\"_GetStateGroupDelta\", (\"prev_group\", \"delta_ids\")) ): \"\"\"Return type of get_state_group_delta that implements", "accurate as possible, # but can be an underestimate (e.g.", "cached from synapse.util.caches.dictionary_cache import DictionaryCache logger = logging.getLogger(__name__) MAX_STATE_DELTA_HOPS =", "\"prev_state_group\": prev_group}, ) self.db.simple_insert_many_txn( txn, table=\"state_groups_state\", values=[ { \"state_group\": state_group,", "row[\"state_group\"] for row in rows if row[\"state_group\"] not in state_groups_to_delete", "a specific cache. Args: groups: list of state groups for", "data store for fetching/storing state groups. \"\"\" def __init__(self, database:", "sought. # # Rather than overcomplicating DictionaryCache's API, we instead", "results.update(res) return results def _get_state_for_group_using_cache(self, cache, group, state_filter): \"\"\"Checks if", "state_filter=non_member_filter ) ( member_state, incomplete_groups_m, ) = yield self._get_state_for_groups_using_cache( groups,", "the state was calculated room_id (str) prev_group (int|None): A previous", "state with unpersisted prev_group: %r\" % (prev_group,) ) potential_hops =", "a single DictionaryCache to cache the # event IDs for", "the License. import logging from collections import namedtuple from typing", "# Now we turn the state groups that reference to-be-deleted", "None else: non_member_types = non_member_filter.concrete_types() for group, group_state_dict in group_to_state_dict.items():", "+ 100] for i in range(0, len(groups), 100)] for chunk", "event ID for which the state was calculated room_id (str)", "# groups to non delta versions. 
for sg in remaining_state_groups:", "and the state group ids either missing from the cache", "this group, but # limit members to this subset of", ") def _purge_room_state_txn(self, txn, room_id, state_groups_to_delete): # first we have", "from the cache missing_types = False if state_filter.has_wildcards(): # We", "look them up in the member and non-member caches (", "state groups belong to (must all be in the same", "type/state_key, querying from a specific cache. Args: groups: list of", "incomplete_groups = incomplete_groups_m | incomplete_groups_nm if not incomplete_groups: return state", "this poses problems when performing complicated queries # on the", "is_all or state_filter.is_full(): # Either we have everything or want", "fetch state from the database. Returns 2-tuple (`state_dict`, `got_all`). `got_all`", "raise Exception(\"current_state_ids cannot be None\") state_group = self.database_engine.get_next_state_group_id(txn) self.db.simple_insert_txn( txn,", "successfully retrieved all requests state from the cache, if False", "for which the state was calculated room_id (str) prev_group (int|None):", "has no choice but to speculatively load all # state", "): \"\"\"Returns the state groups for a given set of", "from querying the database into the relevant cache. Args: group_to_state_dict", "the non-members cache to be smaller than the members cache", "# Prefill the state group caches with this group. #", "state_dict_ids, got_all = self._get_state_for_group_using_cache( cache, group, state_filter ) results[group] =", "group to state dict state_filter (StateFilter): The state filter used", "all the state for this group, but # limit members", "get the state. state_filter: The state filter used to fetch", ") results.update(res) return results def _get_state_for_group_using_cache(self, cache, group, state_filter): \"\"\"Checks", "size the non-members cache to be smaller than the members", "the DB for the # member vs non-member caches. 
This", "_GetStateGroupDelta( prev_group, {(row[\"type\"], row[\"state_key\"]): row[\"event_id\"] for row in delta_ids}, )", "the cache, and the state group ids either missing from", "to say \"please cache any of these fields, apart from", "from state group to previous state group. \"\"\" rows =", "groups, self._state_group_members_cache, state_filter=member_filter ) state = dict(non_member_state) for group in", "a previous group and a delta between the old and", "StateFilter ): \"\"\"Returns the state groups for a given set", "under the License is distributed on an \"AS IS\" BASIS,", "state events. Args: groups: list of state group IDs to", "group edges logger.info(\"[purge] removing %s from state_group_edges\", room_id) self.db.simple_delete_many_txn( txn,", "row[\"prev_state_group\"] for row in rows} def purge_room_state(self, room_id, state_groups_to_delete): \"\"\"Deletes", "newly assigned state group. Args: event_id (str): The event ID", "state. cache: the cache of group ids to state dicts", "on types of state events. Args: groups: list of state", "map of entries in the cache, and the state group", "table=\"state_group_edges\", column=\"prev_state_group\", iterable=state_groups, keyvalues={}, retcols=(\"prev_state_group\", \"state_group\"), desc=\"get_previous_state_groups\", ) return {row[\"state_group\"]:", "# limit members to this subset of users\", as DictionaryCache's", "missing_types = False if state_filter.has_wildcards(): # We don't know if", "self._get_state_for_groups_using_cache( groups, self._state_group_members_cache, state_filter=member_filter ) state = dict(non_member_state) for group", "key=state_group, value=dict(current_member_state_ids), ) current_non_member_state_ids = { s: ev for (s,", "StateFilter from synapse.types import StateMap from synapse.util.caches.descriptors import cached from", "i in range(0, len(groups), 100)] for chunk in chunks: res", "# everything we need from the database anyway. 
state[group] =", "the cache for a given state group ID). # #", "defer.Deferred: \"\"\"Deletes no longer referenced state groups and de-deltas any", "an underestimate (e.g. when we have wild cards) member_filter, non_member_filter", "non member events non_member_types = None else: non_member_types = non_member_filter.concrete_types()", "# `is_all` tells us whether we've gotten everything. return state_filter.filter_state(state_dict_ids),", "deltas isn't tooo long, as otherwise read performance degrades. if", "DictionaryCache logger = logging.getLogger(__name__) MAX_STATE_DELTA_HOPS = 100 class _GetStateGroupDelta( namedtuple(\"_GetStateGroupDelta\",", "incomplete_groups: return state cache_sequence_nm = self._state_group_cache.sequence cache_sequence_m = self._state_group_members_cache.sequence #", "logger = logging.getLogger(__name__) MAX_STATE_DELTA_HOPS = 100 class _GetStateGroupDelta( namedtuple(\"_GetStateGroupDelta\", (\"prev_group\",", "state group caches with this group. # It's fine to", "= [groups[i : i + 100] for i in range(0,", "of member cache since last lookup in cache cache_seq_num_non_members (int):", "group, state_filter ) results[group] = state_dict_ids if not got_all: incomplete_groups.add(group)", "fetched from the DB for the # member vs non-member", "txn, table=\"state_groups_state\", keyvalues={\"state_group\": state_group}, retcols=(\"type\", \"state_key\", \"event_id\"), ) return _GetStateGroupDelta(", "wanting. 
for key in state_filter.concrete_types(): if key not in state_dict_ids", "similarly handle fine) # and then just merge the results", "cache_seq_num_members=cache_sequence_m, cache_seq_num_non_members=cache_sequence_nm, ) # And finally update the result dict,", "return self.db.runInteraction(\"store_state_group\", _store_state_group_txn) def purge_unreferenced_state_groups( self, room_id: str, state_groups_to_delete )", "if False we need to query the DB for the", "unpersisted prev_group: %r\" % (prev_group,) ) potential_hops = self._count_state_group_hops_txn(txn, prev_group)", "room_id: str, state_groups_to_delete ) -> defer.Deferred: \"\"\"Deletes no longer referenced", "fetched all member events member_types = None else: # `concrete_types()`", "instance: \"give me all the state for this group, but", "\"purge_room_state\", self._purge_room_state_txn, room_id, state_groups_to_delete, ) def _purge_room_state_txn(self, txn, room_id, state_groups_to_delete):", "return a previous group and a delta between the old", "room_id, \"event_id\": event_id}, ) # We persist as a delta", "There aren't any wild cards, so `concrete_types()` returns the #", "key=group, value=state_dict_non_members, fetched_keys=non_member_types, ) def store_state_group( self, event_id, room_id, prev_group,", "last lookup in cache \"\"\" # We need to work", "happen raise Exception(\"current_state_ids cannot be None\") state_group = self.database_engine.get_next_state_group_id(txn) self.db.simple_insert_txn(", "current_state_ids.items() ], ) # Prefill the state group caches with", "# as without it the cache has no choice but", "existing entries, as we will have loaded # everything we", "prefill could # race with another update) current_member_state_ids = {", "FROM state_groups_state WHERE state_group = ?\", ((sg,) for sg in", "delta_ids = self.db.simple_select_list_txn( txn, table=\"state_groups_state\", keyvalues={\"state_group\": state_group}, retcols=(\"type\", \"state_key\", \"event_id\"),", 
"member_filter.is_full(): # We fetched all member events member_types = None", "cache to use group(int): The state group to lookup state_filter", "ANY KIND, either express or implied. # See the License", "if key not in state_dict_ids and key not in known_absent:", "finally update the result dict, by filtering out any extra", "state_id in curr_state.items() ], ) logger.info(\"[purge] removing redundant state groups\")", "the License. # You may obtain a copy of the", "- for instance: \"give me all the state for this", "referenced state groups and de-deltas any state groups that reference", "# See the License for the specific language governing permissions", "def get_state_group_delta(self, state_group): \"\"\"Given a state group try to return", "retcol=\"prev_state_group\", allow_none=True, ) if not prev_group: return _GetStateGroupDelta(None, None) delta_ids", "state[group].update(member_state[group]) # Now fetch any missing groups from the database", "in state_groups_to_delete } logger.info( \"[purge] de-delta-ing %i remaining state groups\",", "return state_group return self.db.runInteraction(\"store_state_group\", _store_state_group_txn) def purge_unreferenced_state_groups( self, room_id: str,", "last lookup in cache cache_seq_num_non_members (int): Sequence number of member", "member vs non-member caches. This should be as accurate as", "remaining state group %s\", sg) curr_state = self._get_state_groups_from_groups_txn(txn, [sg]) curr_state", "for key, state_id in current_state_ids.items() ], ) # Prefill the", "100)] for chunk in chunks: res = yield self.db.runInteraction( \"_get_state_groups_from_groups\",", "us use the itrable flag when caching \"\"\" __slots__ =", "to store. Map of (type, state_key) to event_id. Returns: Deferred[int]:", "Map of (type, state_key) to event_id. Returns: Deferred[int]: The state", "the cache or incomplete. 
\"\"\" results = {} incomplete_groups =", "map wasn't immutable then this prefill could # race with", "key in state_filter.concrete_types(): if key not in state_dict_ids and key", "have wild cards) member_filter, non_member_filter = state_filter.get_member_split() if member_filter.is_full(): #", "is immutable. (If the map wasn't immutable then this prefill", "Iterable[int], cache: DictionaryCache, state_filter: StateFilter ) -> Tuple[Dict[int, StateMap[str]], Set[int]]:", "# Originally the state store used a single DictionaryCache to", "to-be-deleted state # groups to non delta versions. for sg", "everything we need from the database anyway. state[group] = state_filter.filter_state(group_state_dict)", "The event ID for which the state was calculated room_id", "yield self._get_state_for_groups_using_cache( groups, self._state_group_members_cache, state_filter=member_filter ) state = dict(non_member_state) for", "delta_ids.items() ], ) else: self.db.simple_insert_many_txn( txn, table=\"state_groups_state\", values=[ { \"state_group\":", "\"state_key\", \"event_id\"), ) return _GetStateGroupDelta( prev_group, {(row[\"type\"], row[\"state_key\"]): row[\"event_id\"] for", "(i.e. a subset of the keys for a # given", "filter used to fetch state from the database. Returns: Tuple", "of member cache since last lookup in cache \"\"\" #", "into the relevant cache. Args: group_to_state_dict (dict): The new entries", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "aren't any wild cards, so `concrete_types()` returns the # complete", "ensuring the chain # of deltas isn't tooo long, as", "incomplete_groups = set() for group in set(groups): state_dict_ids, got_all =", "writing, software # distributed under the License is distributed on", "non_member_types = non_member_filter.concrete_types() for group, group_state_dict in group_to_state_dict.items(): state_dict_members =", "in Matrix (today) is member events. 
self._state_group_cache = DictionaryCache( \"*stateGroupCache*\",", "raise Exception( \"Trying to persist state with unpersisted prev_group: %r\"", "\"event_id\": state_id, } for key, state_id in curr_state.items() ], )", "We persist as a delta if we can, while also", "License. import logging from collections import namedtuple from typing import", "querying from a specific cache. Args: groups: list of state", "pass through - either the normal state cache or the", "return state cache_sequence_nm = self._state_group_cache.sequence cache_sequence_m = self._state_group_members_cache.sequence # Help", "the normal state cache or the specific members state cache.", "from synapse.storage._base import SQLBaseStore from synapse.storage.data_stores.state.bg_updates import StateBackgroundUpdateStore from synapse.storage.database", "a specific subset of state keys # (which DictionaryCache will", "event_id. Returns: Deferred[int]: The state group ID \"\"\" def _store_state_group_txn(txn):", "cards in the filter, but that's fine. member_types = member_filter.concrete_types()", "means that lazy loading # queries can be made in", "want to get the state. cache: the cache of group", "synapse.util.caches.dictionary_cache import DictionaryCache logger = logging.getLogger(__name__) MAX_STATE_DELTA_HOPS = 100 class", "the cache hit ratio by expanding the filter a bit", "all be in the same room). 
state_groups_to_delete (Collection[int]): Set of", "self.db.runInteraction( \"purge_unreferenced_state_groups\", self._purge_unreferenced_state_groups, room_id, state_groups_to_delete, ) def _purge_unreferenced_state_groups(self, txn, room_id,", "entry in the cache for a given state group ID).", "when we have wild cards) member_filter, non_member_filter = state_filter.get_member_split() if", "delta_ids, current_state_ids ): \"\"\"Store a new set of state, returning", "key, state_id in curr_state.items() ], ) logger.info(\"[purge] removing redundant state", "out any extra # stuff we pulled out of the", "= {} chunks = [groups[i : i + 100] for", "also ensuring the chain # of deltas isn't tooo long,", "return _GetStateGroupDelta(None, None) delta_ids = self.db.simple_select_list_txn( txn, table=\"state_groups_state\", keyvalues={\"state_group\": state_group},", "groups\", len(remaining_state_groups), ) # Now we turn the state groups", "subset\". # This is problematic when lazy loading members, which", "dict which is an entry in the cache for a", "new set of state, returning a newly assigned state group.", "IDs to query state_filter: The state filter used to fetch", "group. # It's fine to use the sequence like this", "list(incomplete_groups), state_filter=db_state_filter ) # Now lets update the caches self._insert_into_cache(", "but # limit members to this subset of users\", as", "this can never happen raise Exception(\"current_state_ids cannot be None\") state_group", "to event_id. 
Returns: Deferred[int]: The state group ID \"\"\" def", "self._state_group_cache.update( cache_seq_num_non_members, key=group, value=state_dict_non_members, fetched_keys=non_member_types, ) def store_state_group( self, event_id,", "yield self.db.simple_select_many_batch( table=\"state_group_edges\", column=\"prev_state_group\", iterable=state_groups, keyvalues={}, retcols=(\"prev_state_group\", \"state_group\"), desc=\"get_previous_state_groups\", )", "in rows} def purge_room_state(self, room_id, state_groups_to_delete): \"\"\"Deletes all record of", "-*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd #", "groups and de-deltas any state groups that reference them. Args:", "redundant state groups\") txn.executemany( \"DELETE FROM state_groups_state WHERE state_group =", "state group try to return a previous group and a", "otherwise read performance degrades. if prev_group: is_in_db = self.db.simple_select_one_onecol_txn( txn,", "cache as the # vast majority of state in Matrix", "value=state_dict_non_members, fetched_keys=non_member_types, ) def store_state_group( self, event_id, room_id, prev_group, delta_ids,", "tuned yet 50000, ) self._state_group_members_cache = DictionaryCache( \"*stateGroupMembersCache*\", 500000, )", "\"\"\" def __init__(self, database: Database, db_conn, hs): super(StateGroupDataStore, self).__init__(database, db_conn,", "Rather than overcomplicating DictionaryCache's API, we instead split the #", "majority of state in Matrix (today) is member events. self._state_group_cache", "enough to say \"please cache any of these fields, apart", "is problematic when lazy loading members, which requires this behaviour,", "being sought. # # Rather than overcomplicating DictionaryCache's API, we", "state_dict_ids = cache.get(group) if is_all or state_filter.is_full(): # Either we", "store. Map of (type, state_key) to event_id. 
Returns: Deferred[int]: The", "[sg]) curr_state = curr_state[sg] self.db.simple_delete_txn( txn, table=\"state_groups_state\", keyvalues={\"state_group\": sg} )", "state group to state dict state_filter (StateFilter): The state filter", "def store_state_group( self, event_id, room_id, prev_group, delta_ids, current_state_ids ): \"\"\"Store", "Set[int]]: \"\"\"Gets the state at each of a list of", "prev_group: is_in_db = self.db.simple_select_one_onecol_txn( txn, table=\"state_groups\", keyvalues={\"id\": prev_group}, retcol=\"id\", allow_none=True,", "WHERE id = ?\", ((sg,) for sg in state_groups_to_delete), )", "in curr_state.items() ], ) logger.info(\"[purge] removing redundant state groups\") txn.executemany(", "cache: the cache of group ids to state dicts which", "not in state_dict_ids and key not in known_absent: missing_types =", "Args: cache(DictionaryCache): the state group cache to use group(int): The", "= state_dict_ids if not got_all: incomplete_groups.add(group) return results, incomplete_groups def", "synapse.storage.database import Database from synapse.storage.state import StateFilter from synapse.types import", "group_state_dict in group_to_state_dict.items(): state_dict_members = {} state_dict_non_members = {} for", "in set(groups): state_dict_ids, got_all = self._get_state_for_group_using_cache( cache, group, state_filter )", "non_member_filter = state_filter.get_member_split() # Now we look them up in", "cache_seq_num_members, key=group, value=state_dict_members, fetched_keys=member_types, ) self._state_group_cache.update( cache_seq_num_non_members, key=group, value=state_dict_non_members, fetched_keys=non_member_types,", "True else: # There aren't any wild cards, so `concrete_types()`", "We fetched all member events member_types = None else: #", "room_id, state_groups_to_delete): \"\"\"Deletes all record of a room from state", "\"event_id\": state_id, } for key, state_id in current_state_ids.items() ], )", "the missing state. 
\"\"\" is_all, known_absent, state_dict_ids = cache.get(group) if", "import SQLBaseStore from synapse.storage.data_stores.state.bg_updates import StateBackgroundUpdateStore from synapse.storage.database import Database", "state group map # is immutable. (If the map wasn't", "State groups to delete \"\"\" return self.db.runInteraction( \"purge_room_state\", self._purge_room_state_txn, room_id,", "for i in range(0, len(groups), 100)] for chunk in chunks:", "entries, as we will have loaded # everything we need", "# We fetched all non member events non_member_types = None", "subset # of the state events for a given state", "lets us use the itrable flag when caching \"\"\" __slots__", "groups: List[int], state_filter: StateFilter ): \"\"\"Returns the state groups for", "rows = yield self.db.simple_select_many_batch( table=\"state_group_edges\", column=\"prev_state_group\", iterable=state_groups, keyvalues={}, retcols=(\"prev_state_group\", \"state_group\"),", "state_filter.get_member_split() # Now we look them up in the member", "__init__(self, database: Database, db_conn, hs): super(StateGroupDataStore, self).__init__(database, db_conn, hs) #", "# limitations under the License. import logging from collections import", "to delete \"\"\" return self.db.runInteraction( \"purge_room_state\", self._purge_room_state_txn, room_id, state_groups_to_delete, )", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "used to fetch state from the database. cache_seq_num_members (int): Sequence", "if group is in cache. 
See `_get_state_for_groups` Args: cache(DictionaryCache): the", "else: non_member_types = non_member_filter.concrete_types() for group, group_state_dict in group_to_state_dict.items(): state_dict_members", "retcols=(\"type\", \"state_key\", \"event_id\"), ) return _GetStateGroupDelta( prev_group, {(row[\"type\"], row[\"state_key\"]): row[\"event_id\"]", "# We just replace any existing entries, as we will", "cache: DictionaryCache, state_filter: StateFilter ) -> Tuple[Dict[int, StateMap[str]], Set[int]]: \"\"\"Gets", "results[group] = state_dict_ids if not got_all: incomplete_groups.add(group) return results, incomplete_groups", "of state group to state map. \"\"\" results = {}", ") txn.executemany( \"DELETE FROM state_groups WHERE id = ?\", ((sg,)", "dict, by filtering out any extra # stuff we pulled", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "if non_member_filter.is_full(): # We fetched all non member events non_member_types", "groups belong to (must all be in the same room).", "50000, ) self._state_group_members_cache = DictionaryCache( \"*stateGroupMembersCache*\", 500000, ) @cached(max_entries=10000, iterable=True)", "with this group. # It's fine to use the sequence", ") @defer.inlineCallbacks def get_previous_state_groups(self, state_groups): \"\"\"Fetch the previous groups of", "choice but to speculatively load all # state events for", "previous group and a delta between the old and the", "for (s, ev) in current_state_ids.items() if s[0] != EventTypes.Member }", "# of deltas isn't tooo long, as otherwise read performance", "the itrable flag when caching \"\"\" __slots__ = [] def", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "ID). # # However, this poses problems when performing complicated", "state group to state map. \"\"\" results = {} chunks", "in the same room). 
state_groups_to_delete (Collection[int]): Set of all state", "Help the cache hit ratio by expanding the filter a", "(prev_group, delta_ids), where both may be None. \"\"\" def _get_state_group_delta_txn(txn):", "state_id, } for key, state_id in delta_ids.items() ], ) else:", "List[int], state_filter: StateFilter ): \"\"\"Returns the state groups for a", "or incomplete. \"\"\" results = {} incomplete_groups = set() for", "def _get_state_for_groups_using_cache( self, groups: Iterable[int], cache: DictionaryCache, state_filter: StateFilter )", "cache missing_types = False if state_filter.has_wildcards(): # We don't know", "Tuple[Dict[int, StateMap[str]], Set[int]]: \"\"\"Gets the state at each of a", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "to fetch state from the database. Returns: Tuple of dict", "the state types in a given state group to avoid", "store used a single DictionaryCache to cache the # event", "can, while also ensuring the chain # of deltas isn't", "-> defer.Deferred: \"\"\"Deletes no longer referenced state groups and de-deltas", "ID \"\"\" def _store_state_group_txn(txn): if current_state_ids is None: # AFAIK,", "on the state_group* tables. # # The point of using", "state_filter.is_full(): # Either we have everything or want everything, either", "the filter that are wildcards, so we have to assume", "lets update the caches self._insert_into_cache( group_to_state_dict, db_state_filter, cache_seq_num_members=cache_sequence_m, cache_seq_num_non_members=cache_sequence_nm, )", "just replace any existing entries, as we will have loaded", "table=\"state_group_edges\", keyvalues={\"state_group\": sg} ) self.db.simple_insert_many_txn( txn, table=\"state_groups_state\", values=[ { \"state_group\":", "map # is immutable. 
(If the map wasn't immutable then", "specific language governing permissions and # limitations under the License.", "for k, v in group_state_dict.items(): if k[0] == EventTypes.Member: state_dict_members[k]", "an entry in the cache for a given state group", "state_group, \"room_id\": room_id, \"event_id\": event_id}, ) # We persist as", "entries pulled from database. Map from state group to state", "- one for tracking non-member events, # and the other", "# (which DictionaryCache will handle efficiently and fine) and the", "from synapse.types import StateMap from synapse.util.caches.descriptors import cached from synapse.util.caches.dictionary_cache", "sg} ) self.db.simple_insert_many_txn( txn, table=\"state_groups_state\", values=[ { \"state_group\": sg, \"room_id\":", "need to query the DB for the missing state. \"\"\"", "state_group, \"prev_state_group\": prev_group}, ) self.db.simple_insert_many_txn( txn, table=\"state_groups_state\", values=[ { \"state_group\":", "state_groups WHERE id = ?\", ((sg,) for sg in state_groups_to_delete),", "Same format as `current_state_ids`. 
current_state_ids (dict): The state to store.", "and potential_hops < MAX_STATE_DELTA_HOPS: self.db.simple_insert_txn( txn, table=\"state_group_edges\", values={\"state_group\": state_group, \"prev_state_group\":", "rows} def purge_room_state(self, room_id, state_groups_to_delete): \"\"\"Deletes all record of a", "cache, and the state group ids either missing from the", "is an entry in the cache for a given state", "in cache cache_seq_num_non_members (int): Sequence number of member cache since", "# you may not use this file except in compliance", "the map wasn't immutable then this prefill could # race", "key=state_group, value=dict(current_non_member_state_ids), ) return state_group return self.db.runInteraction(\"store_state_group\", _store_state_group_txn) def purge_unreferenced_state_groups(", "the state groups for a given set of groups from", "current_state_ids (dict): The state to store. Map of (type, state_key)", "handle efficiently and fine) and the non-members # cache for", "and the state groups logger.info(\"[purge] removing %s from state_groups\", room_id)", "have missed some. missing_types = True else: # There aren't", "= self._get_state_for_group_using_cache( cache, group, state_filter ) results[group] = state_dict_ids if", "the members cache as the # vast majority of state", "group (i.e. 
a subset of the keys for a #", "current_state_ids is None: # AFAIK, this can never happen raise", "entries in the cache, and the state group ids either", "removing redundant state groups\") txn.executemany( \"DELETE FROM state_groups_state WHERE state_group", "as without it the cache has no choice but to", "subset when there are wild # cards in the filter,", "prev_group, {(row[\"type\"], row[\"state_key\"]): row[\"event_id\"] for row in delta_ids}, ) return", "class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): \"\"\"A data store for fetching/storing state groups.", ") # Now we turn the state groups that reference", "number of member cache since last lookup in cache cache_seq_num_non_members", "remaining state groups\", len(remaining_state_groups), ) # Now we turn the", "performance degrades. if prev_group: is_in_db = self.db.simple_select_one_onecol_txn( txn, table=\"state_groups\", keyvalues={\"id\":", "} for key, state_id in current_state_ids.items() ], ) # Prefill", "events, # and the other for tracking member_events. This means", "when caching \"\"\" __slots__ = [] def __len__(self): return len(self.delta_ids)", "dict(non_member_state) for group in groups: state[group].update(member_state[group]) # Now fetch any", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "`prev_group` and `current_state_ids`, if `prev_group` was given. Same format as", "StateFilter = StateFilter.all() ): \"\"\"Gets the state at each of", "cache \"\"\" # We need to work out which types", "groups to non delta versions. for sg in remaining_state_groups: logger.info(\"[purge]", "iterable=state_groups_to_delete, keyvalues={}, ) # ... 
and the state group edges", "of a room from state tables Args: room_id (str): state_groups_to_delete", ") self._state_group_members_cache = DictionaryCache( \"*stateGroupMembersCache*\", 500000, ) @cached(max_entries=10000, iterable=True) def", "= yield self.db.runInteraction( \"_get_state_groups_from_groups\", self._get_state_groups_from_groups_txn, chunk, state_filter, ) results.update(res) return", "we fetched all the state keys for the types in", "delta_ids (dict|None): The delta between state at `prev_group` and `current_state_ids`,", "txn, table=\"state_groups\", keyvalues={\"id\": prev_group}, retcol=\"id\", allow_none=True, ) if not is_in_db:", "non_member_filter.is_full(): # We fetched all non member events non_member_types =", "any missing groups from the database incomplete_groups = incomplete_groups_m |", "which we want to get the state. state_filter: The state", "This is problematic when lazy loading members, which requires this", "made in a cache-friendly manner by querying both caches #", "{ row[\"state_group\"] for row in rows if row[\"state_group\"] not in", "under the Apache License, Version 2.0 (the \"License\"); # you", "loading members, which requires this behaviour, # as without it", "state_groups_to_delete (list[int]): State groups to delete \"\"\" return self.db.runInteraction( \"purge_room_state\",", "not prev_group: return _GetStateGroupDelta(None, None) delta_ids = self.db.simple_select_list_txn( txn, table=\"state_groups_state\",", "The point of using a DictionaryCache is that it can", "group is in cache. 
See `_get_state_for_groups` Args: cache(DictionaryCache): the state", "\"\"\" results = {} incomplete_groups = set() for group in", "querying both caches # separately and then merging the result.", "member_filter.concrete_types() if non_member_filter.is_full(): # We fetched all non member events", "have to delete the state groups states logger.info(\"[purge] removing %s", "not in known_absent: missing_types = True break return state_filter.filter_state(state_dict_ids), not", "any extra # stuff we pulled out of the database.", "return {row[\"state_group\"]: row[\"prev_state_group\"] for row in rows} def purge_room_state(self, room_id,", "the state at each of a list of state groups,", "sg} ) self.db.simple_delete_txn( txn, table=\"state_group_edges\", keyvalues={\"state_group\": sg} ) self.db.simple_insert_many_txn( txn,", "work out which types we've fetched from the DB for", "list of event types we're wanting. for key in state_filter.concrete_types():", "state_dict_ids and key not in known_absent: missing_types = True break", "if self.delta_ids else 0 class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): \"\"\"A data store", "groups, optionally filtering by type/state_key, querying from a specific cache.", "# separately and then merging the result. So for the", "or state_filter.is_full(): # Either we have everything or want everything,", "\"please cache any of these fields, apart from this subset\".", "@defer.inlineCallbacks def _get_state_groups_from_groups( self, groups: List[int], state_filter: StateFilter ): \"\"\"Returns", "cache_seq_num_non_members=cache_sequence_nm, ) # And finally update the result dict, by", "List, Set, Tuple from twisted.internet import defer from synapse.api.constants import", "filtering on types of state events. Args: groups: list of", "cache, group, state_filter): \"\"\"Checks if group is in cache. 
See", "): \"\"\"Gets the state at each of a list of", "handle fine) # and then just merge the results together.", "in the cache, and the state group ids either missing", "# TODO: this hasn't been tuned yet 50000, ) self._state_group_members_cache", "so we have to assume that we may # have", "self.database_engine.get_next_state_group_id(txn) self.db.simple_insert_txn( txn, table=\"state_groups\", values={\"id\": state_group, \"room_id\": room_id, \"event_id\": event_id},", "and non-member caches ( non_member_state, incomplete_groups_nm, ) = yield self._get_state_for_groups_using_cache(", "curr_state = self._get_state_groups_from_groups_txn(txn, [sg]) curr_state = curr_state[sg] self.db.simple_delete_txn( txn, table=\"state_groups_state\",", "(which DictionaryCache will handle efficiently and fine) and the non-members", "events non_member_types = None else: non_member_types = non_member_filter.concrete_types() for group,", ") -> Tuple[Dict[int, StateMap[str]], Set[int]]: \"\"\"Gets the state at each", "groups for a given set of groups from the database,", "import StateBackgroundUpdateStore from synapse.storage.database import Database from synapse.storage.state import StateFilter", "use group(int): The state group to lookup state_filter (StateFilter): The", "long, as otherwise read performance degrades. if prev_group: is_in_db =", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "this group. # It's fine to use the sequence like", ") return {row[\"state_group\"]: row[\"prev_state_group\"] for row in rows} def purge_room_state(self,", "txn, room_id, state_groups_to_delete): logger.info( \"[purge] found %i state groups to", "for tracking non-member events, # and the other for tracking", "turn the state groups that reference to-be-deleted state # groups", "loaded # everything we need from the database anyway. state[group]", "the # complete list of event types we're wanting. 
for", "= non_member_filter.concrete_types() for group, group_state_dict in group_to_state_dict.items(): state_dict_members = {}", "# It's fine to use the sequence like this as", "as `current_state_ids`. current_state_ids (dict): The state to store. Map of", "events for the group, which negates the efficiency being sought.", "of state group to state map. \"\"\" member_filter, non_member_filter =", "overcomplicating DictionaryCache's API, we instead split the # state_group_cache into", "purge_unreferenced_state_groups( self, room_id: str, state_groups_to_delete ) -> defer.Deferred: \"\"\"Deletes no", "delete \"\"\" return self.db.runInteraction( \"purge_room_state\", self._purge_room_state_txn, room_id, state_groups_to_delete, ) def", "= DictionaryCache( \"*stateGroupMembersCache*\", 500000, ) @cached(max_entries=10000, iterable=True) def get_state_group_delta(self, state_group):", "be None. \"\"\" def _get_state_group_delta_txn(txn): prev_group = self.db.simple_select_one_onecol_txn( txn, table=\"state_group_edges\",", "The state group ID \"\"\" def _store_state_group_txn(txn): if current_state_ids is", "= [] def __len__(self): return len(self.delta_ids) if self.delta_ids else 0", "race with another update) current_member_state_ids = { s: ev for", "of entries in the cache, and the state group ids", "logger.info(\"[purge] removing %s from state_groups\", room_id) self.db.simple_delete_many_txn( txn, table=\"state_groups\", column=\"id\",", "# Now we look them up in the member and", "filter that are wildcards, so we have to assume that", "from twisted.internet import defer from synapse.api.constants import EventTypes from synapse.storage._base", "state_groups_to_delete ) -> defer.Deferred: \"\"\"Deletes no longer referenced state groups", "the specific members state cache. 
state_filter: The state filter used", "MAX_STATE_DELTA_HOPS: self.db.simple_insert_txn( txn, table=\"state_group_edges\", values={\"state_group\": state_group, \"prev_state_group\": prev_group}, ) self.db.simple_insert_many_txn(", "in the filter, but that's fine. member_types = member_filter.concrete_types() if", "We don't know if we fetched all the state keys", "only return a subset when there are wild # cards", "curr_state = curr_state[sg] self.db.simple_delete_txn( txn, table=\"state_groups_state\", keyvalues={\"state_group\": sg} ) self.db.simple_delete_txn(", "subset of users\", as DictionaryCache's API isn't # rich enough", "any wild cards, so `concrete_types()` returns the # complete list", "the member and non-member caches ( non_member_state, incomplete_groups_nm, ) =", "# The point of using a DictionaryCache is that it", "None else: # `concrete_types()` will only return a subset when", "a room from state tables Args: room_id (str): state_groups_to_delete (list[int]):", "= cache.get(group) if is_all or state_filter.is_full(): # Either we have", "state_dict_ids if not got_all: incomplete_groups.add(group) return results, incomplete_groups def _insert_into_cache(", "self._get_state_for_group_using_cache( cache, group, state_filter ) results[group] = state_dict_ids if not", "the same room). 
state_groups_to_delete (Collection[int]): Set of all state groups", "load all # state events for the group, which negates", "= yield self._get_state_for_groups_using_cache( groups, self._state_group_cache, state_filter=non_member_filter ) ( member_state, incomplete_groups_m,", "state_group_edges\", room_id) self.db.simple_delete_many_txn( txn, table=\"state_group_edges\", column=\"state_group\", iterable=state_groups_to_delete, keyvalues={}, ) #", "dict of state_group_id to state map of entries in the", "DictionaryCache's API, we instead split the # state_group_cache into two", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "get_previous_state_groups(self, state_groups): \"\"\"Fetch the previous groups of the given state", "state map. \"\"\" member_filter, non_member_filter = state_filter.get_member_split() # Now we", "the filter, but that's fine. member_types = member_filter.concrete_types() if non_member_filter.is_full():", "from the cache or incomplete. \"\"\" results = {} incomplete_groups", "= None else: non_member_types = non_member_filter.concrete_types() for group, group_state_dict in", "if not got_all: incomplete_groups.add(group) return results, incomplete_groups def _insert_into_cache( self,", "us whether we've gotten everything. return state_filter.filter_state(state_dict_ids), is_all # tracks", "the # event IDs for the state types in a", ") return state_group return self.db.runInteraction(\"store_state_group\", _store_state_group_txn) def purge_unreferenced_state_groups( self, room_id:", "non_member_state, incomplete_groups_nm, ) = yield self._get_state_for_groups_using_cache( groups, self._state_group_cache, state_filter=non_member_filter )", "= { row[\"state_group\"] for row in rows if row[\"state_group\"] not", "lookup in cache \"\"\" # We need to work out", "which is an entry in the cache for a given", "any state groups that reference them. 
Args: room_id: The room", "room_id, state_groups_to_delete): # first we have to delete the state", "2014-2016 OpenMarket Ltd # # Licensed under the Apache License,", "will handle efficiently and fine) and the non-members # cache", "Apache License, Version 2.0 (the \"License\"); # you may not", "collections import namedtuple from typing import Dict, Iterable, List, Set,", "cache a subset # of the state events for a", "state in Matrix (today) is member events. self._state_group_cache = DictionaryCache(", "filtering by type/state_key Args: groups: list of state groups for", "either express or implied. # See the License for the", "for which we want to get the state. cache: the", "speculatively load all # state events for the group, which", "= self.db.simple_select_one_onecol_txn( txn, table=\"state_groups\", keyvalues={\"id\": prev_group}, retcol=\"id\", allow_none=True, ) if", "logger.info(\"[purge] removing redundant state groups\") txn.executemany( \"DELETE FROM state_groups_state WHERE", "`current_state_ids`. current_state_ids (dict): The state to store. Map of (type,", "to be smaller than the members cache as the #", "room, optional. delta_ids (dict|None): The delta between state at `prev_group`", "update the result dict, by filtering out any extra #", "underestimate (e.g. when we have wild cards) member_filter, non_member_filter =", "database, filtering on types of state events. Args: groups: list", "self, room_id: str, state_groups_to_delete ) -> defer.Deferred: \"\"\"Deletes no longer", "None: # AFAIK, this can never happen raise Exception(\"current_state_ids cannot", "the database incomplete_groups = incomplete_groups_m | incomplete_groups_nm if not incomplete_groups:", "self._state_group_cache.sequence cache_sequence_m = self._state_group_members_cache.sequence # Help the cache hit ratio", "\"\"\"Given a state group try to return a previous group", "column=\"state_group\", iterable=state_groups_to_delete, keyvalues={}, ) # ... 
and the state groups", "delta between the old and the new. Returns: (prev_group, delta_ids),", "{} state_dict_non_members = {} for k, v in group_state_dict.items(): if", "Args: group_to_state_dict (dict): The new entries pulled from database. Map", "persist as a delta if we can, while also ensuring", "this subset of users\", as DictionaryCache's API isn't # rich", "for a given state group (i.e. a subset of the", "cache_sequence_nm = self._state_group_cache.sequence cache_sequence_m = self._state_group_members_cache.sequence # Help the cache", "we've gotten everything. return state_filter.filter_state(state_dict_ids), is_all # tracks whether any", "which the state was calculated room_id (str) prev_group (int|None): A", "types are missing from the cache missing_types = False if", "non-members # cache for all state (which DictionaryCache will similarly", "of deltas isn't tooo long, as otherwise read performance degrades.", "from the database, filtering on types of state events. Args:", "groups. Args: state_groups (Iterable[int]) Returns: Deferred[dict[int, int]]: mapping from state", "# member vs non-member caches. This should be as accurate", "= self.db.simple_select_list_txn( txn, table=\"state_groups_state\", keyvalues={\"state_group\": state_group}, retcols=(\"type\", \"state_key\", \"event_id\"), )", "we will have loaded # everything we need from the", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "DB for the # member vs non-member caches. This should", "state keys for the types in # the filter that", "Map from state group to state dict state_filter (StateFilter): The", "state from the database. Returns 2-tuple (`state_dict`, `got_all`). `got_all` is", "group in set(groups): state_dict_ids, got_all = self._get_state_for_group_using_cache( cache, group, state_filter", "# cards in the filter, but that's fine. 
member_types =", "hit ratio by expanding the filter a bit db_state_filter =", "cache since last lookup in cache \"\"\" # We need", "same room). state_groups_to_delete (Collection[int]): Set of all state groups to", "we have to delete the state groups states logger.info(\"[purge] removing", "-*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under", ") rows = self.db.simple_select_many_txn( txn, table=\"state_group_edges\", column=\"prev_state_group\", iterable=state_groups_to_delete, keyvalues={}, retcols=(\"state_group\",),", "cache_seq_num_members (int): Sequence number of member cache since last lookup", "fine) # and then just merge the results together. #", "as DictionaryCache's API isn't # rich enough to say \"please", "# is immutable. (If the map wasn't immutable then this", "# tracks whether any of our requested types are missing", "persist state with unpersisted prev_group: %r\" % (prev_group,) ) potential_hops", "reference them. Args: room_id: The room the state groups belong", "= StateFilter.all() ): \"\"\"Gets the state at each of a", "we want to get the state. state_filter: The state filter", "say \"please cache any of these fields, apart from this", "StateMap[str]], Set[int]]: \"\"\"Gets the state at each of a list", "possible, # but can be an underestimate (e.g. when we", "never happen raise Exception(\"current_state_ids cannot be None\") state_group = self.database_engine.get_next_state_group_id(txn)", "fine to use the sequence like this as the state", "group, state_filter): \"\"\"Checks if group is in cache. See `_get_state_for_groups`", "the database, filtering on types of state events. Args: groups:", "a # given dict which is an entry in the", "\"state_group\": state_group, \"room_id\": room_id, \"type\": key[0], \"state_key\": key[1], \"event_id\": state_id,", "used to fetch state from the database. 
Returns: Deferred[Dict[int, StateMap[str]]]:", "query state_filter: The state filter used to fetch state from", "self._state_group_cache = DictionaryCache( \"*stateGroupCache*\", # TODO: this hasn't been tuned", "= yield self._get_state_groups_from_groups( list(incomplete_groups), state_filter=db_state_filter ) # Now lets update", "The delta between state at `prev_group` and `current_state_ids`, if `prev_group`", "state events for the group, which negates the efficiency being", "halves - one for tracking non-member events, # and the", "specific members state cache. state_filter: The state filter used to", "class _GetStateGroupDelta( namedtuple(\"_GetStateGroupDelta\", (\"prev_group\", \"delta_ids\")) ): \"\"\"Return type of get_state_group_delta", "\"get_state_group_delta\", _get_state_group_delta_txn ) @defer.inlineCallbacks def _get_state_groups_from_groups( self, groups: List[int], state_filter:", "results from querying the database into the relevant cache. Args:", "the caches self._insert_into_cache( group_to_state_dict, db_state_filter, cache_seq_num_members=cache_sequence_m, cache_seq_num_non_members=cache_sequence_nm, ) # And", "\"room_id\": room_id, \"type\": key[0], \"state_key\": key[1], \"event_id\": state_id, } for", "avoid hammering # on the state_group* tables. # # The", "state groups, optionally filtering by type/state_key Args: groups: list of", "the state groups logger.info(\"[purge] removing %s from state_groups\", room_id) self.db.simple_delete_many_txn(", "((sg,) for sg in state_groups_to_delete), ) txn.executemany( \"DELETE FROM state_groups", "from state_groups\", room_id) self.db.simple_delete_many_txn( txn, table=\"state_groups\", column=\"id\", iterable=state_groups_to_delete, keyvalues={}, )", "use this file except in compliance with the License. #", "= True break return state_filter.filter_state(state_dict_ids), not missing_types @defer.inlineCallbacks def _get_state_for_groups(", "assigned state group. 
Args: event_id (str): The event ID for", "the state events for a given state group (i.e. a", "if k[0] == EventTypes.Member: state_dict_members[k] = v else: state_dict_non_members[k] =", "so `concrete_types()` returns the # complete list of event types", "store - for instance: \"give me all the state for", "type/state_key Args: groups: list of state groups for which we", "state_filter, ) results.update(res) return results def _get_state_for_group_using_cache(self, cache, group, state_filter):", "cache of group ids to state dicts which we will", "yield self.db.runInteraction( \"_get_state_groups_from_groups\", self._get_state_groups_from_groups_txn, chunk, state_filter, ) results.update(res) return results", "in groups: state[group].update(member_state[group]) # Now fetch any missing groups from", "a new set of state, returning a newly assigned state", "\"*stateGroupMembersCache*\", 500000, ) @cached(max_entries=10000, iterable=True) def get_state_group_delta(self, state_group): \"\"\"Given a", "to state map. \"\"\" member_filter, non_member_filter = state_filter.get_member_split() # Now", "key[0], \"state_key\": key[1], \"event_id\": state_id, } for key, state_id in", "old and the new. Returns: (prev_group, delta_ids), where both may", "groups: Iterable[int], cache: DictionaryCache, state_filter: StateFilter ) -> Tuple[Dict[int, StateMap[str]],", "state groups. \"\"\" def __init__(self, database: Database, db_conn, hs): super(StateGroupDataStore,", "member cache since last lookup in cache \"\"\" # We", "need to work out which types we've fetched from the", "optional. delta_ids (dict|None): The delta between state at `prev_group` and", "in the member and non-member caches ( non_member_state, incomplete_groups_nm, )", "# Help the cache hit ratio by expanding the filter", "= ?\", ((sg,) for sg in state_groups_to_delete), ) txn.executemany( \"DELETE", "group to state map. 
\"\"\" results = {} chunks =", "cards, so `concrete_types()` returns the # complete list of event", "id = ?\", ((sg,) for sg in state_groups_to_delete), ) @defer.inlineCallbacks", "between the old and the new. Returns: (prev_group, delta_ids), where", "self._insert_into_cache( group_to_state_dict, db_state_filter, cache_seq_num_members=cache_sequence_m, cache_seq_num_non_members=cache_sequence_nm, ) # And finally update", "values={\"id\": state_group, \"room_id\": room_id, \"event_id\": event_id}, ) # We persist", "defer from synapse.api.constants import EventTypes from synapse.storage._base import SQLBaseStore from", "results together. # # We size the non-members cache to", "in compliance with the License. # You may obtain a", "| incomplete_groups_nm if not incomplete_groups: return state cache_sequence_nm = self._state_group_cache.sequence", "incomplete_groups_nm, ) = yield self._get_state_for_groups_using_cache( groups, self._state_group_cache, state_filter=non_member_filter ) (", "# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd", "def _get_state_for_group_using_cache(self, cache, group, state_filter): \"\"\"Checks if group is in", "manner by querying both caches # separately and then merging", "software # distributed under the License is distributed on an", "querying the database into the relevant cache. 
Args: group_to_state_dict (dict):", "(str): state_groups_to_delete (list[int]): State groups to delete \"\"\" return self.db.runInteraction(", "yield self._get_state_groups_from_groups( list(incomplete_groups), state_filter=db_state_filter ) # Now lets update the", "chunk, state_filter, ) results.update(res) return results def _get_state_for_group_using_cache(self, cache, group,", "0 class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): \"\"\"A data store for fetching/storing state", "groups logger.info(\"[purge] removing %s from state_groups\", room_id) self.db.simple_delete_many_txn( txn, table=\"state_groups\",", "table=\"state_groups_state\", column=\"state_group\", iterable=state_groups_to_delete, keyvalues={}, ) # ... and the state", "you # would query the members cache for a specific", "== EventTypes.Member: state_dict_members[k] = v else: state_dict_non_members[k] = v self._state_group_members_cache.update(", "state_filter.return_expanded() group_to_state_dict = yield self._get_state_groups_from_groups( list(incomplete_groups), state_filter=db_state_filter ) # Now", "= self.database_engine.get_next_state_group_id(txn) self.db.simple_insert_txn( txn, table=\"state_groups\", values={\"id\": state_group, \"room_id\": room_id, \"event_id\":", "chunks = [groups[i : i + 100] for i in", "def __len__(self): return len(self.delta_ids) if self.delta_ids else 0 class StateGroupDataStore(StateBackgroundUpdateStore,", "Now lets update the caches self._insert_into_cache( group_to_state_dict, db_state_filter, cache_seq_num_members=cache_sequence_m, cache_seq_num_non_members=cache_sequence_nm,", "of state events. Args: groups: list of state group IDs", "don't know if we fetched all the state keys for", "want to get the state. state_filter: The state filter used", "group in groups: state[group].update(member_state[group]) # Now fetch any missing groups", "database. 
Map from state group to state dict state_filter (StateFilter):", "members, which requires this behaviour, # as without it the", "to query the DB for the missing state. \"\"\" is_all,", "be smaller than the members cache as the # vast", "# We persist as a delta if we can, while", "from synapse.api.constants import EventTypes from synapse.storage._base import SQLBaseStore from synapse.storage.data_stores.state.bg_updates", "efficiency being sought. # # Rather than overcomplicating DictionaryCache's API,", "EventTypes.Member } txn.call_after( self._state_group_cache.update, self._state_group_cache.sequence, key=state_group, value=dict(current_non_member_state_ids), ) return state_group", "( member_state, incomplete_groups_m, ) = yield self._get_state_for_groups_using_cache( groups, self._state_group_members_cache, state_filter=member_filter", "into two halves - one for tracking non-member events, #", "state from the cache, if False we need to query", "ev) in current_state_ids.items() if s[0] != EventTypes.Member } txn.call_after( self._state_group_cache.update,", "sg in state_groups_to_delete), ) txn.executemany( \"DELETE FROM state_groups WHERE id", "the result dict, by filtering out any extra # stuff", "this as the state group map # is immutable. (If", "the database. Returns: Tuple of dict of state_group_id to state", "self.db.runInteraction( \"purge_room_state\", self._purge_room_state_txn, room_id, state_groups_to_delete, ) def _purge_room_state_txn(self, txn, room_id,", "room_id, prev_group, delta_ids, current_state_ids ): \"\"\"Store a new set of", "return a subset when there are wild # cards in", "(`state_dict`, `got_all`). `got_all` is a bool indicating if we successfully", "have everything or want everything, either way # `is_all` tells", "StateMap[str]]]: Dict of state group to state map. 
\"\"\" member_filter,", "be None\") state_group = self.database_engine.get_next_state_group_id(txn) self.db.simple_insert_txn( txn, table=\"state_groups\", values={\"id\": state_group,", "(StateFilter): The state filter used to fetch state from the", "= curr_state[sg] self.db.simple_delete_txn( txn, table=\"state_groups_state\", keyvalues={\"state_group\": sg} ) self.db.simple_delete_txn( txn,", "DictionaryCache will handle efficiently and fine) and the non-members #", "self.db.simple_delete_txn( txn, table=\"state_groups_state\", keyvalues={\"state_group\": sg} ) self.db.simple_delete_txn( txn, table=\"state_group_edges\", keyvalues={\"state_group\":", "Now we look them up in the member and non-member", "`concrete_types()` returns the # complete list of event types we're", "groups states logger.info(\"[purge] removing %s from state_groups_state\", room_id) self.db.simple_delete_many_txn( txn,", "of all state groups to delete. \"\"\" return self.db.runInteraction( \"purge_unreferenced_state_groups\",", "i + 100] for i in range(0, len(groups), 100)] for", "with the License. # You may obtain a copy of", "= self._state_group_members_cache.sequence # Help the cache hit ratio by expanding", "def _get_state_group_delta_txn(txn): prev_group = self.db.simple_select_one_onecol_txn( txn, table=\"state_group_edges\", keyvalues={\"state_group\": state_group}, retcol=\"prev_state_group\",", "vs non-member caches. This should be as accurate as possible,", "new entries pulled from database. 
Map from state group to", "from synapse.storage.data_stores.state.bg_updates import StateBackgroundUpdateStore from synapse.storage.database import Database from synapse.storage.state", "state groups for which we want to get the state.", "groups: Iterable[int], state_filter: StateFilter = StateFilter.all() ): \"\"\"Gets the state", "def get_previous_state_groups(self, state_groups): \"\"\"Fetch the previous groups of the given", "are wild # cards in the filter, but that's fine.", "allow_none=True, ) if not is_in_db: raise Exception( \"Trying to persist", "in delta_ids.items() ], ) else: self.db.simple_insert_many_txn( txn, table=\"state_groups_state\", values=[ {", "state store used a single DictionaryCache to cache the #", "\"\"\"Returns the state groups for a given set of groups", "for key in state_filter.concrete_types(): if key not in state_dict_ids and", "desc=\"get_previous_state_groups\", ) return {row[\"state_group\"]: row[\"prev_state_group\"] for row in rows} def", "group for the room, optional. delta_ids (dict|None): The delta between", "for a specific subset of state keys # (which DictionaryCache", "The state filter used to fetch state from the database.", "cache-friendly manner by querying both caches # separately and then", "], ) else: self.db.simple_insert_many_txn( txn, table=\"state_groups_state\", values=[ { \"state_group\": state_group,", "any existing entries, as we will have loaded # everything", "cache since last lookup in cache cache_seq_num_non_members (int): Sequence number", "if row[\"state_group\"] not in state_groups_to_delete } logger.info( \"[purge] de-delta-ing %i", ") return self.db.runInteraction( \"get_state_group_delta\", _get_state_group_delta_txn ) @defer.inlineCallbacks def _get_state_groups_from_groups( self,", "and fine) and the non-members # cache for all state", "are missing from the cache missing_types = False if state_filter.has_wildcards():", "express or implied. 
# See the License for the specific", "except in compliance with the License. # You may obtain", "= True else: # There aren't any wild cards, so", "\"DELETE FROM state_groups_state WHERE state_group = ?\", ((sg,) for sg", "state[group] = state_filter.filter_state(group_state_dict) return state def _get_state_for_groups_using_cache( self, groups: Iterable[int],", "_get_state_group_delta_txn(txn): prev_group = self.db.simple_select_one_onecol_txn( txn, table=\"state_group_edges\", keyvalues={\"state_group\": state_group}, retcol=\"prev_state_group\", allow_none=True,", "self, event_id, room_id, prev_group, delta_ids, current_state_ids ): \"\"\"Store a new", "them up in the member and non-member caches ( non_member_state,", "format as `current_state_ids`. current_state_ids (dict): The state to store. Map", "= {} incomplete_groups = set() for group in set(groups): state_dict_ids,", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "state_id in current_state_ids.items() ], ) # Prefill the state group", "states logger.info(\"[purge] removing %s from state_groups_state\", room_id) self.db.simple_delete_many_txn( txn, table=\"state_groups_state\",", "import StateMap from synapse.util.caches.descriptors import cached from synapse.util.caches.dictionary_cache import DictionaryCache", "table=\"state_group_edges\", column=\"state_group\", iterable=state_groups_to_delete, keyvalues={}, ) # ... and the state", "state_group, \"room_id\": room_id, \"type\": key[0], \"state_key\": key[1], \"event_id\": state_id, }", "the result. So for the example above, you # would", "# # The point of using a DictionaryCache is that", "results = {} chunks = [groups[i : i + 100]", ") state = dict(non_member_state) for group in groups: state[group].update(member_state[group]) #", "database anyway. state[group] = state_filter.filter_state(group_state_dict) return state def _get_state_for_groups_using_cache( self,", "or the specific members state cache. 
state_filter: The state filter", "DictionaryCache to cache the # event IDs for the state", "keyvalues={\"id\": prev_group}, retcol=\"id\", allow_none=True, ) if not is_in_db: raise Exception(", "known_absent, state_dict_ids = cache.get(group) if is_all or state_filter.is_full(): # Either", "list of state groups, optionally filtering by type/state_key, querying from", "self.db.runInteraction( \"_get_state_groups_from_groups\", self._get_state_groups_from_groups_txn, chunk, state_filter, ) results.update(res) return results def", "group cache to use group(int): The state group to lookup", "CONDITIONS OF ANY KIND, either express or implied. # See", "delta_ids}, ) return self.db.runInteraction( \"get_state_group_delta\", _get_state_group_delta_txn ) @defer.inlineCallbacks def _get_state_groups_from_groups(", "Ltd # # Licensed under the Apache License, Version 2.0", "state_filter.concrete_types(): if key not in state_dict_ids and key not in", "database. Returns 2-tuple (`state_dict`, `got_all`). `got_all` is a bool indicating", "we have wild cards) member_filter, non_member_filter = state_filter.get_member_split() if member_filter.is_full():", "to (must all be in the same room). state_groups_to_delete (Collection[int]):", "missing_types = True else: # There aren't any wild cards,", "we may # have missed some. missing_types = True else:", "will similarly handle fine) # and then just merge the", "groups: list of state group IDs to query state_filter: The", "self, groups: List[int], state_filter: StateFilter ): \"\"\"Returns the state groups", "Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache", "a given state group ID). # # However, this poses", "Deferred[int]: The state group ID \"\"\" def _store_state_group_txn(txn): if current_state_ids", "using a DictionaryCache is that it can cache a subset", ") current_non_member_state_ids = { s: ev for (s, ev) in", "\"\"\"A data store for fetching/storing state groups. 
\"\"\" def __init__(self,", "is_in_db = self.db.simple_select_one_onecol_txn( txn, table=\"state_groups\", keyvalues={\"id\": prev_group}, retcol=\"id\", allow_none=True, )", "key[1], \"event_id\": state_id, } for key, state_id in current_state_ids.items() ],", "from the database. cache_seq_num_members (int): Sequence number of member cache", "state cache. state_filter: The state filter used to fetch state", "state_group = self.database_engine.get_next_state_group_id(txn) self.db.simple_insert_txn( txn, table=\"state_groups\", values={\"id\": state_group, \"room_id\": room_id,", "caches # separately and then merging the result. So for", "the state group edges logger.info(\"[purge] removing %s from state_group_edges\", room_id)", ") return _GetStateGroupDelta( prev_group, {(row[\"type\"], row[\"state_key\"]): row[\"event_id\"] for row in", "complicated queries # on the store - for instance: \"give", "members to this subset of users\", as DictionaryCache's API isn't", "We just replace any existing entries, as we will have", "# Either we have everything or want everything, either way", "chain # of deltas isn't tooo long, as otherwise read", "# the filter that are wildcards, so we have to", "value=state_dict_members, fetched_keys=member_types, ) self._state_group_cache.update( cache_seq_num_non_members, key=group, value=state_dict_non_members, fetched_keys=non_member_types, ) def", "the new. Returns: (prev_group, delta_ids), where both may be None.", "group ids either missing from the cache or incomplete. \"\"\"", "whether we've gotten everything. return state_filter.filter_state(state_dict_ids), is_all # tracks whether", "previous state group for the room, optional. delta_ids (dict|None): The", "but can be an underestimate (e.g. when we have wild", "`prev_group` was given. Same format as `current_state_ids`. 
current_state_ids (dict): The", "iterable=True) def get_state_group_delta(self, state_group): \"\"\"Given a state group try to", "subset of the keys for a # given dict which", "the # state_group_cache into two halves - one for tracking", "result dict, by filtering out any extra # stuff we", "group to avoid hammering # on the state_group* tables. #", "is_all # tracks whether any of our requested types are", "is None: # AFAIK, this can never happen raise Exception(\"current_state_ids", "cache.get(group) if is_all or state_filter.is_full(): # Either we have everything", "rows = self.db.simple_select_many_txn( txn, table=\"state_group_edges\", column=\"prev_state_group\", iterable=state_groups_to_delete, keyvalues={}, retcols=(\"state_group\",), )", "len(remaining_state_groups), ) # Now we turn the state groups that", "from typing import Dict, Iterable, List, Set, Tuple from twisted.internet", "group IDs to query state_filter: The state filter used to", "DictionaryCache is that it can cache a subset # of", "the example above, you # would query the members cache", "the database. Returns: Deferred[Dict[int, StateMap[str]]]: Dict of state group to", "delete the state groups states logger.info(\"[purge] removing %s from state_groups_state\",", "of get_state_group_delta that implements __len__, which lets us use the", "groups from the database, filtering on types of state events.", "events. Args: groups: list of state group IDs to query", "lookup in cache cache_seq_num_non_members (int): Sequence number of member cache", "WHERE state_group = ?\", ((sg,) for sg in state_groups_to_delete), )", "def _insert_into_cache( self, group_to_state_dict, state_filter, cache_seq_num_members, cache_seq_num_non_members, ): \"\"\"Inserts results", "?\", ((sg,) for sg in state_groups_to_delete), ) txn.executemany( \"DELETE FROM", "# state_group_cache into two halves - one for tracking non-member", "state cache or the specific members state cache. 
state_filter: The", "the efficiency being sought. # # Rather than overcomplicating DictionaryCache's", "in group_to_state_dict.items(): state_dict_members = {} state_dict_non_members = {} for k,", "the state groups that reference to-be-deleted state # groups to", "): \"\"\"Store a new set of state, returning a newly", "de-deltas any state groups that reference them. Args: room_id: The", "list of state group IDs to query state_filter: The state", "like this as the state group map # is immutable.", "yield self._get_state_for_groups_using_cache( groups, self._state_group_cache, state_filter=non_member_filter ) ( member_state, incomplete_groups_m, )", "return state_filter.filter_state(state_dict_ids), is_all # tracks whether any of our requested", "at each of a list of state groups, optionally filtering", "delta versions. for sg in remaining_state_groups: logger.info(\"[purge] de-delta-ing remaining state", "sg in state_groups_to_delete), ) @defer.inlineCallbacks def get_previous_state_groups(self, state_groups): \"\"\"Fetch the", "state_filter: StateFilter ): \"\"\"Returns the state groups for a given", "and the state group edges logger.info(\"[purge] removing %s from state_group_edges\",", "no choice but to speculatively load all # state events", "fetch state from the database. Returns: Deferred[Dict[int, StateMap[str]]]: Dict of", "have loaded # everything we need from the database anyway.", "# ... and the state groups logger.info(\"[purge] removing %s from", "incomplete_groups_m, ) = yield self._get_state_for_groups_using_cache( groups, self._state_group_members_cache, state_filter=member_filter ) state", "room_id: The room the state groups belong to (must all", "state_filter.get_member_split() if member_filter.is_full(): # We fetched all member events member_types", "state filter used to fetch state from the database. Returns", "# but can be an underestimate (e.g. 
when we have", "state groups\") txn.executemany( \"DELETE FROM state_groups_state WHERE state_group = ?\",", "lazy loading # queries can be made in a cache-friendly", "Deferred[Dict[int, StateMap[str]]]: Dict of state group to state map. \"\"\"", "the state group caches with this group. # It's fine", "anyway. state[group] = state_filter.filter_state(group_state_dict) return state def _get_state_for_groups_using_cache( self, groups:", "import cached from synapse.util.caches.dictionary_cache import DictionaryCache logger = logging.getLogger(__name__) MAX_STATE_DELTA_HOPS", "\"\"\"Deletes no longer referenced state groups and de-deltas any state", "\"give me all the state for this group, but #", "state_group}, retcol=\"prev_state_group\", allow_none=True, ) if not prev_group: return _GetStateGroupDelta(None, None)", "from a specific cache. Args: groups: list of state groups", "keyvalues={}, ) # ... and the state groups logger.info(\"[purge] removing", "we can, while also ensuring the chain # of deltas", "state map. \"\"\" results = {} chunks = [groups[i :", "None. \"\"\" def _get_state_group_delta_txn(txn): prev_group = self.db.simple_select_one_onecol_txn( txn, table=\"state_group_edges\", keyvalues={\"state_group\":", "for the missing state. 
\"\"\" is_all, known_absent, state_dict_ids = cache.get(group)", "self.db.simple_insert_txn( txn, table=\"state_groups\", values={\"id\": state_group, \"room_id\": room_id, \"event_id\": event_id}, )", "in a given state group to avoid hammering # on", "limit members to this subset of users\", as DictionaryCache's API", "for group in groups: state[group].update(member_state[group]) # Now fetch any missing", "in state_groups_to_delete), ) txn.executemany( \"DELETE FROM state_groups WHERE id =", "group_state_dict.items(): if k[0] == EventTypes.Member: state_dict_members[k] = v else: state_dict_non_members[k]", "instead split the # state_group_cache into two halves - one", "row[\"state_key\"]): row[\"event_id\"] for row in delta_ids}, ) return self.db.runInteraction( \"get_state_group_delta\",", "key not in state_dict_ids and key not in known_absent: missing_types", "have to assume that we may # have missed some.", "given state group to avoid hammering # on the state_group*", "of state keys # (which DictionaryCache will handle efficiently and", "groups from the database incomplete_groups = incomplete_groups_m | incomplete_groups_nm if", "the state for this group, but # limit members to", "state_groups_to_delete), ) txn.executemany( \"DELETE FROM state_groups WHERE id = ?\",", "behaviour, # as without it the cache has no choice", "%s from state_group_edges\", room_id) self.db.simple_delete_many_txn( txn, table=\"state_group_edges\", column=\"state_group\", iterable=state_groups_to_delete, keyvalues={},", "state_id, } for key, state_id in curr_state.items() ], ) logger.info(\"[purge]", "in current_state_ids.items() if s[0] != EventTypes.Member } txn.call_after( self._state_group_cache.update, self._state_group_cache.sequence,", "retrieved all requests state from the cache, if False we", ") logger.info(\"[purge] removing redundant state groups\") txn.executemany( \"DELETE FROM state_groups_state", "DictionaryCache will similarly handle fine) # and then just merge", "# and 
the other for tracking member_events. This means that", "to this subset of users\", as DictionaryCache's API isn't #", "in the cache for a given state group ID). #", "DictionaryCache( \"*stateGroupMembersCache*\", 500000, ) @cached(max_entries=10000, iterable=True) def get_state_group_delta(self, state_group): \"\"\"Given", "with another update) current_member_state_ids = { s: ev for (s,", "Set of all state groups to delete. \"\"\" return self.db.runInteraction(", "import StateFilter from synapse.types import StateMap from synapse.util.caches.descriptors import cached", "ID for which the state was calculated room_id (str) prev_group", ") else: self.db.simple_insert_many_txn( txn, table=\"state_groups_state\", values=[ { \"state_group\": state_group, \"room_id\":", "# race with another update) current_member_state_ids = { s: ev", "hs) # Originally the state store used a single DictionaryCache", "in a cache-friendly manner by querying both caches # separately", "(prev_group,) ) potential_hops = self._count_state_group_hops_txn(txn, prev_group) if prev_group and potential_hops", "room_id (str): state_groups_to_delete (list[int]): State groups to delete \"\"\" return", "group to lookup state_filter (StateFilter): The state filter used to", "of these fields, apart from this subset\". # This is", "both may be None. \"\"\" def _get_state_group_delta_txn(txn): prev_group = self.db.simple_select_one_onecol_txn(", "# vast majority of state in Matrix (today) is member", "StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): \"\"\"A data store for fetching/storing state groups. 
\"\"\"", "len(state_groups_to_delete) ) rows = self.db.simple_select_many_txn( txn, table=\"state_group_edges\", column=\"prev_state_group\", iterable=state_groups_to_delete, keyvalues={},", "if prev_group and potential_hops < MAX_STATE_DELTA_HOPS: self.db.simple_insert_txn( txn, table=\"state_group_edges\", values={\"state_group\":", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "state_group = ?\", ((sg,) for sg in state_groups_to_delete), ) txn.executemany(", "groups, optionally filtering by type/state_key Args: groups: list of state", "group_to_state_dict (dict): The new entries pulled from database. Map from", "%s from state_groups\", room_id) self.db.simple_delete_many_txn( txn, table=\"state_groups\", column=\"id\", iterable=state_groups_to_delete, keyvalues={},", "row[\"state_group\"] not in state_groups_to_delete } logger.info( \"[purge] de-delta-ing %i remaining", "to state dicts which we will pass through - either", "state group IDs to query state_filter: The state filter used", "self.db.simple_select_many_txn( txn, table=\"state_group_edges\", column=\"prev_state_group\", iterable=state_groups_to_delete, keyvalues={}, retcols=(\"state_group\",), ) remaining_state_groups =", "%i state groups to delete\", len(state_groups_to_delete) ) rows = self.db.simple_select_many_txn(", "member events. self._state_group_cache = DictionaryCache( \"*stateGroupCache*\", # TODO: this hasn't", "of the database. 
for group, group_state_dict in group_to_state_dict.items(): # We", "self.db.simple_select_many_batch( table=\"state_group_edges\", column=\"prev_state_group\", iterable=state_groups, keyvalues={}, retcols=(\"prev_state_group\", \"state_group\"), desc=\"get_previous_state_groups\", ) return", "for the state types in a given state group to", "this hasn't been tuned yet 50000, ) self._state_group_members_cache = DictionaryCache(", "%i remaining state groups\", len(remaining_state_groups), ) # Now we turn", "store_state_group( self, event_id, room_id, prev_group, delta_ids, current_state_ids ): \"\"\"Store a", "cache, if False we need to query the DB for", "state from the database. Returns: Deferred[Dict[int, StateMap[str]]]: Dict of state", "a subset of the keys for a # given dict", "if we fetched all the state keys for the types", "wasn't immutable then this prefill could # race with another", "group ID \"\"\" def _store_state_group_txn(txn): if current_state_ids is None: #", "if not is_in_db: raise Exception( \"Trying to persist state with", "k, v in group_state_dict.items(): if k[0] == EventTypes.Member: state_dict_members[k] =", "Version 2.0 (the \"License\"); # you may not use this", "cache_seq_num_members, cache_seq_num_non_members, ): \"\"\"Inserts results from querying the database into", "table=\"state_group_edges\", keyvalues={\"state_group\": state_group}, retcol=\"prev_state_group\", allow_none=True, ) if not prev_group: return", "could # race with another update) current_member_state_ids = { s:", "it the cache has no choice but to speculatively load", "state_filter: StateFilter = StateFilter.all() ): \"\"\"Gets the state at each", "EventTypes from synapse.storage._base import SQLBaseStore from synapse.storage.data_stores.state.bg_updates import StateBackgroundUpdateStore from", "of the given state groups. Args: state_groups (Iterable[int]) Returns: Deferred[dict[int,", "then merging the result. 
So for the example above, you", "import DictionaryCache logger = logging.getLogger(__name__) MAX_STATE_DELTA_HOPS = 100 class _GetStateGroupDelta(", "sg in remaining_state_groups: logger.info(\"[purge] de-delta-ing remaining state group %s\", sg)", "hs): super(StateGroupDataStore, self).__init__(database, db_conn, hs) # Originally the state store", "of the keys for a # given dict which is", "but to speculatively load all # state events for the", "merging the result. So for the example above, you #", "self, groups: Iterable[int], cache: DictionaryCache, state_filter: StateFilter ) -> Tuple[Dict[int,", "typing import Dict, Iterable, List, Set, Tuple from twisted.internet import", "non delta versions. for sg in remaining_state_groups: logger.info(\"[purge] de-delta-ing remaining", "= {} for k, v in group_state_dict.items(): if k[0] ==", "[] def __len__(self): return len(self.delta_ids) if self.delta_ids else 0 class", "state map of entries in the cache, and the state", "_insert_into_cache( self, group_to_state_dict, state_filter, cache_seq_num_members, cache_seq_num_non_members, ): \"\"\"Inserts results from", "... and the state group edges logger.info(\"[purge] removing %s from", "by applicable law or agreed to in writing, software #", "to assume that we may # have missed some. missing_types", "efficiently and fine) and the non-members # cache for all", "types we're wanting. for key in state_filter.concrete_types(): if key not", "member_types = None else: # `concrete_types()` will only return a", "filter a bit db_state_filter = state_filter.return_expanded() group_to_state_dict = yield self._get_state_groups_from_groups(", "_store_state_group_txn) def purge_unreferenced_state_groups( self, room_id: str, state_groups_to_delete ) -> defer.Deferred:", "everything or want everything, either way # `is_all` tells us", "to fetch state from the database. 
Returns: Deferred[Dict[int, StateMap[str]]]: Dict", "in current_state_ids.items() if s[0] == EventTypes.Member } txn.call_after( self._state_group_members_cache.update, self._state_group_members_cache.sequence,", "logger.info(\"[purge] de-delta-ing remaining state group %s\", sg) curr_state = self._get_state_groups_from_groups_txn(txn,", "member_types = member_filter.concrete_types() if non_member_filter.is_full(): # We fetched all non", "state_groups): \"\"\"Fetch the previous groups of the given state groups.", "either missing from the cache or incomplete. \"\"\" results =", "one for tracking non-member events, # and the other for", "group, group_state_dict in group_to_state_dict.items(): # We just replace any existing", "for group in set(groups): state_dict_ids, got_all = self._get_state_for_group_using_cache( cache, group,", "keyvalues={}, retcols=(\"prev_state_group\", \"state_group\"), desc=\"get_previous_state_groups\", ) return {row[\"state_group\"]: row[\"prev_state_group\"] for row", "prev_group: %r\" % (prev_group,) ) potential_hops = self._count_state_group_hops_txn(txn, prev_group) if", "logger.info( \"[purge] found %i state groups to delete\", len(state_groups_to_delete) )", "either way # `is_all` tells us whether we've gotten everything.", "by querying both caches # separately and then merging the", "bit db_state_filter = state_filter.return_expanded() group_to_state_dict = yield self._get_state_groups_from_groups( list(incomplete_groups), state_filter=db_state_filter", "for chunk in chunks: res = yield self.db.runInteraction( \"_get_state_groups_from_groups\", self._get_state_groups_from_groups_txn,", "each of a list of state groups, optionally filtering by", "cache hit ratio by expanding the filter a bit db_state_filter", "Now we turn the state groups that reference to-be-deleted state", "missing from the cache missing_types = False if state_filter.has_wildcards(): #", "{row[\"state_group\"]: row[\"prev_state_group\"] for row in rows} def 
purge_room_state(self, room_id, state_groups_to_delete):", "is in cache. See `_get_state_for_groups` Args: cache(DictionaryCache): the state group", "applicable law or agreed to in writing, software # distributed", "with unpersisted prev_group: %r\" % (prev_group,) ) potential_hops = self._count_state_group_hops_txn(txn,", "from synapse.storage.database import Database from synapse.storage.state import StateFilter from synapse.types", "state_filter: The state filter used to fetch state from the", "we pulled out of the database. for group, group_state_dict in", "to work out which types we've fetched from the DB", "single DictionaryCache to cache the # event IDs for the", "\"\"\"Return type of get_state_group_delta that implements __len__, which lets us", "state_groups_to_delete): \"\"\"Deletes all record of a room from state tables", "to query state_filter: The state filter used to fetch state", "types in a given state group to avoid hammering #", "\"\"\"Checks if group is in cache. See `_get_state_for_groups` Args: cache(DictionaryCache):", "in cache. See `_get_state_for_groups` Args: cache(DictionaryCache): the state group cache", "dict state_filter (StateFilter): The state filter used to fetch state", "db_conn, hs) # Originally the state store used a single", "result. So for the example above, you # would query", "read performance degrades. if prev_group: is_in_db = self.db.simple_select_one_onecol_txn( txn, table=\"state_groups\",", "limitations under the License. 
import logging from collections import namedtuple", "Args: room_id (str): state_groups_to_delete (list[int]): State groups to delete \"\"\"", "lazy loading members, which requires this behaviour, # as without", "potential_hops < MAX_STATE_DELTA_HOPS: self.db.simple_insert_txn( txn, table=\"state_group_edges\", values={\"state_group\": state_group, \"prev_state_group\": prev_group},", "StateFilter.all() ): \"\"\"Gets the state at each of a list", ") ( member_state, incomplete_groups_m, ) = yield self._get_state_for_groups_using_cache( groups, self._state_group_members_cache,", "for tracking member_events. This means that lazy loading # queries", "_get_state_for_groups( self, groups: Iterable[int], state_filter: StateFilter = StateFilter.all() ): \"\"\"Gets", "for the group, which negates the efficiency being sought. #", "state group edges logger.info(\"[purge] removing %s from state_group_edges\", room_id) self.db.simple_delete_many_txn(", "# You may obtain a copy of the License at", "get the state. cache: the cache of group ids to", "no longer referenced state groups and de-deltas any state groups", "from the database. Returns 2-tuple (`state_dict`, `got_all`). `got_all` is a", "StateMap[str]]]: Dict of state group to state map. \"\"\" results", "room_id) self.db.simple_delete_many_txn( txn, table=\"state_groups_state\", column=\"state_group\", iterable=state_groups_to_delete, keyvalues={}, ) # ...", "we need to query the DB for the missing state.", "self.db.simple_insert_many_txn( txn, table=\"state_groups_state\", values=[ { \"state_group\": sg, \"room_id\": room_id, \"type\":", "((sg,) for sg in state_groups_to_delete), ) @defer.inlineCallbacks def get_previous_state_groups(self, state_groups):", "to fetch state from the database. Returns 2-tuple (`state_dict`, `got_all`).", "a given state group to avoid hammering # on the", "cache. 
state_filter: The state filter used to fetch state from", "@defer.inlineCallbacks def get_previous_state_groups(self, state_groups): \"\"\"Fetch the previous groups of the", "import logging from collections import namedtuple from typing import Dict,", "state from the database. Returns: Tuple of dict of state_group_id", "(list[int]): State groups to delete \"\"\" return self.db.runInteraction( \"purge_room_state\", self._purge_room_state_txn,", "(\"prev_group\", \"delta_ids\")) ): \"\"\"Return type of get_state_group_delta that implements __len__,", "logging.getLogger(__name__) MAX_STATE_DELTA_HOPS = 100 class _GetStateGroupDelta( namedtuple(\"_GetStateGroupDelta\", (\"prev_group\", \"delta_ids\")) ):", "types we've fetched from the DB for the # member", "self._purge_room_state_txn, room_id, state_groups_to_delete, ) def _purge_room_state_txn(self, txn, room_id, state_groups_to_delete): #", "pulled from database. Map from state group to state dict", "column=\"prev_state_group\", iterable=state_groups_to_delete, keyvalues={}, retcols=(\"state_group\",), ) remaining_state_groups = { row[\"state_group\"] for", "\"\"\" # We need to work out which types we've", "non-member caches ( non_member_state, incomplete_groups_nm, ) = yield self._get_state_for_groups_using_cache( groups,", "requests state from the cache, if False we need to", "txn, table=\"state_groups_state\", values=[ { \"state_group\": sg, \"room_id\": room_id, \"type\": key[0],", "out of the database. for group, group_state_dict in group_to_state_dict.items(): #", "if state_filter.has_wildcards(): # We don't know if we fetched all", "from the database. 
Returns: Tuple of dict of state_group_id to", "out which types we've fetched from the DB for the", "database incomplete_groups = incomplete_groups_m | incomplete_groups_nm if not incomplete_groups: return", "state_group return self.db.runInteraction(\"store_state_group\", _store_state_group_txn) def purge_unreferenced_state_groups( self, room_id: str, state_groups_to_delete", "(str): The event ID for which the state was calculated", "(must all be in the same room). state_groups_to_delete (Collection[int]): Set", "group_to_state_dict, db_state_filter, cache_seq_num_members=cache_sequence_m, cache_seq_num_non_members=cache_sequence_nm, ) # And finally update the", ") = yield self._get_state_for_groups_using_cache( groups, self._state_group_cache, state_filter=non_member_filter ) ( member_state,", "state group to state map. \"\"\" member_filter, non_member_filter = state_filter.get_member_split()", "coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # #", "keyvalues={}, retcols=(\"state_group\",), ) remaining_state_groups = { row[\"state_group\"] for row in", "than overcomplicating DictionaryCache's API, we instead split the # state_group_cache", "_purge_room_state_txn(self, txn, room_id, state_groups_to_delete): # first we have to delete", "separately and then merging the result. So for the example", "from this subset\". # This is problematic when lazy loading", "between state at `prev_group` and `current_state_ids`, if `prev_group` was given.", "state group to lookup state_filter (StateFilter): The state filter used", "\"event_id\": state_id, } for key, state_id in delta_ids.items() ], )", "\"License\"); # you may not use this file except in", "fetching/storing state groups. \"\"\" def __init__(self, database: Database, db_conn, hs):", "used to fetch state from the database. Returns 2-tuple (`state_dict`,", "\"event_id\": event_id}, ) # We persist as a delta if", "state to store. Map of (type, state_key) to event_id. 
Returns:", "row[\"event_id\"] for row in delta_ids}, ) return self.db.runInteraction( \"get_state_group_delta\", _get_state_group_delta_txn", "we have to assume that we may # have missed", "cache_seq_num_non_members (int): Sequence number of member cache since last lookup", "Returns 2-tuple (`state_dict`, `got_all`). `got_all` is a bool indicating if", "fetched_keys=member_types, ) self._state_group_cache.update( cache_seq_num_non_members, key=group, value=state_dict_non_members, fetched_keys=non_member_types, ) def store_state_group(", "state_groups_state\", room_id) self.db.simple_delete_many_txn( txn, table=\"state_groups_state\", column=\"state_group\", iterable=state_groups_to_delete, keyvalues={}, ) #", "member_filter, non_member_filter = state_filter.get_member_split() # Now we look them up", "def _get_state_groups_from_groups( self, groups: List[int], state_filter: StateFilter ): \"\"\"Returns the", "wild cards) member_filter, non_member_filter = state_filter.get_member_split() if member_filter.is_full(): # We", "StateBackgroundUpdateStore from synapse.storage.database import Database from synapse.storage.state import StateFilter from", "in cache \"\"\" # We need to work out which", "\"state_group\"), desc=\"get_previous_state_groups\", ) return {row[\"state_group\"]: row[\"prev_state_group\"] for row in rows}", "\"\"\"Gets the state at each of a list of state", "to lookup state_filter (StateFilter): The state filter used to fetch", "# We need to work out which types we've fetched", "potential_hops = self._count_state_group_hops_txn(txn, prev_group) if prev_group and potential_hops < MAX_STATE_DELTA_HOPS:", "event_id, room_id, prev_group, delta_ids, current_state_ids ): \"\"\"Store a new set", "namedtuple from typing import Dict, Iterable, List, Set, Tuple from", "table=\"state_groups_state\", values=[ { \"state_group\": sg, \"room_id\": room_id, \"type\": key[0], \"state_key\":", "state_groups_to_delete } logger.info( \"[purge] de-delta-ing %i remaining 
state groups\", len(remaining_state_groups),", "state groups, optionally filtering by type/state_key, querying from a specific", "group ID). # # However, this poses problems when performing", "# and then just merge the results together. # #", "remaining_state_groups = { row[\"state_group\"] for row in rows if row[\"state_group\"]", "types of state events. Args: groups: list of state group", "if prev_group: is_in_db = self.db.simple_select_one_onecol_txn( txn, table=\"state_groups\", keyvalues={\"id\": prev_group}, retcol=\"id\",", "txn, table=\"state_group_edges\", values={\"state_group\": state_group, \"prev_state_group\": prev_group}, ) self.db.simple_insert_many_txn( txn, table=\"state_groups_state\",", "for the room, optional. delta_ids (dict|None): The delta between state", "cache for a given state group ID). # # However,", "%s\", sg) curr_state = self._get_state_groups_from_groups_txn(txn, [sg]) curr_state = curr_state[sg] self.db.simple_delete_txn(" ]
[ "[ migrations.CreateModel( name='Variation', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name',", "from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies", "('name', models.CharField(max_length=50)), ('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Item')), ], options={ 'unique_together': {('item', 'name')},", "models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Item')), ], options={ 'unique_together': {('item', 'name')}, }, ), migrations.CreateModel(", "{('item', 'name')}, }, ), migrations.CreateModel( name='ItemVariation', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('value', models.CharField(max_length=50)), ('attachment', models.ImageField(upload_to='variations/')),", "models.CharField(max_length=50)), ('attachment', models.ImageField(upload_to='variations/')), ('variation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Variation')), ], options={ 'unique_together': {('variation',", "name='ItemVariation', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('value', models.CharField(max_length=50)), ('attachment',", "('value', models.CharField(max_length=50)), ('attachment', models.ImageField(upload_to='variations/')), ('variation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Variation')), ], options={ 'unique_together':", "] operations = [ migrations.CreateModel( name='Variation', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "verbose_name='ID')), ('name', models.CharField(max_length=50)), ('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Item')), ], options={ 
'unique_together': {('item',", "12:24 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration):", "= [ ('core', '0010_auto_20200130_1135'), ] operations = [ migrations.CreateModel( name='Variation',", "migrations.CreateModel( name='ItemVariation', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('value', models.CharField(max_length=50)),", "class Migration(migrations.Migration): dependencies = [ ('core', '0010_auto_20200130_1135'), ] operations =", "primary_key=True, serialize=False, verbose_name='ID')), ('value', models.CharField(max_length=50)), ('attachment', models.ImageField(upload_to='variations/')), ('variation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Variation')),", "django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('core', '0010_auto_20200130_1135'), ] operations", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50)), ('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "options={ 'unique_together': {('item', 'name')}, }, ), migrations.CreateModel( name='ItemVariation', fields=[ ('id',", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('value', models.CharField(max_length=50)), ('attachment', models.ImageField(upload_to='variations/')), ('variation',", "verbose_name='ID')), ('value', models.CharField(max_length=50)), ('attachment', models.ImageField(upload_to='variations/')), ('variation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Variation')), ], options={", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50)), ('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, 
to='core.Item')),", "models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('core', '0010_auto_20200130_1135'),", "('core', '0010_auto_20200130_1135'), ] operations = [ migrations.CreateModel( name='Variation', fields=[ ('id',", "# Generated by Django 2.2.6 on 2020-02-09 12:24 from django.db", "serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50)), ('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Item')), ], options={ 'unique_together':", "Generated by Django 2.2.6 on 2020-02-09 12:24 from django.db import", "'name')}, }, ), migrations.CreateModel( name='ItemVariation', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('core', '0010_auto_20200130_1135'), ]", "primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50)), ('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Item')), ], options={", "models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Variation')), ], options={ 'unique_together': {('variation', 'value')}, }, ), ]", "], options={ 'unique_together': {('item', 'name')}, }, ), migrations.CreateModel( name='ItemVariation', fields=[", "migrations.CreateModel( name='Variation', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50)),", "}, ), migrations.CreateModel( name='ItemVariation', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "('variation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Variation')), ], options={ 'unique_together': {('variation', 'value')}, }, ),", "django.db import migrations, models import django.db.models.deletion class 
Migration(migrations.Migration): dependencies =", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('value', models.CharField(max_length=50)), ('attachment', models.ImageField(upload_to='variations/')), ('variation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "by Django 2.2.6 on 2020-02-09 12:24 from django.db import migrations,", "import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [", "[ ('core', '0010_auto_20200130_1135'), ] operations = [ migrations.CreateModel( name='Variation', fields=[", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50)), ('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Item')), ],", "Migration(migrations.Migration): dependencies = [ ('core', '0010_auto_20200130_1135'), ] operations = [", "name='Variation', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50)), ('item',", "models.CharField(max_length=50)), ('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Item')), ], options={ 'unique_together': {('item', 'name')}, },", "'unique_together': {('item', 'name')}, }, ), migrations.CreateModel( name='ItemVariation', fields=[ ('id', models.AutoField(auto_created=True,", "models.ImageField(upload_to='variations/')), ('variation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Variation')), ], options={ 'unique_together': {('variation', 'value')}, },", "Django 2.2.6 on 2020-02-09 12:24 from django.db import migrations, models", "('attachment', models.ImageField(upload_to='variations/')), ('variation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Variation')), ], options={ 'unique_together': {('variation', 'value')},", "dependencies = [ 
('core', '0010_auto_20200130_1135'), ] operations = [ migrations.CreateModel(", "('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Item')), ], options={ 'unique_together': {('item', 'name')}, }, ),", "on 2020-02-09 12:24 from django.db import migrations, models import django.db.models.deletion", "operations = [ migrations.CreateModel( name='Variation', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "to='core.Item')), ], options={ 'unique_together': {('item', 'name')}, }, ), migrations.CreateModel( name='ItemVariation',", "2.2.6 on 2020-02-09 12:24 from django.db import migrations, models import", "2020-02-09 12:24 from django.db import migrations, models import django.db.models.deletion class", "), migrations.CreateModel( name='ItemVariation', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('value',", "migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('core',", "= [ migrations.CreateModel( name='Variation', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "serialize=False, verbose_name='ID')), ('value', models.CharField(max_length=50)), ('attachment', models.ImageField(upload_to='variations/')), ('variation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Variation')), ],", "'0010_auto_20200130_1135'), ] operations = [ migrations.CreateModel( name='Variation', fields=[ ('id', models.AutoField(auto_created=True," ]
[ "feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5}) print(sess.run(accuracy, feed_dict={x: data.test.images, y_:", "tf from tensorflow.examples.tutorials.mnist import input_data data = input_data.read_data_sets(\"data/dataset/\", one_hot=True) #", "y_: data.test.labels, keep_prob: 1.0})) path = saver.save( sess, os.path.join(os.path.dirname(__file__), 'data',", "keep_prob: 0.5}) print(sess.run(accuracy, feed_dict={x: data.test.images, y_: data.test.labels, keep_prob: 1.0})) path", "tf.placeholder(tf.float32) y, variables = model.convolutional(x, keep_prob) # train y_ =", "tf.train.Saver(variables) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for i in range(20000):", "keep_prob) # train y_ = tf.placeholder(tf.float32, [None, 10]) cross_entropy =", "tf.placeholder(tf.float32, [None, 10]) cross_entropy = -tf.reduce_sum(y_ * tf.log(y)) train_step =", "tf.argmax(y_, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) saver = tf.train.Saver(variables) with", "= tf.placeholder(tf.float32) y, variables = model.convolutional(x, keep_prob) # train y_", "print(sess.run(accuracy, feed_dict={x: data.test.images, y_: data.test.labels, keep_prob: 1.0})) path = saver.save(", "[None, 10]) cross_entropy = -tf.reduce_sum(y_ * tf.log(y)) train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)", "y_: batch[1], keep_prob: 1.0}) print(\"step %d, training accuracy %g\" %", "if i % 100 == 0: train_accuracy = accuracy.eval(feed_dict={x: batch[0],", "i % 100 == 0: train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_:", "as tf from tensorflow.examples.tutorials.mnist import input_data data = input_data.read_data_sets(\"data/dataset/\", one_hot=True)", "batch = data.train.next_batch(50) if i % 100 == 0: train_accuracy", "keep_prob = tf.placeholder(tf.float32) y, variables = model.convolutional(x, keep_prob) # train", "1.0}) print(\"step %d, training accuracy %g\" % (i, train_accuracy)) sess.run(train_step,", "(i, 
train_accuracy)) sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5}) print(sess.run(accuracy,", "train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) accuracy", "= accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0}) print(\"step %d, training", "% (i, train_accuracy)) sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})", "as sess: sess.run(tf.global_variables_initializer()) for i in range(20000): batch = data.train.next_batch(50)", "0: train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0}) print(\"step", "tf.float32)) saver = tf.train.Saver(variables) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for", "tf.placeholder(tf.float32, [None, 784]) keep_prob = tf.placeholder(tf.float32) y, variables = model.convolutional(x,", "tf.variable_scope(\"convolutional\"): x = tf.placeholder(tf.float32, [None, 784]) keep_prob = tf.placeholder(tf.float32) y,", "= tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) saver = tf.train.Saver(variables) with tf.Session() as sess:", "print(\"step %d, training accuracy %g\" % (i, train_accuracy)) sess.run(train_step, feed_dict={x:", "1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) saver = tf.train.Saver(variables) with tf.Session()", "batch[1], keep_prob: 0.5}) print(sess.run(accuracy, feed_dict={x: data.test.images, y_: data.test.labels, keep_prob: 1.0}))", "[None, 784]) keep_prob = tf.placeholder(tf.float32) y, variables = model.convolutional(x, keep_prob)", "x = tf.placeholder(tf.float32, [None, 784]) keep_prob = tf.placeholder(tf.float32) y, variables", "train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0}) print(\"step %d,", "data.test.labels, keep_prob: 1.0})) path = saver.save( sess, os.path.join(os.path.dirname(__file__), 'data', 'convolutional.ckpt'),", "1.0})) path = 
saver.save( sess, os.path.join(os.path.dirname(__file__), 'data', 'convolutional.ckpt'), write_meta_graph=False, write_state=False)", "784]) keep_prob = tf.placeholder(tf.float32) y, variables = model.convolutional(x, keep_prob) #", "in range(20000): batch = data.train.next_batch(50) if i % 100 ==", "= saver.save( sess, os.path.join(os.path.dirname(__file__), 'data', 'convolutional.ckpt'), write_meta_graph=False, write_state=False) print(\"Saved:\", path)", "path = saver.save( sess, os.path.join(os.path.dirname(__file__), 'data', 'convolutional.ckpt'), write_meta_graph=False, write_state=False) print(\"Saved:\",", "# train y_ = tf.placeholder(tf.float32, [None, 10]) cross_entropy = -tf.reduce_sum(y_", "# model with tf.variable_scope(\"convolutional\"): x = tf.placeholder(tf.float32, [None, 784]) keep_prob", "data.test.images, y_: data.test.labels, keep_prob: 1.0})) path = saver.save( sess, os.path.join(os.path.dirname(__file__),", "import input_data data = input_data.read_data_sets(\"data/dataset/\", one_hot=True) # model with tf.variable_scope(\"convolutional\"):", "mnist import model import tensorflow as tf from tensorflow.examples.tutorials.mnist import", "model.convolutional(x, keep_prob) # train y_ = tf.placeholder(tf.float32, [None, 10]) cross_entropy", "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) saver = tf.train.Saver(variables) with tf.Session() as", "data.train.next_batch(50) if i % 100 == 0: train_accuracy = accuracy.eval(feed_dict={x:", "% 100 == 0: train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1],", "= tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) saver", "y_ = tf.placeholder(tf.float32, [None, 10]) cross_entropy = -tf.reduce_sum(y_ * tf.log(y))", "from tensorflow.examples.tutorials.mnist import input_data data = input_data.read_data_sets(\"data/dataset/\", one_hot=True) # model", "= model.convolutional(x, keep_prob) # train y_ = 
tf.placeholder(tf.float32, [None, 10])", "with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for i in range(20000): batch", "import model import tensorflow as tf from tensorflow.examples.tutorials.mnist import input_data", "tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) saver = tf.train.Saver(variables) with tf.Session() as sess: sess.run(tf.global_variables_initializer())", "model import tensorflow as tf from tensorflow.examples.tutorials.mnist import input_data data", "= data.train.next_batch(50) if i % 100 == 0: train_accuracy =", "train y_ = tf.placeholder(tf.float32, [None, 10]) cross_entropy = -tf.reduce_sum(y_ *", "cross_entropy = -tf.reduce_sum(y_ * tf.log(y)) train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) correct_prediction =", "batch[1], keep_prob: 1.0}) print(\"step %d, training accuracy %g\" % (i,", "* tf.log(y)) train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_,", "= tf.train.Saver(variables) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for i in", "-tf.reduce_sum(y_ * tf.log(y)) train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) correct_prediction = tf.equal(tf.argmax(y, 1),", "batch[0], y_: batch[1], keep_prob: 0.5}) print(sess.run(accuracy, feed_dict={x: data.test.images, y_: data.test.labels,", "%g\" % (i, train_accuracy)) sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob:", "i in range(20000): batch = data.train.next_batch(50) if i % 100", "keep_prob: 1.0}) print(\"step %d, training accuracy %g\" % (i, train_accuracy))", "feed_dict={x: data.test.images, y_: data.test.labels, keep_prob: 1.0})) path = saver.save( sess,", "y, variables = model.convolutional(x, keep_prob) # train y_ = tf.placeholder(tf.float32,", "training accuracy %g\" % (i, train_accuracy)) sess.run(train_step, feed_dict={x: batch[0], y_:", "input_data.read_data_sets(\"data/dataset/\", one_hot=True) # 
model with tf.variable_scope(\"convolutional\"): x = tf.placeholder(tf.float32, [None,", "= input_data.read_data_sets(\"data/dataset/\", one_hot=True) # model with tf.variable_scope(\"convolutional\"): x = tf.placeholder(tf.float32,", "accuracy %g\" % (i, train_accuracy)) sess.run(train_step, feed_dict={x: batch[0], y_: batch[1],", "variables = model.convolutional(x, keep_prob) # train y_ = tf.placeholder(tf.float32, [None,", "tensorflow.examples.tutorials.mnist import input_data data = input_data.read_data_sets(\"data/dataset/\", one_hot=True) # model with", "= -tf.reduce_sum(y_ * tf.log(y)) train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) correct_prediction = tf.equal(tf.argmax(y,", "batch[0], y_: batch[1], keep_prob: 1.0}) print(\"step %d, training accuracy %g\"", "0.5}) print(sess.run(accuracy, feed_dict={x: data.test.images, y_: data.test.labels, keep_prob: 1.0})) path =", "keep_prob: 1.0})) path = saver.save( sess, os.path.join(os.path.dirname(__file__), 'data', 'convolutional.ckpt'), write_meta_graph=False,", "with tf.variable_scope(\"convolutional\"): x = tf.placeholder(tf.float32, [None, 784]) keep_prob = tf.placeholder(tf.float32)", "y_: batch[1], keep_prob: 0.5}) print(sess.run(accuracy, feed_dict={x: data.test.images, y_: data.test.labels, keep_prob:", "tensorflow as tf from tensorflow.examples.tutorials.mnist import input_data data = input_data.read_data_sets(\"data/dataset/\",", "import os from mnist import model import tensorflow as tf", "100 == 0: train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob:", "train_accuracy)) sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5}) print(sess.run(accuracy, feed_dict={x:", "= tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) accuracy =", "for i in range(20000): batch = data.train.next_batch(50) if i %", "%d, training accuracy %g\" % (i, train_accuracy)) sess.run(train_step, 
feed_dict={x: batch[0],", "from mnist import model import tensorflow as tf from tensorflow.examples.tutorials.mnist", "tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction,", "tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) saver =", "1), tf.argmax(y_, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) saver = tf.train.Saver(variables)", "os from mnist import model import tensorflow as tf from", "accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0}) print(\"step %d, training accuracy", "input_data data = input_data.read_data_sets(\"data/dataset/\", one_hot=True) # model with tf.variable_scope(\"convolutional\"): x", "sess.run(tf.global_variables_initializer()) for i in range(20000): batch = data.train.next_batch(50) if i", "== 0: train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})", "model with tf.variable_scope(\"convolutional\"): x = tf.placeholder(tf.float32, [None, 784]) keep_prob =", "tf.log(y)) train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))", "sess: sess.run(tf.global_variables_initializer()) for i in range(20000): batch = data.train.next_batch(50) if", "sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5}) print(sess.run(accuracy, feed_dict={x: data.test.images,", "saver = tf.train.Saver(variables) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for i", "one_hot=True) # model with tf.variable_scope(\"convolutional\"): x = tf.placeholder(tf.float32, [None, 784])", "correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))", "data = input_data.read_data_sets(\"data/dataset/\", one_hot=True) # model with 
tf.variable_scope(\"convolutional\"): x =", "range(20000): batch = data.train.next_batch(50) if i % 100 == 0:", "import tensorflow as tf from tensorflow.examples.tutorials.mnist import input_data data =", "tf.Session() as sess: sess.run(tf.global_variables_initializer()) for i in range(20000): batch =", "= tf.placeholder(tf.float32, [None, 784]) keep_prob = tf.placeholder(tf.float32) y, variables =", "= tf.placeholder(tf.float32, [None, 10]) cross_entropy = -tf.reduce_sum(y_ * tf.log(y)) train_step", "10]) cross_entropy = -tf.reduce_sum(y_ * tf.log(y)) train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) correct_prediction" ]
[ "= requests.post('http://overpass-api.de/api/interpreter/', data=query1) r1.encoding = 'utf-8' f = codecs.open('data/osm_data.xml', encoding='utf-8'", "<print/>\"\"\" % ((-87.61309146881104, 41.890042371392965, 41.87234107841773, -87.64235973358154) * 2) #r2 =", "<has-kv k=\"addr:postcode\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> </union> <print/>\"\"\"", "'utf-8' f = codecs.open('data/osm_data.xml', encoding='utf-8' , mode='w+') f.write(r1.text) query2 =", "<has-kv k=\"addr:street:type\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> </union> <print/>\"\"\"", "s=\"%s\" w=\"%s\"/> </query> </union> <print/>\"\"\" % ((-87.61309146881104, 41.890042371392965, 41.87234107841773, -87.64235973358154)", "</query> <query type=\"node\"> <has-kv k=\"addr:full\" regv=\"^[0-9]+.*[a-z]+.*[0-9]{5}.*\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\"", "<query type=\"node\"> <has-kv k=\"addr:full\" regv=\"^[0-9]+.*[a-z]+.*[0-9]{5}.*\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/>", "type=\"way\"> <has-kv k=\"addr:housenumber\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:type\"/> <has-kv k=\"addr:state\"/> <bbox-query", "f = codecs.open('data/osm_data.xml', encoding='utf-8' , mode='w+') f.write(r1.text) query2 = \"\"\"<union>", "<query type=\"way\"> <has-kv k=\"addr:housenumber\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:type\"/> <has-kv k=\"addr:postcode\"/>", "* 2) #r2 = requests.post('http://overpass-api.de/api/interpreter/', data=query2) #f = codecs.open(\"data/osm_data_street.xml\", \"wb\",", "<has-kv k=\"addr:housenumber\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:type\"/> <has-kv k=\"addr:city\"/> <bbox-query e=\"%s\"", "<has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:type\"/> <has-kv k=\"addr:city\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\"", "w=\"%s\"/> </query> </union> <print/> \"\"\" % ((-70.000000, 50.000000, 25.000000, -125.000000)", "n=\"%s\" s=\"%s\" w=\"%s\"/> </query> 
</union> <print/> \"\"\" % ((-70.000000, 50.000000,", "50.000000, 25.000000, -125.000000) * 2) if __name__ == '__main__' :", "k=\"addr:street\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:prefix\"/> <has-kv k=\"addr:street:type\"/> <bbox-query e=\"%s\" n=\"%s\"", "% ((-87.61309146881104, 41.890042371392965, 41.87234107841773, -87.64235973358154) * 2) #r2 = requests.post('http://overpass-api.de/api/interpreter/',", "e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> </union> <print/> \"\"\" % ((-70.000000,", "<query type=\"way\"> <has-kv k=\"addr:housenumber\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:type\"/> <has-kv k=\"addr:state\"/>", "'__main__' : r3 = requests.post('http://overpass-api.de/api/interpreter/', data=query3) f = codecs.open(\"data/osm_data_full_addr.xml\", \"wb\",", "<query type=\"node\"> <has-kv k=\"addr:housenumber\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:type\"/> <has-kv k=\"addr:city\"/>", "#r2.encoding = 'utf-8' #f.write(r2.text) query3 = \"\"\"<union> <query type=\"way\"> <has-kv", "<has-kv k=\"addr:city\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> <query type=\"way\">", "25.000000, -125.000000) * 6) r1 = requests.post('http://overpass-api.de/api/interpreter/', data=query1) r1.encoding =", "<query type=\"node\"> <has-kv k=\"addr:housenumber\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:type\"/> <has-kv k=\"addr:state\"/>", "regv=\"^[0-9]+.*[a-z]+.*[0-9]{5}.*\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> </union> <print/> \"\"\"", "k=\"addr:housenumber\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:type\"/> <has-kv k=\"addr:state\"/> <bbox-query e=\"%s\" n=\"%s\"", "<has-kv k=\"addr:state\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> <query type=\"way\">", "k=\"addr:state\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> <query type=\"node\"> <has-kv", "k=\"addr:street:type\"/> <bbox-query e=\"%s\" 
n=\"%s\" s=\"%s\" w=\"%s\"/> </query> <query type=\"node\"> <has-kv", "<has-kv k=\"addr:street\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:prefix\"/> <has-kv k=\"addr:street:type\"/> <bbox-query e=\"%s\"", "\"\"\"<union> <query type=\"way\"> <has-kv k=\"addr:full\" regv=\"^[0-9]+.*[a-z]+.*[0-9]{5}.*\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\"", "e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> <query type=\"node\"> <has-kv k=\"addr:full\" regv=\"^[0-9]+.*[a-z]+.*[0-9]{5}.*\"/>", "w=\"%s\"/> </query> <query type=\"node\"> <has-kv k=\"addr:housenumber\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:type\"/>", "#f.write(r2.text) query3 = \"\"\"<union> <query type=\"way\"> <has-kv k=\"addr:full\" regv=\"^[0-9]+.*[a-z]+.*[0-9]{5}.*\"/> <bbox-query", "k=\"addr:street:type\"/> <has-kv k=\"addr:postcode\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> </union>", "__name__ == '__main__' : r3 = requests.post('http://overpass-api.de/api/interpreter/', data=query3) f =", "<has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:type\"/> <has-kv k=\"addr:postcode\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\"", "<has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:type\"/> <has-kv k=\"addr:state\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\"", "requests import codecs query1 = \"\"\"<union> <query type=\"way\"> <has-kv k=\"addr:housenumber\"/>", "type=\"node\"> <has-kv k=\"addr:street\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:prefix\"/> <has-kv k=\"addr:street:type\"/> <bbox-query", "#f = codecs.open(\"data/osm_data_street.xml\", \"wb\", \"utf-8\") #r2.encoding = 'utf-8' #f.write(r2.text) query3", "e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> </union> <print/>\"\"\" % ((-87.61309146881104, 41.890042371392965,", "<bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> </union> <print/>\"\"\" % ((-87.61309146881104,", "w=\"%s\"/> </query> </union> <print/>\"\"\" % ((-87.61309146881104, 41.890042371392965, 
41.87234107841773, -87.64235973358154) *", "k=\"addr:housenumber\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:type\"/> <has-kv k=\"addr:city\"/> <bbox-query e=\"%s\" n=\"%s\"", "k=\"addr:street:type\"/> <has-kv k=\"addr:postcode\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> <query", "<bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> <query type=\"node\"> <has-kv k=\"addr:street\"/>", "w=\"%s\"/> </query> <query type=\"node\"> <has-kv k=\"addr:full\" regv=\"^[0-9]+.*[a-z]+.*[0-9]{5}.*\"/> <bbox-query e=\"%s\" n=\"%s\"", "w=\"%s\"/> </query> </union> <print/>\"\"\" % ((-70.000000, 50.000000, 25.000000, -125.000000) *", "6) r1 = requests.post('http://overpass-api.de/api/interpreter/', data=query1) r1.encoding = 'utf-8' f =", "#r2 = requests.post('http://overpass-api.de/api/interpreter/', data=query2) #f = codecs.open(\"data/osm_data_street.xml\", \"wb\", \"utf-8\") #r2.encoding", "k=\"addr:street:name\"/> <has-kv k=\"addr:street:type\"/> <has-kv k=\"addr:postcode\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/>", "k=\"addr:full\" regv=\"^[0-9]+.*[a-z]+.*[0-9]{5}.*\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> </union> <print/>", "-125.000000) * 2) if __name__ == '__main__' : r3 =", "<has-kv k=\"addr:state\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> <query type=\"node\">", "k=\"addr:street:name\"/> <has-kv k=\"addr:street:type\"/> <has-kv k=\"addr:state\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/>", "n=\"%s\" s=\"%s\" w=\"%s\"/> </query> </union> <print/>\"\"\" % ((-70.000000, 50.000000, 25.000000,", "</query> </union> <print/>\"\"\" % ((-70.000000, 50.000000, 25.000000, -125.000000) * 6)", "r1.encoding = 'utf-8' f = codecs.open('data/osm_data.xml', encoding='utf-8' , mode='w+') f.write(r1.text)", "codecs query1 = \"\"\"<union> <query type=\"way\"> <has-kv k=\"addr:housenumber\"/> <has-kv k=\"addr:street:name\"/>", "</query> </union> <print/>\"\"\" % ((-87.61309146881104, 
41.890042371392965, 41.87234107841773, -87.64235973358154) * 2)", "= \"\"\"<union> <query type=\"way\"> <has-kv k=\"addr:full\" regv=\"^[0-9]+.*[a-z]+.*[0-9]{5}.*\"/> <bbox-query e=\"%s\" n=\"%s\"", "((-70.000000, 50.000000, 25.000000, -125.000000) * 2) if __name__ == '__main__'", "data=query2) #f = codecs.open(\"data/osm_data_street.xml\", \"wb\", \"utf-8\") #r2.encoding = 'utf-8' #f.write(r2.text)", "k=\"addr:state\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> <query type=\"way\"> <has-kv", "<has-kv k=\"addr:street:type\"/> <has-kv k=\"addr:postcode\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query>", "k=\"addr:street:name\"/> <has-kv k=\"addr:street:type\"/> <has-kv k=\"addr:city\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/>", "encoding='utf-8' , mode='w+') f.write(r1.text) query2 = \"\"\"<union> <query type=\"way\"> <has-kv", "k=\"addr:street:prefix\"/> <has-kv k=\"addr:street:type\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> </union>", "((-70.000000, 50.000000, 25.000000, -125.000000) * 6) r1 = requests.post('http://overpass-api.de/api/interpreter/', data=query1)", ": r3 = requests.post('http://overpass-api.de/api/interpreter/', data=query3) f = codecs.open(\"data/osm_data_full_addr.xml\", \"wb\", \"utf-8\")", "25.000000, -125.000000) * 2) if __name__ == '__main__' : r3", "codecs.open('data/osm_data.xml', encoding='utf-8' , mode='w+') f.write(r1.text) query2 = \"\"\"<union> <query type=\"way\">", "n=\"%s\" s=\"%s\" w=\"%s\"/> </query> <query type=\"way\"> <has-kv k=\"addr:housenumber\"/> <has-kv k=\"addr:street:name\"/>", "query2 = \"\"\"<union> <query type=\"way\"> <has-kv k=\"addr:street\"/> <has-kv k=\"addr:street:name\"/> <has-kv", "query3 = \"\"\"<union> <query type=\"way\"> <has-kv k=\"addr:full\" regv=\"^[0-9]+.*[a-z]+.*[0-9]{5}.*\"/> <bbox-query e=\"%s\"", "<has-kv k=\"addr:street:type\"/> <has-kv k=\"addr:state\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query>", "n=\"%s\" s=\"%s\" 
w=\"%s\"/> </query> <query type=\"node\"> <has-kv k=\"addr:street\"/> <has-kv k=\"addr:street:name\"/>", "% ((-70.000000, 50.000000, 25.000000, -125.000000) * 6) r1 = requests.post('http://overpass-api.de/api/interpreter/',", "<has-kv k=\"addr:full\" regv=\"^[0-9]+.*[a-z]+.*[0-9]{5}.*\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> </union>", "k=\"addr:city\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> <query type=\"node\"> <has-kv", "<print/>\"\"\" % ((-70.000000, 50.000000, 25.000000, -125.000000) * 6) r1 =", "k=\"addr:housenumber\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:type\"/> <has-kv k=\"addr:postcode\"/> <bbox-query e=\"%s\" n=\"%s\"", "% ((-70.000000, 50.000000, 25.000000, -125.000000) * 2) if __name__ ==", "type=\"node\"> <has-kv k=\"addr:full\" regv=\"^[0-9]+.*[a-z]+.*[0-9]{5}.*\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query>", "= codecs.open('data/osm_data.xml', encoding='utf-8' , mode='w+') f.write(r1.text) query2 = \"\"\"<union> <query", "k=\"addr:street:type\"/> <has-kv k=\"addr:city\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> <query", "<query type=\"node\"> <has-kv k=\"addr:housenumber\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:type\"/> <has-kv k=\"addr:postcode\"/>", "<has-kv k=\"addr:street:type\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> <query type=\"node\">", "s=\"%s\" w=\"%s\"/> </query> </union> <print/>\"\"\" % ((-70.000000, 50.000000, 25.000000, -125.000000)", "e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> <query type=\"way\"> <has-kv k=\"addr:housenumber\"/> <has-kv", "<bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> <query type=\"node\"> <has-kv k=\"addr:housenumber\"/>", "<query type=\"way\"> <has-kv k=\"addr:street\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:prefix\"/> <has-kv k=\"addr:street:type\"/>", "<bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> </union> <print/> \"\"\" 
%", "= \"\"\"<union> <query type=\"way\"> <has-kv k=\"addr:housenumber\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:type\"/>", "requests.post('http://overpass-api.de/api/interpreter/', data=query1) r1.encoding = 'utf-8' f = codecs.open('data/osm_data.xml', encoding='utf-8' ,", "\"utf-8\") #r2.encoding = 'utf-8' #f.write(r2.text) query3 = \"\"\"<union> <query type=\"way\">", "2) if __name__ == '__main__' : r3 = requests.post('http://overpass-api.de/api/interpreter/', data=query3)", "41.87234107841773, -87.64235973358154) * 2) #r2 = requests.post('http://overpass-api.de/api/interpreter/', data=query2) #f =", "</query> <query type=\"node\"> <has-kv k=\"addr:housenumber\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:type\"/> <has-kv", "k=\"addr:postcode\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> </union> <print/>\"\"\" %", "requests.post('http://overpass-api.de/api/interpreter/', data=query2) #f = codecs.open(\"data/osm_data_street.xml\", \"wb\", \"utf-8\") #r2.encoding = 'utf-8'", "k=\"addr:full\" regv=\"^[0-9]+.*[a-z]+.*[0-9]{5}.*\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> <query type=\"node\">", "s=\"%s\" w=\"%s\"/> </query> <query type=\"way\"> <has-kv k=\"addr:housenumber\"/> <has-kv k=\"addr:street:name\"/> <has-kv", "</query> </union> <print/> \"\"\" % ((-70.000000, 50.000000, 25.000000, -125.000000) *", "requests.post('http://overpass-api.de/api/interpreter/', data=query3) f = codecs.open(\"data/osm_data_full_addr.xml\", \"wb\", \"utf-8\") r3.encoding = 'utf-8'", "mode='w+') f.write(r1.text) query2 = \"\"\"<union> <query type=\"way\"> <has-kv k=\"addr:street\"/> <has-kv", "k=\"addr:street:prefix\"/> <has-kv k=\"addr:street:type\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> <query", "type=\"way\"> <has-kv k=\"addr:full\" regv=\"^[0-9]+.*[a-z]+.*[0-9]{5}.*\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query>", "<print/> \"\"\" % ((-70.000000, 50.000000, 25.000000, 
-125.000000) * 2) if", "50.000000, 25.000000, -125.000000) * 6) r1 = requests.post('http://overpass-api.de/api/interpreter/', data=query1) r1.encoding", "w=\"%s\"/> </query> <query type=\"node\"> <has-kv k=\"addr:street\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:prefix\"/>", "<bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> </union> <print/>\"\"\" % ((-70.000000,", "</union> <print/>\"\"\" % ((-70.000000, 50.000000, 25.000000, -125.000000) * 6) r1", "e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> <query type=\"node\"> <has-kv k=\"addr:housenumber\"/> <has-kv", "type=\"node\"> <has-kv k=\"addr:housenumber\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:type\"/> <has-kv k=\"addr:postcode\"/> <bbox-query", ", mode='w+') f.write(r1.text) query2 = \"\"\"<union> <query type=\"way\"> <has-kv k=\"addr:street\"/>", "import requests import codecs query1 = \"\"\"<union> <query type=\"way\"> <has-kv", "\"wb\", \"utf-8\") #r2.encoding = 'utf-8' #f.write(r2.text) query3 = \"\"\"<union> <query", "data=query1) r1.encoding = 'utf-8' f = codecs.open('data/osm_data.xml', encoding='utf-8' , mode='w+')", "= \"\"\"<union> <query type=\"way\"> <has-kv k=\"addr:street\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:prefix\"/>", "2) #r2 = requests.post('http://overpass-api.de/api/interpreter/', data=query2) #f = codecs.open(\"data/osm_data_street.xml\", \"wb\", \"utf-8\")", "-125.000000) * 6) r1 = requests.post('http://overpass-api.de/api/interpreter/', data=query1) r1.encoding = 'utf-8'", "* 6) r1 = requests.post('http://overpass-api.de/api/interpreter/', data=query1) r1.encoding = 'utf-8' f", "type=\"way\"> <has-kv k=\"addr:housenumber\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:type\"/> <has-kv k=\"addr:postcode\"/> <bbox-query", "s=\"%s\" w=\"%s\"/> </query> <query type=\"node\"> <has-kv k=\"addr:housenumber\"/> <has-kv k=\"addr:street:name\"/> <has-kv", "if __name__ == '__main__' : r3 = 
requests.post('http://overpass-api.de/api/interpreter/', data=query3) f", "w=\"%s\"/> </query> <query type=\"way\"> <has-kv k=\"addr:housenumber\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:type\"/>", "type=\"way\"> <has-kv k=\"addr:street\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:prefix\"/> <has-kv k=\"addr:street:type\"/> <bbox-query", "s=\"%s\" w=\"%s\"/> </query> </union> <print/> \"\"\" % ((-70.000000, 50.000000, 25.000000,", "regv=\"^[0-9]+.*[a-z]+.*[0-9]{5}.*\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> <query type=\"node\"> <has-kv", "codecs.open(\"data/osm_data_street.xml\", \"wb\", \"utf-8\") #r2.encoding = 'utf-8' #f.write(r2.text) query3 = \"\"\"<union>", "= requests.post('http://overpass-api.de/api/interpreter/', data=query2) #f = codecs.open(\"data/osm_data_street.xml\", \"wb\", \"utf-8\") #r2.encoding =", "== '__main__' : r3 = requests.post('http://overpass-api.de/api/interpreter/', data=query3) f = codecs.open(\"data/osm_data_full_addr.xml\",", "r1 = requests.post('http://overpass-api.de/api/interpreter/', data=query1) r1.encoding = 'utf-8' f = codecs.open('data/osm_data.xml',", "* 2) if __name__ == '__main__' : r3 = requests.post('http://overpass-api.de/api/interpreter/',", "type=\"way\"> <has-kv k=\"addr:housenumber\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:type\"/> <has-kv k=\"addr:city\"/> <bbox-query", "<query type=\"way\"> <has-kv k=\"addr:housenumber\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:type\"/> <has-kv k=\"addr:city\"/>", "= codecs.open(\"data/osm_data_street.xml\", \"wb\", \"utf-8\") #r2.encoding = 'utf-8' #f.write(r2.text) query3 =", "k=\"addr:city\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> <query type=\"way\"> <has-kv", "r3 = requests.post('http://overpass-api.de/api/interpreter/', data=query3) f = codecs.open(\"data/osm_data_full_addr.xml\", \"wb\", \"utf-8\") r3.encoding", "e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> 
<query type=\"node\"> <has-kv k=\"addr:street\"/> <has-kv", "s=\"%s\" w=\"%s\"/> </query> <query type=\"node\"> <has-kv k=\"addr:full\" regv=\"^[0-9]+.*[a-z]+.*[0-9]{5}.*\"/> <bbox-query e=\"%s\"", "<has-kv k=\"addr:city\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> <query type=\"node\">", "n=\"%s\" s=\"%s\" w=\"%s\"/> </query> </union> <print/>\"\"\" % ((-87.61309146881104, 41.890042371392965, 41.87234107841773,", "<has-kv k=\"addr:street:prefix\"/> <has-kv k=\"addr:street:type\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query>", "= 'utf-8' #f.write(r2.text) query3 = \"\"\"<union> <query type=\"way\"> <has-kv k=\"addr:full\"", "\"\"\" % ((-70.000000, 50.000000, 25.000000, -125.000000) * 2) if __name__", "n=\"%s\" s=\"%s\" w=\"%s\"/> </query> <query type=\"node\"> <has-kv k=\"addr:housenumber\"/> <has-kv k=\"addr:street:name\"/>", "s=\"%s\" w=\"%s\"/> </query> <query type=\"node\"> <has-kv k=\"addr:street\"/> <has-kv k=\"addr:street:name\"/> <has-kv", "k=\"addr:street:type\"/> <has-kv k=\"addr:state\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> <query", "k=\"addr:postcode\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> <query type=\"node\"> <has-kv", "type=\"node\"> <has-kv k=\"addr:housenumber\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:type\"/> <has-kv k=\"addr:city\"/> <bbox-query", "<has-kv k=\"addr:full\" regv=\"^[0-9]+.*[a-z]+.*[0-9]{5}.*\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> <query", "= 'utf-8' f = codecs.open('data/osm_data.xml', encoding='utf-8' , mode='w+') f.write(r1.text) query2", "e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> </union> <print/>\"\"\" % ((-70.000000, 50.000000,", "</query> <query type=\"way\"> <has-kv k=\"addr:housenumber\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:type\"/> <has-kv", "type=\"node\"> <has-kv k=\"addr:housenumber\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:type\"/> <has-kv 
k=\"addr:state\"/> <bbox-query", "\"\"\"<union> <query type=\"way\"> <has-kv k=\"addr:street\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:prefix\"/> <has-kv", "</union> <print/>\"\"\" % ((-87.61309146881104, 41.890042371392965, 41.87234107841773, -87.64235973358154) * 2) #r2", "41.890042371392965, 41.87234107841773, -87.64235973358154) * 2) #r2 = requests.post('http://overpass-api.de/api/interpreter/', data=query2) #f", "\"\"\"<union> <query type=\"way\"> <has-kv k=\"addr:housenumber\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:type\"/> <has-kv", "<has-kv k=\"addr:postcode\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> <query type=\"node\">", "n=\"%s\" s=\"%s\" w=\"%s\"/> </query> <query type=\"node\"> <has-kv k=\"addr:full\" regv=\"^[0-9]+.*[a-z]+.*[0-9]{5}.*\"/> <bbox-query", "= requests.post('http://overpass-api.de/api/interpreter/', data=query3) f = codecs.open(\"data/osm_data_full_addr.xml\", \"wb\", \"utf-8\") r3.encoding =", "k=\"addr:street:name\"/> <has-kv k=\"addr:street:prefix\"/> <has-kv k=\"addr:street:type\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/>", "-87.64235973358154) * 2) #r2 = requests.post('http://overpass-api.de/api/interpreter/', data=query2) #f = codecs.open(\"data/osm_data_street.xml\",", "<has-kv k=\"addr:housenumber\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:type\"/> <has-kv k=\"addr:state\"/> <bbox-query e=\"%s\"", "<bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> <query type=\"way\"> <has-kv k=\"addr:housenumber\"/>", "k=\"addr:street:type\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> </union> <print/>\"\"\" %", "<bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query> <query type=\"node\"> <has-kv k=\"addr:full\"", "<query type=\"way\"> <has-kv k=\"addr:full\" regv=\"^[0-9]+.*[a-z]+.*[0-9]{5}.*\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/>", "'utf-8' #f.write(r2.text) query3 = \"\"\"<union> <query type=\"way\"> <has-kv 
k=\"addr:full\" regv=\"^[0-9]+.*[a-z]+.*[0-9]{5}.*\"/>", "<has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:prefix\"/> <has-kv k=\"addr:street:type\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\"", "</query> <query type=\"node\"> <has-kv k=\"addr:street\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:prefix\"/> <has-kv", "<has-kv k=\"addr:housenumber\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:type\"/> <has-kv k=\"addr:postcode\"/> <bbox-query e=\"%s\"", "<has-kv k=\"addr:street:type\"/> <has-kv k=\"addr:city\"/> <bbox-query e=\"%s\" n=\"%s\" s=\"%s\" w=\"%s\"/> </query>", "import codecs query1 = \"\"\"<union> <query type=\"way\"> <has-kv k=\"addr:housenumber\"/> <has-kv", "</union> <print/> \"\"\" % ((-70.000000, 50.000000, 25.000000, -125.000000) * 2)", "f.write(r1.text) query2 = \"\"\"<union> <query type=\"way\"> <has-kv k=\"addr:street\"/> <has-kv k=\"addr:street:name\"/>", "data=query3) f = codecs.open(\"data/osm_data_full_addr.xml\", \"wb\", \"utf-8\") r3.encoding = 'utf-8' f.write(r3.text)", "query1 = \"\"\"<union> <query type=\"way\"> <has-kv k=\"addr:housenumber\"/> <has-kv k=\"addr:street:name\"/> <has-kv", "<query type=\"node\"> <has-kv k=\"addr:street\"/> <has-kv k=\"addr:street:name\"/> <has-kv k=\"addr:street:prefix\"/> <has-kv k=\"addr:street:type\"/>", "((-87.61309146881104, 41.890042371392965, 41.87234107841773, -87.64235973358154) * 2) #r2 = requests.post('http://overpass-api.de/api/interpreter/', data=query2)" ]
[ "under The MIT License [see LICENSE for details] # Written", "parser.add_argument('--prototxt', dest='prototxt', help='prototxt', default=None, type=str, required=True) parser.add_argument('--imdb', dest='imdb_name', help='dataset to", "argparse.ArgumentParser(description='Test a Fast R-CNN network pipeline') parser.add_argument('--gpu', dest='gpu_id', help='GPU id", "dest='cfg_file', help='cfg', default='experiments/cfgs/faster_rcnn_end2end.yml', type=str) parser.add_argument('--res', dest='res_file', help='result file', default='', type=str,", "result['file'] = model df_results = df_results.append(results, ignore_index=True) df_results.to_csv(os.path.join(dir, res_file)) if", "splitall(path): allparts = [] while 1: parts = os.path.split(path) if", "a Fast R-CNN network on an image database.\"\"\" import _init_paths", "# -------------------------------------------------------- \"\"\"Test a Fast R-CNN network on an image", "help='GPU id to use', default=0, type=int, required=True) parser.add_argument('--dir', dest='dir', help='Directory", "R-CNN # Copyright (c) 2015 Microsoft # Licensed under The", "input arguments \"\"\" parser = argparse.ArgumentParser(description='Test a Fast R-CNN network", "the model files', default=\"\", type=str, required=True) parser.add_argument('--models', dest='model_files', help='Text file", "R-CNN network pipeline') parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use', default=0,", "Licensed under The MIT License [see LICENSE for details] #", "# Fast R-CNN # Copyright (c) 2015 Microsoft # Licensed", "dest='gpu_id', help='GPU id to use', default=0, type=int, required=True) parser.add_argument('--dir', dest='dir',", "= gpu_id print('Using config:') pprint.pprint(cfg) while not os.path.exists(caffemodel): print('Waiting for", "test_net(net, imdb, max_per_image=100, vis=False, proposal_prefix=proposal_prefix) def run_test_nets(gpu_id, dir, model_files, prototxt,", "parser.add_argument('--res', dest='res_file', help='result 
file', default='', type=str, required=True) args = parser.parse_args()", "results: result['file'] = model df_results = df_results.append(results, ignore_index=True) df_results.to_csv(os.path.join(dir, res_file))", "test_net from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list from datasets.factory import", "def parse_args(): \"\"\" Parse input arguments \"\"\" parser = argparse.ArgumentParser(description='Test", "df_results = pd.DataFrame() for model in models: results = run_test_net(gpu_id,", "1: parts = os.path.split(path) if parts[0] == path: # sentinel", "= parser.parse_args() return args def run_test_net(gpu_id, caffemodel, prototxt, imdb_name, cfg_file):", "'test.txt' args = parse_args() print('Called with args:') print(args) run_test_nets(args.gpu_id, args.dir,", "type=str, required=True) parser.add_argument('--models', dest='model_files', help='Text file with names of models',", "import pprint import time, os, sys import pandas as pd", "required=True) parser.add_argument('--cfg', dest='cfg_file', help='cfg', default='experiments/cfgs/faster_rcnn_end2end.yml', type=str) parser.add_argument('--res', dest='res_file', help='result file',", "import pandas as pd def splitall(path): allparts = [] while", "dir = '/home/abhijitcbim/git/pedestrian-detector/output/faster_rcnn_end2end/train/backup' # model_files = 'test.txt' args = parse_args()", "import caffe import argparse import pprint import time, os, sys", "pipeline') parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use', default=0, type=int, required=True)", "args.dir, args.model_files, args.prototxt, args.imdb_name, args.cfg_file, args.res_file) # run_test_net(gpu_id,caffemodel, prototxt, imdb_name,", "type=str, required=True) parser.add_argument('--imdb', dest='imdb_name', help='dataset to test', default='ped_test_small', type=str, required=True)", "# Licensed under The MIT License [see LICENSE for details]", "os.path.join(dir, model), prototxt, imdb_name, cfg_file) for result in results: 
result['file']", "parser.add_argument('--imdb', dest='imdb_name', help='dataset to test', default='ped_test_small', type=str, required=True) parser.add_argument('--cfg', dest='cfg_file',", "# args = parse_args() gpu_id = 0 # dir =", "= 0 # dir = '/home/abhijitcbim/git/pedestrian-detector/output/faster_rcnn_end2end/train/backup' # model_files = 'test.txt'", "parse_args(): \"\"\" Parse input arguments \"\"\" parser = argparse.ArgumentParser(description='Test a", "paths = splitall(n) proposal_prefix = paths[-1] return test_net(net, imdb, max_per_image=100,", "return test_net(net, imdb, max_per_image=100, vis=False, proposal_prefix=proposal_prefix) def run_test_nets(gpu_id, dir, model_files,", "\"\"\"Test a Fast R-CNN network on an image database.\"\"\" import", "arguments \"\"\" parser = argparse.ArgumentParser(description='Test a Fast R-CNN network pipeline')", "\"\"\" Parse input arguments \"\"\" parser = argparse.ArgumentParser(description='Test a Fast", "database.\"\"\" import _init_paths from fast_rcnn.test import test_net from fast_rcnn.config import", "df_results.append(results, ignore_index=True) df_results.to_csv(os.path.join(dir, res_file)) if __name__ == '__main__': # args", "run_test_nets(args.gpu_id, args.dir, args.model_files, args.prototxt, args.imdb_name, args.cfg_file, args.res_file) # run_test_net(gpu_id,caffemodel, prototxt,", "gpu_id print('Using config:') pprint.pprint(cfg) while not os.path.exists(caffemodel): print('Waiting for {}", "model), prototxt, imdb_name, cfg_file) for result in results: result['file'] =", "is not None: cfg_from_file(cfg_file) cfg.GPU_ID = gpu_id print('Using config:') pprint.pprint(cfg)", "from datasets.factory import get_imdb import caffe import argparse import pprint", "= pd.DataFrame() for model in models: results = run_test_net(gpu_id, os.path.join(dir,", "model in models: results = run_test_net(gpu_id, os.path.join(dir, model), prototxt, imdb_name,", "Parse input arguments \"\"\" parser = 
argparse.ArgumentParser(description='Test a Fast R-CNN", "help='prototxt', default=None, type=str, required=True) parser.add_argument('--imdb', dest='imdb_name', help='dataset to test', default='ped_test_small',", "for {} to exist...'.format(caffemodel)) time.sleep(10) caffe.set_mode_gpu() caffe.set_device(gpu_id) net = caffe.Net(prototxt,", "of the model files', default=\"\", type=str, required=True) parser.add_argument('--models', dest='model_files', help='Text", "print('Waiting for {} to exist...'.format(caffemodel)) time.sleep(10) caffe.set_mode_gpu() caffe.set_device(gpu_id) net =", "def run_test_net(gpu_id, caffemodel, prototxt, imdb_name, cfg_file): if cfg_file is not", "n, _ = os.path.splitext(args.caffemodel) paths = splitall(n) proposal_prefix = paths[-1]", "{} to exist...'.format(caffemodel)) time.sleep(10) caffe.set_mode_gpu() caffe.set_device(gpu_id) net = caffe.Net(prototxt, caffemodel,", "dest='prototxt', help='prototxt', default=None, type=str, required=True) parser.add_argument('--imdb', dest='imdb_name', help='dataset to test',", "time, os, sys import pandas as pd def splitall(path): allparts", "prototxt, imdb_name, cfg_file) for result in results: result['file'] = model", "net = caffe.Net(prototxt, caffemodel, caffe.TEST) net.name = os.path.splitext(os.path.basename(caffemodel))[0] imdb =", "cfg_file, res_file): models = [line.rstrip('\\n') for line in open(os.path.join(dir, model_files))]", "LICENSE for details] # Written by <NAME> # -------------------------------------------------------- \"\"\"Test", "caffemodel, prototxt, imdb_name, cfg_file): if cfg_file is not None: cfg_from_file(cfg_file)", "pd def splitall(path): allparts = [] while 1: parts =", "file with names of models', default=None, type=str, required=True) parser.add_argument('--prototxt', dest='prototxt',", "required=True) args = parser.parse_args() return args def run_test_net(gpu_id, caffemodel, prototxt,", "Microsoft # Licensed under The MIT License [see LICENSE for", "return 
args def run_test_net(gpu_id, caffemodel, prototxt, imdb_name, cfg_file): if cfg_file", "= os.path.splitext(args.caffemodel) paths = splitall(n) proposal_prefix = paths[-1] return test_net(net,", "args.model_files, args.prototxt, args.imdb_name, args.cfg_file, args.res_file) # run_test_net(gpu_id,caffemodel, prototxt, imdb_name, cfg_file)", "return allparts def parse_args(): \"\"\" Parse input arguments \"\"\" parser", "\"\"\" parser = argparse.ArgumentParser(description='Test a Fast R-CNN network pipeline') parser.add_argument('--gpu',", "-------------------------------------------------------- # Fast R-CNN # Copyright (c) 2015 Microsoft #", "in open(os.path.join(dir, model_files))] df_results = pd.DataFrame() for model in models:", "parser.add_argument('--models', dest='model_files', help='Text file with names of models', default=None, type=str,", "Fast R-CNN network pipeline') parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use',", "help='Text file with names of models', default=None, type=str, required=True) parser.add_argument('--prototxt',", "Written by <NAME> # -------------------------------------------------------- \"\"\"Test a Fast R-CNN network", "else: path = parts[0] allparts.insert(0, parts[1]) return allparts def parse_args():", "<NAME> # -------------------------------------------------------- \"\"\"Test a Fast R-CNN network on an", "python # -------------------------------------------------------- # Fast R-CNN # Copyright (c) 2015", "for result in results: result['file'] = model df_results = df_results.append(results,", "parser = argparse.ArgumentParser(description='Test a Fast R-CNN network pipeline') parser.add_argument('--gpu', dest='gpu_id',", "parse_args() print('Called with args:') print(args) run_test_nets(args.gpu_id, args.dir, args.model_files, args.prototxt, args.imdb_name,", "= paths[-1] return test_net(net, imdb, max_per_image=100, vis=False, proposal_prefix=proposal_prefix) def run_test_nets(gpu_id,", "path: # sentinel for 
relative paths allparts.insert(0, parts[1]) break else:", "pd.DataFrame() for model in models: results = run_test_net(gpu_id, os.path.join(dir, model),", "cfg_file) for result in results: result['file'] = model df_results =", "Fast R-CNN network on an image database.\"\"\" import _init_paths from", "= get_imdb(imdb_name) if not cfg.TEST.HAS_RPN: imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD) n, _ = os.path.splitext(args.caffemodel)", "prototxt, imdb_name, cfg_file): if cfg_file is not None: cfg_from_file(cfg_file) cfg.GPU_ID", "gpu_id = 0 # dir = '/home/abhijitcbim/git/pedestrian-detector/output/faster_rcnn_end2end/train/backup' # model_files =", "cfg, cfg_from_file, cfg_from_list from datasets.factory import get_imdb import caffe import", "get_imdb import caffe import argparse import pprint import time, os,", "# sentinel for relative paths allparts.insert(0, parts[1]) break else: path", "help='Directory of the model files', default=\"\", type=str, required=True) parser.add_argument('--models', dest='model_files',", "= os.path.splitext(os.path.basename(caffemodel))[0] imdb = get_imdb(imdb_name) if not cfg.TEST.HAS_RPN: imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD) n,", "network on an image database.\"\"\" import _init_paths from fast_rcnn.test import", "os.path.split(path) if parts[0] == path: # sentinel for absolute paths", "cfg_file is not None: cfg_from_file(cfg_file) cfg.GPU_ID = gpu_id print('Using config:')", "cfg_file): if cfg_file is not None: cfg_from_file(cfg_file) cfg.GPU_ID = gpu_id", "with names of models', default=None, type=str, required=True) parser.add_argument('--prototxt', dest='prototxt', help='prototxt',", "[] while 1: parts = os.path.split(path) if parts[0] == path:", "a Fast R-CNN network pipeline') parser.add_argument('--gpu', dest='gpu_id', help='GPU id to", "res_file)) if __name__ == '__main__': # args = parse_args() gpu_id", "exist...'.format(caffemodel)) time.sleep(10) caffe.set_mode_gpu() caffe.set_device(gpu_id) net = 
caffe.Net(prototxt, caffemodel, caffe.TEST) net.name", "in results: result['file'] = model df_results = df_results.append(results, ignore_index=True) df_results.to_csv(os.path.join(dir,", "parser.add_argument('--cfg', dest='cfg_file', help='cfg', default='experiments/cfgs/faster_rcnn_end2end.yml', type=str) parser.add_argument('--res', dest='res_file', help='result file', default='',", "== '__main__': # args = parse_args() gpu_id = 0 #", "def run_test_nets(gpu_id, dir, model_files, prototxt, imdb_name, cfg_file, res_file): models =", "cfg_from_list from datasets.factory import get_imdb import caffe import argparse import", "help='result file', default='', type=str, required=True) args = parser.parse_args() return args", "= [line.rstrip('\\n') for line in open(os.path.join(dir, model_files))] df_results = pd.DataFrame()", "for relative paths allparts.insert(0, parts[1]) break else: path = parts[0]", "= 'test.txt' args = parse_args() print('Called with args:') print(args) run_test_nets(args.gpu_id,", "parser.parse_args() return args def run_test_net(gpu_id, caffemodel, prototxt, imdb_name, cfg_file): if", "import time, os, sys import pandas as pd def splitall(path):", "Fast R-CNN # Copyright (c) 2015 Microsoft # Licensed under", "model_files = 'test.txt' args = parse_args() print('Called with args:') print(args)", "if not cfg.TEST.HAS_RPN: imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD) n, _ = os.path.splitext(args.caffemodel) paths =", "run_test_nets(gpu_id, dir, model_files, prototxt, imdb_name, cfg_file, res_file): models = [line.rstrip('\\n')", "import get_imdb import caffe import argparse import pprint import time,", "parts[0] allparts.insert(0, parts[1]) return allparts def parse_args(): \"\"\" Parse input", "if parts[0] == path: # sentinel for absolute paths allparts.insert(0,", "MIT License [see LICENSE for details] # Written by <NAME>", "_ = os.path.splitext(args.caffemodel) paths = splitall(n) proposal_prefix = paths[-1] return", "type=str, 
required=True) parser.add_argument('--prototxt', dest='prototxt', help='prototxt', default=None, type=str, required=True) parser.add_argument('--imdb', dest='imdb_name',", "import _init_paths from fast_rcnn.test import test_net from fast_rcnn.config import cfg,", "path = parts[0] allparts.insert(0, parts[1]) return allparts def parse_args(): \"\"\"", "default='ped_test_small', type=str, required=True) parser.add_argument('--cfg', dest='cfg_file', help='cfg', default='experiments/cfgs/faster_rcnn_end2end.yml', type=str) parser.add_argument('--res', dest='res_file',", "details] # Written by <NAME> # -------------------------------------------------------- \"\"\"Test a Fast", "Copyright (c) 2015 Microsoft # Licensed under The MIT License", "imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD) n, _ = os.path.splitext(args.caffemodel) paths = splitall(n) proposal_prefix =", "while 1: parts = os.path.split(path) if parts[0] == path: #", "_init_paths from fast_rcnn.test import test_net from fast_rcnn.config import cfg, cfg_from_file,", "imdb_name, cfg_file): if cfg_file is not None: cfg_from_file(cfg_file) cfg.GPU_ID =", "= splitall(n) proposal_prefix = paths[-1] return test_net(net, imdb, max_per_image=100, vis=False,", "vis=False, proposal_prefix=proposal_prefix) def run_test_nets(gpu_id, dir, model_files, prototxt, imdb_name, cfg_file, res_file):", "allparts.insert(0, parts[1]) return allparts def parse_args(): \"\"\" Parse input arguments", "if __name__ == '__main__': # args = parse_args() gpu_id =", "with args:') print(args) run_test_nets(args.gpu_id, args.dir, args.model_files, args.prototxt, args.imdb_name, args.cfg_file, args.res_file)", "parse_args() gpu_id = 0 # dir = '/home/abhijitcbim/git/pedestrian-detector/output/faster_rcnn_end2end/train/backup' # model_files", "caffe.set_device(gpu_id) net = caffe.Net(prototxt, caffemodel, caffe.TEST) net.name = os.path.splitext(os.path.basename(caffemodel))[0] imdb", "imdb_name, cfg_file, res_file): models = 
[line.rstrip('\\n') for line in open(os.path.join(dir,", "= run_test_net(gpu_id, os.path.join(dir, model), prototxt, imdb_name, cfg_file) for result in", "parser.add_argument('--dir', dest='dir', help='Directory of the model files', default=\"\", type=str, required=True)", "# model_files = 'test.txt' args = parse_args() print('Called with args:')", "args = parse_args() print('Called with args:') print(args) run_test_nets(args.gpu_id, args.dir, args.model_files,", "= parse_args() print('Called with args:') print(args) run_test_nets(args.gpu_id, args.dir, args.model_files, args.prototxt,", "== path: # sentinel for relative paths allparts.insert(0, parts[1]) break", "max_per_image=100, vis=False, proposal_prefix=proposal_prefix) def run_test_nets(gpu_id, dir, model_files, prototxt, imdb_name, cfg_file,", "parts = os.path.split(path) if parts[0] == path: # sentinel for", "image database.\"\"\" import _init_paths from fast_rcnn.test import test_net from fast_rcnn.config", "imdb = get_imdb(imdb_name) if not cfg.TEST.HAS_RPN: imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD) n, _ =", "pandas as pd def splitall(path): allparts = [] while 1:", "type=str) parser.add_argument('--res', dest='res_file', help='result file', default='', type=str, required=True) args =", "for absolute paths allparts.insert(0, parts[0]) break elif parts[1] == path:", "file', default='', type=str, required=True) args = parser.parse_args() return args def", "License [see LICENSE for details] # Written by <NAME> #", "dest='imdb_name', help='dataset to test', default='ped_test_small', type=str, required=True) parser.add_argument('--cfg', dest='cfg_file', help='cfg',", "dest='res_file', help='result file', default='', type=str, required=True) args = parser.parse_args() return", "results = run_test_net(gpu_id, os.path.join(dir, model), prototxt, imdb_name, cfg_file) for result", "allparts = [] while 1: parts = os.path.split(path) if parts[0]", "sentinel for absolute paths allparts.insert(0, parts[0]) 
break elif parts[1] ==", "type=str, required=True) parser.add_argument('--cfg', dest='cfg_file', help='cfg', default='experiments/cfgs/faster_rcnn_end2end.yml', type=str) parser.add_argument('--res', dest='res_file', help='result", "open(os.path.join(dir, model_files))] df_results = pd.DataFrame() for model in models: results", "argparse import pprint import time, os, sys import pandas as", "id to use', default=0, type=int, required=True) parser.add_argument('--dir', dest='dir', help='Directory of", "model_files, prototxt, imdb_name, cfg_file, res_file): models = [line.rstrip('\\n') for line", "model files', default=\"\", type=str, required=True) parser.add_argument('--models', dest='model_files', help='Text file with", "print('Called with args:') print(args) run_test_nets(args.gpu_id, args.dir, args.model_files, args.prototxt, args.imdb_name, args.cfg_file,", "= df_results.append(results, ignore_index=True) df_results.to_csv(os.path.join(dir, res_file)) if __name__ == '__main__': #", "default=None, type=str, required=True) parser.add_argument('--imdb', dest='imdb_name', help='dataset to test', default='ped_test_small', type=str,", "dest='model_files', help='Text file with names of models', default=None, type=str, required=True)", "dir, model_files, prototxt, imdb_name, cfg_file, res_file): models = [line.rstrip('\\n') for", "None: cfg_from_file(cfg_file) cfg.GPU_ID = gpu_id print('Using config:') pprint.pprint(cfg) while not", "required=True) parser.add_argument('--imdb', dest='imdb_name', help='dataset to test', default='ped_test_small', type=str, required=True) parser.add_argument('--cfg',", "def splitall(path): allparts = [] while 1: parts = os.path.split(path)", "default='', type=str, required=True) args = parser.parse_args() return args def run_test_net(gpu_id,", "splitall(n) proposal_prefix = paths[-1] return test_net(net, imdb, max_per_image=100, vis=False, proposal_prefix=proposal_prefix)", "= model df_results = df_results.append(results, 
ignore_index=True) df_results.to_csv(os.path.join(dir, res_file)) if __name__", "models', default=None, type=str, required=True) parser.add_argument('--prototxt', dest='prototxt', help='prototxt', default=None, type=str, required=True)", "= argparse.ArgumentParser(description='Test a Fast R-CNN network pipeline') parser.add_argument('--gpu', dest='gpu_id', help='GPU", "import test_net from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list from datasets.factory", "files', default=\"\", type=str, required=True) parser.add_argument('--models', dest='model_files', help='Text file with names", "= parts[0] allparts.insert(0, parts[1]) return allparts def parse_args(): \"\"\" Parse", "model df_results = df_results.append(results, ignore_index=True) df_results.to_csv(os.path.join(dir, res_file)) if __name__ ==", "# -------------------------------------------------------- # Fast R-CNN # Copyright (c) 2015 Microsoft", "not cfg.TEST.HAS_RPN: imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD) n, _ = os.path.splitext(args.caffemodel) paths = splitall(n)", "os.path.exists(caffemodel): print('Waiting for {} to exist...'.format(caffemodel)) time.sleep(10) caffe.set_mode_gpu() caffe.set_device(gpu_id) net", "as pd def splitall(path): allparts = [] while 1: parts", "dest='dir', help='Directory of the model files', default=\"\", type=str, required=True) parser.add_argument('--models',", "== path: # sentinel for absolute paths allparts.insert(0, parts[0]) break", "elif parts[1] == path: # sentinel for relative paths allparts.insert(0,", "= parse_args() gpu_id = 0 # dir = '/home/abhijitcbim/git/pedestrian-detector/output/faster_rcnn_end2end/train/backup' #", "R-CNN network on an image database.\"\"\" import _init_paths from fast_rcnn.test", "break else: path = parts[0] allparts.insert(0, parts[1]) return allparts def", "time.sleep(10) caffe.set_mode_gpu() caffe.set_device(gpu_id) net = caffe.Net(prototxt, caffemodel, caffe.TEST) net.name =", "test', default='ped_test_small', 
type=str, required=True) parser.add_argument('--cfg', dest='cfg_file', help='cfg', default='experiments/cfgs/faster_rcnn_end2end.yml', type=str) parser.add_argument('--res',", "not None: cfg_from_file(cfg_file) cfg.GPU_ID = gpu_id print('Using config:') pprint.pprint(cfg) while", "get_imdb(imdb_name) if not cfg.TEST.HAS_RPN: imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD) n, _ = os.path.splitext(args.caffemodel) paths", "paths allparts.insert(0, parts[0]) break elif parts[1] == path: # sentinel", "if cfg_file is not None: cfg_from_file(cfg_file) cfg.GPU_ID = gpu_id print('Using", "proposal_prefix = paths[-1] return test_net(net, imdb, max_per_image=100, vis=False, proposal_prefix=proposal_prefix) def", "help='dataset to test', default='ped_test_small', type=str, required=True) parser.add_argument('--cfg', dest='cfg_file', help='cfg', default='experiments/cfgs/faster_rcnn_end2end.yml',", "to use', default=0, type=int, required=True) parser.add_argument('--dir', dest='dir', help='Directory of the", "'__main__': # args = parse_args() gpu_id = 0 # dir", "caffe import argparse import pprint import time, os, sys import", "for details] # Written by <NAME> # -------------------------------------------------------- \"\"\"Test a", "help='cfg', default='experiments/cfgs/faster_rcnn_end2end.yml', type=str) parser.add_argument('--res', dest='res_file', help='result file', default='', type=str, required=True)", "caffemodel, caffe.TEST) net.name = os.path.splitext(os.path.basename(caffemodel))[0] imdb = get_imdb(imdb_name) if not", "from fast_rcnn.test import test_net from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list", "type=str, required=True) args = parser.parse_args() return args def run_test_net(gpu_id, caffemodel,", "parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use', default=0, type=int, required=True) parser.add_argument('--dir',", "parts[1]) return allparts def parse_args(): \"\"\" Parse input arguments \"\"\"", "import cfg, cfg_from_file, 
cfg_from_list from datasets.factory import get_imdb import caffe", "use', default=0, type=int, required=True) parser.add_argument('--dir', dest='dir', help='Directory of the model", "to test', default='ped_test_small', type=str, required=True) parser.add_argument('--cfg', dest='cfg_file', help='cfg', default='experiments/cfgs/faster_rcnn_end2end.yml', type=str)", "allparts.insert(0, parts[1]) break else: path = parts[0] allparts.insert(0, parts[1]) return", "paths[-1] return test_net(net, imdb, max_per_image=100, vis=False, proposal_prefix=proposal_prefix) def run_test_nets(gpu_id, dir,", "caffe.set_mode_gpu() caffe.set_device(gpu_id) net = caffe.Net(prototxt, caffemodel, caffe.TEST) net.name = os.path.splitext(os.path.basename(caffemodel))[0]", "pprint.pprint(cfg) while not os.path.exists(caffemodel): print('Waiting for {} to exist...'.format(caffemodel)) time.sleep(10)", "an image database.\"\"\" import _init_paths from fast_rcnn.test import test_net from", "network pipeline') parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use', default=0, type=int,", "prototxt, imdb_name, cfg_file, res_file): models = [line.rstrip('\\n') for line in", "pprint import time, os, sys import pandas as pd def", "args = parser.parse_args() return args def run_test_net(gpu_id, caffemodel, prototxt, imdb_name,", "default=\"\", type=str, required=True) parser.add_argument('--models', dest='model_files', help='Text file with names of", "relative paths allparts.insert(0, parts[1]) break else: path = parts[0] allparts.insert(0,", "config:') pprint.pprint(cfg) while not os.path.exists(caffemodel): print('Waiting for {} to exist...'.format(caffemodel))", "imdb, max_per_image=100, vis=False, proposal_prefix=proposal_prefix) def run_test_nets(gpu_id, dir, model_files, prototxt, imdb_name,", "2015 Microsoft # Licensed under The MIT License [see LICENSE", "print(args) run_test_nets(args.gpu_id, args.dir, args.model_files, args.prototxt, args.imdb_name, args.cfg_file, args.res_file) # 
run_test_net(gpu_id,caffemodel,", "for line in open(os.path.join(dir, model_files))] df_results = pd.DataFrame() for model", "required=True) parser.add_argument('--models', dest='model_files', help='Text file with names of models', default=None,", "parts[0]) break elif parts[1] == path: # sentinel for relative", "names of models', default=None, type=str, required=True) parser.add_argument('--prototxt', dest='prototxt', help='prototxt', default=None,", "= caffe.Net(prototxt, caffemodel, caffe.TEST) net.name = os.path.splitext(os.path.basename(caffemodel))[0] imdb = get_imdb(imdb_name)", "res_file): models = [line.rstrip('\\n') for line in open(os.path.join(dir, model_files))] df_results", "caffe.Net(prototxt, caffemodel, caffe.TEST) net.name = os.path.splitext(os.path.basename(caffemodel))[0] imdb = get_imdb(imdb_name) if", "ignore_index=True) df_results.to_csv(os.path.join(dir, res_file)) if __name__ == '__main__': # args =", "allparts def parse_args(): \"\"\" Parse input arguments \"\"\" parser =", "by <NAME> # -------------------------------------------------------- \"\"\"Test a Fast R-CNN network on", "paths allparts.insert(0, parts[1]) break else: path = parts[0] allparts.insert(0, parts[1])", "on an image database.\"\"\" import _init_paths from fast_rcnn.test import test_net", "print('Using config:') pprint.pprint(cfg) while not os.path.exists(caffemodel): print('Waiting for {} to", "args def run_test_net(gpu_id, caffemodel, prototxt, imdb_name, cfg_file): if cfg_file is", "[see LICENSE for details] # Written by <NAME> # --------------------------------------------------------", "args = parse_args() gpu_id = 0 # dir = '/home/abhijitcbim/git/pedestrian-detector/output/faster_rcnn_end2end/train/backup'", "The MIT License [see LICENSE for details] # Written by", "default=0, type=int, required=True) parser.add_argument('--dir', dest='dir', help='Directory of the model files',", "os, sys import pandas as pd def splitall(path): allparts =", "parts[1]) break else: 
path = parts[0] allparts.insert(0, parts[1]) return allparts", "imdb_name, cfg_file) for result in results: result['file'] = model df_results", "sys import pandas as pd def splitall(path): allparts = []", "import argparse import pprint import time, os, sys import pandas", "# sentinel for absolute paths allparts.insert(0, parts[0]) break elif parts[1]", "cfg.TEST.HAS_RPN: imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD) n, _ = os.path.splitext(args.caffemodel) paths = splitall(n) proposal_prefix", "of models', default=None, type=str, required=True) parser.add_argument('--prototxt', dest='prototxt', help='prototxt', default=None, type=str,", "sentinel for relative paths allparts.insert(0, parts[1]) break else: path =", "to exist...'.format(caffemodel)) time.sleep(10) caffe.set_mode_gpu() caffe.set_device(gpu_id) net = caffe.Net(prototxt, caffemodel, caffe.TEST)", "required=True) parser.add_argument('--prototxt', dest='prototxt', help='prototxt', default=None, type=str, required=True) parser.add_argument('--imdb', dest='imdb_name', help='dataset", "os.path.splitext(os.path.basename(caffemodel))[0] imdb = get_imdb(imdb_name) if not cfg.TEST.HAS_RPN: imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD) n, _", "'/home/abhijitcbim/git/pedestrian-detector/output/faster_rcnn_end2end/train/backup' # model_files = 'test.txt' args = parse_args() print('Called with", "model_files))] df_results = pd.DataFrame() for model in models: results =", "[line.rstrip('\\n') for line in open(os.path.join(dir, model_files))] df_results = pd.DataFrame() for", "parts[1] == path: # sentinel for relative paths allparts.insert(0, parts[1])", "= '/home/abhijitcbim/git/pedestrian-detector/output/faster_rcnn_end2end/train/backup' # model_files = 'test.txt' args = parse_args() print('Called", "caffe.TEST) net.name = os.path.splitext(os.path.basename(caffemodel))[0] imdb = get_imdb(imdb_name) if not cfg.TEST.HAS_RPN:", "= os.path.split(path) if parts[0] == path: # sentinel for absolute", 
"fast_rcnn.test import test_net from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list from", "path: # sentinel for absolute paths allparts.insert(0, parts[0]) break elif", "net.name = os.path.splitext(os.path.basename(caffemodel))[0] imdb = get_imdb(imdb_name) if not cfg.TEST.HAS_RPN: imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)", "in models: results = run_test_net(gpu_id, os.path.join(dir, model), prototxt, imdb_name, cfg_file)", "os.path.splitext(args.caffemodel) paths = splitall(n) proposal_prefix = paths[-1] return test_net(net, imdb,", "proposal_prefix=proposal_prefix) def run_test_nets(gpu_id, dir, model_files, prototxt, imdb_name, cfg_file, res_file): models", "absolute paths allparts.insert(0, parts[0]) break elif parts[1] == path: #", "fast_rcnn.config import cfg, cfg_from_file, cfg_from_list from datasets.factory import get_imdb import", "while not os.path.exists(caffemodel): print('Waiting for {} to exist...'.format(caffemodel)) time.sleep(10) caffe.set_mode_gpu()", "cfg.GPU_ID = gpu_id print('Using config:') pprint.pprint(cfg) while not os.path.exists(caffemodel): print('Waiting", "df_results = df_results.append(results, ignore_index=True) df_results.to_csv(os.path.join(dir, res_file)) if __name__ == '__main__':", "from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list from datasets.factory import get_imdb", "# Copyright (c) 2015 Microsoft # Licensed under The MIT", "__name__ == '__main__': # args = parse_args() gpu_id = 0", "args:') print(args) run_test_nets(args.gpu_id, args.dir, args.model_files, args.prototxt, args.imdb_name, args.cfg_file, args.res_file) #", "break elif parts[1] == path: # sentinel for relative paths", "models: results = run_test_net(gpu_id, os.path.join(dir, model), prototxt, imdb_name, cfg_file) for", "models = [line.rstrip('\\n') for line in open(os.path.join(dir, model_files))] df_results =", "0 # dir = '/home/abhijitcbim/git/pedestrian-detector/output/faster_rcnn_end2end/train/backup' # model_files = 
'test.txt' args", "run_test_net(gpu_id, os.path.join(dir, model), prototxt, imdb_name, cfg_file) for result in results:", "# dir = '/home/abhijitcbim/git/pedestrian-detector/output/faster_rcnn_end2end/train/backup' # model_files = 'test.txt' args =", "result in results: result['file'] = model df_results = df_results.append(results, ignore_index=True)", "#!/usr/bin/env python # -------------------------------------------------------- # Fast R-CNN # Copyright (c)", "# Written by <NAME> # -------------------------------------------------------- \"\"\"Test a Fast R-CNN", "run_test_net(gpu_id, caffemodel, prototxt, imdb_name, cfg_file): if cfg_file is not None:", "cfg_from_file(cfg_file) cfg.GPU_ID = gpu_id print('Using config:') pprint.pprint(cfg) while not os.path.exists(caffemodel):", "default='experiments/cfgs/faster_rcnn_end2end.yml', type=str) parser.add_argument('--res', dest='res_file', help='result file', default='', type=str, required=True) args", "(c) 2015 Microsoft # Licensed under The MIT License [see", "for model in models: results = run_test_net(gpu_id, os.path.join(dir, model), prototxt,", "df_results.to_csv(os.path.join(dir, res_file)) if __name__ == '__main__': # args = parse_args()", "not os.path.exists(caffemodel): print('Waiting for {} to exist...'.format(caffemodel)) time.sleep(10) caffe.set_mode_gpu() caffe.set_device(gpu_id)", "allparts.insert(0, parts[0]) break elif parts[1] == path: # sentinel for", "cfg_from_file, cfg_from_list from datasets.factory import get_imdb import caffe import argparse", "parts[0] == path: # sentinel for absolute paths allparts.insert(0, parts[0])", "default=None, type=str, required=True) parser.add_argument('--prototxt', dest='prototxt', help='prototxt', default=None, type=str, required=True) parser.add_argument('--imdb',", "= [] while 1: parts = os.path.split(path) if parts[0] ==", "line in open(os.path.join(dir, model_files))] df_results = pd.DataFrame() for model in", "datasets.factory import get_imdb import caffe 
import argparse import pprint import", "required=True) parser.add_argument('--dir', dest='dir', help='Directory of the model files', default=\"\", type=str,", "-------------------------------------------------------- \"\"\"Test a Fast R-CNN network on an image database.\"\"\"", "type=int, required=True) parser.add_argument('--dir', dest='dir', help='Directory of the model files', default=\"\"," ]
[ "import url from . import views urlpatterns = [ url('^$',", "django.conf.urls.static import static from django.conf.urls import url from . import", "'gallary'), url(r'^search/', views.search_image, name='search_image'), url(r'^details/(\\d+)',views.search_location,name ='images') ] if settings.DEBUG: urlpatterns+=", "<gh_stars>0 from django.conf import settings from django.conf.urls.static import static from", "= [ url('^$', views.gallary,name = 'gallary'), url(r'^search/', views.search_image, name='search_image'), url(r'^details/(\\d+)',views.search_location,name", "from django.conf import settings from django.conf.urls.static import static from django.conf.urls", "urlpatterns = [ url('^$', views.gallary,name = 'gallary'), url(r'^search/', views.search_image, name='search_image'),", "from django.conf.urls.static import static from django.conf.urls import url from .", "url from . import views urlpatterns = [ url('^$', views.gallary,name", "url(r'^details/(\\d+)',views.search_location,name ='images') ] if settings.DEBUG: urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)", "from django.conf.urls import url from . import views urlpatterns =", "[ url('^$', views.gallary,name = 'gallary'), url(r'^search/', views.search_image, name='search_image'), url(r'^details/(\\d+)',views.search_location,name ='images')", "views.gallary,name = 'gallary'), url(r'^search/', views.search_image, name='search_image'), url(r'^details/(\\d+)',views.search_location,name ='images') ] if", "static from django.conf.urls import url from . import views urlpatterns", "django.conf import settings from django.conf.urls.static import static from django.conf.urls import", "settings from django.conf.urls.static import static from django.conf.urls import url from", "from . 
import views urlpatterns = [ url('^$', views.gallary,name =", "url(r'^search/', views.search_image, name='search_image'), url(r'^details/(\\d+)',views.search_location,name ='images') ] if settings.DEBUG: urlpatterns+= static(settings.MEDIA_URL,", "django.conf.urls import url from . import views urlpatterns = [", "url('^$', views.gallary,name = 'gallary'), url(r'^search/', views.search_image, name='search_image'), url(r'^details/(\\d+)',views.search_location,name ='images') ]", "name='search_image'), url(r'^details/(\\d+)',views.search_location,name ='images') ] if settings.DEBUG: urlpatterns+= static(settings.MEDIA_URL, document_root =", "= 'gallary'), url(r'^search/', views.search_image, name='search_image'), url(r'^details/(\\d+)',views.search_location,name ='images') ] if settings.DEBUG:", "import static from django.conf.urls import url from . import views", "views.search_image, name='search_image'), url(r'^details/(\\d+)',views.search_location,name ='images') ] if settings.DEBUG: urlpatterns+= static(settings.MEDIA_URL, document_root", "import views urlpatterns = [ url('^$', views.gallary,name = 'gallary'), url(r'^search/',", "import settings from django.conf.urls.static import static from django.conf.urls import url", ". import views urlpatterns = [ url('^$', views.gallary,name = 'gallary'),", "views urlpatterns = [ url('^$', views.gallary,name = 'gallary'), url(r'^search/', views.search_image," ]