Dataset columns: docstring (string, lengths 52 to 499), function (string, lengths 67 to 35.2k), __index_level_0__ (int64, values 52.6k to 1.16M). Each record below gives a docstring, the corresponding function source, and its row index.
Delete an invoice. You can delete an invoice which is in the draft state. Args: invoice_id : Id of the invoice to delete Returns: The response is always an empty array, like this: []
def delete(self, invoice_id, **kwargs): url = "{}/{}".format(self.base_url, invoice_id) return self.delete_url(url, {}, **kwargs)
340,189
Issues an invoice in the draft state. Args: invoice_id : Id of the invoice to issue Returns: The response is the invoice entity, similar to the create/update API response. Its status will now be issued.
def issue(self, invoice_id, **kwargs): url = "{}/{}/issue".format(self.base_url, invoice_id) return self.post_url(url, {}, **kwargs)
340,190
Update an invoice. In the draft state all attributes are allowed. Args: invoice_id : Id of the invoice to update data : Dictionary of the attributes to update on the invoice Returns: The response is the invoice entity, similar to the create/update API response. Refer https://razorpay.com/docs/invoices/api/#entity-structure
def edit(self, invoice_id, data={}, **kwargs): url = "{}/{}".format(self.base_url, invoice_id) return self.patch_url(url, data, **kwargs)
340,191
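Taken together, the three draft-invoice endpoints above form a small lifecycle. A minimal usage sketch, assuming the Razorpay Python SDK's client with hypothetical credentials and invoice id:

```python
import razorpay

# Hypothetical credentials; substitute your own key id and secret.
client = razorpay.Client(auth=("rzp_test_key", "secret"))

invoice_id = "inv_0000000000"  # hypothetical draft invoice id

# While the invoice is in the draft state, any attribute may be updated.
client.invoice.edit(invoice_id, {"description": "Updated description"})

# Issuing moves the invoice out of draft; its status becomes "issued".
client.invoice.issue(invoice_id)

# delete() only works on drafts, so it must happen before issue();
# on success the API returns an empty array: []
# client.invoice.delete(invoice_id)
```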
Evaluates calls from call_queue and places the results in result_queue. This worker is run in a separate process. Args: call_queue: A multiprocessing.Queue of _CallItems that will be read and evaluated by the worker. result_queue: A multiprocessing.Queue of _ResultItems that will be written to by the worker. A None item on call_queue signals the worker to exit; it echoes None to result_queue to wake the queue-management thread.
def _process_worker(call_queue, result_queue): while True: call_item = call_queue.get(block=True) if call_item is None: # Wake up queue management thread result_queue.put(None) return try: r = call_item.fn(*call_item.args, **call_item.kwargs) except BaseException: e = sys.exc_info()[1] result_queue.put(_ResultItem(call_item.work_id, exception=e)) else: result_queue.put(_ResultItem(call_item.work_id, result=r))
340,299
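The None sentinel on call_queue is what drives shutdown in the loop above. A self-contained sketch that exercises the same loop with stand-in _CallItem/_ResultItem types (the real ones are private to the futures package), run inline for determinism:

```python
import multiprocessing
from collections import namedtuple

# Stand-ins for the private types used by the worker loop.
_CallItem = namedtuple("_CallItem", "work_id fn args kwargs")
_ResultItem = namedtuple("_ResultItem", "work_id exception result")

def worker(call_queue, result_queue):
    while True:
        call_item = call_queue.get(block=True)
        if call_item is None:          # sentinel: wake management side and exit
            result_queue.put(None)
            return
        try:
            r = call_item.fn(*call_item.args, **call_item.kwargs)
        except BaseException as e:
            result_queue.put(_ResultItem(call_item.work_id, e, None))
        else:
            result_queue.put(_ResultItem(call_item.work_id, None, r))

calls, results = multiprocessing.Queue(), multiprocessing.Queue()
calls.put(_CallItem(0, pow, (2, 10), {}))
calls.put(None)                        # request shutdown
worker(calls, results)                 # run inline for demonstration
print(results.get())                   # _ResultItem(work_id=0, exception=None, result=1024)
```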
Constructor. Args: compressed_files: one or more zipfile.ZipFile or tarfile.TarFile objects (dicts or lists of member names are also accepted).
def __init__(self, *compressed_files, **kwargs): self._files = [] self._prefixes = defaultdict(lambda: set([''])) self._extract = kwargs.get('extract', False) self._supersede = kwargs.get('supersede', False) self._match_version = kwargs.get('_match_version', True) self._local_warned = False for f in compressed_files: if isinstance(f, zipfile.ZipFile): bin_package = any(n.endswith('.so') or n.endswith('.pxd') or n.endswith('.dylib') for n in f.namelist()) need_extract = True elif isinstance(f, tarfile.TarFile): bin_package = any(m.name.endswith('.so') or m.name.endswith('.pxd') or m.name.endswith('.dylib') for m in f.getmembers()) need_extract = True elif isinstance(f, dict): bin_package = any(name.endswith('.so') or name.endswith('.pxd') or name.endswith('.dylib') for name in iterkeys(f)) need_extract = False elif isinstance(f, list): bin_package = any(name.endswith('.so') or name.endswith('.pxd') or name.endswith('.dylib') for name in f) need_extract = False else: raise TypeError('Compressed file can only be zipfile.ZipFile or tarfile.TarFile') if bin_package: if not ALLOW_BINARY: raise SystemError('Cannot load binary package. It is quite possible that you are using an old ' 'MaxCompute service which does not support binary packages. If this is ' 'not true, please set `odps.isolation.session.enable` to True or ask your ' 'project owner to change project-level configuration.') if need_extract: f = self._extract_archive(f) prefixes = set(['']) dir_prefixes = set() if isinstance(f, zipfile.ZipFile): for name in f.namelist(): name = name if name.endswith('/') else (name.rsplit('/', 1)[0] + '/') if name in prefixes: continue try: f.getinfo(name + '__init__.py') except KeyError: prefixes.add(name) elif isinstance(f, tarfile.TarFile): for member in f.getmembers(): name = member.name if member.isdir() else member.name.rsplit('/', 1)[0] if name in prefixes: continue try: f.getmember(name + '/__init__.py') except KeyError: prefixes.add(name + '/') elif isinstance(f, (list, dict)): # Force ArchiveResource to run under binary mode to resolve manually # opening __file__ paths in pure-python code. if ALLOW_BINARY: bin_package = True rendered_names = set() for name in f: name = name.replace(os.sep, '/') rendered_names.add(name) for name in rendered_names: name = name if name.endswith('/') else (name.rsplit('/', 1)[0] + '/') if name in prefixes or '/tests/' in name: continue if name + '__init__.py' not in rendered_names: prefixes.add(name) dir_prefixes.add(name) else: if '/' in name.rstrip('/'): ppath = name.rstrip('/').rsplit('/', 1)[0] else: ppath = '' prefixes.add(ppath) dir_prefixes.add(ppath) if bin_package: path_patch = [] for p in sorted(dir_prefixes): if p in sys.path: continue parent_exist = False for pp in path_patch: if p[:len(pp)] == pp: parent_exist = True break if parent_exist: continue path_patch.append(p) if self._supersede: sys.path = path_patch + sys.path else: sys.path = sys.path + path_patch else: self._files.append(f) if prefixes: self._prefixes[id(f)] = sorted(prefixes)
341,128
PEP-302-compliant load_module() method. Args: fullmodname: The dot-separated full module name, e.g. 'django.core.mail'. Returns: The module object constructed from the source code. Raises: SyntaxError if the module's source code is syntactically incorrect. ImportError if there was a problem accessing the source code. Whatever else can be raised by executing the module's source code.
def load_module(self, fullmodname): submodname, is_package, fullpath, source = self._get_source(fullmodname) code = compile(source, fullpath, 'exec') mod = sys.modules.get(fullmodname) try: if mod is None: mod = sys.modules[fullmodname] = types.ModuleType(fullmodname) mod.__loader__ = self mod.__file__ = fullpath mod.__name__ = fullmodname if is_package: mod.__path__ = [os.path.dirname(mod.__file__)] exec(code, mod.__dict__) except: if fullmodname in sys.modules: del sys.modules[fullmodname] raise return mod
341,132
Returns an unsigned 32-bit integer that encodes the field number and wire type information in standard protocol message wire format. Args: field_number: Expected to be an integer in the range [1, 1 << 29) wire_type: One of the WIRETYPE_* constants.
def pack_tag(field_number, wire_type): if not 0 <= wire_type <= _WIRETYPE_MAX: raise errors.EncodeError('Unknown wire type: %d' % wire_type) return (field_number << TAG_TYPE_BITS) | wire_type
341,189
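The returned tag is just the field number shifted past the wire-type bits. A standalone check, assuming protobuf's usual constants (TAG_TYPE_BITS = 3, wire type 0 = varint, 2 = length-delimited):

```python
TAG_TYPE_BITS = 3            # number of low bits reserved for the wire type
WIRETYPE_VARINT = 0

def pack_tag(field_number, wire_type):
    return (field_number << TAG_TYPE_BITS) | wire_type

# Field 1 encoded as a varint: (1 << 3) | 0 == 0x08, the first byte of
# many serialized messages.
assert pack_tag(1, WIRETYPE_VARINT) == 0x08
assert pack_tag(2, 2) == 0x12        # field 2, length-delimited (wire type 2)
```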
Here is an example of reading out a part of a DB. Args: db (int): The db to use start (int): The index of where to start in the db data size (int): The size of the db data to read
def get_db_row(db, start, size): type_ = snap7.snap7types.wordlen_to_ctypes[snap7.snap7types.S7WLByte] data = client.db_read(db, start, type_, size) # print_row(data[:60]) return data
343,332
Here we replace a piece of data in a db block with new data. Args: db (int): The db to use start (int): The start within the db size (int): The size of the data in bytes _bytearray (bytearray): The data to put in the db
def set_db_row(db, start, size, _bytearray): client.db_write(db, start, size, _bytearray)
343,333
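A usage sketch for the two DB helpers, assuming the older python-snap7 API used above and a hypothetical PLC address; both helpers rely on a module-level client:

```python
import snap7

# Hypothetical connection parameters: PLC IP, rack 0, slot 1.
client = snap7.client.Client()
client.connect("192.168.0.1", 0, 1)

row = get_db_row(db=1, start=0, size=4)      # read 4 bytes from DB1
set_db_row(1, 0, 4, bytearray(b"\x00" * 4))  # write them back zeroed

client.disconnect()
```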
Put scancodes that represent keys defined in the sequences provided. Arguments: press_keys: Press a sequence of keys hold_keys: While pressing the sequence of keys, hold down the keys defined in hold_keys. press_delay: Number of milliseconds to delay between each press Note: Both press_keys and hold_keys are iterable objects that yield self.SCANCODE.keys() keys.
def put_keys(self, press_keys=None, hold_keys=None, press_delay=50): if press_keys is None: press_keys = [] if hold_keys is None: hold_keys = [] release_codes = set() put_codes = set() try: # hold the keys for k in hold_keys: put, release = self.SCANCODES[k] # Avoid putting codes over and over put = set(put) - put_codes self.put_scancodes(list(put)) put_codes.update(put) release_codes.update(release) # press the keys for k in press_keys: put, release = self.SCANCODES[k] # Avoid putting held codes _put = set(put) - put_codes if not _put: continue release = set(release) - release_codes # Avoid releasing held codes if not release: continue self.put_scancodes(list(put) + list(release)) time.sleep(press_delay / 1000.0) finally: # release the held keys for code in release_codes: self.put_scancode(code)
344,626
Register a callback function against an event_source for a given event_type. Arguments: callback - function to call when the event occurs event_source - the source to monitor events in event_type - the type of event we're monitoring for Returns the registration id (callback_id).
def register_callback(callback, event_source, event_type): global _callbacks event_interface = type_to_interface(event_type) listener = event_source.create_listener() event_source.register_listener(listener, [event_type], False) quit = threading.Event() t = threading.Thread(target=_event_monitor, args=(callback, event_source, listener, event_interface, quit)) t.daemon = True t.start() while t.is_alive() is False: continue _callbacks[t.ident] = (t, quit) return t.ident
344,670
Lock this machine. Arguments: lock_type - see IMachine.lock_machine for details session - optionally define a session object to lock this machine against. If not defined, a new ISession object is created to lock against. Returns an ISession object.
def create_session(self, lock_type=library.LockType.shared, session=None): if session is None: session = library.ISession() # NOTE: The following hack handles the issue of unknown machine state. # This occurs most frequently when a machine is powered off and # in spite waiting for the completion event to end, the state of # machine still raises the following Error: # virtualbox.library.VBoxErrorVmError: 0x80bb0003 (Failed to \ # get a console object from the direct session (Unknown \ # Status 0x80BB0002)) error = None for _ in range(10): try: self.lock_machine(session, lock_type) except Exception as exc: error = exc time.sleep(1) continue else: break else: if error is not None: raise Exception("Failed to create clone - %s" % error) return session
344,674
Log a histogram for given name on given step. Args: name (str): name of the variable (it will be converted to a valid tensorflow summary name). value (tuple or list): either list of numbers to be summarized as a histogram, or a tuple of bin_edges and bincounts that directly define a histogram. step (int): non-negative integer used for visualization
def log_histogram(self, name, value, step=None): if isinstance(value, six.string_types): raise TypeError('"value" should be a number, got {}' .format(type(value))) self._check_step(step) tf_name = self._ensure_tf_name(name) summary = self._histogram_summary(tf_name, value, step=step) self._log_summary(tf_name, summary, value, step=step)
346,584
Log new images for given name on given step. Args: name (str): name of the variable (it will be converted to a valid tensorflow summary name). images (list): list of images to visualize step (int): non-negative integer used for visualization
def log_images(self, name, images, step=None): if isinstance(images, six.string_types): raise TypeError('"images" should be a list of ndarrays, got {}' .format(type(images))) self._check_step(step) tf_name = self._ensure_tf_name(name) summary = self._image_summary(tf_name, images, step=step) self._log_summary(tf_name, summary, images, step=step)
346,585
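A usage sketch for the two logging methods, assuming they come from the tensorboard_logger package's Logger class (the log directory is a placeholder):

```python
import numpy as np
from tensorboard_logger import Logger   # assumed source package for these methods

logger = Logger("/tmp/tf_logs")          # hypothetical log directory

for step in range(3):
    values = np.random.randn(1000)
    logger.log_histogram("weights/layer1", list(values), step=step)

images = [np.zeros((28, 28), dtype=np.uint8)]
logger.log_images("inputs/sample", images, step=0)
```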
Execute an assembler instruction. Args: asm_instr (X86Instruction): An instruction to execute. Returns: An int: the address of the next instruction to execute.
def execute(self, asm_instr): # Update the instruction pointer. self.ir_emulator.registers[self.ip] = asm_instr.address + asm_instr.size # Process syscall. if self.arch_info.instr_is_syscall(asm_instr): raise Syscall() # Process instruction and return next address instruction to execute. return self.__execute(asm_instr)
347,473
Open a file for analysis. Args: filename (str): Name of an executable file.
def open(self, filename): if filename: self.binary = BinaryFile(filename) self.text_section = self.binary.text_section self._load(arch_mode=self.binary.architecture_mode)
347,718
Load an architecture's supporting modules. Args: name (str): Architecture's name. arch_info (ArchitectureInformation): Architecture information object. disassembler (Disassembler): Disassembler for the architecture. translator (Translator): Translator for the architecture.
def load_architecture(self, name, arch_info, disassembler, translator): # Set up architecture information. self.name = name self.arch_info = arch_info self.disassembler = disassembler self.ir_translator = translator # Setup analysis modules. self._setup_analysis_modules()
347,719
Translate to REIL instructions. Args: start (int): Start address. end (int): End address. arch_mode (int): Architecture mode. Returns: (int, Instruction, list): A tuple of the form (address, assembler instruction, REIL instructions).
def translate(self, start=None, end=None, arch_mode=None): start_addr = start if start else self.binary.ea_start end_addr = end if end else self.binary.ea_end self.ir_translator.reset() for addr, asm, _ in self.disassemble(start=start_addr, end=end_addr, arch_mode=arch_mode): yield addr, asm, self.ir_translator.translate(asm)
347,720
Disassemble native instructions. Args: start (int): Start address. end (int): End address. arch_mode (int): Architecture mode. Returns: (int, Instruction, int): A tuple of the form (address, assembler instruction, instruction size).
def disassemble(self, start=None, end=None, arch_mode=None): if arch_mode is None: arch_mode = self.binary.architecture_mode curr_addr = start if start else self.binary.ea_start end_addr = end if end else self.binary.ea_end while curr_addr < end_addr: # Fetch the instruction. encoding = self.__fetch_instr(curr_addr) # Decode it. asm_instr = self.disassembler.disassemble(encoding, curr_addr, architecture_mode=arch_mode) if not asm_instr: return yield curr_addr, asm_instr, asm_instr.size # update instruction pointer curr_addr += asm_instr.size
347,721
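A sketch of driving the disassemble generator, assuming BARF's project class and a hypothetical binary path (the import path is an assumption):

```python
from barf import BARF   # import path is an assumption about the BARF framework

barf = BARF("/tmp/sample.bin")      # hypothetical binary path

# Walk the binary one native instruction at a time.
for addr, asm_instr, size in barf.disassemble():
    print("{:#010x} ({:d} bytes) {}".format(addr, size, asm_instr))
```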
Recover CFG. Args: start (int): Start address. end (int): End address. symbols (dict): Symbol table. callback (function): A callback function which is called after each successfully recovered CFG. arch_mode (int): Architecture mode. Returns: ControlFlowGraph: A CFG.
def recover_cfg(self, start=None, end=None, symbols=None, callback=None, arch_mode=None): # Set architecture in case it wasn't already set. if arch_mode is None: arch_mode = self.binary.architecture_mode # Reload modules. self._load(arch_mode=arch_mode) # Check start address. start = start if start else self.binary.entry_point cfg, _ = self._recover_cfg(start=start, end=end, symbols=symbols, callback=callback) return cfg
347,722
Recover CFG for all functions from an entry point and/or symbol table. Args: entries (list): A list of function addresses to start the CFG recovery process from. symbols (dict): Symbol table. callback (function): A callback function which is called after each successfully recovered CFG. arch_mode (int): Architecture mode. Returns: list: A list of recovered CFGs.
def recover_cfg_all(self, entries, symbols=None, callback=None, arch_mode=None): # Set architecture in case it wasn't already set. if arch_mode is None: arch_mode = self.binary.architecture_mode # Reload modules. self._load(arch_mode=arch_mode) # Set symbols. symbols = {} if not symbols else symbols # Recover the CFGs. cfgs = [] addrs_processed = set() calls = entries while len(calls) > 0: start, calls = calls[0], calls[1:] cfg, calls_tmp = self._recover_cfg(start=start, symbols=symbols, callback=callback) addrs_processed.add(start) cfgs.append(cfg) for addr in sorted(calls_tmp): if addr not in addrs_processed and addr not in calls: calls.append(addr) return cfgs
347,723
Emulate native code. Args: context (dict): Processor context (register and/or memory). start (int): Start address. end (int): End address. arch_mode (int): Architecture mode. hooks (dict): Hooks by address. max_instrs (int): Maximum number of instructions to execute. print_asm (bool): Print asm. Returns: dict: Processor context.
def emulate(self, context=None, start=None, end=None, arch_mode=None, hooks=None, max_instrs=None, print_asm=False): if arch_mode is not None: # Reload modules. self._load(arch_mode=arch_mode) context = context if context else {} start_addr = start if start else self.binary.ea_start end_addr = end if end else self.binary.ea_end hooks = hooks if hooks else {} # Load registers for reg, val in context.get('registers', {}).items(): self.ir_emulator.registers[reg] = val # Load memory # TODO Memory content should be encoded as hex strings so each # entry can be of different sizes. for addr, val in context.get('memory', {}).items(): self.ir_emulator.memory.write(addr, 4, val) # Execute the code. self.emulator.emulate(start_addr, end_addr, hooks, max_instrs, print_asm) context_out = { 'registers': {}, 'memory': {} } # save registers for reg, val in self.ir_emulator.registers.items(): context_out['registers'][reg] = val return context_out
347,725
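The context dict layout is implied by the loading code above: 'registers' and 'memory' sub-dicts. A hedged invocation sketch with placeholder addresses and register names:

```python
from barf import BARF   # import path is an assumption about the BARF framework

barf = BARF("/tmp/sample.bin")                 # hypothetical binary path

context_in = {
    "registers": {"eax": 0x1, "ebx": 0x2},     # placeholder register values
    "memory":    {0x1000: 0x0},                # each entry is written as 4 bytes
}

context_out = barf.emulate(context=context_in,
                           start=0x8048000,    # placeholder addresses
                           end=0x8048010,
                           print_asm=True)
print(context_out["registers"])
```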
Process a REIL instruction. Args: instr (ReilInstruction): Instruction to process. avoid (list): List of addresses to avoid while executing the code. next_addr (int): Address of the following instruction. initial_state (State): Initial execution state. execution_state (Queue): Queue of execution states. trace_current (list): Current trace. Returns: int: Returns the next address to execute.
def __process_instr(self, instr, avoid, next_addr, initial_state, execution_state, trace_current): # Process branch (JCC oprnd0, empty, oprnd2). if instr.mnemonic == ReilMnemonic.JCC: not_taken_addr = next_addr address, index = split_address(instr.address) logger.debug("[+] Processing branch: {:#08x}:{:02x} : {}".format(address, index, instr)) # Process conditional branch (oprnd0 is a REGISTER). if isinstance(instr.operands[0], ReilRegisterOperand): next_ip = self.__process_branch_cond(instr, avoid, initial_state, execution_state, trace_current, not_taken_addr) # Process unconditional branch (oprnd0 is an INTEGER). else: next_ip = self.__process_branch_uncond(instr, trace_current, not_taken_addr) # Process the rest of the instructions. else: trace_current += [(instr, None)] self.__cpu.execute(instr) next_ip = next_addr return next_ip
347,819
Process a REIL sequence. Args: sequence (ReilSequence): A REIL sequence to process. avoid (list): List of addresses to avoid. initial_state: Initial state. execution_state: Execution state queue. trace_current (list): Current trace. next_addr: Address of the next instruction following the current one. Returns: The next instruction to execute if there is one, otherwise None.
def __fa_process_sequence(self, sequence, avoid, initial_state, execution_state, trace_current, next_addr): # TODO: Process execution intra states. ip = sequence.address next_ip = None while ip: # Fetch next instruction in the sequence. try: instr = sequence.fetch(ip) except ReilSequenceInvalidAddressError: # At this point, ip should be a native instruction address, therefore # the index should be zero. assert split_address(ip)[1] == 0x0 next_ip = ip break try: target_addr = sequence.get_next_address(ip) except ReilSequenceInvalidAddressError: # We reached the end of the sequence. Execution continues on the next native instruction # (it's a REIL address). target_addr = next_addr next_ip = self.__process_instr(instr, avoid, target_addr, initial_state, execution_state, trace_current) # Update instruction pointer. try: ip = next_ip if next_ip else sequence.get_next_address(ip) except ReilSequenceInvalidAddressError: break return next_ip
347,821
Process a REIL container. Args: avoid (list): List of addresses to avoid while executing the code. container (ReilContainer): REIL container to execute. end (int): End address. execution_state (Queue): Queue of execution states. find (int): Address to find. initial_state (State): Initial state. start (int): Start address. trace_current (list): Current trace. trace_final (list): List of completed traces that reached the find address.
def __fa_process_container(self, container, find, start, end, avoid, initial_state, execution_state, trace_current, trace_final): ip = start while ip: # NOTE *ip* and *next_addr* variables can be, independently, either intra # or inter addresses. # Fetch next instruction. try: instr = container.fetch(ip) except ReilContainerInvalidAddressError: logger.debug("Exception @ {:#08x}".format(ip)) raise ReilContainerInvalidAddressError # Compute the address of the following instruction to the fetched one. try: next_addr = container.get_next_address(ip) except Exception: logger.debug("Exception @ {:#08x}".format(ip)) # TODO Should this be considered an error? raise ReilContainerInvalidAddressError # Process the instruction. next_ip = self.__process_instr(instr, avoid, next_addr, initial_state, execution_state, trace_current) # # ====================================================================================================== # # # NOTE This is an attempt to separate intra and inter instruction # # addresses processing. Here, *ip* and *next_addr* are always inter # # instruction addresses. # # assert split_address(ip)[1] == 0x0 # # # Compute the address of the following instruction to the fetched one. # try: # seq = container.fetch_sequence(ip) # except ReilContainerInvalidAddressError: # logger.debug("Exception @ {:#08x}".format(ip)) # # raise ReilContainerInvalidAddressError # # # Fetch next instruction address. # try: # next_addr = container.get_next_address(ip + len(seq)) # except Exception: # logger.debug("Exception @ {:#08x}".format(ip)) # # # TODO Should this be considered an error? # # raise ReilContainerInvalidAddressError # # next_ip = self.__process_sequence(seq, avoid, initial_state, execution_state, trace_current, next_addr) # # if next_ip: # assert split_address(next_ip)[1] == 0x0 # # # ====================================================================================================== # # Check termination conditions. if find and next_ip and next_ip == find: logger.debug("[+] Find address found!") trace_final.append(list(trace_current)) next_ip = None if end and next_ip and next_ip == end: logger.debug("[+] End address found!") next_ip = None # Update instruction pointer. ip = next_ip if next_ip else None while not ip: if not execution_state.empty(): # Pop next execution state. ip, trace_current, registers, memory = execution_state.get() if split_address(ip)[1] == 0x0: logger.debug("[+] Popping execution state @ {:#x} (INTER)".format(ip)) else: logger.debug("[+] Popping execution state @ {:#x} (INTRA)".format(ip)) # Setup cpu and memory. self.__cpu.registers = registers self.__cpu.memory = memory logger.debug("[+] Next address: {:#08x}:{:02x}".format(ip >> 8, ip & 0xff)) else: logger.debug("[+] No more paths to explore! Exiting...") break # Check termination conditions (AGAIN...). if find and ip == find: logger.debug("[+] Find address found!") trace_final.append(list(trace_current)) ip = None if end and ip == end: logger.debug("[+] End address found!") ip = None
347,822
Computes profiling information for the regions in the experiment's network and returns the total compute time spent in L2 regions. Parameters: ---------------------------- @param exp Experiment whose network regions will be profiled.
def getProfileInfo(exp): totalTime = 0.000001 for region in exp.network.regions.values(): timer = region.getComputeTimer() totalTime += timer.getElapsed() # Sort the region names regionNames = list(exp.network.regions.keys()) regionNames.sort() count = 1 profileInfo = [] L2Time = 0.0 L4Time = 0.0 for regionName in regionNames: region = exp.network.regions[regionName] timer = region.getComputeTimer() count = max(timer.getStartCount(), count) profileInfo.append([region.name, timer.getStartCount(), timer.getElapsed(), 100.0 * timer.getElapsed() / totalTime, timer.getElapsed() / max(timer.getStartCount(), 1)]) if "L2Column" in regionName: L2Time += timer.getElapsed() elif "L4Column" in regionName: L4Time += timer.getElapsed() return L2Time
348,591
The two key parameters are height and radius. Does not support arbitrary dimensions. Parameters: ---------------------------- @param height (int) Cylinder height. @param radius (int) Cylinder radius. @param epsilon (float) Object resolution. Defaults to self.DEFAULT_EPSILON
def __init__(self, height, radius, epsilon=None): self.radius = radius self.height = height self.dimension = 3 # no choice for cylinder dimension if epsilon is None: self.epsilon = self.DEFAULT_EPSILON else: self.epsilon = epsilon
348,639
The only key parameter is the list (or tuple) of dimensions, which can be of any size as long as its length is equal to the "dimension" parameter. Parameters: ---------------------------- @param dimensions (list(int)) List of the box's dimensions. @param dimension (int) Space dimension. Typically 3. @param epsilon (float) Object resolution. Defaults to self.DEFAULT_EPSILON
def __init__(self, dimensions, dimension=3, epsilon=None): self.dimensions = dimensions self.dimension = dimension if epsilon is None: self.epsilon = self.DEFAULT_EPSILON else: self.epsilon = epsilon
348,649
We simply pass the width as every dimension. Parameters: ---------------------------- @param width (int) Cube width. @param dimension (int) Space dimension. Typically 3. @param epsilon (float) Object resolution. Defaults to self.DEFAULT_EPSILON
def __init__(self, width, dimension=3, epsilon=None): self.width = width dimensions = [width] * dimension super(Cube, self).__init__(dimensions=dimensions, dimension=dimension, epsilon=epsilon)
348,658
The only key parameter to provide is the location of the file. Supports arbitrary dimensions. Parameters: ---------------------------- @param file (string) String representing the location of the file (a .ply file, to be specific). @param epsilon (float) Object resolution. Defaults to self.DEFAULT_EPSILON @param normalTolerance (float) Adjacent faces normal tolerance. Defaults to zero - edges appear more.
def __init__(self, file=None, normalTolerance = 0., epsilon=None): try: self.file = file self.model = ply.PlyData.read(self.file) self.vertices = self.model['vertex'] self.faces = self.model['face'] except IOError as e: print("Something went wrong!") print("Please check if file exists at {}".format(file)) raise IOError self.graphicsWindow = None self.mesh = None self.rng = random.Random() self.epsilon = self.DEFAULT_EPSILON if epsilon is None else epsilon self.sampledPoints = {i:[] for i in self._FEATURES} self.nTol = normalTolerance
348,660
Runs a simple experiment where three objects share a number of (location, feature) pairs. Parameters: ---------------------------- @param noiseLevel (float) Noise level to add to the locations and features during inference @param profile (bool) If True, the network will be profiled after learning and inference
def runSharedFeatures(noiseLevel=None, profile=False): exp = L4L2Experiment( "shared_features", enableLateralSP=True, enableFeedForwardSP=True ) pairs = createThreeObjects() objects = createObjectMachine( machineType="simple", numInputBits=20, sensorInputSize=1024, externalInputSize=1024 ) for object in pairs: objects.addObject(object) exp.learnObjects(objects.provideObjectsToLearn()) if profile: exp.printProfile() inferConfig = { "numSteps": 10, "noiseLevel": noiseLevel, "pairs": { 0: zip(range(10), range(10)) } } exp.infer(objects.provideObjectToInfer(inferConfig), objectName=0) if profile: exp.printProfile() exp.plotInferenceStats( fields=["L2 Representation", "Overlap L2 with object", "L4 Representation"], )
348,685
Generates a lot of random objects to profile the network. Parameters: ---------------------------- @param numObjects (int) Number of objects to create and learn.
def runStretchExperiment(numObjects=25): exp = L4L2Experiment( "profiling_experiment", enableLateralSP = True, enableFeedForwardSP=True ) objects = createObjectMachine( machineType="simple", numInputBits=20, sensorInputSize=1024, externalInputSize=1024 ) objects.createRandomObjects(numObjects=numObjects, numPoints=10) exp.learnObjects(objects.provideObjectsToLearn()) exp.printProfile() inferConfig = { "numSteps": len(objects[0]), "pairs": { 0: objects[0] } } exp.infer(objects.provideObjectToInfer(inferConfig), objectName=0) exp.printProfile() exp.plotInferenceStats( fields=["L2 Representation", "Overlap L2 with object", "L4 Representation"] )
348,687
Creates the network. Parameters: ---------------------------- @param TMOverrides (dict) Parameters to override in the TM region
def __init__(self, name, numCorticalColumns=1, inputSize=1024, numInputBits=20, externalInputSize=1024, numExternalInputBits=20, L2Overrides=None, networkType = "L4L2TMColumn", L4Overrides=None, seed=42, logCalls=False, objectNamesAreIndices=False, TMOverrides=None, ): # Handle logging - this has to be done first self.logCalls = logCalls registerAllResearchRegions() self.name = name self.numLearningPoints = 1 self.numColumns = numCorticalColumns self.inputSize = inputSize self.externalInputSize = externalInputSize self.numInputBits = numInputBits self.objectNamesAreIndices = objectNamesAreIndices # seed self.seed = seed random.seed(seed) # update parameters with overrides self.config = { "networkType": networkType, "numCorticalColumns": numCorticalColumns, "externalInputSize": externalInputSize, "sensorInputSize": inputSize, "enableFeedback": False, "L4Params": self.getDefaultL4Params(inputSize, numExternalInputBits), "L2Params": self.getDefaultL2Params(inputSize, numInputBits), "TMParams": self.getDefaultTMParams(self.inputSize, self.numInputBits), } if L2Overrides is not None: self.config["L2Params"].update(L2Overrides) if L4Overrides is not None: self.config["L4Params"].update(L4Overrides) if TMOverrides is not None: self.config["TMParams"].update(TMOverrides) # Recreate network including TM parameters self.network = createNetwork(self.config) self.sensorInputs = [] self.externalInputs = [] self.L4Regions = [] self.L2Regions = [] self.TMRegions = [] for i in xrange(self.numColumns): self.sensorInputs.append( self.network.regions["sensorInput_" + str(i)].getSelf() ) self.externalInputs.append( self.network.regions["externalInput_" + str(i)].getSelf() ) self.L4Regions.append( self.network.regions["L4Column_" + str(i)] ) self.L2Regions.append( self.network.regions["L2Column_" + str(i)] ) self.TMRegions.append( self.network.regions["TMColumn_" + str(i)] ) self.L4Columns = [region.getSelf() for region in self.L4Regions] self.L2Columns = [region.getSelf() for region in self.L2Regions] self.TMColumns = [region.getSelf() for region in self.TMRegions] # will be populated during training self.objectL2Representations = {} self.objectL2RepresentationsMatrices = [ SparseMatrix(0, self.config["L2Params"]["cellCount"]) for _ in xrange(self.numColumns)] self.objectNameToIndex = {} self.statistics = [] # Create classifier to hold supposedly unique TM states self.classifier = KNNClassifier(distanceMethod="rawOverlap") self.numTMCells = (self.TMColumns[0].cellsPerColumn * self.TMColumns[0].columnCount)
348,732
Updates the inference statistics. Parameters: ---------------------------- @param statistics (dict) Dictionary in which to write the statistics @param objectName (str) Name of the inferred object, if known. Otherwise, set to None.
def _updateInferenceStats(self, statistics, objectName=None): L4Representations = self.getL4Representations() L4PredictedCells = self.getL4PredictedCells() L4PredictedActiveCells = self.getL4PredictedActiveCells() L2Representation = self.getL2Representations() TMPredictedActive = self.getTMPredictedActiveCells() TMNextPredicted = self.getTMNextPredictedCells() TMRepresentation = self.getTMRepresentations() for i in xrange(self.numColumns): statistics["L4 Representation C" + str(i)].append( len(L4Representations[i]) ) statistics["L4 Predicted C" + str(i)].append( len(L4PredictedCells[i]) ) statistics["L4 PredictedActive C" + str(i)].append( len(L4PredictedActiveCells[i]) ) statistics["L2 Representation C" + str(i)].append( len(L2Representation[i]) ) statistics["L4 Apical Segments C" + str(i)].append( len(self.L4Columns[i]._tm.getActiveApicalSegments()) ) statistics["L4 Basal Segments C" + str(i)].append( len(self.L4Columns[i]._tm.getActiveBasalSegments()) ) statistics["TM Basal Segments C" + str(i)].append( len(self.TMColumns[i]._tm.getActiveBasalSegments()) ) statistics["TM PredictedActive C" + str(i)].append( len(TMPredictedActive[i]) ) # The number of cells that are in predictive state as a result of this # input statistics["TM NextPredicted C" + str(i)].append( len(TMNextPredicted[i]) ) # The indices of all active cells in the TM statistics["TM Full Representation C" + str(i)].append( TMRepresentation[i] ) # The indices of all active cells in the TM statistics["L2 Full Representation C" + str(i)].append( L2Representation[i] ) # Insert exact TM representation into the classifier if the number of # predictive active cells is potentially unique (otherwise we say it # failed to correctly predict this step). if ( (len(TMPredictedActive[i]) < 1.5*self.numInputBits) and (len(TMPredictedActive[i]) > 0.5*self.numInputBits) ): sdr = list(TMPredictedActive[i]) sdr.sort() self.classifier.learn(sdr, objectName, isSparse=self.numTMCells) # add true overlap if objectName was provided if objectName in self.objectL2Representations: objectRepresentation = self.objectL2Representations[objectName] statistics["Overlap L2 with object C" + str(i)].append( len(objectRepresentation[i] & L2Representation[i]) )
348,737
Plots and saves the desired inference statistics. Parameters: ---------------------------- @param fields (list(str)) List of fields to include in the plots @param experimentID (int) ID of the experiment (usually 0 if only one was conducted) @param onePlot (bool) If true, all cortical columns will be merged in one plot.
def plotInferenceStats(self, fields, plotDir="plots", experimentID=0, onePlot=True): if not os.path.exists(plotDir): os.makedirs(plotDir) plt.figure() stats = self.statistics[experimentID] objectName = stats["object"] for i in xrange(self.numColumns): if not onePlot: plt.figure() # plot request stats for field in fields: fieldKey = field + " C" + str(i) plt.plot(stats[fieldKey], marker='+', label=fieldKey) # format plt.legend(loc="upper right") plt.xlabel("Sensation #") plt.xticks(range(stats["numSteps"])) plt.ylabel("Number of active bits") plt.ylim(plt.ylim()[0] - 5, plt.ylim()[1] + 5) plt.title("Object inference for object {}".format(objectName)) # save if not onePlot: relPath = "{}_exp_{}_C{}.png".format(self.name, experimentID, i) path = os.path.join(plotDir, relPath) plt.savefig(path) plt.close() if onePlot: relPath = "{}_exp_{}.png".format(self.name, experimentID) path = os.path.join(plotDir, relPath) plt.savefig(path) plt.close()
348,972
Prints profiling information. Parameters: ---------------------------- @param reset (bool) If set to True, the profiling will be reset.
def printProfile(self, reset=False): print "Profiling information for {}".format(type(self).__name__) totalTime = 0.000001 for region in self.network.regions.values(): timer = region.getComputeTimer() totalTime += timer.getElapsed() # Sort the region names regionNames = list(self.network.regions.keys()) regionNames.sort() count = 1 profileInfo = [] L2Time = 0.0 L4Time = 0.0 for regionName in regionNames: region = self.network.regions[regionName] timer = region.getComputeTimer() count = max(timer.getStartCount(), count) profileInfo.append([region.name, timer.getStartCount(), timer.getElapsed(), 100.0 * timer.getElapsed() / totalTime, timer.getElapsed() / max(timer.getStartCount(), 1)]) if "L2Column" in regionName: L2Time += timer.getElapsed() elif "L4Column" in regionName: L4Time += timer.getElapsed() profileInfo.append( ["Total time", "", totalTime, "100.0", totalTime / count]) print tabulate(profileInfo, headers=["Region", "Count", "Elapsed", "Pct of total", "Secs/iteration"], tablefmt="grid", floatfmt="6.3f") print print "Total time in L2 =", L2Time print "Total time in L4 =", L4Time if reset: self.resetProfile()
348,974
Stretch test that learns a lot of objects. Parameters: ---------------------------- @param noiseLevel (float) Noise level to add to the locations and features during inference @param profile (bool) If True, the network will be profiled after learning and inference
def runStretch(noiseLevel=None, profile=False): exp = L4L2Experiment( "stretch_L10_F10_C2", numCorticalColumns=2, ) objects = createObjectMachine( machineType="simple", numInputBits=20, sensorInputSize=1024, externalInputSize=1024, numCorticalColumns=2, ) objects.createRandomObjects(10, 10, numLocations=10, numFeatures=10) print "Objects are:" for object, pairs in objects.objects.iteritems(): print str(object) + ": " + str(pairs) exp.learnObjects(objects.provideObjectsToLearn()) if profile: exp.printProfile(reset=True) # For inference, we will check and plot convergence for object 0. We create a # sequence of random sensations for each column. We will present each # sensation for 4 time steps to let it settle and ensure it converges. objectCopy1 = [pair for pair in objects[0]] objectCopy2 = [pair for pair in objects[0]] objectCopy3 = [pair for pair in objects[0]] random.shuffle(objectCopy1) random.shuffle(objectCopy2) random.shuffle(objectCopy3) # stay multiple steps on each sensation objectSensations1 = [] for pair in objectCopy1: for _ in xrange(4): objectSensations1.append(pair) # stay multiple steps on each sensation objectSensations2 = [] for pair in objectCopy2: for _ in xrange(4): objectSensations2.append(pair) # stay multiple steps on each sensation objectSensations3 = [] for pair in objectCopy3: for _ in xrange(4): objectSensations3.append(pair) inferConfig = { "numSteps": len(objectSensations1), "noiseLevel": noiseLevel, "pairs": { 0: objectSensations1, 1: objectSensations2, # 2: objectSensations3, # Uncomment for 3 columns } } exp.infer(objects.provideObjectToInfer(inferConfig), objectName=0) if profile: exp.printProfile() exp.plotInferenceStats( fields=["L2 Representation", "Overlap L2 with object", "L4 Representation"], onePlot=False, )
349,140
Computes one cycle of the Union Pooler algorithm and returns the union SDR. Parameters: ---------------------------- @param activeCells: A list that stores indices of active cells @param forceOutput: if True, a union will be created without regard to minHistory
def updateHistory(self, activeCells, forceOutput=False): self._activeCellsHistory.append(activeCells) if len(self._activeCellsHistory) > self._historyLength: self._activeCellsHistory.pop(0) self._unionSDR = numpy.zeros(shape=(self._numInputs,)) if (len(self._activeCellsHistory) >= self._minHistory) or forceOutput: for i in self._activeCellsHistory: self._unionSDR[i] = 1 return self._unionSDR
349,259
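Stripped of the class machinery, the pooling rule above is a moving OR over the last historyLength activations, emitted once minHistory steps have accumulated (or when forced). A minimal standalone sketch:

```python
import numpy

def union_over_history(history, num_inputs, history_length, min_history,
                       force_output=False):
    # Keep only the most recent `history_length` activation lists.
    history = history[-history_length:]
    union_sdr = numpy.zeros(num_inputs)
    if len(history) >= min_history or force_output:
        for active_cells in history:
            union_sdr[active_cells] = 1   # OR the active indices together
    return union_sdr

sdr = union_over_history([[0, 3], [3, 7]], num_inputs=10,
                         history_length=5, min_history=2)
print(sdr)   # 1.0 at indices 0, 3 and 7
```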
Updates the inference statistics. Parameters: ---------------------------- @param statistics (dict) Dictionary in which to write the statistics @param objectName (str) Name of the inferred object, if known. Otherwise, set to None.
def _updateInferenceStats(self, statistics, objectName=None): L6aLearnableCells = self.getL6aLearnableCells() L6aSensoryAssociatedCells = self.getL6aSensoryAssociatedCells() L6aRepresentations = self.getL6aRepresentations() L4Representations = self.getL4Representations() L4PredictedCells = self.getL4PredictedCells() L2Representation = self.getL2Representations() for i in xrange(self.numColumns): statistics["L6a SensoryAssociatedCells C" + str(i)].append( len(L6aSensoryAssociatedCells[i])) statistics["L6a LearnableCells C" + str(i)].append( len(L6aLearnableCells[i])) statistics["L6a Representation C" + str(i)].append( len(L6aRepresentations[i])) statistics["L4 Representation C" + str(i)].append( len(L4Representations[i])) statistics["L4 Predicted C" + str(i)].append(len(L4PredictedCells[i])) statistics["L2 Representation C" + str(i)].append( len(L2Representation[i])) statistics["Full L2 SDR C" + str(i)].append(sorted( [int(c) for c in L2Representation[i]])) statistics["L4 Apical Segments C" + str(i)].append(len( self.L4Regions[i].getSelf()._tm.getActiveApicalSegments())) # add true overlap and classification result if objectName was learned if objectName in self.learnedObjects: objectRepresentation = self.learnedObjects[objectName] statistics["Overlap L2 with object C" + str(i)].append( len(objectRepresentation[i] & L2Representation[i])) if objectName in self.learnedObjects: if self.isObjectClassified(objectName, minOverlap=30): statistics["Correct classification"].append(1.0) else: statistics["Correct classification"].append(0.0)
349,332
Updates the inference statistics. Parameters: ---------------------------- @param statistics (dict) Dictionary in which to write the statistics @param objectName (str) Name of the inferred object, if known. Otherwise, set to None.
def _updateInferenceStats(self, statistics, objectName=None): L4Representations = self.getL4Representations() L4PredictedCells = self.getL4PredictedCells() L2Representations = self.getL2Representations() L5Representations = self.getL5Representations() L6Representations = self.getL6Representations() L6PredictedCells = self.getL6PredictedCells() for i in xrange(self.numColumns): statistics["L4 Representation C" + str(i)].append( len(L4Representations[i]) ) statistics["L4 Predicted C" + str(i)].append( len(L4PredictedCells[i]) ) statistics["L2 Representation C" + str(i)].append( len(L2Representations[i]) ) statistics["L6 Representation C" + str(i)].append( len(L6Representations[i]) ) statistics["L6 Predicted C" + str(i)].append( len(L6PredictedCells[i]) ) statistics["L5 Representation C" + str(i)].append( len(L5Representations[i]) ) # add true overlap if objectName was provided if objectName is not None: objectRepresentationL2 = self.objectRepresentationsL2[objectName] statistics["Overlap L2 with object C" + str(i)].append( len(objectRepresentationL2[i] & L2Representations[i]) ) objectRepresentationL5 = self.objectRepresentationsL5[objectName] statistics["Overlap L5 with object C" + str(i)].append( len(objectRepresentationL5[i] & L5Representations[i]) )
349,478
Creates the network. Parameters: ---------------------------- @param L2Overrides (dict) Parameters to override in the L2 region @param L4Overrides (dict) Parameters to override in the L4 region
def __init__(self, name, numCorticalColumns=1, inputSize=1024, numInputBits=20, externalInputSize=1024, numExternalInputBits=20, L2Overrides=None, L4Overrides=None, seed=42, logCalls=False, objectNamesAreIndices=False, ): # Handle logging - this has to be done first self.logCalls = logCalls registerAllResearchRegions() self.name = name self.numLearningPoints = 1 self.numColumns = numCorticalColumns self.inputSize = inputSize self.externalInputSize = externalInputSize self.numInputBits = numInputBits self.objectNamesAreIndices = objectNamesAreIndices self.numExternalInputBits = numExternalInputBits # seed self.seed = seed random.seed(seed) # Create default parameters and then update with overrides self.config = { "networkType": "CombinedSequenceColumn", "numCorticalColumns": numCorticalColumns, "externalInputSize": externalInputSize, "sensorInputSize": inputSize, "enableFeedback": False, "L2Params": self.getDefaultL2Params(inputSize, numInputBits), } self.config["L4Params"] = self._getDefaultCombinedL4Params( self.numInputBits, self.inputSize, self.numExternalInputBits, self.externalInputSize, self.config["L2Params"]["cellCount"]) if L2Overrides is not None: self.config["L2Params"].update(L2Overrides) if L4Overrides is not None: self.config["L4Params"].update(L4Overrides) pprint.pprint(self.config) # Recreate network including TM parameters self.network = createNetwork(self.config) self.sensorInputs = [] self.externalInputs = [] self.L2Regions = [] self.L4Regions = [] for i in xrange(self.numColumns): self.sensorInputs.append( self.network.regions["sensorInput_" + str(i)].getSelf() ) self.externalInputs.append( self.network.regions["externalInput_" + str(i)].getSelf() ) self.L2Regions.append( self.network.regions["L2Column_" + str(i)] ) self.L4Regions.append( self.network.regions["L4Column_" + str(i)] ) self.L2Columns = [region.getSelf() for region in self.L2Regions] self.L4Columns = [region.getSelf() for region in self.L4Regions] # will be populated during training self.objectL2Representations = {} self.objectL2RepresentationsMatrices = [ SparseMatrix(0, self.config["L2Params"]["cellCount"]) for _ in xrange(self.numColumns)] self.objectNameToIndex = {} self.statistics = []
349,585
Updates the inference statistics. Parameters: ---------------------------- @param statistics (dict) Dictionary in which to write the statistics @param objectName (str) Name of the inferred object, if known. Otherwise, set to None.
def _updateInferenceStats(self, statistics, objectName=None): L4Representations = self.getL4Representations() L4PredictedCells = self.getL4PredictedCells() L4PredictedActiveCells = self.getL4PredictedActiveCells() L2Representation = self.getL2Representations() for i in xrange(self.numColumns): statistics["L4 Representation C" + str(i)].append( len(L4Representations[i]) ) statistics["L4 Predicted C" + str(i)].append( len(L4PredictedCells[i]) ) statistics["L4 PredictedActive C" + str(i)].append( len(L4PredictedActiveCells[i]) ) statistics["L2 Representation C" + str(i)].append( len(L2Representation[i]) ) statistics["L4 Apical Segments C" + str(i)].append( len(self.L4Columns[i]._tm.getActiveApicalSegments()) ) statistics["L4 Basal Segments C" + str(i)].append( len(self.L4Columns[i]._tm.getActiveBasalSegments()) ) # add true overlap if objectName was provided if objectName in self.objectL2Representations: objectRepresentation = self.objectL2Representations[objectName] statistics["Overlap L2 with object C" + str(i)].append( len(objectRepresentation[i] & L2Representation[i]) )
349,588
Returns the number of proximal connected synapses on these cells. Parameters: ---------------------------- @param cells (iterable) Indices of the cells. If None return count for all cells.
def numberOfConnectedProximalSynapses(self, cells=None): if cells is None: cells = xrange(self.numberOfCells()) return _countWhereGreaterEqualInRows(self.proximalPermanences, cells, self.connectedPermanenceProximal)
349,625
Returns the number of proximal synapses with permanence>0 on these cells. Parameters: ---------------------------- @param cells (iterable) Indices of the cells. If None return count for all cells.
def numberOfProximalSynapses(self, cells=None): if cells is None: cells = xrange(self.numberOfCells()) n = 0 for cell in cells: n += self.proximalPermanences.nNonZerosOnRow(cell) return n
349,626
Returns the total number of distal segments for these cells. A segment "exists" if its row in the matrix has any permanence values > 0. Parameters: ---------------------------- @param cells (iterable) Indices of the cells
def numberOfDistalSegments(self, cells=None): if cells is None: cells = xrange(self.numberOfCells()) n = 0 for cell in cells: if self.internalDistalPermanences.nNonZerosOnRow(cell) > 0: n += 1 for permanences in self.distalPermanences: if permanences.nNonZerosOnRow(cell) > 0: n += 1 return n
349,627
Returns the number of connected distal synapses on these cells. Parameters: ---------------------------- @param cells (iterable) Indices of the cells. If None return count for all cells.
def numberOfConnectedDistalSynapses(self, cells=None): if cells is None: cells = xrange(self.numberOfCells()) n = _countWhereGreaterEqualInRows(self.internalDistalPermanences, cells, self.connectedPermanenceDistal) for permanences in self.distalPermanences: n += _countWhereGreaterEqualInRows(permanences, cells, self.connectedPermanenceDistal) return n
349,628
Called at the end of learning and inference, this routine will update a number of stats in our _internalStats dictionary, including: 1. Our computed prediction score 2. ... Parameters: ------------------------------------------------------------------ bottomUpNZ: list of the active bottom-up inputs
def _updateStatsInferEnd(self, stats, bottomUpNZ, predictedState, confidence): # Return if not collecting stats if not self.collectStats: return stats['nInfersSinceReset'] += 1 # Compute the prediction score, how well the prediction from the last # time step predicted the current bottom-up input patternsToCheck = [bottomUpNZ] (numExtra2, numMissing2, confidences2) = self.checkPrediction2( patternNZs = [bottomUpNZ], output = predictedState, confidence = confidence) predictionScore, positivePredictionScore, negativePredictionScore = \ confidences2[0] # Store the stats that don't depend on burn-in stats['curPredictionScore2'] = float(predictionScore) stats['curFalseNegativeScore'] = 1.0 - float(positivePredictionScore) stats['curFalsePositiveScore'] = float(negativePredictionScore) stats['curMissing'] = numMissing2 stats['curExtra'] = numExtra2 # ---------------------------------------------------------------------- # If we are passed the burn-in period, update the accumulated stats # Here's what various burn-in values mean: # 0: try to predict the first element of each sequence and all subsequent # 1: try to predict the second element of each sequence and all subsequent # etc. if stats['nInfersSinceReset'] <= self.burnIn: return # Burn-in related stats stats['nPredictions'] += 1 numExpected = max(1.0, float(len(bottomUpNZ))) stats['totalMissing'] += numMissing2 stats['totalExtra'] += numExtra2 stats['pctExtraTotal'] += 100.0 * numExtra2 / numExpected stats['pctMissingTotal'] += 100.0 * numMissing2 / numExpected stats['predictionScoreTotal2'] += float(predictionScore) stats['falseNegativeScoreTotal'] += 1.0 - float(positivePredictionScore) stats['falsePositiveScoreTotal'] += float(negativePredictionScore) if self.collectSequenceStats: # Collect cell confidences for every cell that correctly predicted current # bottom up input. Normalize confidence across each column cc = self.confidence['t-1'] * self.activeState['t'] sconf = cc.sum(axis=1) for c in range(self.numberOfCols): if sconf[c] > 0: cc[c,:] /= sconf[c] # Update cell confidence histogram: add column-normalized confidence # scores to the histogram self._internalStats['confHistogram'] += cc
350,009
Remove a segment update (called when seg update expires or is processed) Parameters: -------------------------------------------------------------- updateInfo: (creationDate, SegmentUpdate)
def removeSegmentUpdate(self, updateInfo): # An updateInfo contains (creationDate, SegmentUpdate) (creationDate, segUpdate) = updateInfo # Key is stored in segUpdate itself... key = (segUpdate.columnIdx, segUpdate.cellIdx) self.segmentUpdates[key].remove(updateInfo)
350,015
This is phase 2 of learning, inference and multistep prediction. During this phase, all the cells with lateral support have their predictedState turned on and the firing segments are queued up for updates. Parameters: -------------------------------------------- doLearn: Boolean flag to queue segment updates during learning retval: None
def computePhase2(self, doLearn=False): # Phase 2: compute predicted state for each cell # - if a segment has enough horizontal connections firing because of # bottomUpInput, it's set to be predicting, and we queue up the segment # for reinforcement, # - if pooling is on, try to find the best weakly activated segment to # reinforce it, else create a new pooling segment. for c in xrange(self.numberOfCols): buPredicted = False # whether any cell in the column is predicted for i in xrange(self.cellsPerColumn): # Iterate over each of the segments of this cell maxConfidence = 0 for s in self.cells[c][i]: # sum(connected synapses) >= activationThreshold? if self.isSegmentActive(s, self.activeState['t']): self.predictedState['t'][c,i] = 1 buPredicted = True maxConfidence = max(maxConfidence, s.dutyCycle(readOnly=True)) if doLearn: s.totalActivations += 1 # increment activationFrequency s.lastActiveIteration = self.iterationIdx # mark this segment for learning activeUpdate = self.getSegmentActiveSynapses(c,i,s,'t') activeUpdate.phase1Flag = False self.addToSegmentUpdates(c, i, activeUpdate) # Store the max confidence seen among all the weak and strong segments # as the cell's confidence. self.confidence['t'][c,i] = maxConfidence
350,018
Compute the column confidences given the cell confidences. If None is passed in for cellConfidences, it uses the stored cell confidences from the last compute. Parameters: ---------------------------- cellConfidences : cell confidences to use, or None to use the current cell confidences. retval: Column confidence scores.
def columnConfidences(self, cellConfidences=None): if cellConfidences is None: cellConfidences = self.confidence['t'] colConfidences = cellConfidences.sum(axis=1) # Make the max column confidence 1.0 #colConfidences /= colConfidences.max() return colConfidences
350,022
Initialise from axis and angle representation Create a Quaternion by specifying the 3-vector rotation axis and rotation angle (in radians) from which the quaternion's rotation should be created. Params: axis: a valid numpy 3-vector angle: a real valued angle in radians
def _from_axis_angle(cls, axis, angle): mag_sq = np.dot(axis, axis) if mag_sq == 0.0: raise ZeroDivisionError("Provided rotation axis has no length") # Ensure axis is in unit vector form if (abs(1.0 - mag_sq) > 1e-12): axis = axis / sqrt(mag_sq) theta = angle / 2.0 r = cos(theta) i = axis * sin(theta) return cls(r, i[0], i[1], i[2])
350,430
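The construction is the textbook q = (cos(θ/2), sin(θ/2)·axis). A quick sanity check via pyquaternion's public constructor, rotating the x unit vector 90 degrees about z:

```python
import numpy as np
from pyquaternion import Quaternion

# 90-degree rotation about the z axis.
q = Quaternion(axis=[0.0, 0.0, 1.0], angle=np.pi / 2)

# Rotating the x unit vector should yield (approximately) the y unit vector.
rotated = q.rotate([1.0, 0.0, 0.0])
assert np.allclose(rotated, [0.0, 1.0, 0.0])
```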
Quaternion Exponential. Find the exponential of a quaternion amount. Params: q: the input quaternion/argument as a Quaternion object. Returns: A quaternion amount representing exp(q). See [Source](https://math.stackexchange.com/questions/1030737/exponential-function-of-quaternion-derivation) for more information and mathematical background. Note: The method can compute the exponential of any quaternion.
def exp(cls, q): tolerance = 1e-17 v_norm = np.linalg.norm(q.vector) vec = q.vector if v_norm > tolerance: vec = vec / v_norm magnitude = exp(q.scalar) return Quaternion(scalar = magnitude * cos(v_norm), vector = magnitude * sin(v_norm) * vec)
350,448
Quaternion Logarithm. Find the logarithm of a quaternion amount. Params: q: the input quaternion/argument as a Quaternion object. Returns: A quaternion amount representing log(q) := (log|q|, (v/|v|)·acos(w/|q|)). Note: The method computes the logarithm of general quaternions. See [Source](https://math.stackexchange.com/questions/2552/the-logarithm-of-quaternion/2554#2554) for more details.
def log(cls, q): v_norm = np.linalg.norm(q.vector) q_norm = q.norm tolerance = 1e-17 if q_norm < tolerance: # 0 quaternion - undefined return Quaternion(scalar=-float('inf'), vector=float('nan')*q.vector) if v_norm < tolerance: # real quaternions - no imaginary part return Quaternion(scalar=log(q_norm), vector=[0,0,0]) vec = q.vector / v_norm return Quaternion(scalar=log(q_norm), vector=acos(q.scalar/q_norm)*vec)
350,449
Get the instantaneous quaternion derivative representing a quaternion rotating at a 3D rate vector `rate` Params: rate: numpy 3-array (or array-like) describing rotation rates about the global x, y and z axes respectively. Returns: A quaternion describing the rotation rate (in general not a unit quaternion)
def derivative(self, rate): rate = self._validate_number_sequence(rate, 3) return 0.5 * self * Quaternion(vector=rate)
350,457
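So for a body spinning at rate ω, q̇ = ½ q ⊗ (0, ω). A sketch that takes one explicit Euler integration step from the identity orientation (the step size is arbitrary):

```python
from pyquaternion import Quaternion

q = Quaternion()                 # identity orientation
rate = [0.0, 0.0, 1.0]           # 1 rad/s about the z axis

q_dot = q.derivative(rate)       # equals 0.5 * q * Quaternion(vector=rate)

# One explicit Euler step of dt seconds, then renormalize.
dt = 0.01
q = (q + dt * q_dot).normalised
print(q)
```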
Return low-level information about an image Args: name: name of the image
async def inspect(self, name: str) -> Mapping: response = await self.docker._query_json("images/{name}/json".format(name=name)) return response
351,667
Similar to `docker pull`, pull an image locally. Args: from_image: name of the image to pull repo: repository name given to an image when it is imported tag: if empty, all tags for the given image are pulled auth: special {'auth': base64} to pull a private repo
async def pull( self, from_image: str, *, auth: Optional[Union[MutableMapping, str, bytes]] = None, tag: str = None, repo: str = None, stream: bool = False ) -> Mapping: image = from_image # TODO: clean up params = {"fromImage": image} headers = {} if repo: params["repo"] = repo if tag: params["tag"] = tag if auth is not None: registry, has_registry_host, _ = image.partition("/") if not has_registry_host: raise ValueError( "Image should have registry host " "when auth information is provided" ) # TODO: assert registry == repo? headers["X-Registry-Auth"] = compose_auth_header(auth, registry) response = await self.docker._query( "images/create", "POST", params=params, headers=headers ) return await json_stream_result(response, stream=stream)
351,669
Tag the given image so that it becomes part of a repository. Args: repo: the repository to tag in tag: the name for the new tag
async def tag(self, name: str, repo: str, *, tag: str = None) -> bool: params = {"repo": repo} if tag: params["tag"] = tag await self.docker._query( "images/{name}/tag".format(name=name), "POST", params=params, headers={"content-type": "application/json"}, ) return True
351,671
Remove an image along with any untagged parent images that were referenced by that image Args: name: name/id of the image to delete force: remove the image even if it is being used by stopped containers or has other tags noprune: don't delete untagged parent images Returns: List of deleted images
async def delete( self, name: str, *, force: bool = False, noprune: bool = False ) -> List: params = {"force": force, "noprune": noprune} response = await self.docker._query_json( "images/{name}".format(name=name), "DELETE", params=params ) return response
351,672
Get a tarball of an image by name or id. Args: name: name/id of the image to be exported Returns: StreamReader of the tarball image
async def export_image(self, name: str): response = await self.docker._query( "images/{name}/get".format(name=name), "GET" ) return response.content
351,674
Import a tarball of an image into docker. Args: data: tarball data of the image to be imported stream: if True, return the result as a stream Returns: Mapping (or stream) describing the import result
async def import_image(self, data, stream: bool = False): headers = {"Content-Type": "application/x-tar"} response = await self.docker._query_chunked_post( "images/load", "POST", data=data, headers=headers ) return await json_stream_result(response, stream=stream)
351,675
Create a zipped tar archive from a Dockerfile **Remember to close the file object** Args: fileobject: a Dockerfile (file object) Returns: a NamedTemporaryFile() object
def mktar_from_dockerfile(fileobject: BinaryIO) -> IO: f = tempfile.NamedTemporaryFile() t = tarfile.open(mode="w:gz", fileobj=f) if isinstance(fileobject, BytesIO): dfinfo = tarfile.TarInfo("Dockerfile") dfinfo.size = len(fileobject.getvalue()) fileobject.seek(0) else: dfinfo = t.gettarinfo(fileobj=fileobject, arcname="Dockerfile") t.addfile(dfinfo, fileobject) t.close() f.seek(0) return f
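A sketch of pairing the helper with an image build (docker.images.build and the aiodocker import path are assumptions):

    import asyncio
    from io import BytesIO

    import aiodocker                                   # assumption
    from aiodocker.utils import mktar_from_dockerfile  # assumption: import path

    async def main():
        tar = mktar_from_dockerfile(BytesIO(b'FROM alpine\nCMD ["echo", "hi"]\n'))
        docker = aiodocker.Docker()
        try:
            await docker.images.build(fileobj=tar, encoding="gzip", tag="hi:latest")
        finally:
            tar.close()          # remember to close the temporary tar file
            await docker.close()

    asyncio.run(main())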
351,687
Validate and compose a base64-encoded authentication header, with optional support for parsing legacy-style "user:password" strings. Args: auth: Authentication information registry_addr: An address of the registry server Returns: A base64-encoded X-Registry-Auth header value
def compose_auth_header( auth: Union[MutableMapping, str, bytes], registry_addr: str = None ) -> str: if isinstance(auth, Mapping): # Validate the JSON format only. if "identitytoken" in auth: pass elif "auth" in auth: return compose_auth_header(auth["auth"], registry_addr) else: if registry_addr: auth["serveraddress"] = registry_addr auth_json = json.dumps(auth).encode("utf-8") elif isinstance(auth, (str, bytes)): # Parse simple "username:password"-formatted strings # and attach the server address specified. if isinstance(auth, bytes): auth = auth.decode("utf-8") s = base64.b64decode(auth) username, passwd = s.split(b":", 1) config = { "username": username.decode("utf-8"), "password": passwd.decode("utf-8"), "email": None, "serveraddress": registry_addr, } auth_json = json.dumps(config).encode("utf-8") else: raise TypeError("auth must be base64 encoded string/bytes or a dictionary") auth = base64.b64encode(auth_json).decode("ascii") return auth
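A quick sketch of the legacy "user:password" path (the credentials and registry address below are made up):

    import base64

    # the function expects the legacy credential itself to be base64-encoded
    raw = base64.b64encode(b"alice:s3cret").decode("ascii")
    header = compose_auth_header(raw, registry_addr="registry.example.com")
    print(base64.b64decode(header))  # JSON with username/password/serveraddress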
351,688
Return info about a task Args: task_id: ID of the task
async def inspect(self, task_id: str) -> Mapping[str, Any]: response = await self.docker._query_json( "tasks/{task_id}".format(task_id=task_id), method="GET" ) return response
351,691
Inspect a node Args: node_id: The ID or name of the node
async def inspect(self, *, node_id: str) -> Mapping[str, Any]: response = await self.docker._query_json( "nodes/{node_id}".format(node_id=node_id), method="GET" ) return response
351,724
Update the spec of a node. Args: node_id: The ID or name of the node version: version number of the node being updated spec: fields to be updated
async def update( self, *, node_id: str, version: int, spec: Mapping[str, Any] ) -> Mapping[str, Any]: params = {"version": version} if "Role" in spec: assert spec["Role"] in {"worker", "manager"} if "Availability" in spec: assert spec["Availability"] in {"active", "pause", "drain"} response = await self.docker._query_json( "nodes/{node_id}/update".format(node_id=node_id), method="POST", params=params, data=spec, ) return response
351,725
Remove a node from a swarm. Args: node_id: The ID or name of the node force: force remove the node from the swarm
async def remove(self, *, node_id: str, force: bool = False) -> Mapping[str, Any]: params = {"force": force} response = await self.docker._query_json( "nodes/{node_id}".format(node_id=node_id), method="DELETE", params=params ) return response
351,726
Initialize a new swarm. Args: listen_addr: listen address used for inter-manager communication advertise_addr: address advertised to other nodes force_new_cluster: force creation of a new swarm swarm_spec: user-modifiable swarm configuration Returns: id of the swarm node
async def init( self, *, advertise_addr: str = None, listen_addr: str = "0.0.0.0:2377", force_new_cluster: bool = False, swarm_spec: Mapping = None ) -> str: data = { "AdvertiseAddr": advertise_addr, "ListenAddr": listen_addr, "ForceNewCluster": force_new_cluster, "Spec": swarm_spec, } response = await self.docker._query_json("swarm/init", method="POST", data=data) return response
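A usage sketch (assuming aiodocker's DockerSwarm, a daemon not yet in a swarm, and a made-up address):

    import asyncio
    import aiodocker  # assumption

    async def main():
        docker = aiodocker.Docker()
        try:
            node_id = await docker.swarm.init(advertise_addr="192.168.1.10")
            print(node_id)  # id of the first manager node
        finally:
            await docker.close()

    asyncio.run(main())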
351,738
Join a swarm. Args: listen_addr: Used for inter-manager communication advertise_addr: Externally reachable address advertised to other nodes. data_path_addr: Address or interface to use for data path traffic. remote_addrs: Addresses of manager nodes already participating in the swarm. join_token: Secret token for joining this swarm.
async def join( self, *, remote_addrs: Iterable[str], listen_addr: str = "0.0.0.0:2377", join_token: str, advertise_addr: str = None, data_path_addr: str = None ) -> bool: data = { "RemoteAddrs": list(remote_addrs), "JoinToken": join_token, "ListenAddr": listen_addr, "AdvertiseAddr": advertise_addr, "DataPathAddr": data_path_addr, } await self.docker._query("swarm/join", method="POST", data=clean_map(data)) return True
351,739
Leave a swarm. Args: force: force to leave the swarm even if the node is a manager
async def leave(self, *, force: bool = False) -> bool: params = {"force": force} await self.docker._query("swarm/leave", method="POST", params=params) return True
351,740
Return a list of services Args: filters: a dict with a list of filters Available filters: id=<service id> label=<service label> mode=["replicated"|"global"] name=<service name>
async def list(self, *, filters: Mapping = None) -> List[Mapping]: params = {"filters": clean_filters(filters)} response = await self.docker._query_json( "services", method="GET", params=params ) return response
351,746
Update a service. If rollback is True, image will be ignored. Args: service_id: ID or name of the service. version: Version of the service that you want to update. image: Image to update the service to (required unless rolling back). rollback: Rollback the service to the previous service spec. Returns: True if successful.
async def update( self, service_id: str, version: str, *, image: str = None, rollback: bool = False ) -> bool: if image is None and rollback is False: raise ValueError("You need to specify an image.") inspect_service = await self.inspect(service_id) spec = inspect_service["Spec"] if image is not None: spec["TaskTemplate"]["ContainerSpec"]["Image"] = image params = {"version": version} if rollback is True: params["rollback"] = "previous" data = json.dumps(clean_map(spec)) await self.docker._query_json( "services/{service_id}/update".format(service_id=service_id), method="POST", data=data, params=params, ) return True
351,748
Remove a service Args: service_id: ID or name of the service Returns: True if successful
async def delete(self, service_id: str) -> bool: await self.docker._query( "services/{service_id}".format(service_id=service_id), method="DELETE" ) return True
351,749
Inspect a service Args: service_id: ID or name of the service Returns: a dict with info about a service
async def inspect(self, service_id: str) -> Mapping[str, Any]: response = await self.docker._query_json( "services/{service_id}".format(service_id=service_id), method="GET" ) return response
351,750
Returns a price index given a series of returns. Args: * returns: Expects a return series * start (number): Starting level Assumes arithmetic returns. Formula is: cumprod (1+r)
def to_price_index(returns, start=100): return (returns.replace(to_replace=np.nan, value=0) + 1).cumprod() * start
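A worked example (a sketch; to_price_index is the function above):

    import numpy as np
    import pandas as pd

    r = pd.Series([np.nan, 0.01, -0.02])
    print(to_price_index(r, start=100))  # 100.0, 101.0, 98.98 = cumprod(1 + r) * 100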
352,414
Calculates performance stats of a given object. If object is Series, a PerformanceStats object is returned. If object is DataFrame, a GroupStats object is returned. Args: * prices (Series, DataFrame): Set of prices
def calc_stats(prices): if isinstance(prices, pd.Series): return PerformanceStats(prices) elif isinstance(prices, pd.DataFrame): return GroupStats(*[prices[x] for x in prices.columns]) else: raise NotImplementedError('Unsupported type')
352,415
Returns a data frame with start, end, days (duration) and drawdown for each drawdown in a drawdown series. .. note:: days are actual calendar days, not trading days Args: * drawdown (pandas.Series): A drawdown Series (can be obtained w/ drawdown(prices)). Returns: * pandas.DataFrame -- A data frame with the following columns: start, end, days, drawdown.
def drawdown_details(drawdown, index_type=pd.DatetimeIndex): is_zero = drawdown == 0 # find start dates (first day where dd is non-zero after a zero) start = ~is_zero & is_zero.shift(1) start = list(start[start == True].index) # NOQA # find end dates (first day where dd is 0 after non-zero) end = is_zero & (~is_zero).shift(1) end = list(end[end == True].index) # NOQA if len(start) == 0: return None # drawdown has no end (end period in dd) if len(end) == 0: end.append(drawdown.index[-1]) # if the first drawdown start is larger than the first drawdown end it # means the drawdown series begins in a drawdown and therefore we must add # the first index to the start series if start[0] > end[0]: start.insert(0, drawdown.index[0]) # if the last start is greater than the end then we must add the last index # to the end series since the drawdown series must finish with a drawdown if start[-1] > end[-1]: end.append(drawdown.index[-1]) result = pd.DataFrame( columns=('Start', 'End', 'Length', 'drawdown'), index=range(0, len(start)) ) for i in range(0, len(start)): dd = drawdown[start[i]:end[i]].min() if index_type is pd.DatetimeIndex: result.iloc[i] = (start[i], end[i], (end[i] - start[i]).days, dd) else: result.iloc[i] = (start[i], end[i], (end[i] - start[i]), dd) return result
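A sketch of feeding it a drawdown series built by hand (price / running max - 1, as ffn's to_drawdown_series computes it; that helper name is an assumption here):

    import pandas as pd

    prices = pd.Series(
        [100, 105, 95, 90, 100, 110, 100],
        index=pd.date_range("2020-01-01", periods=7),
    )
    dd = prices / prices.cummax() - 1   # 0 at each new high, negative otherwise
    print(drawdown_details(dd))         # Start, End, Length (calendar days), drawdown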
352,417
Calculates the `CAGR (compound annual growth rate) <https://www.investopedia.com/terms/c/cagr.asp>`_ for a given price series. Args: * prices (pandas.Series): A Series of prices. Returns: * float -- cagr.
def calc_cagr(prices): start = prices.index[0] end = prices.index[-1] return (prices.iloc[-1] / prices.iloc[0]) ** (1 / year_frac(start, end)) - 1
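A quick worked check: a price that doubles over two years gives CAGR = 2 ** (1/2) - 1 ≈ 0.414 (assuming year_frac measures the span in years):

    import pandas as pd

    prices = pd.Series(
        [100.0, 200.0],
        index=pd.to_datetime(["2018-01-01", "2020-01-01"]),
    )
    print(calc_cagr(prices))  # ~0.414 = (200 / 100) ** (1 / 2) - 1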
352,418
Estimates the number of days required to assume that data is OK. Helper function used to determine if there are enough "good" data days over a given period. Args: * offset (DateOffset): Offset (lookback) period. * period (str): Period string. * perc_required (float): required percentage of the expected number of days.
def get_num_days_required(offset, period='d', perc_required=0.90): x = pd.to_datetime('2010-01-01') delta = x - (x - offset) # convert to 'trading days' - rough guesstimate days = delta.days * 0.69 if period == 'd': req = days * perc_required elif period == 'm': req = (days / 20) * perc_required elif period == 'y': req = (days / 252) * perc_required else: raise NotImplementedError( 'period not supported. Supported periods are d, m, y') return req
352,430
Calculates the clusters based on k-means clustering. Args: * returns (pd.DataFrame): DataFrame of returns * n (int): Specify # of clusters. If None, this will be automatically determined * plot (bool): Show plot? Returns: * dict with structure: {cluster# : [col names]}
def calc_clusters(returns, n=None, plot=False): # calculate correlation corr = returns.corr() # calculate dissimilarity matrix diss = 1 - corr # scale down to 2 dimensions using MDS # (multi-dimensional scaling) using the # dissimilarity matrix mds = sklearn.manifold.MDS(dissimilarity='precomputed') xy = mds.fit_transform(diss) def routine(k): # fit KMeans km = sklearn.cluster.KMeans(n_clusters=k) km_fit = km.fit(xy) labels = km_fit.labels_ centers = km_fit.cluster_centers_ # get {ticker: label} mappings mappings = dict(zip(returns.columns, labels)) # print % of var explained totss = 0 withinss = 0 # column average for totss avg = np.array([np.mean(xy[:, 0]), np.mean(xy[:, 1])]) for idx, lbl in enumerate(labels): withinss += sum((xy[idx] - centers[lbl]) ** 2) totss += sum((xy[idx] - avg) ** 2) pvar_expl = 1.0 - withinss / totss return mappings, pvar_expl, labels if n: result = routine(n) else: n = len(returns.columns) n1 = int(np.ceil(n * 0.6666666666)) for i in range(2, n1 + 1): result = routine(i) if result[1] > 0.9: break if plot: fig, ax = plt.subplots() ax.scatter(xy[:, 0], xy[:, 1], c=result[2], s=90) for i, txt in enumerate(returns.columns): ax.annotate(txt, (xy[i, 0], xy[i, 1]), size=14) # sanitize return value tmp = result[0] # map as such {cluster: [list of tickers], cluster2: [...]} inv_map = {} for k, v in iteritems(tmp): inv_map[v] = inv_map.get(v, []) inv_map[v].append(k) return inv_map
352,431
Limits weights and redistributes the excess amount proportionally. ex: - weights are {a: 0.7, b: 0.2, c: 0.1} - call with limit=0.5 - excess 0.2 in a is distributed to b and c proportionally. - result is {a: 0.5, b: 0.33, c: 0.167} Args: * weights (Series): A series describing the weights * limit (float): Maximum weight allowed
def limit_weights(weights, limit=0.1): if 1.0 / limit > len(weights): raise ValueError('invalid limit -> 1 / limit must be <= len(weights)') if isinstance(weights, dict): weights = pd.Series(weights) if np.round(weights.sum(), 1) != 1.0: raise ValueError('Expecting weights (that sum to 1) - sum is %s' % weights.sum()) res = np.round(weights.copy(), 4) to_rebalance = (res[res > limit] - limit).sum() ok = res[res < limit] ok += (ok / ok.sum()) * to_rebalance res[res > limit] = limit res[res < limit] = ok if any(x > limit for x in res): return limit_weights(res, limit=limit) return res
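Running the docstring's own example (exact decimals come from the rounding inside the function):

    import pandas as pd

    w = pd.Series({'a': 0.7, 'b': 0.2, 'c': 0.1})
    print(limit_weights(w, limit=0.5))
    # a -> 0.5; the 0.2 excess is split 2:1 between b (~0.333) and c (~0.167)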
352,433
Generate pseudo-random weights. Returns a list of random weights that is of length n, where each weight is in the range bounds, and where the weights sum up to total. Useful for creating random portfolios when benchmarking. Args: * n (int): number of random weights * bounds ((low, high)): bounds for each weight * total (float): total sum of the weights
def random_weights(n, bounds=(0., 1.), total=1.0): low = bounds[0] high = bounds[1] if high < low: raise ValueError('Higher bound must be greater or ' 'equal to lower bound') if n * high < total or n * low > total: raise ValueError('solution not possible with given n and bounds') w = [0] * n tgt = -float(total) for i in range(n): rn = n - i - 1 rhigh = rn * high rlow = rn * low lowb = max(-rhigh - tgt, low) highb = min(-rlow - tgt, high) rw = random.uniform(lowb, highb) w[i] = rw tgt += rw random.shuffle(w) return w
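A quick sketch (seeded only so the example is reproducible):

    import random

    random.seed(42)
    w = random_weights(4, bounds=(0.0, 0.5), total=1.0)
    print(w, sum(w))  # four weights in [0, 0.5] summing to ~1.0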
352,434
Apply a function fn over a rolling window of size window. Args: * data (Series or DataFrame): Series or DataFrame * window (int): Window size * fn (function): Function to apply over the rolling window. For a series, the return value is expected to be a single number. For a DataFrame, it should return a new row. Returns: * Object of same dimensions as data
def rollapply(data, window, fn): res = data.copy() res[:] = np.nan n = len(data) if window > n: return res for i in range(window - 1, n): res.iloc[i] = fn(data.iloc[i - window + 1:i + 1]) return res
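For example, a rolling 3-period mean, equivalent here to pandas' rolling(3).mean():

    import pandas as pd

    s = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])
    print(rollapply(s, 3, lambda x: x.mean()))  # NaN, NaN, 2.0, 3.0, 4.0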
352,436
Given a series of returns, it will return the excess returns over rf. Args: * returns (Series, DataFrame): Returns * rf (float, Series): `Risk-Free rate(s) <https://www.investopedia.com/terms/r/risk-freerate.asp>`_ expressed in annualized term or return series * nperiods (int): Optional. If provided, will convert rf to different frequency using deannualize only if rf is a float Returns: * excess_returns (Series, DataFrame): Returns - rf
def to_excess_returns(returns, rf, nperiods=None): if type(rf) is float and nperiods is not None: _rf = deannualize(rf, nperiods) else: _rf = rf return returns - _rf
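A sketch with a float risk-free rate (assuming deannualize computes (1 + rf) ** (1 / nperiods) - 1, as in ffn):

    import pandas as pd

    monthly = pd.Series([0.010, -0.005, 0.020])
    excess = to_excess_returns(monthly, rf=0.02, nperiods=12)
    print(excess)  # each value less (1.02 ** (1 / 12) - 1) ~= 0.00165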
352,441
Converts from prices -> `Ulcer index <https://www.investopedia.com/terms/u/ulcerindex.asp>`_ See https://en.wikipedia.org/wiki/Ulcer_index Args: * prices (Series, DataFrame): Prices
def to_ulcer_index(prices): dd = prices.to_drawdown_series() return np.divide(np.sqrt(np.sum(np.power(dd, 2))), dd.count())
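Note that this implementation computes sqrt(sum(dd^2)) / n, whereas the textbook Ulcer Index is sqrt(sum(dd^2) / n); a worked sketch of what the code actually does:

    import numpy as np
    import pandas as pd

    prices = pd.Series([100.0, 90.0, 80.0, 100.0])
    dd = prices / prices.cummax() - 1                      # 0, -0.10, -0.20, 0
    print(np.sqrt(np.sum(np.power(dd, 2))) / dd.count())   # sqrt(0.05) / 4 ~= 0.0559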
352,442
Converts from prices -> `ulcer performance index <https://www.investopedia.com/terms/u/ulcerindex.asp>`_. See https://en.wikipedia.org/wiki/Ulcer_index Args: * prices (Series, DataFrame): Prices * rf (float, Series): `Risk-free rate of return <https://www.investopedia.com/terms/r/risk-freerate.asp>`_. Assumed to be expressed in yearly (annualized) terms or return series * nperiods (int): Used to deannualize rf if rf is provided (non-zero)
def to_ulcer_performance_index(prices, rf=0., nperiods=None): if type(rf) is float and rf != 0 and nperiods is None: raise Exception('nperiods must be set if rf != 0 and rf is not a price series') er = prices.to_returns().to_excess_returns(rf, nperiods=nperiods) return np.divide(er.mean(), prices.to_ulcer_index())
352,443
Set annual risk-free rate property and calculate properly annualized monthly and daily rates. Then performance stats are recalculated. Affects only this instance of the PerformanceStats. Args: * rf (float): Annual `risk-free rate <https://www.investopedia.com/terms/r/risk-freerate.asp>`_
def set_riskfree_rate(self, rf): self.rf = rf # Note, that we recalculate everything. self._update(self.prices)
352,447
Returns a CSV string with appropriate formatting. If path is not None, the string will be saved to file at path. Args: * sep (char): Separator * path (str): If None, CSV string returned. Else file written to specified path.
def to_csv(self, sep=',', path=None): stats = self._stats() data = [] first_row = ['Stat', self.name] data.append(sep.join(first_row)) for stat in stats: k, n, f = stat # blank row if k is None: row = [''] * len(data[0]) data.append(sep.join(row)) continue elif k == 'rf' and not type(self.rf) == float: continue row = [n] raw = getattr(self, k) if f is None: row.append(raw) elif f == 'p': row.append(fmtp(raw)) elif f == 'n': row.append(fmtn(raw)) elif f == 'dt': row.append(raw.strftime('%Y-%m-%d')) else: raise NotImplementedError('unsupported format %s' % f) data.append(sep.join(row)) res = '\n'.join(data) if path is not None: with open(path, 'w') as fl: fl.write(res) else: return res
352,456
Set annual `risk-free rate <https://www.investopedia.com/terms/r/risk-freerate.asp>`_ property and calculate properly annualized monthly and daily rates. Then performance stats are recalculated. Affects only those instances of PerformanceStats that are children of this GroupStats object. Args: * rf (float, Series): Annual risk-free rate or risk-free rate price series
def set_riskfree_rate(self, rf): for key in self._names: self[key].set_riskfree_rate(rf) # calculate stats for entire series self._update_stats()
352,461
Update date range of stats, charts, etc. If None then the original date range is used. So to reset to the original range, just call with no args. Args: * start (date): start date * end (date): end date
def set_date_range(self, start=None, end=None): start = self._start if start is None else pd.to_datetime(start) end = self._end if end is None else pd.to_datetime(end) self._update(self._prices.loc[start:end])
352,462
Helper function for plotting the series. Args: * freq (str): Data frequency used for display purposes. Refer to pandas docs for valid freq strings. * figsize ((x,y)): figure size * title (str): Title if default not appropriate * logy (bool): log-scale for y axis * kwargs: passed to pandas' plot method
def plot(self, freq=None, figsize=(15, 5), title=None, logy=False, **kwargs): if title is None: title = self._get_default_plot_title( freq, 'Equity Progression') ser = self._get_series(freq).rebase() return ser.plot(figsize=figsize, logy=logy, title=title, **kwargs)
352,465
Wrapper around pandas' scatter_matrix. Args: * freq (str): Data frequency used for display purposes. Refer to pandas docs for valid freq strings. * figsize ((x,y)): figure size * title (str): Title if default not appropriate * kwargs: passed to pandas' scatter_matrix method
def plot_scatter_matrix(self, freq=None, title=None, figsize=(10, 10), **kwargs): if title is None: title = self._get_default_plot_title( freq, 'Return Scatter Matrix') plt.figure() ser = self._get_series(freq).to_returns().dropna() pd.plotting.scatter_matrix(ser, figsize=figsize, **kwargs) return plt.suptitle(title)
352,466
Wrapper around pandas' hist. Args: * freq (str): Data frequency used for display purposes. Refer to pandas docs for valid freq strings. * figsize ((x,y)): figure size * title (str): Title if default not appropriate * kwargs: passed to pandas' hist method
def plot_histograms(self, freq=None, title=None, figsize=(10, 10), **kwargs): if title is None: title = self._get_default_plot_title( freq, 'Return Histogram Matrix') plt.figure() ser = self._get_series(freq).to_returns().dropna() ser.hist(figsize=figsize, **kwargs) return plt.suptitle(title)
352,467
Utility function to plot correlations. Args: * freq (str): Pandas data frequency alias string * title (str): Plot title * figsize (tuple (x,y)): figure size * kwargs: passed to Pandas' plot_corr_heatmap function
def plot_correlation(self, freq=None, title=None, figsize=(12, 6), **kwargs): if title is None: title = self._get_default_plot_title( freq, 'Return Correlation Matrix') rets = self._get_series(freq).to_returns().dropna() return rets.plot_corr_heatmap(title=title, figsize=figsize, **kwargs)
352,468
Returns a CSV string with appropriate formatting. If path is not None, the string will be saved to file at path. Args: * sep (char): Separator * path (str): If None, CSV string returned. Else file written to specified path.
def to_csv(self, sep=',', path=None): data = [] first_row = ['Stat'] first_row.extend(self._names) data.append(sep.join(first_row)) stats = self._stats() for stat in stats: k, n, f = stat # blank row if k is None: row = [''] * len(data[0]) data.append(sep.join(row)) continue row = [n] for key in self._names: raw = getattr(self[key], k) if f is None: row.append(raw) elif f == 'p': row.append(fmtp(raw)) elif f == 'n': row.append(fmtn(raw)) elif f == 'dt': row.append(raw.strftime('%Y-%m-%d')) else: raise NotImplementedError('unsupported format %s' % f) data.append(sep.join(row)) res = '\n'.join(data) if path is not None: with open(path, 'w') as fl: fl.write(res) else: return res
352,470
Recursively renders child tokens. Joins the rendered strings with no space in between. If newlines / spaces are needed between tokens, add them in their respective templates, or override this function in the renderer subclass, so that whitespace won't seem to appear magically for anyone reading your program. Arguments: token: a branch node that has a children attribute.
def render_inner(self, token): rendered = [self.render(child) for child in token.children] return ''.join(rendered)
353,218
This function determines whether a textx object is an instance of a textx class. Args: obj: the object to be analyzed obj_cls: the class to be checked Returns: True if obj is an instance of obj_cls.
def textx_isinstance(obj, obj_cls): if isinstance(obj, obj_cls): return True if hasattr(obj_cls, "_tx_fqn") and hasattr(obj, "_tx_fqn"): if obj_cls._tx_fqn == obj._tx_fqn: return True if hasattr(obj_cls, "_tx_inh_by"): for cls in obj_cls._tx_inh_by: if (textx_isinstance(obj, cls)): return True return False
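A hedged sketch with a tiny textX grammar (assumes textx is installed; that textx_isinstance is exported from the textx package is an assumption about recent versions):

    from textx import metamodel_from_str, textx_isinstance

    mm = metamodel_from_str('''
    Model: shapes+=Shape;
    Shape: Circle | Square;
    Circle: 'circle' name=ID;
    Square: 'square' name=ID;
    ''')
    model = mm.model_from_str('circle c1 square s1')
    circle = model.shapes[0]
    # Shape is an abstract rule: the concrete Circle class need not be a Python
    # subclass of it, so the _tx_inh_by walk above is what makes this True
    print(textx_isinstance(circle, mm['Shape']))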
353,496
get a list of the objects consisting of - obj - obj+"."+dot_separated_name - (obj+"."+dot_separated_name)+"."+dot_separated_name (called recursively) Note: lists are expanded Args: obj: the starting point dot_separated_name: "the search path" (applied recursively) lst: the initial list (e.g. []) Returns: the filled list (if one single object is requested, a list with one entry is returned).
def get_list_of_concatenated_objects(obj, dot_separated_name, lst=None): from textx.scoping import Postponed if lst is None: lst = [] if not obj: return lst if obj in lst: return lst lst.append(obj) if type(obj) is Postponed: return lst ret = get_referenced_object(None, obj, dot_separated_name) if type(ret) is list: for r in ret: lst = get_list_of_concatenated_objects(r, dot_separated_name, lst) else: lst = get_list_of_concatenated_objects(ret, dot_separated_name, lst) return lst
353,497
Same as get_referenced_object, but always returns a list. Args: prev_obj: see get_referenced_object obj: see get_referenced_object dot_separated_name: see get_referenced_object desired_type: see get_referenced_object Returns: same as get_referenced_object, but always returns a list
def get_referenced_object_as_list( prev_obj, obj, dot_separated_name, desired_type=None): res = get_referenced_object(prev_obj, obj, dot_separated_name, desired_type) if res is None: return [] elif type(res) is list: return res else: return [res]
353,501
retrieves a uniquely named object (no fully qualified name) Args: root: start of search (if root is a model all known models are searched as well) name: name of object Returns: the object (if not unique, raises an error)
def get_unique_named_object_in_all_models(root, name): if hasattr(root, '_tx_model_repository'): src = list( root._tx_model_repository.local_models.filename_to_model.values()) if root not in src: src.append(root) else: src = [root] a = [] for m in src: print("analyzing {}".format(m._tx_filename)) a = a + get_children( lambda x: hasattr(x, 'name') and x.name == name, m) assert len(a) == 1 return a[0]
353,502
retrieves a uniquely named object (no fully qualified name) Args: root: start of search name: name of object Returns: the object (if not unique, raises an error)
def get_unique_named_object(root, name): a = get_children(lambda x: hasattr(x, 'name') and x.name == name, root) assert len(a) == 1 return a[0]
353,503