positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def multipart_encode(params, boundary=None, cb=None):
    """Encode ``params`` as multipart/form-data.

    ``params`` should be a sequence of (name, value) pairs or MultipartParam
    objects, or a mapping of names to values.
    Values are either string parameter values, or file-like objects to use as
    the parameter value.  The file-like objects must support .read() and
    either .fileno() or both .seek() and .tell().

    If ``boundary`` is set, then it is used as the MIME boundary.  Otherwise
    a randomly generated boundary will be used.  In either case, if the
    boundary string appears in the parameter values a ValueError will be
    raised.

    If ``cb`` is set, it should be a callback which will get called as blocks
    of data are encoded.  It will be called with (param, current, total),
    indicating the current parameter being encoded, the current amount encoded,
    and the total amount to encode.

    Returns a tuple of `datagen`, `headers`, where `datagen` is a
    generator that will yield blocks of data that make up the encoded
    parameters, and `headers` is a dictionary with the associated
    Content-Type and Content-Length headers.

    Examples:

    >>> datagen, headers = multipart_encode( [("key", "value1"), ("key", "value2")] )
    >>> s = "".join(datagen)
    >>> assert "value2" in s and "value1" in s

    >>> p = MultipartParam("key", "value2")
    >>> datagen, headers = multipart_encode( [("key", "value1"), p] )
    >>> s = "".join(datagen)
    >>> assert "value2" in s and "value1" in s

    >>> datagen, headers = multipart_encode( {"key": "value1"} )
    >>> s = "".join(datagen)
    >>> assert "value2" not in s and "value1" in s
    """
    if boundary is None:
        boundary = gen_boundary()
    else:
        # Python 2 API; quoting keeps the caller-supplied boundary MIME-safe.
        boundary = urllib.quote_plus(boundary)

    headers = get_headers(params, boundary)
    params = MultipartParam.from_params(params)

    return MultipartYielder(params, boundary, cb), headers
Encode ``params`` as multipart/form-data. ``params`` should be a sequence of (name, value) pairs or MultipartParam objects, or a mapping of names to values. Values are either strings parameter values, or file-like objects to use as the parameter value. The file-like objects must support .read() and either .fileno() or both .seek() and .tell(). If ``boundary`` is set, then it as used as the MIME boundary. Otherwise a randomly generated boundary will be used. In either case, if the boundary string appears in the parameter values a ValueError will be raised. If ``cb`` is set, it should be a callback which will get called as blocks of data are encoded. It will be called with (param, current, total), indicating the current parameter being encoded, the current amount encoded, and the total amount to encode. Returns a tuple of `datagen`, `headers`, where `datagen` is a generator that will yield blocks of data that make up the encoded parameters, and `headers` is a dictionary with the assoicated Content-Type and Content-Length headers. Examples: >>> datagen, headers = multipart_encode( [("key", "value1"), ("key", "value2")] ) >>> s = "".join(datagen) >>> assert "value2" in s and "value1" in s >>> p = MultipartParam("key", "value2") >>> datagen, headers = multipart_encode( [("key", "value1"), p] ) >>> s = "".join(datagen) >>> assert "value2" in s and "value1" in s >>> datagen, headers = multipart_encode( {"key": "value1"} ) >>> s = "".join(datagen) >>> assert "value2" not in s and "value1" in s
def _inherit_outputs(self, pipeline_name, already_defined, resolve_outputs=False):
    """Inherits outputs from a calling Pipeline.

    Args:
      pipeline_name: The Pipeline class name (used for debugging).
      already_defined: Maps output name to stringified db.Key (of _SlotRecords)
        of any existing output slots to be inherited by this future.
      resolve_outputs: When True, this method will dereference all output slots
        before returning back to the caller, making those output slots' values
        available.

    Raises:
      UnexpectedPipelineError when resolve_outputs is True and any of the
      output slots could not be retrieved from the Datastore.
    """
    # Python 2 dict API (iteritems/itervalues) throughout.
    for name, slot_key in already_defined.iteritems():
        if not isinstance(slot_key, db.Key):
            # Keys arrive stringified; reconstruct the real db.Key.
            slot_key = db.Key(slot_key)

        slot = self._output_dict.get(name)
        if slot is None:
            if self._strict:
                raise UnexpectedPipelineError(
                    'Inherited output named "%s" must be filled but '
                    'not declared for pipeline class "%s"' % (name, pipeline_name))
            else:
                self._output_dict[name] = Slot(name=name, slot_key=slot_key)
        else:
            # Rebind the declared slot onto the inherited datastore key.
            slot.key = slot_key
            slot._exists = True

    if resolve_outputs:
        slot_key_dict = dict((s.key, s) for s in self._output_dict.itervalues())
        all_slots = db.get(slot_key_dict.keys())
        # NOTE(review): assumes db.get preserves input key ordering so the
        # zip pairs each slot with its own record -- TODO confirm.
        for slot, slot_record in zip(slot_key_dict.itervalues(), all_slots):
            if slot_record is None:
                raise UnexpectedPipelineError(
                    'Inherited output named "%s" for pipeline class "%s" is '
                    'missing its Slot in the datastore: "%s"'
                    % (slot.name, pipeline_name, slot.key))
            slot = slot_key_dict[slot_record.key()]
            slot._set_value(slot_record)
Inherits outputs from a calling Pipeline. Args: pipeline_name: The Pipeline class name (used for debugging). already_defined: Maps output name to stringified db.Key (of _SlotRecords) of any existing output slots to be inherited by this future. resolve_outputs: When True, this method will dereference all output slots before returning back to the caller, making those output slots' values available. Raises: UnexpectedPipelineError when resolve_outputs is True and any of the output slots could not be retrieved from the Datastore.
def create_instances(self, config_list):
    """Creates multiple virtual server instances.

    This takes a list of dictionaries using the same arguments as
    create_instance().

    .. warning::

        This will add charges to your account

    Example::

        # Define the instance we want to create.
        new_vsi = {
            'domain': u'test01.labs.sftlyr.ws',
            'hostname': u'minion05',
            'datacenter': u'hkg02',
            'flavor': 'BL1_1X2X100',
            'dedicated': False,
            'private': False,
            'os_code' : u'UBUNTU_LATEST',
            'hourly': True,
            'ssh_keys': [1234],
            'disks': ('100','25'),
            'local_disk': True,
            'tags': 'test, pleaseCancel',
            'public_security_groups': [12, 15]
        }

        # using .copy() so we can make changes to individual nodes
        instances = [new_vsi.copy(), new_vsi.copy(), new_vsi.copy()]

        # give each its own hostname, not required.
        instances[0]['hostname'] = "multi-test01"
        instances[1]['hostname'] = "multi-test02"
        instances[2]['hostname'] = "multi-test03"

        vsi = mgr.create_instances(config_list=instances)
        # vsi will be a list of all the new virtual servers
        print vsi
    """
    # Tags are not part of the createObjects payload; pop them out first
    # and apply them per-guest after creation.
    tags = [conf.pop('tags', None) for conf in config_list]
    resp = self.guest.createObjects([self._generate_create_dict(**kwargs)
                                     for kwargs in config_list])
    for instance, tag in zip(resp, tags):
        if tag is not None:
            self.set_tags(tag, guest_id=instance['id'])
    return resp
Creates multiple virtual server instances. This takes a list of dictionaries using the same arguments as create_instance(). .. warning:: This will add charges to your account Example:: # Define the instance we want to create. new_vsi = { 'domain': u'test01.labs.sftlyr.ws', 'hostname': u'minion05', 'datacenter': u'hkg02', 'flavor': 'BL1_1X2X100' 'dedicated': False, 'private': False, 'os_code' : u'UBUNTU_LATEST', 'hourly': True, 'ssh_keys': [1234], 'disks': ('100','25'), 'local_disk': True, 'tags': 'test, pleaseCancel', 'public_security_groups': [12, 15] } # using .copy() so we can make changes to individual nodes instances = [new_vsi.copy(), new_vsi.copy(), new_vsi.copy()] # give each its own hostname, not required. instances[0]['hostname'] = "multi-test01" instances[1]['hostname'] = "multi-test02" instances[2]['hostname'] = "multi-test03" vsi = mgr.create_instances(config_list=instances) #vsi will be a dictionary of all the new virtual servers print vsi
def filter(self, names):
    """Return the names matching any of this object's patterns.

    Args:
        names: a string or sequence of strings (normalized via
            ``list_strings``).

    Returns:
        list: the names that match at least one pattern in ``self.pats``,
        in their original order.  Each matching name appears exactly once
        (the previous implementation appended a name once per matching
        pattern, producing duplicates when several patterns matched).
    """
    names = list_strings(names)
    return [name for name in names
            if any(fnmatch.fnmatch(name, pat) for pat in self.pats)]
Returns a list with the names matching the pattern.
def _fulfills_version_spec(version, version_spec):
    '''
    Check version number against version specification info and return a
    boolean value based on whether or not the version number meets the
    specified version.

    version_spec is an iterable of (operator, version) pairs, e.g. the
    parsed form of ">=1.0,<2.0".  Pairs whose operator is None are
    treated as unconstrained and skipped.  All remaining constraints
    must hold (logical AND) for the check to pass.
    '''
    for oper, spec in version_spec:
        if oper is None:
            continue
        # PEP 440 comparison semantics via the custom cmp_func.
        if not salt.utils.versions.compare(ver1=version, oper=oper, ver2=spec,
                                           cmp_func=_pep440_version_cmp):
            return False
    return True
Check version number against version specification info and return a boolean value based on whether or not the version number meets the specified version.
def backendStatus(self, *args, **kwargs):
    """
    Backend Status

    This endpoint is used to show when the last time the provisioner
    has checked in.  A check in is done through the deadman's snitch
    api.  It is done at the conclusion of a provisioning iteration and
    used to tell if the background provisioning process is still
    running.

    **Warning** this api end-point is **not stable**.

    This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/backend-status-response.json#``

    This method is ``experimental``
    """
    # Thin generated wrapper: all argument handling is delegated to the
    # shared API-call machinery keyed by this method's funcinfo entry.
    return self._makeApiCall(self.funcinfo["backendStatus"], *args, **kwargs)
Backend Status This endpoint is used to show when the last time the provisioner has checked in. A check in is done through the deadman's snitch api. It is done at the conclusion of a provisioning iteration and used to tell if the background provisioning process is still running. **Warning** this api end-point is **not stable**. This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/backend-status-response.json#`` This method is ``experimental``
def parse_addr(addr, *, proto=None, host=None):
    """Parses an address into an Address(proto, host, port) tuple.

    Accepts an existing Address (returned unchanged), a string such as
    'tcp://host:port', 'host:port', or ':port', a (host, port) pair, or a
    bare int port.  ``proto`` and ``host`` act as defaults used when the
    input does not specify them.

    Returns:
        Address: the parsed address

    Raises:
        ValueError: if ``addr`` is of an unsupported type.
    """
    port = None
    if isinstance(addr, Address):
        return addr
    elif isinstance(addr, str):
        # NOTE: the 'http://' check is a separate `if`, so after stripping
        # the http prefix the udp/tcp/unix chain is still evaluated on the
        # remainder (it simply won't match).
        if addr.startswith('http://'):
            proto, addr = 'http', addr[7:]
        if addr.startswith('udp://'):
            proto, addr = 'udp', addr[6:]
        elif addr.startswith('tcp://'):
            proto, addr = 'tcp', addr[6:]
        elif addr.startswith('unix://'):
            proto, addr = 'unix', addr[7:]
        # Split on the first ':' only; empty pieces fall back to defaults.
        a, _, b = addr.partition(':')
        host = a or host
        port = b or port
    elif isinstance(addr, (tuple, list)):
        # list is not good
        a, b = addr
        host = a or host
        port = b or port
    elif isinstance(addr, int):
        port = addr
    else:
        raise ValueError('bad value')
    if port is not None:
        port = int(port)
    return Address(proto, host, port)
Parses an address Returns: Address: the parsed address
def _read_mptcp_dss(self, bits, size, kind):
    """Read Data Sequence Signal (Data ACK and Data Sequence Mapping) option.

    Parses the Multipath TCP DSS option per RFC 6824 section 3.3: a flag
    byte (reserved bits, then F/m/M/a/A in the low nibble) followed by an
    optional Data ACK (4 or 8 octets if A set, 8 if a set), and -- if M is
    set -- a Data Sequence Number (4 or 8 octets, 8 if m set), a 4-octet
    Subflow Sequence Number, a 2-octet Data-Level Length and a 2-octet
    Checksum.

    Positional arguments:
        * bits - str, 4-bit subtype-adjacent data
          (NOTE(review): immediately overwritten by the flag byte read
          below, so the passed-in value is unused -- TODO confirm intended)
        * size - int, length of option
        * kind - int, 30 (Multipath TCP)

    Returns:
        * dict -- extracted Data Sequence Signal (DSS) option
    """
    # Flag byte: bits[3]=F (DATA_FIN), bits[4]=m (DSN len), bits[5]=M
    # (mapping present), bits[6]=a (ACK len), bits[7]=A (ACK present).
    bits = self._read_binary(1)
    mflg = 8 if int(bits[4]) else 4
    Mflg = True if int(bits[5]) else False
    aflg = 8 if int(bits[6]) else 4
    Aflg = True if int(bits[7]) else False
    # Each field is only present on the wire when its flag is set.
    ack_ = self._read_fileng(aflg) if Aflg else None
    dsn_ = self._read_unpack(mflg) if Mflg else None
    ssn_ = self._read_unpack(4) if Mflg else None
    dll_ = self._read_unpack(2) if Mflg else None
    chk_ = self._read_fileng(2) if Mflg else None

    data = dict(
        kind=kind,
        # NOTE(review): length is reported as size + 1 -- presumably to
        # re-include a byte consumed before this helper; TODO confirm.
        length=size + 1,
        subtype='DSS',
        dss=dict(
            flags=dict(
                fin=True if int(bits[3]) else False,
                dsn_len=mflg,
                data_pre=Mflg,
                ack_len=aflg,
                ack_pre=Aflg,
            ),
            ack=ack_,
            dsn=dsn_,
            ssn=ssn_,
            dl_len=dll_,
            checksum=chk_,
        ),
    )

    return data
Read Data Sequence Signal (Data ACK and Data Sequence Mapping) option. Positional arguments: * bits - str, 4-bit data * size - int, length of option * kind - int, 30 (Multipath TCP) Returns: * dict -- extracted Data Sequence Signal (DSS) option Structure of DSS [RFC 6824]: 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +---------------+---------------+-------+----------------------+ | Kind | Length |Subtype| (reserved) |F|m|M|a|A| +---------------+---------------+-------+----------------------+ | Data ACK (4 or 8 octets, depending on flags) | +--------------------------------------------------------------+ | Data sequence number (4 or 8 octets, depending on flags) | +--------------------------------------------------------------+ | Subflow Sequence Number (4 octets) | +-------------------------------+------------------------------+ | Data-Level Length (2 octets) | Checksum (2 octets) | +-------------------------------+------------------------------+ 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +--------------------------------------------------------------+ | | | Data Sequence Number (8 octets) | | | +--------------------------------------------------------------+ | Subflow Sequence Number (4 octets) | +-------------------------------+------------------------------+ | Data-Level Length (2 octets) | Zeros (2 octets) | +-------------------------------+------------------------------+ Octets Bits Name Description 0 0 tcp.mp.kind Kind (30) 1 8 tcp.mp.length Length 2 16 tcp.mp.subtype Subtype (2) 2 20 - Reserved (must be zero) 3 27 tcp.mp.dss.flags.fin DATA_FIN (F) 3 28 tcp.mp.dss.flags.dsn_len DSN Length (m) 3 29 tcp.mp.dss.flags.data_pre DSN, SSN, Data-Level Length, CHKSUM Present (M) 3 30 tcp.mp.dss.flags.ack_len ACK Length (a) 3 31 tcp.mp.dss.flags.ack_pre Data ACK Present (A) 4 32 tcp.mp.dss.ack Data ACK (4/8 octets) 8-12 64-96 tcp.mp.dss.dsn DSN (4/8 octets) 12-20 48-160 tcp.mp.dss.ssn Subflow Sequence Number 16-24 
128-192 tcp.mp.dss.dl_len Data-Level Length 18-26 144-208 tcp.mp.dss.checksum Checksum
def set_static_ip_address(self, payload): """Set static ip address for a VM.""" # This request is received from CLI for setting ip address of an # instance. macaddr = payload.get('mac') ipaddr = payload.get('ip') # Find the entry associated with the mac in the database. req = dict(mac=macaddr) instances = self.get_vms_for_this_req(**req) for vm in instances: LOG.info('Updating IP address: %(ip)s %(mac)s.', {'ip': ipaddr, 'mac': macaddr}) # Send request to update the rule. try: rule_info = dict(ip=ipaddr, mac=macaddr, port=vm.port_id, status='up') self.neutron_event.update_ip_rule(str(vm.host), str(rule_info)) except (rpc.MessagingTimeout, rpc.RPCException, rpc.RemoteError): LOG.error("RPC error: Failed to update rules.") else: # Update the database. params = dict(columns=dict(ip=ipaddr)) self.update_vm_db(vm.port_id, **params) # Send update to the agent. vm_info = dict(status=vm.status, vm_mac=vm.mac, segmentation_id=vm.segmentation_id, host=vm.host, port_uuid=vm.port_id, net_uuid=vm.network_id, oui=dict(ip_addr=ipaddr, vm_name=vm.name, vm_uuid=vm.instance_id, gw_mac=vm.gw_mac, fwd_mod=vm.fwd_mod, oui_id='cisco')) try: self.neutron_event.send_vm_info(vm.host, str(vm_info)) except (rpc.MessagingTimeout, rpc.RPCException, rpc.RemoteError): LOG.error('Failed to send VM info to agent.')
Set static ip address for a VM.
def libvlc_set_user_agent(p_instance, name, http):
    '''Sets the application name. LibVLC passes this as the user agent string
    when a protocol requires it.
    @param p_instance: LibVLC instance.
    @param name: human-readable application name, e.g. "FooBar player 1.2.3".
    @param http: HTTP User Agent, e.g. "FooBar/1.2.3 Python/2.6.0".
    @version: LibVLC 1.1.1 or later.
    '''
    # Resolve and memoize the ctypes binding on first use; subsequent calls
    # reuse the cached function object from _Cfunctions.
    f = _Cfunctions.get('libvlc_set_user_agent', None) or \
        _Cfunction('libvlc_set_user_agent', ((1,), (1,), (1,),), None,
                   None, Instance, ctypes.c_char_p, ctypes.c_char_p)
    return f(p_instance, name, http)
Sets the application name. LibVLC passes this as the user agent string when a protocol requires it. @param p_instance: LibVLC instance. @param name: human-readable application name, e.g. "FooBar player 1.2.3". @param http: HTTP User Agent, e.g. "FooBar/1.2.3 Python/2.6.0". @version: LibVLC 1.1.1 or later.
def ctc_symbol_loss(top_out, targets, model_hparams, vocab_size, weight_fn):
    """Compute the CTC loss for a batch of symbol targets.

    Args:
      top_out: logits tensor of shape [batch, length, 1, vocab] (the inner
        two axes are squeezed before the CTC call).
      targets: int tensor of shape [batch, length, 1, 1]; id 0 is padding.
      model_hparams: unused (kept for the generic loss-fn signature).
      vocab_size: unused (kept for the generic loss-fn signature).
      weight_fn: callable mapping targets to per-position loss weights.

    Returns:
      A (loss_numerator, loss_denominator) pair of scalar tensors.
    """
    del model_hparams, vocab_size  # unused arg
    logits = top_out
    with tf.name_scope("ctc_loss", values=[logits, targets]):
        # For CTC we assume targets are 1d, [batch, length, 1, 1] here.
        targets_shape = targets.get_shape().as_list()
        assert len(targets_shape) == 4
        assert targets_shape[2] == 1
        assert targets_shape[3] == 1
        targets = tf.squeeze(targets, axis=[2, 3])
        logits = tf.squeeze(logits, axis=[2, 3])
        # Padding id is 0, so the mask counts non-pad positions per example.
        targets_mask = 1 - tf.to_int32(tf.equal(targets, 0))
        targets_lengths = tf.reduce_sum(targets_mask, axis=1)
        sparse_targets = tf.keras.backend.ctc_label_dense_to_sparse(
            targets, targets_lengths)
        xent = tf.nn.ctc_loss(
            sparse_targets, logits, targets_lengths, time_major=False,
            preprocess_collapse_repeated=False, ctc_merge_repeated=False)
        weights = weight_fn(targets)
        return tf.reduce_sum(xent), tf.reduce_sum(weights)
Compute the CTC loss.
def _compare_replication(current, desired, region, key, keyid, profile):
    '''
    Replication accepts a non-ARN role name, but always returns an ARN.

    To compare apples to apples, resolve the desired Role to its full ARN
    (on a deep copy, so the caller's dict is not mutated) before doing a
    structural JSON comparison against the current config.
    '''
    if desired is not None and desired.get('Role'):
        desired = copy.deepcopy(desired)
        desired['Role'] = _get_role_arn(desired['Role'], region=region, key=key,
                                        keyid=keyid, profile=profile)
    return __utils__['boto3.json_objs_equal'](current, desired)
Replication accepts a non-ARN role name, but always returns an ARN
def transport_jsonrpc(self): """ Installs the JSON-RPC transport bundles and instantiates components """ # Install the bundle self.context.install_bundle("pelix.remote.json_rpc").start() with use_waiting_list(self.context) as ipopo: # Instantiate the discovery ipopo.add( rs.FACTORY_TRANSPORT_JSONRPC_EXPORTER, "pelix-jsonrpc-exporter" ) ipopo.add( rs.FACTORY_TRANSPORT_JSONRPC_IMPORTER, "pelix-jsonrpc-importer" )
Installs the JSON-RPC transport bundles and instantiates components
def list_tasks(self, app_id=None, **kwargs):
    """List running tasks, optionally filtered by app_id.

    :param str app_id: if passed, only show tasks for this application
    :param kwargs: arbitrary search filters applied as exact attribute
        matches on the returned tasks
    :returns: list of tasks
    :rtype: list[:class:`marathon.models.task.MarathonTask`]
    """
    response = self._do_request(
        'GET', '/v2/apps/%s/tasks' % app_id if app_id else '/v2/tasks')
    tasks = self._parse_response(
        response, MarathonTask, is_list=True, resource_name='tasks')
    # Backfill app_id on tasks that lack it.  The original used a list
    # comprehension purely for its setattr side effect; an explicit loop
    # states the intent and avoids building a throwaway list.
    if app_id:
        for task in tasks:
            if task.app_id is None:
                task.app_id = app_id
    for k, v in kwargs.items():
        tasks = [o for o in tasks if getattr(o, k) == v]
    return tasks
List running tasks, optionally filtered by app_id. :param str app_id: if passed, only show tasks for this application :param kwargs: arbitrary search filters :returns: list of tasks :rtype: list[:class:`marathon.models.task.MarathonTask`]
def dms2dd(degrees, minutes, seconds, direction):
    """Convert a degrees/minutes/seconds triple to decimal degrees.

    :param string direction: one of N S W E; S and W yield a negative
        result.
    """
    # Float divisors keep the conversion correct under Python 2 as well.
    decimal = degrees + minutes / 60.0 + seconds / 3600.0
    if direction in ('S', 'W'):
        return -decimal
    return decimal
convert degrees, minutes, seconds to dd :param string direction: one of N S W E
def split_bgedge(self, bgedge, guidance=None, sorted_guidance=False,
                 account_for_colors_multiplicity_in_guidance=True, key=None):
    """ Splits a :class:`bg.edge.BGEdge` in current :class:`BreakpointGraph` most similar to supplied one (if no unique identifier ``key`` is provided) with respect to supplied guidance.

    Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__split_bgedge` method.

    :param bgedge: an edge to find most "similar to" among existing edges for a split
    :type bgedge: :class:`bg.edge.BGEdge`
    :param guidance: a guidance for underlying :class:`bg.multicolor.Multicolor` object to be split
    :type guidance: iterable where each entry is iterable with colors entries
    :param sorted_guidance: whether the supplied guidance is already sorted
    :type sorted_guidance: ``Boolean``
    :param account_for_colors_multiplicity_in_guidance: take the multiplicity of respective colors into account when splitting
    :type account_for_colors_multiplicity_in_guidance: ``Boolean``
    :param key: unique identifier of edge to be split
    :type key: any python object. ``int`` is expected
    :return: ``None``, performs inplace changes
    """
    # Pure delegation to the private (name-mangled) implementation.
    self.__split_bgedge(bgedge=bgedge, guidance=guidance,
                        sorted_guidance=sorted_guidance,
                        account_for_colors_multiplicity_in_guidance=account_for_colors_multiplicity_in_guidance,
                        key=key)
Splits a :class:`bg.edge.BGEdge` in current :class:`BreakpointGraph` most similar to supplied one (if no unique identifier ``key`` is provided) with respect to supplied guidance. Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__split_bgedge` method. :param bgedge: an edge to find most "similar to" among existing edges for a split :type bgedge: :class:`bg.edge.BGEdge` :param guidance: a guidance for underlying :class:`bg.multicolor.Multicolor` object to be split :type guidance: iterable where each entry is iterable with colors entries :param duplication_splitting: flag (**not** currently implemented) for a splitting of color-based splitting to take into account multiplicity of respective colors :type duplication_splitting: ``Boolean`` :param key: unique identifier of edge to be split :type key: any python object. ``int`` is expected :return: ``None``, performs inplace changes
def ignores_exc_tb(*args, **kwargs):
    """
    PYTHON 2 ONLY VERSION -- needs to be in its own file for syntactic reasons

    ignore_exc_tb decorates a function and removes both itself and the
    function from any exception traceback that occurs.

    This is useful to decorate other trivial decorators
    which are polluting your stacktrace.

    if IGNORE_TRACEBACK is False then this decorator does nothing
    (and it should do nothing in production code!)

    May be used directly (``@ignores_exc_tb``) or called with keyword
    arguments (``@ignores_exc_tb(outer_wrapper=False)``).

    References:
        https://github.com/jcrocholl/pep8/issues/34  # NOQA
        http://legacy.python.org/dev/peps/pep-3109/
    """
    outer_wrapper = kwargs.get('outer_wrapper', True)

    def ignores_exc_tb_closure(func):
        if not IGNORE_TRACEBACK:
            # if the global enforces that we should not ignore any tracebacks
            # then just return the original function without any modification
            return func
        from utool import util_decor
        #@wraps(func)
        def wrp_noexectb(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception:
                # Define function to reraise with python 2 syntax
                #exc_type, exc_value, exc_traceback = sys.exc_info()
                # Code to remove this decorator from traceback
                # Remove two levels to remove this one as well
                exc_type, exc_value, exc_traceback = sys.exc_info()
                try:
                    exc_traceback = exc_traceback.tb_next
                    exc_traceback = exc_traceback.tb_next
                    #exc_traceback = exc_traceback.tb_next
                except Exception:
                    # Traceback is shallower than two frames; reraise as-is.
                    print('too many reraise')
                    pass
                raise exc_type, exc_value, exc_traceback
        if outer_wrapper:
            # Preserve the wrapped function's signature for introspection.
            wrp_noexectb = util_decor.preserve_sig(wrp_noexectb, func)
        return wrp_noexectb
    if len(args) == 1:
        # called with one arg means its a function call
        func = args[0]
        return ignores_exc_tb_closure(func)
    else:
        # called with no args means kwargs as specified
        return ignores_exc_tb_closure
PYTHON 2 ONLY VERSION -- needs to be in its own file for syntactic reasons ignore_exc_tb decorates a function and remove both itself and the function from any exception traceback that occurs. This is useful to decorate other trivial decorators which are polluting your stacktrace. if IGNORE_TRACEBACK is False then this decorator does nothing (and it should do nothing in production code!) References: https://github.com/jcrocholl/pep8/issues/34 # NOQA http://legacy.python.org/dev/peps/pep-3109/
def get_epoch_namespace_prices( block_height, units ):
    """
    Look up the table of namespace prices in effect at the given block
    height, denominated in the given units ('BTC' or Stacks tokens).
    """
    assert units in ['BTC', TOKEN_TYPE_STACKS], 'Invalid unit {}'.format(units)

    epoch_config = get_epoch_config( block_height )
    price_key = 'namespace_prices' if units == 'BTC' else 'namespace_prices_stacks'
    return epoch_config[price_key]
get the list of namespace prices by block height
def delling_network():
    """ Architecture according to Duelling DQN:
    https://arxiv.org/abs/1511.06581

    Builds (via the @tt.model decorator) a conv net with separate value
    and advantage streams, combined as Q = V + A - mean(A).
    NOTE(review): the function name looks like a typo for
    ``duelling_network`` but renaming would break existing callers.
    The decorated ``q_network`` is registered by the decorator; this
    function itself returns None.
    """

    @tt.model(tracker=tf.train.ExponentialMovingAverage(1 - .0005),  # TODO: replace with original weight freeze
              optimizer=tf.train.RMSPropOptimizer(6.25e-5, .95, .95, .01))
    def q_network(x):
        # Scale raw pixel input to [0, 1].
        x /= 255
        x = layers.conv2d(x, 32, 8, 4)
        x = layers.conv2d(x, 64, 4, 2)
        x = layers.conv2d(x, 64, 3, 1)
        x = layers.flatten(x)
        # Value stream: a single scalar state value per example.
        xv = layers.fully_connected(x, 512)
        val = layers.fully_connected(xv, 1, activation_fn=None)
        # val = tf.squeeze(val, 1)
        # Advantage stream: one output per action.
        xa = layers.fully_connected(x, 512)
        adv = layers.fully_connected(xa, env.action_space.n, activation_fn=None)
        # Duelling combination with the mean-advantage baseline.
        q = val + adv - tf.reduce_mean(adv, axis=1, keep_dims=True)
        q = tf.identity(q, name='Q')
        return q
Architecture according to Duelling DQN: https://arxiv.org/abs/1511.06581
def get_all_bundle_tasks(self, bundle_ids=None, filters=None):
    """
    Retrieve current bundling tasks. If no bundle id is specified, all
    tasks are retrieved.

    :type bundle_ids: list
    :param bundle_ids: A list of strings containing identifiers for
                       previously created bundling tasks.

    :type filters: dict
    :param filters: Optional filters that can be used to limit
                    the results returned.  Filters are provided
                    in the form of a dictionary consisting of
                    filter names as the key and filter values
                    as the value.  The set of allowable filter
                    names/values is dependent on the request
                    being performed.  Check the EC2 API guide
                    for details.
    """
    # Assemble the request parameters from whichever optional inputs
    # were supplied, then issue a DescribeBundleTasks call.
    request_params = {}
    if bundle_ids:
        self.build_list_params(request_params, bundle_ids, 'BundleId')
    if filters:
        self.build_filter_params(request_params, filters)
    markers = [('item', BundleInstanceTask)]
    return self.get_list('DescribeBundleTasks', request_params, markers,
                         verb='POST')
Retrieve current bundling tasks. If no bundle id is specified, all tasks are retrieved. :type bundle_ids: list :param bundle_ids: A list of strings containing identifiers for previously created bundling tasks. :type filters: dict :param filters: Optional filters that can be used to limit the results returned. Filters are provided in the form of a dictionary consisting of filter names as the key and filter values as the value. The set of allowable filter names/values is dependent on the request being performed. Check the EC2 API guide for details.
def _get_mixed_actions(labeling_bits, equation_tup, trans_recips):
    """
    From a labeling for player 0, a tuple of hyperplane equations of the
    polar polytopes, and a tuple of the reciprocals of the translations,
    return a tuple of the corresponding, normalized mixed actions.

    Parameters
    ----------
    labeling_bits : scalar(np.uint64)
        Integer with set bits representing a labeling of a mixed action
        of player 0.

    equation_tup : tuple(ndarray(float, ndim=1))
        Tuple of hyperplane equations of the polar polytopes.

    trans_recips : tuple(scalar(float))
        Tuple of the reciprocals of the translations.

    Returns
    -------
    tuple(ndarray(float, ndim=1))
        Tuple of mixed actions (views into one shared buffer).
    """
    # m, n: numbers of pure actions (the last equation entry is the offset).
    m, n = equation_tup[0].shape[0] - 1, equation_tup[1].shape[0] - 1
    out = np.empty(m+n)
    # Player 0's support bits are the low m bits (skip when bit == 1);
    # player 1's are the next n bits (skip when bit == 0).
    for pl, (start, stop, skip) in enumerate([(0, m, np.uint64(1)),
                                              (m, m+n, np.uint64(0))]):
        sum_ = 0.
        for i in range(start, stop):
            if (labeling_bits & np.uint64(1)) == skip:
                out[i] = 0
            else:
                out[i] = equation_tup[pl][i-start] * trans_recips[pl] - \
                    equation_tup[pl][-1]
                sum_ += out[i]
            # Consume one labeling bit per action (uint64 ops throughout to
            # avoid silent conversion to Python int).
            labeling_bits = labeling_bits >> np.uint64(1)
        if sum_ != 0:
            # Normalize each player's action to a probability vector.
            out[start:stop] /= sum_
    return out[:m], out[m:]
From a labeling for player 0, a tuple of hyperplane equations of the polar polytopes, and a tuple of the reciprocals of the translations, return a tuple of the corresponding, normalized mixed actions. Parameters ---------- labeling_bits : scalar(np.uint64) Integer with set bits representing a labeling of a mixed action of player 0. equation_tup : tuple(ndarray(float, ndim=1)) Tuple of hyperplane equations of the polar polytopes. trans_recips : tuple(scalar(float)) Tuple of the reciprocals of the translations. Returns ------- tuple(ndarray(float, ndim=1)) Tuple of mixed actions.
def clear_further_steps(self):
    """Clear all further steps in order to properly calculate the prev step.

    Resets every downstream keyword-wizard step widget so stale choices
    from a previous pass cannot leak into the recomputed flow.
    """
    self.parent.step_kw_hazard_category.lstHazardCategories.clear()
    self.parent.step_kw_subcategory.lstSubcategories.clear()
    self.parent.step_kw_layermode.lstLayerModes.clear()
    self.parent.step_kw_unit.lstUnits.clear()
    self.parent.step_kw_field.lstFields.clear()
    self.parent.step_kw_classification.lstClassifications.clear()
    self.parent.step_kw_threshold.classes.clear()
    # (The original cleared step_kw_multi_classifications twice; the
    # redundant second call has been removed.)
    self.parent.step_kw_multi_classifications.clear()
    self.parent.step_kw_inasafe_fields.clear()
    self.parent.step_kw_default_inasafe_fields.clear()
    self.parent.step_kw_inasafe_raster_default_values.clear()
    self.parent.step_kw_fields_mapping.clear()
Clear all further steps in order to properly calculate the prev step
def add(self, path):
    """Add a path to the overlay filesystem.

    Any filesystem operation involving this path or any sub-paths of it
    will be transparently redirected to the temporary root dir.  The path
    and every ancestor directory (excluding the root itself, unless the
    root is the path given) are recorded in ``self._paths``.

    @path: An absolute path string.
    Raises ValueError if ``path`` is not absolute.
    """
    if not path.startswith(os.sep):
        raise ValueError("Non-absolute path '{}'".format(path))
    path = path.rstrip(os.sep)
    # rstrip() reduces the root path to '' -- the original walk-up loop
    # never terminated in that case, so register the root directly.
    if not path:
        self._paths[os.sep] = None
        return
    while True:
        self._paths[path] = None
        path, _ = os.path.split(path)
        if path == os.sep:
            break
Add a path to the overlay filesystem. Any filesystem operation involving this path or any sub-paths of it will be transparently redirected to temporary root dir. @path: An absolute path string.
def make_hmap(pmap, imtls, poes):
    """
    Compute the hazard maps associated to the passed probability map.

    :param pmap: hazard curves in the form of a ProbabilityMap
    :param imtls: DictArray with M intensity measure types
    :param poes: P PoEs where to compute the maps
    :returns: a ProbabilityMap with size (N, M, P)
    """
    M, P = len(imtls), len(poes)
    hmap = probability_map.ProbabilityMap.build(M, P, pmap, dtype=F32)
    if len(pmap) == 0:
        return hmap  # empty hazard map
    for i, imt in enumerate(imtls):
        # Gather the curve slice for this IMT across all sites.
        curves = numpy.array([pmap[sid].array[imtls(imt), 0]
                              for sid in pmap.sids])
        data = compute_hazard_maps(curves, imtls[imt], poes)  # array (N, P)
        # Copy the per-site (P,) rows into the output map element-wise.
        for sid, value in zip(pmap.sids, data):
            array = hmap[sid].array
            for j, val in enumerate(value):
                array[i, j] = val
    return hmap
Compute the hazard maps associated to the passed probability map. :param pmap: hazard curves in the form of a ProbabilityMap :param imtls: DictArray with M intensity measure types :param poes: P PoEs where to compute the maps :returns: a ProbabilityMap with size (N, M, P)
def format_page(text): """Format the text for output adding ASCII frame around the text. Args: text (str): Text that needs to be formatted. Returns: str: Formatted string. """ width = max(map(len, text.splitlines())) page = "+-" + "-" * width + "-+\n" for line in text.splitlines(): page += "| " + line.ljust(width) + " |\n" page += "+-" + "-" * width + "-+\n" return page
Format the text for output adding ASCII frame around the text. Args: text (str): Text that needs to be formatted. Returns: str: Formatted string.
def mission_request_partial_list_encode(self, target_system, target_component, start_index, end_index): ''' Request a partial list of mission items from the system/component. http://qgroundcontrol.org/mavlink/waypoint_protocol. If start and end index are the same, just send one waypoint. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) start_index : Start index, 0 by default (int16_t) end_index : End index, -1 by default (-1: send list to end). Else a valid index of the list (int16_t) ''' return MAVLink_mission_request_partial_list_message(target_system, target_component, start_index, end_index)
Request a partial list of mission items from the system/component. http://qgroundcontrol.org/mavlink/waypoint_protocol. If start and end index are the same, just send one waypoint. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) start_index : Start index, 0 by default (int16_t) end_index : End index, -1 by default (-1: send list to end). Else a valid index of the list (int16_t)
def gen_keys(self, keydir=None, keyname=None, keysize=None, user=None): ''' Generate minion RSA public keypair ''' keydir, keyname, keysize, user = self._get_key_attrs(keydir, keyname, keysize, user) salt.crypt.gen_keys(keydir, keyname, keysize, user, self.passphrase) return salt.utils.crypt.pem_finger(os.path.join(keydir, keyname + '.pub'))
Generate minion RSA public keypair
def name(self, pretty=False): """ Return the name of the OS distribution, as a string. For details, see :func:`distro.name`. """ name = self.os_release_attr('name') \ or self.lsb_release_attr('distributor_id') \ or self.distro_release_attr('name') \ or self.uname_attr('name') if pretty: name = self.os_release_attr('pretty_name') \ or self.lsb_release_attr('description') if not name: name = self.distro_release_attr('name') \ or self.uname_attr('name') version = self.version(pretty=True) if version: name = name + ' ' + version return name or ''
Return the name of the OS distribution, as a string. For details, see :func:`distro.name`.
def set_implementation(self, impl): """ Sets the implementation of this module Parameters ---------- impl : str One of ["python", "c"] """ if impl.lower() == 'python': self.__impl__ = self.__IMPL_PYTHON__ elif impl.lower() == 'c': self.__impl__ = self.__IMPL_C__ else: import warnings warnings.warn('Implementation '+impl+' is not known. Using the fallback python implementation.') self.__impl__ = self.__IMPL_PYTHON__
Sets the implementation of this module Parameters ---------- impl : str One of ["python", "c"]
def _get_prefix_length(number1, number2, bits): """Get the number of leading bits that are same for two numbers. Args: number1: an integer. number2: another integer. bits: the maximum number of bits to compare. Returns: The number of leading bits that are the same for two numbers. """ for i in range(bits): if number1 >> i == number2 >> i: return bits - i return 0
Get the number of leading bits that are same for two numbers. Args: number1: an integer. number2: another integer. bits: the maximum number of bits to compare. Returns: The number of leading bits that are the same for two numbers.
def stream(identifier=None, priority=LOG_INFO, level_prefix=False): r"""Return a file object wrapping a stream to journal. Log messages written to this file as simple newline sepearted text strings are written to the journal. The file will be line buffered, so messages are actually sent after a newline character is written. >>> from systemd import journal >>> stream = journal.stream('myapp') # doctest: +SKIP >>> res = stream.write('message...\n') # doctest: +SKIP will produce the following message in the journal:: PRIORITY=7 SYSLOG_IDENTIFIER=myapp MESSAGE=message... If identifier is None, a suitable default based on sys.argv[0] will be used. This interface can be used conveniently with the print function: >>> from __future__ import print_function >>> stream = journal.stream() # doctest: +SKIP >>> print('message...', file=stream) # doctest: +SKIP priority is the syslog priority, one of `LOG_EMERG`, `LOG_ALERT`, `LOG_CRIT`, `LOG_ERR`, `LOG_WARNING`, `LOG_NOTICE`, `LOG_INFO`, `LOG_DEBUG`. level_prefix is a boolean. If true, kernel-style log priority level prefixes (such as '<1>') are interpreted. See sd-daemon(3) for more information. """ if identifier is None: if not _sys.argv or not _sys.argv[0] or _sys.argv[0] == '-c': identifier = 'python' else: identifier = _sys.argv[0] fd = stream_fd(identifier, priority, level_prefix) return _os.fdopen(fd, 'w', 1)
r"""Return a file object wrapping a stream to journal. Log messages written to this file as simple newline sepearted text strings are written to the journal. The file will be line buffered, so messages are actually sent after a newline character is written. >>> from systemd import journal >>> stream = journal.stream('myapp') # doctest: +SKIP >>> res = stream.write('message...\n') # doctest: +SKIP will produce the following message in the journal:: PRIORITY=7 SYSLOG_IDENTIFIER=myapp MESSAGE=message... If identifier is None, a suitable default based on sys.argv[0] will be used. This interface can be used conveniently with the print function: >>> from __future__ import print_function >>> stream = journal.stream() # doctest: +SKIP >>> print('message...', file=stream) # doctest: +SKIP priority is the syslog priority, one of `LOG_EMERG`, `LOG_ALERT`, `LOG_CRIT`, `LOG_ERR`, `LOG_WARNING`, `LOG_NOTICE`, `LOG_INFO`, `LOG_DEBUG`. level_prefix is a boolean. If true, kernel-style log priority level prefixes (such as '<1>') are interpreted. See sd-daemon(3) for more information.
def p_operation_definition4(self, p): """ operation_definition : operation_type name selection_set """ p[0] = self.operation_cls(p[1])(selections=p[3], name=p[2])
operation_definition : operation_type name selection_set
def check_requirements(to_populate, prompts, helper=False): ''' Iterates through required values, checking to_populate for required values If a key in prompts is missing in to_populate and ``helper==True``, prompts the user using the values in to_populate. Otherwise, raises an error. Parameters ---------- to_populate : dict Data dictionary to fill. Prompts given to the user are taken from this dictionary. prompts : dict Keys and prompts to use when filling ``to_populate`` ''' for kw, prompt in prompts.items(): if helper: if kw not in to_populate: to_populate[kw] = click.prompt(prompt) else: msg = ( 'Required value "{}" not found. ' 'Use helper=True or the --helper ' 'flag for assistance.'.format(kw)) assert kw in to_populate, msg
Iterates through required values, checking to_populate for required values If a key in prompts is missing in to_populate and ``helper==True``, prompts the user using the values in to_populate. Otherwise, raises an error. Parameters ---------- to_populate : dict Data dictionary to fill. Prompts given to the user are taken from this dictionary. prompts : dict Keys and prompts to use when filling ``to_populate``
def is_topology(self, layers=None): ''' valid the topology ''' if layers is None: layers = self.layers layers_nodle = [] result = [] for i, layer in enumerate(layers): if layer.is_delete is False: layers_nodle.append(i) while True: flag_break = True layers_toremove = [] for layer1 in layers_nodle: flag_arrive = True for layer2 in layers[layer1].input: if layer2 in layers_nodle: flag_arrive = False if flag_arrive is True: for layer2 in layers[layer1].output: # Size is error if layers[layer2].set_size(layer1, layers[layer1].size) is False: return False layers_toremove.append(layer1) result.append(layer1) flag_break = False for layer in layers_toremove: layers_nodle.remove(layer) result.append('|') if flag_break: break # There is loop in graph || some layers can't to arrive if layers_nodle: return False return result
valid the topology
def blit_2x( self, console: tcod.console.Console, dest_x: int, dest_y: int, img_x: int = 0, img_y: int = 0, img_width: int = -1, img_height: int = -1, ) -> None: """Blit onto a Console with double resolution. Args: console (Console): Blit destination Console. dest_x (int): Console tile X position starting from the left at 0. dest_y (int): Console tile Y position starting from the top at 0. img_x (int): Left corner pixel of the Image to blit img_y (int): Top corner pixel of the Image to blit img_width (int): Width of the Image to blit. Use -1 for the full Image width. img_height (int): Height of the Image to blit. Use -1 for the full Image height. """ lib.TCOD_image_blit_2x( self.image_c, _console(console), dest_x, dest_y, img_x, img_y, img_width, img_height, )
Blit onto a Console with double resolution. Args: console (Console): Blit destination Console. dest_x (int): Console tile X position starting from the left at 0. dest_y (int): Console tile Y position starting from the top at 0. img_x (int): Left corner pixel of the Image to blit img_y (int): Top corner pixel of the Image to blit img_width (int): Width of the Image to blit. Use -1 for the full Image width. img_height (int): Height of the Image to blit. Use -1 for the full Image height.
def WriteOutput(title, locations, limit, f): """Write html to f for up to limit trips between locations. Args: title: String used in html title locations: list of (lat, lng) tuples limit: maximum number of queries in the html f: a file object """ output_prefix = """ <html> <head> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"> <title>%(title)s</title> </head> <body> Random queries for %(title)s<p> This list of random queries should speed up important manual testing. Here are some things to check when looking at the results of a query. <ul> <li> Check the agency attribution under the trip results: <ul> <li> has correct name and spelling of the agency <li> opens a page with general information about the service </ul> <li> For each alternate trip check that each of these is reasonable: <ul> <li> the total time of the trip <li> the time for each leg. Bad data frequently results in a leg going a long way in a few minutes. <li> the icons and mode names (Tram, Bus, etc) are correct for each leg <li> the route names and headsigns are correctly formatted and not redundant. For a good example see <a href="https://developers.google.com/transit/gtfs/examples/display-to-users"> the screenshots in the Google Transit Feed Specification</a>. <li> the shape line on the map looks correct. Make sure the polyline does not zig-zag, loop, skip stops or jump far away unless the trip does the same thing. <li> the route is active on the day the trip planner returns </ul> </ul> If you find a problem be sure to save the URL. This file is generated randomly. <ol> """ % locals() output_suffix = """ </ol> </body> </html> """ % locals() f.write(transitfeed.EncodeUnicode(output_prefix)) for source, destination in zip(locations[0:limit], locations[1:limit + 1]): f.write(transitfeed.EncodeUnicode("<li>%s\n" % LatLngsToGoogleLink(source, destination))) f.write(transitfeed.EncodeUnicode(output_suffix))
Write html to f for up to limit trips between locations. Args: title: String used in html title locations: list of (lat, lng) tuples limit: maximum number of queries in the html f: a file object
async def chain(*sources): """Chain asynchronous sequences together, in the order they are given. Note: the sequences are not iterated until it is required, so if the operation is interrupted, the remaining sequences will be left untouched. """ for source in sources: async with streamcontext(source) as streamer: async for item in streamer: yield item
Chain asynchronous sequences together, in the order they are given. Note: the sequences are not iterated until it is required, so if the operation is interrupted, the remaining sequences will be left untouched.
def update(self, new_data: Dict[Text, Dict[Text, Text]]): """ Receive an update from a loader. :param new_data: New translation data from the loader """ for locale, data in new_data.items(): if locale not in self.dict: self.dict[locale] = {} self.dict[locale].update(data)
Receive an update from a loader. :param new_data: New translation data from the loader
def get_url_by_label(self, label, asset_content_type=None): """stub""" return self._get_asset_content(self.get_asset_id_by_label(label)).get_url()
stub
def _iop(self, operation, other, *allowed): """An iterative operation operating on multiple values. Consumes iterators to construct a concrete list at time of execution. """ f = self._field if self._combining: # We are a field-compound query fragment, e.g. (Foo.bar & Foo.baz). return reduce(self._combining, (q._iop(operation, other, *allowed) for q in f)) # pylint:disable=protected-access # Optimize this away in production; diagnosic aide. if __debug__ and _complex_safety_check(f, {operation} | set(allowed)): # pragma: no cover raise NotImplementedError("{self!r} does not allow {op} comparison.".format( self=self, op=operation)) def _t(o): for value in o: yield None if value is None else f.transformer.foreign(value, (f, self._document)) other = other if len(other) > 1 else other[0] values = list(_t(other)) return Filter({self._name: {operation: values}})
An iterative operation operating on multiple values. Consumes iterators to construct a concrete list at time of execution.
def protocol(self): """The iperf3 instance protocol valid protocols are 'tcp' and 'udp' :rtype: str """ proto_id = self.lib.iperf_get_test_protocol_id(self._test) if proto_id == SOCK_STREAM: self._protocol = 'tcp' elif proto_id == SOCK_DGRAM: self._protocol = 'udp' return self._protocol
The iperf3 instance protocol valid protocols are 'tcp' and 'udp' :rtype: str
def c2ln(c,l1,l2,n): "char[n] to two unsigned long???" c = c + n l1, l2 = U32(0), U32(0) f = 0 if n == 8: l2 = l2 | (U32(c[7]) << 24) f = 1 if f or (n == 7): l2 = l2 | (U32(c[6]) << 16) f = 1 if f or (n == 6): l2 = l2 | (U32(c[5]) << 8) f = 1 if f or (n == 5): l2 = l2 | U32(c[4]) f = 1 if f or (n == 4): l1 = l1 | (U32(c[3]) << 24) f = 1 if f or (n == 3): l1 = l1 | (U32(c[2]) << 16) f = 1 if f or (n == 2): l1 = l1 | (U32(c[1]) << 8) f = 1 if f or (n == 1): l1 = l1 | U32(c[0]) return (l1, l2)
char[n] to two unsigned long???
def _add_dominance_relation(self, source, target): """add a dominance relation to this docgraph""" # TODO: fix #39, so we don't need to add nodes by hand self.add_node(target, layers={self.ns, self.ns+':unit'}) self.add_edge(source, target, layers={self.ns, self.ns+':discourse'}, edge_type=EdgeTypes.dominance_relation)
add a dominance relation to this docgraph
def parse_done(self, buf: memoryview) -> Tuple[bool, memoryview]: """Parse the continuation line sent by the client to end the ``IDLE`` command. Args: buf: The continuation line to parse. """ match = self._pattern.match(buf) if not match: raise NotParseable(buf) done = match.group(1).upper() == self.continuation buf = buf[match.end(0):] return done, buf
Parse the continuation line sent by the client to end the ``IDLE`` command. Args: buf: The continuation line to parse.
def auto_levels_cb(self, setting, value): """Handle callback related to changes in auto-cut levels.""" # Did we change the method? method = self.t_['autocut_method'] params = self.t_.get('autocut_params', []) params = dict(params) if method != str(self.autocuts): ac_class = AutoCuts.get_autocuts(method) self.autocuts = ac_class(self.logger, **params) else: self.autocuts.update_params(**params) # Redo the auto levels #if self.t_['autocuts'] != 'off': # NOTE: users seems to expect that when the auto cuts parameters # are changed that the cuts should be immediately recalculated self.auto_levels()
Handle callback related to changes in auto-cut levels.
def add_plugin(plugin, directory=None): """Adds the specified plugin. This returns False if it was already added.""" repo = require_repo(directory) plugins = get_value(repo, 'plugins', expect_type=dict) if plugin in plugins: return False plugins[plugin] = {} set_value(repo, 'plugins', plugins) return True
Adds the specified plugin. This returns False if it was already added.
def add_embed_field(self, **kwargs): """ set field of embed :keyword name: name of the field :keyword value: value of the field :keyword inline: (optional) whether or not this field should display inline """ self.fields.append({ 'name': kwargs.get('name'), 'value': kwargs.get('value'), 'inline': kwargs.get('inline', True) })
set field of embed :keyword name: name of the field :keyword value: value of the field :keyword inline: (optional) whether or not this field should display inline
def _set_adjustment_threshold(self, v, load=False): """ Setter method for adjustment_threshold, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/autobw_template/adjustment_threshold (container) If this variable is read-only (config: false) in the source YANG file, then _set_adjustment_threshold is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_adjustment_threshold() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=adjustment_threshold.adjustment_threshold, is_container='container', presence=False, yang_name="adjustment-threshold", rest_name="adjustment-threshold", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set adjustment-threshold', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """adjustment_threshold must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=adjustment_threshold.adjustment_threshold, is_container='container', presence=False, yang_name="adjustment-threshold", rest_name="adjustment-threshold", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set adjustment-threshold', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)""", }) self.__adjustment_threshold = t if hasattr(self, '_set'): self._set()
Setter method for adjustment_threshold, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/autobw_template/adjustment_threshold (container) If this variable is read-only (config: false) in the source YANG file, then _set_adjustment_threshold is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_adjustment_threshold() directly.
def exit_sync(self): ''' Waiting for all threads to appear, then continue. ''' if self._scan_threads and self.current_module_handle not in [t.name for t in self._scan_threads]: raise RuntimeError('Thread name "%s" is not valid.') if self._scan_threads and self.current_module_handle not in self._curr_sync_threads: raise RuntimeError('Thread "%s" is not reading FIFO.') with self._sync_lock: self._curr_sync_threads.remove(self.current_module_handle) self._exit_sync_event.clear() while not self._exit_sync_event.wait(0.01): if self.abort_run.is_set(): break with self._sync_lock: if len(set(self._curr_sync_threads) & set([t.name for t in self._scan_threads if t.is_alive()])) == 0 or not self._scan_threads: self._exit_sync_event.set()
Waiting for all threads to appear, then continue.
def get_by_name(self, name, style_type = None): """Find style by it's descriptive name. :Returns: Returns found style of type :class:`ooxml.doc.Style`. """ for st in self.styles.values(): if st: if st.name == name: return st if style_type and not st: st = self.styles.get(self.default_styles[style_type], None) return st
Find style by it's descriptive name. :Returns: Returns found style of type :class:`ooxml.doc.Style`.
def POST(self, *args, **kwargs): """ POST request """ return self._handle_api(self.API_POST, args, kwargs)
POST request
def start(self): """Public method for initiating connectivity with the emby server.""" asyncio.ensure_future(self.register(), loop=self._event_loop) if self._own_loop: _LOGGER.info("Starting up our own event loop.") self._event_loop.run_forever() self._event_loop.close() _LOGGER.info("Connection shut down.")
Public method for initiating connectivity with the emby server.
def parse(self, data, extent): # type: (bytes, int) -> None ''' Parse the passed in data into a UDF NSR Volume Structure. Parameters: data - The data to parse. extent - The extent that this descriptor currently lives at. Returns: Nothing. ''' if self._initialized: raise pycdlibexception.PyCdlibInternalError('UDF NSR Volume Structure already initialized') (structure_type, self.standard_ident, structure_version, reserved_unused) = struct.unpack_from(self.FMT, data, 0) if structure_type != 0: raise pycdlibexception.PyCdlibInvalidISO('Invalid structure type') if self.standard_ident not in [b'NSR02', b'NSR03']: raise pycdlibexception.PyCdlibInvalidISO('Invalid standard identifier') if structure_version != 1: raise pycdlibexception.PyCdlibInvalidISO('Invalid structure version') self.orig_extent_loc = extent self._initialized = True
Parse the passed in data into a UDF NSR Volume Structure. Parameters: data - The data to parse. extent - The extent that this descriptor currently lives at. Returns: Nothing.
def derivative(f, t): """Fourth-order finite-differencing with non-uniform time steps The formula for this finite difference comes from Eq. (A 5b) of "Derivative formulas and errors for non-uniformly spaced points" by M. K. Bowen and Ronald Smith. As explained in their Eqs. (B 9b) and (B 10b), this is a fourth-order formula -- though that's a squishy concept with non-uniform time steps. TODO: If there are fewer than five points, the function should revert to simpler (lower-order) formulas. """ dfdt = np.empty_like(f) if (f.ndim == 1): _derivative(f, t, dfdt) elif (f.ndim == 2): _derivative_2d(f, t, dfdt) elif (f.ndim == 3): _derivative_3d(f, t, dfdt) else: raise NotImplementedError("Taking derivatives of {0}-dimensional arrays is not yet implemented".format(f.ndim)) return dfdt
Fourth-order finite-differencing with non-uniform time steps The formula for this finite difference comes from Eq. (A 5b) of "Derivative formulas and errors for non-uniformly spaced points" by M. K. Bowen and Ronald Smith. As explained in their Eqs. (B 9b) and (B 10b), this is a fourth-order formula -- though that's a squishy concept with non-uniform time steps. TODO: If there are fewer than five points, the function should revert to simpler (lower-order) formulas.
def send(self, data, opcode=ABNF.OPCODE_TEXT): """ send message. data: message to send. If you set opcode to OPCODE_TEXT, data must be utf-8 string or unicode. opcode: operation code of data. default is OPCODE_TEXT. """ if not self.sock or self.sock.send(data, opcode) == 0: raise WebSocketConnectionClosedException( "Connection is already closed.")
send message. data: message to send. If you set opcode to OPCODE_TEXT, data must be utf-8 string or unicode. opcode: operation code of data. default is OPCODE_TEXT.
def _from_dict(cls, _dict): """Initialize a Word object from a json dictionary.""" args = {} if 'word' in _dict: args['word'] = _dict.get('word') else: raise ValueError( 'Required property \'word\' not present in Word JSON') if 'sounds_like' in _dict: args['sounds_like'] = _dict.get('sounds_like') else: raise ValueError( 'Required property \'sounds_like\' not present in Word JSON') if 'display_as' in _dict: args['display_as'] = _dict.get('display_as') else: raise ValueError( 'Required property \'display_as\' not present in Word JSON') if 'count' in _dict: args['count'] = _dict.get('count') else: raise ValueError( 'Required property \'count\' not present in Word JSON') if 'source' in _dict: args['source'] = _dict.get('source') else: raise ValueError( 'Required property \'source\' not present in Word JSON') if 'error' in _dict: args['error'] = [ WordError._from_dict(x) for x in (_dict.get('error')) ] return cls(**args)
Initialize a Word object from a json dictionary.
def vn_info(call=None, kwargs=None): ''' Retrieves information for the virtual network. .. versionadded:: 2016.3.0 name The name of the virtual network for which to gather information. Can be used instead of ``vn_id``. vn_id The ID of the virtual network for which to gather information. Can be used instead of ``name``. CLI Example: .. code-block:: bash salt-cloud -f vn_info opennebula vn_id=3 salt-cloud --function vn_info opennebula name=public ''' if call != 'function': raise SaltCloudSystemExit( 'The vn_info function must be called with -f or --function.' ) if kwargs is None: kwargs = {} name = kwargs.get('name', None) vn_id = kwargs.get('vn_id', None) if vn_id: if name: log.warning( 'Both the \'vn_id\' and \'name\' arguments were provided. ' '\'vn_id\' will take precedence.' ) elif name: vn_id = get_vn_id(kwargs={'name': name}) else: raise SaltCloudSystemExit( 'The vn_info function requires either a \'name\' or a \'vn_id\' ' 'to be provided.' ) server, user, password = _get_xml_rpc() auth = ':'.join([user, password]) response = server.one.vn.info(auth, int(vn_id)) if response[0] is False: return response[1] else: info = {} tree = _get_xml(response[1]) info[tree.find('NAME').text] = _xml_to_dict(tree) return info
Retrieves information for the virtual network. .. versionadded:: 2016.3.0 name The name of the virtual network for which to gather information. Can be used instead of ``vn_id``. vn_id The ID of the virtual network for which to gather information. Can be used instead of ``name``. CLI Example: .. code-block:: bash salt-cloud -f vn_info opennebula vn_id=3 salt-cloud --function vn_info opennebula name=public
def save_filelist(self, opFile, opFormat, delim=',', qu='"'): """ uses a List of files and collects meta data on them and saves to an text file as a list or with metadata depending on opFormat. """ op_folder = os.path.dirname(opFile) if op_folder is not None: # short filename passed if not os.path.exists(op_folder): os.makedirs(op_folder) with open(opFile,'w') as fout: fout.write("fullFilename" + delim) for colHeading in opFormat: fout.write(colHeading + delim) fout.write('\n') for f in self.filelist: line = qu + f + qu + delim try: for fld in opFormat: if fld == "name": line = line + qu + os.path.basename(f) + qu + delim if fld == "date": line = line + qu + self.GetDateAsString(f) + qu + delim if fld == "size": line = line + qu + str(os.path.getsize(f)) + qu + delim if fld == "path": line = line + qu + os.path.dirname(f) + qu + delim except IOError: line += '\n' # no metadata try: fout.write (str(line.encode('ascii', 'ignore').decode('utf-8'))) fout.write ('\n') except IOError: #print("Cant print line - cls_filelist line 304") pass
uses a List of files and collects meta data on them and saves to an text file as a list or with metadata depending on opFormat.
def resource_types(self): """resource types used by the collection.""" rtypes = set() for p in self.policies: rtypes.add(p.resource_type) return rtypes
resource types used by the collection.
def _synthesize_single_python_helper( self, text, voice_code, output_file_path=None, return_audio_data=True ): """ This is an helper function to synthesize a single text fragment via a Python call. If ``output_file_path`` is ``None``, the audio data will not persist to file at the end of the method. :rtype: tuple (result, (duration, sample_rate, encoding, data)) """ # return zero if text is the empty string if len(text) == 0: # # NOTE values of sample_rate, encoding, data # do not matter if the duration is 0.000, # so set them to None instead of the more precise: # return (True, (TimeValue("0.000"), 16000, "pcm_s16le", numpy.array([]))) # self.log(u"len(text) is zero: returning 0.000") return (True, (TimeValue("0.000"), None, None, None)) # # NOTE in this example, we assume that the Speect voice data files # are located in the same directory of this .py source file # and that the voice JSON file is called "voice.json" # # NOTE the voice_code value is ignored in this example, # since we have only one TTS voice, # but in general one might select a voice file to load, # depending on voice_code; # in fact, we could have created the ``voice`` object # only once, in the constructor, instead of creating it # each time this function is invoked, # achieving slightly faster synthesis # voice_json_path = gf.safe_str(gf.absolute_path("voice.json", __file__)) voice = speect.SVoice(voice_json_path) utt = voice.synth(text) audio = utt.features["audio"] if output_file_path is None: self.log(u"output_file_path is None => not saving to file") else: self.log(u"output_file_path is not None => saving to file...") # NOTE apparently, save_riff needs the path to be a byte string audio.save_riff(gf.safe_str(output_file_path)) self.log(u"output_file_path is not None => saving to file... 
done") # return immediately if returning audio data is not needed if not return_audio_data: self.log(u"return_audio_data is True => return immediately") return (True, None) # get length and data using speect Python API self.log(u"return_audio_data is True => read and return audio data") waveform = audio.get_audio_waveform() audio_sample_rate = int(waveform["samplerate"]) audio_length = TimeValue(audio.num_samples() / audio_sample_rate) audio_format = "pcm16" audio_samples = numpy.fromstring( waveform["samples"], dtype=numpy.int16 ).astype("float64") / 32768 return (True, ( audio_length, audio_sample_rate, audio_format, audio_samples ))
This is an helper function to synthesize a single text fragment via a Python call. If ``output_file_path`` is ``None``, the audio data will not persist to file at the end of the method. :rtype: tuple (result, (duration, sample_rate, encoding, data))
def quantile(x, q, interpolation_method='fraction'): """ Compute sample quantile or quantiles of the input array. For example, q=0.5 computes the median. The `interpolation_method` parameter supports three values, namely `fraction` (default), `lower` and `higher`. Interpolation is done only, if the desired quantile lies between two data points `i` and `j`. For `fraction`, the result is an interpolated value between `i` and `j`; for `lower`, the result is `i`, for `higher` the result is `j`. Parameters ---------- x : ndarray Values from which to extract score. q : scalar or array Percentile at which to extract score. interpolation_method : {'fraction', 'lower', 'higher'}, optional This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: - fraction: `i + (j - i)*fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. -lower: `i`. - higher: `j`. Returns ------- score : float Score at percentile. Examples -------- >>> from scipy import stats >>> a = np.arange(100) >>> stats.scoreatpercentile(a, 50) 49.5 """ x = np.asarray(x) mask = isna(x) x = x[~mask] values = np.sort(x) def _interpolate(a, b, fraction): """Returns the point at the given fraction between a and b, where 'fraction' must be between 0 and 1. """ return a + (b - a) * fraction def _get_score(at): if len(values) == 0: return np.nan idx = at * (len(values) - 1) if idx % 1 == 0: score = values[int(idx)] else: if interpolation_method == 'fraction': score = _interpolate(values[int(idx)], values[int(idx) + 1], idx % 1) elif interpolation_method == 'lower': score = values[np.floor(idx)] elif interpolation_method == 'higher': score = values[np.ceil(idx)] else: raise ValueError("interpolation_method can only be 'fraction' " ", 'lower' or 'higher'") return score if is_scalar(q): return _get_score(q) else: q = np.asarray(q, np.float64) return algos.arrmap_float64(q, _get_score)
Compute sample quantile or quantiles of the input array. For example, q=0.5 computes the median. The `interpolation_method` parameter supports three values, namely `fraction` (default), `lower` and `higher`. Interpolation is done only, if the desired quantile lies between two data points `i` and `j`. For `fraction`, the result is an interpolated value between `i` and `j`; for `lower`, the result is `i`, for `higher` the result is `j`. Parameters ---------- x : ndarray Values from which to extract score. q : scalar or array Percentile at which to extract score. interpolation_method : {'fraction', 'lower', 'higher'}, optional This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: - fraction: `i + (j - i)*fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. -lower: `i`. - higher: `j`. Returns ------- score : float Score at percentile. Examples -------- >>> from scipy import stats >>> a = np.arange(100) >>> stats.scoreatpercentile(a, 50) 49.5
def from_two_bytes(bytes): """ Return an integer from two 7 bit bytes. """ lsb, msb = bytes try: # Usually bytes have been converted to integers with ord already return msb << 7 | lsb except TypeError: # But add this for easy testing # One of them can be a string, or both try: lsb = ord(lsb) except TypeError: pass try: msb = ord(msb) except TypeError: pass return msb << 7 | lsb
Return an integer from two 7 bit bytes.
def list(self, **kwargs): """ https://api.slack.com/methods/groups.list """ if kwargs: self.params.update(kwargs) return FromUrl('https://slack.com/api/groups.list', self._requests)(data=self.params).get()
https://api.slack.com/methods/groups.list
def _find_valid_block(self, table, worksheet, flags, units, used_cells, start_pos, end_pos): ''' Searches for the next location where a valid block could reside and constructs the block object representing that location. ''' for row_index in range(len(table)): if row_index < start_pos[0] or row_index > end_pos[0]: continue convRow = table[row_index] used_row = used_cells[row_index] for column_index, conv in enumerate(convRow): if (column_index < start_pos[1] or column_index > end_pos[1] or used_row[column_index]): continue # Is non empty cell? if not is_empty_cell(conv): block_start, block_end = self._find_block_bounds(table, used_cells, (row_index, column_index), start_pos, end_pos) if (block_end[0] > block_start[0] and block_end[1] > block_start[1]): try: return TableBlock(table, used_cells, block_start, block_end, worksheet, flags, units, self.assume_complete_blocks, self.max_title_rows) except InvalidBlockError: pass # Prevent infinite loops if something goes wrong used_cells[row_index][column_index] = True
Searches for the next location where a valid block could reside and constructs the block object representing that location.
def lstm_seq2seq_internal_attention_bid_encoder(inputs, targets, hparams, train): """LSTM seq2seq model with attention, main step used for training.""" with tf.variable_scope("lstm_seq2seq_attention_bid_encoder"): inputs_length = common_layers.length_from_embedding(inputs) # Flatten inputs. inputs = common_layers.flatten4d3d(inputs) # LSTM encoder. encoder_outputs, final_encoder_state = lstm_bid_encoder( inputs, inputs_length, hparams, train, "encoder") # LSTM decoder with attention shifted_targets = common_layers.shift_right(targets) # Add 1 to account for the padding added to the left from shift_right targets_length = common_layers.length_from_embedding(shifted_targets) + 1 hparams_decoder = copy.copy(hparams) hparams_decoder.hidden_size = 2 * hparams.hidden_size decoder_outputs = lstm_attention_decoder( common_layers.flatten4d3d(shifted_targets), hparams_decoder, train, "decoder", final_encoder_state, encoder_outputs, inputs_length, targets_length) return tf.expand_dims(decoder_outputs, axis=2)
LSTM seq2seq model with attention, main step used for training.
def b58encode(b, errors='strict'): "Encode bytes to a base58-encoded string." len_ = len(b) # Convert big-endian bytes to integer n = BigInteger.deserialize(BytesIO(b), len_) # Divide that integer into base58 res = [] while n > 0: n, r = divmod (n, 58) res.append(b58digits[r]) res = ''.join(res[::-1]) # Encode leading zeros as base58 zeros pad = 0 for c in b: if c == six.int2byte(0): pad += 1 else: break return (b58digits[0] * pad + res, len_)
Encode bytes to a base58-encoded string.
def _determine_heterogen_chain_type(residue_types): '''We distinguish three types of heterogen chain: i) all solution; ii) all ligand; or iii) other (a mix of solution, ligand, and/or ions). residue_types should be a Set of sequence identifers e.g. GTP, ZN, HOH. ''' residue_type_id_lengths = set(map(len, residue_types)) if (len(residue_types) > 0): if len(residue_types.difference(common_solution_ids)) == 0: return 'Solution' elif (len(residue_type_id_lengths) == 1) and (3 in residue_type_id_lengths) and (len(residue_types.difference(common_solution_ids)) > 0): # The last expression discounts chains which only contain solution molecules e.g. HOH return 'Ligand' return 'Heterogen'
We distinguish three types of heterogen chain: i) all solution; ii) all ligand; or iii) other (a mix of solution, ligand, and/or ions). residue_types should be a Set of sequence identifiers e.g. GTP, ZN, HOH.
def get_user_profile_photos(self, user_id, offset=None, limit=None): """ Retrieves the user profile photos of the person with 'user_id' See https://core.telegram.org/bots/api#getuserprofilephotos :param user_id: :param offset: :param limit: :return: API reply. """ result = apihelper.get_user_profile_photos(self.token, user_id, offset, limit) return types.UserProfilePhotos.de_json(result)
Retrieves the user profile photos of the person with 'user_id' See https://core.telegram.org/bots/api#getuserprofilephotos :param user_id: :param offset: :param limit: :return: API reply.
def _build_request(self, method, url, params=None): """Build a function to do an API request "We have to go deeper" or "It's functions all the way down!" """ full_params = self._get_base_params() if params is not None: full_params.update(params) try: request_func = lambda u, d: \ getattr(self._connector, method.lower())(u, params=d, headers=self._request_headers) except AttributeError: raise ApiException('Invalid request method') # TODO: need to catch a network here and raise as ApiNetworkException def do_request(): logger.debug('Sending %s request "%s" with params: %r', method, url, full_params) try: resp = request_func(url, full_params) logger.debug('Received response code: %d', resp.status_code) except requests.RequestException as err: raise ApiNetworkException(err) try: resp_json = resp.json() except TypeError: resp_json = resp.json method_returns_list = False try: resp_json['error'] except TypeError: logger.warn('Api method did not return map: %s', method) method_returns_list = True except KeyError: logger.warn('Api method did not return map with error key: %s', method) if method_returns_list is None: raise ApiBadResponseException(resp.content) elif method_returns_list: data = resp_json else: try: if resp_json['error']: raise ApiError('%s: %s' % (resp_json['code'], resp_json['message'])) except KeyError: data = resp_json else: data = resp_json['data'] self._do_post_request_tasks(data) self._last_response = resp return data return do_request
Build a function to do an API request "We have to go deeper" or "It's functions all the way down!"
def doc_open(): """Build the HTML docs and open them in a web browser.""" doc_index = os.path.join(DOCS_DIRECTORY, 'build', 'html', 'index.html') if sys.platform == 'darwin': # Mac OS X subprocess.check_call(['open', doc_index]) elif sys.platform == 'win32': # Windows subprocess.check_call(['start', doc_index], shell=True) elif sys.platform == 'linux2': # All freedesktop-compatible desktops subprocess.check_call(['xdg-open', doc_index]) else: print_failure_message( "Unsupported platform. Please open `{0}' manually.".format( doc_index))
Build the HTML docs and open them in a web browser.
def lyap_e_len(**kwargs): """ Helper function that calculates the minimum number of data points required to use lyap_e. Note that none of the required parameters may be set to None. Kwargs: kwargs(dict): arguments used for lyap_e (required: emb_dim, matrix_dim, min_nb and min_tsep) Returns: minimum number of data points required to call lyap_e with the given parameters """ m = (kwargs['emb_dim'] - 1) // (kwargs['matrix_dim'] - 1) # minimum length required to find single orbit vector min_len = kwargs['emb_dim'] # we need to follow each starting point of an orbit vector for m more steps min_len += m # we need min_tsep * 2 + 1 orbit vectors to find neighbors for each min_len += kwargs['min_tsep'] * 2 # we need at least min_nb neighbors for each orbit vector min_len += kwargs['min_nb'] return min_len
Helper function that calculates the minimum number of data points required to use lyap_e. Note that none of the required parameters may be set to None. Kwargs: kwargs(dict): arguments used for lyap_e (required: emb_dim, matrix_dim, min_nb and min_tsep) Returns: minimum number of data points required to call lyap_e with the given parameters
def readlinkabs(l): """ Return an absolute path for the destination of a symlink """ assert (os.path.islink(l)) p = os.readlink(l) if os.path.isabs(p): return os.path.abspath(p) return os.path.abspath(os.path.join(os.path.dirname(l), p))
Return an absolute path for the destination of a symlink
def login(self, login, password, url=None): """login page """ auth = self._auth(login, password) cherrypy.session['isadmin'] = auth['isadmin'] cherrypy.session['connected'] = auth['connected'] if auth['connected']: if auth['isadmin']: message = \ "login success for user '%(user)s' as administrator" % { 'user': login } else: message = \ "login success for user '%(user)s' as normal user" % { 'user': login } cherrypy.log.error( msg=message, severity=logging.INFO ) cherrypy.session[SESSION_KEY] = cherrypy.request.login = login if url is None: redirect = "/" else: redirect = url raise cherrypy.HTTPRedirect(redirect) else: message = "login failed for user '%(user)s'" % { 'user': login } cherrypy.log.error( msg=message, severity=logging.WARNING ) if url is None: qs = '' else: qs = '?url=' + quote_plus(url) raise cherrypy.HTTPRedirect("/signin" + qs)
login page
def querytime(self, value): """ Sets self._querytime as well as self.query.querytime. :param value: None or datetime :return: """ self._querytime = value self.query.querytime = value
Sets self._querytime as well as self.query.querytime. :param value: None or datetime :return:
def local_voxelize(mesh, point, pitch, radius, fill=True, **kwargs):
    """
    Voxelize a mesh in the region of a cube around a point. When fill=True,
    uses proximity.contains to fill the resulting voxels so may be meaningless
    for non-watertight meshes. Useful to reduce memory cost for small values of
    pitch as opposed to global voxelization.

    Parameters
    -----------
    mesh : trimesh.Trimesh
      Source geometry
    point : (3, ) float
      Point in space to voxelize around
    pitch :  float
      Side length of a single voxel cube
    radius : int
      Number of voxel cubes to return in each direction.
    kwargs : parameters to pass to voxelize_subdivide

    Returns
    -----------
    voxels : (m, m, m) bool
      Array of local voxels where m=2*radius+1
    origin_position : (3,) float
      Position of the voxel grid origin in space
    """
    from scipy import ndimage

    # make sure point is correct type/shape
    point = np.asanyarray(point, dtype=np.float64).reshape(3)
    # this is a gotcha- radius sounds a lot like it should be in
    # float model space, not int voxel space so check
    if not isinstance(radius, int):
        raise ValueError('radius needs to be an integer number of cubes!')

    # Bounds of region
    bounds = np.concatenate((point - (radius + 0.5) * pitch,
                             point + (radius + 0.5) * pitch))

    # faces that intersect axis aligned bounding box
    faces = list(mesh.triangles_tree.intersection(bounds))

    # didn't hit anything so exit
    if len(faces) == 0:
        # BUG FIX: np.bool was deprecated and removed (numpy 1.24);
        # the builtin bool is the correct dtype here.
        return np.array([], dtype=bool), np.zeros(3)

    local = mesh.submesh([[f] for f in faces], append=True)

    # Translate mesh so point is at 0,0,0
    local.apply_translation(-point)
    sparse, origin = voxelize_subdivide(local, pitch, **kwargs)
    matrix = sparse_to_matrix(sparse)

    # Find voxel index for point
    center = np.round(-origin / pitch).astype(np.int64)

    # pad matrix if necessary so the requested window fits
    prepad = np.maximum(radius - center, 0)
    postpad = np.maximum(center + radius + 1 - matrix.shape, 0)

    matrix = np.pad(matrix, np.stack((prepad, postpad), axis=-1),
                    mode='constant')
    center += prepad

    # Extract voxels within the bounding box
    voxels = matrix[center[0] - radius:center[0] + radius + 1,
                    center[1] - radius:center[1] + radius + 1,
                    center[2] - radius:center[2] + radius + 1]
    local_origin = point - radius * pitch  # origin of local voxels

    # Fill internal regions
    if fill:
        # BUG FIX: scipy.ndimage.measurements / scipy.ndimage.morphology
        # submodules are deprecated and removed in recent SciPy; these
        # functions have always been available at the scipy.ndimage
        # top level.
        regions, n = ndimage.label(~voxels)
        distance = ndimage.distance_transform_cdt(~voxels)
        # One representative (deepest) point per connected empty region.
        representatives = [
            np.unravel_index((distance * (regions == i)).argmax(),
                             distance.shape) for i in range(1, n + 1)]
        contains = mesh.contains(
            np.asarray(representatives) * pitch + local_origin)

        where = np.where(contains)[0] + 1
        # use in1d vs isin for older numpy versions
        internal = np.in1d(regions.flatten(), where).reshape(regions.shape)
        voxels = np.logical_or(voxels, internal)

    return voxels, local_origin
Voxelize a mesh in the region of a cube around a point. When fill=True, uses proximity.contains to fill the resulting voxels so may be meaningless for non-watertight meshes. Useful to reduce memory cost for small values of pitch as opposed to global voxelization. Parameters ----------- mesh : trimesh.Trimesh Source geometry point : (3, ) float Point in space to voxelize around pitch : float Side length of a single voxel cube radius : int Number of voxel cubes to return in each direction. kwargs : parameters to pass to voxelize_subdivide Returns ----------- voxels : (m, m, m) bool Array of local voxels where m=2*radius+1 origin_position : (3,) float Position of the voxel grid origin in space
def get_jobs_from_queue(self, queue: str, max_jobs: int) -> List[Job]: """Get jobs from a queue.""" jobs_json_string = self._run_script( self._get_jobs_from_queue, self._to_namespaced(queue), self._to_namespaced(RUNNING_JOBS_KEY.format(self._id)), JobStatus.RUNNING.value, max_jobs ) jobs = json.loads(jobs_json_string.decode()) jobs = [Job.deserialize(job) for job in jobs] return jobs
Get jobs from a queue.
def get_macs(vm_): ''' Return a list off MAC addresses from the named vm CLI Example: .. code-block:: bash salt '*' virt.get_macs <vm name> ''' macs = [] nics = get_nics(vm_) if nics is None: return None for nic in nics: macs.append(nic) return macs
Return a list of MAC addresses from the named vm CLI Example: .. code-block:: bash salt '*' virt.get_macs <vm name>
def count(self, with_limit_and_skip=False): """Get the size of the results set for this query. Returns the number of documents in the results set for this query. Does not take :meth:`limit` and :meth:`skip` into account by default - set `with_limit_and_skip` to ``True`` if that is the desired behavior. Raises :class:`~pymongo.errors.OperationFailure` on a database error. When used with MongoDB >= 2.6, :meth:`~count` uses any :meth:`~hint` applied to the query. In the following example the hint is passed to the count command: collection.find({'field': 'value'}).hint('field_1').count() The :meth:`count` method obeys the :attr:`~pymongo.collection.Collection.read_preference` of the :class:`~pymongo.collection.Collection` instance on which :meth:`~pymongo.collection.Collection.find` was called. :Parameters: - `with_limit_and_skip` (optional): take any :meth:`limit` or :meth:`skip` that has been applied to this cursor into account when getting the count .. note:: The `with_limit_and_skip` parameter requires server version **>= 1.1.4-** .. versionchanged:: 2.8 The :meth:`~count` method now supports :meth:`~hint`. """ validate_boolean("with_limit_and_skip", with_limit_and_skip) cmd = SON([("count", self.__collection.name), ("query", self.__spec)]) if self.__max_time_ms is not None: cmd["maxTimeMS"] = self.__max_time_ms if self.__comment: cmd["$comment"] = self.__comment if self.__hint is not None: cmd["hint"] = self.__hint if with_limit_and_skip: if self.__limit: cmd["limit"] = self.__limit if self.__skip: cmd["skip"] = self.__skip return self.__collection._count(cmd, self.__collation)
Get the size of the results set for this query. Returns the number of documents in the results set for this query. Does not take :meth:`limit` and :meth:`skip` into account by default - set `with_limit_and_skip` to ``True`` if that is the desired behavior. Raises :class:`~pymongo.errors.OperationFailure` on a database error. When used with MongoDB >= 2.6, :meth:`~count` uses any :meth:`~hint` applied to the query. In the following example the hint is passed to the count command: collection.find({'field': 'value'}).hint('field_1').count() The :meth:`count` method obeys the :attr:`~pymongo.collection.Collection.read_preference` of the :class:`~pymongo.collection.Collection` instance on which :meth:`~pymongo.collection.Collection.find` was called. :Parameters: - `with_limit_and_skip` (optional): take any :meth:`limit` or :meth:`skip` that has been applied to this cursor into account when getting the count .. note:: The `with_limit_and_skip` parameter requires server version **>= 1.1.4-** .. versionchanged:: 2.8 The :meth:`~count` method now supports :meth:`~hint`.
def update_instance_extent(self, instance, module, operation): """Updates a new instance that was added to a module to be complete if the end token is present in any remaining, overlapping operations. """ #Essentially, we want to look in the rest of the statements that are #part of the current operation to see how many more of them pertain #to the new instance that was added. #New signatures only result in instances being added if mode is "insert" #or "replace". In both cases, the important code is in the buffered #statements, *not* the cached version. Iterate the remaining statements #in the buffer and look for the end_token for the instance. If we don't #find it, check for overlap between the operations' index specifiers. instance.end -= operation.curlength end_token = instance.end_token (ibuffer, length) = self._find_end_token(end_token, operation) cum_length = length opstack = [operation] while ibuffer is None and opstack[-1].index + 1 < len(self._operations): #We didn't find a natural termination to the new instance. Look for #overlap in the operations noperation = self._operations[opstack[-1].index + 1] #We only want to check the next operation if it is a neighbor #in line numbers in the buffer. if noperation.ibuffer[0] - opstack[-1].ibuffer[1] == 1: (ibuffer, length) = self._find_end_token(end_token, noperation) cum_length += length opstack.append(noperation) else: break if ibuffer is not None: instance.incomplete = False instance.end += cum_length for op in opstack: op.bar_extent = True op.set_element(instance) else: #We set the element for the current operation to be the new instance #for the rest of statements in its set. operation.set_element(instance)
Updates a new instance that was added to a module to be complete if the end token is present in any remaining, overlapping operations.
def temporal_firing_rate(self,time_dimension=0,resolution=1.0,units=None, min_t=None,max_t=None,weight_function=None,normalize_time=False, normalize_n=False,start_units_with_0=True,cell_dimension='N'): """ Outputs a time histogram of spikes. `bins`: number of bins (default is 1ms bins from 0 to t_max) `weight_function`: if set, computes a weighted histogram, dependent on the (index, time) tuples of each spike weight_function = lambda x: weight_map.flatten()[array(x[:,0],dtype=int)] `normalize_time` `normalize_n`: normalize by the length of time (such that normal output is Hz) and/or number of units (such that output is Hz/unit, determined with unique values in cell_dimension) Generally does not make sense when using a weight_function other than 'count'. `start_units_with_0`: starts indizes from 0 instead from the actual index """ units = self._default_units(units) if self.data_format == 'spike_times': converted_dimension,st = self.spike_times.get_converted(0,units) if min_t is None: min_t = converted_dimension.min if max_t is None: max_t = converted_dimension.max st = st[(st>=min_t)*(st<max_t)] bins = converted_dimension.linspace_by_resolution(resolution,end_at_end=True,extra_bins=0) H,edg = np.histogram(st,bins=bins) if normalize_time: H = H/(convert_time(resolution,from_units=units,to_units='s')) # make it Hertz if normalize_n: H = H/(len(np.unique(self.spike_times[cell_dimension]))) return H,edg
Outputs a time histogram of spikes. `bins`: number of bins (default is 1ms bins from 0 to t_max) `weight_function`: if set, computes a weighted histogram, dependent on the (index, time) tuples of each spike weight_function = lambda x: weight_map.flatten()[array(x[:,0],dtype=int)] `normalize_time` `normalize_n`: normalize by the length of time (such that normal output is Hz) and/or number of units (such that output is Hz/unit, determined with unique values in cell_dimension) Generally does not make sense when using a weight_function other than 'count'. `start_units_with_0`: starts indizes from 0 instead from the actual index
def start(): ''' Start simple_server() ''' from wsgiref.simple_server import make_server # When started outside of salt-api __opts__ will not be injected if '__opts__' not in globals(): globals()['__opts__'] = get_opts() if __virtual__() is False: raise SystemExit(1) mod_opts = __opts__.get(__virtualname__, {}) # pylint: disable=C0103 httpd = make_server('localhost', mod_opts['port'], application) try: httpd.serve_forever() except KeyboardInterrupt: raise SystemExit(0)
Start simple_server()
def get_user_metadata( self, bucket: str, key: str ) -> typing.Dict[str, str]: """ Retrieves the user metadata for a given object in a given bucket. If the platform has any mandatory prefixes or suffixes for the metadata keys, they should be stripped before being returned. :param bucket: the bucket the object resides in. :param key: the key of the object for which metadata is being retrieved. :return: a dictionary mapping metadata keys to metadata values. """ try: response = self.get_all_metadata(bucket, key) metadata = response['Metadata'].copy() response = self.s3_client.get_object_tagging( Bucket=bucket, Key=key, ) for tag in response['TagSet']: key, value = tag['Key'], tag['Value'] metadata[key] = value return metadata except botocore.exceptions.ClientError as ex: if str(ex.response['Error']['Code']) == \ str(requests.codes.not_found): raise BlobNotFoundError(f"Could not find s3://{bucket}/{key}") from ex raise BlobStoreUnknownError(ex)
Retrieves the user metadata for a given object in a given bucket. If the platform has any mandatory prefixes or suffixes for the metadata keys, they should be stripped before being returned. :param bucket: the bucket the object resides in. :param key: the key of the object for which metadata is being retrieved. :return: a dictionary mapping metadata keys to metadata values.
def StartFlow(client_id=None,
              cpu_limit=None,
              creator=None,
              flow_args=None,
              flow_cls=None,
              network_bytes_limit=None,
              original_flow=None,
              output_plugins=None,
              start_at=None,
              parent_flow_obj=None,
              parent_hunt_id=None,
              **kwargs):
  """The main factory function for creating and executing a new flow.

  Args:
    client_id: ID of the client this flow should run on.
    cpu_limit: CPU limit in seconds for this flow.
    creator: Username that requested this flow.
    flow_args: An arg protocol buffer which is an instance of the required
      flow's args_type class attribute.
    flow_cls: Class of the flow that should be started.
    network_bytes_limit: Limit on the network traffic this flow can generated.
    original_flow: A FlowReference object in case this flow was copied from
      another flow.
    output_plugins: An OutputPluginDescriptor object indicating what output
      plugins should be used for this flow.
    start_at: If specified, flow will be started not immediately, but at a
      given time.
    parent_flow_obj: A parent flow object. None if this is a top level flow.
    parent_hunt_id: String identifying parent hunt. Can't be passed together
      with parent_flow_obj.
    **kwargs: If args or runner_args are not specified, we construct these
      protobufs from these keywords.

  Returns:
    the flow id of the new flow.

  Raises:
    ValueError: Unknown or invalid parameters were provided.
  """
  # A flow can have at most one kind of parent: another flow or a hunt.
  if parent_flow_obj is not None and parent_hunt_id is not None:
    raise ValueError(
        "parent_flow_obj and parent_hunt_id are mutually exclusive.")

  # Is the required flow a known flow?
  try:
    registry.FlowRegistry.FlowClassByName(flow_cls.__name__)
  except ValueError:
    stats_collector_instance.Get().IncrementCounter(
        "grr_flow_invalid_flow_count")
    raise ValueError("Unable to locate flow %s" % flow_cls.__name__)

  if not client_id:
    raise ValueError("Client_id is needed to start a flow.")

  # Now parse the flow args into the new object from the keywords.
  if flow_args is None:
    flow_args = flow_cls.args_type()

  FilterArgsFromSemanticProtobuf(flow_args, kwargs)
  # At this point we should exhaust all the keyword args. If any are left
  # over, we do not know what to do with them so raise.
  if kwargs:
    raise type_info.UnknownArg("Unknown parameters to StartFlow: %s" % kwargs)

  # Check that the flow args are valid.
  flow_args.Validate()

  rdf_flow = rdf_flow_objects.Flow(
      client_id=client_id,
      flow_class_name=flow_cls.__name__,
      args=flow_args,
      create_time=rdfvalue.RDFDatetime.Now(),
      creator=creator,
      output_plugins=output_plugins,
      original_flow=original_flow,
      flow_state="RUNNING")

  # A hunt-induced top-level flow reuses the hunt id as its flow id.
  if parent_hunt_id is not None and parent_flow_obj is None:
    rdf_flow.flow_id = parent_hunt_id
    if IsLegacyHunt(parent_hunt_id):
      # Legacy hunt ids carry a 2-character prefix that must be stripped.
      rdf_flow.flow_id = rdf_flow.flow_id[2:]
  else:
    rdf_flow.flow_id = RandomFlowId()

  # For better performance, only do conflicting IDs check for top-level flows.
  if not parent_flow_obj:
    try:
      # If this read succeeds, a flow with the same id already exists.
      data_store.REL_DB.ReadFlowObject(client_id, rdf_flow.flow_id)
      raise CanNotStartFlowWithExistingIdError(client_id, rdf_flow.flow_id)
    except db.UnknownFlowError:
      pass

  if parent_flow_obj:
    # A flow is a nested flow.
    parent_rdf_flow = parent_flow_obj.rdf_flow
    rdf_flow.long_flow_id = "%s/%s" % (parent_rdf_flow.long_flow_id,
                                       rdf_flow.flow_id)
    rdf_flow.parent_flow_id = parent_rdf_flow.flow_id
    rdf_flow.parent_hunt_id = parent_rdf_flow.parent_hunt_id
    rdf_flow.parent_request_id = parent_flow_obj.GetCurrentOutboundId()
    if parent_rdf_flow.creator:
      rdf_flow.creator = parent_rdf_flow.creator
  elif parent_hunt_id:
    # A flow is a root-level hunt-induced flow.
    rdf_flow.long_flow_id = "%s/%s" % (client_id, rdf_flow.flow_id)
    rdf_flow.parent_hunt_id = parent_hunt_id
  else:
    # A flow is a root-level non-hunt flow.
    rdf_flow.long_flow_id = "%s/%s" % (client_id, rdf_flow.flow_id)

  if output_plugins:
    rdf_flow.output_plugins_states = GetOutputPluginStates(
        output_plugins,
        rdf_flow.long_flow_id,
        token=access_control.ACLToken(username=rdf_flow.creator))

  if network_bytes_limit is not None:
    rdf_flow.network_bytes_limit = network_bytes_limit
  if cpu_limit is not None:
    rdf_flow.cpu_limit = cpu_limit

  logging.info(u"Scheduling %s(%s) on %s (%s)", rdf_flow.long_flow_id,
               rdf_flow.flow_class_name, client_id, start_at or "now")

  rdf_flow.current_state = "Start"

  flow_obj = flow_cls(rdf_flow)
  if start_at is None:
    # Store an initial version of the flow straight away. This is needed so the
    # database doesn't raise consistency errors due to missing parent keys when
    # writing logs / errors / results which might happen in Start().
    data_store.REL_DB.WriteFlowObject(flow_obj.rdf_flow)

    # Just run the first state inline. NOTE: Running synchronously means
    # that this runs on the thread that starts the flow. The advantage is
    # that that Start method can raise any errors immediately.
    flow_obj.Start()

    # The flow does not need to actually remain running.
    if not flow_obj.outstanding_requests:
      flow_obj.RunStateMethod("End")
      # Additional check for the correct state in case the End method raised
      # and terminated the flow.
      if flow_obj.IsRunning():
        flow_obj.MarkDone()
  else:
    # Deferred start: schedule the Start state for the requested time.
    flow_obj.CallState("Start", start_time=start_at)

  flow_obj.PersistState()
  data_store.REL_DB.WriteFlowObject(flow_obj.rdf_flow)

  if parent_flow_obj is not None:
    # We can optimize here and not write requests/responses to the database
    # since we have to do this for the parent flow at some point anyways.
    parent_flow_obj.MergeQueuedMessages(flow_obj)
  else:
    flow_obj.FlushQueuedMessages()

  # Publish an audit event, only for top level flows.
  # TODO(amoser): split urn field into dedicated strings.
  events.Events.PublishEvent(
      "Audit",
      rdf_events.AuditEvent(
          user=creator,
          action="RUN_FLOW",
          flow_name=rdf_flow.flow_class_name,
          urn=rdf_flow.long_flow_id,
          client=client_id))

  return rdf_flow.flow_id
The main factory function for creating and executing a new flow. Args: client_id: ID of the client this flow should run on. cpu_limit: CPU limit in seconds for this flow. creator: Username that requested this flow. flow_args: An arg protocol buffer which is an instance of the required flow's args_type class attribute. flow_cls: Class of the flow that should be started. network_bytes_limit: Limit on the network traffic this flow can generated. original_flow: A FlowReference object in case this flow was copied from another flow. output_plugins: An OutputPluginDescriptor object indicating what output plugins should be used for this flow. start_at: If specified, flow will be started not immediately, but at a given time. parent_flow_obj: A parent flow object. None if this is a top level flow. parent_hunt_id: String identifying parent hunt. Can't be passed together with parent_flow_obj. **kwargs: If args or runner_args are not specified, we construct these protobufs from these keywords. Returns: the flow id of the new flow. Raises: ValueError: Unknown or invalid parameters were provided.
def load(self, draw_bbox = False, **kwargs):
    """Render every sprite onto a single RGBA canvas and return PNG bytes.

    This could be far speedier if it copied raw pixels, but that would take
    far too much time to write vs using Image inbuilts.

    :param draw_bbox: when True, outline each sprite's bounding box in red
        (handy for debugging placement).
    :returns: the composed image encoded as PNG bytes.
    """
    canvas = Image.new('RGBA', self.img_size)
    overlay = ImageDraw.Draw(canvas) if draw_bbox else None

    for sprite in self.images:
        raw = sprite.load()
        tile = Image.open(BytesIO(raw))
        # imgrect appears to be (x0, x1, y0, y1) — paste uses indices 0 and 2.
        rect = sprite.imgrect
        canvas.paste(tile, (rect[0], rect[2]))
        if overlay is not None:
            overlay.rectangle((rect[0], rect[2], rect[1], rect[3]), outline='red')

    out = BytesIO()
    canvas.save(out, format='PNG')
    return out.getvalue()
Makes the canvas. This could be far speedier if it copied raw pixels, but that would take far too much time to write vs using Image inbuilts
def body_template(self, value):
    """Set the template used to parse the request body.

    Must be an instance of a prestans.types.DataCollection subclass; this is
    generally set during the RequestHandler lifecycle. Setting this spawns
    the parsing process of the body. If the HTTP verb is GET an
    AssertionError is thrown. Use with extreme caution.
    """
    if self.method == VERB.GET:
        raise AssertionError("body_template cannot be set for GET requests")

    if value is None:
        # A None template deliberately disables body parsing.
        self.logger.warning("body_template is None, parsing will be ignored")
        return

    if not isinstance(value, DataCollection):
        raise AssertionError(
            "body_template must be an instance of %s.%s" % (
                DataCollection.__module__,
                DataCollection.__name__
            ))

    self._body_template = value

    # Resolve the deserializer from the Content-Type header here, so the
    # handler has already had a chance to register extra serializers.
    self.set_deserializer_by_mime_type(self.content_type)
Must be an instance of a prestans.types.DataCollection subclass; this is generally set during the RequestHandler lifecycle. Setting this spawns the parsing process of the body. If the HTTP verb is GET an AssertionError is thrown. Use with extreme caution.
def find_args(self, text, start=None):
    """Locate a complete argument list in ``text``.

    Searches from ``start`` (default 0) for the opening delimiter, then scans
    separator positions until the closing delimiter is reached.

    Returns a ``(begin_index, end_index)`` tuple on success, or
    ``self.NOT_FOUND`` when no complete argument list exists.
    """
    begin_pos = text.find(self.__begin, start if start is not None else 0)
    if begin_pos == -1:
        return self.NOT_FOUND

    cursor = begin_pos + 1
    while True:
        sep_pos = self.__find_args_separator(text, cursor)
        if sep_pos == -1:
            return self.NOT_FOUND
        if text[sep_pos] == self.__end:
            return begin_pos, sep_pos
        # An intermediate separator: keep scanning past it.
        cursor = sep_pos + 1
implementation details
def get_supports(self):
    """Returns set of extension support strings referenced in this Registry

    :return: set of extension support strings
    """
    # set().union accepts arbitrary iterables, so this folds every
    # extension's supports into one fresh set in a single call.
    return set().union(
        *(extension.get_supports() for extension in self.extensions.values()))
Returns set of extension support strings referenced in this Registry :return: set of extension support strings
def create_topic_rule(ruleName, sql, actions, description,
                      ruleDisabled=False,
                      region=None, key=None, keyid=None, profile=None):
    '''
    Given a valid config, create a topic rule.

    Returns {created: true} if the rule was created and returns
    {created: False} if the rule was not created.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_iot.create_topic_rule my_rule "SELECT * FROM 'some/thing'" \\
        '[{"lambda":{"functionArn":"arn:::::something"}},{"sns":{\\
        "targetArn":"arn:::::something","roleArn":"arn:::::something"}}]'
    '''
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        payload = {
            'sql': sql,
            'description': description,
            'actions': actions,
            'ruleDisabled': ruleDisabled,
        }
        conn.create_topic_rule(ruleName=ruleName, topicRulePayload=payload)
    except ClientError as err:
        # Translate the boto error into the conventional salt return shape.
        return {'created': False, 'error': __utils__['boto3.get_error'](err)}
    return {'created': True}
Given a valid config, create a topic rule. Returns {created: true} if the rule was created and returns {created: False} if the rule was not created. CLI Example: .. code-block:: bash salt myminion boto_iot.create_topic_rule my_rule "SELECT * FROM 'some/thing'" \\ '[{"lambda":{"functionArn":"arn:::::something"}},{"sns":{\\ "targetArn":"arn:::::something","roleArn":"arn:::::something"}}]'
def aggregate_precipitation(vec_data, hourly=True, percentile=50):
    """Aggregates highly resolved precipitation data and creates statistics

    Pairs of consecutive values are summed into one coarser box, the boxes
    are classified (starting/ending/enclosed/isolated), and the branching
    and weighting statistics of the cascade model are counted.

    Parameters
    ----------
    vec_data : pd.Series
        hourly (hourly=True) OR 5-min values
        (accessed through its ``precip`` attribute/column)
    hourly : bool, optional
        NOTE(review): currently unused inside this function — confirm
        whether it should affect the aggregation.
    percentile : int, optional
        percentile used to derive the mean-volume threshold that splits
        wet boxes into "below mean" and "above mean"

    Returns
    -------
    output : cascade object
        representing statistics of the cascade model
    """
    cascade_opt = cascade.CascadeStatistics()
    cascade_opt.percentile = percentile

    # length of input time series
    n_in = len(vec_data)
    # NOTE(review): this value is overwritten below with len(vdn); the
    # np.floor result is never used.
    n_out = np.floor(n_in / 2)

    # alternative:
    # 1st step: new time series — sum each pair (i-1, i) of fine-scale
    # values into one coarse box, timestamped with the second member.
    vec_time = vec_data.index
    vdn0 = []
    vtn0 = []
    j = 0
    for i in range(0, n_in):
        if np.mod(i, 2) != 0:
            vdn0.append(vec_data.precip.values[i - 1] + vec_data.precip.values[i])
            vtn0.append(vec_time[i])
            j = j + 1
    vdn = pd.DataFrame(index=vtn0, data={'precip': vdn0})

    # length of new time series
    n_out = len(vdn)

    # series of box types:
    # NOTE(review): np.int is deprecated (removed in NumPy >= 1.24);
    # plain int would be needed on modern NumPy.
    vbtype = np.zeros((n_out, ), dtype=np.int)

    # fields for empirical probabilities
    # counts (rows: below/above mean volume; cols: the four box types)
    nb = np.zeros((2, 4))
    nbxx = np.zeros((2, 4))

    # class boundaries for histograms of the splitting weights
    # wclassbounds = np.linspace(0, 1, num=8)
    wlower = np.array([0, 0.1429, 0.2857, 0.4286, 0.5714,
                       0.7143, 0.8571])  # wclassbounds[0:7]
    wupper = np.array([0.1429, 0.2857, 0.4286, 0.5714, 0.7143,
                       0.8571, 1.0])  # wclassbounds[1:8]

    # evaluate mean rainfall intensity for wet boxes
    # these values should be determined during the aggregation phase!!!!!
    # mean volume threshold
    meanvol = np.percentile(
        vdn.precip[vdn.precip > 0.],
        cascade_opt.percentile)  # np.mean(vdn.precip[vdn.precip>0.])
    cascade_opt.threshold = np.array([meanvol])

    # 2nd step: classify boxes at the upper level by their wet/dry
    # neighbourhood (isolated, starting, enclosed, ending, dry).
    for i in range(0, n_out):
        if vdn.precip.values[i] > 0.:  # rain?
            if i == 0:
                # only starting or isolated
                if vdn.precip.values[i + 1] > 0.:
                    vbtype[i] = cascade.BoxTypes.starting
                else:
                    vbtype[i] = cascade.BoxTypes.isolated
            elif i == n_out - 1:
                # only ending or isolated
                if vdn.precip.values[i - 1] > 0.:
                    vbtype[i] = cascade.BoxTypes.ending
                else:
                    vbtype[i] = cascade.BoxTypes.isolated
            else:
                # neither at at the end nor at the beginning
                if vdn.precip.values[i - 1] == 0. and vdn.precip.values[i + 1] == 0.:
                    vbtype[i] = cascade.BoxTypes.isolated
                if vdn.precip.values[i - 1] == 0. and vdn.precip.values[i + 1] > 0.:
                    vbtype[i] = cascade.BoxTypes.starting
                if vdn.precip.values[i - 1] > 0. and vdn.precip.values[i + 1] > 0.:
                    vbtype[i] = cascade.BoxTypes.enclosed
                if vdn.precip.values[i - 1] > 0. and vdn.precip.values[i + 1] == 0.:
                    vbtype[i] = cascade.BoxTypes.ending
        else:
            vbtype[i] = cascade.BoxTypes.dry  # no rain

    # 3rd step: examine branching — for every wet coarse box count how its
    # volume splits into the two fine boxes: all left P(1/0), all right
    # P(0/1), or shared P(x/x) with a weight histogram.
    j = 0
    for i in range(0, n_in):
        if np.mod(i, 2) != 0:
            if vdn.precip.values[j] > 0:
                if vdn.precip.values[j] > meanvol:
                    belowabove = 1  # above mean
                else:
                    belowabove = 0  # below mean

                nb[belowabove, vbtype[j] - 1] += 1

                if vec_data.precip.values[i - 1] > 0 and vec_data.precip.values[i] == 0:
                    # P(1/0)
                    cascade_opt.p10[belowabove, vbtype[j] - 1] += 1
                if vec_data.precip.values[i - 1] == 0 and vec_data.precip.values[i] > 0:
                    # P(0/1)
                    cascade_opt.p01[belowabove, vbtype[j] - 1] += 1
                if vec_data.precip.values[i - 1] > 0 and vec_data.precip.values[i] > 0:
                    # P(x/x)
                    cascade_opt.pxx[belowabove, vbtype[j] - 1] += 1
                    nbxx[belowabove, vbtype[j] - 1] += 1

                    # weights: fraction of the coarse volume in the left box
                    r1 = vec_data.precip.values[i - 1]
                    r2 = vec_data.precip.values[i]
                    wxxval = r1 / (r1 + r2)

                    # Test: the two fine boxes must sum to the coarse box;
                    # otherwise report and bail out with partial statistics.
                    if abs(r1 + r2 - vdn.precip.values[j]) > 1.E-3:
                        print('i=' + str(i) + ', j=' + str(j) +
                              ', r1=' + str(r1) + ", r2=" + str(r2) +
                              ", Summe=" + str(vdn.precip.values[j]))
                        print(vec_data.index[i])
                        print(vdn.index[j])
                        print('error')
                        return cascade_opt, vdn

                    # accumulate the weight into its histogram class
                    for k in range(0, 7):
                        if wxxval > wlower[k] and wxxval <= wupper[k]:
                            cascade_opt.wxx[k, belowabove, vbtype[j] - 1] += 1
                            break
            j = j + 1

    # 4th step: transform counts to percentages
    cascade_opt.p01 = cascade_opt.p01 / nb
    cascade_opt.p10 = cascade_opt.p10 / nb
    cascade_opt.pxx = cascade_opt.pxx / nb

    with np.errstate(divide='ignore', invalid='ignore'):
        # do not issue warnings here when dividing by zero,
        # this is handled below
        for k in range(0, 7):
            cascade_opt.wxx[k, :, :] = cascade_opt.wxx[k, :, :] / nbxx[:, :]

    # In some cases, the time series are too short for deriving statistics.
    if (np.isnan(cascade_opt.p01).any() or
            np.isnan(cascade_opt.p10).any() or
            np.isnan(cascade_opt.pxx).any()):
        print("ERROR (branching probabilities):")
        print("Invalid statistics. Default values will be returned. "
              "Try to use longer time series or apply statistics "
              "derived for another station.")
        cascade_opt.fill_with_sample_data()

    # For some box types, the corresponding probabilities might yield nan.
    # If this happens, nan values will be replaced by 1/7 in order to provide
    # valid values for disaggregation.
    if np.isnan(cascade_opt.wxx).any():
        print("Warning (weighting probabilities):")
        print("The derived cascade statistics are not valid as some "
              "probabilities are undefined! ", end="")
        print("Try to use longer time series that might be more "
              "appropriate for deriving statistics. ", end="")
        print("As a workaround, default values according to equally "
              "distributed probabilities ", end="")
        print("will be applied...", end="")

        cascade_opt.wxx[np.isnan(cascade_opt.wxx)] = 1.0 / 7.0
        # sanity check: per (volume class, box type) the 7 weight classes
        # should sum to ~1 after the fill-in
        wxx = np.zeros((2, 4))
        for k in range(0, 7):
            wxx[:, :] += cascade_opt.wxx[k, :, :]
        if wxx.any() > 1.001 or wxx.any() < 0.999:
            print("failed! Using default values!")
            cascade_opt.fill_with_sample_data()
        else:
            print("OK!")

    return cascade_opt, vdn
Aggregates highly resolved precipitation data and creates statistics Parameters ---------- vec_data : pd.Series hourly (hourly=True) OR 5-min values Returns ------- output : cascade object representing statistics of the cascade model
def build_path(graph, node1, node2, path=None):
    """Collect the nodes on the path from ``node1`` to ``node2``.

    The result contains every node between ``node1`` and ``node2``,
    ``node1`` excluded (``node2`` is included). If there is a loop starting
    from ``node1``, it will be part of the path.
    """
    path = [] if path is None else path
    if node1 is node2:
        return path
    path.append(node2)
    for predecessor in graph.all_preds(node2):
        if predecessor not in path:
            # Recurse towards node1, accumulating into the shared list.
            build_path(graph, node1, predecessor, path)
    return path
Build the path from node1 to node2. The path is composed of all the nodes between node1 and node2, node1 excluded. Although if there is a loop starting from node1, it will be included in the path.
def complex_space(self):
    """The space corresponding to this space's `complex_dtype`.

    Raises
    ------
    ValueError
        If `dtype` is not a numeric data type.
    """
    if is_numeric_dtype(self.dtype):
        return self.astype(self.complex_dtype)
    raise ValueError(
        '`complex_space` not defined for non-numeric `dtype`')
The space corresponding to this space's `complex_dtype`. Raises ------ ValueError If `dtype` is not a numeric data type.
def _read_data_type_none(self, length): """Read IPv6-Route unknown type data. Structure of IPv6-Route unknown type data [RFC 8200][RFC 5095]: +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Next Header | Hdr Ext Len | Routing Type | Segments Left | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | . . . type-specific data . . . | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 route.next Next Header 1 8 route.length Header Extensive Length 2 16 route.type Routing Type 3 24 route.seg_left Segments Left 4 32 route.data Type-Specific Data """ _data = self._read_fileng(length) data = dict( data=_data, ) return data
Read IPv6-Route unknown type data. Structure of IPv6-Route unknown type data [RFC 8200][RFC 5095]: +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Next Header | Hdr Ext Len | Routing Type | Segments Left | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | . . . type-specific data . . . | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 route.next Next Header 1 8 route.length Header Extensive Length 2 16 route.type Routing Type 3 24 route.seg_left Segments Left 4 32 route.data Type-Specific Data
def join(delim, items, quotes=False):
    """Join ``items`` with ``delim`` after dropping empty strings.

    Parameters
    ----------
    delim : str
        Separator placed between the surviving items.
    items : iterable of str
        Strings to join; empty strings are skipped.
    quotes : bool, optional
        When true, wrap any item containing a space in double quotes.

    Returns
    -------
    str
        The joined string, or '' when no non-empty items remain.
    """
    # A named helper replaces the original assigned-lambda (PEP 8 / E731);
    # plain truthiness replaces the `quotes == True` comparison.
    def _transform(s):
        if quotes and ' ' in s:
            return '"{}"'.format(s)
        return s

    # str.join of an empty sequence is already '', so no special case needed.
    return delim.join(_transform(i) for i in items if i)
Joins the supplied list of strings after removing any empty strings from the list
def _scalar_property(fieldname): """Create a property descriptor around the :class:`_PropertyMixin` helpers. """ def _getter(self): """Scalar property getter.""" return self._properties.get(fieldname) def _setter(self, value): """Scalar property setter.""" self._patch_property(fieldname, value) return property(_getter, _setter)
Create a property descriptor around the :class:`_PropertyMixin` helpers.
def setup_ui(self, ):
    """Setup the ui

    :returns: None
    :rtype: None
    :raises: None
    """
    option_labels = self.reftrack.get_option_labels()
    self.browser = ComboBoxBrowser(len(option_labels), headers=option_labels)
    self.browser_vbox.addWidget(self.browser)
Setup the ui :returns: None :rtype: None :raises: None
def napalm_cli(task: Task, commands: List[str]) -> Result:
    """
    Run commands on remote devices using napalm

    Arguments:
        commands: commands to execute

    Returns:
        Result object with the following attributes set:
          * result (``dict``): result of the commands execution
    """
    connection = task.host.get_connection("napalm", task.nornir.config)
    cli_output = connection.cli(commands)
    return Result(host=task.host, result=cli_output)
Run commands on remote devices using napalm Arguments: commands: commands to execute Returns: Result object with the following attributes set: * result (``dict``): result of the commands execution
def __CombineGlobalParams(self, global_params, default_params):
    """Combine the given params with the defaults.

    For each field, a value assigned in ``global_params`` wins; otherwise
    the value from ``default_params`` is used. Empty values (None, [], ())
    are never copied onto the result.
    """
    util.Typecheck(global_params, (type(None), self.__client.params_type))
    result = self.__client.params_type()
    global_params = global_params or self.__client.params_type()
    for field in result.all_fields():
        val = global_params.get_assigned_value(field.name)
        if val is None:
            val = default_params.get_assigned_value(field.name)
        if val in (None, [], ()):
            continue
        setattr(result, field.name, val)
    return result
Combine the given params with the defaults.
def gpg_version(sp=subprocess):
    """Return the version of the installed GPG binary as bytes.

    Runs ``gpg --version`` and parses the first line of its output,
    e.g. b'gpg (GnuPG) 2.1.11' -> b'2.1.11'.

    The previous docstring ("keygrip of the primary GPG key") described a
    different function; this one only reports the tool version.

    :param sp: subprocess-like module used to run the command (injectable
        for testing).
    """
    args = gpg_command(['--version'])
    output = check_output(args=args, sp=sp)
    line = output.split(b'\n')[0]  # b'gpg (GnuPG) 2.1.11'
    line = line.split(b' ')[-1]  # b'2.1.11'
    line = line.split(b'-')[0]  # remove trailing version parts
    return line.split(b'v')[-1]  # strip a leading 'v' prefix, if any
Get the version string of the installed GPG binary (parsed from the first line of ``gpg --version`` output).
def init_app(self, app, env_file=None, verbose_mode=False):
    """Imports .env file."""
    # Only bind the app on first initialisation.
    if self.app is None:
        self.app = app
    self.verbose_mode = verbose_mode

    path = env_file if env_file is not None else os.path.join(os.getcwd(), ".env")
    if os.path.exists(path):
        self.__import_vars(path)
    else:
        warnings.warn("can't read {0} - it doesn't exist".format(path))
Imports .env file.
def _load_version(cls, unpickler, version): """ A function to load a previously saved SentenceSplitter instance. Parameters ---------- unpickler : GLUnpickler A GLUnpickler file handler. version : int Version number maintained by the class writer. """ state, _exclude, _features = unpickler.load() features = state['features'] excluded_features = state['excluded_features'] model = cls.__new__(cls) model._setup() model.__proxy__.update(state) model._exclude = _exclude model._features = _features return model
A function to load a previously saved SentenceSplitter instance. Parameters ---------- unpickler : GLUnpickler A GLUnpickler file handler. version : int Version number maintained by the class writer.