def visualize_explanation(explanation, label=None):
    """
    Given the output of the explain() endpoint, produces a terminal visual
    that plots response strength over a sequence
    """
    if not sys.version_info[:2] >= (3, 5):
        raise IndicoError("Python >= 3.5 is required for explanation visualization")
    try:
        from colr import Colr as C
    except ImportError:
        raise IndicoError("Package colr >= 0.8.1 is required for explanation visualization.")

    cursor = 0
    text = explanation['text']
    for token in explanation.get('token_predictions'):
        try:
            class_confidence = token.get('prediction')[label]
        except KeyError:
            raise IndicoError("Invalid label: {}".format(label))

        if class_confidence > 0.5:
            fg_color = (255, 255, 255)
        else:
            fg_color = (0, 0, 0)
        rg_value = 255 - int(class_confidence * 255)

        token_end = token.get('token').get('end')
        token_text = text[cursor:token_end]
        cursor = token_end
        sys.stdout.write(
            str(C().b_rgb(
                rg_value, rg_value, 255
            ).rgb(
                fg_color[0], fg_color[1], fg_color[2], token_text
            ))
        )
    sys.stdout.write("\n")
    sys.stdout.flush()
def _writetypesdoc(doc, thing, forceload=0):
    """Write HTML documentation to a file in the current directory."""
    try:
        object, name = pydoc.resolve(thing, forceload)
        name = os.path.join(doc, name + '.html')
    except (ImportError, pydoc.ErrorDuringImport), value:
        log.debug(str(value))
        return

    # inner classes
    cdict = {}
    fdict = {}
    elements_dict = {}
    types_dict = {}
    for kname, klass in inspect.getmembers(thing, inspect.isclass):
        if thing is not inspect.getmodule(klass):
            continue
        cdict[kname] = inspect.getmembers(klass, inspect.isclass)
        for iname, iklass in cdict[kname]:
            key = (kname, iname)
            fdict[key] = _writedoc(doc, iklass)
            if issubclass(iklass, ElementDeclaration):
                try:
                    typecode = iklass()
                except (AttributeError, RuntimeError), ex:
                    elements_dict[iname] = _writebrokedoc(doc, ex, iname)
                    continue
                elements_dict[iname] = None
                if typecode.pyclass is not None:
                    elements_dict[iname] = _writedoc(doc, typecode.pyclass)
                continue
            if issubclass(iklass, TypeDefinition):
                try:
                    typecode = iklass(None)
                except (AttributeError, RuntimeError), ex:
                    types_dict[iname] = _writebrokedoc(doc, ex, iname)
                    continue
                types_dict[iname] = None
                if typecode.pyclass is not None:
                    types_dict[iname] = _writedoc(doc, typecode.pyclass)
                continue

    def strongarm(self, object, name=None, mod=None, funcs={}, classes={}, *ignored):
        """Produce HTML documentation for a class object."""
        realname = object.__name__
        name = name or realname
        bases = object.__bases__
        object, name = pydoc.resolve(object, forceload)
        contents = []
        push = contents.append
        if name == realname:
            title = '<a name="%s">class <strong>%s</strong></a>' % (
                name, realname)
        else:
            title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
                name, name, realname)
        mdict = {}
        if bases:
            parents = []
            for base in bases:
                parents.append(self.classlink(base, object.__module__))
            title = title + '(%s)' % pydoc.join(parents, ', ')
        doc = self.markup(pydoc.getdoc(object), self.preformat, funcs, classes, mdict)
        doc = doc and '<tt>%s<br>&nbsp;</tt>' % doc

        for iname, iclass in cdict[name]:
            fname = fdict[(name, iname)]
            if elements_dict.has_key(iname):
                push('class <a href="%s">%s</a>: element declaration typecode<br/>'\
                     % (fname, iname))
                pyclass = elements_dict[iname]
                if pyclass is not None:
                    push('<ul>instance attributes:')
                    push('<li><a href="%s">pyclass</a>: instances serializable to XML<br/></li>'\
                         % elements_dict[iname])
                    push('</ul>')
            elif types_dict.has_key(iname):
                push('class <a href="%s">%s</a>: type definition typecode<br/>' % (fname, iname))
                pyclass = types_dict[iname]
                if pyclass is not None:
                    push('<ul>instance attributes:')
                    push('<li><a href="%s">pyclass</a>: instances serializable to XML<br/></li>'\
                         % types_dict[iname])
                    push('</ul>')
            else:
                push('class <a href="%s">%s</a>: TODO not sure what this is<br/>' % (fname, iname))

        contents = ''.join(contents)
        return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)

    doclass = pydoc.HTMLDoc.docclass
    pydoc.HTMLDoc.docclass = strongarm
    try:
        page = pydoc.html.page(pydoc.describe(object), pydoc.html.document(object, name))
        file = open(name, 'w')
        file.write(page)
        file.close()
    except (ImportError, pydoc.ErrorDuringImport), value:
        log.debug(str(value))
    pydoc.HTMLDoc.docclass = doclass
def PublishEvent(cls, event_name, msg, token=None):
    """Publish the message into all listeners of the event.

    We send the message to all event handlers which contain this string in
    their EVENT static member. This allows the event to be sent to multiple
    interested listeners.

    Args:
      event_name: An event name.
      msg: The message to send to the event handler.
      token: ACL token.

    Raises:
      ValueError: If the message is invalid. The message must be a Semantic
        Value (instance of RDFValue) or a full GrrMessage.
    """
    cls.PublishMultipleEvents({event_name: [msg]}, token=token)
def openflow_controller_controller_name(self, **kwargs):
    """Auto Generated Code"""
    config = ET.Element("config")
    openflow_controller = ET.SubElement(config, "openflow-controller",
                                        xmlns="urn:brocade.com:mgmt:brocade-openflow")
    controller_name = ET.SubElement(openflow_controller, "controller-name")
    controller_name.text = kwargs.pop('controller_name')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def _set_default_vrf(self, v, load=False):
    """
    Setter method for default_vrf, mapped from YANG variable
    /rbridge_id/router/router_bgp/address_family/ipv6/ipv6_unicast/default_vrf (container)

    If this variable is read-only (config: false) in the source YANG file,
    then _set_default_vrf is considered as a private method. Backends looking
    to populate this variable should do so via calling
    thisObj._set_default_vrf() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=default_vrf.default_vrf,
            is_container='container',
            presence=False,
            yang_name="default-vrf",
            rest_name="",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions={u'tailf-common': {
                u'info': u'IPv4 unicast address Family',
                u'cli-suppress-no': None,
                u'cli-add-mode': None,
                u'cli-drop-node-name': None,
                u'cli-full-command': None,
                u'callpoint': u'AfIpv6Ucast',
                u'cli-mode-name': u'config-bgp-ipv6u'}},
            namespace='urn:brocade.com:mgmt:brocade-bgp',
            defining_module='brocade-bgp',
            yang_type='container',
            is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """default_vrf must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=default_vrf.default_vrf, is_container='container', presence=False, yang_name="default-vrf", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IPv4 unicast address Family', u'cli-suppress-no': None, u'cli-add-mode': None, u'cli-drop-node-name': None, u'cli-full-command': None, u'callpoint': u'AfIpv6Ucast', u'cli-mode-name': u'config-bgp-ipv6u'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""",
        })

    self.__default_vrf = t
    if hasattr(self, '_set'):
        self._set()
def _empty_queue(self):
    """Dump all live point proposals currently on the queue."""
    while True:
        try:
            # Remove unused points from the queue.
            self.queue.pop()
            self.unused += 1  # add to the total number of unused points
            self.nqueue -= 1
        except:
            # If the queue is empty, we're done!
            self.nqueue = 0
            break
def _set_ocsp_callback(self, helper, data):
    """
    This internal helper does the common work for
    ``set_ocsp_server_callback`` and ``set_ocsp_client_callback``, which is
    almost all of it.
    """
    self._ocsp_helper = helper
    self._ocsp_callback = helper.callback
    if data is None:
        self._ocsp_data = _ffi.NULL
    else:
        self._ocsp_data = _ffi.new_handle(data)

    rc = _lib.SSL_CTX_set_tlsext_status_cb(
        self._context, self._ocsp_callback
    )
    _openssl_assert(rc == 1)
    rc = _lib.SSL_CTX_set_tlsext_status_arg(self._context, self._ocsp_data)
    _openssl_assert(rc == 1)
def modified(self, base: pathlib.PurePath = pathlib.PurePath()) \
        -> Iterator[str]:
    """
    Find the paths of modified files. There is no option to include
    intermediate directories, as all files and directories exist in both the
    left and right trees.

    :param base: The base directory to recursively append to the right
        entity.
    :return: An iterable of paths of modified files.
    """
    # N.B. this method will only ever return files, as directories cannot
    # be "modified"
    if self.is_modified:
        yield str(base / self.right.name)
def inverse(self):
    """Return the inverse operator.

    Examples
    --------
    >>> r3 = odl.rn(3)
    >>> vec = r3.element([1, 2, 3])
    >>> op = ScalingOperator(r3, 2.0)
    >>> inv = op.inverse
    >>> inv(op(vec)) == vec
    True
    >>> op(inv(vec)) == vec
    True
    """
    if self.scalar == 0.0:
        raise ZeroDivisionError('scaling operator not invertible for '
                                'scalar==0')
    return ScalingOperator(self.domain, 1.0 / self.scalar)
def as_tree(self, visitor=None, children=None):
    """
    Recursively traverses each tree (starting from each root) in order to
    generate a dictionary-based tree structure of the entire forest. Each
    level of the forest/tree is a list of nodes, and each node consists of a
    dictionary representation, where the entry ``children`` (by default)
    consists of a list of dictionary representations of its children.

    See :meth:`CTENodeManager.as_tree` and
    :meth:`CTENodeManager.node_as_tree` for details on how this method
    works, as well as its expected arguments.

    :param visitor: optional function responsible for generating the
        dictionary representation of a node.
    :param children: optional function responsible for generating a
        children key and list for a node.
    :return: a dictionary representation of the structure of the forest.
    """
    _parameters = {"node": self}
    if visitor is not None:
        _parameters["visitor"] = visitor
    if children is not None:
        _parameters["children"] = children
    return self.__class__.objects.node_as_tree(**_parameters)
def default(self, vid):
    """
    Defaults the VLAN configuration

    .. code-block:: none

        default vlan <vlanid>

    Args:
        vid (str): The VLAN ID to default

    Returns:
        True if the operation was successful otherwise False
    """
    command = 'default vlan %s' % vid
    return self.configure(command) if isvlan(vid) else False
def put(self, transfer_id, amount, created_timestamp, receipt):
    """
    :param transfer_id: int of the account_id to deposit the money to
    :param amount: float of the amount to transfer
    :param created_timestamp: str of the validated receipt that money has
        been received
    :param receipt: str of the receipt
    :return: Transfer dict
    """
    return self.connection.put('account/transfer/claim',
                               data=dict(transfer_id=transfer_id,
                                         amount=amount,
                                         created_timestamp=created_timestamp,
                                         receipt=receipt))
def _sort_results(self, results):
    """
    Order the results.

    :param results: The disordered results.
    :type results: array.bs4.element.Tag
    :return: The ordered results.
    :rtype: array.bs4.element.Tag
    """
    parents = []
    groups = []
    for result in results:
        if not self._in_list(parents, result.parent):
            parents.append(result.parent)
            groups.append([])
            groups[len(groups) - 1].append(result)
        else:
            groups[parents.index(result.parent)].append(result)

    array = []
    for group in groups:
        array += sorted(
            group,
            key=lambda element: element.parent.contents.index(element)
        )
    return array
def attach_stream(self, stream):
    """Notify that we would like to attach a node input to this stream.

    The return value from this function is the DataStream that should be
    attached to since this function may internally allocate a new SGNode
    that copies the stream if there is no space in the output list to hold
    another input.

    This function should be called once for every node input before
    allocating a new sensor graph node that attaches to a stream that is
    managed by the StreamAllocator.

    Args:
        stream (DataStream): The stream (originally returned from
            allocate_stream) that we want to attach to.

    Returns:
        DataStream: A data stream, possibly the same as stream, that should
            be attached to a node input.
    """
    curr_stream, count, prev = self._allocated_streams[stream]

    # Check if we need to split this stream and allocate a new one
    if count == (self.model.get(u'max_node_outputs') - 1):
        new_stream = self.allocate_stream(curr_stream.stream_type, previous=curr_stream)

        copy_desc = u"({} always) => {} using copy_all_a".format(curr_stream, new_stream)
        self.sensor_graph.add_node(copy_desc)
        self._allocated_streams[stream] = (new_stream, 1, curr_stream)

        # If we are splitting a constant stream, make sure we also duplicate the initialization value
        # FIXME: If there is no default value for the stream, that is probably a warning since all constant
        # streams should be initialized with a value.
        if curr_stream.stream_type == DataStream.ConstantType and curr_stream in self.sensor_graph.constant_database:
            self.sensor_graph.add_constant(new_stream, self.sensor_graph.constant_database[curr_stream])

        return new_stream

    self._allocated_streams[stream] = (curr_stream, count + 1, prev)
    return curr_stream
def wait_for_simulation_stop(self, timeout=None):
    """Block until the simulation is done or timeout seconds exceeded.

    If the simulation stops before timeout, siminfo is returned.
    """
    start = datetime.now()
    while self.get_is_sim_running():
        sleep(0.5)
        if timeout is not None:
            if (datetime.now() - start).seconds >= timeout:
                ret = None
                break
    else:
        ret = self.simulation_info()
    return ret
def read_config_files(self, files):
    """Read a list of config files.

    :param iterable files: An iterable (e.g. list) of files to read.
    """
    errors = {}
    for _file in files:
        config, valid = self.read_config_file(_file)
        self.update(config)
        if valid is not True:
            errors[_file] = valid
    return errors or True
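A usage sketch (the `config` object and file paths are hypothetical): the method returns True when every file parses cleanly, otherwise a dict mapping each failing path to its validation result.

# Hypothetical caller; `config` is an instance of the class defining read_config_files.
result = config.read_config_files(['/etc/app.conf', '/etc/app.d/extra.conf'])
if result is not True:
    for path, error in result.items():
        print('failed to read {0}: {1}'.format(path, error))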
def get_active_lines(lines, comment_char="#"):
    """
    Returns lines, or parts of lines, from content that are not commented
    out or completely empty. The resulting lines are all individually
    stripped.

    This is useful for parsing many config files such as ifcfg.

    Parameters:
        lines (list): List of strings to parse.
        comment_char (str): String indicating that all chars following
            are part of a comment and will be removed from the output.

    Returns:
        list: List of valid lines remaining in the input.

    Examples:
        >>> lines = [
        ...     'First line',
        ...     '   ',
        ...     '# Comment line',
        ...     'Inline comment # comment',
        ...     '  Whitespace      ',
        ...     'Last line']
        >>> get_active_lines(lines)
        ['First line', 'Inline comment', 'Whitespace', 'Last line']
    """
    return list(filter(None, (line.split(comment_char, 1)[0].strip() for line in lines)))
def _convert_coordinatelist(input_obj):
    """convert from 'list' or 'tuple' object to pgmagick.CoordinateList.

    :type input_obj: list or tuple
    """
    cdl = pgmagick.CoordinateList()
    for obj in input_obj:
        cdl.append(pgmagick.Coordinate(obj[0], obj[1]))
    return cdl
def load_module(self, module):
    '''
    Introspect Ansible module.

    :param module:
    :return:
    '''
    m_ref = self._modules_map.get(module)
    if m_ref is None:
        raise LoaderError('Module "{0}" was not found'.format(module))
    mod = importlib.import_module('ansible.modules{0}'.format(
        '.'.join([elm.split('.')[0] for elm in m_ref.split(os.path.sep)])))
    return mod
def name2rgb(name):
    """Convert the name of a color into its RGB value"""
    try:
        import colour
    except ImportError:
        raise ImportError('You need colour to be installed: pip install colour')
    c = colour.Color(name)
    color = int(c.red * 255), int(c.green * 255), int(c.blue * 255)
    return color
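A quick illustration, assuming the colour package is installed:

assert name2rgb('red') == (255, 0, 0)
assert name2rgb('white') == (255, 255, 255)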
def _mk_connectivity_flats(self, i12, j1, j2, mat_data, flats, elev, mag):
    """
    Helper function for _mk_adjacency_matrix. This calculates the
    connectivity for flat regions. Every pixel in the flat will drain to a
    random pixel in the flat. This accumulates all the area in the flat
    region to a single pixel. All that area is then drained from that pixel
    to the surroundings on the flat. If the border of the flat has a single
    pixel with a much lower elevation, all the area will go towards that
    pixel. If the border has pixels with similar elevation, then the area
    will be distributed amongst all the border pixels proportional to their
    elevation.
    """
    nn, mm = flats.shape
    NN = np.prod(flats.shape)

    # Label the flats
    assigned, n_flats = spndi.label(flats, FLATS_KERNEL3)

    flat_ids, flat_coords, flat_labelsf = _get_flat_ids(assigned)
    flat_j = [None] * n_flats
    flat_prop = [None] * n_flats
    flat_i = [None] * n_flats

    # Temporary array to find the flats
    edges = np.zeros_like(flats)
    # %% Calculate the flat drainage
    warn_flats = []
    for ii in xrange(n_flats):
        ids_flats = flat_ids[flat_coords[ii]:flat_coords[ii + 1]]
        edges[:] = 0
        j = ids_flats % mm
        i = ids_flats // mm
        for iii in [-1, 0, 1]:
            for jjj in [-1, 0, 1]:
                i_2 = i + iii
                j_2 = j + jjj
                ids_tmp = (i_2 >= 0) & (j_2 >= 0) & (i_2 < nn) & (j_2 < mm)
                edges[i_2[ids_tmp], j_2[ids_tmp]] += \
                    FLATS_KERNEL3[iii + 1, jjj + 1]
        edges.ravel()[ids_flats] = 0
        ids_edge = np.argwhere(edges.ravel()).squeeze()

        flat_elev_loc = elev.ravel()[ids_flats]
        # It is possible for the edges to merge 2 flats, so we need to
        # take the lower elevation to avoid large circular regions
        flat_elev = flat_elev_loc.min()

        loc_elev = elev.ravel()[ids_edge]
        # Filter out any elevations larger than the flat elevation
        # TODO: Figure out if this should be <= or <
        I_filt = loc_elev < flat_elev
        try:
            loc_elev = loc_elev[I_filt]
            loc_slope = mag.ravel()[ids_edge][I_filt]
        except:
            # If this is fully masked out (i.e. inside a no-data area)
            loc_elev = np.array([])
            loc_slope = np.array([])

        loc_dx = self.dX.mean()

        # Now I have to figure out if I should just use the minimum or
        # distribute amongst many pixels on the flat boundary
        n = len(loc_slope)
        if n == 0:  # Flat does not have anywhere to drain
            # Let's see if the flat goes to the edge. If yes, we'll just
            # distribute the area along the edge.
            ids_flat_on_edge = ((ids_flats % mag.shape[1]) == 0) | \
                ((ids_flats % mag.shape[1]) == (mag.shape[1] - 1)) | \
                (ids_flats <= mag.shape[1]) | \
                (ids_flats >= (mag.shape[1] * (mag.shape[0] - 1)))
            if ids_flat_on_edge.sum() == 0:
                warn_flats.append(ii)
                continue

            drain_ids = ids_flats[ids_flat_on_edge]
            loc_proportions = mag.ravel()[ids_flats[ids_flat_on_edge]]
            loc_proportions /= loc_proportions.sum()

            ids_flats = ids_flats[~ids_flat_on_edge]
            # This flat is entirely on the edge of the image
            if len(ids_flats) == 0:
                # therefore, whatever drains into it is done.
                continue
            flat_elev_loc = flat_elev_loc[~ids_flat_on_edge]
        else:  # Flat has a place to drain to
            min_edges = np.zeros(loc_slope.shape, bool)
            min_edges[np.argmin(loc_slope)] = True
            # Add to the min edges any edge that is within an error
            # tolerance as small as the minimum
            min_edges = (loc_slope + loc_slope * loc_dx / 2) \
                >= loc_slope[min_edges]

            drain_ids = ids_edge[I_filt][min_edges]
            loc_proportions = loc_slope[min_edges]
            loc_proportions /= loc_proportions.sum()

        # Now distribute the connectivity amongst the chosen elevations
        # proportional to their slopes

        # First, let all the the ids in the flats drain to 1
        # flat id (for ease)
        one_id = np.zeros(ids_flats.size, bool)
        one_id[np.argmin(flat_elev_loc)] = True

        j1.ravel()[ids_flats[~one_id]] = ids_flats[one_id]
        mat_data.ravel()[ids_flats[~one_id]] = 1
        # Negative indices will be eliminated before making the matrix
        j2.ravel()[ids_flats[~one_id]] = -1
        mat_data.ravel()[ids_flats[~one_id] + NN] = 0

        # Now drain the 1 flat to the drains
        j1.ravel()[ids_flats[one_id]] = drain_ids[0]
        mat_data.ravel()[ids_flats[one_id]] = loc_proportions[0]
        if len(drain_ids) > 1:
            j2.ravel()[ids_flats[one_id]] = drain_ids[1]
            mat_data.ravel()[ids_flats[one_id] + NN] = loc_proportions[1]

        if len(loc_proportions) > 2:
            flat_j[ii] = drain_ids[2:]
            flat_prop[ii] = loc_proportions[2:]
            flat_i[ii] = np.ones(drain_ids[2:].size, 'int64') * ids_flats[one_id]

    try:
        flat_j = np.concatenate([fj for fj in flat_j if fj is not None])
        flat_prop = \
            np.concatenate([fp for fp in flat_prop if fp is not None])
        flat_i = np.concatenate([fi for fi in flat_i if fi is not None])
    except:
        flat_j = np.array([], 'int64')
        flat_prop = np.array([], 'float64')
        flat_i = np.array([], 'int64')

    if len(warn_flats) > 0:
        warnings.warn("Warning %d flats had no place" % len(warn_flats) +
                      " to drain to --> these are pits (check pit-remove "
                      "algorithm).")
    return j1, j2, mat_data, flat_i, flat_j, flat_prop
def find_credentials():
    '''
    Cycle through all the possible credentials and return the first one
    that works.
    '''
    # if the username and password were already found don't go through the
    # connection process again
    if 'username' in DETAILS and 'password' in DETAILS:
        return DETAILS['username'], DETAILS['password']

    passwords = DETAILS['passwords']
    for password in passwords:
        DETAILS['password'] = password
        if not __salt__['vsphere.test_vcenter_connection']():
            # We are unable to authenticate
            continue
        # If we have data returned from above, we've successfully authenticated.
        return DETAILS['username'], password
    # We've reached the end of the list without successfully authenticating.
    raise salt.exceptions.VMwareConnectionError('Cannot complete login due to '
                                                'incorrect credentials.')
def get_config_id(kwargs=None, call=None):
    '''
    Returns a config_id for a given linode.

    .. versionadded:: 2015.8.0

    name
        The name of the Linode for which to get the config_id. Can be used
        instead of ``linode_id``.

    linode_id
        The ID of the Linode for which to get the config_id. Can be used
        instead of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)

    response = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
    config_id = {'config_id': response[0]['ConfigID']}

    return config_id
def get_link(self, task_id):
    """Get a ``LinkOfTrust`` by task id.

    Args:
        task_id (str): the task id to find.

    Returns:
        LinkOfTrust: the link matching the task id.

    Raises:
        CoTError: if no ``LinkOfTrust`` matches.
    """
    links = [x for x in self.links if x.task_id == task_id]
    if len(links) != 1:
        raise CoTError("No single Link matches task_id {}!\n{}".format(task_id, self.dependent_task_ids()))
    return links[0]
def catch_config_error(method, app, *args, **kwargs):
    """Method decorator for catching invalid config (Trait/ArgumentErrors) during init.

    On a TraitError (generally caused by bad config), this will print the
    trait's message, and exit the app.

    For use on init methods, to prevent invoking excepthook on invalid input.
    """
    try:
        return method(app, *args, **kwargs)
    except (TraitError, ArgumentError) as e:
        app.print_description()
        app.print_help()
        app.print_examples()
        app.log.fatal("Bad config encountered during initialization:")
        app.log.fatal(str(e))
        app.log.debug("Config at the time: %s", app.config)
        app.exit(1)
def remove_all_observers(self):
    """
    Removes all registered observers.
    """
    for weak_observer in self._weak_observers:
        observer = weak_observer()
        if observer:
            self.remove_observer(observer)
def compose(self, other, qargs=None, front=False):
    """Return the composition channel self∘other.

    Args:
        other (QuantumChannel): a quantum channel subclass.
        qargs (list): a list of subsystem positions to compose other on.
        front (bool): If False compose in standard order other(self(input))
                      otherwise compose in reverse order self(other(input))
                      [default: False]

    Returns:
        Stinespring: The composition channel as a Stinespring object.

    Raises:
        QiskitError: if other cannot be converted to a channel or has
            incompatible dimensions.
    """
    if qargs is not None:
        return Stinespring(
            SuperOp(self).compose(other, qargs=qargs, front=front))

    # Convert other to Kraus
    if not isinstance(other, Kraus):
        other = Kraus(other)
    # Check dimensions match up
    if front and self._input_dim != other._output_dim:
        raise QiskitError(
            'input_dim of self must match output_dim of other')
    if not front and self._output_dim != other._input_dim:
        raise QiskitError(
            'input_dim of other must match output_dim of self')
    # Since we cannot directly compose two channels in Stinespring
    # representation we convert to the Kraus representation
    return Stinespring(Kraus(self).compose(other, front=front))
def generate_nodeinfo2_document(**kwargs):
    """
    Generate a NodeInfo2 document.

    Pass in a dictionary as per NodeInfo2 1.0 schema:
    https://github.com/jaywink/nodeinfo2/blob/master/schemas/1.0/schema.json

    Minimum required schema:

        {server:
            baseUrl
            name
            software
            version
        }
        openRegistrations

    Protocols default will match what this library supports, ie "diaspora"
    currently.

    :return: dict
    :raises: KeyError on missing required items
    """
    return {
        "version": "1.0",
        "server": {
            "baseUrl": kwargs['server']['baseUrl'],
            "name": kwargs['server']['name'],
            "software": kwargs['server']['software'],
            "version": kwargs['server']['version'],
        },
        "organization": {
            "name": kwargs.get('organization', {}).get('name', None),
            "contact": kwargs.get('organization', {}).get('contact', None),
            "account": kwargs.get('organization', {}).get('account', None),
        },
        "protocols": kwargs.get('protocols', ["diaspora"]),
        "relay": kwargs.get('relay', ''),
        "services": {
            "inbound": kwargs.get('service', {}).get('inbound', []),
            "outbound": kwargs.get('service', {}).get('outbound', []),
        },
        "openRegistrations": kwargs['openRegistrations'],
        "usage": {
            "users": {
                "total": kwargs.get('usage', {}).get('users', {}).get('total'),
                "activeHalfyear": kwargs.get('usage', {}).get('users', {}).get('activeHalfyear'),
                "activeMonth": kwargs.get('usage', {}).get('users', {}).get('activeMonth'),
                "activeWeek": kwargs.get('usage', {}).get('users', {}).get('activeWeek'),
            },
            "localPosts": kwargs.get('usage', {}).get('localPosts'),
            "localComments": kwargs.get('usage', {}).get('localComments'),
        }
    }
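A minimal call satisfying the required schema keys, as a sketch (the values are illustrative):

doc = generate_nodeinfo2_document(
    server={
        'baseUrl': 'https://example.com',
        'name': 'Example server',
        'software': 'example',
        'version': '0.1.0',
    },
    openRegistrations=False,
)
assert doc['version'] == '1.0'
assert doc['protocols'] == ['diaspora']  # the documented default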
def configure_stream_logger(logger='', level=None, formatter='%(levelname)-8s %(message)s'):
    """
    Configure the default stream handler for logging messages to the
    console, remove other logging handlers, and enable capturing warnings.

    .. versionadded:: 1.3.0

    :param str logger: The logger to add the stream handler for.
    :param level: The level to set the logger to, will default to WARNING
        if no level is specified.
    :type level: None, int, str
    :param formatter: The format to use for logging messages to the console.
    :type formatter: str, :py:class:`logging.Formatter`
    :return: The new configured stream handler.
    :rtype: :py:class:`logging.StreamHandler`
    """
    level = level or logging.WARNING
    if isinstance(level, str):
        level_name = level
        level = getattr(logging, level, None)
        if level is None:
            raise ValueError('invalid log level: ' + level_name)

    root_logger = logging.getLogger('')
    for handler in root_logger.handlers:
        root_logger.removeHandler(handler)

    logging.getLogger(logger).setLevel(logging.DEBUG)
    console_log_handler = logging.StreamHandler()
    console_log_handler.setLevel(level)
    if isinstance(formatter, str):
        formatter = logging.Formatter(formatter)
    elif not isinstance(formatter, logging.Formatter):
        raise TypeError('formatter must be an instance of logging.Formatter')
    console_log_handler.setFormatter(formatter)
    logging.getLogger(logger).addHandler(console_log_handler)
    logging.captureWarnings(True)
    return console_log_handler
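Typical use is a one-time call at startup; a sketch:

configure_stream_logger(level='INFO')
logging.getLogger(__name__).info('console logging configured')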
def parse_request() -> Dict[str, str]:
    """
    Parse the request of the git credential API from stdin.

    Returns:
        A dictionary with all key-value pairs of the request
    """
    in_lines = sys.stdin.readlines()
    LOGGER.debug('Received request "%s"', in_lines)

    request = {}
    for line in in_lines:
        # skip empty lines to be a bit resilient against protocol errors
        if not line.strip():
            continue
        parts = line.split('=', 1)
        assert len(parts) == 2
        request[parts[0].strip()] = parts[1].strip()
    return request
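A self-contained sketch of the stdin protocol this parses (redirecting sys.stdin only for illustration, and assuming the module's LOGGER is configured; a real credential helper receives this on its actual stdin):

import io
import sys

sys.stdin = io.StringIO('protocol=https\nhost=example.com\nusername=git\n\n')
assert parse_request() == {'protocol': 'https', 'host': 'example.com', 'username': 'git'}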
def verify_leaf_inclusion(self, leaf: bytes, leaf_index: int,
                          proof: List[bytes], sth: STH):
    """Verify a Merkle Audit Path.

    See section 2.1.1 of RFC6962 for the exact path description.

    Args:
        leaf: The leaf for which the proof was provided.
        leaf_index: Index of the leaf in the tree.
        proof: A list of SHA-256 hashes representing the Merkle audit path.
        sth: STH with the same tree size as the one used to fetch the proof.
            The sha256_root_hash from this STH will be compared against the
            root hash produced from the proof.

    Returns:
        True. The return value is enforced by a decorator and need not be
            checked by the caller.

    Raises:
        ProofError: the proof is invalid.
    """
    leaf_hash = self.hasher.hash_leaf(leaf)
    return self.verify_leaf_hash_inclusion(leaf_hash, leaf_index, proof, sth)
def dumps(self):
    """Return representation of the path command."""
    ret_str = []
    for item in self._arg_list:
        if isinstance(item, TikZUserPath):
            ret_str.append(item.dumps())
        elif isinstance(item, TikZCoordinate):
            ret_str.append(item.dumps())
        elif isinstance(item, str):
            ret_str.append(item)
    return ' '.join(ret_str)
def transformer_tall_finetune_uniencdec():
    """Fine-tune CNN/DM with a unidirectional encoder and decoder."""
    hparams = transformer_tall()
    hparams.max_input_seq_length = 750
    hparams.max_target_seq_length = 100
    hparams.optimizer = "true_adam"
    hparams.learning_rate_schedule = ("linear_warmup*constant*cosdecay")
    hparams.learning_rate_decay_steps = 80000
    hparams.learning_rate_constant = 5e-5
    hparams.learning_rate_warmup_steps = 100
    hparams.unidirectional_encoder = True
    return hparams
def has_sources(self, extension=None):
    """Return `True` if this target owns sources; optionally of the given `extension`.

    :API: public

    :param string extension: Optional suffix of filenames to test for.
    :return: `True` if the target contains sources that match the optional
        extension suffix.
    :rtype: bool
    """
    source_paths = self._sources_field.source_paths
    if not source_paths:
        return False
    if not extension:
        return True
    return any(source.endswith(extension) for source in source_paths)
def get_probmodel_data(model):
    """
    Returns the model_data based on the given model.

    Parameters
    ----------
    model: BayesianModel instance
        Model to write

    Return
    ------
    model_data: dict
        dictionary containing model data of the given model.

    Examples
    --------
    >>> model_data = get_probmodel_data(model)
    """
    if not isinstance(model, BayesianModel):
        raise TypeError("Model must be an instance of BayesianModel.")

    model_data = {'probnet': {'type': 'BayesianNetwork', 'Variables': {}}}

    variables = model.nodes()
    for var in variables:
        model_data['probnet']['Variables'][var] = model.node[var]

    model_data['probnet']['edges'] = {}
    edges = model.edges()
    for edge in edges:
        model_data['probnet']['edges'][str(edge)] = model.adj[edge[0]][edge[1]]

    model_data['probnet']['Potentials'] = []
    cpds = model.get_cpds()
    for cpd in cpds:
        potential_dict = {}
        potential_dict['Variables'] = {}

        evidence = cpd.variables[:0:-1]
        if evidence:
            potential_dict['Variables'][cpd.variable] = evidence
        else:
            potential_dict['Variables'][cpd.variable] = []
        potential_dict['type'] = "Table"
        potential_dict['role'] = "conditionalProbability"
        potential_dict['Values'] = " ".join([str(val) for val in cpd.values.ravel().astype(float)]) + " "
        model_data['probnet']['Potentials'].append(potential_dict)

    return model_data
def _merge_tops_merge_all(self, tops):
    '''
    Merge the top files into a single dictionary
    '''
    def _read_tgt(tgt):
        match_type = None
        states = []
        for item in tgt:
            if isinstance(item, dict):
                match_type = item
            if isinstance(item, six.string_types):
                states.append(item)
        return match_type, states

    top = DefaultOrderedDict(OrderedDict)
    for ctops in six.itervalues(tops):
        for ctop in ctops:
            for saltenv, targets in six.iteritems(ctop):
                if saltenv == 'include':
                    continue

                try:
                    for tgt in targets:
                        if tgt not in top[saltenv]:
                            top[saltenv][tgt] = ctop[saltenv][tgt]
                            continue

                        m_type1, m_states1 = _read_tgt(top[saltenv][tgt])
                        m_type2, m_states2 = _read_tgt(ctop[saltenv][tgt])
                        merged = []
                        match_type = m_type2 or m_type1
                        if match_type is not None:
                            merged.append(match_type)
                        merged.extend(m_states1)
                        merged.extend([x for x in m_states2 if x not in merged])
                        top[saltenv][tgt] = merged
                except TypeError:
                    raise SaltRenderError('Unable to render top file. No targets found.')
    return top
def auto_discover_board(self, verbose):
    """
    This method will allow up to 30 seconds for discovery (communicating
    with) an Arduino board and then will determine a pin configuration
    table for the board.

    :return: True if board is successfully discovered or False upon timeout
    """
    # get current time
    start_time = time.time()

    # wait for up to 30 seconds for a successful capability query to occur
    while len(self.analog_mapping_query_results) == 0:
        if time.time() - start_time > 30:
            return False
        # keep sending out a capability query until there is a response
        self.send_sysex(self.ANALOG_MAPPING_QUERY)
        time.sleep(.1)

    if verbose:
        print("Board initialized in %d seconds" % (time.time() - start_time))

    for pin in self.analog_mapping_query_results:
        self.total_pins_discovered += 1
        # non analog pins will be marked as IGNORE
        if pin != self.pymata.IGNORE:
            self.number_of_analog_pins_discovered += 1

    if verbose:
        print('Total Number of Pins Detected = %d' % self.total_pins_discovered)
        print('Total Number of Analog Pins Detected = %d' % self.number_of_analog_pins_discovered)

    # response table initialization
    # for each pin set the mode to input and the last read data value to zero
    for pin in range(0, self.total_pins_discovered):
        response_entry = [self.pymata.INPUT, 0, None]
        self.digital_response_table.append(response_entry)

    for pin in range(0, self.number_of_analog_pins_discovered):
        response_entry = [self.pymata.INPUT, 0, None]
        self.analog_response_table.append(response_entry)

    # set up latching tables
    for pin in range(0, self.total_pins_discovered):
        digital_latch_table_entry = [0, 0, 0, 0, None]
        self.digital_latch_table.append(digital_latch_table_entry)

    for pin in range(0, self.number_of_analog_pins_discovered):
        analog_latch_table_entry = [0, 0, 0, 0, 0, None]
        self.analog_latch_table.append(analog_latch_table_entry)

    return True
def _cleanup_temp_dir(self, base_dir):
    """Delete given temporary directory and all its contents."""
    if self._should_cleanup_temp_dir:
        logging.debug('Cleaning up temporary directory %s.', base_dir)
        if self._user is None:
            util.rmtree(base_dir, onerror=util.log_rmtree_error)
        else:
            rm = subprocess.Popen(
                self._build_cmdline(['rm', '-rf', '--', base_dir]),
                stderr=subprocess.PIPE)
            rm_output = rm.stderr.read().decode()
            rm.stderr.close()
            if rm.wait() != 0 or rm_output:
                logging.warning("Failed to clean up temp directory %s: %s.",
                                base_dir, rm_output)
    else:
        logging.info("Skipping cleanup of temporary directory %s.", base_dir)
def available_phone_numbers(self):
    """
    Access the available_phone_numbers

    :returns: twilio.rest.api.v2010.account.available_phone_number.AvailablePhoneNumberCountryList
    :rtype: twilio.rest.api.v2010.account.available_phone_number.AvailablePhoneNumberCountryList
    """
    if self._available_phone_numbers is None:
        self._available_phone_numbers = AvailablePhoneNumberCountryList(
            self._version,
            account_sid=self._solution['sid'],
        )
    return self._available_phone_numbers
def load_stock_quantity(self, symbol: str) -> Decimal:
    """ retrieves stock quantity """
    book = self.get_gc_book()

    collection = SecuritiesAggregate(book)
    sec = collection.get_aggregate_for_symbol(symbol)
    quantity = sec.get_quantity()
    return quantity
def _update_alpha(self, event=None):
    """Update display after a change in the alpha spinbox."""
    a = self.alpha.get()
    hexa = self.hexa.get()
    hexa = hexa[:7] + ("%2.2x" % a).upper()
    self.hexa.delete(0, 'end')
    self.hexa.insert(0, hexa)
    self.alphabar.set(a)
    self._update_preview()
def _enable_read_access(self):
    """! @brief Ensure flash is accessible by initing the algo for verify.

    Not all flash memories are always accessible. For instance, external
    QSPI. Initing the flash algo for the VERIFY operation is the canonical
    way to ensure that the flash is memory mapped and accessible.
    """
    if not self.algo_inited_for_read:
        self.flash.init(self.flash.Operation.VERIFY)
        self.algo_inited_for_read = True
async def process_check_ins(self):
    """ finalize the check in phase

    |methcoro|

    Warning:
        |unstable|

    Note:
        |from_api| This should be invoked after a tournament's check-in
        window closes before the tournament is started.
        1. Marks participants who have not checked in as inactive.
        2. Moves inactive participants to bottom seeds (ordered by original seed).
        3. Transitions the tournament state from 'checking_in' to 'checked_in'
        NOTE: Checked in participants on the waiting list will be promoted
        if slots become available.

    Raises:
        APIException
    """
    params = {
        'include_participants': 1,  # forced to 1 since we need to update the Participant instances
        'include_matches': 1 if AUTO_GET_MATCHES else 0
    }
    res = await self.connection('POST', 'tournaments/{}/process_check_ins'.format(self._id), **params)
    self._refresh_from_json(res)
def generate_row_keys(self):
    """
    Method for generating key features at serving time or prediction time

    :param data: Pass in the data that is necessary for generating the keys

    Example:
        Feature: User warehouse searches and conversions
        Keys will be of the form 'user_id#warehouse_id#searches=23811676#3'
        Keys will be of the form 'user_id#warehouse_id#conversions=23811676#3'
        data frame should have values for all the columns as feature_key,
        in this case ['user_id', 'warehouse_id']

    :return:
    """
    keys = self.key
    columns = self.values
    if not self._data:
        self._data = self.get_data()
    for column in columns:
        key_prefix = self.cache_key_prefix() + "#" + column
        self._data['cache_key'] = self._data[keys].apply(
            lambda xdf: key_prefix + "=" + '#'.join(xdf.astype(str).values), axis=1)
    return list(self._data['cache_key'].values)
def launch_external_file(filename: str, raise_if_fails: bool = False) -> None:
    """
    Launches a file using the operating system's standard launcher.

    Args:
        filename: file to launch
        raise_if_fails: raise any exceptions from
            ``subprocess.call(["xdg-open", filename])`` (Linux)
            or ``os.startfile(filename)`` (otherwise)? If not, exceptions
            are suppressed.
    """
    log.info("Launching external file: {!r}", filename)
    try:
        if sys.platform.startswith('linux'):
            cmdargs = ["xdg-open", filename]
            # log.debug("... command: {!r}", cmdargs)
            subprocess.call(cmdargs)
        else:
            # log.debug("... with os.startfile()")
            # noinspection PyUnresolvedReferences
            os.startfile(filename)
    except Exception as e:
        log.critical("Error launching {!r}: error was {}.\n\n{}",
                     filename, str(e), traceback.format_exc())
        if raise_if_fails:
            raise
def add_postprocessor(postproc):
    """
    Define a postprocessor to run after the function is executed, when
    running in console script mode.

    :param postproc: The callable, which will be passed the Namespace
        object generated by argparse and the return result of the
        function. The return result of the callable will be used as the
        final return result (or as the result fed into the next
        postprocessor).
    """
    def decorator(func):
        func = ScriptAdaptor._wrap(func)
        func._add_postprocessor(postproc)
        return func
    return decorator
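A hedged usage sketch (the ScriptAdaptor wiring comes from the surrounding library; the wrapped function and the lambda are hypothetical):

# Sort the function's result before it becomes the final console-script output.
# `ns` is the argparse Namespace, `result` the function's return value.
@add_postprocessor(lambda ns, result: sorted(result))
def list_names():
    return ['beta', 'alpha', 'gamma']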
def matchiter(r, s, flags=0):
    """
    Yields contiguous MatchObjects of r in s.
    Raises ValueError if r eventually doesn't match contiguously.
    """
    if isinstance(r, basestring):
        r = re.compile(r, flags)
    i = 0
    while s:
        m = r.match(s)
        g = m and m.group(0)
        if not m or not g:
            raise ValueError("{}: {!r}".format(i, s[:50]))
        i += len(g)
        s = s[len(g):]
        yield m
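An illustrative tokenizing loop (note this function targets Python 2, since it checks against basestring):

tokens = [m.group(0) for m in matchiter(r'\d+|[a-z]+', '12ab34')]
# tokens == ['12', 'ab', '34']
list(matchiter(r'\d+', '12ab'))  # raises ValueError at offset 2: 'ab' never matches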
def _parse_connection_string(connstr):
    """
    MSSQL style connection string parser

    Returns normalized dictionary of connection string parameters
    """
    res = {}
    for item in connstr.split(';'):
        item = item.strip()
        if not item:
            continue
        key, value = item.split('=', 1)
        key = key.strip().lower().replace(' ', '_')
        value = value.strip()
        res[key] = value
    return res
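A worked example of the normalization (keys are lower-cased and spaces become underscores):

params = _parse_connection_string('Server=db1;User Id=sa;Password=secret;')
assert params == {'server': 'db1', 'user_id': 'sa', 'password': 'secret'}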
def _check_dhcp_server(self, vboxnet):
    """
    Check if the DHCP server associated with a vboxnet is enabled.

    :param vboxnet: vboxnet name
    :returns: boolean
    """
    properties = yield from self._execute("list", ["dhcpservers"])
    flag_dhcp_server_found = False
    for prop in properties.splitlines():
        try:
            name, value = prop.split(':', 1)
        except ValueError:
            continue
        if name.strip() == "NetworkName" and value.strip().endswith(vboxnet):
            flag_dhcp_server_found = True
        if flag_dhcp_server_found and name.strip() == "Enabled":
            if value.strip() == "Yes":
                return True
    return False
def _posix_split_name(self, name):
    """Split a name longer than 100 chars into a prefix
       and a name part.
    """
    prefix = name[:LENGTH_PREFIX + 1]
    while prefix and prefix[-1] != "/":
        prefix = prefix[:-1]

    name = name[len(prefix):]
    prefix = prefix[:-1]

    if not prefix or len(name) > LENGTH_NAME:
        raise ValueError("name is too long")
    return prefix, name
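A sketch of the split, assuming the ustar field limits used by the standard tarfile module (LENGTH_NAME = 100, LENGTH_PREFIX = 155) and a hypothetical instance `info` of the class defining this method:

LENGTH_NAME, LENGTH_PREFIX = 100, 155              # assumed ustar limits
long_name = 'very/long/' * 12 + 'file.txt'         # 128 chars, over the 100-char name field
prefix, name = info._posix_split_name(long_name)
# prefix holds the leading directories (no trailing slash), name == 'file.txt'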
def _track_stack_pointers(self):
    """
    For each instruction, track its stack pointer offset and stack base
    pointer offset.

    :return: None
    """
    regs = {self.project.arch.sp_offset}
    if hasattr(self.project.arch, 'bp_offset') and self.project.arch.bp_offset is not None:
        regs.add(self.project.arch.bp_offset)
    spt = self.project.analyses.StackPointerTracker(self.function, regs,
                                                    track_memory=self._sp_tracker_track_memory)
    if spt.inconsistent_for(self.project.arch.sp_offset):
        l.warning("Inconsistency found during stack pointer tracking. "
                  "Decompilation results might be incorrect.")
    return spt
def results(self):
    """Return the value and optionally derivative and second order derivative"""
    if self.deriv == 0:
        return self.v,
    if self.deriv == 1:
        return self.v, self.d
    if self.deriv == 2:
        return self.v, self.d, self.dd
def _compile_pattern(pat, ignore_case=True):
    """Translate and compile a glob pattern to a regular expression matcher."""
    if isinstance(pat, bytes):
        pat_str = pat.decode('ISO-8859-1')
        res_str = _translate_glob(pat_str)
        res = res_str.encode('ISO-8859-1')
    else:
        res = _translate_glob(pat)
    flags = re.IGNORECASE if ignore_case else 0
    return re.compile(res, flags=flags).match
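Usage sketch, assuming the module's _translate_glob helper produces a correct glob-to-regex translation:

match = _compile_pattern('*.txt')   # case-insensitive by default
assert match('NOTES.TXT')
assert not match('notes.md')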
def environ_setting(name, default=None, required=True):
    """
    Fetch setting from the environment. The behavior of the setting if it
    is not in the environment is as follows:

        1. If it is required and the default is None, raise Exception
        2. If it is required and a default exists, return default
        3. If it is not required and default is None, return None
        4. If it is not required and default exists, return default
    """
    if name not in os.environ and default is None:
        message = "The {0} ENVVAR is not set.".format(name)
        if required:
            raise ImproperlyConfigured(message)
        else:
            warnings.warn(ConfigurationMissing(message))

    return os.environ.get(name, default)
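The documented cases, as a sketch (ImproperlyConfigured and ConfigurationMissing are assumed to be defined alongside this helper):

os.environ.pop('APP_TOKEN', None)
environ_setting('APP_TOKEN', default='dev-token')   # case 2: returns 'dev-token'
environ_setting('APP_TOKEN', required=False)        # case 3: warns, returns None
environ_setting('APP_TOKEN')                        # case 1: raises ImproperlyConfigured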
def from_class(cls, target_class):
    """Create a FunctionDescriptor from a class.

    Args:
        cls: Current class which is required argument for classmethod.
        target_class: the python class used to create the function
            descriptor.

    Returns:
        The FunctionDescriptor instance created according to the class.
    """
    module_name = target_class.__module__
    class_name = target_class.__name__
    return cls(module_name, "__init__", class_name)
def get_edges_with_citations(self, citations: Iterable[Citation]) -> List[Edge]:
    """Get edges with one of the given citations."""
    return self.session.query(Edge).join(Evidence).filter(Evidence.citation.in_(citations)).all()
def Times(self, val):
    """
    Returns a new point which is pointwise multiplied by val.
    """
    return Point(self.x * val, self.y * val, self.z * val)
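For example (a minimal sketch; assumes the Point constructor sets x, y, z):

p = Point(1, 2, 3).Times(2)
assert (p.x, p.y, p.z) == (2, 4, 6)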
def stack_push(self, key, value):
    """Set a value in a task context stack"""
    task = Task.current_task()
    try:
        context = task._context_stack
    except AttributeError:
        task._context_stack = context = {}
    if key not in context:
        context[key] = []
    context[key].append(value)
def wsgi(self, environ, start_response):
    """Implements the mapper's WSGI interface."""
    request = Request(environ)
    ctx = Context(request)
    try:
        try:
            response = self(request, ctx)
            ctx._run_callbacks('finalize', (request, response))
            response = response.conditional_to(request)
        except HTTPException as e:
            response = e.response
        except Exception:
            self.handle_error(request, ctx)
            response = InternalServerError().response
        response.add_callback(lambda: ctx._run_callbacks('close'))
        return response(environ, start_response)
    finally:
        ctx._run_callbacks('teardown', log_errors=True)
Implements the mapper's WSGI interface.
def contributors(self, sr, limit=None): """Login required. GETs list of contributors to subreddit ``sr``. Returns :class:`things.ListBlob` object. **NOTE**: The :class:`things.Account` objects in the returned ListBlob *only* have ``id`` and ``name`` set. This is because that's all reddit returns. If you need full info on each contributor, you must individually GET them using :meth:`user` or :meth:`things.Account.about`. URL: ``http://www.reddit.com/r/<sr>/about/contributors/`` :param sr: name of subreddit """ userlist = self._limit_get('r', sr, 'about', 'contributors', limit=limit) return _process_userlist(userlist)
Login required. GETs list of contributors to subreddit ``sr``. Returns :class:`things.ListBlob` object. **NOTE**: The :class:`things.Account` objects in the returned ListBlob *only* have ``id`` and ``name`` set. This is because that's all reddit returns. If you need full info on each contributor, you must individually GET them using :meth:`user` or :meth:`things.Account.about`. URL: ``http://www.reddit.com/r/<sr>/about/contributors/`` :param sr: name of subreddit
def STRH(self, params): """ STRH Ra, [Rb, Rc] STRH Ra, [Rb, #imm6_2] Store Ra into memory as a half word Ra, Rb, and Rc must be low registers """ Ra, Rb, Rc = self.get_three_parameters(self.THREE_PARAMETER_WITH_BRACKETS, params) if self.is_immediate(Rc): self.check_arguments(low_registers=(Ra, Rb), imm5=(Rc,)) def STRH_func(): for i in range(2): self.memory[self.register[Rb] + self.convert_to_integer(Rc[1:]) + i] = ((self.register[Ra] >> (8 * i)) & 0xFF) else: self.check_arguments(low_registers=(Ra, Rb, Rc)) def STRH_func(): for i in range(2): self.memory[self.register[Rb] + self.register[Rc] + i] = ((self.register[Ra] >> (8 * i)) & 0xFF) return STRH_func
STRH Ra, [Rb, Rc] STRH Ra, [Rb, #imm6_2] Store Ra into memory as a half word Ra, Rb, and Rc must be low registers
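A standalone sketch of the byte-by-byte little-endian half-word store that the generated STRH_func performs, using a bytearray as the memory:

def store_halfword(memory, address, value):
    # Store the low 16 bits of ``value`` little-endian, one byte per
    # iteration, mirroring the loop inside STRH_func above.
    for i in range(2):
        memory[address + i] = (value >> (8 * i)) & 0xFF

mem = bytearray(4)
store_halfword(mem, 0, 0xBEEF)
assert mem[0] == 0xEF and mem[1] == 0xBE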
def set_values(self, x):
    """Update the ``self.theta`` parameter and return the updated value."""
    x = numpy.atleast_2d(x)
    x = x.real  # ahem
    C_inv = self.__C_inv__
    theta = numpy.dot(x, C_inv)
    self.theta = theta
    return theta
Update the ``self.theta`` parameter and return the updated value
def ADC(cpu, dest, src): """ Adds with carry. Adds the destination operand (first operand), the source operand (second operand), and the carry (CF) flag and stores the result in the destination operand. The state of the CF flag represents a carry from a previous addition. When an immediate value is used as an operand, it is sign-extended to the length of the destination operand format. The ADC instruction does not distinguish between signed or unsigned operands. Instead, the processor evaluates the result for both data types and sets the OF and CF flags to indicate a carry in the signed or unsigned result, respectively. The SF flag indicates the sign of the signed result. The ADC instruction is usually executed as part of a multibyte or multiword addition in which an ADD instruction is followed by an ADC instruction:: DEST = DEST + SRC + CF; The OF, SF, ZF, AF, CF, and PF flags are set according to the result. :param cpu: current CPU. :param dest: destination operand. :param src: source operand. """ cpu._ADD(dest, src, carry=True)
Adds with carry. Adds the destination operand (first operand), the source operand (second operand), and the carry (CF) flag and stores the result in the destination operand. The state of the CF flag represents a carry from a previous addition. When an immediate value is used as an operand, it is sign-extended to the length of the destination operand format. The ADC instruction does not distinguish between signed or unsigned operands. Instead, the processor evaluates the result for both data types and sets the OF and CF flags to indicate a carry in the signed or unsigned result, respectively. The SF flag indicates the sign of the signed result. The ADC instruction is usually executed as part of a multibyte or multiword addition in which an ADD instruction is followed by an ADC instruction:: DEST = DEST + SRC + CF; The OF, SF, ZF, AF, CF, and PF flags are set according to the result. :param cpu: current CPU. :param dest: destination operand. :param src: source operand.
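As a worked sketch of the DEST = DEST + SRC + CF semantics, here is an 8-bit add-with-carry in plain Python (not the emulator's own helper):

def adc8(dest, src, cf):
    # 8-bit add-with-carry: returns the truncated result and the new
    # carry flag, as used when chaining multibyte additions.
    total = dest + src + cf
    return total & 0xFF, int(total > 0xFF)

# 0xFF + 0x01 with carry-in 0 wraps to 0x00 and sets CF for the next limb.
assert adc8(0xFF, 0x01, 0) == (0x00, 1)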
def show(self): """ Print with a pretty display the MapList object """ bytecode._Print("MAP_LIST SIZE", self.size) for i in self.map_item: if i.item != self: # FIXME this does not work for CodeItems! # as we do not have the method analysis here... i.show()
Print with a pretty display the MapList object
def msg_curse(self, args=None, max_width=None): """Return the dict to display in the curse interface.""" # Init the return message ret = [] # Only process if stats exist and plugin not disabled if not self.stats or self.is_disable(): return ret # Build the string message # Header msg = '{}'.format('MEM') ret.append(self.curse_add_line(msg, "TITLE")) msg = ' {:2}'.format(self.trend_msg(self.get_trend('percent'))) ret.append(self.curse_add_line(msg)) # Percent memory usage msg = '{:>7.1%}'.format(self.stats['percent'] / 100) ret.append(self.curse_add_line(msg)) # Active memory usage if 'active' in self.stats: msg = ' {:9}'.format('active:') ret.append(self.curse_add_line(msg, optional=self.get_views(key='active', option='optional'))) msg = '{:>7}'.format(self.auto_unit(self.stats['active'])) ret.append(self.curse_add_line(msg, optional=self.get_views(key='active', option='optional'))) # New line ret.append(self.curse_new_line()) # Total memory usage msg = '{:6}'.format('total:') ret.append(self.curse_add_line(msg)) msg = '{:>7}'.format(self.auto_unit(self.stats['total'])) ret.append(self.curse_add_line(msg)) # Inactive memory usage if 'inactive' in self.stats: msg = ' {:9}'.format('inactive:') ret.append(self.curse_add_line(msg, optional=self.get_views(key='inactive', option='optional'))) msg = '{:>7}'.format(self.auto_unit(self.stats['inactive'])) ret.append(self.curse_add_line(msg, optional=self.get_views(key='inactive', option='optional'))) # New line ret.append(self.curse_new_line()) # Used memory usage msg = '{:6}'.format('used:') ret.append(self.curse_add_line(msg)) msg = '{:>7}'.format(self.auto_unit(self.stats['used'])) ret.append(self.curse_add_line( msg, self.get_views(key='used', option='decoration'))) # Buffers memory usage if 'buffers' in self.stats: msg = ' {:9}'.format('buffers:') ret.append(self.curse_add_line(msg, optional=self.get_views(key='buffers', option='optional'))) msg = '{:>7}'.format(self.auto_unit(self.stats['buffers'])) ret.append(self.curse_add_line(msg, optional=self.get_views(key='buffers', option='optional'))) # New line ret.append(self.curse_new_line()) # Free memory usage msg = '{:6}'.format('free:') ret.append(self.curse_add_line(msg)) msg = '{:>7}'.format(self.auto_unit(self.stats['free'])) ret.append(self.curse_add_line(msg)) # Cached memory usage if 'cached' in self.stats: msg = ' {:9}'.format('cached:') ret.append(self.curse_add_line(msg, optional=self.get_views(key='cached', option='optional'))) msg = '{:>7}'.format(self.auto_unit(self.stats['cached'])) ret.append(self.curse_add_line(msg, optional=self.get_views(key='cached', option='optional'))) return ret
Return the dict to display in the curse interface.
def set_current_operation_progress(self, percent): """Internal method, not to be called externally. in percent of type int """ if not isinstance(percent, baseinteger): raise TypeError("percent can only be an instance of type baseinteger") self._call("setCurrentOperationProgress", in_p=[percent])
Internal method, not to be called externally. in percent of type int
def delete_connection(self, **kwargs): """Remove a single connection to a provider for the specified user.""" conn = self.find_connection(**kwargs) if not conn: return False self.delete(conn) return True
Remove a single connection to a provider for the specified user.
def p_genvarlist(self, p): 'genvarlist : genvarlist COMMA genvar' p[0] = p[1] + (p[3],) p.set_lineno(0, p.lineno(1))
genvarlist : genvarlist COMMA genvar
def _compute_total_chunks(self, chunk_size): # type: (Descriptor, int) -> int """Compute total number of chunks for entity :param Descriptor self: this :param int chunk_size: chunk size :rtype: int :return: num chunks """ try: return int(math.ceil(self._ase.size / chunk_size)) except ZeroDivisionError: return 0
Compute total number of chunks for entity :param Descriptor self: this :param int chunk_size: chunk size :rtype: int :return: num chunks
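The same ceil-division-with-guard logic as a standalone sketch:

import math

def total_chunks(size, chunk_size):
    # ceil(size / chunk_size), with the same zero-chunk-size guard.
    try:
        return int(math.ceil(size / chunk_size))
    except ZeroDivisionError:
        return 0

assert total_chunks(10, 4) == 3   # chunks of 4 + 4 + 2
assert total_chunks(10, 0) == 0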
def call_moses_detokenizer(workspace_dir: str, input_fname: str, output_fname: str,
                           lang_code: Optional[str] = None):
    """
    Call Moses detokenizer.

    :param workspace_dir: Workspace third-party directory where Moses
           tokenizer is checked out.
    :param input_fname: Path of tokenized input file, plain text or gzipped.
    :param output_fname: Path of detokenized output file, plain text.
    :param lang_code: Language code for rules and non-breaking prefixes.
           Can be None if unknown (using pre-tokenized data), which will
           cause the tokenizer to default to English.
    """
    detokenizer_fname = os.path.join(workspace_dir, DIR_THIRD_PARTY, MOSES_DEST,
                                     "scripts", "tokenizer", "detokenizer.perl")
    with bin_open(input_fname) as inp, open(output_fname, "wb") as out, \
            open(os.devnull, "wb") as devnull:
        command = ["perl", detokenizer_fname]
        if lang_code:
            command.append("-l")
            command.append(lang_code)
        detokenizer = subprocess.Popen(command,
                                       stdin=subprocess.PIPE,
                                       stdout=subprocess.PIPE,
                                       stderr=devnull)
        detokenizer_thread = threading.Thread(target=copy_out,
                                              args=(detokenizer.stdout, out))
        detokenizer_thread.start()
        for line in inp:
            detokenizer.stdin.write(line)
        detokenizer.stdin.close()
        detokenizer_thread.join()
        detokenizer.wait()
Call Moses detokenizer. :param workspace_dir: Workspace third-party directory where Moses tokenizer is checked out. :param input_fname: Path of tokenized input file, plain text or gzipped. :param output_fname: Path of detokenized output file, plain text. :param lang_code: Language code for rules and non-breaking prefixes. Can be None if unknown (using pre-tokenized data), which will cause the tokenizer to default to English.
def convert_relational(relational):
    """Convert all inequalities to >=0 form.
    """
    rel = relational.rel_op
    if rel in ['==', '>=', '>']:
        return relational.lhs - relational.rhs
    elif rel in ['<=', '<']:
        return relational.rhs - relational.lhs
    else:
        raise Exception("The relational operation " + rel +
                        " is not implemented!")
Convert all inequalities to >=0 form.
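A usage sketch, assuming SymPy is installed; a relational built with >= exposes the rel_op, lhs and rhs attributes the converter reads:

from sympy import symbols

x = symbols('x')
ineq = x >= 3                 # SymPy builds a GreaterThan relational
assert ineq.rel_op == '>='
# rel_op is '>=', so lhs - rhs puts the constraint in >= 0 form:
print(ineq.lhs - ineq.rhs)    # -> x - 3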
def window_iterator(data, width):
    """
    Instead of iterating element by element, get a number of elements at
    each iteration step.

    :param data: data to iterate on
    :param width: maximum number of elements to get in each iteration step
    :return: successive slices of ``data``, each of length at most ``width``
    """
    start = 0
    while start < len(data):
        yield data[start:start + width]
        start += width
Instead of iterating element by element, get a number of elements at each iteration step. :param data: data to iterate on :param width: maximum number of elements to get in each iteration step :return: successive slices of ``data``, each of length at most ``width``
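A standalone copy of the generator and a usage example; note the final window may be shorter than width:

def window_iterator(data, width):
    start = 0
    while start < len(data):
        yield data[start:start + width]
        start += width

# Chunks of at most 3 elements; the last window holds the remainder.
print(list(window_iterator(list(range(7)), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]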
def _paged_api_call(self, func, kwargs, item_type='photo'): """ Takes a Flickr API function object and dict of keyword args and calls the API call repeatedly with an incrementing page value until all contents are exhausted. Flickr seems to limit to about 500 items. """ page = 1 while True: LOG.info("Fetching page %s" % page) kwargs['page'] = page rsp = self._load_rsp(func(**kwargs)) if rsp["stat"] == "ok": plural = item_type + 's' if plural in rsp: items = rsp[plural] if int(items["page"]) < page: LOG.info("End of Flickr pages (%s pages with %s per page)" % (items["pages"], items["perpage"])) break for i in items[item_type]: yield self._prep(i) else: yield rsp page += 1 else: yield [rsp] break
Takes a Flickr API function object and dict of keyword args and calls the API call repeatedly with an incrementing page value until all contents are exhausted. Flickr seems to limit to about 500 items.
def get_agile_board(self, board_id): """ Get agile board info by id :param board_id: :return: """ url = 'rest/agile/1.0/board/{}'.format(str(board_id)) return self.get(url)
Get agile board info by id :param board_id: :return:
def add_boundary_regions(regions=None, faces=['front', 'back', 'left',
                                              'right', 'top', 'bottom']):
    r"""
    Given an image partitioned into regions, pads specified faces with new
    regions

    Parameters
    ----------
    regions : ND-array
        An image of the pore space partitioned into regions and labeled
    faces : list of strings
        The faces of ``regions`` which should have boundaries added.
        Options are:

        *'right'* - Adds boundaries to the x=0 face (``im[0, :, :]``)

        *'left'* - Adds boundaries to the x=X face (``im[-1, :, :]``)

        *'front'* - Adds boundaries to the y=0 face (``im[:, 0, :]``)

        *'back'* - Adds boundaries to the y=Y face (``im[:, -1, :]``)

        *'bottom'* - Adds boundaries to the z=0 face (``im[:, :, 0]``)

        *'top'* - Adds boundaries to the z=Z face (``im[:, :, -1]``)

        The default is all faces.

    Returns
    -------
    image : ND-array
        A copy of ``regions`` with the specified boundaries added, so will
        be slightly larger in each direction where boundaries were added.
    """
    # -------------------------------------------------------------------------
    # Edge pad segmentation and distance transform
    if faces is not None:
        regions = sp.pad(regions, 1, 'edge')
        # ---------------------------------------------------------------------
        if regions.ndim == 3:
            # Remove boundary nodes interconnection
            regions[:, :, 0] = regions[:, :, 0] + regions.max()
            regions[:, :, -1] = regions[:, :, -1] + regions.max()
            regions[0, :, :] = regions[0, :, :] + regions.max()
            regions[-1, :, :] = regions[-1, :, :] + regions.max()
            regions[:, 0, :] = regions[:, 0, :] + regions.max()
            regions[:, -1, :] = regions[:, -1, :] + regions.max()
            regions[:, :, 0] = (~find_boundaries(regions[:, :, 0],
                                                 mode='outer')) * regions[:, :, 0]
            regions[:, :, -1] = (~find_boundaries(regions[:, :, -1],
                                                  mode='outer')) * regions[:, :, -1]
            regions[0, :, :] = (~find_boundaries(regions[0, :, :],
                                                 mode='outer')) * regions[0, :, :]
            regions[-1, :, :] = (~find_boundaries(regions[-1, :, :],
                                                  mode='outer')) * regions[-1, :, :]
            regions[:, 0, :] = (~find_boundaries(regions[:, 0, :],
                                                 mode='outer')) * regions[:, 0, :]
            regions[:, -1, :] = (~find_boundaries(regions[:, -1, :],
                                                  mode='outer')) * regions[:, -1, :]
            # -----------------------------------------------------------------
            regions = sp.pad(regions, 2, 'edge')
            # Remove unselected faces
            if 'front' not in faces:
                regions = regions[:, 3:, :]  # y
            if 'back' not in faces:
                regions = regions[:, :-3, :]
            if 'left' not in faces:
                regions = regions[3:, :, :]  # x
            if 'right' not in faces:
                regions = regions[:-3, :, :]
            if 'bottom' not in faces:
                regions = regions[:, :, 3:]  # z
            if 'top' not in faces:
                regions = regions[:, :, :-3]
        elif regions.ndim == 2:
            # Remove boundary nodes interconnection
            regions[0, :] = regions[0, :] + regions.max()
            regions[-1, :] = regions[-1, :] + regions.max()
            regions[:, 0] = regions[:, 0] + regions.max()
            regions[:, -1] = regions[:, -1] + regions.max()
            regions[0, :] = (~find_boundaries(regions[0, :],
                                              mode='outer')) * regions[0, :]
            regions[-1, :] = (~find_boundaries(regions[-1, :],
                                               mode='outer')) * regions[-1, :]
            regions[:, 0] = (~find_boundaries(regions[:, 0],
                                              mode='outer')) * regions[:, 0]
            regions[:, -1] = (~find_boundaries(regions[:, -1],
                                               mode='outer')) * regions[:, -1]
            # -----------------------------------------------------------------
            regions = sp.pad(regions, 2, 'edge')
            # Remove unselected faces
            if 'left' not in faces:
                regions = regions[3:, :]  # x
            if 'right' not in faces:
                regions = regions[:-3, :]
            if 'front' not in faces and 'bottom' not in faces:
                regions = regions[:, 3:]  # y
            if 'back' not in faces and 'top' not in faces:
                regions = regions[:, :-3]
        else:
            print('add_boundary_regions works only on 2D and 3D images')
        # ---------------------------------------------------------------------
        # Make labels contiguous
        regions = make_contiguous(regions)
    else:
        regions = regions
    return regions
r""" Given an image partitioned into regions, pads specified faces with new regions Parameters ---------- regions : ND-array An image of the pore space partitioned into regions and labeled faces : list of strings The faces of ``regions`` which should have boundaries added. Options are: *'right'* - Adds boundaries to the x=0 face (``im[0, :, :]``) *'left'* - Adds boundaries to the x=X face (``im[-1, :, :]``) *'front'* - Adds boundaries to the y=0 face (``im[:, ), :]``) *'back'* - Adds boundaries to the x=0 face (``im[:, -1, :]``) *'bottom'* - Adds boundaries to the x=0 face (``im[:, :, 0]``) *'top'* - Adds boundaries to the x=0 face (``im[:, :, -1]``) The default is all faces. Returns ------- image : ND-array A copy of ``regions`` with the specified boundaries added, so will be slightly larger in each direction where boundaries were added.
def clone(self, instance):
    '''
    Create a shallow clone of an *instance*.

    **Note:** the clone and the original instance **do not** have to be
    part of the same metaclass.
    '''
    metaclass = get_metaclass(instance)
    metaclass = self.find_metaclass(metaclass.kind)
    return metaclass.clone(instance)
Create a shallow clone of an *instance*. **Note:** the clone and the original instance **do not** have to be part of the same metaclass.
def channels(self): """List[:class:`abc.GuildChannel`]: Returns the channels that are under this category. These are sorted by the official Discord UI, which places voice channels below the text channels. """ def comparator(channel): return (not isinstance(channel, TextChannel), channel.position) ret = [c for c in self.guild.channels if c.category_id == self.id] ret.sort(key=comparator) return ret
List[:class:`abc.GuildChannel`]: Returns the channels that are under this category. These are sorted by the official Discord UI, which places voice channels below the text channels.
def parse_dossier_data(data, ep):
    """Parse data from parltrack dossier export (1 dossier)

    Update dossier if it existed before; this function's goal is to import
    and update a dossier, not to import all parltrack data
    """
    changed = False
    doc_changed = False
    ref = data['procedure']['reference']
    logger.debug('Processing dossier %s', ref)

    with transaction.atomic():
        try:
            dossier = Dossier.objects.get(reference=ref)
        except Dossier.DoesNotExist:
            dossier = Dossier(reference=ref)
            logger.debug('Dossier did not exist')
            changed = True

        if dossier.title != data['procedure']['title']:
            logger.debug('Title changed from "%s" to "%s"', dossier.title,
                         data['procedure']['title'])
            dossier.title = data['procedure']['title']
            changed = True

        if changed:
            logger.info('Updated dossier %s', ref)
            dossier.save()

        source = data['meta']['source'].replace('&l=en', '')

        try:
            doc = Document.objects.get(dossier=dossier, kind='procedure-file')
        except Document.DoesNotExist:
            doc = Document(dossier=dossier, kind='procedure-file', chamber=ep)
            logger.debug('Document for dossier %s did not exist', ref)
            doc_changed = True

        if doc.link != source:
            logger.debug('Link changed from %s to %s', doc.link, source)
            doc.link = source
            doc_changed = True

        if doc_changed:
            logger.info('Updated document %s for dossier %s', doc.link, ref)
            doc.save()

    if 'votes' in data.keys() and 'epref' in data['votes']:
        command = Command()
        command.init_cache()
        command.parse_vote_data(data['votes'])
Parse data from parltrack dossier export (1 dossier) Update dossier if it existed before; this function's goal is to import and update a dossier, not to import all parltrack data
def _compute_a22_factor(self, imt): """ Compute and return the a22 factor, equation 20, page 80. """ if imt.name == 'PGV': return 0.0 period = imt.period if period < 2.0: return 0.0 else: return 0.0625 * (period - 2.0)
Compute and return the a22 factor, equation 20, page 80.
def precompile_python_code(context: Context): """ Pre-compiles python modules """ from compileall import compile_dir kwargs = {} if context.verbosity < 2: kwargs['quiet'] = True compile_dir(context.app.django_app_name, **kwargs)
Pre-compiles python modules
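A minimal sketch of the underlying stdlib call, with 'myapp' as a hypothetical package directory:

from compileall import compile_dir

# compile_dir walks a package directory and writes .pyc files;
# quiet=True suppresses the per-file listing, matching the verbosity
# guard in the task above.
compile_dir('myapp', quiet=True)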
def try_greyscale(pixels, alpha=False, dirty_alpha=True):
    """
    Check if flatboxed RGB `pixels` could be converted to greyscale.

    If they can, return an iterator of greyscale pixels; otherwise return
    the `False` constant.
    """
    planes = 3 + bool(alpha)
    res = list()
    apix = list()
    for row in pixels:
        green = row[1::planes]
        if alpha:
            # alpha channel bytes sit at indices 3, 7, 11, ...
            apix.append(row[3::planes])
        if (green != row[0::planes] or green != row[2::planes]):
            return False
        else:
            res.append(green)
    if alpha:
        return MergedPlanes(res, 1, apix, 1)
    else:
        return res
Check if flatboxed RGB `pixels` could be converted to greyscale. If they can, return an iterator of greyscale pixels; otherwise return the `False` constant.
def get_gan_loss(self, true_frames, gen_frames, name): """Get the discriminator + generator loss at every step. This performs an 1:1 update of the discriminator and generator at every step. Args: true_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C) Assumed to be ground truth. gen_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C) Assumed to be fake. name: discriminator scope. Returns: loss: 0-D Tensor, with d_loss + g_loss """ # D - STEP with tf.variable_scope("%s_discriminator" % name, reuse=tf.AUTO_REUSE): gan_d_loss, _, fake_logits_stop = self.d_step( true_frames, gen_frames) # G - STEP with tf.variable_scope("%s_discriminator" % name, reuse=True): gan_g_loss_pos_d, gan_g_loss_neg_d = self.g_step( gen_frames, fake_logits_stop) gan_g_loss = gan_g_loss_pos_d + gan_g_loss_neg_d tf.summary.scalar("gan_loss_%s" % name, gan_g_loss_pos_d + gan_d_loss) if self.hparams.gan_optimization == "joint": gan_loss = gan_g_loss + gan_d_loss else: curr_step = self.get_iteration_num() gan_loss = tf.cond( tf.logical_not(curr_step % 2 == 0), lambda: gan_g_loss, lambda: gan_d_loss) return gan_loss
Get the discriminator + generator loss at every step. This performs an 1:1 update of the discriminator and generator at every step. Args: true_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C) Assumed to be ground truth. gen_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C) Assumed to be fake. name: discriminator scope. Returns: loss: 0-D Tensor, with d_loss + g_loss
def read_lock(self): """Context manager that grants a read lock. Will wait until no active or pending writers. Raises a ``RuntimeError`` if a pending writer tries to acquire a read lock. """ me = self._current_thread() if me in self._pending_writers: raise RuntimeError("Writer %s can not acquire a read lock" " while waiting for the write lock" % me) with self._cond: while True: # No active writer, or we are the writer; # we are good to become a reader. if self._writer is None or self._writer == me: try: self._readers[me] = self._readers[me] + 1 except KeyError: self._readers[me] = 1 break # An active writer; guess we have to wait. self._cond.wait() try: yield self finally: # I am no longer a reader, remove *one* occurrence of myself. # If the current thread acquired two read locks, then it will # still have to remove that other read lock; this allows for # basic reentrancy to be possible. with self._cond: try: me_instances = self._readers[me] if me_instances > 1: self._readers[me] = me_instances - 1 else: self._readers.pop(me) except KeyError: pass self._cond.notify_all()
Context manager that grants a read lock. Will wait until no active or pending writers. Raises a ``RuntimeError`` if a pending writer tries to acquire a read lock.
def locate_point(nodes, x_val, y_val): r"""Find the parameter corresponding to a point on a curve. .. note:: This assumes that the curve :math:`B(s, t)` defined by ``nodes`` lives in :math:`\mathbf{R}^2`. Args: nodes (numpy.ndarray): The nodes defining a B |eacute| zier curve. x_val (float): The :math:`x`-coordinate of the point. y_val (float): The :math:`y`-coordinate of the point. Returns: Optional[float]: The parameter on the curve (if it exists). """ # First, reduce to the true degree of x(s) and y(s). zero1 = _curve_helpers.full_reduce(nodes[[0], :]) - x_val zero2 = _curve_helpers.full_reduce(nodes[[1], :]) - y_val # Make sure we have the lowest degree in front, to make the polynomial # solve have the fewest number of roots. if zero1.shape[1] > zero2.shape[1]: zero1, zero2 = zero2, zero1 # If the "smallest" is a constant, we can't find any roots from it. if zero1.shape[1] == 1: # NOTE: We assume that callers won't pass ``nodes`` that are # degree 0, so if ``zero1`` is a constant, ``zero2`` won't be. zero1, zero2 = zero2, zero1 power_basis1 = poly_to_power_basis(zero1[0, :]) all_roots = roots_in_unit_interval(power_basis1) if all_roots.size == 0: return None # NOTE: We normalize ``power_basis2`` because we want to check for # "zero" values, i.e. f2(s) == 0. power_basis2 = normalize_polynomial(poly_to_power_basis(zero2[0, :])) near_zero = np.abs(polynomial.polyval(all_roots, power_basis2)) index = np.argmin(near_zero) if near_zero[index] < _ZERO_THRESHOLD: return all_roots[index] return None
r"""Find the parameter corresponding to a point on a curve. .. note:: This assumes that the curve :math:`B(s, t)` defined by ``nodes`` lives in :math:`\mathbf{R}^2`. Args: nodes (numpy.ndarray): The nodes defining a B |eacute| zier curve. x_val (float): The :math:`x`-coordinate of the point. y_val (float): The :math:`y`-coordinate of the point. Returns: Optional[float]: The parameter on the curve (if it exists).
def _get_version_for_class_from_state(state, klass):
    """
    Retrieve the version of the given klass from the state, consulting the
    registry that maps old class locations to new ones.
    """
    # klass may have been renamed, so we have to look this up in the class
    # rename registry.
    names = [_importable_name(klass)]
    # lookup old names, handled by current klass.
    from .util import class_rename_registry
    names.extend(class_rename_registry.old_handled_by(klass))
    for n in names:
        try:
            return state['class_tree_versions'][n]
        except KeyError:
            continue
    # if we did not find a suitable version number, return infinity.
    if _debug:
        logger.debug('unable to obtain a __serialize_version for class %s',
                     klass)
    return float('inf')
Retrieve the version of the given klass from the state, consulting the registry that maps old class locations to new ones.
def get_device_status(host, services=None, zconf=None): """ :param host: Hostname or ip to fetch status from :type host: str :return: The device status as a named tuple. :rtype: pychromecast.dial.DeviceStatus or None """ try: status = _get_status( host, services, zconf, "/setup/eureka_info?options=detail") friendly_name = status.get('name', "Unknown Chromecast") model_name = "Unknown model name" manufacturer = "Unknown manufacturer" if 'detail' in status: model_name = status['detail'].get('model_name', model_name) manufacturer = status['detail'].get('manufacturer', manufacturer) udn = status.get('ssdp_udn', None) cast_type = CAST_TYPES.get(model_name.lower(), CAST_TYPE_CHROMECAST) uuid = None if udn: uuid = UUID(udn.replace('-', '')) return DeviceStatus(friendly_name, model_name, manufacturer, uuid, cast_type) except (requests.exceptions.RequestException, OSError, ValueError): return None
:param host: Hostname or ip to fetch status from :type host: str :return: The device status as a named tuple. :rtype: pychromecast.dial.DeviceStatus or None
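A quick sketch of the UDN-to-UUID step used above; the UDN value here is hypothetical:

from uuid import UUID

# Stripping the dashes before handing the SSDP UDN to UUID is exactly
# what the status parser above does; UUID accepts the bare 32 hex digits.
udn = '2f402f80-da50-11e1-9b23-000000000000'   # hypothetical UDN
print(UUID(udn.replace('-', '')))  # -> 2f402f80-da50-11e1-9b23-000000000000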
def _create_app(self): """ Method for creating a new Application Template. USAGE: cloud-harness create <dir_name> [--destination=<path>] """ template_path = os.path.join( os.path.dirname(os.path.dirname(os.path.abspath(__file__))), self.TEMPLATE_FOLDER, self.TEMPLATE_FILENAME ) new_dir = self._arguments['<dir_name>'] # Make new application directory override_destination = self._arguments.get('--destination', None) if override_destination is not None: if override_destination == '': raise ValueError('Destination path is empty') # Check if the new destination is abs and exists. if os.path.isabs(override_destination) and os.path.isdir(override_destination): new_dir = os.path.join(override_destination, new_dir) else: # Create a path from the cwd, then check if it is valid and exists. override_path = os.path.join(os.getcwd(), override_destination) if not os.path.isabs(override_path) or not os.path.isdir(override_path): raise ValueError('New path parameter %s is not a directory' % override_destination) new_dir = os.path.join(override_path, new_dir) else: if os.path.isabs(new_dir) or os.path.sep in new_dir: raise ValueError("Directory name is invalid") # No override, put the folder in the cwd. new_dir = os.path.join(os.getcwd(), new_dir) os.makedirs(new_dir) new_file_path = os.path.join(new_dir, self.DEFAULT_NEW_APP_FILENAME) # Copy the template the new application location. shutil.copyfile(template_path, new_file_path) printer('New Application created at %s' % new_file_path)
Method for creating a new Application Template. USAGE: cloud-harness create <dir_name> [--destination=<path>]
def getValue(self): ''' Returns str(option_string * DropDown Value) e.g. -vvvvv ''' dropdown_value = self.widget.GetValue() if not str(dropdown_value).isdigit(): return '' arg = str(self.option_string).replace('-', '') repeated_args = arg * int(dropdown_value) return '-' + repeated_args
Returns str(option_string * DropDown Value) e.g. -vvvvv
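The dropdown-to-flag translation as a standalone sketch:

def repeated_flag(option_string, count):
    # '-v' repeated count times collapses to '-vvv...', mirroring the
    # dropdown-value expansion above.
    arg = option_string.replace('-', '')
    return '-' + arg * count

assert repeated_flag('-v', 5) == '-vvvvv'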
def repr_imgs(imgs):
    """Return a printable representation of an image or an iterable of images."""
    if isinstance(imgs, string_types):
        return imgs

    if isinstance(imgs, collections.Iterable):
        return '[{}]'.format(', '.join(repr_imgs(img) for img in imgs))

    # try get_filename
    try:
        filename = imgs.get_filename()
        if filename is not None:
            img_str = "{}('{}')".format(imgs.__class__.__name__, filename)
        else:
            img_str = "{}(shape={}, affine={})".format(imgs.__class__.__name__,
                                                       repr(get_shape(imgs)),
                                                       repr(imgs.get_affine()))
    except Exception as exc:
        log.error('Error reading attributes from img.get_filename()')
        return repr(imgs)
    else:
        return img_str
Return a printable representation of an image or an iterable of images
def _add_games_to_schedule(self, schedule):
    """
    Add game information to list of games.

    Create a Game instance for the given game in the schedule and add it to
    the list of games the team has or will play during the season.

    Parameters
    ----------
    schedule : PyQuery object
        A PyQuery object pertaining to a team's schedule table.
    """
    for item in schedule:
        if 'class="thead"' in str(item) or \
           'class="over_header thead"' in str(item):
            continue  # pragma: no cover
        game = Game(item)
        self._games.append(game)
Add game information to list of games. Create a Game instance for the given game in the schedule and add it to the list of games the team has or will play during the season. Parameters ---------- schedule : PyQuery object A PyQuery object pertaining to a team's schedule table.
def main(): ''' Simple examples ''' args = parse_arguments() if args.askpass: password = getpass.getpass("Password: ") else: password = None if args.asksudopass: sudo = True sudo_pass = getpass.getpass("Sudo password[default ssh password]: ") if len(sudo_pass) == 0: sudo_pass = password sudo_user = 'root' else: sudo = False sudo_pass = None sudo_user = None if not args.username: username = getpass.getuser() else: username = args.username host_list = args.hosts os.environ["ANSIBLE_HOST_KEY_CHECKING"] = "False" execute_ping(host_list, username, password, sudo=sudo, sudo_user=sudo_user, sudo_pass=sudo_pass)
Simple examples
def where(self, fieldname, value, negate=False): """ Returns a new DataTable with rows only where the value at `fieldname` == `value`. """ if negate: return self.mask([elem != value for elem in self[fieldname]]) else: return self.mask([elem == value for elem in self[fieldname]])
Returns a new DataTable with rows only where the value at `fieldname` == `value`.
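A standalone sketch of mask-based row filtering over a hypothetical dict-of-lists table (not the DataTable class itself):

table = {'city': ['NY', 'SF', 'NY'], 'pop': [8, 1, 8]}

def where(table, fieldname, value, negate=False):
    # Build the boolean mask once, then apply it to every column.
    keep = [(elem != value) if negate else (elem == value)
            for elem in table[fieldname]]
    return {k: [v for v, m in zip(col, keep) if m]
            for k, col in table.items()}

print(where(table, 'city', 'NY'))  # {'city': ['NY', 'NY'], 'pop': [8, 8]}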
def modify_replication_group(ReplicationGroupId=None, ReplicationGroupDescription=None, PrimaryClusterId=None, SnapshottingClusterId=None, AutomaticFailoverEnabled=None, CacheSecurityGroupNames=None, SecurityGroupIds=None, PreferredMaintenanceWindow=None, NotificationTopicArn=None, CacheParameterGroupName=None, NotificationTopicStatus=None, ApplyImmediately=None, EngineVersion=None, AutoMinorVersionUpgrade=None, SnapshotRetentionLimit=None, SnapshotWindow=None, CacheNodeType=None, NodeGroupId=None): """ Modifies the settings for a replication group. See also: AWS API Documentation :example: response = client.modify_replication_group( ReplicationGroupId='string', ReplicationGroupDescription='string', PrimaryClusterId='string', SnapshottingClusterId='string', AutomaticFailoverEnabled=True|False, CacheSecurityGroupNames=[ 'string', ], SecurityGroupIds=[ 'string', ], PreferredMaintenanceWindow='string', NotificationTopicArn='string', CacheParameterGroupName='string', NotificationTopicStatus='string', ApplyImmediately=True|False, EngineVersion='string', AutoMinorVersionUpgrade=True|False, SnapshotRetentionLimit=123, SnapshotWindow='string', CacheNodeType='string', NodeGroupId='string' ) :type ReplicationGroupId: string :param ReplicationGroupId: [REQUIRED] The identifier of the replication group to modify. :type ReplicationGroupDescription: string :param ReplicationGroupDescription: A description for the replication group. Maximum length is 255 characters. :type PrimaryClusterId: string :param PrimaryClusterId: For replication groups with a single primary, if this parameter is specified, ElastiCache promotes the specified cluster in the specified replication group to the primary role. The nodes of all other clusters in the replication group are read replicas. :type SnapshottingClusterId: string :param SnapshottingClusterId: The cache cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis (cluster mode enabled) replication groups. :type AutomaticFailoverEnabled: boolean :param AutomaticFailoverEnabled: Determines whether a read replica is automatically promoted to read/write primary if the existing primary encounters a failure. Valid values: true | false Note ElastiCache Multi-AZ replication groups are not supported on: Redis versions earlier than 2.8.6. Redis (cluster mode disabled):T1 and T2 cache node types. Redis (cluster mode enabled): T1 node types. :type CacheSecurityGroupNames: list :param CacheSecurityGroupNames: A list of cache security group names to authorize for the clusters in this replication group. This change is asynchronously applied as soon as possible. This parameter can be used only with replication group containing cache clusters running outside of an Amazon Virtual Private Cloud (Amazon VPC). Constraints: Must contain no more than 255 alphanumeric characters. Must not be Default . (string) -- :type SecurityGroupIds: list :param SecurityGroupIds: Specifies the VPC Security Groups associated with the cache clusters in the replication group. This parameter can be used only with replication group containing cache clusters running in an Amazon Virtual Private Cloud (Amazon VPC). (string) -- :type PreferredMaintenanceWindow: string :param PreferredMaintenanceWindow: Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. 
Valid values for ddd are: sun mon tue wed thu fri sat Example: sun:23:00-mon:01:30 :type NotificationTopicArn: string :param NotificationTopicArn: The Amazon Resource Name (ARN) of the Amazon SNS topic to which notifications are sent. Note The Amazon SNS topic owner must be same as the replication group owner. :type CacheParameterGroupName: string :param CacheParameterGroupName: The name of the cache parameter group to apply to all of the clusters in this replication group. This change is asynchronously applied as soon as possible for parameters when the ApplyImmediately parameter is specified as true for this request. :type NotificationTopicStatus: string :param NotificationTopicStatus: The status of the Amazon SNS notification topic for the replication group. Notifications are sent only if the status is active . Valid values: active | inactive :type ApplyImmediately: boolean :param ApplyImmediately: If true , this parameter causes the modifications in this request and any pending modifications to be applied, asynchronously and as soon as possible, regardless of the PreferredMaintenanceWindow setting for the replication group. If false , changes to the nodes in the replication group are applied on the next maintenance reboot, or the next failure reboot, whichever occurs first. Valid values: true | false Default: false :type EngineVersion: string :param EngineVersion: The upgraded version of the cache engine to be run on the cache clusters in the replication group. Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version ), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing replication group and create it anew with the earlier engine version. :type AutoMinorVersionUpgrade: boolean :param AutoMinorVersionUpgrade: This parameter is currently disabled. :type SnapshotRetentionLimit: integer :param SnapshotRetentionLimit: The number of days for which ElastiCache retains automatic node group (shard) snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot that was taken today is retained for 5 days before being deleted. Important If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off. :type SnapshotWindow: string :param SnapshotWindow: The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of the node group (shard) specified by SnapshottingClusterId . Example: 05:00-09:00 If you do not specify this parameter, ElastiCache automatically chooses an appropriate time range. :type CacheNodeType: string :param CacheNodeType: A valid cache node type that you want to scale this replication group to. :type NodeGroupId: string :param NodeGroupId: The name of the Node Group (called shard in the console). 
:rtype: dict :return: { 'ReplicationGroup': { 'ReplicationGroupId': 'string', 'Description': 'string', 'Status': 'string', 'PendingModifiedValues': { 'PrimaryClusterId': 'string', 'AutomaticFailoverStatus': 'enabled'|'disabled' }, 'MemberClusters': [ 'string', ], 'NodeGroups': [ { 'NodeGroupId': 'string', 'Status': 'string', 'PrimaryEndpoint': { 'Address': 'string', 'Port': 123 }, 'Slots': 'string', 'NodeGroupMembers': [ { 'CacheClusterId': 'string', 'CacheNodeId': 'string', 'ReadEndpoint': { 'Address': 'string', 'Port': 123 }, 'PreferredAvailabilityZone': 'string', 'CurrentRole': 'string' }, ] }, ], 'SnapshottingClusterId': 'string', 'AutomaticFailover': 'enabled'|'disabled'|'enabling'|'disabling', 'ConfigurationEndpoint': { 'Address': 'string', 'Port': 123 }, 'SnapshotRetentionLimit': 123, 'SnapshotWindow': 'string', 'ClusterEnabled': True|False, 'CacheNodeType': 'string' } } :returns: Redis versions earlier than 2.8.6. Redis (cluster mode disabled):T1 and T2 cache node types. Redis (cluster mode enabled): T1 node types. """ pass
Modifies the settings for a replication group. See also: AWS API Documentation :example: response = client.modify_replication_group( ReplicationGroupId='string', ReplicationGroupDescription='string', PrimaryClusterId='string', SnapshottingClusterId='string', AutomaticFailoverEnabled=True|False, CacheSecurityGroupNames=[ 'string', ], SecurityGroupIds=[ 'string', ], PreferredMaintenanceWindow='string', NotificationTopicArn='string', CacheParameterGroupName='string', NotificationTopicStatus='string', ApplyImmediately=True|False, EngineVersion='string', AutoMinorVersionUpgrade=True|False, SnapshotRetentionLimit=123, SnapshotWindow='string', CacheNodeType='string', NodeGroupId='string' ) :type ReplicationGroupId: string :param ReplicationGroupId: [REQUIRED] The identifier of the replication group to modify. :type ReplicationGroupDescription: string :param ReplicationGroupDescription: A description for the replication group. Maximum length is 255 characters. :type PrimaryClusterId: string :param PrimaryClusterId: For replication groups with a single primary, if this parameter is specified, ElastiCache promotes the specified cluster in the specified replication group to the primary role. The nodes of all other clusters in the replication group are read replicas. :type SnapshottingClusterId: string :param SnapshottingClusterId: The cache cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis (cluster mode enabled) replication groups. :type AutomaticFailoverEnabled: boolean :param AutomaticFailoverEnabled: Determines whether a read replica is automatically promoted to read/write primary if the existing primary encounters a failure. Valid values: true | false Note ElastiCache Multi-AZ replication groups are not supported on: Redis versions earlier than 2.8.6. Redis (cluster mode disabled):T1 and T2 cache node types. Redis (cluster mode enabled): T1 node types. :type CacheSecurityGroupNames: list :param CacheSecurityGroupNames: A list of cache security group names to authorize for the clusters in this replication group. This change is asynchronously applied as soon as possible. This parameter can be used only with replication group containing cache clusters running outside of an Amazon Virtual Private Cloud (Amazon VPC). Constraints: Must contain no more than 255 alphanumeric characters. Must not be Default . (string) -- :type SecurityGroupIds: list :param SecurityGroupIds: Specifies the VPC Security Groups associated with the cache clusters in the replication group. This parameter can be used only with replication group containing cache clusters running in an Amazon Virtual Private Cloud (Amazon VPC). (string) -- :type PreferredMaintenanceWindow: string :param PreferredMaintenanceWindow: Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for ddd are: sun mon tue wed thu fri sat Example: sun:23:00-mon:01:30 :type NotificationTopicArn: string :param NotificationTopicArn: The Amazon Resource Name (ARN) of the Amazon SNS topic to which notifications are sent. Note The Amazon SNS topic owner must be same as the replication group owner. :type CacheParameterGroupName: string :param CacheParameterGroupName: The name of the cache parameter group to apply to all of the clusters in this replication group. 
This change is asynchronously applied as soon as possible for parameters when the ApplyImmediately parameter is specified as true for this request. :type NotificationTopicStatus: string :param NotificationTopicStatus: The status of the Amazon SNS notification topic for the replication group. Notifications are sent only if the status is active . Valid values: active | inactive :type ApplyImmediately: boolean :param ApplyImmediately: If true , this parameter causes the modifications in this request and any pending modifications to be applied, asynchronously and as soon as possible, regardless of the PreferredMaintenanceWindow setting for the replication group. If false , changes to the nodes in the replication group are applied on the next maintenance reboot, or the next failure reboot, whichever occurs first. Valid values: true | false Default: false :type EngineVersion: string :param EngineVersion: The upgraded version of the cache engine to be run on the cache clusters in the replication group. Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version ), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing replication group and create it anew with the earlier engine version. :type AutoMinorVersionUpgrade: boolean :param AutoMinorVersionUpgrade: This parameter is currently disabled. :type SnapshotRetentionLimit: integer :param SnapshotRetentionLimit: The number of days for which ElastiCache retains automatic node group (shard) snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot that was taken today is retained for 5 days before being deleted. Important If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off. :type SnapshotWindow: string :param SnapshotWindow: The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of the node group (shard) specified by SnapshottingClusterId . Example: 05:00-09:00 If you do not specify this parameter, ElastiCache automatically chooses an appropriate time range. :type CacheNodeType: string :param CacheNodeType: A valid cache node type that you want to scale this replication group to. :type NodeGroupId: string :param NodeGroupId: The name of the Node Group (called shard in the console). :rtype: dict :return: { 'ReplicationGroup': { 'ReplicationGroupId': 'string', 'Description': 'string', 'Status': 'string', 'PendingModifiedValues': { 'PrimaryClusterId': 'string', 'AutomaticFailoverStatus': 'enabled'|'disabled' }, 'MemberClusters': [ 'string', ], 'NodeGroups': [ { 'NodeGroupId': 'string', 'Status': 'string', 'PrimaryEndpoint': { 'Address': 'string', 'Port': 123 }, 'Slots': 'string', 'NodeGroupMembers': [ { 'CacheClusterId': 'string', 'CacheNodeId': 'string', 'ReadEndpoint': { 'Address': 'string', 'Port': 123 }, 'PreferredAvailabilityZone': 'string', 'CurrentRole': 'string' }, ] }, ], 'SnapshottingClusterId': 'string', 'AutomaticFailover': 'enabled'|'disabled'|'enabling'|'disabling', 'ConfigurationEndpoint': { 'Address': 'string', 'Port': 123 }, 'SnapshotRetentionLimit': 123, 'SnapshotWindow': 'string', 'ClusterEnabled': True|False, 'CacheNodeType': 'string' } } :returns: Redis versions earlier than 2.8.6. Redis (cluster mode disabled):T1 and T2 cache node types. Redis (cluster mode enabled): T1 node types.
def parse(self, gff_file, strict=False): """Parse the gff file into the following data structures: * lines(list of line_data(dict)) - line_index(int): the index in lines - line_raw(str) - line_type(str in ['feature', 'directive', 'comment', 'blank', 'unknown']) - line_errors(list of str): a list of error messages - line_status(str in ['normal', 'modified', 'removed']) - parents(list of feature(list of line_data(dict))): may have multiple parents - children(list of line_data(dict)) - extra fields depending on line_type * directive - directive(str in ['##gff-version', '##sequence-region', '##feature-ontology', '##attribute-ontology', '##source-ontology', '##species', '##genome-build', '###', '##FASTA']) - extra fields depending on directive * feature - seqid(str): must escape any characters not in the set [a-zA-Z0-9.:^*$@!+_?-|] using RFC 3986 Percent-Encoding - source(str) - type(str in so_types) - start(int) - end(int) - score(float) - strand(str in ['+', '-', '.', '?']) - phase(int in [0, 1, 2]) - attributes(dict of tag(str) to value) - ID(str) - Name(str) - Alias(list of str): multi value - Parent(list of str): multi value - Target(dict) - target_id(str) - start(int) - end(int) - strand(str in ['+', '-', '']) - Gap(str): CIGAR format - Derives_from(str) - Note(list of str): multi value - Dbxref(list of str): multi value - Ontology_term(list of str): multi value - Is_circular(str in ['true']) * fasta_dict(dict of id(str) to sequence_item(dict)) - id(str) - header(str) - seq(str) - line_length(int) * features(dict of feature_id(str in line_data['attributes']['ID']) to feature(list of line_data(dict))) A feature is a list of line_data(dict), since all lines that share an ID collectively represent a single feature. During serialization, line_data(dict) references should be converted into line_index(int) :param gff_file: a string path or file object :param strict: when true, throw exception on syntax and format errors. when false, use best effort to finish parsing while logging errors """ valid_strand = set(('+', '-', '.', '?')) valid_phase = set((0, 1, 2)) multi_value_attributes = set(('Parent', 'Alias', 'Note', 'Dbxref', 'Ontology_term')) valid_attribute_target_strand = set(('+', '-', '')) reserved_attributes = set(('ID', 'Name', 'Alias', 'Parent', 'Target', 'Gap', 'Derives_from', 'Note', 'Dbxref', 'Ontology_term', 'Is_circular')) # illegal character check # Literal use of tab, newline, carriage return, the percent (%) sign, and control characters must be encoded using RFC 3986 Percent-Encoding; no other characters may be encoded. # control characters: \x00-\x1f\x7f this includes tab(\x09), newline(\x0a), carriage return(\x0d) # seqid may contain any characters, but must escape any characters not in the set [a-zA-Z0-9.:^*$@!+_?-|] # URL escaping rules are used for tags or values containing the following characters: ",=;". 
#>>> timeit("unescaped_seqid('Un.7589')", "import re; unescaped_seqid = re.compile(r'[^a-zA-Z0-9.:^*$@!+_?|%-]|%(?![0-9a-fA-F]{2})').search") #0.4128372745785036 #>>> timeit("unescaped_seqid2('Un.7589')", "import re; unescaped_seqid2 = re.compile(r'^([a-zA-Z0-9.:^*$@!+_?|-]|%[0-9a-fA-F]{2})+$').search") #0.9012313532265175 unescaped_seqid = re.compile(r'[^a-zA-Z0-9.:^*$@!+_?|%-]|%(?![0-9a-fA-F]{2})').search unescaped_field = re.compile(r'[\x00-\x1f\x7f]|%(?![0-9a-fA-F]{2})').search gff_fp = gff_file if isinstance(gff_file, str): gff_fp = open(gff_file, 'r') lines = [] current_line_num = 1 # line numbers start at 1 features = defaultdict(list) # key = the unresolved id, value = a list of line_data(dict) unresolved_parents = defaultdict(list) for line_raw in gff_fp: line_data = { 'line_index': current_line_num - 1, 'line_raw': line_raw, 'line_status': 'normal', 'parents': [], 'children': [], 'line_type': '', 'directive': '', 'line_errors': [], 'type': '', } line_strip = line_raw.strip() if line_strip != line_raw[:len(line_strip)]: self.add_line_error(line_data, {'message': 'White chars not allowed at the start of a line', 'error_type': 'FORMAT', 'location': ''}) if current_line_num == 1 and not line_strip.startswith('##gff-version'): self.add_line_error(line_data, {'message': '"##gff-version" missing from the first line', 'error_type': 'FORMAT', 'location': ''}) if len(line_strip) == 0: line_data['line_type'] = 'blank' continue if line_strip.startswith('##'): line_data['line_type'] = 'directive' if line_strip.startswith('##sequence-region'): # ##sequence-region seqid start end # This element is optional, but strongly encouraged because it allows parsers to perform bounds checking on features. # only one ##sequence-region directive may be given for any given seqid # all features on that landmark feature (having that seqid) must be contained within the range defined by that ##sequence-region diretive. An exception to this rule is allowed when a landmark feature is marked with the Is_circular attribute. 
line_data['directive'] = '##sequence-region' tokens = list(line_strip.split()[1:]) if len(tokens) != 3: self.add_line_error(line_data, {'message': 'Expecting 3 fields, got %d: %s' % (len(tokens) - 1, repr(tokens[1:])), 'error_type': 'FORMAT', 'location': ''}) if len(tokens) > 0: line_data['seqid'] = tokens[0] # check for duplicate ##sequence-region seqid if [True for d in lines if ('directive' in d and d['directive'] == '##sequence-region' and 'seqid' in d and d['seqid'] == line_data['seqid'])]: self.add_line_error(line_data, {'message': '##sequence-region seqid: "%s" may only appear once' % line_data['seqid'], 'error_type': 'FORMAT', 'location': ''}) try: all_good = True try: line_data['start'] = int(tokens[1]) if line_data['start'] < 1: self.add_line_error(line_data, {'message': 'Start is not a valid 1-based integer coordinate: "%s"' % tokens[1], 'error_type': 'FORMAT', 'location': ''}) except ValueError: all_good = False self.add_line_error(line_data, {'message': 'Start is not a valid integer: "%s"' % tokens[1], 'error_type': 'FORMAT', 'location': ''}) line_data['start'] = tokens[1] try: line_data['end'] = int(tokens[2]) if line_data['end'] < 1: self.add_line_error(line_data, {'message': 'End is not a valid 1-based integer coordinate: "%s"' % tokens[2], 'error_type': 'FORMAT', 'location': ''}) except ValueError: all_good = False self.add_line_error(line_data, {'message': 'End is not a valid integer: "%s"' % tokens[2], 'error_type': 'FORMAT', 'location': ''}) line_data['start'] = tokens[2] # if all_good then both start and end are int, so we can check if start is not less than or equal to end if all_good and line_data['start'] > line_data['end']: self.add_line_error(line_data, {'message': 'Start is not less than or equal to end', 'error_type': 'FORMAT', 'location': ''}) except IndexError: pass elif line_strip.startswith('##gff-version'): # The GFF version, always 3 in this specification must be present, must be the topmost line of the file and may only appear once in the file. line_data['directive'] = '##gff-version' # check if it appeared before if [True for d in lines if ('directive' in d and d['directive'] == '##gff-version')]: self.add_line_error(line_data, {'message': '##gff-version missing from the first line', 'error_type': 'FORMAT', 'location': ''}) tokens = list(line_strip.split()[1:]) if len(tokens) != 1: self.add_line_error(line_data, {'message': 'Expecting 1 field, got %d: %s' % (len(tokens) - 1, repr(tokens[1:])), 'error_type': 'FORMAT', 'location': ''}) if len(tokens) > 0: try: line_data['version'] = int(tokens[0]) if line_data['version'] != 3: self.add_line_error(line_data, {'message': 'Version is not "3": "%s"' % tokens[0], 'error_type': 'FORMAT', 'location': ''}) except ValueError: self.add_line_error(line_data, {'message': 'Version is not a valid integer: "%s"' % tokens[0], 'error_type': 'FORMAT', 'location': ''}) line_data['version'] = tokens[0] elif line_strip.startswith('###'): # This directive (three # signs in a row) indicates that all forward references to feature IDs that have been seen to this point have been resolved. line_data['directive'] = '###' elif line_strip.startswith('##FASTA'): # This notation indicates that the annotation portion of the file is at an end and that the # remainder of the file contains one or more sequences (nucleotide or protein) in FASTA format. 
line_data['directive'] = '##FASTA' self.logger.info('Reading embedded ##FASTA sequence') self.fasta_embedded, count = fasta_file_to_dict(gff_fp) self.logger.info('%d sequences read' % len(self.fasta_embedded)) elif line_strip.startswith('##feature-ontology'): # ##feature-ontology URI # This directive indicates that the GFF3 file uses the ontology of feature types located at the indicated URI or URL. line_data['directive'] = '##feature-ontology' tokens = list(line_strip.split()[1:]) if len(tokens) != 1: self.add_line_error(line_data, {'message': 'Expecting 1 field, got %d: %s' % (len(tokens) - 1, repr(tokens[1:])), 'error_type': 'FORMAT', 'location': ''}) if len(tokens) > 0: line_data['URI'] = tokens[0] elif line_strip.startswith('##attribute-ontology'): # ##attribute-ontology URI # This directive indicates that the GFF3 uses the ontology of attribute names located at the indicated URI or URL. line_data['directive'] = '##attribute-ontology' tokens = list(line_strip.split()[1:]) if len(tokens) != 1: self.add_line_error(line_data, {'message': 'Expecting 1 field, got %d: %s' % (len(tokens) - 1, repr(tokens[1:])), 'error_type': 'FORMAT', 'location': ''}) if len(tokens) > 0: line_data['URI'] = tokens[0] elif line_strip.startswith('##source-ontology'): # ##source-ontology URI # This directive indicates that the GFF3 uses the ontology of source names located at the indicated URI or URL. line_data['directive'] = '##source-ontology' tokens = list(line_strip.split()[1:]) if len(tokens) != 1: self.add_line_error(line_data, {'message': 'Expecting 1 field, got %d: %s' % (len(tokens) - 1, repr(tokens[1:])), 'error_type': 'FORMAT', 'location': ''}) if len(tokens) > 0: line_data['URI'] = tokens[0] elif line_strip.startswith('##species'): # ##species NCBI_Taxonomy_URI # This directive indicates the species that the annotations apply to. line_data['directive'] = '##species' tokens = list(line_strip.split()[1:]) if len(tokens) != 1: self.add_line_error(line_data, {'message': 'Expecting 1 field, got %d: %s' % (len(tokens) - 1, repr(tokens[1:])), 'error_type': 'FORMAT', 'location': ''}) if len(tokens) > 0: line_data['NCBI_Taxonomy_URI'] = tokens[0] elif line_strip.startswith('##genome-build'): # ##genome-build source buildName # The genome assembly build name used for the coordinates given in the file. 
                line_data['directive'] = '##genome-build'
                tokens = list(line_strip.split()[1:])
                if len(tokens) != 2:
                    self.add_line_error(line_data, {'message': 'Expecting 2 fields, got %d: %s' % (len(tokens) - 1, repr(tokens[1:])), 'error_type': 'FORMAT', 'location': ''})
                if len(tokens) > 0:
                    line_data['source'] = tokens[0]
                    try:
                        line_data['buildName'] = tokens[1]
                    except IndexError:
                        pass
            else:
                self.add_line_error(line_data, {'message': 'Unknown directive', 'error_type': 'FORMAT', 'location': ''})
                tokens = list(line_strip.split())
                line_data['directive'] = tokens[0]
        elif line_strip.startswith('#'):
            line_data['line_type'] = 'comment'
        else:
            # line_type may be a feature or unknown
            line_data['line_type'] = 'feature'
            tokens = list(map(str.strip, line_raw.split('\t')))
            if len(tokens) != 9:
                self.add_line_error(line_data, {'message': 'Features should contain 9 fields, got %d: %s' % (len(tokens) - 1, repr(tokens[1:])), 'error_type': 'FORMAT', 'location': ''})
            for i, t in enumerate(tokens):
                if not t:
                    self.add_line_error(line_data, {'message': 'Empty field: %d, must have a "."' % (i + 1), 'error_type': 'FORMAT', 'location': ''})
            try:
                line_data['seqid'] = tokens[0]
                if unescaped_seqid(tokens[0]):
                    self.add_line_error(line_data, {'message': 'Seqid must escape any characters not in the set [a-zA-Z0-9.:^*$@!+_?-|]: "%s"' % tokens[0], 'error_type': 'FORMAT', 'location': ''})
                line_data['source'] = tokens[1]
                if unescaped_field(tokens[1]):
                    self.add_line_error(line_data, {'message': 'Source must escape the percent (%%) sign and any control characters: "%s"' % tokens[1], 'error_type': 'FORMAT', 'location': ''})
                line_data['type'] = tokens[2]
                if unescaped_field(tokens[2]):
                    self.add_line_error(line_data, {'message': 'Type must escape the percent (%%) sign and any control characters: "%s"' % tokens[2], 'error_type': 'FORMAT', 'location': ''})
                all_good = True
                try:
                    line_data['start'] = int(tokens[3])
                    if line_data['start'] < 1:
                        self.add_line_error(line_data, {'message': 'Start is not a valid 1-based integer coordinate: "%s"' % tokens[3], 'error_type': 'FORMAT', 'location': 'start'})
                except ValueError:
                    all_good = False
                    line_data['start'] = tokens[3]
                    if line_data['start'] != '.':
                        self.add_line_error(line_data, {'message': 'Start is not a valid integer: "%s"' % line_data['start'], 'error_type': 'FORMAT', 'location': 'start'})
                try:
                    line_data['end'] = int(tokens[4])
                    if line_data['end'] < 1:
                        self.add_line_error(line_data, {'message': 'End is not a valid 1-based integer coordinate: "%s"' % tokens[4], 'error_type': 'FORMAT', 'location': 'end'})
                except ValueError:
                    all_good = False
                    line_data['end'] = tokens[4]
                    if line_data['end'] != '.':
                        self.add_line_error(line_data, {'message': 'End is not a valid integer: "%s"' % line_data['end'], 'error_type': 'FORMAT', 'location': 'end'})
                # if all_good then both start and end are int, so we can check if start is not less than or equal to end
                if all_good and line_data['start'] > line_data['end']:
                    self.add_line_error(line_data, {'message': 'Start is not less than or equal to end', 'error_type': 'FORMAT', 'location': 'start,end'})
                try:
                    line_data['score'] = float(tokens[5])
                except ValueError:
                    line_data['score'] = tokens[5]
                    if line_data['score'] != '.':
                        self.add_line_error(line_data, {'message': 'Score is not a valid floating point number: "%s"' % line_data['score'], 'error_type': 'FORMAT', 'location': ''})
                line_data['strand'] = tokens[6]
                if line_data['strand'] not in valid_strand: # set(['+', '-', '.', '?'])
                    self.add_line_error(line_data, {'message': 'Strand has illegal characters: "%s"' % tokens[6], 'error_type': 'FORMAT', 'location': ''})
                try:
                    line_data['phase'] = int(tokens[7])
                    if line_data['phase'] not in valid_phase: # set([0, 1, 2])
                        self.add_line_error(line_data, {'message': 'Phase is not 0, 1, or 2: "%s"' % tokens[7], 'error_type': 'FORMAT', 'location': ''})
                except ValueError:
                    line_data['phase'] = tokens[7]
                    if line_data['phase'] != '.':
                        self.add_line_error(line_data, {'message': 'Phase is not a valid integer: "%s"' % line_data['phase'], 'error_type': 'FORMAT', 'location': ''})
                    elif line_data['type'] == 'CDS':
                        self.add_line_error(line_data, {'message': 'Phase is required for all CDS features', 'error_type': 'FORMAT', 'location': ''})
                # parse attributes, ex: ID=exon00003;Parent=mRNA00001,mRNA00003;Name=EXON.1
                # URL escaping rules are used for tags or values containing the following characters: ",=;". Spaces are allowed in this field, but tabs must be replaced with the %09 URL escape.
                # Note that attribute names are case sensitive. "Parent" is not the same as "parent".
                # All attributes that begin with an uppercase letter are reserved for later use. Attributes that begin with a lowercase letter can be used freely by applications.
                if unescaped_field(tokens[8]):
                    self.add_line_error(line_data, {'message': 'Attributes must escape the percent (%) sign and any control characters', 'error_type': 'FORMAT', 'location': ''})
                attribute_tokens = tuple(tuple(t for t in a.split('=')) for a in tokens[8].split(';') if a)
                line_data['attributes'] = {}
                if len(attribute_tokens) == 1 and len(attribute_tokens[0]) == 1 and attribute_tokens[0][0] == '.':
                    pass # no attributes
                else:
                    for a in attribute_tokens:
                        if len(a) != 2:
                            self.add_line_error(line_data, {'message': 'Attributes must contain one and only one equal (=) sign: "%s"' % ('='.join(a)), 'error_type': 'FORMAT', 'location': ''})
                        try:
                            tag, value = a
                        except ValueError:
                            tag, value = a[0], ''
                        if not tag:
                            self.add_line_error(line_data, {'message': 'Empty attribute tag: "%s"' % '='.join(a), 'error_type': 'FORMAT', 'location': ''})
                        if not value.strip():
                            self.add_line_error(line_data, {'message': 'Empty attribute value: "%s"' % '='.join(a), 'error_type': 'FORMAT', 'location': ''}, log_level=logging.WARNING)
                        if tag in line_data['attributes']:
                            self.add_line_error(line_data, {'message': 'Found multiple attribute tags: "%s"' % tag, 'error_type': 'FORMAT', 'location': ''})
                        if tag in multi_value_attributes: # set(['Parent', 'Alias', 'Note', 'Dbxref', 'Ontology_term'])
                            if value.find(', ') >= 0:
                                self.add_line_error(line_data, {'message': 'Found ", " in %s attribute, possible unescaped ",": "%s"' % (tag, value), 'error_type': 'FORMAT', 'location': ''}, log_level=logging.WARNING)
                            # In addition to Parent, the Alias, Note, Dbxref and Ontology_term attributes can have multiple values.
                            if tag in line_data['attributes']: # if this tag has been seen before
                                if tag == 'Note': # don't check for duplicate notes
                                    line_data['attributes'][tag].extend(value.split(','))
                                else: # only add non duplicate values
                                    line_data['attributes'][tag].extend([s for s in value.split(',') if s not in line_data['attributes'][tag]])
                            else:
                                line_data['attributes'][tag] = value.split(',')
                            # check for duplicate values
                            if tag != 'Note' and len(line_data['attributes'][tag]) != len(set(line_data['attributes'][tag])):
                                count_values = [(len(list(group)), key) for key, group in groupby(sorted(line_data['attributes'][tag]))]
                                self.add_line_error(line_data, {'message': '%s attribute has identical values (count, value): %s' % (tag, ', '.join(['(%d, %s)' % (c, v) for c, v in count_values if c > 1])), 'error_type': 'FORMAT', 'location': ''})
                                # remove duplicate
                                line_data['attributes'][tag] = list(set(line_data['attributes'][tag]))
                            if tag == 'Parent':
                                for feature_id in line_data['attributes']['Parent']:
                                    try:
                                        line_data['parents'].append(features[feature_id])
                                        for ld in features[feature_id]:
                                            # no need to check if line_data in ld['children'], because it is impossible, each ld maps to only one feature_id, so the ld we get are all different
                                            ld['children'].append(line_data)
                                    except KeyError: # features[id]
                                        self.add_line_error(line_data, {'message': '%s attribute has unresolved forward reference: %s' % (tag, feature_id), 'error_type': 'FORMAT', 'location': ''})
                                        unresolved_parents[feature_id].append(line_data)
                        elif tag == 'Target':
                            if value.find(',') >= 0:
                                self.add_line_error(line_data, {'message': 'Value of %s attribute contains unescaped ",": "%s"' % (tag, value), 'error_type': 'FORMAT', 'location': ''})
                            target_tokens = value.split(' ')
                            if len(target_tokens) < 3 or len(target_tokens) > 4:
                                self.add_line_error(line_data, {'message': 'Target attribute should have 3 or 4 values, got %d: %s' % (len(target_tokens), repr(tokens)), 'error_type': 'FORMAT', 'location': ''})
                            line_data['attributes'][tag] = {}
                            try:
                                line_data['attributes'][tag]['target_id'] = target_tokens[0]
                                all_good = True
                                try:
                                    line_data['attributes'][tag]['start'] = int(target_tokens[1])
                                    if line_data['attributes'][tag]['start'] < 1:
                                        self.add_line_error(line_data, {'message': 'Start value of Target attribute is not a valid 1-based integer coordinate: "%s"' % target_tokens[1], 'error_type': 'FORMAT', 'location': ''})
                                except ValueError:
                                    all_good = False
                                    line_data['attributes'][tag]['start'] = target_tokens[1]
                                    self.add_line_error(line_data, {'message': 'Start value of Target attribute is not a valid integer: "%s"' % line_data['attributes'][tag]['start'], 'error_type': 'FORMAT', 'location': ''})
                                try:
                                    line_data['attributes'][tag]['end'] = int(target_tokens[2])
                                    if line_data['attributes'][tag]['end'] < 1:
                                        self.add_line_error(line_data, {'message': 'End value of Target attribute is not a valid 1-based integer coordinate: "%s"' % target_tokens[2], 'error_type': 'FORMAT', 'location': ''})
                                except ValueError:
                                    all_good = False
                                    line_data['attributes'][tag]['end'] = target_tokens[2]
                                    self.add_line_error(line_data, {'message': 'End value of Target attribute is not a valid integer: "%s"' % line_data['attributes'][tag]['end'], 'error_type': 'FORMAT', 'location': ''})
                                # if all_good then both start and end are int, so we can check if start is not less than or equal to end
                                if all_good and line_data['attributes'][tag]['start'] > line_data['attributes'][tag]['end']:
                                    self.add_line_error(line_data, {'message': 'Start is not less than or equal to end', 'error_type': 'FORMAT', 'location': ''})
                                line_data['attributes'][tag]['strand'] = target_tokens[3]
                                if line_data['attributes'][tag]['strand'] not in valid_attribute_target_strand: # set(['+', '-', ''])
                                    self.add_line_error(line_data, {'message': 'Strand value of Target attribute has illegal characters: "%s"' % line_data['attributes'][tag]['strand'], 'error_type': 'FORMAT', 'location': ''})
                            except IndexError:
                                pass
                        else:
                            if value.find(',') >= 0:
                                self.add_line_error(line_data, {'message': 'Value of %s attribute contains unescaped ",": "%s"' % (tag, value), 'error_type': 'FORMAT', 'location': ''})
                            line_data['attributes'][tag] = value
                            if tag == 'Is_circular' and value != 'true':
                                self.add_line_error(line_data, {'message': 'Value of Is_circular attribute is not "true": "%s"' % value, 'error_type': 'FORMAT', 'location': ''})
                            elif tag[:1].isupper() and tag not in reserved_attributes: # {'ID', 'Name', 'Alias', 'Parent', 'Target', 'Gap', 'Derives_from', 'Note', 'Dbxref', 'Ontology_term', 'Is_circular'}
                                self.add_line_error(line_data, {'message': 'Unknown reserved (uppercase) attribute: "%s"' % tag, 'error_type': 'FORMAT', 'location': ''})
                            elif tag == 'ID':
                                # check for duplicate ID in non-adjacent lines
                                if value in features and lines[-1]['attributes'][tag] != value:
                                    self.add_line_error(line_data, {'message': 'Duplicate ID: "%s" in non-adjacent lines: %s' % (value, ','.join([str(f['line_index'] + 1) for f in features[value]])), 'error_type': 'FORMAT', 'location': ''}, log_level=logging.WARNING)
                                features[value].append(line_data)
            except IndexError:
                pass
        current_line_num += 1
        lines.append(line_data)
    if isinstance(gff_file, str):
        gff_fp.close()
    # global look up of unresolved parents
    for feature_id in unresolved_parents:
        if feature_id in features:
            for line in unresolved_parents[feature_id]:
                self.add_line_error(line, {'message': 'Unresolved forward reference: "%s", found defined in lines: %s' % (feature_id, ','.join([str(ld['line_index'] + 1) for ld in features[feature_id]])), 'error_type': 'FORMAT', 'location': ''})
    self.lines = lines
    self.features = features
    return 1
Parse the gff file into the following data structures:

* lines(list of line_data(dict))
    - line_index(int): the index in lines
    - line_raw(str)
    - line_type(str in ['feature', 'directive', 'comment', 'blank', 'unknown'])
    - line_errors(list of str): a list of error messages
    - line_status(str in ['normal', 'modified', 'removed'])
    - parents(list of feature(list of line_data(dict))): may have multiple parents
    - children(list of line_data(dict))
    - extra fields depending on line_type
    * directive
        - directive(str in ['##gff-version', '##sequence-region', '##feature-ontology', '##attribute-ontology', '##source-ontology', '##species', '##genome-build', '###', '##FASTA'])
        - extra fields depending on directive
    * feature
        - seqid(str): must escape any characters not in the set [a-zA-Z0-9.:^*$@!+_?-|] using RFC 3986 Percent-Encoding
        - source(str)
        - type(str in so_types)
        - start(int)
        - end(int)
        - score(float)
        - strand(str in ['+', '-', '.', '?'])
        - phase(int in [0, 1, 2])
        - attributes(dict of tag(str) to value)
            - ID(str)
            - Name(str)
            - Alias(list of str): multi value
            - Parent(list of str): multi value
            - Target(dict)
                - target_id(str)
                - start(int)
                - end(int)
                - strand(str in ['+', '-', ''])
            - Gap(str): CIGAR format
            - Derives_from(str)
            - Note(list of str): multi value
            - Dbxref(list of str): multi value
            - Ontology_term(list of str): multi value
            - Is_circular(str in ['true'])
* fasta_dict(dict of id(str) to sequence_item(dict))
    - id(str)
    - header(str)
    - seq(str)
    - line_length(int)
* features(dict of feature_id(str in line_data['attributes']['ID']) to feature(list of line_data(dict)))

A feature is a list of line_data(dict), since all lines that share an ID collectively represent a single feature.

During serialization, line_data(dict) references should be converted into line_index(int)

:param gff_file: a string path or file object
:param strict: when true, throw exception on syntax and format errors. when false, use best effort to finish parsing while logging errors
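A minimal usage sketch of this parser, assuming it is the parse method of a Gff3 class as in the gff3 package; the class name, constructor, file name, and feature ID below are illustrative assumptions, not confirmed by the source.

# Hedged usage sketch; 'annotations.gff3' and 'mRNA00001' are hypothetical.
from gff3 import Gff3

gff = Gff3()
gff.parse('annotations.gff3')  # populates gff.lines and gff.features as described above

# walk top-level features (feature lines with no resolved parents)
for line in gff.lines:
    if line['line_type'] == 'feature' and not line['parents']:
        print(line['seqid'], line['type'], line['start'], line['end'])

# a feature is a list of line_data dicts that share one ID
for ld in gff.features.get('mRNA00001', []):
    print(ld['line_index'], len(ld['children']), 'children')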
def _cleanup_and_die(data):
    """ cleanup func for step 1 """
    tmpfiles = glob.glob(os.path.join(data.dirs.fastqs, "tmp_*_R*.fastq"))
    tmpfiles += glob.glob(os.path.join(data.dirs.fastqs, "tmp_*.p"))
    for tmpf in tmpfiles:
        os.remove(tmpf)
cleanup func for step 1
def delete(self, object_id):
    """
    Delete an object by its id.

    :param object_id: the object's id.
    :return: the deleted object
    :raises NoResultFound: when the object could not be found
    """
    obj = self.session.query(self.cls).filter_by(id=object_id).one()
    self.session.delete(obj)
    return obj
Delete an object by its id.

:param object_id: the object's id.
:return: the deleted object
:raises NoResultFound: when the object could not be found
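A short usage sketch, assuming delete is a method of a small repository object that holds a SQLAlchemy session and a mapped class in self.cls; the Repository name, its constructor, and the User model are hypothetical.

from sqlalchemy.orm.exc import NoResultFound  # raised by .one() when no row matches

repo = Repository(session=session, cls=User)  # hypothetical wrapper
try:
    deleted = repo.delete(object_id=42)
    repo.session.commit()  # delete() only marks the object; committing is left to the caller
except NoResultFound:
    repo.session.rollback()  # no row with id=42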
def calculate_bins(array, _=None, *args, **kwargs) -> BinningBase:
    """Find optimal binning from arguments.

    Parameters
    ----------
    array: arraylike
        Data from which the bins should be decided (sometimes used, sometimes not)
    _: int or str or Callable or arraylike or Iterable or BinningBase
        To-be-guessed parameter that specifies what kind of binning should be done
    check_nan: bool
        Check for the presence of nan's in array? Default: True
    range: tuple
        Limit values to a range. Some of the binning methods also (subsequently)
        use this parameter for the bin shape.

    Returns
    -------
    BinningBase
        A two-dimensional array with pairs of bin edges (not necessarily consecutive).
    """
    if array is not None:
        if kwargs.pop("check_nan", True):
            if np.any(np.isnan(array)):
                raise RuntimeError("Cannot calculate bins in presence of NaN's.")
        if kwargs.get("range", None):   # TODO: re-consider the usage of this parameter
            array = array[(array >= kwargs["range"][0]) & (array <= kwargs["range"][1])]
    if _ is None:
        bin_count = 10  # kwargs.pop("bins", ideal_bin_count(data=array)) - same as numpy
        binning = numpy_binning(array, bin_count, *args, **kwargs)
    elif isinstance(_, BinningBase):
        binning = _
    elif isinstance(_, int):
        binning = numpy_binning(array, _, *args, **kwargs)
    elif isinstance(_, str):
        # What about the ranges???
        if _ in bincount_methods:
            bin_count = ideal_bin_count(array, method=_)
            binning = numpy_binning(array, bin_count, *args, **kwargs)
        elif _ in binning_methods:
            method = binning_methods[_]
            binning = method(array, *args, **kwargs)
        else:
            raise RuntimeError("No binning method {0} available.".format(_))
    elif callable(_):
        binning = _(array, *args, **kwargs)
    elif np.iterable(_):
        binning = static_binning(array, _, *args, **kwargs)
    else:
        raise RuntimeError("Binning {0} not understood.".format(_))
    return binning
Find optimal binning from arguments.

Parameters
----------
array: arraylike
    Data from which the bins should be decided (sometimes used, sometimes not)
_: int or str or Callable or arraylike or Iterable or BinningBase
    To-be-guessed parameter that specifies what kind of binning should be done
check_nan: bool
    Check for the presence of nan's in array? Default: True
range: tuple
    Limit values to a range. Some of the binning methods also (subsequently)
    use this parameter for the bin shape.

Returns
-------
BinningBase
    A two-dimensional array with pairs of bin edges (not necessarily consecutive).
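A sketch of the dispatch rules above, assuming the helpers the function references (numpy_binning, static_binning, bincount_methods, binning_methods) are in scope; "sturges" is only an assumed member of bincount_methods.

import numpy as np

data = np.random.normal(size=1000)

calculate_bins(data)                     # None -> default of 10 numpy bins
calculate_bins(data, 25)                 # int -> numpy_binning with 25 bins
calculate_bins(data, "sturges")          # str -> named bin-count or binning method (assumed key)
calculate_bins(data, [0.0, 0.5, 1.0])    # iterable -> static_binning on fixed edges
calculate_bins(data, 10, range=(-2, 2))  # clip data to the range before binning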
def mav_to_gpx(infilename, outfilename):
    '''convert a mavlink log file to a GPX file'''

    mlog = mavutil.mavlink_connection(infilename)
    outf = open(outfilename, mode='w')

    def process_packet(timestamp, lat, lon, alt, hdg, v):
        t = time.localtime(timestamp)
        outf.write('''<trkpt lat="%s" lon="%s">
  <ele>%s</ele>
  <time>%s</time>
  <course>%s</course>
  <speed>%s</speed>
  <fix>3d</fix>
</trkpt>
''' % (lat, lon, alt, time.strftime("%Y-%m-%dT%H:%M:%SZ", t), hdg, v))

    def add_header():
        outf.write('''<?xml version="1.0" encoding="UTF-8"?>
<gpx version="1.0" creator="pymavlink"
  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
  xmlns="http://www.topografix.com/GPX/1/0"
  xsi:schemaLocation="http://www.topografix.com/GPX/1/0 http://www.topografix.com/GPX/1/0/gpx.xsd">
<trk>
<trkseg>
''')

    def add_footer():
        outf.write('''</trkseg>
</trk>
</gpx>
''')

    add_header()

    count = 0
    lat = 0
    lon = 0
    fix = 0
    while True:
        m = mlog.recv_match(type=['GPS_RAW', 'GPS_RAW_INT', 'GPS', 'GPS2'], condition=args.condition)
        if m is None:
            break
        if m.get_type() == 'GPS_RAW_INT':
            lat = m.lat/1.0e7
            lon = m.lon/1.0e7
            alt = m.alt/1.0e3
            v = m.vel/100.0
            hdg = m.cog/100.0
            timestamp = m._timestamp
            fix = m.fix_type
        elif m.get_type() == 'GPS_RAW':
            lat = m.lat
            lon = m.lon
            alt = m.alt
            v = m.v
            hdg = m.hdg
            timestamp = m._timestamp
            fix = m.fix_type
        elif m.get_type() == 'GPS' or m.get_type() == 'GPS2':
            lat = m.Lat
            lon = m.Lng
            alt = m.Alt
            v = m.Spd
            hdg = m.GCrs
            timestamp = m._timestamp
            fix = m.Status
        else:
            pass
        if fix < 2 and not args.nofixcheck:
            continue
        if lat == 0.0 or lon == 0.0:
            continue
        process_packet(timestamp, lat, lon, alt, hdg, v)
        count += 1
    add_footer()
    print("Created %s with %u points" % (outfilename, count))
convert a mavlink log file to a GPX file
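Note that the function reads a module-level args object (args.condition, args.nofixcheck), normally created by the surrounding script's argparse setup; a standalone caller has to provide an equivalent, as in this sketch (file names are hypothetical).

from argparse import Namespace

# stand-in for the script's parsed CLI options; attribute names taken from the code above
args = Namespace(condition=None, nofixcheck=False)

mav_to_gpx('flight.tlog', 'flight.gpx')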
def _run_config_cmds(self, commands, server):
    """Execute/send a CAPI (Command API) command to EOS.

    In this method, the list of commands is wrapped with prefix and postfix
    commands to make it understandable by EOS.

    :param commands: List of commands to be executed on EOS
    :param server: Server endpoint on the Arista switch to be configured
    """
    command_start = ['enable', 'configure']
    command_end = ['exit']
    full_command = command_start + commands + command_end
    self._run_eos_cmds(full_command, server)
Execute/send a CAPI (Command API) command to EOS.

In this method, the list of commands is wrapped with prefix and postfix
commands to make it understandable by EOS.

:param commands: List of commands to be executed on EOS
:param server: Server endpoint on the Arista switch to be configured
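For illustration, a commands list is bracketed like this before being sent; the driver and server objects below are placeholders for whatever instantiates this class, and the VLAN commands are hypothetical.

commands = ['vlan 100', 'name tenant-net']
driver._run_config_cmds(commands, server)
# equivalent to:
# driver._run_eos_cmds(['enable', 'configure', 'vlan 100', 'name tenant-net', 'exit'], server)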
def plugin(module, *args, **kwargs):
    """
    Decorator to extend a view with a plugin module. The module can return a
    class or a function; a returned class has its methods copied onto the view.

    ie:

    # Your module.py
    def my_ext(view, **kwargs):
        class MyExtension(object):
            def my_view(self):
                return {}
        return MyExtension

    # Your view.py
    @plugin(my_ext)
    class Index(View):
        pass

    :param module: object
    :param args:
    :param kwargs:
    :return:
    """
    def wrap(f):
        m = module(f, *args, **kwargs)
        if inspect.isclass(m):
            for k, v in m.__dict__.items():
                if not k.startswith("__"):
                    setattr(f, k, v)
        elif inspect.isfunction(m):
            # the original referenced an undefined `kls`; attach the returned
            # function to the view under its own name instead
            setattr(f, m.__name__, m)
        return f
    return wrap
Decorator to extend a view with a plugin module. The module can return a
class or a function; a returned class has its methods copied onto the view.

ie:

# Your module.py
def my_ext(view, **kwargs):
    class MyExtension(object):
        def my_view(self):
            return {}
    return MyExtension

# Your view.py
@plugin(my_ext)
class Index(View):
    pass

:param module: object
:param args:
:param kwargs:
:return:
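The docstring shows the class case; when the extension returns a plain function, it is attached to the view under the function's own name. A sketch under that assumption (all names hypothetical):

def my_func_ext(view, **kwargs):
    def extra_view(self):
        return {}
    return extra_view

@plugin(my_func_ext)
class Index(View):
    pass

# Index.extra_view is now available on the view class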