def make_data(self, message):
    """
    make data string from message according to transport_content_type

    Returns:
        str: message data
    """
    if not isinstance(message, Message):
        return message
    return message.export(self.transport_content_type)
# requires: numpy as np and scipy.ndimage as ndi for the CPU path;
# improc (NiftyPET) for the GPU path
def iyang(imgIn, krnl, imgSeg, Cnt, itr=5):
    '''partial volume correction using iterative Yang method
    imgIn: input image which is blurred due to the PSF of the scanner
    krnl: shift-invariant kernel of the PSF
    imgSeg: segmentation into regions starting with 0 (e.g., background)
        and then next integer numbers
    itr: number of iterations (default 5)
    '''
    dim = imgIn.shape
    m = np.int32(np.max(imgSeg))
    m_a = np.zeros((m + 1, itr), dtype=np.float32)

    for jr in range(0, m + 1):
        m_a[jr, 0] = np.mean(imgIn[imgSeg == jr])

    # init output image
    imgOut = np.copy(imgIn)

    # iterative Yang algorithm:
    for i in range(0, itr):
        if Cnt['VERBOSE']:
            print('i> PVC Yang iteration =', i)
        # piece-wise constant image
        imgPWC = imgOut
        imgPWC[imgPWC < 0] = 0
        for jr in range(0, m + 1):
            imgPWC[imgSeg == jr] = np.mean(imgPWC[imgSeg == jr])

        #> blur the piece-wise constant image using either:
        #> (1) GPU convolution with a separable kernel (x,y,z), or
        #> (2) CPU, Python-based convolution
        if 'CCARCH' in Cnt and 'compute' in Cnt['CCARCH']:
            #> convert to dimensions of GPU processing [y,x,z]
            imin_d = np.transpose(imgPWC, (1, 2, 0))
            imout_d = np.zeros(imin_d.shape, dtype=np.float32)
            improc.convolve(imout_d, imin_d, krnl, Cnt)
            imgSmo = np.transpose(imout_d, (2, 0, 1))
        else:
            hxy = np.outer(krnl[1, :], krnl[2, :])
            hxyz = np.multiply.outer(krnl[0, :], hxy)
            imgSmo = ndi.convolve(imgPWC, hxyz, mode='constant', cval=0.)

        # correction factors
        imgCrr = np.ones(dim, dtype=np.float32)
        imgCrr[imgSmo > 0] = imgPWC[imgSmo > 0] / imgSmo[imgSmo > 0]
        imgOut = imgIn * imgCrr
        for jr in range(0, m + 1):
            m_a[jr, i] = np.mean(imgOut[imgSeg == jr])

    return imgOut, m_a
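A minimal usage sketch on synthetic data (assumptions: numpy/scipy imported as np/ndi so the CPU branch runs, Cnt carries only VERBOSE, and the 17-tap Gaussian PSF rows are made up for illustration):

import numpy as np
import scipy.ndimage as ndi

rng = np.random.default_rng(0)
img = rng.random((32, 32, 32)).astype(np.float32)
seg = np.zeros((32, 32, 32), dtype=np.int32)
seg[8:24, 8:24, 8:24] = 1                  # one foreground region

x = np.arange(-8, 9, dtype=np.float32)
g = np.exp(-0.5 * (x / 2.0) ** 2)
g /= g.sum()                               # normalised 1-D PSF profile
krnl = np.stack([g, g, g])                 # same kernel row per axis (z, x, y)

imgOut, m_a = iyang(img, krnl, seg, {'VERBOSE': False}, itr=3)
print(m_a[:, -1])                          # per-region means after the last iteration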
def convert_references_json(ref_content, soup=None):
    "Check for references that will not pass schema validation, fix or convert them to unknown"
    # Convert the reference to unknown if still missing important values
    if (
        (ref_content.get("type") == "other")
        or (ref_content.get("type") == "book-chapter" and "editors" not in ref_content)
        or (ref_content.get("type") == "journal" and "articleTitle" not in ref_content)
        or (ref_content.get("type") in ["journal", "book-chapter"] and "pages" not in ref_content)
        or (ref_content.get("type") == "journal" and "journal" not in ref_content)
        or (ref_content.get("type") in ["book", "book-chapter", "report", "thesis", "software"]
            and "publisher" not in ref_content)
        or (ref_content.get("type") == "book" and "bookTitle" not in ref_content)
        or (ref_content.get("type") == "data" and "source" not in ref_content)
        or (ref_content.get("type") == "conference-proceeding" and "conference" not in ref_content)
    ):
        ref_content = references_json_to_unknown(ref_content, soup)
    return ref_content
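A hedged illustration of the rule this function applies (the dict is hypothetical, and the sibling helper references_json_to_unknown from the same module is assumed to be in scope):

# A journal reference with no articleTitle fails the checklist above,
# so it is routed through references_json_to_unknown():
ref = {"type": "journal", "journal": "eLife", "pages": "e01234"}
converted = convert_references_json(ref)
# expected: converted.get("type") -> "unknown"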
def post(self, request, bot_id, format=None):
    """
    Add a new state
    ---
    serializer: StateSerializer
    responseMessages:
        - code: 401
          message: Not authenticated
        - code: 400
          message: Not valid request
    """
    return super(StateList, self).post(request, bot_id, format)
def _cleanup_pods(namespace, labels):
    """ Remove all pods with these labels in this namespace """
    api = kubernetes.client.CoreV1Api()
    pods = api.list_namespaced_pod(namespace, label_selector=format_labels(labels))
    for pod in pods.items:
        try:
            api.delete_namespaced_pod(pod.metadata.name, namespace)
            logger.info('Deleted pod: %s', pod.metadata.name)
        except kubernetes.client.rest.ApiException as e:
            # ignore error if pod is already removed
            if e.status != 404:
                raise
def update_node_attributes(self, attributes_flags=int(Qt.ItemIsSelectable | Qt.ItemIsEnabled)):
    """
    Updates the Node attributes.

    :param attributes_flags: Attributes flags.
    :type attributes_flags: int
    :return: Method success.
    :rtype: bool
    """
    self.traced.value = foundations.trace.is_traced(self.__module)
    self.traced.roles[Qt.DisplayRole] = foundations.strings.to_string(self.traced.value).title()
def run(self):
    """ Solves a state estimation problem.
    """
    case = self.case
    baseMVA = case.base_mva
    buses = self.case.connected_buses
    branches = case.online_branches
    generators = case.online_generators
    meas = self.measurements

    # Update indices.
    self.case.index_buses()
    self.case.index_branches()

    # Index buses.
    # ref = [b._i for b in buses if b.type == REFERENCE]
    pv = [b._i for b in buses if b.type == PV]
    pq = [b._i for b in buses if b.type == PQ]

    # Build admittance matrices.
    Ybus, Yf, Yt = case.Y

    # Prepare initial guess.
    V0 = self.getV0(self.v_mag_guess, buses, generators)

    # Start the clock.
    t0 = time()

    # Initialise SE.
    converged = False
    i = 0
    V = V0
    Va = angle(V0)
    Vm = abs(V0)

    nb = Ybus.shape[0]
    f = [b.from_bus._i for b in branches]
    t = [b.to_bus._i for b in branches]
    nonref = pv + pq

    # Form measurement vector.
    z = array([m.value for m in meas])

    # Form measurement index vectors.
    idx_zPf = [m.b_or_l._i for m in meas if m.type == PF]
    idx_zPt = [m.b_or_l._i for m in meas if m.type == PT]
    idx_zQf = [m.b_or_l._i for m in meas if m.type == QF]
    idx_zQt = [m.b_or_l._i for m in meas if m.type == QT]
    idx_zPg = [m.b_or_l._i for m in meas if m.type == PG]
    idx_zQg = [m.b_or_l._i for m in meas if m.type == QG]
    idx_zVm = [m.b_or_l._i for m in meas if m.type == VM]
    idx_zVa = [m.b_or_l._i for m in meas if m.type == VA]

    def col(seq):
        return [[k] for k in seq]

    # Create inverse of covariance matrix with all measurements.
    # full_scale = 30
    # sigma = [
    #     0.02 * abs(Sf) + 0.0052 * full_scale * ones(nbr,1),
    #     0.02 * abs(St) + 0.0052 * full_scale * ones(nbr,1),
    #     0.02 * abs(Sbus) + 0.0052 * full_scale * ones(nb,1),
    #     0.2 * pi/180 * 3*ones(nb,1),
    #     0.02 * abs(Sf) + 0.0052 * full_scale * ones(nbr,1),
    #     0.02 * abs(St) + 0.0052 * full_scale * ones(nbr,1),
    #     0.02 * abs(Sbus) + 0.0052 * full_scale * ones(nb,1),
    #     0.02 * abs(V0) + 0.0052 * 1.1 * ones(nb,1),
    # ] ./ 3

    # Get R inverse matrix.
    sigma_vector = r_[
        self.sigma[0] * ones(len(idx_zPf)),
        self.sigma[1] * ones(len(idx_zPt)),
        self.sigma[2] * ones(len(idx_zQf)),
        self.sigma[3] * ones(len(idx_zQt)),
        self.sigma[4] * ones(len(idx_zPg)),
        self.sigma[5] * ones(len(idx_zQg)),
        self.sigma[6] * ones(len(idx_zVm)),
        self.sigma[7] * ones(len(idx_zVa))
    ]
    sigma_squared = sigma_vector ** 2

    rsig = range(len(sigma_squared))
    Rinv = csr_matrix((1.0 / sigma_squared, (rsig, rsig)))

    # Do Newton iterations.
    while (not converged) and (i < self.max_iter):
        i += 1

        # Compute estimated measurement.
        Sfe = V[f] * conj(Yf * V)
        Ste = V[t] * conj(Yt * V)

        # Compute net injection at generator buses.
        gbus = [g.bus._i for g in generators]
        Sgbus = V[gbus] * conj(Ybus[gbus, :] * V)
        # inj S + local Sd
        Sd = array([complex(b.p_demand, b.q_demand) for b in buses])
        Sgen = (Sgbus * baseMVA + Sd) / baseMVA

        z_est = r_[
            Sfe[idx_zPf].real,
            Ste[idx_zPt].real,
            Sfe[idx_zQf].imag,
            Ste[idx_zQt].imag,
            Sgen[idx_zPg].real,
            Sgen[idx_zQg].imag,
            abs(V[idx_zVm]),
            angle(V[idx_zVa])
        ]

        # Get H matrix.
        dSbus_dVm, dSbus_dVa = case.dSbus_dV(Ybus, V)
        dSf_dVa, dSf_dVm, dSt_dVa, dSt_dVm, _, _ = case.dSbr_dV(Yf, Yt, V)

        # Get sub-matrix of H relating to line flow.
        dPF_dVa = dSf_dVa.real  # from end
        dQF_dVa = dSf_dVa.imag
        dPF_dVm = dSf_dVm.real
        dQF_dVm = dSf_dVm.imag
        dPT_dVa = dSt_dVa.real  # to end
        dQT_dVa = dSt_dVa.imag
        dPT_dVm = dSt_dVm.real
        dQT_dVm = dSt_dVm.imag

        # Get sub-matrix of H relating to generator output.
        dPG_dVa = dSbus_dVa[gbus, :].real
        dQG_dVa = dSbus_dVa[gbus, :].imag
        dPG_dVm = dSbus_dVm[gbus, :].real
        dQG_dVm = dSbus_dVm[gbus, :].imag

        # Get sub-matrix of H relating to voltage angle.
        dVa_dVa = csr_matrix((ones(nb), (range(nb), range(nb))))
        dVa_dVm = csr_matrix((nb, nb))

        # Get sub-matrix of H relating to voltage magnitude.
        dVm_dVa = csr_matrix((nb, nb))
        dVm_dVm = csr_matrix((ones(nb), (range(nb), range(nb))))

        h = [(col(idx_zPf), dPF_dVa, dPF_dVm),
             (col(idx_zQf), dQF_dVa, dQF_dVm),
             (col(idx_zPt), dPT_dVa, dPT_dVm),
             (col(idx_zQt), dQT_dVa, dQT_dVm),
             (col(idx_zPg), dPG_dVa, dPG_dVm),
             (col(idx_zQg), dQG_dVa, dQG_dVm),
             (col(idx_zVm), dVm_dVa, dVm_dVm),
             (col(idx_zVa), dVa_dVa, dVa_dVm)]

        H = vstack([hstack([dVa[idx, nonref], dVm[idx, nonref]])
                    for idx, dVa, dVm in h if len(idx) > 0])

        # Compute update step.
        J = H.T * Rinv * H
        F = H.T * Rinv * (z - z_est)  # evaluate F(x)
        dx = spsolve(J, F)

        # Check for convergence.
        normF = linalg.norm(F, Inf)
        if self.verbose:
            logger.info("Iteration [%d]: Norm of mismatch: %.3f" % (i, normF))
        if normF < self.tolerance:
            converged = True

        # Update voltage.
        npvpq = len(nonref)
        Va[nonref] = Va[nonref] + dx[:npvpq]
        Vm[nonref] = Vm[nonref] + dx[npvpq:2 * npvpq]

        V = Vm * exp(1j * Va)
        Va = angle(V)
        Vm = abs(V)

    # Weighted sum of squares of the error.
    error_sqrsum = sum((z - z_est) ** 2 / sigma_squared)

    # Update case with solution.
    case.pf_solution(Ybus, Yf, Yt, V)

    # Stop the clock.
    elapsed = time() - t0

    if self.verbose and converged:
        print("State estimation converged in: %.3fs (%d iterations)"
              % (elapsed, i))
        # self.output_solution(sys.stdout, z, z_est)

    solution = {"V": V, "converged": converged, "iterations": i, "z": z,
                "z_est": z_est, "error_sqrsum": error_sqrsum,
                "elapsed": elapsed}
    return solution
def check_error(self, response, status, err_cd):
    "Check an error in the response."
    if 'status' not in response:
        return False
    if response['status'] != status:
        return False
    if 'msgs' not in response:
        return False
    if not isinstance(response['msgs'], list):
        return False
    for msg in response['msgs']:
        if 'LVL' in msg and msg['LVL'] != 'ERROR':
            continue
        if 'ERR_CD' in msg and msg['ERR_CD'] == err_cd:
            return True
    return False
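A hypothetical response payload showing what the checks above accept (field names follow the code; the values are made up):

response = {
    'status': 'rejected',
    'msgs': [
        {'LVL': 'INFO', 'MSG': 'request received'},
        {'LVL': 'ERROR', 'ERR_CD': 'E2002'},
    ],
}
# client.check_error(response, 'rejected', 'E2002')  -> True  (error code matches)
# client.check_error(response, 'rejected', 'E9999')  -> False (no such code)
# client.check_error(response, 'accepted', 'E2002')  -> False (status differs)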
def link(url, text='', classes='', target='', get="", **kwargs):
    ''' Output a link tag. '''
    if not (url.startswith('http') or url.startswith('/')):
        # Handle additional reverse args.
        urlargs = {}
        for arg, val in kwargs.items():
            if arg[:4] == "url_":
                urlargs[arg[4:]] = val
        url = reverse(url, kwargs=urlargs)
    if get:
        url += '?' + get
    return html.tag('a', text or url, {
        'class': classes,
        'target': target,
        'href': url})
def newline(self):
    """
    Advances the cursor position to the left-hand side, and to the next line.
    If the cursor is on the lowest line, the displayed contents are scrolled,
    causing the top line to be lost.
    """
    self.carriage_return()
    if self._cy + (2 * self._ch) >= self._device.height:
        # Simulate a vertical scroll
        copy = self._backing_image.crop((0, self._ch, self._device.width,
                                         self._device.height))
        self._backing_image.paste(copy, (0, 0))
        self._canvas.rectangle((0, copy.height, self._device.width,
                                self._device.height),
                               fill=self.default_bgcolor)
    else:
        self._cy += self._ch

    self.flush()
    if self.animate:
        time.sleep(0.2)
def add_secondary_ip(self, ip_address, interface=1):
    """Adds an IP address as a secondary IP address

    :param ip_address: String IP address to add as a secondary IP
    :param interface: Integer associated to the interface/device number
    :return: None
    :raises: AWSAPIError, EC2UtilError
    """
    log = logging.getLogger(self.cls_logger + '.add_secondary_ip')

    # Get the ENI ID
    eni_id = self.get_eni_id(interface)

    # Verify the ENI ID was found
    if eni_id is None:
        msg = 'Unable to find the corresponding ENI ID for interface: {i}'.format(
            i=interface)
        log.error(msg)
        raise EC2UtilError(msg)
    else:
        log.info('Found ENI ID: {e}'.format(e=eni_id))

    # Assign the secondary IP address
    log.info('Attempting to assign the secondary IP address...')
    try:
        self.client.assign_private_ip_addresses(
            NetworkInterfaceId=eni_id,
            PrivateIpAddresses=[
                ip_address,
            ],
            AllowReassignment=True
        )
    except ClientError as exc:
        # Python-3 re-raise; the original used the Python-2-only form
        # "raise AWSAPIError, msg, trace"
        msg = 'Unable to assign secondary IP address\n{e}'.format(e=str(exc))
        log.error(msg)
        raise AWSAPIError(msg) from exc
    log.info('Successfully added secondary IP address {s} to ENI ID {e} on interface {i}'.format(
        s=ip_address, e=eni_id, i=interface))
def _systemd_notify_once():
    """Send notification once to Systemd that service is ready.

    Systemd sets NOTIFY_SOCKET environment variable with the name of the
    socket listening for notifications from services.
    This method removes the NOTIFY_SOCKET environment variable to ensure
    notification is sent only once.
    """
    notify_socket = os.getenv('NOTIFY_SOCKET')
    if notify_socket:
        if notify_socket.startswith('@'):
            # abstract namespace socket
            notify_socket = '\0%s' % notify_socket[1:]
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        with contextlib.closing(sock):
            try:
                sock.connect(notify_socket)
                sock.sendall(b'READY=1')
                del os.environ['NOTIFY_SOCKET']
            except EnvironmentError:
                LOG.debug("Systemd notification failed", exc_info=True)
def gateways_info():
    """Returns gateways data."""
    data = netifaces.gateways()
    results = {'default': {}}
    with suppress(KeyError):
        results['ipv4'] = data[netifaces.AF_INET]
        results['default']['ipv4'] = data['default'][netifaces.AF_INET]
    with suppress(KeyError):
        results['ipv6'] = data[netifaces.AF_INET6]
        results['default']['ipv6'] = data['default'][netifaces.AF_INET6]
    return results
def _set_visible(self, visibility, grid_index=None):
    """Sets the visibility property of all axes."""
    if grid_index is None:
        for ax in self.flat_grid:
            ax.set_visible(visibility)
    else:
        if grid_index < 0 or grid_index >= len(self.grids):
            raise IndexError('Valid indices : 0 to {}'.format(len(self.grids) - 1))
        for ax in self.grids[grid_index]:
            ax.set_visible(visibility)
def _has_y(self, kwargs):
    '''Returns True if y is explicitly defined in kwargs'''
    return (('y' in kwargs) or
            (self._element_y in kwargs) or
            (self._type == 3 and self._element_1my in kwargs))
def save_image(image, filename=None):
    """
    Saves a Docker image from the remote to a local file. For performance
    reasons, uses the Docker command line client on the host, generates a
    gzip-tarball and downloads that.

    :param image: Image name or id.
    :type image: unicode
    :param filename: File name to store the local file. If not provided, will
      use ``<image>.tar.gz`` in the current working directory.
    :type filename: unicode
    """
    local_name = filename or '{0}.tar.gz'.format(image)
    cli.save_image(image, local_name)
def body(quantity=2, separator='\n\n', wrap_start='', wrap_end='',
         html=False, sentences_quantity=3, as_list=False):
    """Return a random email text."""
    return lorem_ipsum.paragraphs(quantity=quantity, separator=separator,
                                  wrap_start=wrap_start, wrap_end=wrap_end,
                                  html=html,
                                  sentences_quantity=sentences_quantity,
                                  as_list=as_list)
def select_many_with_index(
        self,
        collection_selector=IndexedElement,
        result_selector=lambda source_element, collection_element: collection_element):
    '''Projects each element of a sequence to an intermediate new sequence,
    incorporating the index of the element, flattens the resulting sequence
    into one sequence and optionally transforms the flattened sequence using
    a selector function.

    Note: This method uses deferred execution.

    Args:
        collection_selector: A binary function mapping each element of the
            source sequence into an intermediate sequence, by incorporating
            its index in the source sequence. The two positional arguments
            to the function are the zero-based index of the source element
            and the value of the element. The result of the function should
            be an iterable derived from the index and element value. If no
            collection_selector is provided, the elements of the
            intermediate sequence will consist of tuples of (index, element)
            from the source sequence.

        result_selector: An optional binary function mapping the elements in
            the flattened intermediate sequence together with their
            corresponding source elements to elements of the result
            sequence. The two positional arguments of the result_selector
            are, first the source element corresponding to an element from
            the intermediate sequence, and second the actual element from
            the intermediate sequence. The return value should be the
            corresponding value in the result sequence. If no
            result_selector function is provided, the elements of the
            flattened intermediate sequence are returned untransformed.

    Returns:
        A Queryable over a generated sequence whose elements are the result
        of applying the one-to-many collection_selector to each element of
        the source sequence which incorporates both the index and value of
        the source element, concatenating the results into an intermediate
        sequence, and then mapping each of those elements through the
        result_selector into the result sequence.

    Raises:
        ValueError: If this Queryable has been closed.
        TypeError: If projector [and selector] are not callable.
    '''
    if self.closed():
        raise ValueError("Attempt to call select_many_with_index() on a "
                         "closed Queryable.")
    if not is_callable(collection_selector):
        raise TypeError("select_many_with_index() parameter "
                        "projector={0} is not callable".format(repr(collection_selector)))
    if not is_callable(result_selector):
        raise TypeError("select_many_with_index() parameter "
                        "selector={0} is not callable".format(repr(result_selector)))
    return self._create(
        self._generate_select_many_with_index(collection_selector,
                                              result_selector))
def tag_labels(self):
    """Tag named entity labels in the ``words`` layer."""
    if not self.is_tagged(ANALYSIS):
        self.tag_analysis()
    if self.__ner_tagger is None:
        self.__ner_tagger = load_default_ner_tagger()
    self.__ner_tagger.tag_document(self)
    return self
def stats_set_value(self, key, value=1):
    """Set the specified key/value in the per-message measurements

    .. versionadded:: 3.13.0

    .. note:: If this method is called when there is not a message being
        processed, a message will be logged at the ``warning`` level to
        indicate the value is being dropped. To suppress these warnings,
        set the :attr:`rejected.consumer.Consumer.IGNORE_OOB_STATS`
        attribute to :data:`True`.

    :param key: The key to set the value for
    :type key: :class:`str`
    :param value: The value
    :type value: :class:`int` or :class:`float`

    """
    if not self._measurement:
        if not self.IGNORE_OOB_STATS:
            self.logger.warning(
                'stats_set_value invoked outside execution')
        return
    self._measurement.set_value(key, value)
def save_to_file(self, file_path, labels=None, predict_proba=True,
                 show_predicted_value=True, **kwargs):
    """Saves html explanation to file.

    Params:
        file_path: file to save explanations to

    See as_html() for additional parameters.
    """
    with open(file_path, 'w', encoding='utf8') as file_:
        file_.write(self.as_html(labels=labels,
                                 predict_proba=predict_proba,
                                 show_predicted_value=show_predicted_value,
                                 **kwargs))
def prepare_transaction(*, operation='CREATE', signers=None,
                        recipients=None, asset=None, metadata=None,
                        inputs=None):
    """Prepares a transaction payload, ready to be fulfilled.

    Depending on the value of ``operation``, simply dispatches to either
    :func:`~.prepare_create_transaction` or
    :func:`~.prepare_transfer_transaction`.

    Args:
        operation (str): The operation to perform. Must be ``'CREATE'``
            or ``'TRANSFER'``. Case insensitive. Defaults to ``'CREATE'``.
        signers (:obj:`list` | :obj:`tuple` | :obj:`str`, optional):
            One or more public keys representing the issuer(s) of the
            asset being created. Only applies for ``'CREATE'``
            operations. Defaults to ``None``.
        recipients (:obj:`list` | :obj:`tuple` | :obj:`str`, optional):
            One or more public keys representing the new recipients(s)
            of the asset being created or transferred.
            Defaults to ``None``.
        asset (:obj:`dict`, optional): The asset to be created or
            transferred. MUST be supplied for ``'TRANSFER'`` operations.
            Defaults to ``None``.
        metadata (:obj:`dict`, optional): Metadata associated with the
            transaction. Defaults to ``None``.
        inputs (:obj:`dict` | :obj:`list` | :obj:`tuple`, optional):
            One or more inputs holding the condition(s) that this
            transaction intends to fulfill. Each input is expected to
            be a :obj:`dict`. Only applies to, and MUST be supplied
            for, ``'TRANSFER'`` operations.

    Returns:
        dict: The prepared transaction.

    Raises:
        :class:`~.exceptions.BigchaindbException`: If ``operation`` is
            not ``'CREATE'`` or ``'TRANSFER'``.

    .. important::

        **CREATE operations**

        * ``signers`` MUST be set.
        * ``recipients``, ``asset``, and ``metadata`` MAY be set.
        * If ``asset`` is set, it MUST be in the form of::

            {
                'data': {
                    ...
                }
            }

        * The argument ``inputs`` is ignored.
        * If ``recipients`` is not given, or evaluates to ``False``, it
          will be set equal to ``signers``::

            if not recipients:
                recipients = signers

        **TRANSFER operations**

        * ``recipients``, ``asset``, and ``inputs`` MUST be set.
        * ``asset`` MUST be in the form of::

            {
                'id': '<Asset ID (i.e. TX ID of its CREATE transaction)>'
            }

        * ``metadata`` MAY be set.
        * The argument ``signers`` is ignored.

    """
    operation = _normalize_operation(operation)
    return _prepare_transaction(
        operation,
        signers=signers,
        recipients=recipients,
        asset=asset,
        metadata=metadata,
        inputs=inputs,
    )
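A minimal sketch of a CREATE call following the contract above (assumes the BigchainDB driver's offchain module is importable; the signer key is a placeholder, real calls pass a base58-encoded Ed25519 public key):

# from bigchaindb_driver.offchain import prepare_transaction
create_tx = prepare_transaction(
    operation='CREATE',
    signers='GDcbVm...placeholder-public-key...',   # hypothetical key
    asset={'data': {'serial_number': 'ABC123'}},
    metadata={'planet': 'earth'},
)
# recipients was omitted, so per the docstring it defaults to the signers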
def read_line(self, line):
    """Read a new line"""
    if self.ignore:
        return

    for i, char in enumerate(line):
        if char not in ['"', "'"]:
            continue

        # Is the char escaped?
        if line[i - 1:i] == '\\':
            continue

        if self.single == char:
            self.single = None
            continue
        if self.single is not None:
            continue

        if not self.python:
            continue

        if self.triple == char:
            if line[i - 2:i + 1] == 3 * char:
                self.triple = None
            continue
        if self.triple is not None:
            continue

        if line[i - 2:i + 1] == 3 * char:
            self.triple = char
            continue

        self.single = char

    # Line ended
    if self.python:
        self.single = None
def translate_wp_comment(self, e):
    """
    <wp:comment>
        <wp:comment_id>1234</wp:comment_id>
        <wp:comment_author><![CDATA[John Doe]]></wp:comment_author>
        <wp:comment_author_email><![CDATA[info@adsasd.com]]></wp:comment_author_email>
        <wp:comment_author_url>http://myhomepage.com/</wp:comment_author_url>
        <wp:comment_author_IP><![CDATA[12.123.123.123]]></wp:comment_author_IP>
        <wp:comment_date><![CDATA[2008-09-25 14:24:51]]></wp:comment_date>
        <wp:comment_date_gmt><![CDATA[2008-09-25 13:24:51]]></wp:comment_date_gmt>
        <wp:comment_content><![CDATA[Hey dude :)]]></wp:comment_content>
        <wp:comment_approved><![CDATA[1]]></wp:comment_approved>
        <wp:comment_type><![CDATA[]]></wp:comment_type>
        <wp:comment_parent>0</wp:comment_parent>
        <wp:comment_user_id>0</wp:comment_user_id>
    </wp:comment>
    """
    comment_dict = {}
    comment_dict['ID'] = e.find('./{wp}comment_id').text
    comment_dict['date'] = e.find('{wp}comment_date').text
    comment_dict['content'] = e.find('{wp}comment_content').text
    comment_dict['status'] = e.find('{wp}comment_approved').text
    comment_dict['status'] = "approved" if comment_dict['status'] == "1" else "rejected"
    comment_dict['parent'] = e.find('{wp}comment_parent').text
    comment_dict['author'] = e.find('{wp}comment_author').text
    comment_dict['date'] = time.strptime(comment_dict['date'], '%Y-%m-%d %H:%M:%S')
    comment_dict['date'] = time.strftime('%Y-%m-%dT%H:%M:%S', comment_dict['date'])
    return comment_dict
def charts_slug_get(self, slug, **kwargs):
    """
    Chart

    A Chart is chosen by Pollster editors. One example is "Obama job
    approval - Democrats". It is always based upon a single Question.
    Users should strongly consider basing their analysis on Questions
    instead. Charts are derived data; Pollster editors publish them and
    change them as editorial priorities change.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.charts_slug_get(slug, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str slug: Unique identifier for a Chart (required)
    :return: Chart
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        return self.charts_slug_get_with_http_info(slug, **kwargs)
    else:
        (data) = self.charts_slug_get_with_http_info(slug, **kwargs)
        return data
def match(cls, event):
    """Match a given CWE event as a CloudTrail event with an API call
    that has its information filled out.
    """
    if 'detail' not in event:
        return False
    if 'eventName' not in event['detail']:
        return False
    k = event['detail']['eventName']

    # We want callers to use a compiled expression, but want to avoid
    # initialization cost of doing it without cause. Not thread safe,
    # but usage context is lambda entry.
    if k in cls.trail_events:
        v = dict(cls.trail_events[k])
        if isinstance(v['ids'], six.string_types):
            v['ids'] = e = jmespath.compile('detail.%s' % v['ids'])
            cls.trail_events[k]['ids'] = e
        return v
    return False
def is_rfc822(self) -> bool:
    """True if the content-type of the message is ``message/rfc822``."""
    ct_hdr = self.header.parsed.content_type
    if ct_hdr is None:
        return False
    else:
        return ct_hdr.content_type == 'message/rfc822'
def register_layouts(layouts, app, url="/api/props/", brand="Pyxley"):
    """ register UILayout with the flask app

        create a function that will send props for each UILayout

        Args:
            layouts (dict): dict of UILayout objects by name
            app (object): flask app
            url (string): address of props; default is /api/props/
    """
    def props(name):
        if name not in layouts:
            # cast as list for python3
            name = list(layouts.keys())[0]
        return jsonify({"layouts": layouts[name]["layout"]})

    def apps():
        paths = []
        for i, k in enumerate(layouts.keys()):
            if i == 0:
                paths.append({
                    "path": "/",
                    "label": layouts[k].get("title", k)
                })
            paths.append({
                "path": "/" + k,
                "label": layouts[k].get("title", k)
            })
        return jsonify({"brand": brand, "navlinks": paths})

    app.add_url_rule(url + "<string:name>/", view_func=props)
    app.add_url_rule(url, view_func=apps)
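A hedged wiring example (the layout-dict shape is inferred from the lookups above: each entry needs a "layout" key and may carry a "title"; the "dash" entry and its props payload are made up):

from flask import Flask

app = Flask(__name__)
layouts = {
    "dash": {"layout": {"components": []}, "title": "Dashboard"},
}
register_layouts(layouts, app)
# GET /api/props/dash/ -> {"layouts": {"components": []}}
# GET /api/props/      -> {"brand": "Pyxley", "navlinks": [
#     {"path": "/", "label": "Dashboard"},
#     {"path": "/dash", "label": "Dashboard"}]}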
def register_preset(cls, name, preset):
    """
    Register a preset instance with the class of the hub it corresponds to.
    This allows individual plugin objects to automatically register
    themselves with a preset by using a classmethod of their own with only
    the name of the preset to register with.
    """
    if cls._presets is None:
        cls._presets = {}
    cls._presets[name] = preset
def get_thread_info(self, enforce_re=True, latest_date=None):
    """Return a json list with information about threads in the group.

    :param enforce_re=True: Whether to require titles to match regexp
                            in self.topic_re.

    :param latest_date=None: Optional datetime.datetime for latest date to
                             consider. Things past this are ignored.

    ~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-

    :return: List of github items found.

    ~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-

    PURPOSE: Return a json list with information about threads in the
             group. Along with latest_date, this can be used to show
             issues.
    """
    result = []
    my_re = re.compile(self.topic_re)
    url = '%s/issues?sort=updated' % (self.base_url)
    latest_date = self.parse_date(latest_date) if latest_date else None
    while url:
        kwargs = {} if not self.gh_info.user else {'auth': (
            self.gh_info.user, self.gh_info.token)}
        my_req = requests.get(url, params=self.params, **kwargs)
        my_json = my_req.json()
        for item in my_json:
            if (not enforce_re) or my_re.search(item['title']):
                idate = self.parse_date(item['updated_at'])
                if (latest_date is not None and idate > latest_date):
                    logging.debug('Skip %s since updated at %s > %s',
                                  item['title'], idate, latest_date)
                    continue
                result.append(item)
                if self.max_threads is not None and len(
                        result) >= self.max_threads:
                    logging.debug('Stopping after max_threads=%i threads.',
                                  len(result))
                    return result
        url = None
        if 'link' in my_req.headers:
            link = my_req.headers['link'].split(',')
            for thing in link:
                potential_url, part = thing.split('; ')
                if part == 'rel="next"':
                    url = potential_url.lstrip(' <').rstrip('> ')
    return result
def _logpdf(self, **kwargs):
    """Returns the log of the pdf at the given values. The keyword
    arguments must contain all of the parameters in self's params.
    Unrecognized arguments are ignored.
    """
    for p in self._params:
        if p not in kwargs.keys():
            raise ValueError(
                'Missing parameter {} to construct pdf.'.format(p))
    if kwargs in self:
        log_pdf = self._lognorm + \
            (self.dim - 1) * \
            numpy.log([kwargs[p] for p in self._params]).sum()
        return log_pdf
    else:
        return -numpy.inf
def dict_factory(self, cursor, row):
    """ Sqlite callback accepting the cursor and the original row as a tuple.

    Simple return of JSON safe types.

    Args:
        cursor (sqlite cursor): Original cursor
        row (sqlite row tuple): Original row.

    Returns:
        dict: modified row.
    """
    d = {}
    for idx, col in enumerate(cursor.description):
        val = row[idx]
        name = col[0]
        if name == Field.Time_Stamp:
            d[col[0]] = str(val)
            continue
        if name == "Raw_A" or name == "Raw_B":  # or name == Field.Meter_Time:
            continue
        if name not in self.m_all_fields:
            continue
        if (str(val) != "None") and ((val > 0) or (val < 0)):
            d[name] = str(val)
    return d
def linestrings_intersect(line1, line2):
    """
    To validate whether linestrings from geojson are intersected with each
    other.
    reference: http://www.kevlindev.com/gui/math/intersection/Intersection.js

    Keyword arguments:
    line1 -- first line geojson object
    line2 -- second line geojson object

    if(line1 intersects with other) return intersect point array
    else empty array
    """
    intersects = []
    for i in range(0, len(line1['coordinates']) - 1):
        for j in range(0, len(line2['coordinates']) - 1):
            a1_x = line1['coordinates'][i][1]
            a1_y = line1['coordinates'][i][0]
            a2_x = line1['coordinates'][i + 1][1]
            a2_y = line1['coordinates'][i + 1][0]
            b1_x = line2['coordinates'][j][1]
            b1_y = line2['coordinates'][j][0]
            b2_x = line2['coordinates'][j + 1][1]
            b2_y = line2['coordinates'][j + 1][0]
            ua_t = (b2_x - b1_x) * (a1_y - b1_y) - \
                (b2_y - b1_y) * (a1_x - b1_x)
            ub_t = (a2_x - a1_x) * (a1_y - b1_y) - \
                (a2_y - a1_y) * (a1_x - b1_x)
            u_b = (b2_y - b1_y) * (a2_x - a1_x) - \
                (b2_x - b1_x) * (a2_y - a1_y)
            if not u_b == 0:
                u_a = ua_t / u_b
                u_b = ub_t / u_b
                if 0 <= u_a and u_a <= 1 and 0 <= u_b and u_b <= 1:
                    intersects.append({'type': 'Point', 'coordinates': [
                        a1_x + u_a * (a2_x - a1_x),
                        a1_y + u_a * (a2_y - a1_y)]})
    # if len(intersects) == 0:
    #     intersects = False
    return intersects
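A self-contained check with two crossing diagonals (note the code reads each coordinate pair as [y, x]; the symmetric example below hides that):

line_a = {'type': 'LineString', 'coordinates': [[0, 0], [2, 2]]}
line_b = {'type': 'LineString', 'coordinates': [[0, 2], [2, 0]]}
print(linestrings_intersect(line_a, line_b))
# -> [{'type': 'Point', 'coordinates': [1.0, 1.0]}]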
def spawn_missing_classes(self, context=None):
    """
    Creates the appropriate python user relation classes from tables in
    the schema and places them in the context.

    :param context: alternative context to place the missing classes into,
        e.g. locals()
    """
    if context is None:
        if self.context is not None:
            context = self.context
        else:
            # if context is missing, use the calling namespace
            frame = inspect.currentframe().f_back
            context = frame.f_locals
            del frame
    tables = [
        row[0] for row in self.connection.query('SHOW TABLES in `%s`' % self.database)
        if lookup_class_name('`{db}`.`{tab}`'.format(db=self.database, tab=row[0]),
                             context, 0) is None]
    master_classes = (Lookup, Manual, Imported, Computed)
    part_tables = []
    for table_name in tables:
        class_name = to_camel_case(table_name)
        if class_name not in context:
            try:
                cls = next(cls for cls in master_classes
                           if re.fullmatch(cls.tier_regexp, table_name))
            except StopIteration:
                if re.fullmatch(Part.tier_regexp, table_name):
                    part_tables.append(table_name)
            else:
                # declare and decorate master relation classes
                context[class_name] = self(type(class_name, (cls,), dict()))

    # attach parts to masters
    for table_name in part_tables:
        groups = re.fullmatch(Part.tier_regexp, table_name).groupdict()
        class_name = to_camel_case(groups['part'])
        try:
            master_class = context[to_camel_case(groups['master'])]
        except KeyError:
            raise DataJointError('The table %s does not follow DataJoint '
                                 'naming conventions' % table_name)
        part_class = type(class_name, (Part,), dict(definition=...))
        part_class._master = master_class
        self.process_relation_class(part_class, context=context,
                                    assert_declared=True)
        setattr(master_class, class_name, part_class)
def dict_2_mat(data, fill=True):
    """
    Creates a NumPy array from a dictionary with only integers as keys and
    NumPy arrays as values. Dimension 0 of the resulting array is formed
    from data.keys(). Missing values in keys can be filled up with np.nan
    (default) or ignored.

    Parameters
    ----------
    data : dict
        a dictionary with integers as keys and array-likes of the same
        shape as values
    fill : boolean
        flag specifying if the resulting matrix will keep a correspondence
        between dictionary keys and matrix indices by filling up missing
        keys with matrices of NaNs. Defaults to True

    Returns
    -------
    numpy array with one more dimension than the values of the input dict
    """
    if any([type(k) != int for k in list(data.keys())]):
        raise RuntimeError("Dictionary cannot be converted to matrix, "
                           "not all keys are ints")
    base_shape = np.array(list(data.values())[0]).shape
    result_shape = list(base_shape)
    if fill:
        result_shape.insert(0, max(data.keys()) + 1)
    else:
        result_shape.insert(0, len(list(data.keys())))
    result = np.empty(result_shape) + np.nan
    for (i, (k, v)) in enumerate(data.items()):
        v = np.array(v)
        if v.shape != base_shape:
            raise RuntimeError("Dictionary cannot be converted to matrix, "
                               "not all values have same dimensions")
        # The original index expression, `fill and [k][0] or [i][0]`, sent
        # key 0 to the wrong row when fill was true (0 is falsy); a plain
        # conditional expression avoids that.
        result[k if fill else i] = v
    return result
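A quick self-contained run showing both modes (the values are arbitrary):

import numpy as np

data = {0: [1.0, 2.0], 2: [5.0, 6.0]}
print(dict_2_mat(data))
# [[ 1.  2.]
#  [nan nan]     <- key 1 missing, NaN-filled
#  [ 5.  6.]]
print(dict_2_mat(data, fill=False).shape)   # (2, 2)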
def parse_version(package):
    """ Parse versions """
    init_file = f'{PACKAGE_ROOT}/{package}/__init__.py'
    with open(init_file, 'r', encoding='utf-8') as f:
        for line in f.readlines():
            if '__version__' in line:
                return line.split('=')[1].strip()[1:-1]
    return ''
def multigrad_dict(fun):
    """Takes gradients wrt all arguments simultaneously,
    returns a dict mapping 'argname' to 'gradval'"""
    # (the original had the docstring split into two separate string
    # literals, leaving the second as dead code)
    import funcsigs
    sig = funcsigs.signature(fun)

    def select(preds, lst):
        idx = lambda item: next(
            (i for i, pred in enumerate(preds) if pred(item)),
            len(preds))
        results = [[] for _ in preds] + [[]]
        for item in lst:
            results[idx(item)].append(item)
        return results

    is_var_pos = lambda name: sig.parameters[name].kind == sig.parameters[name].VAR_POSITIONAL
    is_var_kwd = lambda name: sig.parameters[name].kind == sig.parameters[name].VAR_KEYWORD
    var_pos, var_kwd, argnames = select([is_var_pos, is_var_kwd], sig.parameters)
    todict = lambda dct: {key: dct[key] for key in dct}

    def apply_defaults(arguments):
        defaults = {name: param.default
                    for name, param in sig.parameters.items()
                    if param.default is not param.empty}
        return OrderedDict((name, arguments[name] if name in arguments else defaults[name])
                           for name in sig.parameters)

    def gradfun(*args, **kwargs):
        bindings = sig.bind(*args, **kwargs)

        args = lambda dct: tuple(dct[var_pos[0]]) if var_pos else ()
        kwargs = lambda dct: todict(dct[var_kwd[0]]) if var_kwd else {}
        others = lambda dct: tuple(dct[argname] for argname in argnames
                                   if argname not in var_kwd + var_pos)

        newfun = lambda dct: fun(*(others(dct) + args(dct)), **kwargs(dct))

        argdict = apply_defaults(bindings.arguments)
        grad_dict = grad(newfun)(dict(argdict))
        return OrderedDict((argname, grad_dict[argname]) for argname in argdict)

    return gradfun
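A hedged usage sketch (assumes autograd and funcsigs are installed; this helper ships with autograd, whose `grad` and `OrderedDict` come from the enclosing module's imports):

from collections import OrderedDict
from autograd import grad
import autograd.numpy as anp

def loss(x, y=1.0):
    return anp.sum(x * x) + 3.0 * y

grads = multigrad_dict(loss)(anp.array([1.0, 2.0]), y=4.0)
# grads['x'] -> array([2., 4.]),  grads['y'] -> 3.0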
def isprintable(string):
    """Return if all characters in string are printable.

    >>> isprintable('abc')
    True
    >>> isprintable(b'\01')
    False

    """
    string = string.strip()
    if not string:
        return True
    if sys.version_info[0] == 3:
        try:
            return string.isprintable()
        except Exception:
            pass
        try:
            return string.decode('utf-8').isprintable()
        except Exception:
            pass
    else:
        if string.isalnum():
            return True
        printable = ('0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRST'
                     'UVWXYZ!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~ \t\n\r\x0b\x0c')
        return all(c in printable for c in string)
def nlp(self, inputString, sourceTime=None, version=None):
    """Utilizes parse() after making judgements about what datetime
    information belongs together.

    It makes logical groupings based on proximity and returns a parsed
    datetime for each matched grouping of datetime text, along with
    location info within the given inputString.

    @type  inputString: string
    @param inputString: natural language text to evaluate
    @type  sourceTime:  struct_time
    @param sourceTime:  C{struct_time} value to use as the base
    @type  version:     integer
    @param version:     style version, default will use L{Calendar}
                        parameter version value

    @rtype:  tuple or None
    @return: tuple of tuples in the format (parsed_datetime as
             datetime.datetime, flags as int, start_pos as int,
             end_pos as int, matched_text as string) or None if there
             were no matches
    """
    orig_inputstring = inputString

    # replace periods at the end of sentences w/ spaces
    # opposed to removing them altogether in order to
    # retain relative positions (identified by alpha, period, space).
    # this is required for some of the regex patterns to match
    inputString = re.sub(r'(\w)(\.)(\s)', r'\1 \3', inputString).lower()
    inputString = re.sub(r'(\w)(\'|")(\s|$)', r'\1 \3', inputString)
    inputString = re.sub(r'(\s|^)(\'|")(\w)', r'\1 \3', inputString)

    startpos = 0  # the start position in the inputString during the loop

    # list of lists in format:
    # [startpos, endpos, matchedstring, flags, type]
    matches = []

    while startpos < len(inputString):

        # empty match
        leftmost_match = [0, 0, None, 0, None]

        # Modifier like next\prev..
        m = self.ptc.CRE_MODIFIER.search(inputString[startpos:])
        if m is not None:
            if leftmost_match[1] == 0 or \
                    leftmost_match[0] > m.start() + startpos:
                leftmost_match[0] = m.start() + startpos
                leftmost_match[1] = m.end() + startpos
                leftmost_match[2] = m.group()
                leftmost_match[3] = 0
                leftmost_match[4] = 'modifier'

        # Quantity + Units
        m = self.ptc.CRE_UNITS.search(inputString[startpos:])
        if m is not None:
            debug and log.debug('CRE_UNITS matched')
            if self._UnitsTrapped(inputString[startpos:], m, 'units'):
                debug and log.debug('day suffix trapped by unit match')
            else:
                if leftmost_match[1] == 0 or \
                        leftmost_match[0] > m.start('qty') + startpos:
                    leftmost_match[0] = m.start('qty') + startpos
                    leftmost_match[1] = m.end('qty') + startpos
                    leftmost_match[2] = m.group('qty')
                    leftmost_match[3] = 3
                    leftmost_match[4] = 'units'

                    if m.start('qty') > 0 and \
                            inputString[m.start('qty') - 1] == '-':
                        leftmost_match[0] = leftmost_match[0] - 1
                        leftmost_match[2] = '-' + leftmost_match[2]

        # Quantity + Units
        m = self.ptc.CRE_QUNITS.search(inputString[startpos:])
        if m is not None:
            debug and log.debug('CRE_QUNITS matched')
            if self._UnitsTrapped(inputString[startpos:], m, 'qunits'):
                debug and log.debug('day suffix trapped by qunit match')
            else:
                if leftmost_match[1] == 0 or \
                        leftmost_match[0] > m.start('qty') + startpos:
                    leftmost_match[0] = m.start('qty') + startpos
                    leftmost_match[1] = m.end('qty') + startpos
                    leftmost_match[2] = m.group('qty')
                    leftmost_match[3] = 3
                    leftmost_match[4] = 'qunits'

                    if m.start('qty') > 0 and \
                            inputString[m.start('qty') - 1] == '-':
                        leftmost_match[0] = leftmost_match[0] - 1
                        leftmost_match[2] = '-' + leftmost_match[2]

        m = self.ptc.CRE_DATE3.search(inputString[startpos:])
        # NO LONGER NEEDED, THE REGEXP HANDLED MTHNAME NOW
        # for match in self.ptc.CRE_DATE3.finditer(inputString[startpos:]):
        #     # to prevent "HH:MM(:SS) time strings" expressions from
        #     # triggering this regex, we check if the month field exists
        #     # in the searched expression, if it doesn't exist, the date
        #     # field is not valid
        #     if match.group('mthname'):
        #         m = self.ptc.CRE_DATE3.search(inputString[startpos:],
        #                                       match.start())
        #         break

        # String date format
        if m is not None:
            if leftmost_match[1] == 0 or \
                    leftmost_match[0] > m.start('date') + startpos:
                leftmost_match[0] = m.start('date') + startpos
                leftmost_match[1] = m.end('date') + startpos
                leftmost_match[2] = m.group('date')
                leftmost_match[3] = 1
                leftmost_match[4] = 'dateStr'

        # Standard date format
        m = self.ptc.CRE_DATE.search(inputString[startpos:])
        if m is not None:
            if leftmost_match[1] == 0 or \
                    leftmost_match[0] > m.start('date') + startpos:
                leftmost_match[0] = m.start('date') + startpos
                leftmost_match[1] = m.end('date') + startpos
                leftmost_match[2] = m.group('date')
                leftmost_match[3] = 1
                leftmost_match[4] = 'dateStd'

        # Natural language day strings
        m = self.ptc.CRE_DAY.search(inputString[startpos:])
        if m is not None:
            if leftmost_match[1] == 0 or \
                    leftmost_match[0] > m.start() + startpos:
                leftmost_match[0] = m.start() + startpos
                leftmost_match[1] = m.end() + startpos
                leftmost_match[2] = m.group()
                leftmost_match[3] = 1
                leftmost_match[4] = 'dayStr'

        # Weekday
        m = self.ptc.CRE_WEEKDAY.search(inputString[startpos:])
        if m is not None:
            if inputString[startpos:] not in self.ptc.dayOffsets:
                if leftmost_match[1] == 0 or \
                        leftmost_match[0] > m.start() + startpos:
                    leftmost_match[0] = m.start() + startpos
                    leftmost_match[1] = m.end() + startpos
                    leftmost_match[2] = m.group()
                    leftmost_match[3] = 1
                    leftmost_match[4] = 'weekdy'

        # Natural language time strings
        m = self.ptc.CRE_TIME.search(inputString[startpos:])
        if m is not None:
            if leftmost_match[1] == 0 or \
                    leftmost_match[0] > m.start() + startpos:
                leftmost_match[0] = m.start() + startpos
                leftmost_match[1] = m.end() + startpos
                leftmost_match[2] = m.group()
                leftmost_match[3] = 2
                leftmost_match[4] = 'timeStr'

        # HH:MM(:SS) am/pm time strings
        m = self.ptc.CRE_TIMEHMS2.search(inputString[startpos:])
        if m is not None:
            if leftmost_match[1] == 0 or \
                    leftmost_match[0] > m.start('hours') + startpos:
                leftmost_match[0] = m.start('hours') + startpos
                leftmost_match[1] = m.end('meridian') + startpos
                leftmost_match[2] = inputString[leftmost_match[0]:
                                                leftmost_match[1]]
                leftmost_match[3] = 2
                leftmost_match[4] = 'meridian'

        # HH:MM(:SS) time strings
        m = self.ptc.CRE_TIMEHMS.search(inputString[startpos:])
        if m is not None:
            if leftmost_match[1] == 0 or \
                    leftmost_match[0] > m.start('hours') + startpos:
                leftmost_match[0] = m.start('hours') + startpos
                if m.group('seconds') is not None:
                    leftmost_match[1] = m.end('seconds') + startpos
                else:
                    leftmost_match[1] = m.end('minutes') + startpos
                leftmost_match[2] = inputString[leftmost_match[0]:
                                                leftmost_match[1]]
                leftmost_match[3] = 2
                leftmost_match[4] = 'timeStd'

        # Units only; must be preceded by a modifier
        if len(matches) > 0 and matches[-1][3] == 0:
            m = self.ptc.CRE_UNITS_ONLY.search(inputString[startpos:])
            # Ensure that any match is immediately preceded by the
            # modifier. "Next is the word 'month'" should not parse as a
            # date while "next month" should
            if m is not None and \
                    inputString[startpos:startpos + m.start()].strip() == '':
                debug and log.debug('CRE_UNITS_ONLY matched [%s]', m.group())
                if leftmost_match[1] == 0 or \
                        leftmost_match[0] > m.start() + startpos:
                    leftmost_match[0] = m.start() + startpos
                    leftmost_match[1] = m.end() + startpos
                    leftmost_match[2] = m.group()
                    leftmost_match[3] = 3
                    leftmost_match[4] = 'unitsOnly'

        # set the start position to the end pos of the leftmost match
        startpos = leftmost_match[1]

        # nothing was detected
        # so break out of the loop
        if startpos == 0:
            startpos = len(inputString)
        else:
            if leftmost_match[3] > 0:
                m = self.ptc.CRE_NLP_PREFIX.search(
                    inputString[:leftmost_match[0]] +
                    ' ' + str(leftmost_match[3]))
                if m is not None:
                    leftmost_match[0] = m.start('nlp_prefix')
                    leftmost_match[2] = inputString[leftmost_match[0]:
                                                    leftmost_match[1]]
            matches.append(leftmost_match)

    # find matches in proximity with one another and
    # return all the parsed values
    proximity_matches = []
    if len(matches) > 1:
        combined = ''
        from_match_index = 0
        date = matches[0][3] == 1
        time = matches[0][3] == 2
        units = matches[0][3] == 3
        for i in range(1, len(matches)):

            # test proximity (are there characters between matches?)
            endofprevious = matches[i - 1][1]
            begofcurrent = matches[i][0]
            if orig_inputstring[endofprevious:
                                begofcurrent].lower().strip() != '':
                # this one isn't in proximity, but maybe
                # we have enough to make a datetime
                # TODO: make sure the combination of
                # formats (modifier, dateStd, etc) makes logical sense
                # before parsing together
                if date or time or units:
                    combined = orig_inputstring[matches[from_match_index]
                                                [0]:matches[i - 1][1]]
                    parsed_datetime, flags = self.parse(combined,
                                                        sourceTime,
                                                        version)
                    proximity_matches.append((
                        datetime.datetime(*parsed_datetime[:6]),
                        flags,
                        matches[from_match_index][0],
                        matches[i - 1][1],
                        combined))
                # not in proximity, reset starting from current
                from_match_index = i
                date = matches[i][3] == 1
                time = matches[i][3] == 2
                units = matches[i][3] == 3
                continue
            else:
                if matches[i][3] == 1:
                    date = True
                if matches[i][3] == 2:
                    time = True
                if matches[i][3] == 3:
                    units = True

        # check last
        # we have enough to make a datetime
        if date or time or units:
            combined = orig_inputstring[matches[from_match_index][0]:
                                        matches[len(matches) - 1][1]]
            parsed_datetime, flags = self.parse(combined, sourceTime,
                                                version)
            proximity_matches.append((
                datetime.datetime(*parsed_datetime[:6]),
                flags,
                matches[from_match_index][0],
                matches[len(matches) - 1][1],
                combined))

    elif len(matches) == 0:
        return None
    else:
        if matches[0][3] == 0:  # not enough info to parse
            return None
        else:
            combined = orig_inputstring[matches[0][0]:matches[0][1]]
            parsed_datetime, flags = self.parse(matches[0][2], sourceTime,
                                                version)
            proximity_matches.append((
                datetime.datetime(*parsed_datetime[:6]),
                flags,
                matches[0][0],
                matches[0][1],
                combined))

    return tuple(proximity_matches)
def wifi_status(self):
    """Get the wifi status."""
    return self._info_json.get(CONST.STATUS, {}).get(CONST.WIFI_LINK)
def get_usedby_and_readonly(self, id):
    """
    Gets the build plan details of the selected plan script as per the
    selected attributes.

    Args:
        id: ID of the Plan Script.

    Returns:
        array of build plans
    """
    uri = self.URI + "/" + id + "/usedby/readonly"
    return self._client.get(uri)
def update_body(app, pagename, templatename, context, doctree):
    """
    Add Read the Docs content to Sphinx body content.

    This is the most reliable way to inject our content into the page.
    """
    STATIC_URL = context.get('STATIC_URL', DEFAULT_STATIC_URL)
    online_builders = [
        'readthedocs', 'readthedocsdirhtml', 'readthedocssinglehtml'
    ]
    if app.builder.name == 'readthedocssinglehtmllocalmedia':
        if 'html_theme' in context and context['html_theme'] == 'sphinx_rtd_theme':
            theme_css = '_static/css/theme.css'
        else:
            theme_css = '_static/css/badge_only.css'
    elif app.builder.name in online_builders:
        if 'html_theme' in context and context['html_theme'] == 'sphinx_rtd_theme':
            theme_css = '%scss/sphinx_rtd_theme.css' % STATIC_URL
        else:
            theme_css = '%scss/badge_only.css' % STATIC_URL
    else:
        # Only insert on our HTML builds
        return

    inject_css = True

    # Starting at v0.4.0 of the sphinx theme, the theme CSS should not be injected
    # This decouples the theme CSS (which is versioned independently) from readthedocs.org
    if theme_css.endswith('sphinx_rtd_theme.css'):
        try:
            import sphinx_rtd_theme
            inject_css = LooseVersion(sphinx_rtd_theme.__version__) < LooseVersion('0.4.0')
        except ImportError:
            pass

    if inject_css and theme_css not in app.builder.css_files:
        if sphinx.version_info < (1, 8):
            app.builder.css_files.insert(0, theme_css)
        else:
            app.add_css_file(theme_css)

    # This is monkey patched on the signal because we can't know what the user
    # has done with their `app.builder.templates` before now.
    if not hasattr(app.builder.templates.render, '_patched'):
        # Janky monkey patch of template rendering to add our content
        old_render = app.builder.templates.render

        def rtd_render(self, template, render_context):
            """
            A decorator that renders the content with the users template
            renderer, then adds the Read the Docs HTML content at the end
            of body.
            """
            # Render Read the Docs content
            template_context = render_context.copy()
            template_context['rtd_css_url'] = '{}css/readthedocs-doc-embed.css'.format(STATIC_URL)
            template_context['rtd_analytics_url'] = '{}javascript/readthedocs-analytics.js'.format(
                STATIC_URL,
            )
            source = os.path.join(
                os.path.abspath(os.path.dirname(__file__)),
                '_templates',
                'readthedocs-insert.html.tmpl'
            )
            templ = open(source).read()
            rtd_content = app.builder.templates.render_string(templ, template_context)

            # Handle original render function
            content = old_render(template, render_context)
            end_body = content.lower().find('</head>')

            # Insert our content at the end of the body.
            if end_body != -1:
                content = content[:end_body] + rtd_content + "\n" + content[end_body:]
            else:
                log.debug("File doesn't look like HTML. Skipping RTD content addition")

            return content

        rtd_render._patched = True
        app.builder.templates.render = types.MethodType(rtd_render,
                                                        app.builder.templates)
Add Read the Docs content to Sphinx body content. This is the most reliable way to inject our content into the page.
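The `_patched` guard above is the load-bearing detail: it stops the wrapper from stacking if the event handler fires more than once. A minimal self-contained sketch of the same pattern, with a hypothetical `Templates` class standing in for the Sphinx object:

import types

class Templates:
    def render(self, template, context):
        return '<html><head></head><body>%s</body></html>' % context.get('body', '')

def patch_render(templates, extra):
    if getattr(templates.render, '_patched', False):
        return  # already wrapped; avoid stacking wrappers on repeated setup
    old_render = templates.render
    def wrapped(self, template, context):
        content = old_render(template, context)
        head_end = content.lower().find('</head>')
        if head_end != -1:
            content = content[:head_end] + extra + content[head_end:]
        return content
    wrapped._patched = True
    templates.render = types.MethodType(wrapped, templates)

t = Templates()
patch_render(t, '<link rel="stylesheet" href="extra.css">')
patch_render(t, '<link rel="stylesheet" href="extra.css">')  # second call is a no-op
print(t.render('page', {'body': 'hello'}))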
async def get_state(self): """Get the latest state change of QTM. If the :func:`~qtm.connect` on_event callback was set the callback will be called as well. :rtype: A :class:`qtm.QRTEvent` """ await self._protocol.send_command("getstate", callback=False) return await self._protocol.await_event()
Get the latest state change of QTM. If the :func:`~qtm.connect` on_event callback was set the callback will be called as well. :rtype: A :class:`qtm.QRTEvent`
def get_userinfo(self, access_token, id_token, payload): """Return user details dictionary. The id_token and payload are not used in the default implementation, but may be used when overriding this method""" user_response = requests.get( self.OIDC_OP_USER_ENDPOINT, headers={ 'Authorization': 'Bearer {0}'.format(access_token) }, verify=self.get_settings('OIDC_VERIFY_SSL', True)) user_response.raise_for_status() return user_response.json()
Return user details dictionary. The id_token and payload are not used in the default implementation, but may be used when overriding this method
def status_message(self):
        """Detailed message about whether the dependency is installed.

        :rtype: str
        """
        # Templates are formatted with the dependency itself as argument 0.
        if self.is_available:
            return "INSTALLED {0!s}".format(self)
        elif self.why and self.package:
            return ("MISSING {0!s:<20}needed for {0.why}, "
                    "part of the {0.package} package").format(self)
        elif self.why:
            return "MISSING {0!s:<20}needed for {0.why}".format(self)
        elif self.package:
            return "MISSING {0!s:<20}part of the {0.package} package".format(self)
        else:
            return "MISSING {0!s:<20}".format(self)
Detailed message about whether the dependency is installed. :rtype: str
def P(self): """Diffusion operator (cached) Return or calculate the diffusion operator Returns ------- P : array-like, shape=[n_samples, n_samples] diffusion operator defined as a row-stochastic form of the kernel matrix """ try: return self._diff_op except AttributeError: self._diff_op = normalize(self.kernel, 'l1', axis=1) return self._diff_op
Diffusion operator (cached) Return or calculate the diffusion operator Returns ------- P : array-like, shape=[n_samples, n_samples] diffusion operator defined as a row-stochastic form of the kernel matrix
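A quick standalone check of what the cached property computes: l1-normalizing a nonnegative kernel row-wise yields a row-stochastic matrix, so each row is a probability distribution over neighbors. A sketch with a stand-in Gaussian affinity (not the library's actual kernel construction):

import numpy as np
from sklearn.preprocessing import normalize

rng = np.random.default_rng(0)
X = rng.standard_normal((5, 2))
# Gaussian kernel as a stand-in affinity matrix
sq_dists = ((X[:, None, :] - X[None, :, :]) ** 2).sum(-1)
K = np.exp(-sq_dists)

P = normalize(K, 'l1', axis=1)          # divide each row by its sum
print(np.allclose(P.sum(axis=1), 1.0))  # True: rows are probability vectors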
def _setup_aggregation(self, aggregator=None): """ Wrap `self.index` method with ESAggregator. This makes `self.index` to first try to run aggregation and only on fail original method is run. Method is wrapped only if it is defined and `elasticsearch.enable_aggregations` setting is true. """ from nefertari.elasticsearch import ES if aggregator is None: aggregator = ESAggregator aggregations_enabled = ( ES.settings and ES.settings.asbool('enable_aggregations')) if not aggregations_enabled: log.debug('Elasticsearch aggregations are not enabled') return index = getattr(self, 'index', None) index_defined = index and index != self.not_allowed_action if index_defined: self.index = aggregator(self).wrap(self.index)
Wrap `self.index` method with ESAggregator. This makes `self.index` to first try to run aggregation and only on fail original method is run. Method is wrapped only if it is defined and `elasticsearch.enable_aggregations` setting is true.
def load(robot, container_name, slot, label=None, share=False):
    """
    Examples
    --------
    >>> from opentrons import containers
    >>> containers.load('96-flat', '1')
    <Deck>/<Slot 1>/<Container 96-flat>
    >>> containers.load('96-flat', '4', 'plate')
    <Deck>/<Slot 4>/<Container plate>
    >>> containers.load('non-existent-type', '4') # doctest: +ELLIPSIS
    Exception: Container type "non-existent-type" not found in file ...
    """
    # OT-One users specify columns in the A1, B3 fashion
    # below methods help convert to the 1, 2, etc integer names
    def is_ot_one_slot_name(s):
        return isinstance(s, str) and len(s) == 2 and s[0] in 'ABCD'

    def convert_ot_one_slot_names(s):
        col = 'ABCD'.index(s[0])
        row = int(s[1]) - 1
        slot_number = col + (row * robot.get_max_robot_cols()) + 1
        log.warning('Changing deprecated slot name "{}" to "{}"'.format(
            s, slot_number))
        return slot_number

    if isinstance(slot, str):
        # convert to integer
        try:
            slot = int(slot)
        except (ValueError, TypeError):
            if is_ot_one_slot_name(slot):
                slot = convert_ot_one_slot_names(slot)

    if helpers.is_number(slot):
        # test that it is within the correct range
        if not (1 <= slot <= len(robot.deck)):
            raise ValueError('Unknown slot: {}'.format(slot))
        slot = str(slot)

    return robot.add_container(container_name, slot, label, share)
Examples -------- >>> from opentrons import containers >>> containers.load('96-flat', '1') <Deck>/<Slot 1>/<Container 96-flat> >>> containers.load('96-flat', '4', 'plate') <Deck>/<Slot 4>/<Container plate> >>> containers.load('non-existent-type', '4') # doctest: +ELLIPSIS Exception: Container type "non-existent-type" not found in file ...
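For reference, the deprecated slot-name arithmetic above maps letter-number pairs column-major onto integer slots. A standalone sketch, where `max_cols=3` is an assumption standing in for `robot.get_max_robot_cols()`:

def convert_ot_one_slot_name(s, max_cols=3):
    col = 'ABCD'.index(s[0])
    row = int(s[1]) - 1
    return col + (row * max_cols) + 1

print(convert_ot_one_slot_name('A1'))  # 1
print(convert_ot_one_slot_name('B1'))  # 2
print(convert_ot_one_slot_name('A2'))  # 4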
def put_method(restApiId=None, resourceId=None, httpMethod=None, authorizationType=None, authorizerId=None, apiKeyRequired=None, operationName=None, requestParameters=None, requestModels=None, requestValidatorId=None): """ Add a method to an existing Resource resource. See also: AWS API Documentation :example: response = client.put_method( restApiId='string', resourceId='string', httpMethod='string', authorizationType='string', authorizerId='string', apiKeyRequired=True|False, operationName='string', requestParameters={ 'string': True|False }, requestModels={ 'string': 'string' }, requestValidatorId='string' ) :type restApiId: string :param restApiId: [REQUIRED] The RestApi identifier for the new Method resource. :type resourceId: string :param resourceId: [REQUIRED] The Resource identifier for the new Method resource. :type httpMethod: string :param httpMethod: [REQUIRED] Specifies the method request's HTTP method type. :type authorizationType: string :param authorizationType: [REQUIRED] The method's authorization type. Valid values are NONE for open access, AWS_IAM for using AWS IAM permissions, CUSTOM for using a custom authorizer, or COGNITO_USER_POOLS for using a Cognito user pool. :type authorizerId: string :param authorizerId: Specifies the identifier of an Authorizer to use on this Method, if the type is CUSTOM. :type apiKeyRequired: boolean :param apiKeyRequired: Specifies whether the method required a valid ApiKey . :type operationName: string :param operationName: A human-friendly operation identifier for the method. For example, you can assign the operationName of ListPets for the GET /pets method in PetStore example. :type requestParameters: dict :param requestParameters: A key-value map defining required or optional method request parameters that can be accepted by Amazon API Gateway. A key defines a method request parameter name matching the pattern of method.request.{location}.{name} , where location is querystring , path , or header and name is a valid and unique parameter name. The value associated with the key is a Boolean flag indicating whether the parameter is required (true ) or optional (false ). The method request parameter names defined here are available in Integration to be mapped to integration request parameters or body-mapping templates. (string) -- (boolean) -- :type requestModels: dict :param requestModels: Specifies the Model resources used for the request's content type. Request models are represented as a key/value map, with a content type as the key and a Model name as the value. (string) -- (string) -- :type requestValidatorId: string :param requestValidatorId: The identifier of a RequestValidator for validating the method request. 
:rtype: dict :return: { 'httpMethod': 'string', 'authorizationType': 'string', 'authorizerId': 'string', 'apiKeyRequired': True|False, 'requestValidatorId': 'string', 'operationName': 'string', 'requestParameters': { 'string': True|False }, 'requestModels': { 'string': 'string' }, 'methodResponses': { 'string': { 'statusCode': 'string', 'responseParameters': { 'string': True|False }, 'responseModels': { 'string': 'string' } } }, 'methodIntegration': { 'type': 'HTTP'|'AWS'|'MOCK'|'HTTP_PROXY'|'AWS_PROXY', 'httpMethod': 'string', 'uri': 'string', 'credentials': 'string', 'requestParameters': { 'string': 'string' }, 'requestTemplates': { 'string': 'string' }, 'passthroughBehavior': 'string', 'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT', 'cacheNamespace': 'string', 'cacheKeyParameters': [ 'string', ], 'integrationResponses': { 'string': { 'statusCode': 'string', 'selectionPattern': 'string', 'responseParameters': { 'string': 'string' }, 'responseTemplates': { 'string': 'string' }, 'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT' } } } } :returns: (string) -- (boolean) -- """ pass
Add a method to an existing Resource resource. See also: AWS API Documentation :example: response = client.put_method( restApiId='string', resourceId='string', httpMethod='string', authorizationType='string', authorizerId='string', apiKeyRequired=True|False, operationName='string', requestParameters={ 'string': True|False }, requestModels={ 'string': 'string' }, requestValidatorId='string' ) :type restApiId: string :param restApiId: [REQUIRED] The RestApi identifier for the new Method resource. :type resourceId: string :param resourceId: [REQUIRED] The Resource identifier for the new Method resource. :type httpMethod: string :param httpMethod: [REQUIRED] Specifies the method request's HTTP method type. :type authorizationType: string :param authorizationType: [REQUIRED] The method's authorization type. Valid values are NONE for open access, AWS_IAM for using AWS IAM permissions, CUSTOM for using a custom authorizer, or COGNITO_USER_POOLS for using a Cognito user pool. :type authorizerId: string :param authorizerId: Specifies the identifier of an Authorizer to use on this Method, if the type is CUSTOM. :type apiKeyRequired: boolean :param apiKeyRequired: Specifies whether the method required a valid ApiKey . :type operationName: string :param operationName: A human-friendly operation identifier for the method. For example, you can assign the operationName of ListPets for the GET /pets method in PetStore example. :type requestParameters: dict :param requestParameters: A key-value map defining required or optional method request parameters that can be accepted by Amazon API Gateway. A key defines a method request parameter name matching the pattern of method.request.{location}.{name} , where location is querystring , path , or header and name is a valid and unique parameter name. The value associated with the key is a Boolean flag indicating whether the parameter is required (true ) or optional (false ). The method request parameter names defined here are available in Integration to be mapped to integration request parameters or body-mapping templates. (string) -- (boolean) -- :type requestModels: dict :param requestModels: Specifies the Model resources used for the request's content type. Request models are represented as a key/value map, with a content type as the key and a Model name as the value. (string) -- (string) -- :type requestValidatorId: string :param requestValidatorId: The identifier of a RequestValidator for validating the method request. 
:rtype: dict :return: { 'httpMethod': 'string', 'authorizationType': 'string', 'authorizerId': 'string', 'apiKeyRequired': True|False, 'requestValidatorId': 'string', 'operationName': 'string', 'requestParameters': { 'string': True|False }, 'requestModels': { 'string': 'string' }, 'methodResponses': { 'string': { 'statusCode': 'string', 'responseParameters': { 'string': True|False }, 'responseModels': { 'string': 'string' } } }, 'methodIntegration': { 'type': 'HTTP'|'AWS'|'MOCK'|'HTTP_PROXY'|'AWS_PROXY', 'httpMethod': 'string', 'uri': 'string', 'credentials': 'string', 'requestParameters': { 'string': 'string' }, 'requestTemplates': { 'string': 'string' }, 'passthroughBehavior': 'string', 'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT', 'cacheNamespace': 'string', 'cacheKeyParameters': [ 'string', ], 'integrationResponses': { 'string': { 'statusCode': 'string', 'selectionPattern': 'string', 'responseParameters': { 'string': 'string' }, 'responseTemplates': { 'string': 'string' }, 'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT' } } } } :returns: (string) -- (boolean) --
def url(self): """The url on jupyter nbviewer for this notebook or None if unknown""" if self._url is not None: url = self._url else: url = getattr(self.nb.metadata, 'url', None) if url is not None: return nbviewer_link(url)
The url on jupyter nbviewer for this notebook or None if unknown
def convert_ages_to_calendar_year(self, er_ages_rec):
        """
        convert all age units to calendar year

        Parameters
        ----------
        er_ages_rec : Dict type object containing preferably at least the keys
            'age', 'age_unit', and either 'age_range_high', 'age_range_low'
            or 'age_sigma'

        Returns
        -------
        er_ages_rec : Same dict object input but altered to have new records
            'age_cal_year_range_low' and 'age_cal_year_range_high'
        """
        if "age" not in list(er_ages_rec.keys()):
            return(er_ages_rec)
        if "age_unit" not in list(er_ages_rec.keys()):
            return(er_ages_rec)
        if er_ages_rec["age_unit"] == "":
            return(er_ages_rec)

        if er_ages_rec["age"] == "":
            if "age_range_high" in list(er_ages_rec.keys()) and "age_range_low" in list(er_ages_rec.keys()):
                if er_ages_rec["age_range_high"] != "" and er_ages_rec["age_range_low"] != "":
                    er_ages_rec["age"] = scipy.mean(
                        [float(er_ages_rec["age_range_high"]), float(er_ages_rec["age_range_low"])])
        if er_ages_rec["age"] == "":
            return(er_ages_rec)

        age_unit = er_ages_rec["age_unit"]

        # Fix 'age':
        multiplier = 1
        if age_unit == "Ga":
            multiplier = -1e9
        if age_unit == "Ma":
            multiplier = -1e6
        if age_unit == "Ka":
            multiplier = -1e3
        if age_unit == "Years AD (+/-)" or age_unit == "Years Cal AD (+/-)":
            multiplier = 1
        if age_unit == "Years BP" or age_unit == "Years Cal BP":
            multiplier = 1
        age = float(er_ages_rec["age"])*multiplier
        if age_unit == "Years BP" or age_unit == "Years Cal BP":
            age = 1950-age
        er_ages_rec['age_cal_year'] = age

        # Fix 'age_range_low':
        age_range_low = age
        age_range_high = age
        age_sigma = 0

        if "age_sigma" in list(er_ages_rec.keys()) and er_ages_rec["age_sigma"] != "":
            age_sigma = float(er_ages_rec["age_sigma"])*multiplier
            if age_unit == "Years BP" or age_unit == "Years Cal BP":
                age_sigma = 1950-age_sigma
            age_range_low = age-age_sigma
            age_range_high = age+age_sigma

        if "age_range_high" in list(er_ages_rec.keys()) and "age_range_low" in list(er_ages_rec.keys()):
            if er_ages_rec["age_range_high"] != "" and er_ages_rec["age_range_low"] != "":
                age_range_high = float(
                    er_ages_rec["age_range_high"])*multiplier
                if age_unit == "Years BP" or age_unit == "Years Cal BP":
                    age_range_high = 1950-age_range_high
                age_range_low = float(er_ages_rec["age_range_low"])*multiplier
                if age_unit == "Years BP" or age_unit == "Years Cal BP":
                    age_range_low = 1950-age_range_low
        er_ages_rec['age_cal_year_range_low'] = age_range_low
        er_ages_rec['age_cal_year_range_high'] = age_range_high

        return(er_ages_rec)
convert all age units to calendar year Parameters ---------- er_ages_rec : Dict type object containing preferably at least the keys 'age', 'age_unit', and either 'age_range_high', 'age_range_low' or 'age_sigma' Returns ------- er_ages_rec : Same dict object input but altered to have new records 'age_cal_year_range_low' and 'age_cal_year_range_high'
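A compact mirror of the multiplier logic above makes the unit handling easy to check: geological units scale onto a negative calendar-year axis, while BP ages pivot on 1950 after scaling. A hedged mini-version, covering only the units the function names:

def to_cal_year(age, unit):
    multiplier = {'Ga': -1e9, 'Ma': -1e6, 'Ka': -1e3}.get(unit, 1)
    age = age * multiplier
    if unit in ('Years BP', 'Years Cal BP'):
        age = 1950 - age
    return age

print(to_cal_year(2.5, 'Ma'))        # -2500000.0
print(to_cal_year(100, 'Years BP'))  # 1850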
def query(self):
        """ The mongo query object which would be executed if this Query object were used """
        if self._rawquery:
            return self.__query
        return flatten(self.__query)
The mongo query object which would be executed if this Query object were used
def _create_raw_data(self):
        """
        Gathers the different section ids and creates a dictionary to use as
        the initial cookie data.
        :return: A dictionary like:
            {'analyses':'all','analysisrequest':'all','worksheets':'all'}
        """
        result = {}
        for section in self.get_sections():
            result[section.get('id')] = 'all'
        return result
Gathers the different section ids and creates a dictionary to use as the initial cookie data. :return: A dictionary like: {'analyses':'all','analysisrequest':'all','worksheets':'all'}
def start(self): """Start the message sending loop.""" if self._send_greenlet is None: self._send_greenlet = gevent.spawn(self._send_loop)
Start the message sending loop.
def strip_unreferenced_labels(asm_lines):
    """Strip all labels which are never referenced."""
    asm_stripped = []
    for line in asm_lines:
        if re.match(r'^\S+:', line):
            # Found a label
            label = line[0:line.find(':')]
            # Search for references to the current label
            if not any([re.match(r'^[^#]*\s' + re.escape(label) + r'[\s,]?.*$', l)
                        for l in asm_lines]):
                # Skip labels without any seen reference
                line = ''
        asm_stripped.append(line)
    return asm_stripped
Strip all labels which are never referenced.
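A usage sketch on a toy listing, assuming the function above is in scope: `.L2` is defined but never referenced, so its label line is blanked, while `.L1` survives because of the `jmp`.

import re

asm = [
    '.L1:',
    '  addq  %rax, %rbx',
    '  jmp .L1',
    '.L2:',
    '  ret',
]
print(strip_unreferenced_labels(asm))
# ['.L1:', '  addq  %rax, %rbx', '  jmp .L1', '', '  ret']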
def getattribute(value, arg): """Gets an attribute of an object dynamically from a string name""" if hasattr(value, str(arg)): return getattr(value, arg) elif hasattr(value, 'has_key') and value.has_key(arg): return value[arg] elif numeric_test.match(str(arg)) and len(value) > int(arg): return value[int(arg)] else: return settings.TEMPLATE_STRING_IF_INVALID
Gets an attribute of an object dynamically from a string name
def main():
    """
    simple user-interface
    """
    print("\t\tCalculator\n\n")
    while True:
        user_input = input("expression or exit: ")
        if user_input == "exit":
            break
        try:
            print("The result is {0}".format(evaluate(user_input)))
        except Exception:
            print("invalid syntax!")
    print("program end")
simple user-interface
def pad_equal_whitespace(string, pad=None):
    """Given a multiline string, pad every line with spaces so that all lines
    have the same length."""
    if pad is None:
        pad = max(map(len, string.split('\n'))) + 1
    return '\n'.join(('{0: <%i}' % pad).format(line) for line in string.split('\n'))
Given a multiline string, pad every line with spaces so that all lines have the same length.
def _rts_smoother_update_step(k, p_m , p_P, p_m_pred, p_P_pred, p_m_prev_step, p_P_prev_step, p_dynamic_callables):
    """
    Rauch–Tung–Striebel (RTS) update step

    Input:
    -----------------------------
    k: int
        Iteration No. Starts at 0. Total number of iterations equal to the
        number of measurements.

    p_m: matrix of size (state_dim, time_series_no)
        Filter mean on step k

    p_P: matrix of size (state_dim, state_dim)
        Filter covariance on step k

    p_m_pred: matrix of size (state_dim, time_series_no)
        Means from the smoother prediction step.

    p_P_pred:
        Covariance from the smoother prediction step.

    p_m_prev_step
        Smoother mean from the previous step.

    p_P_prev_step:
        Smoother covariance from the previous step.

    p_dynamic_callables: object
        Provides Ak(k, m, P), which returns the Jacobian of the dynamic
        function. k (iteration number) starts at 0; m is the point where the
        Jacobian is evaluated; P is a parameter for the Jacobian, usually the
        covariance matrix.
    """
    A = p_dynamic_callables.Ak(k,p_m,p_P) # state transition matrix (or Jacobian)

    tmp = np.dot( A, p_P.T)
    if A.shape[0] == 1: # 1D states
        G = tmp.T / p_P_pred # P[:,:,k] is symmetric
    else:
        try:
            LL,islower = linalg.cho_factor(p_P_pred)
            G = linalg.cho_solve((LL,islower),tmp).T
        except (linalg.LinAlgError, ValueError):
            # It happened that p_P_pred has several near-zero eigenvalues,
            # hence the Cholesky method does not work.
            res = sp.linalg.lstsq(p_P_pred, tmp)
            G = res[0].T

    m_upd = p_m + G.dot( p_m_prev_step-p_m_pred )
    P_upd = p_P + G.dot( p_P_prev_step-p_P_pred).dot(G.T)
    P_upd = 0.5*(P_upd + P_upd.T)
    return m_upd, P_upd, G
Rauch–Tung–Striebel (RTS) update step Input: ----------------------------- k: int Iteration No. Starts at 0. Total number of iterations equal to the number of measurements. p_m: matrix of size (state_dim, time_series_no) Filter mean on step k p_P: matrix of size (state_dim, state_dim) Filter covariance on step k p_m_pred: matrix of size (state_dim, time_series_no) Means from the smoother prediction step. p_P_pred: Covariance from the smoother prediction step. p_m_prev_step Smoother mean from the previous step. p_P_prev_step: Smoother covariance from the previous step. p_dynamic_callables: object providing Ak(k, m, P), which returns the Jacobian of the dynamic function. k (iteration number) starts at 0; m is the point where the Jacobian is evaluated; P is a parameter for the Jacobian, usually the covariance matrix.
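Stripped of the callables wrapper, the update above is the textbook RTS recursion: the gain G = P_k A^T P_pred^{-1} is computed via a Cholesky solve, then the mean and covariance corrections are applied. A dense NumPy sketch, a simplification rather than the library's interface:

import numpy as np
from scipy import linalg

def rts_update(A, m_k, P_k, m_pred, P_pred, m_s_next, P_s_next):
    # G = P_k A^T P_pred^{-1}, via a Cholesky solve on P_pred
    LL, lower = linalg.cho_factor(P_pred)
    G = linalg.cho_solve((LL, lower), A @ P_k.T).T
    m_s = m_k + G @ (m_s_next - m_pred)
    P_s = P_k + G @ (P_s_next - P_pred) @ G.T
    return m_s, 0.5 * (P_s + P_s.T), G  # symmetrize for numerical safety

# Scalar demo: one smoothing step of an AR(1)-style model
A = np.array([[0.9]])
m, P = np.array([[1.0]]), np.array([[0.5]])
m_pred, P_pred = A @ m, A @ P @ A.T + 0.1
print(rts_update(A, m, P, m_pred, P_pred, m, P))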
def rename_tokens(docgraph_with_old_names, docgraph_with_new_names, verbose=False): """ Renames the tokens of a graph (``docgraph_with_old_names``) in-place, using the token names of another document graph (``docgraph_with_new_names``). Also updates the ``.tokens`` list of the old graph. This will only work, iff both graphs have the same tokenization. """ old2new = create_token_mapping(docgraph_with_old_names, docgraph_with_new_names, verbose=verbose) # save the mappings from old to new token node IDs in the `renamed_nodes` # attribute of the merged graph if hasattr(docgraph_with_new_names, 'renamed_nodes'): docgraph_with_new_names.renamed_nodes.update(old2new) else: docgraph_with_new_names.renamed_nodes = old2new relabel_nodes(docgraph_with_old_names, old2new, copy=False) new_token_ids = old2new.values() # new_token_ids could be empty (if docgraph_with_new_names is still empty) if new_token_ids: docgraph_with_old_names.tokens = new_token_ids
Renames the tokens of a graph (``docgraph_with_old_names``) in-place, using the token names of another document graph (``docgraph_with_new_names``). Also updates the ``.tokens`` list of the old graph. This will only work, iff both graphs have the same tokenization.
def left_complement(clr): """ Returns the left half of the split complement. A list is returned with the same darker and softer colors as in the complementary list, but using the hue of the left split complement instead of the complement itself. """ left = split_complementary(clr)[1] colors = complementary(clr) colors[3].h = left.h colors[4].h = left.h colors[5].h = left.h colors = colorlist( colors[0], colors[2], colors[1], colors[3], colors[4], colors[5] ) return colors
Returns the left half of the split complement. A list is returned with the same darker and softer colors as in the complementary list, but using the hue of the left split complement instead of the complement itself.
def create_git_release(self, tag, name, message, draft=False, prerelease=False, target_commitish=github.GithubObject.NotSet): """ :calls: `POST /repos/:owner/:repo/releases <http://developer.github.com/v3/repos/releases>`_ :param tag: string :param name: string :param message: string :param draft: bool :param prerelease: bool :param target_commitish: string or :class:`github.Branch.Branch` or :class:`github.Commit.Commit` or :class:`github.GitCommit.GitCommit` :rtype: :class:`github.GitRelease.GitRelease` """ assert isinstance(tag, (str, unicode)), tag assert isinstance(name, (str, unicode)), name assert isinstance(message, (str, unicode)), message assert isinstance(draft, bool), draft assert isinstance(prerelease, bool), prerelease assert target_commitish is github.GithubObject.NotSet or isinstance(target_commitish, (str, unicode, github.Branch.Branch, github.Commit.Commit, github.GitCommit.GitCommit)), target_commitish post_parameters = { "tag_name": tag, "name": name, "body": message, "draft": draft, "prerelease": prerelease, } if isinstance(target_commitish, (str, unicode)): post_parameters["target_commitish"] = target_commitish elif isinstance(target_commitish, github.Branch.Branch): post_parameters["target_commitish"] = target_commitish.name elif isinstance(target_commitish, (github.Commit.Commit, github.GitCommit.GitCommit)): post_parameters["target_commitish"] = target_commitish.sha headers, data = self._requester.requestJsonAndCheck( "POST", self.url + "/releases", input=post_parameters ) return github.GitRelease.GitRelease(self._requester, headers, data, completed=True)
:calls: `POST /repos/:owner/:repo/releases <http://developer.github.com/v3/repos/releases>`_ :param tag: string :param name: string :param message: string :param draft: bool :param prerelease: bool :param target_commitish: string or :class:`github.Branch.Branch` or :class:`github.Commit.Commit` or :class:`github.GitCommit.GitCommit` :rtype: :class:`github.GitRelease.GitRelease`
def get_rendered_toctree(builder, docname, prune=False, collapse=True): """Build the toctree relative to the named document, with the given parameters, and then return the rendered HTML fragment. """ fulltoc = build_full_toctree(builder, docname, prune=prune, collapse=collapse, ) rendered_toc = builder.render_partial(fulltoc)['fragment'] return rendered_toc
Build the toctree relative to the named document, with the given parameters, and then return the rendered HTML fragment.
def tail(collection, filter=None, projection=None, limit=0, timeout=None, aggregate=False): """A generator which will block and yield entries as they are added to a capped collection. Only use this on capped collections; behaviour is undefined against non-tailable cursors. Accepts a timeout as an integer or floating point number of seconds, indicating how long to wait for a result. Correct operation requires a modern MongoDB installation, version 3.2 or newer, and the client driver to support it. Use is trivial: for obj in tail(db.collection, timeout=10): print(obj) An optional argument, aggregate, allows you to control how the timeout value is interpreted. By default, False, the timeout is used as the longest period of time to wait for a new record, resetting on each retrieved record. Additional important note: tailing will fail (badly) if the collection is empty. Always prime the collection with an empty or otherwise unimportant record before attempting to use this feature. """ if not collection.options().get('capped', False): raise TypeError("Can only tail capped collections.") # Similarly, verify that the collection isn't empty. Empty is bad. (Busy loop.) if not collection.count(): raise ValueError("Cowardly refusing to tail an empty collection.") cursor = collection.find(filter, projection, limit=limit, cursor_type=CursorType.TAILABLE_AWAIT) cursor = cursor.hint([('$natural', 1)]) if timeout: if aggregate: # Total query time not to exceed `timeout` seconds. cursor = cursor.max_time_ms(int(timeout * 1000)).max_await_time_ms(int(timeout * 1000)) else: # Individual wait time not to exceed `timeout` seconds. cursor = cursor.max_await_time_ms(int(timeout * 1000)) return cursor
A generator which will block and yield entries as they are added to a capped collection. Only use this on capped collections; behaviour is undefined against non-tailable cursors. Accepts a timeout as an integer or floating point number of seconds, indicating how long to wait for a result. Correct operation requires a modern MongoDB installation, version 3.2 or newer, and the client driver to support it. Use is trivial: for obj in tail(db.collection, timeout=10): print(obj) An optional argument, aggregate, allows you to control how the timeout value is interpreted. By default, False, the timeout is used as the longest period of time to wait for a new record, resetting on each retrieved record. Additional important note: tailing will fail (badly) if the collection is empty. Always prime the collection with an empty or otherwise unimportant record before attempting to use this feature.
def latex_defs_to_katex_macros(defs):
    r'''Converts LaTeX \def statements to KaTeX macros.

    This is a helper function that can be used in conf.py to translate your
    already specified LaTeX definitions.
    https://github.com/Khan/KaTeX#rendering-options, e.g.
    `\def \e #1{\mathrm{e}^{#1}}` => `"\\e": "\\mathrm{e}^{#1}"`

    Example
    -------
    import sphinxcontrib.katex as katex
    # Get your LaTeX defs into `latex_defs` and then do
    latex_macros = katex.latex_defs_to_katex_macros(latex_defs)
    katex_options = 'macros: {' + latex_macros + '}'
    '''
    # Remove empty lines
    defs = defs.strip()
    tmp = []
    for line in defs.splitlines():
        # Remove spaces from every line
        line = line.strip()
        # Remove "\def" at the beginning of line
        line = re.sub(r'^\\def[ ]?', '', line)
        # Remove optional #1 parameter before {} command brackets
        line = re.sub(r'(#[0-9])+', '', line, 1)
        # Replace outer {} command brackets with ""
        line = re.sub(r'( {)|(}$)', '"', line)
        # Add "": to the new command
        line = re.sub(r'(^\\[A-Za-z]+)', r'"\1":', line, 1)
        # Add , at end of line
        line = re.sub(r'$', ',', line, 1)
        # Duplicate all \
        line = re.sub(r'\\', r'\\\\', line)
        tmp.append(line)
    macros = '\n'.join(tmp)
    return macros
r'''Converts LaTeX \def statements to KaTeX macros. This is a helper function that can be used in conf.py to translate your already specified LaTeX definitions. https://github.com/Khan/KaTeX#rendering-options, e.g. `\def \e #1{\mathrm{e}^{#1}}` => `"\\e": "\\mathrm{e}^{#1}"` Example ------- import sphinxcontrib.katex as katex # Get your LaTeX defs into `latex_defs` and then do latex_macros = katex.latex_defs_to_katex_macros(latex_defs) katex_options = 'macros: {' + latex_macros + '}'
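A round-trip check of the regex pipeline above, assuming the function is importable: one \def in, one macro line out, with every backslash doubled for JavaScript.

defs = r'\def \e #1{\mathrm{e}^{#1}}'
print(latex_defs_to_katex_macros(defs))
# "\\e":"\\mathrm{e}^{#1}",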
def validate_swagger_schema(schema_dir, resource_listing): """Validate the structure of Swagger schemas against the spec. **Valid only for Swagger v1.2 spec** Note: It is possible that resource_listing is not present in the schema_dir. The path is passed in the call so that ssv can fetch the api-declaration files from the path. :param resource_listing: Swagger Spec v1.2 resource listing :type resource_listing: dict :param schema_dir: A path to Swagger spec directory :type schema_dir: string :raises: :py:class:`swagger_spec_validator.SwaggerValidationError` """ schema_filepath = os.path.join(schema_dir, API_DOCS_FILENAME) swagger_spec_validator.validator12.validate_spec( resource_listing, urlparse.urljoin('file:', pathname2url(os.path.abspath(schema_filepath))), )
Validate the structure of Swagger schemas against the spec. **Valid only for Swagger v1.2 spec** Note: It is possible that resource_listing is not present in the schema_dir. The path is passed in the call so that ssv can fetch the api-declaration files from the path. :param resource_listing: Swagger Spec v1.2 resource listing :type resource_listing: dict :param schema_dir: A path to Swagger spec directory :type schema_dir: string :raises: :py:class:`swagger_spec_validator.SwaggerValidationError`
def results(self, trial_ids): """ Accepts a sequence of trial ids and returns a pandas dataframe with the schema trial_id, iteration?, *metric_schema_union where iteration is an optional column that specifies the iteration when a user logged a metric, if the user supplied one. The iteration column is added if any metric was logged with an iteration. Then, every metric name that was ever logged is a column in the metric_schema_union. """ metadata_folder = os.path.join(self.log_dir, constants.METADATA_FOLDER) dfs = [] # TODO: various file-creation corner cases like the result file not # always existing if stuff is not logged and etc should be ironed out # (would probably be easier if we had a centralized Sync class which # relied on some formal remote store semantics). for trial_id in trial_ids: # TODO constants should just contain the recipes for filename # construction instead of this multi-file implicit constraint result_file = os.path.join( metadata_folder, trial_id + "_" + constants.RESULT_SUFFIX) assert os.path.isfile(result_file), result_file dfs.append(pd.read_json(result_file, typ='frame', lines=True)) df = pd.concat(dfs, axis=0, ignore_index=True, sort=False) return df
Accepts a sequence of trial ids and returns a pandas dataframe with the schema trial_id, iteration?, *metric_schema_union where iteration is an optional column that specifies the iteration when a user logged a metric, if the user supplied one. The iteration column is added if any metric was logged with an iteration. Then, every metric name that was ever logged is a column in the metric_schema_union.
def calledTwice(cls, spy): #pylint: disable=invalid-name
        """
        Checks that the inspected spy was called exactly twice
        Args: SinonSpy
        """
        cls.__is_spy(spy)
        if not (spy.calledTwice):
            raise cls.failException(cls.message)
Checks that the inspected spy was called exactly twice Args: SinonSpy
def render_table(self, headers, rows, style=None): """ Format input to textual table. """ table = self.table(headers, rows, style) table.render(self._io)
Format input to textual table.
def append(self, parent, content): """ Append the specified L{content} to the I{parent}. @param parent: The parent node to append to. @type parent: L{Element} @param content: The content to append. @type content: L{Object} """ log.debug('appending parent:\n%s\ncontent:\n%s', parent, content) if self.start(content): self.appender.append(parent, content) self.end(parent, content)
Append the specified L{content} to the I{parent}. @param parent: The parent node to append to. @type parent: L{Element} @param content: The content to append. @type content: L{Object}
def validate_response(self): """Raises an exception if there was an error. Otherwise, do nothing. Clients should handle these errors, since these require custom handling to properly resolve. """ if self.is_success(): return # Handle the error if we have any information if self.details: error = self.details.get('error', None) if error == PushResponse.ERROR_DEVICE_NOT_REGISTERED: raise DeviceNotRegisteredError(self) elif error == PushResponse.ERROR_MESSAGE_TOO_BIG: raise MessageTooBigError(self) elif error == PushResponse.ERROR_MESSAGE_RATE_EXCEEDED: raise MessageRateExceededError(self) # No known error information, so let's raise a generic error. raise PushResponseError(self)
Raises an exception if there was an error. Otherwise, do nothing. Clients should handle these errors, since these require custom handling to properly resolve.
def promote_transaction( self, transaction, depth=3, min_weight_magnitude=None, ): # type: (TransactionHash, int, Optional[int]) -> dict """ Promotes a transaction by adding spam on top of it. :return: Dict with the following structure:: { 'bundle': Bundle, The newly-published bundle. } """ if min_weight_magnitude is None: min_weight_magnitude = self.default_min_weight_magnitude return extended.PromoteTransactionCommand(self.adapter)( transaction=transaction, depth=depth, minWeightMagnitude=min_weight_magnitude, )
Promotes a transaction by adding spam on top of it. :return: Dict with the following structure:: { 'bundle': Bundle, The newly-published bundle. }
def parse_default_property_value(property_name, property_type_id, default_value_string):
    """Parse the default value string into its proper form given the property type ID.

    Args:
        property_name: string, the name of the property whose default value is being parsed.
                       Used primarily to construct meaningful error messages, should the
                       default value prove invalid.
        property_type_id: int, one of the property type ID constants defined in this file that
                          OrientDB uses to designate the native type of a given property.
        default_value_string: string, the textual representation of the default value for
                              the property, as returned by OrientDB's schema introspection code.

    Returns:
        an object of type matching the property that can be used as the property's default value.
        For example, if the property is of string type, the return type will be a string, and
        if the property is of list type, the return type will be a list.

    Raises:
        AssertionError, if the default value is not supported or does not match the
        property's declared type (e.g. if a default of "[]" is set on an integer property).
    """
    if property_type_id == PROPERTY_TYPE_EMBEDDED_SET_ID and default_value_string == '{}':
        return set()
    elif property_type_id == PROPERTY_TYPE_EMBEDDED_LIST_ID and default_value_string == '[]':
        return list()
    elif (property_type_id == PROPERTY_TYPE_STRING_ID and
          isinstance(default_value_string, six.string_types)):
        return default_value_string
    elif property_type_id == PROPERTY_TYPE_BOOLEAN_ID:
        return _parse_bool_default_value(property_name, default_value_string)
    elif property_type_id == PROPERTY_TYPE_DATETIME_ID:
        return _parse_datetime_default_value(property_name, default_value_string)
    elif property_type_id == PROPERTY_TYPE_DATE_ID:
        return _parse_date_default_value(property_name, default_value_string)
    else:
        raise AssertionError(u'Unsupported default value for property "{}" with type id {}: '
                             u'{}'.format(property_name, property_type_id, default_value_string))
Parse the default value string into its proper form given the property type ID. Args: property_name: string, the name of the property whose default value is being parsed. Used primarily to construct meaningful error messages, should the default value prove invalid. property_type_id: int, one of the property type ID constants defined in this file that OrientDB uses to designate the native type of a given property. default_value_string: string, the textual representation of the default value for the property, as returned by OrientDB's schema introspection code. Returns: an object of type matching the property that can be used as the property's default value. For example, if the property is of string type, the return type will be a string, and if the property is of list type, the return type will be a list. Raises: AssertionError, if the default value is not supported or does not match the property's declared type (e.g. if a default of "[]" is set on an integer property).
def keys_values(data, *keys):
    """Collect the values of one or more keys from a dict into a single flat
    list; keys that are missing are skipped."""
    values = []
    if is_mapping(data):
        for key in keys:
            if key in data:
                values.extend(ensure_list(data[key]))
    return values
Collect the values of one or more keys from a dict into a single flat list; keys that are missing are skipped.
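A self-contained sketch with minimal stand-ins for `is_mapping`/`ensure_list`; the real module-level helpers are not shown above, so these are assumptions:

def is_mapping(v):
    return isinstance(v, dict)

def ensure_list(v):
    return v if isinstance(v, (list, tuple)) else [v]

def keys_values(data, *keys):
    values = []
    if is_mapping(data):
        for key in keys:
            if key in data:
                values.extend(ensure_list(data[key]))
    return values

print(keys_values({'alias': 'Bob', 'aliases': ['Rob', 'Bobby']}, 'alias', 'aliases'))
# ['Bob', 'Rob', 'Bobby']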
def eval_callx(self, exp): "dispatch for CallX" # below: this isn't contains(exp,consumes_row) -- it's just checking the current expression return (self.eval_agg_call if consumes_rows(exp) else self.eval_nonagg_call)(exp)
dispatch for CallX
def Satisfy_Constraints(U, B, BtBinv): """U is the prolongator update. Project out components of U such that U*B = 0. Parameters ---------- U : bsr_matrix m x n sparse bsr matrix Update to the prolongator B : array n x k array of the coarse grid near nullspace vectors BtBinv : array Local inv(B_i.H*B_i) matrices for each supernode, i B_i is B restricted to the sparsity pattern of supernode i in U Returns ------- Updated U, so that U*B = 0. Update is computed by orthogonally (in 2-norm) projecting out the components of span(B) in U in a row-wise fashion. See Also -------- The principal calling routine, pyamg.aggregation.smooth.energy_prolongation_smoother """ RowsPerBlock = U.blocksize[0] ColsPerBlock = U.blocksize[1] num_block_rows = int(U.shape[0]/RowsPerBlock) UB = np.ravel(U*B) # Apply constraints, noting that we need the conjugate of B # for use as Bi.H in local projection pyamg.amg_core.satisfy_constraints_helper(RowsPerBlock, ColsPerBlock, num_block_rows, B.shape[1], np.conjugate(np.ravel(B)), UB, np.ravel(BtBinv), U.indptr, U.indices, np.ravel(U.data)) return U
U is the prolongator update. Project out components of U such that U*B = 0. Parameters ---------- U : bsr_matrix m x n sparse bsr matrix Update to the prolongator B : array n x k array of the coarse grid near nullspace vectors BtBinv : array Local inv(B_i.H*B_i) matrices for each supernode, i B_i is B restricted to the sparsity pattern of supernode i in U Returns ------- Updated U, so that U*B = 0. Update is computed by orthogonally (in 2-norm) projecting out the components of span(B) in U in a row-wise fashion. See Also -------- The principal calling routine, pyamg.aggregation.smooth.energy_prolongation_smoother
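Ignoring the per-supernode sparsity restriction, the constraint above is the standard row-wise orthogonal projection U <- U (I - B (B^T B)^{-1} B^T). A dense NumPy check that the projected update satisfies U @ B = 0:

import numpy as np

rng = np.random.default_rng(1)
U = rng.standard_normal((6, 4))
B = rng.standard_normal((4, 2))

# Project each row of U onto the orthogonal complement of span(B)
U = U - (U @ B) @ np.linalg.inv(B.T @ B) @ B.T
print(np.allclose(U @ B, 0))  # True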
def publish_active_scene(self, scene_id): """publish changed active scene""" self.sequence_number += 1 self.publisher.send_multipart(msgs.MessageBuilder.scene_active(self.sequence_number, scene_id)) return self.sequence_number
publish changed active scene
def stream_time(self, significant_digits=3):
        """
        :param significant_digits: int of the number of significant digits in the return
        :return: float of the time in seconds of how long the data took to stream
        """
        try:
            return round(
                self._timestamps['last_stream'] - self._timestamps['stream'],
                significant_digits)
        except Exception:
            return None
:param significant_digits: int of the number of significant digits in the return :return: float of the time in seconds of how long the data took to stream
def date_time_this_century( self, before_now=True, after_now=False, tzinfo=None): """ Gets a DateTime object for the current century. :param before_now: include days in current century before today :param after_now: include days in current century after today :param tzinfo: timezone, instance of datetime.tzinfo subclass :example DateTime('2012-04-04 11:02:02') :return DateTime """ now = datetime.now(tzinfo) this_century_start = datetime( now.year - (now.year % 100), 1, 1, tzinfo=tzinfo) next_century_start = datetime( min(this_century_start.year + 100, MAXYEAR), 1, 1, tzinfo=tzinfo) if before_now and after_now: return self.date_time_between_dates( this_century_start, next_century_start, tzinfo) elif not before_now and after_now: return self.date_time_between_dates(now, next_century_start, tzinfo) elif not after_now and before_now: return self.date_time_between_dates(this_century_start, now, tzinfo) else: return now
Gets a DateTime object for the current century. :param before_now: include days in current century before today :param after_now: include days in current century after today :param tzinfo: timezone, instance of datetime.tzinfo subclass :example DateTime('2012-04-04 11:02:02') :return DateTime
def _to_DOM(self):
        """
        Dumps object data to a fully traversable DOM representation of the
        object.

        :returns: a ``xml.etree.Element`` object

        """
        root_node = ET.Element('station')
        created_at_node = ET.SubElement(root_node, "created_at")
        created_at_node.text = \
            timeformatutils.to_ISO8601(self.created_at) if self.created_at is not None else 'null'
        updated_at_node = ET.SubElement(root_node, "updated_at")
        updated_at_node.text = \
            timeformatutils.to_ISO8601(self.updated_at) if self.updated_at is not None else 'null'
        station_id_node = ET.SubElement(root_node, 'id')
        station_id_node.text = str(self.id)
        external_id_node = ET.SubElement(root_node, 'external_id')
        external_id_node.text = str(self.external_id)
        station_name_node = ET.SubElement(root_node, 'name')
        station_name_node.text = str(self.name) if self.name is not None else 'null'
        lat_node = ET.SubElement(root_node, 'lat')
        lat_node.text = str(self.lat)
        lon_node = ET.SubElement(root_node, 'lon')
        lon_node.text = str(self.lon)
        alt_node = ET.SubElement(root_node, 'alt')
        alt_node.text = str(self.alt) if self.alt is not None else 'null'
        rank_node = ET.SubElement(root_node, 'rank')
        rank_node.text = str(self.rank) if self.rank is not None else 'null'
        return root_node
Dumps object data to a fully traversable DOM representation of the object. :returns: a ``xml.etree.Element`` object
def Search(pattern, s): """Searches the string for the pattern, caching the compiled regexp.""" if pattern not in _regexp_compile_cache: _regexp_compile_cache[pattern] = sre_compile.compile(pattern) return _regexp_compile_cache[pattern].search(s)
Searches the string for the pattern, caching the compiled regexp.
def GetValues(self): """Retrieves all values within the key. Returns: generator[WinRegistryValue]: Windows Registry value generator. """ if not self._registry_key and self._registry: self._GetKeyFromRegistry() if self._registry_key: return self._registry_key.GetValues() return iter([])
Retrieves all values within the key. Returns: generator[WinRegistryValue]: Windows Registry value generator.
def numa_nodemask_to_set(mask): """ Convert NUMA nodemask to Python set. """ result = set() for i in range(0, get_max_node() + 1): if __nodemask_isset(mask, i): result.add(i) return result
Convert NUMA nodemask to Python set.
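The same conversion written against a plain integer bitmask, an assumption here standing in for the libnuma struct and its `__nodemask_isset` accessor:

def nodemask_int_to_set(mask, max_node):
    # Bit i set in the mask means NUMA node i is in the set
    return {i for i in range(max_node + 1) if mask >> i & 1}

print(nodemask_int_to_set(0b1011, 7))  # {0, 1, 3}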
def surfaceIntersection(actor1, actor2, tol=1e-06, lw=3): """Intersect 2 surfaces and return a line actor. .. hint:: |surfIntersect.py|_ """ bf = vtk.vtkIntersectionPolyDataFilter() poly1 = actor1.GetMapper().GetInput() poly2 = actor2.GetMapper().GetInput() bf.SetInputData(0, poly1) bf.SetInputData(1, poly2) bf.Update() actor = Actor(bf.GetOutput(), "k", 1) actor.GetProperty().SetLineWidth(lw) return actor
Intersect 2 surfaces and return a line actor. .. hint:: |surfIntersect.py|_
def resolve_push_to(push_to, default_url, default_namespace):
    '''
    Given a push-to value, return the registry and namespace.

    :param push_to: string: User supplied --push-to value.
    :param default_url: string: Container engine's default_index value (e.g. docker.io).
    :param default_namespace: string: Namespace to use when push_to does not supply one.
    :return: tuple: registry_url, namespace
    '''
    protocol = 'http://' if push_to.startswith('http://') else 'https://'
    url = push_to = REMOVE_HTTP.sub('', push_to)
    namespace = default_namespace
    parts = url.split('/', 1)
    special_set = {'.', ':'}
    char_set = set(parts[0])

    if len(parts) == 1:
        if not special_set.intersection(char_set) and parts[0] != 'localhost':
            registry_url = default_url
            namespace = push_to
        else:
            registry_url = protocol + parts[0]
    else:
        registry_url = protocol + parts[0]
        namespace = parts[1]

    return registry_url, namespace
Given a push-to value, return the registry and namespace. :param push_to: string: User supplied --push-to value. :param default_url: string: Container engine's default_index value (e.g. docker.io). :param default_namespace: string: Namespace to use when push_to does not supply one. :return: tuple: registry_url, namespace
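Worked examples of the branching above, with expected results written as comments (assuming REMOVE_HTTP is the module-level regex that strips a leading http(s):// prefix):

# resolve_push_to('myorg', 'docker.io', 'library')
#   -> ('docker.io', 'myorg')            # bare name: namespace on the default registry
# resolve_push_to('registry.example.com:5000', 'docker.io', 'library')
#   -> ('https://registry.example.com:5000', 'library')   # '.' or ':' marks a registry
# resolve_push_to('registry.example.com/team', 'docker.io', 'library')
#   -> ('https://registry.example.com', 'team')           # path part becomes the namespace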
def backward(self, diff_x, influences, activations, **kwargs):
        """
        Backward pass through the network, including update.

        Parameters
        ----------
        diff_x : numpy array
            A matrix containing the differences between the input and
            neurons.
        influences : numpy array
            A matrix containing the influence each neuron has on each
            other neuron. This is used to calculate the updates.
        activations : numpy array
            The activations each neuron has to each data point. This is
            used to calculate the BMU.
        diff_y : numpy array
            The differences between the input and context neurons.

        Returns
        -------
        updates : tuple of arrays
            The updates to the weights and context weights, respectively.

        """
        diff_y = kwargs['diff_y']
        bmu = self._get_bmu(activations)
        influence = influences[bmu]

        # Update
        x_update = np.multiply(diff_x, influence)
        y_update = np.multiply(diff_y, influence)

        return x_update, y_update
Backward pass through the network, including update. Parameters ---------- diff_x : numpy array A matrix containing the differences between the input and neurons. influences : numpy array A matrix containing the influence each neuron has on each other neuron. This is used to calculate the updates. activations : numpy array The activations each neuron has to each data point. This is used to calculate the BMU. diff_y : numpy array The differences between the input and context neurons. Returns ------- updates : tuple of arrays The updates to the weights and context weights, respectively.
def remove_editor(self, username, *args, **kwargs):
        """Remove an editor from this wiki page.

        :param username: The name or Redditor object of the user to remove.

        This method points to :meth:`add_editor` with _delete=True.

        Additional parameters are passed to :meth:`add_editor` and
        subsequently into :meth:`~praw.__init__.BaseReddit.request_json`.

        """
        return self.add_editor(username=username, _delete=True, *args,
                               **kwargs)
Remove an editor from this wiki page. :param username: The name or Redditor object of the user to remove. This method points to :meth:`add_editor` with _delete=True. Additional parameters are passed to :meth:`add_editor` and subsequently into :meth:`~praw.__init__.BaseReddit.request_json`.
def from_hex(cls, value):
        """Initialize a new network from hexadecimal notation."""
        # 8 hex digits for IPv4, 32 for IPv6
        if len(value) in (8, 32):
            return cls(int(value, 16))
        raise ValueError('%r: invalid hexadecimal notation' % (value,))
Initialize a new network from hexadecimal notation.
def spawn_worker(params):
    """
    This has to be a module-level function
    :type params: Params
    """
    setup_logging(params)
    log.info("Adding worker: idx=%s\tconcurrency=%s\tresults=%s", params.worker_index, params.concurrency, params.report)
    worker = Worker(params)
    worker.start()
    worker.join()
This has to be a module-level function :type params: Params
def load_excel(self, filepath, **kwargs):
        """Set the main dataframe with the content of an Excel file

        :param filepath: path of the Excel file to load,
                                can be absolute if it starts with ``/``
                                or relative if it starts with ``./``
        :type filepath: str
        :param kwargs: keyword arguments to pass to
                                Pandas ``read_excel`` function

        :example: ``ds.load_excel("./myfile.xlsx")``
        """
        try:
            df = pd.read_excel(filepath, **kwargs)
            if len(df.index) == 0:
                self.warning("Empty Excel file. Can not set the dataframe.")
                return
            self.df = df
        except Exception as e:
            self.err(e, "Can not load Excel file")
Set the main dataframe with the content of an Excel file :param filepath: path of the Excel file to load, can be absolute if it starts with ``/`` or relative if it starts with ``./`` :type filepath: str :param kwargs: keyword arguments to pass to Pandas ``read_excel`` function :example: ``ds.load_excel("./myfile.xlsx")``
def _formatter(self, x=None, y=None, z=None, s=None, label=None, **kwargs): """ Default formatter function, if no `formatter` kwarg is specified. Takes information about the pick event as a series of kwargs and returns the string to be displayed. """ def is_date(axis): fmt = axis.get_major_formatter() return (isinstance(fmt, mdates.DateFormatter) or isinstance(fmt, mdates.AutoDateFormatter)) def format_date(num): if num is not None: return mdates.num2date(num).strftime(self.date_format) ax = kwargs['event'].artist.axes # Display x and y with range-specific formatting if is_date(ax.xaxis): x = format_date(x) else: limits = ax.get_xlim() x = self._format_coord(x, limits) kwargs['xerror'] = self._format_coord(kwargs.get('xerror'), limits) if is_date(ax.yaxis): y = format_date(y) else: limits = ax.get_ylim() y = self._format_coord(y, limits) kwargs['yerror'] = self._format_coord(kwargs.get('yerror'), limits) output = [] for key, val in zip(['x', 'y', 'z', 's'], [x, y, z, s]): if val is not None: try: output.append(u'{key}: {val:0.3g}'.format(key=key, val=val)) except ValueError: # X & Y will be strings at this point. # For masked arrays, etc, "z" and s values may be a string output.append(u'{key}: {val}'.format(key=key, val=val)) # label may be None or an empty string (for an un-labeled AxesImage)... # Un-labeled Line2D's will have labels that start with an underscore if label and not label.startswith('_'): output.append(u'Label: {}'.format(label)) if kwargs.get(u'point_label', None) is not None: output.append(u'Point: ' + u', '.join(kwargs['point_label'])) for arg in ['xerror', 'yerror']: val = kwargs.get(arg, None) if val is not None: output.append(u'{}: {}'.format(arg, val)) return u'\n'.join(output)
Default formatter function, if no `formatter` kwarg is specified. Takes information about the pick event as a series of kwargs and returns the string to be displayed.
def expandRecs(G, RecCollect, nodeType, weighted): """Expand all the citations from _RecCollect_""" for Rec in RecCollect: fullCiteList = [makeID(c, nodeType) for c in Rec.createCitation(multiCite = True)] if len(fullCiteList) > 1: for i, citeID1 in enumerate(fullCiteList): if citeID1 in G: for citeID2 in fullCiteList[i + 1:]: if citeID2 not in G: G.add_node(citeID2, **G.node[citeID1]) if weighted: G.add_edge(citeID1, citeID2, weight = 1) else: G.add_edge(citeID1, citeID2) elif weighted: try: G.edges[citeID1, citeID2]['weight'] += 1 except KeyError: G.add_edge(citeID1, citeID2, weight = 1) for e1, e2, data in G.edges(citeID1, data = True): G.add_edge(citeID2, e2, **data)
Expand all the citations from _RecCollect_
def get_asset_content(self, asset_content_id):
        """Gets the ``AssetContent`` specified by its ``Id``.

        In plenary mode, the exact ``Id`` is found or a ``NotFound``
        results. Otherwise, the returned ``AssetContent`` may have a
        different ``Id`` than requested, such as the case where a duplicate
        ``Id`` was assigned to an ``AssetContent`` and retained for
        compatibility.

        :param asset_content_id: the ``Id`` of the ``AssetContent`` to retrieve
        :type asset_content_id: ``osid.id.Id``
        :return: the returned ``AssetContent``
        :rtype: ``osid.repository.AssetContent``
        :raise: ``NotFound`` -- no ``AssetContent`` found with the given ``Id``
        :raise: ``NullArgument`` -- ``asset_content_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        return AssetContent(self._provider_session.get_asset_content(asset_content_id), self._config_map)
Gets the ``AssetContent`` specified by its ``Id``. In plenary mode, the exact ``Id`` is found or a ``NotFound`` results. Otherwise, the returned ``AssetContent`` may have a different ``Id`` than requested, such as the case where a duplicate ``Id`` was assigned to an ``AssetContent`` and retained for compatibility. :param asset_content_id: the ``Id`` of the ``AssetContent`` to retrieve :type asset_content_id: ``osid.id.Id`` :return: the returned ``AssetContent`` :rtype: ``osid.repository.AssetContent`` :raise: ``NotFound`` -- no ``AssetContent`` found with the given ``Id`` :raise: ``NullArgument`` -- ``asset_content_id`` is ``null`` :raise: ``OperationFailed`` -- unable to complete request :raise: ``PermissionDenied`` -- authorization failure *compliance: mandatory -- This method must be implemented.*
def handle(self, *args, **options):
        """
        Iterates over all the CRON_CLASSES (or the classes passed in as a
        command-line argument) and runs them.
        """
        cron_classes = options['cron_classes']
        if cron_classes:
            cron_class_names = cron_classes
        else:
            cron_class_names = getattr(settings, 'CRON_CLASSES', [])

        try:
            crons_to_run = [get_class(x) for x in cron_class_names]
        except Exception:
            error = traceback.format_exc()
            self.stdout.write('Make sure these are valid cron class names: %s\n%s' % (cron_class_names, error))
            return

        for cron_class in crons_to_run:
            run_cron_with_cache_check(
                cron_class,
                force=options['force'],
                silent=options['silent']
            )
        clear_old_log_entries()
        close_old_connections()
Iterates over all the CRON_CLASSES (or the classes passed in as a command-line argument) and runs them.
def list_path(root_dir):
    """List a directory if it exists.

    :param root_dir: str
    :return: list
    """
    res = []
    if os.path.isdir(root_dir):
        for name in os.listdir(root_dir):
            res.append(name)
    return res
List a directory if it exists. :param root_dir: str :return: list
def get_valences(self, structure): """ Returns a list of valences for the structure. This currently works only for ordered structures only. Args: structure: Structure to analyze Returns: A list of valences for each site in the structure (for an ordered structure), e.g., [1, 1, -2] or a list of lists with the valences for each fractional element of each site in the structure (for an unordered structure), e.g., [[2, 4], [3], [-2], [-2], [-2]] Raises: A ValueError if the valences cannot be determined. """ els = [Element(el.symbol) for el in structure.composition.elements] if not set(els).issubset(set(BV_PARAMS.keys())): raise ValueError( "Structure contains elements not in set of BV parameters!" ) # Perform symmetry determination and get sites grouped by symmetry. if self.symm_tol: finder = SpacegroupAnalyzer(structure, self.symm_tol) symm_structure = finder.get_symmetrized_structure() equi_sites = symm_structure.equivalent_sites else: equi_sites = [[site] for site in structure] # Sort the equivalent sites by decreasing electronegativity. equi_sites = sorted(equi_sites, key=lambda sites: -sites[0].species .average_electroneg) # Get a list of valences and probabilities for each symmetrically # distinct site. valences = [] all_prob = [] if structure.is_ordered: for sites in equi_sites: test_site = sites[0] nn = structure.get_neighbors(test_site, self.max_radius) prob = self._calc_site_probabilities(test_site, nn) all_prob.append(prob) val = list(prob.keys()) # Sort valences in order of decreasing probability. val = sorted(val, key=lambda v: -prob[v]) # Retain probabilities that are at least 1/100 of highest prob. valences.append( list(filter(lambda v: prob[v] > 0.01 * prob[val[0]], val))) else: full_all_prob = [] for sites in equi_sites: test_site = sites[0] nn = structure.get_neighbors(test_site, self.max_radius) prob = self._calc_site_probabilities_unordered(test_site, nn) all_prob.append(prob) full_all_prob.extend(prob.values()) vals = [] for (elsp, occ) in get_z_ordered_elmap( test_site.species): val = list(prob[elsp.symbol].keys()) # Sort valences in order of decreasing probability. val = sorted(val, key=lambda v: -prob[elsp.symbol][v]) # Retain probabilities that are at least 1/100 of highest # prob. 
                    vals.append(list(filter(
                        lambda v: prob[elsp.symbol][v] > 0.001 * prob[elsp.symbol][val[0]],
                        val)))
                valences.append(vals)

        # make variables needed for recursion
        if structure.is_ordered:
            nsites = np.array([len(i) for i in equi_sites])
            vmin = np.array([min(i) for i in valences])
            vmax = np.array([max(i) for i in valences])

            self._n = 0
            self._best_score = 0
            self._best_vset = None

            def evaluate_assignment(v_set):
                el_oxi = collections.defaultdict(list)
                for i, sites in enumerate(equi_sites):
                    el_oxi[sites[0].specie.symbol].append(v_set[i])
                max_diff = max([max(v) - min(v) for v in el_oxi.values()])
                if max_diff > 1:
                    return
                score = functools.reduce(
                    operator.mul, [all_prob[i][v] for i, v in enumerate(v_set)])
                if score > self._best_score:
                    self._best_vset = v_set
                    self._best_score = score

            def _recurse(assigned=None):
                # recurses to find permutations of valences based on whether a
                # charge balanced assignment can still be found
                assigned = assigned if assigned is not None else []
                if self._n > self.max_permutations:
                    return
                i = len(assigned)
                highest = vmax.copy()
                highest[:i] = assigned
                highest *= nsites
                highest = np.sum(highest)

                lowest = vmin.copy()
                lowest[:i] = assigned
                lowest *= nsites
                lowest = np.sum(lowest)

                # prune this branch if even the extreme valence sums cannot
                # reach charge neutrality
                if highest < 0 or lowest > 0:
                    self._n += 1
                    return

                if i == len(valences):
                    evaluate_assignment(assigned)
                    self._n += 1
                    return
                else:
                    for v in valences[i]:
                        new_assigned = list(assigned)
                        _recurse(new_assigned + [v])
        else:
            nsites = np.array([len(i) for i in equi_sites])
            tmp = []
            attrib = []
            for insite, nsite in enumerate(nsites):
                for val in valences[insite]:
                    tmp.append(nsite)
                    attrib.append(insite)
            new_nsites = np.array(tmp)
            fractions = []
            elements = []
            for sites in equi_sites:
                for sp, occu in get_z_ordered_elmap(sites[0].species):
                    elements.append(sp.symbol)
                    fractions.append(occu)
            fractions = np.array(fractions, float)
            new_valences = []
            for vals in valences:
                for val in vals:
                    new_valences.append(val)
            vmin = np.array([min(i) for i in new_valences], float)
            vmax = np.array([max(i) for i in new_valences], float)

            self._n = 0
            self._best_score = 0
            self._best_vset = None

            def evaluate_assignment(v_set):
                el_oxi = collections.defaultdict(list)
                jj = 0
                for i, sites in enumerate(equi_sites):
                    for specie, occu in get_z_ordered_elmap(
                            sites[0].species):
                        el_oxi[specie.symbol].append(v_set[jj])
                        jj += 1
                max_diff = max([max(v) - min(v) for v in el_oxi.values()])
                if max_diff > 2:
                    return
                score = functools.reduce(
                    operator.mul,
                    [all_prob[attrib[iv]][elements[iv]][vv]
                     for iv, vv in enumerate(v_set)])
                if score > self._best_score:
                    self._best_vset = v_set
                    self._best_score = score

            def _recurse(assigned=None):
                # recurses to find permutations of valences based on whether a
                # charge balanced assignment can still be found
                assigned = assigned if assigned is not None else []
                if self._n > self.max_permutations:
                    return
                i = len(assigned)
                highest = vmax.copy()
                highest[:i] = assigned
                highest *= new_nsites
                highest *= fractions
                highest = np.sum(highest)

                lowest = vmin.copy()
                lowest[:i] = assigned
                lowest *= new_nsites
                lowest *= fractions
                lowest = np.sum(lowest)

                # prune if the partial assignment can no longer satisfy
                # charge neutrality within the tolerance
                if (highest < -self.charge_neutrality_tolerance
                        or lowest > self.charge_neutrality_tolerance):
                    self._n += 1
                    return

                if i == len(new_valences):
                    evaluate_assignment(assigned)
                    self._n += 1
                    return
                else:
                    for v in new_valences[i]:
                        new_assigned = list(assigned)
                        _recurse(new_assigned + [v])

        _recurse()

        if self._best_vset:
            if structure.is_ordered:
                assigned = {}
                for val, sites in zip(self._best_vset, equi_sites):
                    for site in sites:
                        assigned[site] = val
                return [int(assigned[site]) for site in structure]
            else:
                assigned = {}
                new_best_vset = []
                for ii in range(len(equi_sites)):
                    new_best_vset.append([])
                for ival, val in enumerate(self._best_vset):
                    new_best_vset[attrib[ival]].append(val)
                for val, sites in zip(new_best_vset, equi_sites):
                    for site in sites:
                        assigned[site] = val
                return [[int(frac_site) for frac_site in assigned[site]]
                        for site in structure]
        else:
            raise ValueError("Valences cannot be assigned!")
Returns a list of valences for the structure. This works for both ordered and unordered structures: an ordered structure gets one integer valence per site, while an unordered structure gets a list of valences per site, one for each fractional species. Args: structure: Structure to analyze Returns: A list of valences for each site in the structure (for an ordered structure), e.g., [1, 1, -2], or a list of lists with the valences for each fractional element of each site in the structure (for an unordered structure), e.g., [[2, 4], [3], [-2], [-2], [-2]] Raises: A ValueError if the valences cannot be determined.
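This fragment appears to be pymatgen's BVAnalyzer.get_valences. A minimal usage sketch under that assumption; the CIF file path is a hypothetical placeholder:

from pymatgen.core import Structure
from pymatgen.analysis.bond_valence import BVAnalyzer

# "LiFePO4.cif" is a placeholder; any ordered structure file works.
structure = Structure.from_file("LiFePO4.cif")
analyzer = BVAnalyzer()
valences = analyzer.get_valences(structure)
print(valences)  # one integer valence per site for an ordered structure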
def _get_menu_meta_width(self, max_width, complete_state): """ Return the width of the meta column. """ if self._show_meta(complete_state): return min(max_width, max(get_cwidth(c.display_meta) for c in complete_state.current_completions) + 2) else: return 0
Return the width of the meta column.
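A standalone sketch of the same clamping rule, using plain strings in place of prompt_toolkit completion objects; the names below are hypothetical, and len() stands in for get_cwidth, so double-width characters are not handled:

def menu_meta_width(max_width, metas):
    # Widest meta text plus 2 cells of padding, clamped to max_width;
    # 0 when there is no meta information to show.
    if not metas:
        return 0
    return min(max_width, max(len(m) for m in metas) + 2)

print(menu_meta_width(40, ["builtin", "keyword argument"]))  # -> 18
print(menu_meta_width(10, ["a very long description"]))      # -> 10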
def _GetEarliestYearFromFileEntry(self):
    """Retrieves the year from the file entry date and time values.

    This function uses the creation time if available; otherwise, the change
    time (metadata last modification time) is used.

    Returns:
      int: year of the file entry or None.
    """
    file_entry = self.GetFileEntry()
    if not file_entry:
      return None

    stat_object = file_entry.GetStat()
    posix_time = getattr(stat_object, 'crtime', None)
    if posix_time is None:
      posix_time = getattr(stat_object, 'ctime', None)

    # Gzip files don't store the creation or metadata modification times,
    # but the modification time stored in the file is a good proxy.
    if file_entry.TYPE_INDICATOR == dfvfs_definitions.TYPE_INDICATOR_GZIP:
      posix_time = getattr(stat_object, 'mtime', None)

    if posix_time is None:
      logger.warning(
          'Unable to determine earliest year from file stat information.')
      return None

    try:
      year = timelib.GetYearFromPosixTime(
          posix_time, timezone=self._knowledge_base.timezone)
      return year
    except ValueError as exception:
      logger.error((
          'Unable to determine earliest year from file stat information with '
          'error: {0!s}').format(exception))
      return None
Retrieves the year from the file entry date and time values. This function uses the creation time if available; otherwise, the change time (metadata last modification time) is used. Returns: int: year of the file entry or None.
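A minimal sketch of the attribute-fallback and year-conversion steps, using UTC in place of plaso's knowledge-base timezone and a stand-in stat object with hypothetical values:

import datetime
import types

# Stand-in stat object that has a ctime but no crtime attribute.
stat_object = types.SimpleNamespace(ctime=1325376000)

posix_time = getattr(stat_object, 'crtime', None)
if posix_time is None:
    posix_time = getattr(stat_object, 'ctime', None)

year = datetime.datetime.fromtimestamp(
    posix_time, tz=datetime.timezone.utc).year
print(year)  # -> 2012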
def get_app_dir(app_name, roaming=True, force_posix=False):
    r"""Returns the config folder for the application.  The default behavior
    is to return whatever is most appropriate for the operating system.

    To give you an idea, for an app called ``"Foo Bar"``, something like
    the following folders could be returned:

    Mac OS X:
      ``~/Library/Application Support/Foo Bar``
    Mac OS X (POSIX):
      ``~/.foo-bar``
    Unix:
      ``~/.config/foo-bar``
    Unix (POSIX):
      ``~/.foo-bar``
    Win XP (roaming):
      ``C:\Documents and Settings\<user>\Local Settings\Application Data\Foo Bar``
    Win XP (not roaming):
      ``C:\Documents and Settings\<user>\Application Data\Foo Bar``
    Win 7 (roaming):
      ``C:\Users\<user>\AppData\Roaming\Foo Bar``
    Win 7 (not roaming):
      ``C:\Users\<user>\AppData\Local\Foo Bar``

    .. versionadded:: 2.0

    :param app_name: the application name.  This should be properly
                     capitalized and can contain whitespace.
    :param roaming: controls if the folder should be roaming or not on
                    Windows.  Has no effect otherwise.
    :param force_posix: if this is set to `True` then on any POSIX system the
                        folder will be stored in the home folder with a leading
                        dot instead of the XDG config home or darwin's
                        application support folder.
    """
    if WIN:
        key = 'APPDATA' if roaming else 'LOCALAPPDATA'
        folder = os.environ.get(key)
        if folder is None:
            folder = os.path.expanduser('~')
        return os.path.join(folder, app_name)
    if force_posix:
        return os.path.join(os.path.expanduser('~/.' + _posixify(app_name)))
    if sys.platform == 'darwin':
        return os.path.join(os.path.expanduser(
            '~/Library/Application Support'), app_name)
    return os.path.join(
        os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config')),
        _posixify(app_name))
r"""Returns the config folder for the application. The default behavior is to return whatever is most appropriate for the operating system. To give you an idea, for an app called ``"Foo Bar"``, something like the following folders could be returned: Mac OS X: ``~/Library/Application Support/Foo Bar`` Mac OS X (POSIX): ``~/.foo-bar`` Unix: ``~/.config/foo-bar`` Unix (POSIX): ``~/.foo-bar`` Win XP (roaming): ``C:\Documents and Settings\<user>\Local Settings\Application Data\Foo Bar`` Win XP (not roaming): ``C:\Documents and Settings\<user>\Application Data\Foo Bar`` Win 7 (roaming): ``C:\Users\<user>\AppData\Roaming\Foo Bar`` Win 7 (not roaming): ``C:\Users\<user>\AppData\Local\Foo Bar`` .. versionadded:: 2.0 :param app_name: the application name. This should be properly capitalized and can contain whitespace. :param roaming: controls if the folder should be roaming or not on Windows. Has no affect otherwise. :param force_posix: if this is set to `True` then on any POSIX system the folder will be stored in the home folder with a leading dot instead of the XDG config home or darwin's application support folder.
def get_results(self, job_id): """Get results of a job Args: job_id (str): The ID of the job. See: https://auth0.com/docs/api/management/v2#!/Jobs/get_results """ url = self._url('%s/results' % job_id) return self.client.get(url)
Get results of a job Args: job_id (str): The ID of the job. See: https://auth0.com/docs/api/management/v2#!/Jobs/get_results
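Usage sketch, assuming this method belongs to the Jobs manager of the auth0-python SDK; the domain, token, and job ID below are all placeholders:

from auth0.v3.management import Auth0

auth0 = Auth0('myaccount.auth0.com', 'MGMT_API_V2_TOKEN')
results = auth0.jobs.get_results('job_abc123')  # placeholder job ID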