code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def _fetch_all(cls, api_key, endpoint=None, offset=0, limit=25, **kwargs): """ Call `self._fetch_page` for as many pages as exist. TODO: should be extended to do async page fetches if API allows it via exposing total value. Returns a list of `cls` instances. """ output = [] qp = kwargs.copy() limit = max(1, min(100, limit)) maximum = kwargs.get('maximum') qp['limit'] = min(limit, maximum) if maximum is not None else limit qp['offset'] = offset more, total = None, None while True: entities, options = cls._fetch_page( api_key=api_key, endpoint=endpoint, **qp ) output += entities more = options.get('more') limit = options.get('limit') offset = options.get('offset') total = options.get('total') if more is None: if total is None or offset is None: break more = (limit + offset) < total if not more or (maximum is not None and len(output) >= maximum): break qp['limit'] = limit qp['offset'] = offset + limit return output
Call `self._fetch_page` for as many pages as exist. TODO: should be extended to do async page fetches if API allows it via exposing total value. Returns a list of `cls` instances.
def random(cls, length, bit_prob=.5):
    """Create a bit string of the given length, with the probability of
    each bit being set equal to bit_prob, which defaults to .5.

    Usage:
        # Create a random BitString of length 10 with mostly zeros.
        bits = BitString.random(10, bit_prob=.1)

    Arguments:
        length: An int, indicating the desired length of the result.
        bit_prob: A float in the range [0, 1]. This is the probability
            of any given bit in the result having a value of 1; default
            is .5, giving 0 and 1 equal probabilities of appearance for
            each bit's value.

    Return:
        A randomly generated BitString instance of the requested length.

    Raises:
        ValueError: if length or bit_prob is out of range or mistyped.
    """
    # Validate explicitly: `assert` is stripped when running under -O.
    if not (isinstance(length, int) and length >= 0):
        raise ValueError("length must be a non-negative int, got %r" % (length,))
    if not (isinstance(bit_prob, (int, float)) and 0 <= bit_prob <= 1):
        raise ValueError("bit_prob must be a number in [0, 1], got %r" % (bit_prob,))
    bits = numpy.random.choice(
        [False, True], size=(length,), p=[1 - bit_prob, bit_prob]
    )
    # Freeze the array so the resulting bit string is immutable.
    bits.flags.writeable = False
    return cls(bits)
Create a bit string of the given length, with the probability of each bit being set equal to bit_prob, which defaults to .5. Usage: # Create a random BitString of length 10 with mostly zeros. bits = BitString.random(10, bit_prob=.1) Arguments: length: An int, indicating the desired length of the result. bit_prob: A float in the range [0, 1]. This is the probability of any given bit in the result having a value of 1; default is .5, giving 0 and 1 equal probabilities of appearance for each bit's value. Return: A randomly generated BitString instance of the requested length.
def to_wire_dict(self):
    """Return a simplified transport object for logging and caching.

    The transport object must contain these attributes:
    - url_data.valid: bool
      Indicates if URL is valid
    - url_data.result: unicode
      Result string
    - url_data.warnings: list of tuples (tag, warning message)
      List of tagged warnings for this URL.
    - url_data.name: unicode string or None
      name of URL (eg. filename or link name)
    - url_data.parent_url: unicode or None
      Parent URL
    - url_data.base_ref: unicode
      HTML base reference URL of parent
    - url_data.url: unicode
      Fully qualified URL.
    - url_data.domain: unicode
      URL domain part.
    - url_data.checktime: int
      Number of seconds needed to check this link, default: zero.
    - url_data.dltime: int
      Number of seconds needed to download URL content, default: -1
    - url_data.size: int
      Size of downloaded URL content, default: -1
    - url_data.info: list of unicode
      Additional information about this URL.
    - url_data.line: int
      Line number of this URL at parent document, or -1
    - url_data.column: int
      Column number of this URL at parent document, or -1
    - url_data.page: int
      Page number of this URL at parent document, or -1
    - url_data.cache_url: unicode
      Cache url for this URL.
    - url_data.content_type: unicode
      MIME content type for URL content.
    - url_data.level: int
      Recursion level until reaching this URL from start URL
    - url_data.modified: datetime
      Last modification date of retrieved page (or None).

    Also included: extern (first element of the extern tuple), title,
    base_url.  None-able string attributes are coerced to u"".
    """
    return dict(valid=self.valid,
                # only the first element of the extern tuple is transported
                extern=self.extern[0],
                result=self.result,
                # copy so later mutation of self.warnings can't leak through
                warnings=self.warnings[:],
                name=self.name or u"",
                title=self.get_title(),
                parent_url=self.parent_url or u"",
                base_ref=self.base_ref or u"",
                base_url=self.base_url or u"",
                url=self.url or u"",
                # urlparts[1] is the netloc/domain component when parsed
                domain=(self.urlparts[1] if self.urlparts else u""),
                checktime=self.checktime,
                dltime=self.dltime,
                size=self.size,
                info=self.info,
                line=self.line,
                column=self.column,
                page=self.page,
                cache_url=self.cache_url,
                content_type=self.content_type,
                level=self.recursion_level,
                modified=self.modified,
                )
Return a simplified transport object for logging and caching. The transport object must contain these attributes: - url_data.valid: bool Indicates if URL is valid - url_data.result: unicode Result string - url_data.warnings: list of tuples (tag, warning message) List of tagged warnings for this URL. - url_data.name: unicode string or None name of URL (eg. filename or link name) - url_data.parent_url: unicode or None Parent URL - url_data.base_ref: unicode HTML base reference URL of parent - url_data.url: unicode Fully qualified URL. - url_data.domain: unicode URL domain part. - url_data.checktime: int Number of seconds needed to check this link, default: zero. - url_data.dltime: int Number of seconds needed to download URL content, default: -1 - url_data.size: int Size of downloaded URL content, default: -1 - url_data.info: list of unicode Additional information about this URL. - url_data.line: int Line number of this URL at parent document, or -1 - url_data.column: int Column number of this URL at parent document, or -1 - url_data.page: int Page number of this URL at parent document, or -1 - url_data.cache_url: unicode Cache url for this URL. - url_data.content_type: unicode MIME content type for URL content. - url_data.level: int Recursion level until reaching this URL from start URL - url_data.last_modified: datetime Last modification date of retrieved page (or None).
def _read(self, stream, text, byte_order):
    '''
    Read the actual data from a PLY file.

    :param stream: open file object positioned at this element's data.
    :param text: True when the PLY file is in ASCII ("text") format.
    :param byte_order: byte-order character used to build the dtype.
    '''
    dtype = self.dtype(byte_order)
    if text:
        self._read_txt(stream)
    elif _can_mmap(stream) and not self._have_list:
        # Loading the data is straightforward.  We will memory map
        # the file in copy-on-write mode.
        num_bytes = self.count * dtype.itemsize
        offset = stream.tell()
        # Seek to the end to learn how many bytes actually remain.
        stream.seek(0, 2)
        max_bytes = stream.tell() - offset
        if max_bytes < num_bytes:
            # Report how many whole elements were available.
            raise PlyElementParseError("early end-of-file", self,
                                       max_bytes // dtype.itemsize)
        self._data = _np.memmap(stream, dtype, 'c', offset, self.count)
        # Fix stream position: memmap does not advance the file pointer,
        # so move it past the bytes we just mapped.
        stream.seek(offset + self.count * dtype.itemsize)
    else:
        # A simple load is impossible (stream not mmap-able, or the
        # element has list properties): fall back to a binary read.
        self._read_bin(stream, byte_order)
    self._check_sanity()
Read the actual data from a PLY file.
def free(self, connection):
    """Free the connection from use by the session that was using it.

    :param connection: The connection to free
    :type connection: psycopg2.extensions.connection
    :raises: ConnectionNotFoundError
    """
    conn_id = id(connection)
    LOGGER.debug('Pool %s freeing connection %s', self.id, conn_id)
    try:
        self.connection_handle(connection).free()
    except KeyError:
        raise ConnectionNotFoundError(self.id, conn_id)
    # When every connection is back in the idle list, mark the moment
    # the pool went fully idle.
    if list(self.connections.values()) == self.idle_connections:
        with self._lock:
            self.idle_start = self.time_method()
    LOGGER.debug('Pool %s freed connection %s', self.id, conn_id)
Free the connection from use by the session that was using it. :param connection: The connection to free :type connection: psycopg2.extensions.connection :raises: ConnectionNotFoundError
def get_gcd(a, b):
    "Return the greatest common divisor of a and b (Euclid's algorithm)."
    # Same recurrence as the iterative form: (a, b) -> (b % a, a).
    return b if not a else get_gcd(b % a, a)
Return greatest common divisor for a and b.
def _next_page(self): """ Fetch the next page of the query. """ if self._last_page_seen: raise StopIteration new, self._last_page_seen = self.conn.query_multiple(self.object_type, self._next_page_index, self.url_params, self.query_params) self._next_page_index += 1 if len(new) == 0: self._last_page_seen = True # don't bother with next page if nothing was returned else: self._results += new
Fetch the next page of the query.
def list_instances(self):
    """List instances owned by the project.

    For example:

    .. literalinclude:: snippets.py
        :start-after: [START bigtable_list_instances]
        :end-before: [END bigtable_list_instances]

    :rtype: tuple
    :returns: (instances, failed_locations), where 'instances' is a list
              of :class:`google.cloud.bigtable.instance.Instance` and
              'failed_locations' is a list of locations which could not
              be resolved.
    """
    response = self.instance_admin_client.list_instances(self.project_path)
    instances = []
    for instance_pb in response.instances:
        instances.append(Instance.from_pb(instance_pb, self))
    return instances, response.failed_locations
List instances owned by the project. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_list_instances] :end-before: [END bigtable_list_instances] :rtype: tuple :returns: (instances, failed_locations), where 'instances' is list of :class:`google.cloud.bigtable.instance.Instance`, and 'failed_locations' is a list of locations which could not be resolved.
def search(self, CorpNum, DType, SDate, EDate, State, ItemCode, Page, PerPage, Order, UserID=None, QString=None):
    """Search the statement list.

    args
        CorpNum : Popbill member business registration number
        DType : date type; one of R (registered), W (written), I (issued)
        SDate : start date, formatted yyyyMMdd
        EDate : end date, formatted yyyyMMdd
        State : state codes; wildcard (*) allowed in the 2nd/3rd position
        ItemCode : list of statement type codes - 121 statement,
            122 invoice, 123 estimate, 124 purchase order,
            125 deposit slip, 126 receipt
        Page : page number
        PerPage : entries per page
        Order : sort direction, D descending / A ascending
        QString : partner info (partner name or business number);
            omitted means search all
        UserID : Popbill member user id
    """
    if DType is None or DType == '':
        raise PopbillException(-99999999, "일자유형이 입력되지 않았습니다.")
    if SDate is None or SDate == '':
        raise PopbillException(-99999999, "시작일자가 입력되지 않았습니다.")
    if EDate is None or EDate == '':
        raise PopbillException(-99999999, "종료일자가 입력되지 않았습니다.")

    query = [
        'DType=' + DType,
        'SDate=' + SDate,
        'EDate=' + EDate,
        'State=' + ','.join(State),
        'ItemCode=' + ','.join(ItemCode),
        'Page=' + str(Page),
        'PerPage=' + str(PerPage),
        'Order=' + Order,
    ]
    if QString is not None:
        query.append('QString=' + QString)

    uri = '/Statement/Search?' + '&'.join(query)
    return self._httpget(uri, CorpNum, UserID)
λͺ©λ‘ 쑰회 args CorpNum : νŒλΉŒνšŒμ› μ‚¬μ—…μžλ²ˆν˜Έ DType : μΌμžμœ ν˜•, R-λ“±λ‘μΌμ‹œ, W-μž‘μ„±μΌμž, I-λ°œν–‰μΌμ‹œ 쀑 택 1 SDate : μ‹œμž‘μΌμž, ν‘œμ‹œν˜•μ‹(yyyyMMdd) EDate : μ’…λ£ŒμΌμž, ν‘œμ‹œν˜•μ‹(yyyyMMdd) State : μƒνƒœμ½”λ“œ, 2,3번째 μžλ¦¬μ— μ™€μΌλ“œμΉ΄λ“œ(*) μ‚¬μš©κ°€λŠ₯ ItemCode : λͺ…μ„Έμ„œ μ’…λ₯˜μ½”λ“œ λ°°μ—΄, 121-λͺ…μ„Έμ„œ, 122-μ²­κ΅¬μ„œ, 123-κ²¬μ μ„œ, 124-λ°œμ£Όμ„œ 125-μž…κΈˆν‘œ, 126-영수증 Page : νŽ˜μ΄μ§€λ²ˆν˜Έ PerPage : νŽ˜μ΄μ§€λ‹Ή λͺ©λ‘κ°œμˆ˜ Order : μ •λ ¬λ°©ν–₯, D-λ‚΄λ¦Όμ°¨μˆœ, A-μ˜€λ¦„μ°¨μˆœ QString : 거래처 정보, 거래처 μƒν˜Έ λ˜λŠ” μ‚¬μ—…μžλ“±λ‘λ²ˆν˜Έ 기재, λ―ΈκΈ°μž¬μ‹œ μ „μ²΄μ‘°νšŒ UserID : 팝빌 νšŒμ›μ•„μ΄λ””
def _MergeEntities(self, a, b): """Merges two agencies. To be merged, they are required to have the same id, name, url and timezone. The remaining language attribute is taken from the new agency. Args: a: The first agency. b: The second agency. Returns: The merged agency. Raises: MergeError: The agencies could not be merged. """ def _MergeAgencyId(a_agency_id, b_agency_id): """Merge two agency ids. The only difference between this and _MergeIdentical() is that the values None and '' are regarded as being the same. Args: a_agency_id: The first agency id. b_agency_id: The second agency id. Returns: The merged agency id. Raises: MergeError: The agency ids could not be merged. """ a_agency_id = a_agency_id or None b_agency_id = b_agency_id or None return self._MergeIdentical(a_agency_id, b_agency_id) scheme = {'agency_id': _MergeAgencyId, 'agency_name': self._MergeIdentical, 'agency_url': self._MergeIdentical, 'agency_timezone': self._MergeIdentical} return self._SchemedMerge(scheme, a, b)
Merges two agencies. To be merged, they are required to have the same id, name, url and timezone. The remaining language attribute is taken from the new agency. Args: a: The first agency. b: The second agency. Returns: The merged agency. Raises: MergeError: The agencies could not be merged.
def hide_defaults(self):
    """Remove every field whose value equals its default, then recurse
    into the payload."""
    defaults = self.default_fields
    # Iterate over a snapshot of the keys so deletion is safe.
    for name in list(self.fields):
        if name in defaults and defaults[name] == self.fields[name]:
            del self.fields[name]
    self.payload.hide_defaults()
Removes fields' values that are the same as default values.
def command(self, outfile, configfile, pix):
    """Generate the command line for running the likelihood scan."""
    verbose = '-v' if self.verbose else ''
    return '%s %s %s --hpx %i %i %s' % (
        self.config['scan']['script'], configfile, outfile,
        self.nside_likelihood, pix, verbose)
Generate the command for running the likelihood scan.
def find_and_modify(self, query=None, update=None):
    """Find documents in this collection that match ``query`` and apply
    ``update`` to each of them."""
    changes = update if update is not None else {}
    for doc in self.find(query=query):
        doc.update(changes)
        self.update(doc)
Finds documents in this collection that match a given query and updates them
async def write_message_data(self, data: bytes, timeout: NumType = None) -> None:
    """
    Encode and write email message data.

    Automatically quotes lines beginning with a period per RFC821.
    Lone \\r and \\n characters are converted to \\r\\n characters.
    """
    # Normalize bare CR / LF line endings to CRLF, as SMTP requires.
    data = LINE_ENDINGS_REGEX.sub(b"\r\n", data)
    # Dot-stuffing: double line-leading periods so a data line starting
    # with "." is not mistaken for end-of-data (RFC 821 section 4.5.2).
    # NOTE(review): assumes PERIOD_REGEX (module-level) matches a
    # line-leading period - confirm against its definition.
    data = PERIOD_REGEX.sub(b"..", data)
    if not data.endswith(b"\r\n"):
        data += b"\r\n"
    # Terminate the DATA section with the lone-dot line.
    data += b".\r\n"
    await self.write_and_drain(data, timeout=timeout)
Encode and write email message data. Automatically quotes lines beginning with a period per RFC821. Lone \\\\r and \\\\n characters are converted to \\\\r\\\\n characters.
def get_node_values(
    self,
    feature=None,
    show_root=False,
    show_tips=False,
    ):
    """
    Returns node values from tree object in node plot order. To modify
    values you must modify the .treenode object directly by setting new
    'features'. For example:

        for node in ttree.treenode.traverse():
            node.add_feature("PP", 100)

    By default node and tip values are hidden (set to "") so that they
    are not shown on the tree plot. To include values for these nodes
    use the 'show_root'=True, or 'show_tips'=True arguments.

        tree.get_node_values("support", True, True)
    """
    # access nodes in the order they will be plotted (reverse index order)
    ndict = self.get_node_dict(return_internal=True, return_nodes=True)
    nodes = [ndict[i] for i in reversed(range(self.nnodes))]

    # get feature values; getattr-with-default replaces the original
    # hasattr/__getattribute__ pair (same semantics, idiomatic)
    if feature:
        vals = [getattr(node, feature, "") for node in nodes]
    else:
        vals = [" " for node in nodes]

    # apply hiding rules for root and tip nodes
    if not show_root:
        vals = [i if not j.is_root() else "" for i, j in zip(vals, nodes)]
    if not show_tips:
        vals = [i if not j.is_leaf() else "" for i, j in zip(vals, nodes)]

    # convert float to ints for prettier printing unless all floats;
    # raise exception and skip if there are true strings (names)
    try:
        if all(Decimal(str(i)) % 1 == 0 for i in vals if i):
            vals = [int(i) if isinstance(i, float) else i for i in vals]
    except Exception:
        # non-numeric feature values (e.g. names): leave untouched
        pass

    return vals
Returns node values from tree object in node plot order. To modify values you must modify the .treenode object directly by setting new 'features'. For example for node in ttree.treenode.traverse(): node.add_feature("PP", 100) By default node and tip values are hidden (set to "") so that they are not shown on the tree plot. To include values for these nodes use the 'show_root'=True, or 'show_tips'=True arguments. tree.get_node_values("support", True, True)
def get_fun(returner, fun):
    '''
    Return info about last time fun was called on each minion

    CLI Example:

    .. code-block:: bash

        salt '*' ret.get_fun mysql network.interfaces
    '''
    target = '{0}.get_fun'.format(returner)
    return salt.loader.returners(__opts__, __salt__)[target](fun)
Return info about last time fun was called on each minion CLI Example: .. code-block:: bash salt '*' ret.get_fun mysql network.interfaces
def fingerprint(channel_samples: list, Fs: int = DEFAULT_FS, wsize: int = DEFAULT_WINDOW_SIZE, wratio: Union[int, float] = DEFAULT_OVERLAP_RATIO, fan_value: int = DEFAULT_FAN_VALUE, amp_min: Union[int, float] = DEFAULT_AMP_MIN)-> Iterator[tuple]: """ FFT the channel, log transform output, find local maxima, then return locally sensitive hashes. # """ # FFT the signal and extract frequency components arr2D = mlab.specgram( channel_samples, NFFT=wsize, Fs=Fs, window=mlab.window_hanning, noverlap=int(wsize * wratio))[0] arr2D = 10 * np.log10(arr2D) arr2D[arr2D == -np.inf] = 0 # replace infs with zeros # find local maxima local_maxima = get_2D_peaks(arr2D, plot=False, amp_min=amp_min) # return hashes return generate_hashes(local_maxima, fan_value=fan_value)
FFT the channel, log transform output, find local maxima, then return locally sensitive hashes.
def set_classifier_interface_params(spec, features, class_labels,
                                    model_accessor_for_class_labels,
                                    output_features=None):
    """ Common utilities to set the classifier interface params.

    Validates and attaches the feature inputs, the predicted-class /
    class-score outputs, and the class labels to ``spec``.

    :param spec: the model specification to mutate.
    :param features: input features, normalized via
        ``_fm.process_or_validate_features``.
    :param class_labels: the class labels (all strings, or all
        losslessly int-castable values).
    :param model_accessor_for_class_labels: attribute name on ``spec``
        holding the class-label vectors.
    :param output_features: optional output features - predicted class
        and (optionally) class score.
    :returns: the mutated ``spec``.
    :raises ValueError: on missing labels or invalid output types.
    :raises TypeError: when an int-typed class label cannot be cast.
    """
    # Normalize the features list.
    features = _fm.process_or_validate_features(features)

    if class_labels is None:
        raise ValueError("List of class labels must be provided.")

    output_features = _fm.process_or_validate_classifier_output_features(
        output_features, class_labels)

    if len(output_features) == 1:
        predicted_class_output, pred_cl_type = output_features[0]
        score_output = None
    elif len(output_features) == 2:
        predicted_class_output, pred_cl_type = output_features[0]
        score_output, score_output_type = output_features[1]
    else:
        raise ValueError("Provided output classes for a classifier must be "
                         "a list of features, predicted class and (optionally) class_score.")

    spec.description.predictedFeatureName = predicted_class_output

    # Predicted class must be Int64 or String.
    if not (pred_cl_type == datatypes.Int64() or pred_cl_type == datatypes.String()):
        raise ValueError("Provided predicted class output type not Int64 or String (%s)."
                         % repr(pred_cl_type))

    if score_output is not None:
        if not isinstance(score_output_type, datatypes.Dictionary):
            raise ValueError("Provided class score output type not a Dictionary (%s)."
                             % repr(score_output_type))
        if score_output_type.key_type != pred_cl_type:
            raise ValueError(("Provided class score output (%s) key_type (%s) does not "
                              "match type of class prediction (%s).")
                             % (score_output, repr(score_output_type.key_type),
                                repr(pred_cl_type)))
        spec.description.predictedProbabilitiesName = score_output

    # add input
    for cur_input_name, input_type in features:
        input_ = spec.description.input.add()
        input_.name = cur_input_name
        datatypes._set_datatype(input_.type, input_type)

    # add output
    for cur_output_name, output_type in output_features:
        output_ = spec.description.output.add()
        output_.name = cur_output_name
        datatypes._set_datatype(output_.type, output_type)

    # Attach the class labels.
    if pred_cl_type == datatypes.String():
        try:
            for c in class_labels:
                getattr(spec, model_accessor_for_class_labels).stringClassLabels.vector.append(str(c))
        # Not all the classifiers have class labels; in particular the pipeline
        # classifier.  Thus it's not an error if we can't actually set them.
        except AttributeError:
            pass
    else:
        for c in class_labels:
            # Narrowed from a bare `except:` - only conversion failures
            # should count as "not castable".
            try:
                conv_ok = (int(c) == c)
            except (ValueError, TypeError):
                conv_ok = False
            if not conv_ok:
                raise TypeError(("Cannot cast '%s' class to an int type " % str(c))
                                + "(class type determined by type of first class).")
            try:
                getattr(spec, model_accessor_for_class_labels).int64ClassLabels.vector.append(int(c))
            # Not all the classifiers have class labels; in particular the pipeline
            # classifier.  Thus it's not an error if we can't actually set them.
            except AttributeError:
                break

    # And we are done!
    return spec
Common utilities to set the classifier interface params.
def enable_asynchronous(self):
    """Check that the socket module has been monkey patched by gevent.

    :returns: True when gevent's socket monkey patching is active.
    :raises Exception: when the socket module is not patched.
    """
    def is_monkey_patched():
        # Import the stdlib socket under a distinct name so it can be
        # compared against gevent's replacement below.
        import socket as std_socket
        try:
            from gevent import monkey
            from gevent import socket as gevent_socket
        except ImportError:
            return False
        if hasattr(monkey, "saved"):
            return "socket" in monkey.saved
        # Fallback for old gevent without `monkey.saved`: after patching,
        # the stdlib socket class is gevent's class.  (Bug fix: the
        # original referenced an unimported `gevent` name and compared
        # gevent's socket to itself, which was always True.)
        return std_socket.socket == gevent_socket.socket

    if not is_monkey_patched():
        raise Exception("To activate asynchronicity, please monkey patch"
                        " the socket module with gevent")
    return True
Check if socket have been monkey patched by gevent
def get_extra_path(name):
    """Look up an extra path registered by a path helper.

    :param name: name in format helper.path_name, e.g.
        ``sip.default_sip_dir``.
    :returns: the path registered under ``name``.
    :raises ValueError: if the helper or the path is unknown.
    """
    # Paths are cached in path_cache
    helper_name, _, key = name.partition(".")
    helper = path_helpers.get(helper_name)
    if not helper:
        # Bug fix: report the helper *name*; `helper` is always falsy here.
        raise ValueError("Helper '{0}' not found.".format(helper_name))

    if name not in path_cache:
        extra_paths = helper.extra_paths()
        path_cache.update(extra_paths)

    extra_path = path_cache.get(name)
    if not extra_path:
        raise ValueError("Helper '{0}' has no path called {1}".format(helper_name, name))
    return extra_path
:param name: name in format helper.path_name sip.default_sip_dir
def handleOACK(self, pkt):
    """Handle an OACK from the server, syncing any accepted options.

    :param pkt: the received OACK packet; on successful negotiation its
        options replace the context's options.
    :raises TftpException: when the OACK carries no options, or the
        options fail to match ours.
    """
    # Idiomatic emptiness test replaces `len(pkt.options.keys()) > 0`.
    if not pkt.options:
        raise TftpException("No options found in OACK")
    if not pkt.match_options(self.context.options):
        log.error("Failed to negotiate options")
        raise TftpException("Failed to negotiate options")
    log.info("Successful negotiation of options")
    # Set options to OACK options
    self.context.options = pkt.options
    for key in self.context.options:
        # Lazy %-args avoid formatting when INFO logging is disabled.
        log.info(" %s = %s", key, self.context.options[key])
This method handles an OACK from the server, syncing any accepted options.
def set_path(self, path):
    """Set the path of the file.

    Relative paths are resolved against ``self.cwd``; ``self.relative``
    is then recomputed against ``self.base``.
    """
    # Bug fix: the original tested `isabs` non-negated, so relative
    # paths were never resolved against cwd (and joining cwd onto an
    # already-absolute path is a no-op anyway).
    if not os.path.isabs(path):
        path = os.path.join(self.cwd, path)
    self.path = os.path.normpath(path)
    self.relative = os.path.relpath(self.path, self.base)
Set the path of the file.
def snippets(self):
    '''Get all snippets in this DAP'''
    result = []
    for fname in self._stripped_files:
        if self._snippets_pattern.match(fname):
            result.append(strip_suffix(fname, '.yaml'))
    return result
Get all snippets in this DAP
def download(self, overwrite=True):
    """
    Download the zipcodes CSV file.

    If ``overwrite`` is set to False, the file won't be downloaded if it
    already exists.
    """
    if overwrite or not os.path.exists(self.file_path):
        # mkstemp returns an *open* OS-level descriptor; close it right
        # away so it isn't leaked (the original discarded it unclosed).
        fd, tmp_path = tempfile.mkstemp()
        os.close(fd)
        try:
            urlretrieve(self.DOWNLOAD_URL, tmp_path)
            extract_csv(tmp_path, self.file_path)
        finally:
            os.remove(tmp_path)
Download the zipcodes CSV file. If ``overwrite`` is set to False, the file won't be downloaded if it already exists.
def render(self, progress, width=None, status=None):
    """Render the widget as a right-aligned percentage."""
    # +0.1 guards against float rounding just below a whole percent.
    pct = int(progress * 100 + 0.1)
    text = "%3d%%" % pct
    return RenderResult(rendered=text, next_progress=(pct + 1) / 100)
Render the widget.
def set_stream_stats(self, rx_ports=None, tx_ports=None, start_offset=40, sequence_checking=True,
                     data_integrity=True, timestamp=True):
    """ Set TX ports and RX streams for stream statistics.

    :param rx_ports: list of ports to set RX pgs. If empty set for all ports.
    :type rx_ports: list[ixexplorer.ixe_port.IxePort]
    :param tx_ports: list of streams to set TX pgs. If empty set for all streams.
    :type tx_ports: dict[ixexplorer.ixe_port.IxePort, list[ixexplorer.ixe_stream.IxeStream]]
    :param sequence_checking: True - enable sequence checkbox, False - disable
    :param data_integrity: True - enable data integrity checkbox, False - disable
    :param timestamp: True - enable timestamp checkbox, False - disable
    :param start_offset: start offset for signatures (group ID, signature, sequence)
    """
    # Default to every port / every stream when not explicitly given.
    if not rx_ports:
        rx_ports = self.ports.values()

    if not tx_ports:
        tx_ports = {}
        for port in self.ports.values():
            tx_ports[port] = port.streams.values()

    # Lay the signature fields out back-to-back from start_offset:
    # group id (4 bytes), signature (4), then optionally sequence
    # number (4) and the data-integrity signature.
    groupIdOffset = start_offset
    signatureOffset = start_offset + 4
    next_offset = start_offset + 8
    if sequence_checking:
        sequenceNumberOffset = next_offset
        next_offset += 4
    if data_integrity:
        di_signatureOffset = next_offset

    # Configure the RX side: receive modes plus matching field offsets.
    for port in rx_ports:
        modes = []
        modes.append(IxeReceiveMode.widePacketGroup)
        port.packetGroup.groupIdOffset = groupIdOffset
        port.packetGroup.signatureOffset = signatureOffset
        # Each optional mode is enabled only when the port supports it.
        if sequence_checking and int(port.isValidFeature('portFeatureRxSequenceChecking')):
            modes.append(IxeReceiveMode.sequenceChecking)
            port.packetGroup.sequenceNumberOffset = sequenceNumberOffset
        if data_integrity and int(port.isValidFeature('portFeatureRxDataIntegrity')):
            modes.append(IxeReceiveMode.dataIntegrity)
            port.dataIntegrity.signatureOffset = di_signatureOffset
        if timestamp and int(port.isValidFeature('portFeatureRxFirstTimeStamp')):
            port.dataIntegrity.enableTimeStamp = True
        else:
            port.dataIntegrity.enableTimeStamp = False
        port.set_receive_modes(*modes)
        port.write()

    # Configure the TX side: insert signatures into each stream using
    # the same offsets the RX ports expect.
    for port, streams in tx_ports.items():
        for stream in streams:
            stream.packetGroup.insertSignature = True
            stream.packetGroup.groupIdOffset = groupIdOffset
            stream.packetGroup.signatureOffset = signatureOffset
            if sequence_checking:
                stream.packetGroup.insertSequenceSignature = True
                stream.packetGroup.sequenceNumberOffset = sequenceNumberOffset
            if data_integrity and int(port.isValidFeature('portFeatureRxDataIntegrity')):
                stream.dataIntegrity.insertSignature = True
                stream.dataIntegrity.signatureOffset = di_signatureOffset
            if timestamp:
                stream.enableTimestamp = True
            else:
                stream.enableTimestamp = False
        port.write()
Set TX ports and RX streams for stream statistics. :param ports: list of ports to set RX pgs. If empty set for all ports. :type ports: list[ixexplorer.ixe_port.IxePort] :param tx_ports: list of streams to set TX pgs. If empty set for all streams. :type tx_ports: dict[ixexplorer.ixe_port.IxePort, list[ixexplorer.ixe_stream.IxeStream]] :param sequence_checking: True - enable sequence checkbox, False - disable :param data_integrity: True - enable data integrity checkbox, False - disable :param timestamp: True - enable timestamp checkbox, False - disable :param start_offset: start offset for signatures (group ID, signature, sequence)
def output_is_valid(self, process_data):
    """
    Check whether process output is allowed with output driver.

    Parameters
    ----------
    process_data : raw process output

    Returns
    -------
    True or False
    """
    data_type = self.METADATA["data_type"]
    if data_type == "raster":
        return (
            is_numpy_or_masked_array(process_data)
            or is_numpy_or_masked_array_with_tags(process_data)
        )
    if data_type == "vector":
        return is_feature_list(process_data)
Check whether process output is allowed with output driver. Parameters ---------- process_data : raw process output Returns ------- True or False
def fraction(value, allow_empty=False, minimum=None, maximum=None, **kwargs):
    """Validate that ``value`` is a :class:`Fraction <python:fractions.Fraction>`.

    :param value: The value to validate.

    :param allow_empty: If ``True``, returns :obj:`None <python:None>` if
      ``value`` is :obj:`None <python:None>`. If  ``False``, raises a
      :class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
      if ``value`` is :obj:`None <python:None>`. Defaults to ``False``.
    :type allow_empty: :class:`bool <python:bool>`

    :returns: ``value`` / :obj:`None <python:None>`
    :rtype: :class:`Fraction <python:fractions.Fraction>` /
      :obj:`None <python:None>`

    :raises EmptyValueError: if ``value`` is :obj:`None <python:None>` and
      ``allow_empty`` is ``False``
    :raises MinimumValueError: if ``minimum`` is supplied and ``value`` is
      less than the ``minimum``
    :raises MaximumValueError: if ``maximum`` is supplied and ``value`` is
      more than the ``maximum``
    :raises CannotCoerceError: if unable to coerce ``value`` to a
      :class:`Fraction <python:fractions.Fraction>`
    """
    try:
        return _numeric_coercion(value,
                                 coercion_function=fractions.Fraction,
                                 allow_empty=allow_empty,
                                 minimum=minimum,
                                 maximum=maximum)
    except (errors.EmptyValueError,
            errors.CannotCoerceError,
            errors.MinimumValueError,
            errors.MaximumValueError):
        # Validation errors from the coercion helper pass straight through.
        raise
    except Exception:
        raise errors.CannotCoerceError('unable to coerce value (%s) to Fraction, '
                                       'for an unknown reason - please see '
                                       'stack trace' % value)
Validate that ``value`` is a :class:`Fraction <python:fractions.Fraction>`. :param value: The value to validate. :param allow_empty: If ``True``, returns :obj:`None <python:None>` if ``value`` is :obj:`None <python:None>`. If ``False``, raises a :class:`EmptyValueError <validator_collection.errors.EmptyValueError>` if ``value`` is :obj:`None <python:None>`. Defaults to ``False``. :type allow_empty: :class:`bool <python:bool>` :returns: ``value`` / :obj:`None <python:None>` :rtype: :class:`Fraction <python:fractions.Fraction>` / :obj:`None <python:None>` :raises EmptyValueError: if ``value`` is :obj:`None <python:None>` and ``allow_empty`` is ``False`` :raises MinimumValueError: if ``minimum`` is supplied and ``value`` is less than the ``minimum`` :raises MaximumValueError: if ``maximum`` is supplied and ``value`` is more than the ``maximum`` :raises CannotCoerceError: if unable to coerce ``value`` to a :class:`Fraction <python:fractions.Fraction>`
def normalize_cjk_fullwidth_ascii(seq: str) -> str:
    """
    Convert fullwidth ASCII characters to their halfwidth equivalents.

    See https://en.wikipedia.org/wiki/Halfwidth_and_fullwidth_forms
    """
    # U+FF01..U+FF5E map onto U+0021..U+007E by subtracting 0xFEE0.
    table = {cp: cp - 0xFEE0 for cp in range(0xFF01, 0xFF5F)}
    return seq.translate(table)
Convert fullwidth ASCII to halfwidth ASCII. See https://en.wikipedia.org/wiki/Halfwidth_and_fullwidth_forms
def guess_payload_class(self, payload):
    """
    The payload encapsulation type is taken from the outer TZSP
    layer's encapsulation_protocol attribute.  # noqa: E501
    """
    layer = self.underlayer
    # Walk outward through the enclosing layers looking for the TZSP header.
    while layer:
        if isinstance(layer, TZSP):
            return layer.get_encapsulated_payload_class()
        layer = layer.underlayer
    raise TZSPStructureException('missing parent TZSP header')
the type of the payload encapsulation is given be the outer TZSP layers attribute encapsulation_protocol # noqa: E501
def _set_static_ip(name, session, vm_): ''' Set static IP during create() if defined ''' ipv4_cidr = '' ipv4_gw = '' if 'ipv4_gw' in vm_.keys(): log.debug('ipv4_gw is found in keys') ipv4_gw = vm_['ipv4_gw'] if 'ipv4_cidr' in vm_.keys(): log.debug('ipv4_cidr is found in keys') ipv4_cidr = vm_['ipv4_cidr'] log.debug('attempting to set IP in instance') set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)
Set static IP during create() if defined
def _check_chn_type(channels, available_channels): """ Function used for checking weather the elements in "channels" input are coincident with the available channels. ---------- Parameters ---------- channels : list [[mac_address_1_channel_1 <int>, mac_address_1_channel_2 <int>...], [mac_address_2_channel_1 <int>...]...] From which channels will the data be loaded. available_channels : dict Dictionary with the list of all the available channels per device. Returns ------- out : list It is returned a list of the selected channels in a standardized format. """ # ------------------------ Definition of constants and variables ------------------------------- chn_list_standardized = [] # %%%%%%%%%%%%%%%%%%%%%%%%%%% Fill of "chn_list_standardized" %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% devices = list(available_channels.keys()) for dev_nbr, device in enumerate(devices): if channels is not None: sub_unit = channels[dev_nbr] for channel in sub_unit: # Each sublist must be composed by integers. if channel in available_channels[devices[dev_nbr]]: continue else: raise RuntimeError("At least one of the specified channels is not available in " "the acquisition file.") chn_list_standardized.append(sub_unit) else: # By omission all the channels were selected. chn_list_standardized.append(available_channels[device]) return chn_list_standardized
Function used for checking whether the elements in the "channels" input are coincident with the available channels. ---------- Parameters ---------- channels : list [[mac_address_1_channel_1 <int>, mac_address_1_channel_2 <int>...], [mac_address_2_channel_1 <int>...]...] From which channels will the data be loaded. available_channels : dict Dictionary with the list of all the available channels per device. Returns ------- out : list A list of the selected channels is returned in a standardized format.
def get_suggested_repositories(self):
    """Procure suggested repositories for the user.

    :return: Iterator over suggested repositories, ordered from most to
        least starred.
    """
    if self.suggested_repositories is None:
        # Procure repositories to suggest to user, querying with
        # progressively shorter term lists.
        repository_set = []
        for term_count in range(5, 2, -1):
            query = self.__get_query_for_repos(term_count=term_count)
            repository_set.extend(self.__get_repos_for_query(query))

        # Remove repositories authenticated user is already interested in.
        catchy_repos = GitSuggest.minus(
            repository_set, self.user_starred_repositories
        )

        # Filter out repositories with too long descriptions. This is a
        # measure to weed out spammy repositories.  (Iterating an empty
        # list is a no-op, so no emptiness guard is needed.)
        filtered_repos = [
            repo
            for repo in catchy_repos
            if repo is not None
            and repo.description is not None
            and len(repo.description) <= GitSuggest.MAX_DESC_LEN
        ]

        # Present the repositories, highly starred to not starred.
        filtered_repos.sort(
            key=attrgetter("stargazers_count"), reverse=True
        )

        self.suggested_repositories = GitSuggest.get_unique_repositories(
            filtered_repos
        )

    # Return an iterator to help user fetch the repository listing.
    for repository in self.suggested_repositories:
        yield repository
Method to procure suggested repositories for the user. :return: Iterator to procure suggested repositories for the user.
def get_instruments(self, name=None):
    """
    :returns: sorted list of (mount, instrument); when ``name`` is given,
        delegates to ``get_instruments_by_name``
    """
    if name:
        return self.get_instruments_by_name(name)
    # Sort case-insensitively on the mount name (first tuple element).
    pairs = self._instruments.items()
    return sorted(pairs, key=lambda pair: pair[0].lower())
:returns: sorted list of (mount, instrument)
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary.

    Each line holds one token; the token's index is its zero-based line
    number.  Trailing whitespace/newlines are stripped from each token.

    :param vocab_file: path of the vocabulary file to read
    :return: ``collections.OrderedDict`` mapping token -> index
    """
    vocab = collections.OrderedDict()
    with io.open(vocab_file, 'r') as reader:
        # Iterating the file object is the idiomatic (and equivalent)
        # replacement for the manual readline()/empty-string EOF loop.
        for index, token in enumerate(reader):
            vocab[token.strip()] = index
    return vocab
Loads a vocabulary file into a dictionary.
def find_npolfile(flist, detector, filters):
    """
    Search a list of files for one that matches the configuration
    of detector and filters used.

    :param flist: candidate FITS file paths
    :param detector: DETECTOR keyword value to match
    :param filters: two-element sequence with the FILTER1/FILTER2 values
    :return: the last matching file path, or None if none match
    """
    npolfile = None
    for f in flist:
        fdet = fits.getval(f, 'detector', memmap=False)
        if fdet != detector:
            continue
        filt1 = fits.getval(f, 'filter1', memmap=False)
        filt2 = fits.getval(f, 'filter2', memmap=False)
        # NOTE(review): the 'date' keyword was also read into an unused
        # local here; the read has been dropped.
        if filt1 == 'ANY' or \
                (filt1 == filters[0] and filt2 == filters[1]):
            # No break: a later match overrides an earlier one.
            npolfile = f
    return npolfile
Search a list of files for one that matches the configuration of detector and filters used.
def import_eit_fzj(self, filename, configfile, correction_file=None,
                   timestep=None, **kwargs):
    """EIT data import for FZJ Medusa systems"""
    # read_3p_data also returns electrode positions and topography data,
    # but neither is supplied for this format, so both are discarded.
    df_emd, _, _ = eit_fzj.read_3p_data(filename, configfile, **kwargs)

    if correction_file is not None:
        eit_fzj_utils.apply_correction_factors(df_emd, correction_file)

    if timestep is not None:
        df_emd['timestep'] = timestep

    self._add_to_container(df_emd)

    print('Summary:')
    self._describe_data(df_emd)
EIT data import for FZJ Medusa systems
def strip_metadata(report):
    """
    Copy org_name, org_email and report_id from ``report_metadata`` into
    the JSON root and drop the ``report_metadata`` key, bringing the
    structure more in line with the Elastic output.

    Mutates ``report`` in place and returns it.
    """
    metadata = report['report_metadata']
    for key in ('org_name', 'org_email', 'report_id'):
        report[key] = metadata[key]
    del report['report_metadata']
    return report
Duplicates org_name, org_email and report_id into JSON root and removes report_metadata key to bring it more inline with Elastic output.
def take_function_register(self, rtype=SharedData.TYPES.NO_TYPE):
    """Reserves the dedicated function-return register and sets its type."""
    register = SharedData.FUNCTION_REGISTER
    if register not in self.free_registers:
        self.error("function register already taken")
    # Move the register from the free pool into the used pool and record
    # the requested return type in the symbol table.
    self.free_registers.remove(register)
    self.used_registers.append(register)
    self.symtab.set_type(register, rtype)
    return register
Reserves register for function return value and sets its type
def merge_validator_config(configs):
    """
    Given a list of ValidatorConfig objects, merges them into a single
    ValidatorConfig, giving priority in the order of the configs (first
    has highest priority).
    """
    # Every merged attribute follows the same rule: the first config in
    # ``configs`` that carries a non-None value wins.  Iterating in
    # reverse and overwriting implements exactly that priority without
    # repeating the same if-block once per field.  (The original's extra
    # ``permissions == {}`` check was redundant: an empty dict is already
    # "not None".)
    fields = (
        'bind_network',
        'bind_component',
        'bind_consensus',
        'endpoint',
        'peering',
        'seeds',
        'peers',
        'network_public_key',
        'network_private_key',
        'scheduler',
        'permissions',
        'roles',
        'opentsdb_url',
        'opentsdb_db',
        'opentsdb_username',
        'opentsdb_password',
        'minimum_peer_connectivity',
        'maximum_peer_connectivity',
        'state_pruning_block_depth',
        'fork_cache_keep_time',
        'component_thread_pool_workers',
        'network_thread_pool_workers',
        'signature_thread_pool_workers',
    )
    merged = dict.fromkeys(fields)
    for config in reversed(configs):
        for field in fields:
            value = getattr(config, field)
            if value is not None:
                merged[field] = value
    return ValidatorConfig(**merged)
Given a list of ValidatorConfig objects, merges them into a single ValidatorConfig, giving priority in the order of the configs (first has highest priority).
def split_input(cls, mapper_spec):
    """Inherit docs.

    Splits the datastore input into shards.  Shards by property range
    when the query filters describe one, otherwise falls back to the
    superclass's key-range split.  NOTE(review): uses ``xrange``, so
    this is Python 2 code.
    """
    shard_count = mapper_spec.shard_count
    query_spec = cls._get_query_spec(mapper_spec)

    # No shardable property range in the filters: defer to key sharding.
    if not property_range.should_shard_by_property_range(query_spec.filters):
        return super(DatastoreInputReader, cls).split_input(mapper_spec)

    # Artificially increase the number of shards to get a more even split.
    # For example, if we are creating 7 shards for one week of data based on a
    # Day property and the data points tend to be clumped on certain days (say,
    # Monday and Wednesday), instead of assigning each shard a single day of
    # the week, we will split each day into "oversplit_factor" pieces, and
    # assign each shard "oversplit_factor" pieces with "1 / oversplit_factor"
    # the work, so that the data from Monday and Wednesday is more evenly
    # spread across all shards.
    oversplit_factor = query_spec.oversplit_factor
    oversplit_shard_count = oversplit_factor * shard_count
    p_range = property_range.PropertyRange(query_spec.filters,
                                           query_spec.model_class_path)
    p_ranges = p_range.split(oversplit_shard_count)

    # User specified a namespace.
    if query_spec.ns is not None:
        ns_range = namespace_range.NamespaceRange(
            namespace_start=query_spec.ns,
            namespace_end=query_spec.ns,
            _app=query_spec.app)
        ns_ranges = [copy.copy(ns_range) for _ in p_ranges]
    else:
        ns_keys = namespace_range.get_namespace_keys(
            query_spec.app, cls.MAX_NAMESPACES_FOR_KEY_SHARD+1)
        # No namespaces at all: nothing to split.
        if not ns_keys:
            return
        # User doesn't specify ns but the number of ns is small.
        # We still split by property range.
        if len(ns_keys) <= cls.MAX_NAMESPACES_FOR_KEY_SHARD:
            ns_ranges = [namespace_range.NamespaceRange(_app=query_spec.app)
                         for _ in p_ranges]
        # Lots of namespaces. Split by ns.
        else:
            ns_ranges = namespace_range.NamespaceRange.split(n=oversplit_shard_count,
                                                             contiguous=False,
                                                             can_query=lambda: True,
                                                             _app=query_spec.app)
            # One full property range per namespace range.
            p_ranges = [copy.copy(p_range) for _ in ns_ranges]

    assert len(p_ranges) == len(ns_ranges)

    iters = [
        db_iters.RangeIteratorFactory.create_property_range_iterator(
            p, ns, query_spec) for p, ns in zip(p_ranges, ns_ranges)]

    # Reduce the number of ranges back down to the shard count.
    # It's possible that we didn't split into enough shards even
    # after oversplitting, in which case we don't need to do anything.
    if len(iters) > shard_count:
        # We cycle through the iterators and chain them together, e.g.
        # if we look at the indices chained together, we get:
        # Shard #0 gets 0, num_shards, 2 * num_shards, ...
        # Shard #1 gets 1, num_shards + 1, 2 * num_shards + 1, ...
        # Shard #2 gets 2, num_shards + 2, 2 * num_shards + 2, ...
        # and so on. This should split fairly evenly.
        iters = [
            db_iters.RangeIteratorFactory.create_multi_property_range_iterator(
                [iters[i] for i in xrange(start_index, len(iters), shard_count)]
            ) for start_index in xrange(shard_count)
        ]

    return [cls(i) for i in iters]
Inherit docs.
def generate_random_128bit_string():
    """Returns a 128 bit UTF-8 encoded string.

    Follows the same conventions as generate_random_64bit_string().  The
    upper 32 bits are the current time in epoch seconds and the lower 96
    bits are random, which allows for AWS X-Ray `interop
    <https://github.com/openzipkin/zipkin/issues/1754>`_.

    :returns: 32-character hex string
    """
    seconds = int(time.time())
    random_bits = random.getrandbits(96)
    # Pack the timestamp into the top 32 bits, randomness below it.
    combined = (seconds << 96) | random_bits
    return format(combined, '032x')
Returns a 128 bit UTF-8 encoded string. Follows the same conventions as generate_random_64bit_string(). The upper 32 bits are the current time in epoch seconds, and the lower 96 bits are random. This allows for AWS X-Ray `interop <https://github.com/openzipkin/zipkin/issues/1754>`_ :returns: 32-character hex string
def get_auth():
    """Prompt for MODSCAG credentials and return an HTTPS digest auth object."""
    import getpass
    from requests.auth import HTTPDigestAuth
    # Python 2 compatibility: prefer raw_input when it exists, otherwise
    # fall back to the Python 3 builtin input.
    try:
        prompt = raw_input
    except NameError:
        prompt = input
    username = prompt("MODSCAG Username:")
    password = getpass.getpass("MODSCAG Password:")
    # Equivalent shell usage:
    # wget -A'h8v4*snow_fraction.tif' --user=uname --password=pw
    return HTTPDigestAuth(username, password)
Get authorization token for https
def exclude(*what):
    """
    Blacklist *what*.

    :param what: What to blacklist.
    :type what: :class:`list` of classes or :class:`attr.Attribute`\\ s.

    :rtype: :class:`callable`
    """
    classes, attributes = _split_what(what)

    def exclude_(attribute, value):
        # Reject a value if either its class or its attribute is listed.
        if value.__class__ in classes:
            return False
        return attribute not in attributes

    return exclude_
Blacklist *what*. :param what: What to blacklist. :type what: :class:`list` of classes or :class:`attr.Attribute`\\ s. :rtype: :class:`callable`
def as_action_description(self):
    """
    Get the action description.

    Returns a dictionary describing the action.
    """
    inner = {
        'href': self.href_prefix + self.href,
        'timeRequested': self.time_requested,
        'status': self.status,
    }
    # Optional fields are only emitted when present.
    if self.input is not None:
        inner['input'] = self.input
    if self.time_completed is not None:
        inner['timeCompleted'] = self.time_completed
    return {self.name: inner}
Get the action description. Returns a dictionary describing the action.
def is_dn(s):
    """Return True if ``s`` parses as an LDAP distinguished name."""
    if s == '':
        # The empty string is a valid (empty) DN.
        return True
    match = DN_REGEX.match(s)
    # The pattern must match and consume the entire string.
    return match is not None and match.group(0) == s
Return True if s is a LDAP DN.
def _residual_soil(self): """ Methodology source: FEH, Vol. 3, p. 14 """ return self.catchment.descriptors.bfihost \ + 1.3 * (0.01 * self.catchment.descriptors.sprhost) \ - 0.987
Methodology source: FEH, Vol. 3, p. 14
def decode(message):
    """
    Convert a generic JSON message.

    * The entire message payload is parsed as JSON and used as the data
    * The timestamp of the message is the time that the message is RECEIVED
    """
    try:
        data = json.loads(message.payload.decode("utf-8"))
    except ValueError as e:
        raise InvalidEventException('Unable to parse JSON. payload="%s" error=%s' % (message.payload, str(e)))

    received_at = datetime.now(pytz.timezone("UTC"))

    # TODO: Flatten JSON, convert into array of key/value pairs
    return Message(data, received_at)
Convert a generic JSON message * The entire message is converted to JSON and treated as the message data * The timestamp of the message is the time that the message is RECEIVED
def setup_admin_on_rest_handlers(admin, admin_handler):
    """
    Initialize the admin application's routes.
    """
    router = admin.router
    handler = admin_handler
    static_folder = str(PROJ_ROOT / 'static')

    router.add_route('GET', '', handler.index_page, name='admin.index')
    router.add_route('POST', '/token', handler.token, name='admin.token')
    router.add_static('/static', path=static_folder, name='admin.static')
    router.add_route('DELETE', '/logout', handler.logout, name='admin.logout')
Initialize routes.
def plot_2(data, *args):
    """Plot 2. Running best score (scatter plot)"""
    df_all = pd.DataFrame(data)
    df_params = nonconstant_parameters(data)
    # Seed the running-best series with the very first trial, then append
    # every trial whose score improves on the best seen so far.
    xs = [df_all['id'][0]]
    ys = [df_all['mean_test_score'][0]]
    best_params = [df_params.loc[0]]
    for idx in range(len(df_all)):
        score = df_all['mean_test_score'][idx]
        if score > ys[-1]:
            xs.append(df_all['id'][idx])
            ys.append(score)
            best_params.append(df_params.loc[idx])
    return build_scatter_tooltip(
        x=xs, y=ys, tt=pd.DataFrame(best_params), title='Running best')
Plot 2. Running best score (scatter plot)
def get_insight(self, project_key, insight_id, **kwargs):
    """Retrieve an insight

    :param project_key: Project identifier, in the form of
        projectOwner/projectid
    :type project_key: str
    :param insight_id: Insight unique identifier.
    :type insight_id: str
    :returns: Insight definition, with all attributes
    :rtype: object
    :raises RestApiException: If a server error occurs

    Examples
    --------
    >>> import datadotworld as dw
    >>> api_client = dw.api_client()
    >>> insight = api_client.get_insight(
    ...    'jonloyens/'
    ...    'an-example-project-that-shows-what-to-put-in-data-world',
    ...    'c2538b0c-c200-474c-9631-5ff4f13026eb')  # doctest: +SKIP
    >>> insight['title']  # doctest: +SKIP
    'Coast Guard Lives Saved by Fiscal Year'
    """
    try:
        owner, dataset_id = parse_dataset_key(project_key)
        insight = self._insights_api.get_insight(
            owner, dataset_id, insight_id, **kwargs)
        return insight.to_dict()
    except _swagger.rest.ApiException as e:
        # Surface server-side failures through the package's error type.
        raise RestApiError(cause=e)
Retrieve an insight :param project_key: Project identifier, in the form of projectOwner/projectid :type project_key: str :param insight_id: Insight unique identifier. :type insight_id: str :returns: Insight definition, with all attributes :rtype: object :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> insight = api_client.get_insight( ... 'jonloyens/' ... 'an-example-project-that-shows-what-to-put-in-data-world', ... 'c2538b0c-c200-474c-9631-5ff4f13026eb') # doctest: +SKIP >>> insight['title'] # doctest: +SKIP 'Coast Guard Lives Saved by Fiscal Year'
def find_files(sequencepath):
    """
    Use glob to find all FASTA files in the provided sequence path.
    NOTE: FASTA files must have an extension such as .fasta, .fa, or .fas.
    Extensions of .fsa, .tfa, ect. are not currently supported
    :param sequencepath: path of folder containing FASTA genomes
    :return: list of FASTA files
    """
    # Any extension beginning with '.fa' matches; results are sorted.
    pattern = os.path.join(sequencepath, '*.fa*')
    return sorted(glob(pattern))
Use glob to find all FASTA files in the provided sequence path. NOTE: FASTA files must have an extension such as .fasta, .fa, or .fas. Extensions of .fsa, .tfa, ect. are not currently supported :param sequencepath: path of folder containing FASTA genomes :return: list of FASTA files
def create_shipping_address(self, shipping_address):
    """Creates a shipping address on an existing account.

    If you are creating an account, you can instead embed the shipping
    addresses with the account-creation request."""
    target_url = urljoin(self._url, '/shipping_addresses')
    return shipping_address.post(target_url)
Creates a shipping address on an existing account. If you are creating an account, you can embed the shipping addresses with the request
def datetime(self):
    """Minute-bar structures return datetimes; daily-bar structures
    return dates (level 0 of the MultiIndex)."""
    trimmed_index = self.data.index.remove_unused_levels()
    return pd.to_datetime(trimmed_index.levels[0])
εˆ†ι’ŸηΊΏη»“ζž„θΏ”ε›ždatetime ζ—₯ηΊΏη»“ζž„θΏ”ε›ždate
def get_dir_backup():
    """Retrieve a directory backup from S3 using command-line arguments."""
    options = parser.parse_args()
    s3_get_dir_backup(
        options.aws_access_key_id,
        options.aws_secret_access_key,
        options.bucket_name,
        options.s3_folder,
        options.zip_backups_dir,
        options.project,
    )
retrieves directory backup
def get_neutron_endpoint(cls, json_resp):
    """
    Parse the service catalog returned by the Identity API for an endpoint
    matching the Neutron service.

    Raises ``MissingNeutronEndpoint`` when none are registered in the
    catalog (the caller sends a CRITICAL service check in that case).
    """
    catalog = json_resp.get('token', {}).get('catalog', [])
    for entry in catalog:
        service_name = entry['name']
        if service_name != 'neutron' and 'Networking' not in service_name:
            continue
        # Collect the usable endpoints keyed by interface kind.
        valid = {
            ep['interface']: ep['url']
            for ep in entry['endpoints']
            if ep.get('interface', '') in ('public', 'internal')
        }
        if valid:
            # Favor public endpoints over internal ones.
            return valid.get('public', valid.get('internal'))
    raise MissingNeutronEndpoint()
Parse the service catalog returned by the Identity API for an endpoint matching the Neutron service Sends a CRITICAL service check when none are found registered in the Catalog
def redirect(self, pid):
    """Redirect persistent identifier to another persistent identifier.

    :param pid: The :class:`invenio_pidstore.models.PersistentIdentifier`
        where redirect the PID.
    :raises invenio_pidstore.errors.PIDInvalidAction: If the PID is not
        registered or is not already redirecting to another PID.
    :raises invenio_pidstore.errors.PIDDoesNotExistError: If PID is not
        found.
    :returns: `True` if the PID is successfully redirect.
    """
    # Only registered (or already-redirected) PIDs may be redirected.
    if not (self.is_registered() or self.is_redirected()):
        raise PIDInvalidAction("Persistent identifier is not registered.")

    try:
        with db.session.begin_nested():
            if self.is_redirected():
                # Re-point the existing Redirect row at the new target.
                r = Redirect.query.get(self.object_uuid)
                r.pid = pid
            else:
                # First redirect: create the Redirect row in its own
                # nested transaction -- presumably so ``r.id`` is
                # populated before it is referenced below (confirm).
                with db.session.begin_nested():
                    r = Redirect(pid=pid)
                    db.session.add(r)

            self.status = PIDStatus.REDIRECTED
            self.object_type = None
            # This PID's object now points at the Redirect row itself.
            self.object_uuid = r.id
            db.session.add(self)
    except IntegrityError:
        # The target PID row does not exist (constraint violation).
        raise PIDDoesNotExistError(pid.pid_type, pid.pid_value)
    except SQLAlchemyError:
        logger.exception("Failed to redirect to {0}".format(
            pid), extra=dict(pid=self))
        raise
    logger.info("Redirected PID to {0}".format(pid), extra=dict(pid=self))
    return True
Redirect persistent identifier to another persistent identifier. :param pid: The :class:`invenio_pidstore.models.PersistentIdentifier` where redirect the PID. :raises invenio_pidstore.errors.PIDInvalidAction: If the PID is not registered or is not already redirecting to another PID. :raises invenio_pidstore.errors.PIDDoesNotExistError: If PID is not found. :returns: `True` if the PID is successfully redirect.
def artist(self, spotify_id):
    """Get a spotify artist by their ID.

    Parameters
    ----------
    spotify_id : str
        The spotify_id to search by.
    """
    # Build the endpoint route and dispatch through the shared requester.
    return self.request(
        Route('GET', '/artists/{spotify_id}', spotify_id=spotify_id))
Get a spotify artist by their ID. Parameters ---------- spotify_id : str The spotify_id to search by.
def ast_scan_file(filename, re_fallback=True):
    '''Scans a file for imports using AST.

    In addition to normal imports, try to get imports via `__import__` or
    `import_module` calls. The AST parser should be able to resolve simple
    variable assignments in cases where these functions are called with
    variables instead of strings.

    :param filename: path of the Python source file to scan
    :param re_fallback: when True, fall back to the regex scanner if the
        file cannot be parsed as Python
    :return: ``(scope, imports)`` tuple, or ``(None, None)`` on failure
    '''
    # Keep the IOError handling tight around the read itself rather than
    # wrapping the whole scan.
    try:
        with io.open(filename, 'rb') as fp:
            source = fp.read()
    except IOError:
        # ``log.warn`` is a deprecated alias of ``log.warning``.
        log.warning('Could not open file: %s', filename)
        return None, None

    try:
        root = ast.parse(source, filename=filename)
    except (SyntaxError, IndentationError):
        if re_fallback:
            log.debug('Falling back to regex scanner')
            return _ast_scan_file_re(filename)
        log.error('Could not parse file: %s', filename)
        log.info('Exception:', exc_info=True)
        return None, None

    log.debug('Starting AST Scan: %s', filename)
    ast_visitor.reset(filename)
    ast_visitor.visit(root)
    log.debug('Project path: %s', ast_visitor.import_root)
    return ast_visitor.scope, ast_visitor.imports
Scans a file for imports using AST. In addition to normal imports, try to get imports via `__import__` or `import_module` calls. The AST parser should be able to resolve simple variable assignments in cases where these functions are called with variables instead of strings.
def delete_bond(self, n, m):
    """Implementation of bond removing.

    Removes the graph edge between atoms ``n`` and ``m`` and then flushes
    the cache, since cached derived data is invalidated by the change.
    """
    self.remove_edge(n, m)
    self.flush_cache()
implementation of bond removing
def write(self, file):
    """Write the image to the open file object.

    See `.save()` if you have a filename.

    In general, you can only call this method once; after it has been
    called the first time the PNG image is written, the source data will
    have been streamed, and cannot be streamed again.
    """
    # Build a writer from this image's metadata and stream the rows out.
    writer = Writer(**self.info)
    writer.write(file, self.rows)
Write the image to the open file object. See `.save()` if you have a filename. In general, you can only call this method once; after it has been called the first time the PNG image is written, the source data will have been streamed, and cannot be streamed again.
def set_default_content_type(application, content_type, encoding=None):
    """
    Store the default content type for an application.

    :param tornado.web.Application application: the application to modify
    :param str content_type: the content type to default to
    :param str|None encoding: encoding to use when one is unspecified
    """
    app_settings = get_settings(application, force_instance=True)
    app_settings.default_content_type = content_type
    app_settings.default_encoding = encoding
Store the default content type for an application. :param tornado.web.Application application: the application to modify :param str content_type: the content type to default to :param str|None encoding: encoding to use when one is unspecified
def get_bin(self):
    """Return the binary notation of the address/netmask."""
    # The stored value is decimal; skip re-validation on conversion.
    return _convert(
        self._ip_dec,
        notation=IP_BIN,
        inotation=IP_DEC,
        _check=False,
        _isnm=self._isnm,
    )
Return the binary notation of the address/netmask.
def user_data(self, access_token, *args, **kwargs):
    """Loads user data from the service."""
    headers = self._get_headers(access_token)
    # The provider requires a POST with the bearer headers attached.
    return self.get_json(self.USER_INFO_URL, method="POST", headers=headers)
Loads user data from service
def delete_glossary(
    self,
    name,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """
    Deletes a glossary, or cancels glossary construction if the glossary
    isn't created yet.  Returns NOT\\_FOUND, if the glossary doesn't
    exist.

    Example:
        >>> from google.cloud import translate_v3beta1
        >>>
        >>> client = translate_v3beta1.TranslationServiceClient()
        >>>
        >>> name = client.glossary_path('[PROJECT]', '[LOCATION]', '[GLOSSARY]')
        >>>
        >>> response = client.delete_glossary(name)
        >>>
        >>> def callback(operation_future):
        ...     # Handle result.
        ...     result = operation_future.result()
        >>>
        >>> response.add_done_callback(callback)
        >>>
        >>> # Handle metadata.
        >>> metadata = response.metadata()

    Args:
        name (str): Required. The name of the glossary to delete.
        retry (Optional[google.api_core.retry.Retry]): A retry object used
            to retry requests. If ``None`` is specified, requests will not
            be retried.
        timeout (Optional[float]): The amount of time, in seconds, to wait
            for the request to complete. Note that if ``retry`` is
            specified, the timeout applies to each individual attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            that is provided to the method.

    Returns:
        A :class:`~google.cloud.translate_v3beta1.types._OperationFuture` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Wrap the transport method to add retry and timeout logic.
    # The wrapped callable is cached so it is only built once per client.
    if "delete_glossary" not in self._inner_api_calls:
        self._inner_api_calls[
            "delete_glossary"
        ] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.delete_glossary,
            default_retry=self._method_configs["DeleteGlossary"].retry,
            default_timeout=self._method_configs["DeleteGlossary"].timeout,
            client_info=self._client_info,
        )

    request = translation_service_pb2.DeleteGlossaryRequest(name=name)
    if metadata is None:
        metadata = []
    metadata = list(metadata)
    # Attach a gRPC routing header derived from the resource name.
    try:
        routing_header = [("name", name)]
    except AttributeError:
        pass
    else:
        routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
            routing_header
        )
        metadata.append(routing_metadata)

    operation = self._inner_api_calls["delete_glossary"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
    # Wrap the raw long-running operation in an operation future.
    return google.api_core.operation.from_gapic(
        operation,
        self.transport._operations_client,
        translation_service_pb2.DeleteGlossaryResponse,
        metadata_type=translation_service_pb2.DeleteGlossaryMetadata,
    )
Deletes a glossary, or cancels glossary construction if the glossary isn't created yet. Returns NOT\_FOUND, if the glossary doesn't exist. Example: >>> from google.cloud import translate_v3beta1 >>> >>> client = translate_v3beta1.TranslationServiceClient() >>> >>> name = client.glossary_path('[PROJECT]', '[LOCATION]', '[GLOSSARY]') >>> >>> response = client.delete_glossary(name) >>> >>> def callback(operation_future): ... # Handle result. ... result = operation_future.result() >>> >>> response.add_done_callback(callback) >>> >>> # Handle metadata. >>> metadata = response.metadata() Args: name (str): Required. The name of the glossary to delete. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.translate_v3beta1.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
def gen_batches(iterable, batch_size):
    '''
    Returns a generator object that yields batches from `iterable`.
    See `iter_batches` for more details and caveats.

    Note that `iter_batches` returns an iterator, which never supports
    `len()`; `gen_batches` returns an iterable which supports `len()` if
    and only if `iterable` does.  This *may* be an iterator, but could be
    a `SizedGenerator` object.  To obtain an iterator (for example, to
    use the `next()` function), call `iter()` on this iterable.

    >>> batches = gen_batches('abcdefghijkl', batch_size=5)
    >>> len(batches)
    3
    >>> for batch in batches:
    ...     print(list(batch))
    ['a', 'b', 'c', 'd', 'e']
    ['f', 'g', 'h', 'i', 'j']
    ['k', 'l']
    '''
    def make_batches():
        return iter_batches(iterable, batch_size)

    try:
        total = len(iterable)
    except TypeError:
        # Unsized input: all we can offer is a plain iterator of batches.
        return make_batches()

    # Ceiling division gives the number of batches for a sized input.
    batch_count = -(-total // batch_size)
    return SizedGenerator(make_batches, length=batch_count)
Returns a generator object that yields batches from `iterable`. See `iter_batches` for more details and caveats. Note that `iter_batches` returns an iterator, which never supports `len()`, `gen_batches` returns an iterable which supports `len()` if and only if `iterable` does. This *may* be an iterator, but could be a `SizedGenerator` object. To obtain an iterator (for example, to use the `next()` function), call `iter()` on this iterable. >>> batches = gen_batches('abcdefghijkl', batch_size=5) >>> len(batches) 3 >>> for batch in batches: ... print(list(batch)) ['a', 'b', 'c', 'd', 'e'] ['f', 'g', 'h', 'i', 'j'] ['k', 'l']
def authenticate(self, _=None):  # TODO: remove unused var
    '''
    Authenticate with the master, this method breaks the functional
    paradigm, it will update the master information from a fresh sign
    in, signing in can occur as often as needed to keep up with the
    revolving master AES key.

    On success this stores the returned credentials on ``self._creds``
    and a ``Crypticle`` built from them on ``self._crypticle``.

    :rtype: Crypticle
    :returns: A crypticle used for encryption operations
    '''
    acceptance_wait_time = self.opts['acceptance_wait_time']
    acceptance_wait_time_max = self.opts['acceptance_wait_time_max']
    # A clear (unencrypted) channel is required because we do not yet
    # share an AES key with the master.
    channel = salt.transport.client.ReqChannel.factory(self.opts, crypt='clear')
    if not acceptance_wait_time_max:
        acceptance_wait_time_max = acceptance_wait_time
    try:
        while True:
            creds = self.sign_in(channel=channel)
            if creds == 'retry':
                # 'retry' means the master has not (yet) accepted our key.
                if self.opts.get('caller'):
                    # We have a list of masters, so we should break
                    # and try the next one in the list.
                    if self.opts.get('local_masters', None):
                        # NOTE(review): 'error' is assigned but never
                        # raised or returned — confirm intent.
                        error = SaltClientError('Minion failed to authenticate'
                                                ' with the master, has the '
                                                'minion key been accepted?')
                        break
                    else:
                        print('Minion failed to authenticate with the master, '
                              'has the minion key been accepted?')
                        sys.exit(2)
                if acceptance_wait_time:
                    log.info('Waiting %s seconds before retry.', acceptance_wait_time)
                    time.sleep(acceptance_wait_time)
                # Exponential backoff: double the wait, capped by
                # acceptance_wait_time_max.
                if acceptance_wait_time < acceptance_wait_time_max:
                    acceptance_wait_time += acceptance_wait_time
                    log.debug('Authentication wait time is %s', acceptance_wait_time)
                continue
            break
        self._creds = creds
        self._crypticle = Crypticle(self.opts, creds['aes'])
    finally:
        # Always release the clear channel, even if sign-in failed.
        channel.close()
Authenticate with the master, this method breaks the functional paradigm, it will update the master information from a fresh sign in, signing in can occur as often as needed to keep up with the revolving master AES key. :rtype: Crypticle :returns: A crypticle used for encryption operations
def direct_messages_sent(self, since_id=None, max_id=None, count=None,
                         include_entities=None, page=None):
    """
    Gets the 20 most recent direct messages sent by the authenticating
    user.

    https://dev.twitter.com/docs/api/1.1/get/direct_messages/sent

    :param str since_id:
        Returns results with an ID greater than (that is, more recent
        than) the specified ID. There are limits to the number of
        Tweets which can be accessed through the API. If the limit of
        Tweets has occurred since the since_id, the since_id will be
        forced to the oldest ID available.
    :param str max_id:
        Returns results with an ID less than (that is, older than) or
        equal to the specified ID.
    :param int count:
        Specifies the number of records to retrieve. Must be less than
        or equal to 200.
    :param int page:
        Specifies the page of results to retrieve.
    :param bool include_entities:
        The entities node will not be included when set to ``False``.

    :returns: A list of direct message dicts.
    """
    params = {}
    set_str_param(params, 'since_id', since_id)
    set_str_param(params, 'max_id', max_id)
    set_int_param(params, 'count', count)
    set_int_param(params, 'page', page)
    set_bool_param(params, 'include_entities', include_entities)

    return self._get_api('direct_messages/sent.json', params)
Gets the 20 most recent direct messages sent by the authenticating user. https://dev.twitter.com/docs/api/1.1/get/direct_messages/sent :param str since_id: Returns results with an ID greater than (that is, more recent than) the specified ID. There are limits to the number of Tweets which can be accessed through the API. If the limit of Tweets has occurred since the since_id, the since_id will be forced to the oldest ID available. :param str max_id: Returns results with an ID less than (that is, older than) or equal to the specified ID. :param int count: Specifies the number of records to retrieve. Must be less than or equal to 200. :param int page: Specifies the page of results to retrieve. :param bool include_entities: The entities node will not be included when set to ``False``. :returns: A list of direct message dicts.
def get_or_load_name(self, type_, id_, method):
    """
    Read-through cache for an object's name, keyed on (type_, id_).

    On a cache miss, `method` is invoked to fetch the object from the
    live Koji server, and the name is stored before being returned.

    :param type_: str, "user" or "tag"
    :param id_: int, eg. 123456
    :param method: callable invoked on a cache miss. It must return a
                   deferred that fires with an object having a ".name"
                   attribute (or with None).
    :returns: deferred that when fired returns a str, or None
    """
    cached = self.get_name(type_, id_)
    if cached is not None:
        defer.returnValue(cached)
    result = yield method(id_)
    if result is None:
        defer.returnValue(None)
    self.put_name(type_, id_, result.name)
    defer.returnValue(result.name)
read-through cache for a type of object's name. If we don't have a cached name for this type/id, then we will query the live Koji server and store the value before returning. :param type_: str, "user" or "tag" :param id_: int, eg. 123456 :param method: function to call if this value is not in the cache. This method must return a deferred that fires with an object with a ".name" attribute. :returns: deferred that when fired returns a str, or None
def _special_method_cache(method, cache_wrapper): """ Because Python treats special methods differently, it's not possible to use instance attributes to implement the cached methods. Instead, install the wrapper method under a different name and return a simple proxy to that wrapper. https://github.com/jaraco/jaraco.functools/issues/5 """ name = method.__name__ special_names = '__getattr__', '__getitem__' if name not in special_names: return wrapper_name = '__cached' + name def proxy(self, *args, **kwargs): if wrapper_name not in vars(self): bound = types.MethodType(method, self) cache = cache_wrapper(bound) setattr(self, wrapper_name, cache) else: cache = getattr(self, wrapper_name) return cache(*args, **kwargs) return proxy
Because Python treats special methods differently, it's not possible to use instance attributes to implement the cached methods. Instead, install the wrapper method under a different name and return a simple proxy to that wrapper. https://github.com/jaraco/jaraco.functools/issues/5
def parse_ppi_graph(path: str, min_edge_weight: float = 0.0) -> Graph:
    """Build an undirected graph of gene interactions from an edgelist file.

    :param str path: The path to the edgelist file
    :param float min_edge_weight: Cutoff to keep/remove the edges, default is 0, but could also be 0.63.
    :return Graph: Protein-protein interaction graph
    """
    logger.info("In parse_ppi_graph()")
    expanded_path = os.path.expanduser(path)
    graph = igraph.read(expanded_path, format="ncol", directed=False, names=True)

    # Drop interactions below the cutoff, then remove any proteins
    # left without neighbours.
    weak_edges = graph.es.select(weight_lt=min_edge_weight)
    graph.delete_edges(weak_edges)
    isolated_vertices = graph.vs.select(_degree=0)
    graph.delete_vertices(isolated_vertices)

    logger.info(f"Loaded PPI network.\n"
                f"Number of proteins: {len(graph.vs)}\n"
                f"Number of interactions: {len(graph.es)}\n")
    return graph
Build an undirected graph of gene interactions from edgelist file. :param str path: The path to the edgelist file :param float min_edge_weight: Cutoff to keep/remove the edges, default is 0, but could also be 0.63. :return Graph: Protein-protein interaction graph
def async_alert(self, alert_msg: str, new_prompt: Optional[str] = None) -> None:  # pragma: no cover
    """
    Display an important message to the user while they are at the prompt in between commands.
    To the user it appears as if an alert message is printed above the prompt and their current input
    text and cursor location is left alone.

    IMPORTANT: This function will not print an alert unless it can acquire self.terminal_lock to ensure
               a prompt is onscreen. Therefore it is best to acquire the lock before calling this function
               to guarantee the alert prints.

    :param alert_msg: the message to display to the user
    :param new_prompt: if you also want to change the prompt that is displayed, then include it here
                       see async_update_prompt() docstring for guidance on updating a prompt
    :raises RuntimeError: if called while another thread holds terminal_lock
    """
    # Only useful on a VT100-capable terminal with raw input enabled.
    if not (vt100_support and self.use_rawinput):
        return

    import shutil
    import colorama.ansi as ansi
    from colorama import Cursor

    # Sanity check that can't fail if self.terminal_lock was acquired before calling this function
    if self.terminal_lock.acquire(blocking=False):

        # Figure out what prompt is displaying
        current_prompt = self.continuation_prompt if self.at_continuation_prompt else self.prompt

        # Only update terminal if there are changes
        update_terminal = False

        if alert_msg:
            alert_msg += '\n'
            update_terminal = True

        # Set the prompt if its changed
        if new_prompt is not None and new_prompt != self.prompt:
            self.prompt = new_prompt

            # If we aren't at a continuation prompt, then it's OK to update it
            if not self.at_continuation_prompt:
                rl_set_prompt(self.prompt)
                update_terminal = True

        if update_terminal:
            # Get the size of the terminal
            terminal_size = shutil.get_terminal_size()

            # Split the prompt lines since it can contain newline characters.
            prompt_lines = current_prompt.splitlines()

            # Calculate how many terminal lines are taken up by all prompt lines except for the last one.
            # That will be included in the input lines calculations since that is where the cursor is.
            num_prompt_terminal_lines = 0
            for line in prompt_lines[:-1]:
                line_width = utils.ansi_safe_wcswidth(line)
                num_prompt_terminal_lines += int(line_width / terminal_size.columns) + 1

            # Now calculate how many terminal lines are take up by the input
            last_prompt_line = prompt_lines[-1]
            last_prompt_line_width = utils.ansi_safe_wcswidth(last_prompt_line)

            input_width = last_prompt_line_width + utils.ansi_safe_wcswidth(readline.get_line_buffer())

            num_input_terminal_lines = int(input_width / terminal_size.columns) + 1

            # Get the cursor's offset from the beginning of the first input line
            cursor_input_offset = last_prompt_line_width + rl_get_point()

            # Calculate what input line the cursor is on
            cursor_input_line = int(cursor_input_offset / terminal_size.columns) + 1

            # Create a string that when printed will clear all input lines and display the alert
            terminal_str = ''

            # Move the cursor down to the last input line
            if cursor_input_line != num_input_terminal_lines:
                terminal_str += Cursor.DOWN(num_input_terminal_lines - cursor_input_line)

            # Clear each line from the bottom up so that the cursor ends up on the first prompt line
            total_lines = num_prompt_terminal_lines + num_input_terminal_lines
            terminal_str += (ansi.clear_line() + Cursor.UP(1)) * (total_lines - 1)

            # Clear the first prompt line
            terminal_str += ansi.clear_line()

            # Move the cursor to the beginning of the first prompt line and print the alert
            terminal_str += '\r' + alert_msg

            if rl_type == RlType.GNU:
                sys.stderr.write(terminal_str)
            elif rl_type == RlType.PYREADLINE:
                # noinspection PyUnresolvedReferences
                readline.rl.mode.console.write(terminal_str)

            # Redraw the prompt and input lines
            rl_force_redisplay()

        self.terminal_lock.release()

    else:
        raise RuntimeError("another thread holds terminal_lock")
Display an important message to the user while they are at the prompt in between commands. To the user it appears as if an alert message is printed above the prompt and their current input text and cursor location is left alone. IMPORTANT: This function will not print an alert unless it can acquire self.terminal_lock to ensure a prompt is onscreen. Therefore it is best to acquire the lock before calling this function to guarantee the alert prints. :param alert_msg: the message to display to the user :param new_prompt: if you also want to change the prompt that is displayed, then include it here see async_update_prompt() docstring for guidance on updating a prompt :raises RuntimeError if called while another thread holds terminal_lock
def contract(self, process):
    """
    This contracts the current node to its parent and then either
    calculates the params and values if all child data exists, OR uses
    the default parent data.
    (In real terms it returns the parent and recalculates.)
    TODO = processes need to be recalculated
    """
    # Debug output left in by the author; process recalculation is
    # still a TODO.
    print('TODO: process check = ', process)
    print(self.name, ' contracted to ->', self.parent)
    return self.parent
this contracts the current node to its parent and then either calculates the params and values if all child data exists, OR uses the default parent data. (In real terms it returns the parent and recalculates.) TODO = processes need to be recalculated
def update(
        self, kb_id, update_kb, custom_headers=None, raw=False, **operation_config):
    """Asynchronous operation to modify a knowledgebase.

    :param kb_id: Knowledgebase id.
    :type kb_id: str
    :param update_kb: Post body of the request.
    :type update_kb:
     ~azure.cognitiveservices.knowledge.qnamaker.models.UpdateKbOperationDTO
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: Operation or ClientRawResponse if raw=true
    :rtype: ~azure.cognitiveservices.knowledge.qnamaker.models.Operation
     or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorResponseException<azure.cognitiveservices.knowledge.qnamaker.models.ErrorResponseException>`
    """
    # Construct URL
    url = self.update.metadata['url']
    path_format_arguments = {
        'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
        'kbId': self._serialize.url("kb_id", kb_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}

    # Construct headers
    header_parameters = {}
    header_parameters['Accept'] = 'application/json'
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if custom_headers:
        header_parameters.update(custom_headers)

    # Construct body
    body_content = self._serialize.body(update_kb, 'UpdateKbOperationDTO')

    # Construct and send request
    request = self._client.patch(url, query_parameters, header_parameters, body_content)
    response = self._client.send(request, stream=False, **operation_config)

    # 202 Accepted is the only success status for this long-running
    # operation; anything else is surfaced as an error.
    if response.status_code not in [202]:
        raise models.ErrorResponseException(self._deserialize, response)

    deserialized = None
    header_dict = {}

    if response.status_code == 202:
        deserialized = self._deserialize('Operation', response)
        header_dict = {
            'Location': 'str',
        }

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        client_raw_response.add_headers(header_dict)
        return client_raw_response

    return deserialized
Asynchronous operation to modify a knowledgebase. :param kb_id: Knowledgebase id. :type kb_id: str :param update_kb: Post body of the request. :type update_kb: ~azure.cognitiveservices.knowledge.qnamaker.models.UpdateKbOperationDTO :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: Operation or ClientRawResponse if raw=true :rtype: ~azure.cognitiveservices.knowledge.qnamaker.models.Operation or ~msrest.pipeline.ClientRawResponse :raises: :class:`ErrorResponseException<azure.cognitiveservices.knowledge.qnamaker.models.ErrorResponseException>`
def set_alpha_value(self, value):
    '''
    setter

    Set the learning rate.

    :param value: the new learning rate; must be a ``float``.
    :raises TypeError: if ``value`` is not a ``float``.
    '''
    # `isinstance(...) is False` is unidiomatic; use `not isinstance`.
    if not isinstance(value, float):
        raise TypeError("The type of __alpha_value must be float.")
    self.__alpha_value = value
setter: Set the learning rate.
def health_check(self):
    """Verify that device is accessible over CLI by sending ENTER for cli session.

    :returns: a human-readable result string; also pushes the
              Online/Error live status to the portal (best effort).
    """
    status = 'Online'
    message = 'Health check on resource {}'.format(self._resource_name)

    try:
        flow = RunCommandFlow(self.cli_handler, self._logger)
        flow.execute_flow()
    except Exception as exc:
        self._logger.exception(exc)
        status = 'Error'
        message += ' failed.'
    else:
        message += ' passed.'

    # Report the outcome to the portal; failure to report must not
    # mask the health-check result itself.
    try:
        self._api.SetResourceLiveStatus(self._resource_name, status, message)
    except Exception:
        self._logger.error('Cannot update {} resource status on portal'.format(self._resource_name))

    return message
Verify that device is accessible over CLI by sending ENTER for cli session
def _process_cache(self, d, path=()): """Recusively walk a nested recon cache dict to obtain path/values""" for k, v in d.iteritems(): if not isinstance(v, dict): self.metrics.append((path + (k,), v)) else: self._process_cache(v, path + (k,))
Recursively walk a nested recon cache dict to obtain path/values
def parse_item(self, location: str, item_type: Type[T], item_name_for_log: str = None,
               file_mapping_conf: FileMappingConfiguration = None,
               options: Dict[str, Dict[str, Any]] = None) -> T:
    """
    Main method to parse an item of type item_type

    :param location: location of the item to parse
    :param item_type: expected type of the parsed item
    :param item_name_for_log: optional name used only in log messages
    :param file_mapping_conf: optional file mapping configuration
    :param options: optional parser options
    :return: the parsed item
    """
    # -- item_name_for_log: normalize to '' then append a trailing
    # space so it reads naturally inside the log message below.
    item_name_for_log = '' if not item_name_for_log else item_name_for_log
    check_var(item_name_for_log, var_types=str, var_name='item_name_for_log')
    if len(item_name_for_log) > 0:
        item_name_for_log = item_name_for_log + ' '

    self.logger.debug('**** Starting to parse single object {}of type <{}> at location {} ****'
                      .format(item_name_for_log, get_pretty_type_str(item_type), location))

    # common steps
    return self._parse__item(item_type, location, file_mapping_conf, options=options)
Main method to parse an item of type item_type :param location: :param item_type: :param item_name_for_log: :param file_mapping_conf: :param options: :return:
def _build(self, inputs):
    """Connects the SliceByDim module into the graph.

    Args:
      inputs: `Tensor` to slice. Its rank must be greater than the maximum
          dimension specified in `dims` (plus one as python is 0 indexed).

    Returns:
      The sliced tensor.

    Raises:
      ValueError: If `inputs` tensor has insufficient rank.
    """
    input_shape = inputs.get_shape().as_list()
    rank = len(input_shape)

    # The tensor must have at least max(dims) + 1 dimensions.
    max_dim = np.max(self._dims) + 1
    if rank < max_dim:
        raise ValueError("Rank of inputs must be at least {}.".format(max_dim))

    # Start from "take everything" defaults for `tf.slice`, then
    # override only the dimensions the user asked to slice.
    slice_begin = [0] * rank
    slice_size = [-1] * rank
    for dim, begin, size in zip(self._dims, self._begin, self._size):
        slice_begin[dim] = begin
        slice_size[dim] = size

    return tf.slice(inputs, begin=slice_begin, size=slice_size)
Connects the SliceByDim module into the graph. Args: inputs: `Tensor` to slice. Its rank must be greater than the maximum dimension specified in `dims` (plus one as python is 0 indexed). Returns: The sliced tensor. Raises: ValueError: If `inputs` tensor has insufficient rank.
def summarize_entity_person(person):
    """ assume person entity using cnschema person vocabulary, http://cnschema.org/Person

        Builds a one-sentence Chinese summary from the entity's fields.
        Returns False if the entity has no "name".
    """
    ret = []

    value = person.get("name")
    if not value:
        return False
    ret.append(value)

    prop = "courtesyName"
    value = json_get_first_item(person, prop)
    # u"不详" means "unknown"; treat it as missing.
    if value == u"不详":
        value = ""
    if value:
        ret.append(u'ε­—{}'.format(value))

    value = person.get("alternateName")
    if value:
        # ret.append(u'别名{}'.format(value))  # Bugged: disabled by author
        pass

    prop = "artName"
    value = json_get_first_item(person, prop)
    if value:
        ret.append(u'号{}'.format(value))

    value = person.get("dynasty")
    if value:
        ret.append(u'{}δΊΊ'.format(value))

    prop = "ancestralHome"
    value = json_get_first_item(person, prop)
    if value:
        ret.append(u'η₯–籍{}'.format(value))

    birth_date = person.get("birthDate", "")
    birth_place = person.get("birthPlace", "")
    # Special case for unknown birth date
    if birth_date == u"不详":
        birth_date = ""
    if birth_place:
        ret.append(u'{}ε‡Ίη”ŸδΊŽ{}'.format(birth_date, birth_place))
    elif birth_date:
        ret.append(u'{}ε‡Ίη”Ÿ'.format(birth_date))

    prop = "nationality"
    nationality = json_get_first_item(person, prop)
    prop = "occupation"
    occupation = json_get_first_item(person, prop)
    if occupation:
        if nationality:
            ret.append(u'{}{}'.format(nationality, occupation))
        else:
            ret.append(u'{}'.format(occupation))
    elif nationality:
        ret.append(u'{}δΊΊ'.format(nationality))

    prop = "authorOf"
    value = json_get_list(person, prop)
    if value:
        logging.info(value)
        value = u"、".join(value)
        ret.append(u'δΈ»θ¦δ½œε“οΌš{}'.format(value))

    prop = "accomplishment"
    value = json_get_list(person, prop)
    if value:
        value = u"、".join(value)
        # Only include short accomplishment lists in the summary.
        if len(value) < 30:
            # Colon is handled by text reading software
            ret.append(u"主要成就:{}".format(value))

    ret = u",".join(ret)
    # Make all commas Chinese
    ret = ret.replace(u',', u',')
    # Removes repeat commas
    ret = re.sub(u",+", u",", ret)
    # Handles periods at end
    ret = re.sub(ur"[γ€‚οΌŒ]+$", u"", ret)
    # Converts brackets to Chinese
    ret = ret.replace(u'(', u'(')
    ret = ret.replace(u')', u'οΌ‰')
    # Removes brackets and all contained info
    ret = re.sub(ur"([^οΌ‰]*οΌ‰", u"", ret)
    ret = u''.join([ret, u"。"])
    return ret
assume person entity using cnschema Person vocabulary, http://cnschema.org/Person
def fit_interval_censoring(
    self,
    lower_bound,
    upper_bound,
    event_observed=None,
    timeline=None,
    label=None,
    alpha=None,
    ci_labels=None,
    show_progress=False,
    entry=None,
    weights=None,
):  # pylint: disable=too-many-arguments
    """
    Fit the model to an interval censored dataset.

    Parameters
    ----------
    lower_bound: an array, or pd.Series
      length n, the start of the period the subject experienced the event in.
    upper_bound: an array, or pd.Series
      length n, the end of the period the subject experienced the event in. If the value
      is equal to the corresponding value in lower_bound, then
      the individual's event was observed (not censored).
    event_observed: numpy array or pd.Series, optional
      length n, if left optional, infer from ``lower_bound`` and ``upper_bound`` (if lower_bound==upper_bound then event
      observed, if lower_bound < upper_bound, then event censored)
    timeline: list, optional
        return the estimate at the values in timeline (positively increasing)
    label: string, optional
        a string to name the column of the estimate.
    alpha: float, optional
        the alpha value in the confidence intervals. Overrides the initializing
       alpha for this call to fit only.
    ci_labels: list, optional
        add custom column names to the generated confidence intervals
          as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<alpha>
    show_progress: boolean, optional
        since this is an iterative fitting algorithm, switching this to True will display some iteration details.
    entry: an array, or pd.Series, of length n
        relative time when a subject entered the study. This is
        useful for left-truncated (not left-censored) observations. If None, all members of the population
        entered study when they were "born": time zero.
    weights: an array, or pd.Series, of length n
        integer weights per observation

    Returns
    -------
    self
        self with new properties like ``cumulative_hazard_``, ``survival_function_``
    """
    # NOTE(review): nan/inf is checked only on lower_bound and
    # positivity only on upper_bound — confirm this asymmetry is intended.
    check_nans_or_infs(lower_bound)
    check_positivity(upper_bound)

    self.upper_bound = np.asarray(pass_for_numeric_dtypes_or_raise_array(upper_bound))
    self.lower_bound = np.asarray(pass_for_numeric_dtypes_or_raise_array(lower_bound))
    if (self.upper_bound < self.lower_bound).any():
        raise ValueError("All upper_bound times must be greater than or equal to lower_bound times.")

    # Infer censoring flags when the caller did not supply them:
    # equal bounds mean the event was observed exactly.
    if event_observed is None:
        event_observed = self.upper_bound == self.lower_bound

    if ((self.lower_bound == self.upper_bound) != event_observed).any():
        raise ValueError(
            "For all rows, lower_bound == upper_bound if and only if event observed = 1 (uncensored). Likewise, lower_bound < upper_bound if and only if event observed = 0 (censored)"
        )

    self._censoring_type = CensoringType.INTERVAL
    # Clip to a strictly positive range before fitting; presumably the
    # fitter works in a log domain — TODO confirm.
    return self._fit(
        (np.clip(self.lower_bound, 1e-20, 1e25), np.clip(self.upper_bound, 1e-20, 1e25)),
        event_observed=event_observed,
        timeline=timeline,
        label=label,
        alpha=alpha,
        ci_labels=ci_labels,
        show_progress=show_progress,
        entry=entry,
        weights=weights,
    )
Fit the model to an interval censored dataset. Parameters ---------- lower_bound: an array, or pd.Series length n, the start of the period the subject experienced the event in. upper_bound: an array, or pd.Series length n, the end of the period the subject experienced the event in. If the value is equal to the corresponding value in lower_bound, then the individual's event was observed (not censored). event_observed: numpy array or pd.Series, optional length n, if left optional, infer from ``lower_bound`` and ``upper_cound`` (if lower_bound==upper_bound then event observed, if lower_bound < upper_bound, then event censored) timeline: list, optional return the estimate at the values in timeline (positively increasing) label: string, optional a string to name the column of the estimate. alpha: float, optional the alpha value in the confidence intervals. Overrides the initializing alpha for this call to fit only. ci_labels: list, optional add custom column names to the generated confidence intervals as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<alpha> show_progress: boolean, optional since this is an iterative fitting algorithm, switching this to True will display some iteration details. entry: an array, or pd.Series, of length n relative time when a subject entered the study. This is useful for left-truncated (not left-censored) observations. If None, all members of the population entered study when they were "born": time zero. weights: an array, or pd.Series, of length n integer weights per observation Returns ------- self self with new properties like ``cumulative_hazard_``, ``survival_function_``
def _connectionLost(self, reason):
    """Called when the protocol connection is lost

    - Log the disconnection.
    - Mark any outstanding requests as unsent so they will be sent when
      a new connection is made.
    - If closing the broker client, mark completion of that process.

    :param reason:
        Failure that indicates the reason for disconnection.
    """
    log.info('%r: Connection closed: %r', self, reason)

    # Drop the dead protocol so nothing tries to write to it.
    self.proto = None

    # Flag every in-flight request for retransmission.
    for pending in self.requests.values():
        pending.sent = False

    if self._dDown:
        # A shutdown was requested; signal that the close completed.
        self._dDown.callback(None)
    elif self.requests:
        # Unsent work remains, so establish a fresh connection.
        self._connect()
Called when the protocol connection is lost - Log the disconnection. - Mark any outstanding requests as unsent so they will be sent when a new connection is made. - If closing the broker client, mark completion of that process. :param reason: Failure that indicates the reason for disconnection.
def fit_allele_specific_predictors(
        self,
        n_models,
        architecture_hyperparameters_list,
        allele,
        peptides,
        affinities,
        inequalities=None,
        train_rounds=None,
        models_dir_for_save=None,
        verbose=0,
        progress_preamble="",
        progress_print_interval=5.0):
    """
    Fit one or more allele specific predictors for a single allele using one
    or more neural network architectures.

    The new predictors are saved in the Class1AffinityPredictor instance
    and will be used on subsequent calls to `predict`.

    Parameters
    ----------
    n_models : int
        Number of neural networks to fit

    architecture_hyperparameters_list : list of dict
        List of hyperparameter sets.

    allele : string

    peptides : `EncodableSequences` or list of string

    affinities : list of float
        nM affinities

    inequalities : list of string, each element one of ">", "<", or "="
        See Class1NeuralNetwork.fit for details.

    train_rounds : sequence of int
        Each training point i will be used on training rounds r for which
        train_rounds[i] > r, r >= 0.

    models_dir_for_save : string, optional
        If specified, the Class1AffinityPredictor is (incrementally) written
        to the given models dir after each neural network is fit.

    verbose : int
        Keras verbosity

    progress_preamble : string
        Optional string of information to include in each progress update

    progress_print_interval : float
        How often (in seconds) to print progress. Set to None to disable.

    Returns
    -------
    list of `Class1NeuralNetwork`
    """
    allele = mhcnames.normalize_allele_name(allele)
    if allele not in self.allele_to_allele_specific_models:
        self.allele_to_allele_specific_models[allele] = []

    encodable_peptides = EncodableSequences.create(peptides)

    # Round 0 always trains on the full data; later rounds (if
    # train_rounds is given) train on progressively masked subsets.
    peptides_affinities_inequalities_per_round = [
        (encodable_peptides, affinities, inequalities)
    ]

    if train_rounds is not None:
        for round in sorted(set(train_rounds)):
            round_mask = train_rounds > round
            if round_mask.any():
                sub_encodable_peptides = EncodableSequences.create(
                    encodable_peptides.sequences[round_mask])
                peptides_affinities_inequalities_per_round.append((
                    sub_encodable_peptides,
                    affinities[round_mask],
                    None if inequalities is None else inequalities[round_mask]))
    n_rounds = len(peptides_affinities_inequalities_per_round)

    n_architectures = len(architecture_hyperparameters_list)

    # Adjust progress info to indicate number of models and
    # architectures.
    pieces = []
    if n_models > 1:
        pieces.append("Model {model_num:2d} / {n_models:2d}")
    if n_architectures > 1:
        pieces.append(
            "Architecture {architecture_num:2d} / {n_architectures:2d}")
    if len(peptides_affinities_inequalities_per_round) > 1:
        pieces.append("Round {round:2d} / {n_rounds:2d}")
    pieces.append("{n_peptides:4d} peptides")
    progress_preamble_template = "[ %s ] {user_progress_preamble}" % (
        ", ".join(pieces))

    models = []
    for model_num in range(n_models):
        for (architecture_num, architecture_hyperparameters) in enumerate(
                architecture_hyperparameters_list):
            model = Class1NeuralNetwork(**architecture_hyperparameters)
            for round_num in range(n_rounds):
                (round_peptides, round_affinities, round_inequalities) = (
                    peptides_affinities_inequalities_per_round[round_num]
                )
                model.fit(
                    round_peptides,
                    round_affinities,
                    inequalities=round_inequalities,
                    verbose=verbose,
                    progress_preamble=progress_preamble_template.format(
                        n_peptides=len(round_peptides),
                        round=round_num,
                        n_rounds=n_rounds,
                        user_progress_preamble=progress_preamble,
                        model_num=model_num + 1,
                        n_models=n_models,
                        architecture_num=architecture_num + 1,
                        n_architectures=n_architectures),
                    progress_print_interval=progress_print_interval)

            # Register the fitted model in the manifest and in the
            # per-allele model list.
            model_name = self.model_name(allele, model_num)
            row = pandas.Series(collections.OrderedDict([
                ("model_name", model_name),
                ("allele", allele),
                ("config_json", json.dumps(model.get_config())),
                ("model", model),
            ])).to_frame().T
            self._manifest_df = pandas.concat(
                [self.manifest_df, row], ignore_index=True)
            self.allele_to_allele_specific_models[allele].append(model)
            if models_dir_for_save:
                # Incremental save after each fitted network.
                self.save(
                    models_dir_for_save, model_names_to_write=[model_name])
            models.append(model)

    self.clear_cache()
    return models
Fit one or more allele specific predictors for a single allele using one or more neural network architectures. The new predictors are saved in the Class1AffinityPredictor instance and will be used on subsequent calls to `predict`. Parameters ---------- n_models : int Number of neural networks to fit architecture_hyperparameters_list : list of dict List of hyperparameter sets. allele : string peptides : `EncodableSequences` or list of string affinities : list of float nM affinities inequalities : list of string, each element one of ">", "<", or "=" See Class1NeuralNetwork.fit for details. train_rounds : sequence of int Each training point i will be used on training rounds r for which train_rounds[i] > r, r >= 0. models_dir_for_save : string, optional If specified, the Class1AffinityPredictor is (incrementally) written to the given models dir after each neural network is fit. verbose : int Keras verbosity progress_preamble : string Optional string of information to include in each progress update progress_print_interval : float How often (in seconds) to print progress. Set to None to disable. Returns ------- list of `Class1NeuralNetwork`
def _convert_file_records(self, file_records):
    """
    Yield a model for each entry in file_records, using
    _notebook_model_from_db or _file_model_from_db depending on the
    result of `guess_type`.
    """
    for entry in file_records:
        kind = self.guess_type(entry['name'], allow_directory=False)
        if kind == 'notebook':
            yield self._notebook_model_from_db(entry, False)
        elif kind == 'file':
            yield self._file_model_from_db(entry, False, None)
        else:
            self.do_500("Unknown file type %s" % kind)
Apply _notebook_model_from_db or _file_model_from_db to each entry in file_records, depending on the result of `guess_type`.
def update_assessment_taken(self, assessment_taken_form):
    """Updates an existing assessment taken.

    arg:    assessment_taken_form
            (osid.assessment.AssessmentTakenForm): the form
            containing the elements to be updated
    raise:  IllegalState - ``assessment_taken_form`` already used in
            an update transaction
    raise:  InvalidArgument - the form contains an invalid value
    raise:  NullArgument - ``assessment_taken_form`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure occurred
    raise:  Unsupported - ``assessment_taken_form`` did not originate
            from ``get_assessment_taken_form_for_update()``
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.ResourceAdminSession.update_resource_template
    collection = JSONClientValidated('assessment',
                                     collection='AssessmentTaken',
                                     runtime=self._runtime)
    # Validate the form's type and mode before touching session state.
    if not isinstance(assessment_taken_form, ABCAssessmentTakenForm):
        raise errors.InvalidArgument('argument type is not an AssessmentTakenForm')
    if not assessment_taken_form.is_for_update():
        raise errors.InvalidArgument('the AssessmentTakenForm is for update only, not create')
    form_id = assessment_taken_form.get_id().get_identifier()
    # The form must have been handed out by this session and not yet consumed.
    try:
        form_state = self._forms[form_id]
    except KeyError:
        raise errors.Unsupported('assessment_taken_form did not originate from this session')
    if form_state == UPDATED:
        raise errors.IllegalState('assessment_taken_form already used in an update transaction')
    if not assessment_taken_form.is_valid():
        raise errors.InvalidArgument('one or more of the form elements is invalid')
    collection.save(assessment_taken_form._my_map)
    self._forms[form_id] = UPDATED
    # Note: this is out of spec. The OSIDs don't require an object to be
    # returned, but one is provided for caller convenience.
    return objects.AssessmentTaken(
        osid_object_map=assessment_taken_form._my_map,
        runtime=self._runtime,
        proxy=self._proxy)
Updates an existing assessment taken.

        arg:    assessment_taken_form
                (osid.assessment.AssessmentTakenForm): the form
                containing the elements to be updated
        raise:  IllegalState - ``assessment_taken_form`` already used in
                an update transaction
        raise:  InvalidArgument - the form contains an invalid value
        raise:  NullArgument - ``assessment_taken_form`` is ``null``
        raise:  OperationFailed - unable to complete request
        raise:  PermissionDenied - authorization failure occurred
        raise:  Unsupported - ``assessment_taken_form`` did not
                originate from ``get_assessment_taken_form_for_update()``
        *compliance: mandatory -- This method must be implemented.*
def _actionsFreqs(self,*args,**kwargs):
    """
    NAME:

       actionsFreqs (_actionsFreqs)

    PURPOSE:

       evaluate the actions and frequencies (jr,lz,jz,Omegar,Omegaphi,Omegaz)

    INPUT:

       Either:

          a) R,vR,vT,z,vz[,phi]: floats or numpy.ndarray [N] phase-space
             values (phi is optional; each can be a Quantity)

          b) Orbit instance: initial condition used if that's it, orbit(t)
             if there is a time given as well as the second argument

       maxn= (default: object-wide default) Use a grid in vec(n) up to this n (zero-based)

       ts= if set, the phase-space points correspond to these times

       _firstFlip= (False) if True and Orbits are given, the backward part
       of the orbit is integrated first and stored in the Orbit object

    OUTPUT:

        (jr,lz,jz,Omegar,Omegaphi,Omegaz)

    HISTORY:

       2013-09-10 - Written - Bovy (IAS)

    """
    # The angles come along for free; simply drop them from the result.
    full_acfs = self._actionsFreqsAngles(*args, **kwargs)
    return tuple(full_acfs[:6])
NAME: actionsFreqs (_actionsFreqs) PURPOSE: evaluate the actions and frequencies (jr,lz,jz,Omegar,Omegaphi,Omegaz) INPUT: Either: a) R,vR,vT,z,vz[,phi]: 1) floats: phase-space value for single object (phi is optional) (each can be a Quantity) 2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity) b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument maxn= (default: object-wide default) Use a grid in vec(n) up to this n (zero-based) ts= if set, the phase-space points correspond to these times (IF NOT SET, WE ASSUME THAT ts IS THAT THAT IS ASSOCIATED WITH THIS OBJECT) _firstFlip= (False) if True and Orbits are given, the backward part of the orbit is integrated first and stored in the Orbit object OUTPUT: (jr,lz,jz,Omegar,Omegaphi,Omegaz) HISTORY: 2013-09-10 - Written - Bovy (IAS)
def monkeycache(apis):
    """
    Feed this a dictionary of api bananas, it spits out processed cache

    Builds a cache dict keyed by verb then subject from a ``listApis``
    style response, recording each api's params, required params and
    whether it is asynchronous. Returns ``{}`` when ``apis`` is ``None``.
    """
    # The old guard `isinstance(type(apis), type(None))` was always False
    # (type(apis) is a class, never a NoneType instance); `apis is None`
    # is the real check.
    if apis is None:
        return {}
    verbs = set()
    cache = {}
    cache['count'] = apis['count']
    cache['asyncapis'] = []

    apilist = apis['api']
    if apilist is None:
        # Bug fix: previously this only printed the warning and then
        # crashed with a TypeError iterating over None; treat a missing
        # api list as empty so a valid (empty) cache is still returned.
        print("[monkeycache] Server response issue, no apis found")
        apilist = []

    for api in apilist:
        name = getvalue(api, 'name')
        verb, subject = splitverbsubject(name)

        apidict = {}
        apidict['name'] = name
        apidict['description'] = getvalue(api, 'description')
        apidict['isasync'] = getvalue(api, 'isasync')
        if apidict['isasync']:
            cache['asyncapis'].append(name)
        apidict['related'] = splitcsvstring(getvalue(api, 'related'))

        required = []
        apiparams = []
        for param in getvalue(api, 'params'):
            apiparam = {}
            apiparam['name'] = getvalue(param, 'name')
            apiparam['description'] = getvalue(param, 'description')
            apiparam['required'] = (getvalue(param, 'required') is True)
            apiparam['length'] = int(getvalue(param, 'length'))
            apiparam['type'] = getvalue(param, 'type')
            apiparam['related'] = splitcsvstring(getvalue(param, 'related'))
            if apiparam['required']:
                required.append(apiparam['name'])
            apiparams.append(apiparam)

        apidict['requiredparams'] = required
        apidict['params'] = apiparams
        if verb not in cache:
            cache[verb] = {}
        cache[verb][subject] = apidict
        verbs.add(verb)

    cache['verbs'] = list(verbs)
    return cache
Feed this a dictionary of api bananas, it spits out processed cache
def nt2codon_rep(ntseq):
    """Represent a nucleotide sequence as a string of codon symbols.

    'Translates' ntseq into a symbolic form where each of the 64 codons
    maps to its own unique single character in the range '\\x80'-'\\xbf'.
    These characters must stay reserved for codons only (they must match
    the corresponding function in the preprocess script and must not be
    used in any custom alphabet). Per-codon symbols allow Pgen computation
    of in-frame nucleotide sequences. Trailing bases that do not complete
    a codon are dropped, so len(ntseq) == 3L gives len(codon_rep) == L.

    Parameters
    ----------
    ntseq : str
        Nucleotide sequence (normally a CDR3 nucleotide sequence),
        upper or lower case, composed only of A, C, G, or T.

    Returns
    -------
    codon_rep : str
        The codon-symbolic representation of ntseq.

    Example
    -------
    >>> nt2codon_rep('TGTGCCTGGAGTGTAGCTCCGGACAGGGGTGGCTACACCTTC')
    '\\xbb\\x96\\xab\\xb8\\x8e\\xb6\\xa5\\x92\\xa8\\xba\\x9a\\x93\\x94\\x9f'
    """
    # One symbol per codon; index = nt1 + 4*nt2 + 16*nt3 (base-4 encoding,
    # using the module-level nt2num base -> digit mapping).
    codon_symbols = '\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf'
    symbols = []
    for pos in range(0, len(ntseq) - 2, 3):
        index = (nt2num[ntseq[pos]]
                 + 4 * nt2num[ntseq[pos + 1]]
                 + 16 * nt2num[ntseq[pos + 2]])
        symbols.append(codon_symbols[index])
    return ''.join(symbols)
Represent nucleotide sequence by sequence of codon symbols. 'Translates' the nucleotide sequence into a symbolic representation of 'amino acids' where each codon gets its own unique character symbol. These characters should be reserved only for representing the 64 individual codons --- note that this means it is important that this function matches the corresponding function in the preprocess script and that any custom alphabet does not use these symbols. Defining symbols for each individual codon allows for Pgen computation of inframe nucleotide sequences. Parameters ---------- ntseq : str A Nucleotide sequence (normally a CDR3 nucleotide sequence) to be 'translated' into the codon - symbol representation. Can be either uppercase or lowercase, but only composed of A, C, G, or T. Returns ------- codon_rep : str The codon - symbolic representation of ntseq. Note that if len(ntseq) == 3L --> len(codon_rep) == L Example -------- >>> nt2codon_rep('TGTGCCTGGAGTGTAGCTCCGGACAGGGGTGGCTACACCTTC') '\xbb\x96\xab\xb8\x8e\xb6\xa5\x92\xa8\xba\x9a\x93\x94\x9f'
def id(self, id):
    """Set the id of this Shift (UUID for this object).

    :param id: The id of this Shift.
    :type: str
    :raises ValueError: if id is None or longer than 255 characters
    """
    # Reject missing and over-long values before assigning.
    if id is None:
        raise ValueError("Invalid value for `id`, must not be `None`")
    if len(id) > 255:
        raise ValueError("Invalid value for `id`, length must be less than `255`")

    self._id = id
Sets the id of this Shift. UUID for this object :param id: The id of this Shift. :type: str
def toarray(vari):
    """
    Convert polynomial array into a numpy.asarray of polynomials.

    Args:
        vari (Poly, numpy.ndarray):
            Input data.

    Returns:
        (numpy.ndarray):
            A numpy array with ``Q.shape==A.shape``.

    Examples:
        >>> poly = cp.prange(3)
        >>> print(poly)
        [1, q0, q0^2]
        >>> array = cp.toarray(poly)
        >>> print(isinstance(array, numpy.ndarray))
        True
        >>> print(array[1])
        q0
    """
    if isinstance(vari, Poly):
        shape = vari.shape
        # numpy.prod(()) returns a float; range() needs a real int.
        size = int(numpy.prod(shape))
        out = numpy.asarray(
            [{} for _ in range(size)],
            dtype=object
        )
        core = vari.A.copy()
        # Bug fix: the original reused the leaked loop variable ``key``
        # after the flattening loop, so only the *last* key's coefficients
        # were ever copied into ``out``. Copy the non-zero coefficients of
        # every key instead.
        for key, values in core.items():
            flat = values.flatten()
            for idx in range(size):
                if not numpy.all(flat[idx] == 0):
                    out[idx][key] = flat[idx]
        for idx in range(size):
            out[idx] = Poly(out[idx], vari.dim, (), vari.dtype)
        return out.reshape(shape)
    return numpy.asarray(vari)
Convert polynomial array into a numpy.asarray of polynomials. Args: vari (Poly, numpy.ndarray): Input data. Returns: (numpy.ndarray): A numpy array with ``Q.shape==A.shape``. Examples: >>> poly = cp.prange(3) >>> print(poly) [1, q0, q0^2] >>> array = cp.toarray(poly) >>> print(isinstance(array, numpy.ndarray)) True >>> print(array[1]) q0
def __on_download_progress_update(self, blocknum, blocksize, totalsize):
    """
    Prints some download progress information

    :param blocknum: number of blocks transferred so far
    :param blocksize: size of each block in bytes
    :param totalsize: total download size in bytes, or <= 0 if unknown
    :return: None
    """
    if not self.__show_download_progress:
        return

    readsofar = blocknum * blocksize
    if totalsize > 0:
        s = "\r%s / %s" % (size(readsofar), size(totalsize))
        sys.stdout.write(s)
        if readsofar >= totalsize:  # near the end
            # Bug fix: this trailing carriage return previously went to
            # stderr while the progress itself went to stdout; keep both
            # on stdout so the progress line is actually reset.
            sys.stdout.write("\r")
    else:  # total size is unknown
        sys.stdout.write("\rread %s" % (size(readsofar)))
Prints some download progress information :param blocknum: :param blocksize: :param totalsize: :return:
def create_equipamento_roteiro(self):
    """Get an instance of equipamento_roteiro services facade.

    The facade is bound to this client's endpoint and credentials.
    """
    credentials = (
        self.networkapi_url,
        self.user,
        self.password,
        self.user_ldap,
    )
    return EquipamentoRoteiro(*credentials)
Get an instance of equipamento_roteiro services facade.
def get_times_modified(self):
    """
    :returns: The total number of times increment_times_modified has
        been called for this resource by all processes, 0 if the key
        has never been set.
    :rtype: int
    """
    raw = self.conn.client.get(self.times_modified_key)
    # A missing key comes back as None, which means "never modified".
    return 0 if raw is None else int(raw)
:returns: The total number of times increment_times_modified has been called for this resource by all processes. :rtype: int
def faces_to_path(mesh, face_ids=None, **kwargs):
    """
    Given a mesh and face indices find the outline edges and
    turn them into a Path3D.

    Parameters
    ---------
    mesh : trimesh.Trimesh
      Triangulated surface in 3D
    face_ids : (n,) int
      Indexes referencing mesh.faces

    Returns
    ---------
    kwargs : dict
      Kwargs for Path3D constructor
    """
    if face_ids is None:
        edges = mesh.edges_sorted
    else:
        # Edge ordering lets us slice three edge pairs per face as one row.
        edges = mesh.edges_sorted.reshape((-1, 6))[face_ids].reshape((-1, 2))

    # An edge which occurs only once lies on the boundary.
    boundary = grouping.group_rows(edges, require_count=1)

    # Merge the path data for the boundary edges into the caller's kwargs.
    kwargs.update(edges_to_path(edges=edges[boundary],
                                vertices=mesh.vertices))
    return kwargs
Given a mesh and face indices find the outline edges and turn them into a Path3D. Parameters --------- mesh : trimesh.Trimesh Triangulated surface in 3D face_ids : (n,) int Indexes referencing mesh.faces Returns --------- kwargs : dict Kwargs for Path3D constructor
def mkrngs(self):
    """
    Transform boolean arrays into list of limit pairs.

    Gets Time limits of signal/background boolean arrays and stores them
    as sigrng and bkgrng arrays. These arrays can be saved by 'save_ranges'
    in the analyse object. Also labels each contiguous signal region with
    a trace number in self.ns and records the trace count in self.n.
    """
    def limits(mask):
        # Convert a boolean mask into [start, stop] Time pairs;
        # fall back to a single NaN pair when the mask is empty.
        idx = bool_2_indices(mask)
        if idx is None:
            return [[np.nan, np.nan]]
        return self.Time[idx]

    self.bkgrng = limits(self.bkg)
    self.sigrng = limits(self.sig)
    self.trnrng = limits(self.trn)

    # Number each contiguous signal region: ns[i] holds the trace label
    # for signal samples, 0 elsewhere. The label advances whenever a
    # signal sample is followed by a non-signal sample.
    self.ns = np.zeros(self.Time.size)
    label = 1
    for i in range(len(self.sig) - 1):
        if self.sig[i]:
            self.ns[i] = label
            if ~self.sig[i + 1]:
                label += 1
    self.n = int(max(self.ns))  # record number of traces
    return
Transform boolean arrays into list of limit pairs. Gets Time limits of signal/background boolean arrays and stores them as sigrng and bkgrng arrays. These arrays can be saved by 'save_ranges' in the analyse object.
def unvectorize_args(fn):
    """
    Wrap a function that expects a single array argument so it can be
    called with the array's elements as separate positional arguments.

    This is the inverse of `revrand.utils.decorators.vectorize_args`:
    functions written in the `scipy.optimize` style, such as `rosen`,
    take one ndarray of parameters; the wrapper packs its positional
    arguments into an array before delegating, which makes evaluating
    such functions on e.g. `meshgrid` coordinates straightforward.

    See Also
    --------
    revrand.utils.decorators.vectorize_args

    Examples
    --------
    >>> from scipy.optimize import rosen
    >>> unvectorize_args(rosen)(0.5, 1.5)
    156.5
    >>> unvectorize_args(rosen)(0.5, 1.5, 1., 0., 0.2)
    418.0
    """
    @wraps(fn)
    def unvectorized(*args):
        # Pack the positional arguments back into a single ndarray.
        return fn(np.asarray(args))
    return unvectorized
See Also -------- revrand.utils.decorators.vectorize_args Examples -------- The Rosenbrock function is commonly used as a performance test problem for optimization algorithms. It and its derivatives are included in `scipy.optimize` and is implemented as expected by the family of optimization methods in `scipy.optimize`. def rosen(x): return sum(100.0*(x[1:]-x[:-1]**2.0)**2.0 + (1-x[:-1])**2.0) This representation makes it unwieldy to perform operations such as plotting since it is less straightforward to evaluate the function on a `meshgrid`. This decorator helps reconcile the differences between these representations. >>> from scipy.optimize import rosen >>> rosen(np.array([0.5, 1.5])) 156.5 >>> unvectorize_args(rosen)(0.5, 1.5) ... # doctest: +NORMALIZE_WHITESPACE 156.5 The `rosen` function is implemented in such a way that it generalizes to the Rosenbrock function of any number of variables. This decorator supports can support any functions defined in a similar manner. The function with any number of arguments are well-defined: >>> rosen(np.array([0.5, 1.5, 1., 0., 0.2])) 418.0 >>> unvectorize_args(rosen)(0.5, 1.5, 1., 0., 0.2) ... # can accept any variable number of arguments! 418.0 Make it easier to work with for other operations >>> rosen_ = unvectorize_args(rosen) >>> y, x = np.mgrid[0:2.1:0.05, -1:1.2:0.05] >>> z = rosen_(x, y) >>> z.round(2) array([[ 104. , 85.25, 69.22, ..., 121.55, 146.42, 174.92], [ 94.25, 76.48, 61.37, ..., 110.78, 134.57, 161.95], [ 85. , 68.2 , 54.02, ..., 100.5 , 123.22, 149.47], ..., [ 94.25, 113.53, 133.57, ..., 71.83, 54.77, 39.4 ], [ 104. , 124.25, 145.22, ..., 80.55, 62.42, 45.92], [ 114.25, 135.48, 157.37, ..., 89.78, 70.57, 52.95]]) Now this can be directly plotted with `mpl_toolkits.mplot3d.Axes3D` and `ax.plot_surface`.
def get_count(self, unique_identifier, metric, start_date=None, end_date=None, **kwargs):
    """
    Gets the count for the ``metric`` for ``unique_identifier``. You can
    specify a ``start_date`` and an ``end_date``, to only get metrics
    within that time range.

    :param unique_identifier: Unique string identifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param start_date: Get the specified metrics after this date
    :param end_date: Get the specified metrics before this date
    :return: The count for the metric, 0 otherwise
    """
    result = None
    if start_date and end_date:
        # Normalize the range so start_date <= end_date, then coerce plain
        # dates into datetimes (at midnight) so later arithmetic is uniform.
        start_date, end_date = (start_date, end_date,) if start_date < end_date else (end_date, start_date,)
        start_date = start_date if hasattr(start_date, 'date') else datetime.datetime.combine(start_date, datetime.time())
        end_date = end_date if hasattr(end_date, 'date') else datetime.datetime.combine(end_date, datetime.time())
        # First day of every month inside [start_date, end_date].
        monthly_metrics_dates = list(rrule.rrule(rrule.MONTHLY, dtstart=start_date, bymonthday=1, until=end_date))
        #We can sorta optimize this by getting most of the data by month
        if len(monthly_metrics_dates) >= 3:
            # Long range: fetch whole months in bulk plus the partial
            # leading/trailing spans on either side of those months.
            with self._analytics_backend.map() as conn:
                monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results = self._get_counts(
                    conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date)
            # NOTE(review): the parsing below assumes the mapped results are
            # resolved once the ``with`` block exits -- confirm against the
            # backend's pipelining semantics.
            monthly_metric_series, monthly_metric_results = self._parse_and_process_metrics(monthly_metric_series, monthly_metric_results)
            starting_metric_series, starting_metric_results = self._parse_and_process_metrics(starting_metric_series, starting_metric_results)
            ending_metric_series, ending_metric_results = self._parse_and_process_metrics(ending_metric_series, ending_metric_results)
            result = sum(monthly_metric_results.values()) + sum(starting_metric_results.values()) + sum(ending_metric_results.values())
        else:
            # Short range: a single day-granularity query covers it.
            diff = end_date - start_date
            metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=diff.days + 1)
            result = sum(metric_results[1].values())
    else:
        # No complete range given: fall back to the all-time counter key.
        try:
            result = int(self._analytics_backend.get(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric,)))
        except TypeError:
            # Missing key -> backend returns None -> int(None) raises TypeError.
            result = 0
    return result
Gets the count for the ``metric`` for ``unique_identifier``. You can specify a ``start_date`` and an ``end_date``, to only get metrics within that time range.

        :param unique_identifier: Unique string identifying the object this metric is for
        :param metric: A unique name for the metric you want to track
        :param start_date: Get the specified metrics after this date
        :param end_date: Get the specified metrics before this date
        :return: The count for the metric, 0 otherwise
def start(self, labels=None):
    """Start specified timer(s).

    Parameters
    ----------
    labels : string or list, optional (default None)
      Specify the label(s) of the timer(s) to be started. If it is
      ``None``, start the default timer with label specified by the
      ``dfltlbl`` parameter of :meth:`__init__`.
    """
    if labels is None:
        labels = self.dfltlbl
    # Normalize a single label into a one-element list.
    if not isinstance(labels, (list, tuple)):
        labels = [labels]

    now = timer()
    for lbl in labels:
        if lbl not in self.td:
            # First use of this label: zero its accumulator.
            self.td[lbl] = 0.0
            self.t0[lbl] = None
        # Record the start time only if this timer isn't already running.
        if self.t0[lbl] is None:
            self.t0[lbl] = now
Start specified timer(s). Parameters ---------- labels : string or list, optional (default None) Specify the label(s) of the timer(s) to be started. If it is ``None``, start the default timer with label specified by the ``dfltlbl`` parameter of :meth:`__init__`.
def split_semicolon(line, maxsplit=None):
    r"""Split a line on semicolons, ignoring backslash-escaped semicolons.

    :param line: line to split
    :type line: str
    :param maxsplit: maximal number of split (if None, no limit)
    :type maxsplit: None | int
    :return: split line
    :rtype: list

    >>> split_semicolon('a,b;c;;g')
    ['a,b', 'c', '', 'g']

    >>> split_semicolon('a,b;c;;g', 2)
    ['a,b', 'c', ';g']

    >>> split_semicolon(r'a,b;c\;;g', 2)
    ['a,b', 'c;', 'g']
    """
    # Split naively first, then re-join the parts whose separator was
    # escaped (or which exceed maxsplit).
    parts = line.split(';')
    n_parts = len(parts)

    # No limit requested: allow as many splits as there are parts.
    if maxsplit is None or maxsplit < 0:
        maxsplit = n_parts

    idx = 0
    while idx < n_parts - 1:
        # A trailing '\' means this part's separator was escaped.
        escaped = parts[idx].endswith('\\')
        if escaped:
            parts[idx] = parts[idx][:-1]  # drop the escape character

        # Re-join with the next part when the separator was escaped or the
        # split budget is exhausted; otherwise move on. We only advance idx
        # when no join happened, since the merged part may itself end in '\'.
        if (escaped or idx >= maxsplit) and idx < n_parts - 1:
            parts[idx] = ";".join([parts[idx], parts[idx + 1]])
            del parts[idx + 1]
            n_parts -= 1
        else:
            idx += 1

    return parts
r"""Split a line on semicolons characters but not on the escaped semicolons :param line: line to split :type line: str :param maxsplit: maximal number of split (if None, no limit) :type maxsplit: None | int :return: split line :rtype: list >>> split_semicolon('a,b;c;;g') ['a,b', 'c', '', 'g'] >>> split_semicolon('a,b;c;;g', 2) ['a,b', 'c', ';g'] >>> split_semicolon(r'a,b;c\;;g', 2) ['a,b', 'c;', 'g']
def _convert_unsigned(data, fmt):
    """Convert data from signed to unsigned in bulk.

    Packs the samples with the signed struct code ``fmt`` and reinterprets
    the same bytes with the corresponding upper-case (unsigned) code.

    :param data: sequence of signed integers
    :param fmt: single-character signed struct format code (e.g. 'b', 'h')
    :return: tuple of the unsigned equivalents
    """
    # Build the format string once; struct accepts str directly, so the
    # previous duplicate formatting and utf-8 encoding were unnecessary.
    layout = "{}{}".format(len(data), fmt)
    return struct.unpack(layout.upper(), struct.pack(layout, *data))
Convert data from signed to unsigned in bulk.