Unnamed: 0
int64
0
389k
code
stringlengths
26
79.6k
docstring
stringlengths
1
46.9k
371,500
def _fetch_all(cls, api_key, endpoint=None, offset=0, limit=25, **kwargs): output = [] qp = kwargs.copy() limit = max(1, min(100, limit)) maximum = kwargs.get() qp[] = min(limit, maximum) if maximum is not None else limit qp[] = offset more, total = None, None while True: entities, options = cls._fetch_page( api_key=api_key, endpoint=endpoint, **qp ) output += entities more = options.get() limit = options.get() offset = options.get() total = options.get() if more is None: if total is None or offset is None: break more = (limit + offset) < total if not more or (maximum is not None and len(output) >= maximum): break qp[] = limit qp[] = offset + limit return output
Call `self._fetch_page` for as many pages as exist. TODO: should be extended to do async page fetches if API allows it via exposing total value. Returns a list of `cls` instances.
371,501
def random(cls, length, bit_prob=.5): assert isinstance(length, int) and length >= 0 assert isinstance(bit_prob, (int, float)) and 0 <= bit_prob <= 1 bits = numpy.random.choice( [False, True], size=(length,), p=[1-bit_prob, bit_prob] ) bits.flags.writeable = False return cls(bits)
Create a bit string of the given length, with the probability of each bit being set equal to bit_prob, which defaults to .5. Usage: # Create a random BitString of length 10 with mostly zeros. bits = BitString.random(10, bit_prob=.1) Arguments: length: An int, indicating the desired length of the result. bit_prob: A float in the range [0, 1]. This is the probability of any given bit in the result having a value of 1; default is .5, giving 0 and 1 equal probabilities of appearance for each bit's value. Return: A randomly generated BitString instance of the requested length.
371,502
def to_wire_dict (self): return dict(valid=self.valid, extern=self.extern[0], result=self.result, warnings=self.warnings[:], name=self.name or u"", title=self.get_title(), parent_url=self.parent_url or u"", base_ref=self.base_ref or u"", base_url=self.base_url or u"", url=self.url or u"", domain=(self.urlparts[1] if self.urlparts else u""), checktime=self.checktime, dltime=self.dltime, size=self.size, info=self.info, line=self.line, column=self.column, page=self.page, cache_url=self.cache_url, content_type=self.content_type, level=self.recursion_level, modified=self.modified, )
Return a simplified transport object for logging and caching. The transport object must contain these attributes: - url_data.valid: bool Indicates if URL is valid - url_data.result: unicode Result string - url_data.warnings: list of tuples (tag, warning message) List of tagged warnings for this URL. - url_data.name: unicode string or None name of URL (eg. filename or link name) - url_data.parent_url: unicode or None Parent URL - url_data.base_ref: unicode HTML base reference URL of parent - url_data.url: unicode Fully qualified URL. - url_data.domain: unicode URL domain part. - url_data.checktime: int Number of seconds needed to check this link, default: zero. - url_data.dltime: int Number of seconds needed to download URL content, default: -1 - url_data.size: int Size of downloaded URL content, default: -1 - url_data.info: list of unicode Additional information about this URL. - url_data.line: int Line number of this URL at parent document, or -1 - url_data.column: int Column number of this URL at parent document, or -1 - url_data.page: int Page number of this URL at parent document, or -1 - url_data.cache_url: unicode Cache url for this URL. - url_data.content_type: unicode MIME content type for URL content. - url_data.level: int Recursion level until reaching this URL from start URL - url_data.last_modified: datetime Last modification date of retrieved page (or None).
371,503
def _read(self, stream, text, byte_order): dtype = self.dtype(byte_order) if text: self._read_txt(stream) elif _can_mmap(stream) and not self._have_list: num_bytes = self.count * dtype.itemsize offset = stream.tell() stream.seek(0, 2) max_bytes = stream.tell() - offset if max_bytes < num_bytes: raise PlyElementParseError("early end-of-file", self, max_bytes // dtype.itemsize) self._data = _np.memmap(stream, dtype, , offset, self.count) stream.seek(offset + self.count * dtype.itemsize) else: self._read_bin(stream, byte_order) self._check_sanity()
Read the actual data from a PLY file.
371,504
def free(self, connection): LOGGER.debug(, self.id, id(connection)) try: self.connection_handle(connection).free() except KeyError: raise ConnectionNotFoundError(self.id, id(connection)) if self.idle_connections == list(self.connections.values()): with self._lock: self.idle_start = self.time_method() LOGGER.debug(, self.id, id(connection))
Free the connection from use by the session that was using it. :param connection: The connection to free :type connection: psycopg2.extensions.connection :raises: ConnectionNotFoundError
371,505
def get_gcd(a, b): "Return greatest common divisor for a and b." while a: a, b = b % a, a return b
Return greatest common divisor for a and b.
371,506
def _next_page(self): if self._last_page_seen: raise StopIteration new, self._last_page_seen = self.conn.query_multiple(self.object_type, self._next_page_index, self.url_params, self.query_params) self._next_page_index += 1 if len(new) == 0: self._last_page_seen = True else: self._results += new
Fetch the next page of the query.
371,507
def list_instances(self): resp = self.instance_admin_client.list_instances(self.project_path) instances = [Instance.from_pb(instance, self) for instance in resp.instances] return instances, resp.failed_locations
List instances owned by the project. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_list_instances] :end-before: [END bigtable_list_instances] :rtype: tuple :returns: (instances, failed_locations), where 'instances' is list of :class:`google.cloud.bigtable.instance.Instance`, and 'failed_locations' is a list of locations which could not be resolved.
371,508
def search(self, CorpNum, DType, SDate, EDate, State, ItemCode, Page, PerPage, Order, UserID=None, QString=None): if DType == None or DType == : raise PopbillException(-99999999, "μΌμžμœ ν˜•μ΄ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.") if SDate == None or SDate == : raise PopbillException(-99999999, "μ‹œμž‘μΌμžκ°€ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.") if EDate == None or EDate == : raise PopbillException(-99999999, "μ’…λ£ŒμΌμžκ°€ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.") uri = uri += + DType uri += + SDate uri += + EDate uri += + .join(State) uri += + .join(ItemCode) uri += + str(Page) uri += + str(PerPage) uri += + Order if QString is not None: uri += + QString return self._httpget(uri, CorpNum, UserID)
λͺ©λ‘ 쑰회 args CorpNum : νŒλΉŒνšŒμ› μ‚¬μ—…μžλ²ˆν˜Έ DType : μΌμžμœ ν˜•, R-λ“±λ‘μΌμ‹œ, W-μž‘μ„±μΌμž, I-λ°œν–‰μΌμ‹œ 쀑 택 1 SDate : μ‹œμž‘μΌμž, ν‘œμ‹œν˜•μ‹(yyyyMMdd) EDate : μ’…λ£ŒμΌμž, ν‘œμ‹œν˜•μ‹(yyyyMMdd) State : μƒνƒœμ½”λ“œ, 2,3번째 μžλ¦¬μ— μ™€μΌλ“œμΉ΄λ“œ(*) μ‚¬μš©κ°€λŠ₯ ItemCode : λͺ…μ„Έμ„œ μ’…λ₯˜μ½”λ“œ λ°°μ—΄, 121-λͺ…μ„Έμ„œ, 122-μ²­κ΅¬μ„œ, 123-κ²¬μ μ„œ, 124-λ°œμ£Όμ„œ 125-μž…κΈˆν‘œ, 126-영수증 Page : νŽ˜μ΄μ§€λ²ˆν˜Έ PerPage : νŽ˜μ΄μ§€λ‹Ή λͺ©λ‘κ°œμˆ˜ Order : μ •λ ¬λ°©ν–₯, D-λ‚΄λ¦Όμ°¨μˆœ, A-μ˜€λ¦„μ°¨μˆœ QString : 거래처 정보, 거래처 μƒν˜Έ λ˜λŠ” μ‚¬μ—…μžλ“±λ‘λ²ˆν˜Έ 기재, λ―ΈκΈ°μž¬μ‹œ μ „μ²΄μ‘°νšŒ UserID : 팝빌 νšŒμ›μ•„μ΄λ””
371,509
def _MergeEntities(self, a, b): def _MergeAgencyId(a_agency_id, b_agency_id): a_agency_id = a_agency_id or None b_agency_id = b_agency_id or None return self._MergeIdentical(a_agency_id, b_agency_id) scheme = {: _MergeAgencyId, : self._MergeIdentical, : self._MergeIdentical, : self._MergeIdentical} return self._SchemedMerge(scheme, a, b)
Merges two agencies. To be merged, they are required to have the same id, name, url and timezone. The remaining language attribute is taken from the new agency. Args: a: The first agency. b: The second agency. Returns: The merged agency. Raises: MergeError: The agencies could not be merged.
371,510
def hide_defaults(self): for k in list(self.fields.keys()): if k in self.default_fields: if self.default_fields[k] == self.fields[k]: del(self.fields[k]) self.payload.hide_defaults()
Removes fields' values that are the same as default values.
371,511
def command(self, outfile, configfile, pix): params = dict(script=self.config[][], config=configfile, outfile=outfile, nside=self.nside_likelihood, pix=pix, verbose= if self.verbose else ) cmd = %params return cmd
Generate the command for running the likelihood scan.
371,512
def find_and_modify(self, query=None, update=None): update = update or {} for document in self.find(query=query): document.update(update) self.update(document)
Finds documents in this collection that match a given query and updates them
371,513
async def write_message_data(self, data: bytes, timeout: NumType = None) -> None: data = LINE_ENDINGS_REGEX.sub(b"\r\n", data) data = PERIOD_REGEX.sub(b"..", data) if not data.endswith(b"\r\n"): data += b"\r\n" data += b".\r\n" await self.write_and_drain(data, timeout=timeout)
Encode and write email message data. Automatically quotes lines beginning with a period per RFC821. Lone \\\\r and \\\\n characters are converted to \\\\r\\\\n characters.
371,514
def get_node_values( self, feature=None, show_root=False, show_tips=False, ): ndict = self.get_node_dict(return_internal=True, return_nodes=True) nodes = [ndict[i] for i in range(self.nnodes)[::-1]] if feature: vals = [i.__getattribute__(feature) if hasattr(i, feature) else "" for i in nodes] else: vals = [" " for i in nodes] if not show_root: vals = [i if not j.is_root() else "" for i, j in zip(vals, nodes)] if not show_tips: vals = [i if not j.is_leaf() else "" for i, j in zip(vals, nodes)] try: if all([Decimal(str(i)) % 1 == 0 for i in vals if i]): vals = [int(i) if isinstance(i, float) else i for i in vals] except Exception: pass return vals
Returns node values from tree object in node plot order. To modify values you must modify the .treenode object directly by setting new 'features'. For example for node in ttree.treenode.traverse(): node.add_feature("PP", 100) By default node and tip values are hidden (set to "") so that they are not shown on the tree plot. To include values for these nodes use the 'show_root'=True, or 'show_tips'=True arguments. tree.get_node_values("support", True, True)
371,515
def get_fun(returner, fun): * returners = salt.loader.returners(__opts__, __salt__) return returners[.format(returner)](fun)
Return info about last time fun was called on each minion CLI Example: .. code-block:: bash salt '*' ret.get_fun mysql network.interfaces
371,516
def fingerprint(channel_samples: list, Fs: int = DEFAULT_FS, wsize: int = DEFAULT_WINDOW_SIZE, wratio: Union[int, float] = DEFAULT_OVERLAP_RATIO, fan_value: int = DEFAULT_FAN_VALUE, amp_min: Union[int, float] = DEFAULT_AMP_MIN)-> Iterator[tuple]: arr2D = mlab.specgram( channel_samples, NFFT=wsize, Fs=Fs, window=mlab.window_hanning, noverlap=int(wsize * wratio))[0] arr2D = 10 * np.log10(arr2D) arr2D[arr2D == -np.inf] = 0 local_maxima = get_2D_peaks(arr2D, plot=False, amp_min=amp_min) return generate_hashes(local_maxima, fan_value=fan_value)
FFT the channel, log transform output, find local maxima, then return locally sensitive hashes. #
371,517
def set_classifier_interface_params(spec, features, class_labels, model_accessor_for_class_labels, output_features = None): features = _fm.process_or_validate_features(features) if class_labels is None: raise ValueError("List of class labels must be provided.") n_classes = len(class_labels) output_features = _fm.process_or_validate_classifier_output_features(output_features, class_labels) if len(output_features) == 1: predicted_class_output, pred_cl_type = output_features[0] score_output = None elif len(output_features) == 2: predicted_class_output, pred_cl_type = output_features[0] score_output, score_output_type = output_features[1] else: raise ValueError("Provided output classes for a classifier must be " "a list of features, predicted class and (optionally) class_score.") spec.description.predictedFeatureName = predicted_class_output if not (pred_cl_type == datatypes.Int64() or pred_cl_type == datatypes.String()): raise ValueError("Provided predicted class output type not Int64 or String (%s)." % repr(pred_cl_type)) if score_output is not None: if not isinstance(score_output_type, datatypes.Dictionary): raise ValueError("Provided class score output type not a Dictionary (%s)." 
% repr(score_output_type)) if score_output_type.key_type != pred_cl_type: raise ValueError(("Provided class score output (%s) key_type (%s) does not " "match type of class prediction (%s).") % (score_output, repr(score_output_type.key_type), repr(pred_cl_type))) spec.description.predictedProbabilitiesName = score_output for index, (cur_input_name, input_type) in enumerate(features): input_ = spec.description.input.add() input_.name = cur_input_name datatypes._set_datatype(input_.type, input_type) for index, (cur_output_name, output_type) in enumerate(output_features): output_ = spec.description.output.add() output_.name = cur_output_name datatypes._set_datatype(output_.type, output_type) if pred_cl_type == datatypes.String(): try: for c in class_labels: getattr(spec, model_accessor_for_class_labels).stringClassLabels.vector.append(str(c)) except AttributeError: pass else: for c in class_labels: conv_error = False try: if not (int(c) == c): conv_error = True except: conv_error = True if conv_error: raise TypeError(("Cannot cast class to an int type " % str(c)) + "(class type determined by type of first class).") try: getattr(spec, model_accessor_for_class_labels).int64ClassLabels.vector.append(int(c)) except AttributeError: break return spec
Common utilities to set the regression interface params.
371,518
def enable_asynchronous(self): def is_monkey_patched(): try: from gevent import monkey, socket except ImportError: return False if hasattr(monkey, "saved"): return "socket" in monkey.saved return gevent.socket.socket == socket.socket if not is_monkey_patched(): raise Exception("To activate asynchonoucity, please monkey patch" " the socket module with gevent") return True
Check if socket have been monkey patched by gevent
371,519
def get_extra_path(name): helper_name, _, key = name.partition(".") helper = path_helpers.get(helper_name) if not helper: raise ValueError("Helper not found.".format(helper)) if name not in path_cache: extra_paths = helper.extra_paths() path_cache.update(extra_paths) extra_path = path_cache.get(name) if not extra_path: raise ValueError("Helper has no path called {1}".format(helper_name, name)) return extra_path
:param name: name in format helper.path_name sip.default_sip_dir
371,520
def handleOACK(self, pkt): if len(pkt.options.keys()) > 0: if pkt.match_options(self.context.options): log.info("Successful negotiation of options") self.context.options = pkt.options for key in self.context.options: log.info(" %s = %s" % (key, self.context.options[key])) else: log.error("Failed to negotiate options") raise TftpException("Failed to negotiate options") else: raise TftpException("No options found in OACK")
This method handles an OACK from the server, syncing any accepted options.
371,521
def set_path(self, path): if os.path.isabs(path): path = os.path.normpath(os.path.join(self.cwd, path)) self.path = path self.relative = os.path.relpath(self.path, self.base)
Set the path of the file.
371,522
def snippets(self): return [strip_suffix(f, ) for f in self._stripped_files if self._snippets_pattern.match(f)]
Get all snippets in this DAP
371,523
def download(self, overwrite=True): if overwrite or not os.path.exists(self.file_path): _, f = tempfile.mkstemp() try: urlretrieve(self.DOWNLOAD_URL, f) extract_csv(f, self.file_path) finally: os.remove(f)
Download the zipcodes CSV file. If ``overwrite`` is set to False, the file won't be downloaded if it already exists.
371,524
def render(self, progress, width=None, status=None): current_pct = int(progress * 100 + 0.1) return RenderResult(rendered="%3d%%" % current_pct, next_progress=(current_pct + 1) / 100)
Render the widget.
371,525
def set_stream_stats(self, rx_ports=None, tx_ports=None, start_offset=40, sequence_checking=True, data_integrity=True, timestamp=True): if not rx_ports: rx_ports = self.ports.values() if not tx_ports: tx_ports = {} for port in self.ports.values(): tx_ports[port] = port.streams.values() groupIdOffset = start_offset signatureOffset = start_offset + 4 next_offset = start_offset + 8 if sequence_checking: sequenceNumberOffset = next_offset next_offset += 4 if data_integrity: di_signatureOffset = next_offset for port in rx_ports: modes = [] modes.append(IxeReceiveMode.widePacketGroup) port.packetGroup.groupIdOffset = groupIdOffset port.packetGroup.signatureOffset = signatureOffset if sequence_checking and int(port.isValidFeature()): modes.append(IxeReceiveMode.sequenceChecking) port.packetGroup.sequenceNumberOffset = sequenceNumberOffset if data_integrity and int(port.isValidFeature()): modes.append(IxeReceiveMode.dataIntegrity) port.dataIntegrity.signatureOffset = di_signatureOffset if timestamp and int(port.isValidFeature()): port.dataIntegrity.enableTimeStamp = True else: port.dataIntegrity.enableTimeStamp = False port.set_receive_modes(*modes) port.write() for port, streams in tx_ports.items(): for stream in streams: stream.packetGroup.insertSignature = True stream.packetGroup.groupIdOffset = groupIdOffset stream.packetGroup.signatureOffset = signatureOffset if sequence_checking: stream.packetGroup.insertSequenceSignature = True stream.packetGroup.sequenceNumberOffset = sequenceNumberOffset if data_integrity and int(port.isValidFeature()): stream.dataIntegrity.insertSignature = True stream.dataIntegrity.signatureOffset = di_signatureOffset if timestamp: stream.enableTimestamp = True else: stream.enableTimestamp = False port.write()
Set TX ports and RX streams for stream statistics. :param ports: list of ports to set RX pgs. If empty set for all ports. :type ports: list[ixexplorer.ixe_port.IxePort] :param tx_ports: list of streams to set TX pgs. If empty set for all streams. :type tx_ports: dict[ixexplorer.ixe_port.IxePort, list[ixexplorer.ixe_stream.IxeStream]] :param sequence_checking: True - enable sequence checkbox, False - disable :param data_integrity: True - enable data integrity checkbox, False - disable :param timestamp: True - enable timestamp checkbox, False - disable :param start_offset: start offset for signatures (group ID, signature, sequence)
371,526
def output_is_valid(self, process_data): if self.METADATA["data_type"] == "raster": return ( is_numpy_or_masked_array(process_data) or is_numpy_or_masked_array_with_tags(process_data) ) elif self.METADATA["data_type"] == "vector": return is_feature_list(process_data)
Check whether process output is allowed with output driver. Parameters ---------- process_data : raw process output Returns ------- True or False
371,527
def fraction(value, allow_empty = False, minimum = None, maximum = None, **kwargs): try: value = _numeric_coercion(value, coercion_function = fractions.Fraction, allow_empty = allow_empty, minimum = minimum, maximum = maximum) except (errors.EmptyValueError, errors.CannotCoerceError, errors.MinimumValueError, errors.MaximumValueError) as error: raise error except Exception as error: raise errors.CannotCoerceError( % value) return value
Validate that ``value`` is a :class:`Fraction <python:fractions.Fraction>`. :param value: The value to validate. :param allow_empty: If ``True``, returns :obj:`None <python:None>` if ``value`` is :obj:`None <python:None>`. If ``False``, raises a :class:`EmptyValueError <validator_collection.errors.EmptyValueError>` if ``value`` is :obj:`None <python:None>`. Defaults to ``False``. :type allow_empty: :class:`bool <python:bool>` :returns: ``value`` / :obj:`None <python:None>` :rtype: :class:`Fraction <python:fractions.Fraction>` / :obj:`None <python:None>` :raises EmptyValueError: if ``value`` is :obj:`None <python:None>` and ``allow_empty`` is ``False`` :raises MinimumValueError: if ``minimum`` is supplied and ``value`` is less than the ``minimum`` :raises MaximumValueError: if ``maximum`` is supplied and ``value`` is more than the ``maximum`` :raises CannotCoerceError: if unable to coerce ``value`` to a :class:`Fraction <python:fractions.Fraction>`
371,528
def normalize_cjk_fullwidth_ascii(seq: str) -> str: def convert(char: str) -> str: code_point = ord(char) if not 0xFF01 <= code_point <= 0xFF5E: return char return chr(code_point - 0xFEE0) return .join(map(convert, seq))
Conver fullwith ASCII to halfwidth ASCII. See https://en.wikipedia.org/wiki/Halfwidth_and_fullwidth_forms
371,529
def guess_payload_class(self, payload): under_layer = self.underlayer tzsp_header = None while under_layer: if isinstance(under_layer, TZSP): tzsp_header = under_layer break under_layer = under_layer.underlayer if tzsp_header: return tzsp_header.get_encapsulated_payload_class() else: raise TZSPStructureException()
the type of the payload encapsulation is given be the outer TZSP layers attribute encapsulation_protocol # noqa: E501
371,530
def _set_static_ip(name, session, vm_): ipv4_cidr = ipv4_gw = if in vm_.keys(): log.debug() ipv4_gw = vm_[] if in vm_.keys(): log.debug() ipv4_cidr = vm_[] log.debug() set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)
Set static IP during create() if defined
371,531
def _check_chn_type(channels, available_channels): chn_list_standardized = [] devices = list(available_channels.keys()) for dev_nbr, device in enumerate(devices): if channels is not None: sub_unit = channels[dev_nbr] for channel in sub_unit: if channel in available_channels[devices[dev_nbr]]: continue else: raise RuntimeError("At least one of the specified channels is not available in " "the acquisition file.") chn_list_standardized.append(sub_unit) else: chn_list_standardized.append(available_channels[device]) return chn_list_standardized
Function used for checking weather the elements in "channels" input are coincident with the available channels. ---------- Parameters ---------- channels : list [[mac_address_1_channel_1 <int>, mac_address_1_channel_2 <int>...], [mac_address_2_channel_1 <int>...]...] From which channels will the data be loaded. available_channels : dict Dictionary with the list of all the available channels per device. Returns ------- out : list It is returned a list of the selected channels in a standardized format.
371,532
def get_suggested_repositories(self): if self.suggested_repositories is None: repository_set = list() for term_count in range(5, 2, -1): query = self.__get_query_for_repos(term_count=term_count) repository_set.extend(self.__get_repos_for_query(query)) catchy_repos = GitSuggest.minus( repository_set, self.user_starred_repositories ) filtered_repos = [] if len(catchy_repos) > 0: for repo in catchy_repos: if ( repo is not None and repo.description is not None and len(repo.description) <= GitSuggest.MAX_DESC_LEN ): filtered_repos.append(repo) filtered_repos = sorted( filtered_repos, key=attrgetter("stargazers_count"), reverse=True, ) self.suggested_repositories = GitSuggest.get_unique_repositories( filtered_repos ) for repository in self.suggested_repositories: yield repository
Method to procure suggested repositories for the user. :return: Iterator to procure suggested repositories for the user.
371,533
def get_instruments(self, name=None): if name: return self.get_instruments_by_name(name) return sorted( self._instruments.items(), key=lambda s: s[0].lower())
:returns: sorted list of (mount, instrument)
371,534
def load_vocab(vocab_file): vocab = collections.OrderedDict() index = 0 with io.open(vocab_file, ) as reader: while True: token = reader.readline() if not token: break token = token.strip() vocab[token] = index index += 1 return vocab
Loads a vocabulary file into a dictionary.
371,535
def find_npolfile(flist,detector,filters): npolfile = None for f in flist: fdet = fits.getval(f, , memmap=False) if fdet == detector: filt1 = fits.getval(f, , memmap=False) filt2 = fits.getval(f, , memmap=False) fdate = fits.getval(f, , memmap=False) if filt1 == or \ (filt1 == filters[0] and filt2 == filters[1]): npolfile = f return npolfile
Search a list of files for one that matches the configuration of detector and filters used.
371,536
def import_eit_fzj(self, filename, configfile, correction_file=None, timestep=None, **kwargs): df_emd, dummy1, dummy2 = eit_fzj.read_3p_data( filename, configfile, **kwargs ) if correction_file is not None: eit_fzj_utils.apply_correction_factors(df_emd, correction_file) if timestep is not None: df_emd[] = timestep self._add_to_container(df_emd) print() self._describe_data(df_emd)
EIT data import for FZJ Medusa systems
371,537
def strip_metadata(report): report[] = report[][] report[] = report[][] report[] = report[][] report.pop() return report
Duplicates org_name, org_email and report_id into JSON root and removes report_metadata key to bring it more inline with Elastic output.
371,538
def take_function_register(self, rtype = SharedData.TYPES.NO_TYPE): reg = SharedData.FUNCTION_REGISTER if reg not in self.free_registers: self.error("function register already taken") self.free_registers.remove(reg) self.used_registers.append(reg) self.symtab.set_type(reg, rtype) return reg
Reserves register for function return value and sets its type
371,539
def merge_validator_config(configs): bind_network = None bind_component = None bind_consensus = None endpoint = None peering = None seeds = None peers = None network_public_key = None network_private_key = None scheduler = None permissions = None roles = None opentsdb_url = None opentsdb_db = None opentsdb_username = None opentsdb_password = None minimum_peer_connectivity = None maximum_peer_connectivity = None state_pruning_block_depth = None fork_cache_keep_time = None component_thread_pool_workers = None network_thread_pool_workers = None signature_thread_pool_workers = None for config in reversed(configs): if config.bind_network is not None: bind_network = config.bind_network if config.bind_component is not None: bind_component = config.bind_component if config.bind_consensus is not None: bind_consensus = config.bind_consensus if config.endpoint is not None: endpoint = config.endpoint if config.peering is not None: peering = config.peering if config.seeds is not None: seeds = config.seeds if config.peers is not None: peers = config.peers if config.network_public_key is not None: network_public_key = config.network_public_key if config.network_private_key is not None: network_private_key = config.network_private_key if config.scheduler is not None: scheduler = config.scheduler if config.permissions is not None or config.permissions == {}: permissions = config.permissions if config.roles is not None: roles = config.roles if config.opentsdb_url is not None: opentsdb_url = config.opentsdb_url if config.opentsdb_db is not None: opentsdb_db = config.opentsdb_db if config.opentsdb_username is not None: opentsdb_username = config.opentsdb_username if config.opentsdb_password is not None: opentsdb_password = config.opentsdb_password if config.minimum_peer_connectivity is not None: minimum_peer_connectivity = config.minimum_peer_connectivity if config.maximum_peer_connectivity is not None: maximum_peer_connectivity = config.maximum_peer_connectivity if 
config.state_pruning_block_depth is not None: state_pruning_block_depth = config.state_pruning_block_depth if config.fork_cache_keep_time is not None: fork_cache_keep_time = config.fork_cache_keep_time if config.component_thread_pool_workers is not None: component_thread_pool_workers = \ config.component_thread_pool_workers if config.network_thread_pool_workers is not None: network_thread_pool_workers = \ config.network_thread_pool_workers if config.signature_thread_pool_workers is not None: signature_thread_pool_workers = \ config.signature_thread_pool_workers return ValidatorConfig( bind_network=bind_network, bind_component=bind_component, bind_consensus=bind_consensus, endpoint=endpoint, peering=peering, seeds=seeds, peers=peers, network_public_key=network_public_key, network_private_key=network_private_key, scheduler=scheduler, permissions=permissions, roles=roles, opentsdb_url=opentsdb_url, opentsdb_db=opentsdb_db, opentsdb_username=opentsdb_username, opentsdb_password=opentsdb_password, minimum_peer_connectivity=minimum_peer_connectivity, maximum_peer_connectivity=maximum_peer_connectivity, state_pruning_block_depth=state_pruning_block_depth, fork_cache_keep_time=fork_cache_keep_time, component_thread_pool_workers=component_thread_pool_workers, network_thread_pool_workers=network_thread_pool_workers, signature_thread_pool_workers=signature_thread_pool_workers )
Given a list of ValidatorConfig objects, merges them into a single ValidatorConfig, giving priority in the order of the configs (first has highest priority).
371,540
def split_input(cls, mapper_spec): shard_count = mapper_spec.shard_count query_spec = cls._get_query_spec(mapper_spec) if not property_range.should_shard_by_property_range(query_spec.filters): return super(DatastoreInputReader, cls).split_input(mapper_spec) oversplit_factor = query_spec.oversplit_factor oversplit_shard_count = oversplit_factor * shard_count p_range = property_range.PropertyRange(query_spec.filters, query_spec.model_class_path) p_ranges = p_range.split(oversplit_shard_count) if query_spec.ns is not None: ns_range = namespace_range.NamespaceRange( namespace_start=query_spec.ns, namespace_end=query_spec.ns, _app=query_spec.app) ns_ranges = [copy.copy(ns_range) for _ in p_ranges] else: ns_keys = namespace_range.get_namespace_keys( query_spec.app, cls.MAX_NAMESPACES_FOR_KEY_SHARD+1) if not ns_keys: return if len(iters) > shard_count: iters = [ db_iters.RangeIteratorFactory.create_multi_property_range_iterator( [iters[i] for i in xrange(start_index, len(iters), shard_count)] ) for start_index in xrange(shard_count) ] return [cls(i) for i in iters]
Inherit docs.
371,541
def generate_random_128bit_string(): t = int(time.time()) lower_96 = random.getrandbits(96) return .format((t << 96) | lower_96)
Returns a 128 bit UTF-8 encoded string. Follows the same conventions as generate_random_64bit_string(). The upper 32 bits are the current time in epoch seconds, and the lower 96 bits are random. This allows for AWS X-Ray `interop <https://github.com/openzipkin/zipkin/issues/1754>`_ :returns: 32-character hex string
371,542
def get_auth(): import getpass from requests.auth import HTTPDigestAuth input_func = input try: input_func = raw_input except NameError: pass uname = input_func("MODSCAG Username:") pw = getpass.getpass("MODSCAG Password:") auth = HTTPDigestAuth(uname, pw) return auth
Get authorization token for https
371,543
def exclude(*what): cls, attrs = _split_what(what) def exclude_(attribute, value): return value.__class__ not in cls and attribute not in attrs return exclude_
Blacklist *what*. :param what: What to blacklist. :type what: :class:`list` of classes or :class:`attr.Attribute`\\ s. :rtype: :class:`callable`
371,544
def as_action_description(self): description = { self.name: { : self.href_prefix + self.href, : self.time_requested, : self.status, }, } if self.input is not None: description[self.name][] = self.input if self.time_completed is not None: description[self.name][] = self.time_completed return description
Get the action description. Returns a dictionary describing the action.
371,545
def is_dn(s): if s == : return True rm = DN_REGEX.match(s) return rm is not None and rm.group(0) == s
Return True if s is a LDAP DN.
371,546
def _residual_soil(self): return self.catchment.descriptors.bfihost \ + 1.3 * (0.01 * self.catchment.descriptors.sprhost) \ - 0.987
Methodology source: FEH, Vol. 3, p. 14
371,547
def decode(message): try: data = json.loads(message.payload.decode("utf-8")) except ValueError as e: raise InvalidEventException( % (message.payload, str(e))) timestamp = datetime.now(pytz.timezone("UTC")) return Message(data, timestamp)
Convert a generic JSON message * The entire message is converted to JSON and treated as the message data * The timestamp of the message is the time that the message is RECEIVED
371,548
def setup_admin_on_rest_handlers(admin, admin_handler): add_route = admin.router.add_route add_static = admin.router.add_static static_folder = str(PROJ_ROOT / ) a = admin_handler add_route(, , a.index_page, name=) add_route(, , a.token, name=) add_static(, path=static_folder, name=) add_route(, , a.logout, name=)
Initialize routes.
371,549
def plot_2(data, *args): df_all = pd.DataFrame(data) df_params = nonconstant_parameters(data) x = [df_all[][0]] y = [df_all[][0]] params = [df_params.loc[0]] for i in range(len(df_all)): if df_all[][i] > y[-1]: x.append(df_all[][i]) y.append(df_all[][i]) params.append(df_params.loc[i]) return build_scatter_tooltip( x=x, y=y, tt=pd.DataFrame(params), title=)
Plot 2. Running best score (scatter plot)
371,550
def get_insight(self, project_key, insight_id, **kwargs): try: project_owner, project_id = parse_dataset_key(project_key) return self._insights_api.get_insight(project_owner, project_id, insight_id, **kwargs).to_dict() except _swagger.rest.ApiException as e: raise RestApiError(cause=e)
Retrieve an insight :param project_key: Project identifier, in the form of projectOwner/projectid :type project_key: str :param insight_id: Insight unique identifier. :type insight_id: str :returns: Insight definition, with all attributes :rtype: object :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> insight = api_client.get_insight( ... 'jonloyens/' ... 'an-example-project-that-shows-what-to-put-in-data-world', ... 'c2538b0c-c200-474c-9631-5ff4f13026eb') # doctest: +SKIP >>> insight['title'] # doctest: +SKIP 'Coast Guard Lives Saved by Fiscal Year'
371,551
def find_files(sequencepath): files = sorted(glob(os.path.join(sequencepath, ))) return files
Use glob to find all FASTA files in the provided sequence path. NOTE: FASTA files must have an extension such as .fasta, .fa, or .fas. Extensions of .fsa, .tfa, ect. are not currently supported :param sequencepath: path of folder containing FASTA genomes :return: list of FASTA files
371,552
def create_shipping_address(self, shipping_address): url = urljoin(self._url, ) return shipping_address.post(url)
Creates a shipping address on an existing account. If you are creating an account, you can embed the shipping addresses with the request
371,553
def datetime(self): index = self.data.index.remove_unused_levels() return pd.to_datetime(index.levels[0])
εˆ†ι’ŸηΊΏη»“ζž„θΏ”ε›ždatetime ζ—₯ηΊΏη»“ζž„θΏ”ε›ždate
371,554
def get_dir_backup(): args = parser.parse_args() s3_get_dir_backup( args.aws_access_key_id, args.aws_secret_access_key, args.bucket_name, args.s3_folder, args.zip_backups_dir, args.project)
retrieves directory backup
371,555
def get_neutron_endpoint(cls, json_resp): catalog = json_resp.get(, {}).get(, []) match = neutron_endpoint = None for entry in catalog: if entry[] == match or in entry[]: valid_endpoints = {} for ep in entry[]: interface = ep.get(, ) if interface in [, ]: valid_endpoints[interface] = ep[] if valid_endpoints: neutron_endpoint = valid_endpoints.get("public", valid_endpoints.get("internal")) break else: raise MissingNeutronEndpoint() return neutron_endpoint
Parse the service catalog returned by the Identity API for an endpoint matching the Neutron service Sends a CRITICAL service check when none are found registered in the Catalog
371,556
def redirect(self, pid): if not (self.is_registered() or self.is_redirected()): raise PIDInvalidAction("Persistent identifier is not registered.") try: with db.session.begin_nested(): if self.is_redirected(): r = Redirect.query.get(self.object_uuid) r.pid = pid else: with db.session.begin_nested(): r = Redirect(pid=pid) db.session.add(r) self.status = PIDStatus.REDIRECTED self.object_type = None self.object_uuid = r.id db.session.add(self) except IntegrityError: raise PIDDoesNotExistError(pid.pid_type, pid.pid_value) except SQLAlchemyError: logger.exception("Failed to redirect to {0}".format( pid), extra=dict(pid=self)) raise logger.info("Redirected PID to {0}".format(pid), extra=dict(pid=self)) return True
Redirect persistent identifier to another persistent identifier. :param pid: The :class:`invenio_pidstore.models.PersistentIdentifier` where redirect the PID. :raises invenio_pidstore.errors.PIDInvalidAction: If the PID is not registered or is not already redirecting to another PID. :raises invenio_pidstore.errors.PIDDoesNotExistError: If PID is not found. :returns: `True` if the PID is successfully redirect.
371,557
def artist(self, spotify_id): route = Route(, , spotify_id=spotify_id) return self.request(route)
Get a spotify artist by their ID. Parameters ---------- spotify_id : str The spotify_id to search by.
371,558
def ast_scan_file(filename, re_fallback=True): try: with io.open(filename, ) as fp: try: root = ast.parse(fp.read(), filename=filename) except (SyntaxError, IndentationError): if re_fallback: log.debug() return _ast_scan_file_re(filename) else: log.error(, filename) log.info(, exc_info=True) return None, None log.debug(, filename) ast_visitor.reset(filename) ast_visitor.visit(root) log.debug(, ast_visitor.import_root) return ast_visitor.scope, ast_visitor.imports except IOError: log.warn(, filename) return None, None
Scans a file for imports using AST. In addition to normal imports, try to get imports via `__import__` or `import_module` calls. The AST parser should be able to resolve simple variable assignments in cases where these functions are called with variables instead of strings.
371,559
def delete_bond(self, n, m): self.remove_edge(n, m) self.flush_cache()
implementation of bond removing
371,560
def write(self, file): w = Writer(**self.info) w.write(file, self.rows)
Write the image to the open file object. See `.save()` if you have a filename. In general, you can only call this method once; after it has been called the first time the PNG image is written, the source data will have been streamed, and cannot be streamed again.
371,561
def set_default_content_type(application, content_type, encoding=None): settings = get_settings(application, force_instance=True) settings.default_content_type = content_type settings.default_encoding = encoding
Store the default content type for an application. :param tornado.web.Application application: the application to modify :param str content_type: the content type to default to :param str|None encoding: encoding to use when one is unspecified
371,562
def get_bin(self): return _convert(self._ip_dec, notation=IP_BIN, inotation=IP_DEC, _check=False, _isnm=self._isnm)
Return the binary notation of the address/netmask.
371,563
def user_data(self, access_token, *args, **kwargs): return self.get_json(self.USER_INFO_URL, method="POST", headers=self._get_headers(access_token))
Loads user data from service
371,564
def delete_glossary( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): if "delete_glossary" not in self._inner_api_calls: self._inner_api_calls[ "delete_glossary" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_glossary, default_retry=self._method_configs["DeleteGlossary"].retry, default_timeout=self._method_configs["DeleteGlossary"].timeout, client_info=self._client_info, ) request = translation_service_pb2.DeleteGlossaryRequest(name=name) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) operation = self._inner_api_calls["delete_glossary"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, translation_service_pb2.DeleteGlossaryResponse, metadata_type=translation_service_pb2.DeleteGlossaryMetadata, )
Deletes a glossary, or cancels glossary construction if the glossary isn't created yet. Returns NOT\_FOUND, if the glossary doesn't exist. Example: >>> from google.cloud import translate_v3beta1 >>> >>> client = translate_v3beta1.TranslationServiceClient() >>> >>> name = client.glossary_path('[PROJECT]', '[LOCATION]', '[GLOSSARY]') >>> >>> response = client.delete_glossary(name) >>> >>> def callback(operation_future): ... # Handle result. ... result = operation_future.result() >>> >>> response.add_done_callback(callback) >>> >>> # Handle metadata. >>> metadata = response.metadata() Args: name (str): Required. The name of the glossary to delete. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.translate_v3beta1.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
371,565
def gen_batches(iterable, batch_size): abcdefghijklabcdefghijkl def batches_thunk(): return iter_batches(iterable, batch_size) try: length = len(iterable) except TypeError: return batches_thunk() num_batches = (length - 1) // batch_size + 1 return SizedGenerator(batches_thunk, length=num_batches)
Returns a generator object that yields batches from `iterable`. See `iter_batches` for more details and caveats. Note that `iter_batches` returns an iterator, which never supports `len()`, `gen_batches` returns an iterable which supports `len()` if and only if `iterable` does. This *may* be an iterator, but could be a `SizedGenerator` object. To obtain an iterator (for example, to use the `next()` function), call `iter()` on this iterable. >>> batches = gen_batches('abcdefghijkl', batch_size=5) >>> len(batches) 3 >>> for batch in batches: ... print(list(batch)) ['a', 'b', 'c', 'd', 'e'] ['f', 'g', 'h', 'i', 'j'] ['k', 'l']
371,566
def authenticate(self, _=None): acceptance_wait_time = self.opts[] acceptance_wait_time_max = self.opts[] channel = salt.transport.client.ReqChannel.factory(self.opts, crypt=) if not acceptance_wait_time_max: acceptance_wait_time_max = acceptance_wait_time try: while True: creds = self.sign_in(channel=channel) if creds == : if self.opts.get(): if self.opts.get(, None): error = SaltClientError( ) break else: print( ) sys.exit(2) if acceptance_wait_time: log.info(, acceptance_wait_time) time.sleep(acceptance_wait_time) if acceptance_wait_time < acceptance_wait_time_max: acceptance_wait_time += acceptance_wait_time log.debug(, acceptance_wait_time) continue break self._creds = creds self._crypticle = Crypticle(self.opts, creds[]) finally: channel.close()
Authenticate with the master, this method breaks the functional paradigm, it will update the master information from a fresh sign in, signing in can occur as often as needed to keep up with the revolving master AES key. :rtype: Crypticle :returns: A crypticle used for encryption operations
371,567
def direct_messages_sent(self, since_id=None, max_id=None, count=None, include_entities=None, page=None): params = {} set_str_param(params, , since_id) set_str_param(params, , max_id) set_int_param(params, , count) set_int_param(params, , page) set_bool_param(params, , include_entities) return self._get_api(, params)
Gets the 20 most recent direct messages sent by the authenticating user. https://dev.twitter.com/docs/api/1.1/get/direct_messages/sent :param str since_id: Returns results with an ID greater than (that is, more recent than) the specified ID. There are limits to the number of Tweets which can be accessed through the API. If the limit of Tweets has occured since the since_id, the since_id will be forced to the oldest ID available. :params str max_id: Returns results with an ID less than (that is, older than) or equal to the specified ID. :param int count: Returns results with an ID less than (that is, older than) or equal to the specified ID. :param int page: Specifies the page of results to retrieve. :param bool include_entities: The entities node will not be included when set to ``False``. :returns: A list of direct message dicts.
371,568
def get_or_load_name(self, type_, id_, method): name = self.get_name(type_, id_) if name is not None: defer.returnValue(name) instance = yield method(id_) if instance is None: defer.returnValue(None) self.put_name(type_, id_, instance.name) defer.returnValue(instance.name)
read-through cache for a type of object's name. If we don't have a cached name for this type/id, then we will query the live Koji server and store the value before returning. :param type_: str, "user" or "tag" :param id_: int, eg. 123456 :param method: function to call if this value is not in the cache. This method must return a deferred that fires with an object with a ".name" attribute. :returns: deferred that when fired returns a str, or None
371,569
def _special_method_cache(method, cache_wrapper): name = method.__name__ special_names = , if name not in special_names: return wrapper_name = + name def proxy(self, *args, **kwargs): if wrapper_name not in vars(self): bound = types.MethodType(method, self) cache = cache_wrapper(bound) setattr(self, wrapper_name, cache) else: cache = getattr(self, wrapper_name) return cache(*args, **kwargs) return proxy
Because Python treats special methods differently, it's not possible to use instance attributes to implement the cached methods. Instead, install the wrapper method under a different name and return a simple proxy to that wrapper. https://github.com/jaraco/jaraco.functools/issues/5
371,570
def parse_ppi_graph(path: str, min_edge_weight: float = 0.0) -> Graph: logger.info("In parse_ppi_graph()") graph = igraph.read(os.path.expanduser(path), format="ncol", directed=False, names=True) graph.delete_edges(graph.es.select(weight_lt=min_edge_weight)) graph.delete_vertices(graph.vs.select(_degree=0)) logger.info(f"Loaded PPI network.\n" f"Number of proteins: {len(graph.vs)}\n" f"Number of interactions: {len(graph.es)}\n") return graph
Build an undirected graph of gene interactions from edgelist file. :param str path: The path to the edgelist file :param float min_edge_weight: Cutoff to keep/remove the edges, default is 0, but could also be 0.63. :return Graph: Protein-protein interaction graph
371,571
def async_alert(self, alert_msg: str, new_prompt: Optional[str] = None) -> None: if not (vt100_support and self.use_rawinput): return import shutil import colorama.ansi as ansi from colorama import Cursor cursor_input_offset = last_prompt_line_width + rl_get_point() cursor_input_line = int(cursor_input_offset / terminal_size.columns) + 1 terminal_str = if cursor_input_line != num_input_terminal_lines: terminal_str += Cursor.DOWN(num_input_terminal_lines - cursor_input_line) total_lines = num_prompt_terminal_lines + num_input_terminal_lines terminal_str += (ansi.clear_line() + Cursor.UP(1)) * (total_lines - 1) terminal_str += ansi.clear_line() terminal_str += + alert_msg if rl_type == RlType.GNU: sys.stderr.write(terminal_str) elif rl_type == RlType.PYREADLINE: readline.rl.mode.console.write(terminal_str) rl_force_redisplay() self.terminal_lock.release() else: raise RuntimeError("another thread holds terminal_lock")
Display an important message to the user while they are at the prompt in between commands. To the user it appears as if an alert message is printed above the prompt and their current input text and cursor location is left alone. IMPORTANT: This function will not print an alert unless it can acquire self.terminal_lock to ensure a prompt is onscreen. Therefore it is best to acquire the lock before calling this function to guarantee the alert prints. :param alert_msg: the message to display to the user :param new_prompt: if you also want to change the prompt that is displayed, then include it here see async_update_prompt() docstring for guidance on updating a prompt :raises RuntimeError if called while another thread holds terminal_lock
371,572
def contract(self, process): print(, process) print(self.name, , self.parent) return self.parent
this contracts the current node to its parent and then either caclulates the params and values if all child data exists, OR uses the default parent data. (In real terms it returns the parent and recalculates) TODO = processes need to be recalculated
371,573
def update( self, kb_id, update_kb, custom_headers=None, raw=False, **operation_config): url = self.update.metadata[] path_format_arguments = { : self._serialize.url("self.config.endpoint", self.config.endpoint, , skip_quote=True), : self._serialize.url("kb_id", kb_id, ) } url = self._client.format_url(url, **path_format_arguments) query_parameters = {} header_parameters = {} header_parameters[] = header_parameters[] = if custom_headers: header_parameters.update(custom_headers) body_content = self._serialize.body(update_kb, ) request = self._client.patch(url, query_parameters, header_parameters, body_content) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [202]: raise models.ErrorResponseException(self._deserialize, response) deserialized = None header_dict = {} if response.status_code == 202: deserialized = self._deserialize(, response) header_dict = { : , } if raw: client_raw_response = ClientRawResponse(deserialized, response) client_raw_response.add_headers(header_dict) return client_raw_response return deserialized
Asynchronous operation to modify a knowledgebase. :param kb_id: Knowledgebase id. :type kb_id: str :param update_kb: Post body of the request. :type update_kb: ~azure.cognitiveservices.knowledge.qnamaker.models.UpdateKbOperationDTO :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: Operation or ClientRawResponse if raw=true :rtype: ~azure.cognitiveservices.knowledge.qnamaker.models.Operation or ~msrest.pipeline.ClientRawResponse :raises: :class:`ErrorResponseException<azure.cognitiveservices.knowledge.qnamaker.models.ErrorResponseException>`
371,574
def set_alpha_value(self, value): if isinstance(value, float) is False: raise TypeError("The type of __alpha_value must be float.") self.__alpha_value = value
setter Learning rate.
371,575
def health_check(self): api_response = result = .format(self._resource_name) try: health_check_flow = RunCommandFlow(self.cli_handler, self._logger) health_check_flow.execute_flow() result += except Exception as e: self._logger.exception(e) api_response = result += try: self._api.SetResourceLiveStatus(self._resource_name, api_response, result) except Exception: self._logger.error(.format(self._resource_name)) return result
Verify that device is accessible over CLI by sending ENTER for cli session
371,576
def _process_cache(self, d, path=()): for k, v in d.iteritems(): if not isinstance(v, dict): self.metrics.append((path + (k,), v)) else: self._process_cache(v, path + (k,))
Recusively walk a nested recon cache dict to obtain path/values
371,577
def parse_item(self, location: str, item_type: Type[T], item_name_for_log: str = None, file_mapping_conf: FileMappingConfiguration = None, options: Dict[str, Dict[str, Any]] = None) -> T: item_name_for_log = item_name_for_log or check_var(item_name_for_log, var_types=str, var_name=) if len(item_name_for_log) > 0: item_name_for_log = item_name_for_log + self.logger.debug( + item_name_for_log + + get_pretty_type_str(item_type) + + location + ) return self._parse__item(item_type, location, file_mapping_conf, options=options)
Main method to parse an item of type item_type :param location: :param item_type: :param item_name_for_log: :param file_mapping_conf: :param options: :return:
371,578
def _build(self, inputs): shape_inputs = inputs.get_shape().as_list() rank = len(shape_inputs) max_dim = np.max(self._dims) + 1 if rank < max_dim: raise ValueError("Rank of inputs must be at least {}.".format(max_dim)) full_begin = [0] * rank full_size = [-1] * rank for dim, begin, size in zip(self._dims, self._begin, self._size): full_begin[dim] = begin full_size[dim] = size return tf.slice(inputs, begin=full_begin, size=full_size)
Connects the SliceByDim module into the graph. Args: inputs: `Tensor` to slice. Its rank must be greater than the maximum dimension specified in `dims` (plus one as python is 0 indexed). Returns: The sliced tensor. Raises: ValueError: If `inputs` tensor has insufficient rank.
371,579
def summarize_entity_person(person): ret = [] value = person.get("name") if not value: return False ret.append(value) prop = "courtesyName" value = json_get_first_item(person, prop) if value == u"不详": value = "" if value: ret.append(u.format(value)) value = person.get("alternateName") if value: pass prop = "artName" value = json_get_first_item(person, prop) if value: ret.append(u.format(value)) value = person.get("dynasty") if value: ret.append(u.format(value)) prop = "ancestralHome" value = json_get_first_item(person, prop) if value: ret.append(u.format(value)) birth_date = person.get("birthDate", "") birth_place = person.get("birthPlace", "") if birth_date == u"不详": birth_date = "" if birth_place: ret.append(u.format(birth_date, birth_place)) elif birth_date: ret.append(u.format(birth_date)) prop = "nationality" nationality = json_get_first_item(person, prop) prop = "occupation" occupation = json_get_first_item(person, prop) if occupation: if nationality: ret.append(u.format(nationality, occupation)) else: ret.append(u.format(occupation)) elif nationality: ret.append(u.format(nationality)) prop = "authorOf" value = json_get_list(person, prop) if value: logging.info(value) value = u"、".join(value) ret.append(u.format(value) ) prop = "accomplishment" value = json_get_list(person, prop) if value: value = u"、".join(value) if len(value) < 30: ret.append( u"主要成就:{}".format(value) ) ret = u",".join(ret) ret = ret.replace(u, u) ret = re.sub(u",+", u",", ret) ret = re.sub(ur"[γ€‚οΌŒ]+$", u"", ret) ret = ret.replace(u, u) ret = ret.replace(u, u) ret = re.sub(ur"([^οΌ‰]*οΌ‰", u"", ret) ret = u.join([ret, u"。"]) return ret
assume person entity using cnschma person vocabulary, http://cnschema.org/Person
371,580
def fit_interval_censoring( self, lower_bound, upper_bound, event_observed=None, timeline=None, label=None, alpha=None, ci_labels=None, show_progress=False, entry=None, weights=None, ): check_nans_or_infs(lower_bound) check_positivity(upper_bound) self.upper_bound = np.asarray(pass_for_numeric_dtypes_or_raise_array(upper_bound)) self.lower_bound = np.asarray(pass_for_numeric_dtypes_or_raise_array(lower_bound)) if (self.upper_bound < self.lower_bound).any(): raise ValueError("All upper_bound times must be greater than or equal to lower_bound times.") if event_observed is None: event_observed = self.upper_bound == self.lower_bound if ((self.lower_bound == self.upper_bound) != event_observed).any(): raise ValueError( "For all rows, lower_bound == upper_bound if and only if event observed = 1 (uncensored). Likewise, lower_bound < upper_bound if and only if event observed = 0 (censored)" ) self._censoring_type = CensoringType.INTERVAL return self._fit( (np.clip(self.lower_bound, 1e-20, 1e25), np.clip(self.upper_bound, 1e-20, 1e25)), event_observed=event_observed, timeline=timeline, label=label, alpha=alpha, ci_labels=ci_labels, show_progress=show_progress, entry=entry, weights=weights, )
Fit the model to an interval censored dataset. Parameters ---------- lower_bound: an array, or pd.Series length n, the start of the period the subject experienced the event in. upper_bound: an array, or pd.Series length n, the end of the period the subject experienced the event in. If the value is equal to the corresponding value in lower_bound, then the individual's event was observed (not censored). event_observed: numpy array or pd.Series, optional length n, if left optional, infer from ``lower_bound`` and ``upper_cound`` (if lower_bound==upper_bound then event observed, if lower_bound < upper_bound, then event censored) timeline: list, optional return the estimate at the values in timeline (positively increasing) label: string, optional a string to name the column of the estimate. alpha: float, optional the alpha value in the confidence intervals. Overrides the initializing alpha for this call to fit only. ci_labels: list, optional add custom column names to the generated confidence intervals as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<alpha> show_progress: boolean, optional since this is an iterative fitting algorithm, switching this to True will display some iteration details. entry: an array, or pd.Series, of length n relative time when a subject entered the study. This is useful for left-truncated (not left-censored) observations. If None, all members of the population entered study when they were "born": time zero. weights: an array, or pd.Series, of length n integer weights per observation Returns ------- self self with new properties like ``cumulative_hazard_``, ``survival_function_``
371,581
def _connectionLost(self, reason): log.info(, self, reason) self.proto = None for tReq in self.requests.values(): tReq.sent = False if self._dDown: self._dDown.callback(None) elif self.requests: self._connect()
Called when the protocol connection is lost - Log the disconnection. - Mark any outstanding requests as unsent so they will be sent when a new connection is made. - If closing the broker client, mark completion of that process. :param reason: Failure that indicates the reason for disconnection.
371,582
def fit_allele_specific_predictors( self, n_models, architecture_hyperparameters_list, allele, peptides, affinities, inequalities=None, train_rounds=None, models_dir_for_save=None, verbose=0, progress_preamble="", progress_print_interval=5.0): allele = mhcnames.normalize_allele_name(allele) if allele not in self.allele_to_allele_specific_models: self.allele_to_allele_specific_models[allele] = [] encodable_peptides = EncodableSequences.create(peptides) peptides_affinities_inequalities_per_round = [ (encodable_peptides, affinities, inequalities) ] if train_rounds is not None: for round in sorted(set(train_rounds)): round_mask = train_rounds > round if round_mask.any(): sub_encodable_peptides = EncodableSequences.create( encodable_peptides.sequences[round_mask]) peptides_affinities_inequalities_per_round.append(( sub_encodable_peptides, affinities[round_mask], None if inequalities is None else inequalities[round_mask])) n_rounds = len(peptides_affinities_inequalities_per_round) n_architectures = len(architecture_hyperparameters_list) pieces = [] if n_models > 1: pieces.append("Model {model_num:2d} / {n_models:2d}") if n_architectures > 1: pieces.append( "Architecture {architecture_num:2d} / {n_architectures:2d}") if len(peptides_affinities_inequalities_per_round) > 1: pieces.append("Round {round:2d} / {n_rounds:2d}") pieces.append("{n_peptides:4d} peptides") progress_preamble_template = "[ %s ] {user_progress_preamble}" % ( ", ".join(pieces)) models = [] for model_num in range(n_models): for (architecture_num, architecture_hyperparameters) in enumerate( architecture_hyperparameters_list): model = Class1NeuralNetwork(**architecture_hyperparameters) for round_num in range(n_rounds): (round_peptides, round_affinities, round_inequalities) = ( peptides_affinities_inequalities_per_round[round_num] ) model.fit( round_peptides, round_affinities, inequalities=round_inequalities, verbose=verbose, progress_preamble=progress_preamble_template.format( 
n_peptides=len(round_peptides), round=round_num, n_rounds=n_rounds, user_progress_preamble=progress_preamble, model_num=model_num + 1, n_models=n_models, architecture_num=architecture_num + 1, n_architectures=n_architectures), progress_print_interval=progress_print_interval) model_name = self.model_name(allele, model_num) row = pandas.Series(collections.OrderedDict([ ("model_name", model_name), ("allele", allele), ("config_json", json.dumps(model.get_config())), ("model", model), ])).to_frame().T self._manifest_df = pandas.concat( [self.manifest_df, row], ignore_index=True) self.allele_to_allele_specific_models[allele].append(model) if models_dir_for_save: self.save( models_dir_for_save, model_names_to_write=[model_name]) models.append(model) self.clear_cache() return models
Fit one or more allele specific predictors for a single allele using one or more neural network architectures. The new predictors are saved in the Class1AffinityPredictor instance and will be used on subsequent calls to `predict`. Parameters ---------- n_models : int Number of neural networks to fit architecture_hyperparameters_list : list of dict List of hyperparameter sets. allele : string peptides : `EncodableSequences` or list of string affinities : list of float nM affinities inequalities : list of string, each element one of ">", "<", or "=" See Class1NeuralNetwork.fit for details. train_rounds : sequence of int Each training point i will be used on training rounds r for which train_rounds[i] > r, r >= 0. models_dir_for_save : string, optional If specified, the Class1AffinityPredictor is (incrementally) written to the given models dir after each neural network is fit. verbose : int Keras verbosity progress_preamble : string Optional string of information to include in each progress update progress_print_interval : float How often (in seconds) to print progress. Set to None to disable. Returns ------- list of `Class1NeuralNetwork`
371,583
def _convert_file_records(self, file_records): for record in file_records: type_ = self.guess_type(record[], allow_directory=False) if type_ == : yield self._notebook_model_from_db(record, False) elif type_ == : yield self._file_model_from_db(record, False, None) else: self.do_500("Unknown file type %s" % type_)
Apply _notebook_model_from_db or _file_model_from_db to each entry in file_records, depending on the result of `guess_type`.
371,584
def update_assessment_taken(self, assessment_taken_form): collection = JSONClientValidated(, collection=, runtime=self._runtime) if not isinstance(assessment_taken_form, ABCAssessmentTakenForm): raise errors.InvalidArgument() if not assessment_taken_form.is_for_update(): raise errors.InvalidArgument() try: if self._forms[assessment_taken_form.get_id().get_identifier()] == UPDATED: raise errors.IllegalState() except KeyError: raise errors.Unsupported() if not assessment_taken_form.is_valid(): raise errors.InvalidArgument() collection.save(assessment_taken_form._my_map) self._forms[assessment_taken_form.get_id().get_identifier()] = UPDATED return objects.AssessmentTaken( osid_object_map=assessment_taken_form._my_map, runtime=self._runtime, proxy=self._proxy)
Updates an existing assessment taken. arg: assessment_taken_form (osid.assessment.AssessmentTakenForm): the form containing the elements to be updated raise: IllegalState - ``assessment_taken_form`` already used in an update transaction raise: InvalidArgument - the form contains an invalid value raise: NullArgument - ``assessment_taken_form`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred raise: Unsupported - ``assessment_offered_form`` did not originate from ``get_assessment_taken_form_for_update()`` *compliance: mandatory -- This method must be implemented.*
371,585
def _actionsFreqs(self,*args,**kwargs): acfs= self._actionsFreqsAngles(*args,**kwargs) return (acfs[0],acfs[1],acfs[2],acfs[3],acfs[4],acfs[5])
NAME: actionsFreqs (_actionsFreqs) PURPOSE: evaluate the actions and frequencies (jr,lz,jz,Omegar,Omegaphi,Omegaz) INPUT: Either: a) R,vR,vT,z,vz[,phi]: 1) floats: phase-space value for single object (phi is optional) (each can be a Quantity) 2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity) b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument maxn= (default: object-wide default) Use a grid in vec(n) up to this n (zero-based) ts= if set, the phase-space points correspond to these times (IF NOT SET, WE ASSUME THAT ts IS THAT THAT IS ASSOCIATED WITH THIS OBJECT) _firstFlip= (False) if True and Orbits are given, the backward part of the orbit is integrated first and stored in the Orbit object OUTPUT: (jr,lz,jz,Omegar,Omegaphi,Omegaz) HISTORY: 2013-09-10 - Written - Bovy (IAS)
371,586
def monkeycache(apis):
    """Process a raw ``listApis`` response into a verb/subject-indexed cache.

    NOTE(review): the dictionary-key string literals (api count, name,
    params, etc.) were stripped from this record; they must be restored
    before this function can run.
    """
    # Guard: no response at all -> empty cache.
    if isinstance(type(apis), type(None)) or apis is None:
        return {}
    verbs = set()
    cache = {}
    cache[] = apis[]
    cache[] = []
    apilist = apis[]
    if apilist is None:
        print("[monkeycache] Server response issue, no apis found")
    for api in apilist:
        name = getvalue(api, )
        # Split e.g. "listVirtualMachines" into verb and subject parts.
        verb, subject = splitverbsubject(name)
        apidict = {}
        apidict[] = name
        apidict[] = getvalue(api, )
        apidict[] = getvalue(api, )
        if apidict[]:
            cache[].append(name)
        apidict[] = splitcsvstring(getvalue(api, ))
        required = []
        apiparams = []
        # Normalise every parameter descriptor for this api.
        for param in getvalue(api, ):
            apiparam = {}
            apiparam[] = getvalue(param, )
            apiparam[] = getvalue(param, )
            apiparam[] = (getvalue(param, ) is True)
            apiparam[] = int(getvalue(param, ))
            apiparam[] = getvalue(param, )
            apiparam[] = splitcsvstring(getvalue(param, ))
            if apiparam[]:
                required.append(apiparam[])
            apiparams.append(apiparam)
        apidict[] = required
        apidict[] = apiparams
        # Index the api by verb, then subject.
        if verb not in cache:
            cache[verb] = {}
        cache[verb][subject] = apidict
        verbs.add(verb)
    cache[] = list(verbs)
    return cache
Feed this a dictionary of api bananas, it spits out processed cache
371,587
def nt2codon_rep(ntseq):
    """'Translate' a nucleotide sequence into one unique symbol per codon.

    Parameters
    ----------
    ntseq : str
        Nucleotide sequence composed of A, C, G, T (either case).

    Returns
    -------
    str
        Codon-symbol representation; ``len(ntseq) == 3L`` gives length L.

    NOTE(review): the nucleotide-key string literals, the 64-symbol codon
    alphabet, and the ``''`` join separator were stripped from this record;
    restore them before running.
    """
    # Map each nucleotide (upper- or lower-case) to a base-4 digit 0-3.
    nt2num = {: 0, : 1, : 2, : 3, : 0, : 1, : 2, : 3}
    # 64-character alphabet, one reserved symbol per codon (literal stripped).
    codon_rep =
    # Codon index is base-4: nt1 + 4*nt2 + 16*nt3; partial codons dropped.
    return .join([codon_rep[nt2num[ntseq[i]] + 4*nt2num[ntseq[i+1]] + 16*nt2num[ntseq[i+2]]] for i in range(0, len(ntseq), 3) if i+2 < len(ntseq)])
Represent nucleotide sequence by sequence of codon symbols. 'Translates' the nucleotide sequence into a symbolic representation of 'amino acids' where each codon gets its own unique character symbol. These characters should be reserved only for representing the 64 individual codons --- note that this means it is important that this function matches the corresponding function in the preprocess script and that any custom alphabet does not use these symbols. Defining symbols for each individual codon allows for Pgen computation of inframe nucleotide sequences. Parameters ---------- ntseq : str A Nucleotide sequence (normally a CDR3 nucleotide sequence) to be 'translated' into the codon - symbol representation. Can be either uppercase or lowercase, but only composed of A, C, G, or T. Returns ------- codon_rep : str The codon - symbolic representation of ntseq. Note that if len(ntseq) == 3L --> len(codon_rep) == L Example -------- >>> nt2codon_rep('TGTGCCTGGAGTGTAGCTCCGGACAGGGGTGGCTACACCTTC') '\xbb\x96\xab\xb8\x8e\xb6\xa5\x92\xa8\xba\x9a\x93\x94\x9f'
371,588
def id(self, id):
    """Sets the id of this Shift.

    UUID for this object.

    :param id: The id of this Shift (non-None, at most 255 characters).
    :type: str
    :raises ValueError: if ``id`` is None or longer than 255 characters.
    """
    value = id
    if value is None:
        raise ValueError("Invalid value for `id`, must not be `None`")
    if len(value) > 255:
        raise ValueError("Invalid value for `id`, length must be less than `255`")
    self._id = value
Sets the id of this Shift. UUID for this object :param id: The id of this Shift. :type: str
371,589
def toarray(vari):
    """Convert a polynomial array into a numpy object-array of scalar polys.

    Args:
        vari (Poly, numpy.ndarray): Input data.

    Returns:
        (numpy.ndarray): Array with ``out.shape == vari.shape`` whose entries
        are scalar ``Poly`` objects; non-Poly input is passed through
        ``numpy.asarray``.

    NOTE(review): the loop nesting below was reconstructed from a flattened
    record -- confirm the inner element loop sits inside the key loop in the
    original source.
    """
    if isinstance(vari, Poly):
        shape = vari.shape
        # One coefficient dict per flattened output element.
        out = numpy.asarray(
            [{} for _ in range(numpy.prod(shape))],
            dtype=object
        )
        core = vari.A.copy()
        for key in core.keys():
            core[key] = core[key].flatten()
            # Scatter non-zero coefficients into the per-element dicts.
            for i in range(numpy.prod(shape)):
                if not numpy.all(core[key][i] == 0):
                    out[i][key] = core[key][i]
        # Wrap each coefficient dict as a scalar Poly.
        for i in range(numpy.prod(shape)):
            out[i] = Poly(out[i], vari.dim, (), vari.dtype)
        out = out.reshape(shape)
        return out
    return numpy.asarray(vari)
Convert polynomial array into a numpy.asarray of polynomials. Args: vari (Poly, numpy.ndarray): Input data. Returns: (numpy.ndarray): A numpy array with ``Q.shape==A.shape``. Examples: >>> poly = cp.prange(3) >>> print(poly) [1, q0, q0^2] >>> array = cp.toarray(poly) >>> print(isinstance(array, numpy.ndarray)) True >>> print(array[1]) q0
371,590
def __on_download_progress_update(self, blocknum, blocksize, totalsize):
    """Print download progress on a single self-updating line.

    :param blocknum: number of blocks transferred so far
    :param blocksize: size of each block in bytes
    :param totalsize: total download size, or <= 0 when unknown
    """
    if not self.__show_download_progress:
        return
    downloaded = blocknum * blocksize
    if totalsize > 0:
        # Known total: show "<done> / <total>" (size() formats bytes).
        sys.stdout.write("\r%s / %s" % (size(downloaded), size(totalsize)))
        if downloaded >= totalsize:
            # Finished: reset the cursor on stderr, as the original does.
            sys.stderr.write("\r")
    else:
        # Unknown total: only report bytes read so far.
        sys.stdout.write("\rread %s" % (size(downloaded)))
Prints some download progress information :param blocknum: :param blocksize: :param totalsize: :return:
371,591
def create_equipamento_roteiro(self):
    """Get an instance of equipamento_roteiro services facade."""
    credentials = (
        self.networkapi_url,
        self.user,
        self.password,
        self.user_ldap,
    )
    return EquipamentoRoteiro(*credentials)
Get an instance of equipamento_roteiro services facade.
371,592
def get_times_modified(self):
    """Return the total number of times increment_times_modified has been
    called for this resource by all processes.

    :rtype: int  (0 when the backing key does not exist)
    """
    raw = self.conn.client.get(self.times_modified_key)
    return 0 if raw is None else int(raw)
:returns: The total number of times increment_times_modified has been called for this resource by all processes. :rtype: int
371,593
def faces_to_path(mesh, face_ids=None, **kwargs):
    """Given a mesh and face indices find the outline edges and turn
    them into Path3D constructor kwargs.

    Parameters
    ----------
    mesh : trimesh.Trimesh
        Triangulated surface in 3D.
    face_ids : (n,) int, optional
        Indexes referencing mesh.faces; None means all faces.

    Returns
    -------
    kwargs : dict
        Kwargs for the Path3D constructor.
    """
    if face_ids is None:
        candidate_edges = mesh.edges_sorted
    else:
        # Three sorted edges (6 values) per face; pick the requested faces.
        candidate_edges = mesh.edges_sorted.reshape(
            (-1, 6))[face_ids].reshape((-1, 2))
    # Edges that appear exactly once are on the boundary.
    boundary = grouping.group_rows(candidate_edges, require_count=1)
    path_kwargs = edges_to_path(
        edges=candidate_edges[boundary],
        vertices=mesh.vertices)
    kwargs.update(path_kwargs)
    return kwargs
Given a mesh and face indices find the outline edges and turn them into a Path3D. Parameters --------- mesh : trimesh.Trimesh Triangulated surface in 3D face_ids : (n,) int Indexes referencing mesh.faces Returns --------- kwargs : dict Kwargs for Path3D constructor
371,594
def mkrngs(self):
    """Transform boolean arrays into lists of limit pairs.

    Gets Time limits of the signal/background/transition boolean arrays
    and stores them as sigrng, bkgrng and trnrng. Also labels each
    contiguous signal region with an integer in ``self.ns`` and records
    the region count in ``self.n``.
    """
    # Convert each boolean mask to Time-limit pairs (NaN pair if empty).
    for attr, mask in (("bkgrng", self.bkg),
                       ("sigrng", self.sig),
                       ("trnrng", self.trn)):
        bounds = bool_2_indices(mask)
        if bounds is not None:
            setattr(self, attr, self.Time[bounds])
        else:
            setattr(self, attr, [[np.nan, np.nan]])
    # Label contiguous signal regions 1..n; 0 marks non-signal samples.
    self.ns = np.zeros(self.Time.size)
    label = 1
    for i in range(len(self.sig) - 1):
        if self.sig[i]:
            self.ns[i] = label
        if self.sig[i] and ~self.sig[i + 1]:
            # Falling edge: the next signal region gets a new label.
            label += 1
    self.n = int(max(self.ns))
    return
Transform boolean arrays into list of limit pairs. Gets Time limits of signal/background boolean arrays and stores them as sigrng and bkgrng arrays. These arrays can be saved by 'save_ranges' in the analyse object.
371,595
def unvectorize_args(fn):
    """Wrap a function of one array argument so it accepts scalars.

    The returned function collects its positional arguments into a numpy
    array and passes that single array to ``fn``, e.g. turning
    ``rosen(np.array([x, y]))`` into ``rosen_(x, y)``.
    """
    def scalar_form(*args):
        return fn(np.asarray(args))
    return wraps(fn)(scalar_form)
See Also -------- revrand.utils.decorators.vectorize_args Examples -------- The Rosenbrock function is commonly used as a performance test problem for optimization algorithms. It and its derivatives are included in `scipy.optimize` and is implemented as expected by the family of optimization methods in `scipy.optimize`. def rosen(x): return sum(100.0*(x[1:]-x[:-1]**2.0)**2.0 + (1-x[:-1])**2.0) This representation makes it unwieldy to perform operations such as plotting since it is less straightforward to evaluate the function on a `meshgrid`. This decorator helps reconcile the differences between these representations. >>> from scipy.optimize import rosen >>> rosen(np.array([0.5, 1.5])) 156.5 >>> unvectorize_args(rosen)(0.5, 1.5) ... # doctest: +NORMALIZE_WHITESPACE 156.5 The `rosen` function is implemented in such a way that it generalizes to the Rosenbrock function of any number of variables. This decorator supports can support any functions defined in a similar manner. The function with any number of arguments are well-defined: >>> rosen(np.array([0.5, 1.5, 1., 0., 0.2])) 418.0 >>> unvectorize_args(rosen)(0.5, 1.5, 1., 0., 0.2) ... # can accept any variable number of arguments! 418.0 Make it easier to work with for other operations >>> rosen_ = unvectorize_args(rosen) >>> y, x = np.mgrid[0:2.1:0.05, -1:1.2:0.05] >>> z = rosen_(x, y) >>> z.round(2) array([[ 104. , 85.25, 69.22, ..., 121.55, 146.42, 174.92], [ 94.25, 76.48, 61.37, ..., 110.78, 134.57, 161.95], [ 85. , 68.2 , 54.02, ..., 100.5 , 123.22, 149.47], ..., [ 94.25, 113.53, 133.57, ..., 71.83, 54.77, 39.4 ], [ 104. , 124.25, 145.22, ..., 80.55, 62.42, 45.92], [ 114.25, 135.48, 157.37, ..., 89.78, 70.57, 52.95]]) Now this can be directly plotted with `mpl_toolkits.mplot3d.Axes3D` and `ax.plot_surface`.
371,596
def get_count(self, unique_identifier, metric, start_date=None, end_date=None, **kwargs):
    """Get the count for ``metric`` for ``unique_identifier``.

    You can specify a ``start_date`` and an ``end_date`` to only count
    metrics within that time range.

    :param unique_identifier: Unique string identifying the object this
        metric is for
    :param metric: A unique name for the metric you want to track
    :param start_date: Count only metrics after this date
    :param end_date: Count only metrics before this date
    :return: The count for the metric, 0 otherwise

    NOTE(review): both ``hasattr(..., )`` calls lost their attribute-name
    string literal in this record (used to distinguish datetime from
    date); restore before running.
    """
    result = None
    if start_date and end_date:
        # Normalise ordering, then promote plain dates to midnight datetimes.
        start_date, end_date = (start_date, end_date,) if start_date < end_date else (end_date, start_date,)
        start_date = start_date if hasattr(start_date, ) else datetime.datetime.combine(start_date, datetime.time())
        end_date = end_date if hasattr(end_date, ) else datetime.datetime.combine(end_date, datetime.time())
        # First-of-month anchors between the two endpoints.
        monthly_metrics_dates = list(rrule.rrule(rrule.MONTHLY, dtstart=start_date, bymonthday=1, until=end_date))
        if len(monthly_metrics_dates) >= 3:
            # Wide range: combine whole-month aggregates with the partial
            # leading/trailing segments, fetched in one pipelined call.
            with self._analytics_backend.map() as conn:
                monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results = self._get_counts(
                    conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date)
            monthly_metric_series, monthly_metric_results = self._parse_and_process_metrics(monthly_metric_series, monthly_metric_results)
            starting_metric_series, starting_metric_results = self._parse_and_process_metrics(starting_metric_series, starting_metric_results)
            ending_metric_series, ending_metric_results = self._parse_and_process_metrics(ending_metric_series, ending_metric_results)
            result = sum(monthly_metric_results.values()) + sum(starting_metric_results.values()) + sum(ending_metric_results.values())
        else:
            # Narrow range: sum the per-day values directly.
            diff = end_date - start_date
            metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=diff.days + 1)
            result = sum(metric_results[1].values())
    else:
        # No range given: read the all-time counter key.
        try:
            result = int(self._analytics_backend.get(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric,)))
        except TypeError:
            # Backend returned None (missing key) -> count is 0.
            result = 0
    return result
Gets the count for the ``metric`` for ``unique_identifier``. You can specify a ``start_date`` and an ``end_date``, to only get metrics within that time range. :param unique_identifier: Unique string identifying the object this metric is for :param metric: A unique name for the metric you want to track :param start_date: Get the specified metrics after this date :param end_date: Get the specified metrics before this date :return: The count for the metric, 0 otherwise
371,597
def start(self, labels=None):
    """Start specified timer(s).

    Parameters
    ----------
    labels : string or list, optional (default None)
        Label(s) of the timer(s) to be started. If ``None``, start the
        default timer whose label is ``self.dfltlbl``.
    """
    if labels is None:
        labels = self.dfltlbl
    # Accept a single label as well as a list/tuple of labels.
    if not isinstance(labels, (list, tuple)):
        labels = [labels]
    now = timer()
    for label in labels:
        if label not in self.td:
            # First use of this label: zero accumulator, no start time.
            self.td[label] = 0.0
            self.t0[label] = None
        if self.t0[label] is None:
            # Only (re)start timers that are not already running.
            self.t0[label] = now
Start specified timer(s). Parameters ---------- labels : string or list, optional (default None) Specify the label(s) of the timer(s) to be started. If it is ``None``, start the default timer with label specified by the ``dfltlbl`` parameter of :meth:`__init__`.
371,598
def split_semicolon(line, maxsplit=None):
    r"""Split a line on semicolon characters but not on escaped semicolons.

    :param line: line to split
    :type line: str
    :param maxsplit: maximal number of splits (if None or negative, no limit)
    :type maxsplit: None | int
    :return: split line
    :rtype: list

    >>> split_semicolon('a,b;c;;g')
    ['a,b', 'c', '', 'g']
    >>> split_semicolon('a,b;c;;g', 2)
    ['a,b', 'c', ';g']
    >>> split_semicolon(r'a,b;c\;;g', 2)
    ['a,b', 'c;', 'g']
    """
    # Bug fix: the stored body referenced an undefined loop variable `i`
    # and never merged escaped semicolons; reconstructed from the doctests.
    raw = line.split(';')
    # Re-merge segments whose predecessor ended with an escape backslash:
    # the backslash is consumed and replaced by a literal ';'.
    merged = []
    for part in raw:
        if merged and merged[-1].endswith('\\'):
            merged[-1] = merged[-1][:-1] + ';' + part
        else:
            merged.append(part)
    # Honour maxsplit by re-joining the remainder with plain semicolons.
    if maxsplit is None or maxsplit < 0 or maxsplit >= len(merged) - 1:
        return merged
    return merged[:maxsplit] + [';'.join(merged[maxsplit:])]
r"""Split a line on semicolons characters but not on the escaped semicolons :param line: line to split :type line: str :param maxsplit: maximal number of split (if None, no limit) :type maxsplit: None | int :return: split line :rtype: list >>> split_semicolon('a,b;c;;g') ['a,b', 'c', '', 'g'] >>> split_semicolon('a,b;c;;g', 2) ['a,b', 'c', ';g'] >>> split_semicolon(r'a,b;c\;;g', 2) ['a,b', 'c;', 'g']
371,599
def _convert_unsigned(data, fmt): num = len(data) return struct.unpack( "{}{}".format(num, fmt.upper()).encode("utf-8"), struct.pack("{}{}".format(num, fmt).encode("utf-8"), *data) )
Convert data from signed to unsigned in bulk.