code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def strel_pair(x, y):
    """Create a structuring element containing the origin and one other pixel.

    x, y - offsets of the other pixel relative to the origin
    returns a boolean structuring element array, just large enough to hold
    both pixels with the origin at its center
    """
    dx = int(np.abs(x)) * (1 if x >= 0 else -1) if False else int(x) if abs(int(x)) == int(np.abs(x)) else int(x)
    # offsets as plain ints; the center sits at (|dy|, |dx|)
    dx, dy = int(x), int(y)
    cx, cy = int(np.abs(x)), int(np.abs(y))
    strel = np.zeros((2 * cy + 1, 2 * cx + 1), bool)
    strel[cy, cx] = True
    strel[cy + dy, cx + dx] = True
    return strel
Create a structuring element composed of the origin and another pixel x, y - x and y offsets of the other pixel returns a structuring element
def MSTORE(self, address, value):
    """Save word to memory.

    :param address: memory offset to write to
    :param value: 32-byte word to store

    If the current program counter is tainted, every taint on it is
    propagated onto the stored value before writing.
    """
    if istainted(self.pc):
        # Propagate each taint on the program counter to the stored value.
        for taint in get_taints(self.pc):
            value = taint_with(value, taint)
    # Ensure the memory region exists, then write the 32-byte word.
    self._allocate(address, 32)
    self._store(address, value, 32)
Save word to memory
def authenticate(url, account, key, by='name', expires=0, timestamp=None,
                 timeout=None, request_type="xml", admin_auth=False,
                 use_password=False, raise_on_error=False):
    """ Authenticate to the Zimbra server

    :param url: URL of Zimbra SOAP service
    :param account: The account to be authenticated against
    :param key: The preauth key of the domain of the account or a password
      (if admin_auth or use_password is True)
    :param by: If the account is specified as a name, an ID or a
      ForeignPrincipal
    :param expires: When the token expires (or 0 for default expiration)
    :param timestamp: When the token was requested (None for "now")
    :param timeout: Timeout for the communication with the server. Defaults
      to the urllib2-default
    :param request_type: Which type of request to use ("xml" (default) or
      "json")
    :param admin_auth: This request should authenticate and generate an
      admin token. The "key"-parameter therefore holds the admin password
      (implies use_password)
    :param use_password: The "key"-parameter holds a password. Do a
      password-based user authentication.
    :param raise_on_error: Should I raise an exception when an
      authentication error occurs or just return None?
    :return: The authentication token or None
    :rtype: str or None or unicode
    """
    if timestamp is None:
        # Zimbra expects the timestamp in milliseconds.
        timestamp = int(time.time()) * 1000

    pak = ""
    if not admin_auth:
        # Preauth-based authentication needs a computed preauth value.
        pak = preauth.create_preauth(account, key, by, expires, timestamp)

    if request_type == 'xml':
        auth_request = RequestXml()
    else:
        auth_request = RequestJson()

    request_data = {
        'account': {
            'by': by,
            '_content': account
        }
    }

    ns = "urn:zimbraAccount"

    if admin_auth:
        # Admin auth: plain password in the request, admin namespace.
        ns = "urn:zimbraAdmin"
        request_data['password'] = key
    elif use_password:
        request_data['password'] = {
            "_content": key
        }
    else:
        request_data['preauth'] = {
            'timestamp': timestamp,
            'expires': expires,
            '_content': pak
        }

    auth_request.add_request(
        'AuthRequest',
        request_data,
        ns
    )

    server = Communication(url, timeout)

    if request_type == 'xml':
        response = ResponseXml()
    else:
        response = ResponseJson()

    server.send_request(auth_request, response)

    if response.is_fault():
        if raise_on_error:
            raise AuthenticationFailed(
                "Cannot authenticate user: (%s) %s" % (
                    response.get_fault_code(),
                    response.get_fault_message()
                )
            )
        return None

    return response.get_response()['AuthResponse']['authToken']
Authenticate to the Zimbra server :param url: URL of Zimbra SOAP service :param account: The account to be authenticated against :param key: The preauth key of the domain of the account or a password (if admin_auth or use_password is True) :param by: If the account is specified as a name, an ID or a ForeignPrincipal :param expires: When the token expires (or 0 for default expiration) :param timestamp: When the token was requested (None for "now") :param timeout: Timeout for the communication with the server. Defaults to the urllib2-default :param request_type: Which type of request to use ("xml" (default) or "json") :param admin_auth: This request should authenticate and generate an admin token. The "key"-parameter therefore holds the admin password (implies use_password) :param use_password: The "key"-parameter holds a password. Do a password- based user authentication. :param raise_on_error: Should I raise an exception when an authentication error occurs or just return None? :return: The authentication token or None :rtype: str or None or unicode
def close(self):
    """End the report, logging where the Zipkin trace can be viewed."""
    base_url = self.endpoint.replace("/api/v1/spans", "")
    logger.debug(
        "Zipkin trace may be located at this URL {}/traces/{}".format(
            base_url, self.trace_id))
End the report.
def lpad(col, len, pad):
    """
    Left-pad the string column to width `len` with `pad`.

    >>> df = spark.createDataFrame([('abcd',)], ['s',])
    >>> df.select(lpad(df.s, 6, '#').alias('s')).collect()
    [Row(s=u'##abcd')]
    """
    # Delegate to the JVM-side Spark SQL `lpad` function via the active
    # SparkContext's Py4J gateway.
    sc = SparkContext._active_spark_context
    return Column(sc._jvm.functions.lpad(_to_java_column(col), len, pad))
Left-pad the string column to width `len` with `pad`. >>> df = spark.createDataFrame([('abcd',)], ['s',]) >>> df.select(lpad(df.s, 6, '#').alias('s')).collect() [Row(s=u'##abcd')]
def _get_string_match_value(self, string, string_match_type):
    """Return the value to match against for the given string match type.

    EXACT returns the raw string; the other match types return a compiled
    regular expression. Returns None for an unrecognised match type.
    """
    if string_match_type == Type(**get_type_data('EXACT')):
        return string
    if string_match_type == Type(**get_type_data('IGNORECASE')):
        # prefix match, case-insensitive
        return re.compile('^' + string, re.I)
    if string_match_type == Type(**get_type_data('WORD')):
        # substring match
        return re.compile('.*' + string + '.*')
    if string_match_type == Type(**get_type_data('WORDIGNORECASE')):
        return re.compile('.*' + string + '.*', re.I)
Gets the match value
def find_by_reference_ids(reference_ids, _connection=None, page_size=100,
                          page_number=0, sort_by=enums.DEFAULT_SORT_BY,
                          sort_order=enums.DEFAULT_SORT_ORDER):
    """ List all videos identified by a list of reference ids.

    :param reference_ids: list or tuple of reference-id strings
    :param _connection: optional connection to use for the request
    :param page_size: number of items per result page
    :param page_number: zero-based page to fetch
    :param sort_by: field to sort the results by
    :param sort_order: ascending/descending sort order
    :raises exceptions.PyBrightcoveError: if reference_ids is not a
        list or tuple
    :return: an ItemResultSet over the matching Video objects
    """
    if not isinstance(reference_ids, (list, tuple)):
        err = "Video.find_by_reference_ids expects an iterable argument"
        raise exceptions.PyBrightcoveError(err)
    # The API expects a single comma-separated string of ids.
    ids = ','.join(reference_ids)
    return connection.ItemResultSet(
        'find_videos_by_reference_ids', Video, _connection, page_size,
        page_number, sort_by, sort_order, reference_ids=ids)
List all videos identified by a list of reference ids
def serialize_dictionary(dictionary):
    """Recursively convert a dictionary's values into string-friendly form.

    Qt URL/date/time values and Python date(time)s are rendered as strings;
    nested dictionaries are processed recursively; everything else is kept
    as-is.

    :param dictionary: The dictionary.
    :type dictionary: dict

    :return: The stringified dictionary.
    :rtype: dict
    """
    result = {}
    for key, value in list(dictionary.items()):
        if isinstance(value, QUrl):
            result[key] = value.toString()
        elif isinstance(value, (QDate, QDateTime)):
            result[key] = value.toString(Qt.ISODate)
        elif isinstance(value, (datetime, date)):
            # datetime is a date subclass; both serialize via isoformat().
            result[key] = value.isoformat()
        elif isinstance(value, dict):
            # Recursive call for nested dictionaries.
            result[key] = serialize_dictionary(value)
        else:
            result[key] = value
    return result
Function to stringify a dictionary recursively. :param dictionary: The dictionary. :type dictionary: dict :return: The string. :rtype: basestring
def clean_item_no_list(i):
    """Return a JSON-clean version of ``i`` or None, logging an info
    message when the type cannot be handled.

    Exact type matching (not isinstance) is used deliberately, so
    subclasses are not silently coerced.
    """
    itype = type(i)
    if itype == dict:
        return clean_dict(i, clean_item_no_list)
    if itype == list or itype == tuple:
        # Lists are converted the same way as tuples.
        return clean_tuple(i, clean_item_no_list)
    if itype in (numpy.float32, numpy.float64):
        return float(i)
    if itype in (numpy.int16, numpy.uint16, numpy.int32, numpy.uint32):
        return int(i)
    if itype in (float, str, int, bool, type(None)):
        # Already JSON-clean; pass through untouched.
        return i
    logging.info("[2] Unable to handle type %s", itype)
    return None
Return a json-clean item or None. Will log info message for failure.
def load_features(self, features, image_type=None, from_array=False,
                  threshold=0.001):
    """ Load features from current Dataset instance or a list of files.

    Args:
        features: List containing paths to, or names of, features to
            extract. Each element in the list must be a string containing
            either a path to an image, or the name of a feature (as named
            in the current Dataset). Mixing of paths and feature names
            within the list is not allowed.
        image_type: Optional suffix indicating which kind of image to use
            for analysis. Only used if features are taken from the Dataset;
            if features is a list of filenames, image_type is ignored.
        from_array: If True, the features argument is interpreted as a
            string pointing to the location of a 2D ndarray on disk
            containing feature data, where rows are voxels and columns are
            individual features.
        threshold: If features are taken from the dataset, this is the
            threshold passed to the meta-analysis module to generate fresh
            images.
    """
    if from_array:
        # features points at a saved ndarray; only a single path is used.
        if isinstance(features, list):
            features = features[0]
        self._load_features_from_array(features)
    elif path.exists(features[0]):
        # First element is an existing file -> treat all entries as images.
        self._load_features_from_images(features)
    else:
        # Otherwise treat entries as feature names in the current Dataset.
        self._load_features_from_dataset(
            features, image_type=image_type, threshold=threshold)
Load features from current Dataset instance or a list of files. Args: features: List containing paths to, or names of, features to extract. Each element in the list must be a string containing either a path to an image, or the name of a feature (as named in the current Dataset). Mixing of paths and feature names within the list is not allowed. image_type: Optional suffix indicating which kind of image to use for analysis. Only used if features are taken from the Dataset; if features is a list of filenames, image_type is ignored. from_array: If True, the features argument is interpreted as a string pointing to the location of a 2D ndarray on disk containing feature data, where rows are voxels and columns are individual features. threshold: If features are taken from the dataset, this is the threshold passed to the meta-analysis module to generate fresh images.
def autoconfig_url_from_registry():
    """ Get the PAC ``AutoConfigURL`` value from the Windows Registry.

    This setting is visible as the "use automatic configuration script"
    field in Internet Options > Connection > LAN Settings.

    :return: The value from the registry, or None if the value isn't
        configured or available. Note that it may be local filesystem
        path instead of a URL.
    :rtype: str|None
    :raises NotWindowsError: If called on a non-Windows platform.
    """
    if not ON_WINDOWS:
        raise NotWindowsError()

    try:
        with winreg.OpenKey(winreg.HKEY_CURRENT_USER,
                            'Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings') as key:
            return winreg.QueryValueEx(key, 'AutoConfigURL')[0]
    except WindowsError:
        # Key or value doesn't exist: no auto-config URL is set.
        return
Get the PAC ``AutoConfigURL`` value from the Windows Registry. This setting is visible as the "use automatic configuration script" field in Internet Options > Connection > LAN Settings. :return: The value from the registry, or None if the value isn't configured or available. Note that it may be local filesystem path instead of a URL. :rtype: str|None :raises NotWindowsError: If called on a non-Windows platform.
def query_all(self):
    """ Query all records without limit and offset.

    Reuses the stored filter/ordering/grouping state but imposes no paging.
    """
    return self.query_model(self.model, self.condition,
                            order_by=self.order_by, group_by=self.group_by,
                            having=self.having)
Query all records without limit and offset.
def get_comments_of_delivery_note_per_page(self, delivery_note_id,
                                           per_page=1000, page=1):
    """ Get comments of delivery note per page.

    :param delivery_note_id: the delivery note
    :param per_page: How many objects per page. Default: 1000
    :param page: Which page. Default: 1
    :return: list
    """
    # Delegate paging to the shared per-page resource fetcher, filtering
    # by the delivery note id.
    return self._get_resource_per_page(
        resource=DELIVERY_NOTE_COMMENTS,
        per_page=per_page,
        page=page,
        params={'delivery_note_id': delivery_note_id},
    )
Get comments of delivery note per page :param delivery_note_id: the delivery note :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :return: list
def to_config(self, k, v):
    """ Hook method that allows conversion of individual options.

    :param k: the key of the option
    :type k: str
    :param v: the value
    :type v: object
    :return: the potentially processed value
    :rtype: object
    """
    # The "setup" option is serialized to its command-line form; all
    # other options defer to the parent implementation.
    if k == "setup":
        return base.to_commandline(v)
    return super(DataGenerator, self).to_config(k, v)
Hook method that allows conversion of individual options. :param k: the key of the option :type k: str :param v: the value :type v: object :return: the potentially processed value :rtype: object
def annotate(args):
    """
    %prog annotate new.bed old.bed 2> log

    Annotate the `new.bed` with features from `old.bed` for the purpose of
    gene numbering.

    Ambiguity in ID assignment can be resolved by either of the following
    2 methods:
    - `alignment`: make use of global sequence alignment score
      (calculated by `needle`)
    - `overlap`: make use of overlap length (calculated by `intersectBed`)

    Transfer over as many identifiers as possible while following
    guidelines:
    http://www.arabidopsis.org/portals/nomenclature/guidelines.jsp#editing

    Note: Following RegExp pattern describes the structure of the
    identifier assigned to features in the `new.bed` file.

    new_id_pat = re.compile(r"^\d+\.[cemtx]+\S+")

    Examples: 23231.m312389, 23231.t004898, 23231.tRNA.144
    Adjust the value of `new_id_pat` manually as per your ID naming
    conventions.
    """
    from jcvi.utils.grouper import Grouper

    valid_resolve_choices = ["alignment", "overlap"]

    p = OptionParser(annotate.__doc__)
    p.add_option("--resolve", default="alignment", choices=valid_resolve_choices,
                 help="Resolve ID assignment based on a certain metric" \
                    + " [default: %default]")
    p.add_option("--atg_name", default=False, action="store_true",
                 help="Specify is locus IDs in `new.bed` file follow ATG nomenclature" \
                    + " [default: %default]")

    g1 = OptionGroup(p, "Optional parameters (alignment):\n" \
            + "Use if resolving ambiguities based on sequence `alignment`")
    g1.add_option("--pid", dest="pid", default=35., type="float",
            help="Percent identity cutoff [default: %default]")
    g1.add_option("--score", dest="score", default=250., type="float",
            help="Alignment score cutoff [default: %default]")
    p.add_option_group(g1)

    g2 = OptionGroup(p, "Optional parameters (overlap):\n" \
            + "Use if resolving ambiguities based on `overlap` length\n" \
            + "Parameters equivalent to `intersectBed`")
    g2.add_option("-f", dest="f", default=0.5, type="float",
            help="Minimum overlap fraction (0.0 - 1.0) [default: %default]")
    g2.add_option("-r", dest="r", default=False, action="store_true",
            help="Require fraction overlap to be reciprocal [default: %default]")
    g2.add_option("-s", dest="s", default=True, action="store_true",
            help="Require same strandedness [default: %default]")
    p.add_option_group(g2)

    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    nbedfile, obedfile = args
    npf, opf = nbedfile.rsplit(".", 1)[0], obedfile.rsplit(".", 1)[0]

    # Make consolidated.bed (skipped if already present on disk).
    cbedfile = "consolidated.bed"
    if not os.path.isfile(cbedfile):
        consolidate(nbedfile, obedfile, cbedfile)
    else:
        logging.warning("`{0}` already exists. Skipping step".format(cbedfile))

    logging.warning("Resolving ID assignment ambiguity based on `{0}`".\
            format(opts.resolve))

    if opts.resolve == "alignment":
        # Get pairs and prompt to run needle
        pairsfile = "nw.pairs"
        scoresfile = "nw.scores"
        if not os.path.isfile(pairsfile):
            get_pairs(cbedfile, pairsfile)
        else:
            logging.warning("`{0}` already exists. Checking for needle output".\
                    format(pairsfile))

        # If needle scores do not exist, prompt user to run needle
        if not os.path.isfile(scoresfile):
            logging.error("`{0}` does not exist. Please process {1} using `needle`".\
                    format(scoresfile, pairsfile))
            sys.exit()
    else:
        scoresfile = "ovl.scores"
        # Calculate overlap length using intersectBed
        calculate_ovl(nbedfile, obedfile, opts, scoresfile)

    logging.warning("`{0}' exists. Storing scores in memory".\
            format(scoresfile))
    scores = read_scores(scoresfile, opts)

    # Iterate through consolidated bed and filter piles based on score.
    abedline = {}

    cbed = Bed(cbedfile)
    g = Grouper()
    for c in cbed:
        accn = c.accn
        # Consolidated accessions are ";"-joined; group them together.
        g.join(*accn.split(";"))

    nbedline = {}
    nbed = Bed(nbedfile)
    for line in nbed:
        nbedline[line.accn] = line

    splits = set()
    for chr, chrbed in nbed.sub_beds():
        abedline, splits = annotate_chr(chr, chrbed, g, scores, nbedline,
                                        abedline, opts, splits)

    if splits is not None:
        abedline = process_splits(splits, scores, nbedline, abedline)

    abedfile = npf + ".annotated.bed"
    afh = open(abedfile, "w")
    for accn in abedline:
        print(abedline[accn], file=afh)
    afh.close()

    sort([abedfile, "-i"])
%prog annotate new.bed old.bed 2> log Annotate the `new.bed` with features from `old.bed` for the purpose of gene numbering. Ambiguity in ID assignment can be resolved by either of the following 2 methods: - `alignment`: make use of global sequence alignment score (calculated by `needle`) - `overlap`: make use of overlap length (calculated by `intersectBed`) Transfer over as many identifiers as possible while following guidelines: http://www.arabidopsis.org/portals/nomenclature/guidelines.jsp#editing Note: Following RegExp pattern describes the structure of the identifier assigned to features in the `new.bed` file. new_id_pat = re.compile(r"^\d+\.[cemtx]+\S+") Examples: 23231.m312389, 23231.t004898, 23231.tRNA.144 Adjust the value of `new_id_pat` manually as per your ID naming conventions.
def raw_from_delimited(msgs: DelimitedMsg) -> RawMsgs:
    """\
    Split a delimited message into a ``(header, payload)`` pair.

    The input consists of header frames, a single empty delimiter frame,
    and payload frames; the split happens at the last empty frame.
    """
    split_at = _rindex(msgs, b'')
    header = tuple(msgs[:split_at])
    payload = tuple(msgs[split_at + 1:])
    return header, payload
\ From a message consisting of header frames, delimiter frame, and payload frames, return a tuple `(header, payload)`. The payload frames may be returned as sequences of bytes (raw) or as `Message`s.
def fix_spelling(words, join=True, joinstring=' '):
    """Simple function for quickly correcting misspelled words.

    Parameters
    ----------
    words: list of str or str
        Either a list of pretokenized words or a string. In case of a
        string, it will be split using the default behaviour of the
        string.split() function.
    join: boolean (default: True)
        Should we join the list of words into a single string.
    joinstring: str (default: ' ')
        The string that will be used to join together the fixed words.

    Returns
    -------
    str
        In case join is True
    list of str
        In case join is False.
    """
    # Delegate to the shared Vabamorf singleton instance.
    return Vabamorf.instance().fix_spelling(words, join, joinstring)
Simple function for quickly correcting misspelled words. Parameters ---------- words: list of str or str Either a list of pretokenized words or a string. In case of a string, it will be split using the default behaviour of the string.split() function. join: boolean (default: True) Should we join the list of words into a single string. joinstring: str (default: ' ') The string that will be used to join together the fixed words. Returns ------- str In case join is True list of str In case join is False.
def similarity(w1, w2, threshold=0.5):
    """Compare two word strings case-insensitively and return their
    similarity ratio when it exceeds ``threshold``, otherwise 0.

    NOTE: if the result looks more like junk, increase the threshold value.
    """
    score = SM(None, str(w1).lower(), str(w2).lower()).ratio()
    if score > threshold:
        return score
    return 0
compare two strings 'words', and return the ratio of similarity if it is larger than the threshold, or 0 otherwise. NOTE: if the result looks more like junk, increase the threshold value.
def getKnownPlayers(reset=False):
    """Identify all of the currently defined players.

    Results are memoized in the module-level ``playerCache``; pass
    ``reset=True`` to force a rescan of the players folder.

    :param reset: when True, rebuild the cache even if already populated
    :return: the player cache, mapping player name -> PlayerRecord
    """
    global playerCache
    if not playerCache or reset:
        jsonFiles = os.path.join(c.PLAYERS_FOLDER, "*.json")
        for playerFilepath in glob.glob(jsonFiles):
            filename = os.path.basename(playerFilepath)
            # Strip the "player_" prefix and ".json" suffix to get the name.
            # (raw strings: "\.json$" was an invalid escape sequence, which
            # is a SyntaxWarning/-error on modern Python)
            name = re.sub(r"^player_", "", filename)
            name = re.sub(r"\.json$", "", name)
            player = PlayerRecord(name)
            playerCache[player.name] = player
    return playerCache
identify all of the currently defined players
def get_disparity(self, pair):
    """ Compute disparity from image pair (left, right).

    First, convert images to grayscale if needed. Then pass to the
    ``_block_matcher`` for stereo matching.
    """
    gray = []
    if pair[0].ndim == 3:
        # 3-channel (BGR) input: convert each side to grayscale first.
        for side in pair:
            gray.append(cv2.cvtColor(side, cv2.COLOR_BGR2GRAY))
    else:
        # Already single-channel; use as-is.
        gray = pair
    return self._block_matcher.compute(gray[0], gray[1],
                                       disptype=cv2.CV_32F)
Compute disparity from image pair (left, right). First, convert images to grayscale if needed. Then pass to the ``_block_matcher`` for stereo matching.
def resume(profile_process='worker'):
    """
    Resume paused profiling.

    Parameters
    ----------
    profile_process : string
        whether to profile kvstore `server` or `worker`.
        server can only be profiled when kvstore is of type dist.
        if this is not passed, defaults to `worker`
    """
    profile_process2int = {'worker': 0, 'server': 1}
    # NOTE(review): the first argument 0 presumably means "paused = false",
    # i.e. un-pause — confirm against the MXProcessProfilePause C API.
    check_call(_LIB.MXProcessProfilePause(int(0),
                                          profile_process2int[profile_process],
                                          profiler_kvstore_handle))
Resume paused profiling. Parameters ---------- profile_process : string whether to profile kvstore `server` or `worker`. server can only be profiled when kvstore is of type dist. if this is not passed, defaults to `worker`
def choice_SlackBuild(self):
    """View .SlackBuild file
    """
    # Fetch the .SlackBuild script text for this package from the SBo URL.
    SlackBuild = ReadSBo(self.sbo_url).slackbuild(self.name, ".SlackBuild")
    # Pad the content so the pager fills the screen, then display it.
    fill = self.fill_pager(SlackBuild)
    self.pager(SlackBuild + fill)
View .SlackBuild file
def _insert_update(self, index: int, length: int) -> None:
    """Shift the recorded spans in self._type_to_spans to account for an
    insertion of ``length`` characters at position ``index``.
    """
    self_start, self_end = self._span
    for span_list in self._type_to_spans.values():
        for span in span_list:
            start, end = span[0], span[1]
            # The end moves when the insertion lands strictly before it,
            # or exactly on it when that end coincides with this object's
            # own span end.
            if index < end or end == index == self_end:
                span[1] = end + length
            # The start moves when the insertion is before it, or exactly
            # at it as long as it is not this object's own span start.
            if index < start or start == index != self_start:
                span[0] = start + length
Update self._type_to_spans according to the added length.
def get_text(self):
    '''
    ::returns: a rendered string representation of the given row
    '''
    # Collect each column's cell lines, then transpose so that every
    # output line holds one cell line from each column; short columns
    # are padded with a single space.
    cell_lines = [column.get_cell_lines() for column in self.columns]
    rendered = [' '.join(row_parts)
                for row_parts in zip_longest(*cell_lines, fillvalue=' ')]
    return '\n'.join(rendered)
::returns: a rendered string representation of the given row
def download_url(url, filename=None, spoof=False, iri_fallback=True,
                 verbose=True, new=False, chunk_size=None):
    r"""
    downloads a url to a filename.

    Args:
        url (str): url to download
        filename (str): path to download to. Defaults to basename of url
        spoof (bool): if True pretends to be Firefox
        iri_fallback : falls back to requests get call if there is a
            UnicodeError
        verbose (bool): print progress and status messages
        new (bool): use the streaming requests-based code path
        chunk_size (int): streaming chunk size in bytes (defaults to 1 MB)

    References:
        http://blog.moleculea.com/2012/10/04/urlretrieve-progres-indicator/
        http://stackoverflow.com/questions/15644964/python-progress-bar-and-downloads
        http://stackoverflow.com/questions/16694907/how-to-download-large-file-in-python-with-requests-py

    TODO:
        Delete any partially downloaded files

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_grabdata import *  # NOQA
        >>> url = 'http://www.jrsoftware.org/download.php/ispack.exe'
        >>> fpath = download_url(url)
        >>> print(fpath)
        ispack.exe
    """
    def reporthook_(num_blocks, block_nBytes, total_nBytes, start_time=0):
        # Render a single-line, carriage-return progress indicator.
        total_seconds = time.time() - start_time + 1E-9
        num_kb_down = int(num_blocks * block_nBytes) / 1024
        num_mb_down = num_kb_down / 1024
        percent_down = int(num_blocks * block_nBytes * 100 / total_nBytes)
        kb_per_second = int(num_kb_down / (total_seconds))
        fmt_msg = '\r...%d%%, %d MB, %d KB/s, %d seconds passed'
        msg = fmt_msg % (percent_down, num_mb_down, kb_per_second,
                         total_seconds)
        sys.stdout.write(msg)
        sys.stdout.flush()

    if verbose:
        reporthook = functools.partial(reporthook_, start_time=time.time())
    else:
        reporthook = None
    if filename is None:
        filename = basename(url)
    if verbose:
        print('[utool] Downloading url=%r to filename=%r' % (url, filename))
    if new:
        # Streaming requests-based code path.
        import requests
        #from contextlib import closing
        con = requests.get(url, stream=True, timeout=TIMEOUT)
        try:
            #import math
            content_length = con.headers.get('content-length', None)
            if content_length is None:
                # No progress available
                with open(filename, 'wb') as file_:
                    file_.write(con.content)
            else:
                if chunk_size is None:
                    chunk_size = 2 ** 20  # one megabyte at a time
                content_length = int(content_length)
                #length = int(math.ceil(content_length / chunk_size))
                with open(filename, 'wb') as file_:
                    chunk_iter = con.iter_content(chunk_size=chunk_size)
                    #chunk_iter = ut.ProgIter(chunk_iter, length=length, lbl='downloading', freq=1)
                    for count, chunk in enumerate(chunk_iter):
                        if chunk:
                            if reporthook:
                                reporthook(count, chunk_size, content_length)
                            file_.write(chunk)
        finally:
            con.close()
        return filename
    # Weird that we seem to need this here for tests
    import urllib  # NOQA
    try:
        if spoof:
            # Different agents that can be used for spoofing
            user_agents = [
                'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',  # NOQA
                'Opera/9.25 (Windows NT 5.1; U; en)',
                'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',  # NOQA
                'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
                'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',  # NOQA
                'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9'
            ]

            class SpoofingOpener(urllib.FancyURLopener, object):
                version = user_agents[0]

            spoofing_opener = SpoofingOpener()
            spoofing_opener.retrieve(url, filename=filename,
                                     reporthook=reporthook)
        else:
            # no spoofing
            if six.PY2:
                urllib.urlretrieve(url, filename=filename,
                                   reporthook=reporthook)
            elif six.PY3:
                import urllib.request
                urllib.request.urlretrieve(url, filename=filename,
                                           reporthook=reporthook)
            else:
                assert False, 'unknown python'
    except UnicodeError as ex:
        # iri error: urlretrieve cannot handle non-ASCII URLs.
        import requests
        print('Detected iri error: %r' % (ex,))
        print('Falling back to requests.get (no progress is shown)')
        resp = requests.get(url, timeout=TIMEOUT)
        with open(filename, 'wb') as file_:
            file_.write(resp.content)
    if verbose:
        print('')
        print('[utool] Finished downloading filename=%r' % (filename,))
    return filename
r""" downloads a url to a filename. Args: url (str): url to download filename (str): path to download to. Defaults to basename of url spoof (bool): if True pretends to by Firefox iri_fallback : falls back to requests get call if there is a UnicodeError References: http://blog.moleculea.com/2012/10/04/urlretrieve-progres-indicator/ http://stackoverflow.com/questions/15644964/python-progress-bar-and-downloads http://stackoverflow.com/questions/16694907/how-to-download-large-file-in-python-with-requests-py TODO: Delete any partially downloaded files Example: >>> # DISABLE_DOCTEST >>> from utool.util_grabdata import * # NOQA >>> url = 'http://www.jrsoftware.org/download.php/ispack.exe' >>> fpath = download_url(url) >>> print(fpath) ispack.exe
def generate_evenly_distributed_data(dim = 2000, num_active = 40,
                                     num_samples = 1000, num_negatives = 500):
    """
    Generate binary data drawn from a uniform distribution.

    The binning structure from Poirazi & Mel is ignored, and all
    (dim choose num_active) arrangements are possible. num_negatives
    samples are put into a separate negatives category for output
    compatibility with generate_data, but are otherwise identical.
    """
    data = []
    for _ in range(num_samples):
        # Pick the active bit positions uniformly without replacement,
        # then expand into a dense 0/1 row.
        active_indices = numpy.random.choice(dim, size=num_active,
                                             replace=False)
        row = [0] * dim
        for idx in active_indices:
            row[idx] = 1
        data.append(row)
    # First num_negatives rows become the negatives split.
    return data[num_negatives:], data[:num_negatives]
Generates a set of data drawn from a uniform distribution. The binning structure from Poirazi & Mel is ignored, and all (dim choose num_active) arrangements are possible. num_negatives samples are put into a separate negatives category for output compatibility with generate_data, but are otherwise identical.
def data_find_text(data, path):
    """Return the text value of the element-as-tuple in tuple ``data``
    using simplified XPath ``path``, or None when no text is present.
    """
    element = data_find(data, path)
    if not isinstance(element, (list, tuple)):
        return None
    # Text children are whatever isn't a nested element or attribute dict.
    texts = [child for child in element[1:]
             if not isinstance(child, (tuple, list, dict))]
    if not texts:
        return None
    # How should we deal with decoding errors when a text is binary?
    # For now, we're using the ``strict`` mode. Other options here:
    # https://docs.python.org/3/library/functions.html#open.
    decoded = [six.ensure_text(x, encoding="utf-8", errors="strict")
               for x in texts]
    return " ".join(decoded)
Return the text value of the element-as-tuple in tuple ``data`` using simplified XPath ``path``.
def getPhysicalMaximum(self,chn=None):
    """
    Returns the maximum physical value of signal edfsignal.

    Parameters
    ----------
    chn : int
        channel number; when None, the maxima of all channels are
        returned as an array. Out-of-range channels yield 0.

    Examples
    --------
    >>> import pyedflib
    >>> f = pyedflib.data.test_generator()
    >>> f.getPhysicalMaximum(0)==1000.0
    True
    >>> f._close()
    >>> del f
    """
    if chn is None:
        # Collect the physical maximum of every channel in the file.
        result = np.zeros(self.signals_in_file)
        for idx in np.arange(self.signals_in_file):
            result[idx] = self.physical_max(idx)
        return result
    if 0 <= chn < self.signals_in_file:
        return self.physical_max(chn)
    return 0
Returns the maximum physical value of signal edfsignal. Parameters ---------- chn : int channel number Examples -------- >>> import pyedflib >>> f = pyedflib.data.test_generator() >>> f.getPhysicalMaximum(0)==1000.0 True >>> f._close() >>> del f
def error(request, message, extra_tags='', fail_silently=False, async_=False):
    """Adds a message with the ``ERROR`` level.

    :param request: the current request
    :param message: message text to add
    :param extra_tags: extra CSS tags attached to the message
    :param fail_silently: suppress errors from the message framework
    :param async_: deliver through the async messages backend when enabled

    NOTE: the keyword was renamed from ``async`` to ``async_`` because
    ``async`` became a reserved keyword in Python 3.7, making the original
    signature a SyntaxError; positional callers are unaffected.
    """
    if ASYNC and async_:
        # NOTE(review): this dispatches via messages.debug() even though this
        # is the ERROR-level helper — looks like a copy/paste slip; compare
        # with the sibling level helpers before changing.
        messages.debug(_get_user(request), message)
    else:
        add_message(request, constants.ERROR, message, extra_tags=extra_tags,
                    fail_silently=fail_silently)
Adds a message with the ``ERROR`` level.
def serialize(ty, *values, **kwargs):
    """
    Serialize value using type specification in ty.

    ABI.serialize('int256', 1000)
    ABI.serialize('(int, int256)', 1000, 2000)

    :param ty: ABI type specification string
    :param values: value(s) to serialize; a single value for non-tuple
        types, one value per member for tuple types
    :raises EthereumError: if the type specification cannot be parsed
    :raises ValueError: if multiple values are passed for a non-tuple type
    """
    try:
        parsed_ty = abitypes.parse(ty)
    except Exception as e:
        # Catch and rebrand parsing errors
        raise EthereumError(str(e))

    if parsed_ty[0] != 'tuple':
        if len(values) > 1:
            raise ValueError('too many values passed for non-tuple')
        values = values[0]
        if isinstance(values, str):
            values = values.encode()
    else:
        # implement type forgiveness for bytesM/string types
        # allow python strs also to be used for Solidity bytesM/string types
        values = tuple(val.encode() if isinstance(val, str) else val for val in values)

    # Static head and dynamic tail are concatenated per the ABI encoding.
    result, dyn_result = ABI._serialize(parsed_ty, values)
    return result + dyn_result
Serialize value using type specification in ty. ABI.serialize('int256', 1000) ABI.serialize('(int, int256)', 1000, 2000)
def getFunc(self, o: Any) -> Callable:
    """
    Return the first registered handler capable of processing ``o``'s type.

    :param o: the object to process
    :return: the matching handler function
    :raises RuntimeError: when no registered route accepts ``o``
    """
    for handled_type, handler in self.routes.items():
        if isinstance(o, handled_type):
            return handler
    # No route matched: log the available handlers before failing.
    logger.error("Unhandled msg {}, available handlers are:".format(o))
    for handled_type in self.routes.keys():
        logger.error(" {}".format(handled_type))
    raise RuntimeError("unhandled msg: {}".format(o))
Get the next function from the list of routes that is capable of processing o's type. :param o: the object to process :return: the next function
def removeGaps(self) :
    """Remove all gaps between regions"""
    for i in range(1, len(self.children)) :
        # A gap exists when this child starts after the previous one ends;
        # the (negative) offset prev.x2 - cur.x1 presumably shifts this
        # child's subtree left to close the gap — confirm aux_moveTree's
        # offset convention.
        if self.children[i].x1 > self.children[i-1].x2:
            aux_moveTree(self.children[i-1].x2-self.children[i].x1, self.children[i])
Remove all gaps between regions
def stopdocs():
    "stop Sphinx watchdog"
    # Retry up to 4 times: show the process info once on the first pass,
    # send kill, wait briefly, and stop as soon as no pid remains.
    for i in range(4):
        pid = watchdog_pid()
        if pid:
            if not i:
                sh('ps {}'.format(pid))
            sh('kill {}'.format(pid))
            time.sleep(.5)
        else:
            break
stop Sphinx watchdog
def updateColumnValue(self, column, value, index=None):
    """
    Assigns the value for the column of this record to the inputed value.

    :param      index | <int>
                value | <variant>
    """
    # Resolve the tree-widget column index from the column's name when the
    # caller did not supply one explicitly.
    if index is None:
        index = self.treeWidget().column(column.name())

    # Dates, times and numbers are stored through the EditRole so editing
    # and sorting keep the native type; everything else becomes plain text.
    if type(value) == datetime.date:
        self.setData(index, Qt.EditRole, wrapVariant(value))
    elif type(value) == datetime.time:
        self.setData(index, Qt.EditRole, wrapVariant(value))
    elif type(value) == datetime.datetime:
        self.setData(index, Qt.EditRole, wrapVariant(value))
    elif type(value) in (float, int):
        if column.enum():
            # Enum-backed numeric columns show the human-readable label.
            self.setText(index, column.enum().displayText(value))
        else:
            self.setData(index, Qt.EditRole, wrapVariant(value))
    elif value is not None:
        self.setText(index, nativestring(value))
    else:
        # None renders as an empty cell.
        self.setText(index, '')

    # Keep the raw value available for sorting regardless of display form.
    self.setSortData(index, value)

    # map default value information
    # Prefer a per-column mapper registered on the tree widget; the
    # AttributeError fallback covers widgets without columnMappers().
    try:
        mapper = self.treeWidget().columnMappers().get(column.columnName())
    except AttributeError:
        mapper = None

    if mapper is None:
        # Fall back to the column's string format, if one is defined.
        form = column.stringFormat()
        if form:
            mapper = form.format

    if mapper:
        # NOTE(review): a mapper overrides any text set above, including
        # the enum display text -- confirm this precedence is intended.
        self.setText(index, mapper(value))
Assigns the value for the column of this record to the inputed value. :param index | <int> value | <variant>
def editpropset(self):
    '''
    Parse a relative-property assignment edit, e.g.:

        :foo=10
    '''
    self.ignore(whitespace)
    # A relative property assignment must begin with ':'.
    if not self.nextstr(':'):
        self._raiseSyntaxExpects(':')

    # Parse the relative property name, then require '=' and a value.
    # Each call below consumes input, so the order is significant.
    relp = self.relprop()
    self.ignore(whitespace)
    self.nextmust('=')
    self.ignore(whitespace)
    valu = self.valu()
    return s_ast.EditPropSet(kids=(relp, valu))
:foo=10
def length(self):
    """Return the length of this response.

    We expose this as an attribute since using len() directly can fail
    for responses larger than sys.maxint.

    Returns:
      Response length (as int or long)
    """
    def _range_length(content_range):
        # 'bytes 0-9/100' -> 10
        _, _, range_spec = content_range.partition(' ')
        byte_range, _, _ = range_spec.partition('/')
        start, _, end = byte_range.partition('-')
        return int(end) - int(start) + 1

    has_encoding = '-content-encoding' in self.info
    has_range = 'content-range' in self.info
    if has_encoding and has_range:
        # httplib2 rewrites content-length for compressed transfers; the
        # content-range header is the only trustworthy source then.
        return _range_length(self.info['content-range'])
    if 'content-length' in self.info:
        return int(self.info.get('content-length'))
    if has_range:
        return _range_length(self.info['content-range'])
    return len(self.content)
Return the length of this response. We expose this as an attribute since using len() directly can fail for responses larger than sys.maxint. Returns: Response length (as int or long)
def alignment_a(self):
    """Computes the rotation matrix that aligns the unit cell with the
    Cartesian axes, starting with cell vector a.

    * a parallel to x
    * b in xy-plane with b_y positive
    * c with c_z positive
    """
    from molmod.transformations import Rotation
    # x axis follows cell vector a.
    x_axis = self.matrix[:, 0].copy()
    x_axis /= np.linalg.norm(x_axis)
    # z axis is normal to the (a, b) plane.
    z_axis = np.cross(x_axis, self.matrix[:, 1])
    z_axis /= np.linalg.norm(z_axis)
    # y axis completes the right-handed frame.
    y_axis = np.cross(z_axis, x_axis)
    y_axis /= np.linalg.norm(y_axis)
    return Rotation(np.array([x_axis, y_axis, z_axis]))
Computes the rotation matrix that aligns the unit cell with the Cartesian axes, starting with cell vector a. * a parallel to x * b in xy-plane with b_y positive * c with c_z positive
def RegisterArtifact(self,
                     artifact_rdfvalue,
                     source="datastore",
                     overwrite_if_exists=False,
                     overwrite_system_artifacts=False):
    """Registers a new artifact."""
    artifact_name = artifact_rdfvalue.name
    if artifact_name in self._artifacts:
        if not overwrite_if_exists:
            details = "artifact already exists and `overwrite_if_exists` is unset"
            raise rdf_artifacts.ArtifactDefinitionError(artifact_name, details)
        if not overwrite_system_artifacts:
            existing = self._artifacts[artifact_name]
            if not existing.loaded_from.startswith("datastore:"):
                # This artifact was not uploaded to the datastore but came
                # from a file, refuse to overwrite.
                details = "system artifact cannot be overwritten"
                raise rdf_artifacts.ArtifactDefinitionError(artifact_name, details)

    # Preserve where the artifact was loaded from to help debugging, and
    # clear any stale errors from a previous registration.
    artifact_rdfvalue.loaded_from = source
    artifact_rdfvalue.error_message = None
    self._artifacts[artifact_rdfvalue.name] = artifact_rdfvalue
Registers a new artifact.
def _check_allowed_values(self, parameters): """ Check whether the given parameter value is allowed. Log messages into ``self.result``. :param dict parameters: the given parameters """ for key, allowed_values in self.ALLOWED_VALUES: self.log([u"Checking allowed values for parameter '%s'", key]) if key in parameters: value = parameters[key] if value not in allowed_values: self._failed(u"Parameter '%s' has value '%s' which is not allowed." % (key, value)) return self.log(u"Passed")
Check whether the given parameter value is allowed. Log messages into ``self.result``. :param dict parameters: the given parameters
def _body(self, paragraphs): """Generate a body of text""" body = [] for i in range(paragraphs): paragraph = self._paragraph(random.randint(1, 10)) body.append(paragraph) return '\n'.join(body)
Generate a body of text
def load_feedback():
    """Open existing feedback file.

    Reads ``_feedback_file`` and returns its parsed JSON content as a
    dict.  A missing file yields ``{}``; an unparseable file yields a
    crash-marker dict instead of raising.
    """
    if os.path.exists(_feedback_file):
        # Context manager guarantees the handle is closed even on error
        # (the original left the file object to the garbage collector).
        with open(_feedback_file, 'r') as f:
            cont = f.read()
    else:
        cont = '{}'
    try:
        return json.loads(cont) if cont else {}
    except ValueError:
        return {"result": "crash",
                "text": "Feedback file has been modified by user !"}
Open existing feedback file
def initialize(self, *args):
    """Initialize a recommender by resetting stored users and items."""
    # observed-user counter and per-user data store
    self.n_user = 0
    self.users = {}
    # observed-item counter and per-item data store
    self.n_item = 0
    self.items = {}
Initialize a recommender by resetting stored users and items.
def dir():
    """Return the list of patched function names.  Used for
    patching functions imported from the module.
    """
    names = [
        'abspath', 'dirname', 'exists', 'expanduser', 'getatime',
        'getctime', 'getmtime', 'getsize', 'isabs', 'isdir', 'isfile',
        'islink', 'ismount', 'join', 'lexists', 'normcase', 'normpath',
        'realpath', 'relpath', 'split', 'splitdrive'
    ]
    if IS_PY2:
        # os.path.walk exists only on Python 2.
        names.append('walk')
    if sys.platform != 'win32' or not IS_PY2:
        # samefile is unavailable on Windows under Python 2 only.
        names.append('samefile')
    return names
Return the list of patched function names. Used for patching functions imported from the module.
def is_string_dtype(arr_or_dtype):
    """
    Check whether the provided array or dtype is of the string dtype.

    Parameters
    ----------
    arr_or_dtype : array-like
        The array or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array or dtype is of the string dtype.

    Examples
    --------
    >>> is_string_dtype(str)
    True
    >>> is_string_dtype(object)
    True
    >>> is_string_dtype(int)
    False
    >>>
    >>> is_string_dtype(np.array(['a', 'b']))
    True
    >>> is_string_dtype(pd.Series([1, 2]))
    False
    """
    # TODO: gh-15585: consider making the checks stricter.
    def _looks_like_string(dtype):
        # Object, bytes and unicode kinds count as "string", except the
        # object-backed period dtype.
        return dtype.kind in ('O', 'S', 'U') and not is_period_dtype(dtype)

    return _is_dtype(arr_or_dtype, _looks_like_string)
Check whether the provided array or dtype is of the string dtype. Parameters ---------- arr_or_dtype : array-like The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of the string dtype. Examples -------- >>> is_string_dtype(str) True >>> is_string_dtype(object) True >>> is_string_dtype(int) False >>> >>> is_string_dtype(np.array(['a', 'b'])) True >>> is_string_dtype(pd.Series([1, 2])) False
def step(self, batch_size, ignore_stale_grad=False):
    """Makes one step of parameter update. Should be called after
    `autograd.backward()` and outside of `record()` scope.

    For normal parameter updates, `step()` should be used, which internally calls
    `allreduce_grads()` and then `update()`. However, if you need to get the reduced
    gradients to perform certain transformation, such as in gradient clipping, then
    you may want to manually call `allreduce_grads()` and `update()` separately.

    Parameters
    ----------
    batch_size : int
        Batch size of data processed. Gradient will be normalized by `1/batch_size`.
        Set this to 1 if you normalized loss manually with `loss = mean(loss)`.
    ignore_stale_grad : bool, optional, default=False
        If true, ignores Parameters with stale gradient (gradient that has not
        been updated by `backward` after last step) and skip update.
    """
    # Normalize gradients by 1/batch_size before any update happens.
    self._check_and_rescale_grad(self._scale / batch_size)

    # Lazily finish kvstore/parameter setup on the first call.
    if not self._kv_initialized:
        self._init_kvstore()
    if self._params_to_init:
        self._init_params()

    self._allreduce_grads()
    self._update(ignore_stale_grad)
Makes one step of parameter update. Should be called after `autograd.backward()` and outside of `record()` scope. For normal parameter updates, `step()` should be used, which internally calls `allreduce_grads()` and then `update()`. However, if you need to get the reduced gradients to perform certain transformation, such as in gradient clipping, then you may want to manually call `allreduce_grads()` and `update()` separately. Parameters ---------- batch_size : int Batch size of data processed. Gradient will be normalized by `1/batch_size`. Set this to 1 if you normalized loss manually with `loss = mean(loss)`. ignore_stale_grad : bool, optional, default=False If true, ignores Parameters with stale gradient (gradient that has not been updated by `backward` after last step) and skip update.
def build_paths(self, end_entity_cert):
    """
    Builds a list of ValidationPath objects from a certificate in the
    operating system trust store to the end-entity certificate

    :param end_entity_cert:
        A byte string of a DER or PEM-encoded X.509 certificate, or an
        instance of asn1crypto.x509.Certificate

    :return:
        A list of certvalidator.path.ValidationPath objects that represent
        the possible paths from the end-entity certificate to one of the CA
        certs.

    :raises TypeError:
        when end_entity_cert is neither bytes nor an x509.Certificate
    :raises PathBuildingError:
        when no path to a trusted CA can be constructed
    """
    if not isinstance(end_entity_cert, byte_cls) and not isinstance(end_entity_cert, x509.Certificate):
        raise TypeError(pretty_message(
            '''
            end_entity_cert must be a byte string or an instance of
            asn1crypto.x509.Certificate, not %s
            ''',
            type_name(end_entity_cert)
        ))

    if isinstance(end_entity_cert, byte_cls):
        # PEM input is unarmored to DER before parsing.
        if pem.detect(end_entity_cert):
            _, _, end_entity_cert = pem.unarmor(end_entity_cert)
        end_entity_cert = x509.Certificate.load(end_entity_cert)

    path = ValidationPath(end_entity_cert)
    paths = []
    failed_paths = []

    # Walk issuer links; complete chains accumulate in `paths`, dead ends
    # in `failed_paths`.
    self._walk_issuers(path, paths, failed_paths)

    if len(paths) == 0:
        # Report the first dead end's missing issuer to aid diagnosis.
        cert_name = end_entity_cert.subject.human_friendly
        missing_issuer_name = failed_paths[0].first.issuer.human_friendly
        raise PathBuildingError(pretty_message(
            '''
            Unable to build a validation path for the certificate "%s" - no
            issuer matching "%s" was found
            ''',
            cert_name,
            missing_issuer_name
        ))

    return paths
Builds a list of ValidationPath objects from a certificate in the operating system trust store to the end-entity certificate :param end_entity_cert: A byte string of a DER or PEM-encoded X.509 certificate, or an instance of asn1crypto.x509.Certificate :return: A list of certvalidator.path.ValidationPath objects that represent the possible paths from the end-entity certificate to one of the CA certs.
def connect(host='localhost', port=21050, database=None, timeout=None, use_ssl=False, ca_cert=None, auth_mechanism='NOSASL', user=None, password=None, kerberos_service_name='impala', use_ldap=None, ldap_user=None, ldap_password=None, use_kerberos=None, protocol=None, krb_host=None): """Get a connection to HiveServer2 (HS2). These options are largely compatible with the impala-shell command line arguments. See those docs for more information. Parameters ---------- host : str The hostname for HS2. For Impala, this can be any of the `impalad`s. port : int, optional The port number for HS2. The Impala default is 21050. The Hive port is likely different. database : str, optional The default database. If `None`, the result is implementation-dependent. timeout : int, optional Connection timeout in seconds. Default is no timeout. use_ssl : bool, optional Enable SSL. ca_cert : str, optional Local path to the the third-party CA certificate. If SSL is enabled but the certificate is not specified, the server certificate will not be validated. auth_mechanism : {'NOSASL', 'PLAIN', 'GSSAPI', 'LDAP'} Specify the authentication mechanism. `'NOSASL'` for unsecured Impala. `'PLAIN'` for unsecured Hive (because Hive requires the SASL transport). `'GSSAPI'` for Kerberos and `'LDAP'` for Kerberos with LDAP. user : str, optional LDAP user, if applicable. password : str, optional LDAP password, if applicable. kerberos_service_name : str, optional Authenticate to a particular `impalad` service principal. Uses `'impala'` by default. use_ldap : bool, optional Specify `auth_mechanism='LDAP'` instead. .. deprecated:: 0.11.0 ldap_user : str, optional Use `user` parameter instead. .. deprecated:: 0.11.0 ldap_password : str, optional Use `password` parameter instead. .. deprecated:: 0.11.0 use_kerberos : bool, optional Specify `auth_mechanism='GSSAPI'` instead. .. deprecated:: 0.11.0 protocol : str, optional Do not use. HiveServer2 is the only protocol currently supported. .. 
deprecated:: 0.11.0 Returns ------- HiveServer2Connection A `Connection` object (DB API 2.0-compliant). """ # pylint: disable=too-many-locals if use_kerberos is not None: warn_deprecate('use_kerberos', 'auth_mechanism="GSSAPI"') if use_kerberos: auth_mechanism = 'GSSAPI' if use_ldap is not None: warn_deprecate('use_ldap', 'auth_mechanism="LDAP"') if use_ldap: auth_mechanism = 'LDAP' if auth_mechanism: auth_mechanism = auth_mechanism.upper() else: auth_mechanism = 'NOSASL' if auth_mechanism not in AUTH_MECHANISMS: raise NotSupportedError( 'Unsupported authentication mechanism: {0}'.format(auth_mechanism)) if ldap_user is not None: warn_deprecate('ldap_user', 'user') user = ldap_user if ldap_password is not None: warn_deprecate('ldap_password', 'password') password = ldap_password if protocol is not None: if protocol.lower() == 'hiveserver2': warn_protocol_param() else: raise NotSupportedError( "'{0}' is not a supported protocol; only HiveServer2 is " "supported".format(protocol)) service = hs2.connect(host=host, port=port, timeout=timeout, use_ssl=use_ssl, ca_cert=ca_cert, user=user, password=password, kerberos_service_name=kerberos_service_name, auth_mechanism=auth_mechanism, krb_host=krb_host) return hs2.HiveServer2Connection(service, default_db=database)
Get a connection to HiveServer2 (HS2). These options are largely compatible with the impala-shell command line arguments. See those docs for more information. Parameters ---------- host : str The hostname for HS2. For Impala, this can be any of the `impalad`s. port : int, optional The port number for HS2. The Impala default is 21050. The Hive port is likely different. database : str, optional The default database. If `None`, the result is implementation-dependent. timeout : int, optional Connection timeout in seconds. Default is no timeout. use_ssl : bool, optional Enable SSL. ca_cert : str, optional Local path to the the third-party CA certificate. If SSL is enabled but the certificate is not specified, the server certificate will not be validated. auth_mechanism : {'NOSASL', 'PLAIN', 'GSSAPI', 'LDAP'} Specify the authentication mechanism. `'NOSASL'` for unsecured Impala. `'PLAIN'` for unsecured Hive (because Hive requires the SASL transport). `'GSSAPI'` for Kerberos and `'LDAP'` for Kerberos with LDAP. user : str, optional LDAP user, if applicable. password : str, optional LDAP password, if applicable. kerberos_service_name : str, optional Authenticate to a particular `impalad` service principal. Uses `'impala'` by default. use_ldap : bool, optional Specify `auth_mechanism='LDAP'` instead. .. deprecated:: 0.11.0 ldap_user : str, optional Use `user` parameter instead. .. deprecated:: 0.11.0 ldap_password : str, optional Use `password` parameter instead. .. deprecated:: 0.11.0 use_kerberos : bool, optional Specify `auth_mechanism='GSSAPI'` instead. .. deprecated:: 0.11.0 protocol : str, optional Do not use. HiveServer2 is the only protocol currently supported. .. deprecated:: 0.11.0 Returns ------- HiveServer2Connection A `Connection` object (DB API 2.0-compliant).
def _jar_paths(): """Produce potential paths for an h2o.jar executable.""" # PUBDEV-3534 hook to use arbitrary h2o.jar own_jar = os.getenv("H2O_JAR_PATH", "") if own_jar != "": if not os.path.isfile(own_jar): raise H2OStartupError("Environment variable H2O_JAR_PATH is set to '%d' but file does not exists, unset environment variable or provide valid path to h2o.jar file." % own_jar) yield own_jar # Check if running from an h2o-3 src folder (or any subfolder), in which case use the freshly-built h2o.jar cwd_chunks = os.path.abspath(".").split(os.path.sep) for i in range(len(cwd_chunks), 0, -1): if cwd_chunks[i - 1] == "h2o-3": yield os.path.sep.join(cwd_chunks[:i] + ["build", "h2o.jar"]) # Then check the backend/bin folder: # (the following works assuming this code is located in h2o/backend/server.py file) backend_dir = os.path.split(os.path.realpath(__file__))[0] yield os.path.join(backend_dir, "bin", "h2o.jar") # Then try several old locations where h2o.jar might have been installed prefix1 = prefix2 = sys.prefix # On Unix-like systems Python typically gets installed into /Library/... or /System/Library/... If one of # those paths is sys.prefix, then we also build its counterpart. if prefix1.startswith(os.path.sep + "Library"): prefix2 = os.path.join("", "System", prefix1) elif prefix1.startswith(os.path.sep + "System"): prefix2 = prefix1[len(os.path.join("", "System")):] yield os.path.join(prefix1, "h2o_jar", "h2o.jar") yield os.path.join(os.path.abspath(os.sep), "usr", "local", "h2o_jar", "h2o.jar") yield os.path.join(prefix1, "local", "h2o_jar", "h2o.jar") yield os.path.join(get_config_var("userbase"), "h2o_jar", "h2o.jar") yield os.path.join(prefix2, "h2o_jar", "h2o.jar")
Produce potential paths for an h2o.jar executable.
def getUserInfo(self):
    """
    Returns a dictionary of user info that google stores.
    """
    raw = self.httpGet(ReaderUrl.USER_INFO_URL)
    info = json.loads(raw, strict=False)
    # Cache the user id on the client for later requests.
    self.userId = info['userId']
    return info
Returns a dictionary of user info that google stores.
def harmonic(y, **kwargs):
    '''Extract harmonic elements from an audio time-series.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)]
        audio time series
    kwargs : additional keyword arguments.
        See `librosa.decompose.hpss` for details.

    Returns
    -------
    y_harmonic : np.ndarray [shape=(n,)]
        audio time series of just the harmonic portion

    See Also
    --------
    hpss : Separate harmonic and percussive components
    percussive : Extract only the percussive component
    librosa.decompose.hpss : HPSS for spectrograms

    Examples
    --------
    >>> # Extract harmonic component
    >>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> y_harmonic = librosa.effects.harmonic(y)

    >>> # Use a margin > 1.0 for greater harmonic separation
    >>> y_harmonic = librosa.effects.harmonic(y, margin=3.0)
    '''
    # STFT -> keep the harmonic component of HPSS -> inverse STFT,
    # padding/trimming back to the input length.
    spectrum = core.stft(y)
    harmonic_spectrum = decompose.hpss(spectrum, **kwargs)[0]
    return util.fix_length(core.istft(harmonic_spectrum, dtype=y.dtype),
                           len(y))
Extract harmonic elements from an audio time-series. Parameters ---------- y : np.ndarray [shape=(n,)] audio time series kwargs : additional keyword arguments. See `librosa.decompose.hpss` for details. Returns ------- y_harmonic : np.ndarray [shape=(n,)] audio time series of just the harmonic portion See Also -------- hpss : Separate harmonic and percussive components percussive : Extract only the percussive component librosa.decompose.hpss : HPSS for spectrograms Examples -------- >>> # Extract harmonic component >>> y, sr = librosa.load(librosa.util.example_audio_file()) >>> y_harmonic = librosa.effects.harmonic(y) >>> # Use a margin > 1.0 for greater harmonic separation >>> y_harmonic = librosa.effects.harmonic(y, margin=3.0)
def parse_changelog(path, **kwargs):
    """
    Load and parse changelog file from ``path``, returning data structures.

    This function does not alter any files on disk; it is solely for
    introspecting a Releases ``changelog.rst`` and programmatically answering
    questions like "are there any unreleased bugfixes for the 2.3 line?" or
    "what was included in release 1.2.1?".

    For example, answering the above questions is as simple as::

        changelog = parse_changelog("/path/to/changelog")
        print("Unreleased issues for 2.3.x: {}".format(changelog['2.3']))
        print("Contents of v1.2.1: {}".format(changelog['1.2.1']))

    Aside from the documented arguments, any additional keyword arguments are
    passed unmodified into an internal `get_doctree` call (which then passes
    them to `make_app`).

    :param str path: A relative or absolute file path string.

    :returns:
        A dict whose keys map to lists of ``releases.models.Issue`` objects,
        as follows:

        - Actual releases are full version number keys, such as ``"1.2.1"``
          or ``"2.0.0"``.
        - Unreleased bugs (or bug-like issues; see the Releases docs) are
          stored in minor-release buckets, e.g. ``"1.2"`` or ``"2.0"``.
        - Unreleased features (or feature-like issues) are found in
          ``"unreleased_N_feature"``, where ``N`` is one of the major release
          families (so, a changelog spanning only 1.x will only have
          ``unreleased_1_feature``, whereas one with 1.x and 2.x releases
          will have ``unreleased_1_feature`` and ``unreleased_2_feature``,
          etc).

    .. versionchanged:: 1.6
        Added support for passing kwargs to `get_doctree`/`make_app`.
    """
    app, doctree = get_doctree(path, **kwargs)
    # Have to semi-reproduce the 'find first bullet list' bit from main code,
    # which is unfortunately side-effect-heavy (thanks to Sphinx plugin
    # design).
    first_list = None
    for node in doctree[0]:
        if isinstance(node, bullet_list):
            first_list = node
            break
    # Initial parse into the structures Releases finds useful internally
    releases, manager = construct_releases(first_list.children, app)
    ret = changelog2dict(releases)
    # Stitch them together into something an end-user would find better:
    # - nuke unreleased_N.N_Y as their contents will be represented in the
    # per-line buckets
    for key in ret.copy():
        if key.startswith('unreleased'):
            del ret[key]
    for family in manager:
        # - remove unreleased_bugfix, as they are accounted for in the per-line
        # buckets too. No need to store anywhere.
        manager[family].pop('unreleased_bugfix', None)
        # - bring over each major family's unreleased_feature as
        # unreleased_N_feature
        unreleased = manager[family].pop('unreleased_feature', None)
        if unreleased is not None:
            ret['unreleased_{}_feature'.format(family)] = unreleased
        # - bring over all per-line buckets from manager (flattening)
        # Here, all that's left in the per-family bucket should be lines, not
        # unreleased_*
        ret.update(manager[family])
    return ret
Load and parse changelog file from ``path``, returning data structures. This function does not alter any files on disk; it is solely for introspecting a Releases ``changelog.rst`` and programmatically answering questions like "are there any unreleased bugfixes for the 2.3 line?" or "what was included in release 1.2.1?". For example, answering the above questions is as simple as:: changelog = parse_changelog("/path/to/changelog") print("Unreleased issues for 2.3.x: {}".format(changelog['2.3'])) print("Contents of v1.2.1: {}".format(changelog['1.2.1'])) Aside from the documented arguments, any additional keyword arguments are passed unmodified into an internal `get_doctree` call (which then passes them to `make_app`). :param str path: A relative or absolute file path string. :returns: A dict whose keys map to lists of ``releases.models.Issue`` objects, as follows: - Actual releases are full version number keys, such as ``"1.2.1"`` or ``"2.0.0"``. - Unreleased bugs (or bug-like issues; see the Releases docs) are stored in minor-release buckets, e.g. ``"1.2"`` or ``"2.0"``. - Unreleased features (or feature-like issues) are found in ``"unreleased_N_feature"``, where ``N`` is one of the major release families (so, a changelog spanning only 1.x will only have ``unreleased_1_feature``, whereas one with 1.x and 2.x releases will have ``unreleased_1_feature`` and ``unreleased_2_feature``, etc). .. versionchanged:: 1.6 Added support for passing kwargs to `get_doctree`/`make_app`.
def add_sender_info(self, sender_txhash, nulldata_vin_outpoint, sender_out_data):
    """
    Record sender information in our block info.

    @sender_txhash: txid of the sender
    @nulldata_vin_outpoint: the 'vout' index from the nulldata tx input
        that this transaction funded
    @sender_out_data: the funding output (must contain 'value' in satoshis
        and the 'script' pubkey hex)

    Returns True on success.  Asserts that the (txid, outpoint) pair was
    previously registered in self.sender_info.
    """
    assert sender_txhash in self.sender_info.keys(), "Missing sender info for %s" % sender_txhash
    assert nulldata_vin_outpoint in self.sender_info[sender_txhash], "Missing outpoint %s for sender %s" % (nulldata_vin_outpoint, sender_txhash)

    # Look up where (block, tx index, input index) this sender belongs.
    block_hash = self.sender_info[sender_txhash][nulldata_vin_outpoint]['block_hash']
    relindex = self.sender_info[sender_txhash][nulldata_vin_outpoint]['relindex']
    relinput_index = self.sender_info[sender_txhash][nulldata_vin_outpoint]['relinput']

    value_in_satoshis = sender_out_data['value']
    script_pubkey = sender_out_data['script']
    # Decode the scriptPubKey to classify it and extract addresses.
    script_info = bits.btc_tx_output_parse_script(script_pubkey)
    script_type = script_info['type']
    addresses = script_info.get('addresses', [])

    sender_info = {
        "value": value_in_satoshis,
        "script_pubkey": script_pubkey,
        "script_type": script_type,
        "addresses": addresses,
        "nulldata_vin_outpoint": nulldata_vin_outpoint,
        "txid": sender_txhash,
    }

    # debit this tx's total value
    self.block_info[block_hash]['txns'][relindex]['fee'] += value_in_satoshis

    # remember this sender, but put it in the right place.
    # senders[i] must correspond to tx['vin'][i]
    self.block_info[block_hash]['txns'][relindex]['senders'][relinput_index] = sender_info
    self.block_info[block_hash]['num_senders'] += 1
    return True
Record sender information in our block info. @sender_txhash: txid of the sender @nulldata_vin_outpoint: the 'vout' index from the nulldata tx input that this transaction funded
def distance(p_a, p_b):
    """
    Euclidean distance, between two points

    Args:
        p_a (:obj:`Point`)
        p_b (:obj:`Point`)
    Returns:
        float: distance, in degrees
    """
    d_lat = p_a.lat - p_b.lat
    d_lon = p_a.lon - p_b.lon
    return sqrt(d_lat ** 2 + d_lon ** 2)
Euclidean distance, between two points Args: p_a (:obj:`Point`) p_b (:obj:`Point`) Returns: float: distance, in degrees
def latexsnippet(code, kvs, staffsize=17, initiallines=1):
    """Take in account key/values"""
    # Key/value overrides win over the passed-in defaults.
    if 'staffsize' in kvs:
        staffsize = int(kvs['staffsize'])
    if 'initiallines' in kvs:
        initiallines = int(kvs['initiallines'])
    annotationsize = .5 * staffsize

    # Fixed preamble controlling staff size and the initial's font.
    header = (
        "\\gresetinitiallines{%s}\n" % initiallines +
        "\\grechangestaffsize{%s}\n" % staffsize +
        "\\grechangestyle{initial}{\\fontsize{%s}{%s}\\selectfont{}}" %
        (2.5 * staffsize, 2.5 * staffsize)
    )

    # Optional annotations: the 'annotation' entry precedes 'mode'.
    body = ''
    if 'annotation' in kvs:
        body += (
            "\\grechangedim{annotationseparation}{%s mm}{fixed}\n"
            "\\greannotation{{\\fontsize{%s}{%s}\\selectfont{}%s}}\n" %
            (staffsize / 60, annotationsize, annotationsize,
             kvs['annotation'])
        )
    if 'mode' in kvs:
        body += (
            "\\greannotation{{\\fontsize{%s}{%s}\\selectfont{}%s}}\n" %
            (annotationsize, annotationsize, kvs['mode'])
        )

    return "\\setlength{\\parskip}{0pt}\n" + header + body + code
Take in account key/values
def isotime(at=None, subsecond=False):
    """Stringify time in ISO 8601 format."""
    if at is None:
        at = utcnow()
    fmt = _ISO8601_TIME_FORMAT_SUBSECOND if subsecond else _ISO8601_TIME_FORMAT
    stamp = at.strftime(fmt)
    # Naive datetimes are treated as UTC; UTC renders as the 'Z' suffix.
    tz_name = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
    return stamp + ('Z' if tz_name == 'UTC' else tz_name)
Stringify time in ISO 8601 format.
def save_form(self, request, form, change):
    """Here we pluck out the data to create a new cloned repo.

    Form is an instance of NewRepoForm.
    """
    repo = ClonedRepo(name=form.cleaned_data['name'],
                      origin=form.cleaned_data['origin_url'])
    LOG.info("New repo form produced %s" % str(repo))
    # Validate the form without persisting it; the repo object is what
    # gets returned to the admin machinery.
    form.save(commit=False)
    return repo
Here we pluck out the data to create a new cloned repo. Form is an instance of NewRepoForm.
def loglike(self, endog, mu, freq_weights=1., scale=1.):
    """
    The log-likelihood in terms of the fitted mean response.

    Parameters
    ----------
    endog : array-like
        Endogenous response variable
    mu : array-like
        Fitted mean response variable
    freq_weights : array-like
        1d array of frequency weights. The default is 1.
    scale : float, optional
        Scales the loglikelihood function. The default is 1.

    Returns
    -------
    llf : float
        The value of the loglikelihood function evaluated at
        (endog,mu,freq_weights,scale) as defined below.
    """
    # With an identity (power-1) link, use the concentrated Gaussian
    # log-likelihood with the scale profiled out via the SSR.
    # NOTE(review): this branch ignores freq_weights and scale -- presumably
    # intentional for the classical OLS shortcut; confirm upstream.
    if isinstance(self.link, L.Power) and self.link.power == 1:
        # This is just the loglikelihood for classical OLS
        nobs2 = endog.shape[0] / 2.
        SSR = np.sum((endog-self.fitted(mu))**2, axis=0)
        llf = -np.log(SSR) * nobs2
        llf -= (1+np.log(np.pi/nobs2))*nobs2
        return llf
    else:
        # Frequency-weighted Gaussian log-density summed over observations,
        # with the quadratic term expanded as endog*mu - mu**2/2 - endog**2/2.
        return np.sum(freq_weights * ((endog * mu - mu**2/2)/scale -
                      endog**2/(2 * scale) - .5*np.log(2 * np.pi * scale)))
The log-likelihood in terms of the fitted mean response. Parameters ---------- endog : array-like Endogenous response variable mu : array-like Fitted mean response variable freq_weights : array-like 1d array of frequency weights. The default is 1. scale : float, optional Scales the loglikelihood function. The default is 1. Returns ------- llf : float The value of the loglikelihood function evaluated at (endog,mu,freq_weights,scale) as defined below.
def _create_subplots(self, fig, layout): """ Create suplots and return axs """ num_panels = len(layout) axsarr = np.empty((self.nrow, self.ncol), dtype=object) # Create axes i = 1 for row in range(self.nrow): for col in range(self.ncol): axsarr[row, col] = fig.add_subplot(self.nrow, self.ncol, i) i += 1 # Rearrange axes # They are ordered to match the positions in the layout table if self.dir == 'h': order = 'C' if not self.as_table: axsarr = axsarr[::-1] elif self.dir == 'v': order = 'F' if not self.as_table: axsarr = np.array([row[::-1] for row in axsarr]) axs = axsarr.ravel(order) # Delete unused axes for ax in axs[num_panels:]: fig.delaxes(ax) axs = axs[:num_panels] return axs
Create suplots and return axs
def compile_template(instance, template, additionnal_context=None):
    """
    Fill the given template with the instance's datas and return the odt file

    For every instance class, common values are also inserted in the context
    dict (and so can be used) :

        * config values

    :param obj instance: the instance of a model (like Userdatas, Company)
    :param template: the template object to use
    :param dict additionnal_context: A dict containing datas we'd like to add
        to the py3o compilation template
    :return: a stringIO object filled with the resulting odt's informations
    """
    context = get_compilation_context(instance)
    if additionnal_context is not None:
        # Caller-provided values override/extend the common context.
        context.update(additionnal_context)

    rendered = StringIO()
    Template(template, rendered).render(context)
    return rendered
Fill the given template with the instance's datas and return the odt file For every instance class, common values are also inserted in the context dict (and so can be used) : * config values :param obj instance: the instance of a model (like Userdatas, Company) :param template: the template object to use :param dict additionnal_context: A dict containing datas we'd like to add to the py3o compilation template :return: a stringIO object filled with the resulting odt's informations
def get_dbcollection_with_es(self, **kwargs):
    """ Get DB objects collection by first querying ES. """
    # ES narrows the candidate set; the model filter maps it back to DB rows.
    es_hits = self.get_collection_es()
    return self.Model.filter_objects(es_hits)
Get DB objects collection by first querying ES.
def _get_kwargs(profile=None, **connection_args):
    '''
    get connection args
    '''
    # Profile-scoped config keys are looked up under "<profile>:keystone.".
    prefix = (profile + ":keystone.") if profile else "keystone."

    def get(key, default=None):
        '''
        look in connection_args first, then default to config file
        '''
        return connection_args.get('connection_' + key,
                                   __salt__['config.get'](prefix + key, default))

    user = get('user', 'admin')
    password = get('password', 'ADMIN')
    tenant = get('tenant', 'admin')
    tenant_id = get('tenant_id')
    auth_url = get('auth_url', 'http://127.0.0.1:35357/v2.0/')
    insecure = get('insecure', False)
    token = get('token')
    endpoint = get('endpoint', 'http://127.0.0.1:35357/v2.0')
    user_domain_name = get('user_domain_name', 'Default')
    project_domain_name = get('project_domain_name', 'Default')

    # A token takes precedence over username/password credentials.
    if token:
        kwargs = {'token': token, 'endpoint': endpoint}
    else:
        kwargs = {
            'username': user,
            'password': password,
            'tenant_name': tenant,
            'tenant_id': tenant_id,
            'auth_url': auth_url,
            'user_domain_name': user_domain_name,
            'project_domain_name': project_domain_name,
        }
    # 'insecure' keyword not supported by all v2.0 keystone clients
    # this ensures it's only passed in when defined
    if insecure:
        kwargs['insecure'] = True
    return kwargs
get connection args
def get_url(cls, url, uid, **kwargs):
    """
    Construct the URL for talking to an individual resource.

    http://myapi.com/api/resource/1

    Args:
        url: The url for this resource
        uid: The unique identifier for an individual resource
        kwargs: Additional keyword arguments

    returns:
        final_url: The URL for this individual resource
    """
    # A falsy uid leaves the collection URL untouched (the original
    # had a no-op `else: url = url` branch here).
    if uid:
        url = '{}/{}'.format(url, uid)
    return cls._parse_url_and_validate(url)
Construct the URL for talking to an individual resource. http://myapi.com/api/resource/1 Args: url: The url for this resource uid: The unique identifier for an individual resource kwargs: Additional keyword arguments returns: final_url: The URL for this individual resource
def wr_txt(self, fout_txt):
    """Write to a file GOEA results in an ASCII text format."""
    with open(fout_txt, 'w') as prt:
        # Version/header lines come first, then the formatted results.
        for verline in self.ver_list:
            prt.write("{LINE}\n".format(LINE=verline))
        self.prt_txt(prt)
    print(" WROTE: {TXT}".format(TXT=fout_txt))
Write to a file GOEA results in an ASCII text format.
def _find_cellid(self, code): """Determines the most similar cell (if any) to the specified code. It must have at least 50% overlap ratio and have been a loop-intercepted cell previously. Args: code (str): contents of the code cell that were executed. """ from difflib import SequenceMatcher maxvalue = 0. maxid = None for cellid, c in self.cellids.items(): matcher = SequenceMatcher(a=c, b=code) ratio = matcher.quick_ratio() if ratio > maxvalue and ratio > 0.5: maxid, maxvalue = cellid, ratio return maxid
Determines the most similar cell (if any) to the specified code. It must have at least 50% overlap ratio and have been a loop-intercepted cell previously. Args: code (str): contents of the code cell that were executed.
def updates(self):
    '''
    Get the contents of ``_updates`` (all updates) and puts them in an
    Updates class to expose the list and summary functions.

    Returns:

        Updates: An instance of the Updates class with all updates for the
        system.

    .. code-block:: python

        import salt.utils.win_update
        wua = salt.utils.win_update.WindowsUpdateAgent()
        updates = wua.updates()

        # To get a list
        updates.list()

        # To get a summary
        updates.summary()
    '''
    # Copy each cached update object into a fresh Updates container.
    result = Updates()
    for item in self._updates:
        result.updates.Add(item)
    return result
Get the contents of ``_updates`` (all updates) and puts them in an Updates class to expose the list and summary functions. Returns: Updates: An instance of the Updates class with all updates for the system. .. code-block:: python import salt.utils.win_update wua = salt.utils.win_update.WindowsUpdateAgent() updates = wua.updates() # To get a list updates.list() # To get a summary updates.summary()
def getDefaultItems(self):
    """ Returns a list with the default plugins in the repo tree item registry.
    """
    # Each RtiRegItem maps a display name to the dotted path of the plugin
    # class and the file extensions it handles.
    return [
        RtiRegItem('HDF-5 file',
                   'argos.repo.rtiplugins.hdf5.H5pyFileRti',
                   extensions=['hdf5', 'h5', 'h5e', 'he5', 'nc']), # hdf extension is for HDF-4

        RtiRegItem('MATLAB file',
                   'argos.repo.rtiplugins.scipyio.MatlabFileRti',
                   extensions=['mat']),

        RtiRegItem('NetCDF file',
                   'argos.repo.rtiplugins.ncdf.NcdfFileRti',
                   #extensions=['nc', 'nc3', 'nc4']),
                   extensions=['nc', 'nc4']),
                   #extensions=[]),

        RtiRegItem('NumPy binary file',
                   'argos.repo.rtiplugins.numpyio.NumpyBinaryFileRti',
                   extensions=['npy']),

        RtiRegItem('NumPy compressed file',
                   'argos.repo.rtiplugins.numpyio.NumpyCompressedFileRti',
                   extensions=['npz']),

        RtiRegItem('NumPy text file',
                   'argos.repo.rtiplugins.numpyio.NumpyTextFileRti',
                   #extensions=['txt', 'text']),
                   extensions=['dat']),

        RtiRegItem('IDL save file',
                   'argos.repo.rtiplugins.scipyio.IdlSaveFileRti',
                   extensions=['sav']),

        RtiRegItem('Pandas CSV file',
                   'argos.repo.rtiplugins.pandasio.PandasCsvFileRti',
                   extensions=['csv']),

        RtiRegItem('Pillow image',
                   'argos.repo.rtiplugins.pillowio.PillowFileRti',
                   extensions=['bmp', 'eps', 'im', 'gif', 'jpg', 'jpeg', 'msp',
                               'pcx', 'png', 'ppm', 'spi', 'tif', 'tiff',
                               'xbm', 'xv']),

        RtiRegItem('Wav file',
                   'argos.repo.rtiplugins.scipyio.WavFileRti',
                   extensions=['wav'])]
Returns a list with the default plugins in the repo tree item registry.
def unregister_provider(self, provider):
    """
    Unregister a provider.

    Blocks until this RpcConsumer is unregistered from its QueueConsumer,
    which only happens when all providers have asked to unregister.
    """
    self._unregistering_providers.add(provider)
    remaining_providers = self._providers - self._unregistering_providers
    if not remaining_providers:
        # Last provider out: actually unregister from the queue consumer
        # and signal any providers blocked in wait() below.
        _log.debug('unregistering from queueconsumer %s', self)
        self.queue_consumer.unregister_provider(self)
        _log.debug('unregistered from queueconsumer %s', self)
        self._unregistered_from_queue_consumer.send(True)

    # Every provider (including the last one) blocks here until the
    # queue-consumer unregistration has completed.
    _log.debug('waiting for unregister from queue consumer %s', self)
    self._unregistered_from_queue_consumer.wait()
    super(RpcConsumer, self).unregister_provider(provider)
Unregister a provider. Blocks until this RpcConsumer is unregistered from its QueueConsumer, which only happens when all providers have asked to unregister.
def load(dbname, dbmode='a'):
    """Load an existing hdf5 database.

    Return a Database instance.

    :Parameters:
    filename : string
        Name of the hdf5 database to open.
    mode : 'a', 'r'
        File mode : 'a': append, 'r': read-only.
    """
    # Write mode would truncate the existing file, so refuse it here.
    if dbmode == 'w':
        raise AttributeError("dbmode='w' not allowed for load.")
    return Database(dbname, dbmode=dbmode)
Load an existing hdf5 database. Return a Database instance. :Parameters: filename : string Name of the hdf5 database to open. mode : 'a', 'r' File mode : 'a': append, 'r': read-only.
def remove_negativescores_nodes(self):
    """\
    if there are elements inside our top node that have a
    negative gravity score, let's give em the boot
    """
    for node in self.parser.css_select(self.top_node, "*[gravityScore]"):
        raw_score = self.parser.getAttribute(node, 'gravityScore')
        # Anything scoring below 1 (zero or negative) is dropped.
        if int(raw_score, 0) < 1:
            node.getparent().remove(node)
\ if there are elements inside our top node that have a negative gravity score, let's give em the boot
def _parse_01(ofiles, individual=False): """ a subfunction for summarizing results """ ## parse results from outfiles cols = [] dats = [] for ofile in ofiles: ## parse file with open(ofile) as infile: dat = infile.read() lastbits = dat.split(".mcmc.txt\n\n")[1:] results = lastbits[0].split("\n\n")[0].split() ## get shape from ... shape = (((len(results) - 3) / 4), 4) dat = np.array(results[3:]).reshape(shape) cols.append(dat[:, 3].astype(float)) if not individual: ## get mean results across reps cols = np.array(cols) cols = cols.sum(axis=0) / len(ofiles) #10. dat[:, 3] = cols.astype(str) ## format as a DF df = pd.DataFrame(dat[:, 1:]) df.columns = ["delim", "prior", "posterior"] nspecies = 1 + np.array([list(i) for i in dat[:, 1]], dtype=int).sum(axis=1) df["nspecies"] = nspecies return df else: ## get mean results across reps #return cols res = [] for i in xrange(len(cols)): x = dat x[:, 3] = cols[i].astype(str) x = pd.DataFrame(x[:, 1:]) x.columns = ['delim', 'prior', 'posterior'] nspecies = 1 + np.array([list(i) for i in dat[:, 1]], dtype=int).sum(axis=1) x["nspecies"] = nspecies res.append(x) return res
a subfunction for summarizing results
def post(self, request, *args, **kwargs):
    """
    Returns a token identifying the user in Centrifugo.

    The JSON response carries the signed token, the connection URL and the
    list of channels the user may subscribe to (one per active thread,
    plus a private new-thread alert channel).
    """
    current_timestamp = "%.0f" % time.time()
    user_id_str = u"{0}".format(request.user.id)
    # Token is signed with the shared secret; Centrifugo validates it
    # against the same (user, timestamp) pair.
    token = generate_token(settings.CENTRIFUGE_SECRET, user_id_str,
                           "{0}".format(current_timestamp), info="")

    # we get all the channels to which the user can subscribe
    # NOTE(review): assumes Participant ids mirror User ids — confirm.
    participant = Participant.objects.get(id=request.user.id)
    # we use the threads as channels ids
    channels = []
    for thread in Thread.managers.get_threads_where_participant_is_active(participant_id=participant.id):
        channels.append(
            build_channel(settings.CENTRIFUGO_MESSAGE_NAMESPACE, thread.id, thread.participants.all())
        )

    # we also have a channel to alert us about new threads
    threads_channel = build_channel(settings.CENTRIFUGO_THREAD_NAMESPACE, request.user.id, [request.user.id])
    # the user is the only one to have access to this channel
    channels.append(threads_channel)

    # we return the information
    to_return = {
        'user': user_id_str,
        'timestamp': current_timestamp,
        'token': token,
        'connection_url': "{0}connection/".format(settings.CENTRIFUGE_ADDRESS),
        'channels': channels,
        'debug': settings.DEBUG,
    }
    return HttpResponse(json.dumps(to_return), content_type='application/json; charset=utf-8')
Returns a token identifying the user in Centrifugo.
def _unique_by_email(users_and_watches):
    """Given a sequence of (User/EmailUser, [Watch, ...]) pairs
    clustered by email address (which is never ''), yield from each
    cluster a single pair like this::

      (User/EmailUser, [Watch, Watch, ...]).

    The User/Email is that of...
    (1) the first incoming pair where the User has an email and is not
        anonymous, or, if there isn't such a user...
    (2) the first pair.

    The list of Watches consists of all those found in the cluster.

    Compares email addresses case-insensitively.
    """
    def ensure_user_has_email(user, cluster_email):
        """Make sure the user in the user-watch pair has an email address.

        The caller guarantees us an email from either the user or the watch.
        If the passed-in user has no email, we return an EmailUser
        instead having the email address from the watch.

        """
        # Some of these cases shouldn't happen, but we're tolerant.
        if not getattr(user, 'email', ''):
            user = EmailUser(cluster_email)
        return user

    # TODO: Do this instead with clever SQL that somehow returns just the
    # best row for each email.

    cluster_email = ''  # email of current cluster
    favorite_user = None  # best user in cluster so far
    watches = []  # all watches in cluster
    for u, w in users_and_watches:
        # w always has at least 1 Watch. All the emails are the same.
        row_email = u.email or w[0].email
        if cluster_email.lower() != row_email.lower():
            # Starting a new cluster.
            if cluster_email != '':
                # Ship the favorites from the previous cluster:
                yield (ensure_user_has_email(favorite_user, cluster_email),
                       watches)
            favorite_user, watches = u, []
            cluster_email = row_email
        elif ((not favorite_user.email or not u.is_authenticated) and
              u.email and u.is_authenticated):
            # Upgrade the favorite if this row's user is "better"
            # (authenticated with an email).
            favorite_user = u
        watches.extend(w)
    # Flush the final cluster (the loop only yields on cluster changes).
    if favorite_user is not None:
        yield ensure_user_has_email(favorite_user, cluster_email), watches
Given a sequence of (User/EmailUser, [Watch, ...]) pairs clustered by email address (which is never ''), yield from each cluster a single pair like this:: (User/EmailUser, [Watch, Watch, ...]). The User/Email is that of... (1) the first incoming pair where the User has an email and is not anonymous, or, if there isn't such a user... (2) the first pair. The list of Watches consists of all those found in the cluster. Compares email addresses case-insensitively.
def adjustMask(self):
    """
    Updates the alpha mask for this popup widget.
    """
    # Dialog mode uses the regular window frame, so no custom mask.
    if self.currentMode() == XPopupWidget.Mode.Dialog:
        self.clearMask()
        return

    path = self.borderPath()
    bitmap = QBitmap(self.width(), self.height())
    bitmap.fill(QColor('white'))

    # Paint the border path in black; black pixels become the visible
    # region of the mask.
    with XPainter(bitmap) as painter:
        painter.setRenderHint(XPainter.Antialiasing)
        outline = QPen(QColor('black'))
        outline.setWidthF(0.75)
        painter.setPen(outline)
        painter.setBrush(QColor('black'))
        painter.drawPath(path)

    self.setMask(bitmap)
Updates the alpha mask for this popup widget.
def create_cherry_pick(self, cherry_pick_to_create, project, repository_id):
    """CreateCherryPick.
    [Preview API] Cherry pick a specific commit or commits that are associated to a pull request into a new branch.
    :param :class:`<GitAsyncRefOperationParameters> <azure.devops.v5_0.git.models.GitAsyncRefOperationParameters>` cherry_pick_to_create:
    :param str project: Project ID or project name
    :param str repository_id: ID of the repository.
    :rtype: :class:`<GitCherryPick> <azure.devops.v5_0.git.models.GitCherryPick>`
    """
    # Build route values, skipping any argument that was not supplied.
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if repository_id is not None:
        route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str')
    payload = self._serialize.body(cherry_pick_to_create, 'GitAsyncRefOperationParameters')
    response = self._send(http_method='POST',
                          location_id='033bad68-9a14-43d1-90e0-59cb8856fef6',
                          version='5.0-preview.1',
                          route_values=route_values,
                          content=payload)
    return self._deserialize('GitCherryPick', response)
CreateCherryPick. [Preview API] Cherry pick a specific commit or commits that are associated to a pull request into a new branch. :param :class:`<GitAsyncRefOperationParameters> <azure.devops.v5_0.git.models.GitAsyncRefOperationParameters>` cherry_pick_to_create: :param str project: Project ID or project name :param str repository_id: ID of the repository. :rtype: :class:`<GitCherryPick> <azure.devops.v5_0.git.models.GitCherryPick>`
def delete(self, upload_id):
    """ Deletes an upload by ID. """
    # Delegate to the generic proxy delete; file_upload=True routes the
    # request through the file-upload endpoint.
    return super(UploadsProxy, self).delete(upload_id, file_upload=True)
Deletes an upload by ID.
def read_data(self, size):
    """Receive data from the device.

    If the read fails for any reason, an :obj:`IOError` exception
    is raised.

    :param size: the number of bytes to read.

    :type size: int

    :return: the data received.

    :rtype: list(int)
    """
    # Bulk-read from endpoint 0x81 with a fixed timeout.
    raw = self.dev.bulkRead(0x81, size, timeout=1200)
    if not raw or len(raw) < size:
        raise IOError('pywws.device_libusb1.USBDevice.read_data failed')
    # Python2 libusb1 version 1.5 and earlier returns a string
    if not isinstance(raw[0], int):
        raw = map(ord, raw)
    return list(raw)
Receive data from the device. If the read fails for any reason, an :obj:`IOError` exception is raised. :param size: the number of bytes to read. :type size: int :return: the data received. :rtype: list(int)
def dereference_object(object_type, object_uuid, status):
    """Show linked persistent identifier(s)."""
    from .models import PersistentIdentifier

    query = PersistentIdentifier.query.filter_by(
        object_type=object_type, object_uuid=object_uuid
    )
    # Narrow to a single status when one was requested.
    if status:
        query = query.filter_by(status=status)

    for found_pid in query.all():
        click.echo(
            '{0.pid_type} {0.pid_value} {0.pid_provider}'.format(found_pid)
        )
Show linked persistent identifier(s).
def descriptionHtml(self):
    """ HTML help describing the class. For use in the detail editor.
    """
    cls = self.cls
    if cls is None:
        return None
    # Classes without their own help text yield an empty string.
    return cls.descriptionHtml() if hasattr(cls, 'descriptionHtml') else ''
HTML help describing the class. For use in the detail editor.
def search_process_log(self, pid, filter=None, start=0, limit=1000):
    '''
    search_process_log(self, pid, filter=None, start=0, limit=1000)

    Search in process logs

    :Parameters:
    * *pid* (`string`) -- Identifier of an existing process
    * *filter* (`object`) -- free text search pattern (checks in process log data)
    * *start* (`int`) -- start index to retrieve from. Default is 0
    * *limit* (`int`) -- maximum number of entities to retrieve. Default is 1000

    :return: Count of records found and list of search results or empty list

    :Example:
    .. code-block:: python

       filter = {'generic': 'my product param'}
       search_result = opereto_client.search_process_log(pid, filter=filter)
       if search_result['total'] > 0:
           print(search_result['list'])

    '''
    # Avoid a mutable default argument; a missing filter means "match all".
    # (`filter` shadows the builtin but is kept as part of the public API.)
    if filter is None:
        filter = {}
    pid = self._get_pid(pid)
    request_data = {'start': start, 'limit': limit, 'filter': filter}
    return self._call_rest_api('post', '/processes/' + pid + '/log/search',
                               data=request_data,
                               error='Failed to search in process log')
search_process_log(self, pid, filter={}, start=0, limit=1000) Search in process logs :Parameters: * *pid* (`string`) -- Identifier of an existing process * *start* (`int`) -- start index to retrieve from. Default is 0 * *limit* (`int`) -- maximum number of entities to retrieve. Default is 100 * *filter* (`object`) -- free text search pattern (checks in process log data) :return: Count of records found and list of search results or empty list :Example: .. code-block:: python filter = {'generic': 'my product param'} search_result = opereto_client.search_globals(filter=filter) if search_result['total'] > 0 print(search_result['list'])
def get_player(self, *tags: crtag, **params: keys):
    """Get a player information

    Parameters
    ----------
    \\*tags: str
        Valid player tags. Minimum length: 3
        Valid characters: 0289PYLQGRJCUV
    \\*\\*keys: Optional[list] = None
        Filter which keys should be included in the response
    \\*\\*exclude: Optional[list] = None
        Filter which keys should be excluded from the response
    \\*\\*timeout: Optional[int] = None
        Custom timeout that overwrites Client.timeout
    """
    # Multiple tags are joined with commas into a single path segment.
    url = '{}/{}'.format(self.api.PLAYER, ','.join(tags))
    return self._get_model(url, FullPlayer, **params)
Get a player information Parameters ---------- \*tags: str Valid player tags. Minimum length: 3 Valid characters: 0289PYLQGRJCUV \*\*keys: Optional[list] = None Filter which keys should be included in the response \*\*exclude: Optional[list] = None Filter which keys should be excluded from the response \*\*timeout: Optional[int] = None Custom timeout that overwrites Client.timeout
def string_chain(text, filters):
    """
    Chain several filters after each other, applies the filter on the
    entire string

    :param text: String to format
    :param filters: Sequence of filters to apply on String
    :return: The formatted String
    """
    # None means "no filtering at all"; an empty sequence also returns
    # the input unchanged via the loop below.
    if filters is None:
        return text
    result = text
    for apply_filter in filters:
        result = apply_filter(result)
    return result
Chain several filters after each other, applies the filter on the entire string :param text: String to format :param filters: Sequence of filters to apply on String :return: The formatted String
def read_file(self):
    """
    Open ``self.file`` for reading and return its content parsed as JSON.

    Returns an empty dict when the file is empty.

    Return : json data
    """
    # Context manager guarantees the handle is closed even if read() fails
    # (the original left the file open on error).
    with open(self.file, 'r') as file_obj:
        content = file_obj.read()
    if content:
        return json.loads(content)
    return {}
Open the file, read its content and return it parsed as JSON. Return : json data
def get_default_config(self):
    """
    Returns the default collector settings
    """
    config = super(FlumeCollector, self).get_default_config()
    # Flume agent's local metrics endpoint.
    config.update({
        'path': 'flume',
        'req_host': 'localhost',
        'req_port': 41414,
        'req_path': '/metrics',
    })
    return config
Returns the default collector settings
def get_application(*args):
    '''
    Returns a WSGI application function. If you supply the WSGI app and config
    it will use that, otherwise it will try to obtain them from a local Salt
    installation
    '''
    def wsgi_app(environ, start_response):
        # Use the caller-supplied (root, apiopts, conf) tuple when given;
        # otherwise bootstrap from the local Salt installation.
        root, _, conf = args or bootstrap_app()

        cherrypy.config.update({'environment': 'embedded'})
        cherrypy.tree.mount(root, '/', conf)
        return cherrypy.tree(environ, start_response)

    return wsgi_app
Returns a WSGI application function. If you supply the WSGI app and config it will use that, otherwise it will try to obtain them from a local Salt installation
def loop_exit_label(self, loop_type):
    """ Returns the label for the given loop type which exits the loop.
    loop_type must be one of 'FOR', 'WHILE', 'DO'
    """
    # Search innermost-first: the most recently entered matching loop wins.
    for entry in reversed(self.LOOPS):
        if entry[0] == loop_type:
            return entry[1]
    raise InvalidLoopError(loop_type)
Returns the label for the given loop type which exits the loop. loop_type must be one of 'FOR', 'WHILE', 'DO'
def process_create_ex(self, executable, arguments, environment_changes, flags, timeout_ms, priority, affinity):
    """Creates a new process running in the guest with the extended options
    for setting the process priority and affinity.

    See :py:func:`IGuestSession.process_create` for more information.

    in executable of type str
        Full path to the file to execute in the guest. The file has to
        exists in the guest VM with executable right to the session user
        in order to succeed. If empty/null, the first entry in the
        @a arguments array will be used instead (i.e. argv[0]).

    in arguments of type str
        Array of arguments passed to the new process. Starting with
        VirtualBox 5.0 this array starts with argument 0 instead of
        argument 1 as in previous versions. Whether the zeroth argument
        can be passed to the guest depends on the VBoxService version
        running there. If you depend on this, check that the
        :py:func:`IGuestSession.protocol_version` is 3 or higher.

    in environment_changes of type str
        Set of environment changes to complement
        :py:func:`IGuestSession.environment_changes`. Takes precedence
        over the session ones. The changes are in putenv format, i.e.
        "VAR=VALUE" for setting and "VAR" for unsetting. The changes are
        applied to the base environment of the impersonated guest user
        (:py:func:`IGuestSession.environment_base`) when creating the
        process.

    in flags of type :class:`ProcessCreateFlag`
        Process creation flags, see :py:class:`ProcessCreateFlag` for
        detailed description of available flags.

    in timeout_ms of type int
        Timeout (in ms) for limiting the guest process' running time.
        Pass 0 for an infinite timeout. On timeout the guest process will
        be killed and its status will be put to an appropriate value. See
        :py:class:`ProcessStatus` for more information.

    in priority of type :class:`ProcessPriority`
        Process priority to use for execution, see
        :py:class:`ProcessPriority` for available priority levels.
        This is silently ignored if not supported by guest additions.

    in affinity of type int
        Processor affinity to set for the new process. This is a list of
        guest CPU numbers the process is allowed to run on. This is
        silently ignored if the guest does not support setting the
        affinity of processes, or if the guest additions does not
        implement this feature.

    return guest_process of type :class:`IGuestProcess`
        Guest process object of the newly created process.
    """
    # Generated API wrapper: validate argument types before the COM call.
    # NOTE: only the first 10 elements of each array argument are
    # type-checked, per the generator's sampling convention.
    if not isinstance(executable, basestring):
        raise TypeError("executable can only be an instance of type basestring")
    if not isinstance(arguments, list):
        raise TypeError("arguments can only be an instance of type list")
    for a in arguments[:10]:
        if not isinstance(a, basestring):
            raise TypeError(
                "array can only contain objects of type basestring")
    if not isinstance(environment_changes, list):
        raise TypeError("environment_changes can only be an instance of type list")
    for a in environment_changes[:10]:
        if not isinstance(a, basestring):
            raise TypeError(
                "array can only contain objects of type basestring")
    if not isinstance(flags, list):
        raise TypeError("flags can only be an instance of type list")
    for a in flags[:10]:
        if not isinstance(a, ProcessCreateFlag):
            raise TypeError(
                "array can only contain objects of type ProcessCreateFlag")
    if not isinstance(timeout_ms, baseinteger):
        raise TypeError("timeout_ms can only be an instance of type baseinteger")
    if not isinstance(priority, ProcessPriority):
        raise TypeError("priority can only be an instance of type ProcessPriority")
    if not isinstance(affinity, list):
        raise TypeError("affinity can only be an instance of type list")
    for a in affinity[:10]:
        if not isinstance(a, baseinteger):
            raise TypeError(
                "array can only contain objects of type baseinteger")
    # Forward to the underlying COM/XPCOM call and wrap the result.
    guest_process = self._call("processCreateEx",
                 in_p=[executable, arguments, environment_changes, flags, timeout_ms, priority, affinity])
    guest_process = IGuestProcess(guest_process)
    return guest_process
Creates a new process running in the guest with the extended options for setting the process priority and affinity. See :py:func:`IGuestSession.process_create` for more information. in executable of type str Full path to the file to execute in the guest. The file has to exists in the guest VM with executable right to the session user in order to succeed. If empty/null, the first entry in the @a arguments array will be used instead (i.e. argv[0]). in arguments of type str Array of arguments passed to the new process. Starting with VirtualBox 5.0 this array starts with argument 0 instead of argument 1 as in previous versions. Whether the zeroth argument can be passed to the guest depends on the VBoxService version running there. If you depend on this, check that the :py:func:`IGuestSession.protocol_version` is 3 or higher. in environment_changes of type str Set of environment changes to complement :py:func:`IGuestSession.environment_changes` . Takes precedence over the session ones. The changes are in putenv format, i.e. "VAR=VALUE" for setting and "VAR" for unsetting. The changes are applied to the base environment of the impersonated guest user (:py:func:`IGuestSession.environment_base` ) when creating the process. (This is done on the guest side of things in order to be compatible with older guest additions. That is one of the motivations for not passing in the whole environment here.) in flags of type :class:`ProcessCreateFlag` Process creation flags, see :py:class:`ProcessCreateFlag` for detailed description of available flags. in timeout_ms of type int Timeout (in ms) for limiting the guest process' running time. Pass 0 for an infinite timeout. On timeout the guest process will be killed and its status will be put to an appropriate value. See :py:class:`ProcessStatus` for more information. in priority of type :class:`ProcessPriority` Process priority to use for execution, see :py:class:`ProcessPriority` for available priority levels. 
This is silently ignored if not supported by guest additions. in affinity of type int Processor affinity to set for the new process. This is a list of guest CPU numbers the process is allowed to run on. This is silently ignored if the guest does not support setting the affinity of processes, or if the guest additions does not implemet this feature. return guest_process of type :class:`IGuestProcess` Guest process object of the newly created process.
def search_dashboard_for_facets(self, **kwargs):  # noqa: E501
    """Lists the values of one or more facets over the customer's non-deleted dashboards  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.search_dashboard_for_facets(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param FacetsSearchRequestContainer body:
    :return: ResponseContainerFacetsResponseContainer
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Async callers receive the request thread; sync callers get the data.
    if kwargs.get('async_req'):
        return self.search_dashboard_for_facets_with_http_info(**kwargs)  # noqa: E501
    data = self.search_dashboard_for_facets_with_http_info(**kwargs)  # noqa: E501
    return data
Lists the values of one or more facets over the customer's non-deleted dashboards # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.search_dashboard_for_facets(async_req=True) >>> result = thread.get() :param async_req bool :param FacetsSearchRequestContainer body: :return: ResponseContainerFacetsResponseContainer If the method is called asynchronously, returns the request thread.
def agg(self, func, *fields, **name):
    """
    Calls the aggregation function `func` on each group in the
    GroubyTable, and leaves the results in a new column with the name of
    the aggregation function.

    Call `.agg` with `name='desired_column_name'` to choose a column name
    for this aggregation.
    """
    # Resolve the output column name: explicit keyword, a generated
    # "lambdaNNNN(...)" label for anonymous functions, or func.__name__.
    if name:
        if len(name) > 1 or 'name' not in name:
            raise TypeError("Unknown keyword args passed into `agg`: %s" % name)
        name = name.get('name')
        if not isinstance(name, basestring):
            raise TypeError("Column names must be strings, not `%s`" % type(name))
        else:
            name = name
    elif func.__name__ == '<lambda>':
        # Each anonymous aggregation gets a unique sequential label.
        name = "lambda%04d" % self.__lambda_num
        self.__lambda_num += 1
        name += "(%s)" % ','.join(fields)
    else:
        name = func.__name__
        name += "(%s)" % ','.join(fields)
    aggregated_column = []
    # Shape of the data handed to `func` depends on how many fields were
    # requested: tuples per row, scalars per row, or the raw group rows.
    if len(fields) > 1:
        for groupkey in self.__grouptable['groupkey']:
            agg_data = [tuple([row[field] for field in fields])
                        for row in self.__key_to_group_map[groupkey]]
            aggregated_column.append(func(agg_data))
    elif len(fields) == 1:
        field = fields[0]
        for groupkey in self.__grouptable['groupkey']:
            agg_data = [row[field] for row in self.__key_to_group_map[groupkey]]
            aggregated_column.append(func(agg_data))
    else:
        for groupkey in self.__grouptable['groupkey']:
            agg_data = self.__key_to_group_map[groupkey]
            aggregated_column.append(func(agg_data))
    # Store the results and return self so calls can be chained.
    self.__grouptable[name] = aggregated_column
    return self
Calls the aggregation function `func` on each group in the GroubyTable, and leaves the results in a new column with the name of the aggregation function. Call `.agg` with `name='desired_column_name' to choose a column name for this aggregation.
def write_serializable_array(self, array):
    """
    Write an array of serializable objects to the stream.

    Args:
        array(list): a list of serializable objects. i.e. extending
        neo.IO.Mixins.SerializableMixin
    """
    if array is None:
        # A missing array is encoded as a single zero byte.
        self.write_byte(0)
        return
    self.write_var_int(len(array))
    for element in array:
        element.Serialize(self)
Write an array of serializable objects to the stream. Args: array(list): a list of serializable objects. i.e. extending neo.IO.Mixins.SerializableMixin
def _read_structure_attributes(f): """ function to read information from a PEST-style structure file Parameters ---------- f : (file handle) file handle open for reading Returns ------- nugget : float the GeoStruct nugget transform : str the GeoStruct transformation variogram_info : dict dictionary of structure-level variogram information """ line = '' variogram_info = {} while "end structure" not in line: line = f.readline() if line == '': raise Exception("EOF while reading structure") line = line.strip().lower().split() if line[0].startswith('#'): continue if line[0] == "nugget": nugget = float(line[1]) elif line[0] == "transform": transform = line[1] elif line[0] == "numvariogram": numvariograms = int(line[1]) elif line[0] == "variogram": variogram_info[line[1]] = float(line[2]) elif line[0] == "end": break elif line[0] == "mean": warnings.warn("'mean' attribute not supported, skipping",PyemuWarning) else: raise Exception("unrecognized line in structure definition:{0}".\ format(line[0])) assert numvariograms == len(variogram_info) return nugget,transform,variogram_info
function to read information from a PEST-style structure file Parameters ---------- f : (file handle) file handle open for reading Returns ------- nugget : float the GeoStruct nugget transform : str the GeoStruct transformation variogram_info : dict dictionary of structure-level variogram information
def create_metric(metric_type, metric_id, data):
    """
    Create Hawkular-Metrics' submittable structure.

    :param metric_type: MetricType to be matched (required)
    :param metric_id: Exact string matching metric id
    :param data: A datapoint or a list of datapoints created with
        create_datapoint(value, timestamp, tags)
    :return: dict with ``type``, ``id`` and ``data`` keys, where
        ``data`` is always a list
    """
    # a single datapoint is wrapped so the payload shape is uniform
    datapoints = data if isinstance(data, list) else [data]
    return {'type': metric_type, 'id': metric_id, 'data': datapoints}
Create Hawkular-Metrics' submittable structure. :param metric_type: MetricType to be matched (required) :param metric_id: Exact string matching metric id :param data: A datapoint or a list of datapoints created with create_datapoint(value, timestamp, tags)
def unseal(self, data, return_options=False):
    """Unseal data.

    Runs the inverse of the seal pipeline: base64-decode, read the
    header, verify the signature, then strip the header and decrypt,
    decompress and unserialize the payload.  When ``return_options`` is
    true the parsed header options are returned alongside the payload.
    """
    payload = self._remove_magic(data)
    payload = urlsafe_nopadding_b64decode(payload)
    options = self._read_header(payload)
    payload = self._add_magic(payload)
    payload = self._unsign_data(payload, options)
    payload = self._remove_magic(payload)
    payload = self._remove_header(payload, options)
    payload = self._decrypt_data(payload, options)
    payload = self._decompress_data(payload, options)
    payload = self._unserialize_data(payload, options)
    return (payload, options) if return_options else payload
Unseal data
def open(filepath, edit_local=False):
    """Open any wt5 file, returning the top-level object (data or collection).

    Parameters
    ----------
    filepath : path-like
        Path to file.
        Can be either a local or remote file (http/ftp).
        Can be compressed with gz/bz2, decompression based on file name.
    edit_local : boolean (optional)
        If True, the file itself will be opened for editing.
        Otherwise, a copy will be created. Default is False.

    Returns
    -------
    WrightTools Collection or Data
        Root-level object in file.
    """
    # NOTE: this function shadows the builtin ``open``; the module keeps the
    # builtin available under the ``_open`` alias used below.
    filepath = os.fspath(filepath)
    # np.DataSource handles remote (http/ftp) fetching and gz/bz2
    # decompression transparently when the source is opened below.
    ds = np.DataSource(None)
    if edit_local is False:
        # Work on a private temp copy so the caller's file is untouched.
        # NOTE(review): mkstemp returns (fd, path); only the path (tf[1]) is
        # used here and the fd appears never closed until obj.close runs --
        # confirm obj.close / _tmpfile handling closes tf[0].
        tf = tempfile.mkstemp(prefix="", suffix=".wt5")
        with _open(tf[1], "w+b") as tff:
            with ds.open(str(filepath), "rb") as f:
                tff.write(f.read())
        filepath = tf[1]
    f = h5py.File(filepath)
    # Root attributes identify which wrapper class to construct.
    class_name = f["/"].attrs["class"]
    name = f["/"].attrs["name"]
    # edit_local=True here because we are now pointing at either the user's
    # own file (edit_local) or our private temp copy.
    if class_name == "Data":
        obj = wt_data.Data(filepath=str(filepath), name=name, edit_local=True)
    elif class_name == "Collection":
        obj = wt_collection.Collection(filepath=str(filepath), name=name, edit_local=True)
    else:
        obj = wt_group.Group(filepath=str(filepath), name=name, edit_local=True)
    if edit_local is False:
        # Keep the tempfile handle alive with the object and ensure the
        # object is closed (and the temp copy released) at garbage collection.
        setattr(obj, "_tmpfile", tf)
        weakref.finalize(obj, obj.close)
    return obj
Open any wt5 file, returning the top-level object (data or collection). Parameters ---------- filepath : path-like Path to file. Can be either a local or remote file (http/ftp). Can be compressed with gz/bz2, decompression based on file name. edit_local : boolean (optional) If True, the file itself will be opened for editing. Otherwise, a copy will be created. Default is False. Returns ------- WrightTools Collection or Data Root-level object in file.
def info_community(self, teamid):
    """Get community info for the team with the given ID.

    Returns a list of tab-separated strings, one per member row of the
    team-info table (header row skipped).
    """
    headers = {
        "Content-type": "application/x-www-form-urlencoded",
        "Accept": "text/plain",
        'Referer': 'http://' + self.domain + '/standings.phtml',
        "User-Agent": user_agent,
    }
    url = 'http://' + self.domain + '/teamInfo.phtml?tid=' + teamid
    page = self.session.get(url, headers=headers).content
    soup = BeautifulSoup(page)
    # skip the header row of the team table
    rows = soup.find('table', cellpadding=2).find_all('tr')[1:]
    info = []
    for row in rows:
        cells = row.find_all('td')
        # member id is embedded in the link's pid= query parameter
        pid = row.find('a')['href'].split('pid=')[1]
        info.append('%s\t%s\t%s\t%s\t%s' % (
            row.find('td').text, pid, row.a.text,
            cells[2].text, cells[3].text))
    return info
Get community info using an ID
def validate_image_size(image):
    """
    Validate that an image does not exceed the configured maximum size.

    :param image: uploaded image file; its ``size`` attribute is the
        file size in bytes
    :raises ValidationError: if the image is larger than the configured
        ``valid_max_image_size`` (expressed in KB)
    """
    config = get_app_config()
    # Only enforce when configuration is available.  The original code
    # dereferenced ``config.valid_max_image_size`` *before* the truthiness
    # check, raising AttributeError whenever get_app_config() returned None.
    if config:
        valid_max_image_size_in_bytes = config.valid_max_image_size * 1024
        if image.size > valid_max_image_size_in_bytes:
            raise ValidationError(
                _("The logo image file size must be less than or equal to %s KB.")
                % config.valid_max_image_size)
Validate an image's size against the configured maximum.
def parse_manifest(self, manifest_xml):
    """
    Parse manifest xml file

    :type manifest_xml: str
    :param manifest_xml: raw xml content of manifest file
    :return: dict mapping each module's version string to a dict of its
        attributes (type, name, arch, checksum, version, packager,
        location, signature, platform)
    """
    manifest = dict()
    mdata = xmltodict.parse(manifest_xml)['modules']['module']
    # xmltodict yields a single dict (not a list) when the manifest holds
    # exactly one <module> element; the original loop then iterated the
    # dict's keys and crashed.  Normalize to a list so both cases work.
    if not isinstance(mdata, list):
        mdata = [mdata]
    for module in mdata:
        mod = {
            'type': module['@type'],
            'name': module['name'],
            'arch': module['arch'],
            'checksum': module['checksum'],
            'version': module['version'],
            'packager': module['packager'],
            'location': module['location']['@href'],
            'signature': module['signature']['@href'],
            'platform': module['platform'],
        }
        manifest[mod['version']] = mod
    return manifest
Parse manifest xml file :type manifest_xml: str :param manifest_xml: raw xml content of manifest file
def _initialize_plugin_system(self) -> None:
    """Initialize the plugin system by creating its empty hook lists."""
    for attr in ('_preloop_hooks', '_postloop_hooks', '_postparsing_hooks',
                 '_precmd_hooks', '_postcmd_hooks', '_cmdfinalization_hooks'):
        setattr(self, attr, [])
Initialize the plugin system
def delete_ip_address(context, id):
    """Delete an ip address.

    :param context: neutron api request context
    :param id: UUID representing the ip address to delete.
    """
    LOG.info("delete_ip_address %s for tenant %s" % (id, context.tenant_id))
    with context.session.begin():
        addr = db_api.ip_address_find(context, id=id, scope=db_api.ONE)
        # missing or already-deallocated addresses are treated as not found
        if not addr or addr.deallocated:
            raise q_exc.IpAddressNotFound(addr_id=id)
        # fixed ips may only be removed here when explicitly allowed by config
        is_fixed = addr.address_type == ip_types.FIXED
        if is_fixed and not CONF.QUARK.ipaddr_allow_fixed_ip:
            raise n_exc.BadRequest(
                resource="ip_addresses",
                msg="Fixed ips cannot be updated using this interface.")
        # shared addresses must be disassociated from all ports first
        if addr.has_any_shared_owner():
            raise q_exc.PortRequiresDisassociation()
        db_api.update_port_associations_for_ip(context, [], addr)
        ipam_driver.deallocate_ip_address(context, addr)
Delete an ip address. : param context: neutron api request context : param id: UUID representing the ip address to delete.
def renders(self, template_content, context=None, at_paths=None,
            at_encoding=anytemplate.compat.ENCODING, **kwargs):
    """
    :param template_content: Template content
    :param context: A dict or dict-like object to instantiate given
        template file or None
    :param at_paths: Template search paths
    :param at_encoding: Template encoding
    :param kwargs: Keyword arguments passed to the template engine to
        render templates with specific features enabled.

    :return: Rendered string
    """
    kwargs = self.filter_options(kwargs, self.render_valid_options())
    paths = anytemplate.utils.mk_template_paths(None, at_paths)
    # Capture whether a context was supplied *before* substituting the
    # default: the original evaluated ``context is None`` after replacing
    # None with {}, so the "without" branch of the log was unreachable.
    has_context = context is not None
    if context is None:
        context = {}
    LOGGER.debug("Render template %s... %s context, options=%s",
                 template_content[:10],
                 "with a" if has_context else "without",
                 str(kwargs))
    return self.renders_impl(template_content, context, at_paths=paths,
                             at_encoding=at_encoding, **kwargs)
:param template_content: Template content :param context: A dict or dict-like object to instantiate given template file or None :param at_paths: Template search paths :param at_encoding: Template encoding :param kwargs: Keyword arguments passed to the template engine to render templates with specific features enabled. :return: Rendered string
def p_version_def(t):
    """version_def : VERSION ID LBRACE procedure_def procedure_def_list RBRACE EQUALS constant SEMI"""
    # NOTE: the docstring above is the PLY grammar production for this rule
    # and must not be edited as documentation.
    # Registers a version definition in the global symbol table if its
    # identifier is not already taken.
    global name_dict
    id = t[2]  # version identifier token (shadows builtin ``id``)
    value = t[8]  # the constant the version is assigned to
    lineno = t.lineno(1)
    # id_unique reports (and returns False on) duplicate definitions
    if id_unique(id, 'version', lineno):
        name_dict[id] = const_info(id, value, lineno)
version_def : VERSION ID LBRACE procedure_def procedure_def_list RBRACE EQUALS constant SEMI