Unnamed: 0
int64
0
389k
code
stringlengths
26
79.6k
docstring
stringlengths
1
46.9k
371,400
def address_offset(self):
    """Byte address offset of this node relative to its parent.

    If this node is an array, its index must be known.

    Raises
    ------
    ValueError
        If this property is referenced on a node whose array index is not
        fully defined.
    """
    if not self.inst.is_array:
        return self.inst.addr_offset
    if self.current_idx is None:
        raise ValueError("Index of array element must be known to derive address")
    # Flatten the N-dimensional index into a single element offset
    # (row-major order: later dimensions vary fastest).
    dims = self.inst.array_dimensions
    flat = 0
    for axis, index in enumerate(self.current_idx):
        stride = 1
        for extent in dims[axis + 1:]:
            stride *= extent
        flat += stride * index
    return self.inst.addr_offset + flat * self.inst.array_stride
Byte address offset of this node relative to its parent. If this node is an array, its index must be known. Raises ------ ValueError If this property is referenced on a node whose array index is not fully defined
371,401
def oauth_login(self, provider, id_column, id, attrs, defaults, redirect_url=None): user = self.query.filter(**dict([(id_column, id)])).first() if not redirect_url: redirect_url = request.args.get() or url_for(self.options["redirect_after_login"]) if self.logged_in(): if user and user != self.current: if self.options["oauth_user_already_exists_message"]: flash(self.options["oauth_user_already_exists_message"].format(provider=provider), "error") return redirect(redirect_url) if provider not in self.current.auth_providers: self.current.auth_providers.append(provider) current_app.features.models.save(self.current, **attrs) elif not user: return self.oauth_signup(provider, attrs, defaults, redirect_url=redirect_url) else: self.login(user, provider=provider, **attrs) return redirect(redirect_url)
Execute a login via oauth. If no user exists, oauth_signup() will be called
371,402
def log(cls, event=None, actor=None, data=None):
    """Generate and insert a new audit-log event.

    Args:
        event (str): Action performed
        actor (str): Actor (user or subsystem) triggering the event
        data (dict): Any extra data necessary for describing the event

    Returns:
        `None`
    """
    # Imported locally — presumably to avoid a circular import at module
    # load time; TODO confirm.
    from cloud_inquisitor.log import auditlog
    auditlog(event=event, actor=actor, data=data)
Generate and insert a new event Args: event (str): Action performed actor (str): Actor (user or subsystem) triggering the event data (dict): Any extra data necessary for describing the event Returns: `None`
371,403
def request(self): headers = {: } if self.api_key: headers[] = self.api_key return requests,headers else: if self.token: return OAuth2Session(self.client_id, token=self.token),headers else: raise APIError("No API key and no OAuth session available")
Returns an OAuth2 Session to be used to make requests. Returns None if a token hasn't yet been received.
371,404
def push(package, is_public=False, is_team=False, reupload=False, hash=None): team, owner, pkg, subpath = parse_package(package, allow_subpath=True) _check_team_id(team) session = _get_session(team) store, pkgroot = PackageStore.find_package(team, owner, pkg, pkghash=hash) if pkgroot is None: raise CommandException("Package {package} not found.".format(package=package)) pkghash = hash_contents(pkgroot) if hash is not None: assert pkghash == hash contents = pkgroot for component in subpath: try: contents = contents.children[component] except (AttributeError, KeyError): raise CommandException("Invalid subpath: %r" % component) def _push_package(dry_run=False, sizes=dict()): data = json.dumps(dict( dry_run=dry_run, is_public=is_public, is_team=is_team, contents=contents, description="", sizes=sizes ), default=encode_node) compressed_data = gzip_compress(data.encode()) if subpath: return session.post( "{url}/api/package_update/{owner}/{pkg}/{subpath}".format( url=get_registry_url(team), owner=owner, pkg=pkg, subpath=.join(subpath) ), data=compressed_data, headers={ : } ) else: return session.put( "{url}/api/package/{owner}/{pkg}/{hash}".format( url=get_registry_url(team), owner=owner, pkg=pkg, hash=pkghash ), data=compressed_data, headers={ : } ) print("Fetching upload URLs from the registry...") resp = _push_package(dry_run=True) obj_urls = resp.json()[] assert set(obj_urls) == set(find_object_hashes(contents)) obj_sizes = { obj_hash: os.path.getsize(store.object_path(obj_hash)) for obj_hash in obj_urls } success = upload_fragments(store, obj_urls, obj_sizes, reupload=reupload) if not success: raise CommandException("Failed to upload fragments") print("Uploading package metadata...") resp = _push_package(sizes=obj_sizes) package_url = resp.json()[] if not subpath: print("Updating the tag...") session.put( "{url}/api/tag/{owner}/{pkg}/{tag}".format( url=get_registry_url(team), owner=owner, pkg=pkg, tag=LATEST_TAG ), data=json.dumps(dict( hash=pkghash )) ) print("Push 
complete. %s is live:\n%s" % (package, package_url))
Push a Quilt data package to the server
371,405
def _parse_status(self, output):
    """Parse machine-readable status output into a list of Status records."""
    parsed = self._parse_machine_readable_output(output)
    statuses = []
    # Group the (timestamp, target, kind, data) tuples by target machine
    # (tup[1] is the target name).
    for target, tuples in itertools.groupby(parsed, lambda tup: tup[1]):
        info = {kind: data for timestamp, _, kind, data in tuples}
        # NOTE(review): the .get() calls below are missing their key
        # arguments — string literals appear to have been stripped from this
        # source. Presumably the keys are 'state' and 'provider'; confirm
        # against the upstream project before relying on this code.
        status = Status(name=target, state=info.get(), provider=info.get())
        statuses.append(status)
    return statuses
Unit testing is so much easier when Vagrant is removed from the equation.
371,406
def find_genome_length(self):
    """Determine the total length of all the contigs for each strain."""
    for sample in self.metadata:
        analysis = sample[self.analysistype]
        analysis.genome_length = sum(analysis.contig_lengths)
Determine the total length of all the contigs for each strain
371,407
def _set_formatter(self): if hasattr(self._config, "formatter") and self._config.formatter == "json": self._formatter = "json" else: self._formatter = "text"
Inspects config and sets the name of the formatter to either "json" or "text" as instance attr. If not present in config, default is "text"
371,408
def set_nest_transactions_with_savepoints(self, nest_transactions_with_savepoints):
    """Set whether nested transactions should use savepoints.

    :param nest_transactions_with_savepoints: `True` or `False`

    :raises DBALConnectionError: if called while a transaction is open, or
        if the platform does not support savepoints.
    """
    enabled = bool(nest_transactions_with_savepoints)
    if self._transaction_nesting_level > 0:
        raise DBALConnectionError.may_not_alter_nested_transaction_with_savepoints_in_transaction()
    if not self._platform.is_savepoints_supported():
        raise DBALConnectionError.savepoints_not_supported()
    self._nest_transactions_with_savepoints = enabled
Sets if nested transactions should use savepoints. :param nest_transactions_with_savepoints: `True` or `False`
371,409
def _run_runner(self): import salt.minion ret = {} low = {: self.opts[]} try: async_pub = self._gen_async_pub() self.jid = async_pub[] fun_args = salt.utils.args.parse_input( self.opts[], no_parse=self.opts.get(, [])) verify_fun(self.functions, low[]) args, kwargs = salt.minion.load_args_and_kwargs( self.functions[low[]], fun_args) low[] = args low[] = kwargs if self.opts.get(): if in self.opts: try: with salt.utils.files.fopen(os.path.join(self.opts[], ), ) as fp_: low[] = salt.utils.stringutils.to_unicode(fp_.readline()) except IOError: low[] = self.opts[] import salt.auth resolver = salt.auth.Resolver(self.opts) res = resolver.cli(self.opts[]) if self.opts[] and res: tok = resolver.token_cli( self.opts[], res ) if tok: low[] = tok.get(, ) if not res: log.error() return ret low.update(res) low[] = self.opts[] else: user = salt.utils.user.get_specific_user() if low[] in [, , ]: low[][] = async_pub[] if self.opts.get(, False): if self.opts.get(): async_pub = self.cmd_async(low) else: async_pub = self.asynchronous(self.opts[], low, user=user, pub=async_pub) log.warning( , async_pub[] ) return async_pub[] if self.opts.get(): ret = self.cmd_sync(low) if isinstance(ret, dict) and set(ret) == {, }: outputter = ret[] ret = ret[] else: outputter = None display_output(ret, outputter, self.opts) else: ret = self._proc_function(self.opts[], low, user, async_pub[], async_pub[], daemonize=False) except salt.exceptions.SaltException as exc: evt = salt.utils.event.get_event(, opts=self.opts) evt.fire_event({: False, : .format(exc), : 254, : self.opts[], : fun_args, : self.jid}, tag=.format(self.jid)) if in low: ret = self.get_docs(.format(low[])) else: ret = None if not ret: ret = { : salt.defaults.exitcodes.EX_SOFTWARE, } log.debug(, ret) return ret
Actually execute specific runner :return:
371,410
def convert_all(cls, records):
    """Convert the list of bibrecs into one MARCXML document string.

    Each record is converted by instantiating ``cls`` on it and calling
    ``convert()``; the results are wrapped in a ``<collection>`` element.

    :param records: list of BibRecord dicts
    :type records: list
    :returns: MARCXML as string
    """
    pieces = ["<collection>"]
    pieces.extend(cls(record).convert() for record in records)
    pieces.append("</collection>")
    return "\n".join(pieces)
Convert the list of bibrecs into one MARCXML. >>> from harvestingkit.bibrecord import BibRecordPackage >>> from harvestingkit.inspire_cds_package import Inspire2CDS >>> bibrecs = BibRecordPackage("inspire.xml") >>> bibrecs.parse() >>> xml = Inspire2CDS.convert_all(bibrecs.get_records()) :param records: list of BibRecord dicts :type records: list :returns: MARCXML as string
371,411
def varify_user_lock(repository_path, session_token): with open(cpjoin(repository_path, ), ) as fd2: content = fd2.read() if len(content) == 0: return False try: res = json.loads(content) except ValueError: return False return res[] == session_token and int(time.time()) < int(res[]) return False
Verify that a returning user has a valid token and their lock has not expired
371,412
def is_pickle_file(abspath):
    """Decide from the file extension whether *abspath* is an uncompressed pickle file.

    - ``*.pickle``/``*.pk``/``*.p``: uncompressed pickle -> True
    - ``*.gz``: compressed -> False
    - ``*.tmp``: strip the suffix and re-check the inner extension

    :param abspath: file path (case-insensitive).
    :raises PickleExtError: for any other extension.
    """
    abspath = abspath.lower()
    fname, ext = os.path.splitext(abspath)
    if ext in [".pickle", ".pk", ".p"]:
        return True
    elif ext == ".gz":
        return False
    elif ext == ".tmp":
        # Temporary file: classify by the extension underneath ".tmp".
        return is_pickle_file(fname)
    else:
        # Bug fix: the original format string had no placeholder, so the
        # `% abspath` interpolation raised TypeError on this error path.
        raise PickleExtError(
            "%r is not a valid pickle file. "
            "Extension has to be .pickle/.pk/.p for uncompressed, "
            ".gz for compressed." % abspath)
Parse file extension. - *.pickle: uncompressed, utf-8 encode pickle file - *.gz: compressed, utf-8 encode pickle file
371,413
def cmd_log(self, reopen=False, rotate=False): cmd = b if reopen: cmd += b if rotate: cmd += b return self.send_command(cmd)
Allows managing of uWSGI log related stuff :param bool reopen: Reopen log file. Could be required after third party rotation. :param bool rotate: Trigger built-in log rotation.
371,414
def _which_display(self, log: str, output: str) -> HTML: lines = re.split(r, log) i = 0 elog = [] for line in lines: i += 1 e = [] if line.startswith(): logger.debug("In ERROR Condition") e = lines[(max(i - 15, 0)):(min(i + 16, len(lines)))] elog = elog + e tlog = .join(elog) logger.debug("elog count: " + str(len(elog))) logger.debug("tlog: " + str(tlog)) color_log = highlight(log, SASLogLexer(), HtmlFormatter(full=True, style=SASLogStyle, lineseparator="<br>")) self.cachedlog = color_log if len(elog) == 0 and len(output) > self.lst_len: debug1 = 1 logger.debug("DEBUG1: " + str(debug1) + " no error and LST output ") return HTML(output) elif len(elog) == 0 and len(output) <= self.lst_len: debug1 = 2 logger.debug("DEBUG1: " + str(debug1) + " no error and no LST") return HTML(color_log) elif len(elog) > 0 and len(output) <= self.lst_len: debug1 = 3 logger.debug("DEBUG1: " + str(debug1) + " error and no LST") return HTML(color_log) else: debug1 = 4 logger.debug("DEBUG1: " + str(debug1) + " errors and LST") return HTML(color_log + output)
Determines if the log or lst should be returned as the results for the cell based on parsing the log looking for errors and the presence of lst output. :param log: str log from code submission :param output: None or str lst output if there was any :return: The correct results based on log and lst :rtype: str
371,415
def hdf5_cache(filepath=None, parent=None, group=None, names=None, typed=False, hashed_key=False, **h5dcreate_kwargs): if filepath is None: import tempfile filepath = tempfile.mktemp(prefix=, suffix=) atexit.register(os.remove, filepath) h5dcreate_kwargs.setdefault(, True) def decorator(user_function): if group is None: container = user_function.__name__ else: container = group def wrapper(*args, **kwargs): no_cache = kwargs.pop(, False) key = _make_key(args, kwargs, typed) if hashed_key: key = str(hash(key)) else: key = str(key).replace(, ) return _hdf5_cache_act(filepath, parent, container, key, names, no_cache, user_function, args, kwargs, h5dcreate_kwargs) wrapper.cache_filepath = filepath return update_wrapper(wrapper, user_function) return decorator
HDF5 cache decorator. Parameters ---------- filepath : string, optional Path to HDF5 file. If None a temporary file name will be used. parent : string, optional Path to group within HDF5 file to use as parent. If None the root group will be used. group : string, optional Path to group within HDF5 file, relative to parent, to use as container for cached data. If None the name of the wrapped function will be used. names : sequence of strings, optional Name(s) of dataset(s). If None, default names will be 'f00', 'f01', etc. typed : bool, optional If True, arguments of different types will be cached separately. For example, f(3.0) and f(3) will be treated as distinct calls with distinct results. hashed_key : bool, optional If False (default) the key will not be hashed, which makes for readable cache group names. If True the key will be hashed, however note that on Python >= 3.3 the hash value will not be the same between sessions unless the environment variable PYTHONHASHSEED has been set to the same value. Returns ------- decorator : function Examples -------- Without any arguments, will cache using a temporary HDF5 file:: >>> import allel >>> @allel.util.hdf5_cache() ... def foo(n): ... print('executing foo') ... return np.arange(n) ... >>> foo(3) executing foo array([0, 1, 2]) >>> foo(3) array([0, 1, 2]) >>> foo.cache_filepath # doctest: +SKIP '/tmp/tmp_jwtwgjz' Supports multiple return values, including scalars, e.g.:: >>> @allel.util.hdf5_cache() ... def bar(n): ... print('executing bar') ... a = np.arange(n) ... return a, a**2, n**2 ... >>> bar(3) executing bar (array([0, 1, 2]), array([0, 1, 4]), 9) >>> bar(3) (array([0, 1, 2]), array([0, 1, 4]), 9) Names can also be specified for the datasets, e.g.:: >>> @allel.util.hdf5_cache(names=['z', 'x', 'y']) ... def baz(n): ... print('executing baz') ... a = np.arange(n) ... return a, a**2, n**2 ... >>> baz(3) executing baz (array([0, 1, 2]), array([0, 1, 4]), 9) >>> baz(3) (array([0, 1, 2]), array([0, 1, 4]), 9)
371,416
def _read(self, source): if source.startswith() or source.startswith(): source = url_content(source, cache_duration=self._cache_duration, from_cache_on_error=True) return super(RemoteConfig, self)._read(source)
Reads and parses the config source :param file/str source: Config source URL (http/https), or string, file name, or file pointer.
371,417
def _star_comparison(filter_value, tested_value):
    """Test a filter containing ``*`` wildcards against a string value.

    The filter is split on ``*``; each literal fragment must occur in
    ``tested_value``, in order. A non-empty leading fragment must anchor at
    the start, a non-empty trailing fragment at the end. Returns False for
    non-string values.
    """
    if not is_string(tested_value):
        return False
    fragments = filter_value.split("*")
    final = len(fragments) - 1
    cursor = 0
    for position, fragment in enumerate(fragments):
        cursor = tested_value.find(fragment, cursor)
        if cursor == -1:
            return False
        size = len(fragment)
        # Anchor a non-empty first fragment to the start of the string.
        if position == 0 and size != 0 and cursor != 0:
            return False
        # Anchor a non-empty last fragment to the end of the string.
        if position == final and size != 0 and cursor != len(tested_value) - size:
            return False
        cursor += size
    return True
Tests a filter containing a joker
371,418
def get_spaces(self): self.spaces = [] for resource in self._get_spaces()[]: self.spaces.append(resource[][]) return self.spaces
Return a flat list of the names for spaces in the organization.
371,419
def serialize(self, pid, record, links_factory=None):
    """Serialize a single record and persistent identifier to a Dublin Core XML string.

    :param pid: Persistent identifier instance.
    :param record: Record instance.
    :param links_factory: Factory function for record links.
    """
    return simpledc.tostring(
        self.transform_record(pid, record, links_factory))
Serialize a single record and persistent identifier. :param pid: Persistent identifier instance. :param record: Record instance. :param links_factory: Factory function for record links.
371,420
def _swap_on_miss(partition_result): before, item, after = partition_result return (before, item, after) if item else (after, item, before)
Given a partition_dict result, if the partition missed, swap the before and after.
371,421
def _symbol_extract(self, regex, plus = True, brackets=False): charplus = self.pos[1] + (1 if plus else -1) consider = self.current_line[:charplus][::-1] if brackets==True: rightb = [] lastchar = None for i in range(len(consider)): if consider[i] == ")": rightb.append(i) elif consider[i] == "(" and len(rightb) > 0: lastchar = i rightb.pop() if lastchar is not None: consider = + consider[lastchar+1:] rematch = regex.match(consider) if rematch is not None: return rematch.group("symbol")[::-1] else: return ""
Extracts a symbol or full symbol from the current line, optionally including the character under the cursor. :arg regex: the compiled regular expression to use for extraction. :arg plus: when true, the character under the cursor *is* included. :arg brackets: when true, matching pairs of brackets are first removed before the regex is run.
371,422
def compute(self): self._compute_primary_smooths() self._smooth_the_residuals() self._select_best_smooth_at_each_point() self._enhance_bass() self._smooth_best_span_estimates() self._apply_best_spans_to_primaries() self._smooth_interpolated_smooth() self._store_unsorted_results(self.smooth_result, numpy.zeros(len(self.smooth_result)))
Run the SuperSmoother.
371,423
def smooth_magseries_savgol(mags, windowsize, polyorder=2):
    """Smooth the magnitude series with a Savitzky-Golay filter.

    Parameters
    ----------
    mags : np.array
        The input mags/flux time-series to smooth.
    windowsize : int
        An odd integer giving the smoothing window size.
    polyorder : int
        Polynomial degree used when generating the Savitzky-Golay filter.

    Returns
    -------
    np.array
        The smoothed mag/flux time-series array.
    """
    return savgol_filter(mags, windowsize, polyorder)
This smooths the magseries with a Savitsky-Golay filter. Parameters ---------- mags : np.array The input mags/flux time-series to smooth. windowsize : int This is a odd integer containing the smoothing window size. polyorder : int This is an integer containing the polynomial degree order to use when generating the Savitsky-Golay filter. Returns ------- np.array The smoothed mag/flux time-series array.
371,424
def lxc_path(cls, *join_paths): response = subwrap.run([, ]) output = response.std_out lxc_path = output.splitlines()[0] lxc_path = lxc_path.strip() return os.path.join(lxc_path, *join_paths)
Returns the LXC path (default on ubuntu is /var/lib/lxc)
371,425
def get_member_named(self, name): result = None members = self.members if len(name) > 5 and name[-5] == : potential_discriminator = name[-4:] result = utils.get(members, name=name[:-5], discriminator=potential_discriminator) if result is not None: return result def pred(m): return m.nick == name or m.name == name return utils.find(pred, members)
Returns the first member found that matches the name provided. The name can have an optional discriminator argument, e.g. "Jake#0001" or "Jake" will both do the lookup. However the former will give a more precise result. Note that the discriminator must have all 4 digits for this to work. If a nickname is passed, then it is looked up via the nickname. Note however, that a nickname + discriminator combo will not lookup the nickname but rather the username + discriminator combo due to nickname + discriminator not being unique. If no member is found, ``None`` is returned. Parameters ----------- name: :class:`str` The name of the member to lookup with an optional discriminator. Returns -------- :class:`Member` The member in this guild with the associated name. If not found then ``None`` is returned.
371,426
def _css_helper(self): entries = [entry for entry in self._plugin_manager.call_hook("css") if entry is not None] entries += self._get_ctx()["css"] entries = ["<link href= rel=>" for entry in entries] return "\n".join(entries)
Add CSS links for the current page and for the plugins
371,427
def get_argparser(): parser = argparse.ArgumentParser("twarc") parser.add_argument(, choices=commands) parser.add_argument(, nargs=, default=None) parser.add_argument("--log", dest="log", default="twarc.log", help="log file") parser.add_argument("--consumer_key", default=None, help="Twitter API consumer key") parser.add_argument("--consumer_secret", default=None, help="Twitter API consumer secret") parser.add_argument("--access_token", default=None, help="Twitter API access key") parser.add_argument("--access_token_secret", default=None, help="Twitter API access token secret") parser.add_argument(, help="Config file containing Twitter keys and secrets") parser.add_argument(, help="Name of a profile in your configuration file") parser.add_argument(, action=, help="Include warning messages in output") parser.add_argument("--connection_errors", type=int, default="0", help="Number of connection errors before giving up") parser.add_argument("--http_errors", type=int, default="0", help="Number of http errors before giving up") parser.add_argument("--max_id", dest="max_id", help="maximum tweet id to search for") parser.add_argument("--since_id", dest="since_id", help="smallest id to search for") parser.add_argument("--result_type", dest="result_type", choices=["mixed", "recent", "popular"], default="recent", help="search result type") parser.add_argument("--lang", dest="lang", help="limit to ISO 639-1 language code"), parser.add_argument("--geocode", dest="geocode", help="limit by latitude,longitude,radius") parser.add_argument("--locations", dest="locations", help="limit filter stream to location(s)") parser.add_argument("--follow", dest="follow", help="limit filter to tweets from given user id(s)") parser.add_argument("--recursive", dest="recursive", action="store_true", help="also fetch replies to replies") parser.add_argument("--tweet_mode", action="store", default="extended", dest="tweet_mode", choices=["compat", "extended"], help="set tweet mode") 
parser.add_argument("--protected", dest="protected", action="store_true", help="include protected tweets") parser.add_argument("--output", action="store", default=None, dest="output", help="write output to file path") parser.add_argument("--format", action="store", default="json", dest="format", choices=["json", "csv", "csv-excel"], help="set output format") parser.add_argument("--split", action="store", type=int, default=0, help="used with --output to split into numbered files") parser.add_argument("--skip_key_validation", action="store_true", help="skip checking keys are valid on startup") return parser
Get the command line argument parser.
371,428
def set_window_class(self, window, name, class_):
    """Change the window's class name and/or class via libxdo.

    :param window: X window handle to modify.
    :param name: The new class name. If ``None``, no change.
    :param class_: The new class. If ``None``, no change.
    """
    _libxdo.xdo_set_window_class(self._xdo, window, name, class_)
Change the window's classname and or class. :param name: The new class name. If ``None``, no change. :param class_: The new class. If ``None``, no change.
371,429
def _prepare_args(log_likelihood_fn, state, log_likelihood=None, description=): state_parts = list(state) if mcmc_util.is_list_like(state) else [state] state_parts = [tf.convert_to_tensor(s, name=) for s in state_parts] log_likelihood = _maybe_call_fn( log_likelihood_fn, state_parts, log_likelihood, description) return [state_parts, log_likelihood]
Processes input args to meet list-like assumptions.
371,430
def yieldOutput(self): width = self.__width if width: num_cols = len(width) fmt = [ % -w for w in width] if width[-1] > 0: fmt[-1] = fmt = self.__sep.join(fmt) for row in self.__cols: row.extend( [] * (num_cols - len(row)) ) yield fmt % tuple(row)
Generate the text output for the table. @rtype: generator of str @return: Text output.
371,431
def execute_lines(self, lines):
    """Execute the given lines in the shell and give focus to the shell widget."""
    self.shell.execute_lines(to_text_string(lines))
    self.shell.setFocus()
Execute lines and give focus to shell
371,432
def to_glyphs_guidelines(self, ufo_obj, glyphs_obj): if not ufo_obj.guidelines: return for guideline in ufo_obj.guidelines: new_guideline = self.glyphs_module.GSGuideLine() name = guideline.name if name is not None and name.endswith(LOCKED_NAME_SUFFIX): name = name[: -len(LOCKED_NAME_SUFFIX)] new_guideline.locked = True if guideline.color: name = (name or "") + COLOR_NAME_SUFFIX % str(guideline.color) if guideline.identifier: name = (name or "") + IDENTIFIER_NAME_SUFFIX % guideline.identifier new_guideline.name = name new_guideline.position = Point(guideline.x or 0, guideline.y or 0) if guideline.angle is not None: new_guideline.angle = guideline.angle % 360 elif _is_vertical(guideline.x, guideline.y, None): new_guideline.angle = 90 glyphs_obj.guides.append(new_guideline)
Set guidelines.
371,433
def notify(self, msgtype, method, params):
    """Handle an incoming notify request by forwarding it to the dispatcher.

    :param msgtype: message type marker (not used here).
    :param method: name of the method to invoke.
    :param params: parameters passed through to the dispatched call.
    """
    self.dispatch.call(method, params)
Handle an incoming notify request.
371,434
def _send_delete_request(self, path, headers): r = requests.delete(self.endpoint + path, headers=headers) return r.text
Sends the DELETE request to the Route53 endpoint. :param str path: The path to tack on to the endpoint URL for the query. :param dict headers: A dict of headers to send with the request. :rtype: str :returns: The body of the response.
371,435
def format_value(value): if isinstance(value, basestring): value = value.replace(, ) value = u.format(value) elif isinstance(value, bool): value = str(value) elif isinstance(value, int): value = "{0}i".format(value) elif isinstance(value, float): value = str(value) return value
Integers are numeric values that do not include a decimal and are followed by a trailing i when inserted (e.g. 1i, 345i, 2015i, -10i). Note that all values must have a trailing i. If they do not they will be written as floats. Floats are numeric values that are not followed by a trailing i. (e.g. 1, 1.0, -3.14, 6.0e5, 10). Boolean values indicate true or false. Valid boolean strings for line protocol are (t, T, true, True, TRUE, f, F, false, False and FALSE). Strings are text values. All string field values must be surrounded in double-quotes ". If the string contains a double-quote, the double-quote must be escaped with a backslash, e.g. \".
371,436
def create_ustar_header(self, info, encoding, errors):
    """Return the object as a ustar header block.

    Stamps the POSIX magic into *info*, rejects over-long link names, and
    splits over-long file names into prefix/name before delegating to
    ``_create_header``.
    """
    name = info["name"]
    info["magic"] = POSIX_MAGIC
    if len(info["linkname"]) > LENGTH_LINK:
        raise ValueError("linkname is too long")
    if len(name) > LENGTH_NAME:
        # ustar cannot store names longer than LENGTH_NAME directly; split
        # them into the dedicated prefix field plus a shorter name.
        info["prefix"], info["name"] = self._posix_split_name(name)
    return self._create_header(info, USTAR_FORMAT, encoding, errors)
Return the object as a ustar header block.
371,437
def visit_BoolOp(self, node):
    """Merge BoolOp operand types.

    BoolOp nodes ("and"/"or") may evaluate to any of their operands, so
    every operand's type must be combined into the node's type.
    """
    self.generic_visit(node)
    # Plain loop instead of a list comprehension: the original built a
    # throwaway list purely for the side effects of self.combine().
    for value in node.values:
        self.combine(node, value)
Merge BoolOp operand type. BoolOp are "and" and "or" and may return any of these results so all operands should have the combinable type.
371,438
def send(self): if not self.from_email: self.from_email = getattr(settings, , settings.DEFAULT_FROM_EMAIL) MessageClass = message_class_for(self.drip_model.message_class) count = 0 for user in self.get_queryset(): message_instance = MessageClass(self, user) try: result = message_instance.message.send() if result: SentDrip.objects.create( drip=self.drip_model, user=user, from_email=self.from_email, from_email_name=self.from_email_name, subject=message_instance.subject, body=message_instance.body ) count += 1 except Exception as e: logging.error("Failed to send drip %s to user %s: %s" % (self.drip_model.id, user, e)) return count
Send the message to each user on the queryset. Create SentDrip for each user that gets a message. Returns count of created SentDrips.
371,439
def gpg_fetch_key( key_url, key_id=None, config_dir=None ): dat = None from_blockstack = False if not from_blockstack and key_id is None: log.error( "No key ID given for key located at %s" % key_url ) return None if key_id is not None: rc = gpg_verify_key( key_id, key_data, config_dir=config_dir ) if not rc: log.error("Failed to verify key %s" % key_id) return None dat = key_data else: key_server = key_url if in key_server: key_server = urlparse.urlparse(key_server).netloc dat = gpg_download_key( key_id, key_server, config_dir=config_dir ) assert dat is not None and len(dat) > 0, "BUG: no key data received for from " % (key_id, key_url) return dat
Fetch a GPG public key from the given URL. Supports anything urllib2 supports. If the URL has no scheme, then assume it's a PGP key server, and use GPG to go get it. The key is not accepted into any keyrings. Return the key data on success. If key_id is given, verify the key matches. Return None on error, or on failure to carry out any key verification
371,440
def extract_arguments(text):
    """Return the argument string after a command, or None for non-commands.

    Examples:
        extract_arguments("/get name") -> 'name'
        extract_arguments("/get") -> ''
        extract_arguments("/get@botName name") -> 'name'

    :param text: String to extract the arguments from a command.
    :return: the arguments if `text` is a command (according to is_command),
        else None.
    """
    # Guard first: no point compiling/running the regex for non-commands
    # (the original unconditionally ran the match and discarded it).
    if not is_command(text):
        return None
    # Raw string literal: "\w"/"\s" inside a plain string are invalid
    # escape sequences (DeprecationWarning today, SyntaxError in future
    # Python versions).
    regexp = re.compile(r"/\w*(@\w*)*\s*([\s\S]*)", re.IGNORECASE)
    result = regexp.match(text)
    return result.group(2)
Returns the argument after the command. Examples: extract_arguments("/get name"): 'name' extract_arguments("/get"): '' extract_arguments("/get@botName name"): 'name' :param text: String to extract the arguments from a command :return: the arguments if `text` is a command (according to is_command), else None.
371,441
def rotate_scale(im, angle, scale, borderValue=0, interp=cv2.INTER_CUBIC):
    """Rotate and scale a 2-D image about its centre.

    Parameters
    ----------
    im: 2d array
        The image.
    angle: number
        The angle, in radians, to rotate (converted to degrees for OpenCV;
        negated because warpAffine's convention is opposite).
    scale: positive number
        The scale factor (applied as 1/scale in the rotation matrix, i.e.
        the matrix maps output coordinates back onto the input).
    borderValue: number, default 0
        Value for pixels outside the border.
    interp:
        OpenCV interpolation flag (default cv2.INTER_CUBIC).

    Returns
    -------
    im: 2d array
        The rotated and scaled image. The output has the same size as the
        input, so content may be cropped.
    """
    im = np.asarray(im, dtype=np.float32)
    rows, cols = im.shape
    M = cv2.getRotationMatrix2D(
        (cols / 2, rows / 2), -angle * 180 / np.pi, 1 / scale)
    im = cv2.warpAffine(im, M, (cols, rows),
                        borderMode=cv2.BORDER_CONSTANT,
                        flags=interp,
                        borderValue=borderValue)
    return im
Rotates and scales the image Parameters ---------- im: 2d array The image angle: number The angle, in radians, to rotate scale: positive number The scale factor borderValue: number, default 0 The value for the pixels outside the border (default 0) Returns ------- im: 2d array the rotated and scaled image Notes ----- The output image has the same size as the input. Therefore the image may be cropped in the process.
371,442
def get_packages(self, show): if show == or show == "all": all_packages = [] for package in self.environment: for i in range(len(self.environment[package])): if self.environment[package][i]: all_packages.append(self.environment[package][i]) return all_packages else: return self.working_set
Return list of Distributions filtered by active status or all @param show: Type of package(s) to show; active, non-active or all @type show: string: "active", "non-active", "all" @returns: list of pkg_resources Distribution objects
371,443
def phase_estimation(U: np.ndarray, accuracy: int, reg_offset: int = 0) -> Program: assert isinstance(accuracy, int) rows, cols = U.shape m = int(log2(rows)) output_qubits = range(0, accuracy) U_qubits = range(accuracy, accuracy + m) p = Program() ro = p.declare(, , len(output_qubits)) for i in output_qubits: p.inst(H(i)) for i in output_qubits: if i > 0: U = np.dot(U, U) cU = controlled(U) name = "CONTROLLED-U{0}".format(2 ** i) p.defgate(name, cU) p.inst((name, i) + tuple(U_qubits)) p = p + inverse_qft(output_qubits) for i in output_qubits: p.measure(i, ro[reg_offset + i]) return p
Generate a circuit for quantum phase estimation. :param U: A unitary matrix. :param accuracy: Number of bits of accuracy desired. :param reg_offset: Where to start writing measurements (default 0). :return: A Quil program to perform phase estimation.
371,444
def _deshuffle_field(self, *args): ip = self._invpermutation fields = [] for arg in args: fields.append( arg[ip] ) if len(fields) == 1: return fields[0] else: return fields
Return to original ordering
371,445
def _run_atexit():
    """Run the registered atexit callbacks in reverse registration order.

    Hook frameworks must invoke this after the main hook body has
    successfully completed; do not invoke it if the hook fails. The
    callback list is emptied afterwards.
    """
    global _atexit
    for callback, args, kwargs in reversed(_atexit):
        callback(*args, **kwargs)
    # Empty the list in place so other references to it stay valid.
    _atexit.clear()
Hook frameworks must invoke this after the main hook body has successfully completed. Do not invoke it if the hook fails.
371,446
def create(self, model_name): body = {: model_name} parent = + self._project_id return self._api.projects().models().create(body=body, parent=parent).execute()
Create a model. Args: model_name: the short name of the model, such as "iris". Returns: If successful, returns informaiton of the model, such as {u'regions': [u'us-central1'], u'name': u'projects/myproject/models/mymodel'} Raises: If the model creation failed.
371,447
async def has_started(self):
    """Whether the handler has completed all start-up processes (connection,
    session, link, authentication) and is ready to process messages.

    **This function is now deprecated and will be removed in v2.0+.**

    :rtype: bool
    :raises EventHubError: if CBS authentication timed out.
    """
    timeout = False
    auth_in_progress = False
    # Only check token state when the connection uses CBS authentication.
    if self._handler._connection.cbs:
        timeout, auth_in_progress = await self._handler._auth.handle_token_async()
    if timeout:
        raise EventHubError("Authorization timeout.")
    if auth_in_progress:
        return False
    if not await self._handler._client_ready_async():
        return False
    return True
Whether the handler has completed all start up processes such as establishing the connection, session, link and authentication, and is now ready to process messages. **This function is now deprecated and will be removed in v2.0+.** :rtype: bool
371,448
def is_less_than(self, other):
    """Ensure :attr:`subject` is less than *other*.

    Raises the configured error type when the comparison fails; on success
    returns a ChainInspector wrapping the subject for further chaining.
    """
    try:
        unittest_case.assertTrue(self._subject < other)
    except self._catch as err:
        # NOTE(review): `err` is discarded and the new error is not chained
        # with `raise ... from err`, so the original traceback context is
        # lost — consider chaining.
        raise self._error_factory(_format("Expected {} to be less than {}", self._subject, other))
    return ChainInspector(self._subject)
Ensures :attr:`subject` is less than *other*.
371,449
def _parse_entry(self, dom): entry = {} for tag in self._cap_tags: if tag == : try: geotypes = [] return entry
Sigh....
371,450
def list_tasks(collector): print("Usage: aws_syncr <environment> <task>") print("") print("Available environments to choose from are") print("-----------------------------------------") print("") for environment in os.listdir(collector.configuration_folder): location = os.path.join(collector.configuration_folder, environment) if os.path.isdir(location) and not environment.startswith("."): print("\t{0}".format(environment)) print("") print("Available tasks to choose from are:") print("-----------------------------------") print("") keygetter = lambda item: item[1].label tasks = sorted(available_actions.items(), key=keygetter) sorted_tasks = sorted(list(tasks), key=lambda item: len(item[0])) max_length = max(len(name) for name, _ in sorted_tasks) for key, task in sorted_tasks: desc = dedent(task.__doc__ or "").strip().split()[0] print("\t{0}{1} :-: {2}".format(" " * (max_length-len(key)), key, desc)) print("")
List the available_tasks
371,451
def fix_windows_stdout_stderr():
    """Redirect the standard streams to no-op dummies on frozen Windows apps.

    Frozen (e.g. PyInstaller) Windows GUI processes have no usable
    stdout/stderr; if a process writes to them anyway a nasty error dialog
    pops up, so the streams are replaced with silent dummy objects.
    See https://github.com/jopohl/urh/issues/370

    No-op on every other platform.
    """
    if not (hasattr(sys, "frozen") and sys.platform == "win32"):
        return
    try:
        sys.stdout.write("\n")
        sys.stdout.flush()
    except Exception:  # BUGFIX: narrowed from bare `except:` so SystemExit/KeyboardInterrupt are not swallowed
        class DummyStream(object):
            """Minimal file-like object that silently discards all I/O."""
            def __init__(self):
                pass

            def write(self, data):
                pass

            def read(self, data):
                pass

            def flush(self):
                pass

            def close(self):
                pass

        sys.stdout, sys.stderr, sys.stdin = DummyStream(), DummyStream(), DummyStream()
        sys.__stdout__, sys.__stderr__, sys.__stdin__ = DummyStream(), DummyStream(), DummyStream()
Processes can't write to stdout/stderr on frozen windows apps because they do not exist here if process tries it anyway we get a nasty dialog window popping up, so we redirect the streams to a dummy see https://github.com/jopohl/urh/issues/370
371,452
def file_hash(fname):
    """Calculate the SHA256 hash of a given file.

    Useful for checking if a file has changed or been corrupted.

    Parameters
    ----------
    fname : str
        The name of the file.

    Returns
    -------
    hash : str
        The hex-encoded SHA256 digest of the file contents.
    """
    digest = hashlib.sha256()
    # Stream in 64 KiB chunks so large files do not need to fit in memory.
    with open(fname, "rb") as handle:
        for block in iter(lambda: handle.read(65536), b""):
            digest.update(block)
    return digest.hexdigest()
Calculate the SHA256 hash of a given file. Useful for checking if a file has changed or been corrupted. Parameters ---------- fname : str The name of the file. Returns ------- hash : str The hash of the file. Examples -------- >>> fname = "test-file-for-hash.txt" >>> with open(fname, "w") as f: ... __ = f.write("content of the file") >>> print(file_hash(fname)) 0fc74468e6a9a829f103d069aeb2bb4f8646bad58bf146bb0e3379b759ec4a00 >>> import os >>> os.remove(fname)
371,453
def new_multidigraph(self, name, data=None, **attr): self._init_graph(name, ) mdg = MultiDiGraph(self, name, data, **attr) self._graph_objs[name] = mdg return mdg
Return a new instance of type MultiDiGraph, initialized with the given data if provided. :arg name: a name for the graph :arg data: dictionary or NetworkX graph object providing initial state
371,454
def _set_raw_return(self, sep): raw = if self.dst.style[] == : raw += spaces = * 4 with_space = lambda s: .join([self.docs[][] + spaces + l.lstrip() if i > 0 else l for i, l in enumerate(s.splitlines())]) raw += self.dst.numpydoc.get_key_section_header(, self.docs[][]) if self.docs[][]: rtype = self.docs[][] else: rtype = if type(self.docs[][]) is list: for ret_elem in self.docs[][]: if type(ret_elem) is tuple and len(ret_elem) == 3: rtype = ret_elem[2] if rtype is None: rtype = raw += self.docs[][] if ret_elem[0]: raw += ret_elem[0] + raw += rtype + + self.docs[][] + spaces + with_space(ret_elem[1]).strip() + else: raw += self.docs[][] + rtype + raw += self.docs[][] + spaces + with_space(str(ret_elem)).strip() + elif self.docs[][] is not None: raw += self.docs[][] + rtype raw += + self.docs[][] + spaces + with_space(self.docs[][]).strip() + elif self.dst.style[] == : raw += spaces = * 2 with_space = lambda s: .join([self.docs[][] + spaces +\ l.lstrip() if i > 0 else\ l for i, l in enumerate(s.splitlines())]) raw += self.dst.googledoc.get_key_section_header(, self.docs[][]) if self.docs[][]: rtype = self.docs[][] else: rtype = None if type(self.docs[][]) is list: for ret_elem in self.docs[][]: if type(ret_elem) is tuple and len(ret_elem) == 3: rtype = ret_elem[2] if rtype is None: rtype = raw += self.docs[][] + spaces raw += rtype + + with_space(ret_elem[1]).strip() + else: if rtype: raw += self.docs[][] + spaces + rtype + raw += with_space(str(ret_elem)).strip() + else: raw += self.docs[][] + spaces + with_space(str(ret_elem)).strip() + elif self.docs[][] is not None: if rtype: raw += self.docs[][] + spaces + rtype + raw += with_space(self.docs[][]).strip() + else: raw += self.docs[][] + spaces + with_space(self.docs[][]).strip() + elif self.dst.style[] == : pass else: with_space = lambda s: .join([self.docs[][] + l if i > 0 else l for i, l in enumerate(s.splitlines())]) if self.docs[][]: if not self.docs[][]: raw += raw += self.docs[][] + self.dst.get_key(, ) + 
sep + with_space(self.docs[][].rstrip()).strip() + if self.docs[][]: if not self.docs[][]: raw += raw += self.docs[][] + self.dst.get_key(, ) + sep + self.docs[][].rstrip() + return raw
Set the output raw return section :param sep: the separator of current style
371,455
def save(self, obj, run_id):
    """Save a workflow run.

    obj - instance of a workflow to save
    run_id - unique id to give the run
    """
    identifier = self.generate_save_identifier(obj, run_id)
    self.store.save(obj, identifier)
Save a workflow obj - instance of a workflow to save run_id - unique id to give the run
371,456
def similar_movies(self, **kwargs): path = self._get_id_path() response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
Get the similar movies for a specific movie id. Args: page: (optional) Minimum value of 1. Expected value is an integer. language: (optional) ISO 639-1 code. append_to_response: (optional) Comma separated, any movie method. Returns: A dict representation of the JSON returned from the API.
371,457
def create_from_root(self, root_source):
    """Return a populated ObjectRoot built from the source's data.

    Copies configuration and versions, hydrates every method and type of
    each version, then computes the change status of the resulting root.
    """
    root_dto = ObjectRoot()
    root_dto.configuration = root_source.configuration
    root_dto.versions = [Version(v) for v in root_source.versions.values()]
    for version in sorted(root_source.versions.values()):
        version_types = root_source.versions[version.name].types
        hydrator = Hydrator(version, root_source.versions, version_types)
        for method in version.methods.values():
            hydrator.hydrate_method(root_dto, root_source, method)
        for type_ in version.types.values():
            hydrator.hydrate_type(root_dto, root_source, type_)
    self.define_changes_status(root_dto)
    return root_dto
Return a populated Object Root from dictionnary datas
371,458
def create(self, request): if self.readonly: return HTTPMethodNotAllowed(headers={: }) collection = loads(request.body, object_hook=GeoJSON.to_instance) if not isinstance(collection, FeatureCollection): return HTTPBadRequest() session = self.Session() objects = [] for feature in collection.features: create = False obj = None if hasattr(feature, ) and feature.id is not None: obj = session.query(self.mapped_class).get(feature.id) if self.before_create is not None: self.before_create(request, feature, obj) if obj is None: obj = self.mapped_class(feature) create = True else: obj.__update__(feature) if create: session.add(obj) objects.append(obj) session.flush() collection = FeatureCollection(objects) if len(objects) > 0 else None request.response.status_int = 201 return collection
Read the GeoJSON feature collection from the request body and create new objects in the database.
371,459
def is_valid(self):
    """Return True if all widget contents are valid."""
    for lineedit in self.lineedits:
        # Only validate enabled fields that have a registered validator.
        if lineedit not in self.validate_data or not lineedit.isEnabled():
            continue
        validator, invalid_msg = self.validate_data[lineedit]
        text = to_text_string(lineedit.text())
        if not validator(text):
            QMessageBox.critical(self, self.get_name(),
                                 "%s:<br><b>%s</b>" % (invalid_msg, text),
                                 QMessageBox.Ok)
            return False
    return True
Return True if all widget contents are valid
371,460
def guess_codec(file, errors="strict", require_char=False): gedcom_char_to_codec = { : , } bom_codec = check_bom(file) bom_size = file.tell() codec = bom_codec or while True: line = file.readline() if not line: raise IOError("Unexpected EOF while reading GEDCOM header") line = line.lstrip().rstrip(b"\r\n") words = line.split() if len(words) >= 2 and words[0] == b"0" and words[1] != b"HEAD": if require_char: raise CodecError("GEDCOM header does not have CHAR record") else: break elif len(words) >= 3 and words[0] == b"1" and words[1] == b"CHAR": try: encoding = words[2].decode(codec, errors) encoding = gedcom_char_to_codec.get(encoding.lower(), encoding.lower()) new_codec = codecs.lookup(encoding).name except LookupError: raise CodecError("Unknown codec name {0}".format(encoding)) if bom_codec is None: codec = new_codec elif new_codec != bom_codec: raise CodecError("CHAR codec {0} is different from BOM " "codec {1}".format(new_codec, bom_codec)) break return codec, bom_size
Look at file contents and guess its correct encoding. File must be open in binary mode and positioned at offset 0. If BOM record is present then it is assumed to be UTF-8 or UTF-16 encoded file. GEDCOM header is searched for CHAR record and encoding name is extracted from it, if BOM record is present then CHAR record must match BOM-defined encoding. :param file: File object, must be open in binary mode. :param str errors: Controls error handling behavior during string decoding, accepts same values as standard `codecs.decode` method. :param bool require_char: If True then exception is thrown if CHAR record is not found in a header, if False and CHAR is not in the header then codec determined from BOM or "gedcom" is returned. :returns: Tuple (codec_name, bom_size) :raises: :py:class:`CodecError` when codec name in file is unknown or when codec name in file contradicts codec determined from BOM. :raises: :py:class:`UnicodeDecodeError` when codec fails to decode input lines and `errors` is set to "strict" (default).
371,461
def _percentile(N, percent, key=lambda x:x): if not N: return None k = (len(N)-1) * percent f = math.floor(k) c = math.ceil(k) if f == c: return key(N[int(k)]) d0 = key(N[int(f)]) * (c-k) d1 = key(N[int(c)]) * (k-f) return d0+d1
Find the percentile of a list of values. @parameter N - is a list of values. Note N MUST BE already sorted. @parameter percent - a float value from 0.0 to 1.0. @parameter key - optional key function to compute value from each element of N. @return - the percentile of the values
371,462
def argument(self) -> bool: next = self.peek() if next == "";{+'" if quoted else ""))
Parse statement argument. Return ``True`` if the argument is followed by block of substatements.
371,463
def load_bernoulli_mnist_dataset(directory, split_name):
    """Returns Hugo Larochelle's binary static MNIST tf.data.Dataset."""
    amat_file = download(directory, FILE_TEMPLATE.format(split=split_name))
    dataset = tf.data.TextLineDataset(amat_file)

    def _str_to_arr(line):
        # Each whitespace-separated token in an amat line is b"0" or b"1".
        return np.array([token == b"1" for token in line.split()])

    def _parser(s):
        booltensor = tf.compat.v1.py_func(_str_to_arr, [s], tf.bool)
        image = tf.reshape(booltensor, [28, 28, 1])
        # Labels are unused for this dataset; a constant 0 is returned.
        return tf.cast(image, dtype=tf.float32), tf.constant(0, tf.int32)

    return dataset.map(_parser)
Returns Hugo Larochelle's binary static MNIST tf.data.Dataset.
371,464
def lowpass(ts, cutoff_hz, order=3):
    """Forward-backward (zero-phase) Butterworth low-pass filter.

    Parameters
    ----------
    ts : Timeseries
        Input timeseries, 1D or 2D (samples x channels); must expose
        ``tspan`` and ``labels`` attributes.  -- assumed from usage; confirm
    cutoff_hz : float
        Cutoff frequency in Hz.
    order : int, optional
        Filter order (default 3).

    Returns
    -------
    Timeseries
        Filtered signal with the same tspan and labels, squeezed back to 1D
        when the input was 1D.

    Raises
    ------
    ValueError
        If the designed filter would be unstable.
    """
    orig_ndim = ts.ndim
    # BUGFIX: compare integers with '==' rather than 'is'; small-int identity
    # is a CPython implementation detail (SyntaxWarning on 3.8+).
    if ts.ndim == 1:
        ts = ts[:, np.newaxis]
    channels = ts.shape[1]
    # Sampling rate inferred from the time span.
    fs = (len(ts) - 1.0) / (ts.tspan[-1] - ts.tspan[0])
    nyq = 0.5 * fs
    cutoff = cutoff_hz / nyq
    # NOTE(review): btype value was stripped in the source record; 'low' is
    # the low-pass selector consistent with this function's purpose.
    b, a = signal.butter(order, cutoff, btype='low')
    if not np.all(np.abs(np.roots(a)) < 1.0):
        raise ValueError('Filter denominator has roots outside the unit '
                         'circle: filter would be unstable.')
    dtype = ts.dtype
    output = np.zeros((len(ts), channels), dtype)
    for i in range(channels):
        # filtfilt applies the filter forward and backward for zero phase lag.
        output[:, i] = signal.filtfilt(b, a, ts[:, i])
    if orig_ndim == 1:
        output = output[:, 0]
    return Timeseries(output, ts.tspan, labels=ts.labels)
forward-backward butterworth low-pass filter
371,465
def supercell_composite(mucape, effective_storm_helicity, effective_shear): r effective_shear = np.clip(atleast_1d(effective_shear), None, 20 * units()) effective_shear[effective_shear < 10 * units()] = 0 * units() effective_shear = effective_shear / (20 * units()) return ((mucape / (1000 * units())) * (effective_storm_helicity / (50 * units())) * effective_shear).to()
r"""Calculate the supercell composite parameter. The supercell composite parameter is designed to identify environments favorable for the development of supercells, and is calculated using the formula developed by [Thompson2004]_: .. math:: \text{SCP} = \frac{\text{MUCAPE}}{1000 \text{J/kg}} * \frac{\text{Effective SRH}}{50 \text{m}^2/\text{s}^2} * \frac{\text{Effective Shear}}{20 \text{m/s}} The effective_shear term is set to zero below 10 m/s and capped at 1 when effective_shear exceeds 20 m/s. Parameters ---------- mucape : `pint.Quantity` Most-unstable CAPE effective_storm_helicity : `pint.Quantity` Effective-layer storm-relative helicity effective_shear : `pint.Quantity` Effective bulk shear Returns ------- array-like supercell composite
371,466
def sleep(self, ms=1):
    """Pauses the current green thread for *ms* milliseconds::

        p = h.pipe()

        @h.spawn
        def _():
            p.send('1')
            h.sleep(50)
            p.send('2')

        p.recv()  # returns '1'
        p.recv()  # returns '2' after 50 ms
    """
    current = getcurrent()
    self.scheduled.add(ms, current)
    self.loop.switch()
Pauses the current green thread for *ms* milliseconds:: p = h.pipe() @h.spawn def _(): p.send('1') h.sleep(50) p.send('2') p.recv() # returns '1' p.recv() # returns '2' after 50 ms
371,467
def save_log_to_html(self): html = html_header() html += ( % resources_path()) html += ( % self.tr()) for item in self.dynamic_messages_log: html += "%s\n" % item.to_html() html += html_footer() if self.log_path is not None: html_to_file(html, self.log_path) else: msg = self.tr() raise InvalidParameterError(msg)
Helper to write the log out as an html file.
371,468
def instantiate(self, params):
    """Instantiate the named map on the server.

    :param params: The json with the styling info for the named map
    :type params: dict

    :return:

    :raise: CartoException on any failure other than rate limiting, which
        is re-raised unchanged.
    """
    try:
        self.send(self.Meta.collection_endpoint, "POST", json=params)
    except CartoRateLimitException:
        # BUGFIX: bare `raise` re-raises the active exception with its
        # original traceback, instead of `raise e` which rebinds it.
        raise
    except Exception as e:
        raise CartoException(e)
Allows you to fetch the map tiles of a created map :param params: The json with the styling info for the named map :type params: dict :return: :raise: CartoException
371,469
def get_movielens(variant="20m"): filename = "movielens_%s.hdf5" % variant path = os.path.join(_download.LOCAL_CACHE_DIR, filename) if not os.path.isfile(path): log.info("Downloading dataset to ", path) _download.download_file(URL_BASE + filename, path) else: log.info("Using cached dataset at ", path) with h5py.File(path, ) as f: m = f.get() plays = csr_matrix((m.get(), m.get(), m.get())) return np.array(f[]), plays
Gets movielens datasets Parameters --------- variant : string Which version of the movielens dataset to download. Should be one of '20m', '10m', '1m' or '100k'. Returns ------- movies : ndarray An array of the movie titles. ratings : csr_matrix A sparse matrix where the row is the movieId, the column is the userId and the value is the rating.
371,470
def infer_gtr(self, print_raw=False, marginal=False, normalized_rate=True, fixed_pi=None, pc=5.0, **kwargs): if marginal: _ml_anc = self._ml_anc_marginal else: _ml_anc = self._ml_anc_joint self.logger("TreeAnc.infer_gtr: inferring the GTR model from the tree...", 1) if (self.tree is None) or (self.aln is None): self.logger("TreeAnc.infer_gtr: ERROR, alignment or tree are missing", 0) return ttconf.ERROR _ml_anc(final=True, **kwargs) alpha = list(self.gtr.alphabet) n=len(alpha) nij = np.zeros((n,n)) Ti = np.zeros(n) self.logger("TreeAnc.infer_gtr: counting mutations...", 2) for node in self.tree.find_clades(): if hasattr(node,): for a,pos, d in node.mutations: i,j = alpha.index(d), alpha.index(a) nij[i,j]+=1 Ti[j] += 0.5*self._branch_length_to_gtr(node) Ti[i] -= 0.5*self._branch_length_to_gtr(node) for ni,nuc in enumerate(node.cseq): i = alpha.index(nuc) Ti[i] += self._branch_length_to_gtr(node)*self.multiplicity[ni] self.logger("TreeAnc.infer_gtr: counting mutations...done", 3) if print_raw: print(,alpha) print(, nij, nij.sum()) print(, Ti, Ti.sum()) root_state = np.array([np.sum((self.tree.root.cseq==nuc)*self.multiplicity) for nuc in alpha]) self._gtr = GTR.infer(nij, Ti, root_state, fixed_pi=fixed_pi, pc=pc, alphabet=self.gtr.alphabet, logger=self.logger, prof_map = self.gtr.profile_map) if normalized_rate: self.logger("TreeAnc.infer_gtr: setting overall rate to 1.0...", 2) self._gtr.mu=1.0 return self._gtr
Calculates a GTR model given the multiple sequence alignment and the tree. It performs ancestral sequence inferrence (joint or marginal), followed by the branch lengths optimization. Then, the numbers of mutations are counted in the optimal tree and related to the time within the mutation happened. From these statistics, the relative state transition probabilities are inferred, and the transition matrix is computed. The result is used to construct the new GTR model of type 'custom'. The model is assigned to the TreeAnc and is used in subsequent analysis. Parameters ----------- print_raw : bool If True, print the inferred GTR model marginal : bool If True, use marginal sequence reconstruction normalized_rate : bool If True, sets the mutation rate prefactor to 1.0. fixed_pi : np.array Provide the equilibrium character concentrations. If None is passed, the concentrations will be inferred from the alignment. pc: float Number of pseudo counts to use in gtr inference Returns ------- gtr : GTR The inferred GTR model
371,471
def smoothed_joint(seg0, seg1, maxjointsize=3, tightness=1.99): assert seg0.end == seg1.start assert 0 < maxjointsize assert 0 < tightness < 2 q = seg0.end try: v = seg0.unit_tangent(1) except: v = seg0.unit_tangent(1 - 1e-4) try: w = seg1.unit_tangent(0) except: w = seg1.unit_tangent(1e-4) max_a = maxjointsize / 2 a = min(max_a, min(seg1.length(), seg0.length()) / 20) if isinstance(seg0, Line) and isinstance(seg1, Line): (0) = bv, and c b = (2 - tightness)*a elbow = CubicBezier(q - a*v, q - (a - b/3)*v, q + (a - b/3)*w, q + a*w) seg0_trimmed = Line(seg0.start, elbow.start) seg1_trimmed = Line(elbow.end, seg1.end) return seg0_trimmed, [elbow], seg1_trimmed elif isinstance(seg0, Line): (0) = bv, and c b = (4 - tightness)*a elbow = CubicBezier(q - a*v, q + (b/3 - a)*v, q - b/3*w, q) seg0_trimmed = Line(seg0.start, elbow.start) return seg0_trimmed, [elbow], seg1 elif isinstance(seg1, Line): args = (seg1.reversed(), seg0.reversed(), maxjointsize, tightness) rseg1_trimmed, relbow, rseg0 = smoothed_joint(*args) elbow = relbow[0].reversed() return seg0, [elbow], rseg1_trimmed.reversed() else: t0 = seg0.ilength(seg0.length() - a/2) t1 = seg1.ilength(a/2) seg0_trimmed = seg0.cropped(0, t0) seg1_trimmed = seg1.cropped(t1, 1) seg0_line = Line(seg0_trimmed.end, q) seg1_line = Line(q, seg1_trimmed.start) args = (seg0_trimmed, seg0_line, maxjointsize, tightness) dummy, elbow0, seg0_line_trimmed = smoothed_joint(*args) args = (seg1_line, seg1_trimmed, maxjointsize, tightness) seg1_line_trimmed, elbow1, dummy = smoothed_joint(*args) args = (seg0_line_trimmed, seg1_line_trimmed, maxjointsize, tightness) seg0_line_trimmed, elbowq, seg1_line_trimmed = smoothed_joint(*args) elbow = elbow0 + [seg0_line_trimmed] + elbowq + [seg1_line_trimmed] + elbow1 return seg0_trimmed, elbow, seg1_trimmed
See Andy's notes on Smoothing Bezier Paths for an explanation of the method. Input: two segments seg0, seg1 such that seg0.end==seg1.start, and jointsize, a positive number Output: seg0_trimmed, elbow, seg1_trimmed, where elbow is a cubic bezier object that smoothly connects seg0_trimmed and seg1_trimmed.
371,472
def team(self, team_id): json = None if int(team_id) > 0: url = self._build_url(, str(team_id)) json = self._json(self._get(url), 200) return Team(json, self._session) if json else None
Returns Team object with information about team specified by ``team_id``. :param int team_id: (required), unique id for the team :returns: :class:`Team <Team>`
371,473
def greedy_mapping(self, reference, hypothesis, uem=None):
    """Greedy label mapping.

    Parameters
    ----------
    reference : Annotation
    hypothesis : Annotation
        Reference and hypothesis diarization.
    uem : Timeline, optional
        Evaluation map; when given, both annotations are restricted to it
        before mapping.

    Returns
    -------
    mapping : dict
        Mapping between hypothesis (key) and reference (value) labels.
    """
    if uem:
        reference, hypothesis = self.uemify(
            reference, hypothesis, uem=uem)
    return self.mapper_(hypothesis, reference)
Greedy label mapping Parameters ---------- reference : Annotation hypothesis : Annotation Reference and hypothesis diarization uem : Timeline Evaluation map Returns ------- mapping : dict Mapping between hypothesis (key) and reference (value) labels
371,474
def slice_by_size(seq, size):
    """Slice a sequence into chunks; yield each chunk as a tuple of at most
    `size` elements (the final chunk is shorter when len(seq) is not a
    multiple of `size`).

    :param seq: any iterable
    :param size: positive chunk length
    """
    from itertools import chain  # local import keeps the block self-contained
    # A fresh object() is a unique sentinel: compared by identity, it cannot
    # collide with (or compare equal to) any real element of `seq`.
    filler = object()
    padded = chain(seq, [filler] * size)
    for chunk in zip(*(padded,) * size):
        if any(item is filler for item in chunk):
            chunk = tuple(item for item in chunk if item is not filler)
        if chunk:
            yield chunk
Slice a sequence into chunks, return as a generation of chunks with `size`.
371,475
def _save_params(self):
    """Saves model parameters at the current checkpoint and optionally
    cleans up older parameter files to save disk space.
    """
    self.model.save_params_to_file(self.current_params_fname)
    utils.cleanup_params_files(
        self.model.output_dir,
        self.max_params_files_to_keep,
        self.state.checkpoint,
        self.state.best_checkpoint,
        self.keep_initializations)
Saves model parameters at current checkpoint and optionally cleans up older parameter files to save disk space.
371,476
def remove_from_model(self, remove_orphans=False):
    """Removes the reaction from a model.

    This removes all associations between a reaction, the associated
    model, metabolites and genes.

    The change is reverted upon exit when using the model as a context.

    Parameters
    ----------
    remove_orphans : bool
        Remove orphaned genes and metabolites from the model as well.
    """
    self._model.remove_reactions(
        [self], remove_orphans=remove_orphans)
Removes the reaction from a model. This removes all associations between a reaction the associated model, metabolites and genes. The change is reverted upon exit when using the model as a context. Parameters ---------- remove_orphans : bool Remove orphaned genes and metabolites from the model as well
371,477
def get_alarms_list(self, num_items=100, params=None):
    """Get alarms as list of dictionaries.

    :param int num_items: Max items to retrieve
    :param dict params: Additional params dictionary according to:
        https://www.alienvault.com/documentation/api/usm-anywhere-api.htm#/alarms
    :returns list: list of alarms, or None when invalid query parameters
        were supplied
    """
    if params:
        invalid = set(params.keys()) - VALID_ALARM_PARAMS
        if invalid:
            # BUGFIX: the original message was a plain string literal, so the
            # {...} expression was logged verbatim instead of being interpolated.
            self.log.error(f"Invalid alarm query parameters: {invalid}")
            return None
    return self._retrieve_items(item_type="alarms", num_items=num_items, params=params)
Get alarms as list of dictionaries :param int num_items: Max items to retrieve :param dict params: Additional params dictionary according to: https://www.alienvault.com/documentation/api/usm-anywhere-api.htm#/alarms :returns list: list of alarms
371,478
def chatToId(url):
    """Extract the conversation ID from a conversation URL.

    Matches addresses containing ``conversations/<chat>``.

    Args:
        url (str): Skype API URL

    Returns:
        str: extracted identifier, or None when the URL does not match
    """
    found = re.search(r"conversations/([0-9]+:[^/]+)", url)
    if found:
        return found.group(1)
    return None
Extract the conversation ID from a conversation URL. Matches addresses containing ``conversations/<chat>``. Args: url (str): Skype API URL Returns: str: extracted identifier
371,479
def generic_filename(path):
    """Extract filename of given path os-independently, taking care of
    known path separators.

    :param path: path
    :return: filename
    :rtype: str or unicode (depending on given path)
    """
    name = path
    # Apply every known separator in turn, keeping only the last component.
    for separator in common_path_separators:
        if separator in name:
            name = name.rsplit(separator, 1)[1]
    return name
Extract filename of given path os-indepently, taking care of known path separators. :param path: path :return: filename :rtype: str or unicode (depending on given path)
371,480
def parse(self, lines): state = 0 entry = Entry() for line in lines: if not line: if state == 1: entry = Entry() state = 0 elif state == 2: self._add_entry(entry) entry = Entry() state = 0 i = line.find() if i >= 0: line = line[:i] line = line.strip() if not line: continue line = line.split(, 1) if len(line) == 2: line[0] = line[0].strip().lower() line[1] = urllib.parse.unquote(line[1].strip()) if line[0] == "user-agent": if state == 2: self._add_entry(entry) entry = Entry() entry.useragents.append(line[1]) state = 1 elif line[0] == "disallow": if state != 0: entry.rulelines.append(RuleLine(line[1], False)) state = 2 elif line[0] == "allow": if state != 0: entry.rulelines.append(RuleLine(line[1], True)) state = 2 if state == 2: self._add_entry(entry)
Parse the input lines from a robots.txt file. We allow that a user-agent: line is not preceded by one or more blank lines.
371,481
def edit(text, pos, key):
    """Process a key input in the context of a line, and return the
    resulting text and cursor position.

    `text` and `key` must be of type str or unicode, and `pos` must be an
    int in the range [0, len(text)].

    If `key` is in keys(), the corresponding command is executed on the
    line. Otherwise, if `key` is a single character, that character is
    inserted at the cursor position. If neither condition is met, `text`
    and `pos` are returned unmodified.
    """
    handler = _key_bindings.get(key)
    if handler is not None:
        return handler(text, pos)
    if len(key) == 1:
        # Plain character: insert it at the cursor and advance.
        return text[:pos] + key + text[pos:], pos + 1
    return text, pos
Process a key input in the context of a line, and return the resulting text and cursor position. `text' and `key' must be of type str or unicode, and `pos' must be an int in the range [0, len(text)]. If `key' is in keys(), the corresponding command is executed on the line. Otherwise, if `key' is a single character, that character is inserted at the cursor position. If neither condition is met, `text' and `pos' are returned unmodified.
371,482
def _save_results(self, zipdata, outdir, module, gmt, rank_metric, permutation_type): res = OrderedDict() for gs, gseale, ind, RES in zipdata: rdict = OrderedDict() rdict[] = gseale[0] rdict[] = gseale[1] rdict[] = gseale[2] rdict[] = gseale[3] rdict[] = len(gmt[gs]) rdict[] = len(ind) _genes = rank_metric.index.values[ind] rdict[] = ";".join([ str(g).strip() for g in _genes ]) if self.module != : if rdict[] > 0: idx = RES.argmax() ldg_pos = list(filter(lambda x: x<= idx, ind)) elif rdict[] < 0: idx = RES.argmin() ldg_pos = list(filter(lambda x: x >= idx, ind)) else: ldg_pos = ind rdict[] = .join(list(map(str,rank_metric.iloc[ldg_pos].index))) rdict[] = RES rdict[] = ind res[gs] = rdict self.results = res res_df = pd.DataFrame.from_dict(res, orient=) res_df.index.name = res_df.drop([,], axis=1, inplace=True) res_df.sort_values(by=[,], inplace=True) self.res2d = res_df if self._outdir is None: return out = os.path.join(outdir,.format(b=module, c=permutation_type)) if self.module == : out = out.replace(".csv",".txt") with open(out, ) as f: f.write() f.write(" res_df.to_csv(f, sep=) else: res_df.to_csv(out) return
reformat gsea results, and save to txt
371,483
def delay_1(year):
    """Test for delay of the start of a new year; returns the (possibly
    postponed) day number for the given year.
    """
    months = trunc(((235 * year) - 234) / 19)
    parts = 12084 + (13753 * months)
    day = trunc((months * 29) + parts / 25920)
    # Postpone by one day when the computed moment lands on a forbidden weekday.
    if ((3 * (day + 1)) % 7) < 3:
        return day + 1
    return day
Test for delay of start of new year and to avoid
371,484
def get_auth(self, username, password, authoritative_source, auth_options=None): if auth_options is None: auth_options = {} if (authoritative_source is None): raise AuthError("Missing authoritative_source.") rem = list() for key in self._auth_cache: if self._auth_cache[key][] < datetime.utcnow(): rem.append(key) for key in rem: del(self._auth_cache[key]) user_authbackend = username.rsplit(, 1) backend = "" if len(user_authbackend) == 1: backend = self._config.get(, ) self._logger.debug("Using default auth backend %s" % backend) else: backend = user_authbackend[1] auth_str = ( str(username) + str(password) + str(authoritative_source) + str(auth_options) ) if auth_str in self._auth_cache: self._logger.debug( % username) return self._auth_cache[auth_str][] try: auth = self._backends[backend](backend, user_authbackend[0], password, authoritative_source, auth_options) except KeyError: raise AuthError("Invalid auth backend specified" % str(backend)) self._auth_cache[auth_str] = { : datetime.utcnow() + timedelta(seconds=self._config.getint(, )), : auth } return auth
Returns an authentication object. Examines the auth backend given after the '@' in the username and returns a suitable instance of a subclass of the BaseAuth class. * `username` [string] Username to authenticate as. * `password` [string] Password to authenticate with. * `authoritative_source` [string] Authoritative source of the query. * `auth_options` [dict] A dict which, if authenticated as a trusted user, can override `username` and `authoritative_source`.
371,485
def _gen_pool_xml(name, ptype, target=None, permissions=None, source_devices=None, source_dir=None, source_adapter=None, source_hosts=None, source_auth=None, source_name=None, source_format=None): hosts = [host.split() for host in source_hosts or []] context = { : name, : ptype, : {: target, : permissions}, : { : source_devices or [], : source_dir, : source_adapter, : [{: host[0], : host[1] if len(host) > 1 else None} for host in hosts], : source_auth, : source_name, : source_format } } fn_ = try: template = JINJA.get_template(fn_) except jinja2.exceptions.TemplateNotFound: log.error(, fn_) return return template.render(**context)
Generate the XML string to define a libvirt storage pool
371,486
def gss(args): p = OptionParser(gss.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(p.print_help()) fastafile, mappingfile = args seen = defaultdict(int) clone = defaultdict(set) plateMapping = DictFile(mappingfile) fw = open("MetaData.txt", "w") print(PublicationTemplate.format(**vars), file=fw) print(LibraryTemplate.format(**vars), file=fw) print(ContactTemplate.format(**vars), file=fw) logging.debug("Meta data written to `{0}`".format(fw.name)) fw = open("GSS.txt", "w") fw_log = open("GSS.log", "w") for rec in SeqIO.parse(fastafile, "fasta"): description = rec.description a = parse_description(description) direction = a["direction"][0] sequencer_plate_barcode = a["sequencer_plate_barcode"][0] sequencer_plate_well_coordinates = \ a["sequencer_plate_well_coordinates"][0] sequencer_plate_96well_quadrant = \ a["sequencer_plate_96well_quadrant"][0] sequencer_plate_96well_coordinates = \ a["sequencer_plate_96well_coordinates"][0] w96 = sequencer_plate_96well_coordinates w96quad = int(sequencer_plate_96well_quadrant) w384 = sequencer_plate_well_coordinates assert convert_96_to_384(w96, w96quad) == w384 plate = sequencer_plate_barcode assert plate in plateMapping, \ "{0} not found in `{1}` !".format(plate, mappingfile) plate = plateMapping[plate] d = Directions[direction] cloneID = "{0}{1}".format(plate, w384) gssID = "{0}{1}".format(cloneID, d) seen[gssID] += 1 if seen[gssID] > 1: gssID = "{0}{1}".format(gssID, seen[gssID]) seen[gssID] += 1 clone[cloneID].add(gssID) seen = defaultdict(int) for rec in SeqIO.parse(fastafile, "fasta"): description = rec.description a = parse_description(description) direction = a["direction"][0] sequencer_plate_barcode = a["sequencer_plate_barcode"][0] sequencer_plate_well_coordinates = \ a["sequencer_plate_well_coordinates"][0] w384 = sequencer_plate_well_coordinates plate = sequencer_plate_barcode plate = plateMapping[plate] d = Directions[direction] cloneID = "{0}{1}".format(plate, w384) gssID = 
"{0}{1}".format(cloneID, d) seen[gssID] += 1 if seen[gssID] > 1: logging.error("duplicate key {0} found".format(gssID)) gssID = "{0}{1}".format(gssID, seen[gssID]) othergss = clone[cloneID] - set([gssID]) othergss = ", ".join(sorted(othergss)) vars.update(locals()) print(GSSTemplate.format(**vars), file=fw) print("{0}\t{1}".format(gssID, description), file=fw_log) print("=" * 60, file=fw_log) logging.debug("A total of {0} seqs written to `{1}`".\ format(len(seen), fw.name)) fw.close() fw_log.close()
%prog gss fastafile plateMapping Generate sequence files and metadata templates suited for gss submission. The FASTA file is assumed to be exported from the JCVI data delivery folder which looks like: >1127963806024 /library_name=SIL1T054-B-01-120KB /clear_start=0 /clear_end=839 /primer_id=1049000104196 /trace_id=1064147620169 /trace_file_id=1127963805941 /clone_insert_id=1061064364776 /direction=reverse /sequencer_run_id=1064147620155 /sequencer_plate_barcode=B906423 /sequencer_plate_well_coordinates=C3 /sequencer_plate_96well_quadrant=1 /sequencer_plate_96well_coordinates=B02 /template_plate_barcode=CC0251602AB /growth_plate_barcode=BB0273005AB AGCTTTAGTTTCAAGGATACCTTCATTGTCATTCCCGGTTATGATGATATCATCAAGATAAACAAGAATG ACAATGATACCTGTTTGGTTCTGAAGTGTAAAGAGGGTATGTTCAGCTTCAGATCTTCTAAACCCTTTGT CTAGTAAGCTGGCACTTAGCTTCCTATACCAAACCCTTTGTGATTGCTTCAGTCCATAAATTGCCTTTTT Plate mapping file maps the JTC `sequencer_plate_barcode` to external IDs. For example: B906423 SIL-001
371,487
def subrouters(self):
    """Generator of sub-routers (middleware inheriting from Router)
    contained within this router.
    """
    for middleware in self.mw_list:
        if isinstance(middleware.func, Router):
            yield middleware
Generator of sub-routers (middleware inheriting from Router) contained within this router.
371,488
def blackbox_network():
    """A micro-network to demonstrate blackboxing.

    Six nodes wired as two COPY pairs feeding two AND gates in a loop:
    F drives copies A and B; C = A AND B; C drives copies D and E;
    F = D AND E. See the accompanying docstring row for the full diagram
    and connectivity matrix.
    """
    num_nodes = 6
    num_states = 2 ** num_nodes

    tpm = np.zeros((num_states, num_nodes))
    for index, previous_state in enumerate(all_states(num_nodes)):
        current_state = [0] * num_nodes
        # A and B copy F
        if previous_state[5] == 1:
            current_state[0] = 1
            current_state[1] = 1
        # C = A AND B
        if previous_state[0] == 1 and previous_state[1]:
            current_state[2] = 1
        # D and E copy C
        if previous_state[2] == 1:
            current_state[3] = 1
            current_state[4] = 1
        # F = D AND E
        if previous_state[3] == 1 and previous_state[4] == 1:
            current_state[5] = 1
        tpm[index, :] = current_state

    cm = np.array([
        [0, 0, 1, 0, 0, 0],
        [0, 0, 1, 0, 0, 0],
        [0, 0, 0, 1, 1, 0],
        [0, 0, 0, 0, 0, 1],
        [0, 0, 0, 0, 0, 1],
        [1, 1, 0, 0, 0, 0],
    ])

    return Network(tpm, cm, node_labels=LABELS[:tpm.shape[1]])
A micro-network to demonstrate blackboxing. Diagram:: +----------+ +-------------------->+ A (COPY) + <---------------+ | +----------+ | | +----------+ | | +-----------+ B (COPY) + <-------------+ | v v +----------+ | | +-+-----+-+ +-+-----+-+ | | | | | C (AND) | | F (AND) | | | | | +-+-----+-+ +-+-----+-+ | | ^ ^ | | +----------+ | | | +---------> + D (COPY) +---------------+ | | +----------+ | | +----------+ | +-------------------> + E (COPY) +-----------------+ +----------+ Connectivity Matrix: +---+---+---+---+---+---+---+ | . | A | B | C | D | E | F | +---+---+---+---+---+---+---+ | A | 0 | 0 | 1 | 0 | 0 | 0 | +---+---+---+---+---+---+---+ | B | 0 | 0 | 1 | 0 | 0 | 0 | +---+---+---+---+---+---+---+ | C | 0 | 0 | 0 | 1 | 1 | 0 | +---+---+---+---+---+---+---+ | D | 0 | 0 | 0 | 0 | 0 | 1 | +---+---+---+---+---+---+---+ | E | 0 | 0 | 0 | 0 | 0 | 1 | +---+---+---+---+---+---+---+ | F | 1 | 1 | 0 | 0 | 0 | 0 | +---+---+---+---+---+---+---+ In the documentation example, the state is (0, 0, 0, 0, 0, 0).
371,489
def BVirial_Pitzer_Curl(T, Tc, Pc, omega, order=0):
    r"""Second virial coefficient from the Pitzer-Curl (1957) correlation.

    .. math::
        B_r = B^{(0)} + \omega B^{(1)}

        B^{(0)} = 0.1445 - 0.33/T_r - 0.1385/T_r^2 - 0.0121/T_r^3

        B^{(1)} = 0.073 + 0.46/T_r - 0.5/T_r^2 - 0.097/T_r^3 - 0.0073/T_r^8

    Parameters
    ----------
    T : float
        Temperature of fluid [K]
    Tc : float
        Critical temperature of fluid [K]
    Pc : float
        Critical pressure of the fluid [Pa]
    omega : float
        Acentric factor for fluid, [-]
    order : int, optional
        0 for B itself; 1/2/3 for the first/second/third temperature
        derivative; -1/-2 for the first/second indefinite integral in T.

    Returns
    -------
    B : float
        Second virial coefficient (or the requested derivative/integral),
        [m^3/mol / K^order]

    Raises
    ------
    ValueError
        If `order` is not one of -2, -1, 0, 1, 2, 3.

    Notes
    -----
    Derivative/integral expressions were obtained symbolically (SymPy).
    Relies on module-level ``R`` (gas constant) and ``log``.

    Examples
    --------
    >>> BVirial_Pitzer_Curl(510., 425.2, 38E5, 0.193)
    -0.0002084535541385102
    """
    Tr = T/Tc
    if order == 0:
        B0 = 0.1445 - 0.33/Tr - 0.1385/Tr**2 - 0.0121/Tr**3
        B1 = 0.073 + 0.46/Tr - 0.5/Tr**2 - 0.097/Tr**3 - 0.0073/Tr**8
    elif order == 1:  # dB/dT
        B0 = Tc*(3300*T**2 + 2770*T*Tc + 363*Tc**2)/(10000*T**4)
        B1 = Tc*(-2300*T**7 + 5000*T**6*Tc + 1455*T**5*Tc**2 + 292*Tc**7)/(5000*T**9)
    elif order == 2:  # d2B/dT2
        B0 = -3*Tc*(1100*T**2 + 1385*T*Tc + 242*Tc**2)/(5000*T**5)
        B1 = Tc*(1150*T**7 - 3750*T**6*Tc - 1455*T**5*Tc**2 - 657*Tc**7)/(1250*T**10)
    elif order == 3:  # d3B/dT3
        B0 = 3*Tc*(330*T**2 + 554*T*Tc + 121*Tc**2)/(500*T**6)
        B1 = 3*Tc*(-230*T**7 + 1000*T**6*Tc + 485*T**5*Tc**2 + 438*Tc**7)/(250*T**11)
    elif order == -1:  # first indefinite integral
        B0 = 289*T/2000 - 33*Tc*log(T)/100 + (2770*T*Tc**2 + 121*Tc**3)/(20000*T**2)
        B1 = 73*T/1000 + 23*Tc*log(T)/50 + (35000*T**6*Tc**2 + 3395*T**5*Tc**3 + 73*Tc**8)/(70000*T**7)
    elif order == -2:  # second indefinite integral
        B0 = 289*T**2/4000 - 33*T*Tc*log(T)/100 + 33*T*Tc/100 + 277*Tc**2*log(T)/2000 - 121*Tc**3/(20000*T)
        B1 = 73*T**2/2000 + 23*T*Tc*log(T)/50 - 23*T*Tc/50 + Tc**2*log(T)/2 - (20370*T**5*Tc**3 + 73*Tc**8)/(420000*T**6)
    else:
        # Previously a bare `raise Exception()`; give callers a diagnostic.
        # ValueError is a subclass of Exception, so existing handlers still work.
        raise ValueError('Only orders -2, -1, 0, 1, 2 and 3 are supported.')
    Br = B0 + omega*B1
    return Br*R*Tc/Pc
r'''Calculates the second virial coefficient using the model in [1]_. Designed for simple calculations. .. math:: B_r=B^{(0)}+\omega B^{(1)} B^{(0)}=0.1445-0.33/T_r-0.1385/T_r^2-0.0121/T_r^3 B^{(1)} = 0.073+0.46/T_r-0.5/T_r^2 -0.097/T_r^3 - 0.0073/T_r^8 Parameters ---------- T : float Temperature of fluid [K] Tc : float Critical temperature of fluid [K] Pc : float Critical pressure of the fluid [Pa] omega : float Acentric factor for fluid, [-] order : int, optional Order of the calculation. 0 for the calculation of B itself; for 1/2/3, the first/second/third derivative of B with respect to temperature; and for -1/-2, the first/second indefinite integral of B with respect to temperature. No other integrals or derivatives are implemented, and an exception will be raised if any other order is given. Returns ------- B : float Second virial coefficient in density form or its integral/derivative if specified, [m^3/mol or m^3/mol/K^order] Notes ----- Analytical models for derivatives and integrals are available for orders -2, -1, 1, 2, and 3, all obtained with SymPy. For first temperature derivative of B: .. math:: \frac{d B^{(0)}}{dT} = \frac{33 Tc}{100 T^{2}} + \frac{277 Tc^{2}}{1000 T^{3}} + \frac{363 Tc^{3}}{10000 T^{4}} \frac{d B^{(1)}}{dT} = - \frac{23 Tc}{50 T^{2}} + \frac{Tc^{2}}{T^{3}} + \frac{291 Tc^{3}}{1000 T^{4}} + \frac{73 Tc^{8}}{1250 T^{9}} For the second temperature derivative of B: .. math:: \frac{d^2 B^{(0)}}{dT^2} = - \frac{3 Tc}{5000 T^{3}} \left(1100 + \frac{1385 Tc}{T} + \frac{242 Tc^{2}}{T^{2}}\right) \frac{d^2 B^{(1)}}{dT^2} = \frac{Tc}{T^{3}} \left(\frac{23}{25} - \frac{3 Tc}{T} - \frac{291 Tc^{2}}{250 T^{2}} - \frac{657 Tc^{7}}{1250 T^{7}}\right) For the third temperature derivative of B: .. 
math:: \frac{d^3 B^{(0)}}{dT^3} = \frac{3 Tc}{500 T^{4}} \left(330 + \frac{554 Tc}{T} + \frac{121 Tc^{2}}{T^{2}}\right) \frac{d^3 B^{(1)}}{dT^3} = \frac{3 Tc}{T^{4}} \left(- \frac{23}{25} + \frac{4 Tc}{T} + \frac{97 Tc^{2}}{50 T^{2}} + \frac{219 Tc^{7}}{125 T^{7}}\right) For the first indefinite integral of B: .. math:: \int{B^{(0)}} dT = \frac{289 T}{2000} - \frac{33 Tc}{100} \log{\left (T \right )} + \frac{1}{20000 T^{2}} \left(2770 T Tc^{2} + 121 Tc^{3}\right) \int{B^{(1)}} dT = \frac{73 T}{1000} + \frac{23 Tc}{50} \log{\left (T \right )} + \frac{1}{70000 T^{7}} \left(35000 T^{6} Tc^{2} + 3395 T^{5} Tc^{3} + 73 Tc^{8}\right) For the second indefinite integral of B: .. math:: \int\int B^{(0)} dT dT = \frac{289 T^{2}}{4000} - \frac{33 T}{100} Tc \log{\left (T \right )} + \frac{33 T}{100} Tc + \frac{277 Tc^{2}}{2000} \log{\left (T \right )} - \frac{121 Tc^{3}}{20000 T} \int\int B^{(1)} dT dT = \frac{73 T^{2}}{2000} + \frac{23 T}{50} Tc \log{\left (T \right )} - \frac{23 T}{50} Tc + \frac{Tc^{2}}{2} \log{\left (T \right )} - \frac{1}{420000 T^{6}} \left(20370 T^{5} Tc^{3} + 73 Tc^{8}\right) Examples -------- Example matching that in BVirial_Abbott, for isobutane. >>> BVirial_Pitzer_Curl(510., 425.2, 38E5, 0.193) -0.0002084535541385102 References ---------- .. [1] Pitzer, Kenneth S., and R. F. Curl. "The Volumetric and Thermodynamic Properties of Fluids. III. Empirical Equation for the Second Virial Coefficient1." Journal of the American Chemical Society 79, no. 10 (May 1, 1957): 2369-70. doi:10.1021/ja01567a007.
371,490
def send_subscribe(self, dup, topics):
    """Build and queue an MQTT SUBSCRIBE packet.

    Args:
        dup: duplicate-delivery flag, packed into bit 3 of the fixed header.
        topics: iterable of ``(topic, qos)`` pairs to subscribe to.

    Returns:
        ``NC.ERR_SUCCESS`` when the packet is queued, otherwise the
        allocation error code from ``pkt.alloc()``.
    """
    pkt = MqttPkt()

    # Remaining length: 2-byte packet id, then per topic a 2-byte length
    # prefix + the topic bytes + 1 QoS byte.
    remaining = 2
    for topic, qos in topics:
        remaining += 2 + len(topic) + 1

    pkt.command = NC.CMD_SUBSCRIBE | (dup << 3) | (1 << 1)
    pkt.remaining_length = remaining

    ret = pkt.alloc()
    if ret != NC.ERR_SUCCESS:
        return ret

    # Variable header: packet identifier.
    pkt.write_uint16(self.mid_generate())

    # Payload: each topic filter followed by its requested QoS.
    for topic, qos in topics:
        pkt.write_string(topic)
        pkt.write_byte(qos)

    return self.packet_queue(pkt)
Send subscribe COMMAND to server.
371,491
def stop_all(self):
    """Halt both the analog output and input tasks and release the DAQ lock.

    Both task handles are reset to None afterwards so a repeated call is a
    safe no-op on the task objects.
    """
    # Guard each task independently: either may be absent (stop called
    # twice, or before the corresponding task was created). Previously only
    # aotask was guarded, so a missing aitask raised AttributeError and
    # left daq_lock held.
    if self.aotask is not None:
        self.aotask.stop()
    if self.aitask is not None:
        self.aitask.stop()
    self.daq_lock.release()
    self.aitask = None
    self.aotask = None
Halts both the analog output and input tasks
371,492
def do_results(args):
    """Write the results output file for one target.

    Parameters
    ----------
    args : tuple
        ``(config, name, label, coord)`` — only ``config`` and ``label``
        are used here; ``name`` and ``coord`` are unpacked but unused.
    """
    config,name,label,coord = args
    filenames = make_filenames(config,label)
    # NOTE(review): the subscription keys below were elided by extraction
    # (presumably the results-file and samples-file keys) -- restore them
    # before this code can run.
    srcfile = filenames[]
    samples = filenames[]
    # NOTE(review): existence is checked on srcfile but the warning names
    # the samples file -- confirm which file is actually the prerequisite.
    if not exists(srcfile):
        logger.warning("Couldnt find %s; skipping..."%samples)
        return
    logger.info("Writing %s..."%srcfile)
    # Imported locally, presumably to keep heavy analysis dependencies out
    # of module import time -- confirm.
    from ugali.analysis.results import write_results
    # NOTE(review): srcfile is passed twice (as both output and source
    # argument) -- verify against write_results' signature.
    write_results(srcfile,config,srcfile,samples)
Write the results output file
371,493
def duplicate_nodes(self):
    """Find groups of nodes that hold identical geometry.

    Geometry identity is decided by the integer form of each mesh's
    ``identifier_md5``, so this groups both meshes duplicated by copying
    in space under different keys in ``self.geometry`` and nodes that
    reuse the same geometry entry.

    Returns
    -----------
    duplicates: (m) sequence
        Each entry is a sorted list of node keys sharing identical geometry.
    """
    if not self.geometry:
        return []

    # integer hash of each geometry entry, keyed by geometry name
    geom_hash = {name: int(mesh.identifier_md5, 16)
                 for name, mesh in self.geometry.items()}

    # every node that instances geometry, and the hash of what it points at
    nodes = np.array(self.graph.nodes_geometry)
    hashes = np.array([geom_hash[self.graph[node][1]] for node in nodes])

    # cluster node indices by identical geometry hash
    return [np.sort(nodes[group]).tolist()
            for group in grouping.group(hashes)]
Return a sequence of node keys of identical meshes. Will combine meshes duplicated by copying in space with different keys in self.geometry, as well as meshes repeated by self.nodes. Returns ----------- duplicates: (m) sequence of keys to self.nodes that represent identical geometry
371,494
def simOnePeriod(self):
    """Simulate one period for this agent type.

    Runs, in order: getMortality(), then either readShocks() (when a
    pre-specified shock history should be replayed) or getShocks(), then
    getStates() and getPostStates(), and finally advances the age and
    cycle counters.

    NOTE(review): companion documentation mentions a getControls() step,
    but it is not called here -- confirm that is intentional.

    Parameters
    ----------
    None

    Returns
    -------
    None
    """
    self.getMortality()  # handle deaths/replacements for this period
    if self.read_shocks:  # replay pre-recorded shocks instead of drawing new ones
        self.readShocks()
    else:
        self.getShocks()
    self.getStates()
    self.getPostStates()

    # Advance time; the masked assignment below implies t_cycle (and, by
    # parallel, t_age) are numpy arrays with one entry per agent.
    self.t_age = self.t_age + 1      # periods since birth (rebinds, not in-place)
    self.t_cycle = self.t_cycle + 1  # position within the cycle
    self.t_cycle[self.t_cycle == self.T_cycle] = 0  # wrap at end of cycle
Simulates one period for this type. Calls the methods getMortality(), getShocks() or readShocks, getStates(), getControls(), and getPostStates(). These should be defined for AgentType subclasses, except getMortality (define its components simDeath and simBirth instead) and readShocks. Parameters ---------- None Returns ------- None
371,495
def copy(self):
    """Create and return a copy of this :class:`Group`.

    The base-class copy is taken first, then every contained command is
    individually copied into the clone.
    """
    clone = super().copy()
    for command in self.commands:
        clone.add_command(command.copy())
    return clone
Creates a copy of this :class:`Group`.
371,496
def plot_ts(fignum, dates, ts):
    """Plot the geomagnetic polarity time scale between two bounding dates.

    Parameters
    ----------
    fignum : int
        matplotlib figure number
    dates : sequence of two floats
        bounding dates (Ma) for the plot: [min, max]
    ts : str
        time scale name passed to pmag.get_ts (e.g. ck95, gts04, gts12)
    """
    vertical_plot_init(fignum, 10, 3)
    # TS: boundary ages of polarity intervals; Chrons: (name, age) labels.
    TS, Chrons = pmag.get_ts(ts)
    p = 1
    X, Y = [], []
    # Build a square wave: Y alternates 0/1 at each polarity boundary
    # falling inside the requested date range.
    for d in TS:
        if d <= dates[1]:
            if d >= dates[0]:
                if len(X) == 0:
                    # first in-range boundary: start the wave one boundary early
                    ind = TS.index(d)
                    X.append(TS[ind - 1])
                    Y.append(p % 2)
                X.append(d)
                Y.append(p % 2)
                p += 1
                X.append(d)
                Y.append(p % 2)
        else:
            # beyond the upper bound: close the wave at dates[1]
            X.append(dates[1])
            Y.append(p % 2)
    # NOTE(review): string arguments below (line style for plot, and the
    # style args of plot_vs/plot_hs) were elided by extraction -- restore
    # them before this code can run.
    plt.plot(X, Y, )
    plot_vs(fignum, dates, , )
    plot_hs(fignum, [1.1, -.1], , )
    plt.xlabel("Age (Ma): " + ts)
    # Label chrons alternately above (1.05) and below (-0.1) the wave.
    isign = -1
    for c in Chrons:
        off = -.1
        isign = -1 * isign
        if isign > 0:
            off = 1.05
        if c[1] >= X[0] and c[1] < X[-1]:
            plt.text(c[1] - .2, off, c[0])
    return
plot the geomagnetic polarity time scale Parameters __________ fignum : matplotlib figure number dates : bounding dates for plot ts : time scale ck95, gts04, or gts12
371,497
def rgb_view(qimage, byteorder='big'):
    """Returns RGB view of a given 32-bit color QImage_'s memory.

    The result is a 3D numpy.uint8 array reduced to the color channels
    (alpha dropped). The image must have 32 bit pixel size, i.e. be
    RGB32, ARGB32, or ARGB32_Premultiplied.

    :param qimage: image whose memory shall be accessed via NumPy
    :param byteorder: channel order of the last axis; 'big' (the default)
        yields R,G,B, 'little' yields B,G,R, and None means sys.byteorder.
    :rtype: numpy.ndarray_ with shape (height, width, 3) and dtype uint8

    NOTE(review): the two string literals ('big' default and 'little'
    comparison) were elided by extraction and have been restored per the
    accompanying documentation ("defaults to 'big'", "'little' to get BGR").
    """
    if byteorder is None:
        byteorder = _sys.byteorder
    raw = byte_view(qimage, byteorder)  # (h, w, channels) uint8 view
    if raw.shape[2] != 4:
        raise ValueError("For rgb_view, the image must have 32 bit pixel size (use RGB32, ARGB32, or ARGB32_Premultiplied)")
    if byteorder == 'little':
        # little-endian layout is B,G,R,A -> drop trailing alpha
        return raw[..., :3]
    else:
        # big-endian layout is A,R,G,B -> drop leading alpha
        return raw[..., 1:]
Returns RGB view of a given 32-bit color QImage_'s memory. Similarly to byte_view(), the result is a 3D numpy.uint8 array, but reduced to the rgb dimensions (without alpha), and reordered (using negative strides in the last dimension) to have the usual [R,G,B] order. The image must have 32 bit pixel size, i.e. be RGB32, ARGB32, or ARGB32_Premultiplied. (Note that in the latter case, the values are of course premultiplied with alpha.) The order of channels in the last axis depends on the `byteorder`, which defaults to 'big', i.e. RGB order. You may set the argument `byteorder` to 'little' to get BGR, or use None which means sys.byteorder here, i.e. return native order for the machine the code is running on. For your convenience, `qimage` may also be a filename, see `Loading and Saving Images`_ in the documentation. :param qimage: image whose memory shall be accessed via NumPy :type qimage: QImage_ with 32-bit pixel type :param byteorder: specify order of channels in last axis :rtype: numpy.ndarray_ with shape (height, width, 3) and dtype uint8
371,498
def normalize_fragment(text, encoding='utf-8'):
    """Normalize a URL fragment.

    Percent-encodes characters not allowed by FRAGMENT_ENCODE_SET and
    uppercases any existing percent-escapes.

    NOTE(review): the encoding default literal was elided by extraction;
    'utf-8' restored as the conventional percent-encoding default --
    confirm against the original source.

    :param text: the fragment text to normalize
    :param encoding: character encoding used when percent-encoding
    :return: the normalized fragment string
    """
    path = percent_encode(text, encoding=encoding,
                          encode_set=FRAGMENT_ENCODE_SET)
    return uppercase_percent_encoding(path)
Normalize a fragment. Percent-encodes unacceptable characters and ensures percent-encoding is uppercase.
371,499
def milestone(self, extra_params=None):
    """The Milestone that the Ticket is a part of.

    Queries the parent space for the milestone matching this ticket's
    stored milestone id; implicitly returns None when the ticket has no
    milestone id set or no matching milestone is found.

    :param extra_params: optional extra query parameters forwarded to
        the space's milestones() call.
    """
    # NOTE(review): the subscription/get keys below were elided by
    # extraction (presumably the milestone-id field name) -- restore
    # them before this code can run.
    if self.get(, None):
        milestones = self.space.milestones(id=self[], extra_params=extra_params)
        if milestones:
            return milestones[0]
The Milestone that the Ticket is a part of