positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def unicode_escape_sequence_fix(self, value):
    """Decode unicode escape sequences embedded in a config value.

    A character may appear either as the literal UTF-8 character or as an
    escape sequence; e.g. all of these denote the Greek delta:
    literal delta, \\N{GREEK CAPITAL LETTER DELTA}, \\U00000394, \\u0394.
    """
    escape_pattern = r"\\\\|\\u\w{4}|\\U\w{8}|\\N\{([^}\\]|\\.)+\}"

    def decode(match):
        text = match.group(0)
        # A doubled backslash is an escaped backslash; leave it untouched.
        if text == r"\\":
            return r"\\"
        return text.encode("utf-8").decode("unicode-escape")

    return re.sub(escape_pattern, decode, value)
It is possible to define unicode characters in the config either as the actual utf-8 character or using escape sequences the following all will show the Greek delta character. Δ \N{GREEK CAPITAL LETTER DELTA} \U00000394 \u0394
def _from_binary_idx_root(cls, binary_stream):
    """See base class."""
    ''' Attribute type - 4 Collation rule - 4 Bytes per index record - 4 Clusters per index record - 1 Padding - 3 '''
    # Unpack the fixed-size INDEX_ROOT header, then the index node header
    # that immediately follows it in the stream.
    attr_type, collation_rule, b_per_idx_r, c_per_idx_r = cls._REPR.unpack(binary_stream[:cls._REPR.size])
    node_header = IndexNodeHeader.create_from_binary(binary_stream[cls._REPR.size:])
    # A zero attribute type is represented as None rather than an enum member.
    attr_type = AttrTypes(attr_type) if attr_type else None
    index_entry_list = []
    offset = cls._REPR.size + node_header.start_offset
    #loads all index entries related to the root node
    while True:
        entry = IndexEntry.create_from_binary(binary_stream[offset:], attr_type)
        index_entry_list.append(entry)
        # LAST_ENTRY marks the sentinel record that terminates the node.
        if entry.flags & IndexEntryFlags.LAST_ENTRY:
            break
        else:
            offset += len(entry)
    nw_obj = cls((attr_type, CollationRule(collation_rule), b_per_idx_r, c_per_idx_r, node_header, index_entry_list))
    _MOD_LOGGER.debug("Attempted to unpack INDEX_ROOT Entry from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj)
    return nw_obj
See base class.
def write_content(self, content, destination):
    """
    Write given content to destination path.

    It will create the needed directory structure first if it contains
    directories that do not already exist.

    Args:
        content (str): Content to write to target file.
        destination (str): Destination path for target file.

    Returns:
        str: Path where target file has been written.
    """
    directory = os.path.dirname(destination)
    if directory:
        # exist_ok avoids the race between the existence check and the
        # creation when several writers target the same directory.
        os.makedirs(directory, exist_ok=True)
    with io.open(destination, 'w', encoding='utf-8') as f:
        f.write(content)
    return destination
Write given content to destination path. It will create the needed directory structure first if it contains directories that do not already exist. Args: content (str): Content to write to target file. destination (str): Destination path for target file. Returns: str: Path where target file has been written.
def create_sitemap(app, exception):
    """Generates the sitemap.xml from the collected HTML page links"""
    # Nothing to do without a configured base URL, after a build error,
    # or when no links were collected.
    base_url = app.config['html_theme_options'].get('base_url', '')
    if not base_url or exception is not None or not app.sitemap_links:
        return

    filename = app.outdir + "/sitemap.xml"
    print("Generating sitemap.xml in %s" % filename)

    root = ET.Element("urlset")
    root.set("xmlns", "http://www.sitemaps.org/schemas/sitemap/0.9")
    for link in app.sitemap_links:
        ET.SubElement(ET.SubElement(root, "url"), "loc").text = link
    ET.ElementTree(root).write(filename)
Generates the sitemap.xml from the collected HTML page links
def any2unicode(text, encoding='utf8', errors='strict'):
    """Convert a string (bytestring in `encoding` or unicode), to unicode.

    NOTE(review): relies on the Python 2 ``unicode`` builtin; under
    Python 3 this name must be provided elsewhere (e.g. aliased to str).
    """
    # Already-decoded text passes through untouched.
    if isinstance(text, unicode):
        return text
    # Otherwise decode the bytestring with the given codec and error policy.
    return unicode(text, encoding, errors=errors)
Convert a string (bytestring in `encoding` or unicode), to unicode.
def _scan(self, type): """ Returns the matched text, and moves to the next token """ tok = self._scanner.token(self._pos, frozenset([type])) if tok[2] != type: err = SyntaxError("SyntaxError[@ char %s: %s]" % (repr(tok[0]), "Trying to find " + type)) err.pos = tok[0] raise err self._pos += 1 return tok[3]
Returns the matched text, and moves to the next token
def plot_state_histogram(result: trial_result.TrialResult) -> np.ndarray: """Plot the state histogram from a single result with repetitions. States is a bitstring representation of all the qubit states in a single result. Currently this function assumes each measurement gate applies to only a single qubit. Args: result: The trial results to plot. Returns: The histogram. A list of values plotted on the y-axis. """ # pyplot import is deferred because it requires a system dependency # (python3-tk) that `python -m pip install cirq` can't handle for the user. # This allows cirq to be usable without python3-tk. import matplotlib.pyplot as plt num_qubits = len(result.measurements.keys()) states = 2**num_qubits values = np.zeros(states) # measurements is a dict of {measurement gate key: # array(repetitions, boolean result)} # Convert this to an array of repetitions, each with an array of booleans. # e.g. {q1: array([[True, True]]), q2: array([[False, False]])} # --> array([[True, False], [True, False]]) measurement_by_result = np.array([ v.transpose()[0] for k, v in result.measurements.items()]).transpose() for meas in measurement_by_result: # Convert each array of booleans to a string representation. # e.g. [True, False] -> [1, 0] -> '10' -> 2 state_ind = int(''.join([str(x) for x in [int(x) for x in meas]]), 2) values[state_ind] += 1 plot_labels = [bin(x)[2:].zfill(num_qubits) for x in range(states)] plt.bar(np.arange(states), values, tick_label=plot_labels) plt.xlabel('qubit state') plt.ylabel('result count') plt.show() return values
Plot the state histogram from a single result with repetitions. States is a bitstring representation of all the qubit states in a single result. Currently this function assumes each measurement gate applies to only a single qubit. Args: result: The trial results to plot. Returns: The histogram. A list of values plotted on the y-axis.
def block_quote(node):
    """Convert a Markdown block-quote node into a docutils block_quote node."""
    o = nodes.block_quote()
    # Preserve the source line number for error reporting / traceability.
    o.line = node.sourcepos[0][0]
    # Recursively convert and append every child node.
    for n in MarkDown(node):
        o += n
    return o
A block quote
def get(self):
    """Reloads the check with its current values."""
    new = self.manager.get(self)
    if new:
        # Merge the freshly fetched attributes into this instance.
        self._add_details(new._info)
Reloads the check with its current values.
def get_degenerate_statements(self):
    """Get all degenerate BEL statements.

    Stores the results of the query in self.degenerate_stmts.
    """
    logger.info("Checking for 'degenerate' statements...\n")
    # Get rules of type protein X -> activity Y
    # SPARQL: direct increase/decrease statements whose subject is a
    # (modified) protein and whose object resolves to a *different* protein.
    q_stmts = prefixes + """ SELECT ?stmt WHERE { ?stmt a belvoc:Statement . ?stmt belvoc:hasSubject ?subj . ?stmt belvoc:hasObject ?obj . { { ?stmt belvoc:hasRelationship belvoc:DirectlyIncreases . } UNION { ?stmt belvoc:hasRelationship belvoc:DirectlyDecreases . } } { { ?subj a belvoc:ProteinAbundance . } UNION { ?subj a belvoc:ModifiedProteinAbundance . } } ?subj belvoc:hasConcept ?xName . { { ?obj a belvoc:ProteinAbundance . ?obj belvoc:hasConcept ?yName . } UNION { ?obj a belvoc:ModifiedProteinAbundance . ?obj belvoc:hasChild ?proteinY . ?proteinY belvoc:hasConcept ?yName . } UNION { ?obj a belvoc:AbundanceActivity . ?obj belvoc:hasChild ?objChild . ?objChild a belvoc:ProteinAbundance . ?objChild belvoc:hasConcept ?yName . } } FILTER (?xName != ?yName) } """
    res_stmts = self.g.query(q_stmts)
    logger.info("Protein -> Protein/Activity statements:")
    logger.info("---------------------------------------")
    for stmt in res_stmts:
        # Human-readable form of the statement URI, logged and stored.
        stmt_str = strip_statement(stmt[0])
        logger.info(stmt_str)
        self.degenerate_stmts.append(stmt_str)
Get all degenerate BEL statements. Stores the results of the query in self.degenerate_stmts.
def get_entry_url(entry, blog_page, root_page):
    """Return the URL for ``entry`` under ``blog_page``.

    A different route is used depending on whether ``blog_page`` is the
    site root page.
    """
    date_parts = {
        'year': entry.date.strftime('%Y'),
        'month': entry.date.strftime('%m'),
        'day': entry.date.strftime('%d'),
        'slug': entry.slug,
    }
    if root_page == blog_page:
        return reverse('entry_page_serve', kwargs=date_parts)
    # The method get_url_parts provides a tuple with a custom URL routing
    # scheme. In the last position it finds the subdomain of the blog, which
    # is used to construct the entry url. Using the stripped subdomain
    # allows Puput to generate the urls for every sitemap level.
    blog_path = strip_prefix_and_ending_slash(blog_page.specific.last_url_part)
    return reverse('entry_page_serve_slug', kwargs=dict(blog_path=blog_path, **date_parts))
Get the entry url given an entry page and a blog page instance. It will use one url or another depending on whether blog_page is the root page.
def run(self):
    """ Append version number to vegas/__init__.py """
    # Write the release version into the package source before the normal
    # build_py step copies files into the build directory.
    with open('src/vegas/__init__.py', 'a') as vfile:
        vfile.write("\n__version__ = '%s'\n" % VEGAS_VERSION)
    _build_py.run(self)
Append version number to vegas/__init__.py
def _preallocate_samples(self): """Preallocate samples for faster adaptive sampling. """ self.prealloc_samples_ = [] for i in range(self.num_prealloc_samples_): self.prealloc_samples_.append(self.sample())
Preallocate samples for faster adaptive sampling.
def OnTextColor(self, event):
    """Text color choice event handler"""
    # GetRGB packs the chosen colour into a single integer value.
    color = event.GetValue().GetRGB()
    # Broadcast the new text colour to interested listeners.
    post_command_event(self, self.TextColorMsg, color=color)
Text color choice event handler
def stats_list(self, list=None, date=None, headers=None):
    """Retrieve your subscriber counts on a particular list on a particular day.

    http://docs.sailthru.com/api/stat
    """
    data = {'stat': 'list'}
    # Only include the optional filters that were actually supplied.
    for key, value in (('list', list), ('date', date)):
        if value is not None:
            data[key] = value
    return self._stats(data, headers)
Retrieve information about your subscriber counts on a particular list, on a particular day. http://docs.sailthru.com/api/stat
def identify_filepath(arg, real_path=None, show_directory=None, find_source=None, hide_init=None):
    """Discover and return the disk file path of the Python module named in
    `arg` by importing the module and returning its ``__file__`` attribute.

    If `find_source` is `True`, the named module is a ``pyc`` or ``pyo``
    file, and a corresponding ``.py`` file exists on disk, the path to the
    ``.py`` file is returned instead.

    If `show_directory` is `True`, the path to the directory containing the
    discovered module file is returned. Similarly, if `hide_init` is `True`
    and the named module is the ``__init__`` module of a package, the
    function returns the path to the package directory containing the
    ``__init__.py`` filename.

    If `real_path` is `True` and the discovered module was loaded via
    symlink, the real path (as determined by `os.path.realpath()`) is
    returned.

    If the named module cannot be imported or its path on disk determined,
    this function raises a `pywhich.ModuleNotFound` exception.
    """
    mod = identify_module(arg)  # raises ModuleNotFound
    try:
        filename = mod.__file__
    except AttributeError:
        # Built-in/C modules have no backing file to report.
        raise ModuleNotFound("module has no '__file__' attribute; is it a " "built-in or C module?")
    # Prefer the .py source over compiled bytecode when asked to.
    if find_source and (filename.endswith('.pyc') or filename.endswith('.pyo')):
        log.debug("Filename ends in pyc or pyo, so looking for the .py file")
        sourcefile = filename[:-1]
        if os.access(sourcefile, os.F_OK):
            filename = sourcefile
        else:
            log.debug("Did not find .py file for path %r, using as-is", filename)
    if real_path:
        filename = os.path.realpath(filename)
    # Collapse to the containing directory for directory-style output or
    # for package __init__ modules.
    if show_directory or (hide_init and os.path.basename(filename).startswith('__init__.')):
        log.debug("Showing directories or hiding __init__s, so returning " "directory of %r", filename)
        filename = os.path.dirname(filename)
    return filename
Discover and return the disk file path of the Python module named in `arg` by importing the module and returning its ``__file__`` attribute. If `find_source` is `True`, the named module is a ``pyc`` or ``pyo`` file, and a corresponding ``.py`` file exists on disk, the path to the ``.py`` file is returned instead. If `show_directory` is `True`, the path to the directory containing the discovered module file is returned. Similarly, if `hide_init` is `True` and the named module is the ``__init__`` module of a package, the function returns the path to the package directory containing the ``__init__.py`` filename. If `real_path` is `True` and the discovered module was loaded via symlink, the real path (as determined by `os.path.realpath()`) is returned. If the named module cannot be imported or its path on disk determined, this function raises a `pywhich.ModuleNotFound` exception.
def _kput(url, data): ''' put any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request ret = http.query(url, method='PUT', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body'))
put any object in kubernetes based on URL
def enabled(name, runas=None):
    '''
    Check if the specified service is enabled

    :param str name: The name of the service to look up

    :param str runas: User to run launchctl commands

    :return: True if the specified service enabled, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.enabled org.cups.cupsd
    '''
    # A service launchctl can list is considered enabled; a lookup
    # failure means it is not.
    try:
        list_(name=name, runas=runas)
    except CommandExecutionError:
        return False
    return True
Check if the specified service is enabled :param str name: The name of the service to look up :param str runas: User to run launchctl commands :return: True if the specified service enabled, otherwise False :rtype: bool CLI Example: .. code-block:: bash salt '*' service.enabled org.cups.cupsd
def build_route_timetable(
    feed: "Feed", route_id: str, dates: List[str]
) -> DataFrame:
    """
    Return a timetable for the given route and dates.

    Parameters
    ----------
    feed : Feed
    route_id : string
        ID of a route in ``feed.routes``
    dates : string or list
        A YYYYMMDD date string or list thereof

    Returns
    -------
    DataFrame
        The columns are all those in ``feed.trips`` plus those in
        ``feed.stop_times`` plus ``'date'``, and the trip IDs are
        restricted to the given route ID. The result is sorted first by
        date and then by grouping by trip ID and sorting the groups by
        their first departure time.

        Skip dates outside of the Feed's dates.

        If there is no route activity on the given dates, then return an
        empty DataFrame.

    Notes
    -----
    Assume the following feed attributes are not ``None``:

    - ``feed.stop_times``
    - Those used in :func:`.trips.get_trips`
    """
    dates = feed.restrict_dates(dates)
    if not dates:
        return pd.DataFrame()

    t = pd.merge(feed.trips, feed.stop_times)
    t = t[t["route_id"] == route_id].copy()
    a = feed.compute_trip_activity(dates)

    frames = []
    for date in dates:
        # Slice to trips active on date
        ids = a.loc[a[date] == 1, "trip_id"]
        f = t[t["trip_id"].isin(ids)].copy()
        f["date"] = date
        # Groupby trip ID and sort groups by their minimum departure time.
        # For some reason NaN departure times mess up the transform below.
        # So temporarily fill NaN departure times as a workaround.
        # Use .ffill() instead of fillna(method="ffill"), which is
        # deprecated and removed in pandas 2.x; behavior is identical.
        f["dt"] = f["departure_time"].ffill()
        f["min_dt"] = f.groupby("trip_id")["dt"].transform("min")
        frames.append(f)

    f = pd.concat(frames)
    return f.sort_values(["date", "min_dt", "stop_sequence"]).drop(
        ["min_dt", "dt"], axis=1
    )
Return a timetable for the given route and dates. Parameters ---------- feed : Feed route_id : string ID of a route in ``feed.routes`` dates : string or list A YYYYMMDD date string or list thereof Returns ------- DataFrame The columns are all those in ``feed.trips`` plus those in ``feed.stop_times`` plus ``'date'``, and the trip IDs are restricted to the given route ID. The result is sorted first by date and then by grouping by trip ID and sorting the groups by their first departure time. Skip dates outside of the Feed's dates. If there is no route activity on the given dates, then return an empty DataFrame. Notes ----- Assume the following feed attributes are not ``None``: - ``feed.stop_times`` - Those used in :func:`.trips.get_trips`
def setup_db(session, botconfig, confdir):
    """Sets up the database."""
    Base.metadata.create_all(session.connection())
    # If we're creating a fresh db, we don't need to worry about migrations.
    if not session.get_bind().has_table('alembic_version'):
        conf_obj = config.Config()
        conf_obj.set_main_option('bot_config_path', confdir)
        # Stamp the fresh schema as already at the latest revision so
        # alembic will not try to re-run old migrations on it.
        with resources.path('cslbot', botconfig['alembic']['script_location']) as script_location:
            conf_obj.set_main_option('script_location', str(script_location))
            command.stamp(conf_obj, 'head')
    # Populate permissions table with owner.
    owner_nick = botconfig['auth']['owner']
    if not session.query(Permissions).filter(Permissions.nick == owner_nick).count():
        session.add(Permissions(nick=owner_nick, role='owner'))
Sets up the database.
def extend_with_default(validator_class: Any) -> Any:
    """Extend ``validator_class`` so schema defaults are applied to the instance.

    :param validator_class: Apply the change for given validator class.
    """
    validate_properties = validator_class.VALIDATORS['properties']

    def set_defaults(validator: Any, properties: dict, instance: dict, schema: dict) -> Iterator[ValidationError]:
        # Fill in every schema-declared default before validating.
        for prop, subschema in properties.items():
            if 'default' in subschema:
                instance.setdefault(prop, subschema['default'])
        yield from validate_properties(validator, properties, instance, schema)  # pragma: no cover

    return extend(validator_class, {'properties': set_defaults})
Append defaults from schema to instance need to be validated. :param validator_class: Apply the change for given validator class.
def _adjust_bin_edges(datetime_bins, offset, closed, index, labels):
    """Adjust bin edges for resampling with daily frequencies greater than
    one day, month-end, and year-end frequencies.

    For such frequencies, the bin edges produced by the cftime range lie at
    midnight of the boundary day, so a value at, e.g., noon on January 31st
    would fall *outside* the January bin. Mirroring pandas, a day minus one
    microsecond is added to each right-closed bin edge so that
    ``1999-12-31 00:00:00`` becomes ``1999-12-31 23:59:59`` (and so on),
    while the labels are left untouched.
    """
    is_super_daily = (isinstance(offset, (MonthEnd, QuarterEnd, YearEnd)) or
                      (isinstance(offset, Day) and offset.n > 1))
    if is_super_daily:
        if closed == 'right':
            # Shift each edge to the last microsecond of the boundary day.
            datetime_bins = datetime_bins + datetime.timedelta(days=1, microseconds=-1)
        # Drop a trailing bin that now starts beyond the data's range.
        if datetime_bins[-2] > index.max():
            datetime_bins = datetime_bins[:-1]
            labels = labels[:-1]
    return datetime_bins, labels
This is required for determining the bin edges resampling with daily frequencies greater than one day, month end, and year end frequencies. Consider the following example. Let's say you want to downsample the time series with the following coordinates to month end frequency: CFTimeIndex([2000-01-01 12:00:00, 2000-01-31 12:00:00, 2000-02-01 12:00:00], dtype='object') Without this adjustment, _get_time_bins with month-end frequency will return the following index for the bin edges (default closed='right' and label='right' in this case): CFTimeIndex([1999-12-31 00:00:00, 2000-01-31 00:00:00, 2000-02-29 00:00:00], dtype='object') If 2000-01-31 is used as a bound for a bin, the value on 2000-01-31T12:00:00 (at noon on January 31st), will not be included in the month of January. To account for this, pandas adds a day minus one worth of microseconds to the bin edges generated by cftime range, so that we do bin the value at noon on January 31st in the January bin. This results in an index with bin edges like the following: CFTimeIndex([1999-12-31 23:59:59, 2000-01-31 23:59:59, 2000-02-29 23:59:59], dtype='object') The labels are still: CFTimeIndex([2000-01-31 00:00:00, 2000-02-29 00:00:00], dtype='object') This is also required for daily frequencies longer than one day and year-end frequencies.
def mark_job_as_errored(job_id, error_object):
    """Mark a job as failed with an error.

    :param job_id: the job_id of the job to be updated
    :type job_id: unicode
    :param error_object: the error returned by the job
    :type error_object: either a string or a dict with a "message" key
        whose value is a string
    """
    # Record the error payload and stamp the completion time in one update.
    _update_job(job_id, {
        "status": "error",
        "error": error_object,
        "finished_timestamp": datetime.datetime.now(),
    })
Mark a job as failed with an error. :param job_id: the job_id of the job to be updated :type job_id: unicode :param error_object: the error returned by the job :type error_object: either a string or a dict with a "message" key whose value is a string
def _session(self): """Provide a transactional scope around a series of operations.""" # Taken from the session docs. session = self._session_maker() try: yield session session.commit() except: session.rollback() raise finally: session.close()
Provide a transactional scope around a series of operations.
def is_serializable(obj):
    """Return `True` if the given object conforms to the Serializable protocol.

    :rtype: bool
    """
    # Classes go through the protocol's own type classifier; instances
    # qualify either directly or via a namedtuple-style ``_asdict``.
    if inspect.isclass(obj):
        return Serializable.is_serializable_type(obj)
    if isinstance(obj, Serializable):
        return True
    return hasattr(obj, '_asdict')
Return `True` if the given object conforms to the Serializable protocol. :rtype: bool
def update(self, **kwargs):
    """We need to implement the custom exclusive parameter check."""
    # Reject combinations of mutually exclusive keyword arguments before
    # delegating to the generic update implementation.
    self._check_exclusive_parameters(**kwargs)
    return super(Rule, self)._update(**kwargs)
We need to implement the custom exclusive parameter check.
def verify(self, public_pair, val, sig):
    """
    :param: public_pair: a :class:`Point <pycoin.ecdsa.Point.Point>` on the curve
    :param: val: an integer value
    :param: sig: a pair of integers ``(r, s)`` representing an ecdsa signature

    :returns: True if and only if the signature ``sig`` is a valid signature
        of ``val`` using ``public_pair`` public key.
    """
    order = self._order
    r, s = sig
    # Both signature components must lie in [1, order).
    if r < 1 or r >= order or s < 1 or s >= order:
        return False
    s_inverse = self.inverse(s)
    # Standard ECDSA verification: compute u1*G + u2*Q (here `self`
    # appears to act as the generator point G -- see class definition)
    # and compare the x-coordinate mod order against r.
    u1 = val * s_inverse
    u2 = r * s_inverse
    point = u1 * self + u2 * self.Point(*public_pair)
    v = point[0] % order
    return v == r
:param: public_pair: a :class:`Point <pycoin.ecdsa.Point.Point>` on the curve :param: val: an integer value :param: sig: a pair of integers ``(r, s)`` representing an ecdsa signature :returns: True if and only if the signature ``sig`` is a valid signature of ``val`` using ``public_pair`` public key.
def file_content(self, value):
    """The Base64 encoded content of the attachment

    :param value: The Base64 encoded content of the attachment
    :type value: FileContent, string
    """
    # Wrap plain values; pass FileContent instances through unchanged.
    self._file_content = value if isinstance(value, FileContent) else FileContent(value)
The Base64 encoded content of the attachment :param value: The Base64 encoded content of the attachment :type value: FileContent, string
def _add_recent(self, doc, logged_id): "Keep a tab on the most recent message for each channel" spec = dict(channel=doc['channel']) doc['ref'] = logged_id doc.pop('_id') self._recent.replace_one(spec, doc, upsert=True)
Keep a tab on the most recent message for each channel
def copytree(src, dst, symlinks=False, ignore=None):
    """
    Function recursively copies from directory to directory.

    Args
    ----
    src (string): the full path of source directory
    dst (string): the full path of destination directory
    symlinks (boolean): the switch for tracking symlinks
    ignore (list): the ignore list

    Raises
    ------
    FolderExistsError: if any copy operation fails (see note below).
    """
    if not os.path.exists(dst):
        os.mkdir(dst)
    try:
        for item in os.listdir(src):
            s = os.path.join(src, item)
            d = os.path.join(dst, item)
            if os.path.isdir(s):
                shutil.copytree(s, d, symlinks, ignore)
            else:
                shutil.copy2(s, d)
    except Exception as e:
        # NOTE(review): *any* copy failure is surfaced as FolderExistsError,
        # not only a pre-existing destination; kept for API compatibility.
        # Chain the original exception so the real cause stays visible.
        raise FolderExistsError("Folder already exists in %s" % dst) from e
Function recursively copies from directory to directory. Args ---- src (string): the full path of source directory dst (string): the full path of destination directory symlinks (boolean): the switch for tracking symlinks ignore (list): the ignore list
def update_host(self, url: URL) -> None: """Update destination host, port and connection type (ssl).""" # get host/port if not url.host: raise InvalidURL(url) # basic auth info username, password = url.user, url.password if username: self.auth = helpers.BasicAuth(username, password or '')
Update destination host, port and connection type (ssl).
def _unicode(ctx, text):
    """
    Returns a numeric code for the first character in a text string
    """
    text = conversions.to_string(text, ctx)
    # An empty string has no first character to report.
    if not text:
        raise ValueError("Text can't be empty")
    return ord(text[0])
Returns a numeric code for the first character in a text string
def get_html(url, headers=None, timeout=None, errors="strict", wait_time=None, driver=None, zillow_only=False, cache_only=False, zillow_first=False, cache_first=False, random=False, **kwargs):
    """
    Fetch the page either from zillow.com or from the Google Cached Url.

    :param cache_only: if True, then real zillow site will never be used.
    :param driver: selenium browser driver.
    :raises ValueError: if more than one ``xxx_only`` / ``xxx_first`` flag
        is set.
    :raises Exception: the last fetch error, when every candidate URL fails.
    """
    if wait_time is None:
        wait_time = Config.Crawler.wait_time

    # prepare url candidates
    cache_url1 = prefix + url + "/"
    cache_url2 = prefix + url
    zillow_url = url

    only_flags = [zillow_only, cache_only]
    if sum(only_flags) == 0:
        first_flags = [zillow_first, cache_first]
        if sum(first_flags) == 0:
            if random:
                if randint(0, 1):
                    all_url = [zillow_url, cache_url1, cache_url2]
                else:
                    all_url = [cache_url1, cache_url2, zillow_url]
            else:
                all_url = [zillow_url, cache_url1, cache_url2]
        elif sum(first_flags) == 1:
            if zillow_first:
                all_url = [zillow_url, cache_url1, cache_url2]
            elif cache_first:
                all_url = [cache_url1, cache_url2, zillow_url]
        else:
            raise ValueError(
                "Only zero or one `xxx_first` argument could be `True`!")
    elif sum(only_flags) == 1:
        if zillow_only:
            all_url = [zillow_url, ]
        elif cache_only:
            all_url = [cache_url1, cache_url2]
    else:
        raise ValueError(
            "Only zero or one `xxx_only` argument could be `True`!")

    last_exc = None
    for url in all_url:
        try:
            return _get_html(url, headers, timeout, errors, wait_time, driver, **kwargs)
        except Exception as e:
            # Remember the failure and fall through to the next candidate.
            last_exc = e
    # BUG FIX: the original `raise e` failed with NameError because `e`
    # is unbound after the except block in Python 3; re-raise the last
    # real fetch error instead.
    raise last_exc
Use Google Cached Url. :param cache_only: if True, then real zillow site will never be used. :param driver: selenium browser driver.
def update_flags(self, idlist, flags):
    """A thin back compat wrapper around build_update(flags=X)."""
    update = self.build_update(flags=flags)
    return self.update_bugs(idlist, update)
A thin back compat wrapper around build_update(flags=X)
def on_message(self, message):
    """Listens for a "websocket client ready" message.

    Once that message is received an asynchronous job is stated that
    yields messages to the client. These messages make up salt's
    "real time" event stream.
    """
    log.debug('Got websocket message %s', message)
    if message == 'websocket client ready':
        if self.connected:
            # TBD: Add ability to run commands in this branch
            log.debug('Websocket already connected, returning')
            return
        self.connected = True
        evt_processor = event_processor.SaltInfo(self)
        client = salt.netapi.NetapiClient(self.application.opts)
        # Prime the stream by asking every minion for its grains.
        client.run({
            'fun': 'grains.items',
            'tgt': '*',
            'token': self.token,
            'mode': 'client',
            'asynchronous': 'local_async',
            'client': 'local'
        })
        # Pump events to the processor until the connection errors out.
        while True:
            try:
                event = yield self.application.event_listener.get_event(self)
                evt_processor.process(event, self.token, self.application.opts)
                # self.write_message('data: {0}\n\n'.format(salt.utils.json.dumps(event, _json_module=_json)))
            except Exception as err:
                log.debug('Error! Ending server side websocket connection. Reason = %s', err)
                break
        self.close()
    else:
        # TBD: Add logic to run salt commands here
        pass
Listens for a "websocket client ready" message. Once that message is received an asynchronous job is stated that yields messages to the client. These messages make up salt's "real time" event stream.
def get_all_subdomains(self, offset=None, count=None, min_sequence=None, cur=None):
    """
    Get all subdomain names, optionally over a range.

    :param offset: number of rows to skip
    :param count: maximum number of rows to return
    :param min_sequence: only return names with sequence >= this value
    :param cur: optional existing cursor to reuse
    :return: list of fully-qualified subdomain names
    """
    get_cmd = 'SELECT DISTINCT fully_qualified_subdomain FROM {}'.format(self.subdomain_table)
    args = ()

    if min_sequence is not None:
        get_cmd += ' WHERE sequence >= ?'
        args += (min_sequence,)

    if offset is not None and count is None:
        # BUG FIX: SQLite rejects OFFSET without a LIMIT clause; LIMIT -1
        # means "no limit" and keeps the OFFSET below valid.
        get_cmd += ' LIMIT -1'

    if count is not None:
        get_cmd += ' LIMIT ?'
        args += (count,)

    if offset is not None:
        get_cmd += ' OFFSET ?'
        args += (offset,)

    get_cmd += ';'

    cursor = self.conn.cursor() if cur is None else cur
    rows = db_query_execute(cursor, get_cmd, args)
    return [row['fully_qualified_subdomain'] for row in rows]
Get all subdomain names, optionally over a range
def available_dtypes():
    """Return the set of data types available in this implementation.

    Notes
    -----
    This is all dtypes available in NumPy. The available dtypes may depend
    on the specific system and NumPy version used.
    """
    # BUG FIX: ``np.object`` (removed in NumPy 1.24) is replaced with the
    # builtin ``object``, and ``np.sctypes`` (removed in NumPy 2.0) falls
    # back to ``np.sctypeDict``.
    scalar_types = getattr(np, 'sctypes', None)
    if scalar_types is not None:
        candidates = [typ for lst in scalar_types.values() for typ in lst]
    else:
        candidates = list(np.sctypeDict.values())

    all_dtypes = []
    for typ in candidates:
        dt = np.dtype(typ)
        # Exclude the generic object and void dtypes, as the original did.
        if dt not in (np.dtype(object), np.dtype(np.void)):
            all_dtypes.append(dt)
    # Need to add these manually since the scalar-type listing will only
    # contain one of them (depending on Python version)
    all_dtypes.extend([np.dtype('S'), np.dtype('U')])
    return tuple(sorted(set(all_dtypes)))
Return the set of data types available in this implementation. Notes ----- This is all dtypes available in Numpy. See ``numpy.sctypes`` for more information. The available dtypes may depend on the specific system used.
def valid_hotp(
    token,
    secret,
    last=1,
    trials=1000,
    digest_method=hashlib.sha1,
    token_length=6,
):
    """Check if given token is valid for given secret. Return interval number
    that was successful, or False if not found.

    :param token: token being checked
    :type token: int or str
    :param secret: secret for which token is checked
    :type secret: str
    :param last: last used interval (start checking with next one)
    :type last: int
    :param trials: number of intervals to check after 'last'
    :type trials: int
    :param digest_method: method of generating digest (hashlib.sha1 by default)
    :type digest_method: callable
    :param token_length: length of the token (6 by default)
    :type token_length: int

    :return: interval number, or False if check unsuccessful
    :rtype: int or bool

    >>> secret = b'MFRGGZDFMZTWQ2LK'
    >>> valid_hotp(713385, secret, last=1, trials=5)
    4
    >>> valid_hotp(865438, secret, last=1, trials=5)
    False
    >>> valid_hotp(713385, secret, last=4, trials=5)
    False
    """
    # Reject values that cannot possibly be a token of this length.
    if not _is_possible_token(token, token_length=token_length):
        return False
    expected = int(token)
    # Probe each interval after `last` until the token matches.
    for interval in six.moves.xrange(last + 1, last + trials + 1):
        candidate = get_hotp(
            secret=secret,
            intervals_no=interval,
            digest_method=digest_method,
            token_length=token_length,
        )
        if candidate == expected:
            return interval
    return False
Check if given token is valid for given secret. Return interval number that was successful, or False if not found. :param token: token being checked :type token: int or str :param secret: secret for which token is checked :type secret: str :param last: last used interval (start checking with next one) :type last: int :param trials: number of intervals to check after 'last' :type trials: int :param digest_method: method of generating digest (hashlib.sha1 by default) :type digest_method: callable :param token_length: length of the token (6 by default) :type token_length: int :return: interval number, or False if check unsuccessful :rtype: int or bool >>> secret = b'MFRGGZDFMZTWQ2LK' >>> valid_hotp(713385, secret, last=1, trials=5) 4 >>> valid_hotp(865438, secret, last=1, trials=5) False >>> valid_hotp(713385, secret, last=4, trials=5) False
def create_configuration(self, node, ports):
    """Create RAID configuration on the bare metal.

    This method creates the desired RAID configuration as read from
    node['target_raid_config'].

    :param node: A dictionary of the node object
    :param ports: A list of dictionaries containing information
                  of ports for the node
    :returns: The current RAID configuration of the below format.
        raid_config = {
            'logical_disks': [{
                'size_gb': 100,
                'raid_level': 1,
                'physical_disks': [
                    '5I:0:1',
                    '5I:0:2'],
                'controller': 'Smart array controller'
                },
            ]
        }
    """
    # Shallow-copy so hpssa_manager cannot mutate the node's own dict.
    raid_config = dict(node.get('target_raid_config', {}))
    return hpssa_manager.create_configuration(raid_config=raid_config)
Create RAID configuration on the bare metal. This method creates the desired RAID configuration as read from node['target_raid_config']. :param node: A dictionary of the node object :param ports: A list of dictionaries containing information of ports for the node :returns: The current RAID configuration of the below format. raid_config = { 'logical_disks': [{ 'size_gb': 100, 'raid_level': 1, 'physical_disks': [ '5I:0:1', '5I:0:2'], 'controller': 'Smart array controller' }, ] }
def get_display(self):
    """Return information about the display (brightness, screensaver, etc.)."""
    log.debug("getting display information...")
    command, endpoint = DEVICE_URLS["get_display"]
    return self._exec(command, endpoint)
returns information about the display, including brightness, screensaver etc.
def get_groups(self, **kwargs):
    """Obtain line types and details.

    Args:
        lang (str): Language code (*es* or *en*).

    Returns:
        Status boolean and parsed response (list[GeoGroupItem]), or
        message string in case of error.
    """
    # Build endpoint parameters and issue the request.
    params = {'cultureInfo': util.language_code(kwargs.get('lang'))}
    result = self.make_request('geo', 'get_groups', **params)

    if not util.check_result(result):
        return False, result.get('resultDescription', 'UNKNOWN ERROR')

    # Parse the result values into typed items.
    values = util.response_list(result, 'resultValues')
    parsed = [emtype.GeoGroupItem(**value) for value in values]
    return True, parsed
Obtain line types and details. Args: lang (str): Language code (*es* or *en*). Returns: Status boolean and parsed response (list[GeoGroupItem]), or message string in case of error.
def _get_range(self, endpoint, *args, method='GET', **kwargs):
    """Helper that fetches and returns another range.

    :param endpoint: key into ``self._endpoints`` naming the API endpoint
    :param args: optional values formatted into the endpoint template
    :param method: HTTP method, ``'GET'`` or ``'POST'``
    :param kwargs: query parameters (GET) or form data (POST)
    :return: a new instance of this class built from the response JSON,
        or ``None`` when the request failed
    :raises ValueError: if ``method`` is not ``'GET'`` or ``'POST'``
    """
    if args:
        url = self.build_url(self._endpoints.get(endpoint).format(*args))
    else:
        url = self.build_url(self._endpoints.get(endpoint))

    # requests treats ``params=None`` / ``data=None`` as "no payload".
    if not kwargs:
        kwargs = None

    if method == 'GET':
        response = self.session.get(url, params=kwargs)
    elif method == 'POST':
        response = self.session.post(url, data=kwargs)
    else:
        # Previously an unsupported method fell through and raised an
        # obscure NameError on ``response``; fail fast with a clear error.
        raise ValueError('Unsupported HTTP method: {}'.format(method))

    if not response:
        return None

    return self.__class__(parent=self, **{self._cloud_data_key: response.json()})
Helper that returns another range
def destroy(self, stream=False):
    """Run a 'terraform destroy'.

    :param stream: whether or not to stream TF output in realtime
    :type stream: bool
    """
    self._setup_tf(stream=stream)
    destroy_args = ['-refresh=true', '-force', '.']
    logger.warning('Running terraform destroy: %s', ' '.join(destroy_args))
    output = self._run_tf('destroy', cmd_args=destroy_args, stream=stream)
    # When streaming, the output has already been shown; otherwise log it.
    if not stream:
        logger.warning("Terraform destroy finished successfully:\n%s", output)
    else:
        logger.warning('Terraform destroy finished successfully.')
Run a 'terraform destroy' :param stream: whether or not to stream TF output in realtime :type stream: bool
def _cache_index(self, dbname, collection, index, cache_for):
    """Add an index to the index cache for ensure_index operations.

    The cache entry expires ``cache_for`` seconds from now.
    """
    expire = datetime.datetime.utcnow() + datetime.timedelta(seconds=cache_for)

    # setdefault collapses the three branches (new db / new collection /
    # existing collection) into a single path with identical effect.
    with self.__index_cache_lock:
        db_cache = self.__index_cache.setdefault(dbname, {})
        db_cache.setdefault(collection, {})[index] = expire
Add an index to the index cache for ensure_index operations.
def score(self, periods=None):
    """Compute the periodogram for the given period or periods.

    Parameters
    ----------
    periods : float or array_like
        Array of periods at which to compute the periodogram.

    Returns
    -------
    scores : np.ndarray
        Array of normalized powers (between 0 and 1) for each period.
        Shape of scores matches the shape of the provided periods.
    """
    period_arr = np.asarray(periods)
    # _score works on a flat array; restore the caller's shape afterwards.
    flat_scores = self._score(period_arr.ravel())
    return flat_scores.reshape(period_arr.shape)
Compute the periodogram for the given period or periods Parameters ---------- periods : float or array_like Array of periods at which to compute the periodogram. Returns ------- scores : np.ndarray Array of normalized powers (between 0 and 1) for each period. Shape of scores matches the shape of the provided periods.
def main(prog: str = None, subcommand_overrides: Dict[str, Subcommand] = {}) -> None: """ The :mod:`~allennlp.run` command only knows about the registered classes in the ``allennlp`` codebase. In particular, once you start creating your own ``Model`` s and so forth, it won't work for them, unless you use the ``--include-package`` flag. """ # pylint: disable=dangerous-default-value parser = ArgumentParserWithDefaults(description="Run AllenNLP", usage='%(prog)s', prog=prog) parser.add_argument('--version', action='version', version='%(prog)s ' + __version__) subparsers = parser.add_subparsers(title='Commands', metavar='') subcommands = { # Default commands "configure": Configure(), "train": Train(), "evaluate": Evaluate(), "predict": Predict(), "make-vocab": MakeVocab(), "elmo": Elmo(), "fine-tune": FineTune(), "dry-run": DryRun(), "test-install": TestInstall(), "find-lr": FindLearningRate(), "print-results": PrintResults(), # Superseded by overrides **subcommand_overrides } for name, subcommand in subcommands.items(): subparser = subcommand.add_subparser(name, subparsers) # configure doesn't need include-package because it imports # whatever classes it needs. if name != "configure": subparser.add_argument('--include-package', type=str, action='append', default=[], help='additional packages to include') args = parser.parse_args() # If a subparser is triggered, it adds its work as `args.func`. # So if no such attribute has been added, no subparser was triggered, # so give the user some help. if 'func' in dir(args): # Import any additional modules needed (to register custom classes). for package_name in getattr(args, 'include_package', ()): import_submodules(package_name) args.func(args) else: parser.print_help()
The :mod:`~allennlp.run` command only knows about the registered classes in the ``allennlp`` codebase. In particular, once you start creating your own ``Model`` s and so forth, it won't work for them, unless you use the ``--include-package`` flag.
def handle_nested_relation(self, line: str, position: int, tokens: ParseResults):
    """Handle nested statements.

    A nested statement ``A rel1 (B rel2 C)`` is unrolled into two plain
    relations: ``A rel1 B`` and ``B rel2 C``.

    If :code:`allow_nested` is False, raises a ``NestedRelationWarning``.

    :raises: NestedRelationWarning
    """
    if not self.allow_nested:
        raise NestedRelationWarning(self.get_line_number(), line, position)

    inner = tokens[OBJECT]

    # Outer relation points at the inner statement's subject.
    self._handle_relation_harness(line, position, {
        SUBJECT: tokens[SUBJECT],
        RELATION: tokens[RELATION],
        OBJECT: inner[SUBJECT],
    })

    # The inner statement itself.
    self._handle_relation_harness(line, position, {
        SUBJECT: inner[SUBJECT],
        RELATION: inner[RELATION],
        OBJECT: inner[OBJECT],
    })
    return tokens
Handle nested statements. If :code:`allow_nested` is False, raises a ``NestedRelationWarning``. :raises: NestedRelationWarning
def _findSegment(self, x):
    '''
    Binary search for the segment of ``self.xData`` that contains ``x``.

    :param x: x value to place in segment defined by the xData (instantiation)
    :return: The lower index in the segment
    '''
    iLeft = 0
    iRight = len(self.xData) - 1
    while True:
        if iRight - iLeft <= 1:
            return iLeft
        # Integer midpoint: plain '/' yields a float on Python 3, which
        # then raises TypeError when used as a list index.
        i = (iRight + iLeft) // 2
        if x < self.xData[i]:
            iRight = i
        else:
            iLeft = i
:param x: x value to place in segment defined by the xData (instantiation) :return: The lower index in the segment
def do_types_conflict(type1: GraphQLOutputType, type2: GraphQLOutputType) -> bool:
    """Check whether two types conflict

    Two types conflict if both types could not apply to a value
    simultaneously. Composite types are ignored as their individual
    field types will be compared later recursively. However List and
    Non-Null types must match.
    """
    # List wrappers must match on both sides; recurse on the wrapped types.
    if is_list_type(type1):
        if not is_list_type(type2):
            return True
        return do_types_conflict(
            cast(GraphQLList, type1).of_type, cast(GraphQLList, type2).of_type
        )
    if is_list_type(type2):
        return True

    # Same for Non-Null wrappers.
    if is_non_null_type(type1):
        if not is_non_null_type(type2):
            return True
        return do_types_conflict(
            cast(GraphQLNonNull, type1).of_type, cast(GraphQLNonNull, type2).of_type
        )
    if is_non_null_type(type2):
        return True

    # Leaf (scalar/enum) types conflict unless they are the same object.
    if is_leaf_type(type1) or is_leaf_type(type2):
        return type1 is not type2

    # Composite types never conflict here; fields are compared recursively.
    return False
Check whether two types conflict Two types conflict if both types could not apply to a value simultaneously. Composite types are ignored as their individual field types will be compared later recursively. However List and Non-Null types must match.
def update(self, dict):
    """Set all field values from a dictionary.

    For any key in `dict` that is also a field to store tags the
    method retrieves the corresponding value from `dict` and updates
    the `MediaFile`. If a key has the value `None`, the corresponding
    property is deleted from the `MediaFile`.
    """
    # NOTE(review): the parameter name shadows the builtin ``dict``; kept
    # as-is because renaming would break callers passing it by keyword.
    for field in self.sorted_fields():
        if field in dict:
            if dict[field] is None:
                # ``None`` means "remove this tag", not "store None".
                delattr(self, field)
            else:
                setattr(self, field, dict[field])
Set all field values from a dictionary. For any key in `dict` that is also a field to store tags the method retrieves the corresponding value from `dict` and updates the `MediaFile`. If a key has the value `None`, the corresponding property is deleted from the `MediaFile`.
def parse_csr():
    """Parse the certificate signing request and return its domain names.

    Runs ``openssl req`` over the CSR written to the temp directory and
    collects the Common Name plus any DNS Subject Alternative Names.

    :return: set of domain names found in the CSR
    """
    LOGGER.info("Parsing CSR...")
    cmd = [
        'openssl', 'req',
        '-in', os.path.join(gettempdir(), 'domain.csr'),
        '-noout', '-text'
    ]
    # Use a context manager so the devnull handle is closed deterministically
    # (the original leaked the file descriptor).
    with open(os.devnull, 'wb') as devnull:
        out = subprocess.check_output(cmd, stderr=devnull)
    domains = set([])
    common_name = re.search(r"Subject:.*? CN\s?=\s?([^\s,;/]+)", out.decode('utf8'))
    if common_name is not None:
        domains.add(common_name.group(1))
    subject_alt_names = re.search(
        r"X509v3 Subject Alternative Name: \n +([^\n]+)\n",
        out.decode('utf8'), re.MULTILINE | re.DOTALL)
    if subject_alt_names is not None:
        for san in subject_alt_names.group(1).split(", "):
            if san.startswith("DNS:"):
                domains.add(san[4:])
    return domains
Parse certificate signing request for domains
def OnDestroy(self, event):
    """Called on panel destruction; deregisters all monitor observers."""
    # deregister observers
    if hasattr(self, 'cardmonitor'):
        self.cardmonitor.deleteObserver(self.cardtreecardobserver)
    if hasattr(self, 'readermonitor'):
        self.readermonitor.deleteObserver(self.readertreereaderobserver)
    # The reader-tree card observer is registered with the *card* monitor,
    # so guard on cardmonitor here. Previously this ran whenever
    # readermonitor existed, raising AttributeError on a panel that was
    # only partially initialised (readermonitor without cardmonitor).
    if hasattr(self, 'cardmonitor'):
        self.cardmonitor.deleteObserver(self.readertreecardobserver)
    event.Skip()
Called on panel destruction.
def get_user(self, user_id, expand=False):
    """Returns Hacker News `User` object.

    Fetches data from the url:
    https://hacker-news.firebaseio.com/v0/user/<user_id>.json
    e.g. https://hacker-news.firebaseio.com/v0/user/pg.json

    Args:
        user_id (string): unique user id of a Hacker News user.
        expand (bool): Flag to indicate whether to
            transform all IDs into objects.

    Returns:
        `User` object representing a user on Hacker News.

    Raises:
        InvalidUserID: If no such user exists on Hacker News.
    """
    url = urljoin(self.user_url, f"{user_id}.json")
    response = self._get_sync(url)
    if not response:
        raise InvalidUserID

    user = User(response)
    if not (expand and user.submitted):
        return user

    # Fan the user's submissions out into per-type attributes.
    items = self.get_items_by_ids(user.submitted)
    type_by_attr = {
        'stories': 'story',
        'comments': 'comment',
        'jobs': 'job',
        'polls': 'poll',
        'pollopts': 'pollopt',
    }
    for attr, item_type in type_by_attr.items():
        setattr(user, attr, [item for item in items if item.item_type == item_type])
    return user
Returns Hacker News `User` object. Fetches data from the url: https://hacker-news.firebaseio.com/v0/user/<user_id>.json e.g. https://hacker-news.firebaseio.com/v0/user/pg.json Args: user_id (string): unique user id of a Hacker News user. expand (bool): Flag to indicate whether to transform all IDs into objects. Returns: `User` object representing a user on Hacker News. Raises: InvalidUserID: If no such user exists on Hacker News.
def gprmc_to_degdec(lat, latDirn, lng, lngDirn):
    """Converts GPRMC formats (Decimal Minutes) to Degrees Decimal.

    Latitude is ``DDMM.mmm``, longitude ``DDDMM.mmm``; southern and
    western hemispheres are returned as negative values.
    """
    deg_lat = float(lat[:2]) + float(lat[2:]) / 60
    deg_lng = float(lng[:3]) + float(lng[3:]) / 60

    if latDirn == 'S':
        deg_lat = -deg_lat
    if lngDirn == 'W':
        deg_lng = -deg_lng

    return deg_lat, deg_lng
Converts GPRMC formats (Decimal Minutes) to Degrees Decimal.
def import_from_json(self, data):
    """
    Replace the current roster with the
    :meth:`export_as_json`-compatible dictionary in `data`.

    No events are fired during this activity. After this method
    completes, the whole roster contents are exchanged with the
    contents from `data`.

    Also, no data is transferred to the server; this method is intended
    to be used for roster versioning. See below (in the docs of
    :class:`Service`).
    """
    self.version = data.get("ver", None)

    self.items.clear()
    self.groups.clear()
    # Distinct names for the raw JSON pieces avoid shadowing ``data``.
    for jid_str, item_data in data.get("items", {}).items():
        jid = structs.JID.fromstr(jid_str)
        item = Item(jid)
        item.update_from_json(item_data)
        self.items[jid] = item
        for group in item.groups:
            self.groups.setdefault(group, set()).add(item)
Replace the current roster with the :meth:`export_as_json`-compatible dictionary in `data`. No events are fired during this activity. After this method completes, the whole roster contents are exchanged with the contents from `data`. Also, no data is transferred to the server; this method is intended to be used for roster versioning. See below (in the docs of :class:`Service`).
def create_process(daemon, name, callback, *callbackParams):
    """Create a new (not yet started) process.

    :param daemon: if True the process is killed when the main process
        exits; if False the main process must wait for it to finish
    :param name: process name
    :param callback: target function to run in the child process
    :param callbackParams: positional arguments passed to ``callback``
    :return: the created ``Process`` object
    """
    bp = Process(daemon=daemon, name=name, target=callback, args=callbackParams)
    return bp
创建进程 :param daemon: True主进程关闭而关闭, False主进程必须等待子进程结束 :param name: 进程名称 :param callback: 回调函数 :param callbackParams: 回调函数参数 :return: 返回一个进程对象
def encode_dict(data, encoding=None, errors='strict', keep=False,
                preserve_dict_class=False, preserve_tuples=False):
    '''
    Encode all string values to bytes

    Recursively walks the mapping, encoding both keys and values.
    Nested lists, tuples and mappings are handled via encode_list,
    encode_tuple and a recursive call respectively.

    :param data: mapping whose keys and values should be encoded
    :param encoding: target encoding passed to ``to_bytes`` (``None``
        selects the salt default)
    :param errors: codec error handler (e.g. ``'strict'``)
    :param keep: if True, suppress ``UnicodeEncodeError`` and keep the
        original value instead of raising
    :param preserve_dict_class: if True, build the result with ``data``'s
        own mapping class instead of a plain ``dict``
    :param preserve_tuples: if True, tuples are re-encoded as tuples via
        ``encode_tuple`` instead of being converted to lists
    '''
    rv = data.__class__() if preserve_dict_class else {}
    for key, value in six.iteritems(data):
        if isinstance(key, tuple):
            key = encode_tuple(key, encoding, errors, keep, preserve_dict_class) \
                if preserve_tuples \
                else encode_list(key, encoding, errors, keep, preserve_dict_class, preserve_tuples)
        else:
            try:
                key = salt.utils.stringutils.to_bytes(key, encoding, errors)
            except TypeError:
                # to_bytes raises a TypeError when input is not a
                # string/bytestring/bytearray. This is expected and simply
                # means we are going to leave the value as-is.
                pass
            except UnicodeEncodeError:
                if not keep:
                    raise

        if isinstance(value, list):
            value = encode_list(value, encoding, errors, keep, preserve_dict_class, preserve_tuples)
        elif isinstance(value, tuple):
            value = encode_tuple(value, encoding, errors, keep, preserve_dict_class) \
                if preserve_tuples \
                else encode_list(value, encoding, errors, keep, preserve_dict_class, preserve_tuples)
        elif isinstance(value, Mapping):
            value = encode_dict(value, encoding, errors, keep, preserve_dict_class, preserve_tuples)
        else:
            try:
                value = salt.utils.stringutils.to_bytes(value, encoding, errors)
            except TypeError:
                # to_bytes raises a TypeError when input is not a
                # string/bytestring/bytearray. This is expected and simply
                # means we are going to leave the value as-is.
                pass
            except UnicodeEncodeError:
                if not keep:
                    raise

        rv[key] = value
    return rv
Encode all string values to bytes
def _get_configs_path():
    """Get a list of possible configuration files, from the following sources:

    1. All files that exists in constants.CONFS_PATH.
    2. All XDG standard config files for "lago.conf", in reversed
       order of importance.

    Returns:
        list(str): list of files
    """
    xdg_paths = list(base_dirs.load_config_paths('lago', 'lago.conf'))
    # Static paths first, then XDG paths from least to most important.
    paths = [path for path in CONFS_PATH if os.path.exists(path)]
    paths.extend(reversed(xdg_paths))
    return paths
Get a list of possible configuration files, from the following sources: 1. All files that exists in constants.CONFS_PATH. 2. All XDG standard config files for "lago.conf", in reversed order of importance. Returns: list(str): list of files
def pad_with_fill_value(self, pad_widths=None, fill_value=dtypes.NA,
                        **pad_widths_kwargs):
    """
    Return a new Variable with paddings.

    Parameters
    ----------
    pad_width: Mapping of the form {dim: (before, after)}
        Number of values padded to the edges of each dimension.
    **pad_widths_kwargs:
        Keyword argument for pad_widths
    """
    pad_widths = either_dict_or_kwargs(pad_widths, pad_widths_kwargs, 'pad')

    # NA fill values require promoting the dtype (e.g. int -> float for NaN).
    if fill_value is dtypes.NA:
        dtype, fill_value = dtypes.maybe_promote(self.dtype)
    else:
        dtype = self.dtype

    if isinstance(self.data, dask_array_type):
        array = self.data

        # Dask does not yet support pad. We manually implement it.
        # https://github.com/dask/dask/issues/1926
        for d, pad in pad_widths.items():
            axis = self.get_axis_num(d)
            # Build single-chunk fill blocks before and after along ``axis``.
            before_shape = list(array.shape)
            before_shape[axis] = pad[0]
            before_chunks = list(array.chunks)
            before_chunks[axis] = (pad[0], )
            after_shape = list(array.shape)
            after_shape[axis] = pad[1]
            after_chunks = list(array.chunks)
            after_chunks[axis] = (pad[1], )

            arrays = []
            if pad[0] > 0:
                arrays.append(da.full(before_shape, fill_value,
                                      dtype=dtype, chunks=before_chunks))
            arrays.append(array)
            if pad[1] > 0:
                arrays.append(da.full(after_shape, fill_value,
                                      dtype=dtype, chunks=after_chunks))
            if len(arrays) > 1:
                array = da.concatenate(arrays, axis=axis)
    else:
        # NumPy path: dims absent from pad_widths get zero padding.
        pads = [(0, 0) if d not in pad_widths else pad_widths[d]
                for d in self.dims]
        array = np.pad(self.data.astype(dtype, copy=False), pads,
                       mode='constant', constant_values=fill_value)
    return type(self)(self.dims, array)
Return a new Variable with paddings. Parameters ---------- pad_width: Mapping of the form {dim: (before, after)} Number of values padded to the edges of each dimension. **pad_widths_kwargs: Keyword argument for pad_widths
def list_contacts(self, **kwargs):
    """
    List all contacts, optionally filtered by a query. Specify filters as
    query keyword argument, such as:

    query= email is abc@xyz.com,
    query= mobile is 1234567890,
    query= phone is 1234567890,

    contacts can be filtered by name such as;
    letter=Prenit

    Passing None means that no named filter will be passed to
    Freshdesk, which returns list of all contacts

    :returns: list of ``Contact`` objects
    """
    # Collect filters and join them with '&'. The previous version
    # concatenated "query=...state=...letter=..." without separators,
    # producing a malformed URL when more than one filter was given.
    params = []
    if 'query' in kwargs:
        params.append("query={}".format(kwargs.pop('query')))
    if 'state' in kwargs:
        params.append("state={}".format(kwargs.pop('state')))
    if 'letter' in kwargs:
        params.append("letter={}".format(kwargs.pop('letter')))

    url = 'contacts.json?' + "&".join(params)
    contacts = self._api._get(url)
    return [Contact(**c['user']) for c in contacts]
List all contacts, optionally filtered by a query. Specify filters as query keyword argument, such as: query= email is abc@xyz.com, query= mobile is 1234567890, query= phone is 1234567890, contacts can be filtered by name such as; letter=Prenit Passing None means that no named filter will be passed to Freshdesk, which returns list of all contacts
def org_find_projects(object_id, input_params={}, always_retry=True, **kwargs):
    """
    Invokes the /org-xxxx/findProjects API method.

    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Organizations#API-method%3A-%2Forg-xxxx%2FfindProjects
    """
    resource = '/%s/findProjects' % object_id
    return DXHTTPRequest(resource, input_params,
                         always_retry=always_retry, **kwargs)
Invokes the /org-xxxx/findProjects API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Organizations#API-method%3A-%2Forg-xxxx%2FfindProjects
def copy_data(self, project, logstore, from_time, to_time=None,
              to_client=None, to_project=None, to_logstore=None,
              shard_list=None,
              batch_size=None, compress=None, new_topic=None, new_source=None):
    """Copy data from one logstore to another one (which could be in the
    same or a different region); times refer to the server-side receive time.

    :type project: string
    :param project: project name

    :type logstore: string
    :param logstore: logstore name

    :type from_time: string/int
    :param from_time: cursor value: "begin", a timestamp, or a readable time
        like "%Y-%m-%d %H:%M:%S<time_zone>" e.g. "2018-01-02 12:12:10+8:00";
        human readable strings ("1 hour ago", "now", "yesterday 0:0:0") are
        also supported, refer to
        https://aliyun-log-cli.readthedocs.io/en/latest/tutorials/tutorial_human_readable_datetime.html

    :type to_time: string/int
    :param to_time: cursor value, default "end"; same accepted formats as
        ``from_time``

    :type to_client: LogClient
    :param to_client: logclient instance, if empty will use source client

    :type to_project: string
    :param to_project: project name, if empty will use source project

    :type to_logstore: string
    :param to_logstore: logstore name, if empty will use source logstore

    :type shard_list: string
    :param shard_list: shard number list, comma separated or a range:
        1,20,31-40

    :type batch_size: int
    :param batch_size: batch size to fetch the data in each iteration,
        by default 500

    :type compress: bool
    :param compress: if use compression, by default True

    :type new_topic: string
    :param new_topic: overwrite the copied topic with the passed one

    :type new_source: string
    :param new_source: overwrite the copied source with the passed one

    :return: LogResponse {"total_count": 30, "shards": {0: 10, 1: 20} })
    """
    # Delegate to the module-level copy_data implementation.
    return copy_data(
        self, project, logstore, from_time,
        to_time=to_time, to_client=to_client,
        to_project=to_project, to_logstore=to_logstore,
        shard_list=shard_list, batch_size=batch_size,
        compress=compress, new_topic=new_topic, new_source=new_source,
    )
copy data from one logstore to another one (could be the same or in different region), the time is log received time on server side. :type project: string :param project: project name :type logstore: string :param logstore: logstore name :type from_time: string/int :param from_time: curosr value, could be begin, timestamp or readable time in readable time like "%Y-%m-%d %H:%M:%S<time_zone>" e.g. "2018-01-02 12:12:10+8:00", also support human readable string, e.g. "1 hour ago", "now", "yesterday 0:0:0", refer to https://aliyun-log-cli.readthedocs.io/en/latest/tutorials/tutorial_human_readable_datetime.html :type to_time: string/int :param to_time: curosr value, default is "end", could be begin, timestamp or readable time in readable time like "%Y-%m-%d %H:%M:%S<time_zone>" e.g. "2018-01-02 12:12:10+8:00", also support human readable string, e.g. "1 hour ago", "now", "yesterday 0:0:0", refer to https://aliyun-log-cli.readthedocs.io/en/latest/tutorials/tutorial_human_readable_datetime.html :type to_client: LogClient :param to_client: logclient instance, if empty will use source client :type to_project: string :param to_project: project name, if empty will use source project :type to_logstore: string :param to_logstore: logstore name, if empty will use source logstore :type shard_list: string :param shard_list: shard number list. could be comma seperated list or range: 1,20,31-40 :type batch_size: int :param batch_size: batch size to fetch the data in each iteration. by default it's 500 :type compress: bool :param compress: if use compression, by default it's True :type new_topic: string :param new_topic: overwrite the copied topic with the passed one :type new_source: string :param new_source: overwrite the copied source with the passed one :return: LogResponse {"total_count": 30, "shards": {0: 10, 1: 20} })
def hash(buf, encoding="utf-8"):
    """
    Compute the fuzzy hash of a buffer

    :param String|Bytes buf: The data to be fuzzy hashed
    :return: The fuzzy hash
    :rtype: String
    :raises InternalError: If lib returns an internal error
    :raises TypeError: If buf is not String or Bytes
    """
    if isinstance(buf, six.text_type):
        buf = buf.encode(encoding)
    if not isinstance(buf, six.binary_type):
        raise TypeError(
            "Argument must be of string, unicode or bytes type not "
            "'%r'" % type(buf)
        )

    # allocate memory for result
    result = ffi.new("char[]", binding.lib.FUZZY_MAX_RESULT)
    rc = binding.lib.fuzzy_hash_buf(buf, len(buf), result)
    if rc != 0:
        raise InternalError("Function returned an unexpected error code")

    return ffi.string(result).decode("ascii")
Compute the fuzzy hash of a buffer :param String|Bytes buf: The data to be fuzzy hashed :return: The fuzzy hash :rtype: String :raises InternalError: If lib returns an internal error :raises TypeError: If buf is not String or Bytes
def printImportedNames(self):
    """Produce a report of imported names."""
    for module in self.listModules():
        print("%s:" % module.modname)
        names = "\n ".join(imp.name for imp in module.imported_names)
        print(" %s" % names)
Produce a report of imported names.
def setup_logger():
    # type: () -> logging.Logger
    """Setup the root logger.

    Return the logger instance for possible further setting and use.
    To be used from CLI scripts only.
    """
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(
        fmt='%(levelname)s - %(module)s - %(message)s'))
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    root.addHandler(handler)
    return root
Setup the root logger. Return the logger instance for possible further setting and use. To be used from CLI scripts only.
def get_content(request, page_id, content_id):
    """Get the content for a particular page"""
    body = Content.objects.get(pk=content_id).body
    return HttpResponse(body)
Get the content for a particular page
def fprint(self, file, indent):
    """
    Print contents of directory to open stream.

    :param file: an open Python file object (converted to a C stream)
    :param indent: indentation level passed through to the C call
    """
    return lib.zdir_fprint(self._as_parameter_, coerce_py_file(file), indent)
Print contents of directory to open stream
def reset():
    """
    Unmount and remove the sqlite database (used in robot reset)
    """
    # The journal lives next to the database file; its name is the database
    # filename plus a '-journal' suffix (not a path join).
    journal_path = database_path + '-journal'
    for path in (database_path, journal_path):
        if os.path.exists(path):
            os.remove(path)
Unmount and remove the sqlite database (used in robot reset)
def _is_qrs(self, peak_num, backsearch=False):
    """
    Check whether a peak is a qrs complex. It is classified as qrs
    if it:
    - Comes after the refractory period
    - Passes qrs threshold
    - Is not a t-wave (check it if the peak is close to the previous
      qrs).

    Parameters
    ----------
    peak_num : int
        The peak number of the mwi signal to be inspected
    backsearch: bool, optional
        Whether the peak is being inspected during backsearch

    Returns
    -------
    bool
        True if the peak is classified as a qrs complex.
    """
    i = self.peak_inds_i[peak_num]

    # Backsearch relaxes the detection threshold to half.
    if backsearch:
        qrs_thr = self.qrs_thr / 2
    else:
        qrs_thr = self.qrs_thr

    if (i-self.last_qrs_ind > self.ref_period
       and self.sig_i[i] > qrs_thr):
        # Close to the previous qrs: could be a t-wave, so check.
        if i-self.last_qrs_ind < self.t_inspect_period:
            if self._is_twave(peak_num):
                return False
        return True

    return False
Check whether a peak is a qrs complex. It is classified as qrs if it: - Comes after the refractory period - Passes qrs threshold - Is not a t-wave (check it if the peak is close to the previous qrs). Parameters ---------- peak_num : int The peak number of the mwi signal to be inspected backsearch: bool, optional Whether the peak is being inspected during backsearch
def _chunk_actions(actions, chunk_size, max_chunk_bytes, serializer):
    """
    Split actions into chunks by number or size, serialize them into strings in
    the process.
    """
    bulk_actions = []
    size = 0
    action_count = 0

    for action, data in actions:
        serialized_action = serializer.dumps(action)
        cur_size = len(serialized_action) + 1

        has_data = data is not None
        if has_data:
            serialized_data = serializer.dumps(data)
            cur_size += len(serialized_data) + 1

        # If this action would overflow the current chunk (by bytes or
        # by count), flush the chunk first and start a new one.
        if bulk_actions and (
            size + cur_size > max_chunk_bytes or action_count == chunk_size
        ):
            yield bulk_actions
            bulk_actions = []
            size = 0
            action_count = 0

        bulk_actions.append(serialized_action)
        if has_data:
            bulk_actions.append(serialized_data)
        size += cur_size
        action_count += 1

    if bulk_actions:
        yield bulk_actions
Split actions into chunks by number or size, serialize them into strings in the process.
def forward(self, x, boxes):
    """
    Arguments:
        x (tuple[tensor, tensor]): x contains the class logits
            and the box_regression from the model.
        boxes (list[BoxList]): bounding boxes that are used as
            reference, one for each image

    Returns:
        results (list[BoxList]): one BoxList for each image, containing
            the extra fields labels and scores
    """
    class_logits, box_regression = x
    class_prob = F.softmax(class_logits, -1)

    # TODO think about a representation of batch of boxes
    image_shapes = [box.size for box in boxes]
    boxes_per_image = [len(box) for box in boxes]
    concat_boxes = torch.cat([a.bbox for a in boxes], dim=0)

    if self.cls_agnostic_bbox_reg:
        # Class-agnostic regression predicts one box per proposal;
        # keep only the last 4 regression values.
        box_regression = box_regression[:, -4:]
    proposals = self.box_coder.decode(
        box_regression.view(sum(boxes_per_image), -1), concat_boxes
    )
    if self.cls_agnostic_bbox_reg:
        # Replicate the single decoded box across all classes.
        proposals = proposals.repeat(1, class_prob.shape[1])

    num_classes = class_prob.shape[1]

    # Split the flat per-proposal tensors back into per-image chunks.
    proposals = proposals.split(boxes_per_image, dim=0)
    class_prob = class_prob.split(boxes_per_image, dim=0)

    results = []
    for prob, boxes_per_img, image_shape in zip(
        class_prob, proposals, image_shapes
    ):
        boxlist = self.prepare_boxlist(boxes_per_img, prob, image_shape)
        boxlist = boxlist.clip_to_image(remove_empty=False)
        boxlist = self.filter_results(boxlist, num_classes)
        results.append(boxlist)
    return results
Arguments:
    x (tuple[tensor, tensor]): x contains the class logits
        and the box_regression from the model.
    boxes (list[BoxList]): bounding boxes that are used as
        reference, one for each image

Returns:
    results (list[BoxList]): one BoxList for each image, containing
    the extra fields labels and scores
def name(self):  # pylint: disable=no-self-use
    """
    Name returns user's name or user's email or user_id
    :return: best guess of name to use to greet user
    """
    # Try keys in order of preference; first hit wins.
    session = self.session
    for key in ('lis_person_sourcedid',
                'lis_person_contact_email_primary',
                'user_id'):
        if key in session:
            return session[key]
    return ''
Name returns user's name or user's email or user_id :return: best guess of name to use to greet user
def exists(self, path_or_index):
    """
    Checks if a path exists in the document. This is meant to be used
    for a corresponding :meth:`~couchbase.subdocument.exists` request.

    :param path_or_index: The path (or index) to check
    :return: `True` if the path exists, `False` if the path does not
        exist
    :raise: An exception if the server-side check failed for a reason
        other than the path not existing.
    """
    result = self._resolve(path_or_index)
    # result[0] is the status code; a falsy (zero) code means success,
    # i.e. the path exists.
    if not result[0]:
        return True
    elif E.SubdocPathNotFoundError._can_derive(result[0]):
        return False
    else:
        # Any other non-zero code is a genuine error; surface it.
        raise E.exc_from_rc(result[0])
Checks if a path exists in the document. This is meant to be used for a corresponding :meth:`~couchbase.subdocument.exists` request. :param path_or_index: The path (or index) to check :return: `True` if the path exists, `False` if the path does not exist :raise: An exception if the server-side check failed for a reason other than the path not existing.
async def open(self) -> 'HolderProver':
    """
    Explicit entry. Perform ancestor opening operations,
    then parse cache from archive if so configured, and
    synchronize revocation registry to tails tree content.

    :return: current object
    """

    LOGGER.debug('HolderProver.open >>>')

    await super().open()
    # Optionally warm the caches from the on-disk archive.
    if self.cfg.get('parse-cache-on-open', False):
        Caches.parse(self.dir_cache)

    # Sync each revocation registry found in the local tails tree.
    for path_rr_id in Tails.links(self._dir_tails):
        await self._sync_revoc(basename(path_rr_id))

    LOGGER.debug('HolderProver.open <<<')
    return self
Explicit entry. Perform ancestor opening operations, then parse cache from archive if so configured, and synchronize revocation registry to tails tree content. :return: current object
def insert(self, document):
    """Insert a new document into the table.

    :param document: the document to insert
    :returns: the inserted document's ID
    """
    new_id = self._get_doc_id(document)

    table_data = self._read()
    # store a shallow copy so later caller mutations don't leak in
    table_data[new_id] = dict(document)
    self._write(table_data)

    return new_id
Insert a new document into the table. :param document: the document to insert :returns: the inserted document's ID
def create_symlinks(d):
    """Create new symbolic links in output directory.

    Walks the ``prev`` chain of the dumped page metadata to find the
    latest page, collects the images in reading order, and symlinks each
    one into the output directory with a zero-padded sequence prefix.

    :param d: directory containing the dumped JSON metadata
    """
    data = loadJson(d)
    outDir = prepare_output(d)
    # dict.keys() is a view in Python 3: it supports neither indexing
    # nor remove(), so materialize the keys into a real list first.
    unseen = list(data["pages"])
    latest = None  # stays None when there are no pages at all
    while len(unseen) > 0:
        latest = work = unseen[0]
        while work in unseen:
            unseen.remove(work)
            if "prev" in data["pages"][work]:
                work = data["pages"][work]["prev"]
    print("Latest page: %s" % (latest))
    order = []
    work = latest
    while work in data["pages"]:
        order.extend(data["pages"][work]["images"].values())
        # follow the chain backwards; None ends the loop
        work = data["pages"][work].get("prev")
    order.reverse()
    for i, img in enumerate(order):
        os.symlink(os.path.join('..', img),
                   os.path.join(outDir, '%05i_%s' % (i, img)))
Create new symbolic links in output directory.
def filter_paths(pathnames, patterns=None, ignore_patterns=None):
    """Filter a set of paths by acceptable and ignorable patterns.

    :param pathnames: iterable of paths to filter
    :param patterns: patterns a path must match to be kept
        (defaults to matching everything)
    :param ignore_patterns: patterns that exclude a path (defaults to none)
    :returns: list of paths matching ``patterns`` and none of
        ``ignore_patterns``
    """
    include = ['*'] if patterns is None else patterns
    exclude = [] if ignore_patterns is None else ignore_patterns
    return [
        path for path in pathnames
        if match_patterns(path, include) and not match_patterns(path, exclude)
    ]
Filters from a set of paths based on acceptable patterns and ignorable patterns.
def check_contract_allowed(func):
    """Check if Contract is allowed by token"""
    @wraps(func)
    def decorator(*args, **kwargs):
        contract = kwargs.get('contract')
        # no contract kwarg: nothing to authorize
        if not contract:
            return func(*args, **kwargs)
        if (current_user.is_authenticated()
                and not current_user.allowed(contract)):
            return current_app.login_manager.unauthorized()
        return func(*args, **kwargs)
    return decorator
Check if Contract is allowed by token
def start(self, stdout=subprocess.PIPE, stderr=subprocess.PIPE):
    """
    Merged copy paste from the inheritance chain with modified
    stdout/err behaviour

    Spawns the configured command as a subprocess (unless one was
    already spawned), tags the child with this executor's UUID, waits
    for the readiness check, and returns ``self``.

    :param stdout: destination for the child's standard output
    :param stderr: destination for the child's standard error
    :raise AlreadyRunning: when the pre-start check detects another
        process running with the same config
    """
    if self.pre_start_check():
        # Some other executor (or process) is running with same config:
        raise AlreadyRunning(self)

    if self.process is None:
        # pick the shell string or the argv list depending on shell mode
        command = self.command
        if not self._shell:
            command = self.command_parts

        env = os.environ.copy()
        # tag the child's environment with our UUID so it can be
        # identified later (e.g. to find leftover processes)
        env[ENV_UUID] = self._uuid
        popen_kwargs = {
            'shell': self._shell,
            'stdin': subprocess.PIPE,
            'stdout': stdout,
            'stderr': stderr,
            'universal_newlines': True,
            'env': env,
        }
        if platform.system() != 'Windows':
            # start the child in its own session (os.setsid); presumably
            # so the whole process group can be signalled on shutdown --
            # confirm against the executor's stop() logic
            popen_kwargs['preexec_fn'] = os.setsid
        self.process = subprocess.Popen(
            command,
            **popen_kwargs,
        )

    self._set_timeout()
    # block until the subprocess reports ready (or the timeout fires)
    self.wait_for(self.check_subprocess)
    return self
Merged copy paste from the inheritance chain with modified stdout/err behaviour
def get_type_of_fields(fields, table):
    """
    Return data types of `fields` that are in `table`.

    If `fields` is empty or None, the table's primary key is used
    instead.

    :param fields: list - list of field names whose types are wanted
    :param table: sa.Table - the current table
    :return: dict - mapping of `field_name` to its resolved type value
        (unknown column types fall back to `rc.TEXT_FIELD.value`)
    """
    if not fields:
        # NOTE(review): this assigns the PrimaryKeyConstraint itself and
        # relies on `field[0] in fields` membership below working against
        # it -- confirm this matches names, not Column objects
        fields = table.primary_key

    actual_fields = [
        field for field in table.c.items() if field[0] in fields
    ]

    # keyed by the concrete SQLAlchemy type class of each column
    data_type_fields = {
        name: FIELD_TYPES.get(type(field_type.type), rc.TEXT_FIELD.value)
        for name, field_type in actual_fields
    }

    return data_type_fields
Return data types of `fields` that are in `table`. If a given parameter is empty return primary key. :param fields: list - list of fields that need to be returned :param table: sa.Table - the current table :return: list - list of the tuples `(field_name, fields_type)`
def Description(self):
    """Returns searchable data as Description"""
    # combine own id with the parent container's title
    combined = " ".join((self.getId(), self.aq_parent.Title()))
    return safe_unicode(combined).encode('utf-8')
Returns searchable data as Description
def selectnone(table, field, complement=False):
    """Select rows where the given field is `None`."""

    def is_none(value):
        return value is None

    return select(table, field, is_none, complement=complement)
Select rows where the given field is `None`.
def __process_sentence(sentence_tuple, counts):
    """Pull the actual sentence from the tuple (the tuple carries extra
    data such as the ID) and run it through every substitution stage.

    :param sentence_tuple: tuple whose third element is the raw sentence
    :param counts: substitution counters handed to the word replacers
    """
    sentence = sentence_tuple[2]

    # word-level substitutions: each consumes the counters
    for replacer in (__replace_verbs, __replace_nouns,
                     ___replace_adjective_maybe, __replace_adjective,
                     __replace_names):
        sentence = replacer(sentence, counts)

    # sentence-level passes, in order: A/AN article fixing, repeated
    # segments, RANDOM tuple substitution, word/sentence capitalisation,
    # all-caps handling, and finally spacing cleanup
    for stage in (__replace_an, __replace_repeat, __replace_random,
                  __replace_capitalise, __replace_capall, __check_spaces):
        sentence = stage(sentence)

    return sentence
pull the actual sentence from the tuple (tuple contains additional data such as ID) :param _sentence_tuple: :param counts:
def list(self):
    """
    :rtype: list(setting_name, value, default_value, is_set, is_supported)
    """
    entries = []
    # every known (supported) setting first
    for name in _SETTINGS:
        entries.append((
            name,
            self.get(name),
            self.get_default_value(name),
            self.is_set(name),
            True,
        ))
    # then any persisted settings that are no longer recognised
    for name in sorted(self.settings_state.list_keys()):
        if self.is_supported(name):
            continue
        entries.append((name, self.get(name), None, True, False))
    return entries
:rtype: list(setting_name, value, default_value, is_set, is_supported)
def remove(mode_id: str) -> bool:
    """Remove the specified mode identifier from the active modes.

    :param mode_id: mode identifier to deactivate
    :return: whether a remove operation was actually carried out
        (``False`` when the identifier was not currently active)
    """
    if not has(mode_id):
        return False
    _current_modes.remove(mode_id)
    return True
Removes the specified mode identifier from the active modes and returns whether or not a remove operation was carried out. If the mode identifier is not in the currently active modes, it does need to be removed.
def combineblocks(blks, imgsz, stpsz=None, fn=np.median): """Combine blocks from an ndarray to reconstruct ndarray signal. Parameters ---------- blks : ndarray nd array of blocks of a signal imgsz : tuple tuple of the signal size stpsz : tuple, optional (default None, corresponds to steps of 1) tuple of step sizes between neighboring blocks fn : function, optional (default np.median) the function used to resolve multivalued cells Returns ------- imgs : ndarray reconstructed signal, unknown pixels are returned as np.nan """ # Construct a vectorized append function def listapp(x, y): x.append(y) veclistapp = np.vectorize(listapp, otypes=[np.object_]) blksz = blks.shape[:-1] if stpsz is None: stpsz = tuple(1 for _ in blksz) # Calculate the number of blocks that can fit in each dimension of # the images numblocks = tuple(int(np.floor((a-b)/c) + 1) for a, b, c in zip_longest(imgsz, blksz, stpsz, fillvalue=1)) new_shape = blksz + numblocks blks = np.reshape(blks, new_shape) # Construct an imgs matrix of empty lists imgs = np.empty(imgsz, dtype=np.object_) imgs.fill([]) imgs = np.frompyfunc(list, 1, 1)(imgs) # Iterate over each block and append the values to the corresponding # imgs cell for pos in np.ndindex(numblocks): slices = tuple(slice(a*c, a*c + b) for a, b, c in zip_longest(pos, blksz, stpsz, fillvalue=1)) veclistapp(imgs[slices].squeeze(), blks[(Ellipsis, ) + pos].squeeze()) return np.vectorize(fn, otypes=[blks.dtype])(imgs)
Combine blocks from an ndarray to reconstruct ndarray signal. Parameters ---------- blks : ndarray nd array of blocks of a signal imgsz : tuple tuple of the signal size stpsz : tuple, optional (default None, corresponds to steps of 1) tuple of step sizes between neighboring blocks fn : function, optional (default np.median) the function used to resolve multivalued cells Returns ------- imgs : ndarray reconstructed signal, unknown pixels are returned as np.nan
def softmax_average_pooling_class_label_top(body_output, targets, model_hparams, vocab_size):
    """Loss for class label."""
    del targets  # unused arg
    scope_name = (
        "softmax_average_pooling_onehot_class_label_modality_%d_%d" % (
            vocab_size, model_hparams.hidden_size))
    with tf.variable_scope(scope_name):
        # average-pool over the sequence axis, then project to the vocab
        pooled = tf.reduce_mean(body_output, axis=1, keepdims=True)
        return tf.layers.dense(pooled, vocab_size)
Loss for class label.
def namespace_map(self, target):
    """Returns the namespace_map used for Thrift generation.

    :param target: The target to extract the namespace_map from.
    :type target: :class:`pants.backend.codegen.targets.java_thrift_library.JavaThriftLibrary`
    :returns: The namespaces to remap (old to new), falling back to the
        configured default map when the target declares none.
    :rtype: dictionary
    """
    self._check_target(target)
    if target.namespace_map:
        return target.namespace_map
    return self._default_namespace_map
Returns the namespace_map used for Thrift generation. :param target: The target to extract the namespace_map from. :type target: :class:`pants.backend.codegen.targets.java_thrift_library.JavaThriftLibrary` :returns: The namespaces to remap (old to new). :rtype: dictionary
def element_height_should_be(self, locator, expected):
    """Verifies the element identified by `locator` has the expected height.

    Expected height should be in pixels.

    | *Argument* | *Description* | *Example* |
    | locator | Selenium 2 element locator | id=my_id |
    | expected | expected height | 600 |"""
    self._info("Verifying element '%s' height is '%s'" % (locator, expected))
    # the actual lookup/comparison (and failure reporting) is delegated
    # to the shared size-checking helper
    self._check_element_size(locator, 'height', expected)
Verifies the element identified by `locator` has the expected height. Expected height should be in pixels. | *Argument* | *Description* | *Example* | | locator | Selenium 2 element locator | id=my_id | | expected | expected height | 600 |
def _next(self, request, application, roles, next_config): """ Continue the state machine at given state. """ # we only support state changes for POST requests if request.method == "POST": key = None # If next state is a transition, process it while True: # We do not expect to get a direct state transition here. assert next_config['type'] in ['goto', 'transition'] while next_config['type'] == 'goto': key = next_config['key'] next_config = self._config[key] instance = load_instance(next_config) if not isinstance(instance, Transition): break next_config = instance.get_next_config(request, application, roles) # lookup next state assert key is not None state_key = key # enter that state instance.enter_state(request, application) application.state = state_key application.save() # log details log.change(application.application_ptr, "state: %s" % instance.name) # redirect to this new state url = get_url(request, application, roles) return HttpResponseRedirect(url) else: return HttpResponseBadRequest("<h1>Bad Request</h1>")
Continue the state machine at given state.
def category_helper(form_tag=True):
    """
    Category's form layout helper

    Builds a crispy-forms ``FormHelper`` that lays the category form out
    on a grid of ``Row``/``Column`` elements and posts back to the
    current URL.

    :param form_tag: whether the rendered form includes its own
        ``<form>`` tag (pass ``False`` when embedding inside another form)
    :return: configured ``FormHelper`` instance
    """
    helper = FormHelper()
    helper.form_action = '.'
    # presumably the Foundation 'Abide' client-side validation hook --
    # confirm against the project's frontend setup
    helper.attrs = {'data_abide': ''}
    helper.form_tag = form_tag
    helper.layout = Layout(
        # full-width title
        Row(
            Column(
                'title',
                css_class='small-12'
            ),
        ),
        # slug next to its ordering weight
        Row(
            Column(
                'slug',
                css_class='small-12 medium-10'
            ),
            Column(
                'order',
                css_class='small-12 medium-2'
            ),
        ),
        # full-width description
        Row(
            Column(
                'description',
                css_class='small-12'
            ),
        ),
        # visibility toggle
        Row(
            Column(
                'visible',
                css_class='small-12'
            ),
        ),
        ButtonHolderPanel(
            Submit('submit', _('Submit')),
            css_class='text-right',
        ),
    )

    return helper
Category's form layout helper
def password_length_needed(entropybits: Union[int, float], chars: str) -> int:
    """Calculate the length of a password for a given entropy and chars.

    :param entropybits: desired entropy in bits (must be non-negative)
    :param chars: alphabet the password is drawn from (must be non-empty)
    :return: minimum length whose total entropy reaches ``entropybits``
    :raises TypeError: if either argument has the wrong type
    :raises ValueError: if ``entropybits`` is negative or ``chars`` is empty
    """
    if not isinstance(entropybits, (int, float)):
        raise TypeError('entropybits can only be int or float')
    if entropybits < 0:
        # the guard accepts 0, so the message must say "or equal to"
        raise ValueError('entropybits should be greater than or equal to 0')
    if not isinstance(chars, str):
        raise TypeError('chars can only be string')
    if not chars:
        raise ValueError("chars can't be null")
    # per-character entropy of the given alphabet
    entropy_c = entropy_bits(list(chars))
    return ceil(entropybits / entropy_c)
Calculate the length of a password for a given entropy and chars.
def list(self, **kwds):
    """
    Endpoint: /albums/list.json

    Returns a list of Album objects.
    """
    raw = self._client.get("/albums/list.json", **kwds)["result"]
    items = self._result_to_list(raw)
    return [Album(self._client, item) for item in items]
Endpoint: /albums/list.json Returns a list of Album objects.
def do_list_modules(self, long_output=None,sort_order=None):
    """Display a list of loaded modules.

    Config items:
        - shutit.list_modules['long']
          If set, also print each module's run order value

        - shutit.list_modules['sort']
          Select the column by which the list is ordered:
            - id: sort the list by module id
            - run_order: sort the list by module run order

    The output is also saved to ['build']['log_config_path']/module_order.txt

    Dependencies: operator
    """
    shutit_global.shutit_global_object.yield_to_draw()
    cfg = self.cfg
    # list of module ids and other details
    # will also contain column headers
    table_list = []
    # fall back to the configured defaults when not passed explicitly
    if long_output is None:
        long_output = self.list_modules['long']
    if sort_order is None:
        sort_order = self.list_modules['sort']
    if long_output:
        # --long table: sort modules by run order
        table_list.append(["Order","Module ID","Description","Run Order","Built","Compatible"])
        #table_list.append(["Order","Module ID","Description","Run Order","Built"])
    else:
        # "short" table ==> sort module by module_id
        #table_list.append(["Module ID","Description","Built"])
        table_list.append(["Module ID","Description","Built","Compatible"])

    if sort_order == 'run_order':
        d = {}
        for m in self.shutit_modules:
            d.update({m.module_id:m.run_order})
        # sort dict by run_order; see http://stackoverflow.com/questions/613183/sort-a-python-dictionary-by-value
        b = sorted(d.items(), key=operator.itemgetter(1))
        count = 0
        # now b is a list of tuples (module_id, run_order)
        for pair in b:
            # module_id is the first item of the tuple
            k = pair[0]
            for m in self.shutit_modules:
                if m.module_id == k:
                    count += 1
                    compatible = True
                    # compatibility can only be probed with the build flag
                    # temporarily switched on; restored to False afterwards
                    if not cfg[m.module_id]['shutit.core.module.build']:
                        cfg[m.module_id]['shutit.core.module.build'] = True
                        compatible = self.determine_compatibility(m.module_id) == 0
                        cfg[m.module_id]['shutit.core.module.build'] = False
                    if long_output:
                        table_list.append([str(count),m.module_id,m.description,str(m.run_order),str(cfg[m.module_id]['shutit.core.module.build']),str(compatible)])
                        #table_list.append([str(count),m.module_id,m.description,str(m.run_order),str(cfg[m.module_id]['shutit.core.module.build'])])
                    else:
                        table_list.append([m.module_id,m.description,str(cfg[m.module_id]['shutit.core.module.build']),str(compatible)])
    elif sort_order == 'id':
        l = []
        for m in self.shutit_modules:
            l.append(m.module_id)
        l.sort()
        for k in l:
            for m in self.shutit_modules:
                if m.module_id == k:
                    # NOTE(review): count is reset to 1 here and never
                    # incremented, unlike the run_order branch -- confirm
                    # whether a running count was intended
                    count = 1
                    compatible = True
                    # NOTE(review): unlike the run_order branch, the build
                    # flag is NOT restored to False after the compatibility
                    # probe -- confirm whether this is deliberate
                    if not cfg[m.module_id]['shutit.core.module.build']:
                        cfg[m.module_id]['shutit.core.module.build'] = True
                        compatible = self.determine_compatibility(m.module_id) == 0
                    if long_output:
                        table_list.append([str(count),m.module_id,m.description,str(m.run_order),str(cfg[m.module_id]['shutit.core.module.build']),str(compatible)])
                        #table_list.append([str(count),m.module_id,m.description,str(m.run_order),str(cfg[m.module_id]['shutit.core.module.build'])])
                    else:
                        #table_list.append([m.module_id,m.description,str(cfg[m.module_id]['shutit.core.module.build'])])
                        table_list.append([m.module_id,m.description,str(cfg[m.module_id]['shutit.core.module.build']),str(compatible)])

    # format table for display
    table = texttable.Texttable()
    table.add_rows(table_list)
    # Base length of table on length of strings
    colwidths = []
    for item in table_list:
        for n in range(0,len(item)):
            # default to 10 chars
            colwidths.append(10)
        break
    for item in table_list:
        for n in range(0,len(item)-1):
            if len(str(item[n])) > colwidths[n]:
                colwidths[n] = len(str(item[n]))
    table.set_cols_width(colwidths)
    msg = table.draw()
    shutit_global.shutit_global_object.shutit_print('\n' + msg)
Display a list of loaded modules. Config items: - shutit.list_modules['long'] If set, also print each module's run order value - shutit.list_modules['sort'] Select the column by which the list is ordered: - id: sort the list by module id - run_order: sort the list by module run order The output is also saved to ['build']['log_config_path']/module_order.txt Dependencies: operator
def _on_rpc_done(self, future):
    """Triggered whenever the underlying RPC terminates without recovery.

    This is typically triggered from one of two threads: the background
    consumer thread (when calling ``recv()`` produces a non-recoverable
    error) or the grpc management thread (when cancelling the RPC).

    This method is *non-blocking*. It will start another thread to deal
    with shutting everything down. This is to prevent blocking in the
    background consumer and preventing it from being ``joined()``.
    """
    _LOGGER.info("RPC termination has signaled manager shutdown.")
    reason = _maybe_wrap_exception(future)
    shutdown_thread = threading.Thread(
        name=_RPC_ERROR_THREAD_NAME,
        target=self.close,
        kwargs={"reason": reason},
    )
    # daemonize so an in-flight shutdown never blocks interpreter exit
    shutdown_thread.daemon = True
    shutdown_thread.start()
Triggered whenever the underlying RPC terminates without recovery. This is typically triggered from one of two threads: the background consumer thread (when calling ``recv()`` produces a non-recoverable error) or the grpc management thread (when cancelling the RPC). This method is *non-blocking*. It will start another thread to deal with shutting everything down. This is to prevent blocking in the background consumer and preventing it from being ``joined()``.
def _create_adapter_type(network_adapter, adapter_type,
                         network_adapter_label=''):
    '''
    Returns a vim.vm.device.VirtualEthernetCard object specifying a
    virtual ethernet card information

    network_adapter
        None or VirtualEthernet object

    adapter_type
        String, type of adapter

    network_adapter_label
        string, network adapter name
    '''
    log.trace('Configuring virtual machine network '
              'adapter adapter_type=%s', adapter_type)
    if adapter_type in ['vmxnet', 'vmxnet2', 'vmxnet3', 'e1000', 'e1000e']:
        # a recognised type was requested: build the matching device class
        edited_network_adapter = salt.utils.vmware.get_network_adapter_type(
            adapter_type)
        if isinstance(network_adapter, type(edited_network_adapter)):
            # existing adapter already has the requested type; reuse it
            edited_network_adapter = network_adapter
        else:
            if network_adapter:
                log.trace('Changing type of \'%s\' from \'%s\' to \'%s\'',
                          network_adapter.deviceInfo.label,
                          type(network_adapter).__name__.rsplit(".", 1)[1][7:].lower(),
                          adapter_type)
    else:
        # If device is edited and type not specified or does not match,
        # don't change adapter type
        if network_adapter:
            if adapter_type:
                log.error(
                    'Cannot change type of \'%s\' to \'%s\'. Not changing type',
                    network_adapter.deviceInfo.label, adapter_type
                )
            edited_network_adapter = network_adapter
        else:
            if not adapter_type:
                log.trace('The type of \'%s\' has not been specified. '
                          'Creating of default type \'vmxnet3\'',
                          network_adapter_label)
            # default device class when nothing usable was supplied
            edited_network_adapter = vim.vm.device.VirtualVmxnet3()
    return edited_network_adapter
Returns a vim.vm.device.VirtualEthernetCard object specifying a virtual ethernet card information network_adapter None or VirtualEthernet object adapter_type String, type of adapter network_adapter_label string, network adapter name
def run(self):
    """Process work units until a sentinel signals exit."""
    while True:
        self.running = True
        unit = self._workq.get()
        if is_sentinel(unit):
            # sentinel received: stop processing
            break
        # Run the job / sequence
        unit.process()
        self.running = False
Process the work unit, or wait for sentinel to exit
def onSiliconCheckList(ra_deg, dec_deg, FovObj, padding_pix=DEFAULT_PADDING):
    """Check a list of positions.

    Positions more than 90 degrees from the field-of-view centre are
    reported False without consulting the detailed silicon check.
    """
    separation = angSepVincenty(FovObj.ra0_deg, FovObj.dec0_deg,
                                ra_deg, dec_deg)
    nearby = separation < 90.
    result = np.zeros(len(separation), dtype=bool)
    # only the nearby subset needs the (expensive) detailed check
    result[nearby] = FovObj.isOnSiliconList(ra_deg[nearby], dec_deg[nearby],
                                            padding_pix=padding_pix)
    return result
Check a list of positions.
def set_bin_window(self, bin_size=None, window_size=None):
    """Set the bin and window sizes (seconds), then refresh the status bar."""
    new_bin = bin_size or self.bin_size
    new_window = window_size or self.window_size
    # sanity-check: both in (1e-6, 1e3) and the bin strictly smaller
    assert 1e-6 < new_bin < 1e3
    assert 1e-6 < new_window < 1e3
    assert new_bin < new_window
    self.bin_size = new_bin
    self.window_size = new_window
    # Set the status message (durations shown in milliseconds).
    self.set_status('Bin: {:.1f} ms. Window: {:.1f} ms.'.format(
        self.bin_size * 1000, self.window_size * 1000))
Set the bin and window sizes.
def join(cls, diffs: Iterable['DBDiff']) -> 'DBDiff':
    """Merge several DBDiff objects into one.

    In case of a conflict, changes from diffs appearing later in
    ``diffs`` overwrite changes from earlier ones.
    """
    merged = DBDiffTracker()
    for single_diff in diffs:
        single_diff.apply_to(merged)
    return merged.diff()
Join several DBDiff objects into a single DBDiff object. In case of a conflict, changes in diffs that come later in ``diffs`` will overwrite changes from earlier changes.