code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def init(module_paths, work_db, config):
    """Clear and initialize a work-db with work items.

    Any existing data in the work-db (including results) is removed and
    replaced with entirely new work orders.

    Args:
        module_paths: iterable of pathlib.Paths of modules to mutate.
        work_db: a `WorkDB` instance into which the work orders are saved.
        config: the configuration for the new session.
    """
    op_names = cosmic_ray.plugins.operator_names()
    work_db.set_config(config=config)
    work_db.clear()
    for path in module_paths:
        tree = get_ast(path, python_version=config.python_version)
        for name in op_names:
            op = get_operator(name)(config.python_version)
            WorkDBInitVisitor(path, name, work_db, op).walk(tree)
    apply_interceptors(work_db, config.sub('interceptors').get('enabled', ()))
Clear and initialize a work-db with work items. Any existing data in the work-db will be cleared and replaced with entirely new work orders. In particular, this means that any results in the db are removed. Args: module_paths: iterable of pathlib.Paths of modules to mutate. work_db: A `WorkDB` instance into which the work orders will be saved. config: The configuration for the new session.
def dependencies(self, task, params=None, **options):
    """Return the compact representations of all dependencies of a task.

    Args:
        task: the task to get dependencies of.
        params: optional dict of request parameters.
        **options: extra options forwarded to the client request.
    """
    # Fix: the original used a shared mutable default (params={}).
    if params is None:
        params = {}
    path = "/tasks/%s/dependencies" % (task,)
    return self.client.get(path, params, **options)
Returns the compact representations of all of the dependencies of a task. Parameters ---------- task : {Id} The task to get dependencies on. [params] : {Object} Parameters for the request
def cli(obj):
    """Display API server switch status and usage metrics."""
    client = obj['client']
    metrics = client.mgmt_status()['metrics']
    headers = {'title': 'METRIC', 'type': 'TYPE', 'name': 'NAME',
               'value': 'VALUE', 'average': 'AVERAGE'}
    rows = []
    for metric in metrics:
        is_timer = metric['type'] == 'timer'
        rows.append({
            'title': metric['title'],
            'type': metric['type'],
            'name': '{}.{}'.format(metric['group'], metric['name']),
            'value': metric.get('value', None) or metric.get('count', 0),
            'average': (int(metric['totalTime']) * 1.0 / int(metric['count'])
                        if is_timer else None),
        })
    click.echo(tabulate(rows, headers=headers, tablefmt=obj['output']))
Display API server switch status and usage metrics.
def fishrot(k=20, n=100, dec=0, inc=90, di_block=True):
    """Generate Fisher-distributed unit vectors about a mean direction.

    Uses pmag.fshdev and pmag.dodirot to draw and rotate each vector.

    Parameters
    ----------
    k : kappa precision parameter (default 20)
    n : number of vectors to determine (default 100)
    dec, inc : mean declination / inclination of the distribution
    di_block : when True (default) return a nested list of
        [dec, inc, 1.0]; when False return two lists (decs, incs).
    """
    if di_block:
        directions = []
        for _ in range(n):
            d, i = pmag.fshdev(k)
            drot, irot = pmag.dodirot(d, i, dec, inc)
            directions.append([drot, irot, 1.])
        return directions
    declinations, inclinations = [], []
    for _ in range(n):
        d, i = pmag.fshdev(k)
        drot, irot = pmag.dodirot(d, i, dec, inc)
        declinations.append(drot)
        inclinations.append(irot)
    return declinations, inclinations
Generates Fisher distributed unit vectors from a specified distribution using the pmag.py fshdev and dodirot functions. Parameters ---------- k : kappa precision parameter (default is 20) n : number of vectors to determine (default is 100) dec : mean declination of distribution (default is 0) inc : mean inclination of distribution (default is 90) di_block : this function returns a nested list of [dec,inc,1.0] as the default if di_block = False it will return a list of dec and a list of inc Returns --------- di_block : a nested list of [dec,inc,1.0] (default) dec, inc : a list of dec and a list of inc (if di_block = False) Examples -------- >>> ipmag.fishrot(k=20, n=5, dec=40, inc=60) [[44.766285502555775, 37.440866867657235, 1.0], [33.866315796883725, 64.732532250463436, 1.0], [47.002912770597163, 54.317853800896977, 1.0], [36.762165614432547, 56.857240672884252, 1.0], [71.43950604474395, 59.825830945715431, 1.0]]
def contracts_deployed_path(
    chain_id: int,
    version: Optional[str] = None,
    services: bool = False,
):
    """Return the path of the deployment data JSON file for a chain."""
    data_path = contracts_data_path(version)
    chain_name = ID_TO_NETWORKNAME[chain_id] if chain_id in ID_TO_NETWORKNAME else 'private_net'
    prefix = 'services_' if services else ''
    return data_path.joinpath('deployment_{}{}.json'.format(prefix, chain_name))
Returns the path of the deployment data JSON file.
def _respond(self, resp):
    """Deliver *resp* to the waiting response queue and reset parser state."""
    waiting_queue = self._response_queues.get(timeout=0.1)
    waiting_queue.put(resp)
    self._completed_response_lines = []
    self._is_multiline = None
Respond to the person waiting
def _build(self, items, chunk_size=10000):
    """Build the output, in chunks.

    Feeds items onto the internal queue and triggers a run (sequential
    or parallel) every *chunk_size* items, stopping early on failure.

    :return: Number of items processed
    :rtype: int
    """
    _log.debug("_build, chunk_size={:d}".format(chunk_size))
    n, i = 0, 0
    for i, item in enumerate(items):
        if i == 0:
            _log.debug("_build, first item")
        # Every chunk_size items, run the queued work.
        if 0 == (i + 1) % chunk_size:
            if self._seq:
                self._run(0)
            else:
                self._run_parallel_fn()
            # Stop feeding new items as soon as a failure is seen.
            if self._status.has_failures():
                break
            n = i + 1
        self._queue.put(item)
    # Run the final (possibly partial) chunk.
    if self._seq:
        self._run(0)
    else:
        self._run_parallel_fn()
    if not self._status.has_failures():
        n = i + 1
    return n
Build the output, in chunks. :return: Number of items processed :rtype: int
def size_limit(self):
    """Get the size limit for this bucket.

    The limit is the minimum value produced by the registered file size
    limiters, or None when no limiter applies.
    """
    applicable = []
    for lim in current_files_rest.file_size_limiters(self):
        if lim.limit is not None:
            applicable.append(lim)
    return min(applicable) if applicable else None
Get size limit for this bucket. The limit is based on the minimum output of the file size limiters.
def alien_filter(packages, sizes):
    """Drop duplicate packages (by base name) from an alien repository list.

    Args:
        packages: iterable of package file names.
        sizes: iterable of sizes, parallel to *packages*.

    Returns:
        Tuple (packages, sizes) keeping only the first occurrence of
        each package base name.
    """
    seen = set()  # O(1) membership test instead of scanning a list
    npkg, nsize = [], []
    for pkg, size in zip(packages, sizes):
        name = split_package(pkg)[0]
        if name not in seen:
            seen.add(name)
            npkg.append(pkg)
            nsize.append(size)
    return npkg, nsize
This filter avoids listing duplicate packages from the alien repository.
def hacking_python3x_octal_literals(logical_line, tokens, noqa):
    r"""Check for octal literals in Python 3.x compatible form.

    As of Python 3.x, the construct "0755" has been removed.
    Use "0o755" instead.

    Okay: f(0o755)
    Okay: 'f(0755)'
    Okay: f(755)
    Okay: f(0)
    Okay: f(000)
    H232: f(0755)
    Okay: f(0755)  # noqa
    """
    if noqa:
        return
    for tok_type, text, _, _, _ in tokens:
        if tok_type != tokenize.NUMBER:
            continue
        match = RE_OCTAL.match(text)
        if match:
            yield 0, ("H232: Python 3.x incompatible octal %s should be "
                      "written as 0o%s " % (match.group(0)[1:],
                                            match.group(1)))
r"""Check for octal literals in Python 3.x compatible form. As of Python 3.x, the construct "0755" has been removed. Use "0o755" instead". Okay: f(0o755) Okay: 'f(0755)' Okay: f(755) Okay: f(0) Okay: f(000) Okay: MiB = 1.0415 H232: f(0755) Okay: f(0755) # noqa
def step_definition(step_name):
    """Get the source of a step function.

    The returned object has keys "filename", "lineno", "text" and
    "html": "text" is the raw function source, "html" is marked up by
    Pygments.
    """
    if not orca.is_step(step_name):
        abort(404)
    filename, lineno, source = orca.get_step(step_name).func_source_data()
    highlighted = highlight(source, PythonLexer(), HtmlFormatter())
    return jsonify(filename=filename, lineno=lineno,
                   text=source, html=highlighted)
Get the source of a step function. Returned object has keys "filename", "lineno", "text" and "html". "text" is the raw text of the function, "html" has been marked up by Pygments.
def __parse_config(self):
    """Invoke the config file parser on the configured file, if any."""
    if not self.should_parse_config:
        return
    source = self.args.config or self.config_file
    if not source:
        return
    # NOTE(review): SafeConfigParser is deprecated on Python 3 — confirm
    # whether this code targets the Python 2 ConfigParser module.
    self.config = ConfigParser.SafeConfigParser()
    self.config.read(source)
Invoke the config file parser.
def interruptRead(self, endpoint, size, timeout=100):
    r"""Perform an interrupt read request to the specified endpoint.

    Arguments:
        endpoint: endpoint number.
        size: number of bytes to read.
        timeout: operation timeout in milliseconds. (default: 100)

    Returns a tuple with the data read.
    """
    return self.dev.read(endpoint, size, timeout)
r"""Performs an interrupt read request to the specified endpoint. Arguments: endpoint: endpoint number. size: number of bytes to read. timeout: operation timeout in milliseconds. (default: 100) Returns a tuple with the data read.
def lookup_forward(name):
    """Perform a forward DNS lookup of a hostname.

    Returns:
        dict with keys 'ipv4' and/or 'ipv6' mapping to one address of
        each family (the last address seen for a family wins).
    """
    ip_addresses = {}
    # Deduplicate addresses returned per socket type/protocol combination.
    # Note: list() never yields None, so the original dead
    # `if addresses is None` check has been removed.
    addresses = list(set(str(info[4][0])
                         for info in socket.getaddrinfo(name, None)))
    for address in addresses:
        parsed = ipaddress.ip_address(address)
        if isinstance(parsed, ipaddress.IPv4Address):
            ip_addresses['ipv4'] = address
        elif isinstance(parsed, ipaddress.IPv6Address):
            ip_addresses['ipv6'] = address
    return ip_addresses
Perform a forward lookup of a hostname.
def CopyToIsoFormat(cls, timestamp, timezone=pytz.UTC, raise_error=False):
    """Copy the timestamp to an ISO 8601 formatted string.

    Args:
        timestamp: integer number of microseconds since
            January 1, 1970 00:00:00 UTC.
        timezone: optional timezone (instance of pytz.timezone).
        raise_error: when True, do not absorb an OverflowError if the
            timestamp is out of bounds.

    Returns:
        A string containing an ISO 8601 formatted date and time.
    """
    converted = cls.CopyToDatetime(timestamp, timezone, raise_error=raise_error)
    return converted.isoformat()
Copies the timestamp to an ISO 8601 formatted string. Args: timestamp: The timestamp which is an integer containing the number of micro seconds since January 1, 1970, 00:00:00 UTC. timezone: Optional timezone (instance of pytz.timezone). raise_error: Boolean that if set to True will not absorb an OverflowError if the timestamp is out of bounds. By default there will be no error raised. Returns: A string containing an ISO 8601 formatted date and time.
def bind_sockets(address, port):
    """Bind sockets to a port on an address.

    Args:
        address (str): address to bind on, e.g. ``"localhost"``.
        port (int): port to bind; pass 0 to let the OS pick a free port.

    Returns:
        (sockets, port): the bound sockets and the actually bound port.
    """
    sockets = netutil.bind_sockets(port=port or 0, address=address)
    assert len(sockets)
    bound_ports = {sock.getsockname()[1] for sock in sockets}
    assert len(bound_ports) == 1, "Multiple ports assigned??"
    actual_port = bound_ports.pop()
    if port:
        assert actual_port == port
    return sockets, actual_port
Bind a socket to a port on an address. Args: address (str) : An address to bind a port on, e.g. ``"localhost"`` port (int) : A port number to bind. Pass 0 to have the OS automatically choose a free port. This function returns a 2-tuple with the new socket as the first element, and the port that was bound as the second. (Useful when passing 0 as a port number to bind any free port.) Returns: (socket, port)
def dependencies(self) -> List[Dependency]:
    """Return the PB dependencies.

    The dependency list is stored in the DB as the string repr of a
    list; parse it safely and wrap each entry in a Dependency object.
    """
    raw = DB.get_hash_value(self.key, 'dependencies')
    return [Dependency(entry) for entry in ast.literal_eval(raw)]
Return the PB dependencies.
def _read_current_marker(self):
    """Read the current marker and return its genotypes.

    The packed BED bytes index into the genotype lookup table; the
    result is flattened (C order) and trimmed to the number of samples.
    """
    raw = np.frombuffer(self._bed.read(self._nb_bytes), dtype=np.uint8)
    genotypes = self._geno_values[raw]
    return genotypes.flatten(order="C")[:self._nb_samples]
Reads the current marker and returns its genotypes.
def check(self, _):
    """Check that this configure's recipes are valid YAML.

    Returns:
        True when the recipes parse or when PyYAML is unavailable.

    Raises:
        RADLParseException: when the recipes are not valid YAML.
    """
    try:
        import yaml
    except ImportError:
        # Fix: the original used a bare except, which would also swallow
        # KeyboardInterrupt/SystemExit. Without PyYAML we cannot
        # validate, so accept the recipes as-is.
        return True
    try:
        yaml.safe_load(self.recipes)
    except Exception as e:
        raise RADLParseException("Invalid YAML code: %s." % e, line=self.line)
    return True
Check this configure.
def add_config_source(self, config_source, position=None):
    """Add a config source to the current ConfigResolver instance.

    When *position* is None the source is appended, i.e. inserted with
    the lowest priority.
    """
    if position is None:
        position = len(self._config_sources)
    self._config_sources.insert(position, config_source)
Add a config source to the current ConfigResolver instance. If position is not set, this source will be inserted with the lowest priority.
def _get_all_volumes_paths(conn):
    """Extract the path and backing-store paths of all valid volumes.

    :param conn: libvirt connection to use
    :return: dict mapping volume path -> list of backing store paths
    """
    volumes = []
    for pool in conn.listAllStoragePools():
        volumes.extend(pool.listAllVolumes())
    result = {}
    for vol in volumes:
        if not _is_valid_volume(vol):
            continue
        xml = ElementTree.fromstring(vol.XMLDesc())
        result[vol.path()] = [node.text
                              for node in xml.findall('.//backingStore/path')]
    return result
Extract the path and backing stores path of all volumes. :param conn: libvirt connection to use
def _chunk(self, response, size=4096):
    """Download a web response in pieces, transparently gunzipping.

    Yields decompressed chunks when the response is gzip-encoded,
    raw chunks otherwise.
    """
    if response.headers.get("content-encoding") == "gzip":
        # 16 + MAX_WBITS tells zlib to expect a gzip header/trailer.
        decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS)
        # Fix: the original executed `del data` after the loop, which
        # raised NameError when the response body was empty (the loop
        # never ran, so `data` was never bound).
        while True:
            block = response.read(size)
            if not block:
                break
            yield decompressor.decompress(block)
    else:
        while True:
            block = response.read(size)
            if not block:
                break
            yield block
downloads a web response in pieces
def write_to_disk(filename, delete=False, content=None):
    """Write *filename* out to disk, or delete it.

    Does nothing when the file's parent directory does not exist.

    Args:
        filename: path of the file to write or delete.
        delete: when True, remove the file instead of writing it.
        content: text to write; defaults to get_time(). Fix: the
            original default `content=get_time()` was evaluated once at
            import time, freezing the timestamp for every call.
    """
    if not os.path.exists(os.path.dirname(filename)):
        return
    if delete:
        if os.path.lexists(filename):
            os.remove(filename)
    else:
        if content is None:
            content = get_time()
        with open(filename, 'wb') as f:
            f.write(content.encode('utf-8'))
Write filename out to disk
def create(self, ami, count, config=None):
    """Create *count* instances of *ami* using the launcher."""
    launcher = self.Launcher(config=config)
    return launcher.launch(ami, count)
Create an instance using the launcher.
def load_all(cls, vr, params=None):
    """Create instances of all objects found by querying *vr*."""
    documents = vr.query(cls.base, params)
    return [cls(vr, doc) for doc in documents]
Create instances of all objects found
def logout(request, redirect_url=settings.LOGOUT_REDIRECT_URL):
    """Log the user out and redirect to *redirect_url*.

    Nothing hidden here; strip this out if your application already has
    hooks to handle logout.
    """
    django_logout(request)
    return HttpResponseRedirect(request.build_absolute_uri(redirect_url))
Nothing hilariously hidden here, logs a user out. Strip this out if your application already has hooks to handle this.
def next(self):
    """Get next line, adjust for year rollover and hint datetime format.

    Raises StopIteration at end of file. Caches the detected datetime
    format/position on self so subsequent lines can be parsed faster.
    """
    line = self.filehandle.readline()
    line = line.decode('utf-8', 'replace')
    if line == '':
        raise StopIteration
    line = line.rstrip('\n')
    le = LogEvent(line)
    # If a datetime format was previously detected, try it as a hint first.
    if self._datetime_format and self._datetime_nextpos is not None:
        ret = le.set_datetime_hint(self._datetime_format,
                                   self._datetime_nextpos,
                                   self.year_rollover)
        if not ret:
            # Hint failed: drop the cache and fall back to full parsing.
            self._datetime_format = None
            self._datetime_nextpos = None
    elif le.datetime:
        # Remember the format of the first successfully parsed line.
        self._datetime_format = le.datetime_format
        self._datetime_nextpos = le._datetime_nextpos
    return le
Get next line, adjust for year rollover and hint datetime format.
def ping():
    """Is the chassis responding?

    :return: False if the chassis didn't respond, True otherwise.
    """
    r = __salt__['dracr.system_info'](host=DETAILS['host'],
                                      admin_username=DETAILS['admin_username'],
                                      admin_password=DETAILS['admin_password'])
    # Fix: the original contained an unreachable try/except block
    # (returning r['dict'].get('ret', False)) after this return.
    return r.get('retcode', 0) != 1
Is the chassis responding? :return: Returns False if the chassis didn't respond, True otherwise.
def get_chinese_text():
    """Download the chinese_text dataset and unzip it into ./data."""
    if not os.path.isdir("data/"):
        os.system("mkdir data/")
    have_pos = os.path.exists('data/pos.txt')
    have_neg = os.path.exists('data/neg')
    if not (have_pos and have_neg):
        # NOTE(review): shells out to wget/unzip; requires both tools on PATH.
        os.system("wget -q https://raw.githubusercontent.com/dmlc/web-data/"
                  "master/mxnet/example/chinese_text.zip -P data/")
        os.chdir("./data")
        os.system("unzip -u chinese_text.zip")
        os.chdir("..")
Download the chinese_text dataset and unzip it
def _on_move(self, event):
    """Make the cross follow the cursor, clamped to the widget area."""
    width = self.winfo_width()
    height = self.winfo_height()
    x = max(0, min(event.x, width))
    y = max(0, min(event.y, height))
    self.coords('cross_h', 0, y, width, y)
    self.coords('cross_v', x, 0, x, height)
    self.event_generate("<<ColorChanged>>")
Make the cross follow the cursor.
def spoken_number(num: str) -> str:
    """Return the spoken version of a number.

    Ex: 1.2 -> one point two
        1 1/2 -> one and one half
    """
    spoken_parts = []
    for part in num.split(' '):
        if part in FRACTIONS:
            spoken_parts.append(FRACTIONS[part])
        else:
            digits = [NUMBER_REPL[ch] for ch in part if ch in NUMBER_REPL]
            spoken_parts.append(' '.join(digits))
    return ' and '.join(spoken_parts)
Returns the spoken version of a number Ex: 1.2 -> one point two 1 1/2 -> one and one half
def insert(self, index, child, by_name_index=-1):
    """Add the child at the given index.

    :type index: ``int``
    :param index: child position
    :param child: an instance of an Element subclass
    :param by_name_index: position inside the per-name index (-1 appends)
    """
    if not self._can_add_child(child):
        return
    bucket = self.indexes.setdefault(child.name, [])
    if by_name_index == -1:
        bucket.append(child)
    else:
        bucket.insert(by_name_index, child)
    self.list.insert(index, child)
Add the child at the given index :type index: ``int`` :param index: child position :type child: :class:`Element <hl7apy.core.Element>` :param child: an instance of an :class:`Element <hl7apy.core.Element>` subclass
def add_property(self, prop):
    """Add a property to an object.

    The property is an instance of a Property (or derived) class.
    Adding a property disconnects this object from the collection of
    properties common to all objects of its class (copy-on-write of
    the shared _properties map).
    """
    if _debug:
        Object._debug("add_property %r", prop)
    self._properties = _copy(self._properties)
    self._properties[prop.identifier] = prop
    self._values[prop.identifier] = prop.default
Add a property to an object. The property is an instance of a Property or one of its derived classes. Adding a property disconnects it from the collection of properties common to all of the objects of its class.
def consider_member(name_member, member, module, class_=None):
    """Return True if the given member should be added to substitutions.

    Skips private names and imported modules. Members defined in foreign
    modules are only kept when they look like descriptors on *class_*,
    and never when they originate from hydpy or an unrelated package.
    """
    if name_member.startswith('_'):
        return False
    if inspect.ismodule(member):
        return False
    real_module = getattr(member, '__module__', None)
    if not real_module:
        # Builtins / C-level objects carry no module info: keep them.
        return True
    if real_module != module.__name__:
        if class_ and hasattr(member, '__get__'):
            return True
        if 'hydpy' in real_module:
            return False
        if module.__name__ not in real_module:
            return False
    return True
Return |True| if the given member should be added to the substitutions. If not return |False|. Some examples based on the site-package |numpy|: >>> from hydpy.core.autodoctools import Substituter >>> import numpy A constant like |nan| should be added: >>> Substituter.consider_member( ... 'nan', numpy.nan, numpy) True Members with a prefixed underscore should not be added: >>> Substituter.consider_member( ... '_NoValue', numpy._NoValue, numpy) False Members that are actually imported modules should not be added: >>> Substituter.consider_member( ... 'warnings', numpy.warnings, numpy) False Members that are actually defined in other modules should not be added: >>> numpy.Substituter = Substituter >>> Substituter.consider_member( ... 'Substituter', numpy.Substituter, numpy) False >>> del numpy.Substituter Members that are defined in submodules of a given package (either from the standard library or from site-packages) should be added... >>> Substituter.consider_member( ... 'clip', numpy.clip, numpy) True ...but not members defined in *HydPy* submodules: >>> import hydpy >>> Substituter.consider_member( ... 'Node', hydpy.Node, hydpy) False For descriptor instances (with method `__get__`) beeing members of classes should be added: >>> from hydpy.auxs import anntools >>> Substituter.consider_member( ... 'shape_neurons', anntools.ANN.shape_neurons, ... anntools, anntools.ANN) True
def get_cardinality(self, node=None):
    """Return the cardinality of *node*, or of every variable.

    When *node* is given, returns that variable's cardinality (its CPD
    must already have been added to the network). Otherwise returns a
    defaultdict(int) mapping each variable to its cardinality.
    """
    if node:
        return self.get_cpds(node).cardinality[0]
    cardinalities = defaultdict(int)
    for cpd in self.cpds:
        cardinalities[cpd.variable] = cpd.cardinality[0]
    return cardinalities
Returns the cardinality of the node. Throws an error if the CPD for the queried node hasn't been added to the network. Parameters ---------- node: Any hashable python object(optional). The node whose cardinality we want. If node is not specified returns a dictionary with the given variable as keys and their respective cardinality as values. Returns ------- int or dict : If node is specified returns the cardinality of the node. If node is not specified returns a dictionary with the given variable as keys and their respective cardinality as values. Examples -------- >>> from pgmpy.models import BayesianModel >>> from pgmpy.factors.discrete import TabularCPD >>> student = BayesianModel([('diff', 'grade'), ('intel', 'grade')]) >>> cpd_diff = TabularCPD('diff',2,[[0.6,0.4]]); >>> cpd_intel = TabularCPD('intel',2,[[0.7,0.3]]); >>> cpd_grade = TabularCPD('grade', 2, [[0.1, 0.9, 0.2, 0.7], ... [0.9, 0.1, 0.8, 0.3]], ... ['intel', 'diff'], [2, 2]) >>> student.add_cpds(cpd_diff,cpd_intel,cpd_grade) >>> student.get_cardinality() defaultdict(int, {'diff': 2, 'grade': 2, 'intel': 2}) >>> student.get_cardinality('intel') 2
def scene_remove(sequence_number, scene_id):
    """Create a scene.rm message."""
    writer = MessageWriter().string("scene.rm")
    return writer.uint64(sequence_number).uint32(scene_id).get()
Create a scene.rm message
def unique(self):
    """Compute the ExtensionArray of unique values.

    Returns
    -------
    uniques : ExtensionArray
    """
    from pandas import unique as pd_unique
    distinct = pd_unique(self.astype(object))
    return self._from_sequence(distinct, dtype=self.dtype)
Compute the ExtensionArray of unique values. Returns ------- uniques : ExtensionArray
def start(self):
    """Start Crossbar.io in a subprocess.

    Raises:
        WampyError: if the router has already been started.
    """
    if self.started:
        raise WampyError("Router already started")
    cmd = [
        'crossbar', 'start',
        '--cbdir', self.crossbar_directory,
        '--config', self.config_path,
    ]
    # setsid puts crossbar in its own process group so it can be
    # signalled independently of this process.
    self.proc = subprocess.Popen(cmd, preexec_fn=os.setsid)
    self._wait_until_ready()
    # Fix: the original log message misspelled "Crosbar.io".
    logger.info(
        "Crossbar.io is ready for connections on %s (IPV%s)",
        self.url, self.ipv
    )
    self.started = True
Start Crossbar.io in a subprocess.
def filter(self, filter_func, reverse=False):
    """Filter current log lines by a given filter function.

    Allows drilling down into the log, e.g. keeping only lines for a
    given IP before running further commands on them.

    :param filter_func: predicate applied to each valid log line
        (see filters.py for available filters).
    :param reverse: negate the filter (keep lines returning ``False``).
    :returns: a new Log containing only the lines that passed.
    :rtype: :class:`Log`
    """
    filtered = Log()
    filtered.logfile = self.logfile
    filtered.total_lines = 0
    filtered._valid_lines = []
    filtered._invalid_lines = self._invalid_lines[:]
    for line in self._valid_lines:
        keep = not filter_func(line) if reverse else filter_func(line)
        if keep:
            filtered.total_lines += 1
            filtered._valid_lines.append(line)
    return filtered
Filter current log lines by a given filter function. This allows to drill down data out of the log file by filtering the relevant log lines to analyze. For example, filter by a given IP so only log lines for that IP are further processed with commands (top paths, http status counter...). :param filter_func: [required] Filter method, see filters.py for all available filters. :type filter_func: function :param reverse: negate the filter (so accept all log lines that return ``False``). :type reverse: boolean :returns: a new instance of Log containing only log lines that passed the filter function. :rtype: :class:`Log`
def get_boundaries(self, filter_type, value):
    """Compute the (start, end, exclude) boundaries for a sorted-set command.

    ``exclude`` is always None because the restriction is expressed
    directly in the boundaries: a ``(`` prefix means exclusive, while
    ``-inf``/``+inf`` denote open ends (zrangebyscore semantics).
    """
    assert filter_type in self.handled_suffixes
    exclusive = '(%s' % value
    if filter_type in (None, 'eq'):
        return value, value, None
    if filter_type == 'gt':
        return exclusive, '+inf', None
    if filter_type == 'gte':
        return value, '+inf', None
    if filter_type == 'lt':
        return '-inf', exclusive, None
    if filter_type == 'lte':
        return '-inf', value, None
    return '-inf', '+inf', None
Compute the boundaries to pass to the sorted-set command depending of the filter type The third return value, ``exclude`` is always ``None`` because we can easily restrict the score to filter on in the sorted-set. For the parameters, see BaseRangeIndex.store Notes ----- For zrangebyscore: - `(` means "not included" - `-inf` alone means "from the very beginning" - `+inf` alone means "to the very end"
def HMAC(self, message, use_sha256=False):
    """Calculate the HMAC for a given message."""
    hmac_ctx = self._NewHMAC(use_sha256=use_sha256)
    hmac_ctx.update(message)
    return hmac_ctx.finalize()
Calculates the HMAC for a given message.
def convert_type(df, columns):
    """Attempt to convert the given columns to their natural dtype.

    Tries, per column and in order: boolean ('True'/'False' strings),
    numeric, then datetime; columns matching none are left untouched.

    Args:
        df: input DataFrame (not modified; a copy is returned).
        columns: column names to attempt conversion on.

    Returns:
        A copy of *df* with converted columns.
    """
    out_df = df.copy()
    for col in columns:
        sample = pd.Series(out_df[col].unique())
        sample = sample[~sample.isnull()]
        if len(sample) == 0:
            continue
        # Fix: the original used a strict subset test (<), so a column
        # containing BOTH 'True' and 'False' was never converted.
        if set(sample.values) <= {'True', 'False'}:
            out_df[col] = out_df[col].map({'True': True, 'False': False})
        elif pd.to_numeric(sample, errors='coerce').isnull().sum() == 0:
            # All non-null values were validated as numeric above, so
            # coercion cannot lose data ('ignore' is deprecated).
            out_df[col] = pd.to_numeric(out_df[col], errors='coerce')
        elif pd.to_datetime(sample, errors='coerce').isnull().sum() == 0:
            out_df[col] = pd.to_datetime(out_df[col], errors='coerce')
    return out_df
Helper function that attempts to convert columns into their appropriate data type.
def expose(rule, **options):
    """Decorator to add an url rule to a function.

    Rules are collected on the function's ``urls`` attribute; a
    list/tuple of rules is merged in as-is, a single rule is stored as a
    ``(rule, options)`` pair.
    """
    def decorator(func):
        urls = getattr(func, "urls", None)
        if urls is None:
            urls = []
            func.urls = urls
        if isinstance(rule, (list, tuple)):
            urls.extend(rule)
        else:
            urls.append((rule, options))
        return func
    return decorator
Decorator to add an url rule to a function
def start(self):
    """Start the actual syndic.

    If sub-classed, don't **ever** forget to run:
        super(YourSubClass, self).start()

    NOTE: Run any required code before calling `super()`.
    """
    super(Syndic, self).start()
    if check_user(self.config['user']):
        self.action_log_info('Starting up')
        self.verify_hash_type()
        try:
            # Blocks while the syndic processes events.
            self.syndic.tune_in()
        except KeyboardInterrupt:
            self.action_log_info('Stopping')
            self.shutdown()
Start the actual syndic. If sub-classed, don't **ever** forget to run: super(YourSubClass, self).start() NOTE: Run any required code before calling `super()`.
def show_plain_text(self, text):
    """Show *text* in the plugin's plain-text mode (not as code)."""
    self.switch_to_plugin()
    self.switch_to_plain_text()
    self.set_plain_text(text, is_code=False)
Show text in plain mode
def sysinfo2float(version_info=sys.version_info):
    """Convert a sys.version_info-compatible tuple into a 'canonic'
    floating-point number usable for magic-number lookup.

    Only valid for released CPython versions; for PyPy, Pyston or
    Jython an implementation suffix is appended to the version string
    first. See sysinfo2magic for interim versions.
    """
    vers_str = '.'.join(str(part) for part in version_info[0:3])
    if version_info[3] != 'final':
        vers_str += '.' + ''.join(str(part) for part in version_info[3:])
    if IS_PYPY:
        vers_str += 'pypy'
    else:
        try:
            # Renamed local: the original rebound the name `platform`,
            # shadowing the module.
            import platform
            impl = platform.python_implementation()
            if impl in ('Jython', 'Pyston'):
                vers_str += impl
        except (ImportError, AttributeError):
            pass
    return py_str2float(vers_str)
Convert a sys.versions_info-compatible list into a 'canonic' floating-point number which that can then be used to look up a magic number. Note that this can only be used for released version of C Python, not interim development versions, since we can't represent that as a floating-point number. For handling Pypy, pyston, jython, etc. and interim versions of C Python, use sysinfo2magic.
def to_json(self):
    """Serialize the complete Morse complex merge hierarchy.

    @ Out, a string object storing the entire merge hierarchy of all
    maxima plus the base partition label of every sample.
    """
    capsule = {"Hierarchy": [], "Partitions": []}
    for dying, (persistence, surviving, saddle) in self.merge_sequence.items():
        capsule["Hierarchy"].append({
            "Persistence": persistence,
            "Dying": dying,
            "Surviving": surviving,
            "Saddle": saddle,
        })
    labels = np.array([None] * len(self.Y))
    for label, items in self.base_partitions.items():
        labels[items] = label
    capsule["Partitions"] = labels.tolist()
    return json.dumps(capsule, separators=(",", ":"))
Writes the complete Morse complex merge hierarchy to a string object. @ Out, a string object storing the entire merge hierarchy of all maxima.
def _get_api_urls(self, api_urls=None):
    """Complete a dict with the CRUD urls of the API.

    :param api_urls: dict of {'<FUNCTION>': '<URL>', ...} to extend.
    :return: the dict with the CRUD urls of the base API filled in.
    """
    view_name = self.__class__.__name__
    api_urls = api_urls or {}
    api_urls["read"] = url_for(view_name + ".api_read")
    api_urls["delete"] = url_for(view_name + ".api_delete", pk="")
    api_urls["create"] = url_for(view_name + ".api_create")
    api_urls["update"] = url_for(view_name + ".api_update", pk="")
    return api_urls
Completes a dict with the CRUD urls of the API. :param api_urls: A dict with the urls {'<FUNCTION>':'<URL>',...} :return: A dict with the CRUD urls of the base API.
def serve_forever(django=False):
    """Start the gevent-socketio server for the IRC web client."""
    logger = getLogger("irc.dispatch")
    logger.setLevel(settings.LOG_LEVEL)
    logger.addHandler(StreamHandler())
    app = IRCApplication(django)
    server = SocketIOServer((settings.HTTP_HOST, settings.HTTP_PORT), app)
    # Fix: parenthesized so this line is valid on both Python 2 and 3
    # (the original used a Python 2 print statement).
    print("%s [Bot: %s] listening on %s:%s" % (
        settings.GNOTTY_VERSION_STRING,
        app.bot.__class__.__name__,
        settings.HTTP_HOST,
        settings.HTTP_PORT,
    ))
    server.serve_forever()
Starts the gevent-socketio server.
def parse_transmission_id(header, reference_id=None):
    """Return the transmission ID of the cable, or None if absent.

    `header`: the cable's header.
    `reference_id`: the cable's reference ID (accepted for API symmetry,
    not used here).
    """
    match = _TID_PATTERN.search(header)
    return match.group(1) if match else None
\ Returns the transmission ID of the cable. If no transmission identifier was found, ``None`` is returned. `header` The cable's header `reference_id` The cable's reference ID.
def get(self, column_name, default=None):
    """Return the Datum for *column_name*, or *default* when absent."""
    try:
        index = self.table.default_columns.index(column_name)
    except ValueError:
        return default
    return Datum(self.cells[index], self.row_num, column_name, self.table)
Return the Datum for column_name, or default.
def _filenames_from_arg(filename):
    """Normalize a polymorphic filename argument to a sequence of names.

    Args:
        filename: a single path string, or a list/tuple of path strings.

    Raises:
        TypeError: when *filename* is neither a string nor a list/tuple.
            (Fix: the original raised the overly generic Exception;
            TypeError is more precise and still caught by callers
            handling Exception.)
        ValueError: when any path is missing or not a regular file.
    """
    if isinstance(filename, string_types):
        filenames = [filename]
    elif isinstance(filename, (list, tuple)):
        filenames = filename
    else:
        raise TypeError('filename argument must be string, list or tuple')
    for fn in filenames:
        if not os.path.exists(fn):
            raise ValueError('file not found: %s' % fn)
        if not os.path.isfile(fn):
            raise ValueError('not a file: %s' % fn)
    return filenames
Utility function to deal with polymorphic filenames argument.
def get_account_info(self):
    """Fetch current account information.

    The response is stored on `self.account` so fields can be accessed
    later, e.g.:

        >>> acct = hsclient.get_account_info()
        >>> print(acct.email_address)

    Returns:
        An Account object.
    """
    request = self._get_request()
    data = request.get(self.ACCOUNT_INFO_URL)
    self.account.json_data = data["account"]
    return self.account
Get current account information The information then will be saved in `self.account` so that you can access the information like this: >>> hsclient = HSClient() >>> acct = hsclient.get_account_info() >>> print acct.email_address Returns: An Account object
def _highlight_path(self, hl_path, tf):
    """Highlight (tf=True) or unhighlight a single treeview entry.

    Errors from the treeview are logged rather than propagated.
    """
    font_color = self.settings.get('row_font_color', 'green')
    try:
        self.treeview.highlight_path(hl_path, tf, font_color=font_color)
    except Exception as exc:
        self.logger.info('Error changing highlight on treeview path '
                         '({0}): {1}'.format(hl_path, str(exc)))
Highlight or unhighlight a single entry. Examples -------- >>> hl_path = self._get_hl_key(chname, image) >>> self._highlight_path(hl_path, True)
def factorize_and_solve(self, A, b):
    """Factorize A, then solve Ax = b.

    Returns
    -------
    x : vector
    """
    self.factorize(A)
    return self.solve(b)
Factorizes A and solves Ax=b. Returns ------- x : vector
def do_search(self, arg=None):
    """Run the search over all categories and close the panel."""
    matching = []
    for cat in self.cats:
        result = cat.search(self.inputs.text, depth=self.inputs.depth)
        # list() materializes the result so emptiness can be checked.
        if len(list(result)) > 0:
            matching.append(result)
    if matching:
        self.done_callback(matching)
    self.visible = False
Do search and close panel
def lrun(command, *args, **kwargs):
    """Run a local command from the project root (prefixes `cd ROOT &&`)."""
    return run('cd {0} && {1}'.format(ROOT, command), *args, **kwargs)
Run a local command from project root
def _compute_matrix(cls, sentences, weighting='frequency', norm=None):
    """Compute the matrix of term frequencies for a list of sentences.

    Args:
        sentences: iterable of sentence strings.
        weighting: 'binary', 'frequency' or 'tfidf'.
        norm: 'l1', 'l2' or None — row normalization to apply.

    Raises:
        ValueError: on an unknown *norm* or *weighting* value.
    """
    if norm not in ('l1', 'l2', None):
        raise ValueError('Parameter "norm" can only take values "l1", "l2" or None')
    weighting = weighting.lower()
    if weighting == 'binary':
        vectorizer = CountVectorizer(min_df=1, ngram_range=(1, 1),
                                     binary=True, stop_words=None)
    elif weighting == 'frequency':
        vectorizer = CountVectorizer(min_df=1, ngram_range=(1, 1),
                                     binary=False, stop_words=None)
    elif weighting == 'tfidf':
        vectorizer = TfidfVectorizer(min_df=1, ngram_range=(1, 1),
                                     stop_words=None)
    else:
        raise ValueError('Parameter "method" must take one of the values "binary", "frequency" or "tfidf".')
    frequency_matrix = vectorizer.fit_transform(sentences).astype(float)
    # norm was already validated above; the original re-validated it here
    # with an unreachable elif branch (dead code, now removed).
    if norm in ('l1', 'l2'):
        frequency_matrix = normalize(frequency_matrix, norm=norm, axis=1)
    return frequency_matrix
Compute the matrix of term frequencies given a list of sentences
def match_end_date(self, start, end, match):
    """Match temporals whose effective end date is within [start, end].

    arg: start (osid.calendaring.DateTime): start of date range
    arg: end (osid.calendaring.DateTime): end of date range
    arg: match (boolean): True for a positive match; False is not
        currently supported.
    raise: InvalidArgument - end is before start, or match is False
    """
    if not match:
        raise errors.InvalidArgument('match = False not currently supported')
    if end < start:
        raise errors.InvalidArgument('end date must be >= start date when match = True')
    self._query_terms['endDate'] = {'$gte': start, '$lte': end}
Matches temporals whose effective end date falls in between the given dates inclusive. arg: start (osid.calendaring.DateTime): start of date range arg: end (osid.calendaring.DateTime): end of date range arg: match (boolean): ``true`` if a positive match, ``false`` for negative match raise: InvalidArgument - ``start`` is less than ``end`` raise: NullArgument - ``start`` or ``end`` is ``null`` *compliance: mandatory -- This method must be implemented.*
def attach(session_type, address=None, port=None, path=None, argv=None, decode=None):
    """Provide a nicer interface to create python api sessions.

    Instead of building a session and wrapping it manually, use:

        nvim = attach('tcp', address=<address>, port=<port>)
        nvim = attach('socket', path=<path>)
        nvim = attach('child', argv=<argv>)
        nvim = attach('stdio')

    Close the session when done (``nvim.close()``), or use the returned
    object as a context manager.
    """
    if session_type == 'tcp':
        session = tcp_session(address, port)
    elif session_type == 'socket':
        session = socket_session(path)
    elif session_type == 'stdio':
        session = stdio_session()
    elif session_type == 'child':
        session = child_session(argv)
    else:
        raise Exception('Unknown session type "%s"' % session_type)
    if decode is None:
        decode = IS_PYTHON3
    return Nvim.from_session(session).with_decode(decode)
Provide a nicer interface to create python api sessions. Previous machinery to create python api sessions is still there. This only creates a facade function to make things easier for the most usual cases. Thus, instead of: from pynvim import socket_session, Nvim session = tcp_session(address=<address>, port=<port>) nvim = Nvim.from_session(session) You can now do: from pynvim import attach nvim = attach('tcp', address=<address>, port=<port>) And also: nvim = attach('socket', path=<path>) nvim = attach('child', argv=<argv>) nvim = attach('stdio') When the session is not needed anymore, it is recommended to explicitly close it: nvim.close() It is also possible to use the session as a context mangager: with attach('socket', path=thepath) as nvim: print(nvim.funcs.getpid()) print(nvim.current.line) This will automatically close the session when you're done with it, or when an error occured.
def _open_tracing_interface(self, conn_id, callback):
    """Enable the debug tracing interface for this IOTile device.

    Args:
        conn_id (int): unique identifier for the connection.
        callback: called as callback(conn_id, adapter_id, success,
            failure_reason) when this command finishes.
    """
    try:
        handle = self._find_handle(conn_id)
        services = self._connections[handle]['services']
    except (ValueError, KeyError):
        callback(conn_id, self.id, False,
                 'Connection closed unexpectedly before we could open the streaming interface')
        return
    self._command_task.async_command(
        ['_enable_tracing', handle, services],
        self._on_interface_finished,
        {'connection_id': conn_id, 'callback': callback})
Enable the debug tracing interface for this IOTile device Args: conn_id (int): the unique identifier for the connection callback (callback): Callback to be called when this command finishes callback(conn_id, adapter_id, success, failure_reason)
def dehydrate_point(value):
    """Dehydrator for Point data.

    :param value: a Point with 2 or 3 dimensions
    :return: a Structure tagged b"X" (2D) or b"Y" (3D)
    :raises ValueError: for any other dimensionality
    """
    dim = len(value)
    if dim == 2:
        tag = b"X"
    elif dim == 3:
        tag = b"Y"
    else:
        raise ValueError("Cannot dehydrate Point with %d dimensions" % dim)
    return Structure(tag, value.srid, *value)
Dehydrator for Point data. :param value: :type value: Point :return:
def construct_optional_traversal_tree(complex_optional_roots, location_to_optional_roots):
    """Return a tree of complex optional root locations.

    Args:
        complex_optional_roots: list of @optional locations (each
            immediately preceding an @optional Traverse) that expand
            vertex fields.
        location_to_optional_roots: dict mapping location -> stack of
            optional root locations preceding the successive @optional
            scopes the location resides in.

    Returns:
        OptionalTraversalTree object for the complex optional roots.
    """
    tree = OptionalTraversalTree(complex_optional_roots)
    for optional_roots_stack in six.itervalues(location_to_optional_roots):
        tree.insert(list(optional_roots_stack))
    return tree
Return a tree of complex optional root locations. Args: complex_optional_roots: list of @optional locations (location immmediately preceding an @optional Traverse) that expand vertex fields location_to_optional_roots: dict mapping from location -> optional_roots where location is within some number of @optionals and optional_roots is a list of optional root locations preceding the successive @optional scopes within which the location resides Returns: OptionalTraversalTree object representing the tree of complex optional roots
def run_std_server(self):
    """Start a TensorFlow server and join the serving thread.

    Typically used for parameter servers. The server configuration
    (cluster spec, task type/index, protocol) comes from the
    estimator's RunConfig.

    Raises:
        ValueError: if not enough information is available in the
            estimator's config to create a server.
    """
    config = tf.estimator.RunConfig()
    server = tf.train.Server(
        config.cluster_spec,
        job_name=config.task_type,
        task_index=config.task_id,
        protocol=config.protocol)
    # join() blocks this thread until the server shuts down.
    server.join()
Starts a TensorFlow server and joins the serving thread. Typically used for parameter servers. Raises: ValueError: if not enough information is available in the estimator's config to create a server.
def _checkremove_que(self, word): in_que_pass_list = False que_pass_list = ['atque', 'quoque', 'neque', 'itaque', 'absque', 'apsque', 'abusque', 'adaeque', 'adusque', 'denique', 'deque', 'susque', 'oblique', 'peraeque', 'plenisque', 'quandoque', 'quisque', 'quaeque', 'cuiusque', 'cuique', 'quemque', 'quamque', 'quaque', 'quique', 'quorumque', 'quarumque', 'quibusque', 'quosque', 'quasque', 'quotusquisque', 'quousque', 'ubique', 'undique', 'usque', 'uterque', 'utique', 'utroque', 'utribique', 'torque', 'coque', 'concoque', 'contorque', 'detorque', 'decoque', 'excoque', 'extorque', 'obtorque', 'optorque', 'retorque', 'recoque', 'attorque', 'incoque', 'intorque', 'praetorque'] if word not in que_pass_list: word = re.sub(r'que$', '', word) else: in_que_pass_list = True return word, in_que_pass_list
If word ends in -que and if word is not in pass list, strip -que
def update_firewall_rule(self, firewall_rule, body=None):
    """Updates a firewall rule.

    :param firewall_rule: identifier of the firewall rule to update.
    :param body: request body with the updated rule attributes.
    :return: the deserialized response from the PUT request.
    """
    return self.put(self.firewall_rule_path % (firewall_rule), body=body)
Updates a firewall rule.
def _PrepareAttributeContainer(self, attribute_container):
    """Prepares an attribute container for storage.

    Args:
        attribute_container (AttributeContainer): attribute container.

    Returns:
        AttributeContainer: copy of the attribute container to store in
            the fake storage.
    """
    # Derive the identifier from a hash of the attribute values so that
    # equal containers map to the same fake identifier.
    attribute_values_hash = hash(attribute_container.GetAttributeValuesString())
    identifier = identifiers.FakeIdentifier(attribute_values_hash)
    attribute_container.SetIdentifier(identifier)

    # Return a deep copy so later mutations of the caller's container do
    # not affect the stored one.
    return copy.deepcopy(attribute_container)
Prepares an attribute container for storage. Args: attribute_container (AttributeContainer): attribute container. Returns: AttributeContainer: copy of the attribute container to store in the fake storage.
def _eq_dicts(d1, d2): if not d1.keys() == d2.keys(): return False for k, v1 in d1.items(): v2 = d2[k] if not type(v1) == type(v2): return False if isinstance(v1, np.ndarray): if not np.array_equal(v1, v2): return False else: if not v1 == v2: return False return True
Returns True if d1 == d2, False otherwise
def virtual_interface_list(provider, names, **kwargs):
    """List virtual interfaces on a server.

    CLI Example:

    .. code-block:: bash

        salt minionname cloud.virtual_interface_list my-nova names=['salt-master']
    """
    client = _get_client()
    return client.extra_action(provider=provider, names=names, action='virtual_interface_list', **kwargs)
List virtual interfaces on a server CLI Example: .. code-block:: bash salt minionname cloud.virtual_interface_list my-nova names=['salt-master']
def _param_deprecation_warning(schema, deprecated, context): for i in deprecated: if i in schema: msg = 'When matching {ctx}, parameter {word} is deprecated, use __{word}__ instead' msg = msg.format(ctx = context, word = i) warnings.warn(msg, Warning)
Raises warning about using the 'old' names for some parameters. The new naming scheme just has two underscores on each end of the word for consistency
def get_root_resource(config):
    """Returns the root resource.

    The resource is cached on the registry per app package name via
    ``setdefault``, so repeated calls for the same package return the
    same object.
    """
    app_package_name = get_app_package_name(config)
    return config.registry._root_resources.setdefault(
        app_package_name, Resource(config))
Returns the root resource.
def split_box(fraction, x, y, w, h):
    """Split box (x, y, w, h) along its longer dimension.

    Returns a pair of boxes where the first occupies *fraction* of the
    longer side, or ``(None, None)`` when the first part would round
    down to zero size.
    """
    if w >= h:
        first_w = int(w * fraction)
        if not first_w:
            return None, None
        return (x, y, first_w, h), (x + first_w, y, w - first_w, h)
    first_h = int(h * fraction)
    if not first_h:
        return None, None
    return (x, y, w, first_h), (x, y + first_h, w, h - first_h)
Return set of two boxes where first is the fraction given
def node_to_object(self, node, object):
    """Map a single node to one object's attributes.

    The node tag (lower-cased via self.to_lower) becomes the attribute
    name; "yield" is remapped to "_yield" since it is a keyword. The
    node text is stored as float when it parses, else as the raw text.
    """
    attribute = self.to_lower(node.tag)
    attribute = "_yield" if attribute == "yield" else attribute
    try:
        valueString = node.text or ""
        value = float(valueString)
    except ValueError:
        value = node.text
    try:
        setattr(object, attribute, value)
    # BUG FIX: was `except AttributeError():`, which catches an exception
    # *instance* and itself raises TypeError when triggered.
    except AttributeError:
        sys.stderr.write("Attribute <%s> not supported." % attribute)
Map a single node to one object's attributes
def shrink (self):
    """Shrink ca. 5% of entries.

    Evicts the entries with the lowest use counter (stored at
    ``value[0]``) from this LFU cache.
    """
    trim = int(0.05*len(self))
    if trim:
        # Bypass any overridden items() to get the raw (key, value) pairs.
        items = super(LFUCache, self).items()
        # Sort ascending by use count so the least-used come first.
        keyfunc = lambda x: x[1][0]
        values = sorted(items, key=keyfunc)
        for item in values[0:trim]:
            del self[item[0]]
Shrink ca. 5% of entries.
def update_beta(self, beta):
    r"""Update beta.

    This method updates beta only in the case of safeguarding (should
    only be done in the greedy restarting strategy): beta is scaled by
    ``xi_restart`` and clipped from below at ``min_beta``.

    Parameters
    ----------
    beta : float
        The beta parameter.

    Returns
    -------
    float
        The new value for the beta parameter.
    """
    # FIX: a stray `r` expression (the remnant of the raw-docstring
    # prefix) preceded this block and raised NameError at runtime.
    if self._safeguard:
        beta *= self.xi_restart
        beta = max(beta, self.min_beta)

    return beta
r"""Update beta This method updates beta only in the case of safeguarding (should only be done in the greedy restarting strategy). Parameters ---------- beta: float The beta parameter Returns ------- float: the new value for the beta parameter
def browse_home_listpage_url(self, state=None, county=None, zipcode=None, street=None, **kwargs):
    """Construct an url of home list page by state, county, zipcode, street.

    Example:

    - https://www.zillow.com/browse/homes/ca/
    - https://www.zillow.com/browse/homes/ca/los-angeles-county/
    - https://www.zillow.com/browse/homes/ca/los-angeles-county/91001/
    - https://www.zillow.com/browse/homes/ca/los-angeles-county/91001/tola-ave_5038895/
    """
    segments = [part for part in (state, county, zipcode, street) if part]
    suffix = "".join("/%s" % part for part in segments)
    return self.domain_browse_homes + suffix + "/"
Construct an url of home list page by state, county, zipcode, street. Example: - https://www.zillow.com/browse/homes/ca/ - https://www.zillow.com/browse/homes/ca/los-angeles-county/ - https://www.zillow.com/browse/homes/ca/los-angeles-county/91001/ - https://www.zillow.com/browse/homes/ca/los-angeles-county/91001/tola-ave_5038895/
def makeStylesheetResource(self, path, registry):
    """Return a resource for the css at the given path with its urls
    rewritten based on self.rootURL.

    :param path: filesystem path of the stylesheet.
    :param registry: unused here; kept for the resource-factory signature.
    """
    return StylesheetRewritingResourceWrapper(
        File(path), self.installedOfferingNames, self.rootURL)
Return a resource for the css at the given path with its urls rewritten based on self.rootURL.
def _find_special(self):
    """Look for special devices.

    Scans /sys/class/input for event devices not already known by
    character name, and registers those whose device name appears in
    ``self.codes['specials']``.
    """
    charnames = self._get_char_names()
    for eventdir in glob.glob('/sys/class/input/event*'):
        char_name = os.path.split(eventdir)[1]
        if char_name in charnames:
            # Already handled through the regular discovery path.
            continue
        name_file = os.path.join(eventdir, 'device', 'name')
        with open(name_file) as name_file:
            device_name = name_file.read().strip()
        if device_name in self.codes['specials']:
            self._parse_device_path(
                self.codes['specials'][device_name],
                os.path.join('/dev/input', char_name))
Look for special devices.
def toList(value, itemcast=None, delim=','):
    """Returns a list of cast items parsed from a delimited string.

    Parameters:
        value (str): delimited string to convert to list (None/empty -> []).
        itemcast (func): Function to cast each list item to (default str).
        delim (str): string delimiter (optional; default ',').
    """
    cast = itemcast or str
    pieces = (value or '').split(delim)
    return [cast(piece) for piece in pieces if piece != '']
Returns a list of strings from the specified value. Parameters: value (str): comma delimited string to convert to list. itemcast (func): Function to cast each list item to (default str). delim (str): string delimiter (optional; default ',').
def exception_format():
    """Convert exception info into a string suitable for display.

    Formats the exception currently being handled (per sys.exc_info)
    as a full traceback string.
    """
    exc_type, exc_value, exc_tb = sys.exc_info()
    lines = traceback.format_exception(exc_type, exc_value, exc_tb)
    return "".join(lines)
Convert exception info into a string suitable for display.
def log_exception(func, handler, args, kwargs):
    """Wrap the handler ``log_exception`` method to finish the Span for the
    given request, if available.

    This method is called when an Exception is not handled in the user code.
    """
    # log_exception(typ, value, tb): the exception instance is args[1].
    value = args[1] if len(args) == 3 else None
    if value is None:
        return func(*args, **kwargs)

    tracing = handler.settings.get('opentracing_tracing')
    # Finish the span with error info for real failures: anything that is
    # not an HTTPError, or an HTTPError in the 5xx range.
    if not isinstance(value, HTTPError) or 500 <= value.status_code <= 599:
        tracing._finish_tracing(handler, error=value)

    return func(*args, **kwargs)
Wrap the handler ``log_exception`` method to finish the Span for the given request, if available. This method is called when an Exception is not handled in the user code.
def config_status():
    """Check config status in an account.

    Prints, per AWS Config delivery channel, the last successful
    snapshot/history delivery times and last stream status change,
    as YAML.
    """
    s = boto3.Session()
    client = s.client('config')
    channels = client.describe_delivery_channel_status()[
        'DeliveryChannelsStatus']
    for c in channels:
        print(yaml.safe_dump({
            c['name']: dict(
                snapshot=str(
                    c['configSnapshotDeliveryInfo'].get('lastSuccessfulTime')),
                history=str(
                    c['configHistoryDeliveryInfo'].get('lastSuccessfulTime')),
                stream=str(
                    c['configStreamDeliveryInfo'].get('lastStatusChangeTime'))
            ),
        }, default_flow_style=False))
Check config status in an account.
def cublasZsyrk(handle, uplo, trans, n, k, alpha, A, lda, beta, C, ldc):
    """Rank-k operation on complex symmetric matrix.

    Thin ctypes wrapper around ``cublasZsyrk_v2``; translates the
    ``uplo``/``trans`` mode strings to CUBLAS enums and packs the
    complex scalars ``alpha``/``beta`` into cuDoubleComplex structs.
    Raises via cublasCheckStatus on a non-success status.
    """
    status = _libcublas.cublasZsyrk_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       n, k,
                                       ctypes.byref(cuda.cuDoubleComplex(alpha.real,
                                                                         alpha.imag)),
                                       int(A), lda,
                                       ctypes.byref(cuda.cuDoubleComplex(beta.real,
                                                                         beta.imag)),
                                       int(C), ldc)
    cublasCheckStatus(status)
Rank-k operation on complex symmetric matrix.
def apply_new_scoped_variable_default_value(self, path, new_default_value_str):
    """Applies the new default value of the scoped variable defined by path

    :param str path: The path identifying the edited variable
    :param str new_default_value_str: New default value as string
    """
    data_port_id = self.get_list_store_row_from_cursor_selection()[self.ID_STORAGE_ID]

    try:
        # Only assign when the value actually changed (compared as strings)
        # to avoid redundant model updates.
        if str(self.model.state.scoped_variables[data_port_id].default_value) != new_default_value_str:
            self.model.state.scoped_variables[data_port_id].default_value = new_default_value_str
    except (TypeError, AttributeError) as e:
        logger.error("Error while changing default value: {0}".format(e))
Applies the new default value of the scoped variable defined by path :param str path: The path identifying the edited variable :param str new_default_value_str: New default value as string
def _get_kwarg(self, name, kwargs): at_name = '@{}'.format(name) if name in kwargs: if at_name in kwargs: raise ValueError('Both {!r} and {!r} specified in kwargs'.format(name, at_name)) return kwargs[name] if at_name in kwargs: return kwargs[at_name] return not_set
Helper to get value of a named attribute irrespective of whether it is passed with or without "@" prefix.
def finger_master(hash_type=None):
    """Return the fingerprint of the master's public key.

    hash_type
        The hash algorithm used to calculate the fingerprint; defaults
        to the configured ``hash_type`` option.

    .. code-block:: python

        >>> wheel.cmd('key.finger_master')
        {'local': {'master.pub': '5d:f6:79:43:5e:d4:42:3f:57:b8:45:a8:7e:a4:6e:ca'}}
    """
    keyname = 'master.pub'
    if hash_type is None:
        hash_type = __opts__['hash_type']
    fingerprint = salt.utils.crypt.pem_finger(
        os.path.join(__opts__['pki_dir'], keyname), sum_type=hash_type)
    return {'local': {keyname: fingerprint}}
Return the fingerprint of the master's public key hash_type The hash algorithm used to calculate the fingerprint .. code-block:: python >>> wheel.cmd('key.finger_master') {'local': {'master.pub': '5d:f6:79:43:5e:d4:42:3f:57:b8:45:a8:7e:a4:6e:ca'}}
def main():
    """
    NAME
        uniform.py

    DESCRIPTION
        draws N directions from uniform distribution on a sphere

    SYNTAX
        uniform.py [-h][command line options]

    OPTIONS
        -h prints help message and quits
        -n N, specify N on the command line (default is 100)
        -F file, specify output file name, default is standard output
    """
    outf=""
    N=100
    if '-h' in sys.argv:
        # -h prints this docstring and exits.
        print(main.__doc__)
        sys.exit()
    if '-F' in sys.argv:
        ind=sys.argv.index('-F')
        outf=sys.argv[ind+1]
    if outf!="":
        out=open(outf,'w')
    if '-n' in sys.argv:
        ind=sys.argv.index('-n')
        N=int(sys.argv[ind+1])
    dirs=pmag.get_unf(N)
    if outf=='':
        # No output file: print dec/inc pairs to stdout.
        for dir in dirs:
            print('%7.1f %7.1f'%(dir[0],dir[1]))
    else:
        numpy.savetxt(outf,dirs,fmt='%7.1f %7.1f')
NAME uniform.py DESCRIPTION draws N directions from uniform distribution on a sphere SYNTAX uniform.py [-h][command line options] -h prints help message and quits -n N, specify N on the command line (default is 100) -F file, specify output file name, default is standard output
def structured_iterator(failure_lines):
    """Create FailureLine, Tbpl-formatted-string tuples.

    Yields one (failure_line, formatted_string) pair per line that
    formats to a non-empty string, then pads forever with (None, None)
    so zip-style consumers never run out.
    """
    summary = partial(failure_line_summary, TbplFormatter())
    for failure_line in failure_lines:
        repr_str = summary(failure_line)
        if repr_str:
            yield failure_line, repr_str
    while True:
        yield None, None
Create FailureLine, Tbpl-formatted-string tuples.
def build_self_uri_list(self_uri_list):
    """Parse the self-uri tags, build Uri objects.

    For each parsed tag dict, copies the 'xlink_href' and 'content-type'
    values (when present) onto a new Uri.
    """
    uri_list = []
    for self_uri in self_uri_list:
        uri = ea.Uri()
        utils.set_attr_if_value(uri, 'xlink_href', self_uri.get('xlink_href'))
        utils.set_attr_if_value(uri, 'content_type', self_uri.get('content-type'))
        uri_list.append(uri)
    return uri_list
parse the self-uri tags, build Uri objects
def check_option(self, key, subkey, value):
    """Evaluate if a given value fits the option.

    If an option has a limited set of available values, check if the
    provided value is amongst them.

    :param str key: First identifier of the option.
    :param str subkey: Second identifier of the option.
    :param value: Value to test (type varies).

    :return: :class:`bool` - does ``value`` belong to the options?

    :raise:
        :NotRegisteredError: If ``key`` or ``subkey`` do not define any
            option.
        :ValueError: If the provided value is not the expected type for
            the option.
    """
    key, subkey = _lower_keys(key, subkey)
    _entry_must_exist(self.gc, key, subkey)

    df = self.gc[(self.gc["k1"] == key) & (self.gc["k2"] == subkey)]
    # Type validation; raises ValueError on a mismatch.
    ev.value_eval(value, df["type"].values[0])
    # When the option restricts its values, membership decides the result.
    if df["values"].values[0] is not None:
        return value in df["values"].values[0]
    return True
Evaluate if a given value fits the option. If an option has a limited set of available values, check if the provided value is amongst them. :param str key: First identifier of the option. :param str subkey: Second identifier of the option. :param value: Value to test (type varies). :return: :class:`bool` - does ``value`` belong to the options? :raise: :NotRegisteredError: If ``key`` or ``subkey`` do not define any option. :ValueError: If the provided value is not the expected type for the option.
def cut_matrix(self, n):
    """The matrix of connections that are severed by this cut.

    Returns an (n, n) array with 1 marking each severed connection.
    """
    cm = np.zeros((n, n))
    for part in self.partition:
        # Order (mechanism, purview) into source/target per the cut direction.
        from_, to = self.direction.order(part.mechanism, part.purview)
        # Connections from this part's sources to everything outside its
        # targets are cut.
        external = tuple(set(self.indices) - set(to))
        cm[np.ix_(from_, external)] = 1
    return cm
The matrix of connections that are severed by this cut.
def unmatched_brackets_in_line(self, text, closing_brackets_type=None):
    """Check whether 'text' contains unmatched brackets.

    The bracket type can be general, or restricted by passing
    closing_brackets_type as ')', ']' or '}'.

    Returns True as soon as one bracket in the line has no match
    within the line (for opening brackets) or no match at all / a
    match before the line start (for closing brackets).
    """
    if closing_brackets_type is None:
        opening_brackets = self.BRACKETS_LEFT.values()
        closing_brackets = self.BRACKETS_RIGHT.values()
    else:
        closing_brackets = [closing_brackets_type]
        opening_brackets = [{')': '(', '}': '{',
                             ']': '['}[closing_brackets_type]]
    block = self.editor.textCursor().block()
    line_pos = block.position()
    for pos, char in enumerate(text):
        if char in opening_brackets:
            match = self.editor.find_brace_match(line_pos+pos, char,
                                                 forward=True)
            # Opening bracket is unmatched if its match lies past this line.
            if (match is None) or (match > line_pos+len(text)):
                return True
        if char in closing_brackets:
            match = self.editor.find_brace_match(line_pos+pos, char,
                                                 forward=False)
            # Closing bracket is unmatched if its match precedes this line.
            if (match is None) or (match < line_pos):
                return True
    return False
Checks whether there are unmatched brackets in 'text'. The bracket type can be general or specified by closing_brackets_type (')', ']' or '}')
def registercls(self, data_types, schemacls=None):
    """Register schema class with associated data_types.

    Can be used such as a decorator.

    :param list data_types: data types to associate with schema class.
    :param type schemacls: schema class to register.

    :return: schemacls.
    :rtype: type
    """
    if schemacls is None:
        # Decorator form: defer registration until the class is supplied.
        def _decorator(cls):
            return self.registercls(data_types=data_types, schemacls=cls)
        return _decorator

    for data_type in data_types:
        self._schbytype[data_type] = schemacls
    return schemacls
Register schema class with associated data_types. Can be used such as a decorator. :param list data_types: data types to associate with schema class. :param type schemacls: schema class to register. :return: schemacls. :rtype: type
def table_describe(table_name):
    """Return summary statistics of a table as JSON.

    Includes all columns. Uses Pandas' "split" JSON format. The return
    value is a (body, status, headers) triple for the web framework.
    """
    desc = orca.get_table(table_name).to_frame().describe()
    return (
        desc.to_json(orient='split', date_format='iso'),
        200,
        {'Content-Type': 'application/json'})
Return summary statistics of a table as JSON. Includes all columns. Uses Pandas' "split" JSON format.
def _update_trial_queue(self, blocking=False, timeout=600):
    """Adds next trials to queue if possible.

    Note that the timeout is currently unexposed to the user.

    Args:
        blocking (bool): Blocks until either a trial is available or
            is_finished (timeout or search algorithm finishes).
        timeout (int): Seconds before blocking times out.
    """
    trials = self._search_alg.next_trials()
    if blocking and not trials:
        start = time.time()
        # Poll once a second until a trial appears, the run finishes,
        # or the timeout elapses.
        while (not trials and not self.is_finished()
               and time.time() - start < timeout):
            logger.info("Blocking for next trial...")
            trials = self._search_alg.next_trials()
            time.sleep(1)
    for trial in trials:
        self.add_trial(trial)
Adds next trials to queue if possible. Note that the timeout is currently unexposed to the user. Args: blocking (bool): Blocks until either a trial is available or is_finished (timeout or search algorithm finishes). timeout (int): Seconds before blocking times out.
def _zip_with_scalars(args):
    """Zips across args in order and replaces non-iterables with repeats."""
    zipped = []
    for arg in args:
        if isinstance(arg, prettytensor.PrettyTensor):
            # Sequence PTs iterate; non-sequence PTs broadcast as repeats.
            zipped.append(arg if arg.is_sequence() else itertools.repeat(arg))
        elif (isinstance(arg, collections.Sequence) and
              not isinstance(arg, tf.compat.bytes_or_text_types)):
            zipped.append(arg)
        else:
            # Scalars, and strings/bytes (treated as scalars), broadcast.
            zipped.append(itertools.repeat(arg))
    assert len(args) == len(zipped)
    return zip(*zipped)
Zips across args in order and replaces non-iterables with repeats.
def camelcase_to_underscores(argument):
    """Convert a camelcase param like theNewAttribute to the equivalent
    python underscore variable like the_new_attribute.

    An underscore is inserted at every upper->lower and lower->upper
    case boundary (except at position 0); whitespace is dropped.
    """
    if not argument:
        return argument
    pieces = []
    prev_title = True
    length = len(argument)
    for idx, ch in enumerate(argument):
        next_title = argument[idx + 1].istitle() if idx + 1 < length else True
        is_title = ch.istitle()
        boundary = (is_title and not next_title) or (is_title and not prev_title)
        if idx and boundary:
            pieces.append("_")
        prev_title = is_title
        if not ch.isspace():
            pieces.append(ch.lower())
    return "".join(pieces)
Converts a camelcase param like theNewAttribute to the equivalent python underscore variable like the_new_attribute
def to_str(instance, encoding='utf-8'):
    """Convert an instance recursively to string.

    Bytes-like values (anything with .decode) are decoded with
    *encoding*; lists, tuples and dicts are rebuilt with converted
    elements/keys/values; everything else passes through unchanged.
    """
    if isinstance(instance, str):
        return instance
    if hasattr(instance, 'decode'):
        return instance.decode(encoding)
    if isinstance(instance, list):
        return [to_str(element, encoding) for element in instance]
    if isinstance(instance, tuple):
        return tuple(to_str(element, encoding) for element in instance)
    if isinstance(instance, dict):
        return {to_str(k, encoding): to_str(v, encoding)
                for k, v in instance.items()}
    return instance
Convert an instance recursively to string.
def _domain_event_block_job_cb(conn, domain, disk, job_type, status, opaque):
    """Domain block job events handler.

    Forwards the libvirt block-job event on the salt event bus with the
    job type/status enums translated to their string names.
    """
    _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
        'disk': disk,
        'type': _get_libvirt_enum_string('VIR_DOMAIN_BLOCK_JOB_TYPE_', job_type),
        'status': _get_libvirt_enum_string('VIR_DOMAIN_BLOCK_JOB_', status)
    })
Domain block job events handler
def get(self, country_code):
    """Constructs a AvailablePhoneNumberCountryContext

    :param country_code: The ISO country code of the country to fetch
        available phone number information about

    :returns: twilio.rest.api.v2010.account.available_phone_number.AvailablePhoneNumberCountryContext
    :rtype: twilio.rest.api.v2010.account.available_phone_number.AvailablePhoneNumberCountryContext
    """
    return AvailablePhoneNumberCountryContext(
        self._version,
        account_sid=self._solution['account_sid'],
        country_code=country_code,
    )
Constructs a AvailablePhoneNumberCountryContext :param country_code: The ISO country code of the country to fetch available phone number information about :returns: twilio.rest.api.v2010.account.available_phone_number.AvailablePhoneNumberCountryContext :rtype: twilio.rest.api.v2010.account.available_phone_number.AvailablePhoneNumberCountryContext