code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def timeit(hosts=None, stmt=None, warmup=30, repeat=None, duration=None,
           concurrency=1, output_fmt=None, fail_if=None, sample_mode='reservoir'):
    """Run the given statement(s) a number of times and log runtime stats.

    Statements come from --stmt or stdin; each is warmed up, timed, logged,
    and optionally checked against the ``fail_if`` expression.
    """
    num_lines = 0
    log = Logger(output_fmt)
    with Runner(hosts, concurrency, sample_mode) as runner:
        version_info = aio.run(runner.client.get_server_version)
        for line in as_statements(lines_from_stdin(stmt)):
            runner.warmup(line, warmup)
            timed_stats = runner.run(line, iterations=repeat, duration=duration)
            r = Result(
                version_info=version_info,
                statement=line,
                timed_stats=timed_stats,
                concurrency=concurrency
            )
            log.result(r)
            if fail_if:
                eval_fail_if(fail_if, r)
            num_lines += 1
    # No statements at all is a usage error.
    if num_lines == 0:
        raise SystemExit(
            'No SQL statements provided. Use --stmt or provide statements via stdin')
Run the given statement a number of times and return the runtime stats Args: fail-if: An expression that causes cr8 to exit with a failure if it evaluates to true. The expression can contain formatting expressions for: - runtime_stats - statement - meta - concurrency - bulk_size For example: --fail-if "{runtime_stats.mean} > 1.34"
def load_config(filename, config_dir=None, copy_default_config=True):
    """Load the specified config file into the module-level parser ``cfg``.

    :param filename: config file name, e.g. 'config_grid.cfg'
    :param config_dir: directory of the config file; default path when None
    :param copy_default_config: copy a packaged default if the file is missing
    :raises FileNotFoundError: if the file is absent (and not copied) or empty
    """
    if not config_dir:
        config_file = os.path.join(get_default_config_path(), filename)
    else:
        config_file = os.path.join(config_dir, filename)
    if not os.path.isfile(config_file):
        if copy_default_config:
            logger.info('Config file {} not found, I will create a '
                        'default version'.format(config_file))
            # NOTE(review): config_dir may be None on this branch; verify
            # make_directory handles None, otherwise this would fail.
            make_directory(config_dir)
            shutil.copy(os.path.join(package_path, 'config',
                                     filename.replace('.cfg', '_default.cfg')),
                        config_file)
        else:
            message = 'Config file {} not found.'.format(config_file)
            logger.error(message)
            raise FileNotFoundError(message)
    # cfg.read returns the list of successfully parsed files.
    if len(cfg.read(config_file)) == 0:
        message = 'Config file {} not found or empty.'.format(config_file)
        logger.error(message)
        raise FileNotFoundError(message)
    global _loaded
    _loaded = True
Loads the specified config file. Parameters ----------- filename : :obj:`str` Config file name, e.g. 'config_grid.cfg'. config_dir : :obj:`str`, optional Path to config file. If None uses default edisgo config directory specified in config file 'config_system.cfg' in section 'user_dirs' by subsections 'root_dir' and 'config_dir'. Default: None. copy_default_config : Boolean If True copies a default config file into `config_dir` if the specified config file does not exist. Default: True.
def extract_cookies(self, response, request):
    """Store cookies from *response* in the jar, where the policy allows it
    for the given *request*."""
    _debug("extract_cookies: %s", response.info())
    with self._cookies_lock:
        self._policy._now = self._now = int(time.time())
        for cookie in self.make_cookies(response, request):
            if self._policy.set_ok(cookie, request):
                _debug(" setting cookie: %s", cookie)
                self.set_cookie(cookie)
Extract cookies from response, where allowable given the request.
def nodes(self):
    """Lazily compute and cache node positions; layout runs only on the
    first access when no explicit node information was supplied."""
    if self._nodes is None:
        self._nodes = layout_nodes(self, only_nodes=True)
    return self._nodes
Computes the node positions the first time they are requested if no explicit node information was supplied.
def data(self):
    """Return the serialized list data, sideloaded when the child serializer
    uses an envelope, then run through post-processing."""
    data = super(DynamicListSerializer, self).data
    processed_data = ReturnDict(
        SideloadingProcessor(self, data).data,
        serializer=self
    ) if self.child.envelope else ReturnList(
        data,
        serializer=self
    )
    processed_data = post_process(processed_data)
    return processed_data
Get the data, after performing post-processing if necessary.
def send_message(self, message):
    """Dispatch *message* over 0mq after validating sender, receiver and body."""
    with self._instance_lock:
        if message is None:
            Global.LOGGER.error("can't deliver a null messages")
            return
        if message.sender is None:
            Global.LOGGER.error(f"can't deliver anonymous messages with body {message.body}")
            return
        if message.receiver is None:
            Global.LOGGER.error(
                f"can't deliver message from {message.sender}: recipient not specified")
            return
        if message.message is None:
            Global.LOGGER.error(f"can't deliver message with no body from {message.sender}")
            return
        # The sender wrapped in '*' is the 0mq routing envelope frame.
        sender = "*" + message.sender + "*"
        self.socket.send_multipart(
            [bytes(sender, 'utf-8'), pickle.dumps(message)])
        if Global.CONFIG_MANAGER.tracing_mode:
            Global.LOGGER.debug("dispatched : " + message.sender
                                + "-" + message.message + "-" + message.receiver)
        self.dispatched = self.dispatched + 1
Dispatch a message using 0mq
def add_line(self, string):
    """Append a line of LISP code and recompile the accumulated expression.

    :param string: the line to add
    :return: None
    """
    self.code_strings.append(string)
    code = ''
    if len(self.code_strings) == 1:
        code = '(setv result ' + self.code_strings[0] + ')'
    if len(self.code_strings) > 1:
        # Multiple lines are AND-ed together into a single result.
        code = '(setv result (and ' + ' '.join(self.code_strings) + '))'
    self._compiled_ast_and_expr = self.__compile_code(code_string=code)
Adds a line to the LISP code to execute :param string: The line to add :return: None
def _handle_progress(self, total, progress_callback):
    """Coroutine: accumulate sent increments and report (current, total) to
    *progress_callback*; callback exceptions are logged and ignored."""
    current = 0
    while True:
        current += yield
        try:
            progress_callback(current, total)
        except Exception:
            _LOG.exception('Progress callback raised an exception. %s',
                           progress_callback)
            continue
Calls the callback with the current progress and total.
def resource_record_set(self, name, record_type, ttl, rrdatas):
    """Construct a ResourceRecordSet (name, RR type, TTL, rdata) bound to
    this zone."""
    return ResourceRecordSet(name, record_type, ttl, rrdatas, zone=self)
Construct a resource record set bound to this zone. :type name: str :param name: Name of the record set. :type record_type: str :param record_type: RR type :type ttl: int :param ttl: TTL for the RR, in seconds :type rrdatas: list of string :param rrdatas: resource data for the RR :rtype: :class:`google.cloud.dns.resource_record_set.ResourceRecordSet` :returns: a new ``ResourceRecordSet`` instance
def random_deinterleave(text, separator_symbol="X"):
    """Create a fill-in-the-blanks pair from *text*.

    The text is cut at random word boundaries and the resulting segments are
    dealt alternately into two output strings, with *separator_symbol*
    marking each gap. Inputs of at most one word come back as (text, "").
    """
    words = text.strip().split(" ")
    n = len(words)
    if n <= 1:
        return text, ""
    cut = [False] * n
    cut[0] = True
    # Log-uniform number of cut points keeps segment counts varied.
    num_cuts = int(math.exp(random.uniform(0, math.log(n))))
    for _ in range(num_cuts):
        cut[random.randint(1, n - 1)] = True
    halves = ([], [])
    side = random.randint(0, 1)
    for idx, word in enumerate(words):
        if cut[idx]:
            halves[side].append(separator_symbol)
            side = 1 - side
        halves[side].append(word)
    return " ".join(halves[0]), " ".join(halves[1])
Create a fill-in-the-blanks training example from text. Split on spaces, then cut into segments at random points. Alternate segments are assigned to the two output strings. separator_symbol separates segments within each of the outputs. example: text="The quick brown fox jumps over the lazy dog." returns: ("X quick brown X the lazy X", "The X fox jumps over X dog.") The two outputs can also be reversed to yield an instance of the same problem. Args: text: a string separator_symbol: a string Returns: a pair of strings
def pubmed_citation(args=sys.argv[1:], out=sys.stdout):
    """Command-line tool: look up a PubMed ID/URL and write its citation to *out*.

    NOTE(review): the defaults are evaluated once at import time; callers
    (and tests) should pass args/out explicitly.
    """
    parser = argparse.ArgumentParser(
        description='Get a citation using a PubMed ID or PubMed URL')
    parser.add_argument('query', help='PubMed ID or PubMed URL')
    parser.add_argument(
        '-m', '--mini', action='store_true', help='get mini citation')
    parser.add_argument(
        '-e', '--email', action='store', help='set user email', default='')
    args = parser.parse_args(args=args)
    lookup = PubMedLookup(args.query, args.email)
    publication = Publication(lookup, resolve_doi=False)
    if args.mini:
        out.write(publication.cite_mini() + '\n')
    else:
        out.write(publication.cite() + '\n')
Get a citation via the command line using a PubMed ID or PubMed URL
def expanded_transform(self):
    """Expand this column's transform string into segment dicts; falls back
    to a single identity segment when there is no transform."""
    segments = self._expand_transform(self.transform)
    if segments:
        segments[0]['datatype'] = self.valuetype_class
        for s in segments:
            s['column'] = self
    else:
        segments = [self.make_xform_seg(datatype=self.valuetype_class, column=self)]
    return segments
Expands the transform string into segments
def ReadFromFile(self, artifacts_reader, filename):
    """Read artifact definitions from a file and register each one.

    Args:
      artifacts_reader (ArtifactsReader): an artifacts reader.
      filename (str): name of the file to read from.
    """
    for artifact_definition in artifacts_reader.ReadFile(filename):
        self.RegisterDefinition(artifact_definition)
Reads artifact definitions into the registry from a file. Args: artifacts_reader (ArtifactsReader): an artifacts reader. filename (str): name of the file to read from.
def encrypt_put_item(encrypt_method, crypto_config_method, write_method, **kwargs):
    """Transparently encrypt an item before writing it with *write_method*.

    :param encrypt_method: callable that encrypts an item
    :param crypto_config_method: callable mapping **kwargs to (CryptoConfig, ddb kwargs)
    :param write_method: callable that writes to the table
    :return: DynamoDB response (dict)
    """
    crypto_config, ddb_kwargs = crypto_config_method(**kwargs)
    ddb_kwargs["Item"] = encrypt_method(
        item=ddb_kwargs["Item"],
        crypto_config=crypto_config.with_item(
            _item_transformer(encrypt_method)(ddb_kwargs["Item"])),
    )
    return write_method(**ddb_kwargs)
Transparently encrypt an item before putting it to the table. :param callable encrypt_method: Method to use to encrypt items :param callable crypto_config_method: Method that accepts ``kwargs`` and provides a :class:`CryptoConfig` :param callable write_method: Method that writes to the table :param **kwargs: Keyword arguments to pass to ``write_method`` :return: DynamoDB response :rtype: dict
def close(self, response):
    """Close the database connection (if open) and pass *response* through."""
    LOGGER.info('Closing [%s]', os.getpid())
    if not self.database.is_closed():
        self.database.close()
    return response
Close connection to database.
def _get_deployment_instance_diagnostics(awsclient, deployment_id, instance_id): client_codedeploy = awsclient.get_client('codedeploy') request = { 'deploymentId': deployment_id, 'instanceId': instance_id } response = client_codedeploy.get_deployment_instance(**request) for i, event in enumerate(response['instanceSummary']['lifecycleEvents']): if event['status'] == 'Failed': return event['diagnostics']['errorCode'], \ event['diagnostics']['scriptName'], \ event['diagnostics']['message'], \ event['diagnostics']['logTail'] return None
Gets you the diagnostics details for the first 'Failed' event. :param awsclient: :param deployment_id: :param instance_id: return: None or (error_code, script_name, message, log_tail)
def onStopping(self):
    """Actions performed on stopping the node: record stop time, log stats,
    stop ledgers/stacks/stores, and fire the POST_NODE_STOPPED hook."""
    if self.config.STACK_COMPANION == 1:
        add_stop_time(self.ledger_dir, self.utc_epoch())
    self.logstats()
    self.reset()
    # A failing ledger must not prevent the remaining shutdown steps.
    for ledger in self.ledgers:
        try:
            ledger.stop()
        except Exception as ex:
            logger.exception('{} got exception while stopping ledger: {}'.format(self, ex))
    self.nodestack.stop()
    self.clientstack.stop()
    self.closeAllKVStores()
    self._info_tool.stop()
    self.mode = None
    self.execute_hook(NodeHooks.POST_NODE_STOPPED)
Actions to be performed on stopping the node. - Close the UDP socket of the nodestack
def get(file_name, key):
    """Print the value stored under *key* in the Xonotic DB file; exit with
    status 1 when the key is not found."""
    db = XonoticDB.load_path(file_name)
    value = db.get(key)
    if value is None:
        sys.exit(1)
    else:
        click.echo(value)
Print a value for the specified key. If the key is not found, xon_db exits with code 1.
def get_user(request):
    """Return the request's user, caching it on the request object so the
    auth backend is consulted at most once per request."""
    try:
        return request._cached_user
    except AttributeError:
        request._cached_user = auth_get_user(request)
        return request._cached_user
Returns a cached copy of the user if it exists or calls `auth_get_user` otherwise.
def produce(self, *args, **kwargs):
    """Treat the current plugins as factories: call each one with the given
    arguments and wrap the results in a new PluginManager."""
    return PluginManager([factory(*args, **kwargs) for factory in self._plugins])
Produce a new set of plugins, treating the current set as plugin factories.
def add(addon, dev, interactive):
    """CLI command: add *addon* as a dependency of the current application."""
    application = get_current_application()
    application.add(
        addon,
        dev=dev,
        interactive=interactive
    )
Add a dependency. Examples: $ django add dynamic-rest==1.5.0 + dynamic-rest == 1.5.0
def call_api(self, table, column, value, **kwargs):
    """Connect to and query the EPA's API.

    Builds ``base_url/table/column/value/rows/<count>`` and returns the
    response formatted per ``output_format`` (kwarg, else the instance default).
    """
    # Idiomatic pop-with-default instead of try/except KeyError.
    output_format = kwargs.pop('output_format', self.output_format)
    url_list = [self.base_url, table, column, quote(value), 'rows']
    rows_count = self._number_of_rows(**kwargs)
    url_list.append(rows_count)
    url_string = '/'.join(url_list)
    xml_data = urlopen(url_string).read()
    data = self._format_data(output_format, xml_data)
    return data
Exposed method to connect and query the EPA's API.
def get_embed_url(self, targeting=None, recirc=None):
    """Build the videohub embed-iframe URL for this video.

    :param targeting: optional dict appended as sorted query parameters
    :param recirc: optional recirc query value
    :return: the embed URL
    :rtype: str
    """
    url = getattr(settings, "VIDEOHUB_EMBED_URL", self.DEFAULT_VIDEOHUB_EMBED_URL)
    url = url.format(self.id)
    # NOTE(review): parameters are always appended with '&' — this assumes
    # the formatted base URL already contains a '?'; confirm the template.
    if targeting is not None:
        for k, v in sorted(targeting.items()):
            url += '&{0}={1}'.format(k, v)
    if recirc is not None:
        url += '&recirc={0}'.format(recirc)
    return url
gets a canonical path to an embedded iframe of the video from the hub :return: the path to create an embedded iframe of the video :rtype: str
def visit_WhileStatement(self, node):
    """Visitor for a `WhileStatement` AST node: re-evaluate the condition
    before each pass and propagate a non-None body result upward."""
    while self.visit(node.condition):
        result = self.visit(node.compound)
        if result is not None:
            return result
Visitor for `WhileStatement` AST node.
def extract_relationtypes(rs3_xml_tree):
    """Extract allowed RST relation names and types from an RS3 XML tree.

    Returns a dict mapping relation name (str) to relation type (str,
    'rst' or 'multinuc'); ``rel`` elements without a type are skipped.
    """
    relations = {}
    for element in rs3_xml_tree.iter('rel'):
        attributes = element.attrib
        if 'type' in attributes:
            relations[attributes['name']] = attributes['type']
    return relations
extracts the allowed RST relation names and relation types from an RS3 XML file. Parameters ---------- rs3_xml_tree : lxml.etree._ElementTree lxml ElementTree representation of an RS3 XML file Returns ------- relations : dict of (str, str) Returns a dictionary with RST relation names as keys (str) and relation types (either 'rst' or 'multinuc') as values (str).
def __xinclude_lxml(target, source, env):
    """SCons builder action: resolve XIncludes in source[0] and write the
    result to target[0] using lxml."""
    from lxml import etree
    doc = etree.parse(str(source[0]))
    doc.xinclude()
    try:
        doc.write(str(target[0]), xml_declaration=True,
                  encoding="UTF-8", pretty_print=True)
    except Exception:
        # Best-effort write kept from the original, but the bare `except:`
        # no longer swallows KeyboardInterrupt/SystemExit.
        pass
    return None
Resolving XIncludes, using the lxml module.
def get_version(self):
    """Return the OpenALPR library version string.

    :return: version information (str)
    """
    ptr = self._get_version_func(self.alpr_pointer)
    version_number = ctypes.cast(ptr, ctypes.c_char_p).value
    version_number = _convert_from_charp(version_number)
    # The native library owns the buffer; free it to avoid a leak.
    self._free_json_mem_func(ctypes.c_void_p(ptr))
    return version_number
This gets the version of OpenALPR :return: Version information
def read(self, b=None):
    """Read from the wrapped source, mirroring the data into ``tee`` before
    returning it.

    :param b: number of bytes to read (None reads to EOF)
    """
    data = self.__wrapped__.read(b)
    self.__tee.write(data)
    return data
Reads data from source, copying it into ``tee`` before returning. :param int b: number of bytes to read
def tuple(self, r):
    """Convert the linear index *r* into a pegasus_index label (u, w, k, z).

    The mixed-radix decomposition uses (m, m1) from ``self.args`` and the
    fixed inner dimension 12.
    """
    m, m1 = self.args
    quotient, z = divmod(r, m1)
    quotient, k = divmod(quotient, 12)
    u, w = divmod(quotient, m)
    return u, w, k, z
Converts the linear_index `q` into an pegasus_index Parameters ---------- r : int The linear_index node label Returns ------- q : tuple The pegasus_index node label corresponding to r
def parse_to_ast(source_code: str) -> List[ast.stmt]:
    """Parse vyper source into a list of python AST statement objects.

    Pre-processes the source, rejects NUL bytes, then annotates and
    optimizes the resulting AST before returning its body.

    :param source_code: the vyper source code to be parsed
    :return: post-processed list of python AST objects, one per statement
    """
    class_types, reformatted_code = pre_parse(source_code)
    if '\x00' in reformatted_code:
        raise ParserException('No null bytes (\\x00) allowed in the source code.')
    parsed_ast = ast.parse(reformatted_code)
    annotate_and_optimize_ast(parsed_ast, reformatted_code, class_types)
    return parsed_ast.body
Parses the given vyper source code and returns a list of python AST objects for all statements in the source. Performs pre-processing of source code before parsing as well as post-processing of the resulting AST. :param source_code: The vyper source code to be parsed. :return: The post-processed list of python AST objects for each statement in ``source_code``.
def after_request(self, response):
    """after_request handler: recompile the compass projects with each
    request, skipping static-file and unmatched endpoints.

    NOTE(review): the nesting of ``self.compile()`` relative to the outer
    guard is ambiguous in this copy of the source — confirm upstream.
    """
    if response is not None and request is not None:
        if request.endpoint in [None, "static"]:
            return response
    self.compile()
    return response
after_request handler for compiling the compass projects with each request.
def launch():
    """Ensure python runs from the Lore virtualenv past this point,
    installing the environment and rebooting the process when missing."""
    if launched():
        check_version()
        os.chdir(ROOT)
        return
    if not os.path.exists(BIN_LORE):
        missing = ' %s virtualenv is missing.' % APP
        if '--launched' in sys.argv:
            # Already relaunched once and still missing: abort to avoid a loop.
            sys.exit(ansi.error() + missing +
                     ' Please check for errors during:\n $ lore install\n')
        else:
            print(ansi.warning() + missing)
            import lore.__main__
            lore.__main__.install(None, None)
    reboot('--env-launched')
Ensure that python is running from the Lore virtualenv past this point.
def load(self, configuration):
    """Load a YAML configuration file (or YAML string fallback).

    :param configuration: configuration filename or YAML string
    """
    try:
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input; consider yaml.safe_load.
        self.config = yaml.load(open(configuration, "rb"))
    except IOError:
        # Not a readable file: treat the argument as a YAML string.
        try:
            self.config = yaml.load(configuration)
        except ParserError as e:  # fixed Python-2-only `except X, e` syntax
            raise ParserError('Error parsing config: %s' % e)
    if isinstance(self.config, dict):
        self.customer = self.config.get('customer', {})
        self.instances_dict = self.config.get('instances', {})
        self.web2py_dir = self.config.get('web2py', None)
        self.api_type = self.config.get('api_type', 'jsonrpc')
        self.valid = True
    else:
        self.customer = {}
        self.instances_dict = {}
        self.web2py_dir = None
        self.valid = False
Load a YAML configuration file. :param configuration: Configuration filename or YAML string
def evaluate_callables(data):
    """Return a copy of *data* with every callable value replaced by the
    result of calling it; the mapping type of the input is preserved.

    >>> evaluate_callables({"spam": "ham", "eggs": (lambda: 123)})
    {'spam': 'ham', 'eggs': 123}
    """
    pairs = []
    for key, value in data.items():
        pairs.append((key, value() if callable(value) else value))
    return type(data)(pairs)
Call any callable values in the input dictionary; return a new dictionary containing the evaluated results. Useful for lazily evaluating default values in ``build`` methods. >>> data = {"spam": "ham", "eggs": (lambda: 123)} >>> result = evaluate_callables(data) >>> result == {'eggs': 123, 'spam': 'ham'} True
def extend (name, values):
    """Add the given *values* to the feature *name* (Python-2-era code).

    Implicit features also register each value in the global implicit map;
    the first value becomes the default for non-free, non-optional features.
    """
    assert isinstance(name, basestring)
    assert is_iterable_typed(values, basestring)
    name = add_grist (name)
    __validate_feature (name)
    feature = __all_features [name]
    if feature.implicit:
        for v in values:
            if v in __implicit_features:
                raise BaseException ("'%s' is already associated with the feature '%s'" % (v, __implicit_features [v]))
            __implicit_features[v] = feature
    if values and not feature.values and not(feature.free or feature.optional):
        feature.set_default(values[0])
    feature.add_values(values)
Adds the given values to the given feature.
def EXCHANGE(classical_reg1, classical_reg2):
    """Produce an EXCHANGE instruction swapping two classical registers.

    :param classical_reg1: the first classical register (gets modified)
    :param classical_reg2: the second classical register (gets modified)
    :return: a ClassicalExchange instance
    """
    left = unpack_classical_reg(classical_reg1)
    right = unpack_classical_reg(classical_reg2)
    return ClassicalExchange(left, right)
Produce an EXCHANGE instruction. :param classical_reg1: The first classical register, which gets modified. :param classical_reg2: The second classical register, which gets modified. :return: A ClassicalExchange instance.
def upsert_sweep(self, config):
    """Upsert a sweep object and return the sweep name.

    Args:
        config: sweep config (serialized to YAML for the mutation)
    """
    # NOTE(review): the GraphQL document appears to have been stripped from
    # this copy of the source; gql() is called without arguments here.
    mutation = gql(
    )

    def no_retry_400_or_404(e):
        # Retry everything except HTTP 400/404, which surface as UsageError.
        if not isinstance(e, requests.HTTPError):
            return True
        if e.response.status_code != 400 and e.response.status_code != 404:
            return True
        body = json.loads(e.response.content)
        raise UsageError(body['errors'][0]['message'])
    response = self.gql(mutation, variable_values={
        'config': yaml.dump(config),
        'description': config.get("description"),
        'entityName': self.settings("entity"),
        'projectName': self.settings("project")},
        check_retry_fn=no_retry_400_or_404)
    return response['upsertSweep']['sweep']['name']
Upsert a sweep object. Args: config (str): sweep config (will be converted to yaml)
async def updateconfig(self):
    """Reload configurations: keep unchanged servers, create new ones, and
    shut down servers that are no longer configured."""
    exists = {}
    for s in self.connections:
        exists[(s.protocol.vhost, s.rawurl)] = s
    # NOTE(review): _createServers is called with self twice — presumably its
    # signature takes a container argument; confirm against its definition.
    self._createServers(self, '', exists = exists)
    # Whatever is left in `exists` was not re-created: shut it down.
    for _, v in exists.items():
        await v.shutdown()
        self.connections.remove(v)
Reload configurations, remove non-exist servers, add new servers, and leave others unchanged
def write_file(path, contents):
    """Write *contents* to a local file, creating parent directories as needed.

    :param path: destination file path (relative or absolute)
    :param contents: text to write
    """
    directory = os.path.dirname(path)
    # A bare filename has an empty dirname; os.makedirs('') would raise.
    if directory:
        os.makedirs(directory, exist_ok=True)
    with open(path, "w") as file:
        file.write(contents)
Write contents to a local file.
def ext_from_filename(filename):
    """Scan a filename for its extension.

    :param filename: string of the filename
    :return: the extension (possibly compound, e.g. '.tar.gz'); '' when none
    """
    try:
        base, ext = filename.lower().rsplit(".", 1)
    except ValueError:
        # No dot at all -> no extension.
        return ''
    ext = ".{0}".format(ext)
    all_exts = [x.extension for x in chain(magic_header_array, magic_footer_array)]
    # A dot within the last 4 chars of the base suggests a compound
    # extension; only accept it if the magic tables know it.
    if base[-4:].startswith("."):
        long_ext = base[-4:] + ext
        if long_ext in all_exts:
            return long_ext
    return ext
Scan a filename for it's extension. :param filename: string of the filename :return: the extension off the end (empty string if it can't find one)
def focus_last_child(self):
    """Move focus to the last child of the currently focussed node, if any."""
    w, focuspos = self.get_focus()
    child = self._tree.last_child_position(focuspos)
    if child is not None:
        self.set_focus(child)
move focus to last child of currently focussed one
def coef_(self):
    """Return the aggregated coefficients.

    Returns
    -------
    ndarray of shape (n_features + 1,): each estimator contributes its
    learning-rate-scaled coefficient at its component index; index 0 is
    the intercept slot.
    """
    aggregated = numpy.zeros(self.n_features_ + 1, dtype=float)
    for component_estimator in self.estimators_:
        contribution = self.learning_rate * component_estimator.coef_
        aggregated[component_estimator.component] += contribution
    return aggregated
Return the aggregated coefficients. Returns ------- coef_ : ndarray, shape = (n_features + 1,) Coefficients of features. The first element denotes the intercept.
def _fluent_params(self, fluents, ordering) -> FluentParamsList:
    """Return the instantiated *fluents* for the given *ordering*.

    Each fluent's parameter types are expanded against the object table;
    the result is a tuple of (fluent_id, [instantiated names]) pairs.
    """
    variables = []
    for fluent_id in ordering:
        fluent = fluents[fluent_id]
        param_types = fluent.param_types
        objects = ()
        names = []
        if param_types is None:
            # Parameterless fluent: the bare name is the only instantiation.
            names = [fluent.name]
        else:
            objects = tuple(self.object_table[ptype]['objects'] for ptype in param_types)
            for values in itertools.product(*objects):
                values = ','.join(values)
                var_name = '{}({})'.format(fluent.name, values)
                names.append(var_name)
        variables.append((fluent_id, names))
    return tuple(variables)
Returns the instantiated `fluents` for the given `ordering`. For each fluent in `fluents`, it instantiates each parameter type w.r.t. the contents of the object table. Returns: Sequence[Tuple[str, List[str]]]: A tuple of pairs of fluent name and a list of instantiated fluents represented as strings.
def get_klass_parents(gi_name):
    """Return qualified symbols for the parents of the klass-like *gi_name*.

    NOTE(review): subscripting ``predecessors(...)[0]`` assumes a list
    return (networkx 1.x); networkx 2.x returns an iterator — confirm the
    pinned dependency version.
    """
    res = []
    parents = __HIERARCHY_GRAPH.predecessors(gi_name)
    if not parents:
        return []
    __get_parent_link_recurse(parents[0], res)
    return res
Returns a sorted list of qualified symbols representing the parents of the klass-like symbol named gi_name
def _add_widget(self, widget):
    """Append *widget* (a widget-specific json dict) to the customization
    and persist the updated widget list to KE-chain."""
    widgets = self.widgets()
    widgets += [widget]
    self._save_customization(widgets)
Add a widget to the customization. Will save the widget to KE-chain. :param widget: The widget (specific json dict) to be added :type widget: dict
def request_callback_answer(
    self,
    chat_id: Union[int, str],
    message_id: int,
    callback_data: bytes
):
    """Request a callback answer from a bot — the equivalent of pressing an
    inline button carrying callback data.

    :param chat_id: target chat id or username
    :param message_id: id of the message the inline keyboard is attached to
    :param callback_data: callback data of the button to "press"
    """
    return self.send(
        functions.messages.GetBotCallbackAnswer(
            peer=self.resolve_peer(chat_id),
            msg_id=message_id,
            data=callback_data
        ),
        retries=0,
        timeout=10  # bots must answer within 10 seconds
    )
Use this method to request a callback answer from bots. This is the equivalent of clicking an inline button containing callback data. Args: chat_id (``int`` | ``str``): Unique identifier (int) or username (str) of the target chat. For your personal cloud (Saved Messages) you can simply use "me" or "self". For a contact that exists in your Telegram address book you can use his phone number (str). message_id (``int``): The message id the inline keyboard is attached on. callback_data (``bytes``): Callback data associated with the inline button you want to get the answer from. Returns: The answer containing info useful for clients to display a notification at the top of the chat screen or as an alert. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. ``TimeoutError`` if the bot fails to answer within 10 seconds.
def transform(row, table):
    'Transform row "link" into full URL and add "state" based on "name"'
    data = row._asdict()
    data["link"] = urljoin("https://pt.wikipedia.org", data["link"])
    # The module-level regexp splits "City (ST)"-style names into parts.
    data["name"], data["state"] = regexp_city_state.findall(data["name"])[0]
    return data
Transform row "link" into full URL and add "state" based on "name"
def check_type_declaration(parameter_names, parameter_types):
    """Check that exactly the given parameter names have declared types.

    :param parameter_names: names of the parameters in the method declaration
    :param parameter_types: declared parameter type by name
    :raises ValueError: on a count mismatch or a missing declaration
        (ValueError subclasses Exception, so existing broad handlers still work)
    """
    if len(parameter_names) != len(parameter_types):
        raise ValueError("Number of method parameters ({}) does not match number of "
                         "declared types ({})"
                         .format(len(parameter_names), len(parameter_types)))
    for parameter_name in parameter_names:
        if parameter_name not in parameter_types:
            raise ValueError("Parameter '{}' does not have a declared type".format(parameter_name))
Checks that exactly the given parameter names have declared types. :param parameter_names: The names of the parameters in the method declaration :type parameter_names: list[str] :param parameter_types: Parameter type by name :type parameter_types: dict[str, type]
def _set_group_selection(self):
    """Create a group-based selection when grouping by in-axis columns.

    Used when the selection is not passed directly but via a grouper.
    NOTE: this should be paired with a call to _reset_group_selection.
    """
    grp = self.grouper
    # Only applies for as_index grouping on a 2-D object with no explicit
    # selection already set.
    if not (self.as_index and
            getattr(grp, 'groupings', None) is not None and
            self.obj.ndim > 1 and
            self._group_selection is None):
        return
    ax = self.obj._info_axis
    # Exclude in-axis, non-level groupers from the selection.
    groupers = [g.name for g in grp.groupings
                if g.level is None and g.in_axis]
    if len(groupers):
        self._group_selection = ax.difference(Index(groupers),
                                              sort=False).tolist()
        self._reset_cache('_selected_obj')
Create group based selection. Used when selection is not passed directly but instead via a grouper. NOTE: this should be paired with a call to _reset_group_selection
def bin_stream(stream, content_type, status='200 OK', headers=None):
    """Construct a binary WbResponse for *stream*.

    :param stream: the response body stream
    :param content_type: the Content-Type of the response
    :param status: the HTTP status line
    :param headers: optional additional (name, value) header pairs
    :return: WbResponse wrapping the binary stream
    """
    def_headers = [('Content-Type', content_type)]
    if headers:
        def_headers += headers
    status_headers = StatusAndHeaders(status, def_headers)
    return WbResponse(status_headers, value=stream)
Utility method for constructing a binary response. :param Any stream: The response body stream :param str content_type: The content-type of the response :param str status: The HTTP status line :param list[tuple[str, str]] headers: Additional headers for this response :return: WbResponse that is a binary stream :rtype: WbResponse
def hello_message(self, invert_hello=False):
    """Return the hello/header message.

    :param invert_hello: when True, return a byte-reversed copy instead of
        the original header bytes.
    :return: bytes
    """
    if invert_hello is False:
        return self.__gouverneur_message
    return bytes(reversed(self.__gouverneur_message))
Return message header. :param invert_hello: whether to return the original header (in case of False value) or reversed \ one (in case of True value). :return: bytes
def load_tlds():
    """Load all legal TLD extensions from the bundled IANA list, lowercased,
    skipping the header line and internationalized ('--') entries."""
    file = os.path.join(os.path.dirname(__file__), 'assets',
                        'tlds-alpha-by-domain.txt')
    with open(file) as fobj:
        return [elem for elem in fobj.read().lower().splitlines()[1:]
                if "--" not in elem]
Load all legal TLD extensions from assets
def get_coordinate_system(self):
    """Check self.Data for available coordinate systems.

    Returns
    -------
    (initial_coordinate, coordinate_list) : (str, list)
        e.g. ('geographic', ['specimen', 'geographic'])
    """
    coordinate_list = ['specimen']
    initial_coordinate = 'specimen'
    for specimen in self.specimens:
        if 'geographic' not in coordinate_list and self.Data[specimen]['zijdblock_geo']:
            coordinate_list.append('geographic')
            initial_coordinate = 'geographic'
        if 'tilt-corrected' not in coordinate_list and self.Data[specimen]['zijdblock_tilt']:
            coordinate_list.append('tilt-corrected')
    return initial_coordinate, coordinate_list
Check self.Data for available coordinate systems. Returns --------- initial_coordinate, coordinate_list : str, list i.e., 'geographic', ['specimen', 'geographic']
def xpath(self, *args, **kwargs):
    """Perform XPath on the passage XML, defaulting smart_strings to False.

    :param args: ordered arguments for etree._Element().xpath()
    :param kwargs: named arguments for the same
    :return: result list
    """
    kwargs.setdefault("smart_strings", False)
    return self.resource.xpath(*args, **kwargs)
Perform XPath on the passage XML :param args: Ordered arguments for etree._Element().xpath() :param kwargs: Named arguments :return: Result list :rtype: list(etree._Element)
def read_ucs2(self, num_chars):
    """Read *num_chars* UCS-2 characters (2 bytes each) from the stream and
    decode them to a str."""
    buf = readall(self, num_chars * 2)
    return ucs2_codec.decode(buf)[0]
Reads num_chars UCS2 string from the stream
def get_cached_manylinux_wheel(self, package_name, package_version, disable_progress=False):
    """Return the path of a locally cached manylinux wheel, downloading it
    when missing; None when no wheel URL exists or the file is not a valid zip."""
    cached_wheels_dir = os.path.join(tempfile.gettempdir(), 'cached_wheels')
    if not os.path.isdir(cached_wheels_dir):
        os.makedirs(cached_wheels_dir)
    wheel_file = '{0!s}-{1!s}-{2!s}'.format(package_name, package_version,
                                            self.manylinux_wheel_file_suffix)
    wheel_path = os.path.join(cached_wheels_dir, wheel_file)
    # A cached file that is not a valid zip is treated as missing.
    if not os.path.exists(wheel_path) or not zipfile.is_zipfile(wheel_path):
        wheel_url = self.get_manylinux_wheel_url(package_name, package_version)
        if not wheel_url:
            return None
        print(" - {}=={}: Downloading".format(package_name, package_version))
        with open(wheel_path, 'wb') as f:
            self.download_url_with_progress(wheel_url, f, disable_progress)
        # Reject corrupt/partial downloads rather than caching them.
        if not zipfile.is_zipfile(wheel_path):
            return None
    else:
        print(" - {}=={}: Using locally cached manylinux wheel".format(package_name, package_version))
    return wheel_path
Gets the locally stored version of a manylinux wheel. If one does not exist, the function downloads it.
def no_spikes(tolerance):
    """Return a predicate accepting curves with fewer than *tolerance*
    spiky samples (samples whose value despiking would change)."""
    def no_spikes(curve):
        # Count samples where despiking altered the value.
        diff = np.abs(curve - curve.despike())
        return np.count_nonzero(diff) < tolerance
    return no_spikes
Arg ``tolerance`` is the number of spiky samples allowed.
def _is_child(self, parent, child): parent_parts = tuple(self._split_table_name(parent)) child_parts = tuple(self._split_table_name(child)) if parent_parts == child_parts: return False return parent_parts == child_parts[: len(parent_parts)]
Returns whether a key is strictly a child of another key. AoT siblings are not considered children of one another.
def nearest_intersection_idx(a, b):
    """Indices of the points just before two lines (sharing x values) cross.

    Parameters
    ----------
    a, b : array-like
        1-D arrays of y-values for the two lines.

    Returns
    -------
    ndarray of indices just before each sign change of ``a - b``.
    """
    sign_flips = np.diff(np.sign(a - b))
    return np.nonzero(sign_flips)[0]
Determine the index of the point just before two lines with common x values. Parameters ---------- a : array-like 1-dimensional array of y-values for line 1 b : array-like 1-dimensional array of y-values for line 2 Returns ------- An array of indexes representing the index of the values just before the intersection(s) of the two lines.
def table(tab):
    """Transactional accessor for an IPTables table.

    Nested uses share one open table; only the outermost access refreshes
    once, commits once, and runs with autocommit disabled.
    """
    global open_tables
    if tab in open_tables:
        # Inner (nested) access: reuse the open table, no commit here.
        yield open_tables[tab]
    else:
        open_tables[tab] = iptc.Table(tab)
        open_tables[tab].refresh()
        open_tables[tab].autocommit = False
        yield open_tables[tab]
        open_tables[tab].commit()
        del open_tables[tab]
Access IPTables transactionally in a uniform way. Ensures all access is done without autocommit and that only the outer most task commits, and also ensures we refresh once and commit once.
def finish(self):
    """Jump every active tween to its last frame and clear the registry."""
    for tween_list in self.current_tweens.values():
        for tween in tween_list:
            tween.finish()
    self.current_tweens = {}
jump the the last frame of all tweens
def add_key_value(self, key, value):
    """Add a custom field to the Indicator's batch JSON.

    The key must be the exact name required by the batch schema; date
    fields are normalized, confidence is coerced to int and rating to float.

    Args:
        key (str): field key to add to the JSON batch data
        value (str): field value to add
    """
    key = self._metadata_map.get(key, key)
    if key in ['dateAdded', 'lastModified']:
        self._indicator_data[key] = self._utils.format_datetime(
            value, date_format='%Y-%m-%dT%H:%M:%SZ'
        )
    elif key == 'confidence':
        self._indicator_data[key] = int(value)
    elif key == 'rating':
        self._indicator_data[key] = float(value)
    else:
        self._indicator_data[key] = value
Add custom field to Indicator object. .. note:: The key must be the exact name required by the batch schema. Example:: file_hash = tcex.batch.file('File', '1d878cdc391461e392678ba3fc9f6f32') file_hash.add_key_value('size', '1024') Args: key (str): The field key to add to the JSON batch data. value (str): The field value to add to the JSON batch data.
def top_level_domain(self, tld_type: Optional[TLDType] = None) -> str:
    """Return a random top-level domain, optionally restricted by *tld_type*.

    :param tld_type: a TLDType enum member (or None for any)
    :return: top level domain
    :raises NonEnumerableError: if tld_type is not a TLDType member
    """
    key = self._validate_enum(item=tld_type, enum=TLDType)
    return self.random.choice(TLD[key])
Return random top level domain. :param tld_type: Enum object DomainType :return: Top level domain. :raises NonEnumerableError: if tld_type not in DomainType.
def set_credentials(self, username, password=None, region=None,
                    tenant_id=None, authenticate=False):
    """Set the username and password directly, optionally switching region
    and authenticating immediately."""
    self.username = username
    self.password = password
    self.tenant_id = tenant_id
    if region:
        self.region = region
    if authenticate:
        self.authenticate()
Sets the username and password directly.
def assignrepr_values2(values, prefix):
    """Return a prefixed, aligned string representation of a 2-D value
    matrix, one row per line; continuation lines are padded to the prefix
    width and the final trailing comma is stripped."""
    lines = []
    blanks = ' '*len(prefix)
    for (idx, subvalues) in enumerate(values):
        if idx == 0:
            lines.append('%s%s,' % (prefix, repr_values(subvalues)))
        else:
            lines.append('%s%s,' % (blanks, repr_values(subvalues)))
    # Drop the trailing comma of the final row.
    lines[-1] = lines[-1][:-1]
    return '\n'.join(lines)
Return a prefixed and properly aligned string representation of the given 2-dimensional value matrix using function |repr|. >>> from hydpy.core.objecttools import assignrepr_values2 >>> import numpy >>> print(assignrepr_values2(numpy.eye(3), 'test(') + ')') test(1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0) Functions |assignrepr_values2| works also on empty iterables: >>> print(assignrepr_values2([[]], 'test(') + ')') test()
def _determine_weights(self, other, settings): first_is_used = settings['first']['required'] or \ self.first and other.first first_weight = settings['first']['weight'] if first_is_used else 0 middle_is_used = settings['middle']['required'] or \ self.middle and other.middle middle_weight = settings['middle']['weight'] if middle_is_used else 0 last_is_used = settings['last']['required'] or \ self.last and other.last last_weight = settings['last']['weight'] if last_is_used else 0 return first_weight, middle_weight, last_weight
Return weights of name components based on whether or not they were omitted
def draw_flat_samples(**kwargs):
    """Draw mass pairs uniform in component masses.

    Keyword Args:
        nsamples: number of samples (default 1)
        min_mass: lower mass bound (default 1.)
        max_mass: upper mass bound (default 2.)

    Returns:
        (heavier, lighter) arrays, elementwise max/min of the two draws.
    """
    nsamples = kwargs.get('nsamples', 1)
    lo = kwargs.get('min_mass', 1.)
    hi = kwargs.get('max_mass', 2.)
    first = np.random.uniform(lo, hi, nsamples)
    second = np.random.uniform(lo, hi, nsamples)
    return np.maximum(first, second), np.minimum(first, second)
Draw samples for uniform in mass Parameters ---------- **kwargs: string Keyword arguments as model parameters and number of samples Returns ------- array The first mass array The second mass
def verify(self):
    """Raise a ValueError if the initialisation and simulation time
    grids are inconsistent.

    The initialisation period must enclose the simulation period, both
    grids must share the same stepsize, and the simulation grid must be
    aligned on the initialisation grid.
    """
    self.init.verify()
    self.sim.verify()
    if self.init.firstdate > self.sim.firstdate:
        raise ValueError(
            f'The first date of the initialisation period '
            f'({self.init.firstdate}) must not be later '
            f'than the first date of the simulation period '
            f'({self.sim.firstdate}).')
    if self.init.lastdate < self.sim.lastdate:
        raise ValueError(
            f'The last date of the initialisation period '
            f'({self.init.lastdate}) must not be earlier '
            f'than the last date of the simulation period '
            f'({self.sim.lastdate}).')
    if self.init.stepsize != self.sim.stepsize:
        raise ValueError(
            f'The initialization stepsize ({self.init.stepsize}) '
            f'must be identical with the simulation stepsize '
            f'({self.sim.stepsize}).')
    try:
        # Indexing raises ValueError when the simulation start does not
        # lie on the initialisation grid.
        self.init[self.sim.firstdate]
    except ValueError:
        # Fixed typo in user-facing message: was "alligned".
        raise ValueError(
            'The simulation time grid is not properly '
            'aligned on the initialization time grid.')
Raise a |ValueError| if the different time grids are inconsistent.
def ensure_compatible_admin(view):
    """View decorator that only admits users holding exactly one role.

    Users with zero or several roles are shown the "incompatible admin"
    page instead of the wrapped view.  Other checks could be added, such
    as requiring one prison if in prison-clerk role.
    """
    def wrapper(request, *args, **kwargs):
        roles = request.user.user_data.get('roles', [])
        if len(roles) == 1:
            return view(request, *args, **kwargs)
        context = {
            'message': 'I need to be able to manage user accounts. '
                       'My username is %s' % request.user.username
        }
        return render(request, 'mtp_common/user_admin/incompatible-admin.html',
                      context=context)
    return wrapper
Ensures that the user is in exactly one role. Other checks could be added, such as requiring one prison if in prison-clerk role.
def get_max(qs, field):
    """Return the maximum value of `field` over the queryset, or 0.

    qs: queryset to aggregate over
    field: name of the field to take the maximum of
    """
    aggregated = qs.aggregate(Max(field))
    value = aggregated['%s__max' % field]
    # Aggregating an empty queryset yields None; normalise to 0.
    return value or 0
get max for queryset. qs: queryset field: The field name to max.
def source_files(mongodb_path):
    """Yield paths of C/C++ source files under `mongodb_path`.

    Files inside directories whose path contains 'dbtests' are skipped.
    """
    extensions = ('.cpp', '.c', '.h')
    for dirpath, _dirnames, filenames in os.walk(mongodb_path):
        if 'dbtests' in dirpath:
            continue
        for name in filenames:
            if name.endswith(extensions):
                yield os.path.join(dirpath, name)
Find source files.
def _col_widths2xls(self, worksheets): xls_max_cols, xls_max_tabs = self.xls_max_cols, self.xls_max_tabs dict_grid = self.code_array.dict_grid for col, tab in dict_grid.col_widths: if col < xls_max_cols and tab < xls_max_tabs: pys_width = dict_grid.col_widths[(col, tab)] xls_width = self.pys_width2xls_width(pys_width) worksheets[tab].col(col).width = xls_width
Writes col_widths to xls file Format: <col>\t<tab>\t<value>\n
def partition(a, sz):
    """Split sequence `a` into consecutive chunks of size `sz`.

    The final chunk may be shorter when len(a) is not a multiple of sz.
    """
    chunks = []
    for start in range(0, len(a), sz):
        chunks.append(a[start:start + sz])
    return chunks
Split iterable a into consecutive parts of size sz (the last part may be shorter).
def refresh_toc(self, refresh_done_callback, toc_cache):
    """Initiate a refresh of the parameter TOC.

    `refresh_done_callback` is invoked by the fetcher once the TOC has
    been downloaded (or satisfied from `toc_cache`).
    """
    # Protocol version 4 and later use the V2 TOC access.
    self._useV2 = self.cf.platform.get_protocol_version() >= 4
    fetcher = TocFetcher(self.cf, ParamTocElement, CRTPPort.PARAM,
                         self.toc, refresh_done_callback, toc_cache)
    fetcher.start()
Initiate a refresh of the parameter TOC.
def silhouette(max_iters=100, optimize=True, plot=True):
    """Predict the pose of a figure given a silhouette.

    This is a task from the Agarwal and Triggs 2004 ICML paper.  Returns
    the (optionally optimized) GP regression model, or None when the
    `pods` dataset package is unavailable.  `plot` is accepted but not
    used by this function.
    """
    try:
        import pods
    except ImportError:
        print('pods unavailable, see https://github.com/sods/ods for example datasets')
        return
    data = pods.datasets.silhouette()
    model = GPy.models.GPRegression(data['X'], data['Y'])
    if optimize:
        model.optimize(messages=True, max_iters=max_iters)
    print(model)
    return model
Predict the pose of a figure given a silhouette. This is a task from Agarwal and Triggs 2004 ICML paper.
def _open_ok(self, args):
    """Handle open-ok: the server signals the connection is ready.

    PARAMETERS:
        known_hosts: shortstr
    """
    # Record the server-advertised known hosts, then log them.
    self.known_hosts = args.read_shortstr()
    AMQP_LOGGER.debug('Open OK! known_hosts [%s]' % self.known_hosts)
    return None
signal that the connection is ready This method signals to the client that the connection is ready for use. PARAMETERS: known_hosts: shortstr
def on_trial_complete(self, trial_id, result=None, error=False, early_terminated=False):
    """Report a finished trial back to Nevergrad.

    The reward is negated before being passed on, because Nevergrad
    minimizes while the reward is maximized.  Nothing is reported when
    the trial errored or terminated early (no result).
    """
    candidate = self._live_trial_mapping.pop(trial_id)
    if not result:
        return
    self._nevergrad_opt.tell(candidate, -result[self._reward_attr])
Passes the result to Nevergrad unless early terminated or errored. The result is internally negated when interacting with Nevergrad so that Nevergrad Optimizers can "maximize" this value, as it minimizes on default.
def draw_char_screen(self):
    """Render the buffered characters in char_buffer onto a new screen image.

    Each buffered cell's first item is the character; the remainder is
    used as the fill colour.  Characters are placed on a 6x9 pixel grid,
    then the output device is interrupted.
    """
    # NOTE(review): PIL's Image.new takes (width, height); passing
    # (self.height, self.width) looks swapped -- confirm intent.
    self.screen = Image.new("RGB", (self.height, self.width))
    self.drawer = ImageDraw.Draw(self.screen)
    for row, line in enumerate(self.char_buffer):
        for col, tinfo in enumerate(line):
            self.drawer.text((col * 6, row * 9), tinfo[0], fill=tinfo[1:])
    self.output_device.interrupt()
Draws the output buffered in the char_buffer.
def extract_attr_for_match(items, **kwargs):
    """Return the query attribute's value for the single matching item.

    Exactly one keyword must carry the sentinel value -1; that keyword
    names the attribute whose value is returned.  Every other keyword
    must match the item exactly.  Returns None when nothing matches.

    Example: to extract state of vpc matching given vpc id
        response = [{'State': 'available', 'VpcId': 'vpc-2bb1584c'}]
        extract_attr_for_match(response, State=-1, VpcId='vpc-2bb1584c')
        #=> 'available'
    """
    query_arg = None
    for key, value in kwargs.items():
        if value == -1:
            assert query_arg is None, "Only single query arg (-1 valued) is allowed"
            query_arg = key
    filterset = set(kwargs.keys())
    matches = []
    for item in items:
        assert filterset.issubset(
            item.keys()), "Filter set contained %s which was not in record %s" % (
            filterset.difference(item.keys()), item)
        if all(item[key] == expected
               for key, expected in kwargs.items()
               if key != query_arg):
            matches.append(item[query_arg])
    assert len(matches) <= 1, "%d values matched %s, only allow 1" % (
        len(matches), kwargs)
    return matches[0] if matches else None
Helper method to get attribute value for an item matching some criterion.
Specify target criteria value as dict, with target attribute having value -1
Example: to extract state of vpc matching given vpc id
response = [{'State': 'available', 'VpcId': 'vpc-2bb1584c'}]
extract_attr_for_match(response, State=-1, VpcId='vpc-2bb1584c') #=> 'available'
def _cryptodome_cipher(key, iv):
    """Build a Pycryptodome AES cipher object in CFB mode.

    segment_size=128 makes CFB operate on full 16-byte segments
    (CFB-128) rather than the 8-bit default.

    :param bytes key: Encryption key
    :param bytes iv: Initialization vector
    :returns: AES Cipher instance
    """
    return AES.new(key, AES.MODE_CFB, iv, segment_size=128)
Build a Pycryptodome AES Cipher object. :param bytes key: Encryption key :param bytes iv: Initialization vector :returns: AES Cipher instance
def get_configs(__pkg: str, __name: str = 'config') -> List[str]:
    """Return all existing config file paths for the given package.

    Candidate directories are the user config dir plus each entry of
    $XDG_CONFIG_DIRS (default '/etc/xdg'); they are scanned in reverse
    order so the highest-priority file ends up last.

    Args:
        __pkg: Package name
        __name: Configuration file name
    """
    search_dirs = [user_config(__pkg)]
    xdg_dirs = getenv('XDG_CONFIG_DIRS', '/etc/xdg').split(':')
    search_dirs.extend(
        path.expanduser(path.sep.join([base, __pkg])) for base in xdg_dirs)
    found = []
    for directory in reversed(search_dirs):
        candidate = path.join(directory, __name)
        if path.exists(candidate):
            found.append(candidate)
    return found
Return all configs for given package. Args: __pkg: Package name __name: Configuration file name
def from_datetime(self, dt):
    """Generate a version-1 UUID whose timestamp encodes `dt`.

    :param dt: datetime (naive or timezone-aware)
    :type dt: datetime
    :return: the generated version-1 UUID
    """
    global _last_timestamp
    # Seconds since the Unix epoch, corrected for the datetime's offset.
    epoch = datetime(1970, 1, 1, tzinfo=dt.tzinfo)
    offset = epoch.tzinfo.utcoffset(epoch).total_seconds() if epoch.tzinfo else 0
    seconds = (dt - epoch).total_seconds() - offset
    # Convert to 100-ns intervals since the Gregorian reform epoch
    # (0x01b21dd213814000 == offset between 1582-10-15 and 1970-01-01).
    nanoseconds = int(seconds * 1e9)
    timestamp = int(nanoseconds // 100) + 0x01b21dd213814000
    # Random 14-bit clock sequence, node id from the host.
    import random
    clock_seq = random.randrange(1 << 14)
    time_low = timestamp & 0xffffffff
    time_mid = (timestamp >> 32) & 0xffff
    time_hi_version = (timestamp >> 48) & 0x0fff
    clock_seq_low = clock_seq & 0xff
    clock_seq_hi_variant = (clock_seq >> 8) & 0x3f
    node = getnode()
    return pyUUID(fields=(time_low, time_mid, time_hi_version,
                          clock_seq_hi_variant, clock_seq_low, node),
                  version=1)
generates a UUID for a given datetime :param dt: datetime :type dt: datetime :return:
def sinogram_as_rytov(uSin, u0=1, align=True):
    """Convert a complex wave field sinogram to the Rytov phase.

    Computes ``u0 * (1j*phi + ln|uSin/u0|)`` where ``phi`` is the
    unwrapped phase of ``uSin/u0``.  2D input is unwrapped along the
    last axis; for 3D input each projection is unwrapped individually.
    `align` additionally corrects a constant phase offset.
    """
    field = uSin / u0
    phase = np.angle(field)
    log_amplitude = np.log(np.absolute(field))
    if len(uSin.shape) == 2:
        phase[:] = np.unwrap(phase, axis=-1)
    else:
        # 3D data: unwrap each projection with the 2D phase unwrapper.
        for idx in range(len(phase)):
            phase[idx] = unwrap_phase(phase[idx], seed=47)
    if align:
        align_unwrapped(phase)
    return u0 * (1j * phase + log_amplitude)
r"""Convert the complex wave field sinogram to the Rytov phase This method applies the Rytov approximation to the recorded complex wave sinogram. To achieve this, the following filter is applied: .. math:: u_\mathrm{B}(\mathbf{r}) = u_\mathrm{0}(\mathbf{r}) \ln\!\left( \frac{u_\mathrm{R}(\mathbf{r})}{u_\mathrm{0}(\mathbf{r})} +1 \right) This filter step effectively replaces the Born approximation :math:`u_\mathrm{B}(\mathbf{r})` with the Rytov approximation :math:`u_\mathrm{R}(\mathbf{r})`, assuming that the scattered field is equal to :math:`u(\mathbf{r})\approx u_\mathrm{R}(\mathbf{r})+ u_\mathrm{0}(\mathbf{r})`. Parameters ---------- uSin: 2d or 3d complex ndarray The sinogram of the complex wave :math:`u_\mathrm{R}(\mathbf{r}) + u_\mathrm{0}(\mathbf{r})`. The first axis iterates through the angles :math:`\phi_0`. u0: ndarray of dimension as `uSin` or less, or int. The incident plane wave :math:`u_\mathrm{0}(\mathbf{r})` at the detector. If `u0` is "1", it is assumed that the data is already background-corrected ( `uSin` :math:`= \frac{u_\mathrm{R}(\mathbf{r})}{ u_\mathrm{0}(\mathbf{r})} + 1` ). Note that if the reconstruction distance :math:`l_\mathrm{D}` of the original experiment is non-zero and `u0` is set to 1, then the reconstruction will be wrong; the field is not focused to the center of the reconstruction volume. align: bool Tries to correct for a phase offset in the phase sinogram. Returns ------- uB: 2d or 3d real ndarray The Rytov-filtered complex sinogram :math:`u_\mathrm{B}(\mathbf{r})`. See Also -------- skimage.restoration.unwrap_phase: phase unwrapping
def equation_of_time(day):
    """Compute the equation of time for the given date.

    Uses the formula described at
    https://en.wikipedia.org/wiki/Equation_of_time#Alternative_calculation

    :param day: The datetime.date to compute the equation of time for
    :returns: The angle, in radians, of the Equation of Time
    """
    day_of_year = day.toordinal() - date(day.year, 1, 1).toordinal()
    mean_angle = EARTH_ORIBITAL_VELOCITY * (day_of_year + 10)
    corrected = mean_angle + 1.914 * sin(
        radians(EARTH_ORIBITAL_VELOCITY * (day_of_year - 2)))
    # Project the corrected angle onto the equatorial plane.
    equatorial = degrees(atan2(tan(radians(corrected)), cos(EARTH_AXIS_TILT)))
    half_turns = (mean_angle - equatorial) / 180
    minutes = 720 * (half_turns - int(half_turns + 0.5))
    return radians(minutes)
Compute the equation of time for the given date. Uses formula described at https://en.wikipedia.org/wiki/Equation_of_time#Alternative_calculation :param day: The datetime.date to compute the equation of time for :returns: The angle, in radians, of the Equation of Time
def stylize_comment_block(lines):
    """Parse comment lines, turning indented runs into rST code blocks.

    An indented line arriving while in normal/separator state opens a
    ``.. code-block:: javascript`` directive; the block ends at the next
    non-indented, non-blank line.  Yields the transformed lines.
    """
    normal, sep, in_code = range(3)
    state = normal
    for line in lines:
        indented = line.startswith(' ')
        empty_line = line.strip() == ''
        if state == normal and empty_line:
            # Hold the blank line; it is re-emitted on the next transition.
            state = sep
        elif state in [sep, normal] and indented:
            # Open a code block for the indented run.  (The original
            # re-tested `indented` here with an unreachable else branch.)
            yield ''
            yield '.. code-block:: javascript'
            yield ''
            yield line
            state = in_code
        elif state == sep and not empty_line:
            # Separator ended without indentation: emit the held blank line.
            yield ''
            yield line
            state = normal
        else:
            yield line
        if state == in_code and not (indented or empty_line):
            # BUG FIX: the original assigned `sep = normal`, clobbering the
            # state constant and never leaving code-block state.
            state = normal
Parse comment lines and turn subsequent indented lines into a code block.
def carmichael_of_ppower(pp):
    """Carmichael function lambda(p**a) for the prime power (p, a).

    Powers of two above 4 take half the totient; every other prime
    power equals Euler's totient (p-1)*p**(a-1).
    """
    prime, exponent = pp
    if prime == 2 and exponent > 2:
        return 2 ** (exponent - 2)
    return (prime - 1) * prime ** (exponent - 1)
Carmichael function of the given power of the given prime.
def clean_gff(gff, cleaned, add_chr=False, chroms_to_ignore=None,
              featuretypes_to_ignore=None):
    """Clean a GFF file, writing the result to `cleaned`.

    Features on unwanted chromosomes or of unwanted featuretypes are
    dropped; optionally "chr" is prepended to chromosome names (the
    ignore list is matched against the prefixed name).  Returns the
    path of the cleaned file.
    """
    logger.info("Cleaning GFF")
    skip_chroms = set(chroms_to_ignore or [])
    skip_types = set(featuretypes_to_ignore or [])
    with open(cleaned, 'w') as fout:
        for feature in gffutils.iterators.DataIterator(gff):
            if add_chr:
                feature.chrom = "chr" + feature.chrom
            if feature.chrom in skip_chroms or feature.featuretype in skip_types:
                continue
            fout.write(str(feature) + '\n')
    return cleaned
Cleans a GFF file by removing features on unwanted chromosomes and of unwanted featuretypes. Optionally adds "chr" to chrom names.
def is_scipy_sparse(arr):
    """Check whether `arr` is a scipy.sparse.spmatrix instance.

    The checker is imported lazily on first call and cached in the
    module-level `_is_scipy_sparse`; if scipy is not installed, the
    cached fallback always returns False.
    """
    global _is_scipy_sparse
    if _is_scipy_sparse is None:
        try:
            # Cache scipy's issparse as the checker on first use.
            from scipy.sparse import issparse as _is_scipy_sparse
        except ImportError:
            # scipy unavailable: report everything as non-sparse.
            _is_scipy_sparse = lambda _: False
    return _is_scipy_sparse(arr)
Check whether an array-like is a scipy.sparse.spmatrix instance. Parameters ---------- arr : array-like The array-like to check. Returns ------- boolean Whether or not the array-like is a scipy.sparse.spmatrix instance. Notes ----- If scipy is not installed, this function will always return False. Examples -------- >>> from scipy.sparse import bsr_matrix >>> is_scipy_sparse(bsr_matrix([1, 2, 3])) True >>> is_scipy_sparse(pd.SparseArray([1, 2, 3])) False >>> is_scipy_sparse(pd.SparseSeries([1, 2, 3])) False
def _get_current_userprofile():
    """Return the current user's profile, caching it on flask's `g`.

    Anonymous users receive an AnonymousUserProfile instance.  When no
    profile exists yet for the logged-in user, a fresh (unsaved)
    UserProfile is created.  The result is cached as `g.userprofile`.
    """
    if current_user.is_anonymous:
        return AnonymousUserProfile()
    profile = g.get(
        'userprofile', UserProfile.get_by_userid(current_user.get_id()))
    if profile is None:
        profile = UserProfile(user_id=int(current_user.get_id()))
    g.userprofile = profile
    return profile
Get current user profile. .. note:: If the user is anonymous, then a :class:`invenio_userprofiles.models.AnonymousUserProfile` instance is returned. :returns: The :class:`invenio_userprofiles.models.UserProfile` instance.
def head_object_async(self, path, **kwds):
    """HEAD an object asynchronously.

    Depending on request headers, HEAD returns object properties such as
    Content-Length, Last-Modified, and ETag.  No payload is supported.
    """
    url = self.api_url + path
    return self.do_request_async(url, 'HEAD', **kwds)
HEAD an object. Depending on request headers, HEAD returns various object properties, e.g. Content-Length, Last-Modified, and ETag. Note: No payload argument is supported.
def RetrieveIP4Info(self, ip):
    """Return (IPInfo kind, description) for an IPv4 address.

    Private addresses are reported as internal; otherwise a reverse DNS
    lookup is attempted, with failures yielding a generic description.
    """
    if ip.is_private:
        return (IPInfo.INTERNAL, "Internal IP address.")
    try:
        hostname, _port = socket.getnameinfo((str(ip), 0), socket.NI_NAMEREQD)
    except (socket.error, socket.herror, socket.gaierror):
        return (IPInfo.EXTERNAL, "Unknown IP address.")
    return (IPInfo.EXTERNAL, hostname)
Retrieves information for an IP4 address.
def get_abstract_dependencies(reqs, sources=None, parent=None):
    """Convert each requirement in `reqs` to an AbstractDependency.

    :param reqs: list of requirements (InstallRequirement, Requirement,
        or requirement strings)
    :param sources: Pipfile-formatted sources, optional
    :param parent: parent of this list of dependencies, optional
    :return: list of AbstractDependency objects
    """
    from .requirements import Requirement

    def _to_requirement(req):
        # Normalise the three accepted input shapes to a Requirement.
        if isinstance(req, pip_shims.shims.InstallRequirement):
            converted = Requirement.from_line(
                "{0}{1}".format(req.name, req.specifier))
            if req.link:
                converted.req.link = req.link
            converted.markers = req.markers
            converted.req.markers = req.markers
            converted.extras = req.extras
            converted.req.extras = req.extras
            return converted
        if isinstance(req, Requirement):
            return copy.deepcopy(req)
        return Requirement.from_line(req)

    return [
        AbstractDependency.from_requirement(_to_requirement(req), parent=parent)
        for req in reqs
    ]
Get all abstract dependencies for a given list of requirements. Given a set of requirements, convert each requirement to an Abstract Dependency. :param reqs: A list of Requirements :type reqs: list[:class:`~requirementslib.models.requirements.Requirement`] :param sources: Pipfile-formatted sources, defaults to None :param sources: list[dict], optional :param parent: The parent of this list of dependencies, defaults to None :param parent: :class:`~requirementslib.models.requirements.Requirement`, optional :return: A list of Abstract Dependencies :rtype: list[:class:`~requirementslib.models.dependency.AbstractDependency`]
def add_config_opts_to_parser(parser):
    """Add configuration-file options to the given argument parser."""
    parser.add_argument(
        "--config-files", type=str, nargs="+", required=True,
        help="A file parsable by pycbc.workflow.WorkflowConfigParser.")
    parser.add_argument(
        "--config-overrides", type=str, nargs="+", default=None,
        metavar="SECTION:OPTION:VALUE",
        help="List of section:option:value combinations to add into the "
             "configuration file.")
Adds options for configuration files to the given parser.
def myanimelist(username):
    """Retrieve a user's animelist scores from MAL.

    Only anime scored > 0 are returned; plan-to-watch entries
    (status "6") are skipped even when scored.

    :param str username: MAL username
    :return: list of {"id": ..., "score": ...} dicts
    :raises MALRateLimitExceededError: on HTTP 429
    :raises InvalidUsernameError: when the user does not exist
    :raises NoAffinityError: when the user has rated no anime
    """
    params = {"u": username, "status": "all", "type": "anime"}
    resp = requests.request("GET", ENDPOINT_URLS.MYANIMELIST, params=params)
    if resp.status_code == 429:
        raise MALRateLimitExceededError("MAL rate limit exceeded")
    soup = bs4.BeautifulSoup(resp.content, "xml")
    all_anime = soup.find_all("anime")
    if not len(all_anime):
        raise InvalidUsernameError("User `{}` does not exist"
                                   .format(username))
    scores = []
    for anime in all_anime:
        # Status "6" == plan-to-watch; never counted.
        if anime.my_status.string == "6":
            continue
        anime_id = int(anime.series_animedb_id.string)
        score = int(anime.my_score.string)
        if score > 0:
            scores.append({"id": anime_id, "score": score})
    if not len(scores):
        raise NoAffinityError("User `{}` hasn't rated any anime"
                              .format(username))
    return scores
Retrieve a users' animelist scores from MAL. Only anime scored > 0 will be returned, and all PTW entries are ignored, even if they are scored. :param str username: MAL username :return: `id`, `score` pairs :rtype: list
def solve_filter(expr, vars):
    """Filter LHS values by evaluating the RHS with each one.

    Returns a lazily-evaluated Result of the LHS values for which the
    RHS evaluates to a true value.
    """
    lhs_values, _ = __solve_for_repeated(expr.lhs, vars)

    def lazy_filter():
        for candidate in repeated.getvalues(lhs_values):
            nested = __nest_scope(expr.lhs, vars, candidate)
            if solve(expr.rhs, nested).value:
                yield candidate

    return Result(repeated.lazy(lazy_filter), ())
Filter values on the LHS by evaluating RHS with each value. Returns any LHS values for which RHS evaluates to a true value.
def walk_tree(self, top=None):
    """Recursively yield the group collections of the file.

    Starting from `top` (the root group when None), yields each group's
    children-values view, depth-first.
    """
    if top is None:
        top = self.rootgrp
    yield top.groups.values()
    for child in top.groups.values():
        # Recurse into each child group.
        yield from self.walk_tree(child)
Navigate all the groups in the file starting from top. If top is None, the root group is used.
def showpath(path):
    """Format a path for displaying.

    Verbose mode shows the absolute path; otherwise a relative path
    with any leading './' stripped.
    """
    if logger.verbose:
        return os.path.abspath(path)
    relative = os.path.relpath(path)
    prefix = os.curdir + os.sep
    if relative.startswith(prefix):
        relative = relative[len(prefix):]
    return relative
Format a path for displaying.
def parse_noaa_line(line):
    """Parse one fixed-width line of the (old) NOAA station list.

    Format:
    NUMBER NAME & STATE/COUNTRY LAT LON ELEV (meters)
    010250 TROMSO NO 6941N 01855E 10
    """
    return {
        'station_name': line[7:51].strip(),
        'station_code': line[0:6],
        'CC': line[55:57],
        'ELEV(m)': int(line[73:78]),
        'LAT': _mlat(line[58:64]),
        'LON': _mlon(line[65:71]),
        'ST': line[52:54],
    }
Parse NOAA stations. This is an old list, the format is: NUMBER NAME & STATE/COUNTRY LAT LON ELEV (meters) 010250 TROMSO NO 6941N 01855E 10
def write(self, magicc_input, filepath):
    """Write a MAGICC input file from the data and metadata.

    Parameters
    ----------
    magicc_input : :obj:`pymagicc.io.MAGICCData`
        MAGICCData object which holds the data to write
    filepath : str
        Filepath of the file to write to.
    """
    self._filepath = filepath
    # Work on a copy so the caller's object is never mutated.
    self.minput = deepcopy(magicc_input)
    self.data_block = self._get_data_block()

    buffer = StringIO()
    buffer = self._write_header(buffer)
    buffer = self._write_namelist(buffer)
    buffer = self._write_datablock(buffer)

    with open(filepath, "w", encoding="utf-8",
              newline=self._newline_char) as output_file:
        buffer.seek(0)
        copyfileobj(buffer, output_file)
Write a MAGICC input file from df and metadata Parameters ---------- magicc_input : :obj:`pymagicc.io.MAGICCData` MAGICCData object which holds the data to write filepath : str Filepath of the file to write to.
def bbox_horz_aligned(box1, box2):
    """Return True if the boxes' shrunken vertical ranges overlap.

    Each box's vertical extent is shrunk by 1.5 on both sides before
    the overlap test; falsy inputs yield False.
    """
    if not (box1 and box2):
        return False
    # Shrink each box vertically to tolerate slight misalignment.
    top1, bottom1 = box1.top + 1.5, box1.bottom - 1.5
    top2, bottom2 = box2.top + 1.5, box2.bottom - 1.5
    return top1 <= bottom2 and top2 <= bottom1
Returns true if the vertical center point of either span is within the vertical range of the other