code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def set_language(self, language):
    """Set ``self.language`` to the internal language representation code.

    ``language`` may be an ISO code string (validated via
    ``languages.getlang``) or a ``languages.Language`` object.

    :raises TypeError: if a string code is not a known language.
    """
    if isinstance(language, str):
        found = languages.getlang(language)
        if not found:
            raise TypeError("Language code {} not found".format(language))
        self.language = found.code
    if isinstance(language, languages.Language):
        self.language = language.code
Set self.language to internal lang. repr. code from str or Language object.
def weave_on(advices, pointcut=None, ctx=None, depth=1, ttl=None):
    """Decorator that weaves *advices* onto the decorated callable.

    Parameters mirror :func:`weave`; the decorated target is returned
    unchanged so it remains directly usable.
    """
    def decorator(target):
        # Delegate the actual weaving, then hand the target back untouched.
        weave(target=target, advices=advices, pointcut=pointcut,
              ctx=ctx, depth=depth, ttl=ttl)
        return target
    return decorator
Decorator for weaving advices on a callable target. :param pointcut: condition for weaving advices on a joinpoint. The condition depends on its type. :param ctx: target ctx (instance or class). :type pointcut: - NoneType: advices are weaved on target. - str: target name is compared to pointcut regex. - function: called with target in parameter, if True, advices will be weaved on target. :param depth: class weaving depth :type depth: int :param public: (default True) weave only on public members :type public: bool
def index(in_cram, config):
    """Ensure the CRAM file has a .crai index file, creating it if stale."""
    out_file = in_cram + ".crai"
    if utils.file_uptodate(out_file, in_cram):
        return out_file
    with file_transaction(config, in_cram + ".crai") as tx_out_file:
        # Index against a transactional symlink so a failed run leaves
        # no partial index next to the real file.
        tx_in_file = os.path.splitext(tx_out_file)[0]
        utils.symlink_plus(in_cram, tx_in_file)
        do.run("samtools index {tx_in_file}".format(tx_in_file=tx_in_file),
               "Index CRAM file")
    return out_file
Ensure CRAM file has a .crai index file.
def _gen_success_message(publish_output):
    """Generate a detailed success message for a published application.

    :param dict publish_output: output from serverlessrepo publish_application
    :return str: detailed success message
    """
    app_id = publish_output.get('application_id')
    details = json.dumps(publish_output.get('details'), indent=2)
    if CREATE_APPLICATION not in publish_output.get('actions'):
        return 'The following metadata of application "{}" has been updated:\n{}'.format(app_id, details)
    return "Created new application with the following metadata:\n{}".format(details)
Generate detailed success message for published applications. Parameters ---------- publish_output : dict Output from serverlessrepo publish_application Returns ------- str Detailed success message
def create_deployment(self, ref, force=False, payload='', auto_merge=False, description='', environment=None):
    """Create a deployment for *ref* (a branch, tag, or sha).

    :param str ref: (required) the ref to deploy
    :param bool force: bypass ahead/behind and commit status checks
    :param str payload: optional JSON payload with extra information
    :param bool auto_merge: merge the default branch into the requested
        deployment branch if necessary
    :param str description: optional short description
    :param str environment: optional target environment name
    :returns: :class:`Deployment <github3.repos.deployment.Deployment>`
        or None when *ref* is falsy or creation failed
    """
    # Local renamed from ``json`` to avoid shadowing the json module.
    response = None
    if ref:
        url = self._build_url('deployments', base_url=self._api)
        data = {'ref': ref, 'force': force, 'payload': payload,
                'auto_merge': auto_merge, 'description': description,
                'environment': environment}
        self._remove_none(data)
        response = self._json(
            self._post(url, data=data, headers=Deployment.CUSTOM_HEADERS), 201)
    return Deployment(response, self) if response else None
Create a deployment. :param str ref: (required), The ref to deploy. This can be a branch, tag, or sha. :param bool force: Optional parameter to bypass any ahead/behind checks or commit status checks. Default: False :param str payload: Optional JSON payload with extra information about the deployment. Default: "" :param bool auto_merge: Optional parameter to merge the default branch into the requested deployment branch if necessary. Default: False :param str description: Optional short description. Default: "" :param str environment: Optional name for the target deployment environment (e.g., production, staging, qa). Default: "production" :returns: :class:`Deployment <github3.repos.deployment.Deployment>`
def addSource(self, sourceUri, weight):
    """Add a news source to the topic page.

    @param sourceUri: uri of the news source to add
    @param weight: importance of the source (typically in range 1 - 50)
    """
    assert isinstance(weight, (float, int)), "weight value has to be a positive or negative integer"
    entry = {"uri": sourceUri, "wgt": weight}
    self.topicPage["sources"].append(entry)
add a news source to the topic page @param sourceUri: uri of the news source to add to the topic page @param weight: importance of the news source (typically in range 1 - 50)
def translate(self):
    """Compile the template to a Python function.

    Returns a callable accepting ``values`` and ``functions`` dicts that
    produces the rendered unicode string.
    """
    expressions, varnames, funcnames = self.expr.translate()
    argnames = [VARIABLE_PREFIX + v for v in varnames]
    argnames += [FUNCTION_PREFIX + f for f in funcnames]
    func = compile_func(
        argnames,
        [ast.Return(ast.List(expressions, ast.Load()))],
    )

    def wrapper_func(values={}, functions={}):
        # Rebuild the prefixed argument mapping expected by ``func``.
        kwargs = {VARIABLE_PREFIX + v: values[v] for v in varnames}
        for f in funcnames:
            kwargs[FUNCTION_PREFIX + f] = functions[f]
        return u''.join(func(**kwargs))

    return wrapper_func
Compile the template to a Python function.
def _active_todos(self): return [todo for todo in self.todolist.todos() if not self._uncompleted_children(todo) and todo.is_active()]
Returns a list of active todos, taking uncompleted subtodos into account. The stored length of the todolist is taken into account, to prevent new todos created by recurrence to pop up as newly activated tasks. Since these todos pop up at the end of the list, we cut off the list just before that point.
def remove_all_listeners(self, event=None):
    """Remove all listeners attached to ``event``.

    With ``event=None``, every listener on every event is dropped.
    """
    if event is None:
        self._events = defaultdict(OrderedDict)
    else:
        self._events[event] = OrderedDict()
Remove all listeners attached to ``event``. If ``event`` is ``None``, remove all listeners on all events.
def delete(self, robj, rw=None, r=None, w=None, dw=None, pr=None, pw=None, timeout=None):
    """Delete an object.

    Abstract transport hook; concrete transports must override this.
    The quorum/timeout parameters mirror the other transport operations.
    """
    raise NotImplementedError
Deletes an object.
def _objects_touch_each_other(self, object1: Object, object2: Object) -> bool: in_vertical_range = object1.y_loc <= object2.y_loc + object2.size and \ object1.y_loc + object1.size >= object2.y_loc in_horizantal_range = object1.x_loc <= object2.x_loc + object2.size and \ object1.x_loc + object1.size >= object2.x_loc touch_side = object1.x_loc + object1.size == object2.x_loc or \ object2.x_loc + object2.size == object1.x_loc touch_top_or_bottom = object1.y_loc + object1.size == object2.y_loc or \ object2.y_loc + object2.size == object1.y_loc return (in_vertical_range and touch_side) or (in_horizantal_range and touch_top_or_bottom)
Returns true iff the objects touch each other.
def _generate_field_with_default(**kwargs): field = kwargs['field'] if callable(field.default): return field.default() return field.default
Only called if field.default != NOT_PROVIDED
def trans_history(self, from_=None, count=None, from_id=None, end_id=None,
                  order=None, since=None, end=None):
    """Return the history of transactions (requires the info privilege).

    :param int or None from_: transaction ID from which the display starts
    :param int or None count: number of transactions to display
    :param int or None from_id: transaction ID from which the display starts
    :param int or None end_id: transaction ID on which the display ends
    :param str or None order: sorting (default 'DESC')
    :param int or None since: start time for the display
    :param int or None end: end time for the display
    """
    params = dict(from_=from_, count=count, from_id=from_id, end_id=end_id,
                  order=order, since=since, end=end)
    return self._trade_api_call('TransHistory', **params)
Returns the history of transactions. To use this method you need a privilege of the info key. :param int or None from_: transaction ID, from which the display starts (default 0) :param int or None count: number of transaction to be displayed (default 1000) :param int or None from_id: transaction ID, from which the display starts (default 0) :param int or None end_id: transaction ID on which the display ends (default inf.) :param str or None order: sorting (default 'DESC') :param int or None since: the time to start the display (default 0) :param int or None end: the time to end the display (default inf.)
def cmd_start(self, argv, help):
    """Start the named instance, running before/after hooks around it."""
    parser = argparse.ArgumentParser(
        prog="%s start" % self.progname,
        description=help,
    )
    instances = self.get_instances(command='start')
    parser.add_argument("instance", nargs=1, metavar="instance",
                        help="Name of the instance from the config.",
                        choices=sorted(instances))
    parser.add_argument("-o", "--override", nargs="*", type=str,
                        dest="overrides", metavar="OVERRIDE",
                        help="Option to override in instance config for startup script (name=value).")
    args = parser.parse_args(argv)
    overrides = self._parse_overrides(args)
    overrides['instances'] = self.instances
    instance = instances[args.instance[0]]
    instance.hooks.before_start(instance)
    start_result = instance.start(overrides)
    instance.hooks.after_start(instance)
    if start_result is None:
        # Start failed or produced nothing to report.
        return
    instance.status()
Starts the instance
def select_candidates(config):
    """Select candidates to download.

    Returns a list of ``(entry, taxonomic group)`` tuples for every
    summary entry that passes the configured filters.
    """
    candidates = []
    for group in config.group:
        summary = get_summary(config.section, group, config.uri,
                              config.use_cache)
        for entry in filter_entries(parse_summary(summary), config):
            candidates.append((entry, group))
    return candidates
Select candidates to download. Parameters ---------- config: NgdConfig Runtime configuration object Returns ------- list of (<candidate entry>, <taxonomic group>)
def config_set(self, parameter, value):
    """Set configuration ``parameter`` to ``value`` (CONFIG SET)."""
    if not isinstance(parameter, str):
        raise TypeError("parameter must be str")
    return wait_ok(self.execute(b'CONFIG', b'SET', parameter, value))
Set a configuration parameter to the given value.
def flag_calls(func):
    """Wrap a function to detect and flag when it gets called.

    The returned wrapper carries a ``called`` attribute, initialized to
    False, reset to False right before each invocation (so a failed call
    leaves it False), and set to True once the call completes.  A
    function that already has a ``called`` attribute is returned
    unchanged, making the decorator idempotent.
    """
    import functools

    if hasattr(func, 'called'):
        # Already wrapped (or deliberately flagged) -- don't wrap twice.
        return func

    # FIX: use functools.wraps instead of copying only __doc__, so
    # __name__, __module__, __qualname__, etc. survive wrapping.
    @functools.wraps(func)
    def wrapper(*args, **kw):
        wrapper.called = False
        out = func(*args, **kw)
        wrapper.called = True
        return out

    wrapper.called = False
    return wrapper
Wrap a function to detect and flag when it gets called. This is a decorator which takes a function and wraps it in a function with a 'called' attribute. wrapper.called is initialized to False. The wrapper.called attribute is set to False right before each call to the wrapped function, so if the call fails it remains False. After the call completes, wrapper.called is set to True and the output is returned. Testing for truth in wrapper.called allows you to determine if a call to func() was attempted and succeeded.
def wait(self):
    """Block until enough time has passed to satisfy the rate limit."""
    now = _monotonic()
    if now < self._ref:
        # Sleep off the remainder of the current interval.
        self.sleep_func(max(0, self._ref - now))
    self._update_ref()
Blocks until the rate is met
def flush_all(self, delay=0, noreply=None):
    """The memcached "flush_all" command.

    Args:
      delay: optional int, seconds to wait before flushing (0 = now).
      noreply: optional bool, True to not wait for the reply
        (defaults to self.default_noreply).

    Returns:
      True when noreply is set, otherwise whether the server said OK.
    """
    if noreply is None:
        noreply = self.default_noreply
    cmd = b'flush_all ' + six.text_type(delay).encode('ascii')
    if noreply:
        cmd = cmd + b' noreply'
    results = self._misc_cmd([cmd + b'\r\n'], b'flush_all', noreply)
    return True if noreply else results[0] == b'OK'
The memcached "flush_all" command. Args: delay: optional int, the number of seconds to wait before flushing, or zero to flush immediately (the default). noreply: optional bool, True to not wait for the reply (defaults to self.default_noreply). Returns: True.
def read_blocking(self):
    """Same as read, except blocks until data is available to be read."""
    while True:
        data = self._read()
        # FIX: compare against None with ``is not`` -- ``!= None``
        # invokes custom __eq__ and is non-idiomatic.
        if data is not None:
            break
    return self._parse_message(data)
Same as read, except blocks until data is available to be read.
def cancelOperation(self):
    """Cancel the ongoing operation, if any.

    Only the first active mode (long-touch, touch, test-condition
    generation) is toggled off.
    """
    for active, toggle in (
            (self.isLongTouchingPoint, self.toggleLongTouchPoint),
            (self.isTouchingPoint, self.toggleTouchPoint),
            (self.isGeneratingTestCondition, self.toggleGenerateTestCondition)):
        if active:
            toggle()
            break
Cancels the ongoing operation if any.
def activate(self, event):
    """Advance to the next value, wrapping around, and announce it."""
    next_index = self._index + 1
    if next_index >= len(self._values):
        next_index = 0
    self._index = next_index
    self._selection = self._values[next_index]
    self.ao2.speak(self._selection)
Change the value.
def get_files(*bases):
    """Yield every file under the given dotted data directories.

    Paths are returned relative to each base's top-level package
    directory.
    """
    here = os.path.dirname(__file__)
    for base in bases:
        basedir, _ = base.split(".", 1)
        root_dir = os.path.join(here, *base.split("."))
        # Length of the prefix to strip from each produced path.
        strip = len(os.path.dirname(root_dir)) + len(basedir) + 2
        for dirpath, _dirs, filenames in os.walk(root_dir):
            for filename in filenames:
                yield os.path.join(basedir, dirpath, filename)[strip:]
List all files in a data directory.
def register_shortcut(self, qaction_or_qshortcut, context, name, add_sc_to_tip=False):
    """Register a QAction or QShortcut with the Spyder main application.

    When ``add_sc_to_tip`` is True the shortcut is added to the action's
    tooltip.
    """
    self.main.register_shortcut(
        qaction_or_qshortcut, context, name, add_sc_to_tip)
Register QAction or QShortcut to Spyder main application. If add_sc_to_tip is True, the shortcut is added to the action's tooltip.
def to_dict(self):
    """Return a dictionary of the job stats.

    Returns:
        dict: Dictionary of the stats.
    """
    fields = ('name', 'id', 'type', 'workflow_id', 'queue', 'start_time',
              'arguments', 'acknowledged', 'func_name', 'hostname',
              'worker_name', 'worker_pid', 'routing_key')
    return {field: getattr(self, field) for field in fields}
Return a dictionary of the job stats. Returns: dict: Dictionary of the stats.
def flavor_extra_set(request, flavor_id, metadata):
    """Set the flavor extra spec keys.

    Returns None without touching the API when *metadata* is empty.
    """
    if not metadata:
        # FIX: check before fetching -- the original performed a needless
        # flavor GET even when there was nothing to set.
        return None
    flavor = _nova.novaclient(request).flavors.get(flavor_id)
    return flavor.set_keys(metadata)
Set the flavor extra spec keys.
def conditions(self) -> Dict[str, Dict[str, Union[float, numpy.ndarray]]]:
    """Nested dictionary containing the values of all condition sequences.

    See the documentation on property |HydPy.conditions| for further
    information.
    """
    result = {}
    for subname in NAMES_CONDITIONSEQUENCES:
        subseqs = getattr(self, subname, ())
        values = {}
        for seq in subseqs:
            # Deep-copy so callers cannot mutate the live sequences.
            values[seq.name] = copy.deepcopy(seq.values)
        if values:
            result[subname] = values
    return result
Nested dictionary containing the values of all condition sequences. See the documentation on property |HydPy.conditions| for further information.
def delete(self):
    """Delete the current instance from the table.

    Assumes the primary key is already present in our data.  If this is
    a brand-new (unsaved) instance, an Exception is raised instead.
    """
    if self._new:
        # FIX: the original used a backslash continuation *inside* the
        # string literal, embedding raw indentation whitespace into the
        # user-visible message.
        raise Exception(
            "This is a new object, %s not in data, indicating this entry "
            "isn't stored." % self.primaryKey)
    (r.table(self.table)
        .get(self._data[self.primaryKey])
        .delete(durability=self.durability)
        .run(self._conn))
    return True
Deletes the current instance. This assumes that we know what we're doing, and have a primary key in our data already. If this is a new instance, then we'll let the user know with an Exception
def _response_item_to_object(self, resp_item):
    """Build a resource object from one JSON response item."""
    model_cls = resources.get_model_class(self.resource_type)
    raw_properties = resp_item[self.resource_type]
    cleaned = helpers.remove_properties_containing_None(raw_properties)
    return model_cls(cleaned)
take json and make a resource out of it
def add_pool(self, pool, match=None):
    """Add a new account pool.

    With ``match=None`` the pool becomes the default pool; otherwise
    ``match`` is a callback invoked per host to decide whether this pool
    should be used for it.  See the account-manager documentation for
    the full acquisition order.

    :type pool: AccountPool
    :param pool: The account pool that is added.
    :type match: callable
    :param match: A callback to check if the pool should be used.
    """
    if match is not None:
        self.pools.append((match, pool))
    else:
        self.default_pool = pool
Adds a new account pool. If the given match argument is None, the pool the default pool. Otherwise, the match argument is a callback function that is invoked to decide whether or not the given pool should be used for a host. When Exscript logs into a host, the account is chosen in the following order: # Exscript checks whether an account was attached to the :class:`Host` object using :class:`Host.set_account()`), and uses that. # If the :class:`Host` has no account attached, Exscript walks through all pools that were passed to :class:`Queue.add_account_pool()`. For each pool, it passes the :class:`Host` to the function in the given match argument. If the return value is True, the account pool is used to acquire an account. (Accounts within each pool are taken in a round-robin fashion.) # If no matching account pool is found, an account is taken from the default account pool. # Finally, if all that fails and the default account pool contains no accounts, an error is raised. Example usage:: def do_nothing(conn): conn.autoinit() def use_this_pool(host): return host.get_name().startswith('foo') default_pool = AccountPool() default_pool.add_account(Account('default-user', 'password')) other_pool = AccountPool() other_pool.add_account(Account('user', 'password')) queue = Queue() queue.account_manager.add_pool(default_pool) queue.account_manager.add_pool(other_pool, use_this_pool) host = Host('localhost') queue.run(host, do_nothing) In the example code, the host has no account attached. As a result, the queue checks whether use_this_pool() returns True. Because the hostname does not start with 'foo', the function returns False, and Exscript takes the 'default-user' account from the default pool. :type pool: AccountPool :param pool: The account pool that is added. :type match: callable :param match: A callback to check if the pool should be used.
def realpath(path):
    """Create the real absolute path for the given path.

    Adds support for ``~`` (user directory) and ``/`` (system root);
    other forms are expanded/absolutized in the usual way.

    Args:
        * path: pathname to use for realpath.

    Returns:
        Platform independent real absolute path.
    """
    if path == '~':
        return userdir
    if path == '/':
        return sysroot
    if path.startswith('~/'):
        return os.path.expanduser(path)
    if path.startswith('./'):
        return os.path.abspath(os.path.join(os.path.curdir, path[2:]))
    # Absolute and bare relative paths both reduce to abspath().
    return os.path.abspath(path)
Create the real absolute path for the given path. Add supports for userdir & / supports. Args: * path: pathname to use for realpath. Returns: Platform independent real absolute path.
def load_and_check(self, base_settings, prompt=None):
    """Load settings from ``base_settings``, then check them.

    Returns:
        (merged settings, True) on success
        (None, False) on failure
    """
    checker = Checker(self.file_name, self.section, self.registry,
                      self.strategy_type, prompt)
    settings = self.load(base_settings)
    if not checker.check(settings):
        return None, False
    return settings, True
Load settings and check them. Loads the settings from ``base_settings``, then checks them. Returns: (merged settings, True) on success (None, False) on failure
def _parameter_sweep(self, parameter_space, kernel_options, device_options, tuning_options):
    """Build a Noodles workflow that sweeps the parameter space in chunks."""
    space = list(parameter_space)
    random.shuffle(space)
    per_thread = int(numpy.ceil(len(space) / float(self.max_threads)))
    chunk_results = [
        lift(self._run_chunk(chunk, kernel_options, device_options,
                             tuning_options))
        for chunk in _chunk_list(space, per_thread)
    ]
    return gather(*chunk_results)
Build a Noodles workflow by sweeping the parameter space
def _ParseSourcePathOption(self, options): self._source_path = self.ParseStringOption(options, self._SOURCE_OPTION) if not self._source_path: raise errors.BadConfigOption('Missing source path.') self._source_path = os.path.abspath(self._source_path)
Parses the source path option. Args: options (argparse.Namespace): command line arguments. Raises: BadConfigOption: if the options are invalid.
def json(self):
    """Load the response body as JSON.

    :raises: :class:`ContentDecodingError` when the body cannot be
        decoded.
    """
    body = self.text
    try:
        return json.loads(body)
    except Exception as exc:
        raise ContentDecodingError(exc)
Load response body as json. :raises: :class:`ContentDecodingError`
def email_secure(self):
    """Return an obfuscated version of the email for display.

    Keeps the first and last character of the local part (masking it
    entirely when it is two characters or shorter) and the full host.
    """
    import re  # kept local, as in the original, to avoid new module deps

    email = self._email
    if not email:
        return ''
    address, host = email.split('@')
    if len(address) <= 2:
        return ('*' * len(address)) + '@' + host
    # FIX: the original character class was [a-zA-z0-9]; the A-z range
    # also matches punctuation such as '_', '[' and '\\', leaving some
    # characters unmasked/incorrectly masked.
    obfuscated = re.sub(r'[a-zA-Z0-9]', '*', address[1:-1])
    return address[:1] + obfuscated + address[-1:] + '@' + host
Obfuscated email used for display
def _download_datasets():
    """Utility to download datasets into the package source tree."""
    def filepath(*parts):
        return abspath(join(dirname(__file__), '..', 'vega_datasets', *parts))

    listing = {}
    for name in DATASETS_TO_DOWNLOAD:
        dataset = Dataset(name)
        target = filepath('_data', dataset.filename)
        print("retrieving data {0} -> {1}".format(dataset.url, target))
        urlretrieve(dataset.url, target)
        listing[name] = '_data/{0}'.format(dataset.filename)
    with open(filepath('local_datasets.json'), 'w') as f:
        json.dump(listing, f, indent=2, sort_keys=True)
Utility to download datasets into package source
def _timed_process(self, *args, **kwargs): for processor in self._processors: start_time = _time.process_time() processor.process(*args, **kwargs) process_time = int(round((_time.process_time() - start_time) * 1000, 2)) self.process_times[processor.__class__.__name__] = process_time
Track Processor execution time for benchmarking.
def _clean_doc(self, doc=None):
    """Clean the doc before writing it.

    Strips per-term and per-schema properties (startline, headerlines,
    encoding, altname, transform) that should not be persisted, applying
    any column altname as the column value first.
    """
    if doc is None:
        doc = self.doc
    resources = doc['Resources']
    for arg in ['startline', 'headerlines', 'encoding']:
        # Iterate a copy since we remove while scanning.
        for e in list(resources.args):
            if e.lower() == arg:
                resources.args.remove(e)
    for term in resources:
        term['startline'] = None
        term['headerlines'] = None
        term['encoding'] = None
    schema = doc['Schema']
    for arg in ['altname', 'transform']:
        for e in list(schema.args):
            if e.lower() == arg:
                schema.args.remove(e)
    for table in self.doc.find('Root.Table'):
        for col in table.find('Column'):
            try:
                col.value = col['altname'].value
            except Exception:
                # FIX: was a bare ``except:``, which also swallowed
                # SystemExit/KeyboardInterrupt.  Keep the best-effort
                # semantics for ordinary exceptions only.
                pass
            col['altname'] = None
            col['transform'] = None
    return doc
Clean the doc before writing it, removing unnecessary properties and doing other operations.
def visit_extslice(self, node, parent):
    """Visit an ExtSlice node, returning a fresh instance of it."""
    new_node = nodes.ExtSlice(parent=parent)
    dims = [self.visit(dim, new_node) for dim in node.dims]
    new_node.postinit(dims)
    return new_node
visit an ExtSlice node by returning a fresh instance of it
def get_cp2k_structure(atoms):
    """Convert the atoms structure to a CP2K input file skeleton string."""
    from cp2k_tools.generator import dict2cp2k

    cell = {
        axis: ('[angstrom]',) + tuple(vector)
        for axis, vector in zip(('a', 'b', 'c'), atoms.get_cell() * Bohr)
    }
    cell['periodic'] = 'XYZ'
    coord = {
        'scaled': True,
        '*': [[symbol] + list(position)
              for symbol, position in zip(atoms.get_chemical_symbols(),
                                          atoms.get_scaled_positions())],
    }
    return dict2cp2k({
        'global': {
            'run_type': 'ENERGY_FORCE',
        },
        'force_eval': {
            'subsys': {
                'cell': cell,
                'coord': coord,
            },
            'print': {
                'forces': {
                    'filename': 'forces',
                },
            },
        },
    })
Convert the atoms structure to a CP2K input file skeleton string
def begin_commit():
    """Allow a client to begin a commit and acquire the write lock."""
    session_token = request.headers['session_token']
    repository = request.headers['repository']
    current_user = have_authenticated_user(
        request.environ['REMOTE_ADDR'], repository, session_token)
    if current_user is False:
        return fail(user_auth_fail_msg)
    repository_path = config['repositories'][repository]['path']

    def with_exclusive_lock():
        # All commit bookkeeping happens while holding the repo lock.
        if not can_aquire_user_lock(repository_path, session_token):
            return fail(lock_fail_msg)
        data_store = versioned_storage(repository_path)
        if data_store.get_head() != request.headers["previous_revision"]:
            return fail(need_to_update_msg)
        if data_store.have_active_commit():
            # Stale commit from a dropped client -- roll it back first.
            data_store.rollback()
        data_store.begin()
        update_user_lock(repository_path, session_token)
        return success()

    return lock_access(repository_path, with_exclusive_lock)
Allow a client to begin a commit and acquire the write lock
def ensure_all_columns_are_used(num_vars_accounted_for, dataframe, data_title='long_data'):
    """Warn unless all columns of *dataframe* are accounted for.

    Parameters
    ----------
    num_vars_accounted_for : int.
        Number of variables used in one's function.
    dataframe : pandas dataframe.
        The data being converted between formats.
    data_title : str, optional.
        How `dataframe` is referred to in the UserWarning.

    Returns
    -------
    None.
    """
    num_dataframe_vars = len(set(dataframe.columns.tolist()))
    if num_vars_accounted_for < num_dataframe_vars:
        warnings.warn(
            ("Note, there are {:,} variables in {} but the inputs"
             " ind_vars, alt_specific_vars, and subset_specific_vars only"
             " account for {:,} variables.").format(
                num_dataframe_vars, data_title, num_vars_accounted_for))
    elif num_vars_accounted_for > num_dataframe_vars:
        warnings.warn(
            ("There are more variable specified in ind_vars, "
             "alt_specific_vars, and subset_specific_vars ({:,}) than there"
             " are variables in {} ({:,})").format(
                num_vars_accounted_for, data_title, num_dataframe_vars))
    return None
Ensure that all of the columns from dataframe are in the list of used_cols. Will raise a helpful UserWarning if otherwise. Parameters ---------- num_vars_accounted_for : int. Denotes the number of variables used in one's function. dataframe : pandas dataframe. Contains all of the data to be converted from one format to another. data_title : str, optional. Denotes the title by which `dataframe` should be referred in the UserWarning. Returns ------- None.
def _BuildPluginRequest(self, app_id, challenge_data, origin):
    """Build a JSON sign request in the form that the plugin expects.

    Returns (client_data_map, request_json) where the map is keyed by
    (key_handle_b64, challenge_hash_b64) pairs.
    """
    app_id_hash_encoded = self._Base64Encode(self._SHA256(app_id))
    client_data_map = {}
    encoded_challenges = []
    for challenge_item in challenge_data:
        key = challenge_item['key']
        key_handle_encoded = self._Base64Encode(key.key_handle)
        client_data_json = model.ClientData(
            model.ClientData.TYP_AUTHENTICATION,
            challenge_item['challenge'],
            origin).GetJson()
        challenge_hash_encoded = self._Base64Encode(
            self._SHA256(client_data_json))
        encoded_challenges.append({
            'appIdHash': app_id_hash_encoded,
            'challengeHash': challenge_hash_encoded,
            'keyHandle': key_handle_encoded,
            'version': key.version,
        })
        client_data_map[(key_handle_encoded, challenge_hash_encoded)] = \
            client_data_json
    signing_request = {
        'type': 'sign_helper_request',
        'signData': encoded_challenges,
        'timeoutSeconds': U2F_SIGNATURE_TIMEOUT_SECONDS,
        'localAlways': True,
    }
    return client_data_map, json.dumps(signing_request)
Builds a JSON request in the form that the plugin expects.
def render_embed_css(self, css_embed: Iterable[bytes]) -> bytes:
    """Render the final embedded css for the rendered webpage.

    Override this method in a sub-classed controller to change the
    output.
    """
    joined = b"\n".join(css_embed)
    return b"\n".join([b'<style type="text/css">', joined, b'</style>'])
Default method used to render the final embedded css for the rendered webpage. Override this method in a sub-classed controller to change the output.
def clean_asciidoc(text):
    r"""Transform asciidoc text into ASCII text that NL parsers can handle.

    >>> clean_asciidoc('**Hello** _world_!')
    '"Hello" "world"!'
    """
    # Opening markup ([, _, *) before a word character becomes a quote...
    opening = re.sub(r'(\b|^)[\[_*]{1,2}([a-zA-Z0-9])', r'"\2', text)
    # ...and closing markup after a word character becomes a quote too.
    return re.sub(r'([a-zA-Z0-9])[\]_*]{1,2}', r'\1"', opening)
r""" Transform asciidoc text into ASCII text that NL parsers can handle TODO: Tag lines and words with meta data like italics, underlined, bold, title, heading 1, etc >>> clean_asciidoc('**Hello** _world_!') '"Hello" "world"!'
def apply_template(template, *args, **kw):
    """Recursively apply *args/**kw to every callable in *template*.

    Callables are invoked with the given arguments, strings are returned
    as-is, and Mappings/Iterables are rebuilt (same class) with the
    template applied to each element.  Anything else passes through.
    """
    import collections.abc

    if callable(template):
        return template(*args, **kw)
    if isinstance(template, str):
        return template
    if isinstance(template, collections.abc.Mapping):
        # FIX: ``collections.Mapping``/``Iterable`` were removed in
        # Python 3.10; the ABCs live in collections.abc.  The six
        # indirection (six.callable / six.string_types) is unneeded on
        # Python 3.
        return template.__class__(
            (k, apply_template(v, *args, **kw)) for k, v in template.items())
    if isinstance(template, collections.abc.Iterable):
        return template.__class__(apply_template(v, *args, **kw) for v in template)
    return template
Applies every callable in any Mapping or Iterable
def describe_page_numbers(current_page, total_count, per_page,
                          page_numbers_at_ends=3, pages_numbers_around_current=3):
    """Describe how to display a paginated list's page numbers.

    Page numbers are trimmed to the immediate numbers around the start,
    end, and the current page.

    :param current_page: the current page number (1-based)
    :param total_count: total number of items being paginated
    :param per_page: number of items displayed per page
    :param page_numbers_at_ends: page numbers shown at the beginning/end
    :param pages_numbers_around_current: page numbers shown around the
        current page
    :return: a dictionary describing the page numbers
    """
    if not total_count:
        page_count = 0
        page_numbers = []
        current_items_start = current_items_end = 0
    else:
        page_count = int(math.ceil(1.0 * total_count / per_page))
        if page_count < current_page:
            raise PageNumberOutOfBounds
        page_numbers = get_page_numbers(
            current_page=current_page,
            num_pages=page_count,
            extremes=page_numbers_at_ends,
            arounds=pages_numbers_around_current,
        )
        current_items_start = (current_page * per_page) - per_page + 1
        # Clamp the last page's item range to the real total.
        current_items_end = min((current_items_start + per_page) - 1,
                                total_count)
    return {
        'numbers': [n for n in page_numbers
                    if not isinstance(n, six.string_types)],
        'has_previous': 'previous' in page_numbers,
        'has_next': 'next' in page_numbers,
        'current_page': current_page,
        'previous_page': current_page - 1,
        'next_page': current_page + 1,
        'total_count': total_count,
        'page_count': page_count,
        'per_page': per_page,
        'current_items_start': current_items_start,
        'current_items_end': current_items_end,
    }
Produces a description of how to display a paginated list's page numbers. Rather than just spitting out a list of every page available, the page numbers returned will be trimmed to display only the immediate numbers around the start, end, and the current page. :param current_page: the current page number (page numbers should start at 1) :param total_count: the total number of items that are being paginated :param per_page: the number of items that are displayed per page :param page_numbers_at_ends: the amount of page numbers to display at the beginning and end of the list :param pages_numbers_around_current: the amount of page numbers to display around the currently selected page :return: a dictionary describing the page numbers, relative to the current page
def _merge_pool_kwargs(self, override): base_pool_kwargs = self.connection_pool_kw.copy() if override: for key, value in override.items(): if value is None: try: del base_pool_kwargs[key] except KeyError: pass else: base_pool_kwargs[key] = value return base_pool_kwargs
Merge a dictionary of override values for self.connection_pool_kw. This does not modify self.connection_pool_kw and returns a new dict. Any keys in the override dictionary with a value of ``None`` are removed from the merged dictionary.
def canonicalize_clusters(clusters: DefaultDict[int, List[Tuple[int, int]]]) -> List[List[Tuple[int, int]]]:
    """Merge coreference clusters that share an identical mention span.

    The CONLL 2012 data can annotate the same span under two different
    ids; any clusters containing a common span are merged into one.
    """
    merged: List[Set[Tuple[int, int]]] = []

    def find_overlap(cluster):
        # First already-merged cluster sharing any mention, or None.
        for mention in cluster:
            for candidate in merged:
                if mention in candidate:
                    return candidate
        return None

    for cluster in clusters.values():
        overlap = find_overlap(cluster)
        if overlap is None:
            merged.append(set(cluster))
        else:
            overlap.update(cluster)
    return [list(c) for c in merged]
The CONLL 2012 data includes 2 annotated spans which are identical, but have different ids. This checks all clusters for spans which are identical, and if it finds any, merges the clusters containing the identical spans.
def yaml_force_unicode():
    """Force pyyaml to return unicode values (Python 2 only; no-op on 3)."""
    if sys.version_info[0] != 2:
        return

    def construct_func(self, node):
        return self.construct_scalar(node)

    yaml.Loader.add_constructor(U('tag:yaml.org,2002:str'), construct_func)
    yaml.SafeLoader.add_constructor(U('tag:yaml.org,2002:str'), construct_func)
Force pyyaml to return unicode values.
def try_to_set_up_global_logging():
    """Try to set up the global W&B debug log.

    The log gets re-written by every W&B process.  May fail (returning
    False), e.g. if the current directory isn't user-writable.
    """
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        '%(asctime)s %(levelname)-7s %(threadName)-10s:%(process)d [%(filename)s:%(funcName)s():%(lineno)s] %(message)s')
    if env.is_debug():
        stream_handler = logging.StreamHandler()
        stream_handler.setLevel(logging.DEBUG)
        stream_handler.setFormatter(formatter)
        root.addHandler(stream_handler)
    try:
        # Opening the file is the only step that can realistically fail.
        file_handler = logging.FileHandler(GLOBAL_LOG_FNAME, mode='w')
    except IOError as e:
        termerror('Failed to set up logging: {}'.format(e))
        return False
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(formatter)
    root.addHandler(file_handler)
    return True
Try to set up global W&B debug log that gets re-written by every W&B process. It may fail (and return False) eg. if the current directory isn't user-writable
def list_members(self, name, type="USER", recurse=True, max_results=1000):
    """Look up all the members of a list.

    Args:
        name (str): The name of the list.
        type (str): "USER" to get users, "LIST" to get lists.
        recurse (bool): Whether to recurse into member lists.
        max_results (int): Maximum number of results to return.

    Returns:
        list of strings: names of the members of the list.
    """
    raw = self.client.service.getListMembership(
        name, type, recurse, max_results, self.proxy_id)
    return [entry["member"] for entry in raw]
Look up all the members of a list. Args: name (str): The name of the list type (str): The type of results to return. "USER" to get users, "LIST" to get lists. recurse (bool): Presumably, whether to recurse into member lists when retrieving users. max_results (int): Maximum number of results to return. Returns: list of strings: names of the members of the list
def getPeer(self, url):
    """Select the first peer in the datarepo with the given url.

    Simulates selection by URL; only used during testing.

    :raises exceptions.PeerNotFoundException: when no peer matches.
    """
    # FIX: ``filter`` returns a lazy iterator on Python 3, so the
    # original ``len(peers)`` raised TypeError and ``peers[0]`` failed.
    peers = [peer for peer in self.getPeers() if peer.getUrl() == url]
    if not peers:
        raise exceptions.PeerNotFoundException(url)
    return peers[0]
Select the first peer in the datarepo with the given url simulating the behavior of selecting by URL. This is only used during testing.
def _conv(self, data): if isinstance(data, pd.Series): if data.name is None: data = data.to_frame(name='') else: data = data.to_frame() data = data.fillna('NaN') return data
Convert each input to appropriate for table outplot
def _send_command_list(self, commands): output = "" for command in commands: output += self.device.send_command( command, strip_prompt=False, strip_command=False ) return output
Wrapper for Netmiko's send_command method (for a list of commands).
def process_tomography_programs(process, qubits=None,
                                pre_rotation_generator=tomography.default_rotations,
                                post_rotation_generator=tomography.default_rotations):
    """Yield tomographic sequences wrapping *process* in rotations.

    If ``qubits is None``, all qubits in the program are rotated.

    :param Program process: A Quil program.
    :param list|NoneType qubits: Qubits to generate sequences for.
    :param pre_rotation_generator: Yields tomographic pre-rotations.
    :param post_rotation_generator: Yields tomographic post-rotations.
    :return: Programs for process tomography.
    :rtype: Program
    """
    if qubits is None:
        qubits = process.get_qubits()
    for pre_rotation in pre_rotation_generator(*qubits):
        for post_rotation in post_rotation_generator(*qubits):
            program = Program(Pragma("PRESERVE_BLOCK"))
            program.inst(pre_rotation)
            program.inst(process)
            program.inst(post_rotation)
            program.inst(Pragma("END_PRESERVE_BLOCK"))
            yield program
Generator that yields tomographic sequences that wrap a process encoded by a QUIL program `proc` in tomographic rotations on the specified `qubits`. If `qubits is None`, it assumes all qubits in the program should be tomographically rotated. :param Program process: A Quil program :param list|NoneType qubits: The specific qubits for which to generate the tomographic sequences :param pre_rotation_generator: A generator that yields tomographic pre-rotations to perform. :param post_rotation_generator: A generator that yields tomographic post-rotations to perform. :return: Program for process tomography. :rtype: Program
def WSDLUriToVersion(self, uri):
    """Return the WSDL version related to a WSDL namespace uri.

    :raises ValueError: for an unsupported SOAP envelope uri.
    """
    version = self._wsdl_uri_mapping.get(uri)
    if version is None:
        raise ValueError(
            'Unsupported SOAP envelope uri: %s' % uri
        )
    return version
Return the WSDL version related to a WSDL namespace uri.
def closeEvent(self, event):
    """Persist recent-dataset history and window layout, then accept the
    close event."""
    keep_recent_datasets(self.value('max_dataset_history'), self.info)
    settings.setValue('window/geometry', self.saveGeometry())
    settings.setValue('window/state', self.saveState())
    event.accept()
save the name of the last open dataset.
def get_package_info_from_line(tpip_pkg, line):
    """Extract one piece of package metadata from a single ``Key: Value`` line.

    Matching is attempted in priority order:

    1. keys present in ``TPIP_FIELD_MAPPINGS`` are copied to the mapped field;
    2. a ``version...`` key fills ``PkgVersion`` if not already set;
    3. license-looking lines ('licen' substring) append to ``PkgLicenses``.

    Mutates *tpip_pkg* in place.  Lines without ':' or with an 'unknown'
    value are ignored.  Everything is compared lower-cased.
    """
    lower_line = line.lower()
    try:
        metadata_key, metadata_value = lower_line.split(':', 1)
    except ValueError:
        # No "key: value" separator -- nothing to extract.
        return
    metadata_key = metadata_key.strip()
    metadata_value = metadata_value.strip()
    if metadata_value == 'unknown':
        return
    if metadata_key in TPIP_FIELD_MAPPINGS:
        tpip_pkg[TPIP_FIELD_MAPPINGS[metadata_key]] = metadata_value
        return
    if metadata_key.startswith('version') and not tpip_pkg.get('PkgVersion'):
        tpip_pkg['PkgVersion'] = metadata_value
        return
    if 'licen' in lower_line:
        # Classifier lines look like "Classifier: License :: ... :: MIT";
        # take the last ':'-separated segment as the license name.
        if metadata_key.startswith('classifier') or '::' in metadata_value:
            license = lower_line.rsplit(':')[-1].strip().lower()
            license = license_cleanup(license)
            if license:
                tpip_pkg.setdefault('PkgLicenses', []).append(license)
Given a line of text from metadata, extract semantic info
def _module_iterator(root, recursive=True):
    """Yield *root* and the modules found under its packages.

    Breadth-first walk: each package's sub-modules are yielded, and
    sub-packages are queued for descent only when *recursive* is true.
    """
    yield root
    pending = collections.deque([root])
    while pending:
        package = pending.popleft()
        for path in getattr(package, '__path__', []):
            for finder, name, is_package in pkgutil.iter_modules([path]):
                full_name = '%s.%s' % (package.__name__, name)
                module = sys.modules.get(full_name)
                if module is None:
                    module = _load_module(finder, full_name)
                if is_package and recursive:
                    pending.append(module)
                yield module
Iterate over modules.
def get_constant_state(self):
    """Return the next piece of state written in "first_part" mode.

    Each call consumes one entry of ``self.constant_states`` in order.

    Returns:
      a structure
    """
    index = self.next_constant_state
    self.next_constant_state = index + 1
    return self.constant_states[index]
Read state that was written in "first_part" mode. Returns: a structure
def delete(self, url, headers=None, **kwargs):
    """Send a DELETE request to *url*.

    Extra keyword arguments are URL-encoded and appended as the query
    string.  *headers* is a list of (name, value) pairs; defaults to no
    extra headers.  Returns the response dict from ``self.request``.
    """
    if headers is None:
        headers = []
    if kwargs:
        query = UrlEncoded('?' + _encode(**kwargs), skip_encode=True)
        url = url + query
    return self.request(url, {'method': "DELETE", 'headers': headers})
Sends a DELETE request to a URL. :param url: The URL. :type url: ``string`` :param headers: A list of pairs specifying the headers for the HTTP response (for example, ``[('Content-Type': 'text/cthulhu'), ('Token': 'boris')]``). :type headers: ``list`` :param kwargs: Additional keyword arguments (optional). These arguments are interpreted as the query part of the URL. The order of keyword arguments is not preserved in the request, but the keywords and their arguments will be URL encoded. :type kwargs: ``dict`` :returns: A dictionary describing the response (see :class:`HttpLib` for its structure). :rtype: ``dict``
def convert_to_vertexlist(geometry, **kwargs):
    """Convert a geometry object into pyglet indexed-vertex-list args.

    Dispatches on the duck type of *geometry*: Trimesh, Path, PointCloud,
    or a raw (n, d) ndarray of points.  Raises ValueError for anything else.
    """
    if util.is_instance_named(geometry, 'Trimesh'):
        return mesh_to_vertexlist(geometry, **kwargs)
    if util.is_instance_named(geometry, 'Path'):
        return path_to_vertexlist(geometry, **kwargs)
    if util.is_instance_named(geometry, 'PointCloud'):
        return points_to_vertexlist(geometry.vertices,
                                    colors=geometry.colors,
                                    **kwargs)
    if util.is_instance_named(geometry, 'ndarray'):
        return points_to_vertexlist(geometry, **kwargs)
    raise ValueError('Geometry passed is not a viewable type!')
Try to convert various geometry objects to the constructor args for a pyglet indexed vertex list. Parameters ------------ obj : Trimesh, Path2D, Path3D, (n,2) float, (n,3) float Object to render Returns ------------ args : tuple Args to be passed to pyglet indexed vertex list constructor.
def condition_on_par_knowledge(cov,par_knowledge_dict):
    """Experimental: condition a prior covariance matrix on known parameters.

    :param cov: full parameter covariance matrix (exposes ``row_names``,
        ``zero2d``, ``inv`` and matrix arithmetic).
    :param par_knowledge_dict: maps parameter name -> known variance.
    :raises Exception: if any name in *par_knowledge_dict* is missing
        from ``cov.row_names``.
    :return: the conditioned covariance matrix (cov - term2).
    """
    # Validate that every "known" parameter exists in the covariance matrix.
    missing = []
    for parnme in par_knowledge_dict.keys():
        if parnme not in cov.row_names:
            missing.append(parnme)
    if len(missing):
        raise Exception("par knowledge dict parameters not found: {0}".\
                        format(','.join(missing)))
    # Selection matrix (1.0 on the diagonal of known parameters).
    sel = cov.zero2d
    sigma_ep = cov.zero2d
    for parnme,var in par_knowledge_dict.items():
        idx = cov.row_names.index(parnme)
        sel.x[idx,idx] = 1.0
        sigma_ep.x[idx,idx] = var
    # NOTE(review): sigma_ep is filled but never used below -- confirm
    # whether it should enter term2 (experimental code).
    print(sel)  # debug print left in (experimental)
    term2 = sel * cov * sel.T
    print(term2)  # debug print left in (experimental)
    term2 = term2.inv
    term2 *= sel
    term2 *= cov
    new_cov = cov - term2
    return new_cov
experimental function to include conditional prior information for one or more parameters in a full covariance matrix
def add_scroller_widget(self, ref, left=1, top=1, right=20, bottom=1,
                        direction="h", speed=1, text="Message"):
    """Add a Scroller widget to this screen under key *ref*.

    If a widget with the same *ref* already exists it is returned
    unchanged; the supplied geometry/speed/text are only used when the
    widget is first created.
    """
    if ref not in self.widgets:
        self.widgets[ref] = ScrollerWidget(
            screen=self, ref=ref, left=left, top=top, right=right,
            bottom=bottom, direction=direction, speed=speed, text=text)
    return self.widgets[ref]
Add Scroller Widget
def enable_disable(self):
    """Toggle this endpoint: disable it when enabled and vice versa.

    Writes the inverted state to ``self.data['enabled']`` and pushes the
    change via :meth:`update`.

    :return: None
    """
    self.data['enabled'] = not self.enabled
    self.update()
Enable or disable this endpoint. If enabled, it will be disabled and vice versa. :return: None
def get_experiment_status(port):
    """Return the experiment's status string, or None if the REST probe fails."""
    ok, response = check_rest_server_quick(port)
    if not ok:
        return None
    return json.loads(response.text).get('status')
get the status of an experiment
def filename(self, node):
    """Return the full path for *node*, or None when it has no file.

    A node without a directory, or whose filename is the '~' placeholder,
    has no resolvable file.
    """
    if not node.directory or node.filename == '~':
        return None
    return os.path.join(node.directory, node.filename)
Extension to squaremap api to provide "what file is this" information
def Reorder(x, params, output=None, **kwargs):
    """Re-order tuple *x* according to *output*, a nested tuple of indices.

    With no *output* this is the identity.  Example:
    ``Reorder((x, y, z), output=(1, 0, 2)) == (y, x, z)``.
    *params* and extra keyword arguments are ignored.
    """
    del params, kwargs
    if output is None:
        return x
    return base.nested_map(output, lambda index: x[index])
Reorder a tuple into another tuple. For example, we can re-order (x, y) into (y, x) or even (y, (x, y), y). The output argument specifies how to re-order, using integers that refer to indices in the input tuple. For example, if input = (x, y, z) then Reorder(input, output=(1, 0, 2)) = (y, x, z) Reorder(input, output=(0, 0)) = (x, x) Reorder(input, output=(0, (1, 1))) = (x, (y, y)) Reorder(input, output=((2, 0), (1, 1))) = ((z, x), (y, y)) By default (if no output is given) Reorder does nothing (Identity). Args: x: the input tuple to re-order. params: layer parameters (unused). output: the specification of the output tuple: a nested tuple of ints. **kwargs: other arguments (unused). Returns: The re-ordered tuple with the same shape as output.
def _ratio_scores(parameters_value, clusteringmodel_gmm_good, clusteringmodel_gmm_bad): ratio = clusteringmodel_gmm_good.score([parameters_value]) / clusteringmodel_gmm_bad.score([parameters_value]) sigma = 0 return ratio, sigma
The smaller the ratio, the better.
def size(pathname):
    """Return the size in bytes of the file or folder at *pathname*.

    Folders are sized recursively by summing their contents.

    :raises: os.error if *pathname* is not accessible
    """
    if os.path.isfile(pathname):
        return os.path.getsize(pathname)
    return sum(
        size('{}/{}'.format(pathname, child))
        for child in get_content_list(pathname)
    )
Returns size of a file or folder in Bytes :param pathname: path to file or folder to be sized :type pathname: str :return: size of file or folder in Bytes :rtype: int :raises: os.error if file is not accessible
def delete(self):
    """Handle Del/Backspace: remove the selected text of every cursor."""
    with self._qpart:
        selected = (c for c in self.cursors() if c.hasSelection())
        for cur in selected:
            cur.deleteChar()
Del or Backspace pressed. Delete selection
def getratio(self, code):
    """Return the fraction of *code* removed by the pattern ``self.prog``.

    Empty input yields 0.
    """
    if not code:
        return 0
    stripped = self.prog.sub('', code)
    return (len(code) - len(stripped)) / len(code)
Get the ratio of code matched (and removed) by the pattern.
def _cla_adder_unit(a, b, cin):
    """One carry-lookahead adder unit over wire vectors *a* and *b* with carry-in *cin*.

    Generate/propagate signals depend only on the inputs, so every carry
    can be computed without waiting on the sum bits.  Returns
    (sum_bits, cout); cout feeds the next unit's cin.
    """
    gen = a & b    # carry generate: both bits set
    prop = a ^ b   # carry propagate: exactly one bit set
    assert(len(prop) == len(gen))
    carry = [gen[0] | prop[0] & cin]
    sum_bit = prop[0] ^ cin
    # Group generate/propagate folded across bits 0..i.
    cur_gen = gen[0]
    cur_prop = prop[0]
    for i in range(1, len(prop)):
        cur_gen = gen[i] | (prop[i] & cur_gen)
        cur_prop = cur_prop & prop[i]
        # NOTE(review): assumes pyrtl.concat places its first argument as
        # the most-significant bits -- confirm against pyrtl docs.
        sum_bit = pyrtl.concat(prop[i] ^ carry[i - 1], sum_bit)
        carry.append(gen[i] | (prop[i] & carry[i-1]))
    cout = cur_gen | (cur_prop & cin)
    return sum_bit, cout
Carry generation and propagation signals will be calculated only using the inputs; their values don't rely on the sum. Every unit generates a cout signal which is used as cin for the next unit.
def _has_role(self, organisation_id, role):
    """Return True if the user holds *role* in an approved organisation.

    False when *organisation_id* is None, unknown, or the membership
    record is malformed.
    """
    if organisation_id is None:
        return False
    try:
        record = self.organisations.get(organisation_id, {})
        user_role = record.get('role')
        state = record.get('state')
    except AttributeError:
        return False
    return user_role == role.value and state == State.approved.name
Check the user's role for the organisation
def _retryable_read_command(self, command, value=1, check=True,
                            allowable_errors=None,
                            read_preference=None,
                            codec_options=DEFAULT_CODEC_OPTIONS,
                            session=None, **kwargs):
    """Same as command but used for retryable read commands.

    Resolves the effective read preference (the session's transaction
    read preference when inside a transaction, else PRIMARY) and funnels
    the command through the client's retryable-read machinery.
    """
    if read_preference is None:
        # Inside a transaction the session dictates the read preference.
        read_preference = ((session and session._txn_read_preference())
                           or ReadPreference.PRIMARY)

    def _cmd(session, server, sock_info, slave_ok):
        # Callback invoked by _retryable_read with a live connection.
        return self._command(sock_info, command, slave_ok, value,
                             check, allowable_errors, read_preference,
                             codec_options, session=session, **kwargs)

    return self.__client._retryable_read(
        _cmd, read_preference, session)
Same as command but used for retryable read commands.
def add_field(self, name, fragment_size=150, number_of_fragments=3,
              fragment_offset=None, order="score", type=None):
    """Register a field on this highlighter.

    Only options that are set (non-None; truthy for *fragment_size*) are
    stored in the field's settings; *order* is always recorded.
    """
    settings = {}
    if fragment_size:
        settings['fragment_size'] = fragment_size
    if number_of_fragments is not None:
        settings['number_of_fragments'] = number_of_fragments
    if fragment_offset is not None:
        settings['fragment_offset'] = fragment_offset
    if type is not None:
        settings['type'] = type
    settings['order'] = order
    self.fields[name] = settings
Add a field to the Highlighter.
def get_instance(self, payload):
    """Build an instance of WorkflowInstance from an API payload.

    :param dict payload: Payload response from the API
    :returns: twilio.rest.taskrouter.v1.workspace.workflow.WorkflowInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.workflow.WorkflowInstance
    """
    workspace_sid = self._solution['workspace_sid']
    return WorkflowInstance(self._version, payload, workspace_sid=workspace_sid)
Build an instance of WorkflowInstance :param dict payload: Payload response from the API :returns: twilio.rest.taskrouter.v1.workspace.workflow.WorkflowInstance :rtype: twilio.rest.taskrouter.v1.workspace.workflow.WorkflowInstance
def dump(self):
    """Return a dict of fields that can recreate this profile.

    Merges ``self.other`` with the named attributes so that
    ``Profile(**profile.dump())`` reproduces the profile.
    """
    fields = dict(self.other)
    fields.update(
        name=self.name,
        url=self.url,
        credentials=self.credentials,
        description=self.description,
    )
    return fields
Return a dict of fields that can be used to recreate this profile. For example:: >>> profile = Profile(name="foobar", ...) >>> profile == Profile(**profile.dump()) True Use this value when persisting a profile.
def append(self, entry):
    """Append *entry*'s payload onto this entry's data.

    :raises ValueError: if *entry* cannot be appended to self.
    """
    if self.is_appendable(entry):
        self.data += entry.data
    else:
        raise ValueError('entry not appendable')
Append an entry to self
def get_importer(path_item):
    """Retrieve a PEP 302 importer for *path_item*.

    Checks sys.path_importer_cache first.  On a miss, asks each hook in
    sys.path_hooks and caches the first importer created (or None when no
    hook claims the path).  Whenever the importer is still None -- cached
    or fresh -- an uncached ImpImporter wrapper around the basic import
    machinery is attempted as a fallback.
    """
    try:
        importer = sys.path_importer_cache[path_item]
    except KeyError:
        importer = None
        for path_hook in sys.path_hooks:
            try:
                importer = path_hook(path_item)
            except ImportError:
                continue
            else:
                break
        sys.path_importer_cache.setdefault(path_item, importer)
    if importer is None:
        try:
            importer = ImpImporter(path_item)
        except ImportError:
            importer = None
    return importer
Retrieve a PEP 302 importer for the given path item The returned importer is cached in sys.path_importer_cache if it was newly created by a path hook. If there is no importer, a wrapper around the basic import machinery is returned. This wrapper is never inserted into the importer cache (None is inserted instead). The cache (or part of it) can be cleared manually if a rescan of sys.path_hooks is necessary.
def get_apis(self):
    """Return the set of api name strings referenced in this Registry.

    Collects the non-empty ``api`` attribute of every type, plus the apis
    reported by every feature and extension.

    :return: set of api name strings
    """
    apis = {t.api for t in self.types.values() if t.api}
    for feature in self.features.values():
        apis.update(feature.get_apis())
    for extension in self.extensions.values():
        apis.update(extension.get_apis())
    return apis
Returns set of api names referenced in this Registry :return: set of api name strings
def file_can_be_written(path):
    """Return ``True`` if a file can be written at the given *path*.

    .. warning:: attempts to open *path* in write mode, possibly
       destroying any file previously existing there; the probe file is
       removed on success.
    """
    if path is None:
        return False
    try:
        with io.open(path, "wb"):
            pass
        delete_file(None, path)
        return True
    except (IOError, OSError):
        return False
Return ``True`` if a file can be written at the given ``path``. :param string path: the file path :rtype: bool .. warning:: This function will attempt to open the given ``path`` in write mode, possibly destroying the file previously existing there. .. versionadded:: 1.4.0
def readlines(self, timeout=1):
    """Read all lines that are currently available.

    Stops when ``readline`` returns nothing within *timeout*, or when a
    returned line lacks a trailing newline (a partial read).
    """
    lines = []
    while True:
        line = self.readline(timeout=timeout)
        if line:
            lines.append(line)
        if not line or line[-1:] != '\n':
            break
    return lines
read all lines that are available. abort after timeout when no more data arrives.
def onStart(self, event):
    """Display the environment of a started container.

    Prints the container object and a dict parsed from its
    ``Config.Env`` entries (each entry split on the first ``=``).
    """
    container = event.container
    # Bug fix: the Python 2 print statements used here are syntax errors
    # under Python 3 (which other parts of this codebase require).
    print('+' * 5, 'started:', container)
    env = {}
    for entry in container.attrs['Config']['Env']:
        key, value = entry.split('=', 1)
        env[key] = value
    print(env)
Display the environment of a started container
def refresh(self) -> None:
    """Reload all sources that are registered with this server.

    Unloads every currently-loaded source, then re-reads the registry
    file (if present) and loads each source described in it.
    """
    logger.info('refreshing sources')
    for source in list(self):
        self.unload(source)
    if not os.path.exists(self.__registry_fn):
        return
    with open(self.__registry_fn, 'r') as f:
        # Bug fix: yaml.load without an explicit Loader is deprecated and
        # can construct arbitrary Python objects; the registry is plain
        # data, so safe_load is sufficient and safe.
        registry = yaml.safe_load(f)
    assert isinstance(registry, list)
    for source_description in registry:
        source = Source.from_dict(source_description)
        self.load(source)
    logger.info('refreshed sources')
Reloads all sources that are registered with this server.
def run_locally(self):
    """Run the job locally in a non-blocking way (mirrors the SLURM result).

    Spawns a daemon thread that executes ``self.execute_locally`` and
    stores the thread on ``self.thread``.  Useful for testing.
    """
    worker = threading.Thread(target=self.execute_locally)
    worker.daemon = True
    self.thread = worker
    worker.start()
A convenience method to run the same result as a SLURM job but locally in a non-blocking way. Useful for testing.
def find_existing(self):
    """Search for an existing server instance whose tags match self.tags.

    Queries consul for matching servers and claims the first one whose
    server id can be uniquely added to the namespace; its attributes are
    stored in ``self.server_attrs``.
    """
    instances = self.consul.find_servers(self.tags)
    maxnames = len(instances)
    while instances:
        i = instances.pop(0)
        server_id = i[A.server.ID]
        if self.namespace.add_if_unique(server_id):
            log.info('Found existing server, %s' % server_id)
            self.server_attrs = i
            break
        if len(self.namespace.names) >= maxnames:
            # Every candidate id is already taken -- give up.
            break
        # Id taken but candidates remain: rotate this instance to the
        # back of the queue and keep trying the rest.
        instances.append(i)
Searches for existing server instances with matching tags. To match, the existing instances must also be "running".
def walkSignalPorts(rootPort: LPort):
    """Recursively yield the leaf (child-less) ports under *rootPort*."""
    if rootPort.children:
        for child in rootPort.children:
            yield from walkSignalPorts(child)
    else:
        yield rootPort
recursively walk ports without any children
def handler(ca_file=None):
    """Return an HTTP request handler that verifies TLS against *ca_file*.

    The returned callable takes ``(url, message, **kwargs)``, performs the
    request over HTTPS, and returns a dict with ``status``, ``reason``,
    ``headers`` and a BytesIO ``body``.
    """
    def request(url, message, **kwargs):
        scheme, host, port, path = spliturl(url)
        if scheme != "https":
            # Bug fix: the ValueError was constructed but never raised,
            # so non-https URLs silently fell through to an HTTPS attempt.
            raise ValueError("unsupported scheme: %s" % scheme)
        connection = HTTPSConnection(host, port, ca_file)
        try:
            body = message.get('body', "")
            headers = dict(message.get('headers', []))
            connection.request(message['method'], path, body, headers)
            response = connection.getresponse()
        finally:
            connection.close()
        return {
            'status': response.status,
            'reason': response.reason,
            'headers': response.getheaders(),
            'body': BytesIO(response.read())
        }
    return request
Returns an HTTP request handler configured with the given ca_file.
def addlayer(self, name, srs, geomType):
    """Create a new layer on the vector data source and re-initialize.

    :param name: the layer name
    :param srs: the spatial reference system
    :param geomType: an OGR well-known binary geometry type
    """
    layer_args = (name, srs, geomType)
    self.vector.CreateLayer(*layer_args)
    self.init_layer()
add a layer to the vector layer Parameters ---------- name: str the layer name srs: int, str or :osgeo:class:`osr.SpatialReference` the spatial reference system. See :func:`spatialist.auxil.crsConvert` for options. geomType: int an OGR well-known binary data type. See `Module ogr <https://gdal.org/python/osgeo.ogr-module.html>`_. Returns -------
def bit_reversal(qubits: List[int]) -> Program:
    """Generate a circuit that reverses the bit order of *qubits*.

    :param qubits: Qubits to do bit reversal with.
    :return: A program swapping qubit i with qubit n-1-i.
    """
    program = Program()
    n = len(qubits)
    for low in range(n // 2):
        high = n - 1 - low
        program.inst(SWAP(qubits[low], qubits[high]))
    return program
Generate a circuit to do bit reversal. :param qubits: Qubits to do bit reversal with. :return: A program to do bit reversal.
def get_input(prompt, check, *, redo_prompt=None, repeat_prompt=False):
    """Prompt on the terminal until the response passes *check*.

    :param prompt: base prompt text; allowed choices are appended when
        *check* is a tuple.
    :param check: a str (single choice), a tuple of allowed responses, or
        a callable predicate over the response.
    :param redo_prompt: prompt used after a bad response; defaults to a
        generic "choose from ..." message.
    :param repeat_prompt: if True, reuse the original prompt on retries.
    :raises ValueError: if *check* is neither callable nor a tuple.
    :return: the first response accepted by *check*.
    """
    if isinstance(check, str):
        check = (check,)
    if isinstance(check, tuple):
        # Show the allowed choices; falsy ones (e.g. "") appear as ''.
        shown = [str(item) if item else "''" for item in check]
        prompt += " [{}]: ".format('/'.join(shown))
        def _checker(r):
            return r in check
    elif callable(check):
        # Bug fix: a callable check previously crashed while iterating it
        # to build the choices list; callables now get the bare prompt.
        prompt += ": "
        def _checker(r):
            return check(r)
    else:
        raise ValueError(RESPONSES_ERROR.format(type(check)))
    if repeat_prompt:
        redo_prompt = prompt
    elif not redo_prompt:
        redo_prompt = "Incorrect input, please choose from {}: " \
                      "".format(str(check))
    response = input(prompt)
    while not _checker(response):
        # Bug fix: removed a leftover debug print of the raw response.
        response = input(redo_prompt)
    return response
Ask the user to input something on the terminal level, check their response and ask again if they didn't answer correctly
def get_client_properties_per_page(self, per_page=1000, page=1, params=None):
    """Get one page of client properties.

    Delegates to the generic per-page resource fetcher with the
    CLIENT_PROPERTIES resource.

    :param per_page: How many objects per page. Default: 1000
    :param page: Which page. Default: 1
    :param params: Search parameters. Default: None
    :return: list
    """
    return self._get_resource_per_page(
        resource=CLIENT_PROPERTIES,
        per_page=per_page,
        page=page,
        params=params
    )
Get client properties per page :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :param params: Search parameters. Default: {} :return: list
def class_name_str(obj, skip_parent=False):
    """Return *obj*'s class name as a string.

    Parses the repr of ``type(obj)``, e.g. ``"<class 'pkg.Cls'>"`` ->
    ``"pkg.Cls"``.  With *skip_parent* true, only the part after the
    final dot is kept.
    """
    full_name = str(type(obj)).split(" ")[1][1:-2]
    if skip_parent:
        return full_name.split(".")[-1]
    return full_name
return's object's class name as string
def ecb(base, target):
    """Return the base->target FX rate parsed from the ECB daily feed."""
    api_url = 'http://www.ecb.europa.eu/stats/eurofxref/eurofxref-daily.xml'
    text = requests.get(api_url, timeout=1).text

    def rate_for(symbol):
        # The feed is EUR-based; EUR itself is 1.00 by definition.
        if symbol == 'EUR':
            return decimal.Decimal(1.00)
        matches = re.findall(r"currency='%s' rate='([0-9\.]+)'" % symbol, text)
        return decimal.Decimal(matches[0])

    return rate_for(target) / rate_for(base)
Parse data from European Central Bank.
def create_prj_model(self, ):
    """Create and return a tree model that represents a list of projects.

    :returns: the created model
    :rtype: :class:`jukeboxcore.gui.treemodel.TreeModel`
    :raises: None
    """
    header = treemodel.ListItemData(['Name', 'Short', 'Rootpath'])
    root = treemodel.TreeItem(header)
    for project in djadapter.projects.all():
        # The item presumably registers with its parent on construction;
        # the return value is intentionally unused.
        treemodel.TreeItem(djitemdata.ProjectItemData(project), root)
    return treemodel.TreeModel(root)
Create and return a tree model that represents a list of projects :returns: the creeated model :rtype: :class:`jukeboxcore.gui.treemodel.TreeModel` :raises: None
def _hash_data(hasher, data): _hasher = hasher.copy() _hasher.update(data) return _hasher.finalize()
Generate hash of data using provided hash type. :param hasher: Hasher instance to use as a base for calculating hash :type hasher: cryptography.hazmat.primitives.hashes.Hash :param bytes data: Data to sign :returns: Hash of data :rtype: bytes
def _ensure_filepath(filename): filepath = os.path.dirname(filename) if not os.path.exists(filepath): os.makedirs(filepath)
Ensure that the directory exists before trying to write to the file.