code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def touch_file(self, filename):
    """Create (touch) *filename* inside this directory.

    Returns the new path wrapped in the directory's file class.
    """
    target = self.__file_class__(os.path.join(self, filename))
    target.touch()
    return target
Touch a file in the directory
def writexlsx(self, path, sheetname="default"):
    """Write this table to an .xlsx workbook at *path*.

    The sheet is named *sheetname*; the header row comes from
    ``self.fields`` and the remaining rows from the table itself.
    """
    xlsx_writer = ExcelRW.UnicodeWriter(path)
    xlsx_writer.set_active_sheet(sheetname)
    xlsx_writer.writerow(self.fields)
    xlsx_writer.writerows(self)
    xlsx_writer.save()
Writes this table to an .xlsx file at the specified path. If you'd like to specify a sheetname, you may do so. If you'd like to write one workbook with different DataTables for each sheet, import the `excel` function from acrylic. You can see that code in `utils.py`. Note that the outgoing file is an .xlsx file, so it'd make sense to name that way.
def job_delayed_message(self, job, queue):
    """Build the log line reporting that *job* on *queue* was delayed."""
    context = (
        queue._cached_name,
        job.pk.get(),
        job._cached_identifier,
        job.delayed_until.hget(),
    )
    return '[%s|%s|%s] job delayed until %s' % context
Return the message to log when a job was delayed just before or during its execution
def to_dict(self):
    """Serialize the important properties for the REST API frontend.

    Returns:
        dict: ``{"all_set": bool, "progress": ..., "values": {...}}`` where
        each property from ``worker_mapping()`` maps to its values (or an
        empty list when unset).
    """
    values = {}
    for property_name in worker_mapping().keys():
        values[property_name] = getattr(self, property_name) or []
    return {
        "all_set": self._is_all_set(),
        "progress": self.progress(),
        "values": values,
    }
This method is used in connection with the REST API. It converts all important properties to a dictionary, which may be used by the frontend. Returns: dict: ``{"all_set": bool, "progress": [int(done), int(how_many)], \ "values": {"property": [values], ..}}``
def nodes(self):
    """A |Nodes| collection of all required nodes, including the outlet node."""
    router_nodes = devicetools.Nodes(
        self.node_prefix + number for number in self._router_numbers)
    return router_nodes + devicetools.Node(self.last_node)
A |Nodes| collection of all required nodes. >>> from hydpy import RiverBasinNumbers2Selection >>> rbns2s = RiverBasinNumbers2Selection( ... (111, 113, 1129, 11269, 1125, 11261, ... 11262, 1123, 1124, 1122, 1121)) Note that the required outlet node is added: >>> rbns2s.nodes Nodes("node_1123", "node_1125", "node_11269", "node_1129", "node_113", "node_outlet") It is both possible to change the prefix names of the nodes and the name of the outlet node separately: >>> rbns2s.node_prefix = 'b_' >>> rbns2s.last_node = 'l_node' >>> rbns2s.nodes Nodes("b_1123", "b_1125", "b_11269", "b_1129", "b_113", "l_node")
def _valid_table_name(name): if name[0] not in "_" + string.ascii_letters or not set(name).issubset( "_" + string.ascii_letters + string.digits ): return False else: return True
Verify if a given table name is valid for `rows` Rules: - Should start with a letter or '_' - Letters can be capitalized or not - Accepts letters, numbers and _
def process_nxml(nxml_filename, pmid=None, extra_annotations=None, cleanup=True,
                 add_grounding=True):
    """Process an NXML file using the ISI reader.

    Converts NXML to plain text, preprocesses it, runs the ISI reader and
    processes the output into INDRA Statements.

    Parameters
    ----------
    nxml_filename : str
        NXML file to process.
    pmid : Optional[str]
        PMID of this NXML file, added to the Evidence of extracted Statements.
    extra_annotations : Optional[dict]
        Additional annotations added to the Evidence of all extracted
        Statements ('interaction' keys are reserved for raw ISI output).
    cleanup : Optional[bool]
        If True, remove the temporary preprocessing folder. Default: True.
    add_grounding : Optional[bool]
        If True, map the grounding of the extracted Statements.

    Returns
    -------
    ip : indra.sources.isi.processor.IsiProcessor
        A processor containing the extracted Statements.
    """
    if extra_annotations is None:
        extra_annotations = {}
    pp_dir = tempfile.mkdtemp('indra_isi_pp_output')
    pp = IsiPreprocessor(pp_dir)
    # BUG FIX: the original re-assigned extra_annotations = {} here, silently
    # discarding any annotations passed in by the caller.
    pp.preprocess_nxml_file(nxml_filename, pmid, extra_annotations)
    ip = process_preprocessed(pp)
    if add_grounding:
        ip.add_grounding()
    if cleanup:
        shutil.rmtree(pp_dir)
    else:
        logger.info('Not cleaning up %s' % pp_dir)
    return ip
Process an NXML file using the ISI reader First converts NXML to plain text and preprocesses it, then runs the ISI reader, and processes the output to extract INDRA Statements. Parameters ---------- nxml_filename : str nxml file to process pmid : Optional[str] pmid of this nxml file, to be added to the Evidence object of the extracted INDRA statements extra_annotations : Optional[dict] Additional annotations to add to the Evidence object of all extracted INDRA statements. Extra annotations called 'interaction' are ignored since this is used by the processor to store the corresponding raw ISI output. cleanup : Optional[bool] If True, the temporary folders created for preprocessed reading input and output are removed. Default: True add_grounding : Optional[bool] If True the extracted Statements' grounding is mapped Returns ------- ip : indra.sources.isi.processor.IsiProcessor A processor containing extracted Statements
def score_samples(self, X):
    """Return per-sample log-likelihood and responsibilities under the model.

    Parameters
    ----------
    X : array_like, shape (n_samples, n_features)
        Data points, one per row.

    Returns
    -------
    logprob : array_like, shape (n_samples,)
        Log probability of each data point.
    responsibilities : array_like, shape (n_samples, n_components)
        Posterior probability of each mixture component per observation.
    """
    check_is_fitted(self, 'means_')
    X = check_array(X)
    if X.ndim == 1:
        X = X[:, np.newaxis]
    if X.size == 0:
        return np.array([]), np.empty((0, self.n_components))
    if X.shape[1] != self.means_.shape[1]:
        raise ValueError('The shape of X is not compatible with self')
    log_densities = log_multivariate_normal_density(
        X, self.means_, self.covars_, self.covariance_type)
    weighted = log_densities + np.log(self.weights_)
    logprob = logsumexp(weighted, axis=1)
    responsibilities = np.exp(weighted - logprob[:, np.newaxis])
    return logprob, responsibilities
Return the per-sample likelihood of the data under the model. Compute the log probability of X under the model and return the posterior distribution (responsibilities) of each mixture component for each element of X. Parameters ---------- X: array_like, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. Returns ------- logprob : array_like, shape (n_samples,) Log probabilities of each data point in X. responsibilities : array_like, shape (n_samples, n_components) Posterior probabilities of each mixture component for each observation
def head_tail_middle(src):
    """Split *src* into ``(head, middle, tail)``.

    The middle is always a list for 0/1/2 elements; for longer inputs it is
    the slice ``src[1:-1]`` (same type as the input's slices).  With one item
    the tail is None; with zero items everything is None/empty.
    """
    size = len(src)
    if size == 0:
        return None, [], None
    if size == 1:
        return src[0], [], None
    if size == 2:
        return src[0], [], src[1]
    return src[0], src[1:-1], src[-1]
Returns a tuple consisting of the head of an enumerable, the middle as a list and the tail of the enumerable. If the enumerable is 1 item, the middle will be empty and the tail will be None. >>> head_tail_middle([1, 2, 3, 4]) (1, [2, 3], 4)
def append_dict_key_value(in_dict, keys, value, delimiter=DEFAULT_TARGET_DELIM, ordered_dict=False):
    """Ensure the nested path *keys* exists in *in_dict* and append *value*
    to the list at the end of that path.

    :param dict in_dict: The dictionary to work with.
    :param str keys: Delimited string of one or more keys.
    :param any value: The value to append to the nested list.
    :param str delimiter: Delimiter used in *keys*.
    :param bool ordered_dict: Create OrderedDicts for missing keys.
    :return dict: *in_dict* (also updated in place).
    """
    parent, leaf_key = _dict_rpartition(
        in_dict, keys, delimiter=delimiter, ordered_dict=ordered_dict)
    # A missing or None leaf becomes a fresh list before appending.
    if parent.get(leaf_key) is None:
        parent[leaf_key] = []
    try:
        parent[leaf_key].append(value)
    except AttributeError:
        raise SaltInvocationError('The last key contains a {}, which cannot append.'
                                  ''.format(type(parent[leaf_key])))
    return in_dict
Ensures that in_dict contains the series of recursive keys defined in keys. Also appends `value` to the list that is at the end of `in_dict` traversed with `keys`. :param dict in_dict: The dictionary to work with :param str keys: The delimited string with one or more keys. :param any value: The value to append to the nested dict-key. :param str delimiter: The delimiter to use in `keys`. Defaults to ':'. :param bool ordered_dict: Create OrderedDicts if keys are missing. Default: create regular dicts. :return dict: Though it updates in_dict in-place.
def _prompt_started_hook(self): if not self._reading: self._highlighter.highlighting_on = True self.sig_prompt_ready.emit()
Emit a signal when the prompt is ready.
def figure(bgcolor=(1, 1, 1), size=(1000, 1000)):
    """Create a blank figure.

    Parameters
    ----------
    bgcolor : (3,) float
        Background color, values in [0, 1].
    size : (2,) int
        Figure width and height in pixels.
    """
    scene = Scene(background_color=np.array(bgcolor))
    scene.ambient_light = AmbientLight(color=[1.0, 1.0, 1.0], strength=1.0)
    Visualizer3D._scene = scene
    Visualizer3D._init_size = np.array(size)
Create a blank figure. Parameters ---------- bgcolor : (3,) float Color of the background with values in [0,1]. size : (2,) int Width and height of the figure in pixels.
def bind(self, data_shapes, label_shapes=None, for_training=True, inputs_need_grad=False,
         force_rebind=False, shared_module=None, grad_req='write'):
    """Bind symbols to construct executors for both the main and auxiliary module.

    Required before computation with the SVRGModule.  The auxiliary module is
    only bound when binding for training.  Parameters mirror
    ``mxnet.module.Module.bind``.
    """
    bind_args = (data_shapes, label_shapes, for_training, inputs_need_grad,
                 force_rebind, shared_module, grad_req)
    super(SVRGModule, self).bind(*bind_args)
    if for_training:
        self._mod_aux.bind(*bind_args)
Binds the symbols to construct executors for both two modules. This is necessary before one can perform computation with the SVRGModule. Parameters ---------- data_shapes : list of (str, tuple) Typically is ``data_iter.provide_data``. label_shapes : list of (str, tuple) Typically is ``data_iter.provide_label``. for_training : bool Default is ``True``. Whether the executors should be bound for training. inputs_need_grad : bool Default is ``False``. Whether the gradients to the input data need to be computed. Typically this is not needed. But this might be needed when implementing composition of modules. force_rebind : bool Default is ``False``. This function does nothing if the executors are already bound. But with this ``True``, the executors will be forced to rebind. shared_module : Module Default is ``None``. This is used in bucketing. When not ``None``, the shared module essentially corresponds to a different bucket -- a module with different symbol but with the same sets of parameters (e.g. unrolled RNNs with different lengths).
def validate_unique_slug(self, cleaned_data):
    """Test whether the slug is unique within the configured time period.

    The period (year/month/day) is taken from the entry link style; the
    error message names the narrowest period checked.

    Raises:
        ValidationError: when another entry uses the same slug in the period.
    """
    date_kwargs = {}
    error_msg = _("The slug is not unique")
    pubdate = cleaned_data['publication_date'] or now()
    link_style = appsettings.FLUENT_BLOGS_ENTRY_LINK_STYLE
    if '{year}' in link_style:
        date_kwargs['year'] = pubdate.year
        error_msg = _("The slug is not unique within it's publication year.")
    if '{month}' in link_style:
        date_kwargs['month'] = pubdate.month
        error_msg = _("The slug is not unique within it's publication month.")
    if '{day}' in link_style:
        date_kwargs['day'] = pubdate.day
        error_msg = _("The slug is not unique within it's publication day.")

    dup_filters = self.get_unique_slug_filters(cleaned_data)
    date_range = get_date_range(**date_kwargs)
    if date_range:
        dup_filters['publication_date__range'] = date_range

    dup_qs = EntryModel.objects.filter(**dup_filters)
    if self.instance and self.instance.pk:
        dup_qs = dup_qs.exclude(pk=self.instance.pk)
    if dup_qs.exists():
        raise ValidationError(error_msg)
Test whether the slug is unique within a given time period.
def iri(uri_string):
    """Convert a string to an IRI (wrapped in ``<>``), or return it unchanged
    if it is already wrapped, a variable (``?``) or a blank-node list (``[``).

    Args:
        uri_string: URI in string form.

    Returns:
        Formatted URI wrapped in ``<>``.
    """
    uri_string = str(uri_string)
    if uri_string.startswith("?") or uri_string.startswith("["):
        return uri_string
    if not uri_string.startswith("<"):
        uri_string = "<{}".format(uri_string.strip())
    if not uri_string.endswith(">"):
        uri_string = "{}>".format(uri_string.strip())
    return uri_string
Converts a string to an IRI, or returns an IRI if already formatted. Args: uri_string: uri in string format Returns: formatted uri with <>
def attempt_social_login(self, provider, id):
    """Attempt to log in a user via a social provider id.

    Returns True on success, False when the provider/id is missing or no
    matching user exists.
    """
    if not (provider and id):
        return False
    lookup = {provider.lower() + '_id': id}
    user = self.first(**lookup)
    if not user:
        return False
    self.force_login(user)
    return True
Attempt social login and return boolean result
def exit_proc(self, lineno):
    """Exit the current PROC scope at source line *lineno*.

    Labels of the closing scope are transferred to the global scope unless
    they were explicitly marked LOCAL.  Raises an error when there is no
    current local context (ENDP with no matching PROC) or when a LOCAL
    label was never defined.
    """
    __DEBUG__('Exiting current scope from lineno %i' % lineno)
    # Only the global label table left => stack underflow (no open PROC).
    if len(self.local_labels) <= 1:
        error(lineno, 'ENDP in global scope (with no PROC)')
        return
    for label in self.local_labels[-1].values():
        if label.local:
            # LOCAL labels stay in this scope; they must be defined by now.
            if not label.defined:
                error(lineno, "Undefined LOCAL label '%s'" % label.name)
                return
            continue
        name = label.name
        _lineno = label.lineno
        value = label.value
        # Promote non-LOCAL labels: add them to the global table, or use
        # them to define a previously declared global label.
        if name not in self.global_labels.keys():
            self.global_labels[name] = label
        else:
            self.global_labels[name].define(value, _lineno)
    self.local_labels.pop()
    self.scopes.pop()
Exits current procedure. Local labels are transferred to global scope unless they have been marked as local ones. Raises an error if no current local context (stack underflow)
def _check_request(self, msg):
    """Check that the request json is well-formed.

    :param msg: The request's json data
    :type msg: dict[str, object]
    """
    if "jsonrpc" not in msg:
        raise InvalidRequestError("'\"jsonrpc\": \"2.0\"' must be included.")
    version = msg["jsonrpc"]
    if version != "2.0":
        raise InvalidRequestError("'jsonrpc' must be exactly the string '2.0', but it was '{}'."
                                  .format(version))
    if "method" not in msg:
        raise InvalidRequestError("No method specified.")
    if "id" in msg:
        request_id = msg["id"]
        if request_id is None:
            raise InvalidRequestError("typedjsonrpc does not allow id to be None.")
        if isinstance(request_id, float):
            raise InvalidRequestError("typedjsonrpc does not support float ids.")
        if not isinstance(request_id, (six.string_types, six.integer_types)):
            raise InvalidRequestError("id must be a string or integer; '{}' is of type {}."
                                      .format(request_id, type(request_id)))
    if msg["method"] not in self._name_to_method_info:
        raise MethodNotFoundError("Could not find method '{}'.".format(msg["method"]))
Checks that the request json is well-formed. :param msg: The request's json data :type msg: dict[str, object]
def export_datasource_schema(back_references):
    """Export the datasource YAML schema to stdout."""
    schema = dict_import_export.export_schema_to_dict(
        back_references=back_references)
    yaml.safe_dump(schema, stdout, default_flow_style=False)
Export datasource YAML schema to stdout
def get_system_info(self, x=255, y=255):
    """Discover the integrity and resource availability of a whole SpiNNaker system.

    Reads the P2P routing tables from chip (x, y), derives the system's
    dimensions from the reachable chips and collects ``get_chip_info`` for
    every chip that responds.

    Parameters
    ----------
    x, y : int
        Chip from which exploration begins (default (255, 255)).

    Returns
    -------
    SystemInfo
        {(x, y): ChipInfo, ...} for every working chip.
    """
    p2p_tables = self.get_p2p_routing_table(x, y)
    reachable = [coord for coord, route in iteritems(p2p_tables)
                 if route != consts.P2PTableEntry.none]
    max_x = max(cx for cx, cy in reachable)
    max_y = max(cy for cx, cy in reachable)
    sys_info = SystemInfo(max_x + 1, max_y + 1)
    for cx, cy in reachable:
        try:
            sys_info[(cx, cy)] = self.get_chip_info(cx, cy)
        except SCPError:
            # Unresponsive chips are simply omitted from the result.
            pass
    return sys_info
Discover the integrity and resource availability of a whole SpiNNaker system. This command performs :py:meth:`.get_chip_info` on all working chips in the system returning an enhanced :py:class:`dict` (:py:class:`.SystemInfo`) containing a look-up from chip coordinate to :py:class:`.ChipInfo`. In addition to standard dictionary functionality, :py:class:`.SystemInfo` provides a number of convenience methods, which allow convenient iteration over various aspects of the information stored. .. note:: This method replaces the deprecated :py:meth:`.get_machine` method. To build a :py:class:`~rig.place_and_route.Machine` for place-and-route purposes, the :py:func:`rig.place_and_route.utils.build_machine` utility function may be used with :py:meth:`.get_system_info` like so:: >> from rig.place_and_route.utils import build_machine >> sys_info = mc.get_system_info() >> machine = build_machine(sys_info) Parameters ---------- x : int y : int The coordinates of the chip from which system exploration should begin, by default (255, 255). Most users will not need to change these parameters. Returns ------- :py:class:`.SystemInfo` An enhanced :py:class:`dict` object {(x, y): :py:class:`.ChipInfo`, ...} with a number of utility methods for accessing higher-level system information.
def get_ticker(self):
    """Return the latest ticker information for this book.

    :return: Latest ticker information.
    :rtype: dict
    """
    self._log('get ticker')
    params = {'book': self.name}
    return self._rest_client.get(endpoint='/ticker', params=params)
Return the latest ticker information. :return: Latest ticker information. :rtype: dict
def join_ext(name, extension):
    """Join *name* with *extension*, inserting the ``EXT`` separator when
    the extension does not already start with it.

    An empty extension no longer raises IndexError (the original indexed
    extension[0]); it simply yields ``name + EXT``.
    """
    # assumes EXT is a single character ('.') -- TODO confirm.
    if extension.startswith(EXT):
        return name + extension
    return name + EXT + extension
Joins a given name with an extension. If the extension doesn't have a '.' it will add it for you
def move(self, source, destination):
    """Move *source* to *destination* with unix ``mv``-like semantics.

    Implemented as copy-then-remove; directories are copied recursively.
    """
    if source.isfile():
        source.copy(destination)
        source.remove()
    else:
        source.copy(destination, recursive=True)
        # 'r' presumably requests recursive removal -- TODO confirm against
        # the remove() implementation.
        source.remove('r')
the semantic should be like unix 'mv' command
def latency(self):
    """Measure connection latency by timing a PING/PONG round trip.

    Returns the elapsed seconds, or None when no PONG came back.
    """
    with self.lock:
        self.send('PING %s' % self.server)
        start = self._m_time.time()
        msg = self._recv(expected_replies=('PONG',))
        if msg[0] != 'PONG':
            return None
        return self._m_time.time() - start
Checks the connection latency.
def remove_collection(self, first_arg, sec_arg, third_arg, fourth_arg=None, commit_msg=None):
    """Remove a collection on a branch and attribute the commit to an author.

    Supports two calling conventions:
    - ``(collection_id, branch_name, author)``: the gh_user is parsed from
      the branch name and the current master SHA is used as parent.
    - ``(gh_user, collection_id, parent_sha, author)``.

    Returns the SHA of the commit on the branch.
    """
    if fourth_arg is None:
        collection_id, branch_name, author = first_arg, sec_arg, third_arg
        gh_user = branch_name.split('_collection_')[0]
        parent_sha = self.get_master_sha()
    else:
        gh_user = first_arg
        collection_id = sec_arg
        parent_sha = third_arg
        author = fourth_arg
    if commit_msg is None:
        commit_msg = "Delete Collection '%s' via OpenTree API" % collection_id
    return self._remove_document(gh_user, collection_id, parent_sha, author, commit_msg)
Remove a collection Given a collection_id, branch and optionally an author, remove a collection on the given branch and attribute the commit to author. Returns the SHA of the commit on branch.
def print_pole_mean(mean_dictionary):
    """Pretty-print a Fisher mean and statistics for mean paleomagnetic poles.

    Parameters
    ----------
    mean_dictionary : dict
        Output dictionary of ``pmag.fisher_mean``; the 'dec'/'inc' keys hold
        the pole longitude/latitude.
    """
    plon = round(mean_dictionary['dec'], 1)
    plat = round(mean_dictionary['inc'], 1)
    print('Plon: ' + str(plon) + ' Plat: ' + str(plat))
    print('Number of directions in mean (n): ' + str(mean_dictionary['n']))
    print('Angular radius of 95% confidence (A_95): '
          + str(round(mean_dictionary['alpha95'], 1)))
    print('Precision parameter (k) estimate: '
          + str(round(mean_dictionary['k'], 1)))
Does a pretty job printing a Fisher mean and associated statistics for mean paleomagnetic poles. Parameters ---------- mean_dictionary: output dictionary of pmag.fisher_mean Examples -------- Generate a Fisher mean using ``ipmag.fisher_mean`` and then print it nicely using ``ipmag.print_pole_mean`` >>> my_mean = ipmag.fisher_mean(di_block=[[140,21],[127,23],[142,19],[136,22]]) >>> ipmag.print_pole_mean(my_mean) Plon: 136.3 Plat: 21.3 Number of directions in mean (n): 4 Angular radius of 95% confidence (A_95): 7.3 Precision parameter (k) estimate: 159.7
def backup(path, name=None):
    """Start a backup run via PyHardLinkBackup."""
    # Alias the import so it does not shadow this function's own name.
    from PyHardLinkBackup.phlb.phlb_main import backup as phlb_backup
    phlb_backup(path, name)
Start a Backup run
def search(self, id_key=None, **parameters):
    """Search TVDb for television metadata.

    Dispatches on the available parameters: TVDb id, IMDb id, series+date,
    or series (with optional season/episode).  Yields search results.

    TODO: Consider making parameters for episode ids.
    """
    episode = parameters.get("episode")
    id_tvdb = parameters.get("id_tvdb") or id_key
    id_imdb = parameters.get("id_imdb")
    season = parameters.get("season")
    series = parameters.get("series")
    date = parameters.get("date")
    if id_tvdb:
        for entry in self._search_id_tvdb(id_tvdb, season, episode):
            yield entry
    elif id_imdb:
        for entry in self._search_id_imdb(id_imdb, season, episode):
            yield entry
    elif series and date:
        date_pattern = r"(19|20)\d{2}(-(?:0[1-9]|1[012])(-(?:[012][1-9]|3[01]))?)?"
        if not match(date_pattern, date):
            raise MapiProviderException("Date must be in YYYY-MM-DD format")
        for entry in self._search_series_date(series, date):
            yield entry
    elif series:
        for entry in self._search_series(series, season, episode):
            yield entry
    else:
        raise MapiNotFoundException
Searches TVDb for movie metadata TODO: Consider making parameters for episode ids
def _CaptureExpression(self, frame, expression):
    """Evaluate *expression* in *frame* and capture it into a Variable object.

    Args:
        frame: evaluation context.
        expression: watched expression to compile and evaluate.

    Returns:
        Variable object; carries an error status when evaluation fails.
    """
    ok, value = _EvaluateExpression(frame, expression)
    if not ok:
        return {'name': expression, 'status': value}
    return self.CaptureNamedVariable(expression, value, 0,
                                     self.expression_capture_limits)
Evaluates the expression and captures it into a Variable object. Args: frame: evaluation context. expression: watched expression to compile and evaluate. Returns: Variable object (which will have error status if the expression fails to evaluate).
def get_lang_dict(self):
    """Return the supported languages as a dictionary."""
    response = self.yandex_translate_request("getLangs")
    self.handle_errors(response)
    return response.json()["langs"]
Gets supported languages as a dictionary.
def _get_host_ref(service_instance, host, host_name=None):
    """Return a host object, searching by DNS name first, then by IP.

    When *host_name* is given it is used for the DNS lookup; otherwise
    *host* is.  If the DNS lookup finds nothing, *host* is tried as an IP.
    """
    search_index = salt.utils.vmware.get_inventory(service_instance).searchIndex
    dns_query = host_name if host_name else host
    host_ref = search_index.FindByDnsName(dnsName=dns_query, vmSearch=False)
    if host_ref is None:
        host_ref = search_index.FindByIp(ip=host, vmSearch=False)
    return host_ref
Helper function that returns a host object either from the host location or the host_name. If host_name is provided, that is the host_object that will be returned. The function will first search for hosts by DNS Name. If no hosts are found, it will try searching by IP Address.
def background_estimator(bdata):
    """Estimate the background level and noise of a 2D array by iterative
    3-sigma clipping.

    Pixels further than 3*std from the mean are rejected and the statistics
    recomputed until no more pixels are rejected.  If the final std dropped
    below 0.8 of the initial std the field is considered crowded and the
    mode-like estimator ``2.5*median - 1.5*mean`` is returned instead of
    the mean.

    Returns
    -------
    (background, std) : tuple of floats
    """
    bdata = numpy.asarray(bdata).ravel()
    crowded = False
    std = numpy.std(bdata)
    std0 = std
    mean = bdata.mean()
    while True:
        previous_size = bdata.size
        # BUG FIX: the original used numpy.clip(..., out=bdata) and compared
        # len() before/after -- the length never changes, so the loop always
        # stopped after a single clamp.  Actually reject the outliers.
        bdata = bdata[(bdata >= mean - 3 * std) & (bdata <= mean + 3 * std)]
        if bdata.size == previous_size:
            if std < 0.8 * std0:
                crowded = True
            break
        std = numpy.std(bdata)
        mean = bdata.mean()
    if crowded:
        # Mode estimate for crowded fields (cf. DAOPHOT-style sky estimate).
        median = numpy.median(bdata)
        mean = bdata.mean()
        std = bdata.std()
        return 2.5 * median - 1.5 * mean, std
    return bdata.mean(), bdata.std()
Estimate the background in a 2D array
def add_group_email_grant(self, permission, email_address, headers=None):
    """Grant *permission* to the Google Group *email_address* on this key.

    Fetches the current ACL, appends a group-by-email grant and writes the
    ACL back to GS.

    :type permission: string
    :param permission: The permission being granted (e.g. READ|FULL_CONTROL).
    :type email_address: string
    :param email_address: Email address of the Google Group being granted
        the permission.
    """
    current_acl = self.get_acl(headers=headers)
    current_acl.add_group_email_grant(permission, email_address)
    self.set_acl(current_acl, headers=headers)
Convenience method that provides a quick way to add an email group grant to a key. This method retrieves the current ACL, creates a new grant based on the parameters passed in, adds that grant to the ACL and then PUT's the new ACL back to GS. :type permission: string :param permission: The permission being granted. Should be one of: READ|FULL_CONTROL See http://code.google.com/apis/storage/docs/developer-guide.html#authorization for more details on permissions. :type email_address: string :param email_address: The email address associated with the Google Group to which you are granting the permission.
def _get_pk(self): pk = None if self._lazy_collection['pks']: if len(self._lazy_collection['pks']) > 1: raise ValueError('Too much pks !') pk = list(self._lazy_collection['pks'])[0] return pk
Return None if we don't have any filter on a pk, the pk if we have one, or raise a ValueError if we have more than one. For internal use only.
def maybe_convert_to_index_date_type(index, date):
    """Convert a datetime-like object to *index*'s date type.

    Strings pass through unchanged (partial-datetime string indexing works
    for both index types).  For a pd.DatetimeIndex the result is an
    np.datetime64; otherwise it is an instance of the index's ``date_type``
    (e.g. a cftime.datetime subclass for a CFTimeIndex).

    Parameters
    ----------
    index : pd.Index
        Input time index.
    date : datetime-like object or str
        Input datetime.

    Returns
    -------
    Date of the type appropriate for the time index.
    """
    if isinstance(date, str):
        return date
    if isinstance(index, pd.DatetimeIndex):
        if isinstance(date, np.datetime64):
            return date
        return np.datetime64(str(date))
    date_type = index.date_type
    if isinstance(date, date_type):
        return date
    if isinstance(date, np.datetime64):
        date = date.item()
    if isinstance(date, datetime.date):
        # Promote a plain date to midnight so the components below exist.
        date = datetime.datetime.combine(date, datetime.datetime.min.time())
    return date_type(date.year, date.month, date.day, date.hour, date.minute,
                     date.second, date.microsecond)
Convert a datetime-like object to the index's date type. Datetime indexing in xarray can be done using either a pandas DatetimeIndex or a CFTimeIndex. Both support partial-datetime string indexing regardless of the calendar type of the underlying data; therefore if a string is passed as a date, we return it unchanged. If a datetime-like object is provided, it will be converted to the underlying date type of the index. For a DatetimeIndex that is np.datetime64; for a CFTimeIndex that is an object of type cftime.datetime specific to the calendar used. Parameters ---------- index : pd.Index Input time index date : datetime-like object or str Input datetime Returns ------- date of the type appropriate for the time index of the Dataset
def powerline():
    """Install and set up powerline for vim, bash, tmux, and i3.

    Steps are ordered: install/upgrade powerline first, then fonts, then
    the daemon, then each consumer (vim, bash/powerline-shell, tmux, i3).

    More info:
        https://github.com/powerline/powerline
        https://powerline.readthedocs.io/en/latest/installation.html
    """
    bindings_dir, scripts_dir = install_upgrade_powerline()
    set_up_powerline_fonts()
    set_up_powerline_daemon(scripts_dir)
    powerline_for_vim(bindings_dir)
    powerline_for_bash_or_powerline_shell(bindings_dir)
    powerline_for_tmux(bindings_dir)
    powerline_for_i3(bindings_dir)
    print('\nYou may have to reboot for make changes take effect')
Install and set up powerline for vim, bash, tmux, and i3. It uses pip (python2) and the most up to date powerline version (trunk) from the github repository. More infos: https://github.com/powerline/powerline https://powerline.readthedocs.io/en/latest/installation.html https://github.com/powerline/fonts https://youtu.be/_D6RkmgShvU http://www.tecmint.com/powerline-adds-powerful-statuslines-and-prompts-to-vim-and-bash/
def change_password(self, old_password, new_password):
    """Update the current user's password and return the updated FastlyUser."""
    form = self._formdata(
        {"old_password": old_password, "password": new_password},
        ["old_password", "password"],
    )
    content = self._fetch("/current_user/password", method="POST", body=form)
    return FastlyUser(self, content)
Update the user's password to a new one.
async def message_throttled(self, message: types.Message, throttled: Throttled):
    """Notify the user on the first exceeds and announce unlocking after the
    remaining throttle delay has passed.

    :param message:
    :param throttled:
    """
    handler = current_handler.get()
    dispatcher = Dispatcher.get_current()
    if handler:
        key = getattr(handler, 'throttling_key', f"{self.prefix}_{handler.__name__}")
    else:
        key = f"{self.prefix}_message"
    # Time left until the throttle window expires.
    remaining = throttled.rate - throttled.delta
    if throttled.exceeded_count <= 2:
        await message.reply('Too many requests! ')
    await asyncio.sleep(remaining)
    # Only announce the unlock if no further exceeds happened while waiting.
    current = await dispatcher.check_key(key)
    if current.exceeded_count == throttled.exceeded_count:
        await message.reply('Unlocked.')
Notify user only on first exceed and notify about unlocking only on last exceed :param message: :param throttled:
def _l_cv_weight_factor(self):
    """Return multiplier for L-CV weightings in case of enhanced single site
    analysis.

    Methodology source: Science Report SC050050, eqns. 6.15a and 6.15b.
    """
    # NOTE(review): sqrt(0) suggests the b-term's distance argument is zero
    # for the subject site itself -- confirm against eqn. 6.15a before
    # simplifying it away.
    b = 0.0047 * sqrt(0) + 0.0023 / 2
    c = 0.02609 / (self.catchment.record_length - 1)
    return c / (b + c)
Return multiplier for L-CV weightings in case of enhanced single site analysis. Methodology source: Science Report SC050050, eqn. 6.15a and 6.15b
def _move_cursor_to_line(self, line):
    """Move the cursor to the specified line, if possible.

    If the document has fewer lines than *line*, blank lines are appended
    first so the target line exists.
    """
    # Index of the last existing line (blockCount is a count, not an index).
    last_line = self._text_edit.document().blockCount() - 1
    self._cursor.clearSelection()
    self._cursor.movePosition(self._cursor.End)
    # Pad the document with newlines when the target is beyond the end.
    to_insert = ''
    for i in range(line - last_line):
        to_insert += '\n'
    if to_insert:
        self._cursor.insertText(to_insert)
    # Jump to the start, then walk down to the requested line.
    self._cursor.movePosition(self._cursor.Start)
    self._cursor.movePosition(self._cursor.Down, self._cursor.MoveAnchor, line)
    self._last_cursor_pos = self._cursor.position()
Moves the cursor to the specified line, if possible.
def add_menu(self, name):
    """Add a menu named *name* to the global menu bar and return the menu widget.

    Raises:
        ValueError: if no menu bar is configured.
    """
    menubar = self.menubar
    if menubar is None:
        raise ValueError("No menu bar configured")
    return menubar.add_name(name)
Add a menu with name `name` to the global menu bar. Returns a menu widget.
def _log(self, priority, message, *args, **kwargs): for arg in args: message = message + "\n" + self.pretty_printer.pformat(arg) self.logger.log(priority, message)
Generic log functions
def _ignore_sql(self, query):
    """Return True when the query's sql matches any configured ignore pattern."""
    sql = query.get('sql')
    return any(
        re.search(pattern, sql)
        for pattern in QC_SETTINGS['IGNORE_SQL_PATTERNS']
    )
Check to see if we should ignore the sql query.
def _xxrange(self, start, end, step_count):
    """Generate *step_count* evenly spaced values in [start, end)."""
    step = (end - start) / float(step_count)
    for i in xrange(int(step_count)):
        yield start + i * step
Generate n values between start and end.
def display(self, tool):
    """Display the given tool above the current layer.

    The tool is tracked in the internal stack so it can be managed later.
    """
    self._tools.append(tool)
    self._justDisplay(tool)
Displays the given tool above the current layer, and sets the title to its name.
def cancel_all(self, product_id=None):
    """With best effort, cancel all open orders.

    Args:
        product_id (Optional[str]): Only cancel orders for this product_id.

    Returns:
        list: ids of the canceled orders.
    """
    params = {'product_id': product_id} if product_id is not None else None
    return self._send_message('delete', '/orders', params=params)
With best effort, cancel all open orders. Args: product_id (Optional[str]): Only cancel orders for this product_id Returns: list: A list of ids of the canceled orders. Example:: [ "144c6f8e-713f-4682-8435-5280fbe8b2b4", "debe4907-95dc-442f-af3b-cec12f42ebda", "cf7aceee-7b08-4227-a76c-3858144323ab", "dfc5ae27-cadb-4c0c-beef-8994936fde8a", "34fecfbf-de33-4273-b2c6-baf8e8948be4" ]
def unregister_signals_oaiset(self):
    """Unregister the OAISet lifecycle signal receivers, if connected."""
    from .models import OAISet
    from .receivers import after_insert_oai_set, \
        after_update_oai_set, after_delete_oai_set
    # The three receivers are (dis)connected together; checking one suffices.
    if not contains(OAISet, 'after_insert', after_insert_oai_set):
        return
    remove(OAISet, 'after_insert', after_insert_oai_set)
    remove(OAISet, 'after_update', after_update_oai_set)
    remove(OAISet, 'after_delete', after_delete_oai_set)
Unregister signals oaiset.
def compute_num_true_positives(ref_freqs, est_freqs, window=0.5, chroma=False):
    """Compute the number of true positives per frame in an estimate.

    Parameters
    ----------
    ref_freqs : list of np.ndarray
        Reference frequencies (MIDI).
    est_freqs : list of np.ndarray
        Estimated frequencies (MIDI).
    window : float
        Matching window size, in semitones.
    chroma : bool
        If True, compute distances modulo n (inputs must already be wrapped
        modulo n).

    Returns
    -------
    true_positives : np.ndarray
        Per-frame true-positive counts, same length as ref_freqs.
    """
    true_positives = np.zeros((len(ref_freqs),))
    for frame_index, (ref_frame, est_frame) in enumerate(zip(ref_freqs, est_freqs)):
        if chroma:
            matched = util.match_events(ref_frame, est_frame, window,
                                        distance=util._outer_distance_mod_n)
        else:
            matched = util.match_events(ref_frame, est_frame, window)
        true_positives[frame_index] = len(matched)
    return true_positives
Compute the number of true positives in an estimate given a reference. A frequency is correct if it is within a quartertone of the correct frequency. Parameters ---------- ref_freqs : list of np.ndarray reference frequencies (MIDI) est_freqs : list of np.ndarray estimated frequencies (MIDI) window : float Window size, in semitones chroma : bool If True, computes distances modulo n. If True, ``ref_freqs`` and ``est_freqs`` should be wrapped modulo n. Returns ------- true_positives : np.ndarray Array the same length as ref_freqs containing the number of true positives.
def do_set_workdir(self, args):
    """Set the working directory used to load and save known devices.

    Usage:
        set_workdir workdir
    Arguments:
        workdir: Required - working directory for the device list.
    """
    params = args.split()
    workdir = params[0] if params else None
    if workdir is None:
        _LOGGING.error('Device name required.')
        self.do_help('set_workdir')
        return
    self.tools.workdir = workdir
Set the working directory. The working directory is used to load and save known devices to improve startup times. During startup the application loads and saves a file `insteon_plm_device_info.dat`. This file is saved in the working directory. The working directory has no default value. If the working directory is not set, the `insteon_plm_device_info.dat` file is not loaded or saved. Usage: set_workdir workdir Arguments: workdir: Required - Working directory to load and save the device list
def sha1_hexdigest(self):
    """A SHA-1 digest of the whole object for easy differentiation.

    Computed lazily and cached on first access.

    .. versionadded:: 18.1.0
    """
    if self._sha1_hexdigest is None:
        digest = hashlib.sha1(self._pem_bytes).hexdigest()
        self._sha1_hexdigest = digest
    return self._sha1_hexdigest
A SHA-1 digest of the whole object for easy differentiation. .. versionadded:: 18.1.0
def check_run(check, env, rate, times, pause, delay, log_level, as_json, break_point):
    """Run an Agent check inside a configured environment.

    Resolves the environment for *check* (failing with a hint when none is
    active, or when several are active and none was specified), then
    delegates to the environment interface's ``run_check``.
    """
    envs = get_configured_envs(check)
    if not envs:
        echo_failure('No active environments found for `{}`.'.format(check))
        echo_info('See what is available to start via `ddev env ls {}`.'.format(check))
        # abort() presumably terminates execution -- TODO confirm.
        abort()
    if not env:
        if len(envs) > 1:
            echo_failure('Multiple active environments found for `{}`, please specify one.'.format(check))
            echo_info('See what is active via `ddev env ls`.')
            abort()
        # Exactly one active environment: use it implicitly.
        env = envs[0]
    if env not in envs:
        echo_failure('`{}` is not an active environment.'.format(env))
        echo_info('See what is active via `ddev env ls`.')
        abort()
    environment = create_interface(check, env)
    environment.run_check(
        rate=rate, times=times, pause=pause, delay=delay, log_level=log_level,
        as_json=as_json, break_point=break_point
    )
    echo_success('Note: ', nl=False)
    echo_info('If some metrics are missing, you may want to try again with the -r / --rate flag.')
Run an Agent check.
def assigned_state(instance):
    """Return 'assigned' or 'unassigned' for an Analysis Request.

    'unassigned' when there are no analyses or at least one analysis lacks
    a worksheet; 'assigned' otherwise.
    """
    analyses = instance.getAnalyses()
    if not analyses:
        return "unassigned"
    all_assigned = all(
        api.get_object(analysis).getWorksheet() for analysis in analyses)
    return "assigned" if all_assigned else "unassigned"
Returns `assigned` or `unassigned` depending on the state of the analyses the analysisrequest contains. Return `unassigned` if the Analysis Request has at least one analysis in `unassigned` state. Otherwise, returns `assigned`
async def fetch_state(self, request):
    """Fetch data from a specific address in the validator's state tree.

    Request:
        query:
            - head: id of the block to use as the chain head
            - address: the 70-character address of the data to fetch

    Response:
        data: base64-encoded binary data stored at that address
        head: the head used for this query (most recent if unspecified)
        link: link to this exact query, including head block
    """
    error_traps = [
        error_handlers.InvalidAddressTrap,
        error_handlers.StateNotFoundTrap,
    ]
    address = request.match_info.get('address', '')
    head = request.url.query.get('head', None)
    head, root = await self._head_to_root(head)
    validator_query = client_state_pb2.ClientStateGetRequest(
        state_root=root, address=address)
    response = await self._query_validator(
        Message.CLIENT_STATE_GET_REQUEST,
        client_state_pb2.ClientStateGetResponse,
        validator_query,
        error_traps)
    return self._wrap_response(
        request,
        data=response['value'],
        metadata=self._get_metadata(request, response, head=head))
Fetches data from a specific address in the validator's state tree. Request: query: - head: The id of the block to use as the head of the chain - address: The 70 character address of the data to be fetched Response: data: The base64 encoded binary data stored at that address head: The head used for this query (most recent if unspecified) link: The link to this exact query, including head block
def get_langids(dev):
    r"""Retrieve the tuple of supported Language IDs from *dev*.

    Reads string descriptor 0 and decodes the little-endian 16-bit LANGID
    array; returns () when the device reports no (or a malformed) array.
    Most client code should use the ``langids`` property on the Device
    object instead, which caches the result.
    """
    from usb.control import get_descriptor
    buf = get_descriptor(dev, 254, DESC_TYPE_STRING, 0)
    # A valid LANGID array is at least 4 bytes with an even length byte.
    if len(buf) < 4 or buf[0] < 4 or buf[0] & 1 != 0:
        return ()
    low_bytes = buf[2:buf[0]:2]
    high_bytes = buf[3:buf[0]:2]
    return tuple(map(lambda lo, hi: lo + (hi << 8), low_bytes, high_bytes))
r"""Retrieve the list of supported Language IDs from the device. Most client code should not call this function directly, but instead use the langids property on the Device object, which will call this function as needed and cache the result. USB LANGIDs are 16-bit integers familiar to Windows developers, where for example instead of en-US you say 0x0409. See the file USB_LANGIDS.pdf somewhere on the usb.org site for a list, which does not claim to be complete. It requires "system software must allow the enumeration and selection of LANGIDs that are not currently on this list." It also requires "system software should never request a LANGID not defined in the LANGID code array (string index = 0) presented by a device." Client code can check this tuple before issuing string requests for a specific language ID. dev is the Device object whose supported language IDs will be retrieved. The return value is a tuple of integer LANGIDs, possibly empty if the device does not support strings at all (which USB 3.1 r1.0 section 9.6.9 allows). In that case client code should not request strings at all. A USBError may be raised from this function for some devices that have no string support, instead of returning an empty tuple. The accessor for the langids property on Device catches that case and supplies an empty tuple, so client code can ignore this detail by using the langids property instead of directly calling this function.
def create(self, name, indexes=None, fields=None, **kwargs):
    """Create a KV Store Collection.

    :param name: name of collection to create
    :type name: ``string``
    :param indexes: dictionary of index definitions (values that are dicts
        are JSON-encoded before posting)
    :type indexes: ``dict``
    :param fields: dictionary of field definitions
    :type fields: ``dict``
    :param kwargs: additional parameters specifying indexes and field
        definitions
    :type kwargs: ``dict``

    :return: Result of POST request
    """
    # Fix: the defaults were mutable ``{}`` literals shared across calls;
    # ``None`` sentinels are the safe, equivalent form.
    for k, v in six.iteritems(indexes or {}):
        if isinstance(v, dict):
            # complex index definitions are posted as JSON
            v = json.dumps(v)
        kwargs['index.' + k] = v
    for k, v in six.iteritems(fields or {}):
        kwargs['field.' + k] = v
    return self.post(name=name, **kwargs)
Creates a KV Store Collection. :param name: name of collection to create :type name: ``string`` :param indexes: dictionary of index definitions :type indexes: ``dict`` :param fields: dictionary of field definitions :type fields: ``dict`` :param kwargs: a dictionary of additional parameters specifying indexes and field definitions :type kwargs: ``dict`` :return: Result of POST request
def move(self, i, lat, lng, change_time=True):
    """Move fence point *i* to (lat, lng).

    Point 1 and the last point are kept identical (mirrored), so moving
    either also moves the other. Updates ``last_change`` unless
    ``change_time`` is False. Invalid indices are reported and ignored.
    """
    if i < 0 or i >= self.count():
        print("Invalid fence point number %u" % i)
        # Fix: previously fell through and indexed self.points[i] anyway,
        # raising IndexError for i >= count().
        return
    self.points[i].lat = lat
    self.points[i].lng = lng
    # point 1 and the last point must stay in sync
    if i == 1:
        self.points[self.count() - 1].lat = lat
        self.points[self.count() - 1].lng = lng
    if i == self.count() - 1:
        self.points[1].lat = lat
        self.points[1].lng = lng
    if change_time:
        self.last_change = time.time()
Move fence point *i* to the given latitude/longitude. Point 1 and the
last point of the fence are kept identical (mirrored), so moving either
one also moves the other; ``last_change`` is updated unless
``change_time`` is False.
def idsKEGG(organism):
    """Fetch all gene ids for the given KEGG organism via the KEGG REST API.

    :param organism: an organism code as listed in organismsKEGG()
    :returns: a Pandas dataframe with columns 'gene_name' and 'KEGGid'.
    """
    # NOTE(review): urlopen().read() returns bytes on Python 3, where
    # .split("\n") would fail — presumably written for Python 2; confirm.
    ORG=urlopen("http://rest.kegg.jp/list/"+organism).read()
    ORG=ORG.split("\n")
    final=[]
    for k in ORG:
        final.append(k.split("\t"))
    # drop the trailing empty row produced by the final newline
    df=pd.DataFrame(final[0:len(final)-1])[[0,1]]
    df.columns=['KEGGid','description']
    # gene_name is the text before the first ';' in the description column
    field = pd.DataFrame(df['description'].str.split(';',1).tolist())[0]
    field = pd.DataFrame(field)
    df = pd.concat([df[['KEGGid']],field],axis=1)
    df.columns=['KEGGid','gene_name']
    df=df[['gene_name','KEGGid']]
    return df
Uses KEGG to retrieve all ids for a given KEGG organism

:param organism: an organism as listed in organismsKEGG()

:returns: a Pandas dataframe with the columns 'gene_name' and 'KEGGid'.
def convert_magicc7_to_openscm_variables(variables, inverse=False):
    """Convert MAGICC7 variable names to OpenSCM names (or the reverse).

    Parameters
    ----------
    variables : list_like, str
        Variables to convert
    inverse : bool
        If True, convert OpenSCM variables back to MAGICC7 variables

    Returns
    -------
    ``type(variables)``
        Converted variables
    """
    if not isinstance(variables, (list, pd.Index)):
        return _apply_convert_magicc7_to_openscm_variables(variables, inverse)
    return [
        _apply_convert_magicc7_to_openscm_variables(item, inverse)
        for item in variables
    ]
Convert MAGICC7 variables to OpenSCM variables Parameters ---------- variables : list_like, str Variables to convert inverse : bool If True, convert the other way i.e. convert OpenSCM variables to MAGICC7 variables Returns ------- ``type(variables)`` Set of converted variables
async def on_open(self):
    """Ping loop run while the connection is open.

    Sends a ping ("2") and an ack every ``ping_interval`` seconds, and
    raises IOError if the previous ping was never answered (lastping
    ahead of lastpong). On any error the connection is torn down via
    ``self.reraise`` and the loop exits.
    """
    self.__ensure_barrier()
    while self.connected:
        try:
            # an unanswered ping means the peer is gone
            if self.__lastping > self.__lastpong:
                raise IOError("Last ping remained unanswered")
            self.send_message("2")
            self.send_ack()
            self.__lastping = time.time()
            await asyncio.sleep(self.ping_interval)
        except Exception as ex:
            LOGGER.exception("Failed to ping")
            try:
                # reraise() is expected to force-close the connection;
                # if even that fails, log and leave the loop anyway
                self.reraise(ex)
            except Exception:
                LOGGER.exception(
                    "failed to force close connection after ping error"
                )
            break
Keep-alive ping loop: while the connection is open, periodically send a
ping ("2") and an ack, and tear the connection down if the previous ping
was never answered.
def parse_fade_requirement(text):
    """Parse a requirement line into a (repo, dependency) pair.

    An explicit ``repo::requirement`` prefix selects the repository;
    otherwise it is inferred (a requirement containing both ':' and '/'
    is treated as VCS, anything else as PyPI). Returns None for an
    unknown explicit repository.
    """
    text = text.strip()

    if "::" in text:
        # explicit repository prefix
        repo_raw, requirement = text.split("::", 1)
        try:
            repo = {'pypi': REPO_PYPI, 'vcs': REPO_VCS}[repo_raw]
        except KeyError:
            logger.warning("Not understood fades repository: %r", repo_raw)
            return
    else:
        requirement = text
        repo = REPO_VCS if (":" in text and "/" in text) else REPO_PYPI

    if repo == REPO_VCS:
        dependency = VCSDependency(requirement)
    else:
        dependency = list(parse_requirements(requirement))[0]
    return repo, dependency
Return a requirement and repo from the given text, already parsed and converted.
def launch(self, image, command, **kwargs):
    """Create a job on this engine.

    Args:
        image (str): name of the docker image to launch
        command (str): shell command to run (a PythonCall yields a
            PythonJob instead of a plain Job)
    """
    job_cls = PythonJob if isinstance(command, PythonCall) else Job
    return job_cls(self, image, command, **kwargs)
Create a job on this engine Args: image (str): name of the docker image to launch command (str): shell command to run
def name(self, pretty=False):
    """Return the name of the OS distribution, as a string.

    The plain name is taken from the first available of os-release,
    lsb_release, the distro release file, or uname. With ``pretty=True``
    the pretty name is preferred; when no pretty name exists, the plain
    distro/uname name plus the pretty version is used. Returns '' when
    nothing is known.
    """
    plain = (self.os_release_attr('name')
             or self.lsb_release_attr('distributor_id')
             or self.distro_release_attr('name')
             or self.uname_attr('name'))
    if not pretty:
        return plain or ''

    result = (self.os_release_attr('pretty_name')
              or self.lsb_release_attr('description'))
    if not result:
        # no pretty name anywhere: synthesize one from name + version
        result = (self.distro_release_attr('name')
                  or self.uname_attr('name'))
        version = self.version(pretty=True)
        if version:
            result = result + ' ' + version
    return result or ''
Return the name of the OS distribution, as a string. For details, see :func:`distro.name`.
def dataframe(self):
    """Build a pandas DataFrame from the query results, caching it.

    Raises RuntimeError when pandas is not installed.
    """
    if self._dataframe is not None:
        return self._dataframe

    try:
        import pandas as pd
    except ImportError:
        raise RuntimeError('To enable dataframe support, '
                           'run \'pip install datadotworld[pandas]\'')

    self._dataframe = pd.DataFrame.from_records(self._iter_rows(),
                                                coerce_float=True)
    return self._dataframe
Build and cache a dataframe from query results
def _zadd(self, key, pk, ts=None, ttl=None): return self.r.eval(self.LUA_ZADD, 1, key, ts or self._time(), pk)
Redis lua func to add an event to the corresponding sorted set. :param key: the key to be stored in redis server :param pk: the primary key of event :param ts: timestamp of the event, default to redis_server's current timestamp :param ttl: the expiration time of event since the last update
def runtime_paths(self):
    """Return the user-expanded runtimepath entries of the ensime-vim plugin."""
    entries = self._vim.options['runtimepath'].split(',')
    return [os.path.expanduser(entry)
            for entry in entries
            if "ensime-vim" in entry]
All the runtime paths of ensime-vim plugin files.
def to_glyphs_font_attributes(self, source, master, is_initial):
    """Copy font attributes from *source* into the Glyphs font.

    The first UFO processed (``is_initial``) sets the attributes outright;
    subsequent UFOs are compared and merged.
    """
    handler = (_set_glyphs_font_attributes
               if is_initial
               else _compare_and_merge_glyphs_font_attributes)
    handler(self, source)
Copy font attributes from `ufo` either to `self.font` or to `master`. Arguments: self -- The UFOBuilder ufo -- The current UFO being read master -- The current master being written is_initial -- True iff this the first UFO that we process
def add(self, path):
    """Add *path* to the overlay filesystem.

    Registers the path and every ancestor up to (but excluding) the
    root, so any filesystem operation on the path or its sub-paths can
    be redirected.

    @path: An absolute path string.
    Raises ValueError for non-absolute paths.
    """
    if not path.startswith(os.sep):
        raise ValueError("Non-absolute path '{}'".format(path))
    path = path.rstrip(os.sep)
    # Fix: guarding on '' (and os.sep) makes add('/') a no-op instead of
    # an infinite loop — rstrip turns '/' into '', and os.path.split('')
    # never reaches os.sep, so the old `while True` never broke out.
    while path and path != os.sep:
        self._paths[path] = None
        path, _ = os.path.split(path)
Add a path to the overlay filesytem. Any filesystem operation involving the this path or any sub-paths of it will be transparently redirected to temporary root dir. @path: An absolute path string.
def com_google_fonts_check_metadata_license(family_metadata):
    """METADATA.pb license is "APACHE2", "UFL" or "OFL"?"""
    expected_licenses = ["APACHE2", "OFL", "UFL"]
    declared = family_metadata.license
    if declared in expected_licenses:
        yield PASS, ("Font license is declared"
                     " in METADATA.pb as \"{}\"").format(declared)
    else:
        yield FAIL, ("METADATA.pb license field (\"{}\")"
                     " must be one of the following:"
                     " {}").format(declared, expected_licenses)
METADATA.pb license is "APACHE2", "UFL" or "OFL"?
def print_diff(self, ignore=None):
    """Print the diff to the last time the state of objects was measured.

    keyword arguments
    ignore -- list of objects to ignore

    Fix: the old signature used a mutable default (``ignore=[]``) that
    accumulated frame objects across calls, then worked around it with
    ``del ignore[:]`` — which also destructively emptied any list the
    caller passed in. A private copy avoids both problems.
    """
    ignore = list(ignore) if ignore is not None else []
    # exclude this frame itself from the measurement
    ignore.append(inspect.currentframe())
    diff = self.get_diff(ignore)
    print("Added objects:")
    summary.print_(summary.summarize(diff['+']))
    print("Removed objects:")
    summary.print_(summary.summarize(diff['-']))
Print the diff to the last time the state of objects was measured. keyword arguments ignore -- list of objects to ignore
def connection_made(self):
    """Protocol connection-established handler: log that the peer connected."""
    LOG.info(
        'Connection to peer: %s established', self._neigh_conf.ip_address,
        extra={
            # structured fields for the logging infrastructure
            'resource_name': self._neigh_conf.name,
            'resource_id': self._neigh_conf.id
        }
    )
Protocols connection established handler
def get_completed_tasks(self):
    """Return a list of all completed tasks in this project.

    Pages through the Todoist API ``_PAGE_LIMIT`` tasks at a time until
    an empty page is returned.

    :return: A list of all completed tasks in this project.
    :rtype: list of :class:`pytodoist.todoist.Task`
    """
    self.owner.sync()
    tasks = []
    offset = 0
    while True:
        response = API.get_all_completed_tasks(self.owner.api_token,
                                               limit=_PAGE_LIMIT,
                                               offset=offset,
                                               project_id=self.id)
        _fail_if_contains_errors(response)
        response_json = response.json()
        tasks_json = response_json['items']
        if len(tasks_json) == 0:
            # an empty page means every task has been fetched
            break
        for task_json in tasks_json:
            # resolve the task's project through the owner's project map
            project = self.owner.projects[task_json['project_id']]
            tasks.append(Task(task_json, project))
        offset += _PAGE_LIMIT
    return tasks
Return a list of all completed tasks in this project. :return: A list of all completed tasks in this project. :rtype: list of :class:`pytodoist.todoist.Task` >>> from pytodoist import todoist >>> user = todoist.login('john.doe@gmail.com', 'password') >>> project = user.get_project('PyTodoist') >>> task = project.add_task('Install PyTodoist') >>> task.complete() >>> completed_tasks = project.get_completed_tasks() >>> for task in completed_tasks: ... task.uncomplete()
def get_extra_element_count(curr_count, opt_count, extra_allowed_cnt):
    """Evaluate and return the extra element count for a group.

    :params:
        curr_count: current element count for the group
        opt_count: optimal element count for each group
        extra_allowed_cnt: number of groups still allowed to carry one
            extra element
    :returns: tuple of (extra count for this group, remaining allowance)
    """
    if curr_count <= opt_count:
        return 0, extra_allowed_cnt
    if extra_allowed_cnt > 0:
        # consume one allowance: one of the surplus elements is permitted
        return curr_count - opt_count - 1, extra_allowed_cnt - 1
    return curr_count - opt_count, extra_allowed_cnt
Evaluate and return extra same element count based on given values.

:key-term:
group: Here a group can be any base where elements are placed, i.e. a
replication-group while placing replicas (elements) or brokers while
placing partitions (elements).
element: Generic term for units which are optimally placed over groups.

:params:
curr_count: Given count
opt_count: Optimal count for each group.
extra_allowed_cnt: Count of groups which can have 1 extra element on
each group.
def children_rest_names(self):
    """Return the list of all possible children ReST names.

    Returns:
        list: all possible rest names as strings, one per fetcher
    """
    return [fetcher.__class__.managed_object_rest_name()
            for fetcher in self.fetchers]
Gets the list of all possible children ReST names. Returns: list: list containing all possible rest names as string Example: >>> entity = NUEntity() >>> entity.children_rest_names ["foo", "bar"]
def addCollector(self, collector):
    """Register *collector* on this schema, keyed by its name.

    :param collector | <orb.Collector>
    """
    collector.setSchema(self)
    key = collector.name()
    self.__collectors[key] = collector
Adds the inputted collector reference to this table schema. :param collector | <orb.Collector>
def resize_hess(self, func):
    """Wrap a Hessian function so fixed parameters are removed from its output.

    :param func: Hessian function to be wrapped; assumed to be the Hessian
        of a scalar function. May be None, in which case None is returned.
    :return: wrapped function returning the sub-Hessian over the free
        parameters only (rows AND columns of fixed parameters dropped).
    """
    if func is None:
        return None

    @wraps(func)
    def resized(*args, **kwargs):
        out = func(*args, **kwargs)
        out = np.atleast_2d(np.squeeze(out))
        mask = np.array([p not in self._fixed_params for p in self.parameters])
        # Fix: ``out[mask, mask]`` with two boolean masks selects only the
        # *diagonal* entries of the free block. np.ix_ forms the outer
        # product of the index sets, keeping the full sub-matrix.
        return np.atleast_2d(out[np.ix_(mask, mask)])
    return resized
Removes values with identical indices to fixed parameters from the output of func. func has to return the Hessian of a scalar function. :param func: Hessian function to be wrapped. Is assumed to be the Hessian of a scalar function. :return: Hessian corresponding to free parameters only.
def build_output_partitions(cls, name='inputTablePartitions', output_name='output'):
    """Build an output table partition parameter.

    :param name: parameter name
    :type name: str
    :param output_name: bound output port name
    :type output_name: str
    :return: output description
    :rtype: ParamDef
    """
    param = cls(name)
    param.exporter = 'get_output_table_partition'
    param.output_name = output_name
    return param
Build an output table partition parameter :param name: parameter name :type name: str :param output_name: bind input port name :type output_name: str :return: output description :rtype: ParamDef
def determine_encoding(path, default=None):
    """Determine the encoding of a file from its byte order mark.

    Arguments:
        path (str): The path to the file.
        default (str, optional): The encoding to return if no BOM is
            recognized or the file cannot be read.

    Returns:
        str: The detected encoding, or *default*.
    """
    # Longest BOMs first: BOM_UTF16_LE (ff fe) is a prefix of
    # BOM_UTF32_LE (ff fe 00 00), so UTF-32 must be tested before UTF-16
    # or little-endian UTF-32 files are misreported as 'utf-16'.
    byte_order_marks = (
        ('utf-8-sig', (codecs.BOM_UTF8, )),
        ('utf-32', (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)),
        ('utf-16', (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE)),
    )
    try:
        with open(path, 'rb') as infile:
            raw = infile.read(4)
    except IOError:
        return default
    for encoding, boms in byte_order_marks:
        if any(raw.startswith(bom) for bom in boms):
            return encoding
    return default
Determines the encoding of a file based on byte order marks. Arguments: path (str): The path to the file. default (str, optional): The encoding to return if the byte-order-mark lookup does not return an answer. Returns: str: The encoding of the file.
def flush(self, fsync=False):
    """Force all buffered modifications to be written to disk.

    Parameters
    ----------
    fsync : bool (default False)
        also call ``os.fsync()`` on the file handle to push the OS
        buffers to disk; fsync failures are ignored (best effort).
    """
    handle = self._handle
    if handle is None:
        return
    handle.flush()
    if not fsync:
        return
    try:
        os.fsync(handle.fileno())
    except OSError:
        # best effort: some handles (pipes, in-memory buffers) cannot fsync
        pass
Force all buffered modifications to be written to disk. Parameters ---------- fsync : bool (default False) call ``os.fsync()`` on the file handle to force writing to disk. Notes ----- Without ``fsync=True``, flushing may not guarantee that the OS writes to disk. With fsync, the operation will block until the OS claims the file has been written; however, other caching layers may still interfere.
def register_classes(yaml: ruamel.yaml.YAML, classes: Optional[Iterable[Any]] = None) -> ruamel.yaml.YAML:
    """Register externally defined classes with the given YAML object.

    Returns the same YAML object for chaining. A ``None`` or empty
    *classes* iterable registers nothing.
    """
    for cls in (classes or []):
        logger.debug(f"Registering class {cls} with YAML")
        yaml.register_class(cls)
    return yaml
Register externally defined classes.
def convertDatetime(t):
    """Convert datetime *t* to its protocol value: integer milliseconds
    since the Unix epoch."""
    since_epoch = t - datetime.datetime.utcfromtimestamp(0)
    return int(since_epoch.total_seconds() * 1000)
Converts the specified datetime object into its appropriate protocol value. This is the number of milliseconds from the epoch.
def ResolveMulti(self, subject, attributes, timestamp=None, limit=None):
    """Resolves multiple attributes at once for one subject.

    Yields ``(attribute, decoded_value, timestamp)`` tuples. When *limit*
    is given it acts as a total row budget across all attributes.
    """
    for attribute in attributes:
        # limit is passed through so each query fetches at most the
        # remaining budget
        query, args = self._BuildQuery(subject, attribute, timestamp, limit)
        result, _ = self.ExecuteQuery(query, args)
        for row in result:
            # _Decode converts the stored raw value back to the
            # attribute's type
            value = self._Decode(attribute, row["value"])
            yield (attribute, value, row["timestamp"])
        # shrink the remaining budget and stop once it is exhausted
        if limit:
            limit -= len(result)
        if limit is not None and limit <= 0:
            break
Resolves multiple attributes at once for one subject.
def xmeans(cls, initial_centers=None, kmax=20, tolerance=0.025, criterion=splitting_type.BAYESIAN_INFORMATION_CRITERION, ccore=False):
    """Construct a wrapper around the x-means clustering algorithm.

    :param initial_centers: initial cluster center coordinates,
        e.g. [center1, center2, ...]; dimensions must match the dataset
    :param kmax: maximum number of clusters that can be allocated
    :param tolerance: per-iteration stop condition on the maximum change
        of cluster centers
    :param criterion: type of splitting criterion
    :param ccore: whether to use the CCORE (C++) implementation
    :return: the wrapped clustering object
    """
    return cls(xmeans(None, initial_centers, kmax, tolerance, criterion, ccore))
Constructor of the x-means clustering algorithm.

:param initial_centers: Initial coordinates of the cluster centers,
represented by a list: [center1, center2, ...]. Note: the dimensions of
the initial centers should be the same as those of the dataset.
:param kmax: Maximum number of clusters that can be allocated.
:param tolerance: Stop condition for each iteration: if the maximum
change of the cluster centers is less than tolerance, the algorithm
stops processing.
:param criterion: Type of splitting criterion.
:param ccore: Defines whether CCORE (the C++ pyclustering library)
should be used instead of Python code.

:return: returns the clustering object
def _validated(self, data): for sub in self.schemas: data = sub(data) return data
Validate data if all subschemas validate it.
def aggregate(self):
    """Aggregate all per-sample reports of the same type into one master
    report per report name, written under ``self.reportpath``."""
    for report in self.reportset:
        printtime('Processing {}'.format(report.split('.')[0]), self.start)
        # mlst.csv is the only report with a hard-coded header; for all
        # others the header is taken from the first sample file read
        header = '' if report != 'mlst.csv' else 'Strain,Genus,SequenceType,Matches,1,2,3,4,5,6,7\n'
        data = ''
        with open(os.path.join(self.reportpath, report), 'w') as aggregate:
            for sample in self.runmetadata.samples:
                try:
                    with open(os.path.join(sample.general.reportpath, report), 'r') as runreport:
                        if not header:
                            # first file seen supplies the shared header
                            # NOTE(review): the data rows of this first file
                            # are never read — confirm that is intended
                            header = runreport.readline()
                        else:
                            for row in runreport:
                                if not row.endswith('\n'):
                                    row += '\n'
                                # skip rows that repeat the header (matched
                                # on the first comma-separated field)
                                if row.split(',')[0] != header.split(',')[0]:
                                    data += row
                except IOError:
                    # samples lacking this report are silently skipped
                    pass
            aggregate.write(header)
            aggregate.write(data)
Aggregate all reports of the same type into a master report
def print_locale_info (out=stderr):
    """Print the locale-related environment variables and the default
    locale to *out*."""
    for key in ("LANGUAGE", "LC_ALL", "LC_CTYPE", "LANG"):
        print_env_info(key, out=out)
    print(_("Default locale:"), i18n.get_locale(), file=out)
Print locale info.
def update(table, values, where=(), **kwargs):
    """Convenience wrapper for database UPDATE; extra keyword arguments
    are merged into the WHERE criteria. Returns the affected row count."""
    criteria = dict(where, **kwargs).items()
    sql, args = makeSQL("UPDATE", table, values=values, where=criteria)
    cursor = execute(sql, args)
    return cursor.rowcount
Convenience wrapper for database UPDATE.
def get_default_ssl_version():
    """Return the highest supported TLS protocol constant, preferring
    TLSv1.2, then TLSv1.1, then TLSv1; None if none is available.

    :rtype: int|None
    """
    for attr in ('PROTOCOL_TLSv1_2', 'PROTOCOL_TLSv1_1', 'PROTOCOL_TLSv1'):
        version = getattr(ssl, attr, None)
        if version is not None:
            return version
    return None
Get the highest support TLS version, if none is available, return None. :rtype: bool|None
def weight_by_edge_odds_ratios(self, edges_expected_weight, flag_as_significant):
    """Re-weight network edges by their odds ratios (permutation test step).

    Parameters
    -----------
    edges_expected_weight : list(tup(edge_id, float))
        (edge id, expected weight) pairs; each edge's weight is divided
        by its expected weight.
    flag_as_significant : set|list of edge ids
        edges to mark ``significant``; all others are marked not
        significant.
    """
    for edge_id, expected_weight in edges_expected_weight:
        edge = self.edges[edge_id]
        edge.weight /= expected_weight
        edge.significant = edge_id in flag_as_significant
Applied during the permutation test. Update the edges in the network to be weighted by their odds ratios. The odds ratio measures how unexpected the observed edge weight is based on the expected weight. Parameters ----------- edges_expected_weight : list(tup(int, int), float) A tuple list of (edge id, edge expected weight) generated from the permutation test step. flag_as_significant : [set|list](tup(int, int)) A set or list of edge ids that are considered significant against the null model of random associations generated in the permutation test
def scrub(zpool, stop=False, pause=False):
    """Start, stop, or pause a scrub on the named storage pool.

    zpool : string
        Name of the storage pool
    stop : boolean
        If True, cancel an ongoing scrub
    pause : boolean
        If True, pause an ongoing scrub (recent ZFS only)

    Returns an OrderedDict with a ``scrubbing`` flag on success, or the
    parsed command error on failure.
    """
    # stop takes precedence over pause when both are True
    if stop:
        action = ['-s']
    elif pause:
        action = ['-p']
    else:
        action = None
    res = __salt__['cmd.run_all'](
        __utils__['zfs.zpool_command'](
            command='scrub',
            flags=action,
            target=zpool,
        ),
        python_shell=False,
    )
    # non-zero exit: let the shared helper build the error structure
    if res['retcode'] != 0:
        return __utils__['zfs.parse_command_result'](res, 'scrubbing')
    ret = OrderedDict()
    if stop or pause:
        ret['scrubbing'] = False
    else:
        ret['scrubbing'] = True
    return ret
Scrub a storage pool zpool : string Name of storage pool stop : boolean If ``True``, cancel ongoing scrub pause : boolean If ``True``, pause ongoing scrub .. versionadded:: 2018.3.0 .. note:: Pause is only available on recent versions of ZFS. If both ``pause`` and ``stop`` are ``True``, then ``stop`` will win. CLI Example: .. code-block:: bash salt '*' zpool.scrub myzpool
def concretize(self, **kwargs):
    """Return the packets read or written as a list of bytestrings.

    Each entry of ``self.content`` holds (packet data, packet length);
    both are evaluated with the solver, and each packet's data is
    truncated to its concrete length.
    """
    # concretize every packet's length first
    lengths = [self.state.solver.eval(x[1], **kwargs) for x in self.content]
    kwargs['cast_to'] = bytes
    # slice each packet's bitvector down to its concrete length before
    # evaluating; zero-length packets become b''
    return [b'' if i == 0 else self.state.solver.eval(x[0][i*self.state.arch.byte_width-1:], **kwargs) for i, x in zip(lengths, self.content)]
Returns a list of the packets read or written as bytestrings.
def get_one(self, key):
    """Find the single record matching the given primary key and return
    it as a model instance.

    Raises LookupError when no matching document exists.
    """
    query = build_db_query(self.primary_key, key)
    collection = self.ds.connection(self.collection_name)
    document = collection.find_one(query)
    if document is None:
        raise LookupError('{0} with key {1} was not found'.format(self.model_klass.__name__, query))
    return self.model_klass.from_json(document)
Finds the single record matching the given primary key and returns it to
the caller; raises LookupError when no matching record is found.
def dictionary(_object, *args):
    """Validate that the input is of type dictionary.

    Example usage::

        data = {'a': {'b': 1}}
        schema = ('a', dictionary)

    When given a callable, acts as a decorator instead: the returned
    wrapper asserts the value is a dict before invoking the wrapped
    validator.
    """
    error_msg = 'not of type dictionary'
    if is_callable(_object):
        # decorator mode: run the dict check before the wrapped validator
        _validator = _object
        @wraps(_validator)
        def decorated(value):
            ensure(isinstance(value, dict), error_msg)
            return _validator(value)
        return decorated
    try:
        ensure(isinstance(_object, dict), error_msg)
    except AssertionError:
        if args:
            # with tree context available, re-raise as a schema-aware Invalid
            msg = 'did not pass validation against callable: dictionary'
            raise Invalid('', msg=msg, reason=error_msg, *args)
        raise
Validates a given input is of type dictionary. Example usage:: data = {'a' : {'b': 1}} schema = ('a', dictionary) You can also use this as a decorator, as a way to check for the input before it even hits a validator you may be writing. .. note:: If the argument is a callable, the decorating behavior will be triggered, otherwise it will act as a normal function.
def qtiling(fseries, qrange, frange, mismatch=0.2):
    """Build the Q-tile dictionary for a set of Q-planes.

    Parameters
    ----------
    fseries: 'pycbc FrequencySeries'
        frequency-series data set
    qrange:
        upper and lower bounds of the q range
    frange:
        upper and lower bounds of the frequency range
    mismatch:
        desired fractional mismatch

    Returns
    -------
    dict mapping each q to its array of tile frequencies
    """
    deltam = deltam_f(mismatch)
    return {
        q: numpy.array(list(_iter_frequencies(q, frange, mismatch, fseries.duration)))
        for q in _iter_qs(qrange, deltam)
    }
Iterable constructor of QTile tuples Parameters ---------- fseries: 'pycbc FrequencySeries' frequency-series data set qrange: upper and lower bounds of q range frange: upper and lower bounds of frequency range mismatch: percentage of desired fractional mismatch Returns ------- qplane_tile_dict: 'dict' dictionary containing Q-tile tuples for a set of Q-planes
def _construct_target(self, function): target = { 'Arn': function.get_runtime_attr("arn"), 'Id': self.logical_id + 'LambdaTarget' } if self.Input is not None: target['Input'] = self.Input if self.InputPath is not None: target['InputPath'] = self.InputPath return target
Constructs the Target property for the CloudWatch Events Rule. :returns: the Target property :rtype: dict
def array_keys(self):
    """Iterate, in sorted order, over member names that are arrays
    (groups and other members are skipped)."""
    for member in sorted(listdir(self._store, self._path)):
        if contains_array(self._store, self._key_prefix + member):
            yield member
Return an iterator over member names for arrays only. Examples -------- >>> import zarr >>> g1 = zarr.group() >>> g2 = g1.create_group('foo') >>> g3 = g1.create_group('bar') >>> d1 = g1.create_dataset('baz', shape=100, chunks=10) >>> d2 = g1.create_dataset('quux', shape=200, chunks=20) >>> sorted(g1.array_keys()) ['baz', 'quux']
def _clean_dic(self, dic): aux_dic = dic.copy() for key, value in iter(dic.items()): if value is None or value == '': del aux_dic[key] elif type(value) is dict: cleaned_dict = self._clean_dic(value) if not cleaned_dict: del aux_dic[key] continue aux_dic[key] = cleaned_dict return aux_dic
Clean recursively all empty or None values inside a dict.
def setup_completion(shell):
    """Set up readline tab completion for *shell* in a cross-platform way.

    Completes shell identifiers for the first word on the line (and after
    'help '), and filesystem paths otherwise. Falls back to pyreadline
    where readline is unavailable.
    """
    import glob
    try:
        import readline
    except ImportError:
        import pyreadline as readline
    def _complete(text, state):
        buf = readline.get_line_buffer()
        if buf.startswith('help ') or " " not in buf:
            # completing a command/identifier name
            return [x for x in shell.valid_identifiers() if x.startswith(text)][state]
        # completing a filesystem path
        return (glob.glob(os.path.expanduser(text)+'*')+[None])[state]
    readline.set_completer_delims(' \t\n;')
    # libedit-backed readline (e.g. macOS) uses a different binding syntax
    if readline.__doc__ is not None and 'libedit' in readline.__doc__:
        readline.parse_and_bind("bind ^I rl_complete")
    else:
        readline.parse_and_bind("tab: complete")
    readline.set_completer(_complete)
Setup readline to tab complete in a cross platform way.
def sinter(self, *other_sets):
    """Intersect this Set with *other_sets* via Redis SINTER and return
    the common members."""
    keys = [self.key]
    keys.extend(other.key for other in other_sets)
    return self.db.sinter(keys)
Performs an intersection between Sets. Returns a set of common members. Uses Redis.sinter.
def decode(self, frame: Frame, *, max_size: Optional[int] = None) -> Frame:
    """Decode an incoming frame (permessage-deflate decompression).

    Control frames and frames of uncompressed messages are returned
    unchanged. Raises PayloadTooBig when the decompressed payload would
    exceed *max_size*.
    """
    # control frames are never compressed
    if frame.opcode in CTRL_OPCODES:
        return frame
    if frame.opcode == OP_CONT:
        # continuation frame: compressed iff the message's initial frame
        # was compressed (tracked via decode_cont_data)
        if not self.decode_cont_data:
            return frame
        if frame.fin:
            self.decode_cont_data = False
    else:
        # initial frame of a message: RSV1 flags compression
        if not frame.rsv1:
            return frame
        if not frame.fin:
            self.decode_cont_data = True
        if self.remote_no_context_takeover:
            # fresh decompressor per message when context takeover is off
            self.decoder = zlib.decompressobj(wbits=-self.remote_max_window_bits)
    data = frame.data
    if frame.fin:
        # restore the trailing empty block stripped from the wire format
        data += _EMPTY_UNCOMPRESSED_BLOCK
    # max_length=0 means "no limit" for zlib
    max_length = 0 if max_size is None else max_size
    data = self.decoder.decompress(data, max_length)
    if self.decoder.unconsumed_tail:
        # leftover input means the size cap was hit mid-stream
        raise PayloadTooBig(
            f"Uncompressed payload length exceeds size limit (? > {max_size} bytes)"
        )
    if frame.fin and self.remote_no_context_takeover:
        del self.decoder
    return frame._replace(data=data, rsv1=False)
Decode an incoming frame.
def abstract(cls, predstr):
    """Instantiate an abstract Pred from its symbol string *predstr*."""
    lemma, pos, sense, _remainder = split_pred_string(predstr)
    return cls(Pred.ABSTRACT, lemma, pos, sense, predstr)
Instantiate a Pred from its symbol string.