code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def get_cmd_handler(self, cmd):
    """Look up the handler method matching *cmd* on this instance.

    Dashes in the command name are mapped to underscores, so a command
    ``foo-bar`` resolves to a method named ``foo_bar``.

    Args:
        cmd (str): The name of the command

    Returns:
        callable: which handles cmd

    Raises:
        BuildException: If a handler for cmd doesn't exist
    """
    method_name = cmd.replace('-', '_')
    handler = getattr(self, method_name, None)
    if handler is None:
        raise BuildException(
            'Command {} is not supported as a '
            'build command'.format(cmd)
        )
    return handler
Return a handler for cmd. The handler and the command should have the same name. See class description for more info about handlers. Args: cmd (str): The name of the command Returns: callable: which handles cmd Raises: lago.build.BuildException: If a handler for cmd doesn't exist
def _handle_actionconstantpool(self, _):
    """Handle the ActionConstantPool action.

    Reads the pool size and then that many strings from the source
    stream, yielding a single populated object.
    """
    obj = _make_object("ActionConstantPool")
    obj.Count = count = unpack_ui16(self._src)
    obj.ConstantPool = pool = []
    pool.extend(self._get_struct_string() for _ in range(count))
    yield obj
Handle the ActionConstantPool action.
def tagify(suffix='', prefix='', base=SALT):
    """Build a namespaced event tag by joining *base*, *prefix* and
    *suffix* with the TAGPARTER character.

    If *prefix* is a key in TAGS its mapped value is used instead of the
    raw string; if *suffix* is list-like, each element is joined
    individually. Empty parts are dropped.
    """
    parts = [base, TAGS.get(prefix, prefix)]
    if hasattr(suffix, 'append'):
        parts.extend(suffix)
    else:
        parts.append(suffix)
    for idx in range(len(parts)):
        try:
            parts[idx] = salt.utils.stringutils.to_str(parts[idx])
        except TypeError:
            parts[idx] = str(parts[idx])
    return TAGPARTER.join(part for part in parts if part)
convenience function to build a namespaced event tag string from joining with the TAGPARTER character the base, prefix and suffix If string prefix is a valid key in TAGS Then use the value of key prefix Else use prefix string If suffix is a list Then join all string elements of suffix individually Else use string suffix
def clone(self):
    """Return a copy of this inference context sharing the same path.

    Each side of e.g. a binary operation (BinOp) starts from the same
    context but diverges as it is inferred, so callers clone the context
    before branching.
    """
    duplicate = InferenceContext(self.path, inferred=self.inferred)
    duplicate.callcontext = self.callcontext
    duplicate.boundnode = self.boundnode
    duplicate.extra_context = self.extra_context
    return duplicate
Clone inference path For example, each side of a binary operation (BinOp) starts with the same context but diverge as each side is inferred so the InferenceContext will need be cloned
def walk(self, work, predicate=None):
    """DFS preorder walk of this target's dependency graph, visiting each
    node exactly once.

    :API: public

    :param work: Callable invoked with each visited target; may return
        additional targets, which are added to the walk candidate set.
    :param predicate: Optional callable used to test each target before
        it is handed to ``work`` and descended into.
    :raises ValueError: if ``work`` or ``predicate`` is not callable.
    """
    if not callable(work):
        raise ValueError('work must be callable but was {}'.format(work))
    if predicate and not callable(predicate):
        raise ValueError('predicate must be callable but was {}'.format(predicate))
    self._build_graph.walk_transitive_dependency_graph(
        [self.address], work, predicate)
Walk of this target's dependency graph, DFS preorder traversal, visiting each node exactly once. If a predicate is supplied it will be used to test each target before handing the target to work and descending. Work can return targets in which case these will be added to the walk candidate set if not already walked. :API: public :param work: Callable that takes a :py:class:`pants.build_graph.target.Target` as its single argument. :param predicate: Callable that takes a :py:class:`pants.build_graph.target.Target` as its single argument and returns True if the target should passed to ``work``.
def _findSamesetProteins(protToPeps, proteins=None):
    """Find proteins that are mapped to an identical set of peptides.

    :param protToPeps: dict, for each protein (=key) contains a set of
        associated peptides (=value).
    :param proteins: iterable, proteins tested for equal evidence; all
        proteins are tested when not specified.
    :returns: a list of sorted protein tuples that share equal peptide
        evidence.
    """
    if proteins is None:
        proteins = viewkeys(protToPeps)
    evidenceGroups = ddict(set)
    for protein in proteins:
        evidenceGroups[tuple(sorted(protToPeps[protein]))].add(protein)
    return [tuple(sorted(group))
            for group in viewvalues(evidenceGroups)
            if len(group) > 1]
Find proteins that are mapped to an identical set of peptides. :param protToPeps: dict, for each protein (=key) contains a set of associated peptides (=value). For Example {protein: {peptide, ...}, ...} :param proteins: iterable, proteins that are tested for having equal evidence. If not specified all proteins are tested :returns: a list of sorted protein tuples that share equal peptide evidence
def check_authorization(self, response):
    """Ensure an authorization check ran (and passed) during the request.

    :raises Unauthorized: if no authorization call was made, or it failed.
    """
    if not getattr(request, '_authorized', False):
        raise Unauthorized
    return response
checks that an authorization call has been made during the request
def delete_subscription(self):
    """Delete the subscription for this thread.

    :returns: bool
    """
    url = self._build_url('subscription', base_url=self._api)
    response = self._delete(url)
    return self._boolean(response, 204, 404)
Delete subscription for this thread. :returns: bool
def add_empty_magic_table(self, dtype, col_names=None, groups=None):
    """Attach a blank MagicDataFrame of type *dtype* to the contribution.

    Either *col_names* (list of column names) or *groups* (list of
    column group names) may be provided; col_names takes precedence.
    Invalid table names are reported and ignored.
    """
    if dtype not in self.table_names:
        print("-W- {} is not a valid MagIC table name".format(dtype))
        print("-I- Valid table names are: {}".format(", ".join(self.table_names)))
        return
    self.tables[dtype] = MagicDataFrame(dtype=dtype, columns=col_names,
                                        groups=groups)
Add a blank MagicDataFrame to the contribution. You can provide either a list of column names, or a list of column group names. If provided, col_names takes precedence.
def get_children(self, path, watch=None):
    """Return the children of the node at *path*, optionally setting a watch."""
    message = "ZK: Getting children of {path}".format(path=path)
    _log.debug(
        message,
    )
    return self.zk.get_children(path, watch)
Returns the children of the specified node.
def get_name(self):
    """Build a name from 4 class attributes.

    Each attribute is substituted by '' if it does not exist.

    :return: dependent_host_name/dependent_service_description..host_name/service_description
    :rtype: str
    """
    # Use a single format call instead of the old chained concatenation
    # (resolves the long-standing TODO to clean this up).
    return '{}/{}..{}/{}'.format(
        getattr(self, 'dependent_host_name', ''),
        getattr(self, 'dependent_service_description', ''),
        getattr(self, 'host_name', ''),
        getattr(self, 'service_description', ''))
Get name based on 4 class attributes Each attribute is substituted by '' if attribute does not exist :return: dependent_host_name/dependent_service_description..host_name/service_description :rtype: str TODO: Clean this function (use format for string)
def tag(self, tags):
    """Add or remove tags.

    *tags* is a comma/space separated list; entries prefixed with '-'
    are removed, entries prefixed with '+' (or bare) are added. The
    change is only pushed to the backend when the resulting tag set
    actually differs from the current one.
    """
    previous = self.tagged
    updated = previous.copy()
    for token in tags.lower().replace(',', ' ').split():
        if token.startswith('-'):
            updated.discard(token[1:])
        elif token.startswith('+'):
            updated.add(token[1:])
        else:
            updated.add(token)
    updated.discard('')
    if updated != previous:
        joined = ' '.join(sorted(updated))
        self._make_it_so("setting tags %r on" % (joined,),
                         ["custom.set"], "tags", joined)
        self._fields["custom_tags"] = joined
Add or remove tags.
def _setup(self):
    """Load the settings module pointed to by the environment variable.

    Used the first time any settings are needed, when the user has not
    previously configured the settings manually.

    :raises ImproperlyConfigured: if the variable is defined but empty.
    """
    settings_module = os.environ.get(ENVIRONMENT_SETTINGS_VARIABLE, 'settings')
    if not settings_module:
        raise ImproperlyConfigured(
            'Requested settings module points to an empty variable. '
            'You must either define the environment variable {0} '
            'or call settings.configure() before accessing the settings.'
            .format(ENVIRONMENT_SETTINGS_VARIABLE))
    self._wrapped = Settings(settings_module, default_settings=global_settings)
Load the settings module pointed to by the environment variable. This is used the first time we need any settings at all, if the user has not previously configured the settings manually.
def get_schedules(profile='pagerduty', subdomain=None, api_key=None):
    """List schedules belonging to this account.

    CLI Example:

        salt myminion pagerduty.get_schedules
    """
    return _list_items(
        'schedules',
        'id',
        profile=profile,
        subdomain=subdomain,
        api_key=api_key,
    )
List schedules belonging to this account CLI Example: salt myminion pagerduty.get_schedules
def _raise_document_too_large(operation, doc_size, max_size):
    """Internal helper for raising DocumentTooLarge."""
    if operation == "insert":
        message = ("BSON document too large (%d bytes)"
                   " - the connected server supports"
                   " BSON document sizes up to %d"
                   " bytes." % (doc_size, max_size))
    else:
        message = "%r command document too large" % (operation,)
    raise DocumentTooLarge(message)
Internal helper for raising DocumentTooLarge.
def connect(self):
    """Initialize the connection to nsqd and send the protocol magic.

    No-op when already connected.

    :raises errors.NSQException: if the connection was already closed.
    """
    if self.state == DISCONNECTED:
        raise errors.NSQException('connection already closed')
    if self.is_connected:
        return
    conn = Stream(self.address, self.port, self.timeout)
    conn.connect()
    self.stream = conn
    self.state = CONNECTED
    self.send(nsq.MAGIC_V2)
Initialize connection to the nsqd.
def load_img(path, grayscale=False, target_size=None):
    """Load an image from disk.

    Args:
        path: The image file path.
        grayscale: True to convert to a grayscale image (default False).
        target_size: optional (w, h) to resize to (default None).

    Returns:
        The loaded numpy image (uint8 when resized).
    """
    image = io.imread(path, grayscale)
    if target_size:
        image = transform.resize(
            image, target_size, preserve_range=True).astype('uint8')
    return image
Utility function to load an image from disk. Args: path: The image file path. grayscale: True to convert to grayscale image (Default value = False) target_size: (w, h) to resize. (Default value = None) Returns: The loaded numpy image.
def flush_all(self):
    """Drop all messages from every declared queue (normal and delayed)."""
    for name in chain(self.queues, self.delay_queues):
        self.flush(name)
Drop all messages from all declared queues.
def n_tasks(dec_num):
    """Return the number of ones in the binary representation of *dec_num*.

    This is the number of tasks being done by an organism whose phenotype
    is represented as a decimal number. String input is assumed to carry
    a '0b'-style prefix (e.g. '0b101'); numeric input is converted.
    """
    try:
        # String input: strip the two-character '0b' prefix.
        bitstring = dec_num[2:]
    except TypeError:
        # Numeric input: convert to binary and strip the '0b' prefix.
        # Narrow except (was a bare `except:` that hid real errors).
        bitstring = bin(int(dec_num))[2:]
    return bitstring.count("1")
Takes a decimal number as input and returns the number of ones in the binary representation. This translates to the number of tasks being done by an organism with a phenotype represented as a decimal number.
def get_ini_config(config=os.path.join(os.path.expanduser('~'), '.zdeskcfg'),
                   default_section=None, section=None):
    """Read the zdesk configuration from an ini file.

    Convenience wrapper so callers do not need to decorate and call
    their own function; handy when using zdesk and zdeskcfg from the
    interactive prompt.
    """
    plac_ini.call(__placeholder__, config=config,
                  default_section=default_section)
    return __placeholder__.getconfig(section)
This is a convenience function for getting the zdesk configuration from an ini file without the need to decorate and call your own function. Handy when using zdesk and zdeskcfg from the interactive prompt.
def Set(self, key, value):
    """Set *key* to *value* (or remove *key* when value is None) and
    dump the preferences to the preferences file.
    """
    # `is not None` instead of the old `not value == None`: comparing to
    # None with == is unreliable for objects overriding __eq__.
    if value is not None:
        self.prefs[key] = value
    else:
        self.prefs.pop(key)
    self.Dump()
Sets the key-value pair and dumps to the preferences file.
def get_or_create_in_transaction_wrapper(tsession, model, values,
                                         missing_columns=None,
                                         variable_columns=None,
                                         updatable_columns=None,
                                         read_only=False,
                                         only_use_supplied_columns=False):
    """Profiling shim for get_or_create_in_transaction.

    Switch calls to get_or_create_in_transaction over to this wrapper in
    suspected functions to determine which caller is spending time there
    when profiling the database API.

    The list parameters default to None (not mutable `[]` defaults, which
    are shared across calls) and are normalised to fresh lists here.
    """
    return get_or_create_in_transaction(
        tsession, model, values,
        missing_columns=missing_columns if missing_columns is not None else [],
        variable_columns=variable_columns if variable_columns is not None else [],
        updatable_columns=updatable_columns if updatable_columns is not None else [],
        only_use_supplied_columns=only_use_supplied_columns,
        read_only=read_only)
This function can be used to determine which calling method is spending time in get_or_create_in_transaction when profiling the database API. Switch out calls to get_or_create_in_transaction to get_or_create_in_transaction_wrapper in the suspected functions to determine where the pain lies.
def add_user(self, attrs):
    """Add a user to the backend.

    :param attrs: attributes of the user
    :type attrs: dict ({<attr>: <value>})

    .. warning:: raises UserAlreadyExists if the user already exists
    """
    username = attrs[self.key]
    if username in self.users:
        raise UserAlreadyExists(username, self.backend_name)
    self.users[username] = attrs
    self.users[username]['groups'] = set()
Add a user to the backend :param attrs: attributes of the user :type attrs: dict ({<attr>: <value>}) .. warning:: raise UserAlreadyExists if user already exists
def factory(cls, config, db):
    """Select and return an appropriate GetBucketKey subclass instance.

    Uses the Lua-script strategy only when both the client and the
    server support the Redis scripting feature; otherwise a lock-based
    strategy is used.

    :param config: A dictionary of compactor options.
    :param db: A database handle for the Redis database.
    :returns: An instance of a subclass of GetBucketKey.
    """
    # Client-side capability check.
    if not hasattr(db, 'register_script'):
        LOG.debug("Redis client does not support register_script()")
        return GetBucketKeyByLock(config, db)
    info = db.info()
    # NOTE(review): this reads as "2.6 greater than server version" yet
    # the success branch claims script support — verify the argument
    # order/semantics of version_greater() match the intended check.
    if version_greater('2.6', info['redis_version']):
        LOG.debug("Redis server supports register_script()")
        return GetBucketKeyByScript(config, db)
    LOG.debug("Redis server does not support register_script()")
    return GetBucketKeyByLock(config, db)
Given a configuration and database, select and return an appropriate instance of a subclass of GetBucketKey. This will ensure that both client and server support are available for the Lua script feature of Redis, and if not, a lock will be used. :param config: A dictionary of compactor options. :param db: A database handle for the Redis database. :returns: An instance of a subclass of GetBucketKey, dependent on the support for the Lua script feature of Redis.
def get_broker_id(data_path):
    """Read the automatically generated broker_id from the data folder.

    :param string data_path: the path to the kafka data folder
    :returns int: the real broker_id
    :raises ValueError: if *data_path* is empty/None
    """
    if not data_path:
        raise ValueError("You need to specify the data_path if broker_id == -1")
    meta_properties_path = "{data_path}/meta.properties".format(
        data_path=data_path)
    return _read_generated_broker_id(meta_properties_path)
This function will look into the data folder to get the automatically created broker_id. :param string data_path: the path to the kafka data folder :returns int: the real broker_id
def init_db(self, app, entry_point_group='invenio_db.models', **kwargs):
    """Initialize Flask-SQLAlchemy extension.

    Sets SQLAlchemy config defaults, initializes the database object and
    versioning support, loads all model classes registered under
    *entry_point_group*, and wires up versioning classes when the
    ``DB_VERSIONING`` config flag is enabled.
    """
    # Default to a sqlite database inside the app's instance folder.
    app.config.setdefault(
        'SQLALCHEMY_DATABASE_URI',
        'sqlite:///' + os.path.join(app.instance_path, app.name + '.db')
    )
    app.config.setdefault('SQLALCHEMY_ECHO', False)
    # An explicit database object may be passed via kwargs['db'].
    database = kwargs.get('db', db)
    database.init_app(app)
    self.init_versioning(app, database, kwargs.get('versioning_manager'))
    # Import every registered model so mappers can be configured below.
    if entry_point_group:
        for base_entry in pkg_resources.iter_entry_points(
                entry_point_group):
            base_entry.load()
    sa.orm.configure_mappers()
    if app.config['DB_VERSIONING']:
        manager = self.versioning_manager
        if manager.pending_classes:
            if not versioning_models_registered(manager, database.Model):
                manager.builder.configure_versioned_classes()
        elif 'transaction' not in database.metadata.tables:
            # No pending classes and no transaction table yet: build the
            # transaction model against this app's declarative base.
            manager.declarative_base = database.Model
            manager.create_transaction_model()
            manager.plugins.after_build_tx_class(manager)
Initialize Flask-SQLAlchemy extension.
def set_playback(self, playback):
    """Send a Playback command to the device."""
    req_url = ENDPOINTS["setPlayback"].format(self._ip_address)
    return request(req_url, params={"playback": playback})
Send Playback command.
def for_target(self, target):
    """Return the PinnedJarArtifactSet that should manage *target*.

    :param Target target: The jar_library for which to find the
        managed_jar_dependencies object.
    :return: The artifact set of the target's managed_jar_dependencies
        object, the default artifact set when it has none, or None if the
        target is not a JarLibrary.
    :rtype: PinnedJarArtifactSet
    """
    if not isinstance(target, JarLibrary):
        return None
    managed = target.managed_dependencies
    if not managed:
        return self.default_artifact_set
    return self._artifact_set_map[managed.id]
Computes and returns the PinnedJarArtifactSet that should be used to manage the given target. This returns None if the target is not a JarLibrary. :param Target target: The jar_library for which to find the managed_jar_dependencies object. :return: The artifact set of the managed_jar_dependencies object for the target, or the default artifact set from --default-target. :rtype: PinnedJarArtifactSet
def cancelAllPendingResults(self):
    """Cancel all pending results.

    Only the notebook's record is affected; jobs already running in a
    lab are untouched. Non-dict entries (pending jobs) are dropped from
    every result list.
    """
    for key, entries in self._results.items():
        self._results[key] = [item for item in entries
                              if isinstance(item, dict)]
    self._pending = dict()
Cancel all pending results. Note that this only affects the notebook's record, not any job running in a lab.
def add_listener(self, callback, event_type=None):
    """Register a callback for events arriving in this room.

    Args:
        callback (func(room, event)): Called when an event arrives.
        event_type (str): Optional event type to filter for.

    Returns:
        uuid.UUID: Unique id identifying the listener.
    """
    listener_id = uuid4()
    self.listeners.append({
        'uid': listener_id,
        'callback': callback,
        'event_type': event_type,
    })
    return listener_id
Add a callback handler for events going to this room. Args: callback (func(room, event)): Callback called when an event arrives. event_type (str): The event_type to filter for. Returns: uuid.UUID: Unique id of the listener, can be used to identify the listener.
def update(self, **kwargs):
    """Custom update method to implement monitor parameter formatting.

    Formats the ``monitor`` value from *kwargs* when supplied; otherwise
    reformats the instance's own ``monitor`` attribute in place before
    delegating to the parent ``_update``.
    """
    if 'monitor' in kwargs:
        value = self._format_monitor_parameter(kwargs['monitor'])
        kwargs['monitor'] = value
    elif 'monitor' in self.__dict__:
        # NOTE(review): this branch rewrites self.__dict__['monitor'] but
        # does not add it to kwargs — confirm _update() reads the monitor
        # value from instance state in this case.
        value = self._format_monitor_parameter(self.__dict__['monitor'])
        self.__dict__['monitor'] = value
    return super(Pool, self)._update(**kwargs)
Custom update method to implement monitor parameter formatting.
def remove_connection(self, connection):
    """Drop *connection* from the pool; called by connections themselves
    when they have been closed.

    Skipped while the owner is shutting down, since the connection list
    is being torn down elsewhere.
    """
    if self._closing:
        return
    self.connections.remove(connection)
    logger.debug("removed connection")
Called by the connections themselves when they have been closed.
def forget(empowered, powerupClass, interface):
    """Forget powerups previously stored with ``remember``.

    :param empowered: The Empowered (Store or Item) to be powered down.
    :type empowered: ``axiom.item.Empowered``
    :param powerupClass: The class for which powerups will be forgotten.
    :param interface: The interface the powerups were installed for.
    :returns: ``None``
    :raises ValueError: Class wasn't previously remembered.
    """
    className = fullyQualifiedName(powerupClass)
    items = empowered.store.query(
        _StoredByName, _StoredByName.className == className)
    if items.count() == 0:
        template = "No named powerups for {} (interface: {})".format
        raise ValueError(template(powerupClass, interface))
    for stored in items:
        empowered.powerDown(stored, interface)
        stored.deleteFromStore()
Forgets powerups previously stored with ``remember``. :param empowered: The Empowered (Store or Item) to be powered down. :type empowered: ``axiom.item.Empowered`` :param powerupClass: The class for which powerups will be forgotten. :type powerupClass: class :param interface: The interface the powerups were installed for. :type interface: ``zope.interface.Interface`` :returns: ``None`` :raises ValueError: Class wasn't previously remembered.
def getConfigFile():
    """Return the config file location.

    Uses $WAKATIME_HOME/.wakatime.cfg when the WAKATIME_HOME environment
    variable is defined, otherwise ~/.wakatime.cfg.
    """
    fileName = '.wakatime.cfg'
    home = os.environ.get('WAKATIME_HOME')
    base = os.path.expanduser(home) if home else os.path.expanduser('~')
    return os.path.join(base, fileName)
Returns the config file location. If the $WAKATIME_HOME env variable is defined, returns $WAKATIME_HOME/.wakatime.cfg, otherwise ~/.wakatime.cfg.
def stream(self, sha):
    """Return an OStream for *sha*; all lookup is delegated to git itself."""
    hexsha, typename, size, data_stream = self._git.stream_object_data(
        bin_to_hex(sha))
    return OStream(hex_to_bin(hexsha), typename, size, data_stream)
For now, all lookup is done by git itself
def accept_lit(char, buf, pos):
    """Accept the literal *char* at position *pos* of *buf*.

    Returns (char, pos + 1) on a match, or (None, pos) when the position
    is out of range or holds a different character.
    """
    if pos < len(buf) and buf[pos] == char:
        return char, pos + 1
    return None, pos
Accept a literal character at the current buffer position.
def handle_json_GET_boundboxstops(self, params):
    """Return up to 'limit' stops inside the bounding box whose NE and SW
    corners are given by the 'n','e' and 's','w' params.

    Does not handle boxes crossing longitude line 180.
    """
    schedule = self.server.schedule
    north = float(params.get('n'))
    east = float(params.get('e'))
    south = float(params.get('s'))
    west = float(params.get('w'))
    limit = int(params.get('limit'))
    stops = schedule.GetStopsInBoundingBox(
        north=north, east=east, south=south, west=west, n=limit)
    return [StopToTuple(stop) for stop in stops]
Return a list of up to 'limit' stops within bounding box with 'n','e' and 's','w' in the NE and SW corners. Does not handle boxes crossing longitude line 180.
def camel_to_snake(camel_str):
    """Convert *camel_str* from camelCase to snake_case."""
    partial = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel_str)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', partial).lower()
Convert `camel_str` from camelCase to snake_case
def publish_topology_description_changed(self, previous_description,
                                         new_description, topology_id):
    """Publish a TopologyDescriptionChangedEvent to all topology listeners.

    :Parameters:
      - `previous_description`: The previous topology description.
      - `new_description`: The new topology description.
      - `topology_id`: A unique identifier for the topology this server
        is a part of.
    """
    event = TopologyDescriptionChangedEvent(
        previous_description, new_description, topology_id)
    for subscriber in self.__topology_listeners:
        try:
            subscriber.description_changed(event)
        except Exception:
            _handle_exception()
Publish a TopologyDescriptionChangedEvent to all topology listeners. :Parameters: - `previous_description`: The previous topology description. - `new_description`: The new topology description. - `topology_id`: A unique identifier for the topology this server is a part of.
def _get_siblings(self, pos): parent = self.parent_position(pos) siblings = [pos] if parent is not None: siblings = self._list_dir(parent) return siblings
returns the siblings of pos, i.e. the entries of its parent directory (or just [pos] when it has no parent)
def list_authors():
    """List all authors, e.g. GET /authors."""
    chunks = ['<p>Authors:</p>']
    for author in Author.query.all():
        chunks.append('<p>%s</p>' % author.name)
    return ''.join(chunks)
List all authors. e.g.: GET /authors
def _sample_oat(problem, N, num_levels=4):
    """Generate trajectories without groups.

    Arguments
    ---------
    problem : dict
        The problem definition
    N : int
        The number of samples to generate
    num_levels : int, default=4
        The number of grid levels
    """
    group_membership = np.asmatrix(np.identity(problem['num_vars'],
                                               dtype=int))
    num_params = group_membership.shape[0]
    # Each trajectory contributes (num_params + 1) rows.
    # (The old dead `np.zeros` pre-allocation, immediately overwritten,
    # has been removed.)
    sample = np.array([generate_trajectory(group_membership, num_levels)
                       for n in range(N)])
    return sample.reshape((N * (num_params + 1), num_params))
Generate trajectories without groups Arguments --------- problem : dict The problem definition N : int The number of samples to generate num_levels : int, default=4 The number of grid levels
def top(self, container, ps_args=None):
    """Display the running processes of a container.

    Args:
        container (str): The container to inspect
        ps_args (str): Optional arguments passed to ps (e.g. ``aux``)

    Returns:
        (str): The output of top

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    url = self._url("/containers/{0}/top", container)
    params = {} if ps_args is None else {'ps_args': ps_args}
    return self._result(self._get(url, params=params), True)
Display the running processes of a container. Args: container (str): The container to inspect ps_args (str): An optional arguments passed to ps (e.g. ``aux``) Returns: (str): The output of the top Raises: :py:class:`docker.errors.APIError` If the server returns an error.
def index_labels(labels, case_sensitive=False):
    """Convert a list of string identifiers into numerical indices.

    Parameters
    ----------
    labels : list of strings, shape=(n,)
        Annotations, e.g. segment or chord labels.
    case_sensitive : bool
        Enable case-sensitive label indexing (default False).

    Returns
    -------
    indices : list, shape=(n,)
        Numerical representation of ``labels``.
    index_to_label : dict
        Mapping from numerical indices back to labels.
    """
    if not case_sensitive:
        labels = [str(s).lower() for s in labels]
    ordered = sorted(set(labels))
    label_to_index = {label: idx for idx, label in enumerate(ordered)}
    index_to_label = {idx: label for idx, label in enumerate(ordered)}
    indices = [label_to_index[s] for s in labels]
    return indices, index_to_label
Convert a list of string identifiers into numerical indices. Parameters ---------- labels : list of strings, shape=(n,) A list of annotations, e.g., segment or chord labels from an annotation file. case_sensitive : bool Set to True to enable case-sensitive label indexing (Default value = False) Returns ------- indices : list, shape=(n,) Numerical representation of ``labels`` index_to_label : dict Mapping to convert numerical indices back to labels. ``labels[i] == index_to_label[indices[i]]``
def get_lang_array(self):
    """Return the supported language directions as an array."""
    response = self.yandex_translate_request("getLangs", "")
    self.handle_errors(response)
    return response.json()["dirs"]
gets supported langs as an array
def collect_variables(self, selections) -> None:
    """Collect the relevant target and base variables handled by the
    devices of the given |Selections| object.

    Applies method |ChangeItem.collect_variables| of the base class and
    then |ExchangeItem.insert_variables| to fill ``device2base`` from
    ``basespecs``.
    """
    super().collect_variables(selections)
    self.insert_variables(self.device2base, self.basespecs, selections)
Apply method |ChangeItem.collect_variables| of the base class |ChangeItem| and also apply method |ExchangeItem.insert_variables| of class |ExchangeItem| to collect the relevant base variables handled by the devices of the given |Selections| object. >>> from hydpy.core.examples import prepare_full_example_2 >>> hp, pub, TestIO = prepare_full_example_2() >>> from hydpy import AddItem >>> item = AddItem( ... 'alpha', 'hland_v1', 'control.sfcf', 'control.rfcf', 0) >>> item.collect_variables(pub.selections) >>> land_dill = hp.elements.land_dill >>> control = land_dill.model.parameters.control >>> item.device2target[land_dill] is control.sfcf True >>> item.device2base[land_dill] is control.rfcf True >>> for device in sorted(item.device2base, key=lambda x: x.name): ... print(device) land_dill land_lahn_1 land_lahn_2 land_lahn_3
def _has_commit(version, debug=False):
    """Determine whether *version* is a local git commit sha.

    :param version: A string containing the branch/tag/sha to be tested.
    :param debug: An optional bool to toggle debug output.
    :return: bool
    """
    if _has_tag(version, debug) or _has_branch(version, debug):
        return False
    try:
        util.run_command(sh.git.bake('cat-file', '-e', version), debug=debug)
    except sh.ErrorReturnCode:
        return False
    return True
Determine a version is a local git commit sha or not. :param version: A string containing the branch/tag/sha to be determined. :param debug: An optional bool to toggle debug output. :return: bool
def _machinectl(cmd, output_loglevel='debug', ignore_retcode=False,
                use_vt=False):
    """Helper function to run a machinectl subcommand via cmd.run_all."""
    prefix = 'machinectl --no-legend --no-pager'
    full_cmd = '{0} {1}'.format(prefix, cmd)
    return __salt__['cmd.run_all'](full_cmd,
                                   output_loglevel=output_loglevel,
                                   ignore_retcode=ignore_retcode,
                                   use_vt=use_vt)
Helper function to run machinectl
def _init_job_from_response(self, response):
    """Create a Job instance from a response, or None when the response
    carries no job reference."""
    if not (response and 'jobReference' in response):
        return None
    return _job.Job(job_id=response['jobReference']['jobId'],
                    context=self._context)
Helper function to create a Job instance from a response.
def use_embedded_pkgs(embedded_lib_path=None):
    """Temporarily insert the embedded packages directory near the front
    of sys.path for the duration of the ``with`` block, restoring the
    original path afterwards."""
    if embedded_lib_path is None:
        embedded_lib_path = get_embedded_lib_path()
    saved_sys_path = list(sys.path)
    sys.path.insert(1, embedded_lib_path)
    try:
        yield
    finally:
        sys.path = saved_sys_path
Temporarily prepend embedded packages to sys.path.
def knots_from_marginal(marginal, nr_knots, spline_order):
    """Determine knot placement from a marginal distribution.

    Knots are placed so that each interval covers the same amount of
    probability mass. Two knots are reserved for the borders, which are
    treated separately.

    Input:
        marginal: Array estimate of the marginal distribution.
        nr_knots: int, number of knots to be placed.
        spline_order: int, order of the splines.

    Returns:
        knots: Array of knot positions.
    """
    cdf = np.cumsum(marginal)
    cdf = cdf / cdf.max()
    quantiles = np.linspace(0, 1, nr_knots)
    interior = np.unique(
        [np.where(cdf >= q)[0][0] for q in quantiles[1:-1]]).tolist()
    knot_placement = [0] + interior + [len(marginal) - 1]
    return augknt(knot_placement, spline_order)
Determines knot placement based on a marginal distribution. It places knots such that each knot covers the same amount of probability mass. Two of the knots are reserved for the borders which are treated separately. For example, a uniform distribution with 5 knots will cause the knots to be equally spaced with 25% of the probability mass between each two knots. Input: marginal: Array Estimate of the marginal distribution used to estimate knot placement. nr_knots: int Number of knots to be placed. spline_order: int Order of the splines Returns: knots: Array Sequence of knot positions
def get_itoken(self, env):
    """Return the current internal token used for the auth system's own
    actions with other services.

    Each process mints its own itoken, recreated when it expires (per
    the token_life configuration). The token info is stored in memcache
    because the auth process asked by Swift to validate the token may
    not be the one that created it.

    :raises Exception: if no memcache client is configured.
    """
    # Re-mint when missing, expired, or the caller explicitly requests a
    # fresh token via the X-Auth-New-Token header.
    if not self.itoken or self.itoken_expires < time() or \
            env.get('HTTP_X_AUTH_NEW_TOKEN', 'false').lower() in \
            TRUE_VALUES:
        self.itoken = '%sitk%s' % (self.reseller_prefix, uuid4().hex)
        memcache_key = '%s/auth/%s' % (self.reseller_prefix, self.itoken)
        self.itoken_expires = time() + self.token_life
        memcache_client = cache_from_env(env)
        if not memcache_client:
            raise Exception(
                'No memcache set up; required for Swauth middleware')
        # Store (expiry, groups) so any auth process can validate it.
        memcache_client.set(
            memcache_key,
            (self.itoken_expires,
             '.auth,.reseller_admin,%s.auth' % self.reseller_prefix),
            time=self.token_life)
    return self.itoken
Returns the current internal token to use for the auth system's own actions with other services. Each process will create its own itoken and the token will be deleted and recreated based on the token_life configuration value. The itoken information is stored in memcache because the auth process that is asked by Swift to validate the token may not be the same as the auth process that created the token.
def get_last_post_for_model(cr, uid, ids, model_pool):
    """Map each object id to the date of its most recent message.

    To be called in post-migration scripts.

    :param cr: database cursor
    :param uid: user id, assumed to be openerp.SUPERUSER_ID
    :param ids: id or list of ids of the model in question
    :param model_pool: orm model pool, assumed to be from pool.get()
    :return: a dict with ids as keys and latest message dates (or False
        when there are no messages) as values
    """
    if type(ids) is not list:
        ids = [ids]
    res = {}
    for record in model_pool.browse(cr, uid, ids):
        messages = record.message_ids
        if messages:
            res[record.id] = max(messages, key=lambda m: m.date).date
        else:
            res[record.id] = False
    return res
Given a set of ids and a model pool, return a dict of each object ids with their latest message date as a value. To be called in post-migration scripts :param cr: database cursor :param uid: user id, assumed to be openerp.SUPERUSER_ID :param ids: ids of the model in question to retrieve ids :param model_pool: orm model pool, assumed to be from pool.get() :return: a dict with ids as keys and with dates as values
def commit(self):
    """Make a ``ReadModifyWriteRow`` API request, committing the
    mutations accumulated by append_cell_value / increment_cell_value
    atomically, then reset the local mutations.

    Returns ``{}`` without any API request when nothing was modified.

    :rtype: dict
    :returns: The new contents of all modified cells, keyed by column
        family, then column; each cell is a (value, timestamp) two-tuple.
    :raises: :class:`ValueError <exceptions.ValueError>` if the number of
        mutations exceeds :data:`MAX_MUTATIONS`.
    """
    num_mutations = len(self._rule_pb_list)
    if num_mutations == 0:
        # Nothing accumulated: skip the API round trip entirely.
        return {}
    if num_mutations > MAX_MUTATIONS:
        raise ValueError(
            "%d total append mutations exceed the maximum "
            "allowable %d." % (num_mutations, MAX_MUTATIONS)
        )
    data_client = self._table._instance._client.table_data_client
    row_response = data_client.read_modify_write_row(
        table_name=self._table.name,
        row_key=self._row_key,
        rules=self._rule_pb_list
    )
    # Reset state so this row object can accumulate new mutations.
    self.clear()
    return _parse_rmw_row_response(row_response)
Makes a ``ReadModifyWriteRow`` API request. This commits modifications made by :meth:`append_cell_value` and :meth:`increment_cell_value`. If no modifications were made, makes no API request and just returns ``{}``. Modifies a row atomically, reading the latest existing timestamp / value from the specified columns and writing a new value by appending / incrementing. The new cell created uses either the current server time or the highest timestamp of a cell in that column (if it exceeds the server time). After committing the accumulated mutations, resets the local mutations. For example: .. literalinclude:: snippets_table.py :start-after: [START bigtable_row_commit] :end-before: [END bigtable_row_commit] :rtype: dict :returns: The new contents of all modified cells. Returned as a dictionary of column families, each of which holds a dictionary of columns. Each column contains a list of cells modified. Each cell is represented with a two-tuple with the value (in bytes) and the timestamp for the cell. :raises: :class:`ValueError <exceptions.ValueError>` if the number of mutations exceeds the :data:`MAX_MUTATIONS`.
def _beam_decode(self, features, decode_length, beam_size, top_beams, alpha,
                 use_tpu=False):
    """Beam search decoding; delegates to the slow generic implementation.

    Models should ideally override this with a more efficient version.

    Args:
        features: a map of string to `Tensor`.
        decode_length: additional timesteps to decode.
        beam_size: number of beams.
        top_beams: how many of the beams to return.
        alpha: float length-penalty strength.
        use_tpu: whether to beam-decode on TPU.

    Returns:
        samples: an integer `Tensor` of top samples from the beam search.
    """
    return self._beam_decode_slow(features, decode_length, beam_size,
                                  top_beams, alpha, use_tpu)
Beam search decoding. Models should ideally implement a more efficient version of this function. Args: features: an map of string to `Tensor` decode_length: an integer. How many additional timesteps to decode. beam_size: number of beams. top_beams: an integer. How many of the beams to return. alpha: Float that controls the length penalty. larger the alpha, stronger the preference for longer translations. use_tpu: A bool, whether to do beam decode on TPU. Returns: samples: an integer `Tensor`. Top samples from the beam search
def whois_nameservers(self, nameservers):
    """Call the WHOIS Nameserver end point.

    Args:
        nameservers: An enumerable of nameservers.

    Returns:
        A dict of {nameserver: domain_result}.
    """
    return self._multi_get('opendns-whois-nameservers',
                           u'whois/nameservers/{0}',
                           nameservers)
Calls WHOIS Nameserver end point Args: nameservers: An enumerable of nameservers Returns: A dict of {nameserver: domain_result}
def fit(self, x, y, dcoef='none'):
    """Perform a least-squares fit of ``self.func`` to the data y(x).

    x, y : list
        Matching data arrays that define the numerical function y(x);
        this is the data to be fitted.
    dcoef : list or string
        A different guess for the coefficients, or the string 'none' to
        use the initial guess (default 'none').

    Returns
    -------
    ierr
        leastsq status flag; values between 1 and 4 signal success.

    Notes
    -----
    The fitted coefficients are stored in ``self.fcoef``.
    """
    self.x = x
    self.y = y
    # `!=` instead of the old `is not`: identity comparison against a
    # string literal is unreliable (and a SyntaxWarning on modern Python).
    if dcoef != 'none':
        coef = dcoef
    else:
        coef = self.coef
    fcoef = optimize.leastsq(self.residual, coef, args=(y, self.func, x))
    self.fcoef = fcoef[0].tolist()
    return fcoef[1]
performs the fit x, y : list Matching data arrays that define a numerical function y(x), this is the data to be fitted. dcoef : list or string You can provide a different guess for the coefficients, or provide the string 'none' to use the initial guess. The default is 'none'. Returns ------- ierr Values between 1 and 4 signal success. Notes ----- self.fcoef, contains the fitted coefficients.
def window(ible, length):
    """Split *ible* into successive lists of at most *length* items.

    >>> list(window(range(5), 2))
    [[0, 1], [2, 3], [4]]

    :raises ValueError: if *length* is not positive (the old code raised
        a bare ValueError with no message).
    """
    from itertools import islice
    if length <= 0:
        raise ValueError('length must be positive, got {}'.format(length))
    iterator = iter(ible)
    while True:
        chunk = list(islice(iterator, length))
        if not chunk:
            return
        yield chunk
Split `ible` into multiple lists of length `length`. >>> list(window(range(5), 2)) [[0, 1], [2, 3], [4]]
def unique_field_data_types(self):
    """Check whether all variants have different data types.

    If so, the selected variant can be determined just by the data type
    of the value, without needing a field name / tag.

    Returns:
        bool: True when no two non-void fields share a data type name.

    The old code returned True as soon as it met a void-typed field
    (skipping the remaining checks) and fell off the end returning None
    when no void field existed; both paths are fixed to scan every field
    and return a real bool.
    """
    data_type_names = set()
    for field in self.fields:
        if is_void_type(field.data_type):
            continue
        if field.data_type.name in data_type_names:
            return False
        data_type_names.add(field.data_type.name)
    return True
Checks if all variants have different data types. If so, the selected variant can be determined just by the data type of the value without needing a field name / tag. In some languages, this lets us make a shortcut
def _normalise_key_values(filter_obj, attr_map=None):
    """Convert nested dictionary filters into django-style key__op pairs.

    Operator names and aliases are canonicalised through
    FILTER_OPERATOR_ALIASES, keys are renamed according to *attr_map*
    when one is given, and a bare (non-dict) constraint is treated as an
    implicit equality test.

    :raises CloudValueError: on an unknown key (when attr_map is given)
        or an unknown operator.
    """
    new_filter = {}
    for key, constraints in filter_obj.items():
        aliased_key = key
        if attr_map is not None:
            aliased_key = attr_map.get(key)
            if aliased_key is None:
                raise CloudValueError(
                    'Invalid key %r for filter attribute; must be one of:\n%s' % (
                        key, attr_map.keys()
                    )
                )
        if not isinstance(constraints, dict):
            # Implicit equality: {'name': 'x'} -> {'name': {'eq': 'x'}}
            constraints = {'eq': constraints}
        for operator, value in constraints.items():
            # Accept mongo-style '$gte' as well as plain 'gte'.
            canonical_operator = FILTER_OPERATOR_ALIASES.get(operator.lstrip('$'))
            if canonical_operator is None:
                raise CloudValueError(
                    'Invalid operator %r for filter key %s; must be one of:\n%s' % (
                        operator, key, FILTER_OPERATOR_ALIASES.keys()
                    )
                )
            canonical_key = str('%s__%s' % (aliased_key, canonical_operator))
            new_filter[canonical_key] = _normalise_value(value)
    return new_filter
Converts nested dictionary filters into django-style key value pairs Map filter operators and aliases to operator-land Additionally, perform replacements according to attribute map Automatically assumes __eq if not explicitly defined
def is_child_of_family(self, id_, family_id):
    """Test whether ``id_`` is a direct child of ``family_id``.

    Delegates to the catalog session when one is available, otherwise to
    the hierarchy session.

    return: (boolean) - ``true`` if ``id`` is a child of ``family_id``
    raise: NotFound - ``family_id`` is not found
    raise: NullArgument - ``id`` or ``family_id`` is ``null``
    """
    if self._catalog_session is not None:
        return self._catalog_session.is_child_of_catalog(
            id_=id_, catalog_id=family_id)
    return self._hierarchy_session.is_child(id_=family_id, child_id=id_)
Tests if a family is a direct child of another. arg: id (osid.id.Id): an ``Id`` arg: family_id (osid.id.Id): the ``Id`` of a family return: (boolean) - ``true`` if the ``id`` is a child of ``family_id,`` ``false`` otherwise raise: NotFound - ``family_id`` is not found raise: NullArgument - ``id`` or ``family_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` not found return ``false``.
def get_resource_cache(resourceid):
    """Return the cached dictionary for an individual resourceid,
    creating a fresh one when missing (or when the cached value is falsy,
    matching the original semantics).

    :param resourceid: String resource id.
    :raises ResourceInitError: if resourceid is missing/empty.
    :return: dict
    """
    if not resourceid:
        raise ResourceInitError("Resource id missing")
    if not DutInformationList._cache.get(resourceid):
        DutInformationList._cache[resourceid] = dict()
    return DutInformationList._cache[resourceid]
Get a cached dictionary related to an individual resourceid. :param resourceid: String resource id. :return: dict
def render_unregistered(error=None):
    """Render the index template for an unregistered user.

    Args:
        error (str, default None): Optional error message.

    Returns:
        str: Template filled with data.
    """
    return template(
        read_index_template(),
        registered=False,
        error=error,
        seeder_data=None,
        url_id=None,
    )
Render template file for the unregistered user. Args: error (str, default None): Optional error message. Returns: str: Template filled with data.
def unpack(self, data):
    """Set the field value from the leading bytes of *data*.

    :param data: The byte string of the data to unpack.
    :return: The remaining data for subsequent fields.
    """
    size = self._get_calculated_size(self.size, data)
    self.set_value(data[0:size])
    # NOTE(review): the consumed length is len(self) after set_value,
    # not `size` — confirm the two always agree for this field type.
    return data[len(self):]
Takes in a byte string and set's the field value based on field definition. :param structure: The message structure class object :param data: The byte string of the data to unpack :return: The remaining data for subsequent fields
def inverse_transform(self, maps):
    """Transform from luminosity distance to chirp distance, given the
    chirp mass.

    Parameters
    ----------
    maps : a mapping object with 'distance' and 'mchirp' entries.

    Returns
    -------
    out : dict
        Parameter name to transformed values.
    """
    chirp_dist = conversions.chirp_distance(
        maps[parameters.distance], maps[parameters.mchirp],
        ref_mass=self.ref_mass)
    out = {parameters.chirp_distance: chirp_dist}
    return self.format_output(maps, out)
This function transforms from luminosity distance to chirp distance, given the chirp mass. Parameters ---------- maps : a mapping object Examples -------- Convert a dict of numpy.array: >>> import numpy as np >>> from pycbc import transforms >>> t = transforms.ChirpDistanceToDistance() >>> t.inverse_transform({'distance': np.array([40.]), 'mchirp': np.array([1.2])}) {'distance': array([ 40.]), 'chirp_distance': array([ 40.52073522]), 'mchirp': array([ 1.2])} Returns ------- out : dict A dict with key as parameter name and value as numpy.array or float of transformed values.
def organization_fields(self, organization):
    """Retrieve the organization fields for this organization.

    :param organization: Organization object or id.
    """
    return self._query_zendesk(
        self.endpoint.organization_fields,
        'organization_field',
        id=organization,
    )
Retrieve the organization fields for this organization. :param organization: Organization object or id
def send_command(self, command: str, *args, **kwargs):
    """Ask the bot to perform an action by name.

    The command name plus JSON-encoded args/kwargs are sent as a
    multipart frame over the command socket.
    """
    log = self._messaging_logger.command
    log.info('send command `%s` to bot. Args: %s | Kwargs: %s',
             command, args, kwargs)
    frame = (b'',
             command.encode('utf8'),
             _json.dumps(args).encode('utf8'),
             _json.dumps(kwargs).encode('utf8'))
    log.debug(' send command run_control_loop: %s', self._run_control_loop)
    if self._run_control_loop:
        # Defer the send onto the control loop's thread.
        self.add_callback(self.command_socket.send_multipart, frame)
    else:
        self.command_socket.send_multipart(frame)
For request bot to perform some action
def cmd_follow(self, args):
    """Enable or disable following of the vehicle on the map."""
    if len(args) < 2:
        print("map follow 0|1")
        return
    self.map.set_follow(int(args[1]))
control following of vehicle
def monkhorst(cls, ngkpt, shiftk=(0.5, 0.5, 0.5), chksymbreak=None, use_symmetries=True, use_time_reversal=True, comment=None):
    """Convenient constructor for a Monkhorst-Pack mesh.

    Args:
        ngkpt: Subdivisions N_1, N_2 and N_3 along reciprocal lattice vectors.
        shiftk: Shift applied to the k-points.
        chksymbreak: Value forwarded to the constructor (None leaves it unset).
        use_symmetries: Use spatial symmetries to reduce the number of k-points.
        use_time_reversal: Use time-reversal symmetry to reduce the number of k-points.
        comment: Optional comment; a default description is used when falsy.

    Returns:
        :class:`KSampling` object.
    """
    comment = comment or "Monkhorst-Pack scheme with user-specified shiftk"
    return cls(kpts=[ngkpt],
               kpt_shifts=shiftk,
               use_symmetries=use_symmetries,
               use_time_reversal=use_time_reversal,
               chksymbreak=chksymbreak,
               comment=comment)
Convenient static constructor for a Monkhorst-Pack mesh. Args: ngkpt: Subdivisions N_1, N_2 and N_3 along reciprocal lattice vectors. shiftk: Shift to be applied to the kpoints. use_symmetries: Use spatial symmetries to reduce the number of k-points. use_time_reversal: Use time-reversal symmetry to reduce the number of k-points. Returns: :class:`KSampling` object.
def get_proxy_ancestor_classes(klass):
    """Collect every proxy-model class that is an ancestor of *klass*.

    Recurses through bases whose Django ``_meta.proxy`` flag is set.

    NOTE: implementation targets Django 1.7; newer versions may need
    a different approach (especially 1.8+).
    """
    ancestors = set()
    for base in klass.__bases__:
        if hasattr(base, '_meta') and base._meta.proxy:
            ancestors.add(base)
            ancestors |= get_proxy_ancestor_classes(base)
    return ancestors
Return a set containing all the proxy model classes that are ancestors of the given class. NOTE: This implementation is for Django 1.7, it might need to work differently for other versions especially 1.8+.
def write_long(self, n, pack=Struct('>I').pack):
    """Write an integer as an unsigned 32-bit big-endian value.

    :param n: Integer in range 0..0xFFFFFFFF.
    :param pack: Pre-bound struct packer (default binds at def time).
    :return: self, allowing call chaining.
    :raises ValueError: if *n* does not fit in an unsigned 32-bit value.
    """
    if 0 <= n <= 0xFFFFFFFF:
        self._output_buffer.extend(pack(n))
    else:
        # Bug fix: the %d placeholder was previously never interpolated —
        # the value was passed as a second ValueError constructor argument.
        raise ValueError('Long %d out of range 0..0xFFFFFFFF' % n)
    return self
Write an integer as an unsigned 32-bit value.
def _convert(self, format):
    """Return an Image in the given format.

    Returns self when the format already matches; otherwise a new
    Image instance wrapping the same PIL image.
    """
    if self.format != format:
        converted = Image(self.pil_image)
        converted._format = format
        return converted
    return self
Return a new Image instance with the given format. Returns self if the format is already the same.
def generate_openmp_enabled_py(packagename, srcdir='.', disable_openmp=None):
    """Generate ``package.openmp_enabled`` with an ``is_openmp_enabled`` flag.

    Writes an ``openmp_enabled.py`` module into the package source tree,
    recording at build time whether OpenMP support was available.

    Parameters
    ----------
    packagename : str
        Dotted package name; also used in the generated module's text.
    srcdir : str
        Root source directory containing the package.
    disable_openmp : bool or None
        If not None, stored in builtins to force the OpenMP decision.
    """
    if packagename.lower() == 'astropy':
        packagetitle = 'Astropy'
    else:
        packagetitle = packagename
    # Honour SOURCE_DATE_EPOCH for reproducible builds.
    epoch = int(os.environ.get('SOURCE_DATE_EPOCH', time.time()))
    timestamp = datetime.datetime.utcfromtimestamp(epoch)
    if disable_openmp is not None:
        # Flag stashed in builtins so other build helpers see it
        # without an import cycle.
        import builtins
        builtins._ASTROPY_DISABLE_SETUP_WITH_OPENMP_ = disable_openmp
    if _ASTROPY_DISABLE_SETUP_WITH_OPENMP_:
        log.info("OpenMP support has been explicitly disabled.")
    # Only probe the compiler when OpenMP has not been disabled.
    openmp_support = False if _ASTROPY_DISABLE_SETUP_WITH_OPENMP_ else is_openmp_supported()
    src = _IS_OPENMP_ENABLED_SRC.format(packagetitle=packagetitle,
                                        timestamp=timestamp,
                                        return_bool=openmp_support)
    package_srcdir = os.path.join(srcdir, *packagename.split('.'))
    is_openmp_enabled_py = os.path.join(package_srcdir, 'openmp_enabled.py')
    with open(is_openmp_enabled_py, 'w') as f:
        f.write(src)
Generate ``package.openmp_enabled.is_openmp_enabled``, which can then be used to determine, post build, whether the package was built with or without OpenMP support.
def insert(self, row):
    """Append *row* to the end of the spreadsheet.

    Field names are normalized and values with empty field names
    dropped by the conversion step before the row is sent.
    """
    payload = self._convert_value(row)
    self._service.InsertRow(payload, self._ss.id, self.id)
Insert a new row. The row will be added to the end of the spreadsheet. Before inserting, the field names in the given row will be normalized and values with empty field names removed.
def zmag(self,):
    """Photometric zeropoint of the CCD associated with the reading.

    Lazily read from the PHOTZP header keyword and cached.

    @return: float (0.0 when the header has no PHOTZP entry)
    """
    if self._zmag is None:
        idx = self.get_hdulist_idx(self.reading.get_ccd_num())
        header = self.hdulist[idx].header
        self._zmag = header.get('PHOTZP', 0.0)
    return self._zmag
Return the photometric zeropoint of the CCD associated with the reading. @return: float
def parse(self, rule: str):
    """Translate a policy-language *rule* into a tree of Check objects."""
    if not rule:
        # An empty policy always passes.
        return checks.TrueCheck()
    tokens = self._parse_tokenize(rule)
    for token, value in tokens:
        self._shift(token, value)
    try:
        return self.result
    except ValueError:
        LOG.exception('Failed to understand rule %r', rule)
        # Fail closed: an unparseable policy denies.
        return checks.FalseCheck()
Parses policy to tree. Translate a policy written in the policy language into a tree of Check objects.
def _send(self, msg):
    """Transmit *msg* over the websocket, binary or UTF-8 text by type."""
    if isinstance(msg, six.binary_type):
        send = uwsgi.websocket_send_binary
    else:
        send = uwsgi.websocket_send
    if self._req_ctx is None:
        send(msg)
    else:
        send(msg, request_context=self._req_ctx)
Transmits message either in binary or UTF-8 text mode, depending on its type.
def bind(
    self,
    port: int,
    address: str = None,
    family: socket.AddressFamily = socket.AF_UNSPEC,
    backlog: int = 128,
    reuse_port: bool = False,
) -> None:
    """Bind this server to the given port on the given address.

    To start the server, call `start`. Address may be an IP address,
    a hostname, or empty/None to listen on all interfaces. ``family``
    restricts to IPv4 (`socket.AF_INET`) or IPv6 (`socket.AF_INET6`);
    ``backlog`` and ``reuse_port`` behave as in `.bind_sockets`.
    May be called multiple times prior to `start`.
    """
    new_sockets = bind_sockets(
        port,
        address=address,
        family=family,
        backlog=backlog,
        reuse_port=reuse_port,
    )
    if not self._started:
        # Defer registration until start() is called.
        self._pending_sockets.extend(new_sockets)
    else:
        self.add_sockets(new_sockets)
Binds this server to the given port on the given address. To start the server, call `start`. If you want to run this server in a single process, you can call `listen` as a shortcut to the sequence of `bind` and `start` calls. Address may be either an IP address or hostname. If it's a hostname, the server will listen on all IP addresses associated with the name. Address may be an empty string or None to listen on all available interfaces. Family may be set to either `socket.AF_INET` or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise both will be used if available. The ``backlog`` argument has the same meaning as for `socket.listen <socket.socket.listen>`. The ``reuse_port`` argument has the same meaning as for `.bind_sockets`. This method may be called multiple times prior to `start` to listen on multiple ports or interfaces. .. versionchanged:: 4.4 Added the ``reuse_port`` argument.
def try_run_setup(**kwargs):
    """Run setup, retrying with optional features dropped on failure."""
    try:
        run_setup(**kwargs)
    except Exception as e:
        message = str(e)
        print(message)
        lowered = message.lower()
        if "xgboost" in lowered:
            kwargs["test_xgboost"] = False
            print("Couldn't install XGBoost for testing!")
            try_run_setup(**kwargs)
        elif "lightgbm" in lowered:
            kwargs["test_lightgbm"] = False
            print("Couldn't install LightGBM for testing!")
            try_run_setup(**kwargs)
        elif kwargs["with_binary"]:
            kwargs["with_binary"] = False
            print("WARNING: The C extension could not be compiled, sklearn tree models not supported.")
            try_run_setup(**kwargs)
        else:
            print("ERROR: Failed to build!")
Fails gracefully when various install steps don't work.
def get_org_smarthost(self, orgid, serverid):
    """Get a single organization smarthost."""
    params = dict(orgid=orgid, serverid=serverid)
    return self.api_call(ENDPOINTS['orgsmarthosts']['get'], params)
Get an organization smarthost
def _maybe_get_plugin_name(cls, classpath_element):
    """Return the scalac plugin name declared by *classpath_element*, or None.

    Looks for the plugin descriptor file either directly inside a
    directory entry or as an entry inside a jar.
    """
    def read_name(cp_elem, info_file):
        # Parse the descriptor and sanity-check its root tag.
        root = ElementTree.parse(info_file).getroot()
        if root.tag != 'plugin':
            raise TaskError('File {} in {} is not a valid scalac plugin descriptor'.format(
                _SCALAC_PLUGIN_INFO_FILE, cp_elem))
        return root.find('name').text

    if os.path.isdir(classpath_element):
        info_path = os.path.join(classpath_element, _SCALAC_PLUGIN_INFO_FILE)
        try:
            with open(info_path, 'r') as info:
                return read_name(classpath_element, info)
        except IOError as e:
            # Only "descriptor absent" is benign.
            if e.errno != errno.ENOENT:
                raise
    else:
        with open_zip(classpath_element, 'r') as jar:
            try:
                with closing(jar.open(_SCALAC_PLUGIN_INFO_FILE, 'r')) as info:
                    return read_name(classpath_element, info)
            except KeyError:
                # Jar contains no descriptor entry.
                pass
    return None
If classpath_element is a scalac plugin, returns its name. Returns None otherwise.
def device(value):
    """Transform a User Agent string into human readable text.

    Example outputs: "Safari on iPhone", "Chrome on Windows 8.1",
    "Firefox", "Linux". Returns None when nothing is recognised.
    """
    browser = next((name for regex, name in BROWSERS if regex.search(value)), None)
    device = next((name for regex, name in DEVICES if regex.search(value)), None)
    if browser and device:
        return _('%(browser)s on %(device)s') % {
            'browser': browser,
            'device': device
        }
    return browser or device or None
Transform a User Agent into human readable text. Example output: * Safari on iPhone * Chrome on Windows 8.1 * Safari on OS X * Firefox * Linux * None
def reset_instance_attribute(self, instance_id, attribute):
    """Reset an instance attribute to its default value.

    :param instance_id: ID of the instance.
    :param attribute: Attribute to reset; valid values: kernel|ramdisk.
    :return: Whether the operation succeeded (bool).
    """
    return self.get_status('ResetInstanceAttribute',
                           {'InstanceId': instance_id, 'Attribute': attribute},
                           verb='POST')
Resets an attribute of an instance to its default value. :type instance_id: string :param instance_id: ID of the instance :type attribute: string :param attribute: The attribute to reset. Valid values are: kernel|ramdisk :rtype: bool :return: Whether the operation succeeded or not
def get_formset(self, request, obj=None, **kwargs):
    """Build the formset, defaulting 'user' to the requesting user."""
    formset = super().get_formset(request, obj, **kwargs)
    if obj:
        formset.form.base_fields['user'].initial = request.user.id
    return formset
Default user to the current version owner.
def RunOnce(self):
    """Import the GUI plugins once and warn about a deprecated option."""
    # Imported for its side effects: registers the GUI plugins.
    from grr_response_server.gui import gui_plugins
    if config.CONFIG.Get("AdminUI.django_secret_key", None):
        logging.warning(
            "The AdminUI.django_secret_key option has been deprecated, "
            "please use AdminUI.csrf_secret_key instead.")
Import the plugins once only.
def preRun_(self):
    """Create the shared-memory RGB client immediately after fork."""
    self.report("preRun_")
    super().preRun_()
    width, height = self.image_dimensions
    self.client = ShmemRGBClient(
        name=self.shmem_name,
        n_ringbuffer=self.n_buffer,
        width=width,
        height=height,
        mstimeout=1000,  # milliseconds to wait for a frame
        verbose=False,
    )
Create the shared memory client immediately after fork
def show(ctx, short_name):
    """Show metadata for a specific subscription

    Example:

    \b
    $ wva subscriptions show speed
    {'buffer': 'queue',
     'interval': 5,
     'uri': 'vehicle/data/VehicleSpeed'}
    """
    wva = get_wva(ctx)
    metadata = wva.get_subscription(short_name).get_metadata()
    cli_pprint(metadata)
Show metadata for a specific subscription Example: \b $ wva subscriptions show speed {'buffer': 'queue', 'interval': 5, 'uri': 'vehicle/data/VehicleSpeed'}
def fromfile(cls, path):
    """Create a METS object by parsing the file at *path*.

    :param str path: Path to a METS document.
    """
    # Strip insignificant whitespace so round-tripping stays clean.
    parser = etree.XMLParser(remove_blank_text=True)
    tree = etree.parse(path, parser=parser)
    return cls.fromtree(tree)
Creates a METS by parsing a file. :param str path: Path to a METS document.
def _add_dynamic_field_to_instance(self, field, field_name):
    """Attach a per-instance copy of DynamicField *field* as *field_name*.

    Registers the new field in the instance-local bookkeeping lists and
    returns it.
    """
    new_field = field._create_dynamic_version()
    new_field.name = field_name
    new_field._attach_to_instance(self)
    if field_name not in self._fields:
        # Copy-on-write: shadow the class-level list the first time this
        # instance gains a dynamic field, so the class list stays untouched.
        if id(self._fields) == id(self.__class__._fields):
            self._fields = list(self._fields)
        self._fields.append(field_name)
        if isinstance(field, limpyd_fields.InstanceHashField):
            # Same copy-on-write trick for the instance-hash bookkeeping.
            if id(self._instancehash_fields) == id(self.__class__._instancehash_fields):
                self._instancehash_fields = list(self._instancehash_fields)
            self._instancehash_fields.append(field_name)
    setattr(self, field_name, new_field)
    return new_field
Add a copy of the DynamicField "field" to the current instance using the "field_name" name
def __get_rc_handle(self, root_dir):
    """Create the environment's .rc file and return (path, open handle).

    The handle is returned still open, positioned after the written
    template; callers are responsible for closing it.
    """
    rc_path = os.path.join(root_dir, '.rc')
    env_path = os.path.join(root_dir, '.env')
    handle = open(rc_path, "w+")
    handle.write(source_template % (env_path, env_path))
    return (rc_path, handle)
get the filepath and filehandle to the rc file for the environment
def expect_all(a, b):
    """Assert that two iterables contain the same values, in order."""
    paired = zip_longest(a, b)
    assert all(left == right for left, right in paired)
\ Asserts that two iterables contain the same values.
def setAndDrawInspectorById(self, identifier):
    """Set the inspector by registry id and draw the contents.

    Does NOT trigger any actions, so the check marks in the menus are
    not updated; call getInspectorActionById(identifier).trigger()
    instead to achieve that.
    """
    self.setInspectorById(identifier)

    # Warn the user if the inspector's module failed to import.
    regItem = self.inspectorRegItem
    if regItem and not regItem.successfullyImported:
        msg = "Unable to import {} inspector.\n{}".format(regItem.identifier, regItem.exception)
        QtWidgets.QMessageBox.warning(self, "Warning", msg)
        # Bug fix: Logger.warn is a deprecated alias; use warning().
        logger.warning(msg)

    self.drawInspectorContents(reason=UpdateReason.INSPECTOR_CHANGED)
Sets the inspector and draw the contents. Does NOT trigger any actions, so the check marks in the menus are not updated. To achieve this, the user must update the actions by hand (or call getInspectorActionById(identifier).trigger() instead).
def create_group(self, attrs, members, folder_id=None, tags=None):
    """Create a contact group.

    :param attrs: dict of attributes ({key: value, ...}); at least one
        attr is required.
    :param members: list of member dicts with their type, e.g.
        {'type': 'I', 'value': 'manual_addresse@example.com'}.
    :param folder_id: string id of the folder where to create the group.
    :param tags: comma-separated list of tag names.
    :returns: the created zobjects.Contact.
    """
    cn = {'m': members}
    if folder_id:
        cn['l'] = str(folder_id)
    if tags:
        cn['tn'] = tags
    # Marshal attributes, forcing the contact type to 'group'.
    contact_attrs = [{'n': key, '_content': val} for key, val in attrs.items()]
    contact_attrs.append({'n': 'type', '_content': 'group'})
    cn['a'] = contact_attrs
    resp = self.request_single('CreateContact', {'cn': cn})
    return zobjects.Contact.from_dict(resp)
Create a contact group XML example : <cn l="7"> ## ContactSpec <a n="lastName">MARTIN</a> <a n="firstName">Pierre</a> <a n="email">pmartin@example.com</a> </cn> Which would be in zimsoap : attrs = { 'lastname': 'MARTIN', 'firstname': 'Pierre', 'email': 'pmartin@example.com' } folder_id = 7 :param folder_id: a string of the ID's folder where to create the contact. Default '7' :param tags: comma-separated list of tag names :param members: list of dict. Members with their type. Example {'type': 'I', 'value': 'manual_addresse@example.com'}. :param attrs: a dictionary of attributes to set ({key:value,...}). At least one attr is required :returns: the created zobjects.Contact
def wormhole(context, dump_timing, transit_helper, relay_url, appid):
    """Create a Magic Wormhole and communicate through it.

    Wormholes are created by speaking the same magic CODE in two
    different places at the same time.  Wormholes are secure against
    anyone who doesn't use the same code.
    """
    cfg = Config()
    cfg.appid = appid
    cfg.relay_url = relay_url
    cfg.transit_helper = transit_helper
    cfg.dump_timing = dump_timing
    context.obj = cfg
Create a Magic Wormhole and communicate through it. Wormholes are created by speaking the same magic CODE in two different places at the same time. Wormholes are secure against anyone who doesn't use the same code.
def release_downloads(request, package_name, version):
    """List (package name, filename) pairs for a package release."""
    session = DBSession()
    files = ReleaseFile.by_release(session, package_name, version)
    if not files:
        # Preserve the falsy result (e.g. None or empty) as-is.
        return files
    return [(f.release.package.name, f.filename) for f in files]
Retrieve a list of files and download count for a given package and release version.
def namelist(self):
    """Return a list of file names in the archive."""
    # Idiom: comprehension replaces the manual append loop.
    return [entry.filename for entry in self.filelist]
Return a list of file names in the archive.
def _reachable_subsystems(network, indices, state):
    """Yield every subsystem of *network* that is valid in *state*.

    Iterates nonempty subsets of *indices* (largest first), skipping
    those whose state is unreachable.
    """
    validate.is_network(network)
    for candidate in utils.powerset(indices, nonempty=True, reverse=True):
        try:
            subsystem = Subsystem(network, state, candidate)
        except exceptions.StateUnreachableError:
            continue
        yield subsystem
A generator over all subsystems in a valid state.
def download_person_playlists(self):
    """Download the person's playlists, including private ones.

    Note: requires a prior login (reads the saved user id from disk).
    """
    with open(person_info_path, 'r') as fh:
        user_id = int(fh.read())
    self.download_user_playlists_by_id(user_id)
Download person playlist including private playlist. note: login required.
def isometric_view_interactive(self):
    """Set the current interactive render window to isometric view."""
    style = self.iren.GetInteractorStyle()
    style.GetCurrentRenderer().view_isometric()
sets the current interactive render window to isometric view
def _shallow_copy(self, obj=None, obj_type=None, **kwargs):
    """Return a new object with the replacement attributes.

    Attributes not supplied via **kwargs default to this object's
    current values.
    """
    if obj is None:
        obj = self._selected_obj.copy()
    if obj_type is None:
        obj_type = self._constructor
    if isinstance(obj, obj_type):
        # Unwrap an already-wrapped object down to its underlying data.
        obj = obj.obj
    for name in self._attributes:
        if name not in kwargs:
            kwargs[name] = getattr(self, name)
    return obj_type(obj, **kwargs)
return a new object with the replacement attributes