code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def override(cls):
    """Decorator documenting that a method overrides one from *cls*.

    Fails fast at decoration time: raises NameError when *cls* exposes no
    attribute with the decorated method's name; otherwise the method is
    returned unchanged.
    """
    def check_override(method):
        if method.__name__ in dir(cls):
            return method
        raise NameError("{} does not override any method of {}".format(
            method, cls))
    return check_override
Annotation for documenting method overrides. Arguments: cls (type): The superclass that provides the overridden method. If this cls does not actually have the method, an error is raised.
def gcv(data, channels=None):
    """Calculate the geometric CV of the events in an FCSData object.

    Parameters
    ----------
    data : FCSData or numpy array
        NxD event data (N events, D channels).
    channels : int, str, or list, optional
        Channels to compute the statistic on; all channels when None.

    Returns
    -------
    float or numpy array
        Geometric coefficient of variation per selected channel,
        sqrt(exp(std(log x)^2) - 1).
    """
    selected = data if channels is None else data[:, channels]
    log_std = np.std(np.log(selected), axis=0)
    return np.sqrt(np.exp(log_std ** 2) - 1)
Calculate the geometric CV of the events in an FCSData object. Parameters ---------- data : FCSData or numpy array NxD flow cytometry data where N is the number of events and D is the number of parameters (aka channels). channels : int or str or list of int or list of str, optional Channels on which to calculate the statistic. If None, use all channels. Returns ------- float or numpy array The geometric coefficient of variation of the events in the specified channels of `data`.
def synchronized(*args):
    """Serialize execution of the decorated function to avoid races.

    Usable bare (``@synchronized``), guarding the function with the shared
    module lock, or parametrized with a Lock/RLock/Semaphore
    (``@synchronized(lock)``) to use that object instead.
    """
    if callable(args[0]):
        # Bare usage: args[0] is the decorated function itself.
        return decorate_synchronized(args[0], _synchronized_lock)
    # Parametrized usage: args[0] is the synchronization primitive.
    lock = args[0]
    def wrap(function):
        return decorate_synchronized(function, lock)
    return wrap
A synchronized function prevents two or more callers to interleave its execution preventing race conditions. The synchronized decorator accepts as optional parameter a Lock, RLock or Semaphore object which will be employed to ensure the function's atomicity. If no synchronization object is given, a single threading.Lock will be used. This implies that between different decorated function only one at a time will be executed.
def policy_version_exists(policyName, policyVersionId, region=None, key=None, keyid=None, profile=None):
    """Check whether the given IoT policy version exists.

    Returns ``{'exists': True/False}`` or ``{'error': ...}`` on other
    API failures.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_iot.policy_version_exists mypolicy versionid
    """
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        # The AWS IoT API expects the camel-cased ``policyVersionId``
        # keyword; the previous ``policyversionId`` spelling fails
        # parameter validation on every call.
        policy = conn.get_policy_version(policyName=policyName,
                                         policyVersionId=policyVersionId)
        return {'exists': bool(policy)}
    except ClientError as e:
        if e.response.get('Error', {}).get('Code') == 'ResourceNotFoundException':
            return {'exists': False}
        return {'error': __utils__['boto3.get_error'](e)}
Given a policy name and version ID, check to see if the given policy version exists. Returns True if the given policy version exists and returns False if the given policy version does not exist. CLI Example: .. code-block:: bash salt myminion boto_iot.policy_version_exists mypolicy versionid
def averageSize(self):
    """Return the mean vertex distance from the mesh's center of mass.

    Returns 0 for an empty mesh. Large meshes are sub-sampled so that
    roughly 10000 evenly spaced vertices are visited.
    """
    center = self.centerOfMass()
    pts = self.coordinates(copy=False)
    if not len(pts):
        return 0
    # Stride chosen so at most ~10000 vertices contribute.
    stride = int(len(pts) / 10000.0) + 1
    total, count = 0.0, 0.0
    for idx in np.arange(0, len(pts), stride):
        total += utils.mag(pts[idx] - center)
        count += 1
    return total / count
Calculate the average size of a mesh. This is the mean of the vertex distances from the center of mass.
def replication_connection_string_and_slot_using_pgpass(target_node_info):
    """Like `connection_string_and_slot_using_pgpass` but builds a
    *replication* connection: forces dbname="replication" and
    replication="true" before writing the pgpass entry.
    """
    info, slot = connection_info_and_slot(target_node_info)
    info.update(dbname="replication", replication="true")
    return create_pgpass_file(info), slot
Like `connection_string_and_slot_using_pgpass` but returns a connection string for a replication connection.
def compare_last_two_snapshots(obj, raw=False):
    """Compare the two most recent snapshots of *obj* directly.

    Returns an empty dict when fewer than two snapshots exist.
    """
    if get_snapshot_count(obj) < 2:
        return {}
    current = get_version(obj)
    previous_snap = get_snapshot_by_version(obj, current - 1)
    latest_snap = get_snapshot_by_version(obj, current)
    return compare_snapshots(previous_snap, latest_snap, raw=raw)
Helper to compare the last two snapshots directly
def print_info(self, obj=None, buf=sys.stdout):
    """Print a status message about the given object.

    If `obj` is falsy, status info about the current environment is
    printed instead. `obj` may be a tool name, a package name (possibly
    versioned), a context filepath, a suite filepath, or the name of a
    context in a visible suite.

    Returns:
        bool: True when something was printed about `obj`.
    """
    if not obj:
        self._print_info(buf)
        return True
    printed_any = False
    for print_fn in (self._print_tool_info,
                     self._print_package_info,
                     self._print_suite_info,
                     self._print_context_info):
        did_print = print_fn(obj, buf, printed_any)
        printed_any |= did_print
        if did_print:
            # Blank separator line (was Python-2 `print >> buf, ''`).
            buf.write('\n')
    if not printed_any:
        buf.write("Rez does not know what '%s' is" % obj + '\n')
    return printed_any
Print a status message about the given object. If an object is not provided, status info is shown about the current environment - what the active context is if any, and what suites are visible. Args: obj (str): String which may be one of the following: - A tool name; - A package name, possibly versioned; - A context filepath; - A suite filepath; - The name of a context in a visible suite.
def get_item(self, key):
    """Return the evaluated value associated with *key*.

    Returns None (after printing an error message) when the key is absent
    or its stored text cannot be evaluated.
    """
    keys = list(self.keys())
    if key not in keys:
        self.print_message("ERROR: '"+str(key)+"' not found.")
        return None
    # Look up the stored text once instead of re-indexing in each branch.
    raw = self.get_value(1, keys.index(key))
    try:
        # NOTE(review): eval of stored text — only safe for trusted data.
        return eval(raw)
    except Exception:
        self.print_message("ERROR: '"+str(raw)+"' cannot be evaluated.")
        return None
Returns the value associated with the key.
def create_seq(self, project):
    """Open a modal SequenceCreatorDialog for *project* and return the
    sequence it produced (None when the user aborted).

    :param project: the project for the sequence
    :returns: the created sequence or None
    """
    dlg = SequenceCreatorDialog(project=project, parent=self)
    dlg.exec_()
    return dlg.sequence
Create and return a new sequence :param project: the project for the sequence :type deps: :class:`jukeboxcore.djadapter.models.Project` :returns: The created sequence or None :rtype: None | :class:`jukeboxcore.djadapter.models.Sequence` :raises: None
def main():
    """Show how to configure the alias on the first run and configure it
    automatically on the second run; once configured, only report that.
    """
    settings.init()
    details = shell.how_to_configure()
    if details and details.can_configure_automatically:
        if _is_already_configured(details):
            logs.already_configured(details)
            return
        if _is_second_run():
            _configure(details)
            logs.configured_successfully(details)
            return
        _record_first_run()
    logs.how_to_configure_alias(details)
Shows useful information about how-to configure alias on a first run and configure automatically on a second. It'll be only visible when user type fuck and when alias isn't configured.
def validate_query(self, query):
    """Return *query* run through the common reading-list filter, or None
    when no query was given.
    """
    if query is None:
        return None
    return self.update_reading_list(query)
Confirm query exists given common filters.
def getElementsByTagName(self, tagName):
    """Return a TagCollection of unique elements within this collection
    having the given tag name (compared case-insensitively).

    @param tagName - String of tag name
    @return - TagCollection of matching unique elements
    """
    found = TagCollection()
    if len(self) == 0:
        return found
    wanted = tagName.lower()
    def matches(tag):
        return bool(tag.tagName == wanted)
    for element in self:
        TagCollection._subset(found, matches, element)
    return found
getElementsByTagName - Gets elements within this collection having a specific tag name @param tagName - String of tag name @return - TagCollection of unique elements within this collection with given tag name
def filter_featured_apps(admin_apps, request):
    """Return the subset of apps configured as "featured".

    Apps listed in the ``DASHBOARD_FEATURED_APPS`` setting are copied,
    normalized (with deprecation warnings for the legacy
    ``verbose_name`` key and dict-valued ``models``), resolved against
    *admin_apps* via ``_build_app_models``, and returned when they still
    have at least one visible model.

    :param admin_apps: A list of apps.
    :param request: Django request.
    :return: list of featured pseudo-app dicts.
    """
    featured_apps = []
    for orig_app_spec in appsettings.DASHBOARD_FEATURED_APPS:
        # Copy so deprecation fix-ups never mutate the settings object.
        app_spec = orig_app_spec.copy()
        if "verbose_name" in app_spec:
            warnings.warn(
                "DASHBOARD_FEATURED_APPS[]['verbose_name'] = '%s' is deprecated. "
                "Use 'name' instead)" % app_spec['verbose_name'],
                DeprecationWarning, stacklevel=2
            )
            app_spec['name'] = app_spec['verbose_name']
        if hasattr(app_spec['models'], 'items'):
            # Legacy dict form; convert to the expected list of tuples.
            warnings.warn(
                "DASHBOARD_FEATURED_APPS[]['models'] for '%s' should now be a "
                "list of tuples, not a dict." % app_spec['name'],
                DeprecationWarning, stacklevel=2
            )
            app_spec['models'] = app_spec['models'].items()
        app_spec['models'] = _build_app_models(
            request, admin_apps, app_spec['models']
        )
        # Only keep apps that still expose at least one model.
        if app_spec['models']:
            featured_apps.append(app_spec)
    return featured_apps
Given a list of apps return a set of pseudo-apps considered featured. Apps are considered featured if the are defined in the settings property called `DASHBOARD_FEATURED_APPS` which contains a list of the apps that are considered to be featured. :param admin_apps: A list of apps. :param request: Django request. :return: Subset of app like objects that are listed in the settings `DASHBOARD_FEATURED_APPS` setting.
def get_alias(self, using=None, **kwargs):
    """Retrieve this index's alias definition.

    Any additional keyword arguments are forwarded unchanged to
    ``Elasticsearch.indices.get_alias``.
    """
    conn = self._get_connection(using)
    return conn.indices.get_alias(index=self._name, **kwargs)
Retrieve a specified alias. Any additional keyword arguments will be passed to ``Elasticsearch.indices.get_alias`` unchanged.
def create(cls, **kwargs):
    """Construct a new instance from **kwargs, persist it and commit.

    Rolls the session back and re-raises on any failure.

    Examples:
        >>> user = User.create(name="Vicky", email="vicky@h.com")
    """
    try:
        instance = cls.new(**kwargs)
        return cls.add(instance)
    except BaseException:
        cls.session.rollback()
        raise
Initializes a new instance, adds it to the db and commits the transaction. Args: **kwargs: The keyword arguments for the init constructor. Examples: >>> user = User.create(name="Vicky", email="vicky@h.com") >>> user.id 35
def server_by_name(self, name):
    """Find a server by its name; unknown names resolve to an empty id."""
    server_id = self.server_list().get(name, {}).get('id', '')
    return self.server_show_libcloud(server_id)
Find a server by its name
def padto8(data):
    """Pad *data* with 0xDB bytes up to the next multiple of 8 bytes.

    This makes x86_64 faster and prevents undefined behavior on other
    platforms.
    """
    pad_len = roundto8(len(data)) - len(data)
    return data + b'\xdb' * pad_len
Pads data to a multiple of 8 bytes. This makes x86_64 faster and prevents undefined behavior on other platforms
def eprint(*args, **kwargs):
    """Print an error message to standard error, prefixed with the file
    name and line number of the caller.

    Supports the ``end`` and ``sep`` keyword arguments of print().
    """
    end = kwargs.get("end", "\n")
    sep = kwargs.get("sep", " ")
    filename, lineno = inspect.stack()[1][1:3]
    # Both prefix and message go to stderr; previously the prefix print
    # had no file= argument and therefore leaked to stdout.
    print("{}:{}: ".format(filename, lineno), end="", file=sys.stderr)
    print(*args, end=end, file=sys.stderr, sep=sep)
Print an error message to standard error, prefixing it with file name and line number from which method was called.
def mx_page_trees(self, mx_page):
    """Return {tree_name: tree-details document} for every timetable tree
    assigned to the given MX page.
    """
    result = dict()
    for name, tree in self.scheduler.timetable.trees.items():
        if tree.mx_page != mx_page:
            continue
        details = self._get_tree_details(name)
        result[tree.tree_name] = details.document
    return result
return trees assigned to given MX Page
def nan_circstd(samples, high=2.0*np.pi, low=0.0, axis=None):
    """NaN-insensitive circular standard deviation.

    Parameters
    ----------
    samples : array_like
        Input array (may contain NaN).
    high : float
        Upper boundary of the circular range (default 2*pi).
    low : float
        Lower boundary of the circular range (default 0).
    axis : int or None
        Axis along which to compute; None flattens the input.

    Returns
    -------
    circstd : float or ndarray
        Circular standard deviation; NaN when no valid samples exist.
    """
    samples = np.asarray(samples, dtype=float)
    if samples.size == 0 or np.all(np.isnan(samples)):
        return np.nan
    ang = (samples - low) * 2.0 * np.pi / (high - low)
    # Use nan-aware means instead of pre-filtering: boolean masking
    # flattened the array and silently ignored the `axis` argument.
    smean = np.nanmean(np.sin(ang), axis=axis)
    cmean = np.nanmean(np.cos(ang), axis=axis)
    rmean = np.sqrt(smean**2 + cmean**2)
    return (high - low) * np.sqrt(-2.0 * np.log(rmean)) / (2.0 * np.pi)
NaN insensitive version of scipy's circular standard deviation routine Parameters ----------- samples : array_like Input array low : float or int Lower boundary for circular standard deviation range (default=0) high: float or int Upper boundary for circular standard deviation range (default=2 pi) axis : int or NoneType Axis along which standard deviations are computed. The default is to compute the standard deviation of the flattened array Returns -------- circstd : float Circular standard deviation
def ping_hub(self):
    """Ping the ZeroMQ plugin hub to verify the connection is alive.

    On success, records the heartbeat timestamp and returns True; on
    IOError, invokes `on_heartbeat_error`. Returns None when no plugin
    is attached or the ping failed.
    """
    if self.plugin is not None:
        try:
            # 1-second timeout keeps the heartbeat responsive.
            self.plugin.execute(self.plugin.hub_name, 'ping',
                                timeout_s=1, silent=True)
        except IOError:
            self.on_heartbeat_error()
        else:
            self.heartbeat_alive_timestamp = datetime.now()
            logger.debug('Hub connection alive as of %s',
                         self.heartbeat_alive_timestamp)
            # NOTE(review): nesting reconstructed from collapsed source;
            # `return True` assumed to apply only on a successful ping —
            # confirm against the original project.
            return True
Attempt to ping the ZeroMQ plugin hub to verify connection is alive. If ping is successful, record timestamp. If ping is unsuccessful, call `on_heartbeat_error` method.
def start_container(self, cls, **kwargs):
    """Instantiate *cls* (after style-name substitution of the kwargs)
    and append the new container to the container list.
    """
    prepared = self._replace_stylename(kwargs)
    self._containers.append(cls(**prepared))
Append a new container.
def validate_commit_range(repo_dir, old_commit, new_commit):
    """Check that old_commit..new_commit is a valid range for *repo_dir*.

    Returns True when the range is valid as given, or 'flip' when only
    the reversed range is valid; raises InvalidCommitRangeException when
    neither direction yields commits.
    """
    def commits_between(first, second):
        # Treat any lookup failure as "no commits in this direction".
        try:
            return get_commits(repo_dir, first, second)
        except Exception:
            return []

    if commits_between(old_commit, new_commit):
        return True
    if commits_between(new_commit, old_commit):
        return 'flip'
    msg = ("The commit range {0}..{1} is invalid for {2}."
           "You may need to use the --update option to fetch the "
           "latest updates to the git repositories stored on your "
           "local computer.".format(old_commit, new_commit, repo_dir))
    raise exceptions.InvalidCommitRangeException(msg)
Check if commit range is valid. Flip it if needed.
def _compute_projection(self, X, W):
    """Compute the LPP projection matrix.

    Solves the generalized eigenproblem X^T L X v = lambda X^T D X v for
    the smallest self.n_components eigenvalues, where D is the diagonal
    row-sum matrix of the adjacency W and L = D - W.

    Returns the (n_features, n_components) eigenvector matrix.
    """
    X = check_array(X)
    degree = np.diag(W.sum(1))
    laplacian = degree - W
    _, vecs = eigh_robust(np.dot(X.T, np.dot(laplacian, X)),
                          np.dot(X.T, np.dot(degree, X)),
                          eigvals=(0, self.n_components - 1))
    return vecs
Compute the LPP projection matrix Parameters ---------- X : array_like, (n_samples, n_features) The input data W : array_like or sparse matrix, (n_samples, n_samples) The precomputed adjacency matrix Returns ------- P : ndarray, (n_features, self.n_components) The matrix encoding the locality preserving projection
def attrget(self, groupname, attrname, rownr):
    """Get the value of attribute *attrname* in row *rownr* of group
    *groupname* (delegates to the low-level _attrget).
    """
    value = self._attrget(groupname, attrname, rownr)
    return value
Get the value of an attribute in the given row in a group.
def get_container(cls, scheduler):
    """Return the cached helper container for *scheduler*, creating and
    caching a new instance on first use.
    """
    try:
        return cls._container_cache[scheduler]
    except KeyError:
        container = cls._container_cache[scheduler] = cls(scheduler)
        return container
Create temporary instance for helper functions
def _on_access_token(self, future, response):
    """Callback invoked when StackExchange answers the access-token
    request.

    On an error payload, fails *future* with an AuthError; otherwise
    chains a request for the authenticated user's details.

    :param future: the future to resolve or fail
    :param tornado.httpclient.HTTPResponse response: the HTTP response
    """
    LOGGER.info(response.body)
    content = escape.json_decode(response.body)
    if 'error' in content:
        LOGGER.error('Error fetching access token: %s', content['error'])
        future.set_exception(auth.AuthError('StackExchange auth error: %s' % str(content['error'])))
        return
    # Token obtained: fetch the user profile, forwarding the future.
    callback = self.async_callback(self._on_stackexchange_user, future,
                                   content['access_token'])
    self.stackexchange_request('me', callback, content['access_token'])
Invoked as a callback when StackExchange has returned a response to the access token request. :param method future: The callback method to pass along :param tornado.httpclient.HTTPResponse response: The HTTP response
def get_parallel_regions(batch):
    """CWL target: list callable regions (as "chrom:start-end" dicts) for
    parallelization, derived from the first sample in *batch*.
    """
    samples = [utils.to_single_data(d) for d in batch]
    return [{"region": "%s:%s-%s" % (chrom, start, end)}
            for chrom, start, end in _get_parallel_regions(samples[0])]
CWL target to retrieve a list of callable regions for parallelization.
def perform(self, node, inputs, output_storage):
    """Evaluate the wrapped operator on the single input value and store
    the result, converted to an ndarray, in the single output slot.
    """
    x_val = inputs[0]
    result = self.operator(x_val)
    output_storage[0][0] = np.asarray(result)
Evaluate this node's computation. Parameters ---------- node : `theano.gof.graph.Apply` The node of this Op in the computation graph. inputs : 1-element list of arrays Contains an array (usually `numpy.ndarray`) of concrete values supplied for the symbolic input variable ``x``. output_storage : 1-element list of 1-element lists The single 1-element list contained in ``output_storage`` by default contains only ``None``. This value must be replaced by the result of the application of `odl_op`. Examples -------- Perform a matrix multiplication: >>> space = odl.rn(3) >>> matrix = np.array([[1, 0, 1], ... [0, 1, 1]], dtype=float) >>> op = odl.MatrixOperator(matrix, domain=space) >>> matrix_op = TheanoOperator(op) >>> x = theano.tensor.dvector() >>> op_x = matrix_op(x) >>> op_func = theano.function([x], op_x) >>> op_func([1, 2, 3]) array([ 4., 5.]) Evaluate a functional, i.e., an operator with scalar output: >>> space = odl.rn(3) >>> functional = odl.solvers.L2NormSquared(space) >>> func_op = TheanoOperator(functional) >>> x = theano.tensor.dvector() >>> op_x = func_op(x) >>> op_func = theano.function([x], op_x) >>> op_func([1, 2, 3]) array(14.0)
def binaryEntropy(x):
    """Entropy of independent binary (Bernoulli) random variables.

    :param x: (torch tensor) probability of each variable being 1
    :return: (entropy tensor, total entropy); entries where p is exactly
        0 or 1 are defined as 0.
    """
    p, q = x, 1 - x
    entropy = -p * p.log2() - q * q.log2()
    # 0*log2(0) evaluates to NaN numerically; its limit is 0, so patch
    # the degenerate entries.
    entropy[p * q == 0] = 0
    return entropy, entropy.sum()
Calculate entropy for a list of binary random variables :param x: (torch tensor) the probability of the variable to be 1. :return: entropy: (torch tensor) entropy, sum(entropy)
def get_microversion_for_features(service, features, wrapper_class, min_ver, max_ver):
    """Return the first requested microversion for *features* that falls
    within [min_ver, max_ver], wrapped in *wrapper_class*; None when the
    features are unknown or nothing matches.
    """
    candidates = get_requested_versions(service, features)
    if not candidates:
        return None
    for candidate in candidates:
        wrapped = wrapper_class(candidate)
        if wrapped.matches(min_ver, max_ver):
            return wrapped
    return None
Retrieves the highest known functional microversion for the requested features, or None if no version in the supported range matches.
def set_scheme(self, scheme_name):
    """Make *scheme_name* the visible page of the stack and remember it
    as the last used scheme.
    """
    index = self.order.index(scheme_name)
    self.stack.setCurrentIndex(index)
    self.last_used_scheme = scheme_name
Set the current stack by 'scheme_name'.
def update_batches(self, X_batch, L, Min):
    """Store the new batch and, when one is given, pre-compute the
    hammer-function terms (r_x0, s_x0) for it.
    """
    self.X_batch = X_batch
    if X_batch is None:
        return
    self.r_x0, self.s_x0 = self._hammer_function_precompute(
        X_batch, L, Min, self.model)
Updates the batches internally and pre-computes the hammer function terms for the new batch.
def get_dG_at_T(seq, temp):
    """Predict dG at temperature *temp* using the better of the Dill or
    Oobatake predictions.

    Args:
        seq (str, Seq, SeqRecord): Amino acid sequence
        temp (float): Temperature in degrees C

    Returns:
        tuple: (dG in cal/mol, equilibrium constant Keq, method name)
    """
    # Gas constant converted from J/(mol*K) to cal/(mol*K).
    r_cal = scipy.constants.R / scipy.constants.calorie
    seq = ssbio.protein.sequence.utils.cast_to_str(seq)
    # Oobatake dG over the 20-50 degC range, keyed by integer temperature.
    oobatake = {}
    for t in range(20, 51):
        oobatake[t] = calculate_oobatake_dG(seq, t)
    stable = [i for i in oobatake.values() if i > 0]
    if len(stable) == 0:
        # No stable Oobatake prediction: fall back to Dill.
        # NOTE(review): 0.238846 looks like the J->cal conversion factor
        # applied to the Dill result — confirm units of calculate_dill_dG.
        dG = 0.238846 * calculate_dill_dG(len(seq), temp)
        method='Dill'
    else:
        # NOTE(review): raises KeyError when temp is non-integer or
        # outside 20-50 while a stable prediction exists — confirm
        # callers always pass an integer in that range.
        dG = oobatake[temp]
        method='Oobatake'
    # Keq from dG via the van 't Hoff relation at temp in Kelvin.
    keq = math.exp(-1 * dG / (r_cal * (temp + 273.15)))
    return dG, keq, method
Predict dG at temperature T, using best predictions from Dill or Oobatake methods. Args: seq (str, Seq, SeqRecord): Amino acid sequence temp (float): Temperature in degrees C Returns: (tuple): tuple containing: dG (float) Free energy of unfolding dG (cal/mol) keq (float): Equilibrium constant Keq method (str): Method used to calculate
async def _send_typing(self, request: Request, stack: Stack):
    """Send Telegram's "typing" chat action when the Typing layer is
    active. Telegram stops the indicator itself once a message arrives,
    so only the start of typing needs to be signalled.
    """
    typing = stack.get_layer(lyr.Typing)
    if not typing.active:
        return
    await self.call(
        'sendChatAction',
        chat_id=request.message.get_chat_id(),
        action='typing',
    )
In telegram, the typing stops when the message is received. Thus, there is no "typing stops" messages to send. The API is only called when typing must start.
def _check_timeindex(self, timeseries): try: timeseries.loc[self.edisgo.network.timeseries.timeindex] except: message = 'Time index of storage time series does not match ' \ 'with load and feed-in time series.' logging.error(message) raise KeyError(message)
Raises an error if time index of storage time series does not comply with the time index of load and feed-in time series. Parameters ----------- timeseries : :pandas:`pandas.DataFrame<dataframe>` DataFrame containing active power the storage is charged (negative) and discharged (positive) with in kW in column 'p' and reactive power in kVA in column 'q'.
def match(self, string):
    """Return True when *string* matches the constant, honoring the
    case-sensitivity flag (the argument is path-normalized first).
    """
    normalized = os.path.normcase(string)
    if self.casesensitive:
        return self.pattern == normalized
    return self.pattern.lower() == normalized.lower()
Returns True if the argument matches the constant.
def takes_instance_or_queryset(func):
    """Make a standard Django admin action accept either a queryset or a
    single model instance; a lone instance is wrapped into a one-element
    queryset before the action runs.
    """
    @wraps(func)
    def decorated_function(self, request, queryset):
        if isinstance(queryset, QuerySet):
            return func(self, request, queryset)
        # A single object was passed: rebuild a queryset around its pk.
        try:
            queryset = self.get_queryset(request).filter(pk=queryset.pk)
        except AttributeError:
            try:
                model = queryset._meta.model
            except AttributeError:
                model = queryset._meta.concrete_model
            queryset = model.objects.filter(pk=queryset.pk)
        return func(self, request, queryset)
    return decorated_function
Decorator that makes standard Django admin actions compatible.
def apply_change(self):
    """Apply permission changes from the request to the role named in
    task_data: each change toggles one permission by its code, after
    which the role is saved.
    """
    role = RoleModel.objects.get(key=self.current.task_data['role_id'])
    for change in self.input['change']:
        permission = PermissionModel.objects.get(code=change['id'])
        if change['checked'] is True:
            role.add_permission(permission)
        else:
            role.remove_permission(permission)
    role.save()
Applies changes to the permissions of the role. To make a change to the permission of the role, a request in the following format should be sent: .. code-block:: python { 'change': { 'id': 'workflow2.lane1.task1', 'checked': false }, } The 'id' field of the change is the id of the tree element that was sent to the UI (see `Permissions.edit_permissions`). 'checked' field is the new state of the element.
def enrolled_device_id(self, enrolled_device_id):
    """Sets the enrolled_device_id of this EnrollmentIdentity.

    :param enrolled_device_id: Device Directory ID of the device.
    :type: str
    :raises ValueError: when the value is None or does not start with 32
        alphanumeric characters.
    """
    if enrolled_device_id is None:
        raise ValueError("Invalid value for `enrolled_device_id`, must not be `None`")
    # None was rejected above, so the former redundant `is not None`
    # guard is dropped. Note re.search only anchors the start: any string
    # *beginning* with 32 alphanumerics is accepted.
    if not re.search('^[A-Za-z0-9]{32}', enrolled_device_id):
        raise ValueError("Invalid value for `enrolled_device_id`, must be a follow pattern or equal to `/^[A-Za-z0-9]{32}/`")
    self._enrolled_device_id = enrolled_device_id
Sets the enrolled_device_id of this EnrollmentIdentity. The ID of the device in the Device Directory once it has been registered. :param enrolled_device_id: The enrolled_device_id of this EnrollmentIdentity. :type: str
def create_entry(self, text="", sensitive="False"):
    """Create a Gtk.Entry pre-filled with *text*.

    NOTE(review): the default ``sensitive="False"`` is the *string*
    "False", which is truthy, so set_sensitive receives a true value by
    default — confirm whether a boolean default was intended.
    """
    entry = Gtk.Entry()
    entry.set_sensitive(sensitive)
    entry.set_text(text)
    return entry
Function creates an Entry with corresponding text
def exec_command(self, command, bufsize=-1, timeout=None, get_pty=False, environment=None):
    """Execute *command* on the SSH server over a freshly opened channel.

    :param str command: the command to execute
    :param int bufsize: buffer size for the returned file objects
    :param int timeout: channel timeout in seconds
    :param bool get_pty: request a pseudo-terminal from the server
    :param dict environment: shell environment variables to merge in
    :return: (stdin, stdout, stderr) file-like objects for the command
    :raises SSHException: if the server fails to execute the command
    """
    channel = self._transport.open_session(timeout=timeout)
    if get_pty:
        channel.get_pty()
    channel.settimeout(timeout)
    if environment:
        channel.update_environment(environment)
    channel.exec_command(command)
    return (
        channel.makefile("wb", bufsize),
        channel.makefile("r", bufsize),
        channel.makefile_stderr("r", bufsize),
    )
Execute a command on the SSH server. A new `.Channel` is opened and the requested command is executed. The command's input and output streams are returned as Python ``file``-like objects representing stdin, stdout, and stderr. :param str command: the command to execute :param int bufsize: interpreted the same way as by the built-in ``file()`` function in Python :param int timeout: set command's channel timeout. See `.Channel.settimeout` :param bool get_pty: Request a pseudo-terminal from the server (default ``False``). See `.Channel.get_pty` :param dict environment: a dict of shell environment variables, to be merged into the default environment that the remote command executes within. .. warning:: Servers may silently reject some environment variables; see the warning in `.Channel.set_environment_variable` for details. :return: the stdin, stdout, and stderr of the executing command, as a 3-tuple :raises: `.SSHException` -- if the server fails to execute the command .. versionchanged:: 1.10 Added the ``get_pty`` kwarg.
def close_fileoutput(self):
    """Flush and close the file output self.fd (best-effort: IOErrors
    are ignored), then forget the descriptor.
    """
    if self.fd is None:
        return
    try:
        self.flush()
    except IOError:
        pass
    if self.close_fd:
        try:
            self.fd.close()
        except IOError:
            pass
    self.fd = None
Flush and close the file output denoted by self.fd.
def _init_hdrgos(self, hdrgos_dflt, hdrgos_usr=None, add_dflt=True):
    """Build the initial set of GO group header IDs from defaults,
    user-provided headers, and section definitions; user-supplied GO IDs
    are validated before use.
    """
    if (hdrgos_usr is None or hdrgos_usr is False) and not self.sections:
        return set(hdrgos_dflt)
    headers = set()
    if hdrgos_usr:
        chk_goids(hdrgos_usr, "User-provided GO group headers")
        headers.update(hdrgos_usr)
    if self.sections:
        self._chk_sections(self.sections)
        section_headers = set(hg for _, hdrgos in self.sections for hg in hdrgos)
        chk_goids(section_headers, "User-provided GO group headers in sections")
        headers.update(section_headers)
    if add_dflt:
        return headers.union(hdrgos_dflt)
    return headers
Initialize the set of GO group header IDs from defaults, user-provided headers, and sections.
def hsetnx(self, key, field, value):
    """Set *field* in the hash at *key* only if the field does not yet
    exist (Redis HSETNX); returns 1 when set, 0 when the field existed.
    """
    command = [b'HSETNX', key, field, value]
    return self._execute(command)
Sets `field` in the hash stored at `key` only if it does not exist. Sets `field` in the hash stored at `key` only if `field` does not yet exist. If `key` does not exist, a new key holding a hash is created. If `field` already exists, this operation has no effect. .. note:: *Time complexity*: ``O(1)`` :param key: The key of the hash :type key: :class:`str`, :class:`bytes` :param field: The field in the hash to set :type key: :class:`str`, :class:`bytes` :param value: The value to set the field to :returns: ``1`` if `field` is a new field in the hash and `value` was set. ``0`` if `field` already exists in the hash and no operation was performed :rtype: int
def _generate_components(self, X):
    """Generate the hidden-layer components for X: MLP biases/weights
    and/or RBF centers/radii, depending on the configured input types.
    """
    rng = check_random_state(self.random_state)
    if self._use_mlp_input:
        self._compute_biases(rng)
        self._compute_weights(X, rng)
    if self._use_rbf_input:
        self._compute_centers(X, sp.issparse(X), rng)
        self._compute_radii()
Generate components of hidden layer given X
def get_value_for_expr(self, expr, target):
    """Resolve the numeric value an expression compares against.

    Returns None for logical operators, or when a HISTORICAL value is
    requested before enough history has accumulated; otherwise the value
    (the historical mean when applicable) passed through the
    expression's `mod` function.
    """
    if expr in LOGICAL_OPERATORS.values():
        return None
    value = expr['value']
    if value == HISTORICAL:
        history = self.history[target]
        if len(history) < self.history_size:
            return None
        value = sum(history) / float(len(history))
    return expr['mod'](value)
Resolve the numeric right-hand value for a comparison expression. Returns None for logical operators or when not enough history has accumulated; HISTORICAL values are replaced by the mean of the target's history, and the expression's 'mod' function is applied to the result.
def image_undo():
    """Undo the last coarsen or smooth command by restoring the most
    recent (image, Z) pair from the undo stack.
    """
    if not image_undo_list:
        print("no undos in memory")
        return
    image, Z = image_undo_list.pop(-1)
    image.set_array(Z)
    _pylab.draw()
Undoes the last coarsen or smooth command.
def _newer(a, b): if not os.path.exists(a): return False if not os.path.exists(b): return True return os.path.getmtime(a) >= os.path.getmtime(b)
Inquire whether file a was written since file b.
def open_acqdata(filename, user='unknown', filemode='w-'):
    """Open and return the correct AcquisitionData object for *filename*.

    Supported extensions: .hdf5/.h5 (sparkle data) and .pst/.raw (batlab
    data; the .pst and .raw files must be co-located and share a base
    name). Unsupported formats print a message and return None.
    """
    lowered = filename.lower()
    if lowered.endswith((".hdf5", ".h5")):
        return HDF5Data(filename, user, filemode)
    if lowered.endswith((".pst", ".raw")):
        return BatlabData(filename, user, filemode)
    # Converted from a Python-2 print statement; unsupported formats
    # still fall through to an implicit None, as before.
    print("File format not supported: ", filename)
Opens and returns the correct AcquisitionData object according to filename extention. Supported extentions: * .hdf5, .h5 for sparkle data * .pst, .raw for batlab data. Both the .pst and .raw file must be co-located and share the same base file name, but only one should be provided to this function see :class:`AcquisitionData<sparkle.data.acqdata.AcquisitionData>` examples (if data file already exists):: data = open_acqdata('myexperiment.hdf5', filemode='r') print data.dataset_names() for batlab data:: data = open('mouse666.raw', filemode='r') print data.dataset_names()
def Imm(extended_map, s, lmax):
    """FFT the theta-extended map, zero-pad and reorganize it into the
    Imm array of length (2*lmax+1)**2. Mostly internal; retained for
    backwards compatibility with map2salm and salm2map.
    """
    import numpy as np
    contiguous = np.ascontiguousarray(extended_map, dtype=np.complex128)
    imm = np.empty((2 * lmax + 1) ** 2, dtype=np.complex128)
    _Imm(contiguous, imm, s, lmax)
    return imm
Take the fft of the theta extended map, then zero pad and reorganize it This is mostly an internal function, included here for backwards compatibility. See map2salm and salm2map for more useful functions.
def fit(self, X, y=None, **kwargs): if self.tagset == "penn_treebank": self.pos_tag_counts_ = self._penn_tag_map() self._handle_treebank(X) elif self.tagset == "universal": self.pos_tag_counts_ = self._uni_tag_map() self._handle_universal(X) self.draw() return self
Fits the corpus to the appropriate tag map. Text documents must be tokenized & tagged before passing to fit. Parameters ---------- X : list or generator Should be provided as a list of documents or a generator that yields a list of documents that contain a list of sentences that contain (token, tag) tuples. y : ndarray or Series of length n An optional array of target values that are ignored by the visualizer. kwargs : dict Pass generic arguments to the drawing method Returns ------- self : instance Returns the instance of the transformer/visualizer
def parse(self):
    """Parse the configuration from the command line.

    Defaults the path arguments to ['.'] when none are given, validates
    the options (raising IllegalConfiguration on failure), then builds
    the run configuration and the CLI-override check configuration.
    """
    self._options, self._arguments = self._parse_args()
    if not self._arguments:
        self._arguments = ['.']
    if not self._validate_options(self._options):
        raise IllegalConfiguration()
    self._run_conf = self._create_run_config(self._options)
    self._override_by_cli = self._create_check_config(self._options,
                                                      use_defaults=False)
Parse the configuration. If one of `BASE_ERROR_SELECTION_OPTIONS` was selected, overrides all error codes to check and disregards any error code related configurations from the configuration files.
def scale_up_dyno(self, process, quantity, size):
    """Scale the given Heroku process type to *quantity* dynos of the
    given *size* via `heroku ps:scale`.
    """
    spec = "{}={}:{}".format(process, quantity, size)
    self._run(["heroku", "ps:scale", spec, "--app", self.name])
Scale up a dyno.
def load(self, dataset_keys, previous_datasets=None):
    """Load `dataset_keys`, plus any coordinate datasets they require.

    Datasets already present in `previous_datasets` are not reloaded.
    Returns a DatasetDict of only the requested datasets; everything
    loaded (including coordinates) accumulates in `previous_datasets`
    when that mapping is provided.
    """
    all_datasets = previous_datasets or DatasetDict()
    datasets = DatasetDict()
    # Resolve requested keys and the coordinates each dataset needs.
    dsids = [self.get_dataset_key(ds_key) for ds_key in dataset_keys]
    coordinates = self._get_coordinates_for_dataset_keys(dsids)
    # Coordinates first in the combined list so they are available when
    # their dependent datasets are loaded.
    all_dsids = list(set().union(*coordinates.values())) + dsids
    for dsid in all_dsids:
        if dsid in all_datasets:
            continue
        coords = [all_datasets.get(cid, None)
                  for cid in coordinates.get(dsid, [])]
        ds = self._load_dataset_with_area(dsid, coords)
        if ds is not None:
            all_datasets[dsid] = ds
            # NOTE(review): nesting reconstructed from collapsed source;
            # only successfully loaded requested datasets are returned.
            if dsid in dsids:
                datasets[dsid] = ds
    self._load_ancillary_variables(all_datasets)
    return datasets
Load `dataset_keys`. If `previous_datasets` is provided, do not reload those.
def set_default_encoder_parameters():
    """Wrapper for the openjpeg library function
    opj_set_default_encoder_parameters; returns the populated
    CompressionParametersType struct.
    """
    cparams = CompressionParametersType()
    fn = OPENJPEG.opj_set_default_encoder_parameters
    fn.argtypes = [ctypes.POINTER(CompressionParametersType)]
    fn(ctypes.byref(cparams))
    return cparams
Wrapper for openjpeg library function opj_set_default_encoder_parameters.
def read_elastic_tensor(self):
    """Parse the TOTAL ELASTIC MODULI table from the OUTCAR and store the
    6x6 float tensor under self.data["elastic_tensor"].
    """
    header_pattern = r"TOTAL ELASTIC MODULI \(kBar\)\s+" \
                     r"Direction\s+([X-Z][X-Z]\s+)+" \
                     r"\-+"
    row_pattern = r"[X-Z][X-Z]\s+" + r"\s+".join([r"(\-*[\.\d]+)"] * 6)
    footer_pattern = r"\-+"
    table = self.read_table_pattern(header_pattern, row_pattern,
                                    footer_pattern, postprocess=float)
    self.data["elastic_tensor"] = table
Parse the elastic tensor data. Returns: 6x6 array corresponding to the elastic tensor from the OUTCAR.
def save_config(self, data: dict) -> Path:
    """Write *data* as pedigree.yaml under the family's directory and
    return the written path; the directory is created if needed.
    """
    family_dir = Path(self.families_dir) / data['family']
    family_dir.mkdir(parents=True, exist_ok=True)
    target = family_dir / 'pedigree.yaml'
    target.write_text(
        ruamel.yaml.round_trip_dump(data, indent=4, block_seq_indent=2))
    return target
Save a config to the expected location.
def parent_of(self, name):
    """Walk up from cur_node to the nearest ancestor tagged *name* and
    set cur_node to that ancestor's parent. No-op when not inside a
    *name* tag. Useful for creating new paragraphs.
    """
    if not self._in_tag(name):
        return
    ancestor = self.cur_node
    while ancestor.tag != name:
        ancestor = ancestor.getparent()
    self.cur_node = ancestor.getparent()
go to parent of node with name, and set as cur_node. Useful for creating new paragraphs
def _load_data(self):
    """Load the Kirshenbaum ASCII IPA data from the built-in database.

    Returns a dict mapping (canonical IPA descriptor,) tuples to single
    ASCII characters; raises ValueError on malformed data lines.
    """
    mapping = dict()
    for line in load_data_file(
            file_path=self.DATA_FILE_PATH,
            file_path_is_relative=True,
            line_format=u"sxA"):
        i_desc, i_ascii = line
        if len(i_ascii) == 0:
            raise ValueError("Data file '%s' contains a bad line: '%s'" % (self.DATA_FILE_PATH, line))
        mapping[(variant_to_canonical_string(i_desc),)] = i_ascii[0]
    return mapping
Load the Kirshenbaum ASCII IPA data from the built-in database.
def clone_exception(error, args):
    """Return a new exception of the same class as *error* with the given
    args, carrying over the original's instance attributes.

    Parameters
    ----------
    error: the caught exception
    args: the new args to construct the clone with

    Returns
    -------
    A new exception instance of the same class.
    """
    new_error = error.__class__(*args)
    # Copy (rather than alias) the instance dict so mutating the clone's
    # attributes cannot affect the original exception.
    new_error.__dict__ = dict(error.__dict__)
    return new_error
Return a new cloned error of the same class with the given args. Useful when a handler needs to re-raise a variant of a caught exception, e.g. `try: do_sth() except BaseException as e: handle(e)` where `handle()` cannot simply re-raise the original unchanged. Parameters ---------- error: the caught error args: the new args to init the cloned error Returns ------- a new error of the same class
def is_last_child(self, child_pid):
    """Return True when *child_pid* equals this resource's last child
    version; False otherwise, including when no last child exists.
    """
    latest = self.last_child
    return latest is not None and latest == child_pid
Determine if 'pid' is the latest version of a resource. Resolves True for Versioned PIDs which are the oldest of its siblings. False otherwise, also for Head PIDs.
def onSiliconCheck(ra_deg, dec_deg, FovObj, padding_pix=DEFAULT_PADDING):
    """Check whether a single (ra, dec) position falls on silicon for the
    given field-of-view object. Positions 90 degrees or more from the
    FOV center are rejected immediately.
    """
    separation = angSepVincenty(FovObj.ra0_deg, FovObj.dec0_deg,
                                ra_deg, dec_deg)
    if separation >= 90.:
        return False
    return FovObj.isOnSilicon(ra_deg, dec_deg, padding_pix=padding_pix)
Check a single position.
def vlm_change_media(self, psz_name, psz_input, psz_output, i_options, ppsz_options, b_enabled, b_loop):
    """Edit the parameters of a media. This deletes all existing inputs
    and adds the specified one (thin wrapper over
    libvlc_vlm_change_media; string arguments are encoded to bytes).

    @param psz_name: the name of the new broadcast.
    @param psz_input: the input MRL.
    @param psz_output: the output MRL (the "sout" parameter).
    @param i_options: number of additional options.
    @param ppsz_options: additional options.
    @param b_enabled: boolean enabling the new broadcast.
    @param b_loop: should this broadcast be played in loop?
    @return: 0 on success, -1 on error.
    """
    return libvlc_vlm_change_media(self, str_to_bytes(psz_name), str_to_bytes(psz_input), str_to_bytes(psz_output), i_options, ppsz_options, b_enabled, b_loop)
Edit the parameters of a media. This will delete all existing inputs and add the specified one. @param psz_name: the name of the new broadcast. @param psz_input: the input MRL. @param psz_output: the output MRL (the parameter to the "sout" variable). @param i_options: number of additional options. @param ppsz_options: additional options. @param b_enabled: boolean for enabling the new broadcast. @param b_loop: Should this broadcast be played in loop ? @return: 0 on success, -1 on error.
def encode_utf8(s, f):
    """UTF-8 encode string *s* into file-like *f* per MQTT 3.1.1 §1.5.3:
    a 16-bit big-endian length prefix followed by the encoded bytes.

    Asserts the encoded string fits in 2**16-1 bytes. Returns the total
    number of bytes written (payload + 2).
    """
    encoder = codecs.getencoder('utf8')
    payload, _ = encoder(s)
    payload_len = len(payload)
    assert 0 <= payload_len <= 2**16 - 1
    # Big-endian 16-bit length prefix, one byte at a time.
    f.write(FIELD_U8.pack(payload_len >> 8))
    f.write(FIELD_U8.pack(payload_len & 0x00ff))
    f.write(payload)
    return payload_len + 2
UTF-8 encodes string `s` to file-like object `f` according to the MQTT Version 3.1.1 specification in section 1.5.3. The maximum length for the encoded string is 2**16-1 (65535) bytes. An assertion error will result if the encoded string is longer. Parameters ---------- s: str String to be encoded. f: file File-like object. Returns ------- int Number of bytes written to f.
def pfadd(self, key, value, *values):
    """Add the given elements to the HyperLogLog at *key* (Redis PFADD)."""
    args = (b'PFADD', key, value) + values
    return self.execute(*args)
Adds the specified elements to the specified HyperLogLog.
def check_move(new, old, t):
    """Metropolis acceptance test: decide whether a proposed model is accepted.

    A move to a lower score is always accepted; an uphill move is accepted
    with probability exp(-(new - old) / (kB * t)).  Non-positive or
    effectively-zero temperatures reject every move.
    """
    if t <= 0 or numpy.isclose(t, 0.0):
        return False
    if new < old:
        return True
    K_BOLTZ = 1.9872041E-003  # Boltzmann constant, kcal/(mol*K)
    acceptance = math.exp((old - new) / (K_BOLTZ * t))
    return acceptance > random.uniform(0, 1)
Determine whether a proposed model is accepted under the Metropolis criterion at temperature ``t``: downhill moves are always accepted, uphill moves with Boltzmann probability, and non-positive temperatures reject everything.
def do_output(self, *args):
    """Forward a command directly to the current output processor."""
    if not args:
        return
    action, params = args[0], args[1:]
    log.debug("Pass %s directly to output with %s", action, params)
    handler = getattr(self.output, "do_" + action, None)
    # Truthiness check mirrors the output object's convention of disabling
    # handlers by leaving them unset/falsy.
    if handler:
        handler(*params)
Pass a command directly to the current output processor
def expire(self, name, time):
    """Set a TTL of ``time`` seconds on the redis key backing ``name``.

    :param name: str the name of the redis key (namespaced via ``redis_key``)
    :param time: time expressed in seconds.
    :return: Future()
    """
    full_key = self.redis_key(name)
    with self.pipe as pipe:
        return pipe.expire(full_key, time)
Allow the key to expire after ``time`` seconds. :param name: str the name of the redis key :param time: time expressed in seconds. :return: Future()
def write_slide_list(self, logname, slides):
    """Write the slides as ``filename,time`` lines to a log file in the cache dir."""
    logpath = '%s/%s' % (self.cache, logname)
    with open(logpath, 'w') as logfile:
        for slide in slides:
            heading_text = slide['heading']['text']
            image_name = self.get_image_name(heading_text)
            # Slides without an explicit time default to 0.
            logfile.write('%s,%d\n' % (image_name, slide.get('time', 0)))
Write list of slides to logfile
def upstream(self, f, n=1):
    """Return ``n`` non-overlapping features upstream of ``f``.

    "Upstream" is determined by the strand of the query feature: for a
    reverse-strand (-1) feature it lies to the right, otherwise to the left.

    f: a Feature object
    n: the number of features to return
    """
    finder = self.right if f.strand == -1 else self.left
    return finder(f, n)
find n upstream features where upstream is determined by the strand of the query Feature f Overlapping features are not considered. f: a Feature object n: the number of features to return
def _get_filename(self):
    """Build the rsr filename from platform and instrument names.

    If the file is missing on disk and downloads are enabled, attempt to
    fetch it from the internet and re-check the data version.
    """
    self.filename = expanduser(
        os.path.join(self.rsr_dir, 'rsr_{0}_{1}.h5'.format(self.instrument,
                                                           self.platform_name)))
    LOG.debug('Filename: %s', str(self.filename))
    if not os.path.exists(self.filename) or not os.path.isfile(self.filename):
        LOG.warning("No rsr file %s on disk", self.filename)
        if self._rsr_data_version_uptodate:
            # Data is current but this file still doesn't exist: the
            # platform/sensor combination is simply unsupported.
            LOG.info("RSR data up to date, so seems there is no support for this platform and sensor")
        else:
            if self.do_download:
                LOG.info("Will download from internet...")
                download_rsr()
                if self._get_rsr_data_version() == RSR_DATA_VERSION:
                    self._rsr_data_version_uptodate = True
    # NOTE(review): this block can trigger a second download_rsr() call even
    # when the branch above already downloaded but the version check failed.
    # Looks like a deliberate retry, but confirm against download_rsr().
    if not self._rsr_data_version_uptodate:
        LOG.warning("rsr data may not be up to date: %s", self.filename)
        if self.do_download:
            LOG.info("Will download from internet...")
            download_rsr()
Get the rsr filname from platform and instrument names, and download if not available.
def save_current(self):
    """Save the editor in the active tab, if there is one.

    If editor.file.path is None the save delegate shows a save-as dialog.
    """
    editor = self.current_widget()
    if editor is not None:
        self._save(editor)
Save current editor. If the editor.file.path is None, a save as dialog will be shown.
def set_umask(mask):
    """Temporarily set the process umask, restoring the previous value on exit.

    A ``mask`` of None, or running on Windows (which has no umask concept),
    makes this a no-op context.
    """
    if mask is None or salt.utils.platform.is_windows():
        yield
    else:
        # BUG FIX: capture the previous mask *before* entering try/finally.
        # Previously the assignment lived inside the try block, so a failing
        # os.umask(mask) would make the finally clause raise NameError on
        # the unbound orig_mask, masking the real error.
        orig_mask = os.umask(mask)
        try:
            yield
        finally:
            os.umask(orig_mask)
Temporarily set the umask and restore once the contextmanager exits
def decode(self, litmap):
    """Convert the DNF clauses back into an Or-of-Ands expression."""
    terms = []
    for clause in self.clauses:
        literals = [litmap[idx] for idx in clause]
        terms.append(And(*literals))
    return Or(*terms)
Convert the DNF to an expression.
def __read(self):
    """Poll the UDP socket and dispatch heartbeat packets until stopped."""
    sock = self._socket
    sock.setblocking(0)
    while not self._stop_event.is_set():
        # 1-second select timeout keeps the stop flag responsive.
        readable, _, _ = select.select([sock], [], [], 1)
        if not readable:
            continue
        data, sender = sock.recvfrom(1024)
        try:
            self._handle_heartbeat(sender, data)
        except Exception as ex:
            # A malformed packet must not kill the reader loop.
            _logger.exception("Error handling the heart beat: %s", ex)
Reads packets from the socket
def refresh_index(meta, index) -> None:
    """Recalculate the projection for the given index.

    :param meta: model.Meta used to resolve columns by name
    :param index: The index whose projection is refreshed
    """
    # Key columns of the model plus the index are always projected.
    key_columns = set.union(meta.keys, index.keys)
    proj = index.projection
    mode = proj["mode"]
    if mode == "keys":
        proj["included"] = key_columns
    elif mode == "all":
        proj["included"] = meta.columns
    elif mode == "include":
        current = proj["included"]
        if all(isinstance(p, str) for p in current):
            # Names were given; resolve them to column objects.
            resolved = {meta.columns_by_name[n] for n in current}
        else:
            resolved = set(current)
        resolved.update(key_columns)
        proj["included"] = resolved
    # Strict projections only expose what is included; otherwise every
    # model column is available through the index.
    proj["available"] = proj["included"] if proj["strict"] else meta.columns
Recalculate the projection, hash_key, and range_key for the given index. :param meta: model.Meta to find columns by name :param index: The index to refresh
def path(value, allow_empty = False, **kwargs):
    """Validate that ``value`` is a valid path-like object.

    :param value: The value to validate.
    :param allow_empty: If ``True``, returns None if ``value`` is empty; if
      ``False`` (default), raises EmptyValueError for an empty ``value``.
    :returns: The path represented by ``value``, or None.
    :raises EmptyValueError: if ``allow_empty`` is ``False`` and ``value`` is empty
    :raises NotPathlikeError: if ``value`` is not a valid path-like object
    """
    if not value and not allow_empty:
        raise errors.EmptyValueError('value (%s) was empty' % value)
    elif not value:
        return None

    if hasattr(os, 'PathLike'):
        if not isinstance(value, (str, bytes, int, os.PathLike)):
            # BUG FIX: message previously read "is path-like", contradicting
            # the condition (and the message in the fallback branch below).
            raise errors.NotPathlikeError('value (%s) is not path-like' % value)
    else:
        # Pre-3.6 Python has no os.PathLike; duck-type via os.path.exists,
        # which raises TypeError for non-path-like arguments.
        if not isinstance(value, int):
            try:
                os.path.exists(value)
            except TypeError:
                raise errors.NotPathlikeError('value (%s) is not path-like' % value)

    return value
Validate that ``value`` is a valid path-like object. :param value: The value to validate. :param allow_empty: If ``True``, returns :obj:`None <python:None>` if ``value`` is empty. If ``False``, raises a :class:`EmptyValueError <validator_collection.errors.EmptyValueError>` if ``value`` is empty. Defaults to ``False``. :type allow_empty: :class:`bool <python:bool>` :returns: The path represented by ``value``. :rtype: Path-like object / :obj:`None <python:None>` :raises EmptyValueError: if ``allow_empty`` is ``False`` and ``value`` is empty :raises NotPathlikeError: if ``value`` is not a valid path-like object
def output_datacenter(gandi, datacenter, output_keys, justify=14):
    """Helper to output datacenter information."""
    output_generic(gandi, datacenter, output_keys, justify)
    if 'dc_name' in output_keys:
        output_line(gandi, 'datacenter', datacenter['name'], justify)
    if 'status' not in output_keys:
        return
    deactivate_at = datacenter.get('deactivate_at')
    if deactivate_at:
        output_line(gandi, 'closing on',
                    deactivate_at.strftime('%d/%m/%Y'), justify)
    # Report which services are fully closed in this datacenter.
    closing = [label
               for key, label in (('iaas_closed_for', 'vm'),
                                  ('paas_closed_for', 'paas'))
               if datacenter.get(key) == 'ALL']
    if closing:
        output_line(gandi, 'closed for', ', '.join(closing), justify)
Helper to output datacenter information.
def _infer_transform_options(transform): TransformOptions = collections.namedtuple("TransformOptions", ['CB', 'dual_index', 'triple_index', 'MB', 'SB']) CB = False SB = False MB = False dual_index = False triple_index = False for rx in transform.values(): if not rx: continue if "CB1" in rx: if "CB3" in rx: triple_index = True else: dual_index = True if "SB" in rx: SB = True if "CB" in rx: CB = True if "MB" in rx: MB = True return TransformOptions(CB=CB, dual_index=dual_index, triple_index=triple_index, MB=MB, SB=SB)
figure out what transform options should be by examining the provided regexes for keywords
def list_vms_sub(access_token, subscription_id):
    """List VMs in a subscription.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.

    Returns:
        HTTP response. JSON body of a list of VM model views.
    """
    endpoint = '{0}/subscriptions/{1}/providers/Microsoft.Compute/virtualMachines?api-version={2}'.format(
        get_rm_endpoint(), subscription_id, COMP_API)
    # Paged endpoint: do_get_next follows continuation links.
    return do_get_next(endpoint, access_token)
List VMs in a subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. JSON body of a list of VM model views.
def install(source, package_id):
    """Install a .pkg from an absolute path.

    :param str source: The path to a package.
    :param str package_id: The package ID
    :return: True if successful, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' pkgutil.install source=/vagrant/build_essentials.pkg package_id=com.apple.pkg.gcc4.2Leo
    """
    if is_installed(package_id):
        return True
    uri = urllib.parse.urlparse(source)
    # NOTE(review): only scheme-less (plain path) sources are accepted here,
    # despite the docstring mentioning URIs -- confirm intended behaviour.
    if uri.scheme != '':
        msg = 'Unsupported scheme for source uri: {0}'.format(uri.scheme)
        raise SaltInvocationError(msg)
    _install_from_path(source)
    return is_installed(package_id)
Install a .pkg from an URI or an absolute path. :param str source: The path to a package. :param str package_id: The package ID :return: True if successful, otherwise False :rtype: bool CLI Example: .. code-block:: bash salt '*' pkgutil.install source=/vagrant/build_essentials.pkg package_id=com.apple.pkg.gcc4.2Leo
def iteritems_sorted(dict_):
    """Iterate over ``dict_`` items in sorted key order.

    An OrderedDict already carries a meaningful order, so its items are
    yielded as-is.
    """
    items = six.iteritems(dict_)
    if not isinstance(dict_, OrderedDict):
        items = iter(sorted(items))
    return items
Iterate over the dictionary's items in sorted key order; an OrderedDict's existing order is preserved instead.
def new_scope(self, new_scope=None):
    """Add a new innermost scope for the duration of the with block.

    Args:
        new_scope (dict-like): The scope to add.  Defaults to a fresh empty
            dict.  (BUG FIX: the previous ``new_scope={}`` mutable default
            was handed to ``ChainMap.new_child``, so bindings written into
            the scope leaked into every later default-argument call.)
    """
    if new_scope is None:
        new_scope = {}
    old_scopes, self.scopes = self.scopes, self.scopes.new_child(new_scope)
    try:
        yield
    finally:
        # Restore the outer scope even if the with-body raised.
        self.scopes = old_scopes
Add a new innermost scope for the duration of the with block. Args: new_scope (dict-like): The scope to add.
def end(self):
    """Generate the closing part: emit closing braces, innermost first."""
    for depth in reversed(xrange(len(self.names))):
        self.out_f.write('{0}}}\n'.format(self.prefix(depth)))
Generate the closing part
def rename(self, names, inplace=False):
    """Return an SFrame with columns renamed according to ``names``.

    ``names`` maps old column names to new ones.  With ``inplace=False``
    (the default) a renamed copy is returned; with ``inplace=True`` the
    current SFrame is modified and ``self`` is returned.

    Parameters
    ----------
    names : dict [string, string]
        Dictionary of [old_name, new_name]

    inplace : bool, optional. Defaults to False.
        Whether the SFrame is modified in place.

    Returns
    -------
    out : SFrame
        The current SFrame.

    See Also
    --------
    column_names
    """
    # Deliberately an exact-type check, matching the documented contract.
    if type(names) is not dict:
        raise TypeError('names must be a dictionary: oldname -> newname')
    existing = set(self.column_names())
    for old_name in names:
        if old_name not in existing:
            raise ValueError('Cannot find column %s in the SFrame' % old_name)

    target = self if inplace else self.copy()
    with cython_context():
        for old_name in names:
            # Look up the position each time: earlier renames may not
            # change positions, but this mirrors the proxy's view exactly.
            position = target.column_names().index(old_name)
            target.__proxy__.set_column_name(position, names[old_name])
        target._cache = None
    return target
Returns an SFrame with columns renamed. ``names`` is expected to be a dict specifying the old and new names. This changes the names of the columns given as the keys and replaces them with the names given as the values. If inplace == False (default) this operation does not modify the current SFrame, returning a new SFrame. If inplace == True, this operation modifies the current SFrame, returning self. Parameters ---------- names : dict [string, string] Dictionary of [old_name, new_name] inplace : bool, optional. Defaults to False. Whether the SFrame is modified in place. Returns ------- out : SFrame The current SFrame. See Also -------- column_names Examples -------- >>> sf = SFrame({'X1': ['Alice','Bob'], ... 'X2': ['123 Fake Street','456 Fake Street']}) >>> res = sf.rename({'X1': 'name', 'X2':'address'}) >>> res +-------+-----------------+ | name | address | +-------+-----------------+ | Alice | 123 Fake Street | | Bob | 456 Fake Street | +-------+-----------------+ [2 rows x 2 columns]
def need_summary(self, now, max_updates, max_age):
    """Decide whether a "summarize" record should be added.

    :param now: The current time.
    :param max_updates: Maximum number of updates before a summarize is
                        required.
    :param max_age: Maximum age of the last summarize record; guards
                    against a summarize request lost by the compactor.
    :returns: True if a "summarize" record should be added, False otherwise.
    """
    if self.summarized is True:
        # Already summarized: only re-summarize once the last one is stale.
        return self.last_summarize_ts + max_age <= now
    return self.summarized is False and self.updates >= max_updates
Helper method to determine if a "summarize" record should be added. :param now: The current time. :param max_updates: Maximum number of updates before a summarize is required. :param max_age: Maximum age of the last summarize record. This is used in the case where a summarize request has been lost by the compactor. :returns: True if a "summarize" record should be added, False otherwise.
def dummy_signatures(self):
    """Return placeholder (algorithm_id, signature) pairs for header writing.

    Used when the MAR header is first written and the real signature bytes
    are not yet known; suitable for .write_signatures().
    """
    if not self.signing_algorithm:
        return []
    algo_id = {'sha1': 1, 'sha384': 2}[self.signing_algorithm]
    placeholder = make_dummy_signature(algo_id)
    return [(algo_id, placeholder)]
Create a dummy signature. This is used when initially writing the MAR header and we don't know what the final signature data will be. Returns: Fake signature data suitable for writing to the header with .write_signatures()
def get_cart_deformed_cell(base_cryst, axis=0, size=1):
    """Return a copy of ``base_cryst`` deformed along one cartesian direction.

    The deformation acts along a single axis: 0,1,2 = x,y,z normal strain;
    shears: 3,4,5 = yz, xz, xy.  ``size`` is in percent (degrees for
    shears, respectively).

    :param base_cryst: structure to be deformed
    :param axis: direction of deformation
    :param size: size of the deformation
    :returns: new, deformed structure
    """
    cryst = Atoms(base_cryst)
    strain = size / 100.0
    L = diag(ones(3))
    if axis < 3:
        # Normal strain: stretch the chosen diagonal element.
        L[axis, axis] += strain
    elif axis == 3:
        L[1, 2] += strain
    elif axis == 4:
        L[0, 2] += strain
    else:
        L[0, 1] += strain
    deformed_cell = dot(base_cryst.get_cell(), L)
    cryst.set_cell(deformed_cell, scale_atoms=True)
    return cryst
Return the cell deformed along one of the cartesian directions Creates new deformed structure. The deformation is based on the base structure and is performed along single axis. The axis is specified as follows: 0,1,2 = x,y,z ; sheers: 3,4,5 = yz, xz, xy. The size of the deformation is in percent and degrees, respectively. :param base_cryst: structure to be deformed :param axis: direction of deformation :param size: size of the deformation :returns: new, deformed structure
def _playsoundOSX(sound, block = True):
    """Play ``sound`` via AppKit.NSSound, optionally blocking until it ends.

    Tested with MP3 and WAVE on OS X 10.11 / Python 2.7; anything QuickTime
    supports should work.  Relative paths are resolved against the current
    working directory and converted to file:// URLs.
    """
    from AppKit import NSSound
    from Foundation import NSURL
    from time import sleep

    if '://' not in sound:
        # Bare filesystem path: make it absolute, then a file:// URL.
        if not sound.startswith('/'):
            from os import getcwd
            sound = getcwd() + '/' + sound
        sound = 'file://' + sound

    ns_url = NSURL.URLWithString_(sound)
    player = NSSound.alloc().initWithContentsOfURL_byReference_(ns_url, True)
    if not player:
        raise IOError('Unable to load sound named: ' + sound)
    player.play()
    if block:
        # Sleeping for the clip duration approximates synchronous playback.
        sleep(player.duration())
Utilizes AppKit.NSSound. Tested and known to work with MP3 and WAVE on OS X 10.11 with Python 2.7. Probably works with anything QuickTime supports. Probably works on OS X 10.5 and newer. Probably works with all versions of Python. Inspired by (but not copied from) Aaron's Stack Overflow answer here: http://stackoverflow.com/a/34568298/901641 I never would have tried using AppKit.NSSound without seeing his code.
def acquire(self):
    """Retry acquiring the lock until it succeeds or the timeout elapses.

    LockError from an already-held zc.lockfile lock is trapped and retried;
    the cleanup callback raises FileLockTimeout once the configured timeout
    has expired.  Other errors opening the lock file propagate.
    """
    stopwatch = timing.Stopwatch()
    timeout_check = functools.partial(self._check_timeout, stopwatch)
    self.lock = retry_call(
        self._attempt,
        retries=float('inf'),
        trap=zc.lockfile.LockError,
        cleanup=timeout_check,
    )
Attempt to acquire the lock every `delay` seconds until the lock is acquired or until `timeout` has expired. Raises FileLockTimeout if the timeout is exceeded. Errors opening the lock file (other than if it exists) are passed through.
def _run_driver(self, item_session: ItemSession, request, response):
    """Run PhantomJS processing for one item session.

    Builds a coprocessor session around the request/response pair and
    delegates the work to it; ``contextlib.closing`` guarantees the
    session is closed even if processing raises.
    """
    _logger.debug('Started PhantomJS processing.')
    session = PhantomJSCoprocessorSession(
        self._phantomjs_driver_factory, self._root_path,
        self._processing_rule, self._file_writer_session,
        request, response, item_session, self._phantomjs_params,
        self._warc_recorder
    )
    with contextlib.closing(session):
        # Generator-based coroutine: runs inside the caller's event loop.
        yield from session.run()
    _logger.debug('Ended PhantomJS processing.')
Start PhantomJS processing.
def count_trackbacks_handler(sender, **kwargs):
    """Update Entry.trackback_count when a trackback was posted.

    An F() expression makes the increment happen atomically in the
    database rather than in Python.
    """
    entry = kwargs['entry']
    incremented = F('trackback_count') + 1
    entry.trackback_count = incremented
    entry.save(update_fields=['trackback_count'])
Update Entry.trackback_count when a trackback was posted.
def make_multi_cols(self, num_class, name):
    """Build per-class prediction column names: c0_<name>, c1_<name>, ..."""
    prefixes = ['c' + str(i) + '_' for i in xrange(num_class)]
    return map(lambda prefix: prefix + name, prefixes)
Build per-class prediction column names of the form ``c<i>_<name>`` for multi-class predictions.
def confidential_interval(x, alpha=0.98): from scipy.stats import t if x.ndim == 1: df = len(x) - 1 cv = t.interval(alpha, df) std = np.std(x) else: df = len(x[0]) - 1 cv = t.interval(alpha, df)[1] std = np.std(x, axis=1) return std * cv / np.sqrt(df)
Return a numpy array of column confidence intervals Parameters ---------- x : ndarray A numpy array instance alpha : float Confidence level of the interval Returns ------- ndarray The distance from each sample mean to its confidence-interval bound
def join_locale(comps): loc = comps['language'] if comps.get('territory'): loc += '_' + comps['territory'] if comps.get('codeset'): loc += '.' + comps['codeset'] if comps.get('modifier'): loc += '@' + comps['modifier'] if comps.get('charmap'): loc += ' ' + comps['charmap'] return loc
Join a locale specifier split in the format returned by split_locale.
def timestamp(value):
    """Format a datetime with settings.DATE_FORMAT, localizing aware values.

    :param value: a datetime object
    :type value: datetime.datetime
    :return: the timestamp
    :rtype: str
    """
    if not timezone.is_naive(value):
        # Aware datetimes are converted to the current local timezone first.
        value = timezone.localtime(value)
    return value.strftime(settings.DATE_FORMAT)
Return the timestamp of a datetime.datetime object. :param value: a datetime object :type value: datetime.datetime :return: the timestamp :rtype: str
def voted_me_witness(self, account=None, limit=100):
    """Partition the accounts ``account`` follows by witness vote.

    Fetches everyone the account follows and records, in
    ``self.has_voted`` / ``self.has_not_voted``, whether each of them has
    voted for the account as witness.  Returns ``self.has_voted``.
    """
    account = account or self.mainaccount
    self.has_voted = []
    self.has_not_voted = []
    for follower in self.following(account, limit):
        votes = self.account(follower)['witness_votes']
        hits = [w for w in votes if w == account]
        # A follower is appended once per matching vote entry, mirroring
        # the (normally single) witness_votes occurrences.
        self.has_voted.extend([follower] * len(hits))
        if not hits:
            self.has_not_voted.append(follower)
    return self.has_voted
Fetches all those a given account is following and sees if they have voted that account as witness.
def xlim(self, low, high):
    """Set x-axis limits and return the chart for call chaining.

    Parameters
    ----------
    low : number
    high : number

    Returns
    -------
    Chart
    """
    axis = self.chart['xAxis'][0]
    axis['min'] = low
    axis['max'] = high
    return self
Set xaxis limits Parameters ---------- low : number high : number Returns ------- Chart