code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def delete(self):
    """Remove this device from your account via the Plex devices endpoint."""
    url = 'https://plex.tv/devices/%s.xml' % self.id
    session_delete = self._server._session.delete
    self._server.query(url, session_delete)
Remove this device from your account.
def rotate_key(self, name, mount_point=DEFAULT_MOUNT_POINT):
    """Rotate the named key to a new version.

    New plaintext requests are encrypted with the new version; use the
    rewrap endpoint to upgrade existing ciphertext.

    Supported methods:
        POST: /{mount_point}/keys/{name}/rotate. Produces: 204 (empty body)

    :param name: Name of the key to rotate.
    :type name: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    api_path = '/v1/{mount_point}/keys/{name}/rotate'.format(
        name=name, mount_point=mount_point)
    return self._adapter.post(url=api_path)
Rotate the version of the named key. After rotation, new plaintext requests will be encrypted with the new version of the key. To upgrade ciphertext to be encrypted with the latest version of the key, use the rewrap endpoint. This is only supported with keys that support encryption and decryption operations. Supported methods: POST: /{mount_point}/keys/{name}/rotate. Produces: 204 (empty body) :param name: Specifies the name of the key to read information about. This is specified as part of the URL. :type name: str | unicode :param mount_point: The "path" the method/backend was mounted on. :type mount_point: str | unicode :return: The response of the request. :rtype: requests.Response
def build_script(data):
    """Create an OP_RETURN output script carrying *data* as a PUSHDATA.

    :param bytes data: The content of the PUSHDATA.
    :return: The final script.
    :rtype: CScript
    """
    op_return = bytes([bitcoin.core.script.OP_RETURN])
    pushdata = bitcoin.core.script.CScriptOp.encode_op_pushdata(data)
    return bitcoin.core.script.CScript(op_return + pushdata)
Creates an output script containing an OP_RETURN and a PUSHDATA. :param bytes data: The content of the PUSHDATA. :return: The final script. :rtype: CScript
def serialize_for_header(key, value):
    """Serialize *value* for mapping key *key* in a VCF header line."""
    if key in QUOTE_FIELDS:
        return json.dumps(value)
    if isinstance(value, str):
        # Quote strings containing whitespace; pass others through verbatim.
        return json.dumps(value) if (" " in value or "\t" in value) else value
    if isinstance(value, list):
        return "[{}]".format(", ".join(value))
    return str(value)
Serialize value for the given mapping key for a VCF header line
def get_clusters_interfaces(clusters, extra_cond=lambda nic: True):
    """Map each cluster name to its available network interface names.

    Args:
        clusters (list): clusters to inspect
        extra_cond (lambda): extra predicate to filter the NICs retrieved
            from the API.

    Returns:
        dict of cluster name -> list of NIC names
    """
    return {
        cluster: get_cluster_interfaces(cluster, extra_cond=extra_cond)
        for cluster in clusters
    }
Returns for each cluster the available cluster interfaces Args: clusters (list): list of the clusters extra_cond (lambda): extra predicate to filter network card retrieved from the API. E.g lambda nic: not nic['mounted'] will retrieve all the usable network cards that are not mounted by default. Returns: dict of cluster with their associated nic names Examples: .. code-block:: python # pseudo code actual = get_clusters_interfaces(["paravance"]) expected = {"paravance": ["eth0", "eth1"]} assertDictEquals(expected, actual)
def disable_key(self):
    """Disable an existing API key, prompting interactively for its ID.

    On failure, prints a message and prompts again.
    """
    print("This command will disable an enabled key.")
    apiKeyID = input("API Key ID: ")
    try:
        key = self._curl_bitmex("/apiKey/disable", postdict={"apiKeyID": apiKeyID})
        print("Key with ID %s disabled." % key["id"])
    except Exception:
        # A bare ``except`` would also swallow KeyboardInterrupt/SystemExit,
        # making the retry loop impossible to interrupt.
        print("Unable to disable key, please try again.")
        self.disable_key()
Disable an existing API Key.
def network_stop(name, **kwargs):
    """Stop a defined virtual network; True when destroy succeeded.

    :param name: virtual network name
    :param kwargs: libvirt connection overrides (connection/username/password)
    """
    conn = __get_conn(**kwargs)
    try:
        network = conn.networkLookupByName(name)
        result = network.destroy()
        return not bool(result)
    finally:
        conn.close()
Stop a defined virtual network. :param name: virtual network name :param connection: libvirt connection URI, overriding defaults :param username: username to connect with, overriding defaults :param password: password to connect with, overriding defaults .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt '*' virt.network_stop default
def _handle_events(self, fd: int, events: int) -> None:
    """Called by the IOLoop when *fd* becomes readable or writable."""
    action = 0
    if events & ioloop.IOLoop.READ:
        action |= pycurl.CSELECT_IN
    if events & ioloop.IOLoop.WRITE:
        action |= pycurl.CSELECT_OUT
    # Keep driving curl while it reports E_CALL_MULTI_PERFORM.
    while True:
        try:
            ret, _num_handles = self._multi.socket_action(fd, action)
        except pycurl.error as exc:
            ret = exc.args[0]
        if ret != pycurl.E_CALL_MULTI_PERFORM:
            break
    self._finish_pending_requests()
Called by IOLoop when there is activity on one of our file descriptors.
def all_replica_set_links(rs_id, rel_to=None):
    """Build every link to be included with a replica-set representation."""
    rels = (
        'get-replica-set-info', 'delete-replica-set', 'replica-set-command',
        'get-replica-set-members', 'add-replica-set-member',
        'get-replica-set-secondaries', 'get-replica-set-primary',
        'get-replica-set-arbiters', 'get-replica-set-hidden-members',
        'get-replica-set-passive-members', 'get-replica-set-servers',
    )
    links = []
    for rel in rels:
        links.append(replica_set_link(rel, rs_id, self_rel=(rel == rel_to)))
    return links
Get a list of all links to be included with replica sets.
def _regex_flags_from_bits(self, bits): flags = 'ilmsuxa' return ''.join(flags[i - 1] if (1 << i) & bits else '' for i in range(1, len(flags) + 1))
Return the textual equivalent of numerically encoded regex flags.
def range(self):
    """Return the GenomicRange covered by all transcripts.

    :return: covering range, or None when transcripts span more than one
        chromosome
    :rtype: GenomicRange
    """
    chrs = {t.range.chr for t in self.get_transcripts()}
    if len(chrs) != 1:
        return None
    start = min(t.range.start for t in self.get_transcripts())
    end = max(t.range.end for t in self.get_transcripts())
    return GenomicRange(chrs.pop(), start, end)
Return the range the transcript loci covers :return: range :rtype: GenomicRange
def retrieve_dcnm_subnet_info(self, tenant_id, direc):
    """Return the DCNM subnet dict for *tenant_id* in direction *direc*."""
    service = self.get_service_obj(tenant_id)
    return service.get_dcnm_subnet_dict(direc)
Retrieves the DCNM subnet info for a tenant.
def k2g(kml_path, output_dir, separate_folders, style_type, style_filename):
    """Convert the KML file at *kml_path* to GeoJSON in *output_dir*.

    Thin CLI wrapper around ``m.convert``.
    """
    m.convert(
        kml_path,
        output_dir,
        separate_folders,
        style_type,
        style_filename,
    )
Given a path to a KML file, convert it to a GeoJSON FeatureCollection file and save it to the given output directory. If ``--separate_folders``, then create several GeoJSON files, one for each folder in the KML file that contains geodata or that has a descendant node that contains geodata. Warning: this can produce GeoJSON files with the same geodata in case the KML file has nested folders with geodata. If ``--style_type`` is specified, then also build a JSON style file of the given style type and save it to the output directory under the file name given by ``--style_filename``.
def parse_lock(self):
    """Parse the app lock file, populating ``self.last_update``.

    If the lock file is missing or malformed, rewrite it with the epoch
    timestamp and parse again.
    """
    try:
        with open(self.lock_file, "r") as reader:
            data = json.loads(reader.read())
        self.last_update = datetime.datetime.strptime(
            data["last_update"], AppCronLock.DATETIME_FORMAT
        )
    except (OSError, IOError, ValueError, KeyError):
        # Narrowed from a bare ``except`` (which also masked
        # KeyboardInterrupt/SystemExit): OSError/IOError cover file access,
        # ValueError covers both JSON decoding and strptime, KeyError the
        # missing field.
        self.write_lock(last_update=datetime.datetime.fromtimestamp(0))
        self.parse_lock()
Parses app lock file :return: Details about last update
def _call_analysis_function(options, module):
    """Call the analysis function named in *options* from *module*.

    Parameters
    ----------
    options : dict
        Option names and values; ``options['analysis']`` names the function.
    module : str
        Short name of a module available in this module's global namespace.

    Returns
    -------
    object
        Whatever the analysis function returns (dataframe, array, value,
        or list of tuples depending on the module).
    """
    args, kwargs = _get_args_kwargs(options, module)
    # Resolve the module and function by name instead of ``eval``-ing a
    # constructed expression, which would execute arbitrary option text.
    mod = globals()[module]
    func = getattr(mod, options['analysis'])
    return func(*args, **kwargs)
Call function from module and get result, using inputs from options Parameters ---------- options : dict Option names and values for analysis module : str Short name of module within macroeco containing analysis function Returns ------- dataframe, array, value, list of tuples Functions from emp module return a list of tuples in which first element of the tuple gives a string describing the result and the second element giving the result of the analysis as a dataframe. Functions in other modules return dataframe, array, or value.
def stream_logs(self, stdout=True, stderr=True, tail='all', timeout=10.0):
    """Stream this container's output (delegates to the module-level helper)."""
    container = self.inner()
    return stream_logs(
        container, stdout=stdout, stderr=stderr, tail=tail, timeout=timeout)
Stream container output.
def main():
    """CLI entry point: parse connection arguments and run the example."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", type=str, required=True)
    parser.add_argument("--user", type=str, required=True)
    parser.add_argument("--password", type=str)
    parser.add_argument("--token", type=str)
    args = parser.parse_args()
    # One of --password / --token must be supplied for authentication.
    if not (args.password or args.token):
        print('password or token is required')
        exit(1)
    example(args.host, args.user, args.password, args.token)
Main entry.
def get_cleaned_args(self, args):
    """Return positional arguments rewritten for the related model query."""
    if not args:
        return args
    conditions = (self._get_linguist_condition(arg, True) for arg in args)
    # Keep only truthy conditions, preserving order.
    return [condition for condition in conditions if condition]
Returns positional arguments for related model query.
def prox_gradf12(x, step, j=None, Xs=None):
    """1-D gradient step for the x (j=0) or y (j=1) variable."""
    if j == 0:
        return x - step * grad_fx(Xs[0][0], Xs[1][0])
    if j == 1:
        return x - step * grad_fy(Xs[0][0], Xs[1][0])
    raise NotImplementedError
1D gradient operator for x or y
def on_modified(self, event):
    """Handle a filesystem "modified" event on a watched path.

    Args:
        event: Event to process.
    """
    self._logger.debug('Detected modify event on watched path: %s',
                       event.src_path)
    self._process_event(event)
Function called every time a watched file is modified. Args: event: Event to process.
def get_mzid_specfile_ids(mzidfn, namespace):
    """Map spectra-data IDs to filenames from an mzIdentML file.

    Keys == IDs, values == filenames.
    """
    return {
        specdata.attrib['id']: specdata.attrib['name']
        for specdata in mzid_specdata_generator(mzidfn, namespace)
    }
Returns mzid spectra data filenames and their IDs used in the mzIdentML file as a dict. Keys == IDs, values == fns
def loc(lexer: Lexer, start_token: Token) -> Optional[Location]:
    """Return a Location from *start_token* to the lexer's last token.

    Returns None when the lexer suppresses location tracking.
    """
    if lexer.no_location:
        return None
    end_token = lexer.last_token
    return Location(
        start_token.start,
        end_token.end,
        start_token,
        end_token,
        lexer.source,
    )
Return a location object. Used to identify the place in the source that created a given parsed object.
def get_valid_residue(residue):
    """Normalize *residue* to a valid amino-acid code.

    Raises InvalidResidueError when the string is not recognized.
    """
    # None and already-valid codes pass straight through.
    if residue is None or amino_acids.get(residue) is not None:
        return residue
    normalized = amino_acids_reverse.get(residue.lower())
    if normalized is None:
        raise InvalidResidueError(residue)
    return normalized
Check if the given string represents a valid amino acid residue.
def one_of(self, chset: str) -> str:
    """Parse one character from the specified set.

    Args:
        chset: string of characters to try as alternatives.

    Returns:
        The character that was actually matched.

    Raises:
        UnexpectedInput: If the next character is not in `chset`.
    """
    ch = self.peek()
    if ch not in chset:
        raise UnexpectedInput(self, "one of " + chset)
    self.offset += 1
    return ch
Parse one character form the specified set. Args: chset: string of characters to try as alternatives. Returns: The character that was actually matched. Raises: UnexpectedInput: If the next character is not in `chset`.
def transform_raw_abundance(biomf, fn=math.log10, sampleIDs=None, sample_abd=True):
    """Apply *fn* (default: log10) to each total abundance from raw_abundance.

    :param biomf: BIOM/OTU table.
    :rtype: dict of sample ID -> transformed abundance
    """
    totals = raw_abundance(biomf, sampleIDs, sample_abd)
    return {sample_id: fn(total) for sample_id, total in totals.items()}
Function to transform the total abundance calculation for each sample ID to another format based on user given transformation function. :type biomf: A BIOM file. :param biomf: OTU table format. :param fn: Mathematical function which is used to transform smax to another format. By default, the function has been given as base 10 logarithm. :rtype: dict :return: Returns a dictionary similar to output of raw_abundance function but with the abundance values modified by the mathematical operation. By default, the operation performed on the abundances is base 10 logarithm.
def reload(self):
    """Force-reload the catalog once its TTL has expired."""
    age = time.time() - self.updated
    if age > self.ttl:
        self.force_reload()
Reload catalog if sufficient time has passed
def merge_configs(self, config, datas):
    """Recursively merge *datas* (a list of dicts) into *config*.

    Dict values merge recursively; for other values the last override wins.
    Keys present only in *datas* are ignored.

    :raises TypeError: if *config* or any element of *datas* is not a dict.
    """
    if not isinstance(config, dict) or any(not isinstance(d, dict) for d in datas):
        # Fixed typo in the error message ("Dictionnary" -> "Dictionary").
        raise TypeError("Unable to merge: Dictionary expected")
    for key, value in config.items():
        others = [d[key] for d in datas if key in d]
        if not others:
            continue
        if isinstance(value, dict):
            config[key] = self.merge_configs(value, others)
        else:
            config[key] = others[-1]
    return config
Merge configs files
def from_proto(cls, repeated_split_infos):
    """Build a new SplitDict from a repeated SplitInfo proto field."""
    split_dict = cls()
    for proto in repeated_split_infos:
        info = SplitInfo()
        info.CopyFrom(proto)
        split_dict.add(info)
    return split_dict
Returns a new SplitDict initialized from the `repeated_split_infos`.
def get_goobjs_altgo2goobj(go2obj):
    """Split *go2obj* into the set of GO objects and an alt-ID -> object map."""
    goobjs = set(go2obj.values())
    # Keys that differ from the object's own id are alternate IDs.
    altgo2goobj = {
        goid: goobj for goid, goobj in go2obj.items() if goid != goobj.id
    }
    return goobjs, altgo2goobj
Separate alt GO IDs and key GO IDs.
def multi_mask_sequences(records, slices):
    """Yield records with the positions covered by *slices* replaced by gaps."""
    for record in records:
        indices = list(range(len(record)))
        keep = set(indices)
        # Remove every index selected by any of the slices.
        for sl in slices:
            keep -= set(indices[sl])
        masked = ''.join(
            base if pos in keep else '-'
            for pos, base in enumerate(str(record.seq))
        )
        record.seq = Seq(masked)
        yield record
Replace characters sliced by slices with gap characters.
def __reset_crosshair(self):
    """Move the cross-hair lines to the stored x/y image coordinates."""
    self.lver.set_xdata(self.x_coord)
    self.lhor.set_ydata(self.y_coord)
Redraw the cross-hair on the horizontal slice plot at the stored x/y image coordinates.
def embed_data_in_blockchain(data, private_key, blockchain_client=BlockchainInfoClient(), fee=OP_RETURN_FEE, change_address=None, format='bin'):
    """Build, sign, and broadcast an OP_RETURN transaction carrying *data*.

    NOTE(review): the default client instance is evaluated once and shared
    across calls — confirm this is intended.
    """
    tx = make_op_return_tx(
        data, private_key, blockchain_client,
        fee=fee, change_address=change_address, format=format)
    return broadcast_transaction(tx, blockchain_client)
Builds, signs, and dispatches an OP_RETURN transaction.
def remove_flag(self, flag):
    """Remove *flag* from the flags and mark the 'flags' attribute as
    changed so it is regenerated when outputting text."""
    super(Entry, self).remove_flag(flag)
    self._changed_attrs.add('flags')
Remove flag from the flags and memorize that this attribute has changed so we can regenerate it when outputting text.
def issubclass(cls, ifaces):
    """Check whether *cls* implements every interface in *ifaces*.

    Fix: the original loop did ``for iface in ifaces: return all(...)``,
    so only the FIRST interface was ever inspected; all interfaces are
    now checked.
    """
    ifaces = _ensure_ifaces_tuple(ifaces)
    if not ifaces:
        # Preserve legacy behavior: empty input falls through to None.
        return None
    checks = (
        ('__iclassattribute__', _is_attribute),
        ('__iproperty__', _is_property),
        ('__imethod__', _is_method),
        ('__iclassmethod__', _is_classmethod),
    )
    return all(
        _check_for_definition(iface, cls, attr, predicate)
        for iface in ifaces
        for attr, predicate in checks
    )
Check if the given class is an implementation of the given iface.
def import_field(field_classpath):
    """Import a field class by dotted path, prepending "django.db.models"
    to raw class names; raises ImproperlyConfigured on failure."""
    dotted = (field_classpath if '.' in field_classpath
              else "django.db.models.%s" % field_classpath)
    try:
        return import_dotted_path(dotted)
    except ImportError:
        raise ImproperlyConfigured("The EXTRA_MODEL_FIELDS setting contains "
                                   "the field '%s' which could not be "
                                   "imported." % field_classpath)
Imports a field by its dotted class path, prepending "django.db.models" to raw class names and raising an exception if the import fails.
def get_datetime_properties(self, recursive=True):
    """Return a dict mapping dotted property paths to DateField properties.

    :param recursive: also descend into ObjectField sub-properties
    :returns: dict of path -> DateField
    """
    res = {}
    for name, field in self.properties.items():
        if isinstance(field, DateField):
            res[name] = field
        elif recursive and isinstance(field, ObjectField):
            # Fix: the recursive call returns a dict, so iterate its
            # items(); iterating the dict directly yields only the keys and
            # the ``for n, f in ...`` unpack fails (or silently mis-binds).
            sub = field.get_datetime_properties(recursive=recursive)
            for sub_name, sub_field in sub.items():
                res[name + "." + sub_name] = sub_field
    return res
Returns a dict of property.path and property. :param recursive the name of the property :returns a dict
def logpath(self):
    """Return the logfile path, creating the log directory if needed.

    The filename embeds the log's timestamp and the player names; it is
    the file written to on flush().
    """
    name = '{}-{}.catan'.format(
        self.timestamp_str(),
        '-'.join([p.name for p in self._players]))
    path = os.path.join(self._log_dir, name)
    # makedirs(exist_ok=True) avoids the exists()/mkdir() race of the
    # original and also creates intermediate directories.
    os.makedirs(self._log_dir, exist_ok=True)
    return path
Return the logfile path and filename as a string. The file with name self.logpath() is written to on flush(). The filename contains the log's timestamp and the names of players in the game. The logpath changes when reset() or _set_players() are called, as they change the timestamp and the players, respectively.
def new_keypair(key, value, ambig, unambig):
    """Record (key, value), demoting keys with conflicting values.

    :param key: of pair
    :param value: of pair
    :param ambig: set of keys known to decode ambiguously
    :param unambig: dict of keys with a single known decoding
    """
    if key in ambig:
        return
    if key in unambig and unambig[key] != value:
        # Conflicting value: the key becomes ambiguous.
        ambig.add(key)
        del unambig[key]
        return
    unambig[key] = value
Check new keypair against existing unambiguous dict :param key: of pair :param value: of pair :param ambig: set of keys with ambig decoding :param unambig: set of keys with unambig decoding :return:
def yearly_average(arr, dt):
    """Annual average of a sub-yearly series, weighted by timestep duration.

    Masked (non-finite) values in *arr* are excluded from the averages.

    Parameters
    ----------
    arr : xarray.DataArray
        The array to be averaged
    dt : xarray.DataArray
        Duration of each timestep

    Returns
    -------
    xarray.DataArray with one value per year spanned by ``arr``
    """
    assert_matching_time_coord(arr, dt)
    group_key = TIME_STR + '.year'
    # Zero out weights where the data is masked.
    weights = dt.where(np.isfinite(arr))
    weighted_sum = (arr * weights).groupby(group_key).sum(TIME_STR)
    total_weight = weights.groupby(group_key).sum(TIME_STR)
    return weighted_sum / total_weight
Average a sub-yearly time-series over each year. Resulting timeseries comprises one value for each year in which the original array had valid data. Accounts for (i.e. ignores) masked values in original data when computing the annual averages. Parameters ---------- arr : xarray.DataArray The array to be averaged dt : xarray.DataArray Array of the duration of each timestep Returns ------- xarray.DataArray Has the same shape and mask as the original ``arr``, except for the time dimension, which is truncated to one value for each year that ``arr`` spanned
def set_units(self, unit):
    """Set the unit for this data point.

    Units really belong to the stream; setting them here is only useful
    when the stream is created by writing this point.
    """
    allowed = (type(None),) + tuple(six.string_types)
    self._units = validate_type(unit, *allowed)
Set the unit for this data point Unit, as with data_type, are actually associated with the stream and not the individual data point. As such, changing this within a stream is not encouraged. Setting the unit on the data point is useful when the stream might be created with the write of a data point.
def _load_sequences_to_strain(self, strain_id, force_rerun=False):
    """Load a strain GEMPRO with functional genes defined, attach sequences,
    and save it as a new pickle.

    :param strain_id: key into ``self.strain_infodict``
    :param force_rerun: recompute even if the output pickle already exists
    :return: tuple of (strain_id, path to the saved GEMPRO pickle)
    """
    gp_seqs_path = op.join(self.model_dir, '{}_gp_withseqs.pckl'.format(strain_id))
    if ssbio.utils.force_rerun(flag=force_rerun, outfile=gp_seqs_path):
        gp_noseqs = ssbio.io.load_pickle(self.strain_infodict[strain_id]['gp_noseqs_path'])
        # Lazy FASTA index over the strain's genome.
        strain_sequences = SeqIO.index(self.strain_infodict[strain_id]['genome_path'], 'fasta')
        for strain_gene in gp_noseqs.functional_genes:
            # Map the reference gene ID to this strain's gene ID via the
            # orthology matrix.
            strain_gene_key = self.df_orthology_matrix.at[strain_gene.id, strain_id]
            new_id = '{}_{}'.format(strain_gene.id, strain_id)
            if strain_gene.protein.sequences.has_id(new_id):
                # Sequence already loaded for this strain; skip.
                continue
            strain_gene.protein.load_manual_sequence(seq=strain_sequences[strain_gene_key],
                                                     ident=new_id,
                                                     set_as_representative=True)
        gp_noseqs.save_pickle(outfile=gp_seqs_path)
    return strain_id, gp_seqs_path
Load strain GEMPRO with functional genes defined, load sequences to it, save as new GEMPRO
def _extract_local_mean_gauss(image, mask = slice(None), sigma = 1, voxelspacing = None):
    """Internal, single-image version of `local_mean_gauss`."""
    if voxelspacing is None:
        voxelspacing = [1.] * image.ndim
    # Scale sigma per axis according to voxel spacing.
    structure_sigma = _create_structure_array(sigma, voxelspacing)
    smoothed = gaussian_filter(image, structure_sigma)
    return _extract_intensities(smoothed, mask)
Internal, single-image version of `local_mean_gauss`.
def _continuous_colormap(hue, cmap, vmin, vmax):
    """Fit *cmap* to the data range of *hue*, honoring explicit vmin/vmax.

    Returns a normalized ``mpl.cm.ScalarMappable``.
    """
    lo = vmin if vmin is not None else min(hue)
    hi = vmax if vmax is not None else max(hue)
    norm = mpl.colors.Normalize(vmin=lo, vmax=hi)
    return mpl.cm.ScalarMappable(norm=norm, cmap=cmap)
Creates a continuous colormap. Parameters ---------- hue : iterable The data column whose entries are being discretely colorized. Note that although top-level plotter ``hue`` parameters ingest many argument signatures, not just iterables, they are all preprocessed to standardized iterables before this method is called. cmap : ``matplotlib.cm`` instance The `matplotlib` colormap instance which will be used to colorize the geometries. vmin : float A strict floor on the value associated with the "bottom" of the colormap spectrum. Data column entries whose value is below this level will all be colored by the same threshold value. The value for this variable is meant to be inherited from the top-level variable of the same name. vmax : float A strict ceiling on the value associated with the "top" of the colormap spectrum. Data column entries whose value is above this level will all be colored by the same threshold value. The value for this variable is meant to be inherited from the top-level variable of the same name. Returns ------- cmap : ``mpl.cm.ScalarMappable`` instance A normalized scalar version of the input ``cmap`` which has been fitted to the data and inputs.
def add(self, properties):
    """Add a faked resource built from *properties* to this manager.

    For URI-based lookup, the resource is also registered on the faked HMC.

    Parameters:
        properties (dict): Resource properties.

    Returns:
        FakedBaseResource: The faked resource object.
    """
    resource = self.resource_class(self, properties)
    self._resources[resource.oid] = resource
    self._hmc.all_resources[resource.uri] = resource
    return resource
Add a faked resource to this manager. For URI-based lookup, the resource is also added to the faked HMC. Parameters: properties (dict): Resource properties. If the URI property (e.g. 'object-uri') or the object ID property (e.g. 'object-id') are not specified, they will be auto-generated. Returns: FakedBaseResource: The faked resource object.
def get_freesurfer_cmap(vis_type):
    """Return the colormap appropriate for the given visualization type."""
    cortical = ('cortical_volumetric', 'cortical_contour')
    label_based = ('labels_volumetric', 'labels_contour')
    if vis_type in cortical:
        return ListedColormap(get_freesurfer_cortical_LUT())
    if vis_type in label_based:
        # Prepend black so the first label renders as black.
        black = np.array([0, 0, 0, 1])
        hsv_colors = plt.get_cmap('hsv')(np.linspace(0, 1, 20))
        return ListedColormap(np.vstack((black, hsv_colors)), 'my_colormap')
    raise NotImplementedError('color map for the visualization type {} has not been implemented!'.format(vis_type))
Provides different colormaps for different visualization types.
def _get_missing_trees(self, path, root_tree):
    """Creates missing ``Tree`` objects for the given path.

    :param path: path given as a string. It may be a path to a file node
        (i.e. ``foo/bar/baz.txt``) or directory path - in that case it must
        end with slash (i.e. ``foo/bar/``).
    :param root_tree: ``dulwich.objects.Tree`` object from which we start
        traversing (should be commit's root tree)
    """
    dirpath = posixpath.split(path)[0]
    dirs = dirpath.split('/')
    if not dirs or dirs == ['']:
        return []

    def get_tree_for_dir(tree, dirname):
        # Return the existing subtree named `dirname`, or None when absent.
        for name, mode, id in tree.iteritems():
            if name == dirname:
                obj = self.repository._repo[id]
                if isinstance(obj, objects.Tree):
                    return obj
                else:
                    raise RepositoryError("Cannot create directory %s "
                        "at tree %s as path is occupied and is not a "
                        "Tree" % (dirname, tree))
        return None

    trees = []
    parent = root_tree
    for dirname in dirs:
        tree = get_tree_for_dir(parent, dirname)
        if tree is None:
            # Missing: create an empty Tree and link it into the parent.
            tree = objects.Tree()
            dirmode = 040000  # git tree-entry mode for a directory
            parent.add(dirmode, dirname, tree.id)
        parent = tree
        trees.append(tree)
    return trees
Creates missing ``Tree`` objects for the given path. :param path: path given as a string. It may be a path to a file node (i.e. ``foo/bar/baz.txt``) or directory path - in that case it must end with slash (i.e. ``foo/bar/``). :param root_tree: ``dulwich.objects.Tree`` object from which we start traversing (should be commit's root tree)
def get_component(self, component=None, **kwargs):
    """Filter in the 'component' context, optionally by component name.

    :parameter str component: name of the component (optional)
    :parameter **kwargs: any other filter tags (except component or context)
    :return: :class:`phoebe.parameters.parameters.ParameterSet`
    """
    filters = dict(kwargs)
    if component is not None:
        filters['component'] = component
    filters['context'] = 'component'
    return self.filter(**filters)
Filter in the 'component' context :parameter str component: name of the component (optional) :parameter **kwargs: any other tags to do the filter (except component or context) :return: :class:`phoebe.parameters.parameters.ParameterSet`
def get_tags(instance_id=None, keyid=None, key=None, profile=None, region=None):
    """Return the tags attached to *instance_id* as a list of {name: value} dicts."""
    client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)
    result = client.get_all_tags(filters={"resource-id": instance_id})
    if not result:
        log.info("No tags found for instance_id %s", instance_id)
        return []
    return [{tag.name: tag.value} for tag in result]
Given an instance_id, return a list of tags associated with that instance. returns (list) - list of tags as key/value pairs CLI Example: .. code-block:: bash salt myminion boto_ec2.get_tags instance_id
def post_video(self, videoUrl, name=None, ingestMedia=True):
    """Create a video record (named after the URL basename by default) and
    optionally ingest its media from *videoUrl*."""
    if name is None:
        name = os.path.basename(videoUrl)
    new_video = self._make_request(self.CMS_Server, 'POST', '/videos',
                                   data={'name': name})
    if ingestMedia:
        self.ingest_video(new_video['id'], videoUrl)
    return new_video
Post and optionally ingest media from the specified URL
def AddProcessingOptions(self, argument_group):
    """Adds processing options to the argument group.

    Args:
      argument_group (argparse._ArgumentGroup): argparse argument group.
    """
    argument_helper_names = ['temporary_directory', 'zeromq']
    if self._CanEnforceProcessMemoryLimit():
        argument_helper_names.append('process_resources')
    helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
        argument_group, names=argument_helper_names)

    argument_group.add_argument(
        '--worker-memory-limit', '--worker_memory_limit',
        dest='worker_memory_limit', action='store', type=int,
        metavar='SIZE', help=(
            # Fixed user-facing typo: "is is killed" -> "it is killed".
            'Maximum amount of memory (data segment and shared memory) '
            'a worker process is allowed to consume in bytes, where 0 '
            'represents no limit. The default limit is 2147483648 (2 GiB). '
            'If a worker process exceeds this limit it is killed by the main '
            '(foreman) process.'))
Adds processing options to the argument group Args: argument_group (argparse._ArgumentGroup): argparse argument group.
def lock(self, seconds=5):
    """Lock the device for *seconds*. iOS only."""
    duration = robot.utils.timestr_to_secs(seconds)
    self._current_application().lock(duration)
Lock the device for a certain period of time. iOS only.
def assert_raises_regex(exception, regex, msg_fmt="{msg}"):
    """Fail unless the context raises *exception* with a message matching
    *regex* (string or compiled pattern).

    Supported msg_fmt arguments: msg, exc_type, exc_name, text, pattern.
    """
    def test(exc):
        compiled = re.compile(regex)
        if not exc.args:
            fail(msg_fmt.format(
                msg="{} without message".format(exception.__name__),
                text=None,
                pattern=compiled.pattern,
                exc_type=exception,
                exc_name=exception.__name__,
            ))
        text = exc.args[0]
        if not compiled.search(text):
            fail(msg_fmt.format(
                msg="{!r} does not match {!r}".format(text, compiled.pattern),
                text=text,
                pattern=compiled.pattern,
                exc_type=exception,
                exc_name=exception.__name__,
            ))

    context = AssertRaisesRegexContext(exception, regex, msg_fmt)
    context.add_test(test)
    return context
Fail unless an exception with a message that matches a regular expression is raised within the context. The regular expression can be a regular expression string or object. >>> with assert_raises_regex(ValueError, r"\\d+"): ... raise ValueError("Error #42") ... >>> with assert_raises_regex(ValueError, r"\\d+"): ... raise ValueError("Generic Error") ... Traceback (most recent call last): ... AssertionError: 'Generic Error' does not match '\\\\d+' The following msg_fmt arguments are supported: * msg - the default error message * exc_type - exception type that is expected * exc_name - expected exception type name * text - actual error text * pattern - expected error message as regular expression string
def select_projects(self, *args):
    """Return a copy of the query filtered to the given monitored project IDs.

    :type args: tuple
    :param args: Project IDs limiting the resources in the query.
    :rtype: :class:`Query`
    :returns: The new query object.
    """
    clone = copy.deepcopy(self)
    clone._filter.projects = args
    return clone
Copy the query and add filtering by monitored projects. This is only useful if the target project represents a Stackdriver account containing the specified monitored projects. Examples:: query = query.select_projects('project-1') query = query.select_projects('project-1', 'project-2') :type args: tuple :param args: Project IDs limiting the resources to be included in the query. :rtype: :class:`Query` :returns: The new query object.
def _onInstanceAttribute(self, name, line, pos, absPosition, level):
    """Memorize a class instance attribute, ignoring duplicates by name."""
    attributes = self.objectsStack[level - 1].instanceAttributes
    if any(attr.name == name for attr in attributes):
        return
    attributes.append(InstanceAttribute(name, line, pos, absPosition))
Memorizes a class instance attribute
def add_install_button(self, grid_lang, row, column):
    """Add an 'Install more...' button and wire it to the install handler."""
    btn = self.button_with_label('<b>Install more...</b>')
    if (row, column) == (0, 0):
        grid_lang.add(btn)
    else:
        grid_lang.attach(btn, column, row, 1, 1)
    btn.connect("clicked", self.parent.install_btn_clicked)
    return btn
Add button that opens the window for installing more assistants
def deep_merge(dict_one, dict_two):
    """Deep-merge *dict_two* into a copy of *dict_one*.

    Nested dicts merge recursively; lists merge as the deduplicated union
    (order not preserved); everything else is overridden by *dict_two*.
    """
    merged = dict(dict_one)
    for key, new_value in dict_two.items():
        old_value = dict_one.get(key)
        if isinstance(old_value, dict) and isinstance(new_value, dict):
            merged[key] = deep_merge(old_value, new_value)
        elif isinstance(old_value, list) and isinstance(new_value, list):
            merged[key] = list(set(old_value + new_value))
        else:
            merged[key] = new_value
    return merged
Deep merge two dicts.
def handle_packet(self, packet):
    """Dispatch *packet* to the registered callback, or print it."""
    callback = self.packet_callback
    if callback:
        callback(packet)
    else:
        print('packet', packet)
Process incoming packet dict and optionally call callback.
def _load_managed_entries(self):
    """Register scheduler-managed context entries.

    Non-managed entries are skipped with a warning; no start-up procedures
    are performed here.
    """
    for process_name, process_entry in context.process_context.items():
        if not isinstance(process_entry, ManagedProcessEntry):
            self.logger.warning('Skipping non-managed context entry {0} of type {1}.'
                                .format(process_name, process_entry.__class__.__name__))
            continue
        try:
            self._register_process_entry(process_entry, self.fire_managed_worker)
        except Exception:
            self.logger.error('Managed Thread Handler {0} failed to start. Skipping it.'
                              .format(process_entry.key), exc_info=True)
loads scheduler managed entries. no start-up procedures are performed
def comments(self, article):
    """Retrieve comments for *article* (ID or object)."""
    return self._query_zendesk(
        self.endpoint.comments, object_type='comment', id=article)
Retrieve comments for an article :param article: Article ID or object
def is_slice_or_dim_range_request(key, depth=0):
    """True if *key* is a slice/DimensionRange, or (at top level only) a
    non-empty sequence composed of them."""
    if is_slice_or_dim_range(key):
        return True
    if depth != 0:
        return False
    return (non_str_len_no_throw(key) > 0
            and all(is_slice_or_dim_range_request(subkey, depth + 1)
                    for subkey in key))
Checks if a particular key is a slice, DimensionRange or list of those types
def _mount(self):
    """Mount the distribution point share, using the right tool for the
    host OS.

    Raises:
        JSSError: if running on an unsupported OS.
    """
    if is_osx():
        if self.connection["jss"].verbose:
            print self.connection["mount_url"]
        if mount_share:
            # Prefer the (optional) mount_share helper when available.
            self.connection["mount_point"] = mount_share(
                self.connection["mount_url"])
        else:
            # Fall back to the system mount command.
            args = ["mount", "-t", self.protocol,
                    self.connection["mount_url"],
                    self.connection["mount_point"]]
            if self.connection["jss"].verbose:
                print " ".join(args)
            subprocess.check_call(args)
    elif is_linux():
        args = ["mount_afp", "-t", self.protocol,
                self.connection["mount_url"],
                self.connection["mount_point"]]
        if self.connection["jss"].verbose:
            print " ".join(args)
        subprocess.check_call(args)
    else:
        raise JSSError("Unsupported OS.")
Mount based on which OS is running.
def untranslateName(s):
    """Undo the Python-safe encoding of a CL parameter or variable name."""
    decoded = s.replace('DOT', '.').replace('DOLLAR', '$')
    # Strip the 'PY' prefix added to names that collide with Python syntax.
    if decoded.startswith('PY'):
        decoded = decoded[2:]
    return decoded.replace('.PY', '.')
Undo Python conversion of CL parameter or variable name.
def write_json(data, path, file_name):
    """Write *data* as JSON to path/file_name, creating *path* if needed.

    Silently does nothing when *path* exists but is not a directory.

    Args:
        data: dictionary representation of the data to write out
        path: directory to output the file in
        file_name: name of the file to write out
    """
    if os.path.exists(path):
        if not os.path.isdir(path):
            return
    else:
        mkdir_p(path)
    with open(os.path.join(path, file_name), 'w') as out:
        json_tricks.dump(data, out, indent=4, primitives=True, allow_nan=True)
Write out data to a json file. Args: data: A dictionary representation of the data to write out path: The directory to output the file in file_name: The name of the file to write out
def panels(self):
    """Create two stacked axes sharing x: top for signal, bottom for gene models."""
    top = self.fig.add_subplot(211)
    bottom = self.fig.add_subplot(212, sharex=top)
    return (bottom, self.gene_panel), (top, self.signal_panel)
Add 2 panels to the figure, top for signal and bottom for gene models
def update_extent_location(self, extent_loc):
    """Update the extent location for this Path Table Record.

    Parameters:
        extent_loc - The new extent location.
    Returns:
        Nothing.
    """
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('Path Table Record not yet initialized')
    self.extent_location = extent_loc
A method to update the extent location for this Path Table Record. Parameters: extent_loc - The new extent location. Returns: Nothing.
def ext_pillar(minion_id, pillar, *args, **kwargs):
    """Compile pillar data via saltclass.

    Each positional arg is a config dict; when 'path' is missing it falls
    back to /srv/saltclass (with a warning).
    """
    path = None
    for config in args:
        if 'path' not in config:
            path = '/srv/saltclass'
            # Fix: the original did ``args[i]['path'] = path``, indexing the
            # args tuple with the dict element itself (TypeError). Set the
            # key on the dict directly.
            config['path'] = path
            log.warning('path variable unset, using default: %s', path)
        else:
            path = config['path']
    salt_data = {
        '__opts__': __opts__,
        '__salt__': __salt__,
        '__grains__': __grains__,
        '__pillar__': pillar,
        'minion_id': minion_id,
        'path': path
    }
    return sc.get_pillars(minion_id, salt_data)
Compile pillar data
def topological_order(self):
    """Kahn's algorithm: return node IDs in topological order."""
    # Count incoming edges per node.
    in_degree = {node: 0 for node in range(self.n_nodes)}
    for u in range(self.n_nodes):
        for v, _ in self.adj_list[u]:
            in_degree[v] += 1
    ready = Queue()
    for node in range(self.n_nodes):
        if in_degree[node] == 0:
            ready.put(node)
    order = []
    while not ready.empty():
        u = ready.get()
        order.append(u)
        for v, _ in self.adj_list[u]:
            in_degree[v] -= 1
            if in_degree[v] == 0:
                ready.put(v)
    return order
Return the topological order of the node IDs from the input node to the output node.
def map_components(notsplit_packages, components):
    """Translate component names into packages to install.

    Components still listed in *notsplit_packages* have not been split from
    the master 'ceph' package, so they map to 'ceph' instead of their own
    name. Duplicates collapse via the set.
    """
    packages = {
        'ceph' if component in notsplit_packages else component
        for component in components
    }
    return list(packages)
Returns a list of packages to install based on component names This is done by checking if a component is in notsplit_packages, if it is, we know we need to install 'ceph' instead of the raw component name. Essentially, this component hasn't been 'split' from the master 'ceph' package yet.
def _blas_is_applicable(*args):
    """Whether BLAS routines can be applied to the given arrays.

    Requires a single BLAS-supported dtype across all arrays, uniform
    contiguity (all F- or all C-contiguous), and sizes addressable with
    32-bit integers.
    """
    first_dtype = args[0].dtype
    if any(a.dtype != first_dtype for a in args[1:]):
        return False
    if any(a.dtype not in _BLAS_DTYPES for a in args):
        return False
    f_contig = all(a.flags.f_contiguous for a in args)
    c_contig = all(a.flags.c_contiguous for a in args)
    if not (f_contig or c_contig):
        return False
    return all(a.size <= np.iinfo('int32').max for a in args)
Whether BLAS routines can be applied or not. BLAS routines are available for single and double precision float or complex data only. If the arrays are non-contiguous, BLAS methods are usually slower, and array-writing routines do not work at all. Hence, only contiguous arrays are allowed. Parameters ---------- x1,...,xN : `NumpyTensor` The tensors to be tested for BLAS conformity. Returns ------- blas_is_applicable : bool ``True`` if all mentioned requirements are met, ``False`` otherwise.
def rename(self, oldpath, newpath):
    """Rename *oldpath* to *newpath* using the standard SFTP RENAME request.

    :raises: ``IOError`` -- if ``newpath`` is a folder, or something else
        goes wrong
    """
    src = self._adjust_cwd(oldpath)
    dst = self._adjust_cwd(newpath)
    self._log(DEBUG, "rename({!r}, {!r})".format(src, dst))
    self._request(CMD_RENAME, src, dst)
Rename a file or folder from ``oldpath`` to ``newpath``. .. note:: This method implements 'standard' SFTP ``RENAME`` behavior; those seeking the OpenSSH "POSIX rename" extension behavior should use `posix_rename`. :param str oldpath: existing name of the file or folder :param str newpath: new name for the file or folder, must not exist already :raises: ``IOError`` -- if ``newpath`` is a folder, or something else goes wrong
def values(self):
    """Return the user-entered y-axis bounds for the raster points.

    :returns: (float, float) -- (min, max) values from the spin boxes
    """
    low = float(self.lowerSpnbx.value())
    high = float(self.upperSpnbx.value())
    return low, high
Gets the user-entered max and min values bounding where the raster points should appear on the y-axis :returns: (float, float) -- (min, max) y-values to bound the raster plot by
def is_clustered(self):
    """Return True if the VG is clustered, False otherwise.

    The VG handle is opened for the query and always closed again,
    even if the liblvm call raises.
    """
    self.open()
    try:
        clust = lvm_vg_is_clustered(self.handle)
    finally:
        # Original leaked the open handle when lvm_vg_is_clustered raised.
        self.close()
    return bool(clust)
Returns True if the VG is clustered, False otherwise.
def exists(project, credentials):
    """Check if the project exists (its directory is present) for the
    user identified by ``credentials``.

    :param project: project name
    :param credentials: credentials parseable by ``parsecredentials``
    :return: bool
    """
    # The access token is unpacked but not needed for this check.
    user, oauth_access_token = parsecredentials(credentials)
    printdebug("Checking if project " + project + " exists for " + user)
    return os.path.isdir(Project.path(project, user))
Check if the project exists
def deferral():
    """Generator context manager that defers calls until exit, LIFO,
    like Go's ``defer``.

    ::

       with deferral() as defer:
           sys.setprofile(f)
           defer(sys.setprofile, None)
           # do something.
    """
    pending = []

    def schedule(fn, *args, **kwargs):
        pending.append((fn, args, kwargs))

    try:
        yield schedule
    finally:
        # Run deferred calls in reverse registration order.
        while pending:
            fn, args, kwargs = pending.pop()
            fn(*args, **kwargs)
Defers function calls until the block exits, executing them in LIFO order, like Go's ``defer``. :: with deferral() as defer: sys.setprofile(f) defer(sys.setprofile, None) # do something.
def update_stream(self, data):
    """Update the group's stream from a received payload.

    :param data: mapping carrying the new ``'stream_id'``
    """
    self._group['stream_id'] = data['stream_id']
    # Notify listeners that the group state changed.
    self.callback()
    _LOGGER.info('updated stream to %s on %s', self.stream, self.friendly_name)
Update stream.
def resolve_code_path(cwd, codeuri):
    """Return the absolute path to the function code.

    ``codeuri`` is resolved against ``cwd``; when ``cwd`` is empty or is
    the "present directory" marker, the process's working directory is
    used instead. An already-absolute ``codeuri`` is returned unchanged.

    Parameters
    ----------
    cwd : str
        Current working directory
    codeuri : str
        Path to the function code

    Returns
    -------
    str
        Absolute path to the function code
    """
    LOG.debug("Resolving code path. Cwd=%s, CodeUri=%s", cwd, codeuri)
    base = cwd
    if not base or base == PRESENT_DIR:
        base = os.getcwd()
    base = os.path.abspath(base)
    if os.path.isabs(codeuri):
        return codeuri
    return os.path.normpath(os.path.join(base, codeuri))
Returns path to the function code resolved based on current working directory. Parameters ---------- cwd str Current working directory codeuri CodeURI of the function. This should contain the path to the function code Returns ------- str Absolute path to the function code
def validate_IRkernel(venv_dir):
    """Validate that this env contains an IRkernel kernel and return
    the information needed to start it.

    Returns:
        tuple (ARGV, language, resource_dir); ``([], None, None)`` when
        no R executable is found or probing IRkernel fails.
    """
    r_exe_name = find_exe(venv_dir, "R")
    if r_exe_name is None:
        return [], None, None
    import subprocess
    try:
        # Ask R where IRkernel keeps its kernelspec resources.
        print_resources = 'cat(as.character(system.file("kernelspec", package = "IRkernel")))'
        resources_dir_bytes = subprocess.check_output(
            [r_exe_name, '--slave', '-e', print_resources])
        resources_dir = resources_dir_bytes.decode(errors='ignore')
    except Exception:
        # R is present but running it / IRkernel failed: treat as "no kernel".
        # (Narrowed from a bare ``except:``; also dropped the unused,
        # misspelled ``ressources_dir`` binding.)
        return [], None, None
    argv = [r_exe_name, "--slave", "-e", "IRkernel::main()",
            "--args", "{connection_file}"]
    if not os.path.exists(resources_dir.strip()):
        # Fall back to the logos bundled alongside this module.
        resources_dir = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "logos", "r")
    return argv, "r", resources_dir
Validates that this env contains an IRkernel kernel and returns info to start it Returns: tuple (ARGV, language, resource_dir)
async def destroy_unit(self, *unit_names):
    """Destroy units by name.

    :param unit_names: one or more unit names to destroy
    :return: result of the ``DestroyUnits`` facade call
    """
    connection = self.connection()
    app_facade = client.ApplicationFacade.from_connection(connection)
    log.debug(
        'Destroying unit%s %s',
        # Pluralize only for multiple units; the original condition was
        # inverted ('s' was appended when exactly one unit was given).
        's' if len(unit_names) > 1 else '',
        ' '.join(unit_names))
    return await app_facade.DestroyUnits(list(unit_names))
Destroy units by name.
def qteStopRecordingHook(self, msgObj):
    """Stop macro recording.

    Disconnects this recorder from the event-handler signals; does
    nothing when no recording is in progress.

    :param msgObj: message object delivered with the signal (unused here)
    """
    if self.qteRecording:
        self.qteRecording = False
        self.qteMain.qteStatus('Macro recording stopped')
        # Stop receiving key events and abort notifications.
        self.qteMain.qtesigKeyparsed.disconnect(self.qteKeyPress)
        self.qteMain.qtesigAbort.disconnect(self.qteStopRecordingHook)
Stop macro recording. The signals from the event handler are disconnected and the event handler policy set to default.
def _task_batcher(tasks, batch_size=None): from itertools import izip_longest if not batch_size: batch_size = DEFAULT_TASK_BATCH_SIZE batch_size = min(batch_size, 100) args = [iter(tasks)] * batch_size return ([task for task in group if task] for group in izip_longest(*args))
Batches large task lists into groups of 100 so that they can all be inserted.
def create(self, body=None, raise_exc=True, headers=None, **kwargs):
    """Perform an HTTP POST to the server to create a subordinate
    resource.

    :param body: request body, either a string or a dict representing json
    :param raise_exc: whether HTTP errors should raise an exception
    :param headers: additional headers to send with the request
    :return: whatever ``self._request`` returns (a navigator for the
        created resource)
    """
    return self._request(POST, body, raise_exc, headers, **kwargs)
Performs an HTTP POST to the server, to create a subordinate resource. Returns a new HALNavigator representing that resource. `body` may either be a string or a dictionary representing json `headers` are additional headers to send in the request
def paginate(data: typing.Iterable, page: int = 0, limit: int = 10) -> typing.Iterable:
    """Slice ``data`` down to the items belonging to one page.

    :param data: any sliceable object
    :param page: zero-based page number
    :param limit: items per page
    :return: the slice covering the requested page
    """
    start = page * limit
    stop = start + limit
    return data[start:stop]
Slice data over pages :param data: any iterable object :type data: :obj:`typing.Iterable` :param page: number of page :type page: :obj:`int` :param limit: items per page :type limit: :obj:`int` :return: sliced object :rtype: :obj:`typing.Iterable`
async def add_relation(self, local_relation, remote_relation):
    """Add a relation to another application.

    :param str local_relation: Name of relation on this application;
        prefixed with this application's name when no endpoint is given
    :param str remote_relation: Name of relation on the other application
        in the form '<application>[:<relation_name>]'
    """
    endpoint = local_relation
    if ':' not in endpoint:
        endpoint = '{}:{}'.format(self.name, endpoint)
    return await self.model.add_relation(endpoint, remote_relation)
Add a relation to another application. :param str local_relation: Name of relation on this application :param str remote_relation: Name of relation on the other application in the form '<application>[:<relation_name>]'
def queryFilter(self, function=None):
    """Register a query-filter function, directly or as a decorator.

    The registered callable receives a query and returns a modified
    query to use instead.

    :usage

        class MyModel(orb.Model):
            objects = orb.ReverseLookup('Object')

            @classmethod
            @objects.queryFilter()
            def objectsFilter(cls, query, **context):
                return orb.Query()

    :param function: <callable> or None
    :return: the callable when given directly, otherwise a decorator
    """
    if function is not None:
        self.__query_filter = function
        return function

    def decorator(fn):
        self.__query_filter = fn
        return fn

    return decorator
Defines a decorator that can be used to filter queries. The function associated with the decorator is assumed to take a query as input and return a modified query to use. :usage class MyModel(orb.Model): objects = orb.ReverseLookup('Object') @classmethod @objects.queryFilter() def objectsFilter(cls, query, **context): return orb.Query() :param function: <callable> :return: <wrapper>
def delete(self):
    """Delete the instance from the CLIPS environment.

    :raises CLIPSError: if the underlying EnvDeleteInstance call
        does not report success (return value != 1).
    """
    if lib.EnvDeleteInstance(self._env, self._ist) != 1:
        raise CLIPSError(self._env)
Delete the instance.
def getDiscountAmount(self):
    """Compute the analysis service's discount amount, VAT excluded.

    Returns 0 when the client has no member discount.
    """
    if not self.aq_parent.getMemberDiscountApplies():
        return 0
    rate = Decimal(self.getDefaultMemberDiscount())
    return Decimal(self.getSubtotal() * rate / 100)
It computes and returns the analysis service's discount amount without VAT
def _getEventFromUid(self, request, uid):
    """Return the child event with the given UID, or None when the
    event found is not a descendant of this page."""
    event = getEventFromUid(request, uid)
    if not event.get_ancestors().filter(id=self.id).exists():
        return None
    return event
Try and find a child event with the given UID.
def add_record_set(self, record_set):
    """Append a record set to the 'additions' for the change set.

    :type record_set:
        :class:`google.cloud.dns.resource_record_set.ResourceRecordSet`
    :param record_set: the record set to append.

    :raises: ``ValueError`` if ``record_set`` is not of the required type.
    """
    if not isinstance(record_set, ResourceRecordSet):
        raise ValueError("Pass a ResourceRecordSet")
    # _additions is an immutable tuple; rebuild it with the new entry.
    self._additions += (record_set,)
Append a record set to the 'additions' for the change set. :type record_set: :class:`google.cloud.dns.resource_record_set.ResourceRecordSet` :param record_set: the record set to append. :raises: ``ValueError`` if ``record_set`` is not of the required type.
def setImageMode(self):
    """Derive the image mode string from the server's pixel format.

    Sets ``self.image_mode`` to "BGR;16" for the special-cased server
    version, to a channel-order string (e.g. "RGBX") for little-endian
    24-bit truecolor, or falls back to requesting a default pixel
    format from the server.
    """
    if self._version_server == 3.889:
        # Special-cased server version: force a 16-bpp 5-6-5 truecolor
        # format (red 5 bits, green 6, blue 5).
        self.setPixelFormat(
            bpp = 16, depth = 16, bigendian = 0, truecolor = 1,
            redmax = 31, greenmax = 63, bluemax = 31,
            redshift = 11, greenshift = 5, blueshift = 0
        )
        self.image_mode = "BGR;16"
    elif (self.truecolor and (not self.bigendian) and self.depth == 24
          and self.redmax == 255 and self.greenmax == 255 and self.bluemax == 255):
        # Little-endian truecolor with 8 bits per channel: place R/G/B
        # by their byte offsets (shift // 8); remaining bytes stay "X"
        # padding (e.g. 32 bpp yields "RGBX"-style modes).
        pixel = ["X"] * self.bypp
        offsets = [offset // 8 for offset in
                   (self.redshift, self.greenshift, self.blueshift)]
        for offset, color in zip(offsets, "RGB"):
            pixel[offset] = color
        self.image_mode = "".join(pixel)
    else:
        # Unsupported native format: ask the server for its default
        # pixel format instead.
        self.setPixelFormat()
Extracts color ordering and 24 vs. 32 bpp info out of the pixel format information
def load_rule_definitions(self, ruleset_generator=False, rule_dirs=None):
    """Load the definitions of the rules declared in the ruleset.

    :param ruleset_generator: when True, include disabled rules and also
        load every rule file found in the rule directories (including
        the bundled ``data/findings`` rules).
    :param rule_dirs: optional list of extra directories to search for
        rule files.
    """
    # FIX: the original used a mutable default (``rule_dirs=[]``) and
    # appended to it below, so repeated calls shared and grew the list.
    # Copy the caller's list so it is never mutated either.
    rule_dirs = list(rule_dirs) if rule_dirs else []
    self.rule_definitions = {}
    for rule_filename in self.rules:
        for rule in self.rules[rule_filename]:
            # Skip disabled rules unless generating a full ruleset.
            if not rule.enabled and not ruleset_generator:
                continue
            self.rule_definitions[os.path.basename(rule_filename)] = \
                RuleDefinition(rule_filename, rule_dirs=rule_dirs)
    if ruleset_generator:
        rule_dirs.append(os.path.join(
            os.path.dirname(os.path.realpath(__file__)), 'data/findings'))
        rule_filenames = []
        for rule_dir in rule_dirs:
            rule_filenames += [f for f in os.listdir(rule_dir)
                               if os.path.isfile(os.path.join(rule_dir, f))]
        for rule_filename in rule_filenames:
            if rule_filename not in self.rule_definitions:
                self.rule_definitions[os.path.basename(rule_filename)] = \
                    RuleDefinition(rule_filename)
Load the definitions of the rules declared in the ruleset :param ruleset_generator: whether to also load every available rule file, including disabled rules :param rule_dirs: extra directories to search for rule files :return:
def _fetch(self, params, required, defaults):
    """Make the NVP request against PayPal and store the response.

    :param params: caller-supplied NVP parameters; override ``defaults``
    :param required: names of parameters that must be present
    :param defaults: base parameters for this API call
    :return: the saved PayPalNVP model instance
    """
    defaults.update(params)  # NOTE(review): mutates the caller's dict
    pp_params = self._check_and_update_params(required, defaults)
    pp_string = self.signature + urlencode(pp_params)
    response = self._request(pp_string)
    response_params = self._parse_response(response)
    log.debug('PayPal Request:\n%s\n', pprint.pformat(defaults))
    log.debug('PayPal Response:\n%s\n', pprint.pformat(response_params))
    # Keep only fields the PayPalNVP model knows about; response values
    # take precedence over the request parameters.
    nvp_params = {}
    tmpd = defaults.copy()
    tmpd.update(response_params)
    for k, v in tmpd.items():
        if k in self.NVP_FIELDS:
            nvp_params[str(k)] = v
    # Convert PayPal's timestamp string into a datetime for the model.
    if 'timestamp' in nvp_params:
        nvp_params['timestamp'] = paypaltime2datetime(nvp_params['timestamp'])
    nvp_obj = PayPalNVP(**nvp_params)
    nvp_obj.init(self.request, params, response_params)
    nvp_obj.save()
    return nvp_obj
Make the NVP request and store the response.
def aead_filename(aead_dir, key_handle, public_id):
    """Return the filename of the AEAD for this public_id, creating any
    missing directories along the way.

    The directory layout nests two-character groups of ``public_id``
    under ``aead_dir/key_handle``.
    """
    directory = os.path.join(aead_dir, key_handle,
                             *pyhsm.util.group(public_id, 2))
    if not os.path.isdir(directory):
        os.makedirs(directory)
    return os.path.join(directory, public_id)
Return the filename of the AEAD for this public_id, and create any missing directories.
def start(self, build_requests=None, callback=None): if callback: self.callback = callback if build_requests: self.build_requests = build_requests self.sw = threading.Thread(target=self.run) self.sw.start()
Run the client using a background thread.
def get_data(model, instance_id, kind=''):
    """Get instance data by id.

    :param model: a string, model name in rio.models
    :param instance_id: an integer, instance id
    :param kind: a string selecting which dict transformer to apply
    :return: the instance rendered as a dict, or None when not found
    """
    instance = get_instance(model, instance_id)
    if not instance:
        return None
    return ins2dict(instance, kind)
Get instance data by id. :param model: a string, model name in rio.models :param id: an integer, instance id. :param kind: a string specified which kind of dict tranformer should be called. :return: data.
def tableexists(tablename):
    """Test if a table exists.

    :param tablename: name/path of the table to probe
    :return: True when the table can be opened, False otherwise
    """
    try:
        # Opening the table is the existence test; the handle is discarded.
        table(tablename, ack=False)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt and
        # SystemExit are no longer swallowed.
        return False
    return True
Test if a table exists.
def sendRequest(self, name, args):
    """Send a request to the peer.

    :param name: method name to invoke remotely
    :param args: parameters for the remote method
    :return: the response event the caller can wait on
    """
    response_event, request_id = self.newResponseEvent()
    self.sendMessage({"id": request_id, "method": name, "params": args})
    return response_event
sends a request to the peer
def load(self, name, location='local'):
    """Load saved data from the cache directory.

    Tries a ``.json`` file first, then a ``.pkl`` file; returns an
    empty dict when neither exists.
    """
    for ext, loader in (('.json', _load_json), ('.pkl', _load_pickle)):
        path = self._get_path(name, location, file_ext=ext)
        if op.exists(path):
            return loader(path)
    logger.debug("The file `%s` doesn't exist.", path)
    return {}
Load saved data from the cache directory.
def load_configuration(app_name):
    """Create a new configuration by loading the appropriate files.

    Looks for ``<app_name>.yml`` in the current directory, the user's
    home directory, and the prefix/system config directory (first match
    wins), then resolves the data directory from the config's
    ``paths.data_dir``, the ``<APP_NAME>`` environment variable, or the
    share directory, in that order.

    :return: an AppContext built from the parsed config and data dir
    """
    if sys.prefix == '/usr':
        conf_dir = '/etc'
        share_dir = '/usr/share'
    else:
        conf_dir = os.path.join(sys.prefix, 'etc')
        share_dir = os.path.join(sys.prefix, 'share')
    yml_config = {}
    candidates = [
        '%s.yml' % (app_name,),
        os.path.expanduser('~/.%s.yml' % (app_name,)),
        os.path.join(conf_dir, '%s.yml' % (app_name,)),
    ]
    for fname in candidates:
        if os.path.exists(fname):
            # safe_load: plain yaml.load without a Loader can construct
            # arbitrary Python objects from tagged input; config files
            # only need plain YAML. Also close the file handle, which
            # the original open() call leaked.
            with open(fname) as fp:
                yml_config = yaml.safe_load(fp)
            break
    try:
        data_dir = yml_config['paths']['data_dir']
    except KeyError:
        try:
            data_dir = os.environ[app_name.upper()]
        except KeyError:
            data_dir = os.path.join(share_dir, app_name)
    return AppContext(yml_config, data_dir)
Creates a new configuration and loads the appropriate files.
def Throughput(self):
    """Combined throughput from multiplying all the components together.

    Returns
    -------
    throughput : `~pysynphot.spectrum.TabularSpectralElement` or `None`
        Combined throughput; `None` when the multiplication raises
        IndexError (presumably when there are no components — confirm
        against ``_multiplyThroughputs``).
    """
    try:
        throughput = spectrum.TabularSpectralElement()
        # Multiply every component's throughput, starting at index 0,
        # then sample the product onto its own wave set.
        product = self._multiplyThroughputs(0)
        throughput._wavetable = product.GetWaveSet()
        throughput._throughputtable = product(throughput._wavetable)
        throughput.waveunits = product.waveunits
        # Human-readable name: component names joined with '*'.
        throughput.name = '*'.join([str(x) for x in self.components])
        return throughput
    except IndexError:
        return None
Combined throughput from multiplying all the components together. Returns ------- throughput : `~pysynphot.spectrum.TabularSpectralElement` or `None` Combined throughput.
def handle_abort(self, reason):
    """Handle an abort from the peer.

    Delivers ``reason`` to whoever is blocked on the welcome queue,
    then closes and disconnects.
    """
    self._welcome_queue.put(reason)
    self.close()
    self.disconnect()
Handle an abort notification from the peer: deliver the reason to the waiter, then shut the connection down.