code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def get_ranges_from_array(arr, append_last=True):
    """Build consecutive [start, stop) pairs from *arr*.

    Parameters
    ----------
    arr : array like
    append_last : bool
        If True, append a final pair of (last item, None) so the result
        has the same length as *arr*.

    Returns
    -------
    numpy.array
        2-column array of (start, stop) pairs.
    """
    if append_last:
        starts = arr[:]
        # Appending None promotes the stops column to object dtype.
        stops = np.append(arr[1:], None)
    else:
        starts = arr[:-1]
        stops = arr[1:]
    return np.column_stack((starts, stops))
Takes an array and calculates ranges [start, stop[. The last range end is none to keep the same length. Parameters ---------- arr : array like append_last: bool If True, append item with a pair of last array item and None. Returns ------- numpy.array The array formed by pairs of values by the given array. Example ------- >>> a = np.array((1,2,3,4)) >>> get_ranges_from_array(a, append_last=True) array([[1, 2], [2, 3], [3, 4], [4, None]]) >>> get_ranges_from_array(a, append_last=False) array([[1, 2], [2, 3], [3, 4]])
def predict(self, X):
    """Get the index of the grid cell containing each sample in X.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        New data

    Returns
    -------
    y : array, shape = [n_samples,]
        Index of the grid cell containing each sample
    """
    # Reject samples outside the grid's outer edges along any feature.
    if np.any(X < self.grid[:, 0]) or np.any(X > self.grid[:, -1]):
        raise ValueError('data out of min/max bounds')
    binassign = np.zeros((self.n_features, len(X)), dtype=int)
    for i in range(self.n_features):
        # np.digitize returns 1-based bin indices; shift to 0-based.
        binassign[i] = np.digitize(X[:, i], self.grid[i]) - 1
    # Combine the per-feature bin indices into one flat cell label
    # (mixed-radix encoding with n_bins_per_feature as the base).
    labels = np.dot(self.n_bins_per_feature ** np.arange(self.n_features), binassign)
    assert np.max(labels) < self.n_bins
    return labels
Get the index of the grid cell containing each sample in X Parameters ---------- X : array-like, shape = [n_samples, n_features] New data Returns ------- y : array, shape = [n_samples,] Index of the grid cell containing each sample
def series2df(Series, layer=2, split_sign='_'):
    """Expand a Series of JSON-formatted strings (or dicts) into a DataFrame.

    Each row is expected to hold string-formatted JSON data with the same
    structure; rows are flattened via ``flatten_dict``. Values that cannot
    be parsed or flattened are kept as-is.

    :param Series: a pandas Series (a single-column DataFrame is accepted)
    :param layer: number of nesting layers to flatten
    :param split_sign: separator used when joining nested keys
    :return: a pandas DataFrame with one column per flattened key
    """
    try:
        # Accept a single-column DataFrame by reducing it to its first column.
        Series.columns
        Series = Series.iloc[:, 0]
    except AttributeError:
        pass

    # BUG FIX: the inner helper previously hard-coded layer=2, silently
    # ignoring the caller-supplied `layer`; bind the outer value instead.
    def _helper(x, layer=layer):
        # First assume *x* is a string holding a JSON/Python literal.
        try:
            return flatten_dict(ast.literal_eval(x), layers=layer, split_sign=split_sign)
        except Exception:
            # Fall back to treating *x* as an already-parsed mapping.
            try:
                return flatten_dict(x, layers=layer, split_sign=split_sign)
            except Exception:
                return x

    df = pd.DataFrame(Series.apply(_helper).tolist())
    return df
Expects a series in which each row is a string of JSON-formatted data with the same structure
def get_times(self):
    """Return all the action times and times-1e-6 in a list.

    Returns an empty list when there are no actions (``self.n`` falsy).
    """
    if not self.n:
        return []
    # Collect the element-wise products of every (u, t) pair.
    self.times = list(mul(self.u1, self.t1)) + \
        list(mul(self.u2, self.t2)) + \
        list(mul(self.u3, self.t3)) + \
        list(mul(self.u4, self.t4))
    # Deduplicate, then also include each time shifted back by 1e-6
    # (the subtraction broadcasts over the matrix before listing).
    self.times = matrix(list(set(self.times)))
    self.times = list(self.times) + list(self.times - 1e-6)
    return self.times
Return all the action times and times-1e-6 in a list
def pairwise(iterable):
    """Pair each element with its successor.

    Yields tuples ``(a, b)`` for each adjacent pair of elements; an
    iterable with fewer than two items yields nothing.

    BUG FIX: a bare ``next()`` on an empty iterable raises StopIteration
    inside the generator, which PEP 479 turns into a RuntimeError; guard
    it so an empty input simply produces an empty generator.
    """
    iterator = iter(iterable)
    try:
        left = next(iterator)
    except StopIteration:
        return
    for right in iterator:
        yield left, right
        left = right
Pair each element with its neighbors. Arguments --------- iterable : iterable Returns ------- The generator produces a tuple containing a pairing of each element with its neighbor.
def verify_dependencies(self):
    """Check dependency ordering.

    Checks that no analysis is requested before a transformation, as
    the transformation could invalidate the analysis.
    """
    for i in range(1, len(self.deps)):
        # A Transformation immediately following an Analysis is invalid.
        assert(not (isinstance(self.deps[i], Transformation) and
                    isinstance(self.deps[i - 1], Analysis))
               ), "invalid dep order for %s" % self
Checks that no analysis is called before a transformation, as the transformation could invalidate the analysis.
def route(obj, rule, *args, **kwargs):
    """Decorator for View classes: registers the class as a view on *obj*.

    :param obj: app or blueprint exposing ``add_url_rule``
    :param rule: URL rule string
    """
    def decorator(cls):
        # Default endpoint: snake_case version of the class name.
        endpoint = kwargs.get('endpoint', camel_to_snake(cls.__name__))
        kwargs['view_func'] = cls.as_view(endpoint)
        obj.add_url_rule(rule, *args, **kwargs)
        return cls
    return decorator
Decorator for the View classes.
def print_plugins(folders, exit_code=0):
    """Print available plugins and exit.

    :param folders: folders to scan for plugin modules
    :param exit_code: process exit status (default 0)
    """
    modules = plugins.get_plugin_modules(folders)
    pluginclasses = sorted(plugins.get_plugin_classes(modules), key=lambda x: x.__name__)
    for pluginclass in pluginclasses:
        print(pluginclass.__name__)
        # Show each plugin's docstring wrapped and indented under its name.
        doc = strformat.wrap(pluginclass.__doc__, 80)
        print(strformat.indent(doc))
        print()
    sys.exit(exit_code)
Print available plugins and exit.
def marginalization_bins(self):
    """Binning to use to do the marginalization integrals.

    Returns a 1001-point logarithmic grid spanning one decade on either
    side of the distribution mean, scaled by ``self._j_ref``.
    """
    log_mean = np.log10(self.mean())
    return np.logspace(-1. + log_mean, 1. + log_mean, 1001)/self._j_ref
Binning to use to do the marginalization integrals
def GetMessages(self, files):
    """Gets all the messages from the specified files.

    This will find and resolve dependencies, failing if the descriptor
    pool cannot satisfy them.

    Args:
      files: The file names to extract messages from.

    Returns:
      A dictionary mapping proto names to the message classes, including
      any dependent messages defined in the same files.
    """
    result = {}
    for file_name in files:
        file_desc = self.pool.FindFileByName(file_name)
        for desc in file_desc.message_types_by_name.values():
            result[desc.full_name] = self.GetPrototype(desc)
        for extension in file_desc.extensions_by_name.values():
            # Ensure the extended message class exists before registering
            # the extension on it.
            if extension.containing_type.full_name not in self._classes:
                self.GetPrototype(extension.containing_type)
            extended_class = self._classes[extension.containing_type.full_name]
            extended_class.RegisterExtension(extension)
    return result
Gets all the messages from a specified file. This will find and resolve dependencies, failing if the descriptor pool cannot satisfy them. Args: files: The file names to extract messages from. Returns: A dictionary mapping proto names to the message classes. This will include any dependent messages as well as any messages defined in the same file as a specified message.
def clone_wire(old_wire, name=None):
    """Makes a copy of any existing wire.

    :param old_wire: The wire to clone
    :param name: a name for the new wire

    Note that this function is mainly intended to be used when the two
    wires are from different blocks; making two wires with the same name
    in the same block is not allowed.
    """
    if isinstance(old_wire, Const):
        # Const wires carry a value rather than a name.
        return Const(old_wire.val, old_wire.bitwidth)
    else:
        if name is None:
            # Keep the original name when no new one is given.
            return old_wire.__class__(old_wire.bitwidth, name=old_wire.name)
        return old_wire.__class__(old_wire.bitwidth, name=name)
Makes a copy of any existing wire :param old_wire: The wire to clone :param name: a name for the new wire Note that this function is mainly intended to be used when the two wires are from different blocks. Making two wires with the same name in the same block is not allowed
def sequence_to_string(
    a_list,
    open_bracket_char='[',
    close_bracket_char=']',
    delimiter=", "
):
    """Turn a list into a delimited string of converted items.

    Each element is converted with ``local_to_str`` (flattening nested
    lists) and joined with *delimiter* between the bracket characters.
    """
    joined_items = delimiter.join(local_to_str(element) for element in a_list)
    return "{}{}{}".format(open_bracket_char, joined_items, close_bracket_char)
a dedicated function that turns a list into a comma delimited string of items converted. This method will flatten nested lists.
def check_dist_restriction(options, check_target=False):
    """Function for determining if custom platform options are allowed.

    :param options: The OptionParser options.
    :param check_target: Whether or not to check if --target is being used.
    """
    dist_restriction_set = any([
        options.python_version,
        options.platform,
        options.abi,
        options.implementation,
    ])
    binary_only = FormatControl(set(), {':all:'})
    # Installing sdists requires building on the local interpreter, which
    # may not honor the requested platform/interpreter restrictions.
    sdist_dependencies_allowed = (
        options.format_control != binary_only and
        not options.ignore_dependencies
    )
    if dist_restriction_set and sdist_dependencies_allowed:
        raise CommandError(
            "When restricting platform and interpreter constraints using "
            "--python-version, --platform, --abi, or --implementation, "
            "either --no-deps must be set, or --only-binary=:all: must be "
            "set and --no-binary must not be set (or must be set to "
            ":none:)."
        )
    if check_target:
        if dist_restriction_set and not options.target_dir:
            raise CommandError(
                "Can not use any platform or abi specific options unless "
                "installing via '--target'"
            )
Function for determining if custom platform options are allowed. :param options: The OptionParser options. :param check_target: Whether or not to check if --target is being used.
def _GetDirectory(self):
    """Retrieves the directory.

    Returns:
      LVMDirectory: a directory or None if not available.
    """
    # Only file entries of directory type have a directory object.
    if self.entry_type != definitions.FILE_ENTRY_TYPE_DIRECTORY:
        return None
    return LVMDirectory(self._file_system, self.path_spec)
Retrieves the directory. Returns: LVMDirectory: a directory or None if not available.
def get(self, name: Text, final: C) -> C:
    """Get the function to call which will run all middlewares.

    :param name: Name of the function to be called
    :param final: Function to call at the bottom of the stack (the one
        provided by the implementer).
    :return: a callable that walks the middleware stack down to *final*.
    """
    return Caller(self, name, final)
Get the function to call which will run all middlewares. :param name: Name of the function to be called :param final: Function to call at the bottom of the stack (that's the one provided by the implementer). :return:
def refractory(times, refract=0.002): times_refract = [] times_refract.append(times[0]) for i in range(1,len(times)): if times_refract[-1]+refract <= times[i]: times_refract.append(times[i]) return times_refract
Removes spikes in times list that do not satisfy the refractory period :param times: list(float) of spike times in seconds :type times: list(float) :param refract: Refractory period in seconds :type refract: float :returns: list(float) of spike times in seconds For every interspike interval < refract, removes the second spike time in list and returns the result
def _get_resource_hash(zone_name, record):
    """Return a unique resource ID derived from the record's fields.

    Missing fields default to ``0`` via ``defaultdict``.

    Args:
        zone_name (`str`): The name of the DNS Zone the record belongs to
        record (`dict`): A record dict to generate the hash from

    Returns:
        `str`
    """
    record_data = defaultdict(int, record)
    if type(record_data['GeoLocation']) == dict:
        # Flatten the GeoLocation dict into a "k=v:k=v" string.
        # NOTE(review): keys are not sorted here; ordering relies on dict
        # insertion order — confirm stability across record sources.
        record_data['GeoLocation'] = ":".join(["{}={}".format(k, v) for k, v in record_data['GeoLocation'].items()])
    args = [
        zone_name,
        record_data['Name'],
        record_data['Type'],
        record_data['Weight'],
        record_data['Region'],
        record_data['GeoLocation'],
        record_data['Failover'],
        record_data['HealthCheckId'],
        record_data['TrafficPolicyInstanceId']
    ]
    return get_resource_id('r53r', args)
Returns the last ten digits of the sha256 hash of the combined arguments. Useful for generating unique resource IDs Args: zone_name (`str`): The name of the DNS Zone the record belongs to record (`dict`): A record dict to generate the hash from Returns: `str`
def open(self, mode):
    """Open the underlying .hdf5 file and the parent, if any.

    :param mode: file mode passed to ``h5py.File`` (e.g. 'r', 'r+')

    No-op if the file is already open (the closed sentinel is ``()``).
    """
    if self.hdf5 == ():  # not already open
        kw = dict(mode=mode, libver='latest')
        if mode == 'r':
            # Single-writer/multiple-reader mode for concurrent readers.
            kw['swmr'] = True
        try:
            self.hdf5 = hdf5.File(self.filename, **kw)
        except OSError as exc:
            # BUG FIX: re-raise with the offending filename while
            # preserving the original exception as the cause.
            raise OSError('%s in %s' % (exc, self.filename)) from exc
Open the underlying .hdf5 file and the parent, if any
def filter_using_summary(fq, args):
    """Use quality scores from the albacore summary file for filtering.

    Builds a readID -> quality mapping from the summary (via nanoget's
    process_summary), then prints records from *fq* passing the quality
    and length filters, cropped by head/tail. Exits with an error when a
    readID is missing from the summary.
    """
    # Map readIDs to their summary quality scores.
    data = {entry[0]: entry[1] for entry in process_summary(
        summaryfile=args.summary,
        threads="NA",
        readtype=args.readtype,
        barcoded=False)[
        ["readIDs", "quals"]].itertuples(index=False)}
    try:
        for record in SeqIO.parse(fq, "fastq"):
            if data[record.id] > args.quality \
                    and args.length <= len(record) <= args.maxlength:
                # Crop and emit the passing record in fastq format.
                print(record[args.headcrop:args.tailcrop].format("fastq"), end="")
    except KeyError:
        logging.error("mismatch between summary and fastq: \
{} was not found in the summary file.".format(record.id))
        sys.exit('\nERROR: mismatch between sequencing_summary and fastq file: \
{} was not found in the summary file.\nQuitting.'.format(record.id))
Use quality scores from albacore summary file for filtering Use the summary file from albacore for more accurate quality estimate Get the dataframe from nanoget, convert to dictionary
def rollback(self) -> None:
    """Roll back to the previous database state.

    Stays inside transaction management. Raises RuntimeError when called
    outside a transaction, and RollbackError if any rollback action
    itself fails.
    """
    if len(self._transactions) == 0:
        raise RuntimeError("rollback called outside transaction")
    _debug("rollback:", self._transactions[-1])
    try:
        # Replay the recorded undo actions of the innermost transaction.
        for on_rollback in self._transactions[-1]:
            _debug("--> rolling back", on_rollback)
            self._do_with_retry(on_rollback)
    except:
        _debug("--> rollback failed")
        exc_class, exc, tb = sys.exc_info()
        raise tldap.exceptions.RollbackError(
            "FATAL Unrecoverable rollback error: %r" % exc)
    finally:
        # NOTE(review): this finally block also runs on failure, so
        # "rollback success" is logged (and reset() called) even when
        # RollbackError was raised — confirm that is intended.
        _debug("--> rollback success")
        self.reset()
Roll back to previous database state. However stay inside transaction management.
def make_sh_output(value, output_script, witness=False):
    """Build a pay-to-script-hash TxOut. int, str -> TxOut

    :param value: output value, serialized as 8-byte little-endian
    :param output_script: script hashed into the output script
    :param witness: if True, build the segwit (P2WSH) form
    """
    return _make_output(
        value=utils.i2le_padded(value, 8),
        output_script=make_sh_output_script(output_script, witness))
int, str -> TxOut
def are_equivalent_pyxb(a_pyxb, b_pyxb, ignore_timestamps=False):
    """Determine if SystemMetadata PyXB objects are semantically equivalent.

    Both objects are normalized in place, serialized to XML and compared.

    Args:
      a_pyxb, b_pyxb : SystemMetadata PyXB objects to compare
      ignore_timestamps: bool
        ``True``: timestamps are set to a standard value during
        normalization so objects differing only in timestamps compare
        as equivalent.

    Returns:
      bool: **True** if the objects are semantically equivalent.
    """
    normalize_in_place(a_pyxb, ignore_timestamps)
    normalize_in_place(b_pyxb, ignore_timestamps)
    a_xml = d1_common.xml.serialize_to_xml_str(a_pyxb)
    b_xml = d1_common.xml.serialize_to_xml_str(b_pyxb)
    are_equivalent = d1_common.xml.are_equivalent(a_xml, b_xml)
    if not are_equivalent:
        # Log a diff to aid debugging of non-equivalent documents.
        logger.debug('XML documents not equivalent:')
        logger.debug(d1_common.xml.format_diff_xml(a_xml, b_xml))
    return are_equivalent
Determine if SystemMetadata PyXB objects are semantically equivalent. Normalize then compare SystemMetadata PyXB objects for equivalency. Args: a_pyxb, b_pyxb : SystemMetadata PyXB objects to compare ignore_timestamps: bool ``True``: Timestamps in the SystemMetadata are set to a standard value so that objects that are compared after normalization register as equivalent if only their timestamps differ. Returns: bool: **True** if SystemMetadata PyXB objects are semantically equivalent. Notes: The SystemMetadata is normalized by removing any redundant information and ordering all sections where there are no semantics associated with the order. The normalized SystemMetadata is intended to be semantically equivalent to the un-normalized one.
def addJunctionPos(shape, fromPos, toPos):
    """Extend *shape* with the given positions when they differ from its
    existing endpoints.

    Assumes that shape and positions have the same dimensionality.
    Always returns a new list; *shape* itself is not modified.
    """
    extended = list(shape)
    if fromPos != shape[0]:
        extended.insert(0, fromPos)
    if toPos != shape[-1]:
        extended.append(toPos)
    return extended
Extends shape with the given positions in case they differ from the existing endpoints. assumes that shape and positions have the same dimensionality
def Async(f, n=None, timeout=None):
    """Wrap *f* so calls are submitted to the thread pool.

    Concise alternative to ``pool.submit``; equivalent to applying the
    ``threads(n=n, timeout=timeout)`` decorator to *f*.
    """
    return threads(n=n, timeout=timeout)(f)
Concise usage for pool.submit. Basic Usage Asnyc & threads :: from torequests.main import Async, threads import time def use_submit(i): time.sleep(i) result = 'use_submit: %s' % i print(result) return result @threads() def use_decorator(i): time.sleep(i) result = 'use_decorator: %s' % i print(result) return result new_use_submit = Async(use_submit) tasks = [new_use_submit(i) for i in (2, 1, 0) ] + [use_decorator(i) for i in (2, 1, 0)] print([type(i) for i in tasks]) results = [i.x for i in tasks] print(results) # use_submit: 0 # use_decorator: 0 # [<class 'torequests.main.NewFuture'>, <class 'torequests.main.NewFuture'>, <class 'torequests.main.NewFuture'>, <class 'torequests.main.NewFuture'>, <class 'torequests.main.NewFuture'>, <class 'torequests.main.NewFuture'>] # use_submit: 1 # use_decorator: 1 # use_submit: 2 # use_decorator: 2 # ['use_submit: 2', 'use_submit: 1', 'use_submit: 0', 'use_decorator: 2', 'use_decorator: 1', 'use_decorator: 0']
def service_create(name, service_type, description=None, profile=None, **connection_args):
    """Add service to the Keystone service catalog.

    CLI Examples:

    .. code-block:: bash

        salt '*' keystone.service_create nova compute \\
            'OpenStack Compute Service'
    """
    kstone = auth(profile, **connection_args)
    service = kstone.services.create(name, service_type, description=description)
    # Return the freshly created service via the standard getter.
    return service_get(service.id, profile=profile, **connection_args)
Add service to Keystone service catalog CLI Examples: .. code-block:: bash salt '*' keystone.service_create nova compute \ 'OpenStack Compute Service'
def read_losc_hdf5_state(f, path='quality/simple', start=None, end=None, copy=False):
    """Read a `StateVector` from a LOSC-format HDF file.

    Parameters
    ----------
    f : `str`, `h5py.HLObject`
        path of HDF5 file, or open `H5File`
    path : `str`
        path of HDF5 dataset to read
    start, end : GPS times, optional
        span of desired data
    copy : `bool`, default: `False`
        create a fresh-memory copy of the underlying array

    Returns
    -------
    data : the `StateVector` read from disk, cropped to [start, end)
    """
    dataset = io_hdf5.find_dataset(f, '%s/DQmask' % path)
    maskset = io_hdf5.find_dataset(f, '%s/DQDescriptions' % path)
    # Read the bitmask data and the per-bit descriptions.
    nddata = dataset[()]
    bits = [bytes.decode(bytes(b), 'utf-8') for b in maskset[()]]
    epoch = dataset.attrs['Xstart']
    try:
        dt = dataset.attrs['Xspacing']
    except KeyError:
        # No sample spacing recorded: assume 1 second.
        dt = Quantity(1, 's')
    else:
        xunit = parse_unit(dataset.attrs['Xunits'])
        dt = Quantity(dt, xunit)
    return StateVector(nddata, bits=bits, t0=epoch, name='Data quality',
                       dx=dt, copy=copy).crop(start=start, end=end)
Read a `StateVector` from a LOSC-format HDF file. Parameters ---------- f : `str`, `h5py.HLObject` path of HDF5 file, or open `H5File` path : `str` path of HDF5 dataset to read. start : `Time`, `~gwpy.time.LIGOTimeGPS`, optional start GPS time of desired data end : `Time`, `~gwpy.time.LIGOTimeGPS`, optional end GPS time of desired data copy : `bool`, default: `False` create a fresh-memory copy of the underlying array Returns ------- data : `~gwpy.timeseries.TimeSeries` a new `TimeSeries` containing the data read from disk
def nvrtcGetProgramLog(self, prog):
    """Returns the log for the NVRTC program object.

    Only useful after calls to nvrtcCompileProgram or nvrtcVerifyProgram.
    """
    # Query the log size first so a correctly sized buffer can be allocated.
    size = c_size_t()
    code = self._lib.nvrtcGetProgramLogSize(prog, byref(size))
    self._throw_on_error(code)
    buf = create_string_buffer(size.value)
    code = self._lib.nvrtcGetProgramLog(prog, buf)
    self._throw_on_error(code)
    return buf.value.decode('utf-8')
Returns the log for the NVRTC program object. Only useful after calls to nvrtcCompileProgram or nvrtcVerifyProgram.
def _folder_item_method(self, analysis_brain, item):
    """Fills the analysis' method into the item passed in.

    :param analysis_brain: Brain that represents an analysis
    :param item: analysis' dictionary counterpart that represents a row
    """
    is_editable = self.is_analysis_edition_allowed(analysis_brain)
    method_title = analysis_brain.getMethodTitle
    item['Method'] = method_title or ''
    if is_editable:
        method_vocabulary = self.get_methods_vocabulary(analysis_brain)
        if method_vocabulary:
            # Editable: render a selection list of available methods.
            item['Method'] = analysis_brain.getMethodUID
            item['choices']['Method'] = method_vocabulary
            item['allow_edit'].append('Method')
            self.show_methodinstr_columns = True
    elif method_title:
        # Read-only: render the method title as a link.
        item['replace']['Method'] = get_link(analysis_brain.getMethodURL,
                                             method_title)
        self.show_methodinstr_columns = True
Fills the analysis' method to the item passed in. :param analysis_brain: Brain that represents an analysis :param item: analysis' dictionary counterpart that represents a row
def print_graph(self, format: str = 'turtle') -> str:
    """Print the serialized rdflib Graph and return the serialized text.

    :param format: serialization format understood by rdflib (e.g.
        'turtle', 'xml', 'n3')
    :return: the serialized graph as a UTF-8 string

    BUG FIX: the annotation promised ``str`` but the function returned
    None; it now returns the serialized text (printing is unchanged, so
    existing callers that ignore the return value are unaffected).
    """
    serialized = self.g.serialize(format=format).decode('utf-8')
    print(serialized)
    return serialized
Prints the rdflib Graph serialized in the given format
def serve_forever(self):
    """Start the server handling commands and HTTP requests.

    Loops until ``self.done`` is True or a KeyboardInterrupt is received.
    Cleanup (``self._clean_up_call``) always runs on the way out.
    """
    self.start_cmd_loop()
    try:
        while not self.done:
            self.handle_request()
    except KeyboardInterrupt:
        # Keep interactive terminal output tidy after ^C.
        if log_file == sys.stderr:
            log_file.write("\n")
    finally:
        if self._clean_up_call is not None:
            self._clean_up_call()
        self.done = True
Starts the server handling commands and HTTP requests. The server will loop until done is True or a KeyboardInterrupt is received.
def resource_string(self):
    """Returns a string describing the total resources available."""
    if self._resources_initialized:
        res_str = "{} CPUs, {} GPUs".format(self._avail_resources.cpu,
                                            self._avail_resources.gpu)
        if self._avail_resources.custom_resources:
            # Append any custom resources, e.g. "(4 accelerator)".
            custom = ", ".join(
                "{} {}".format(
                    self._avail_resources.get_res_total(name), name)
                for name in self._avail_resources.custom_resources)
            res_str += " ({})".format(custom)
        return res_str
    else:
        # Resources have not been discovered yet.
        return "? CPUs, ? GPUs"
Returns a string describing the total resources available.
def parse_int_arg(name, default):
    """Return the URL parameter *name* as int, or *default* if absent.

    :param name: query-string parameter name
    :param default: value returned when the parameter is missing

    Improvement: the parameter is looked up once instead of twice.
    """
    value = request.args.get(name)
    return default if value is None else int(value)
Return a given URL parameter as int or return the default value.
def normalize(self):
    """Convert times to midnight (00:00:00), preserving length and tz.

    Useful when the time component does not matter.

    Returns
    -------
    DatetimeArray, DatetimeIndex or Series
        The same type as the original data, with every timestamp's time
        component set to midnight.

    See Also
    --------
    floor, ceil, round : Round the datetimes to a specified freq.
    """
    if self.tz is None or timezones.is_utc(self.tz):
        # Fast path: subtract the intra-day remainder directly on the
        # int64 nanosecond values, skipping NaT slots.
        not_null = ~self.isna()
        DAY_NS = ccalendar.DAY_SECONDS * 1000000000
        new_values = self.asi8.copy()
        adjustment = (new_values[not_null] % DAY_NS)
        new_values[not_null] = new_values[not_null] - adjustment
    else:
        # Timezone-aware path handles DST transitions in C code.
        new_values = conversion.normalize_i8_timestamps(self.asi8, self.tz)
    return type(self)._from_sequence(new_values, freq='infer').tz_localize(self.tz)
Convert times to midnight. The time component of the date-time is converted to midnight i.e. 00:00:00. This is useful in cases, when the time does not matter. Length is unaltered. The timezones are unaffected. This method is available on Series with datetime values under the ``.dt`` accessor, and directly on Datetime Array/Index. Returns ------- DatetimeArray, DatetimeIndex or Series The same type as the original data. Series will have the same name and index. DatetimeIndex will have the same name. See Also -------- floor : Floor the datetimes to the specified freq. ceil : Ceil the datetimes to the specified freq. round : Round the datetimes to the specified freq. Examples -------- >>> idx = pd.date_range(start='2014-08-01 10:00', freq='H', ... periods=3, tz='Asia/Calcutta') >>> idx DatetimeIndex(['2014-08-01 10:00:00+05:30', '2014-08-01 11:00:00+05:30', '2014-08-01 12:00:00+05:30'], dtype='datetime64[ns, Asia/Calcutta]', freq='H') >>> idx.normalize() DatetimeIndex(['2014-08-01 00:00:00+05:30', '2014-08-01 00:00:00+05:30', '2014-08-01 00:00:00+05:30'], dtype='datetime64[ns, Asia/Calcutta]', freq=None)
def get_raw_tag_data(filename):
    """Return the ID3 tag in FILENAME as a raw byte string.

    Returns an empty bytes object when the file carries no ID3 tag.
    """
    with open(filename, "rb") as file:
        try:
            (cls, offset, length) = stagger.tags.detect_tag(file)
        except stagger.NoTagError:
            return bytes()
        file.seek(offset)
        return file.read(length)
Return the ID3 tag in FILENAME as a raw byte string.
def update_wallet(self, wallet_name, limit):
    """Update a wallet with a new limit.

    @param wallet_name: the name of the wallet.
    @param limit: the new value of the limit.
    @return a success string from the plans server.
    @raise ServerError via make_request.
    """
    request = {
        'update': {
            'limit': str(limit),
        }
    }
    return make_request(
        '{}wallet/{}'.format(self.url, wallet_name),
        method='PATCH',
        body=request,
        timeout=self.timeout,
        client=self._client)
Update a wallet with a new limit. @param the name of the wallet. @param the new value of the limit. @return a success string from the plans server. @raise ServerError via make_request.
def login_required(fn):
    """Require an existing login before running the wrapped Click command.

    Does not prompt the user to login. Authentication sources are tried
    in this order:

    1. API key from the Click context (previous successful login)
    2. ONE_CODEX_BEARER_TOKEN in the environment
    3. ONE_CODEX_API_KEY in the environment
    4. API key from the credentials file (~/.onecodex)
    """
    @wraps(fn)
    def login_wrapper(ctx, *args, **kwargs):
        base_url = os.environ.get("ONE_CODEX_API_BASE", "https://app.onecodex.com")
        api_kwargs = {"telemetry": ctx.obj["TELEMETRY"]}
        api_key_prior_login = ctx.obj.get("API_KEY")
        bearer_token_env = os.environ.get("ONE_CODEX_BEARER_TOKEN")
        api_key_env = os.environ.get("ONE_CODEX_API_KEY")
        # Silent login: only reads the credentials file, never prompts.
        api_key_creds_file = _login(base_url, silent=True)
        if api_key_prior_login is not None:
            api_kwargs["api_key"] = api_key_prior_login
        elif bearer_token_env is not None:
            api_kwargs["bearer_token"] = bearer_token_env
        elif api_key_env is not None:
            api_kwargs["api_key"] = api_key_env
        elif api_key_creds_file is not None:
            api_kwargs["api_key"] = api_key_creds_file
        else:
            click.echo(
                "The command you specified requires authentication. Please login first.\n",
                err=True
            )
            ctx.exit()
        ctx.obj["API"] = Api(**api_kwargs)
        return fn(ctx, *args, **kwargs)
    return login_wrapper
Requires login before proceeding, but does not prompt the user to login. Decorator should be used only on Click CLI commands. Notes ----- Different means of authentication will be attempted in this order: 1. An API key present in the Click context object from a previous successful authentication. 2. A bearer token (ONE_CODEX_BEARER_TOKEN) in the environment. 3. An API key (ONE_CODEX_API_KEY) in the environment. 4. An API key in the credentials file (~/.onecodex).
def create(self, sid):
    """Create a new ShortCodeInstance.

    :param unicode sid: The SID of a Twilio ShortCode resource
    :returns: Newly created ShortCodeInstance
    :rtype: twilio.rest.proxy.v1.service.short_code.ShortCodeInstance
    """
    data = values.of({'Sid': sid, })
    payload = self._version.create(
        'POST',
        self._uri,
        data=data,
    )
    return ShortCodeInstance(self._version, payload, service_sid=self._solution['service_sid'], )
Create a new ShortCodeInstance :param unicode sid: The SID of a Twilio ShortCode resource :returns: Newly created ShortCodeInstance :rtype: twilio.rest.proxy.v1.service.short_code.ShortCodeInstance
def call_lights(*args, **kwargs):
    """Get info about all available lamps.

    Options:

    * **id**: Specifies a device ID. Can be comma-separated values.
      All, if omitted.

    CLI Example:

    .. code-block:: bash

        salt '*' hue.lights
        salt '*' hue.lights id=1
        salt '*' hue.lights id=1,2,3
    """
    res = dict()
    lights = _get_lights()
    # NOTE(review): the `cond and a or b` idiom falls back to all lights
    # when _get_devices() returns an empty/falsy result — confirm intended.
    for dev_id in 'id' in kwargs and _get_devices(kwargs) or sorted(lights.keys()):
        if lights.get(six.text_type(dev_id)):
            res[dev_id] = lights[six.text_type(dev_id)]
    # Empty result is reported as False (Salt convention).
    return res or False
Get info about all available lamps. Options: * **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted. CLI Example: .. code-block:: bash salt '*' hue.lights salt '*' hue.lights id=1 salt '*' hue.lights id=1,2,3
def set_log_file_extension(self, logFileExtension):
    """Set the log file extension.

    :Parameters:
        #. logFileExtension (string): Logging file extension. A logging
           file full name is set as logFileBasename.logFileExtension
    """
    assert isinstance(logFileExtension, basestring), "logFileExtension must be a basestring"
    assert len(logFileExtension), "logFileExtension can't be empty"
    # Strip a single leading dot.
    if logFileExtension[0] == ".":
        logFileExtension = logFileExtension[1:]
    assert len(logFileExtension), "logFileExtension is not allowed to be single dot"
    # Strip a single trailing dot.
    if logFileExtension[-1] == ".":
        logFileExtension = logFileExtension[:-1]
    assert len(logFileExtension), "logFileExtension is not allowed to be double dots"
    self.__logFileExtension = logFileExtension
    # Recompute the full log file name with the new extension.
    self.__set_log_file_name()
Set the log file extension. :Parameters: #. logFileExtension (string): Logging file extension. A logging file full name is set as logFileBasename.logFileExtension
def createJsbConfig(self):
    """Create JSB config file using ``sencha create jsb``.

    :return: The created jsb3 config as a string.

    Fixes: the temp file was read via a bare ``open()`` (leaked file
    handle), and the temp directory leaked when the command or the read
    raised; both are now cleaned up deterministically.
    """
    tempdir = mkdtemp()
    try:
        tempfile = join(tempdir, 'app.jsb3')
        cmd = ['sencha', 'create', 'jsb', '-a', self.url, '-p', tempfile]
        log.debug('Running: %s', ' '.join(cmd))
        call(cmd)
        with open(tempfile) as config_file:
            jsb3 = config_file.read()
    finally:
        # Always remove the scratch directory, even on failure.
        rmtree(tempdir)
    return jsb3
Create JSB config file using ``sencha create jsb``. :return: The created jsb3 config as a string.
def zadd(self, name, mapping, nx=False, xx=False, ch=False, incr=False):
    """Set any number of element-name, score pairs to the key ``name``.

    Pairs are specified as a dict of element-name keys to score values.

    ``nx``: only create new elements, never update existing scores.
    ``xx``: only update existing elements, never add new ones.
    ``ch``: return the number of changed elements (added + updated).
    ``incr``: behave like ZINCRBY; requires exactly one pair and the
    return value is the element's new score.
    """
    if not mapping:
        raise DataError("ZADD requires at least one element/score pair")
    if nx and xx:
        raise DataError("ZADD allows either 'nx' or 'xx', not both")
    if incr and len(mapping) != 1:
        raise DataError("ZADD option 'incr' only works when passing a "
                        "single element/score pair")
    pieces = []
    options = {}
    if nx:
        pieces.append(Token.get_token('NX'))
    if xx:
        pieces.append(Token.get_token('XX'))
    if ch:
        pieces.append(Token.get_token('CH'))
    if incr:
        pieces.append(Token.get_token('INCR'))
        # The reply is the new score, so parse it as a score value.
        options['as_score'] = True
    for pair in iteritems(mapping):
        # Redis expects "score member" ordering on the wire.
        pieces.append(pair[1])
        pieces.append(pair[0])
    return self.execute_command('ZADD', name, *pieces, **options)
Set any number of element-name, score pairs to the key ``name``. Pairs are specified as a dict of element-names keys to score values. ``nx`` forces ZADD to only create new elements and not to update scores for elements that already exist. ``xx`` forces ZADD to only update scores of elements that already exist. New elements will not be added. ``ch`` modifies the return value to be the numbers of elements changed. Changed elements include new elements that were added and elements whose scores changed. ``incr`` modifies ZADD to behave like ZINCRBY. In this mode only a single element/score pair can be specified and the score is the amount the existing score will be incremented by. When using this mode the return value of ZADD will be the new score of the element. The return value of ZADD varies based on the mode specified. With no options, ZADD returns the number of new elements added to the sorted set.
def _delete_device_from_device_group(self, device):
    """Remove a device from the device service cluster group.

    :param device: ManagementRoot object -- device to delete from group
    """
    device_name = get_device_info(device).name
    # pollster retries the lookup until the device group is available.
    dg = pollster(self._get_device_group)(device)
    device_to_remove = dg.devices_s.devices.load(
        name=device_name, partition=self.partition
    )
    device_to_remove.delete()
Remove device from device service cluster group. :param device: ManagementRoot object -- device to delete from group
def paint(self, iconic, painter, rect, mode, state, options):
    """Main paint method: paint one icon layer for each option set."""
    for option in options:
        self._paint_icon(iconic, painter, rect, mode, state, option)
Main paint method.
def submit_error(url, user, project, area, description, extra=None, default_message=None):
    """Celery task for submitting errors asynchronously.

    :param url: string URL for bugzscout
    :param user: fogbugz user to designate when submitting via bugzscout
    :param project: fogbugz project to designate for cases
    :param area: fogbugz area to designate for cases
    :param description: description for the error
    :param extra: details for the error
    :param default_message: default message to return in responses
    """
    LOG.debug('Creating new BugzScout instance.')
    client = bugzscout.BugzScout(
        url, user, project, area)
    LOG.debug('Submitting BugzScout error.')
    client.submit_error(
        description, extra=extra, default_message=default_message)
Celery task for submitting errors asynchronously. :param url: string URL for bugzscout :param user: string fogbugz user to designate when submitting via bugzscout :param project: string fogbugz project to designate for cases :param area: string fogbugz area to designate for cases :param description: string description for error :param extra: string details for error :param default_message: string default message to return in responses
def is_block(bin_list):
    """Check if a bin list has exclusively consecutive bin ids.

    Each entry's bin id is taken from index 1; duplicates are allowed.
    """
    ids = {entry[1] for entry in bin_list}
    lowest, highest = min(ids), max(ids)
    # Consecutive ids cover the full integer range [lowest, highest].
    return ids == set(range(lowest, highest + 1))
Check if a bin list has exclusively consecutive bin ids.
def failed_hosts(self) -> Dict[str, "MultiResult"]:
    """Hosts that failed to complete the task."""
    failed = {}
    for host, host_result in self.result.items():
        if host_result.failed:
            failed[host] = host_result
    return failed
Hosts that failed to complete the task
def has_throttled(self):
    """Check whether any CPU core monitored by this instance has
    throttled since this instance was created.

    @return a boolean value
    """
    for file, value in self.cpu_throttle_count.items():
        try:
            new_value = int(util.read_file(file))
            # Counter increased => the core throttled at least once.
            if new_value > value:
                return True
        except Exception as e:
            logging.warning('Cannot read throttling count of CPU from kernel: %s', e)
    return False
Check whether any of the CPU cores monitored by this instance has throttled since this instance was created. @return a boolean value
def field_match(self, node_field, pattern_field):
    """Check if two AST fields match.

    Fields match if:
    - the pattern is a list: all values have to match,
    - the pattern is a node: recursively check it,
    - otherwise: values must be equal (with NaN equal to NaN).
    """
    is_good_list = (isinstance(pattern_field, list) and
                    self.check_list(node_field, pattern_field))
    is_good_node = (isinstance(pattern_field, AST) and
                    Check(node_field, self.placeholders).visit(pattern_field))

    def strict_eq(f0, f1):
        # Treat NaN as equal to NaN; fall back to plain == for
        # non-numeric values where isnan raises TypeError.
        try:
            return f0 == f1 or (isnan(f0) and isnan(f1))
        except TypeError:
            return f0 == f1
    is_same = strict_eq(pattern_field, node_field)
    return is_good_list or is_good_node or is_same
Check if two fields match. Field match if: - If it is a list, all values have to match. - If if is a node, recursively check it. - Otherwise, check values are equal.
def _initialize_monitoring_services_queue(self, chain_state: ChainState):
    """Send the monitoring requests for all current balance proofs.

    Note: the node must always send the *received* balance proof to the
    monitoring service *before* forwarding its own locked transfer;
    otherwise, if this node crashes after forwarding, the monitoring
    service would lack the latest balance proof and funds for the lock
    could be lost if the channel is closed while the node is offline.
    During restarts this initialization must therefore run before the
    message queues are processed.
    """
    msg = (
        'Transport was started before the monitoring service queue was updated. '
        'This can lead to safety issue. node:{self!r}'
    )
    assert not self.transport, msg
    msg = (
        'The node state was not yet recovered, cant read balance proofs. node:{self!r}'
    )
    assert self.wal, msg
    # Diff against a pristine genesis chain state to enumerate every
    # balance proof present in the current state.
    current_balance_proofs = views.detect_balance_proof_change(
        old_state=ChainState(
            pseudo_random_generator=chain_state.pseudo_random_generator,
            block_number=GENESIS_BLOCK_NUMBER,
            block_hash=constants.EMPTY_HASH,
            our_address=chain_state.our_address,
            chain_id=chain_state.chain_id,
        ),
        current_state=chain_state,
    )
    for balance_proof in current_balance_proofs:
        update_services_from_balance_proof(self, chain_state, balance_proof)
Send the monitoring requests for all current balance proofs. Note: The node must always send the *received* balance proof to the monitoring service, *before* sending its own locked transfer forward. If the monitoring service is updated after, then the following can happen: For a transfer A-B-C where this node is B - B receives T1 from A and processes it - B forwards its T2 to C * B crashes (the monitoring service is not updated) For the above scenario, the monitoring service would not have the latest balance proof received by B from A available with the lock for T1, but C would. If the channel B-C is closed and B does not come back online in time, the funds for the lock L1 can be lost. During restarts the rationale from above has to be replicated. Because the initialization code *is not* the same as the event handler. This means the balance proof updates must be done prior to the processing of the message queues.
def encloses(self, location: FileLocation
             ) -> Optional[FunctionDesc]:
    """Returns the function, if any, that encloses a given location."""
    # Only functions in the same file can enclose the location.
    for func in self.in_file(location.filename):
        if location in func.location:
            return func
    return None
Returns the function, if any, that encloses a given location.
def processing_blocks(self):
    """Return a JSON dict encoding the PBs known to SDP."""
    pb_list = ProcessingBlockList()
    # Group processing blocks by their lifecycle state.
    return json.dumps(dict(active=pb_list.active,
                           completed=pb_list.completed,
                           aborted=pb_list.aborted))
Return the a JSON dict encoding the PBs known to SDP.
def reset_spyder(self):
    """Quit and reset Spyder to default settings, then restart the
    application (after user confirmation)."""
    answer = QMessageBox.warning(self, _("Warning"),
        _("Spyder will restart and reset to default settings: <br><br>"
          "Do you want to continue?"),
        QMessageBox.Yes | QMessageBox.No)
    if answer == QMessageBox.Yes:
        self.restart(reset=True)
Quit and reset Spyder and then Restart application.
def connect(self):
    """Connect to RabbitMQ and declare the fanout exchange."""
    self.connection = Connection(self.broker_url)
    # Non-durable fanout exchange with transient delivery (mode 1).
    e = Exchange('mease', type='fanout', durable=False, delivery_mode=1)
    self.exchange = e(self.connection.default_channel)
    self.exchange.declare()
Connects to RabbitMQ
def validate(self, data):
    """Check that `data` respects this preset specification.

    Every required property must be present, and each present property
    must match its declared type ('string' or 'enum').
    """
    for prop in self.properties:
        if prop.id in data:
            if prop.type == 'string':
                if not isinstance(data[prop.id], basestring):
                    raise PresetFieldTypeException("property '{}' must be of type string".format(prop.id))
            elif prop.type == 'enum':
                # Enum values are strings restricted to a fixed set.
                if not isinstance(data[prop.id], basestring):
                    raise PresetFieldTypeException("property '{}' must be of type string".format(prop.id))
                if data[prop.id] not in prop.values:
                    raise PresetException("property '{}' can be one of {}".format(prop.id, prop.values))
        else:
            if prop.required:
                raise PresetMissingFieldException("missing required property: '{}'".format(prop.id))
Checks if `data` respects this preset specification It will check that every required property is present and for every property type it will make some specific control.
def _translate(self, x, y): return self.parent._translate((x + self.x), (y + self.y))
Convertion x and y to their position on the root Console
def call(self, phone_number, message, message_type, **params):
    """Send a voice call to the target phone_number.

    See https://developer.telesign.com/docs/voice-api for detailed API
    documentation.
    """
    return self.post(VOICE_RESOURCE,
                     phone_number=phone_number,
                     message=message,
                     message_type=message_type,
                     **params)
Send a voice call to the target phone_number. See https://developer.telesign.com/docs/voice-api for detailed API documentation.
def fuse_list(mafs):
    """Try to fuse a list of blocks by progressively fusing each
    adjacent pair."""
    last = None
    for m in mafs:
        if last is None:
            last = m
        else:
            fused = fuse(last, m)
            if fused:
                # Keep accumulating into the fused block.
                last = fused
            else:
                # Cannot fuse: emit the accumulated block and start over.
                yield last
                last = m
    if last:
        yield last
Try to fuse a list of blocks by progressively fusing each adjacent pair.
def ckm_standard(t12, t13, t23, delta):
    r"""CKM matrix in the standard parametrization and standard phase
    convention.

    Parameters
    ----------
    - `t12`: CKM angle $\theta_{12}$ in radians
    - `t13`: CKM angle $\theta_{13}$ in radians
    - `t23`: CKM angle $\theta_{23}$ in radians
    - `delta`: CKM phase $\delta=\gamma$ in radians
    """
    c12 = cos(t12)
    c13 = cos(t13)
    c23 = cos(t23)
    s12 = sin(t12)
    s13 = sin(t13)
    s23 = sin(t23)
    # NOTE(review): exp(1j*delta) requires a complex-capable exp
    # (numpy or cmath) — confirm the module-level import.
    return np.array([[c12*c13,
                      c13*s12,
                      s13/exp(1j*delta)],
                     [-(c23*s12) - c12*exp(1j*delta)*s13*s23,
                      c12*c23 - exp(1j*delta)*s12*s13*s23,
                      c13*s23],
                     [-(c12*c23*exp(1j*delta)*s13) + s12*s23,
                      -(c23*exp(1j*delta)*s12*s13) - c12*s23,
                      c13*c23]])
r"""CKM matrix in the standard parametrization and standard phase convention. Parameters ---------- - `t12`: CKM angle $\theta_{12}$ in radians - `t13`: CKM angle $\theta_{13}$ in radians - `t23`: CKM angle $\theta_{23}$ in radians - `delta`: CKM phase $\delta=\gamma$ in radians
def check_feasibility(x_bounds, lowerbound, upperbound):
    """Quick feasibility screen for a summation constraint.

    Returns True when either *lowerbound* or *upperbound* lies between
    the sum of the per-variable minima and the sum of the per-variable
    maxima. This can have false positives: e.g. variables restricted to
    {0, 5} with a constraint between 6 and 7.
    """
    total_min = sum(bound[0] for bound in x_bounds)
    total_max = sum(bound[-1] for bound in x_bounds)
    return (total_min <= lowerbound <= total_max) or \
           (total_min <= upperbound <= total_max)
This can have false positives. For examples, parameters can only be 0 or 5, and the summation constraint is between 6 and 7.
def delete_resource_view(self, resource_view):
    """Delete a resource view from the resource and HDX.

    Args:
        resource_view (Union[ResourceView,Dict,str]): Either a resource
            view id or resource view metadata either from a ResourceView
            object or a dictionary

    Returns:
        None
    """
    if isinstance(resource_view, str):
        # A plain string must be a resource view id (UUID).
        if is_valid_uuid(resource_view) is False:
            raise HDXError('%s is not a valid resource view id!' % resource_view)
        resource_view = ResourceView({'id': resource_view}, configuration=self.configuration)
    else:
        resource_view = self._get_resource_view(resource_view)
        if 'id' not in resource_view:
            # No id supplied: fall back to matching by title among this
            # resource's existing views.
            found = False
            title = resource_view.get('title')
            for rv in self.get_resource_views():
                if resource_view['title'] == rv['title']:
                    resource_view = rv
                    found = True
                    break
            if not found:
                raise HDXError('No resource views have title %s in this resource!' % title)
    resource_view.delete_from_hdx()
Delete a resource view from the resource and HDX Args: resource_view (Union[ResourceView,Dict,str]): Either a resource view id or resource view metadata either from a ResourceView object or a dictionary Returns: None
def readmarheader(filename):
    """Read a header from a MarResearch .image file.

    :param filename: path to the .image file
    :return: dict with the parsed header fields (sizes, beam position,
        wavelength, sample-detector distance, date, ...)
    """
    with open(filename, 'rb') as f:
        # Header layout: 10 int32, 15 little-endian float32, a 24-byte
        # date string, 4 padding bytes, then 29 16-byte strings.
        # np.frombuffer replaces the deprecated np.fromstring (same
        # values; the returned arrays are read-only, which is fine as
        # they are only indexed here).
        intheader = np.frombuffer(f.read(10 * 4), np.int32)
        floatheader = np.frombuffer(f.read(15 * 4), '<f4')
        strheader = f.read(24)
        f.read(4)  # skip padding
        otherstrings = [f.read(16) for i in range(29)]
        # NOTE(review): on Python 3, strheader is bytes; dateutil copes
        # with bytes date strings via its string handling — confirm if
        # this module is ported to Python 3.
        return {'Xsize': intheader[0], 'Ysize': intheader[1],
                'MeasTime': intheader[8],
                'BeamPosX': floatheader[7], 'BeamPosY': floatheader[8],
                'Wavelength': floatheader[9], 'Dist': floatheader[10],
                '__Origin__': 'MarResearch .image',
                'recordlength': intheader[2],
                'highintensitypixels': intheader[4],
                'highintensityrecords': intheader[5],
                'Date': dateutil.parser.parse(strheader),
                'Detector': 'MARCCD',
                '__particle__': 'photon'}
Read a header from a MarResearch .image file.
def EnumerateQualifiers(self, *args, **kwargs):
    """Enumerate the qualifier types in the local repository of this class.

    Results from the underlying connection (if any) are combined with the
    qualifiers stored locally under the default namespace.

    For a description of the parameters, see
    :meth:`pywbem.WBEMConnection.EnumerateQualifiers`.
    """
    if self.conn is None:
        result = []
    else:
        result = self.conn.EnumerateQualifiers(*args, **kwargs)
    try:
        local = self.qualifiers[self.default_namespace]
    except KeyError:
        # No local qualifiers for this namespace.
        return result
    return result + list(local.values())
Enumerate the qualifier types in the local repository of this class. For a description of the parameters, see :meth:`pywbem.WBEMConnection.EnumerateQualifiers`.
def crsConvert(crsIn, crsOut):
    """convert between different types of spatial references

    Parameters
    ----------
    crsIn: int, str or :osgeo:class:`osr.SpatialReference`
        the input CRS
    crsOut: {'wkt', 'prettyWkt', 'proj4', 'epsg', 'opengis', 'osr'}
        the output CRS type

    Returns
    -------
    int, str or :osgeo:class:`osr.SpatialReference`
        the output CRS
    """
    # Normalize the input to an osr.SpatialReference object.
    if isinstance(crsIn, osr.SpatialReference):
        srs = crsIn.Clone()
    else:
        srs = osr.SpatialReference()
        if isinstance(crsIn, int):
            crsIn = 'EPSG:{}'.format(crsIn)
        if not isinstance(crsIn, str):
            raise TypeError('crsIn must be of type int, str or osr.SpatialReference')
        try:
            srs.SetFromUserInput(crsIn)
        except RuntimeError:
            raise TypeError('crsIn not recognized; must be of type WKT, PROJ4 or EPSG')
    # Export in the requested representation.
    if crsOut == 'wkt':
        return srs.ExportToWkt()
    if crsOut == 'prettyWkt':
        return srs.ExportToPrettyWkt()
    if crsOut == 'proj4':
        return srs.ExportToProj4()
    if crsOut == 'epsg':
        srs.AutoIdentifyEPSG()
        return int(srs.GetAuthorityCode(None))
    if crsOut == 'opengis':
        srs.AutoIdentifyEPSG()
        return 'http://www.opengis.net/def/crs/EPSG/0/{}'.format(srs.GetAuthorityCode(None))
    if crsOut == 'osr':
        return srs
    raise ValueError('crsOut not recognized; must be either wkt, proj4, opengis or epsg')
convert between different types of spatial references Parameters ---------- crsIn: int, str or :osgeo:class:`osr.SpatialReference` the input CRS crsOut: {'wkt', 'proj4', 'epsg', 'osr', 'opengis' or 'prettyWkt'} the output CRS type Returns ------- int, str or :osgeo:class:`osr.SpatialReference` the output CRS Examples -------- convert an integer EPSG code to PROJ4: >>> crsConvert(4326, 'proj4') '+proj=longlat +datum=WGS84 +no_defs ' convert a PROJ4 string to an opengis URL: >>> crsConvert('+proj=longlat +datum=WGS84 +no_defs ', 'opengis') 'http://www.opengis.net/def/crs/EPSG/0/4326' convert the opengis URL back to EPSG: >>> crsConvert('http://www.opengis.net/def/crs/EPSG/0/4326', 'epsg') 4326 convert an EPSG compound CRS (WGS84 horizontal + EGM96 vertical) >>> crsConvert('EPSG:4326+5773', 'proj4') '+proj=longlat +datum=WGS84 +geoidgrids=egm96_15.gtx +vunits=m +no_defs '
def pretty_print_model(devicemodel):
    """Prints out a device model in the terminal by parsing dict.

    Logs the device model via the module-level PRETTY_PRINT_MODEL format
    string, followed by one line per trait (or 'No traits') and a blank
    separator line.
    """
    # BUG FIX: the original assigned the result of logging.info (None)
    # to a local named PRETTY_PRINT_MODEL, which made the name local and
    # raised UnboundLocalError before the module-level template could be
    # read. Just log the formatted template.
    logging.info(PRETTY_PRINT_MODEL % devicemodel)
    if 'traits' in devicemodel:
        for trait in devicemodel['traits']:
            logging.info(' Trait %s' % trait)
    else:
        logging.info('No traits')
    logging.info('')  # blank separator line between models
Prints out a device model in the terminal by parsing dict.
def _normalized_levenshtein_distance(s1, s2, acceptable_differences): if len(s1) > len(s2): s1, s2 = s2, s1 acceptable_differences = set(-i for i in acceptable_differences) distances = range(len(s1) + 1) for index2, num2 in enumerate(s2): new_distances = [index2 + 1] for index1, num1 in enumerate(s1): if num2 - num1 in acceptable_differences: new_distances.append(distances[index1]) else: new_distances.append(1 + min((distances[index1], distances[index1+1], new_distances[-1]))) distances = new_distances return distances[-1]
This function calculates the levenshtein distance but allows for elements in the lists to be different by any number in the set acceptable_differences. :param s1: A list. :param s2: Another list. :param acceptable_differences: A set of numbers. If (s2[i]-s1[i]) is in the set then they are considered equal. :returns:
def dl_files(db, dl_dir, files, keep_subdirs=True, overwrite=False):
    """Download specified files from a Physiobank database.

    Parameters
    ----------
    db : str
        The Physiobank database directory to download. eg. for
        'http://physionet.org/physiobank/database/mitdb', db='mitdb'.
    dl_dir : str
        The full local directory path in which to download the files.
    files : list
        File names to download, relative to the database base directory.
    keep_subdirs : bool, optional
        Whether to keep the relative subdirectories of downloaded files
        as organized in Physiobank (True), or to download all files into
        the same base directory (False).
    overwrite : bool, optional
        If True, all files will be redownloaded regardless. If False,
        existing files are size-checked and only missing bytes are
        fetched.
    """
    # Check that the database exists before spawning any downloads.
    db_url = posixpath.join(config.db_index_url, db)
    response = requests.get(db_url)
    response.raise_for_status()

    # One input tuple per file: (basename, subdir, db, dl_dir,
    # keep_subdirs, overwrite). Renamed the loop variable from `file`,
    # which shadowed the builtin.
    dl_inputs = [(os.path.split(file_name)[1], os.path.split(file_name)[0],
                  db, dl_dir, keep_subdirs, overwrite)
                 for file_name in files]

    make_local_dirs(dl_dir, dl_inputs, keep_subdirs)

    print('Downloading files...')
    # Close and join the pool so worker processes do not leak.
    pool = multiprocessing.Pool(processes=2)
    try:
        pool.map(dl_pb_file, dl_inputs)
    finally:
        pool.close()
        pool.join()
    print('Finished downloading files')
    return
Download specified files from a Physiobank database. Parameters ---------- db : str The Physiobank database directory to download. eg. For database: 'http://physionet.org/physiobank/database/mitdb', db='mitdb'. dl_dir : str The full local directory path in which to download the files. files : list A list of strings specifying the file names to download relative to the database base directory. keep_subdirs : bool, optional Whether to keep the relative subdirectories of downloaded files as they are organized in Physiobank (True), or to download all files into the same base directory (False). overwrite : bool, optional If True, all files will be redownloaded regardless. If False, existing files with the same name and relative subdirectory will be checked. If the local file is the same size as the online file, the download is skipped. If the local file is larger, it will be deleted and the file will be redownloaded. If the local file is smaller, the file will be assumed to be partially downloaded and the remaining bytes will be downloaded and appended. Examples -------- >>> wfdb.dl_files('ahadb', os.getcwd(), ['STAFF-Studies-bibliography-2016.pdf', 'data/001a.hea', 'data/001a.dat'])
def _ParseCshVariables(self, lines):
    """Extract env_var and path values from csh derivative shells.

    Path attributes can be set several ways:
    - setenv takes the form "setenv PATH_NAME COLON:SEPARATED:LIST"
    - set takes the form "set path_name=(space separated list)" and is
      automatically exported for several well-known names.

    Args:
      lines: A list of lines, each of which is a list of space separated
        words.

    Returns:
      a dictionary of path names and values.
    """
    paths = {}
    for line in lines:
        # Need at least an action word and a target.
        if len(line) < 2:
            continue
        action = line[0]
        if action == "setenv":
            target = line[1]
            path_vals = []
            if line[2:]:
                # setenv values are colon-separated.
                path_vals = line[2].split(":")
            self._ExpandPath(target, path_vals, paths)
        elif action == "set":
            set_vals = self._CSH_SET_RE.search(" ".join(line[1:]))
            if set_vals:
                target, vals = set_vals.groups()
                # These lowercase csh names map to uppercase environment
                # variables that csh exports automatically.
                if target in ("path", "term", "user"):
                    target = target.upper()
                # set values are space-separated.
                path_vals = vals.split()
                self._ExpandPath(target, path_vals, paths)
    return paths
Extract env_var and path values from csh derivative shells. Path attributes can be set several ways: - setenv takes the form "setenv PATH_NAME COLON:SEPARATED:LIST" - set takes the form "set path_name=(space separated list)" and is automatically exported for several types of files. The first entry in each stanza is used to decide what context to use. Other entries are used to identify the path name and any assigned values. Args: lines: A list of lines, each of which is a list of space separated words. Returns: a dictionary of path names and values.
def last_commit():
    """Returns the SHA1 of the last commit.

    Runs ``hg parent`` and decodes its output; returns None when the
    command exits with a non-zero status (e.g. not inside a repository).
    """
    cmd = ['hg', 'parent', '--template={node}']
    try:
        node = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError:
        return None
    return node.strip().decode('utf-8')
Returns the SHA1 of the last commit.
def run_command(cmd, debug=False):
    """Execute the given command and return None.

    :param cmd: A `sh.Command` object to execute.
    :param debug: An optional bool to toggle debug output.
    :return: None
    """
    if debug:
        # Show where and what we are about to run.
        print_warn(' PWD: {}'.format(os.getcwd()))
        print_warn(' COMMAND: {}'.format(cmd))
    cmd()
Execute the given command and return None. :param cmd: A `sh.Command` object to execute. :param debug: An optional bool to toggle debug output. :return: None
def classify_host(host):
    """Normalize `host` to an IP address object or hostname string.

    If `host` is already an IPv4Address or IPv6Address it is returned
    unchanged. A string that is a valid hostname is returned as-is;
    otherwise it must parse as an IP address, and ValueError is raised
    when it does not.
    """
    if isinstance(host, (IPv4Address, IPv6Address)):
        return host
    return host if is_valid_hostname(host) else ip_address(host)
Host is an IPv4Address, IPv6Address or a string. If an IPv4Address or IPv6Address return it. Otherwise convert the string to an IPv4Address or IPv6Address object if possible and return it. Otherwise return the original string if it is a valid hostname. Raise ValueError if a string cannot be interpreted as an IP address and it is not a valid hostname.
def list_wallet_names(api_key, is_hd_wallet=False, coin_symbol='btc'):
    """Get all the wallets belonging to an API key.

    :param api_key: blockcypher API token (required)
    :param is_hd_wallet: list HD wallets instead of regular wallets
    :param coin_symbol: coin to query (e.g. 'btc')
    """
    assert is_valid_coin_symbol(coin_symbol), coin_symbol
    assert api_key
    wallet_kind = 'hd' if is_hd_wallet else ''
    url = make_url(coin_symbol, wallets=wallet_kind)
    response = requests.get(url, params={'token': api_key},
                            verify=True, timeout=TIMEOUT_IN_SECONDS)
    return get_valid_json(response)
Get all the wallets belonging to an API key
def html_page_context(app, pagename, templatename, context, doctree):
    """Event handler for the html-page-context signal.

    Modifies the context directly: replaces 'toc' with a fully rendered
    toctree, forces display_toc on so the table of contents shows even
    on empty pages, and swaps the 'toctree' callable for one that
    renders the entire document structure (prune disabled).
    """
    context['toc'] = get_rendered_toctree(app.builder, pagename)
    context['display_toc'] = True

    # Pages rendered without a toctree in their template need no override.
    if "toctree" not in context:
        return

    def full_toctree(collapse=True):
        # prune=False: always render the whole structure, not just the
        # current page's subtree.
        return get_rendered_toctree(app.builder, pagename,
                                    prune=False, collapse=collapse)

    context['toctree'] = full_toctree
Event handler for the html-page-context signal. Modifies the context directly. - Replaces the 'toc' value created by the HTML builder with one that shows all document titles and the local table of contents. - Sets display_toc to True so the table of contents is always displayed, even on empty pages. - Replaces the 'toctree' function with one that uses the entire document structure, ignores the maxdepth argument, and uses only prune and collapse.
def get_permission_required(cls):
    """Get permission required property.

    Must return an iterable of permission names. Raises
    ImproperlyConfigured when the class does not define
    ``permission_required``.
    """
    if cls.permission_required is None:
        raise ImproperlyConfigured(
            "{0} is missing the permission_required attribute. "
            "Define {0}.permission_required, or override "
            "{0}.get_permission_required().".format(cls.__name__)
        )
    if not isinstance(cls.permission_required, six.string_types):
        # Already an iterable of permissions.
        return cls.permission_required
    # A single non-empty string becomes a one-element tuple; an empty
    # string means "no permissions required".
    return (cls.permission_required,) if cls.permission_required != "" else ()
Get permission required property. Must return an iterable.
def delete(fun):
    '''
    Remove specific function contents of minion. Returns True on success.

    CLI Example:

    .. code-block:: bash

        salt '*' mine.delete 'network.interfaces'
    '''
    # Masterless (local file client): the mine lives in the minion's own
    # data store, so delete the entry there directly.
    if __opts__['file_client'] == 'local':
        data = __salt__['data.get']('mine_cache')
        if isinstance(data, dict) and fun in data:
            del data[fun]
        return __salt__['data.update']('mine_cache', data)
    # Otherwise ask the master to remove this function from the minion's
    # mine via the _mine_delete command.
    load = {
        'cmd': '_mine_delete',
        'id': __opts__['id'],
        'fun': fun,
    }
    return _mine_send(load, __opts__)
Remove specific function contents of minion. Returns True on success. CLI Example: .. code-block:: bash salt '*' mine.delete 'network.interfaces'
def skipgram_batch(centers, contexts, num_tokens, dtype, index_dtype):
    """Create a batch for SG training objective.

    :param centers: center word indices, one per sample
    :param contexts: only contexts[2] is used as the context word
        indices  # NOTE(review): confirm the contexts tuple layout against the caller
    :param num_tokens: vocabulary size (number of CSR matrix columns)
    :param dtype: dtype of the CSR matrix values
    :param index_dtype: dtype for index arrays
    :returns: (centers_csr, contexts, centers)
    """
    contexts = mx.nd.array(contexts[2], dtype=index_dtype)
    # indptr = 0..N marks exactly one nonzero per row, so each row of the
    # CSR matrix is a one-hot encoding of its center word.
    indptr = mx.nd.arange(len(centers) + 1)
    centers = mx.nd.array(centers, dtype=index_dtype)
    centers_csr = mx.nd.sparse.csr_matrix(
        (mx.nd.ones(centers.shape), centers, indptr), dtype=dtype,
        shape=(len(centers), num_tokens))
    return centers_csr, contexts, centers
Create a batch for SG training objective.
def assert_not_present(self, selector, testid=None, **kwargs):
    """Assert that the element is not present in the dom.

    Args:
        selector (str): the selector used to find the element
        testid (str): the test_id or a str

    Kwargs:
        wait_until_not_present (bool): wait for the element to disappear
            before asserting; defaults to the proxy_driver config value.

    Returns:
        bool: True if the assertion succeeds; False otherwise.
    """
    self.info_log(
        "Assert not present selector(%s) testid(%s)" % (selector, testid)
    )
    wait_until_not_present = kwargs.get(
        'wait_until_not_present',
        BROME_CONFIG['proxy_driver']['wait_until_not_present_before_assert_not_present']
    )
    self.debug_log(
        "effective wait_until_not_present: %s" % wait_until_not_present
    )
    # Either wait for the element to disappear or do a one-shot check.
    if wait_until_not_present:
        ret = self.wait_until_not_present(selector, raise_exception=False)
    else:
        ret = not self.is_present(selector)
    # Record the outcome against the test id (when given), then return it.
    if ret:
        if testid is not None:
            self.create_test_result(testid, True)
        return True
    else:
        if testid is not None:
            self.create_test_result(testid, False)
        return False
Assert that the element is not present in the dom

Args:
    selector (str): the selector used to find the element
    testid (str): the test_id or a str

Kwargs:
    wait_until_not_present (bool)

Returns:
    bool: True if the assertion succeeds; False otherwise.
def deploy(self, unique_id, configs=None):
    """Deploys the service to the host.

    Performs at least the same actions as install followed by start, but
    may be overridden to perform additional tasks as needed.

    :param unique_id: the name of the process
    :param configs: a map of configs the deployer may use to modify the
        deployment
    """
    for step in (self.install, self.start):
        step(unique_id, configs)
Deploys the service to the host. This should at least perform the same actions as install and start but may perform additional tasks as needed.

:Parameter unique_id: the name of the process
:Parameter configs: a map of configs the deployer may use to modify the deployment
def gen(self, text, start=0):
    """Return the source code in text, filled with autogenerated code
    starting at start.

    Scans comment chunks from `start`; when one matches the generation
    marker, the text between the end of that comment and the next blank
    line (the previously generated block) is replaced with freshly
    generated output, then scanning restarts after the insertion.
    """
    for cc in self.chunkComment(text, start):
        c = self.extractChunkContent(cc)
        cc = ''.join(cc)
        m = self.matchComment(c)
        # Locate this comment chunk within the text.
        idx = text.index(cc, start)
        e = idx + len(cc)
        if m:
            assert text[idx:e] == cc
            # Find the end of the old generated block: the next blank
            # line, or the end of the text.
            try:
                end = text.index('\n\n', e - 1) + 1
            except ValueError:
                end = len(text)
            # Remove the old block, regenerate from the stripped text,
            # and splice the new output in right after the comment.
            text = text[:e] + text[end:]
            new = self.genOutputs(self.code(text), m)
            new = ''.join(new)
            text = text[:e] + new + text[e:]
            # Recurse to continue scanning just past the inserted output.
            return self.gen(text, e + len(new))
    return text
Return the source code in text, filled with autogenerated code starting at start.
def significance_fdr(p, alpha):
    """Calculate significance by controlling for the false discovery rate.

    Determines which p-values in `p` are significant while controlling
    the false discovery rate (Benjamini-Hochberg) at level `alpha`.

    Parameters
    ----------
    p : array, shape (channels, channels, nfft)
        p-values.
    alpha : float
        Maximum false discovery rate.

    Returns
    -------
    s : array, dtype=bool, same shape as p
        Significance of each p-value.
    """
    order = np.argsort(p, axis=None)
    # Number of non-NaN p-values; NaNs sort to the end of `order`.
    m = order.size - np.sum(np.isnan(p))
    # Rank of every p-value (1 = smallest).
    ranks = np.empty(p.shape, int)
    ranks.flat[order] = np.arange(1, order.size + 1)
    below = p <= alpha * ranks / m
    if not np.any(below):
        return below
    cutoff = np.max(ranks[below])
    return ranks <= cutoff
Calculate significance by controlling for the false discovery rate. This function determines which of the p-values in `p` can be considered significant. Correction for multiple comparisons is performed by controlling the false discovery rate (FDR). The FDR is the maximum fraction of p-values that are wrongly considered significant [1]_. Parameters ---------- p : array, shape (channels, channels, nfft) p-values. alpha : float Maximum false discovery rate. Returns ------- s : array, dtype=bool, shape (channels, channels, nfft) Significance of each p-value. References ---------- .. [1] Y. Benjamini, Y. Hochberg. Controlling the false discovery rate: a practical and powerful approach to multiple testing. J. Royal Stat. Soc. Series B 57(1): 289-300, 1995.
def api_list(cls, api_key=djstripe_settings.STRIPE_SECRET_KEY, **kwargs):
    """Call the stripe API's list operation for this model.

    :param api_key: The api key to use for this request. Defaults to
        djstripe_settings.STRIPE_SECRET_KEY.
    :type api_key: string

    See Stripe documentation for accepted kwargs for each object.

    :returns: an iterator over all items in the query
    """
    listing = cls.stripe_class.list(api_key=api_key, **kwargs)
    return listing.auto_paging_iter()
Call the stripe API's list operation for this model. :param api_key: The api key to use for this request. Defaults to djstripe_settings.STRIPE_SECRET_KEY. :type api_key: string See Stripe documentation for accepted kwargs for each object. :returns: an iterator over all items in the query
def List(self, name, initial=None):
    """The list datatype.

    :param name: The name of the list.
    :keyword initial: Initial contents of the list.

    See :class:`redish.types.List`.
    """
    return types.List(name, self.api, initial=initial)
The list datatype. :param name: The name of the list. :keyword initial: Initial contents of the list. See :class:`redish.types.List`.
def fullversion():
    '''
    Return all version info from lvm version

    CLI Example:

    .. code-block:: bash

        salt '*' lvm.fullversion
    '''
    ret = {}
    cmd = 'lvm version'
    out = __salt__['cmd.run'](cmd).splitlines()
    for line in out:
        # Lines look like "LVM version:     2.02.x (date)". Split only on
        # the first colon so values containing ':' stay intact, and skip
        # lines without a colon instead of raising IndexError.
        comps = line.split(':', 1)
        if len(comps) < 2:
            continue
        ret[comps[0].strip()] = comps[1].strip()
    return ret
Return all version info from lvm version CLI Example: .. code-block:: bash salt '*' lvm.fullversion
def pop(self):
    """Remove the top container from the container stack."""
    if not self._containers:
        raise KittyException('no container to pop')
    self._containers.pop()
    # Propagate the pop to the container that is now current, if any.
    current = self._container()
    if current:
        current.pop()
Remove the top container from the container stack
def compile_mof_string(self, mof_str, namespace=None, search_paths=None, verbose=None):
    """Compile MOF definitions from a string into the mock repository.

    Parameters:
        mof_str (string): the MOF definitions to be compiled.
        namespace (string): target CIM namespace; defaults to the
            connection's default namespace.
        search_paths (iterable of string): directories searched for MOF
            dependent files.
        verbose (bool): enable more detailed compiler messages.

    Raises:
        IOError: MOF file not found.
        MOFParseError: compile error in the MOF.
        CIMError: CIM_ERR_INVALID_NAMESPACE if the namespace does not
            exist, or a failure related to the CIM objects in the mock
            repository. On failure the repository is left unchanged.
    """
    namespace = namespace or self.default_namespace
    self._validate_namespace(namespace)
    compiler = MOFCompiler(_MockMOFWBEMConnection(self),
                           search_paths=search_paths,
                           verbose=verbose)
    compiler.compile_string(mof_str, namespace)
Compile the MOF definitions in the specified string and add the resulting CIM objects to the specified CIM namespace of the mock repository. If the namespace does not exist, :exc:`~pywbem.CIMError` with status CIM_ERR_INVALID_NAMESPACE is raised. This method supports all MOF pragmas, and specifically the include pragma. If a CIM class or CIM qualifier type to be added already exists in the target namespace with the same name (comparing case insensitively), this method raises :exc:`~pywbem.CIMError`. If a CIM instance to be added already exists in the target namespace with the same keybinding values, this method raises :exc:`~pywbem.CIMError`. In all cases where this method raises an exception, the mock repository remains unchanged. Parameters: mof_str (:term:`string`): A string with the MOF definitions to be compiled. namespace (:term:`string`): The name of the target CIM namespace in the mock repository. This namespace is also used for lookup of any existing or dependent CIM objects. If `None`, the default namespace of the connection is used. search_paths (:term:`py:iterable` of :term:`string`): An iterable of directory path names where MOF dependent files will be looked up. See the description of the `search_path` init parameter of the :class:`~pywbem.MOFCompiler` class for more information on MOF dependent files. verbose (:class:`py:bool`): Controls whether to issue more detailed compiler messages. Raises: IOError: MOF file not found. :exc:`~pywbem.MOFParseError`: Compile error in the MOF. :exc:`~pywbem.CIMError`: CIM_ERR_INVALID_NAMESPACE: Namespace does not exist. :exc:`~pywbem.CIMError`: Failure related to the CIM objects in the mock repository.
def delete_io( hash ):
    """Deletes records associated with a particular hash

    :param str hash: The hash
    :rtype int: The number of records deleted

    NOTE(review): the parameter name shadows the builtin ``hash``.
    """
    global CACHE_
    # Refresh the in-memory cache from disk before mutating it (the True
    # argument forces a reload).
    load_cache(True)
    record_used('cache', hash)
    # Count the records before removing the entry.
    num_deleted = len(CACHE_['cache'].get(hash, []))
    if hash in CACHE_['cache']:
        del CACHE_['cache'][hash]
    # Persist the updated cache back to disk.
    write_out()
    return num_deleted
Deletes records associated with a particular hash :param str hash: The hash :rtype int: The number of records deleted
def set_allocated_time(self, time):
    """Sets the allocated time.

    arg:    time (osid.calendaring.Duration): the allocated time
    raise:  InvalidArgument - ``time`` is invalid
    raise:  NoAccess - ``Metadata.isReadOnly()`` is ``true``
    *compliance: mandatory -- This method must be implemented.*
    """
    if self.get_allocated_time_metadata().is_read_only():
        raise errors.NoAccess()
    if not self._is_valid_duration(
            time, self.get_allocated_time_metadata()):
        raise errors.InvalidArgument()
    # Store the duration components as a plain dict (renamed from `map`,
    # which shadowed the builtin).
    duration_map = {
        'days': time.days,
        'seconds': time.seconds,
        'microseconds': time.microseconds,
    }
    self._my_map['allocatedTime'] = duration_map
Sets the allocated time. arg: time (osid.calendaring.Duration): the allocated time raise: InvalidArgument - ``time`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.*
def csv_import(self, csv_source, encoding='utf-8', transforms=None, row_class=DataObject, **kwargs):
    """Imports the contents of a CSV-formatted file into this table.

    @param csv_source: CSV file - if a string is given, the file with
        that name will be opened, read, and closed; if a file object is
        given, then that object will be read as-is, and left for the
        caller to be closed.
    @type csv_source: string or file
    @param encoding: encoding to be used for reading source text if
        C{csv_source} is passed as a string filename (default='UTF-8')
    @param transforms: dict of transform functions by attribute name
        (optional); attributes without a transform are read as strings
    @param row_class: class used to construct each imported row
    @param kwargs: additional constructor arguments for csv
        C{DictReader} objects, such as C{delimiter} or C{fieldnames};
        passed directly through to the csv C{DictReader} constructor
    """
    # Filter out this method's own named parameters so that only true
    # DictReader options reach the csv module.
    reader_args = dict((k, v) for k, v in kwargs.items() if k not in ['encoding', 'csv_source', 'transforms', 'row_class'])
    reader = lambda src: csv.DictReader(src, **reader_args)
    return self._import(csv_source, encoding, transforms, reader=reader, row_class=row_class)
Imports the contents of a CSV-formatted file into this table. @param csv_source: CSV file - if a string is given, the file with that name will be opened, read, and closed; if a file object is given, then that object will be read as-is, and left for the caller to be closed. @type csv_source: string or file @param encoding: encoding to be used for reading source text if C{csv_source} is passed as a string filename @type encoding: string (default='UTF-8') @param transforms: dict of functions by attribute name; if given, each attribute will be transformed using the corresponding transform; if there is no matching transform, the attribute will be read as a string (default); the transform function can also be defined as a (function, default-value) tuple; if there is an Exception raised by the transform function, then the attribute will be set to the given default value @type transforms: dict (optional) @param kwargs: additional constructor arguments for csv C{DictReader} objects, such as C{delimiter} or C{fieldnames}; these are passed directly through to the csv C{DictReader} constructor @type kwargs: named arguments (optional)
def _strip_ctype(name, ctype, protocol=2):
    """Strip the channel-type suffix from a channel name for the given
    NDS server version.

    NDS1 servers store trend channels *including* the ',<ctype>' suffix
    but raw channels without it; NDS2 never includes it.

    :param name: channel name, possibly ending in ',<ctype>'
    :param ctype: fallback channel type value when no suffix is present
    :param protocol: NDS server major version (1 or 2)
    :returns: (name, ctype) tuple with the suffix handled per protocol
    """
    if ',' in name:
        name, _, suffix = name.rpartition(',')
        ctype = Nds2ChannelType.find(suffix).value
        trend_types = (Nds2ChannelType.STREND.value,
                       Nds2ChannelType.MTREND.value)
        if protocol == 1 and ctype in trend_types:
            # NDS1 keeps the suffix for trend channels.
            name += ',{0}'.format(suffix)
    return name, ctype
Strip the ctype from a channel name for the given nds server version This is needed because NDS1 servers store trend channels _including_ the suffix, but not raw channels, and NDS2 doesn't do this.
def grab_project_data(prj):
    """From the given Project, grab Sample-independent data.

    Collects the Sample-independent sections present on `prj`; sections
    the Project lacks are skipped with a debug log message.

    :param Project prj: Project from which to grab data
    :return Mapping: Sample-independent data sections from given Project
    """
    if not prj:
        return {}
    collected = {}
    for section in SAMPLE_INDEPENDENT_PROJECT_SECTIONS:
        if hasattr(prj, section):
            collected[section] = getattr(prj, section)
        else:
            _LOGGER.debug("Project lacks section '%s', skipping", section)
    return collected
From the given Project, grab Sample-independent data. There are some aspects of a Project of which it's beneficial for a Sample to be aware, particularly for post-hoc analysis. Since Sample objects within a Project are mutually independent, though, each doesn't need to know about any of the others. A Project manages its, Sample instances, so for each Sample knowledge of Project data is limited. This method facilitates adoption of that conceptual model. :param Project prj: Project from which to grab data :return Mapping: Sample-independent data sections from given Project
def from_coords(cls, x, y):
    """Creates an ECPoint object from the X and Y integer coordinates of
    the point

    :param x: The X coordinate, as an integer
    :param y: The Y coordinate, as an integer

    :return: An ECPoint object
    """
    # BUG FIX: the previous math.log-based byte-length computation
    # under-counted at exact powers of 2**(8k) (e.g. 256 -> 1 byte
    # instead of 2) and produced 0 bytes for a coordinate equal to 1.
    # bit_length() is exact for arbitrary-size integers.
    x_bytes = (x.bit_length() + 7) // 8
    y_bytes = (y.bit_length() + 7) // 8
    # Both coordinates are padded to the width of the larger one; use at
    # least one byte so a zero coordinate still encodes.
    num_bytes = max(x_bytes, y_bytes, 1)
    byte_string = b'\x04'  # uncompressed-point indicator
    byte_string += int_to_bytes(x, width=num_bytes)
    byte_string += int_to_bytes(y, width=num_bytes)
    return cls(byte_string)
Creates an ECPoint object from the X and Y integer coordinates of the point :param x: The X coordinate, as an integer :param y: The Y coordinate, as an integer :return: An ECPoint object
def py_doc_trim(docstring):
    """Trim a python doc string.

    Follows the PEP 257 trimming algorithm (nipped from
    https://www.python.org/dev/peps/pep-0257/), then replaces single
    newlines with a space while leaving runs of consecutive newlines
    intact.
    """
    if not docstring:
        return ''
    lines = docstring.expandtabs().splitlines()
    # Determine the minimum indentation of all lines after the first.
    # sys.maxsize replaces the Python-2-only sys.maxint and exists on
    # both Python 2.6+ and Python 3.
    indent = sys.maxsize
    for line in lines[1:]:
        stripped = line.lstrip()
        if stripped:
            indent = min(indent, len(line) - len(stripped))
    # Remove indentation (the first line is special-cased).
    trimmed = [lines[0].strip()]
    if indent < sys.maxsize:
        for line in lines[1:]:
            trimmed.append(line[indent:].rstrip())
    # Strip trailing and leading blank lines.
    while trimmed and not trimmed[-1]:
        trimmed.pop()
    while trimmed and not trimmed[0]:
        trimmed.pop(0)
    joined = '\n'.join(trimmed)
    # Collapse single newlines to spaces; multiple consecutive newlines
    # are left intact by the substitution pattern.
    return newline_substitution_regex.sub(" ", joined)
Trim a python doc string. This example is nipped from https://www.python.org/dev/peps/pep-0257/, which describes how to conventionally format and trim docstrings. It has been modified to replace single newlines with a space, but leave multiple consecutive newlines in tact.
def get_parameters(self):
    """returns a dictionary with the processor's stored parameters

    Reads each attribute named in PARAMETERS from this instance and maps
    the parameter name to its current value.
    """
    # BUG FIX: attribute values were previously read from an undefined
    # global `processor` instead of `self`, raising NameError.
    parameter_names = self.PARAMETERS.keys()
    parameter_values = [getattr(self, n) for n in parameter_names]
    return dict(zip(parameter_names, parameter_values))
returns a dictionary with the processor's stored parameters
def _set_response_headers(self, response): options = self._get_local_options() self._set_feature_headers(response.headers, options) self._set_frame_options_headers(response.headers, options) self._set_content_security_policy_headers(response.headers, options) self._set_hsts_headers(response.headers) self._set_referrer_policy_headers(response.headers) return response
Applies all configured headers to the given response.
def _make_complex(self):
    """Convert the real SHCoeffs class to the complex class."""
    # Repack the real coefficients into (real, imaginary) components.
    rcomplex_coeffs = _shtools.SHrtoc(self.coeffs, convention=1, switchcs=0)
    complex_coeffs = _np.zeros((2, self.lmax+1, self.lmax+1), dtype='complex')
    # Index 0: positive orders, combining the two real parts into one
    # complex array.
    complex_coeffs[0, :, :] = (rcomplex_coeffs[0, :, :] + 1j * rcomplex_coeffs[1, :, :])
    # Index 1: the conjugate, with a sign flip for odd m.
    # NOTE(review): the values from degrees() are used as the order
    # index m here — confirm degrees()/orders naming against the class.
    complex_coeffs[1, :, :] = complex_coeffs[0, :, :].conjugate()
    for m in self.degrees():
        if m % 2 == 1:
            complex_coeffs[1, :, m] = - complex_coeffs[1, :, m]
    return SHCoeffs.from_array(complex_coeffs,
                               normalization=self.normalization,
                               csphase=self.csphase, copy=False)
Convert the real SHCoeffs class to the complex class.
def top_level(self):
    """Print just the top level of an object, being sure to show where
    it goes deeper.

    Nested dicts and lists are summarized as '{...}'/'{}' or
    '[...]'/'[]' (wrapped in StrReprWrapper); other values pass through
    unchanged. A non-dict object is returned as-is.
    """
    if not isinstance(self.obj, dict):
        return self.obj
    summary = {}
    for key, value in self.obj.items():
        if isinstance(value, dict):
            summary[key] = StrReprWrapper('{...}' if value else '{}')
        elif isinstance(value, list):
            summary[key] = StrReprWrapper('[...]' if value else '[]')
        else:
            summary[key] = value
    return summary
Print just the top level of an object, being sure to show where it goes deeper
def process_requests(self):
    """Loop that runs in a thread to process requests synchronously.

    Pulls (id, args, kwargs) tuples off the request queue forever and
    stores each result in self.results keyed by the request id.
    """
    while True:
        id, args, kwargs = self.request_queue.get()
        try:
            response = self._make_request(*args, **kwargs)
        except Exception as e:
            # Deliver the exception object to the requester instead of
            # letting it kill the worker thread.
            response = e
        self.results[id] = response
Loop that runs in a thread to process requests synchronously.
def get_edges_as_list(self):
    """Return a list with all the edge objects.

    @rtype: list of L{Cedge}
    @return: the edge objects
    """
    return [Cedge(node) for node in self.__get_edge_nodes()]
Returns all the edge objects as a list
@rtype: list of L{Cedge}
@return: the edge objects
def hideFromPublicBundle(self, otpk_pub):
    """Hide a one-time pre key from the public bundle.

    :param otpk_pub: The public key of the one-time pre key to hide,
        encoded as a bytes-like object.
    """
    # Check the signed pre key timestamp first.
    # NOTE(review): the exact refresh behavior is defined elsewhere.
    self.__checkSPKTimestamp()
    for otpk in self.__otpks:
        if otpk.pub == otpk_pub:
            # Move the matching key from the public list to the hidden
            # list, then refill the public list.
            # NOTE(review): the list is mutated while being iterated;
            # safe only if at most one key matches — confirm.
            self.__otpks.remove(otpk)
            self.__hidden_otpks.append(otpk)
            self.__refillOTPKs()
Hide a one-time pre key from the public bundle. :param otpk_pub: The public key of the one-time pre key to hide, encoded as a bytes-like object.
def detect(args):
    """Detect the language of each line.

    Blank lines are skipped; for every other line the detected language
    name is printed left-padded beside the stripped line text.
    """
    for line in args.input:
        stripped = line.strip()
        if not stripped:
            continue
        _print("{:<20}{}".format(Detector(line).language.name, stripped))
Detect the language of each line.
def delete_folder(self, id, force=None):
    """Delete folder.

    Remove the specified folder. You can only delete empty folders
    unless you set the 'force' flag.

    :param id: the folder id
    :param force: optionally force deletion of a non-empty folder
    """
    path = {"id": id}
    data = {}
    params = {}
    if force is not None:
        params["force"] = force
    self.logger.debug("DELETE /api/v1/folders/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("DELETE", "/api/v1/folders/{id}".format(**path), data=data, params=params, no_data=True)
Delete folder. Remove the specified folder. You can only delete empty folders unless you set the 'force' flag