code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def _call_pyfftw(self, x, out, **kwargs): """Implement ``self(x[, out, **kwargs])`` for pyfftw back-end. Parameters ---------- x : `numpy.ndarray` Array representing the function to be transformed out : `numpy.ndarray` Array to which the output is written planning_effort : {'estimate', 'measure', 'patient', 'exhaustive'} Flag for the amount of effort put into finding an optimal FFTW plan. See the `FFTW doc on planner flags <http://www.fftw.org/fftw3_doc/Planner-Flags.html>`_. planning_timelimit : float or ``None``, optional Limit planning time to roughly this many seconds. Default: ``None`` (no limit) threads : int, optional Number of threads to use. Default: 1 Returns ------- out : `numpy.ndarray` Result of the transform. The returned object is a reference to the input parameter ``out``. """ # We pop some kwargs options here so that we always use the ones # given during init or implicitly assumed. kwargs.pop('axes', None) kwargs.pop('halfcomplex', None) kwargs.pop('normalise_idft', None) # We use `False` # Pre-processing before calculating the sums, in-place for C2C and R2C if self.halfcomplex: preproc = self._preprocess(x) assert is_real_dtype(preproc.dtype) else: # out is preproc in this case preproc = self._preprocess(x, out=out) assert is_complex_floating_dtype(preproc.dtype) # The actual call to the FFT library. We store the plan for re-use. # The FFT is calculated in-place, except if the range is real and # we don't use halfcomplex. direction = 'forward' if self.sign == '-' else 'backward' self._fftw_plan = pyfftw_call( preproc, out, direction=direction, halfcomplex=self.halfcomplex, axes=self.axes, normalise_idft=False, **kwargs) assert is_complex_floating_dtype(out.dtype) # Post-processing accounting for shift, scaling and interpolation out = self._postprocess(out, out=out) assert is_complex_floating_dtype(out.dtype) return out
Implement ``self(x[, out, **kwargs])`` for pyfftw back-end. Parameters ---------- x : `numpy.ndarray` Array representing the function to be transformed out : `numpy.ndarray` Array to which the output is written planning_effort : {'estimate', 'measure', 'patient', 'exhaustive'} Flag for the amount of effort put into finding an optimal FFTW plan. See the `FFTW doc on planner flags <http://www.fftw.org/fftw3_doc/Planner-Flags.html>`_. planning_timelimit : float or ``None``, optional Limit planning time to roughly this many seconds. Default: ``None`` (no limit) threads : int, optional Number of threads to use. Default: 1 Returns ------- out : `numpy.ndarray` Result of the transform. The returned object is a reference to the input parameter ``out``.
def get_full_pipe(sol, base=()):
    """
    Returns the full pipe of a dispatch run.

    :param sol:
        A Solution object.
    :type sol: schedula.utils.Solution

    :param base:
        Base node id.
    :type base: tuple[str]

    :return:
        Full pipe of a dispatch run.
    :rtype: DspPipe
    """
    pipe, i = DspPipe(), len(base)

    for p in sol._pipe:
        n, s = p[-1]
        d = s.dsp
        p = {'task': p}

        # Attach any error recorded for this node.
        if n in s._errors:
            p['error'] = s._errors[n]

        node_id = s.full_name + (n,)
        # Every node in the pipe must live under the given base id.
        assert base == node_id[:i], '%s != %s' % (node_id[:i], base)
        n_id = node_id[i:]

        n, path = d.get_node(n, node_attr=None)
        if n['type'] == 'function' and 'function' in n:
            # Recurse into sub-dispatcher solutions, when one was produced.
            try:
                sub_sol = s.workflow.node[path[-1]]['solution']
                sp = get_full_pipe(sub_sol, base=node_id)
                if sp:
                    p['sub_pipe'] = sp
            except KeyError:
                pass

        pipe[bypass(*n_id)] = p

    return pipe
Returns the full pipe of a dispatch run. :param sol: A Solution object. :type sol: schedula.utils.Solution :param base: Base node id. :type base: tuple[str] :return: Full pipe of a dispatch run. :rtype: DspPipe
def get_upstream_paths(self, port):
    """Build the full URLs of the upstream apps for the given port.

    :param int port: The port used by the replay and cdx servers
    :return: Mapping with the upstream paths (replay, cdx-server,
        plus record when a recorder is configured)
    :rtype: dict[str, str]
    """
    paths = {}
    paths['replay'] = self.REPLAY_API % port
    paths['cdx-server'] = self.CDX_API % port

    recorder = self.recorder_path
    if recorder:
        paths['record'] = recorder

    return paths
Retrieve a dictionary containing the full URLs of the upstream apps :param int port: The port used by the replay and cdx servers :return: A dictionary containing the upstream paths (replay, cdx-server, record [if enabled]) :rtype: dict[str, str]
def _cromwell_debug(metadata):
    """Format Cromwell failures to make debugging easier.

    Walks the ``calls`` section of Cromwell run metadata, collects every
    call entry that reports failures, and prints the relevant log file
    locations for each failing step.

    :param metadata: Cromwell run metadata; must contain a "calls" key.
    """
    def get_failed_calls(cur, key=None):
        # Recursively collect (path, call) pairs for every dict that has
        # both "failures" and "callRoot" -- i.e. a failed call entry.
        if key is None:
            key = []
        out = []
        if isinstance(cur, dict) and "failures" in cur and "callRoot" in cur:
            out.append((key, cur))
        elif isinstance(cur, dict):
            for k, v in cur.items():
                out.extend(get_failed_calls(v, key + [k]))
        elif isinstance(cur, (list, tuple)):
            for i, v in enumerate(cur):
                out.extend(get_failed_calls(v, key + [i]))
        return out

    print("Failed bcbio Cromwell run")
    print("-------------------------")
    for fail_k, fail_call in get_failed_calls(metadata["calls"]):
        # Re-root the call directory under the local cromwell_work folder.
        root_dir = os.path.join("cromwell_work",
                                os.path.relpath(fail_call["callRoot"]))
        print("Failure in step: %s" % ".".join([str(x) for x in fail_k]))
        print(" bcbio log file : %s" % os.path.join(root_dir, "execution", "log", "bcbio-nextgen-debug.log"))
        print(" bcbio commands file: %s" % os.path.join(root_dir, "execution", "log", "bcbio-nextgen-commands.log"))
        print(" Cromwell directory : %s" % root_dir)
        print()
Format Cromwell failures to make debugging easier.
def credentials_loader(self, in_credentials: str = "client_secrets.json") -> dict:
    """Loads API credentials from a file, JSON or INI.

    :param str in_credentials: path to the credentials file. By default,
        look for a client_secrets.json file.

    :returns: a normalized credentials dictionary with keys such as
        ``auth_mode``, ``client_id``, ``client_secret``, ``scopes``
        (JSON only), ``uri_auth``, ``uri_token``, ``uri_base`` and
        ``uri_redirect``.
    :raises IOError: if the file does not exist.
    :raises ValueError: if the extension or the internal structure is
        not one of the expected forms.
    """
    accepted_extensions = (".ini", ".json")
    # checks
    if not path.isfile(in_credentials):
        raise IOError("Credentials file doesn't exist: {}".format(in_credentials))
    else:
        in_credentials = path.normpath(in_credentials)
    if path.splitext(in_credentials)[1] not in accepted_extensions:
        raise ValueError(
            "Extension of credentials file must be one of {}".format(
                accepted_extensions
            )
        )
    else:
        kind = path.splitext(in_credentials)[1]

    # load, check and set
    if kind == ".json":
        with open(in_credentials, "r") as f:
            in_auth = json.loads(f.read())

        # check structure
        heads = ("installed", "web")
        if not set(in_auth).intersection(set(heads)):
            raise ValueError(
                "Input JSON structure is not as expected."
                " First key must be one of: {}".format(heads)
            )

        # set
        if "web" in in_auth:
            # json structure for group application
            auth_settings = in_auth.get("web")
            out_auth = {
                "auth_mode": "group",
                "client_id": auth_settings.get("client_id"),
                "client_secret": auth_settings.get("client_secret"),
                # if not specified, must be a former file then set classic scope
                "scopes": auth_settings.get("scopes", ["resources:read"]),
                "uri_auth": auth_settings.get("auth_uri"),
                "uri_token": auth_settings.get("token_uri"),
                "uri_base": self.get_url_base_from_url_token(
                    auth_settings.get("token_uri")
                ),
                # group flows never use a redirect URI
                "uri_redirect": None,
            }
        else:
            # assuming in_auth == 'installed'
            auth_settings = in_auth.get("installed")
            out_auth = {
                "auth_mode": "user",
                "client_id": auth_settings.get("client_id"),
                "client_secret": auth_settings.get("client_secret"),
                # if not specified, must be a former file then set classic scope
                "scopes": auth_settings.get("scopes", ["resources:read"]),
                "uri_auth": auth_settings.get("auth_uri"),
                "uri_token": auth_settings.get("token_uri"),
                "uri_base": self.get_url_base_from_url_token(
                    auth_settings.get("token_uri")
                ),
                "uri_redirect": auth_settings.get("redirect_uris", None),
            }
    else:
        # assuming file is an .ini
        ini_parser = ConfigParser()
        ini_parser.read(in_credentials)

        # check structure
        if "auth" in ini_parser._sections:
            auth_settings = ini_parser["auth"]
        else:
            raise ValueError(
                "Input INI structure is not as expected."
                " Section of credentials must be named: auth"
            )

        # set
        out_auth = {
            "auth_mode": auth_settings.get("CLIENT_TYPE"),
            "client_id": auth_settings.get("CLIENT_ID"),
            "client_secret": auth_settings.get("CLIENT_SECRET"),
            "uri_auth": auth_settings.get("URI_AUTH"),
            "uri_token": auth_settings.get("URI_TOKEN"),
            "uri_base": self.get_url_base_from_url_token(
                auth_settings.get("URI_TOKEN")
            ),
            "uri_redirect": auth_settings.get("URI_REDIRECT"),
        }

    # method ending
    return out_auth
Loads API credentials from a file, JSON or INI. :param str in_credentials: path to the credentials file. By default, look for a client_secrets.json file.
def addItem(self, item, message=None):
    """Add a new Item class object to the repository.

    Creates a fresh Version for this repo, attaches ``item`` to it and
    saves it with ``message``.

    :param item: the Item to add; ``item.path`` is used in the default
        commit message.
    :param message: optional commit message; defaults to
        'Adding item <path>'.
    :raises RepoError: if the underlying version operation fails.
    """
    if message is None:
        message = 'Adding item %s' % item.path
    try:
        v = Version.new(repo=self)
        v.addItem(item)
        v.save(message)
    # BUG FIX: 'except VersionError, e:' is Python-2-only syntax and a
    # SyntaxError on Python 3; 'as' works on Python 2.6+ and 3.x.
    except VersionError as e:
        raise RepoError(e)
add a new Item class object
def elixir_decode(elixir_filename):
    """
    Takes an elixir style file name and decodes its content into FITS
    header keywords.

    Elixir filenames have the format
    RUNID.TYPE.FILTER/EXPTIME.CHIPID.VERSION.fits

    :param elixir_filename: the elixir-style file name to parse.
    :return: a pyfits.Header populated with the decoded keywords.
    :raises ValueError: if the name does not split into at least 5
        dot-separated parts.
    """
    import re
    import pyfits

    parts_RE = re.compile(r'([^\.\s]+)')
    dataset_name = parts_RE.findall(elixir_filename)

    # check that this was a valid elixir_filename
    if not dataset_name or len(dataset_name) < 5:
        raise ValueError('String %s does not parse as elixir filename'
                         % elixir_filename)

    comments = {'exptime': 'Integration time (seconds)',
                'filter': 'Name of filter in position ',
                'crunid': 'CFHT Q RunID',
                'obstype': 'Observation or Exposure type',
                'imageid': 'CCD chip number',
                'filename': 'file name at creation of this MEF file'}

    keywords = {}
    keywords['filename'] = elixir_filename
    keywords['runid'] = dataset_name[0]
    keywords['obstype'] = dataset_name[1]
    keywords['exptime'] = None
    keywords['filter'] = None

    # if the third part of the name is all numbers we assume exposure time
    if re.match(r'\d+', dataset_name[2]):
        # BUG FIX: was 'keyword[...]' -- an undefined name (NameError).
        keywords['exptime'] = int(dataset_name[2])
    else:
        # BUG FIX: was 'keyword[...]' -- an undefined name (NameError).
        keywords['filter'] = dataset_name[2]
    keywords['imageid'] = dataset_name[3]
    keywords['version'] = dataset_name[4]

    header = pyfits.Header()
    for keyword in keywords.keys():
        if keywords[keyword]:
            # BUG FIX: was 'comment[keyword]' (undefined name); also use
            # .get() because 'runid'/'version' have no canned comment.
            header.update(keyword, keywords[keyword],
                          comment=comments.get(keyword, ''))
    return header
Takes an elixir style file name and decodes its content. Values returned as a dictionary. Elixir filenames have the format RUNID.TYPE.FILTER/EXPTIME.CHIPID.VERSION.fits
def wrap(self, stream, name=None, filename=None):
    """This is called with the stream as returned by `tokenize` and wraps
    every token in a :class:`Token` and converts the value.

    :param stream: iterable of ``(lineno, token, value)`` triples.
    :param name: template name, used only in error reporting.
    :param filename: template filename, used only in error reporting.
    :raises TemplateSyntaxError: on invalid identifiers or
        un-unescapable string literals.
    """
    for lineno, token, value in stream:
        if token in ignored_tokens:
            continue
        elif token == 'linestatement_begin':
            token = 'block_begin'
        elif token == 'linestatement_end':
            token = 'block_end'
        # we are not interested in those tokens in the parser
        elif token in ('raw_begin', 'raw_end'):
            continue
        elif token == 'data':
            value = self._normalize_newlines(value)
        elif token == 'keyword':
            token = value
        elif token == 'name':
            value = str(value)
            # NOTE(review): check_ident is presumably a module-level flag
            # enabling strict identifier validation -- confirm at its
            # definition site.
            if check_ident and not value.isidentifier():
                raise TemplateSyntaxError(
                    'Invalid character in identifier',
                    lineno, name, filename)
        elif token == 'string':
            # try to unescape string
            try:
                value = self._normalize_newlines(value[1:-1]) \
                    .encode('ascii', 'backslashreplace') \
                    .decode('unicode-escape')
            except Exception as e:
                msg = str(e).split(':')[-1].strip()
                raise TemplateSyntaxError(msg, lineno, name, filename)
        elif token == 'integer':
            value = int(value)
        elif token == 'float':
            value = float(value)
        elif token == 'operator':
            token = operators[value]
        yield Token(lineno, token, value)
This is called with the stream as returned by `tokenize` and wraps every token in a :class:`Token` and converts the value.
def print_http_nfc_lease_info(info):
    """
    Prints information about the lease, such as the entity covered by
    the lease, and HTTP URLs for up/downloading file backings.

    :param info: the lease info object to report on
    :type info: vim.HttpNfcLease.Info
    :return: None
    """
    # BUG FIX: the original used Python-2-only 'print' statements, which
    # are a SyntaxError on Python 3; converted to print() calls.
    print('Lease timeout: {0.leaseTimeout}\n'
          'Disk Capacity KB: {0.totalDiskCapacityInKB}'.format(info))
    device_number = 1
    if info.deviceUrl:
        for device_url in info.deviceUrl:
            print('HttpNfcLeaseDeviceUrl: {1}\n'
                  'Device URL Import Key: {0.importKey}\n'
                  'Device URL Key: {0.key}\n'
                  'Device URL: {0.url}\n'
                  'Device URL Size: {0.fileSize}\n'
                  'SSL Thumbprint: {0.sslThumbprint}\n'.format(device_url,
                                                               device_number))
            if not device_url.targetId:
                print("No targetId found for this device")
                print("Device is not eligible for export. "
                      "This could be a mounted iso or img of some sort")
                print("It will NOT be downloaded\n")
            device_number += 1
    else:
        print('No devices were found.')
Prints information about the lease, such as the entity covered by the lease, and HTTP URLs for up/downloading file backings. :param info: :type info: vim.HttpNfcLease.Info :return:
def _brace_key(self, key):
    """Wrap ``key`` in literal braces: 'x' -> '{x}'.

    Integer keys are converted to ``str`` first; any other key type is
    kept, and the surrounding braces are built from that same type so
    concatenation stays homogeneous (e.g. bytes-like keys).
    """
    if isinstance(key, six.integer_types):
        key_type = str
        key = key_type(key)
    else:
        key_type = type(key)
    return key_type(u'{') + key + key_type(u'}')
key: 'x' -> '{x}'
def clean(cls, path):
    '''Clean up all the files in a provided path'''
    for entry in os.listdir(path):
        target = os.path.abspath(os.path.join(path, entry))
        if not os.path.isdir(target):
            logger.debug('Removing file %s' % target)
            os.remove(target)
        else:
            logger.debug('Removing directory %s' % target)
            shutil.rmtree(target)
Clean up all the files in a provided path
def _apply_sub_frames(cls, documents, subs): """Convert embedded documents to sub-frames for one or more documents""" # Dereference each reference for path, projection in subs.items(): # Get the SubFrame class we'll use to wrap the embedded document sub = None expect_map = False if '$sub' in projection: sub = projection.pop('$sub') elif '$sub.' in projection: sub = projection.pop('$sub.') expect_map = True else: continue # Add sub-frames to the documents raw_subs = [] for document in documents: value = cls._path_to_value(path, document) if value is None: continue if isinstance(value, dict): if expect_map: # Dictionary of embedded documents raw_subs += value.values() for k, v in value.items(): if isinstance(v ,list): value[k] = [ sub(u) for u in v if isinstance(u, dict)] else: value[k] = sub(v) # Single embedded document else: raw_subs.append(value) value = sub(value) elif isinstance(value, list): # List of embedded documents raw_subs += value value = [sub(v) for v in value if isinstance(v, dict)] else: raise TypeError('Not a supported sub-frame type') child_document = document keys = cls._path_to_keys(path) for key in keys[:-1]: child_document = child_document[key] child_document[keys[-1]] = value # Apply the projection to the list of sub frames if projection: sub._apply_projection(raw_subs, projection)
Convert embedded documents to sub-frames for one or more documents
def _getKeyForUrl(url, existing=None): """ Extracts a key from a given s3:// URL. On return, but not on exceptions, this method leaks an S3Connection object. The caller is responsible to close that by calling key.bucket.connection.close(). :param bool existing: If True, key is expected to exist. If False, key is expected not to exists and it will be created. If None, the key will be created if it doesn't exist. :rtype: Key """ # Get the bucket's region to avoid a redirect per request try: with closing(boto.connect_s3()) as s3: location = s3.get_bucket(url.netloc).get_location() region = bucket_location_to_region(location) except S3ResponseError as e: if e.error_code == 'AccessDenied': s3 = boto.connect_s3() else: raise else: # Note that caller is responsible for closing the connection s3 = boto.s3.connect_to_region(region) try: keyName = url.path[1:] bucketName = url.netloc bucket = s3.get_bucket(bucketName) key = bucket.get_key(keyName.encode('utf-8')) if existing is True: if key is None: raise RuntimeError("Key '%s' does not exist in bucket '%s'." % (keyName, bucketName)) elif existing is False: if key is not None: raise RuntimeError("Key '%s' exists in bucket '%s'." % (keyName, bucketName)) elif existing is None: pass else: assert False if key is None: key = bucket.new_key(keyName) except: with panic(): s3.close() else: return key
Extracts a key from a given s3:// URL. On return, but not on exceptions, this method leaks an S3Connection object. The caller is responsible to close that by calling key.bucket.connection.close(). :param bool existing: If True, key is expected to exist. If False, key is expected not to exist and it will be created. If None, the key will be created if it doesn't exist. :rtype: Key
def get_client_address(self):
    """
    Build the AMQPS connection address string for this event hub.

    :rtype: str
    """
    quoted_policy = urllib.parse.quote_plus(self.policy)
    quoted_key = urllib.parse.quote_plus(self.sas_key)
    return "amqps://{}:{}@{}.{}:5671/{}".format(
        quoted_policy,
        quoted_key,
        self.sb_name,
        self.namespace_suffix,
        self.eh_name)
Returns the AMQPS connection address string for the event hub. :rtype: str
def sql_program_name_func(command):
    """Extract program name from `command`.

    The first space-separated token that is not a VAR=value environment
    assignment is taken as the program name; if every token contains
    '=', the first token is returned.

    >>> sql_program_name_func('ls')
    'ls'
    >>> sql_program_name_func('git status')
    'git'
    >>> sql_program_name_func('EMACS=emacs make')
    'make'

    :type command: str
    """
    tokens = command.split(' ')
    return next((tok for tok in tokens if '=' not in tok), tokens[0])
Extract program name from `command`. >>> sql_program_name_func('ls') 'ls' >>> sql_program_name_func('git status') 'git' >>> sql_program_name_func('EMACS=emacs make') 'make' :type command: str
def get_replacement_method(method_to_patch, side_effect=UNDEFINED, rvalue=UNDEFINED, ignore=UNDEFINED, callback=UNDEFINED, context=UNDEFINED, subsequent_rvalue=UNDEFINED):
    """
    Returns the method to be applied in place of an original method.
    This method either executes a side effect, returns an rvalue, or
    implements caching in place of the method_to_patch.

    :param function method_to_patch: A reference to the method that will be patched.
    :param mixed side_effect: The side effect to execute. Either a callable with the same parameters as the target, or an exception.
    :param mixed rvalue: The value that should be immediately returned without executing the target.
    :param caliendo.Ignore ignore: The parameters that should be ignored when determining cachekeys. These are typically the dynamic values such as datetime.datetime.now() or a setting from an environment specific file.
    :param function callback: A pickleable callback to execute when the patched method is called and the cache is hit. (has to have been cached the first time).
    :param caliendo.hooks.Context context: The context this patch should be executed under. Generally reserved for internal use. The vast majority of use cases should leave this parameter alone.
    :param mixed subsequent_rvalue: If passed; this will be the return value each time this method is run regardless of what is returned when it is initially cached. Caching for this method will be skipped. This is useful when the method returns something unpickleable but we still need to stub it out.

    :rtype: function
    :returns: The function to replace all references to method_to_patch with.
    """
    def patch_with(*args, **kwargs):
        # A side effect takes precedence over a canned return value.
        if side_effect != UNDEFINED:
            return execute_side_effect(side_effect, args, kwargs)
        # A fixed rvalue short-circuits execution of the target entirely.
        if rvalue != UNDEFINED:
            return rvalue

        # Default behaviour: run the target through the caliendo cache.
        return cache(method_to_patch, args=args, kwargs=kwargs, ignore=ignore,
                     call_stack=context.stack, callback=callback,
                     subsequent_rvalue=subsequent_rvalue)

    return patch_with
Returns the method to be applied in place of an original method. This method either executes a side effect, returns an rvalue, or implements caching in place of the method_to_patch :param function method_to_patch: A reference to the method that will be patched. :param mixed side_effect: The side effect to execute. Either a callable with the same parameters as the target, or an exception. :param mixed rvalue: The value that should be immediately returned without executing the target. :param caliendo.Ignore ignore: The parameters that should be ignored when determining cachekeys. These are typically the dynamic values such as datetime.datetime.now() or a setting from an environment specific file. :param function callback: A pickleable callback to execute when the patched method is called and the cache is hit. (has to have been cached the first time). :param caliendo.hooks.Context ctxt: The context this patch should be executed under. Generally reserved for internal use. The vast majority of use cases should leave this parameter alone. :param mixed subsequent_rvalue: If passed; this will be the return value each time this method is run regardless of what is returned when it is initially cached. Caching for this method will be skipped. This is useful when the method returns something unpickleable but we still need to stub it out. :rtype: function :returns: The function to replace all references to method_to_patch with.
def get_port(self, id_or_uri, port_id_or_uri):
    """
    Gets an interconnect port.

    Args:
        id_or_uri: Can be either the interconnect id or uri.
        port_id_or_uri: The interconnect port id or uri.

    Returns:
        dict: The interconnect port.
    """
    port_uri = self._client.build_subresource_uri(
        id_or_uri, port_id_or_uri, "ports")
    return self._client.get(port_uri)
Gets an interconnect port. Args: id_or_uri: Can be either the interconnect id or uri. port_id_or_uri: The interconnect port id or uri. Returns: dict: The interconnect port.
def list():
    """Print a listing of known running TensorBoard instances.

    TensorBoard instances that were killed uncleanly (e.g., with SIGKILL
    or SIGQUIT) may appear in this list even if they are no longer
    running. Conversely, this list may be missing some entries if your
    operating system's temporary directory has been cleared since a
    still-running TensorBoard instance started.
    """
    # NOTE: intentionally shadows the builtin `list`; the name doubles
    # as the CLI subcommand name.
    infos = manager.get_all()
    if not infos:
        print("No known TensorBoard instances running.")
        return
    print("Known TensorBoard instances:")
    for info in infos:
        template = " - port {port}: {data_source} (started {delta} ago; pid {pid})"
        print(template.format(
            port=info.port,
            data_source=manager.data_source_from_info(info),
            delta=_time_delta_from_info(info),
            pid=info.pid,
        ))
Print a listing of known running TensorBoard instances. TensorBoard instances that were killed uncleanly (e.g., with SIGKILL or SIGQUIT) may appear in this list even if they are no longer running. Conversely, this list may be missing some entries if your operating system's temporary directory has been cleared since a still-running TensorBoard instance started.
def make_config_file(guided=False):
    """
    Build a config file and write it to disk.

    Options: --auto, --guided, --manual
    Places for the file: --inplace, --user

    :param guided: passed through to the location and data builders;
        presumably enables an interactive flow -- confirm against
        ``_make_config_location`` / ``make_config_data``.
    """
    config_path = _make_config_location(guided=guided)
    config_data = make_config_data(guided=guided)
    write_config_file(config_path, config_data)
Options: --auto, --guided, --manual Places for the file: --inplace, --user
def draw(self):
    """draw the curses ui on the screen, handle scroll if needed"""
    self.screen.clear()
    x, y = 1, 1  # start point
    max_y, max_x = self.screen.getmaxyx()
    max_rows = max_y - y  # the max rows we can draw
    lines, current_line = self.get_lines()

    # calculate how many lines we should scroll, relative to the top
    scroll_top = getattr(self, 'scroll_top', 0)
    if current_line <= scroll_top:
        # selection moved above the window: snap back to the top
        scroll_top = 0
    elif current_line - scroll_top > max_rows:
        # selection moved below the window: scroll down just enough
        scroll_top = current_line - max_rows
    self.scroll_top = scroll_top

    lines_to_draw = lines[scroll_top:scroll_top+max_rows]
    for line in lines_to_draw:
        if type(line) is tuple:
            # (text, attr) pair -- draw with the given curses attribute
            self.screen.addnstr(y, x, line[0], max_x-2, line[1])
        else:
            self.screen.addnstr(y, x, line, max_x-2)
        y += 1

    self.screen.refresh()
draw the curses ui on the screen, handle scroll if needed
def saveJSON(g, data, backup=False):
    """
    Saves the current setup to disk.

    g : hcam_drivers.globals.Container
        Container with globals
    data : dict
        The current setup in JSON compatible dictionary format.
    backup : bool
        If we are saving a backup on close, don't prompt for filename

    Returns True on success, False if the user aborted the file dialog.
    """
    if not backup:
        # Interactive save: ask the user where to put the file.
        fname = filedialog.asksaveasfilename(
            defaultextension='.json',
            filetypes=[('json files', '.json'), ],
            initialdir=g.cpars['app_directory']
        )
    else:
        # Silent backup to a fixed, well-known location.
        fname = os.path.join(os.path.expanduser('~/.hdriver'), 'app.json')

    if not fname:
        g.clog.warn('Aborted save to disk')
        return False

    with open(fname, 'w') as of:
        of.write(
            json.dumps(data, sort_keys=True, indent=4,
                       separators=(',', ': '))
        )
    # BUG FIX: the original log message was missing the space before the
    # filename ('Saved setup to/path/...').
    g.clog.info('Saved setup to ' + fname)
    return True
Saves the current setup to disk. g : hcam_drivers.globals.Container Container with globals data : dict The current setup in JSON compatible dictionary format. backup : bool If we are saving a backup on close, don't prompt for filename
def addvFunc(self,solution,EndOfPrdvP):
    '''
    Creates the value function for this period and adds it to the solution.

    Parameters
    ----------
    solution : ConsumerSolution
        The solution to this single period problem, likely including the
        consumption function, marginal value function, etc.
    EndOfPrdvP : np.array
        Array of end-of-period marginal value of assets corresponding to the
        asset values in self.aNrmNow.

    Returns
    -------
    solution : ConsumerSolution
        The single period solution passed as an input, but now with the
        value function (defined over market resources m) as an attribute.
    '''
    # Build the end-of-period value function first; makevFunc relies on it.
    self.makeEndOfPrdvFunc(EndOfPrdvP)
    solution.vFunc = self.makevFunc(solution)
    return solution
Creates the value function for this period and adds it to the solution. Parameters ---------- solution : ConsumerSolution The solution to this single period problem, likely including the consumption function, marginal value function, etc. EndOfPrdvP : np.array Array of end-of-period marginal value of assets corresponding to the asset values in self.aNrmNow. Returns ------- solution : ConsumerSolution The single period solution passed as an input, but now with the value function (defined over market resources m) as an attribute.
def build_kal_scan_channel_string(kal_bin, channel, args):
    """Return string for CLI invocation of kal, for channel scan.

    Optional gain/device/error values from ``args`` are appended as
    their corresponding kal CLI flags.
    """
    option_mapping = {"gain": "-g", "device": "-d", "error": "-e"}
    command = "%s -v -c %s" % (kal_bin, channel)
    command += options_string_builder(option_mapping, args)
    return command
Return string for CLI invocation of kal, for channel scan.
def previous_song(self):
    """previous song for player to play

    NOTE: not the last played song
    """
    if self.current_song is None:
        return self._get_good_song(base=-1, direction=-1)

    if self.playback_mode == PlaybackMode.random:
        return self._get_good_song(direction=-1)

    current_index = self._songs.index(self.current_song)
    return self._get_good_song(base=current_index - 1, direction=-1)
previous song for player to play NOTE: not the last played song
def finalize(self):
    """
    Finalize the run - build the name generator and use it to build the
    remap symbol tables.
    """
    self.global_scope.close()
    # Skip reserved keywords so generated names never collide with them.
    name_generator = NameGenerator(skip=self.reserved_keywords)
    self.global_scope.build_remap_symbols(
        name_generator,
        # Global names are left alone unless global obfuscation was
        # explicitly requested.
        children_only=not self.obfuscate_globals,
    )
Finalize the run - build the name generator and use it to build the remap symbol tables.
def _run_get_data_background(macs, queue, shared_data, bt_device):
    """
    Background process function for RuuviTag Sensors.

    :param macs: MAC addresses to listen for
    :param queue: queue the timestamped sensor data is pushed to
    :param shared_data: shared mapping; setting 'run_flag' to False
        stops the collection loop
    :param bt_device: bluetooth device identifier to use
    """
    run_flag = RunFlag()

    def add_data(data):
        # Stop collecting when the controlling process clears the flag.
        if not shared_data['run_flag']:
            run_flag.running = False
        # Timestamp each sample (UTC, ISO format) before handing it over.
        data[1]['time'] = datetime.utcnow().isoformat()
        queue.put(data)

    RuuviTagSensor.get_datas(add_data, macs, run_flag, bt_device)
Background process function for RuuviTag Sensors
def get_arctic_lib(connection_string, **kwargs):
    """
    Returns a mongo library for the given connection string

    Parameters
    ---------
    connection_string: `str`
        Format must be one of the following:
            library@trading for known mongo servers
            library@hostname:port

    Returns:
    --------
    Arctic library

    Raises
    ------
    ValueError
        If the connection string does not match the expected format.
    """
    m = CONNECTION_STR.match(connection_string)
    if not m:
        raise ValueError("connection string incorrectly formed: %s"
                         % connection_string)
    # group(1) is the library name, group(2) the host specification
    library, host = m.group(1), m.group(2)
    return _get_arctic(host, **kwargs)[library]
Returns a mongo library for the given connection string Parameters --------- connection_string: `str` Format must be one of the following: library@trading for known mongo servers library@hostname:port Returns: -------- Arctic library
def filter(self, **kwargs):
    """
    Returns a generator of objects from the database.

    The kwargs parameter can contain any number of attributes.
    Only objects which contain all listed attributes and in which all values
    match for all listed attributes will be returned.

    Recognized special keyword arguments (all popped from ``kwargs``):
    page_size, order_by, tags, exclude_text, exclude_text_words,
    persona_not_startswith, search_text_contains.
    """
    from sqlalchemy import or_

    Statement = self.get_model('statement')
    Tag = self.get_model('tag')

    session = self.Session()

    page_size = kwargs.pop('page_size', 1000)
    order_by = kwargs.pop('order_by', None)
    tags = kwargs.pop('tags', [])
    exclude_text = kwargs.pop('exclude_text', None)
    exclude_text_words = kwargs.pop('exclude_text_words', [])
    persona_not_startswith = kwargs.pop('persona_not_startswith', None)
    search_text_contains = kwargs.pop('search_text_contains', None)

    # Convert a single string into a list if only one tag is provided
    if isinstance(tags, str):
        tags = [tags]

    if len(kwargs) == 0:
        statements = session.query(Statement).filter()
    else:
        statements = session.query(Statement).filter_by(**kwargs)

    if tags:
        statements = statements.join(Statement.tags).filter(
            Tag.name.in_(tags)
        )

    if exclude_text:
        statements = statements.filter(
            ~Statement.text.in_(exclude_text)
        )

    if exclude_text_words:
        or_word_query = [
            Statement.text.ilike('%' + word + '%')
            for word in exclude_text_words
        ]
        statements = statements.filter(
            ~or_(*or_word_query)
        )

    if persona_not_startswith:
        # BUG FIX: the prefix to exclude was hard-coded to 'bot:' and the
        # supplied value was ignored. Callers passing 'bot:' (the
        # documented use) observe identical behaviour.
        statements = statements.filter(
            ~Statement.persona.startswith(persona_not_startswith)
        )

    if search_text_contains:
        or_query = [
            Statement.search_text.contains(word)
            for word in search_text_contains.split(' ')
        ]
        statements = statements.filter(
            or_(*or_query)
        )

    if order_by:
        if 'created_at' in order_by:
            index = order_by.index('created_at')
            order_by[index] = Statement.created_at.asc()
        statements = statements.order_by(*order_by)

    total_statements = statements.count()

    # Page through the results so large tables are never loaded at once.
    for start_index in range(0, total_statements, page_size):
        for statement in statements.slice(start_index, start_index + page_size):
            yield self.model_to_object(statement)

    session.close()
Returns a list of objects from the database. The kwargs parameter can contain any number of attributes. Only objects which contain all listed attributes and in which all values match for all listed attributes will be returned.
def print_chain_summary(self, stream=sys.stdout, indent=""):
    """Print a summary of the files in this file dict.

    This version uses chain_input_files and chain_output_files to count
    the input and output files.
    """
    rows = (
        ("Total files", self.file_dict),
        (" Input files", self.chain_input_files),
        (" Output files", self.chain_output_files),
        (" Internal files", self.internal_files),
        (" Temp files", self.temp_files),
    )
    for label, files in rows:
        stream.write("%s%s : %i\n" % (indent, label, len(files)))
Print a summary of the files in this file dict. This version uses chain_input_files and chain_output_files to count the input and output files.
def id_source(source, full=False):
    """ Returns the name of a website-scraping function.

    Unknown sources yield an empty string; otherwise the short (default)
    or full name registered in ``source_ids`` is returned.
    """
    if source not in source_ids:
        return ''
    entry = source_ids[source]
    return entry[1] if full else entry[0]
Returns the name of a website-scraping function.
def check_path(path, create=False):
    """
    Check for a path on filesystem

    :param path: str - path name
    :param create: bool - create if do not exist
    :return: bool - path exists
    """
    if os.path.exists(path):
        return True
    if not create:
        return False
    # exist_ok avoids a race if the path appears between the existence
    # check above and the creation here.
    os.makedirs(path, exist_ok=True)
    return os.path.exists(path)
Check for a path on filesystem :param path: str - path name :param create: bool - create if do not exist :return: bool - path exists
def create_page(self, **extra_kwargs):
    """
    Create page (and page title) in the default language.

    extra_kwargs will be passed to cms.api.create_page(), e.g.:
        extra_kwargs={
            "soft_root": True,
            "reverse_id": my_reverse_id,
        }

    Returns a (page, created) tuple; ``created`` is False when an
    existing draft page (matched by apphook namespace or by slug)
    was reused instead of created.
    """
    with translation.override(self.default_language_code):
        # Activate the default language so the lazy language-name
        # translation evaluates correctly
        # (e.g.: settings.LANGUAGE_CODE is not "en")
        self.default_lang_name = dict(
            self.languages)[self.default_language_code]
        self.slug = self.get_slug(self.default_language_code,
                                  self.default_lang_name)

    assert self.slug != ""

    page = None
    parent = self.get_parent_page()
    if parent is not None:
        # cms.api.create_page() requires a draft parent
        assert parent.publisher_is_draft == True, "Parent page '%s' must be a draft!" % parent

    if self.delete_first:
        # Remove any matching existing pages before (re)creating.
        if self.apphook_namespace is not None:
            pages = Page.objects.filter(
                application_namespace=self.apphook_namespace,
                parent=parent,
            )
        else:
            pages = Page.objects.filter(
                title_set__slug=self.slug,
                parent=parent,
            )
        log.debug("Delete %i pages...", pages.count())
        pages.delete()
    else:
        if self.apphook_namespace is not None:
            # Create a plugin page: match an existing draft page by its
            # apphook namespace instead of its slug.
            queryset = Page.objects.drafts()
            queryset = queryset.filter(parent=parent)
            try:
                page = queryset.get(
                    application_namespace=self.apphook_namespace)
            except Page.DoesNotExist:
                pass  # Create page
            else:
                log.debug("Use existing page: %s", page)
                created = False
                return page, created
        else:
            # Not a plugin page: match an existing page through its
            # Title entry (language + parent + slug).
            queryset = Title.objects.filter(
                language=self.default_language_code)
            queryset = queryset.filter(page__parent=parent)
            try:
                title = queryset.filter(slug=self.slug).first()
            except Title.DoesNotExist:
                pass  # Create page
            else:
                if title is not None:
                    log.debug("Use page from title with slug %r", self.slug)
                    page = title.page
                    created = False

    if page is None:
        with translation.override(self.default_language_code):
            # Activate the right translation language so the lazy
            # language-name translation evaluates correctly
            # (e.g.: settings.LANGUAGE_CODE is not "en")
            page = create_page(
                title=self.get_title(self.default_language_code,
                                     self.default_lang_name),
                menu_title=self.get_menu_title(self.default_language_code,
                                               self.default_lang_name),
                template=self.get_template(self.default_language_code,
                                           self.default_lang_name),
                language=self.default_language_code,
                slug=self.slug,
                published=False,
                parent=parent,
                in_navigation=self.in_navigation,
                apphook=self.apphook,
                apphook_namespace=self.apphook_namespace,
                **extra_kwargs)
        created = True
        log.debug("Page created in %s: %s", self.default_lang_name, page)

    # The returned page is always a draft.
    assert page.publisher_is_draft == True
    return page, created
Create page (and page title) in default language extra_kwargs will be passed to cms.api.create_page() e.g.: extra_kwargs={ "soft_root": True, "reverse_id": my_reverse_id, }
def rm_watch(self, wd, rec=False, quiet=True):
    """
    Removes watch(s).

    @param wd: Watch Descriptor of the file or directory to unwatch.
               Also accepts a list of WDs.
    @type wd: int or list of int.
    @param rec: Recursively removes watches on every already watched
                subdirectories and subfiles.
    @type rec: bool
    @param quiet: If False raises a WatchManagerError exception on
                  error. See example not_quiet.py
    @type quiet: bool
    @return: dict of watch descriptors associated to booleans values.
             True if the corresponding wd has been successfully
             removed, False otherwise.
    @rtype: dict of {int: bool}
    """
    wds = self.__format_param(wd)
    if rec:
        # Expand to every watched descendant of the given watches.
        wds = self.__get_sub_rec(wds)

    results = {}  # {wd: removal succeeded?}
    for awd in wds:
        rc = self._inotify_wrapper.inotify_rm_watch(self._fd, awd)
        if rc < 0:
            results[awd] = False
            err = ('rm_watch: cannot remove WD=%d, %s' % \
                   (awd, self._inotify_wrapper.str_errno()))
            if not quiet:
                raise WatchManagerError(err, results)
            log.error(err)
            continue

        # Drop the bookkeeping entry for the removed watch.
        if awd in self._wmd:
            del self._wmd[awd]

        results[awd] = True
        log.debug('Watch WD=%d (%s) removed', awd, self.get_path(awd))
    return results
Removes watch(s). @param wd: Watch Descriptor of the file or directory to unwatch. Also accepts a list of WDs. @type wd: int or list of int. @param rec: Recursively removes watches on every already watched subdirectories and subfiles. @type rec: bool @param quiet: If False raises a WatchManagerError exception on error. See example not_quiet.py @type quiet: bool @return: dict of watch descriptors associated to booleans values. True if the corresponding wd has been successfully removed, False otherwise. @rtype: dict of {int: bool}
def updateData(self, axeskey, x, y):
    """Replaces the currently displayed data

    :param axeskey: name of data plot to update. Valid options are
        'stim' or 'response'
    :type axeskey: str
    :param x: index values associated with y to plot
    :type x: numpy.ndarray
    :param y: values to plot at x
    :type y: numpy.ndarray
    """
    if axeskey == 'stim':
        self.stimPlot.setData(x, y)
        # Manually trigger the range handler so the signal range
        # markers are re-positioned for the new data.
        self.rangeChange(self, self.viewRange())
    if axeskey == 'response':
        self.clearTraces()
        # Convert amperes to the display scale when needed.
        if self._traceUnit == 'A':
            y = y * self._ampScalar
        if self.zeroAction.isChecked():
            # Baseline-correct using an early-sample average.
            y = y - np.mean(y[5:25])
        self.tracePlot.setData(x, y * self._polarity)
Replaces the currently displayed data :param axeskey: name of data plot to update. Valid options are 'stim' or 'response' :type axeskey: str :param x: index values associated with y to plot :type x: numpy.ndarray :param y: values to plot at x :type y: numpy.ndarray
def _get_timestamp_tuple(ts):
    """
    Internal method to get a timestamp tuple from a value.

    Handles input being a datetime or a Timestamp.

    NOTE(review): a datetime is converted to a tuple, while a Timestamp
    is returned unchanged (not ``.tuple()``-ed) — presumably callers
    accept both forms; confirm before changing.
    """
    # Check datetime first: the original order is preserved in case
    # Timestamp is itself a datetime subclass.
    if isinstance(ts, datetime.datetime):
        return Timestamp.from_datetime(ts).tuple()
    if isinstance(ts, Timestamp):
        return ts
    raise TypeError('Timestamp or datetime.datetime required')
Internal method to get a timestamp tuple from a value. Handles input being a datetime or a Timestamp.
def _process_uniprot_ids(self, limit=None):
    """
    This method processes the mappings from ZFIN gene IDs to
    UniProtKB IDs.

    Triples created:
    <zfin_gene_id> a class
    <zfin_gene_id> rdfs:label gene_symbol

    <uniprot_id> is an Individual
    <uniprot_id> has type <polypeptide>

    <zfin_gene_id> has_gene_product <uniprot_id>
    :param limit: maximum number of rows to process (ignored in
        test mode)
    :return: None
    """
    LOG.info("Processing UniProt IDs")
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    line_counter = 0
    model = Model(graph)
    geno = Genotype(graph)
    # Path to the raw tab-separated ZFIN uniprot mapping file.
    raw = '/'.join((self.rawdir, self.files['uniprot']['file']))
    with open(raw, 'r', encoding="iso-8859-1") as csvfile:
        filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
        for row in filereader:
            line_counter += 1
            # NOTE: unpacking requires exactly 4 columns per row
            (gene_id, gene_so_id, gene_symbol, uniprot_id  # , empty
             ) = row

            # In test mode only keep whitelisted genes.
            if self.test_mode and gene_id not in self.test_ids['gene']:
                continue

            # Prefix bare identifiers into CURIEs.
            gene_id = 'ZFIN:' + gene_id.strip()
            uniprot_id = 'UniProtKB:' + uniprot_id.strip()

            geno.addGene(gene_id, gene_symbol)
            # TODO: Abstract to one of the model utilities
            model.addIndividualToGraph(
                uniprot_id, None, self.globaltt['polypeptide'])
            graph.addTriple(
                gene_id, self.globaltt['has gene product'], uniprot_id)

            # The limit only applies outside test mode.
            if not self.test_mode and limit is not None and line_counter > limit:
                break

    LOG.info("Done with UniProt IDs")
    return
This method processes the mappings from ZFIN gene IDs to UniProtKB IDs. Triples created: <zfin_gene_id> a class <zfin_gene_id> rdfs:label gene_symbol <uniprot_id> is an Individual <uniprot_id> has type <polypeptide> <zfin_gene_id> has_gene_product <uniprot_id> :param limit: :return:
def get_net(req):
    """Get the net of any 'next' and 'prev' querystrings.

    Reads the ``cal_next`` and ``cal_prev`` values from ``req.GET``
    (defaulting to 0 when absent) and returns their difference.
    Non-numeric values yield a net of 0.

    :param req: request object exposing a dict-like ``GET``
    :return: int, cal_next - cal_prev
    """
    try:
        # Only int() conversion failures are expected here; catching
        # bare Exception previously hid unrelated bugs.
        nxt = int(req.GET.get('cal_next', 0))
        prev = int(req.GET.get('cal_prev', 0))
        net = nxt - prev
    except (TypeError, ValueError):
        net = 0
    return net
Get the net of any 'next' and 'prev' querystrings.
def calcAspectRatioFromCorners(corners, in_plane=False):
    '''
    Simple and better algorithm than the one below.

    in_plane -> whether the object has no tilt, but only rotation
    and translation
    '''
    q = corners
    # Two opposite corners, each with its pair of adjacent edges.
    edge_a1 = [q[0, 0], q[0, 1], q[1, 0], q[1, 1]]
    edge_a2 = [q[0, 0], q[0, 1], q[-1, 0], q[-1, 1]]
    edge_b1 = [q[2, 0], q[2, 1], q[3, 0], q[3, 1]]
    edge_b2 = [q[2, 0], q[2, 1], q[1, 0], q[1, 1]]

    ratio_a = line.length(edge_a1) / line.length(edge_a2)
    ratio_b = line.length(edge_b1) / line.length(edge_b2)

    if not in_plane:
        # Average of the two corner estimates.
        return 0.5 * (ratio_a + ratio_b)

    # Take the aspect ratio from the more rectangular corner
    # (the one whose edge angle deviates least from 90 degrees).
    dev_a = abs(0.5 * np.pi - abs(line.angle2(edge_a1, edge_a2)))
    dev_b = abs(0.5 * np.pi - abs(line.angle2(edge_b1, edge_b2)))
    return ratio_a if dev_a < dev_b else ratio_b
Simple and better algorithm than the one below. in_plane -> whether the object has no tilt, but only rotation and translation
def validate_sceneInfo(self):
    """Check scene name and whether remote file exists.

    Raises WrongSceneNameError if the scene name is wrong.
    """
    prefix = self.sceneInfo.prefix
    if prefix not in self.__satellitesMap:
        # Build the message once instead of formatting it twice.
        msg = ('Google Downloader: Prefix of %s (%s) is invalid'
               % (self.sceneInfo.name, prefix))
        logger.error(msg)
        raise WrongSceneNameError(msg)
Check scene name and whether remote file exists. Raises WrongSceneNameError if the scene name is wrong.
def in_scope(self, exclude_scopes=None, include_scopes=None):
    """Whether this scope should be included by the given inclusion and exclusion rules.

    :param Scope exclude_scopes: An optional Scope containing scope names to exclude. None (the
                                 default value) indicates that no filtering should be done based on
                                 exclude_scopes.
    :param Scope include_scopes: An optional Scope containing scope names to include. None (the
                                 default value) indicates that no filtering should be done based on
                                 include_scopes.
    :return: True if none of the input scopes are in `exclude_scopes`, and either (a) no include
             scopes are provided, or (b) at least one input scope is included in the
             `include_scopes` list.
    :rtype: bool
    """
    # Validate both optional filters with the same rule.
    for arg_name, arg_value in (('include_scopes', include_scopes),
                                ('exclude_scopes', exclude_scopes)):
        if arg_value is not None and not isinstance(arg_value, Scope):
            raise ValueError('{} must be a Scope instance but was {}.'.format(
                arg_name, type(arg_value)
            ))
    # Exclusion wins over everything else.
    if exclude_scopes and any(s in exclude_scopes for s in self):
        return False
    # No include filter means everything not excluded passes.
    if not include_scopes:
        return True
    return any(s in include_scopes for s in self)
Whether this scope should be included by the given inclusion and exclusion rules. :param Scope exclude_scopes: An optional Scope containing scope names to exclude. None (the default value) indicates that no filtering should be done based on exclude_scopes. :param Scope include_scopes: An optional Scope containing scope names to include. None (the default value) indicates that no filtering should be done based on include_scopes. :return: True if none of the input scopes are in `exclude_scopes`, and either (a) no include scopes are provided, or (b) at least one input scope is included in the `include_scopes` list. :rtype: bool
def current(self):
    """Returns the current user
    """
    # Outside a request context, fall back to the manually-pushed stack.
    if not has_request_context():
        return self.no_req_ctx_user_stack.top
    stack = getattr(_request_ctx_stack.top, 'user_stack', None)
    if stack and stack.top:
        return stack.top
    # Nothing pushed explicitly: defer to the login extension.
    return _get_user()
Returns the current user
def active_thresholds_value_maps(keywords, exposure_key):
    """Helper to retrieve active value maps or thresholds for an exposure.

    :param keywords: Hazard layer keywords.
    :type keywords: dict

    :param exposure_key: The exposure key.
    :type exposure_key: str

    :returns: Active thresholds or value maps.
    :rtype: dict
    """
    continuous = keywords['layer_mode'] == layer_mode_continuous['key']

    # Old-style keywords: a single classification, no per-exposure data.
    if 'classification' in keywords:
        return keywords['thresholds'] if continuous else keywords['value_map']

    if continuous:
        classifications = keywords['thresholds'].get(exposure_key)
    else:
        classifications = keywords['value_maps'].get(exposure_key)

    if classifications is None:
        return None

    # Return the classes of the first classification flagged active.
    for classification in classifications.values():
        if classification['active']:
            return classification['classes']

    return None
Helper to retrieve active value maps or thresholds for an exposure. :param keywords: Hazard layer keywords. :type keywords: dict :param exposure_key: The exposure key. :type exposure_key: str :returns: Active thresholds or value maps. :rtype: dict
def extract_audioclip_samples(d) -> dict:
    """
    Extract all the sample data from an AudioClip and convert it from
    FSB5 if needed.

    :param d: AudioClip-like object exposing ``data`` (raw bytes) and
        ``name``; an empty ``data`` (e.g. StreamedResource not
        available) yields an empty result.
    :return: mapping of output filename -> rebuilt sample bytes
    :raises RuntimeError: if the optional ``fsb5`` package is missing
    """
    ret = {}

    if not d.data:
        # eg. StreamedResource not available
        return {}

    try:
        from fsb5 import FSB5
    except ImportError as e:
        # Chain the original ImportError so the root cause stays visible.
        raise RuntimeError("python-fsb5 is required to extract AudioClip") from e

    af = FSB5(d.data)
    for i, sample in enumerate(af.samples):
        # The first sample keeps the clip name; later ones get an index.
        if i > 0:
            filename = "%s-%i.%s" % (d.name, i, af.get_sample_extension())
        else:
            filename = "%s.%s" % (d.name, af.get_sample_extension())
        try:
            sample = af.rebuild_sample(sample)
        except ValueError as e:
            # Best-effort: skip samples that fail to rebuild.
            print("WARNING: Could not extract %r (%s)" % (d, e))
            continue
        ret[filename] = sample

    return ret
Extract all the sample data from an AudioClip and convert it from FSB5 if needed.
def iter_variants(self):
    """Iterate over marker information."""
    # Each BIM row becomes a Variant (name, chromosome, position, alleles).
    for _, row in self.bim.iterrows():
        yield Variant(
            row.name,
            CHROM_INT_TO_STR[row.chrom],
            row.pos,
            [row.a1, row.a2],
        )
Iterate over marker information.
def add_contig_to_header(line, ref_file):
    """Streaming target to add contigs to a VCF file header.
    """
    # Pass every line through untouched except the fileformat line,
    # after which the contig declarations are appended.
    if not line.startswith("##fileformat=VCF"):
        return line
    header = [line]
    header.extend(
        "##contig=<ID=%s,length=%s>" % (region.name, region.size)
        for region in ref.file_contigs(ref_file)
    )
    return "\n".join(header)
Streaming target to add contigs to a VCF file header.
def ecg_hrv(rpeaks=None, rri=None, sampling_rate=1000, hrv_features=["time", "frequency", "nonlinear"]):
    """
    Computes the Heart-Rate Variability (HRV).

    Shamelessly stolen from the `hrv <https://github.com/rhenanbartels/hrv/blob/develop/hrv>`_
    package by Rhenan Bartels. All credits go to him.

    Parameters
    ----------
    rpeaks : list or ndarray
        R-peak location indices. Exactly one of ``rpeaks`` or ``rri``
        must be given.
    rri : list or ndarray
        RR intervals in the signal. If this argument is passed, rpeaks
        should not be passed.
    sampling_rate : int
        Sampling rate (samples/second).
    hrv_features : list
        What HRV indices to compute. Any or all of 'time', 'frequency'
        or 'nonlinear'.

    Returns
    ----------
    hrv : dict
        Contains the computed HRV features and the fraction of detected
        artifacts (``n_Artifacts``), grouped as follows:

        - Time domain: RMSSD, meanNN, sdNN, cvNN, CVSD, medianNN, madNN,
          mcvNN, pNN50, pNN20, plus the geometrical indices Triang
          (HRV triangular index) and Shannon_h (entropy of the NN
          histogram, ~8 ms class width; Voss, 2015).
        - Frequency domain: band powers ULF, VLF, LF, HF, VHF,
          Total_Power, normalized/relative forms LFn, HFn, LF/HF, LF/P,
          HF/P, and a per-band power-over-time DataFrame in ``hrv["df"]``.
        - Non-linear: DFA_1, DFA_2 (Peng et al., 1995), Shannon,
          Sample_Entropy, Correlation_Dimension, Entropy_Multiscale_AUC,
          Entropy_SVD, Entropy_Spectral_{VLF,LF,HF}, Fisher_Info,
          FD_Petrosian, FD_Higushi.

        See the references below (esp. Task Force, 1996 and Voss, 2015)
        for the physiological interpretation of the individual indices.

    Example
    ----------
    >>> import neurokit as nk
    >>> sampling_rate = 1000
    >>> hrv = nk.bio_ecg.ecg_hrv(rpeaks=rpeaks, sampling_rate=sampling_rate)

    *Authors*

    - `Dominique Makowski <https://dominiquemakowski.github.io/>`_
    - Rhenan Bartels (https://github.com/rhenanbartels)

    *Dependencies*

    - scipy
    - numpy

    *See Also*

    - RHRV: http://rhrv.r-forge.r-project.org/

    References
    -----------
    - Heart rate variability. (1996). Standards of measurement, physiological interpretation, and clinical use. Task Force of the European Society of Cardiology and the North American Society of Pacing and Electrophysiology. Eur Heart J, 17, 354-381.
    - Voss, A., Schroeder, R., Heitmann, A., Peters, A., & Perz, S. (2015). Short-term heart rate variability—influence of gender and age in healthy subjects. PloS one, 10(3), e0118308.
    - Zohar, A. H., Cloninger, C. R., & McCraty, R. (2013). Personality and heart rate variability: exploring pathways from personality to cardiac coherence and health. Open Journal of Social Sciences, 1(06), 32.
    - Smith, A. L., Owen, H., & Reynolds, K. J. (2013). Heart rate variability indices for very short-term (30 beat) analysis. Part 2: validation. Journal of clinical monitoring and computing, 27(5), 577-585.
    - Lippman, N. E. A. L., Stein, K. M., & Lerman, B. B. (1994). Comparison of methods for removal of ectopy in measurement of heart rate variability. American Journal of Physiology-Heart and Circulatory Physiology, 267(1), H411-H418.
    - Peltola, M. A. (2012). Role of editing of R–R intervals in the analysis of heart rate variability. Frontiers in physiology, 3.
    """
    # Check arguments: exactly one of rpeaks or rri has to be given as input
    if rpeaks is None and rri is None:
        raise ValueError("Either rpeaks or RRIs needs to be given.")

    if rpeaks is not None and rri is not None:
        raise ValueError("Either rpeaks or RRIs should be given but not both.")

    # Initialize empty dict
    hrv = {}

    # Preprocessing
    # ==================
    # Extract RR intervals (RRis)
    if rpeaks is not None:
        # Rpeaks is given, RRis need to be computed
        RRis = np.diff(rpeaks)
    else:
        # Case where RRis are already given:
        RRis = rri

    # Basic resampling to 1Hz to standardize the scale
    RRis = RRis/sampling_rate
    RRis = RRis.astype(float)

    # Artifact detection - Statistical
    for index, rr in enumerate(RRis):
        # Remove RR intervals that differ more than 25% from the previous one
        if RRis[index] < RRis[index-1]*0.75:
            RRis[index] = np.nan
        if RRis[index] > RRis[index-1]*1.25:
            RRis[index] = np.nan

    # Artifact detection - Physiological
    # (http://emedicine.medscape.com/article/2172196-overview)
    # NOTE(review): bounds are in seconds here (0.6–1.3 s), i.e. roughly
    # 46–100 bpm — intervals outside this range are treated as artifacts.
    RRis = pd.Series(RRis)
    RRis[RRis < 0.6] = np.nan
    RRis[RRis > 1.3] = np.nan

    # Sanity check
    if len(RRis) <= 1:
        print("NeuroKit Warning: ecg_hrv(): Not enough R peaks to compute HRV :/")
        return(hrv)

    # Artifacts treatment
    hrv["n_Artifacts"] = pd.isnull(RRis).sum()/len(RRis)
    artifacts_indices = RRis.index[RRis.isnull()]  # get the artifacts indices
    RRis = RRis.drop(artifacts_indices)  # remove the artifacts

    # Rescale to 1000Hz (i.e. express the intervals in milliseconds)
    RRis = RRis*1000
    hrv["RR_Intervals"] = RRis  # Values of RRis

    # Sanity check after artifact removal
    if len(RRis) <= 1:
        print("NeuroKit Warning: ecg_hrv(): Not enough normal R peaks to compute HRV :/")
        return(hrv)

    # Time Domain
    # ==================
    if "time" in hrv_features:
        hrv["RMSSD"] = np.sqrt(np.mean(np.diff(RRis) ** 2))
        hrv["meanNN"] = np.mean(RRis)
        hrv["sdNN"] = np.std(RRis, ddof=1)  # make it calculate N-1
        hrv["cvNN"] = hrv["sdNN"] / hrv["meanNN"]
        hrv["CVSD"] = hrv["RMSSD"] / hrv["meanNN"]
        hrv["medianNN"] = np.median(abs(RRis))
        hrv["madNN"] = mad(RRis, constant=1)
        hrv["mcvNN"] = hrv["madNN"] / hrv["medianNN"]
        nn50 = sum(abs(np.diff(RRis)) > 50)
        nn20 = sum(abs(np.diff(RRis)) > 20)
        hrv["pNN50"] = nn50 / len(RRis) * 100
        hrv["pNN20"] = nn20 / len(RRis) * 100

    # Frequency Domain Preparation
    # ==============================
    if "frequency" in hrv_features:

        # Interpolation
        # =================
        # Convert to continuous RR interval (RRi)
        # NOTE(review): this branch requires rpeaks — calling with only
        # rri and "frequency" in hrv_features would fail here; confirm.
        beats_times = rpeaks[1:].copy()  # the time at which each beat occured starting from the 2nd beat
        beats_times -= list(beats_times)[0]  # So it starts at 0
        beats_times = np.delete(list(beats_times), artifacts_indices)  # delete also the artifact beat moments

        try:
            RRi = interpolate(RRis, beats_times, sampling_rate)  # Interpolation using 3rd order spline
        except TypeError:
            print("NeuroKit Warning: ecg_hrv(): Sequence too short to compute interpolation. Will skip many features.")
            return(hrv)

        hrv["df"] = RRi.to_frame("ECG_RR_Interval")  # Continuous (interpolated) signal of RRi

        # Geometrical Method (actually part of time domain)
        # =========================================
        # TODO: This part needs to be checked by an expert. Also, it would be better to have Renyi entropy (a generalization of shannon's), but I don't know how to compute it.
        try:
            bin_number = 32  # Initialize bin_width value
            # find the appropriate number of bins so the class width is approximately 8 ms (Voss, 2015)
            for bin_number_current in range(2, 50):
                bin_width = np.diff(np.histogram(RRi, bins=bin_number_current, density=True)[1])[0]
                if abs(8 - bin_width) < abs(8 - np.diff(np.histogram(RRi, bins=bin_number, density=True)[1])[0]):
                    bin_number = bin_number_current
            hrv["Triang"] = len(RRis)/np.max(np.histogram(RRi, bins=bin_number, density=True)[0])
            hrv["Shannon_h"] = complexity_entropy_shannon(np.histogram(RRi, bins=bin_number, density=True)[0])
        except ValueError:
            hrv["Triang"] = np.nan
            hrv["Shannon_h"] = np.nan

        # Frequency Domain Features
        # ==========================
        freq_bands = {
            "ULF": [0.0001, 0.0033],
            "VLF": [0.0033, 0.04],
            "LF": [0.04, 0.15],
            "HF": [0.15, 0.40],
            "VHF": [0.4, 0.5]}

        # Frequency-Domain Power over time
        freq_powers = {}
        for band in freq_bands:
            freqs = freq_bands[band]
            # Filter to keep only the band of interest
            filtered, sampling_rate, params = biosppy.signals.tools.filter_signal(signal=RRi, ftype='butter', band='bandpass', order=1, frequency=freqs, sampling_rate=sampling_rate)
            # Apply Hilbert transform
            amplitude, phase = biosppy.signals.tools.analytic_signal(filtered)
            # Extract Amplitude of Envelope (power)
            freq_powers["ECG_HRV_" + band] = amplitude

        freq_powers = pd.DataFrame.from_dict(freq_powers)
        freq_powers.index = hrv["df"].index
        hrv["df"] = pd.concat([hrv["df"], freq_powers], axis=1)

        # Compute Power Spectral Density (PSD) using multitaper method
        power, freq = mne.time_frequency.psd_array_multitaper(RRi, sfreq=sampling_rate, fmin=0, fmax=0.5, adaptive=False, normalization='length')

        def power_in_band(power, freq, band):
            # Integrate the PSD over [band[0], band[1]) via trapezoidal rule.
            power = np.trapz(y=power[(freq >= band[0]) & (freq < band[1])], x=freq[(freq >= band[0]) & (freq < band[1])])
            return(power)

        # Extract Power according to frequency bands
        hrv["ULF"] = power_in_band(power, freq, freq_bands["ULF"])
        hrv["VLF"] = power_in_band(power, freq, freq_bands["VLF"])
        hrv["LF"] = power_in_band(power, freq, freq_bands["LF"])
        hrv["HF"] = power_in_band(power, freq, freq_bands["HF"])
        hrv["VHF"] = power_in_band(power, freq, freq_bands["VHF"])
        hrv["Total_Power"] = power_in_band(power, freq, [0, 0.5])

        hrv["LFn"] = hrv["LF"]/(hrv["LF"]+hrv["HF"])
        hrv["HFn"] = hrv["HF"]/(hrv["LF"]+hrv["HF"])
        hrv["LF/HF"] = hrv["LF"]/hrv["HF"]
        hrv["LF/P"] = hrv["LF"]/hrv["Total_Power"]
        hrv["HF/P"] = hrv["HF"]/hrv["Total_Power"]

    # TODO: THIS HAS TO BE CHECKED BY AN EXPERT - Should it be applied on the interpolated on raw RRis?
    # Non-Linear Dynamics
    # ======================
    if "nonlinear" in hrv_features:
        if len(RRis) > 17:
            hrv["DFA_1"] = nolds.dfa(RRis, range(4, 17))
        if len(RRis) > 66:
            hrv["DFA_2"] = nolds.dfa(RRis, range(16, 66))

        hrv["Shannon"] = complexity_entropy_shannon(RRis)
        hrv["Sample_Entropy"] = nolds.sampen(RRis, emb_dim=2)
        try:
            hrv["Correlation_Dimension"] = nolds.corr_dim(RRis, emb_dim=2)
        except AssertionError as error:
            print("NeuroKit Warning: ecg_hrv(): Correlation Dimension. Error: " + str(error))
            hrv["Correlation_Dimension"] = np.nan
        mse = complexity_entropy_multiscale(RRis, max_scale_factor=20, m=2)
        hrv["Entropy_Multiscale_AUC"] = mse["MSE_AUC"]
        hrv["Entropy_SVD"] = complexity_entropy_svd(RRis, emb_dim=2)
        hrv["Entropy_Spectral_VLF"] = complexity_entropy_spectral(RRis, sampling_rate, bands=np.arange(0.0033, 0.04, 0.001))
        hrv["Entropy_Spectral_LF"] = complexity_entropy_spectral(RRis, sampling_rate, bands=np.arange(0.04, 0.15, 0.001))
        hrv["Entropy_Spectral_HF"] = complexity_entropy_spectral(RRis, sampling_rate, bands=np.arange(0.15, 0.40, 0.001))
        hrv["Fisher_Info"] = complexity_fisher_info(RRis, tau=1, emb_dim=2)
        # lyap exp doesn't work for some reasons
        # hrv["Lyapunov"] = np.max(nolds.lyap_e(RRis, emb_dim=58, matrix_dim=4))
        hrv["FD_Petrosian"] = complexity_fd_petrosian(RRis)
        hrv["FD_Higushi"] = complexity_fd_higushi(RRis, k_max=16)

    # TO DO:
    # Include many others (see Voss 2015)

    return(hrv)
Computes the Heart-Rate Variability (HRV). Shamelessly stolen from the `hrv <https://github.com/rhenanbartels/hrv/blob/develop/hrv>`_ package by Rhenan Bartels. All credits go to him. Parameters ---------- rpeaks : list or ndarray R-peak location indices. rri: list or ndarray RR intervals in the signal. If this argument is passed, rpeaks should not be passed. sampling_rate : int Sampling rate (samples/second). hrv_features : list What HRV indices to compute. Any or all of 'time', 'frequency' or 'nonlinear'. Returns ---------- hrv : dict Contains hrv features and percentage of detected artifacts. Example ---------- >>> import neurokit as nk >>> sampling_rate = 1000 >>> hrv = nk.bio_ecg.ecg_hrv(rpeaks=rpeaks, sampling_rate=sampling_rate) Notes ---------- *Details* - **HRV**: Heart-Rate Variability (HRV) is a finely tuned measure of heart-brain communication, as well as a strong predictor of morbidity and death (Zohar et al., 2013). It describes the complex variation of beat-to-beat intervals mainly controlled by the autonomic nervous system (ANS) through the interplay of sympathetic and parasympathetic neural activity at the sinus node. In healthy subjects, the dynamic cardiovascular control system is characterized by its ability to adapt to physiologic perturbations and changing conditions maintaining the cardiovascular homeostasis (Voss, 2015). In general, the HRV is influenced by many several factors like chemical, hormonal and neural modulations, circadian changes, exercise, emotions, posture and preload. There are several procedures to perform HRV analysis, usually classified into three categories: time domain methods, frequency domain methods and non-linear methods. - **sdNN**: The standard deviation of the time interval between successive normal heart beats (*i.e.*, the RR intervals). Reflects all influences on HRV including slow influences across the day, circadian variations, the effect of hormonal influences such as cortisol and epinephrine. 
It should be noted that total variance of HRV increases with the length of the analyzed recording. - **meanNN**: The the mean RR interval. - **CVSD**: The coefficient of variation of successive differences (van Dellen et al., 1985), the RMSSD divided by meanNN. - **cvNN**: The Coefficient of Variation, *i.e.* the ratio of sdNN divided by meanNN. - **RMSSD** is the root mean square of the RR intervals (*i.e.*, square root of the mean of the squared differences in time between successive normal heart beats). Reflects high frequency (fast or parasympathetic) influences on HRV (*i.e.*, those influencing larger changes from one beat to the next). - **medianNN**: Median of the Absolute values of the successive Differences between the RR intervals. - **madNN**: Median Absolute Deviation (MAD) of the RR intervals. - **mcvNN**: Median-based Coefficient of Variation, *i.e.* the ratio of madNN divided by medianNN. - **pNN50**: The proportion derived by dividing NN50 (The number of interval differences of successive RR intervals greater than 50 ms) by the total number of RR intervals. - **pNN20**: The proportion derived by dividing NN20 (The number of interval differences of successive RR intervals greater than 20 ms) by the total number of RR intervals. - **Triang**: The HRV triangular index measurement is the integral of the density distribution (that is, the number of all RR intervals) divided by the maximum of the density distribution (class width of 8ms). - **Shannon_h**: Shannon Entropy calculated on the basis of the class probabilities pi (i = 1,...,n with n—number of classes) of the NN interval density distribution (class width of 8 ms resulting in a smoothed histogram suitable for HRV analysis). - **VLF** is the variance (*i.e.*, power) in HRV in the Very Low Frequency (.003 to .04 Hz). Reflect an intrinsic rhythm produced by the heart which is modulated by primarily by sympathetic activity. 
- **LF** is the variance (*i.e.*, power) in HRV in the Low Frequency (.04 to .15 Hz). Reflects a mixture of sympathetic and parasympathetic activity, but in long-term recordings like ours, it reflects sympathetic activity and can be reduced by the beta-adrenergic antagonist propanolol (McCraty & Atkinson, 1996). - **HF** is the variance (*i.e.*, power) in HRV in the High Frequency (.15 to .40 Hz). Reflects fast changes in beat-to-beat variability due to parasympathetic (vagal) activity. Sometimes called the respiratory band because it corresponds to HRV changes related to the respiratory cycle and can be increased by slow, deep breathing (about 6 or 7 breaths per minute) (Kawachi et al., 1995) and decreased by anticholinergic drugs or vagal blockade (Hainsworth, 1995). - **Total_Power**: Total power of the density spectra. - **LFHF**: The LF/HF ratio is sometimes used by some investigators as a quantitative mirror of the sympatho/vagal balance. - **LFn**: normalized LF power LFn = LF/(LF+HF). - **HFn**: normalized HF power HFn = HF/(LF+HF). - **LFp**: ratio between LF and Total_Power. - **HFp**: ratio between H and Total_Power. - **DFA**: Detrended fluctuation analysis (DFA) introduced by Peng et al. (1995) quantifies the fractal scaling properties of time series. DFA_1 is the short-term fractal scaling exponent calculated over n = 4–16 beats, and DFA_2 is the long-term fractal scaling exponent calculated over n = 16–64 beats. - **Shannon**: Shannon Entropy over the RR intervals array. - **Sample_Entropy**: Sample Entropy (SampEn) over the RR intervals array with emb_dim=2. - **Correlation_Dimension**: Correlation Dimension over the RR intervals array with emb_dim=2. - **Entropy_Multiscale**: Multiscale Entropy over the RR intervals array with emb_dim=2. - **Entropy_SVD**: SVD Entropy over the RR intervals array with emb_dim=2. - **Entropy_Spectral_VLF**: Spectral Entropy over the RR intervals array in the very low frequency (0.003-0.04). 
- **Entropy_Spectral_LF**: Spectral Entropy over the RR intervals array in the low frequency (0.4-0.15). - **Entropy_Spectral_HF**: Spectral Entropy over the RR intervals array in the very high frequency (0.15-0.40). - **Fisher_Info**: Fisher information over the RR intervals array with tau=1 and emb_dim=2. - **Lyapunov**: Lyapunov Exponent over the RR intervals array with emb_dim=58 and matrix_dim=4. - **FD_Petrosian**: Petrosian's Fractal Dimension over the RR intervals. - **FD_Higushi**: Higushi's Fractal Dimension over the RR intervals array with k_max=16. *Authors* - `Dominique Makowski <https://dominiquemakowski.github.io/>`_ - Rhenan Bartels (https://github.com/rhenanbartels) *Dependencies* - scipy - numpy *See Also* - RHRV: http://rhrv.r-forge.r-project.org/ References ----------- - Heart rate variability. (1996). Standards of measurement, physiological interpretation, and clinical use. Task Force of the European Society of Cardiology and the North American Society of Pacing and Electrophysiology. Eur Heart J, 17, 354-381. - Voss, A., Schroeder, R., Heitmann, A., Peters, A., & Perz, S. (2015). Short-term heart rate variability—influence of gender and age in healthy subjects. PloS one, 10(3), e0118308. - Zohar, A. H., Cloninger, C. R., & McCraty, R. (2013). Personality and heart rate variability: exploring pathways from personality to cardiac coherence and health. Open Journal of Social Sciences, 1(06), 32. - Smith, A. L., Owen, H., & Reynolds, K. J. (2013). Heart rate variability indices for very short-term (30 beat) analysis. Part 2: validation. Journal of clinical monitoring and computing, 27(5), 577-585. - Lippman, N. E. A. L., Stein, K. M., & Lerman, B. B. (1994). Comparison of methods for removal of ectopy in measurement of heart rate variability. American Journal of Physiology-Heart and Circulatory Physiology, 267(1), H411-H418. - Peltola, M. A. (2012). Role of editing of R–R intervals in the analysis of heart rate variability. 
Frontiers in physiology, 3.
def get_authority_key_identifier(self):
    """Return the AuthorityKeyIdentifier extension used in certificates signed by this CA.

    Prefers deriving the AKI from the CA certificate's own
    SubjectKeyIdentifier extension; when that extension is absent, falls
    back to hashing the CA's public key.
    """
    try:
        ski = self.x509.extensions.get_extension_for_class(x509.SubjectKeyIdentifier)
    except x509.ExtensionNotFound:
        # No SKI on the CA certificate: derive the AKI from the public key.
        return x509.AuthorityKeyIdentifier.from_issuer_public_key(self.x509.public_key())
    else:
        # Mirror the CA's SubjectKeyIdentifier into the AKI.
        return x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier(ski)
Return the AuthorityKeyIdentifier extension used in certificates signed by this CA.
def _get_exc_info(self, exc_tuple=None): """get exc_info from a given tuple, sys.exc_info() or sys.last_type etc. Ensures sys.last_type,value,traceback hold the exc_info we found, from whichever source. raises ValueError if none of these contain any information """ if exc_tuple is None: etype, value, tb = sys.exc_info() else: etype, value, tb = exc_tuple if etype is None: if hasattr(sys, 'last_type'): etype, value, tb = sys.last_type, sys.last_value, \ sys.last_traceback if etype is None: raise ValueError("No exception to find") # Now store the exception info in sys.last_type etc. # WARNING: these variables are somewhat deprecated and not # necessarily safe to use in a threaded environment, but tools # like pdb depend on their existence, so let's set them. If we # find problems in the field, we'll need to revisit their use. sys.last_type = etype sys.last_value = value sys.last_traceback = tb return etype, value, tb
get exc_info from a given tuple, sys.exc_info() or sys.last_type etc. Ensures sys.last_type,value,traceback hold the exc_info we found, from whichever source. raises ValueError if none of these contain any information
def get_schema(repo, content_type):
    """
    Return a schema for a content type in a repository.

    :param Repo repo: The git repository.
    :param str content_type: Content type name; resolved to the file
        ``_schemas/<content_type>.avsc`` in the repo working dir.
    :returns: the parsed Avro schema object.
    :raises NotFound: if no schema file exists for ``content_type``.
    """
    try:
        with open(
                os.path.join(repo.working_dir, '_schemas',
                             '%s.avsc' % (content_type,)), 'r') as fp:
            data = fp.read()
        return avro.schema.parse(data)
    except IOError:  # pragma: no cover
        raise NotFound('Schema does not exist.')
Return a schema for a content type in a repository. :param Repo repo: The git repository. :returns: the parsed Avro schema object
def send_location(self, geo_uri, name, thumb_url=None, **thumb_info):
    """Send a location event into this room.

    See http://matrix.org/docs/spec/client_server/r0.2.0.html#m-location
    for thumb_info

    Args:
        geo_uri (str): The geo uri representing the location.
        name (str): Description for the location.
        thumb_url (str): URL to the thumbnail of the location.
        thumb_info (): Metadata about the thumbnail, type ImageInfo.
    """
    # Delegate to the low-level client API; extra keyword arguments are
    # forwarded as the thumbnail-metadata dict.
    api = self.client.api
    return api.send_location(self.room_id, geo_uri, name,
                             thumb_url, thumb_info)
Send a location to the room. See http://matrix.org/docs/spec/client_server/r0.2.0.html#m-location for thumb_info Args: geo_uri (str): The geo uri representing the location. name (str): Description for the location. thumb_url (str): URL to the thumbnail of the location. thumb_info (): Metadata about the thumbnail, type ImageInfo.
def getprice(self):
    """
    Here we obtain the price for the quote and make sure it has a
    feed price.

    Only acts when the bot's ``target.reference`` is ``"feed"``: reads
    the settlement price from the market ticker and sanity-checks it
    before returning it.  Returns ``None`` implicitly otherwise.

    NOTE(review): ``assert`` statements are stripped under ``python -O``,
    so these validations silently disappear in optimized mode.
    """
    target = self.bot.get("target", {})
    if target.get("reference") == "feed":
        # The settlement feed price is only defined on the core quote market.
        assert self.market == self.market.core_quote_market(), "Wrong market for 'feed' reference!"
        ticker = self.market.ticker()
        price = ticker.get("quoteSettlement_price")
        # An infinite price indicates a broken or absent price feed.
        assert abs(price["price"]) != float("inf"), "Check price feed of asset! (%s)" % str(price)
        return price
Here we obtain the price for the quote and make sure it has a feed price
def iter_instances(self):
    """Iterate over the stored objects.

    Yields:
        wrkey: The two-tuple key used to store the object
        obj: The instance or function object
    """
    # Snapshot the keys into a set so entries may be removed while we
    # iterate without invalidating the loop.
    for key in set(self.keys()):
        instance = self.get(key)
        # Skip entries whose referent has already gone away.
        if instance is not None:
            yield key, instance
Iterate over the stored objects Yields: wrkey: The two-tuple key used to store the object obj: The instance or function object
def get_csv_from_metadata(dsn, d):
    """
    Two goals. Get all csv from metadata, and return new metadata with
    generated filenames to match files.

    :param str dsn: Dataset name
    :param dict d: Metadata
    :return tuple: (modified metadata dict, OrderedDict of csv data keyed
        by generated filename)
    """
    logger_csvs.info("enter get_csv_from_metadata")
    _csvs = OrderedDict()
    # Work on a deep copy so the caller's metadata is left untouched.
    _d = copy.deepcopy(d)
    try:
        if "paleoData" in _d:
            # Process paleoData section
            _d["paleoData"], _csvs = _get_csv_from_section(_d["paleoData"], "{}.paleo".format(dsn), _csvs)
        if "chronData" in _d:
            _d["chronData"], _csvs = _get_csv_from_section(_d["chronData"], "{}.chron".format(dsn), _csvs)
    except Exception as e:
        # Best-effort: log and fall through, returning whatever was built.
        print("Error: get_csv_from_metadata: {}, {}".format(dsn, e))
        logger_csvs.error("get_csv_from_metadata: {}, {}".format(dsn, e))
    logger_csvs.info("exit get_csv_from_metadata")
    return _d, _csvs
Two goals. Get all csv from metadata, and return new metadata with generated filenames to match files. :param str dsn: Dataset name :param dict d: Metadata :return tuple: (modified metadata dict, ordered dict of csv data keyed by generated filename)
def barycentric_to_cartesian(tri, bc):
    '''
    barycentric_to_cartesian(tri, bc) yields the d x n coordinate matrix of the
    given barycentric coordinate matrix (also d x n) bc interpolated in the n
    triangles given in the array tri. See also cartesian_to_barycentric. If tri
    and bc represent one triangle and coordinate, then just the coordinate and
    not a matrix is returned. The value d, dimensions, must be 2 or 3.
    '''
    bc = np.asarray(bc)
    tri = np.asarray(tri)
    # Single coordinate given: promote both inputs to matrix form, recurse,
    # then unwrap the single resulting column.
    if len(bc.shape) == 1:
        return barycentric_to_cartesian(np.transpose(np.asarray([tri]), (1,2,0)),
                                        np.asarray([bc]).T)[:,0]
    # Normalize bc to shape (2, n): the two free barycentric weights.
    bc = bc if bc.shape[0] == 2 else bc.T
    if bc.shape[0] != 2:
        raise ValueError('barycentric matrix did not have a dimension of size 2')
    n = bc.shape[1]
    # we know how many bc's there are now; lets reorient tri to match with the
    # last dimension as n
    if len(tri.shape) == 2:
        tri = np.transpose([tri for _ in range(n)], (1,2,0))
    # The possible orientations of tri: bring it to shape (3, d, n), where the
    # first axis indexes the triangle's three vertices and d is 2 or 3.
    if tri.shape[0] == 3:
        if tri.shape[1] in [2,3] and tri.shape[2] == n:
            pass  # default orientation
        elif tri.shape[1] == n and tri.shape[2] in [2,3]:
            tri = np.transpose(tri, (0,2,1))
        else:
            raise ValueError('could not deduce triangle dimensions')
    elif tri.shape[1] == 3:
        if tri.shape[0] in [2,3] and tri.shape[2] == n:
            tri = np.transpose(tri, (1,0,2))
        elif tri.shape[0] == n and tri.shape[2] in [2,3]:
            tri = np.transpose(tri, (1,2,0))
        else:
            raise ValueError('could not deduce triangle dimensions')
    elif tri.shape[2] == 3:
        if tri.shape[0] in [2,3] and tri.shape[1] == n:
            tri = np.transpose(tri, (2,0,1))
        elif tri.shape[0] == n and tri.shape[1] in [2,3]:
            tri = np.transpose(tri, (2,1,0))
        else:
            raise ValueError('could not deduce triangle dimensions')
    else:
        raise ValueError('At least one dimension of triangles must be 3')
    if tri.shape[0] != 3 or (tri.shape[1] not in [2,3]):
        raise ValueError('Triangle array did not have dimensions of sizes 3 and (2 or 3)')
    if tri.shape[2] != n:
        raise ValueError('number of triangles and coordinates must match')
    (l1,l2) = bc
    (p1, p2, p3) = tri
    # The third barycentric weight is implied by the three summing to 1.
    l3 = (1 - l1 - l2)
    # Weighted sum of the three vertices, computed per spatial dimension.
    return np.asarray([x1*l1 + x2*l2 + x3*l3 for (x1,x2,x3) in zip(p1, p2, p3)])
barycentric_to_cartesian(tri, bc) yields the d x n coordinate matrix of the given barycentric coordinate matrix (also d x n) bc interpolated in the n triangles given in the array tri. See also cartesian_to_barycentric. If tri and bc represent one triangle and coordinate, then just the coordinate and not a matrix is returned. The value d, dimensions, must be 2 or 3.
def update_thread(cls, session, conversation, thread):
    """Update a thread.

    Args:
        session (requests.sessions.Session): Authenticated session.
        conversation (helpscout.models.Conversation): The conversation
            that the thread belongs to.
        thread (helpscout.models.Thread): The thread to be updated.

    Returns:
        helpscout.models.Conversation: Conversation including freshly
            updated thread.
    """
    # Serialize the thread and ask the API to return the refreshed
    # conversation in the PUT response (``reload`` flag).
    data = thread.to_api()
    data['reload'] = True
    return cls(
        '/conversations/%s/threads/%d.json' % (
            conversation.id, thread.id,
        ),
        data=data,
        request_type=RequestPaginator.PUT,
        singleton=True,
        session=session,
    )
Update a thread. Args: session (requests.sessions.Session): Authenticated session. conversation (helpscout.models.Conversation): The conversation that the thread belongs to. thread (helpscout.models.Thread): The thread to be updated. Returns: helpscout.models.Conversation: Conversation including freshly updated thread.
def format_all(self):
    """Return a human-readable trace of this object's parent, children
    and links."""
    parts = [
        '\n--- Format all : ' + str(self.name) + ' -------------\n',
        ' parent = ' + str(self.parent) + '\n',
        self._get_all_children(),
        self._get_links(),
    ]
    return ''.join(parts)
return a trace of parents and children of the object
def rtype_to_model(rtype):
    """
    Return a model class object given a string resource type

    :param rtype: string resource type
    :return: model class object
    :raise: ValueError if the resource type is not registered
    """
    # Case-insensitive match against every registered model's RTYPE.
    wanted = rtype.lower()
    for candidate in goldman.config.MODELS:
        if candidate.RTYPE.lower() == wanted:
            return candidate
    raise ValueError('%s resource type not registered' % rtype)
Return a model class object given a string resource type :param rtype: string resource type :return: model class object :raise: ValueError
def get_repo(name, basedir=None, **kwargs):  # pylint: disable=W0613
    '''
    Display a repo from <basedir> (default basedir: all dirs in ``reposdir``
    yum option).

    CLI Examples:

    .. code-block:: bash

        salt '*' pkg.get_repo myrepo
        salt '*' pkg.get_repo myrepo basedir=/path/to/dir
        salt '*' pkg.get_repo myrepo basedir=/path/to/dir,/path/to/another/dir

    Returns the repo's option dict, or an empty dict when not found.
    '''
    repos = list_repos(basedir)

    # Find out what file the repo lives in
    repofile = ''
    for repo in repos:
        if repo == name:
            repofile = repos[repo]['file']
            # Repo names are unique dict keys — stop at the first match
            # instead of scanning the remainder.
            break

    if repofile:
        # Return just the single named repo parsed from that file.
        filerepos = _parse_repo_file(repofile)[1]
        return filerepos[name]
    return {}
Display a repo from <basedir> (default basedir: all dirs in ``reposdir`` yum option). CLI Examples: .. code-block:: bash salt '*' pkg.get_repo myrepo salt '*' pkg.get_repo myrepo basedir=/path/to/dir salt '*' pkg.get_repo myrepo basedir=/path/to/dir,/path/to/another/dir
def _set_collection(self, v, load=False):
    """
    Setter method for collection, mapped from YANG variable
    /interface/fortygigabitethernet/rmon/collection (container)

    If this variable is read-only (config: false) in the source YANG file,
    then _set_collection is considered as a private method. Backends looking
    to populate this variable should do so via calling
    thisObj._set_collection() directly.

    NOTE: auto-generated (pyangbind-style) code — the structure below is
    mechanical and should not be hand-edited.
    """
    # Unwrap union-typed values to their underlying type first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce the value into the YANG container binding; raises on
        # any type mismatch.
        t = YANGDynClass(v,base=collection.collection, is_container='container', presence=False, yang_name="collection", rest_name="collection", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'RMON ether collection', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-rmon', defining_module='brocade-rmon', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """collection must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=collection.collection, is_container='container', presence=False, yang_name="collection", rest_name="collection", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'RMON ether collection', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-rmon', defining_module='brocade-rmon', yang_type='container', is_config=True)""",
        })
    self.__collection = t
    # Notify the binding framework that a value was set, if supported.
    if hasattr(self, '_set'):
        self._set()
Setter method for collection, mapped from YANG variable /interface/fortygigabitethernet/rmon/collection (container) If this variable is read-only (config: false) in the source YANG file, then _set_collection is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_collection() directly.
def save_cloud_optimized(self, dest_url, resampling=Resampling.gauss, blocksize=256,
                         overview_blocksize=256, creation_options=None):
    """Save as Cloud Optimized GeoTiff object to a new file.

    :param dest_url: path to the new raster
    :param resampling: which Resampling to use on reading, default Resampling.gauss
    :param blocksize: the size of the blocks default 256
    :param overview_blocksize: the block size of the overviews, default 256
    :param creation_options: dict, options that can override the source raster
        profile, notice that you can't override tiled=True, and the blocksize;
        the list of creation_options can be found here
        https://www.gdal.org/frmt_gtiff.html
    :return: new GeoRaster of the tiled object
    """
    src = self  # GeoRaster2.open(self._filename)

    # Serialize to a temporary plain GeoTIFF (without overviews), then run
    # the COG conversion from that temp file into dest_url.
    with tempfile.NamedTemporaryFile(suffix='.tif') as tf:
        src.save(tf.name, overviews=False)
        convert_to_cog(tf.name, dest_url, resampling, blocksize, overview_blocksize, creation_options)

    # Re-open the freshly written COG so the caller gets a live raster.
    geotiff = GeoRaster2.open(dest_url)
    return geotiff
Save as Cloud Optimized GeoTiff object to a new file. :param dest_url: path to the new raster :param resampling: which Resampling to use on reading, default Resampling.gauss :param blocksize: the size of the blocks default 256 :param overview_blocksize: the block size of the overviews, default 256 :param creation_options: dict, options that can override the source raster profile, notice that you can't override tiled=True, and the blocksize the list of creation_options can be found here https://www.gdal.org/frmt_gtiff.html :return: new GeoRaster of the tiled object
def env():
    """Verify SSH variables and construct exported variables.

    Expands SSH_KEY to an absolute path and fills in defaults for
    SSH_PORT ("22") and SSH_CMD_TIME ("1") when unset, warning on each
    fallback.

    :return: 0 (always; missing variables only produce warnings here).
    """
    ssh = cij.env_to_dict(PREFIX, REQUIRED)

    if "KEY" in ssh:
        # NOTE(review): this mutates the dict returned by env_to_dict;
        # whether the expanded path propagates back into cij.ENV depends
        # on env_to_dict's semantics — TODO confirm.
        ssh["KEY"] = cij.util.expand_path(ssh["KEY"])

    if cij.ENV.get("SSH_PORT") is None:
        cij.ENV["SSH_PORT"] = "22"
        cij.warn("cij.ssh.env: SSH_PORT was not set, assigned: %r" % (
            cij.ENV.get("SSH_PORT")
        ))

    if cij.ENV.get("SSH_CMD_TIME") is None:
        cij.ENV["SSH_CMD_TIME"] = "1"
        cij.warn("cij.ssh.env: SSH_CMD_TIME was not set, assigned: %r" % (
            cij.ENV.get("SSH_CMD_TIME")
        ))

    return 0
Verify SSH variables and construct exported variables
def cli(self, commands):
    """
    Execute a list of commands and return the output in a dictionary
    format using the command as the key.

    Example input: ['show clock', 'show calendar']
    Output example:
        {'show calendar': u'22:02:01 UTC Thu Feb 18 2016',
         'show clock': u'*22:01:51.165 UTC Thu Feb 18 2016'}

    :param commands: list of command strings to run on the device.
    :return: dict mapping each command to its raw output.
    :raises TypeError: if ``commands`` is not a list.
    :raises ValueError: if the device rejects a command.
    """
    cli_output = dict()

    # isinstance is preferred over ``type(...) is list`` so list
    # subclasses are accepted as well (backward compatible).
    if not isinstance(commands, list):
        raise TypeError('Please enter a valid list of commands!')

    for command in commands:
        output = self._send_command(command)
        if 'Invalid input detected' in output:
            raise ValueError('Unable to execute command "{}"'.format(command))
        # The original's redundant setdefault-then-assign is collapsed
        # into a single assignment.
        cli_output[command] = output

    return cli_output
Execute a list of commands and return the output in a dictionary format using the command as the key. Example input: ['show clock', 'show calendar'] Output example: { 'show calendar': u'22:02:01 UTC Thu Feb 18 2016', 'show clock': u'*22:01:51.165 UTC Thu Feb 18 2016'}
def run_review(*args):
    '''
    Get the difference of recents modification, and send the Email.
    For: wiki, page, and post.

    ``*args`` is accepted but unused (scheduler-callback signature).
    Sends nothing when no reviewed items were collected (idx stays 1).
    '''
    # Static HTML header; the CSS classes match difflib's HTML diff output.
    email_cnt = '''<html><head><meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title></title> <style type="text/css"> table.diff {font-family:Courier; border:medium;} .diff_header {background-color:#e0e0e0} td.diff_header {text-align:right} .diff_next {background-color:#c0c0c0} .diff_add {background-color:#aaffaa} .diff_chg {background-color:#ffff77} .diff_sub {background-color:#ffaaaa} </style></head><body>'''
    idx = 1
    email_cnt = email_cnt + '<table border=1>'

    # Each helper appends its rows and advances the running index.
    email_cnt, idx = __get_post_review(email_cnt, idx)  # post
    email_cnt, idx = __get_page_review(email_cnt, idx)  # page.
    email_cnt, idx = __get_wiki_review(email_cnt, idx)  # wiki
    ###########################################################
    diff_str = __get_diff_recent()
    # Guard against oversized mails: only inline the diff when small.
    if len(diff_str) < 20000:
        email_cnt = email_cnt + diff_str

    email_cnt = email_cnt + '''</body></html>'''

    # idx > 1 means at least one reviewed item was collected above.
    if idx > 1:
        send_mail(post_emails, "{0}|{1}|{2}".format(SMTP_CFG['name'], '文档更新情况', DATE_STR), email_cnt)
Get the difference of recents modification, and send the Email. For: wiki, page, and post.
def update_marker(self, iid, **kwargs):
    """
    Change the options for a certain marker and redraw the marker.

    :param iid: identifier of the marker to change
    :type iid: str
    :param kwargs: Dictionary of options to update
    :type kwargs: dict
    :raises: ValueError if ``iid`` is not a known marker
    """
    if iid not in self._markers:
        raise ValueError("Unknown iid passed as argument: {}".format(iid))
    self.check_kwargs(kwargs)
    # Merge the new options into the stored marker, then rebuild it by
    # deleting and re-creating the on-canvas marker.
    options = self._markers[iid]
    options.update(kwargs)
    self.delete_marker(iid)
    return self.create_marker(options["category"], options["start"],
                              options["finish"], options)
Change the options for a certain marker and redraw the marker :param iid: identifier of the marker to change :type iid: str :param kwargs: Dictionary of options to update :type kwargs: dict :raises: ValueError
def failback_from_replicant(self, volume_id, replicant_id):
    """Failback from a volume replicant.

    :param integer volume_id: The id of the volume
    :param integer replicant_id: ID of replicant to failback from
    :return: Returns whether failback was successful or not
    """
    # Delegate to the Network_Storage service; ``id`` selects the volume
    # the call is issued against.
    service_args = ('Network_Storage', 'failbackFromReplicant', replicant_id)
    return self.client.call(*service_args, id=volume_id)
Failback from a volume replicant. :param integer volume_id: The id of the volume :param integer replicant_id: ID of replicant to failback from :return: Returns whether failback was successful or not
def make_error_response(self, cond):
    """Create error response for the any non-error presence stanza.

    :Parameters:
        - `cond`: error condition name, as defined in XMPP specification.
    :Types:
        - `cond`: `unicode`

    :return: new presence stanza.
    :returntype: `Presence`
    :raises ValueError: when called on a stanza that is itself an error.
    """
    # Responding to an error with another error could create stanza loops.
    if self.stanza_type == "error":
        raise ValueError("Errors may not be generated in response"
                         " to errors")

    # Mirror the original stanza's addressing and presence fields,
    # swapping in the requested error condition.
    stanza = Presence(stanza_type = "error", from_jid = self.from_jid,
                      to_jid = self.to_jid, stanza_id = self.stanza_id,
                      status = self._status, show = self._show,
                      priority = self._priority, error_cond = cond)

    # Copy the original payload onto the error response, decoding it
    # lazily if it has not been parsed yet.
    if self._payload is None:
        self.decode_payload()
    for payload in self._payload:
        stanza.add_payload(payload)
    return stanza
Create error response for the any non-error presence stanza. :Parameters: - `cond`: error condition name, as defined in XMPP specification. :Types: - `cond`: `unicode` :return: new presence stanza. :returntype: `Presence`
def get(self, key: Text, count: Optional[int]=None, formatter: Formatter=None,
        locale: Text=None, params: Optional[Dict[Text, Any]]=None,
        flags: Optional[Flags]=None) -> List[Text]:
    """
    Get the appropriate translation given the specified parameters.

    :param key: Translation key
    :param count: Count for plurals (not supported yet; passing a value
        raises)
    :param formatter: Optional string formatter to use
    :param locale: Prefered locale to get the string from
    :param params: Params to be substituted
    :param flags: Flags to help choosing one version or the other

    :raises TranslationError: if ``count`` is given.
    :raises MissingTranslationError: if ``key`` has no translation.
    :raises MissingParamError: if ``params`` lacks a substitution name.
    """
    if params is None:
        params = {}

    if count is not None:
        raise TranslationError('Count parameter is not supported yet')

    locale = self.choose_locale(locale)

    try:
        group: SentenceGroup = self.dict[locale][key]
    except KeyError:
        raise MissingTranslationError('Translation "{}" does not exist'
                                      .format(key))

    try:
        trans = group.render(flags or {})
        out = []

        for line in trans:
            if not formatter:
                out.append(line.format(**params))
            else:
                out.append(formatter.format(line, **params))
    except KeyError as e:
        # NOTE(review): a KeyError raised inside group.render() would also
        # be reported as a missing *parameter* here — confirm render()
        # cannot raise KeyError for unrelated reasons.
        raise MissingParamError(
            'Parameter "{}" missing to translate "{}"'
            .format(e.args[0], key)
        )
    else:
        return out
Get the appropriate translation given the specified parameters. :param key: Translation key :param count: Count for plurals :param formatter: Optional string formatter to use :param locale: Prefered locale to get the string from :param params: Params to be substituted :param flags: Flags to help choosing one version or the other
def get_entry_by_material_id(self, material_id, compatible_only=True,
                             inc_structure=None, property_data=None,
                             conventional_unit_cell=False):
    """
    Get a ComputedEntry corresponding to a material_id.

    Args:
        material_id (str): Materials Project material_id (a string,
            e.g., mp-1234).
        compatible_only (bool): Whether to return only "compatible"
            entries. Compatible entries are entries that have been
            processed using the MaterialsProjectCompatibility class,
            which performs adjustments to allow mixing of GGA and GGA+U
            calculations for more accurate phase diagrams and reaction
            energies.
        inc_structure (str): If None, entries returned are
            ComputedEntries. If inc_structure="final",
            ComputedStructureEntries with final structures are returned.
            Otherwise, ComputedStructureEntries with initial structures
            are returned.
        property_data (list): Specify additional properties to include in
            entry.data. If None, no data. Should be a subset of
            supported_properties.
        conventional_unit_cell (bool): Whether to get the standard
            conventional unit cell

    Returns:
        ComputedEntry or ComputedStructureEntry object.
    """
    entries = self.get_entries(
        material_id,
        compatible_only=compatible_only,
        inc_structure=inc_structure,
        property_data=property_data,
        conventional_unit_cell=conventional_unit_cell)
    # A material_id identifies exactly one entry; unwrap it.
    return entries[0]
Get a ComputedEntry corresponding to a material_id. Args: material_id (str): Materials Project material_id (a string, e.g., mp-1234). compatible_only (bool): Whether to return only "compatible" entries. Compatible entries are entries that have been processed using the MaterialsProjectCompatibility class, which performs adjustments to allow mixing of GGA and GGA+U calculations for more accurate phase diagrams and reaction energies. inc_structure (str): If None, entries returned are ComputedEntries. If inc_structure="final", ComputedStructureEntries with final structures are returned. Otherwise, ComputedStructureEntries with initial structures are returned. property_data (list): Specify additional properties to include in entry.data. If None, no data. Should be a subset of supported_properties. conventional_unit_cell (bool): Whether to get the standard conventional unit cell Returns: ComputedEntry or ComputedStructureEntry object.
def create(cls, tokens:Tokens, max_vocab:int, min_freq:int) -> 'Vocab':
    "Create a vocabulary from a set of `tokens`."
    # Count token occurrences over all documents, then keep the most
    # frequent ones that clear the minimum-frequency bar.
    counts = Counter(tok for doc in tokens for tok in doc)
    itos = [tok for tok, c in counts.most_common(max_vocab) if c >= min_freq]
    # Pin the special tokens to the front of the vocab, preserving the
    # order given by `defaults.text_spec_tok`.
    for spec in reversed(defaults.text_spec_tok):
        if spec in itos:
            itos.remove(spec)
        itos.insert(0, spec)
    return cls(itos)
Create a vocabulary from a set of `tokens`.
def get_mapping_from_db3_file( db_path ):
    ''' Does the work of reading the Rosetta SQLite3 .db3 file to retrieve the mapping.

    :param db_path: path to the .db3 file produced by Rosetta.
    :return: dict mapping PDB residue identifiers
        ("<chain><right-justified resnum (4 chars)><insertion code>") to
        dicts with keys 'pose_residue_id', 'name3' and 'res_type'.
    '''
    import sqlite3 # should be moved to the top but we do this here for CentOS 5 support
    conn = sqlite3.connect(db_path)
    try:
        results = conn.cursor().execute(''' SELECT chain_id, pdb_residue_number, insertion_code, residues.struct_id, residues.resNum, residues.name3, residues.res_type FROM residue_pdb_identification INNER JOIN residues ON residue_pdb_identification.struct_id=residues.struct_id AND residue_pdb_identification.residue_number=residues.resNum ''')

        # Create the mapping from PDB residues to Rosetta residues
        rosetta_residue_ids = []
        mapping = {}
        for r in results:
            mapping["%s%s%s" % (r[0], str(r[1]).rjust(4), r[2])] = {'pose_residue_id' : r[4], 'name3' : r[5], 'res_type' : r[6]}
            rosetta_residue_ids.append(r[4])

        # Ensure that the the range of the map is exactly the set of Rosetta
        # residues i.e. the map from (a subset of) the PDB residues to the
        # Rosetta residues is surjective
        raw_residue_list = [r for r in conn.cursor().execute('''SELECT resNum, name3 FROM residues ORDER BY resNum''')]
        assert(sorted([r[0] for r in raw_residue_list]) == sorted(rosetta_residue_ids))
    finally:
        # BUG FIX: the original never closed the connection, leaking a file
        # handle on every call.
        conn.close()
    return mapping
Does the work of reading the Rosetta SQLite3 .db3 file to retrieve the mapping
def get_version(module):
    """
    Return package version as listed in `__version__` in `<module>.py`.

    :param module: module name, without the ``.py`` suffix, resolved
        relative to the current working directory.
    :return: the version string.
    :raises AttributeError: if the file contains no ``__version__`` line
        (re.search returns None).
    """
    # BUG FIX: use a context manager so the file handle is closed
    # deterministically instead of leaking until garbage collection.
    with open('{0}.py'.format(module)) as fp:
        init_py = fp.read()
    return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
Return package version as listed in `__version__`.
def find_movers(choosers, rates, rate_column):
    """
    Returns an array of the indexes of the `choosers` that are slated
    to move.

    Parameters
    ----------
    choosers : pandas.DataFrame
        Table of agents from which to find movers.
    rates : pandas.DataFrame
        Table of relocation rates. Index is unused.

        Other columns describe filters on the `choosers`
        table so that different segments can have different relocation
        rates. Columns that ends with '_max' will be used to create
        a "less than" filters, columns that end with '_min' will be
        used to create "greater than or equal to" filters.
        A column with no suffix will be used to make an 'equal to' filter.

        An example `rates` structure:

            age_of_head_max  age_of_head_min
                        nan               65
                         65               40

        In this example the `choosers` table would need to have an
        'age_of_head' column on which to filter.

        nan should be used to flag filters that do not apply
        in a given row.
    rate_column : object
        Name of column in `rates` table that has relocation rates.

    Returns
    -------
    movers : pandas.Index
        Suitable for indexing `choosers` by index.
    """
    logger.debug('start: find movers for relocation')
    # Default rate of zero means "never moves" unless some rule matches.
    relocation_rates = pd.Series(
        np.zeros(len(choosers)), index=choosers.index)
    for _, row in rates.iterrows():
        # Later rules overwrite earlier ones for agents matched by both.
        indexes = util.filter_table(choosers, row, ignore={rate_column}).index
        relocation_rates.loc[indexes] = row[rate_column]
    # Independent Bernoulli draw per agent against its assigned rate.
    movers = relocation_rates.index[
        relocation_rates > np.random.random(len(choosers))]
    logger.debug('picked {} movers for relocation'.format(len(movers)))
    logger.debug('finish: find movers for relocation')
    return movers
Returns an array of the indexes of the `choosers` that are slated to move. Parameters ---------- choosers : pandas.DataFrame Table of agents from which to find movers. rates : pandas.DataFrame Table of relocation rates. Index is unused. Other columns describe filters on the `choosers` table so that different segments can have different relocation rates. Columns that ends with '_max' will be used to create a "less than" filters, columns that end with '_min' will be used to create "greater than or equal to" filters. A column with no suffix will be used to make an 'equal to' filter. An example `rates` structure: age_of_head_max age_of_head_min nan 65 65 40 In this example the `choosers` table would need to have an 'age_of_head' column on which to filter. nan should be used to flag filters that do not apply in a given row. rate_column : object Name of column in `rates` table that has relocation rates. Returns ------- movers : pandas.Index Suitable for indexing `choosers` by index.
def render_mail(self, template_prefix, email, context):
    """
    Renders an e-mail to `email`.  `template_prefix` identifies the
    e-mail that is to be sent, e.g. "account/email/email_confirmation".

    Renders "<prefix>_subject.txt" for the subject and
    "<prefix>_message.{html,txt}" for the body; produces a multipart
    message when both bodies exist, and an HTML-only message when only
    the html template is present.

    :return: an unsent EmailMessage / EmailMultiAlternatives instance.
    :raises TemplateDoesNotExist: when neither body template exists.
    """
    subject = render_to_string('{0}_subject.txt'.format(template_prefix),
                               context)
    # remove superfluous line breaks
    subject = " ".join(subject.splitlines()).strip()
    subject = self.format_email_subject(subject)

    bodies = {}
    for ext in ['html', 'txt']:
        try:
            template_name = '{0}_message.{1}'.format(template_prefix, ext)
            bodies[ext] = render_to_string(template_name,
                                           context).strip()
        except TemplateDoesNotExist:
            if ext == 'txt' and not bodies:
                # We need at least one body
                raise
    if 'txt' in bodies:
        msg = EmailMultiAlternatives(subject,
                                     bodies['txt'],
                                     settings.DEFAULT_FROM_EMAIL,
                                     [email])
        if 'html' in bodies:
            msg.attach_alternative(bodies['html'], 'text/html')
    else:
        msg = EmailMessage(subject,
                           bodies['html'],
                           settings.DEFAULT_FROM_EMAIL,
                           [email])
        msg.content_subtype = 'html'  # Main content is now text/html
    return msg
Renders an e-mail to `email`. `template_prefix` identifies the e-mail that is to be sent, e.g. "account/email/email_confirmation"
def upload_all_books(book_id_start, book_id_end, rdf_library=None):
    """ Uses the fetch, make, push subcommands to
        mirror Project Gutenberg to a github3 api

    Args:
        book_id_start: first Project Gutenberg book id (inclusive).
        book_id_end: last Project Gutenberg book id (inclusive).
        rdf_library: optional RDF library passed through to upload_book.
    """
    # TODO refactor appname into variable
    logger.info(
        "starting a gitberg mass upload: {0} -> {1}".format(
            book_id_start, book_id_end
        )
    )
    # BUG FIX: the error counter must accumulate across books. It was
    # previously re-initialized inside the loop on every iteration, so
    # the `errors > 10` abort limit could never be reached.
    errors = 0
    for book_id in range(int(book_id_start), int(book_id_end) + 1):
        cache = {}  # fresh per-book cache, as before
        try:
            if int(book_id) in missing_pgid:
                print(u'missing\t{}'.format(book_id))
                continue
            upload_book(book_id, rdf_library=rdf_library, cache=cache)
        except Exception as e:
            print(u'error\t{}'.format(book_id))
            logger.error(u"Error processing: {}\r{}".format(book_id, e))
            errors += 1
            if errors > 10:
                print('error limit reached!')
                break
Uses the fetch, make, push subcommands to mirror Project Gutenberg to a github3 api
def _restore_replace(self):
    """
    Check if we need to replace ".gitignore" to ".keep".

    :return: The replacement status.
    :rtype: bool
    """

    if not PyFunceble.path.isdir(self.base + ".git"):
        # No `.git` directory: replacement is needed.
        return True

    # The `.git` directory exists; replacement is needed only when the
    # origin remote does not point to PyFunceble.
    remote_output = Command("git remote show origin").execute()
    return "PyFunceble" not in remote_output
Check if we need to replace ".gitignore" to ".keep". :return: The replacement status. :rtype: bool
def getattr(self, name, default: Any = _missing):
    """
    Convenience method equivalent to
    ``deep_getattr(mcs_args.clsdict, mcs_args.bases, 'attr_name'[, default])``
    """
    # Delegates straight to deep_getattr with this object's class dict
    # and base classes; `default` passes through unchanged (presumably a
    # sentinel that makes deep_getattr raise when missing — confirm there).
    return deep_getattr(self.clsdict, self.bases, name, default)
Convenience method equivalent to ``deep_getattr(mcs_args.clsdict, mcs_args.bases, 'attr_name'[, default])``
def _fetch_stock_data(self, stock_list): """获取股票信息""" pool = multiprocessing.pool.ThreadPool(len(stock_list)) try: res = pool.map(self.get_stocks_by_range, stock_list) finally: pool.close() return [d for d in res if d is not None]
获取股票信息
def Earth(pos=(0, 0, 0), r=1, lw=1):
    """Build a textured actor representing the Earth.

    .. hint:: |geodesic| |geodesic.py|_

    :param pos: position of the sphere center.
    :param r: sphere radius.
    :param lw: line width of the coastline overlay; if falsy, only the
        textured sphere actor is returned (no coastline assembly).
    """
    tss = vtk.vtkTexturedSphereSource()
    tss.SetRadius(r)
    tss.SetThetaResolution(72)
    tss.SetPhiResolution(36)
    earthMapper = vtk.vtkPolyDataMapper()
    earthMapper.SetInputConnection(tss.GetOutputPort())
    earthActor = Actor(c="w")
    earthActor.SetMapper(earthMapper)
    atext = vtk.vtkTexture()
    pnmReader = vtk.vtkPNMReader()
    # BUG FIX: removed dead code — a module-relative directory (`cdir`)
    # was computed via os.path but never used; the texture is loaded
    # from settings.textures_path.
    fn = settings.textures_path + "earth.ppm"
    pnmReader.SetFileName(fn)
    atext.SetInputConnection(pnmReader.GetOutputPort())
    atext.InterpolateOn()
    earthActor.SetTexture(atext)
    if not lw:
        earthActor.SetPosition(pos)
        return earthActor
    es = vtk.vtkEarthSource()
    es.SetRadius(r / 0.995)
    earth2Mapper = vtk.vtkPolyDataMapper()
    earth2Mapper.SetInputConnection(es.GetOutputPort())
    earth2Actor = Actor()  # vtk.vtkActor()
    earth2Actor.SetMapper(earth2Mapper)
    earth2Mapper.ScalarVisibilityOff()
    earth2Actor.GetProperty().SetLineWidth(lw)
    ass = Assembly([earthActor, earth2Actor])
    ass.SetPosition(pos)
    settings.collectable_actors.append(ass)
    return ass
Build a textured actor representing the Earth. .. hint:: |geodesic| |geodesic.py|_
def _check_lock_permission(
    self, url, lock_type, lock_scope, lock_depth, token_list, principal
):
    """Check, if <principal> can lock <url>, otherwise raise an error.

    If locking <url> would create a conflict, DAVError(HTTP_LOCKED) is
    raised. An embedded DAVErrorCondition contains the conflicting resource.

    @see http://www.webdav.org/specs/rfc4918.html#lock-model
        - Parent locks WILL NOT be conflicting, if they are depth-0.
        - Exclusive depth-infinity parent locks WILL be conflicting, even if
          they are owned by <principal>.
        - Child locks WILL NOT be conflicting, if we request a depth-0 lock.
        - Exclusive child locks WILL be conflicting, even if they are owned
          by <principal>. (7.7)
        - It is not enough to check whether a lock is owned by <principal>,
          but also the token must be passed with the request. (Because
          <principal> may run two different applications on his client.)
        - <principal> cannot lock-exclusive, if he holds a parent
          shared-lock. (This would only make sense, if he was the only
          shared-lock holder.)
        - TODO: litmus tries to acquire a shared lock on one resource twice
          (locks: 27 'double_sharedlock') and fails, when we return
          HTTP_LOCKED. So we allow multi shared locks on a resource even for
          the same principal.

    @param url: URL that shall be locked
    @param lock_type: "write"
    @param lock_scope: "shared"|"exclusive"
    @param lock_depth: "0"|"infinity"
    @param token_list: list of lock tokens, that the user submitted in
        If: header
    @param principal: name of the principal requesting a lock

    @return: None (or raise)
    """
    assert lock_type == "write"
    assert lock_scope in ("shared", "exclusive")
    assert lock_depth in ("0", "infinity")
    _logger.debug(
        "checkLockPermission({}, {}, {}, {})".format(
            url, lock_scope, lock_depth, principal
        )
    )

    # Error precondition to collect conflicting URLs
    errcond = DAVErrorCondition(PRECONDITION_CODE_LockConflict)

    # Hold the storage read-lock while walking parents and children so the
    # lock table cannot change under us.
    self._lock.acquire_read()
    try:
        # Check url and all parents for conflicting locks
        u = url
        while u:
            ll = self.get_url_lock_list(u)
            for l in ll:
                _logger.debug(" check parent {}, {}".format(u, lock_string(l)))
                if u != url and l["depth"] != "infinity":
                    # We only consider parents with Depth: infinity
                    continue
                elif l["scope"] == "shared" and lock_scope == "shared":
                    # Only compatible with shared locks (even by same
                    # principal)
                    continue
                # Lock conflict
                _logger.debug(
                    " -> DENIED due to locked parent {}".format(lock_string(l))
                )
                errcond.add_href(l["root"])
            u = util.get_uri_parent(u)

        if lock_depth == "infinity":
            # Check child URLs for conflicting locks
            childLocks = self.storage.get_lock_list(
                url, include_root=False, include_children=True, token_only=False
            )

            for l in childLocks:
                # Any existing lock below <url> conflicts with a
                # depth-infinity request, regardless of scope.
                assert util.is_child_uri(url, l["root"])
                # if util.is_child_uri(url, l["root"]):
                _logger.debug(
                    " -> DENIED due to locked child {}".format(lock_string(l))
                )
                errcond.add_href(l["root"])
    finally:
        self._lock.release()

    # If there were conflicts, raise HTTP_LOCKED for <url>, and pass
    # conflicting resource with 'no-conflicting-lock' precondition
    if len(errcond.hrefs) > 0:
        raise DAVError(HTTP_LOCKED, err_condition=errcond)
    return
Check, if <principal> can lock <url>, otherwise raise an error. If locking <url> would create a conflict, DAVError(HTTP_LOCKED) is raised. An embedded DAVErrorCondition contains the conflicting resource. @see http://www.webdav.org/specs/rfc4918.html#lock-model - Parent locks WILL NOT be conflicting, if they are depth-0. - Exclusive depth-infinity parent locks WILL be conflicting, even if they are owned by <principal>. - Child locks WILL NOT be conflicting, if we request a depth-0 lock. - Exclusive child locks WILL be conflicting, even if they are owned by <principal>. (7.7) - It is not enough to check whether a lock is owned by <principal>, but also the token must be passed with the request. (Because <principal> may run two different applications on his client.) - <principal> cannot lock-exclusive, if he holds a parent shared-lock. (This would only make sense, if he was the only shared-lock holder.) - TODO: litmus tries to acquire a shared lock on one resource twice (locks: 27 'double_sharedlock') and fails, when we return HTTP_LOCKED. So we allow multi shared locks on a resource even for the same principal. @param url: URL that shall be locked @param lock_type: "write" @param lock_scope: "shared"|"exclusive" @param lock_depth: "0"|"infinity" @param token_list: list of lock tokens, that the user submitted in If: header @param principal: name of the principal requesting a lock @return: None (or raise)
def client_list(self, *args):
    """Display a list of connected clients"""
    if self._clients:
        # Pretty-print the collection of connected clients.
        self.log(self._clients, pretty=True)
    else:
        self.log('No clients connected')
Display a list of connected clients
def certify_enum(value, kind=None, required=True):
    """
    Certifier for enum.

    :param value: The value to be certified.
    :param kind: The enum type that value should be an instance of.
    :param bool required: Whether the value can be `None`. Defaults to True.
    :raises CertifierTypeError: The type is invalid
    """
    # A truthy result means the required/None handling already settled it.
    if certify_required(value=value, required=required):
        return

    if isinstance(value, kind):
        return

    message = "expected {expected!r}, but value is of type {actual!r}".format(
        expected=kind.__name__, actual=value.__class__.__name__)
    raise CertifierTypeError(
        message=message,
        value=value,
        required=required,
    )
Certifier for enum. :param value: The value to be certified. :param kind: The enum type that value should be an instance of. :param bool required: Whether the value can be `None`. Defaults to True. :raises CertifierTypeError: The type is invalid
def is_shortcut_in_use(self, shortcut):
    """
    Returns if given action shortcut is in use.

    :param shortcut: Action shortcut.
    :type shortcut: unicode
    :return: Is shortcut in use.
    :rtype: bool
    """

    # Walk every registered action in every category and compare its
    # key sequence with the requested shortcut.
    for path, actionName, action in foundations.walkers.dictionaries_walker(self.__categories):
        if action.shortcut() == QKeySequence(shortcut):
            return True
    return False
Returns if given action shortcut is in use. :param name: Action shortcut. :type name: unicode :return: Is shortcut in use. :rtype: bool
def line_spacing_rule(self):
    """
    A member of the :ref:`WdLineSpacing` enumeration indicating how the
    value of :attr:`line_spacing` should be interpreted. Assigning any of
    the :ref:`WdLineSpacing` members :attr:`SINGLE`, :attr:`DOUBLE`, or
    :attr:`ONE_POINT_FIVE` will cause the value of :attr:`line_spacing`
    to be updated to produce the corresponding line spacing.
    """
    paragraph_props = self._element.pPr
    if paragraph_props is None:
        # No paragraph properties element -> no spacing rule defined.
        return None
    return self._line_spacing_rule(
        paragraph_props.spacing_line, paragraph_props.spacing_lineRule
    )
A member of the :ref:`WdLineSpacing` enumeration indicating how the value of :attr:`line_spacing` should be interpreted. Assigning any of the :ref:`WdLineSpacing` members :attr:`SINGLE`, :attr:`DOUBLE`, or :attr:`ONE_POINT_FIVE` will cause the value of :attr:`line_spacing` to be updated to produce the corresponding line spacing.
def dequeue(self, k):
    """Outputs *k* draws from the multinomial distribution.

    :param k: number of draws to return; must be <= M (queue capacity).
    :return: numpy int array of length k.
    :raises ValueError: if k exceeds the queue capacity M.
    """
    if self.j + k <= self.M:
        # Enough pre-computed draws remain in the buffer.
        out = self.A[self.j:(self.j + k)]
        self.j += k
    elif k <= self.M:
        # Drain the remaining draws, regenerate the buffer, then take
        # the rest from the fresh buffer.
        out = np.empty(k, 'int')
        nextra = self.j + k - self.M
        out[:(k - nextra)] = self.A[self.j:]
        self.enqueue()
        out[(k - nextra):] = self.A[:nextra]
        self.j = nextra
    else:
        # BUG FIX: the original message embedded a line-continuation
        # artifact ("\ " plus indentation) in the string; keep it clean.
        raise ValueError('MultinomialQueue: k must be <= M (the max '
                         'capacity of the queue)')
    return out
Outputs *k* draws from the multinomial distribution.
def export(self, exclude=None):
    """Return a dictionary representation of the document.

    :param exclude: optional iterable of field names to omit from the
        result; names starting with "_" are always skipped.
    :return: dict mapping field name to the field's exported value.
    """
    # BUG FIX: avoid a mutable default argument ([]); use None as the
    # sentinel and substitute an empty tuple.
    if exclude is None:
        exclude = ()
    fields = (
        (key, self.get_field(key)) for key in self.schema
        if not key.startswith("_") and key not in exclude
    )
    return {name: field.export() for name, field in fields}
returns a dictionary representation of the document
def wait_for(self, pids=None, status_list=process_result_statuses):
    '''
    wait_for(self, pids=[], status_list=process_result_statuses)

    Waits for a process to finish

    :Parameters:
    * *pids* (`list`) -- list of processes waiting to be finished
    * *status_list* (`list`) -- optional - List of statuses to wait for processes to finish with

    :Example:
    .. code-block:: python

       pid = opereto_client.create_process(service='simple_shell_command', title='Test simple shell command service')
       opereto_client.wait_for([pid], ['failure', 'error'])
       opereto_client.rerun_process(pid)

    '''
    # BUG FIX: a mutable default argument ([]) is shared across calls;
    # use None as the sentinel instead.
    if pids is None:
        pids = []
    results = {}
    pids = self._get_pids(pids)
    for pid in pids:
        # Poll each process every 5 seconds until it reaches one of the
        # requested terminal statuses.
        while True:
            try:
                stat = self._call_rest_api('get', '/processes/'+pid+'/status', error='Failed to fetch process [%s] status'%pid)
                if stat in status_list:
                    results[pid] = stat
                    break
                time.sleep(5)
            except requests.exceptions.RequestException as e:
                # reinitialize session using api call decorator
                self.session = None
                raise e
    return results
wait_for(self, pids=[], status_list=process_result_statuses) Waits for a process to finish :Parameters: * *pids* (`list`) -- list of processes waiting to be finished * *status_list* (`list`) -- optional - List of statuses to wait for processes to finish with :Example: .. code-block:: python pid = opereto_client.create_process(service='simple_shell_command', title='Test simple shell command service') opereto_client.wait_for([pid], ['failure', 'error']) opereto_client.rerun_process(pid)
def _group_dict_set(iterator): """Make a dict that accumulates the values for each key in an iterator of doubles. :param iter[tuple[A,B]] iterator: An iterator :rtype: dict[A,set[B]] """ d = defaultdict(set) for key, value in iterator: d[key].add(value) return dict(d)
Make a dict that accumulates the values for each key in an iterator of doubles. :param iter[tuple[A,B]] iterator: An iterator :rtype: dict[A,set[B]]
def profile(self, name):
    """Return a specific profile and record it as the selected profile.

    :param name: name of the profile to look up.
    :return: the profile, or None when no profile has that name.
    """
    # Look the profile up once instead of performing two identical
    # dict lookups.
    selected = self.profiles.get(name)
    self.selected_profile = selected
    return selected
Return a specific profile.
def set_icon_data(self, base64_data, mimetype="image/png", rel="icon"):
    """ Allows to define an icon for the App

        Args:
            base64_data (str): base64 encoded image data (ie. "data:image/x-icon;base64,AAABAAEAEBA....")
            mimetype (str): mimetype of the image ("image/png" or "image/x-icon"...)
            rel (str): leave it unchanged (standard "icon")
    """
    # Build the <link> tag first, then register it under the "favicon" key.
    link_tag = '<link rel="%s" href="%s" type="%s" />' % (rel, base64_data, mimetype)
    self.add_child("favicon", link_tag)
Allows to define an icon for the App Args: base64_data (str): base64 encoded image data (ie. "data:image/x-icon;base64,AAABAAEAEBA....") mimetype (str): mimetype of the image ("image/png" or "image/x-icon"...) rel (str): leave it unchanged (standard "icon")
def gen_postinit(self, cls: ClassDefinition, slotname: str) -> Optional[str]:
    """ Generate python post init rules for slot in class

    Emits the source lines (tab-indented, joined for embedding into a
    generated __post_init__ body) that validate and coerce the value of
    `slotname` on instances of `cls`.
    """
    rlines: List[str] = []
    slot = self.schema.slots[slotname]
    if slot.alias:
        slotname = slot.alias
    slotname = self.python_name_for(slotname)
    range_type_name = self.range_type_name(slot, cls.name)

    # Generate existence check for required slots.  Note that inherited classes have to check post-init because
    # named variables can't be mixed in the class signature
    if slot.primary_key or slot.identifier or slot.required:
        if cls.is_a:
            rlines.append(f'if self.{slotname} is None:')
            rlines.append(f'\traise ValueError(f"{slotname} must be supplied")')
        # Coerce the supplied value into the declared range type.
        rlines.append(f'if not isinstance(self.{slotname}, {range_type_name}):')
        rlines.append(f'\tself.{slotname} = {range_type_name}(self.{slotname})')
    elif slot.range in self.schema.classes or slot.range in self.schema.types:
        if not slot.multivalued:
            rlines.append(f'if self.{slotname} and not isinstance(self.{slotname}, {range_type_name}):')
            # Another really wierd case -- a class that has no properties
            if slot.range in self.schema.classes and not self.all_slots_for(self.schema.classes[slot.range]):
                rlines.append(f'\tself.{slotname} = {range_type_name}()')
            else:
                rlines.append(f'\tself.{slotname} = {range_type_name}(self.{slotname})')
        elif slot.inlined:
            slot_range_cls = self.schema.classes[slot.range]
            pkeys = self.primary_keys_for(slot_range_cls)
            if pkeys:
                # Special situation -- if there are only two values: primary key and value,
                # we load it is a list, not a dictionary
                if len(self.all_slots_for(slot_range_cls)) - len(pkeys) == 1:
                    class_init = '(k, v)'
                else:
                    pkey_name = self.python_name_for(pkeys[0])
                    class_init = f'({pkey_name}=k, **({{}} if v is None else v))'
                # Coerce each dict value into the range class, keyed by
                # the primary key.
                rlines.append(f'for k, v in self.{slotname}.items():')
                rlines.append(f'\tif not isinstance(v, {range_type_name}):')
                rlines.append(f'\t\tself.{slotname}[k] = {range_type_name}{class_init}')
        else:
            # Multivalued, not inlined: coerce each list element. The
            # generated expression is deliberately split across two lines,
            # aligned at the opening bracket.
            rlines.append(f'self.{slotname} = [v if isinstance(v, {range_type_name})')
            indent = len(f'self.{slotname} = [') * ' '
            rlines.append(f'{indent}else {range_type_name}(v) for v in self.{slotname}]')
    return '\n\t\t'.join(rlines)
Generate python post init rules for slot in class
def indent(self, space=4):
    '''Return an indented Newick string, just like ``nw_indent`` in Newick Utilities

    Args:
        ``space`` (``int``): The number of spaces a tab should equal

    Returns:
        ``str``: An indented Newick string
    '''
    if not isinstance(space, int):
        raise TypeError("space must be an int")
    if space < 0:
        raise ValueError("space must be a non-negative integer")

    tab = ' ' * space
    pieces = []
    depth = 0
    for ch in self.newick():
        if ch == '(':
            # Open a new nesting level and indent its contents.
            depth += 1
            pieces.append('(\n' + tab * depth)
        elif ch == ')':
            # Close the level: newline, outdent, then the bracket.
            depth -= 1
            pieces.append('\n' + tab * depth + ')')
        elif ch == ',':
            pieces.append(',\n' + tab * depth)
        else:
            pieces.append(ch)
    return ''.join(pieces)
Return an indented Newick string, just like ``nw_indent`` in Newick Utilities Args: ``space`` (``int``): The number of spaces a tab should equal Returns: ``str``: An indented Newick string
def minutes_from_utc(self):
    """
    The timezone offset of this point in time object as +/- minutes
    from UTC.

    A positive value of the timezone offset indicates minutes east of
    UTC, and a negative value indicates minutes west of UTC.

    0, if this object represents a time interval.
    """
    dt = self.__datetime
    if dt is None or dt.utcoffset() is None:
        # Time intervals and naive datetimes have no UTC offset.
        return 0
    utc_offset = dt.utcoffset()
    minutes = utc_offset.seconds / 60
    if utc_offset.days == -1:
        # timedelta normalizes negative offsets to -1 day plus positive
        # seconds; convert back to a negative minute count.
        minutes = -((60 * 24) - minutes)
    return int(minutes)
The timezone offset of this point in time object as +/- minutes from UTC. A positive value of the timezone offset indicates minutes east of UTC, and a negative value indicates minutes west of UTC. 0, if this object represents a time interval.
def handle_args_and_set_context(args):
    """
    Args:
        args: the command line args, probably passed from main() as sys.argv[1:]

    Returns:
        a populated Context object based on CLI args
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("env", help="environment")
    parser.add_argument("path_to_template", help="path to the config template to process")
    # All boolean flags share the same store_true/default=False shape.
    for flag, help_text in [
        ("--no_params", "disable loading values from params file"),
        ("--verbose", "Output extra info"),
        ("--lint", "Test configs for valid JSON/YAML syntax"),
        ("--silent", "Suppress output of rendered template"),
    ]:
        parser.add_argument(flag, help=help_text, action="store_true", default=False)

    parsed = vars(parser.parse_args(args))
    path_to_template = abspath(parsed["path_to_template"])
    # The service name is taken from the grandparent directory of the
    # template file path.
    service = path_to_template.split('/')[-3]

    return Context(
        get_account_alias(parsed["env"]),
        EFConfig.DEFAULT_REGION,
        parsed["env"],
        service,
        path_to_template,
        parsed["no_params"],
        parsed["verbose"],
        parsed["lint"],
        parsed["silent"]
    )
Args: args: the command line args, probably passed from main() as sys.argv[1:] Returns: a populated Context object based on CLI args
def console_load_apf(con: tcod.console.Console, filename: str) -> bool:
    """Update a console from an ASCII Paint `.apf` file."""
    encoded_name = filename.encode("utf-8")
    result = lib.TCOD_console_load_apf(_console(con), encoded_name)
    return bool(result)
Update a console from an ASCII Paint `.apf` file.
def to_bytes(self):
    '''
    Takes a list of ICMPv6Option objects and returns a packed byte
    string of options, appropriately padded if necessary.
    '''
    if not self._options:
        return b''
    # Padding doesn't seem necessary?
    # RFC states it should be padded to 'natural 64bit boundaries'
    # However, wireshark interprets \x00 as a malformed option field
    # So for now, ignore padding
    return b''.join(option.to_bytes() for option in self._options)
Takes a list of ICMPv6Option objects and returns a packed byte string of options, appropriately padded if necessary.
def _generateModel0(numCategories): """ Generate the initial, first order, and second order transition probabilities for 'model0'. For this model, we generate the following set of sequences: 1-2-3 (4X) 1-2-4 (1X) 5-2-3 (1X) 5-2-4 (4X) Parameters: ---------------------------------------------------------------------- numCategories: Number of categories retval: (initProb, firstOrder, secondOrder, seqLen) initProb: Initial probability for each category. This is a vector of length len(categoryList). firstOrder: A dictionary of the 1st order probabilities. The key is the 1st element of the sequence, the value is the probability of each 2nd element given the first. secondOrder: A dictionary of the 2nd order probabilities. The key is the first 2 elements of the sequence, the value is the probability of each possible 3rd element given the first two. seqLen: Desired length of each sequence. The 1st element will be generated using the initProb, the 2nd element by the firstOrder table, and the 3rd and all successive elements by the secondOrder table. 
Here is an example of some return values: initProb: [0.7, 0.2, 0.1] firstOrder: {'[0]': [0.3, 0.3, 0.4], '[1]': [0.3, 0.3, 0.4], '[2]': [0.3, 0.3, 0.4]} secondOrder: {'[0,0]': [0.3, 0.3, 0.4], '[0,1]': [0.3, 0.3, 0.4], '[0,2]': [0.3, 0.3, 0.4], '[1,0]': [0.3, 0.3, 0.4], '[1,1]': [0.3, 0.3, 0.4], '[1,2]': [0.3, 0.3, 0.4], '[2,0]': [0.3, 0.3, 0.4], '[2,1]': [0.3, 0.3, 0.4], '[2,2]': [0.3, 0.3, 0.4]} """ # =============================================================== # Let's model the following: # a-b-c (4X) # a-b-d (1X) # e-b-c (1X) # e-b-d (4X) # -------------------------------------------------------------------- # Initial probabilities, 'a' and 'e' equally likely initProb = numpy.zeros(numCategories) initProb[0] = 0.5 initProb[4] = 0.5 # -------------------------------------------------------------------- # 1st order transitions # both 'a' and 'e' should lead to 'b' firstOrder = dict() for catIdx in range(numCategories): key = str([catIdx]) probs = numpy.ones(numCategories) / numCategories if catIdx == 0 or catIdx == 4: probs.fill(0) probs[1] = 1.0 # lead only to b firstOrder[key] = probs # -------------------------------------------------------------------- # 2nd order transitions # a-b should lead to c 80% and d 20% # e-b should lead to c 20% and d 80% secondOrder = dict() for firstIdx in range(numCategories): for secondIdx in range(numCategories): key = str([firstIdx, secondIdx]) probs = numpy.ones(numCategories) / numCategories if key == str([0,1]): probs.fill(0) probs[2] = 0.80 # 'ab' leads to 'c' 80% of the time probs[3] = 0.20 # 'ab' leads to 'd' 20% of the time elif key == str([4,1]): probs.fill(0) probs[2] = 0.20 # 'eb' leads to 'c' 20% of the time probs[3] = 0.80 # 'eb' leads to 'd' 80% of the time secondOrder[key] = probs return (initProb, firstOrder, secondOrder, 3)
Generate the initial, first order, and second order transition probabilities for 'model0'. For this model, we generate the following set of sequences: 1-2-3 (4X) 1-2-4 (1X) 5-2-3 (1X) 5-2-4 (4X) Parameters: ---------------------------------------------------------------------- numCategories: Number of categories retval: (initProb, firstOrder, secondOrder, seqLen) initProb: Initial probability for each category. This is a vector of length len(categoryList). firstOrder: A dictionary of the 1st order probabilities. The key is the 1st element of the sequence, the value is the probability of each 2nd element given the first. secondOrder: A dictionary of the 2nd order probabilities. The key is the first 2 elements of the sequence, the value is the probability of each possible 3rd element given the first two. seqLen: Desired length of each sequence. The 1st element will be generated using the initProb, the 2nd element by the firstOrder table, and the 3rd and all successive elements by the secondOrder table. Here is an example of some return values: initProb: [0.7, 0.2, 0.1] firstOrder: {'[0]': [0.3, 0.3, 0.4], '[1]': [0.3, 0.3, 0.4], '[2]': [0.3, 0.3, 0.4]} secondOrder: {'[0,0]': [0.3, 0.3, 0.4], '[0,1]': [0.3, 0.3, 0.4], '[0,2]': [0.3, 0.3, 0.4], '[1,0]': [0.3, 0.3, 0.4], '[1,1]': [0.3, 0.3, 0.4], '[1,2]': [0.3, 0.3, 0.4], '[2,0]': [0.3, 0.3, 0.4], '[2,1]': [0.3, 0.3, 0.4], '[2,2]': [0.3, 0.3, 0.4]}
def adict(*classes):
    '''Install one or more classes to be handled as dict.
    '''
    all_installed = True
    for cls in classes:
        if isclass(cls) and _infer_dict(cls):
            # Dict-like: record the class name under its module, if not
            # already registered.
            registered = _dict_classes.get(cls.__module__, ())
            if cls.__name__ not in registered:
                # extend tuple
                _dict_classes[cls.__module__] = registered + (cls.__name__,)
        else:
            # not a dict-like class
            all_installed = False
    return all_installed
Install one or more classes to be handled as dict.
def has_child(self, term):
    """Return True if this GO object has a child GO ID.

    Searches `self.children` recursively for an item whose ``item_id``
    equals `term`.

    :param term: GO ID to look for.
    :return: True if `term` appears anywhere in the subtree below self.
    """
    # BUG FIX (readability): the loop variable was misleadingly named
    # `parent` while iterating over self.children; use any() with a
    # properly named variable instead.
    return any(
        child.item_id == term or child.has_child(term)
        for child in self.children
    )
Return True if this GO object has a child GO ID.
def open511_convert(input_doc, output_format, serialize=True, **kwargs):
    """
    Convert an Open511 document between formats.

    input_doc - either an lxml open511 Element or a deserialized JSON dict
    output_format - short string name of a valid output format, as listed above
    """
    if output_format not in FORMATS:
        raise ValueError("Unrecognized output format %s" % output_format)
    format_info = FORMATS[output_format]

    # Normalize the input to the format the converter expects, then run it.
    normalized = ensure_format(input_doc, format_info.input_format)
    converted = format_info.func(normalized, **kwargs)
    if serialize:
        converted = format_info.serializer(converted)
    return converted
Convert an Open511 document between formats. input_doc - either an lxml open511 Element or a deserialized JSON dict output_format - short string name of a valid output format, as listed above
def FindLiteral(self, pattern, data):
    """Search the data for a hit."""
    needle = utils.Xor(pattern, self.xor_in_key)
    needle_len = len(needle)
    # Yield every (start, end) span where the xor-ed pattern occurs,
    # advancing one byte past each match so overlapping hits are found.
    offset = data.find(needle)
    while offset >= 0:
        yield (offset, offset + needle_len)
        offset = data.find(needle, offset + 1)
Search the data for a hit.