code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def create_s3app(self):
    """Provision the S3 infrastructure for an S3 application.

    Reads the pipeline's primary region from the loaded configs and
    delegates bucket creation to :class:`s3.S3Apps`.
    """
    utils.banner("Creating S3 App Infrastructure")
    primary_region = self.configs['pipeline']['primary_region']
    bucket_manager = s3.S3Apps(
        app=self.app,
        env=self.env,
        region=self.region,
        prop_path=self.json_path,
        primary_region=primary_region)
    bucket_manager.create_bucket()
Create S3 infra for s3 applications
def items(cls):
    """Return every enum entry as a (name, value) tuple.

    :return: List of tuples consisting of every enum value in the form
        [('NAME', value), ...], sorted by value.
    :rtype: list
    """
    pairs = [(member.name, raw) for raw, member in cls.values.items()]
    pairs.sort(key=lambda pair: pair[1])
    return pairs
:return: List of tuples consisting of every enum value in the form [('NAME', value), ...] :rtype: list
def bulk_copy(self, ids):
    """Copy a batch of users in a single service call.

    :param ids: Int list of user IDs.
    :return: :class:`users.User <users.User>` list
    """
    # The schema tells the service layer how to deserialize the response.
    return self.service.bulk_copy(self.base, self.RESOURCE, ids, UserSchema())
Bulk copy a set of users. :param ids: Int list of user IDs. :return: :class:`users.User <users.User>` list
def _build_voronoi_polygons(df):
    """
    Given a GeoDataFrame of point geometries and pre-computed plot extrema, build Voronoi simplexes for the
    given points in the given space and returns them.

    Voronoi simplexes which are located on the edges of the graph may extend into infinity in some direction.
    In other words, the set of points nearest the given point does not necessarily have to be a closed
    polygon. We force these non-hermetic spaces into polygons using a subroutine.

    Parameters
    ----------
    df : GeoDataFrame instance
        The `GeoDataFrame` of points being partitioned.

    Returns
    -------
    polygons : list of shapely.geometry.Polygon objects
        The Voronoi polygon output.
    """
    from scipy.spatial import Voronoi
    # Flatten the point geometries into an (n, 2) array of xy coordinates.
    geom = np.array(df.geometry.map(lambda p: [p.x, p.y]).tolist())
    vor = Voronoi(geom)
    polygons = []
    for idx_point, _ in enumerate(vor.points):
        idx_point_region = vor.point_region[idx_point]
        idxs_vertices = np.array(vor.regions[idx_point_region])
        # A -1 vertex index marks a region extending to infinity (open region).
        is_finite = not np.any(idxs_vertices == -1)
        if is_finite:
            # Easy case, the region is closed. Make a polygon out of the Voronoi ridge points.
            idx_point_region = vor.point_region[idx_point]
            idxs_vertices = np.array(vor.regions[idx_point_region])
            region_vertices = vor.vertices[idxs_vertices]
            region_poly = shapely.geometry.Polygon(region_vertices)
            polygons.append(region_poly)
        else:
            # Hard case, the region is open. Project new edges out to the margins of the plot.
            # See `scipy.spatial.voronoi_plot_2d` for the source of this calculation.
            point_idx_ridges_idx = np.where((vor.ridge_points == idx_point).any(axis=1))[0]
            # TODO: why does this happen?
            if len(point_idx_ridges_idx) == 0:
                continue
            # Plot extent and centroid drive how far the open ridges are projected.
            ptp_bound = vor.points.ptp(axis=0)
            center = vor.points.mean(axis=0)
            finite_segments = []
            infinite_segments = []
            pointwise_ridge_points = vor.ridge_points[point_idx_ridges_idx]
            pointwise_ridge_vertices = np.asarray(vor.ridge_vertices)[point_idx_ridges_idx]
            for pointidx, simplex in zip(pointwise_ridge_points, pointwise_ridge_vertices):
                simplex = np.asarray(simplex)
                if np.all(simplex >= 0):
                    # Both ridge endpoints are real Voronoi vertices.
                    finite_segments.append(vor.vertices[simplex])
                else:
                    i = simplex[simplex >= 0][0]  # finite end Voronoi vertex
                    t = vor.points[pointidx[1]] - vor.points[pointidx[0]]  # tangent
                    t /= np.linalg.norm(t)
                    n = np.array([-t[1], t[0]])  # normal
                    midpoint = vor.points[pointidx].mean(axis=0)
                    # Pick the normal direction pointing away from the point cloud center.
                    direction = np.sign(np.dot(midpoint - center, n)) * n
                    far_point = vor.vertices[i] + direction * ptp_bound.max()
                    infinite_segments.append(np.asarray([vor.vertices[i], far_point]))
            finite_segments = finite_segments if finite_segments else np.zeros(shape=(0,2,2))
            ls = np.vstack([np.asarray(infinite_segments), np.asarray(finite_segments)])
            # We have to trivially sort the line segments into polygonal order. The algorithm that
            # follows is inefficient, being O(n^2), but "good enough" for this use-case.
            ls_sorted = []
            while len(ls_sorted) < len(ls):
                # Continue the chain from the last sorted segment (or seed with the first one).
                l1 = ls[0] if len(ls_sorted) == 0 else ls_sorted[-1]
                matches = []
                for l2 in [l for l in ls if not (l == l1).all()]:
                    if np.any(l1 == l2):
                        matches.append(l2)
                    elif np.any(l1 == l2[::-1]):
                        # Segment is stored in the opposite orientation; flip it first.
                        l2 = l2[::-1]
                        matches.append(l2)
                if len(ls_sorted) == 0:
                    ls_sorted.append(l1)
                for match in matches:
                    # in list sytax this would be "if match not in ls_sorted"
                    # in numpy things are more complicated...
                    if not any((match == ls_sort).all() for ls_sort in ls_sorted):
                        ls_sorted.append(match)
                        break
            # Build and return the final polygon.
            polyline = np.vstack(ls_sorted)
            # convex_hull closes the chain into a valid polygon even if the sorted
            # segments are not a perfect ring.
            geom = shapely.geometry.Polygon(polyline).convex_hull
            polygons.append(geom)
    return polygons
Given a GeoDataFrame of point geometries and pre-computed plot extrema, build Voronoi simplexes for the given points in the given space and returns them. Voronoi simplexes which are located on the edges of the graph may extend into infinity in some direction. In other words, the set of points nearest the given point does not necessarily have to be a closed polygon. We force these non-hermetic spaces into polygons using a subroutine. Parameters ---------- df : GeoDataFrame instance The `GeoDataFrame` of points being partitioned. Returns ------- polygons : list of shapely.geometry.Polygon objects The Voronoi polygon output.
def chunkprocess(func):
    """Decorate *func*, which takes an iterable as its first argument.

    The returned wrapper splits its input into chunks with chunkiter and
    calls *func* on each chunk, yielding each call's result as an iterator.
    """
    @functools.wraps(func)
    def wrapper(iterable, chunksize, *args, **kwargs):
        for piece in chunkiter(iterable, chunksize):
            yield func(piece, *args, **kwargs)
    return wrapper
take a function that takes an iterable as the first argument. return a wrapper that will break an iterable into chunks using chunkiter and run each chunk through the function, yielding the value of each function call as an iterator.
def _fill_row_borders(self):
    """Add the first and last rows to the data by extrapolation.

    Processes the tie-point rows chunk by chunk, extrapolating each chunk
    out to the full-resolution row boundaries given by `hrow_indices`.
    Mutates `self.tie_data` and `self.row_indices` in place.
    """
    lines = len(self.hrow_indices)
    # No chunking configured means a single chunk covering everything.
    chunk_size = self.chunk_size or lines
    # Ratio between full-resolution and tie-point row counts.
    factor = len(self.hrow_indices) / len(self.row_indices)
    tmp_data = []
    for num in range(len(self.tie_data)):
        tmp_data.append([])
    row_indices = []
    for index in range(0, lines, chunk_size):
        # Select the tie rows falling inside this chunk (in tie-point coordinates).
        indices = np.logical_and(self.row_indices >= index / factor,
                                 self.row_indices < (index + chunk_size) / factor)
        ties = np.argwhere(indices).squeeze()
        tiepos = self.row_indices[indices].squeeze()
        for num, data in enumerate(self.tie_data):
            to_extrapolate = data[ties, :]
            if len(to_extrapolate) > 0:
                # Extend the chunk's rows to the chunk's first and last
                # full-resolution row indices.
                extrapolated = self._extrapolate_rows(to_extrapolate,
                                                      tiepos,
                                                      self.hrow_indices[index],
                                                      self.hrow_indices[index + chunk_size - 1])
                tmp_data[num].append(extrapolated)
        # Record border rows around the original tie positions for this chunk.
        row_indices.append(np.array([self.hrow_indices[index]]))
        row_indices.append(tiepos)
        row_indices.append(np.array([self.hrow_indices[index + chunk_size - 1]]))
    for num in range(len(self.tie_data)):
        self.tie_data[num] = np.vstack(tmp_data[num])
    self.row_indices = np.concatenate(row_indices)
Add the first and last rows to the data by extrapolation.
def create(ctx, name, integration_type, location, non_interactive, quiet, dry_run):
    """Create scaffolding for a new integration."""
    repo_choice = ctx.obj['repo_choice']
    # Target root: explicit --location wins, otherwise the repo root.
    root = resolve_path(location) if location else get_root()
    path_sep = os.path.sep
    integration_dir = os.path.join(root, normalize_package_name(name))
    if os.path.exists(integration_dir):
        abort('Path `{}` already exists!'.format(integration_dir))
    template_fields = {}
    # Only prompt for author info for non-core repos in interactive, real runs.
    if repo_choice != 'core' and not non_interactive and not dry_run:
        template_fields['author'] = click.prompt('Your name')
        template_fields['email'] = click.prompt('Your email')
        template_fields['email_packages'] = template_fields['email']
        click.echo()
    config = construct_template_fields(name, repo_choice, **template_fields)
    # read=False on dry runs avoids touching template file contents.
    files = create_template_files(integration_type, root, config, read=not dry_run)
    file_paths = [file.file_path.replace('{}{}'.format(root, path_sep), '', 1) for file in files]
    # Build a nested dict mirroring the directory tree, for display purposes.
    path_tree = tree()
    for file_path in file_paths:
        branch = path_tree
        for part in file_path.split(path_sep):
            branch = branch[part]
    if dry_run:
        if quiet:
            echo_info('Will create `{}`'.format(integration_dir))
        else:
            echo_info('Will create in `{}`:'.format(root))
            display_path_tree(path_tree)
        return
    for file in files:
        file.write()
    if quiet:
        echo_info('Created `{}`'.format(integration_dir))
    else:
        echo_info('Created in `{}`:'.format(root))
        display_path_tree(path_tree)
Create scaffolding for a new integration.
def bare(self): "Make a Features object with no metadata; points to the same features." if not self.meta: return self elif self.stacked: return Features(self.stacked_features, self.n_pts, copy=False) else: return Features(self.features, copy=False)
Make a Features object with no metadata; points to the same features.
def mask(self, pattern):
    """A drawing operator that paints the current source
    using the alpha channel of :obj:`pattern` as a mask.
    (Opaque areas of :obj:`pattern` are painted with the source,
    transparent areas are not painted.)

    :param pattern: A :class:`Pattern` object.

    """
    cairo.cairo_mask(self._pointer, pattern._pointer)
    # Raise if the underlying cairo context entered an error state.
    self._check_status()
A drawing operator that paints the current source using the alpha channel of :obj:`pattern` as a mask. (Opaque areas of :obj:`pattern` are painted with the source, transparent areas are not painted.) :param pattern: A :class:`Pattern` object.
def check(text): """Check the text.""" err = "misc.annotations" msg = u"Annotation left in text." annotations = [ "FIXME", "FIX ME", "TODO", "todo", "ERASE THIS", "FIX THIS", ] return existence_check( text, annotations, err, msg, ignore_case=False, join=True)
Check the text.
def qs_add(self, *args, **kwargs):
    '''Return a copy of self with the given values added to its QuerySet
    MultiDict.'''
    new_query = self.query.copy()
    if args:
        # A positional mapping/pairs argument is merged first.
        for key, value in MultiDict(args[0]).items():
            new_query.add(key, value)
    for key, value in kwargs.items():
        new_query.add(key, value)
    return self._copy(query=new_query)
Add value to QuerySet MultiDict
def _validate_validator(self, validator, field, value):
    """ {'oneof': [ {'type': 'callable'}, {'type': 'list', 'schema': {'oneof': [{'type': 'callable'}, {'type': 'string'}]}}, {'type': 'string'} ]} """
    # NOTE: the docstring above is consumed as the rule's validation schema;
    # its content must not change.
    if isinstance(validator, _str_type):
        # A string names a registered rule handler; resolve and invoke it.
        validator = self.__get_rule_handler('validator', validator)
        validator(field, value)
    elif isinstance(validator, Iterable):
        # A collection of validators: apply each member recursively.
        for member in validator:
            self._validate_validator(member, field, value)
    else:
        # A bare callable receives the error callback as a third argument.
        validator(field, value, self._error)
{'oneof': [ {'type': 'callable'}, {'type': 'list', 'schema': {'oneof': [{'type': 'callable'}, {'type': 'string'}]}}, {'type': 'string'} ]}
def client_args_for_bank(bank_info, ofx_version):
    """
    Build the OFX client arguments for one institution found via ofxhome.

    This is an extension point to override or augment ofxhome data for
    specific institutions, such as those that require specific User-Agent
    headers (or no User-Agent header).

    :param bank_info: OFXHome bank information for the institution,
      as returned by ``OFXHome.lookup()``
    :type bank_info: dict
    :param ofx_version: OFX Version argument specified on command line
    :type ofx_version: str
    :return: Client arguments for a specific institution
    :rtype: dict
    """
    args = {'ofx_version': str(ofx_version)}
    url = bank_info['url']
    if 'ofx.discovercard.com' in url:
        # Discover needs no User-Agent and no Accept headers
        args.update(user_agent=False, accept=False)
    if 'www.accountonline.com' in url:
        # Citi needs no User-Agent header
        args['user_agent'] = False
    return args
Return the client arguments to use for a particular Institution, as found from ofxhome. This provides us with an extension point to override or augment ofxhome data for specific institutions, such as those that require specific User-Agent headers (or no User-Agent header). :param bank_info: OFXHome bank information for the institution, as returned by ``OFXHome.lookup()`` :type bank_info: dict :param ofx_version: OFX Version argument specified on command line :type ofx_version: str :return: Client arguments for a specific institution :rtype: dict
def get_builder_openshift_url(self):
    """Return the URL of the OpenShift instance the builder will connect to."""
    key = "builder_openshift_url"
    url = self._get_deprecated(key, self.conf_section, key)
    if url is not None:
        return url
    # Not configured explicitly; fall back to the base OpenShift URI.
    logger.warning("%r not found, falling back to get_openshift_base_uri()", key)
    return self.get_openshift_base_uri()
url of OpenShift where builder will connect
def standard_FPR(reference_patterns, estimated_patterns, tol=1e-5):
    """Standard F1 Score, Precision and Recall.

    This metric checks if the prototype patterns of the reference match
    possible translated patterns in the prototype patterns of the
    estimations. Since the sizes of these prototypes must be equal, this
    metric is quite restrictive and it tends to be 0 in most of 2013 MIREX
    results.

    Examples
    --------
    >>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
    >>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
    >>> F, P, R = mir_eval.pattern.standard_FPR(ref_patterns, est_patterns)

    Parameters
    ----------
    reference_patterns : list
        The reference patterns using the format returned by
        :func:`mir_eval.io.load_patterns()`
    estimated_patterns : list
        The estimated patterns in the same format
    tol : float
        Tolerance level when comparing reference against estimation.
        Default parameter is the one found in the original matlab code by
        Tom Collins used for MIREX 2013.
        (Default value = 1e-5)

    Returns
    -------
    f_measure : float
        The standard F1 Score
    precision : float
        The standard Precision
    recall : float
        The standard Recall
    """
    validate(reference_patterns, estimated_patterns)
    nP = len(reference_patterns)    # Number of patterns in the reference
    nQ = len(estimated_patterns)    # Number of patterns in the estimation
    k = 0                           # Number of patterns that match

    # If no patterns were provided, metric is zero
    if _n_onset_midi(reference_patterns) == 0 or \
            _n_onset_midi(estimated_patterns) == 0:
        return 0., 0., 0.

    # Find matches of the prototype patterns
    for ref_pattern in reference_patterns:
        P = np.asarray(ref_pattern[0])  # Get reference prototype
        for est_pattern in estimated_patterns:
            Q = np.asarray(est_pattern[0])  # Get estimation prototype
            if len(P) != len(Q):
                continue
            # Check transposition given a certain tolerance
            # (a single-note prototype always matches one of equal size).
            if (len(P) == len(Q) == 1 or
                    np.max(np.abs(np.diff(P - Q, axis=0))) < tol):
                k += 1
                break

    # Compute the standard measures
    precision = k / float(nQ)
    recall = k / float(nP)
    f_measure = util.f_measure(precision, recall)
    return f_measure, precision, recall
Standard F1 Score, Precision and Recall. This metric checks if the prototype patterns of the reference match possible translated patterns in the prototype patterns of the estimations. Since the sizes of these prototypes must be equal, this metric is quite restictive and it tends to be 0 in most of 2013 MIREX results. Examples -------- >>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt") >>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt") >>> F, P, R = mir_eval.pattern.standard_FPR(ref_patterns, est_patterns) Parameters ---------- reference_patterns : list The reference patterns using the format returned by :func:`mir_eval.io.load_patterns()` estimated_patterns : list The estimated patterns in the same format tol : float Tolerance level when comparing reference against estimation. Default parameter is the one found in the original matlab code by Tom Collins used for MIREX 2013. (Default value = 1e-5) Returns ------- f_measure : float The standard F1 Score precision : float The standard Precision recall : float The standard Recall
def process_data(self, file_info): """expects FileInfo""" if self._exceeds_max_file_size(file_info): self.log.info("File '%s' has a size in bytes (%d) greater than the configured limit. Will be ignored.", file_info.path, file_info.size) self.fire(events.FilteredFile(file_info)) return None else: return file_info
expects FileInfo
async def open(self) -> 'NodePool':
    """
    Explicit entry. Opens pool as configured, for later closure via close().
    For use when keeping pool open across multiple calls.

    Raise any IndyError causing failure to create ledger configuration.

    :return: current object
    """
    LOGGER.debug('NodePool.open >>>')
    try:
        await pool.set_protocol_version(2)  # 1 for indy-node 1.3, 2 for indy-node 1.4
        await pool.create_pool_ledger_config(self.name, json.dumps({'genesis_txn': str(self.genesis_txn_path)}))
    except IndyError as x_indy:
        # An existing ledger config for this pool name is fine; anything else
        # is a real failure and is re-raised.
        if x_indy.error_code == ErrorCode.PoolLedgerConfigAlreadyExistsError:
            LOGGER.info('Pool ledger config for %s already exists', self.name)
        else:
            LOGGER.debug('NodePool.open: <!< indy error code %s', x_indy.error_code)
            raise x_indy
    self._handle = await pool.open_pool_ledger(self.name, json.dumps(self.cfg))
    LOGGER.debug('NodePool.open <<<')
    return self
Explicit entry. Opens pool as configured, for later closure via close(). For use when keeping pool open across multiple calls. Raise any IndyError causing failure to create ledger configuration. :return: current object
def url(self) -> str:
    """
    Return the file:// URL that opens this project's results page in a
    browser, tagged with the project's uuid.
    :return:
    """
    page = os.path.join(self.results_path, 'project.html')
    return 'file://{path}?id={id}'.format(path=page, id=self.uuid)
Returns the URL that will open this project results file in the browser :return:
def main(configpath = None, startup = None, daemon = False, pidfile = None, fork = None):
    """
    The most simple way to start the VLCP framework

    :param configpath: path of a configuration file to be loaded

    :param startup: startup modules list. If None, `server.startup` in the configuration files is used;
                    if `server.startup` is not configured, any module defined or imported into __main__
                    is loaded.

    :param daemon: if True, use python-daemon to fork and start at background. `python-daemon` must be
                   installed::

                      pip install python-daemon

    :param pidfile: if daemon=True, this file is used for the pidfile.

    :param fork: use extra fork to start multiple instances
    """
    if configpath is not None:
        manager.loadfrom(configpath)
    if startup is not None:
        manager['server.startup'] = startup
    if not manager.get('server.startup'):
        # No startup modules, try to load from __main__
        startup = []
        import __main__
        for k in dir(__main__):
            m = getattr(__main__, k)
            if isinstance(m, type) and issubclass(m, Module) and m is not Module:
                startup.append('__main__.' + k)
        manager['server.startup'] = startup
    if fork is not None and fork > 1:
        # Fail fast before any process setup when os.fork is unavailable.
        if not hasattr(os, 'fork'):
            raise ValueError('Fork is not supported in this operating system.')
    def start_process():
        # Run a single server instance in the current process.
        s = Server()
        s.serve()
    def main_process():
        # Entry point after (optional) daemonization: either fork N workers
        # and supervise them, or run a single server inline.
        if fork is not None and fork > 1:
            import multiprocessing
            from time import sleep
            sub_procs = []
            for i in range(0, fork):
                p = multiprocessing.Process(target = start_process)
                sub_procs.append(p)
            for i in range(0, fork):
                sub_procs[i].start()
            try:
                import signal
                def except_return(sig, frame):
                    # Convert termination signals into SystemExit so the
                    # finally block below can clean up the children.
                    raise SystemExit
                signal.signal(signal.SIGTERM, except_return)
                signal.signal(signal.SIGINT, except_return)
                if hasattr(signal, 'SIGHUP'):
                    signal.signal(signal.SIGHUP, except_return)
                while True:
                    sleep(2)
                    # Exit the supervisor loop once every child has died.
                    for i in range(0, fork):
                        if sub_procs[i].is_alive():
                            break
                    else:
                        break
            finally:
                # Terminate and reap any remaining children on the way out.
                for i in range(0, fork):
                    if sub_procs[i].is_alive():
                        sub_procs[i].terminate()
                for i in range(0, fork):
                    sub_procs[i].join()
        else:
            start_process()
    if daemon:
        import daemon
        if not pidfile:
            pidfile = manager.get('daemon.pidfile')
        uid = manager.get('daemon.uid')
        gid = manager.get('daemon.gid')
        if gid is None:
            # gid may be given by group name instead of numeric id.
            group = manager.get('daemon.group')
            if group is not None:
                import grp
                gid = grp.getgrnam(group)[2]
        if uid is None:
            import pwd
            user = manager.get('daemon.user')
            if user is not None:
                user_pw = pwd.getpwnam(user)
                uid = user_pw.pw_uid
                if gid is None:
                    gid = user_pw.pw_gid
        if uid is not None and gid is None:
            # Have a uid but no gid: default to the user's primary group.
            import pwd
            gid = pwd.getpwuid(uid).pw_gid
        if pidfile:
            import fcntl
            class PidLocker(object):
                # Context manager holding an exclusive lock on the pid file
                # for the daemon's lifetime.
                def __init__(self, path):
                    self.filepath = path
                    self.fd = None
                def __enter__(self):
                    # Create pid file
                    self.fd = os.open(pidfile, os.O_WRONLY | os.O_TRUNC | os.O_CREAT, 0o644)
                    fcntl.lockf(self.fd, fcntl.LOCK_EX|fcntl.LOCK_NB)
                    os.write(self.fd, str(os.getpid()).encode('ascii'))
                    os.fsync(self.fd)
                def __exit__(self, typ, val, tb):
                    if self.fd:
                        try:
                            fcntl.lockf(self.fd, fcntl.LOCK_UN)
                        except Exception:
                            pass
                        os.close(self.fd)
                        self.fd = None
            locker = PidLocker(pidfile)
        else:
            locker = None
        import sys
        # Module loading is related to current path, add it to sys.path
        cwd = os.getcwd()
        if cwd not in sys.path:
            sys.path.append(cwd)
        # Fix path issues on already-loaded modules: daemonization changes the
        # working directory, so relative paths must be made absolute first.
        for m in sys.modules.values():
            if getattr(m, '__path__', None):
                m.__path__ = [os.path.abspath(p) for p in m.__path__]
            # __file__ is used for module-relative resource locate
            if getattr(m, '__file__', None):
                m.__file__ = os.path.abspath(m.__file__)
        configs = {'gid':gid,'uid':uid,'pidfile':locker}
        config_filters = ['chroot_directory', 'working_directory', 'umask', 'detach_process', 'prevent_core']
        if hasattr(manager, 'daemon'):
            configs.update((k,v) for k,v in manager.daemon.config_value_items() if k in config_filters)
        if not hasattr(os, 'initgroups'):
            configs['initgroups'] = False
        with daemon.DaemonContext(**configs):
            main_process()
    else:
        main_process()
The most simple way to start the VLCP framework :param configpath: path of a configuration file to be loaded :param startup: startup modules list. If None, `server.startup` in the configuration files is used; if `server.startup` is not configured, any module defined or imported into __main__ is loaded. :param daemon: if True, use python-daemon to fork and start at background. `python-daemon` must be installed:: pip install python-daemon :param pidfile: if daemon=True, this file is used for the pidfile. :param fork: use extra fork to start multiple instances
def _expand_libs_in_libs(specs):
    """
    Expand specs.libs.depends.libs to include any indirectly required libs.

    Mutates ``specs`` in place, replacing each lib's direct dependency list
    with the transitive set computed by ``_get_dependent``.
    """
    # dict.items() works on both Python 2 and 3; iteritems() is Python-2-only
    # and breaks under Python 3.
    for lib_name, lib_spec in specs['libs'].items():
        if 'depends' in lib_spec and 'libs' in lib_spec['depends']:
            lib_spec['depends']['libs'] = _get_dependent('libs', lib_name, specs, 'libs')
Expands specs.libs.depends.libs to include any indirectly required libs
def bind(self, sock): """Wrap and return the given socket.""" if self.context is None: self.context = self.get_context() conn = SSLConnection(self.context, sock) self._environ = self.get_environ() return conn
Wrap and return the given socket.
def flatten(self): """ Get a flattened list of the items in the collection. :rtype: Collection """ def _flatten(d): if isinstance(d, dict): for v in d.values(): for nested_v in _flatten(v): yield nested_v elif isinstance(d, list): for list_v in d: for nested_v in _flatten(list_v): yield nested_v else: yield d return self.__class__(list(_flatten(self.items)))
Get a flattened list of the items in the collection. :rtype: Collection
def show_type(cls, result):
    """
    Render a parse result as "expr :: type", or its raw value on failure.

    :param TryHaskell.Result result: Parse result of JSON data.
    :rtype: str|unicode
    """
    if not result.ok:
        return result.value
    return ' :: '.join([result.expr, result.type])
:param TryHaskell.Result result: Parse result of JSON data. :rtype: str|unicode
def generalize(self, sr, geometries, maxDeviation, deviationUnit):
    """
    Simplify the input geometries via the geometry service's generalize
    operation (Douglas-Peucker). The output geometries contain a subset
    of the original input vertices.

    Inputs:
       geometries - array of geometries to be generalized (structured as
        JSON geometry objects returned by the ArcGIS REST API).
       sr - spatial reference of the input geometries WKID.
       maxDeviation - maximum allowable offset, which determines the
        degree of simplification; limits how far the output geometry can
        differ from the input geometry.
       deviationUnit - a unit for maximum deviation. If a unit is not
        specified, the units are derived from sr.
    """
    url = self._url + "/generalize"
    params = {
        "f": "json",
        "sr": sr,
        "deviationUnit": deviationUnit,
        "maxDeviation": maxDeviation,
        "geometries": self.__geometryListToGeomTemplate(geometries=geometries),
    }
    return self._get(url=url,
                     param_dict=params,
                     securityHandler=self._securityHandler,
                     proxy_port=self._proxy_port,
                     proxy_url=self._proxy_url)
The generalize operation is performed on a geometry service resource. The generalize operation simplifies the input geometries using the Douglas-Peucker algorithm with a specified maximum deviation distance. The output geometries will contain a subset of the original input vertices. Inputs: geometries - array of geometries to be generalized (structured as JSON geometry objects returned by the ArcGIS REST API). sr - spatial reference of the input geometries WKID. maxDeviation - maxDeviation sets the maximum allowable offset, which will determine the degree of simplification. This value limits the distance the output geometry can differ from the input geometry. deviationUnit - a unit for maximum deviation. If a unit is not specified, the units are derived from sr.
def CreateChatWith(self, *Usernames):
    """Creates a chat with one or more users.

    :Parameters:
      Usernames : str
        One or more Skypenames of the users.

    :return: A chat object
    :rtype: `Chat`

    :see: `Chat.AddMembers`
    """
    command = 'CHAT CREATE %s' % ', '.join(Usernames)
    reply = self._DoCommand(command)
    return Chat(self, chop(reply, 2)[1])
Creates a chat with one or more users. :Parameters: Usernames : str One or more Skypenames of the users. :return: A chat object :rtype: `Chat` :see: `Chat.AddMembers`
def enable_node(self, service_name, node_name):
    """
    Enable the named node for the given service via HAProxy's
    "enable server" command.
    """
    logger.info("Enabling server %s/%s", service_name, node_name)
    command = "enable server %s/%s" % (service_name, node_name)
    return self.send_command(command)
Enables a given node name for the given service name via the "enable server" HAProxy command.
def verify_notification(data):
    """
    Verify that a notification came from a trusted source.

    Downloads the signing certificate, reconstructs the signed message
    from the notification fields, and checks the SHA1 signature.

    Returns True if verified, False if not verified
    """
    pemfile = grab_keyfile(data['SigningCertURL'])
    cert = crypto.load_certificate(crypto.FILETYPE_PEM, pemfile)
    # base64.decodestring was deprecated since Python 3.1 and removed in 3.9;
    # b64decode handles the same (possibly newline-embedded) input on both
    # Python 2 and 3.
    signature = base64.b64decode(six.b(data['Signature']))
    if data['Type'] == "Notification":
        hash_format = NOTIFICATION_HASH_FORMAT
    else:
        hash_format = SUBSCRIPTION_HASH_FORMAT
    try:
        crypto.verify(
            cert, signature, six.b(hash_format.format(**data)), 'sha1')
    except crypto.Error:
        return False
    return True
Function to verify notification came from a trusted source Returns True if verified, False if not verified
def get_characteristic_from_uuid(self, uuid):
    """Look up a :class:`Characteristic` by UUID.

    Args:
        uuid (str): a string containing the hex-encoded UUID

    Returns:
        None if an error occurs, otherwise a :class:`Characteristic` object
    """
    # Serve from the per-UUID cache when possible.
    if uuid in self.uuid_chars:
        logger.debug('Returning cached info for char: {}'.format(uuid))
        return self.uuid_chars[uuid]
    for service in self.services.values():
        found = service.get_characteristic_by_uuid(uuid)
        if found is None:
            continue
        self.uuid_chars[uuid] = found
        logger.debug('Found char for UUID: {}'.format(uuid))
        return found
    logger.info('Failed to find char for UUID: {}'.format(uuid))
    return None
Given a characteristic UUID, return a :class:`Characteristic` object containing information about that characteristic Args: uuid (str): a string containing the hex-encoded UUID Returns: None if an error occurs, otherwise a :class:`Characteristic` object
def plot_reaction_scheme(df, temperature, pressure, potential, pH, e_lim=None):
    """Returns a matplotlib object with the plotted reaction path.

    Parameters
    ----------
    df : Pandas DataFrame generated by reaction_network
    temperature : numeric temperature in K
    pressure : numeric pressure in mbar
    pH : PH in bulk solution
    potential : Electric potential vs. SHE in eV
    e_lim: Limits for the energy axis.

    Returns
    -------
    fig: matplotlib object.
    """
    # Widen the figure with the number of states so labels stay readable.
    ncols = int((df.shape[0]/20)) + 1
    fig_width = ncols + 1.5*len(df['intermediate_labels'][0])
    figsize = (fig_width, 6)
    fig, ax = plt.subplots(figsize=figsize)
    # Fixed: identity comparison with None (was `pressure == None`).
    if pressure is None:
        pressure_label = '0'
    else:
        pressure_label = str(pressure)
    lines = []
    for j, energy_list in enumerate(df['reaction_energy']):
        ts = df['transition_states'][j]
        R = df['reaction_coordinate'][j]
        # Each energy level is drawn as a short horizontal segment.
        E = [[x, x] for x in energy_list]
        labels = df['system_label']
        for i, n in enumerate(R):
            if i == 0:
                # First state: draw the level with a legend entry.
                line = Line2D([0], [0], color=colors[j], lw=4)
                lines.append(line)
                ax.plot(n, E[i], ls='-', color=colors[j], linewidth=3.25,
                        solid_capstyle='round',
                        path_effects=[pe.Stroke(linewidth=6, foreground=edge_colors[j]),
                                      pe.Normal()],
                        label=labels[j])
                ax.plot([n[1], n[1] + 0.5], [E[i], E[i + 1]], ls='--', dashes=(3, 2),
                        color=colors[j], linewidth=1.)
            else:
                if ts[i]:
                    # Transition state: fit a parabola through neighbors.
                    xts = [R[i-1][1], R[i][0], R[i+1][0]]
                    yts = [energy_list[i-1], energy_list[i], energy_list[i+1]]
                    z1 = np.polyfit(xts, yts, 2)
                    xp1 = np.linspace(xts[0], xts[2], 100)
                    p1 = np.poly1d(z1)
                    ax.plot(xp1, p1(xp1), ls='--', color=colors[j], linewidth=2.)
                    ax.plot(xts[1], yts[1], marker='o', c=colors[j], mec=edge_colors[j],
                            lw=1.5, markersize=7)
                else:
                    ax.plot(n, E[i], ls='-', color=colors[j], linewidth=3.25,
                            solid_capstyle='round',
                            path_effects=[pe.Stroke(linewidth=6, foreground=edge_colors[j]),
                                          pe.Normal()])
                    if i < len(R) - 1:
                        ax.plot([n[1], n[1] + 0.5], [E[i], E[i + 1]], ls='--', dashes=(3, 2),
                                color=colors[j], linewidth=1.)
    ax.legend(handlelength=0.4, ncol=ncols, loc=2, frameon=False,
              bbox_to_anchor=(1.05, 1), borderaxespad=0., fontsize=12)
    if e_lim:
        ax.set_ylim(e_lim)
    ax.set_xlabel('Reaction coordinate')
    ax.set_ylabel('Reaction free energy (eV)')
    reaction_labels = df['intermediate_labels'][0]
    reaction_labels = [sub(w) for w in reaction_labels]
    plt.xticks(np.arange(len(reaction_labels)) + 0.25, tuple(reaction_labels), rotation=45)
    # plt.tight_layout()
    # Annotate conditions just outside the axes' top-right corner.
    a = ax.get_xlim()[1] + 0.05*ax.get_xlim()[1]
    b = ax.get_ylim()[0] + 0.05*ax.get_ylim()[1]
    if potential is not None and pH is not None:
        ax.text(a, b, 'U = '+str(potential)+' eV vs. SHE \n pH = '
                +str(pH)+' \n T = '+str(temperature)
                +' K \n p = '+pressure_label+' mbar', fontsize=12)
    else:
        ax.text(a, b, 'T = '+str(temperature)+' \n p = '+pressure_label+' mbar', fontsize=12)
    plt.tight_layout()
    return fig
Returns a matplotlib object with the plotted reaction path. Parameters ---------- df : Pandas DataFrame generated by reaction_network temperature : numeric temperature in K pressure : numeric pressure in mbar pH : PH in bulk solution potential : Electric potential vs. SHE in eV e_lim: Limits for the energy axis. Returns ------- fig: matplotlib object.
def create_calc_dh_dv(estimator):
    """
    Build the function used in gradient/hessian calculations to compute the
    derivative of the transformation with respect to the index.

    Parameters
    ----------
    estimator : an instance of the estimation.LogitTypeEstimator class.
        Should contain a `design` attribute that is a 2D ndarray representing
        the design matrix for this model and dataset.

    Returns
    -------
    Callable. Accepts a 1D array of systematic utility values, a 1D array of
        alternative IDs, (shape parameters if there are any) and miscellaneous
        args and kwargs. Returns a 2D array of shape
        `(design.shape[0], design.shape[0])` containing the derivative of the
        transformed utility vector with respect to the systematic utilities.
    """
    n_rows = estimator.design.shape[0]
    dh_dv = diags(np.ones(n_rows), 0, format='csr')
    # Bind the pre-allocated sparse matrix so each minimizer iteration
    # overwrites its data in place rather than allocating a new one.
    return partial(_uneven_transform_deriv_v, output_array=dh_dv)
Return the function that can be used in the various gradient and hessian calculations to calculate the derivative of the transformation with respect to the index. Parameters ---------- estimator : an instance of the estimation.LogitTypeEstimator class. Should contain a `design` attribute that is a 2D ndarray representing the design matrix for this model and dataset. Returns ------- Callable. Will accept a 1D array of systematic utility values, a 1D array of alternative IDs, (shape parameters if there are any) and miscellaneous args and kwargs. Should return a 2D array whose elements contain the derivative of the tranformed utility vector with respect to the vector of systematic utilities. The dimensions of the returned vector should be `(design.shape[0], design.shape[0])`.
def get_product_version(path: typing.Union[str, Path]) -> VersionInfo:
    """
    Get version info from executable

    Args:
        path: path to the executable

    Returns: VersionInfo

    Raises:
        RuntimeError: when no version resource can be extracted from the file.
    """
    path = Path(path).absolute()
    pe_info = pefile.PE(str(path))
    try:
        # Depending on the pefile version, FileInfo is either a flat list of
        # structures or a list of lists; handle both layouts.
        for file_info in pe_info.FileInfo:  # pragma: no branch
            if isinstance(file_info, list):
                result = _parse_file_info(file_info)
                if result:
                    return result
            else:
                # Flat layout: parse the whole FileInfo list at once.
                result = _parse_file_info(pe_info.FileInfo)
                if result:
                    return result
        raise RuntimeError(f'unable to obtain version from {path}')
    except (KeyError, AttributeError) as exc:
        # Malformed/missing version resource: keep the traceback for debugging
        # and surface a uniform error to the caller.
        traceback.print_exc()
        raise RuntimeError(f'unable to obtain version from {path}') from exc
Get version info from executable Args: path: path to the executable Returns: VersionInfo
def indicator_associations(self, params=None):
    """
    Gets the indicator association from a Indicator/Group/Victim

    Yields: Indicator Association
    """
    if not self.can_update():
        self._tcex.handle_error(910, [self.type])

    # Delegate the iteration to the requests helper directly.
    yield from self.tc_requests.indicator_associations(
        self.api_type,
        self.api_sub_type,
        self.unique_id,
        owner=self.owner,
        params=params if params is not None else {},
    )
Gets the indicator association from a Indicator/Group/Victim Yields: Indicator Association
def get_celery_app(name=None, **kwargs):  # nocv
    # pylint: disable=import-error
    '''
    Function to return celery-app. Works only if celery installed.

    :param name: Application name
    :param kwargs: overridden env-settings
    :return: Celery-app object
    '''
    from celery import Celery
    # Push the (possibly overridden) env settings before Celery reads config.
    prepare_environment(**kwargs)
    name = name or os.getenv("VST_PROJECT")
    celery_app = Celery(name)
    # Read CELERY_* options from Django settings and register task modules.
    celery_app.config_from_object('django.conf:settings', namespace='CELERY')
    celery_app.autodiscover_tasks()
    return celery_app
Function to return celery-app. Works only if celery is installed.

:param name: Application name
:param kwargs: overridden env-settings
:return: Celery-app object
def to_dict(self):
    """
    Returns:
        dict: ConciseCV represented as a dictionary.
    """
    trained_global_model = (
        None
        if self._concise_global_model is None
        else self._concise_global_model.to_dict()
    )
    return {
        "param": {
            "n_folds": self._n_folds,
            "n_rows": self._n_rows,
            "use_stored_folds": self._use_stored_folds,
        },
        "folds": self._kf,
        "init_model": self._concise_model.to_dict(),
        "trained_global_model": trained_global_model,
        "output": {fold: model.to_dict()
                   for fold, model in self.get_CV_models().items()},
    }
Returns: dict: ConciseCV represented as a dictionary.
def get_wildcard(self):
    """Return the wildcard bits notation of the netmask."""
    # Delegate to the module-level _convert helper; _check=False skips
    # re-validation (self._ip is assumed to have been validated already --
    # TODO confirm against the constructor).
    return _convert(self._ip, notation=NM_WILDCARD, inotation=IP_DOT,
                    _check=False, _isnm=self._isnm)
Return the wildcard bits notation of the netmask.
def dump(self, fh, value, context=None):
    """Transform *value* via ``dumps`` and write it to the file-like *fh*.

    Returns the length of the text written.
    """
    serialized = self.dumps(value)
    fh.write(serialized)
    return len(serialized)
Attempt to transform and write a string-based foreign value to the given file-like object. Returns the length written.
def GetCoinAssets(self):
    """
    Get asset ids of all coins present in the wallet.

    Returns:
        list: of UInt256 asset id's.
    """
    # Set comprehension deduplicates asset ids across all coins.
    return list({coin.Output.AssetId for coin in self.GetCoins()})
Get asset ids of all coins present in the wallet. Returns: list: of UInt256 asset id's.
def nonver_name(self):
    """Return the non versioned name"""
    unversioned = self.as_version(None)
    if unversioned:
        return unversioned
    # Fall back to stripping the trailing "-<segment>" from the raw name.
    import re
    return re.sub(r'-[^-]+$', '', self.name)
Return the non versioned name
def deprecatedMessage(msg, key=None, printStack=False):
    '''
        deprecatedMessage - Print a deprecated message (unless they are toggled off).
          Will print a message only once (based on "key")

        @param msg <str> - Deprecated message to possibly print

        @param key <anything> - A key that is specific to this message.
          If None is provided (default), one will be generated from the md5 of the message.
          However, better to save cycles and provide a unique key if at all possible.
          The decorator uses the function itself as the key.

        @param printStack <bool> Default False, if True print a stack trace
    '''
    # Global kill-switch set via IndexedRedis.toggleDeprecatedMessages(False).
    if __deprecatedMessagesEnabled is False:
        return
    if not _alreadyWarned:
        # First warning, let them know how to disable.
        sys.stderr.write('== DeprecatedWarning: warnings can be disabled by calling IndexedRedis.toggleDeprecatedMessages(False)\n')
    if key is None:
        # No explicit key: derive a stable one from the message text itself.
        from .compat_str import tobytes
        key = md5(tobytes(msg)).hexdigest()
    if key not in _alreadyWarned:
        # Warn at most once per key.
        _alreadyWarned[key] = True
        sys.stderr.write('== DeprecatedWarning: %s\n' %(msg, ))
        if printStack:
            sys.stderr.write(' at:\n')
            # Drop the last two stack entries (presumably this function and
            # its caller frame inside the warning machinery -- confirm).
            curStack = traceback.extract_stack()
            sys.stderr.write(' ' + '\n '.join(traceback.format_list(curStack[:-2])).replace('\t', ' ') + '\n')
deprecatedMessage - Print a deprecated message (unless they are toggled off). Will print a message only once (based on "key")

@param msg <str> - Deprecated message to possibly print

@param key <anything> - A key that is specific to this message. If None is provided (default), one will be generated from the md5 of the message. However, better to save cycles and provide a unique key if at all possible. The decorator uses the function itself as the key.

@param printStack <bool> Default False, if True print a stack trace
def _get_updated_environment(self, env_dict=None): """Returns globals environment with 'magic' variable Parameters ---------- env_dict: Dict, defaults to {'S': self} \tDict that maps global variable name to value """ if env_dict is None: env_dict = {'S': self} env = globals().copy() env.update(env_dict) return env
Returns globals environment with 'magic' variable Parameters ---------- env_dict: Dict, defaults to {'S': self} \tDict that maps global variable name to value
def print_menuconfig(kconf):
    """Print every menu entry of the configuration to stdout."""
    # The expanded mainmenu text serves as the header. This is the same as
    # kconf.top_node.prompt[0], but with variable references expanded.
    header = "\n======== {} ========\n".format(kconf.mainmenu_text)
    print(header)
    print_menuconfig_nodes(kconf.top_node.list, 0)
    print("")
Prints all menu entries for the configuration.
def get_low_battery_warning_level(self):
    """
    Looks through all power supplies in POWER_SUPPLY_PATH.

    If there is an AC adapter online, returns LOW_BATTERY_WARNING_NONE.
    Otherwise determines total percentage and time remaining across all
    attached batteries and maps them to a LOW_BATTERY_WARNING_* level.
    """
    all_energy_full = []
    all_energy_now = []
    all_power_now = []
    try:
        type = self.power_source_type()  # NOTE: shadows the builtin `type`
        if type == common.POWER_TYPE_AC:
            if self.is_ac_online():
                # Plugged in -> no low-battery warning needed.
                return common.LOW_BATTERY_WARNING_NONE
        elif type == common.POWER_TYPE_BATTERY:
            if self.is_battery_present() and self.is_battery_discharging():
                energy_full, energy_now, power_now = self.get_battery_state()
                all_energy_full.append(energy_full)
                all_energy_now.append(energy_now)
                all_power_now.append(power_now)
        else:
            warnings.warn("UPS is not supported.")
    except (RuntimeError, IOError) as e:
        warnings.warn("Unable to read system power information!", category=RuntimeWarning)
    try:
        # NOTE(review): this ratio looks inverted for a "percentage"
        # (full/now >= 1 while discharging); the `<= 22.0` comparison below
        # suggests now/full * 100 was intended -- confirm before relying on it.
        total_percentage = sum(all_energy_full) / sum(all_energy_now)
        # Remaining runtime; the *60.0 presumably converts hours to minutes,
        # matching the 10.0-minute "final warning" threshold -- TODO confirm units.
        total_time = sum([energy_now / power_now * 60.0 for energy_now, power_now in zip(all_energy_now, all_power_now)])
        if total_time <= 10.0:
            return common.LOW_BATTERY_WARNING_FINAL
        elif total_percentage <= 22.0:
            return common.LOW_BATTERY_WARNING_EARLY
        else:
            return common.LOW_BATTERY_WARNING_NONE
    except ZeroDivisionError as e:
        # Reached when no discharging battery was recorded above (empty sums)
        # or a battery reported power_now == 0.
        warnings.warn("Unable to calculate low battery level: {0}".format(e), category=RuntimeWarning)
        return common.LOW_BATTERY_WARNING_NONE
Looks through all power supplies in POWER_SUPPLY_PATH. If there is an AC adapter online returns POWER_TYPE_AC returns LOW_BATTERY_WARNING_NONE. Otherwise determines total percentage and time remaining across all attached batteries.
def get_async_response(response_id):
    """
    Get the response from the async table

    Returns None when no record exists for ``response_id``; otherwise a dict
    with the stored ``status`` and the JSON-decoded ``response`` payload.
    """
    response = DYNAMODB_CLIENT.get_item(
        TableName=ASYNC_RESPONSE_TABLE,
        Key={'id': {'S': str(response_id)}}
    )
    if 'Item' not in response:
        # DynamoDB omits the 'Item' key when the id is not found.
        return None

    return {
        'status': response['Item']['async_status']['S'],
        'response': json.loads(response['Item']['async_response']['S']),
    }
Get the response from the async table
def _get_force_constants_disps(force_constants, supercell, dataset, symmetry,
                               atom_list=None):
    """Calculate force constants Phi = -F / d

    Force constants are obtained by one of the following algorithms.

    Parameters
    ----------
    force_constants: ndarray
        Force constants
        shape=(len(atom_list),n_satom,3,3)
        dtype=double
    supercell: Supercell
        Supercell
    dataset: dict
        Displacement dataset. Forces are also stored.
    symmetry: Symmetry
        Symmetry information of supercell
    atom_list: list
        List of atom indices corresponding to the first index of force
        constants. None assigns all atoms in supercell.

    Returns
    -------
    disp_atom_list : ndarray
        Unique indices of the displaced atoms found in the dataset.
    """
    symprec = symmetry.get_symmetry_tolerance()
    disp_atom_list = np.unique([x['number'] for x in dataset['first_atoms']])
    for disp_atom_number in disp_atom_list:
        # Collect every displacement and its force set recorded for this atom.
        disps = []
        sets_of_forces = []
        for x in dataset['first_atoms']:
            if x['number'] != disp_atom_number:
                continue
            disps.append(x['displacement'])
            sets_of_forces.append(x['forces'])
        site_symmetry = symmetry.get_site_symmetry(disp_atom_number)
        # Fills the corresponding rows of `force_constants` in-place.
        solve_force_constants(force_constants,
                              disp_atom_number,
                              disps,
                              sets_of_forces,
                              supercell,
                              site_symmetry,
                              symprec,
                              atom_list=atom_list)
    return disp_atom_list
Calculate force constants Phi = -F / d

Force constants are obtained by one of the following algorithms.

Parameters
----------
force_constants: ndarray
    Force constants
    shape=(len(atom_list),n_satom,3,3)
    dtype=double
supercell: Supercell
    Supercell
dataset: dict
    Displacement dataset. Forces are also stored.
symmetry: Symmetry
    Symmetry information of supercell
atom_list: list
    List of atom indices corresponding to the first index of force constants. None assigns all atoms in supercell.
def make_position_choices(self):
    """Create choices for available positions
    """
    return [
        {"ResultValue": pos, "ResultText": pos}
        for pos in self.get_available_positions()
    ]
Create choices for available positions
def formatFunctionNode(node,path,stack): '''Add some helpful attributes to node.''' #node.name is already defined by AST module node.weight = calcFnWeight(node) node.path = path node.pclass = getCurrentClass(stack) return node
Add some helpful attributes to node.
def _empty_except_predicates(xast, node, context): '''Check if a node is empty (no child nodes or attributes) except for any predicates defined in the specified xpath. :param xast: parsed xpath (xpath abstract syntax tree) from :mod:`eulxml.xpath` :param node: lxml element to check :param context: any context required for the xpath (e.g., namespace definitions) :returns: boolean indicating if the element is empty or not ''' # copy the node, remove predicates, and check for any remaining # child nodes or attributes node_c = deepcopy(node) _remove_predicates(xast, node_c, context) return bool(len(node_c) == 0 and len(node_c.attrib) == 0)
Check if a node is empty (no child nodes or attributes) except for any predicates defined in the specified xpath. :param xast: parsed xpath (xpath abstract syntax tree) from :mod:`eulxml.xpath` :param node: lxml element to check :param context: any context required for the xpath (e.g., namespace definitions) :returns: boolean indicating if the element is empty or not
def worker_collectionfinish(self, node, ids):
    """worker has finished test collection.

    This adds the collection for this node to the scheduler. If the
    scheduler indicates collection is finished (i.e. all initial nodes
    have submitted their collections), then tells the scheduler to
    schedule the collected items. When initiating scheduling the first
    time it logs which scheduler is in use.
    """
    if self.shuttingdown:
        # Ignore late collection reports while the session is tearing down.
        return
    self.config.hook.pytest_xdist_node_collection_finished(node=node, ids=ids)
    # tell session which items were effectively collected otherwise
    # the master node will finish the session with EXIT_NOTESTSCOLLECTED
    self._session.testscollected = len(ids)
    self.sched.add_node_collection(node, ids)
    if self.terminal:
        # Show the per-gateway collected-test count in the status line.
        self.trdist.setstatus(node.gateway.spec, "[%d]" % (len(ids)))
    if self.sched.collection_is_completed:
        if self.terminal and not self.sched.has_pending:
            self.trdist.ensure_show_status()
            self.terminal.write_line("")
            if self.config.option.verbose > 0:
                # Log the scheduler class only once, when scheduling begins.
                self.terminal.write_line(
                    "scheduling tests via %s" % (self.sched.__class__.__name__)
                )
        self.sched.schedule()
worker has finished test collection. This adds the collection for this node to the scheduler. If the scheduler indicates collection is finished (i.e. all initial nodes have submitted their collections), then tells the scheduler to schedule the collected items. When initiating scheduling the first time it logs which scheduler is in use.
def delete_token():
    ''' Delete current token, file & CouchDB admin user '''
    # NOTE: Python 2 print statements -- this module targets Python 2 only.
    username = get_admin()[0]
    admins = get_couchdb_admins()
    # Delete current admin if exist
    if username in admins:
        print 'I delete {} CouchDB user'.format(username)
        delete_couchdb_admin(username)
    # Delete token file if exist
    if os.path.isfile(LOGIN_FILENAME):
        print 'I delete {} token file'.format(LOGIN_FILENAME)
        os.remove(LOGIN_FILENAME)
Delete current token, file & CouchDB admin user
def command(
        self,
        mark_success=False,
        ignore_all_deps=False,
        ignore_depends_on_past=False,
        ignore_task_deps=False,
        ignore_ti_state=False,
        local=False,
        pickle_id=None,
        raw=False,
        job_id=None,
        pool=None,
        cfg_path=None):
    """
    Returns a command that can be executed anywhere where airflow is
    installed. This command is part of the message sent to executors by
    the orchestrator.
    """
    # Gather all options once, then forward them to the list-based builder.
    options = dict(
        mark_success=mark_success,
        ignore_all_deps=ignore_all_deps,
        ignore_depends_on_past=ignore_depends_on_past,
        ignore_task_deps=ignore_task_deps,
        ignore_ti_state=ignore_ti_state,
        local=local,
        pickle_id=pickle_id,
        raw=raw,
        job_id=job_id,
        pool=pool,
        cfg_path=cfg_path,
    )
    return " ".join(self.command_as_list(**options))
Returns a command that can be executed anywhere where airflow is installed. This command is part of the message sent to executors by the orchestrator.
def configure(self, options, config):
    """Configure the plugin and system, based on selected options.

    attr and eval_attr may each be lists.

    self.attribs will be a list of lists of tuples. In that list, each
    list is a group of attributes, all of which must match for the rule
    to match.
    """
    self.attribs = []
    # handle python eval-expression parameter
    if compat_24 and options.eval_attr:
        eval_attr = tolist(options.eval_attr)
        for attr in eval_attr:
            # "<python expression>"
            # -> eval(expr) in attribute context must be True
            # NOTE: eval of a user-supplied CLI expression; acceptable here
            # because the user controls their own test invocation.
            def eval_in_context(expr, obj, cls):
                return eval(expr, None, ContextHelper(obj, cls))
            self.attribs.append([(attr, eval_in_context)])
    # attribute requirements are a comma separated list of
    # 'key=value' pairs
    if options.attr:
        std_attr = tolist(options.attr)
        for attr in std_attr:
            # all attributes within an attribute group must match
            attr_group = []
            for attrib in attr.strip().split(","):
                # don't die on trailing comma
                if not attrib:
                    continue
                items = attrib.split("=", 1)
                if len(items) > 1:
                    # "name=value"
                    # -> 'str(obj.name) == value' must be True
                    key, value = items
                else:
                    key = items[0]
                    if key[0] == "!":
                        # "!name"
                        # 'bool(obj.name)' must be False
                        key = key[1:]
                        value = False
                    else:
                        # "name"
                        # -> 'bool(obj.name)' must be True
                        value = True
                attr_group.append((key, value))
            self.attribs.append(attr_group)
    # Enable the plugin only when at least one attribute rule was given.
    if self.attribs:
        self.enabled = True
Configure the plugin and system, based on selected options. attr and eval_attr may each be lists. self.attribs will be a list of lists of tuples. In that list, each list is a group of attributes, all of which must match for the rule to match.
def read_reaction(self, root):
    """Parse the reaction encoded in the folder name under *root* and
    attach reaction metadata and structures to self."""
    folder_name = os.path.basename(root)
    self.reaction, self.sites = ase_tools.get_reaction_from_folder(
        folder_name)  # reaction dict
    self.stdout.write(
        '----------- REACTION: {} --> {} --------------\n'
        .format('+'.join(self.reaction['reactants']),
                '+'.join(self.reaction['products'])))
    self.reaction_atoms, self.prefactors, self.prefactors_TS, \
        self.states = ase_tools.get_reaction_atoms(self.reaction)
    """Create empty dictionaries"""
    # One placeholder slot per reactant/product species.
    r_empty = ['' for n in range(len(self.reaction_atoms['reactants']))]
    p_empty = ['' for n in range(len(self.reaction_atoms['products']))]
    self.structures = {'reactants': r_empty[:],
                       'products': p_empty[:]}
    key_value_pairs = {}
    """ Match reaction gas species with their atomic structure """
    for key, mollist in self.reaction_atoms.items():
        for i, molecule in enumerate(mollist):
            if self.states[key][i] == 'gas':
                # Every gas-phase species must already have a structure
                # collected from the gas folder.
                assert molecule in self.ase_ids_gas.keys(), \
                    """Molecule {molecule} is missing in folder {gas_folder}"""\
                    .format(molecule=clear_prefactor(self.reaction[key][i]),
                            gas_folder=self.gas_folder)
                self.structures[key][i] = self.gas[molecule]
                species = clear_prefactor(
                    self.reaction[key][i])
                key_value_pairs.update(
                    {'species': clear_state(species)})
                self.ase_ids.update({species: self.ase_ids_gas[molecule]})
    """ Add empty slab to structure dict"""
    for key, mollist in self.reaction_atoms.items():
        if '' in mollist:
            # An empty string denotes the clean-slab placeholder.
            n = mollist.index('')
            self.structures[key][n] = self.empty
Create empty dictionaries
def contains_vasp_input(dir_name):
    """
    Checks if a directory contains valid VASP input.

    Args:
        dir_name: Directory name to check.

    Returns:
        True if directory contains all four VASP input files (INCAR,
        POSCAR, KPOINTS and POTCAR).
    """
    # Either the file itself or its ".orig" backup counts as present.
    return all(
        os.path.exists(os.path.join(dir_name, f))
        or os.path.exists(os.path.join(dir_name, f + ".orig"))
        for f in ("INCAR", "POSCAR", "POTCAR", "KPOINTS")
    )
Checks if a directory contains valid VASP input. Args: dir_name: Directory name to check. Returns: True if directory contains all four VASP input files (INCAR, POSCAR, KPOINTS and POTCAR).
def update_feature_flag(self, state, name, user_email=None, check_feature_exists=None, set_at_application_level_also=None):
    """UpdateFeatureFlag.
    [Preview API] Change the state of an individual feature flag for a name
    :param :class:`<FeatureFlagPatch> <azure.devops.v5_0.feature_availability.models.FeatureFlagPatch>` state: State that should be set
    :param str name: The name of the feature to change
    :param str user_email:
    :param bool check_feature_exists: Checks if the feature exists before setting the state
    :param bool set_at_application_level_also:
    :rtype: :class:`<FeatureFlag> <azure.devops.v5_0.feature_availability.models.FeatureFlag>`
    """
    # The feature name is part of the URL route; everything else is sent
    # as query parameters, omitted when the caller leaves them as None.
    route_values = {}
    if name is not None:
        route_values['name'] = self._serialize.url('name', name, 'str')
    query_parameters = {}
    if user_email is not None:
        query_parameters['userEmail'] = self._serialize.query('user_email', user_email, 'str')
    if check_feature_exists is not None:
        query_parameters['checkFeatureExists'] = self._serialize.query('check_feature_exists', check_feature_exists, 'bool')
    if set_at_application_level_also is not None:
        query_parameters['setAtApplicationLevelAlso'] = self._serialize.query('set_at_application_level_also', set_at_application_level_also, 'bool')
    content = self._serialize.body(state, 'FeatureFlagPatch')
    response = self._send(http_method='PATCH',
                          location_id='3e2b80f8-9e6f-441e-8393-005610692d9c',
                          version='5.0-preview.1',
                          route_values=route_values,
                          query_parameters=query_parameters,
                          content=content)
    return self._deserialize('FeatureFlag', response)
UpdateFeatureFlag. [Preview API] Change the state of an individual feature flag for a name :param :class:`<FeatureFlagPatch> <azure.devops.v5_0.feature_availability.models.FeatureFlagPatch>` state: State that should be set :param str name: The name of the feature to change :param str user_email: :param bool check_feature_exists: Checks if the feature exists before setting the state :param bool set_at_application_level_also: :rtype: :class:`<FeatureFlag> <azure.devops.v5_0.feature_availability.models.FeatureFlag>`
def vcsmode_vcs_mode(self, **kwargs):
    """Auto Generated Code
    """
    # Build <config><vcsmode xmlns=...><vcs-mode>TEXT</vcs-mode></vcsmode>.
    config = ET.Element("config")
    vcsmode = ET.SubElement(config, "vcsmode",
                            xmlns="urn:brocade.com:mgmt:brocade-vcs")
    ET.SubElement(vcsmode, "vcs-mode").text = kwargs.pop('vcs_mode')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def trim_docstring(docstring):
    """Uniformly trims leading/trailing whitespace from docstrings.

    Based on
    http://www.python.org/peps/pep-0257.html#handling-docstring-indentation

    Returns "" for empty or whitespace-only input; otherwise the docstring
    with tabs expanded, common leading indentation removed from the
    continuation lines, trailing whitespace stripped from each line, and
    surrounding blank lines removed.
    """
    if not docstring or not docstring.strip():
        return ""
    # Convert tabs to spaces and split into lines
    lines = docstring.expandtabs().splitlines()
    # Per PEP 257 the first line's indentation is ignored when computing the
    # common leading whitespace: only continuation lines participate.
    # (Previously the first line was included, so a docstring written as
    # "Summary\n    detail" never had its continuation indent stripped.)
    indent = min(
        (len(line) - len(line.lstrip()) for line in lines[1:] if line.lstrip()),
        default=0,
    )
    trimmed = [lines[0].lstrip()] + [line[indent:].rstrip() for line in lines[1:]]
    return "\n".join(trimmed).strip()
Uniformly trims leading/trailing whitespace from docstrings. Based on http://www.python.org/peps/pep-0257.html#handling-docstring-indentation
def wait_for_ajax_calls_to_complete(self, timeout=5):
    """
    Waits until there are no active or pending ajax requests.
    Raises TimeoutException should silence not be had.

    :param timeout: time to wait for silence (default: 5 seconds)
    :return: None
    """
    from selenium.webdriver.support.ui import WebDriverWait

    def _ajax_idle(driver):
        # jQuery.active counts in-flight ajax requests.
        return driver.execute_script("return jQuery.active === 0")

    WebDriverWait(self.driver, timeout).until(_ajax_idle)
Waits until there are no active or pending ajax requests. Raises TimeoutException should silence not be had. :param timeout: time to wait for silence (default: 5 seconds) :return: None
def apply_gravity(repulsion, nodes, gravity, scaling_ratio):
    """
    Iterate through the nodes and apply the gravity directly to each
    node object via the repulsion model.
    """
    scaled_gravity = gravity / scaling_ratio
    for node in nodes:
        repulsion.apply_gravitation(node, scaled_gravity)
Iterate through the nodes or edges and apply the gravity directly to the node objects.
def run_process(path: Union[Path, str], target: Callable, *,
                args: Tuple=(),
                kwargs: Dict[str, Any]=None,
                callback: Callable[[Set[Tuple[Change, str]]], None]=None,
                watcher_cls: Type[AllWatcher]=PythonWatcher,
                debounce=400,
                min_sleep=100) -> int:
    """
    Run a function in a subprocess using multiprocessing.Process,
    restart it whenever files change in path.

    :param path: directory (or file) watched for changes
    :param target: callable executed in the child process
    :param args: positional arguments passed to ``target``
    :param kwargs: keyword arguments passed to ``target``
    :param callback: optional hook invoked with the change set before
        each restart
    :param watcher_cls: watcher class used to detect file changes
    :param debounce: debounce interval forwarded to ``watch``
        (presumably milliseconds -- confirm against the watch API)
    :param min_sleep: minimum sleep between checks forwarded to ``watch``
    :return: number of restarts performed
    """
    process = _start_process(target=target, args=args, kwargs=kwargs)
    reloads = 0
    for changes in watch(path, watcher_cls=watcher_cls, debounce=debounce,
                         min_sleep=min_sleep):
        callback and callback(changes)
        # Restart: stop the old child, then spawn a fresh one.
        _stop_process(process)
        process = _start_process(target=target, args=args, kwargs=kwargs)
        reloads += 1
    return reloads
Run a function in a subprocess using multiprocessing.Process, restart it whenever files change in path.
def average_loss(lc):
    """
    Given a loss curve array with `poe` and `loss` fields,
    computes the average loss on a period of time.

    :note: As the loss curve is supposed to be piecewise linear as it
        is a result of a linear interpolation, we compute an exact
        integral by using the trapezoidal rule with the width given by the
        loss bin width.
    """
    # Accept either a structured array with named 'loss'/'poe' fields or a
    # plain (losses, poes) pair.
    losses, poes = (lc['loss'], lc['poe']) if lc.dtype.names else lc
    # Trapezoidal rule: dot product of bin widths with per-bin mean PoEs.
    # The leading minus presumably compensates for the sign convention of
    # pairwise_diff -- TODO confirm against pairwise_diff's definition.
    return -pairwise_diff(losses) @ pairwise_mean(poes)
Given a loss curve array with `poe` and `loss` fields, computes the average loss on a period of time.

:note: As the loss curve is supposed to be piecewise linear as it is a result of a linear interpolation, we compute an exact integral by using the trapezoidal rule with the width given by the loss bin width.
def past_active_subjunctive(self):
    """
    Weak verbs
    I
    >>> verb = WeakOldNorseVerb()
    >>> verb.set_canonic_forms(["kalla", "kallaði", "kallaðinn"])
    >>> verb.past_active_subjunctive()
    ['kallaða', 'kallaðir', 'kallaði', 'kallaðim', 'kallaðið', 'kallaði']

    II
    >>> verb = WeakOldNorseVerb()
    >>> verb.set_canonic_forms(["mæla", "mælti", "mæltr"])
    >>> verb.past_active_subjunctive()
    ['mælta', 'mæltir', 'mælti', 'mæltim', 'mæltið', 'mælti']

    III
    >>> verb = WeakOldNorseVerb()
    >>> verb.set_canonic_forms(["telja", "taldi", "talinn"])
    >>> verb.past_active_subjunctive()
    ['telda', 'teldir', 'teldi', 'teldim', 'teldið', 'teldi']

    IV
    >>> verb = WeakOldNorseVerb()
    >>> verb.set_canonic_forms(["vaka", "vakti", "vakat"])
    >>> verb.past_active_subjunctive()
    ['vekta', 'vektir', 'vekti', 'vektim', 'vektið', 'vekti']

    :return: list of the six past active subjunctive forms
        (1sg, 2sg, 3sg, 1pl, 2pl, 3pl); empty list for an unknown subclass.
    """
    if self.subclass not in (1, 2, 3, 4):
        # Unknown subclass: no forms can be generated.
        return []

    # Stem = 3rd person singular past (sfg3et), dropping its final character
    # when sng (presumably the infinitive) ends in "a".
    subjunctive_root = self.sfg3et[:-1] if self.sng[-1] == "a" else self.sfg3et

    # Subclasses III and IV additionally take i-umlaut on the stem; the
    # suffixation below is otherwise identical for all four subclasses
    # (previously duplicated across two branches).
    if self.subclass in (3, 4):
        subjunctive_root = apply_i_umlaut(subjunctive_root)

    # 1sg keeps a possible stem-final "j"; it is dropped before the
    # remaining endings.
    forms = [subjunctive_root + "a"]
    if subjunctive_root.endswith("j"):
        subjunctive_root = subjunctive_root[:-1]
    forms.extend(subjunctive_root + ending
                 for ending in ("ir", "i", "im", "ið", "i"))
    return forms
Weak verbs I >>> verb = WeakOldNorseVerb() >>> verb.set_canonic_forms(["kalla", "kallaði", "kallaðinn"]) >>> verb.past_active_subjunctive() ['kallaða', 'kallaðir', 'kallaði', 'kallaðim', 'kallaðið', 'kallaði'] II >>> verb = WeakOldNorseVerb() >>> verb.set_canonic_forms(["mæla", "mælti", "mæltr"]) >>> verb.past_active_subjunctive() ['mælta', 'mæltir', 'mælti', 'mæltim', 'mæltið', 'mælti'] III >>> verb = WeakOldNorseVerb() >>> verb.set_canonic_forms(["telja", "taldi", "talinn"]) >>> verb.past_active_subjunctive() ['telda', 'teldir', 'teldi', 'teldim', 'teldið', 'teldi'] IV >>> verb = WeakOldNorseVerb() >>> verb.set_canonic_forms(["vaka", "vakti", "vakat"]) >>> verb.past_active_subjunctive() ['vekta', 'vektir', 'vekti', 'vektim', 'vektið', 'vekti'] :return:
def parse_args():
    """
    Parse commandline arguments.

    Builds the full GNMT training CLI (dataset, results, model, general,
    training, optimizer, scheduler, validation, test, checkpointing,
    benchmark and distributed option groups) and returns the parsed
    ``argparse.Namespace``. The string-typed scheduler options
    (--warmup-steps, --remain-steps, --decay-interval) are converted to
    Python values with ``literal_eval`` before returning.
    """
    def exclusive_group(group, name, default, help):
        # Registers a --<name>/--no-<name> pair of mutually exclusive flags
        # sharing one boolean destination with the given default.
        # (The `help` parameter deliberately shadows the builtin.)
        destname = name.replace('-', '_')
        subgroup = group.add_mutually_exclusive_group(required=False)
        subgroup.add_argument(f'--{name}', dest=f'{destname}',
                              action='store_true',
                              help=f'{help} (use \'--no-{name}\' to disable)')
        subgroup.add_argument(f'--no-{name}', dest=f'{destname}',
                              action='store_false', help=argparse.SUPPRESS)
        subgroup.set_defaults(**{destname: default})

    parser = argparse.ArgumentParser(
        description='GNMT training',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # dataset
    dataset = parser.add_argument_group('dataset setup')
    dataset.add_argument('--dataset-dir', default='data/wmt16_de_en',
                         help='path to the directory with training/test data')
    dataset.add_argument('--max-size', default=None, type=int,
                         help='use at most MAX_SIZE elements from training dataset (useful for benchmarking), by default uses entire dataset')

    # results
    results = parser.add_argument_group('results setup')
    results.add_argument('--results-dir', default='results',
                         help='path to directory with results, it will be automatically created if it does not exist')
    results.add_argument('--save', default='gnmt',
                         help='defines subdirectory within RESULTS_DIR for results from this training run')
    results.add_argument('--print-freq', default=10, type=int,
                         help='print log every PRINT_FREQ batches')

    # model
    model = parser.add_argument_group('model setup')
    model.add_argument('--hidden-size', default=1024, type=int,
                       help='model hidden size')
    model.add_argument('--num-layers', default=4, type=int,
                       help='number of RNN layers in encoder and in decoder')
    model.add_argument('--dropout', default=0.2, type=float,
                       help='dropout applied to input of RNN cells')
    exclusive_group(group=model, name='share-embedding', default=True,
                    help='use shared embeddings for encoder and decoder')
    model.add_argument('--smoothing', default=0.1, type=float,
                       help='label smoothing, if equal to zero model will use CrossEntropyLoss, if not zero model will be trained with label smoothing loss')

    # setup
    general = parser.add_argument_group('general setup')
    general.add_argument('--math', default='fp32', choices=['fp16', 'fp32'],
                         help='arithmetic type')
    general.add_argument('--seed', default=None, type=int,
                         help='master seed for random number generators, if "seed" is undefined then the master seed will be sampled from random.SystemRandom()')
    exclusive_group(group=general, name='eval', default=True,
                    help='run validation and test after every epoch')
    exclusive_group(group=general, name='env', default=False,
                    help='print info about execution env')
    exclusive_group(group=general, name='cuda', default=True,
                    help='enables cuda')
    exclusive_group(group=general, name='cudnn', default=True,
                    help='enables cudnn')

    # training
    training = parser.add_argument_group('training setup')
    training.add_argument('--train-batch-size', default=128, type=int,
                          help='training batch size per worker')
    training.add_argument('--train-global-batch-size', default=None, type=int,
                          help='global training batch size, this argument does not have to be defined, if it is defined it will be used to automatically compute train_iter_size using the equation: train_iter_size = train_global_batch_size // (train_batch_size * world_size)')
    training.add_argument('--train-iter-size', metavar='N', default=1,
                          type=int,
                          help='training iter size, training loop will accumulate gradients over N iterations and execute optimizer every N steps')
    training.add_argument('--epochs', default=8, type=int,
                          help='max number of training epochs')
    training.add_argument('--grad-clip', default=5.0, type=float,
                          help='enables gradient clipping and sets maximum norm of gradients')
    training.add_argument('--max-length-train', default=50, type=int,
                          help='maximum sequence length for training (including special BOS and EOS tokens)')
    training.add_argument('--min-length-train', default=0, type=int,
                          help='minimum sequence length for training (including special BOS and EOS tokens)')
    training.add_argument('--train-loader-workers', default=2, type=int,
                          help='number of workers for training data loading')
    training.add_argument('--batching', default='bucketing', type=str,
                          choices=['random', 'sharding', 'bucketing'],
                          help='select batching algorithm')
    training.add_argument('--shard-size', default=80, type=int,
                          help='shard size for "sharding" batching algorithm, in multiples of global batch size')
    training.add_argument('--num-buckets', default=5, type=int,
                          help='number of buckets for "bucketing" batching algorithm')

    # optimizer
    optimizer = parser.add_argument_group('optimizer setup')
    optimizer.add_argument('--optimizer', type=str, default='Adam',
                           help='training optimizer')
    optimizer.add_argument('--lr', type=float, default=1.00e-3,
                           help='learning rate')
    optimizer.add_argument('--optimizer-extra', type=str, default="{}",
                           help='extra options for the optimizer')

    # scheduler
    scheduler = parser.add_argument_group('learning rate scheduler setup')
    scheduler.add_argument('--warmup-steps', type=str, default='200',
                           help='number of learning rate warmup iterations')
    scheduler.add_argument('--remain-steps', type=str, default='0.666',
                           help='starting iteration for learning rate decay')
    scheduler.add_argument('--decay-interval', type=str, default='None',
                           help='interval between learning rate decay steps')
    scheduler.add_argument('--decay-steps', type=int, default=4,
                           help='max number of learning rate decay steps')
    scheduler.add_argument('--decay-factor', type=float, default=0.5,
                           help='learning rate decay factor')

    # validation
    val = parser.add_argument_group('validation setup')
    val.add_argument('--val-batch-size', default=64, type=int,
                     help='batch size for validation')
    val.add_argument('--max-length-val', default=125, type=int,
                     help='maximum sequence length for validation (including special BOS and EOS tokens)')
    val.add_argument('--min-length-val', default=0, type=int,
                     help='minimum sequence length for validation (including special BOS and EOS tokens)')
    val.add_argument('--val-loader-workers', default=0, type=int,
                     help='number of workers for validation data loading')

    # test
    test = parser.add_argument_group('test setup')
    test.add_argument('--test-batch-size', default=128, type=int,
                      help='batch size for test')
    test.add_argument('--max-length-test', default=150, type=int,
                      help='maximum sequence length for test (including special BOS and EOS tokens)')
    test.add_argument('--min-length-test', default=0, type=int,
                      help='minimum sequence length for test (including special BOS and EOS tokens)')
    test.add_argument('--beam-size', default=5, type=int,
                      help='beam size')
    test.add_argument('--len-norm-factor', default=0.6, type=float,
                      help='length normalization factor')
    test.add_argument('--cov-penalty-factor', default=0.1, type=float,
                      help='coverage penalty factor')
    test.add_argument('--len-norm-const', default=5.0, type=float,
                      help='length normalization constant')
    test.add_argument('--intra-epoch-eval', metavar='N', default=0, type=int,
                      help='evaluate within training epoch, this option will enable extra N equally spaced evaluations executed during each training epoch')
    test.add_argument('--test-loader-workers', default=0, type=int,
                      help='number of workers for test data loading')

    # checkpointing
    chkpt = parser.add_argument_group('checkpointing setup')
    chkpt.add_argument('--start-epoch', default=0, type=int,
                       help='manually set initial epoch counter')
    chkpt.add_argument('--resume', default=None, type=str, metavar='PATH',
                       help='resumes training from checkpoint from PATH')
    chkpt.add_argument('--save-all', action='store_true', default=False,
                       help='saves checkpoint after every epoch')
    chkpt.add_argument('--save-freq', default=5000, type=int,
                       help='save checkpoint every SAVE_FREQ batches')
    chkpt.add_argument('--keep-checkpoints', default=0, type=int,
                       help='keep only last KEEP_CHECKPOINTS checkpoints, affects only checkpoints controlled by --save-freq option')

    # benchmarking
    benchmark = parser.add_argument_group('benchmark setup')
    benchmark.add_argument('--target-bleu', default=24.0, type=float,
                           help='target accuracy, training will be stopped when the target is achieved')

    # distributed
    distributed = parser.add_argument_group('distributed setup')
    distributed.add_argument('--rank', default=0, type=int,
                             help='global rank of the process, do not set!')
    distributed.add_argument('--local_rank', default=0, type=int,
                             help='local rank of the process, do not set!')

    args = parser.parse_args()

    # These options are parsed as strings so they can carry ints, floats or
    # None; convert them to real Python values here.
    args.warmup_steps = literal_eval(args.warmup_steps)
    args.remain_steps = literal_eval(args.remain_steps)
    args.decay_interval = literal_eval(args.decay_interval)

    return args
Parse commandline arguments.
def make_regression(func, n_samples=100, n_features=1, bias=0.0, noise=0.0,
                    random_state=None):
    """
    Make dataset for a regression problem.

    Examples
    --------
    >>> f = lambda x: 0.5*x + np.sin(2*x)
    >>> X, y = make_regression(f, bias=.5, noise=1., random_state=1)
    >>> X.shape
    (100, 1)
    >>> y.shape
    (100,)
    >>> X[:5].round(2)
    array([[ 1.62],
           [-0.61],
           [-0.53],
           [-1.07],
           [ 0.87]])
    >>> y[:5].round(2)
    array([ 0.76,  0.48, -0.23, -0.28,  0.83])
    """
    rng = check_random_state(random_state)
    X = rng.randn(n_samples, n_features)
    # Apply func column-wise (one positional argument per feature) and
    # shift everything by the constant bias term.
    y = func(*X.T) + bias
    if noise > 0.0:
        # Additive Gaussian noise on the targets.
        y += rng.normal(scale=noise, size=y.shape)
    return X, y
Make dataset for a regression problem. Examples -------- >>> f = lambda x: 0.5*x + np.sin(2*x) >>> X, y = make_regression(f, bias=.5, noise=1., random_state=1) >>> X.shape (100, 1) >>> y.shape (100,) >>> X[:5].round(2) array([[ 1.62], [-0.61], [-0.53], [-1.07], [ 0.87]]) >>> y[:5].round(2) array([ 0.76, 0.48, -0.23, -0.28, 0.83])
def hessian(self, x, y, kappa_ext, ra_0=0, dec_0=0):
    """
    Hessian matrix

    :param x: x-coordinate
    :param y: y-coordinate
    :param kappa_ext: external convergence
    :param ra_0: unused here, kept for a uniform profile interface
    :param dec_0: unused here, kept for a uniform profile interface
    :return: second order derivatives f_xx, f_yy, f_xy
    """
    # Both shear components are fixed to zero in this profile, so the
    # Hessian is isotropic: f_xx = f_yy = kappa_ext, cross term vanishes.
    f_xx = kappa_ext
    f_yy = kappa_ext
    f_xy = 0
    return f_xx, f_yy, f_xy
Hessian matrix :param x: x-coordinate :param y: y-coordinate :param kappa_ext: external convergence :return: second order derivatives f_xx, f_yy, f_xy
def find_file(name, directory):
    """Searches up from a directory looking for a file"""
    parts = directory.split(os.sep)
    # Probe from the deepest directory upwards, stopping one short of the
    # path's first component, and return the first match found.
    for depth in range(len(parts), 1, -1):
        candidate = "%s%s%s" % (os.sep.join(parts[:depth]), os.sep, name)
        if os.path.exists(candidate):
            return abspath(candidate)
    return None
Searches up from a directory looking for a file
def removeSheet(self, vs):
    'Remove all traces of sheets named vs.name from the cmdlog.'
    # Keep only the log rows that reference a different sheet.
    self.rows = list(filter(lambda row: row.sheet != vs.name, self.rows))
    status('removed "%s" from cmdlog' % vs.name)
Remove all traces of sheets named vs.name from the cmdlog.
def calculate_cycles(self):
    """
    Calculate performance model cycles from cache stats.

    calculate_cache_access() needs to have been executed before.

    Returns self.results, updated in place with per-level cycle counts,
    'iterations per cacheline' and (for memory) the bandwidth used.
    """
    element_size = self.kernel.datatypes_size[self.kernel.datatype]
    elements_per_cacheline = float(self.machine['cacheline size']) // element_size
    iterations_per_cacheline = (sympy.Integer(self.machine['cacheline size']) /
                                sympy.Integer(self.kernel.bytes_per_iteration))
    self.results['iterations per cacheline'] = iterations_per_cacheline
    cacheline_size = float(self.machine['cacheline size'])

    # Per-level load/store stream counts from the cache predictor.
    loads, stores = (self.predictor.get_loads(), self.predictor.get_stores())

    # Skip level 0 (L1); iterate over L2, L3, ..., memory.
    for cache_level, cache_info in list(enumerate(self.machine['memory hierarchy']))[1:]:
        throughput, duplexness = cache_info['non-overlap upstream throughput']
        if type(throughput) is str and throughput == 'full socket memory bandwidth':
            # Memory transfer:
            # we use measured bandwidth to calculate cycles and then add
            # penalty cycles (if given in the machine file).

            # First, compile stream counts at the current cache level;
            # write-allocate is already resolved in the cache predictor.
            read_streams = loads[cache_level]
            write_streams = stores[cache_level]
            # Second, find the best fitting benchmark kernel (closest to
            # the observed stream counts):
            threads_per_core = 1
            bw, measurement_kernel = self.machine.get_bandwidth(
                cache_level, read_streams, write_streams, threads_per_core)

            # cycles = transferred bytes * clock / bandwidth
            if duplexness == 'half-duplex':
                cycles = float(loads[cache_level] + stores[cache_level]) * \
                    float(elements_per_cacheline) * float(element_size) * \
                    float(self.machine['clock']) / float(bw)
            else:  # full-duplex
                raise NotImplementedError(
                    "full-duplex mode is not (yet) supported for memory transfers.")
            # NOTE(review): the penalty is described as "per read stream"
            # but is multiplied by the *store* stream count -- confirm
            # whether loads[cache_level] was intended here.
            if 'penalty cycles per read stream' in cache_info:
                cycles += stores[cache_level] * \
                    cache_info['penalty cycles per read stream']
            self.results.update({
                'memory bandwidth kernel': measurement_kernel,
                'memory bandwidth': bw})
        else:
            # throughput is given in B/cy, but we need CL/cy:
            throughput = float(throughput) / cacheline_size
            # Only cache cycles count at non-memory levels.
            if duplexness == 'half-duplex':
                cycles = (loads[cache_level] + stores[cache_level]) / float(throughput)
            elif duplexness == 'full-duplex':
                # Loads and stores overlap; the slower direction dominates.
                cycles = max(loads[cache_level] / float(throughput),
                             stores[cache_level] / float(throughput))
            else:
                raise ValueError("Duplexness of cache throughput may only be 'half-duplex'"
                                 "or 'full-duplex', found {} in {}.".format(
                                     duplexness, cache_info['name']))

        # Record cycles both in the ordered list and under the level name.
        self.results['cycles'].append((cache_info['level'], cycles))
        self.results[cache_info['level']] = cycles

    return self.results
Calculate performance model cycles from cache stats.

calculate_cache_access() needs to have been executed before.
def extract_diff_sla_from_config_file(obj, options_file):
    """
    Helper function to parse diff config file, which contains SLA rules for
    diff comparisons.

    :param obj: object the parsed SLA rules are attached to via set_sla()
    :param options_file: path of the INI-style config file to read
    """
    config_obj = ConfigParser.ConfigParser()
    # Preserve the case of option names (the default optionxform lowercases).
    config_obj.optionxform = str
    config_obj.read(options_file)
    for section in config_obj.sections():
        # get_rule_strings also returns kwargs, which are not used here.
        rule_strings, _kwargs = get_rule_strings(config_obj, section)
        for (key, val) in rule_strings.iteritems():
            set_sla(obj, section, key, val)
Helper function to parse diff config file, which contains SLA rules for diff comparisons
def rename(name, new_name):
    '''
    Change the username for a named user

    CLI Example:

    .. code-block:: bash

        salt '*' user.rename name new_name
    '''
    # The source account must exist and the target name must be free.
    if not info(name):
        raise CommandExecutionError('User \'{0}\' does not exist'.format(name))
    if info(new_name):
        raise CommandExecutionError(
            'User \'{0}\' already exists'.format(new_name)
        )
    # usermod -l <new> <old> performs the actual rename.
    __salt__['cmd.run'](['usermod', '-l', new_name, name], python_shell=False)
    # Report success only if the renamed account can now be looked up.
    return info(new_name).get('name') == new_name
Change the username for a named user CLI Example: .. code-block:: bash salt '*' user.rename name new_name
def create_args(args, root):
    """
    Encapsulates a set of custom command line arguments in key=value or
    key.namespace=value form into a chain of Namespace objects, where each
    next level is an attribute of the Namespace object on the current level

    Parameters
    ----------
    args : list
        A list of strings representing arguments in key=value form
    root : Namespace
        The top-level element of the argument tree
    """
    parsed = {}
    for raw_arg in args:
        parse_extension_arg(raw_arg, parsed)

    # Shorter dotted names first, so parent namespaces are created before
    # their children.
    for key in sorted(parsed, key=len):
        update_namespace(root, key.split('.'), parsed[key])
Encapsulates a set of custom command line arguments in key=value or key.namespace=value form into a chain of Namespace objects, where each next level is an attribute of the Namespace object on the current level Parameters ---------- args : list A list of strings representing arguments in key=value form root : Namespace The top-level element of the argument tree
def add_toolbars_to_menu(self, menu_title, actions):
    """Add toolbars to a menu."""
    # Index 6 is the position of the View menu in self.menus; see
    # plugins/editor.py setup_other_windows.
    view_menu = self.menus[6]
    if actions == self.toolbars and view_menu:
        toggle_actions = [toolbar.toggleViewAction()
                          for toolbar in self.toolbars]
        add_actions(view_menu, toggle_actions)
Add toolbars to a menu.
def set_burnstages_upgrade_massive(self):
    '''
    Outputs burning stages as done in burningstages_upgrade (nugridse),
    one block per run in self.runs_H5_surf.
    '''
    burn_info=[]
    burn_mini=[]
    # First pass: collect burn-stage data and initial mass for each run.
    for i in range(len(self.runs_H5_surf)):
        sefiles=se(self.runs_H5_out[i])
        burn_info.append(sefiles.burnstage_upgrade())
        mini=sefiles.get('mini')
        #zini=sefiles.get('zini')
        burn_mini.append(mini)
    # Second pass: print the collected results per initial mass.
    # NOTE: Python 2 print statements -- this module targets Python 2.
    for i in range(len(self.runs_H5_surf)):
        print 'Following returned for each initial mass'
        print '[burn_cycles,burn_ages, burn_abun, burn_type,burn_lifetime]'
        print '----Mini: ',burn_mini[i],'------'
        print burn_info[i]
Outputs burning stages as done in burningstages_upgrade (nugridse)
def as_matrix(self, columns=None):
    """
    Convert the frame to its Numpy-array representation.

    .. deprecated:: 0.23.0
        Use :meth:`DataFrame.values` instead.

    Parameters
    ----------
    columns : list, optional, default:None
        If None, return all columns, otherwise, returns specified columns.

    Returns
    -------
    values : ndarray
        If the caller is heterogeneous and contains booleans or objects,
        the result will be of dtype=object. See Notes.

    See Also
    --------
    DataFrame.values

    Notes
    -----
    Return is NOT a Numpy-matrix, rather, a Numpy-array. The dtype is
    the lower-common-denominator of the column dtypes (implicit
    upcasting): mixed float16/float32 upcasts to float32, int32/uint8 to
    int32, and (per numpy.find_common_type) mixed int64/uint64 yields
    float64. This method is provided for backwards compatibility only;
    prefer '.values'.
    """
    msg = ("Method .as_matrix will be removed in a future version. "
           "Use .values instead.")
    warnings.warn(msg, FutureWarning, stacklevel=2)
    self._consolidate_inplace()
    return self._data.as_array(transpose=self._AXIS_REVERSED,
                               items=columns)
Convert the frame to its Numpy-array representation. .. deprecated:: 0.23.0 Use :meth:`DataFrame.values` instead. Parameters ---------- columns : list, optional, default:None If None, return all columns, otherwise, returns specified columns. Returns ------- values : ndarray If the caller is heterogeneous and contains booleans or objects, the result will be of dtype=object. See Notes. See Also -------- DataFrame.values Notes ----- Return is NOT a Numpy-matrix, rather, a Numpy-array. The dtype will be a lower-common-denominator dtype (implicit upcasting); that is to say if the dtypes (even of numeric types) are mixed, the one that accommodates all will be chosen. Use this with care if you are not dealing with the blocks. e.g. If the dtypes are float16 and float32, dtype will be upcast to float32. If dtypes are int32 and uint8, dtype will be upcase to int32. By numpy.find_common_type convention, mixing int64 and uint64 will result in a float64 dtype. This method is provided for backwards compatibility. Generally, it is recommended to use '.values'.
def confd_state_webui_listen_tcp_ip(self, **kwargs):
    """Auto Generated Code

    Builds the confd-state/webui/listen/tcp/ip element chain and stores
    the mandatory ``ip`` keyword as the leaf's text, then hands the tree
    to the (optionally overridden) callback.
    """
    config = ET.Element("config")
    parent = ET.SubElement(config, "confd-state",
                           xmlns="http://tail-f.com/yang/confd-monitoring")
    # Nest each intermediate element under the previous one.
    for tag in ("webui", "listen", "tcp"):
        parent = ET.SubElement(parent, tag)
    ip_leaf = ET.SubElement(parent, "ip")
    ip_leaf.text = kwargs.pop('ip')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def _build_message_body(self, body_size): """Build the Message body from the inbound queue. :rtype: str """ body = bytes() while len(body) < body_size: if not self._inbound: self.check_for_errors() sleep(IDLE_WAIT) continue body_piece = self._inbound.pop(0) if not body_piece.value: break body += body_piece.value return body
Build the Message body from the inbound queue. :rtype: str
def format(self, clip=0, grand=None):
    '''Return format dict.
    '''
    if self.number > 1:
        # Several entries: report the (integer) average and pluralize.
        avg, plural = int(self.total / self.number), 's'
    else:
        avg, plural = self.total, ''

    obj = self.objref
    if self.weak:
        # Stored as a weakref; dereference to reach the object.
        obj = obj()

    total_str = _SI2(self.total)
    if grand:
        # Append this entry's share of the grand total as a percentage.
        total_str += ' (%s)' % _p100(self.total, grand, prec=0)

    return _kwds(avg=_SI2(avg), high=_SI2(self.high),
                 lengstr=_lengstr(obj), obj=_repr(obj, clip=clip),
                 plural=plural, total=total_str)
Return format dict.
def get_connection(self, command_name, *keys, **options): """ Get a connection, blocking for ``self.timeout`` until a connection is available from the pool. If the connection returned is ``None`` then creates a new connection. Because we use a last-in first-out queue, the existing connections (having been returned to the pool after the initial ``None`` values were added) will be returned before ``None`` values. This means we only create new connections when we need to, i.e.: the actual number of connections will only increase in response to demand. """ # Make sure we haven't changed process. self._checkpid() # Try and get a connection from the pool. If one isn't available within # self.timeout then raise a ``ConnectionError``. connection = None try: connection = self.pool.get(block=True, timeout=self.timeout) except Empty: # Note that this is not caught by the redis client and will be # raised unless handled by application code. If you want never to raise ConnectionError("No connection available.") # If the ``connection`` is actually ``None`` then that's a cue to make # a new connection to add to the pool. if connection is None: connection = self.make_connection() try: # ensure this connection is connected to Redis connection.connect() # connections that the pool provides should be ready to send # a command. if not, the connection was either returned to the # pool before all data has been read or the socket has been # closed. either way, reconnect and verify everything is good. if not connection.is_ready_for_command(): connection.disconnect() connection.connect() if not connection.is_ready_for_command(): raise ConnectionError('Connection not ready') except: # noqa: E722 # release the connection back to the pool so that we don't leak it self.release(connection) raise return connection
Get a connection, blocking for ``self.timeout`` until a connection is available from the pool. If the connection returned is ``None`` then creates a new connection. Because we use a last-in first-out queue, the existing connections (having been returned to the pool after the initial ``None`` values were added) will be returned before ``None`` values. This means we only create new connections when we need to, i.e.: the actual number of connections will only increase in response to demand.
def add_wikipage(self, slug, content, **attrs):
    """
    Add a Wiki page to the project and returns a :class:`WikiPage` object.

    :param slug: slug of the :class:`WikiPage`
    :param content: body content of the :class:`WikiPage`
    :param attrs: optional attributes for :class:`WikiPage`
    """
    return WikiPages(self.requester).create(
        self.id, slug, content, **attrs
    )
Add a Wiki page to the project and returns a :class:`WikiPage` object. :param name: name of the :class:`WikiPage` :param attrs: optional attributes for :class:`WikiPage`
def calculate_new_length(gene_split, gene_results, hit):
    '''
    Calculate the combined subject length when a gene is split across
    several contigs.

    The hits listed in gene_split for this subject are merged by growing
    a running [old_start, old_end] interval; every extension of the
    interval adds the newly covered bases to the accumulated HSP length.

    :param gene_split: dict mapping subject header -> list of split-hit keys
    :param gene_results: dict mapping hit key -> result dict with
        'sbjct_start', 'sbjct_end' and 'HSP_length' entries (str or int)
    :param hit: hit dict providing the 'sbjct_header' to merge splits for
    :return: combined alignment length over all splits
    '''
    first = True
    for split in gene_split[hit['sbjct_header']]:
        new_start = int(gene_results[split]['sbjct_start'])
        new_end = int(gene_results[split]['sbjct_end'])

        if first:
            # Seed the running interval with the first HSP.
            new_length = int(gene_results[split]['HSP_length'])
            old_start = new_start
            old_end = new_end
            first = False
            continue

        # Extend to the left: count the extra bases before old_start.
        if new_start < old_start:
            new_length += old_start - new_start
            old_start = new_start
        # Extend to the right: count the extra bases after old_end.
        if new_end > old_end:
            new_length += new_end - old_end
            old_end = new_end
    return new_length
Function for calculating the new length if the gene is split across several contigs
def run(self):
    """Keep running this thread until it's stopped"""
    # Loop until the stop event is set by whoever owns this thread.
    while not self._finished.isSet():
        self._func(self._reference)
        # _interval lives on the callable itself; dividing by 1000
        # suggests it is in milliseconds (wait() takes seconds) --
        # confirm against where _interval is assigned. wait() doubles
        # as an interruptible sleep: setting the event wakes us early.
        self._finished.wait(self._func._interval / 1000.0)
Keep running this thread until it's stopped
def build_parser(self, options=None, permissive=False, **override_kwargs):
    """Construct an argparser from supplied options.

    :keyword override_kwargs: keyword arguments to override when calling
                              parser constructor.
    :keyword permissive: when true, build a parser that does not validate
                         required arguments.
    """
    parser_kwargs = copy.copy(self._parser_kwargs)
    parser_kwargs.setdefault('formatter_class',
                             argparse.ArgumentDefaultsHelpFormatter)
    parser_kwargs.update(override_kwargs)
    # Allow @file argument files unless the caller configured otherwise.
    parser_kwargs.setdefault('fromfile_prefix_chars', '@')
    parser = self._parser_class(**parser_kwargs)

    if options is None:
        # Re-emit the registered options, suppressing unset defaults so
        # they do not clobber values supplied elsewhere.
        options = []
        for registered in self._options:
            opt_kwargs = registered.kwargs.copy()
            if opt_kwargs.get('default') is None:
                opt_kwargs['default'] = argparse.SUPPRESS
            options.append(Option(*registered.args, **opt_kwargs))

    for option in options:
        option.add_argument(parser, permissive=permissive)
    return parser
Construct an argparser from supplied options. :keyword override_kwargs: keyword arguments to override when calling parser constructor. :keyword permissive: when true, build a parser that does not validate required arguments.
def endElement(self, name, value, connection):
    """Overwritten to also add the NextRecordName and NextRecordType to
    the base object"""
    # Map the two pagination elements onto instance attributes; anything
    # else is delegated to the base ResultSet handling.
    attr = {'NextRecordName': 'next_record_name',
            'NextRecordType': 'next_record_type'}.get(name)
    if attr is None:
        return ResultSet.endElement(self, name, value, connection)
    setattr(self, attr, value)
Overwritten to also add the NextRecordName and NextRecordType to the base object
def get_userid_from_botid(self, botid):
    '''Perform a lookup of bots.info to resolve a botid to a userid

    Args:
        botid (string): Slack botid to lookup.

    Returns:
        string: userid value, or the original botid if the lookup fails.
    '''
    botinfo = self.slack_client.api_call('bots.info', bot=botid)
    # Truthiness test instead of the previous `is True` identity check;
    # fall back to the raw botid when the API reports a failure.
    if botinfo['ok']:
        return botinfo['bot'].get('user_id')
    return botid
Perform a lookup of bots.info to resolve a botid to a userid Args: botid (string): Slack botid to lookup. Returns: string: userid value
def getArgNames(function):
    '''
    Returns a list of strings naming all of the arguments for the passed
    function.

    Parameters
    ----------
    function : function
        A function whose argument names are wanted.

    Returns
    -------
    argNames : [string]
        The names of the arguments of function.
    '''
    code = function.__code__
    # co_varnames lists the arguments first, then locals; keep only the
    # leading co_argcount entries.
    return code.co_varnames[:code.co_argcount]
Returns a list of strings naming all of the arguments for the passed function. Parameters ---------- function : function A function whose argument names are wanted. Returns ------- argNames : [string] The names of the arguments of function.
def _get_result_paths(self, data):
    """ Set the result paths """
    # OTU map (mandatory output).
    otu_path = self.Parameters['-O'].Value
    result = {'OtuMap': ResultPath(Path=otu_path, IsWritten=True)}

    # SumaClust will not produce any output file if the input file was
    # empty, so create an empty output file in that case.
    if not isfile(result['OtuMap'].Path):
        open(result['OtuMap'].Path, 'w').close()

    return result
Set the result paths
def new_worker_redirected_log_file(self, worker_id):
    """Create new logging files for workers to redirect its output."""
    # Log file names embed the worker id in hex form.
    name = "worker-" + ray.utils.binary_to_hex(worker_id)
    stdout_file, stderr_file = self.new_log_files(name, True)
    return stdout_file, stderr_file
Create new logging files for workers to redirect its output.
def upload_object(self, object_name, file_obj):
    """
    Upload an object to this bucket.

    :param str object_name: The target name of the object.
    :param file file_obj: The file (or file-like object) to upload.

    .. note:: (review) The previous docstring documented a
        ``content_type`` parameter, but this method does not accept one;
        the call is delegated as-is to the underlying client.
    """
    return self._client.upload_object(
        self._instance, self.name, object_name, file_obj)
Upload an object to this bucket. :param str object_name: The target name of the object. :param file file_obj: The file (or file-like object) to upload. :param str content_type: The content type associated to this object. This is mainly useful when accessing an object directly via a web browser. If unspecified, a content type *may* be automatically derived from the specified ``file_obj``.
def _compute_cell_extents_grid(bounding_rect=(0.03, 0.03, 0.97, 0.97), num_rows=2, num_cols=6, axis_pad=0.01): """ Produces array of num_rows*num_cols elements each containing the rectangular extents of the corresponding cell the grid, whose position is within bounding_rect. """ left, bottom, width, height = bounding_rect height_padding = axis_pad * (num_rows + 1) width_padding = axis_pad * (num_cols + 1) cell_height = float((height - height_padding) / num_rows) cell_width = float((width - width_padding) / num_cols) cell_height_padded = cell_height + axis_pad cell_width_padded = cell_width + axis_pad extents = list() for row in range(num_rows - 1, -1, -1): for col in range(num_cols): extents.append((left + col * cell_width_padded, bottom + row * cell_height_padded, cell_width, cell_height)) return extents
Produces array of num_rows*num_cols elements each containing the rectangular extents of the corresponding cell the grid, whose position is within bounding_rect.
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """
    Write the data encoding the SignatureVerify request payload to a
    stream.

    Args:
        output_stream (stream): A data stream in which to encode object
            data, supporting a write method; usually a BytearrayStream
            object.
        kmip_version (KMIPVersion): An enumeration defining the KMIP
            version with which the object will be encoded. Optional,
            defaults to KMIP 1.0.

    Raises:
        ValueError: Raised if the data attribute is not defined.
    """
    local_stream = utils.BytearrayStream()

    # Encode every field that is set, in a fixed order, into a local
    # buffer so the total payload length is known up front.
    fields = (
        self._unique_identifier,
        self._cryptographic_parameters,
        self._data,
        self._digested_data,
        self._signature_data,
        self._correlation_value,
        self._init_indicator,
        self._final_indicator,
    )
    for field in fields:
        if field:
            field.write(local_stream, kmip_version=kmip_version)

    # Emit the payload header (carrying the accumulated length) before
    # the buffered field encodings.
    self.length = local_stream.length()
    super(SignatureVerifyRequestPayload, self).write(
        output_stream,
        kmip_version=kmip_version
    )
    output_stream.write(local_stream.buffer)
Write the data encoding the SignatureVerify request payload to a stream. Args: output_stream (stream): A data stream in which to encode object data, supporting a write method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0. Raises: ValueError: Raised if the data attribute is not defined.
def padded_grid_stack_from_mask_sub_grid_size_and_psf_shape(cls, mask, sub_grid_size, psf_shape):
    """Setup a grid-stack of masked grid_stack from a mask, sub-grid size and psf-shape.

    Parameters
    -----------
    mask : Mask
        The mask whose masked pixels the grid-stack are setup using.
    sub_grid_size : int
        The size of a sub-pixels sub-grid (sub_grid_size x sub_grid_size).
    psf_shape : (int, int)
        The shape of the PSF used in the analysis, which defines the mask's blurring-region.
    """
    regular = PaddedRegularGrid.padded_grid_from_shape_psf_shape_and_pixel_scale(
        shape=mask.shape, psf_shape=psf_shape, pixel_scale=mask.pixel_scale)
    sub = PaddedSubGrid.padded_grid_from_mask_sub_grid_size_and_psf_shape(
        mask=mask, sub_grid_size=sub_grid_size, psf_shape=psf_shape)
    # TODO : The blurring grid is not used when the grid mapper is called;
    # the [[0.0, 0.0]] placeholder avoids errors in ray-tracing.
    # TODO : implement a more explicit solution.
    return GridStack(regular=regular, sub=sub, blurring=np.array([[0.0, 0.0]]))
Setup a grid-stack of masked grid_stack from a mask, sub-grid size and psf-shape. Parameters ----------- mask : Mask The mask whose masked pixels the grid-stack are setup using. sub_grid_size : int The size of a sub-pixels sub-grid (sub_grid_size x sub_grid_size). psf_shape : (int, int) The shape of the PSF used in the analysis, which defines the mask's blurring-region.
def run_update_cat(_):
    '''
    Update the category.
    '''
    recs = MPost2Catalog.query_all().objects()
    for rec in recs:
        # Records whose tag kind is 'z' are left untouched.
        if rec.tag_kind != 'z':
            print('-' * 40)
            print(rec.uid)
            print(rec.tag_id)
            print(rec.par_id)
            # Parent id becomes the tag id's first two characters plus "00".
            MPost2Catalog.update_field(rec.uid,
                                       par_id=rec.tag_id[:2] + "00")
Update the category.
def match_regex_list(patterns, string):
    """Perform a regex match of a string against a list of patterns.

    Returns true if the string matches at least one pattern in the list.
    """
    # re.search stops at the first match, unlike the previous re.findall
    # which scanned the entire string; only a yes/no answer is needed.
    return any(re.search(pattern, string) for pattern in patterns)
Perform a regex match of a string against a list of patterns. Returns true if the string matches at least one pattern in the list.
def _is_valid_relpath( relpath, maxdepth=None): ''' Performs basic sanity checks on a relative path. Requires POSIX-compatible paths (i.e. the kind obtained through cp.list_master or other such calls). Ensures that the path does not contain directory transversal, and that it does not exceed a stated maximum depth (if specified). ''' # Check relpath surrounded by slashes, so that `..` can be caught as # a path component at the start, end, and in the middle of the path. sep, pardir = posixpath.sep, posixpath.pardir if sep + pardir + sep in sep + relpath + sep: return False # Check that the relative path's depth does not exceed maxdepth if maxdepth is not None: path_depth = relpath.strip(sep).count(sep) if path_depth > maxdepth: return False return True
Performs basic sanity checks on a relative path. Requires POSIX-compatible paths (i.e. the kind obtained through cp.list_master or other such calls). Ensures that the path does not contain directory transversal, and that it does not exceed a stated maximum depth (if specified).
def system_drop_keyspace(self, keyspace):
    """
    drops a keyspace and any column families that are part of it.
    returns the new schema id.

    Parameters:
     - keyspace
    """
    # Register a Deferred under a fresh sequence id; it fires when the
    # matching response for this request arrives.
    self._seqid += 1
    deferred = self._reqs[self._seqid] = defer.Deferred()
    self.send_system_drop_keyspace(keyspace)
    return deferred
drops a keyspace and any column families that are part of it. returns the new schema id. Parameters: - keyspace
def delete(self, key, cas=0):
    """
    Delete a key/value from server.

    If key does not exist, it returns True.

    :param key: Key's name to be deleted
    :param cas: CAS of the key
    :return: True in case of success and False in case of failure.
    """
    # Issue the delete on every server first, then combine; short
    # circuiting with a generator would skip servers and change the
    # side effects.
    results = [server.delete(key, cas) for server in self.servers]
    return any(results)
Delete a key/value from server. If key does not exist, it returns True.

:param key: Key's name to be deleted
:param cas: CAS of the key
:return: True in case of success and False in case of failure.
def get_entity_by_query(self, uuid=None, path=None, metadata=None):
    '''Retrieve entity by query param which can be either uuid/path/metadata.

    Args:
        uuid (str): The UUID of the requested entity.
        path (str): The path of the requested entity.
        metadata (dict): A dictionary of one metadata {key: value} of the
            requested entity.

    Returns:
        The details of the entity, if found::

            {
                u'content_type': u'plain/text',
                u'created_by': u'303447',
                u'created_on': u'2017-03-13T10:52:23.275087Z',
                u'description': u'',
                u'entity_type': u'file',
                u'modified_by': u'303447',
                u'modified_on': u'2017-03-13T10:52:23.275126Z',
                u'name': u'myfile',
                u'parent': u'3abd8742-d069-44cf-a66b-2370df74a682',
                u'uuid': u'e2c25c1b-f6a9-4cf6-b8d2-271e628a9a56'
            }

    Raises:
        StorageArgumentException: Invalid arguments
        StorageForbiddenException: Server response code 403
        StorageNotFoundException: Server response code 404
        StorageException: other 400-600 error codes
    '''
    if not (uuid or path or metadata):
        raise StorageArgumentException('No parameter given for the query.')
    if uuid and not is_valid_uuid(uuid):
        raise StorageArgumentException(
            'Invalid UUID for uuid: {0}'.format(uuid))
    # NOTE: locals().copy() deliberately snapshots the current locals
    # (self, uuid, path, metadata) as the raw query parameters; it must
    # run before any further local variables are introduced. self is
    # presumably stripped later by _prep_params -- confirm.
    params = locals().copy()

    if metadata:
        if not isinstance(metadata, dict):
            raise StorageArgumentException('The metadata needs to be provided'
                                           ' as a dictionary.')
        # Flatten the single {key: value} metadata entry into a direct
        # query parameter and drop the original 'metadata' key.
        key, value = next(iter(metadata.items()))
        params[key] = value
        del params['metadata']
    params = self._prep_params(params)
    return self._authenticated_request \
        .to_endpoint('entity/') \
        .with_params(params) \
        .return_body() \
        .get()
Retrieve entity by query param which can be either uuid/path/metadata. Args: uuid (str): The UUID of the requested entity. path (str): The path of the requested entity. metadata (dict): A dictionary of one metadata {key: value} of the requested entitity. Returns: The details of the entity, if found:: { u'content_type': u'plain/text', u'created_by': u'303447', u'created_on': u'2017-03-13T10:52:23.275087Z', u'description': u'', u'entity_type': u'file', u'modified_by': u'303447', u'modified_on': u'2017-03-13T10:52:23.275126Z', u'name': u'myfile', u'parent': u'3abd8742-d069-44cf-a66b-2370df74a682', u'uuid': u'e2c25c1b-f6a9-4cf6-b8d2-271e628a9a56' } Raises: StorageArgumentException: Invalid arguments StorageForbiddenException: Server response code 403 StorageNotFoundException: Server response code 404 StorageException: other 400-600 error codes
def get_value(self, name):
    """Get the value of a variable"""
    result = self.shellwidget.get_value(name)
    # Reset the temporary variable the value was saved in, to save memory.
    self.shellwidget._kernel_value = None
    return result
Get the value of a variable
def delete(ctx, schema, uuid, object_filter, yes):
    """Delete stored objects (CAUTION!)"""

    database = ctx.obj['db']

    if schema is None:
        log('No schema given. Read the help', lvl=warn)
        return

    model = database.objectmodels[schema]

    # Build the query from the most specific selector available.
    if uuid:
        query = {'uuid': uuid}
    elif object_filter:
        query = literal_eval(object_filter)
    else:
        query = None

    if query is None:
        count = model.count()
        matches = model.find()
    else:
        count = model.count(query)
        matches = model.find(query)

    if count == 0:
        log('No objects to delete found')
        return

    # Require explicit confirmation unless --yes was passed.
    if not yes and not _ask("Are you sure you want to delete %i objects" % count,
                            default=False, data_type="bool", show_hint=True):
        return

    for item in matches:
        item.delete()

    log('Done')
Delete stored objects (CAUTION!)
def write_graph(self, outfile, manifest):
    """Write the graph to a gpickle file. Before doing so, serialize and
    include all nodes in their corresponding graph entries.
    """
    # _updated_graph folds the manifest's node data into the graph copy.
    nx.write_gpickle(_updated_graph(self.graph, manifest), outfile)
Write the graph to a gpickle file. Before doing so, serialize and include all nodes in their corresponding graph entries.
def write_headers(self, fp, headers, mute=None):
    """
    Convenience function to output headers in a formatted fashion to a
    file-like fp, optionally muting any headers in the mute list.
    """
    if not headers:
        return
    muted = mute or []
    # Pad every title-cased name to the longest key so values line up.
    width = max(len(key) for key in headers) + 1
    template = '%%-%ds %%s\n' % width
    for key in sorted(headers):
        if key not in muted:
            fp.write(template % (key.title() + ':', headers[key]))
    fp.flush()
Convenience function to output headers in a formatted fashion to a file-like fp, optionally muting any headers in the mute list.