code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def _asciify_list(data):
    """Ascii-fy list values.

    Recursively converts every ``unicode`` item to an accent-stripped,
    UTF-8-encoded byte string, descending into nested lists and dicts.
    NOTE(review): Python 2 only -- relies on the ``unicode`` builtin and
    the sibling helpers ``_remove_accents`` / ``_asciify_dict``.
    """
    ret = []
    for item in data:
        if isinstance(item, unicode):
            # Strip accents first, then encode the text to bytes.
            item = _remove_accents(item)
            item = item.encode('utf-8')
        elif isinstance(item, list):
            item = _asciify_list(item)
        elif isinstance(item, dict):
            item = _asciify_dict(item)
        ret.append(item)
    return ret
Ascii-fies list values
def safeMkdir(p, permissions=permissions755):
    """Create directory ``p``, not raising if it already exists.

    Wrapper around ``os.mkdir``.  Unlike the original implementation,
    only the "already exists" error is swallowed; other ``OSError``s
    (permission denied, missing parent, ...) propagate, since in those
    cases the following ``os.chmod`` would fail confusingly anyway.

    Parameters
    ----------
    p : str
        Path of the directory to create.
    permissions : int
        Mode applied with ``os.chmod`` after creation.
    """
    import errno
    try:
        os.mkdir(p)
    except OSError as err:
        # Only ignore "directory already exists"; re-raise real failures.
        if err.errno != errno.EEXIST:
            raise
    os.chmod(p, permissions)
Wrapper around os.mkdir which does not raise an error if the directory exists.
def from_cli_single_ifo(opt, ifo, **kwargs):
    """Get the strain for a single ifo when using the multi-detector CLI."""
    opts_for_ifo = copy_opts_for_single_ifo(opt, ifo)
    return from_cli(opts_for_ifo, **kwargs)
Get the strain for a single ifo when using the multi-detector CLI
def get_stddevs(self, C, stddev_shape, stddev_types, sites):
    """Return standard deviations, with a different site term for
    measured vs. inferred vs30 sites.

    ``C`` is the coefficient table; outputs are broadcast to
    ``stddev_shape``.
    """
    stddevs = []
    tau = C["tau_event"]
    # Site-to-site term: observed-vs30 sites use sigma_s_obs, others sigma_s_inf.
    sigma_s = np.zeros(sites.vs30measured.shape, dtype=float)
    sigma_s[sites.vs30measured] += C["sigma_s_obs"]
    sigma_s[np.logical_not(sites.vs30measured)] += C["sigma_s_inf"]
    # Within-event term combines base phi0 with the site term.
    phi = np.sqrt(C["phi0"] ** 2.0 + sigma_s ** 2.)
    for stddev_type in stddev_types:
        assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
        if stddev_type == const.StdDev.TOTAL:
            stddevs.append(np.sqrt(tau ** 2. + phi ** 2.) + np.zeros(stddev_shape))
        elif stddev_type == const.StdDev.INTRA_EVENT:
            stddevs.append(phi + np.zeros(stddev_shape))
        elif stddev_type == const.StdDev.INTER_EVENT:
            stddevs.append(tau + np.zeros(stddev_shape))
    return stddevs
Returns the standard deviations, with different site standard deviation for inferred vs. observed vs30 sites.
def order_error(subtag, got, expected):
    """Raise a LanguageTagError indicating a subtag is out of order."""
    options = SUBTAG_TYPES[expected:]
    count = len(options)
    if count == 1:
        expect_str = options[0]
    elif count == 2:
        expect_str = '%s or %s' % (options[0], options[1])
    else:
        expect_str = '%s, or %s' % (', '.join(options[:-1]), options[-1])
    got_str = SUBTAG_TYPES[got]
    raise LanguageTagError("This %s subtag, %r, is out of place. "
                           "Expected %s." % (got_str, subtag, expect_str))
Output an error indicating that tags were out of order.
def reverse_post_order_sort_nodes(graph, nodes=None):
    """Sort a given set of nodes in reverse post ordering.

    :param networkx.DiGraph graph: A local transition graph of a function.
    :param iterable nodes: A collection of nodes to sort; if None, all
        graph nodes are returned in reverse post order.
    :return: Sorted nodes (a reversed iterator when nodes is None).
    :rtype: list
    """
    post_order = networkx.dfs_postorder_nodes(graph)
    if nodes is None:
        return reversed(list(post_order))
    # Map node address -> post-order index, then sort the requested nodes
    # by descending index, which yields reverse post order.
    addrs_to_index = {}
    for i, n in enumerate(post_order):
        addrs_to_index[n.addr] = i
    return sorted(nodes, key=lambda n: addrs_to_index[n.addr], reverse=True)
Sort a given set of nodes in reverse post ordering. :param networkx.DiGraph graph: A local transition graph of a function. :param iterable nodes: A collection of nodes to sort. :return: A list of sorted nodes. :rtype: list
def metadata(self):
    """Return an SQLAlchemy MetaData object, bound to the engine."""
    from sqlalchemy import MetaData
    meta = MetaData(bind=self.engine, schema=self._schema)
    meta.reflect(self.engine)
    return meta
Return an SqlAlchemy MetaData object, bound to the engine.
def _recv(self, line):
    """Invoked by the connection manager to process one incoming line.

    Only response lines (first char ``Lutron.OP_RESPONSE``) are handled;
    everything else is logged and dropped.
    """
    if line == '':
        return
    if line[0] != Lutron.OP_RESPONSE:
        _LOGGER.debug("ignoring %s" % line)
        return
    # Line layout after the op char: "<cmd_type>,<integration_id>,<args...>".
    parts = line[1:].split(',')
    cmd_type = parts[0]
    integration_id = int(parts[1])
    args = parts[2:]
    if cmd_type not in self._ids:
        _LOGGER.info("Unknown cmd %s (%s)" % (cmd_type, line))
        return
    ids = self._ids[cmd_type]
    if integration_id not in ids:
        _LOGGER.warning("Unknown id %d (%s)" % (integration_id, line))
        return
    # Dispatch to the object registered for this integration id.
    obj = ids[integration_id]
    handled = obj.handle_update(args)
Invoked by the connection manager to process incoming data.
def _get_enum_bins(configfile):
    """Get the number of energy bins in the SED.

    Parameters
    ----------
    configfile : str
        Fermipy configuration file.

    Returns
    -------
    int
        The number of energy bins.
    """
    # Context manager closes the config file promptly; the original left
    # the handle from open() dangling until garbage collection.
    with open(configfile) as fh:
        config = yaml.safe_load(fh)
    emin = config['selection']['emin']
    emax = config['selection']['emax']
    # Number of decades covered by [emin, emax].
    ndec = np.log10(emax) - np.log10(emin)
    binsperdec = config['binning']['binsperdec']
    return int(np.round(binsperdec * ndec))
Get the number of energy bins in the SED Parameters ---------- configfile : str Fermipy configuration file. Returns ------- nbins : int The number of energy bins
def holtWintersForecast(requestContext, seriesList):
    """Performs a Holt-Winters forecast using the series as input data.

    Data from one week previous to the series is used to bootstrap the
    initial forecast.
    """
    previewSeconds = 7 * 86400  # one week of bootstrap data
    # Re-evaluate the target over an extended window that includes the
    # bootstrap week before the requested start time.
    newContext = requestContext.copy()
    newContext['startTime'] = (requestContext['startTime'] -
                               timedelta(seconds=previewSeconds))
    previewList = evaluateTokens(newContext, requestContext['args'][0])
    results = []
    for series in previewList:
        analysis = holtWintersAnalysis(series)
        predictions = analysis['predictions']
        # Drop the bootstrap window so only the requested range remains.
        windowPoints = previewSeconds // predictions.step
        result = TimeSeries("holtWintersForecast(%s)" % series.name,
                            predictions.start + previewSeconds,
                            predictions.end, predictions.step,
                            predictions[windowPoints:])
        result.pathExpression = result.name
        results.append(result)
    return results
Performs a Holt-Winters forecast using the series as input data. Data from one week previous to the series is used to bootstrap the initial forecast.
def _get_upload_cmd(self, mirror=False): if mirror: dest_uri = self.s3_mirror_uri else: dest_uri = self.s3_version_uri cmd = 'aws s3 sync {} {} --delete --exact-timestamps --profile {}'.format(self.artifact_path, dest_uri, self.env) return cmd
Generate the S3 CLI upload command Args: mirror (bool): If true, uses a flat directory structure instead of nesting under a version. Returns: str: The full CLI command to run.
def delete_all_renditions(self):
    """Delete every stored rendition and reset the rendition dict."""
    if not self.renditions:
        return
    for rendition in self.renditions.values():
        default_storage.delete(rendition)
    self.renditions = {}
Delete all renditions and clear the rendition dict.
def rmdir(self, tid):
    """Directory removal.

    The ``YTActions`` object under `tid` is told to clean all its data,
    and is then deleted.

    Parameters
    ----------
    tid : str
        Path to file (converted to a tuple identifier by ``_pathdec``).
    """
    pt = self.PathType.get(tid)
    if pt is self.PathType.main:
        # Refuse to remove the filesystem root.
        raise FuseOSError(errno.EINVAL)
    elif pt is not self.PathType.subdir:
        raise FuseOSError(errno.ENOTDIR)
    try:
        self.searches[tid[0]].clean()
        del self.searches[tid[0]]
    except KeyError:
        raise FuseOSError(errno.ENOENT)
    return 0
Directory removal. ``YTActions`` object under `tid` is told to clean all data, and then it is deleted. Parameters ---------- tid : str Path to file. Original `path` argument is converted to tuple identifier by ``_pathdec`` decorator.
def master_primary_name(self) -> Optional[str]:
    """Return the name of the primary node of the master instance."""
    primary = self.master_replica.primaryName
    return self.master_replica.getNodeName(primary) if primary else None
Return the name of the primary node of the master instance
def build_deps(self):
    """Same as runtime_deps, but for build dependencies.

    Test and install requires are included if the package contains a
    test suite, to prevent %check phase crashes because of missing
    dependencies.

    Returns:
        Sorted list of build dependencies of the package.
    """
    # Copy the list: the original used `+=` / `.append` directly on
    # self.metadata['setup_requires'], mutating the shared metadata in
    # place on every call.
    build_requires = list(self.metadata['setup_requires'])
    if self.has_test_suite:
        build_requires += self.metadata['tests_require'] + self.metadata[
            'install_requires']
    if 'setuptools' not in build_requires:
        build_requires.append('setuptools')
    return sorted(self.name_convert_deps_list(deps_from_pyp_format(
        build_requires, runtime=False)))
Same as runtime_deps, but build dependencies. Test and install requires are included if package contains test suite to prevent %check phase crashes because of missing dependencies Returns: list of build dependencies of the package
def str_slice(arr, start=None, stop=None, step=None):
    """Slice substrings from each element in the Series or Index.

    Parameters
    ----------
    start, stop, step : int, optional
        Slice parameters, as for the builtin ``slice``.

    Returns
    -------
    Series or Index of object
        Sliced substrings, with NA values propagated by ``_na_map``.
    """
    slicer = slice(start, stop, step)

    def _slice_one(value):
        return value[slicer]

    return _na_map(_slice_one, arr)
Slice substrings from each element in the Series or Index. Parameters ---------- start : int, optional Start position for slice operation. stop : int, optional Stop position for slice operation. step : int, optional Step size for slice operation. Returns ------- Series or Index of object Series or Index from sliced substring from original string object. See Also -------- Series.str.slice_replace : Replace a slice with a string. Series.str.get : Return element at position. Equivalent to `Series.str.slice(start=i, stop=i+1)` with `i` being the position. Examples -------- >>> s = pd.Series(["koala", "fox", "chameleon"]) >>> s 0 koala 1 fox 2 chameleon dtype: object >>> s.str.slice(start=1) 0 oala 1 ox 2 hameleon dtype: object >>> s.str.slice(stop=2) 0 ko 1 fo 2 ch dtype: object >>> s.str.slice(step=2) 0 kaa 1 fx 2 caeen dtype: object >>> s.str.slice(start=0, stop=5, step=3) 0 kl 1 f 2 cm dtype: object Equivalent behaviour to: >>> s.str[0:5:3] 0 kl 1 f 2 cm dtype: object
def register_new_edge(edge_id, first_char_index, last_char_index, source_node_id, dest_node_id):
    """Factory method: create, publish and return a new Edge entity."""
    created_event = Edge.Created(
        originator_id=edge_id,
        first_char_index=first_char_index,
        last_char_index=last_char_index,
        source_node_id=source_node_id,
        dest_node_id=dest_node_id,
    )
    edge = Edge.mutate(event=created_event)
    publish(created_event)
    return edge
Factory method, registers new edge.
def register_properties(self, properties):
    """Register properties from a list (or mapping) of property dicts.

    Each dict is unpacked as keyword arguments into
    :meth:`register_property`, e.g.
    ``{'name': 'ref_wave', 'dtype': np.float32, 'default': 1.41e6}``.
    NOTE(review): Python 2 only -- uses ``dict.itervalues`` and the
    pre-3.10 ``collections.Mapping`` alias.
    """
    if isinstance(properties, collections.Mapping):
        # For a mapping, the property dicts are the values.
        properties = properties.itervalues()
    for prop in properties:
        self.register_property(**prop)
Register properties using a list defining the properties. The dictionary should itself contain dictionaries. i.e. .. code-block:: python D = [ { 'name':'ref_wave','dtype':np.float32, 'default':1.41e6 }, ] Parameters ---------- properties : A dictionary or list A dictionary or list of dictionaries describing properties
def check_regularizers(regularizers, keys):
    """Check the given regularizers dictionary.

    Verifies `regularizers` is dict-like, contains only keys from
    `keys`, and that its entries are callables (or nested dicts of
    callables).  Returns a shallow copy; None yields an empty dict.

    Raises:
        KeyError: If a regularizer is provided for a key not in `keys`.
        TypeError: If an entry is not callable or `regularizers` is not
            a Mapping.
    """
    if regularizers is None:
        return {}
    _assert_is_dictlike(regularizers, valid_keys=keys)
    keys = set(keys)
    extra_keys = set(regularizers) - keys
    if extra_keys:
        raise KeyError(
            "Invalid regularizer keys {}, regularizers can only "
            "be provided for {}".format(
                ", ".join("'{}'".format(key) for key in extra_keys),
                ", ".join("'{}'".format(key) for key in keys)))
    _check_nested_callables(regularizers, "Regularizer")
    return dict(regularizers)
Checks the given regularizers. This checks that `regularizers` is a dictionary that only contains keys in `keys`, and furthermore the entries in `regularizers` are functions or further dictionaries (the latter used, for example, in passing regularizers to modules inside modules) that must satisfy the same constraints. Args: regularizers: Dictionary of regularizers (allowing nested dictionaries) or None. keys: Iterable of valid keys for `regularizers`. Returns: Copy of checked dictionary of regularizers. If `regularizers=None`, an empty dictionary will be returned. Raises: KeyError: If an regularizers is provided for a key not in `keys`. TypeError: If a provided regularizer is not a callable function, or `regularizers` is not a Mapping.
def uninstall(plugin_name, *args):
    """Uninstall plugin packages.

    Plugin packages must have a directory with the same name as the
    package under ``<conda prefix>/share/microdrop/plugins/available/``.

    Parameters
    ----------
    plugin_name : str or list
        Plugin package(s) to uninstall.
    *args
        Extra arguments to pass to the Conda ``uninstall`` command.

    Returns
    -------
    dict
        Conda uninstallation log object (parsed from JSON output).
    """
    if isinstance(plugin_name, types.StringTypes):  # Python 2 string check
        plugin_name = [plugin_name]
    available_path = MICRODROP_CONDA_SHARE.joinpath('plugins', 'available')
    for name_i in plugin_name:
        # Package names use '-', Python module dirs use '_'.
        plugin_module_i = name_i.split('.')[-1].replace('-', '_')
        plugin_path_i = available_path.joinpath(plugin_module_i)
        if not _islinklike(plugin_path_i) and not plugin_path_i.isdir():
            raise IOError('Plugin `{}` not found in `{}`'
                          .format(name_i, available_path))
        else:
            logging.debug('[uninstall] Found plugin `%s`', plugin_path_i)
    conda_args = ['uninstall', '--json', '-y'] + list(args) + plugin_name
    uninstall_log_js = ch.conda_exec(*conda_args, verbose=False)
    _remove_broken_links()
    logger.debug('Uninstalled plugins: ```%s```', plugin_name)
    # Conda may emit several NUL-separated JSON documents; keep the last.
    return json.loads(uninstall_log_js.split('\x00')[-1])
Uninstall plugin packages. Plugin packages must have a directory with the same name as the package in the following directory: <conda prefix>/share/microdrop/plugins/available/ Parameters ---------- plugin_name : str or list Plugin package(s) to uninstall. *args Extra arguments to pass to Conda ``uninstall`` command. Returns ------- dict Conda uninstallation log object (from JSON Conda uninstall output).
def _fmadm_action_fmri(action, fmri):
    """Internal helper running ``fmadm <action> <fmri>``.

    Used by fmadm.repaired, fmadm.replaced and fmadm.flush.

    Returns True on success, otherwise a dict with an 'Error' key
    holding the command's stderr.
    """
    # The original also initialised an unused `ret = {}` and an
    # intermediate `retcode`/`result` pair; both removed.
    fmadm = _check_fmadm()
    cmd = '{cmd} {action} {fmri}'.format(
        cmd=fmadm,
        action=action,
        fmri=fmri
    )
    res = __salt__['cmd.run_all'](cmd)
    if res['retcode'] != 0:
        # Surface stderr to the caller instead of a bare failure flag.
        return {'Error': res['stderr']}
    return True
Internal function for fmadm.repaired, fmadm.replaced, fmadm.flush
def cdssequencethreads(self):
    """Extract the sequence of each gene for each strain.

    Spawns ``self.cpus`` daemon worker threads running
    :meth:`cdssequence`, queues every sample, and blocks until the
    queue is fully processed.
    """
    for i in range(self.cpus):
        threads = Thread(target=self.cdssequence, args=())
        threads.setDaemon(True)  # don't block interpreter shutdown
        threads.start()
    for sample in self.metadata.samples:
        # Fresh result dict the worker will populate for this sample.
        sample[self.analysistype].coresequence = dict()
        self.sequencequeue.put(sample)
    self.sequencequeue.join()
Extracts the sequence of each gene for each strain
def find_ctrlpts_surface(t_u, t_v, surf, **kwargs):
    """Find the control points involved in evaluating the surface point
    at the parameter pair (t_u, t_v).

    Based on a modified version of algorithm A3.5 SurfacePoint from
    The NURBS Book by Piegl & Tiller.

    :param t_u: parameter on the u-direction
    :param t_v: parameter on the v-direction
    :param surf: input surface
    :return: 2-dimensional control points array
    :rtype: list
    """
    span_func = kwargs.get('find_span_func', helpers.find_span_linear)
    span_u = span_func(surf.degree_u, surf.knotvector_u, surf.ctrlpts_size_u, t_u)
    span_v = span_func(surf.degree_v, surf.knotvector_v, surf.ctrlpts_size_v, t_v)
    # Index of the first control point affecting (t_u, t_v) in each direction.
    idx_u = span_u - surf.degree_u
    idx_v = span_v - surf.degree_v
    return [
        [surf.ctrlpts2d[idx_u + k][idx_v + l] for l in range(surf.degree_v + 1)]
        for k in range(surf.degree_u + 1)
    ]
Finds the control points involved in the evaluation of the surface point defined by the input parameter pair. This function uses a modified version of the algorithm *A3.5 SurfacePoint* from The NURBS Book by Piegl & Tiller. :param t_u: parameter on the u-direction :type t_u: float :param t_v: parameter on the v-direction :type t_v: float :param surf: input surface :type surf: abstract.Surface :return: 2-dimensional control points array :rtype: list
def to_ufo_paths(self, ufo_glyph, layer):
    """Draw .glyphs paths onto ufo_glyph's point pen."""
    pen = ufo_glyph.getPointPen()
    for path in layer.paths:
        nodes = list(path.nodes)
        for node in nodes:
            self.to_ufo_node_user_data(ufo_glyph, node)
        pen.beginPath()
        if not nodes:
            # Degenerate empty contour: emit an empty path.
            pen.endPath()
            continue
        if not path.closed:
            # Open contours start with an explicit "move" point.
            node = nodes.pop(0)
            assert node.type == "line", "Open path starts with off-curve points"
            pen.addPoint(tuple(node.position), segmentType="move")
        else:
            # Closed contour: rotate the last node to the front
            # (presumably the Glyphs source stores the start node last).
            nodes.insert(0, nodes.pop())
        for node in nodes:
            node_type = _to_ufo_node_type(node.type)
            pen.addPoint(
                tuple(node.position), segmentType=node_type, smooth=node.smooth
            )
        pen.endPath()
Draw .glyphs paths onto a pen.
def prepare_image(tarpath, outfolder, **kwargs):
    """Unpack the OS image stored at tarpath to outfolder.

    Prepare the unpacked image for use as a VR base image.
    """
    outfolder = path.Path(outfolder)
    untar(tarpath, outfolder, **kwargs)
    # If /etc/resolv.conf is a symlink, replace it with an empty regular
    # file.  NOTE(review): relies on path.py's remove() returning the
    # Path object so the write_text() chain works -- confirm against the
    # path library version in use.
    resolv_path = outfolder / 'etc' / 'resolv.conf'
    if resolv_path.islink():
        resolv_path.remove().write_text('', encoding='ascii')
Unpack the OS image stored at tarpath to outfolder. Prepare the unpacked image for use as a VR base image.
def redirect_to(self, url=None, parameters=None):
    """Redirect the user to the URL passed by parameter or to the URL
    defined in our SSO Request.

    :param url: The target URL to redirect the user
    :type url: string
    :param parameters: Extra parameters to be passed as part of the URL
    :type parameters: dict
    :returns: Redirection URL
    """
    # A fresh dict per call avoids the mutable-default-argument pitfall
    # the original `parameters={}` had.
    if parameters is None:
        parameters = {}
    if url is None and 'RelayState' in self.__request_data['get_data']:
        url = self.__request_data['get_data']['RelayState']
    return OneLogin_Saml2_Utils.redirect(url, parameters, request_data=self.__request_data)
Redirects the user to the URL passed by parameter or to the URL that we defined in our SSO Request. :param url: The target URL to redirect the user :type url: string :param parameters: Extra parameters to be passed as part of the URL :type parameters: dict :returns: Redirection URL
def format_diff_pyxb(a_pyxb, b_pyxb):
    """Create a diff between two PyXB objects.

    Args:
        a_pyxb: PyXB object
        b_pyxb: PyXB object

    Returns:
        str: `Differ`-style delta
    """
    a_lines = serialize_to_xml_str(a_pyxb).splitlines()
    b_lines = serialize_to_xml_str(b_pyxb).splitlines()
    return '\n'.join(difflib.ndiff(a_lines, b_lines))
Create a diff between two PyXB objects. Args: a_pyxb: PyXB object b_pyxb: PyXB object Returns: str : `Differ`-style delta
def iter_python_modules(tile):
    """Iterate over all python products in the given tile.

    Yields tuples of (module path, entry-point import string, entry
    point name) for each python product.
    """
    for product_type in tile.PYTHON_PRODUCTS:
        for product in tile.find_products(product_type):
            entry_point = ENTRY_POINT_MAP.get(product_type)
            if entry_point is None:
                raise BuildError("Found an unknown python product (%s) whose entrypoint could not be determined (%s)" % (product_type, product))
            # A product is either "path/to/module.py:object" or a bare path.
            if ':' in product:
                module, _, obj_name = product.rpartition(':')
            else:
                module = product
                obj_name = None
            if not os.path.exists(module):
                raise BuildError("Found a python product whose path did not exist: %s" % module)
            product_name = os.path.basename(module)
            if product_name.endswith(".py"):
                product_name = product_name[:-3]
            import_string = "{} = {}.{}".format(product_name, tile.support_distribution, product_name)
            if obj_name is not None:
                import_string += ":{}".format(obj_name)
            yield (module, import_string, entry_point)
Iterate over all python products in the given tile. This will yield tuples where the first entry is the path to the module containing the product the second entry is the appropriate import string to include in an entry point, and the third entry is the entry point name.
def _handle(self, nick, target, message, **kwargs):
    """Client callback entry point: schedule every route whose regex
    matches *message* as a task on the client's event loop."""
    for route_re, (handler, pattern) in self.routes.items():
        found = route_re.match(message)
        if found is None:
            continue
        self.client.loop.create_task(
            handler(nick, target, message, found, **kwargs))
Client callback entry point.
def gen(skipdirhtml=False):
    """Generate html and dirhtml output.

    Regenerates docs/changelog.rst from CHANGELOG.md via pandoc first;
    check_git_unchanged guards against clobbering local edits.
    """
    docs_changelog = 'docs/changelog.rst'
    check_git_unchanged(docs_changelog)
    pandoc('--from=markdown', '--to=rst', '--output=' + docs_changelog,
           'CHANGELOG.md')
    # cmd[args] & FG: presumably plumbum's foreground-execution syntax.
    if not skipdirhtml:
        sphinx_build['-b', 'dirhtml', '-W', '-E', 'docs',
                     'docs/_build/dirhtml'] & FG
    sphinx_build['-b', 'html', '-W', '-E', 'docs', 'docs/_build/html'] & FG
Generate html and dirhtml output.
def _isnan(self):
    """Return a boolean array flagging which values are NaN."""
    if not self._can_hold_na:
        # dtype cannot represent NaN, so nothing is ever missing.
        return np.zeros(len(self), dtype=np.bool_)
    return isna(self)
Return if each value is NaN.
def get_domain_smarthost(self, domainid, serverid):
    """Get a domain smarthost."""
    params = dict(domainid=domainid, serverid=serverid)
    return self.api_call(ENDPOINTS['domainsmarthosts']['get'], params)
Get a domain smarthost
def encode(text):
    """Encode text with the default encoding if it is Unicode.

    Byte strings are returned unchanged.  Python 2 only (``unicode``).
    """
    if isinstance(text, unicode):
        # 'ignore' drops characters the target encoding cannot represent.
        return text.encode(i18n.default_encoding, 'ignore')
    return text
Encode text with the default encoding if it is Unicode.
def row2table(soup, table, row):
    """Append one <tr> of <td> cells to *table*, one cell per item in *row*."""
    tr = Tag(soup, name="tr")
    table.append(tr)
    for cell_value in row:
        cell = Tag(soup, name="td")
        tr.append(cell)
        cell.append(cell_value)
Add a row to the table.
def set_pin_retries(ctx, pw_attempts, admin_pin, force):
    """Manage pin-retries.

    Sets the number of attempts available before locking for each PIN.
    PW_ATTEMPTS should be three integer values corresponding to the
    number of attempts for the PIN, Reset Code, and Admin PIN,
    respectively.
    """
    controller = ctx.obj['controller']
    # Per the warning below, devices below version 4.0.0 reset all PINs
    # when the retry counters are changed.
    resets_pins = controller.version < (4, 0, 0)
    if resets_pins:
        click.echo('WARNING: Setting PIN retries will reset the values for all '
                   '3 PINs!')
    # Short-circuit: skip the confirmation prompt when force is truthy.
    force or click.confirm('Set PIN retry counters to: {} {} {}?'.format(
        *pw_attempts), abort=True, err=True)
    controller.set_pin_retries(*(pw_attempts + (admin_pin.encode('utf8'),)))
    click.echo('PIN retries successfully set.')
    if resets_pins:
        click.echo('Default PINs are set.')
        echo_default_pins()
Manage pin-retries. Sets the number of attempts available before locking for each PIN. PW_ATTEMPTS should be three integer values corresponding to the number of attempts for the PIN, Reset Code, and Admin PIN, respectively.
def initialize(self, endog, freq_weights):
    """Initialize the binomial response variable.

    If `endog` is a 2d (successes, failures) array, stores the trial
    counts on ``self.n`` and returns the success proportion together
    with the counts.  Otherwise returns `endog` unchanged with unit
    weights.
    """
    is_two_column = endog.ndim > 1 and endog.shape[1] > 1
    if is_two_column:
        successes = endog[:, 0]
        self.n = endog.sum(1)  # trials = successes + failures
        return successes * 1. / self.n, self.n
    return endog, np.ones(endog.shape[0])
Initialize the response variable. Parameters ---------- endog : array Endogenous response variable Returns -------- If `endog` is binary, returns `endog` If `endog` is a 2d array, then the input is assumed to be in the format (successes, failures) and successes/(success + failures) is returned. And n is set to successes + failures.
def parsing(**kwargs):
    """Context manager overriding the default validator parsing rules
    for the enclosed code block.

    Keyword values of None leave the corresponding rule untouched.
    """
    from .validators import Object
    with _VALIDATOR_FACTORIES_LOCK:
        old_values = {}
        # Install the overrides, remembering the previous class attributes.
        for key, value in iteritems(kwargs):
            if value is not None:
                attr = key.upper()
                old_values[key] = getattr(Object, attr)
                setattr(Object, attr, value)
        try:
            yield
        finally:
            # Always restore the original rules, even on exception.
            for key, value in iteritems(kwargs):
                if value is not None:
                    setattr(Object, key.upper(), old_values[key])
Context manager for overriding the default validator parsing rules for the following code block.
def case_us2mc(x):
    """Convert underscore notation to mixedCase notation."""
    def _upper(match):
        return match.group(1).upper()

    return re.sub(r'_([a-z])', _upper, x)
underscore to mixed case notation
def _run_raw(self, cmd, ignore_errors=False): result = os.system(cmd) if result != 0: if ignore_errors: self.log(f"command ({cmd}) failed.") assert False, "_run_raw failed"
Runs command directly, skipping tmux interface
def compute_usage_requirements(self, subvariant):
    """Given the set of generated targets, and refined build properties,
    determine and set appropriate usage requirements on those targets.
    """
    assert isinstance(subvariant, virtual_target.Subvariant)
    rproperties = subvariant.build_properties()
    xusage_requirements = self.evaluate_requirements(
        self.usage_requirements_, rproperties, "added")
    # Expand dependency-type usage requirements into concrete properties.
    (r1, r2) = self.generate_dependency_properties(xusage_requirements.dependency(), rproperties)
    extra = r1 + r2
    result = property_set.create(xusage_requirements.non_dependency() + extra)
    # Propagate the sources' usage requirements, except PCH-related ones.
    properties = []
    for p in subvariant.sources_usage_requirements().all():
        if p.feature.name not in ('pch-header', 'pch-file'):
            properties.append(p)
    # When linking shared, drop <library> properties from the sources.
    if 'shared' in rproperties.get('link'):
        new_properties = []
        for p in properties:
            if p.feature.name != 'library':
                new_properties.append(p)
        properties = new_properties
    result = result.add_raw(properties)
    return result
Given the set of generated targets, and refined build properties, determines and sets appripriate usage requirements on those targets.
def _is_allowed_command(self, command): cmds = self._meta_data['allowed_commands'] if command not in self._meta_data['allowed_commands']: error_message = "The command value {0} does not exist. " \ "Valid commands are {1}".format(command, cmds) raise InvalidCommand(error_message)
Checking if the given command is allowed on a given endpoint.
def process_non_raw_string_token(self, prefix, string_body, start_row):
    """Check for bad escapes in a non-raw string.

    prefix: lowercase string of e.g. 'ur' string prefix markers.
    string_body: the un-parsed body of the string, without quote marks.
    start_row: integer line number in the source.
    """
    if 'u' not in prefix:
        return
    if string_body.find('\\0') != -1:
        self.add_message('null-byte-unicode-literal', line=start_row)
check for bad escapes in a non-raw string. prefix: lowercase string of eg 'ur' string prefix markers. string_body: the un-parsed body of the string, not including the quote marks. start_row: integer line number in the source.
def lee_yeast_ChIP(data_set='lee_yeast_ChIP'):
    """Yeast ChIP data from Lee et al.

    Downloads the data set on first use, then returns the binding
    matrix, the annotation columns and the transcription factor names.
    """
    if not data_available(data_set):
        download_data(data_set)
    from pandas import read_csv
    dir_path = os.path.join(data_path, data_set)
    filename = os.path.join(dir_path, 'binding_by_gene.tsv')
    S = read_csv(filename, header=1, index_col=0, sep='\t')
    # Columns without a real header ("Unnamed: n") are annotations; the
    # rest are transcription factors.
    transcription_factors = [col for col in S.columns if col[:7] != 'Unnamed']
    annotations = S[['Unnamed: 1', 'Unnamed: 2', 'Unnamed: 3']]
    S = S[transcription_factors]
    return data_details_return({'annotations': annotations, 'Y': S,
                                'transcription_factors': transcription_factors}, data_set)
Yeast ChIP data from Lee et al.
def isinstance_(x, A_tuple):
    """Native isinstance with the test for typing.Union overridden."""
    if is_union(A_tuple):
        # A Union matches if any of its member types matches.
        return any(isinstance_(x, member) for member in A_tuple.__args__)
    origin = getattr(A_tuple, '__origin__', None)
    if origin is not None:
        # Generic alias (e.g. List[int]) -> check against its origin.
        return isinstance(x, origin)
    return isinstance(x, A_tuple)
native isinstance_ with the test for typing.Union overridden
def assure_container(fnc):
    """Decorator ensuring a Container object is available whether a
    Container or a container name is passed."""
    @wraps(fnc)
    def _wrapped(self, container, *args, **kwargs):
        resolved = container if isinstance(container, Container) else self.get(container)
        return fnc(self, resolved, *args, **kwargs)
    return _wrapped
Assures that whether a Container or a name of a container is passed, a Container object is available.
def add_root_log(self, log_id):
    """Add a root log to the hierarchy.

    Delegates to the catalog session when one is configured, otherwise
    to the hierarchy session.
    """
    if self._catalog_session is None:
        return self._hierarchy_session.add_root(id_=log_id)
    return self._catalog_session.add_root_catalog(catalog_id=log_id)
Adds a root log. arg: log_id (osid.id.Id): the ``Id`` of a log raise: AlreadyExists - ``log_id`` is already in hierarchy raise: NotFound - ``log_id`` is not found raise: NullArgument - ``log_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def get_url(self, url_or_dict):
    """Return the reversed url given a string or dict; print errors to
    stderr if MENU_DEBUG is enabled.

    Returns None when reversing fails.  Python 2 only (``basestring``
    and the ``print >>`` statement).
    """
    if isinstance(url_or_dict, basestring):
        # A bare string is treated as the view name.
        url_or_dict = {'viewname': url_or_dict}
    try:
        return reverse(**url_or_dict)
    except NoReverseMatch:
        if MENU_DEBUG:
            print >>stderr,'Unable to reverse URL with kwargs %s' % url_or_dict
Returns the reversed url given a string or dict and prints errors if MENU_DEBUG is enabled
def get_comments_by_book(self, book_id):
    """Get the list of ``Comments`` associated with a ``Book``.

    Raises NotFound / NullArgument / OperationFailed / PermissionDenied
    per the commenting service contract.
    """
    mgr = self._get_provider_manager('COMMENTING', local=True)
    session = mgr.get_comment_lookup_session_for_book(book_id, proxy=self._proxy)
    session.use_isolated_book_view()
    return session.get_comments()
Gets the list of ``Comments`` associated with a ``Book``. arg: book_id (osid.id.Id): ``Id`` of a ``Book`` return: (osid.commenting.CommentList) - list of related comments raise: NotFound - ``book_id`` is not found raise: NullArgument - ``book_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def convert_from_ik_angles(self, joints):
    """Convert from IKPY internal representation to poppy representation.

    `joints` must contain two extra entries (first and last) beyond the
    robot's motors; these dummy links are dropped here.
    """
    if len(joints) != len(self.motors) + 2:
        raise ValueError('Incompatible data, len(joints) should be {}!'.format(len(self.motors) + 2))
    # Strip the dummy first/last links and convert radians -> degrees.
    joints = [rad2deg(j) for j in joints[1:-1]]
    # NOTE(review): in-place multiply with self._reversed -- presumably a
    # numpy sign array flipping reversed joints; confirm its type.
    joints *= self._reversed
    # Apply each motor's rotation direction and zero offset.
    return [(j * (1 if m.direct else -1)) - m.offset for j, m in zip(joints, self.motors)]
Convert from IKPY internal representation to poppy representation.
def create_feature_layer(ds, sql, name="layer"):
    """Create a feature layer object from *ds*, filtered by *sql*."""
    if arcpyFound == False:
        raise Exception("ArcPy is required to use this function")
    layer_result = arcpy.MakeFeatureLayer_management(in_features=ds,
                                                    out_layer=name,
                                                    where_clause=sql)
    return layer_result[0]
creates a feature layer object
def list_machines(self):
    """Retrieve the machines in the fleet cluster.

    Yields:
        Machine: The next machine in the cluster.

    Raises:
        fleet.v1.errors.APIError: Fleet returned a response code >= 400.
    """
    for page in self._request('Machines.List'):
        for machine_data in page.get('machines', []):
            yield Machine(data=machine_data)
Retrieve a list of machines in the fleet cluster Yields: Machine: The next machine in the cluster Raises: fleet.v1.errors.APIError: Fleet returned a response code >= 400
def _load_all_bookmarks():
    """Load all bookmarks from config, dropping entries whose file no
    longer exists."""
    slots = CONF.get('editor', 'bookmarks', {})
    stale = [num for num, entry in slots.items() if not osp.isfile(entry[0])]
    for num in stale:
        slots.pop(num)
    return slots
Load all bookmarks from config.
def AuthenticatedOrRedirect(invocation):
    """Middleware class factory that redirects to `invocation` if the
    user is not logged in.  Otherwise the request passes through
    unchanged.
    """
    class AuthenticatedOrRedirect(GiottoInputMiddleware):
        def http(self, request):
            if request.user:
                return request
            return Redirection(invocation)

        def cmd(self, request):
            if request.user:
                return request
            return Redirection(invocation)

    return AuthenticatedOrRedirect
Middleware class factory that redirects if the user is not logged in. Otherwise, nothing is effected.
def hasannotation(self, Class, set=None):
    """Return the number of matching annotations (0 when none exist).

    See :meth:`AllowTokenAnnotation.annotations` for a description of
    the parameters.
    """
    return sum(1 for _ in self.select(Class, set, True, default_ignore_annotations))
Returns an integer indicating whether such an annotation exists, and if so, how many. See :meth:`AllowTokenAnnotation.annotations` for a description of the parameters.
def _on_io_events(self, fd=None, _events=None): if fd not in self._connections: LOGGER.warning('Received IO event for non-existing connection') return self._poll_connection(fd)
Invoked by Tornado's IOLoop when there are events for the fd :param int fd: The file descriptor for the event :param int _events: The events raised
def after_this_request(func: Callable) -> Callable:
    """Schedule func to be called after the current request.

    Useful when you want an after-request function for a specific route
    or circumstance only.  Returns func unchanged so this can be used
    as a decorator.
    """
    _request_ctx_stack.top._after_request_functions.append(func)
    return func
Schedule the func to be called after the current request. This is useful in situations whereby you want an after request function for a specific route or circumstance only, for example, .. code-block:: python def index(): @after_this_request def set_cookie(response): response.set_cookie('special', 'value') return response ...
def get_area_info(bbox, date_interval, maxcc=None):
    """Get information about all images from the specified area and
    time range.

    :param bbox: bounding box of requested area
    :param date_interval: pair of ISO8601 time strings (start, end)
    :param maxcc: maximum cloud-coverage fraction in [0, 1], or None
        for no filtering
    :return: list of dictionaries from the Opensearch REST service
    """
    result_list = search_iter(bbox=bbox, start_date=date_interval[0],
                              end_date=date_interval[1])
    # `is not None` so that maxcc=0.0 (zero clouds allowed) still
    # filters; the old truthiness test silently skipped it.
    if maxcc is not None:
        return reduce_by_maxcc(result_list, maxcc)
    return result_list
Get information about all images from specified area and time range :param bbox: bounding box of requested area :type bbox: geometry.BBox :param date_interval: a pair of time strings in ISO8601 format :type date_interval: tuple(str) :param maxcc: filter images by maximum percentage of cloud coverage :type maxcc: float in range [0, 1] or None :return: list of dictionaries containing info provided by Opensearch REST service :rtype: list(dict)
def unique_event_labels(event_list):
    """Find the unique event labels.

    Parameters
    ----------
    event_list : list or dcase_util.containers.MetaDataContainer
        A list containing event dicts.

    Returns
    -------
    list
        Unique labels in alphabetical order.
    """
    if isinstance(event_list, dcase_util.containers.MetaDataContainer):
        return event_list.unique_event_labels

    labels = []
    for item in event_list:
        if 'event_label' in item and item['event_label'] not in labels:
            labels.append(item['event_label'])
    labels.sort()
    return labels
Find the unique event labels Parameters ---------- event_list : list or dcase_util.containers.MetaDataContainer A list containing event dicts Returns ------- list Unique labels in alphabetical order
def get_sections_by_curriculum_and_term(curriculum, term):
    """Return a list of uw_sws.models.SectionReference objects for the
    passed curriculum and term."""
    query = urlencode([
        ("curriculum_abbreviation", curriculum.label,),
        ("quarter", term.quarter.lower(),),
        ("year", term.year,),
    ])
    url = "{}?{}".format(section_res_url_prefix, query)
    return _json_to_sectionref(get_resource(url))
Returns a list of uw_sws.models.SectionReference objects for the passed curriculum and term.
def from_inline(cls: Type[ESUserEndpointType], inline: str) -> ESUserEndpointType:
    """Return an ESUserEndpoint instance parsed from an endpoint string.

    :param inline: Endpoint string
    :return: New endpoint instance
    """
    match = ESUserEndpoint.re_inline.match(inline)
    if match is None:
        raise MalformedDocumentError(ESUserEndpoint.API)
    return cls(match.group(1), int(match.group(2)))
Return ESUserEndpoint instance from endpoint string :param inline: Endpoint string :return:
def to_json_file(self, path, file_name=None):
    """Write output to a JSON file with the given file name.

    Falls back to the current working directory when *path* is empty or
    not an existing directory.
    """
    target = path if bool(path) and os.path.isdir(path) else os.getcwd()
    self.write_to_json(target, file_name)
Writes output to a JSON file with the given file name
def session(self) -> SessionT:
    """Return a new session, notifying the new_session event."""
    new_session = self._session_class()(connection_pool=self._connection_pool)
    self.event_dispatcher.notify(self.ClientEvent.new_session, new_session)
    return new_session
Return a new session.
def currencies(self):
    """Get all currency codes with non-zero amounts."""
    codes = []
    for money in self.monies():
        if money.amount:
            codes.append(money.currency.code)
    return codes
Get all currencies with non-zero values
def get_language(query: str) -> str:
    """Work out the highlight.js language of a file name or shebang.

    Returns an empty string if no known language matches.
    """
    lowered = query.lower()
    for language in LANGUAGES:
        if lowered.endswith(language):
            return language
    return ''
Tries to work out the highlight.js language of a given file name or shebang. Returns an empty string if none match.
def ci_plot(x, arr, conf=0.95, ax=None, line_kwargs=None, fill_kwargs=None):
    """Plot the mean and confidence band for `arr` on the given axes.

    Parameters
    ----------
    x : 1-D array-like
        x values for the plot.
    arr : 2-D array-like
        The array to summarize (via the module's ``ci`` helper).
    conf : float
        Confidence interval to use, in (0.5, 1].
    ax : matplotlib.Axes, optional
        Axes to plot on; a new figure/axes is created when None.
    line_kwargs, fill_kwargs : dict, optional
        Extra kwargs for ``Axes.plot`` / ``Axes.fill_between``.
    """
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    line_kwargs = line_kwargs or {}
    fill_kwargs = fill_kwargs or {}
    # ci() yields the mean plus lower/upper band bounds.
    m, lo, hi = ci(arr, conf)
    ax.plot(x, m, **line_kwargs)
    ax.fill_between(x, lo, hi, **fill_kwargs)
    return ax
Plots the mean and 95% ci for the given array on the given axes Parameters ---------- x : 1-D array-like x values for the plot arr : 2-D array-like The array to calculate mean and std for conf : float [.5 - 1] Confidence interval to use ax : matplotlib.Axes The axes object on which to plot line_kwargs : dict Additional kwargs passed to Axes.plot fill_kwargs : dict Additiona kwargs passed to Axes.fill_between
def sync(self, recursive=False):
    """Sync this item's information to both the tree and the view."""
    self.syncTree(recursive=recursive)
    self.syncView(recursive=recursive)
Syncs the information from this item to the tree and view.
def serialize(self, value, state):
    """Serialize *value* into a new element tree and return its root.

    Returns the start element; serialization is written into the end
    element of the freshly created element path.
    """
    start_element, end_element = _element_path_create_new(self.element_path)
    self._serialize(end_element, value, state)
    return start_element
Serialize the value into a new element object and return the element. If the omit_empty option was specified and the value is falsey, then this will return None.
def statement(self) -> Statement:
    """Parse one YANG statement.

    Raises:
        EndOfInput: If past the end of input.
        UnexpectedInput: If no syntactically correct statement is found.
    """
    pref, kw = self.keyword()
    pres = self.opt_separator()
    next = self.peek()
    if next == ";":
        # "keyword;" -- no argument, no substatements.
        arg = None
        sub = False
    elif next == "{":
        # "keyword { ... }" -- no argument, has substatements.
        arg = None
        sub = True
    elif not pres:
        # An argument must be preceded by a separator.
        raise UnexpectedInput(self, "separator")
    else:
        # Parse the argument; self._arg is filled as a side effect and
        # argument() reports whether a substatement block follows.
        self._arg = ""
        sub = self.argument()
        arg = self._arg
    self.offset += 1
    res = Statement(kw, arg, pref=pref)
    if sub:
        res.substatements = self.substatements()
        for sub in res.substatements:
            sub.superstmt = res
    return res
Parse YANG statement. Raises: EndOfInput: If past the end of input. UnexpectedInput: If no syntactically correct statement is found.
def update_masters(self):
    """Push this object's `medium2long` mappings up the master chain,
    recursively updating each master in turn."""
    if self.master is None:
        return
    self.master._medium2long.update(self._medium2long)
    self.master.update_masters()
Update all `master` |Substituter| objects. If a |Substituter| object is passed to the constructor of another |Substituter| object, they become `master` and `slave`: >>> from hydpy.core.autodoctools import Substituter >>> sub1 = Substituter() >>> from hydpy.core import devicetools >>> sub1.add_module(devicetools) >>> sub2 = Substituter(sub1) >>> sub3 = Substituter(sub2) >>> sub3.master.master is sub1 True >>> sub2 in sub1.slaves True During initialization, all mappings handled by the master object are passed to its new slave: >>> sub3.find('Node|') |Node| :class:`~hydpy.core.devicetools.Node` |devicetools.Node| :class:`~hydpy.core.devicetools.Node` Updating a slave, does not affect its master directly: >>> from hydpy.core import hydpytools >>> sub3.add_module(hydpytools) >>> sub3.find('HydPy|') |HydPy| :class:`~hydpy.core.hydpytools.HydPy` |hydpytools.HydPy| :class:`~hydpy.core.hydpytools.HydPy` >>> sub2.find('HydPy|') Through calling |Substituter.update_masters|, the `medium2long` mappings are passed from the slave to its master: >>> sub3.update_masters() >>> sub2.find('HydPy|') |hydpytools.HydPy| :class:`~hydpy.core.hydpytools.HydPy` Then each master object updates its own master object also: >>> sub1.find('HydPy|') |hydpytools.HydPy| :class:`~hydpy.core.hydpytools.HydPy` In reverse, subsequent updates of master objects to not affect their slaves directly: >>> from hydpy.core import masktools >>> sub1.add_module(masktools) >>> sub1.find('Masks|') |Masks| :class:`~hydpy.core.masktools.Masks` |masktools.Masks| :class:`~hydpy.core.masktools.Masks` >>> sub2.find('Masks|') Through calling |Substituter.update_slaves|, the `medium2long` mappings are passed the master to all of its slaves: >>> sub1.update_slaves() >>> sub2.find('Masks|') |masktools.Masks| :class:`~hydpy.core.masktools.Masks` >>> sub3.find('Masks|') |masktools.Masks| :class:`~hydpy.core.masktools.Masks`
def window_size(self, window_size):
    """Set the render window size.

    Runs the base-class property setter first, then resizes the Qt
    application window to match.
    """
    # Invoke the parent property's setter explicitly on this instance.
    BasePlotter.window_size.fset(self, window_size)
    width, height = window_size
    self.app_window.setBaseSize(width, height)
Set the render window size.
def bounding_polygon(self):
    """Return the bounding-box polygon for this tile.

    :return: `pywom.utils.geo.Polygon` instance covering the tile extent.
    """
    lon_left, lat_bottom, lon_right, lat_top = Tile.tile_coords_to_bbox(
        self.x, self.y, self.zoom)
    # NOTE: removed a leftover debug print of the bbox coordinates.
    # The ring is closed by repeating the first vertex, as GeoJSON requires.
    return Polygon([[[lon_left, lat_top],
                     [lon_right, lat_top],
                     [lon_right, lat_bottom],
                     [lon_left, lat_bottom],
                     [lon_left, lat_top]]])
Returns the bounding box polygon for this tile :return: `pywom.utils.geo.Polygon` instance
def connection_key(self):
    """Return the index key ("host:namespace:username") used to cache the
    sampler connection."""
    parts = (self.host, self.namespace, self.username)
    return ":".join("{}".format(part) for part in parts)
Return an index key used to cache the sampler connection.
def split_query(qs, keep_blank_values=False):
    """Split a query string into a list of ``(name, value)`` pairs.

    When ``keep_blank_values`` is true, a pair with an equals sign but no
    value yields ``''`` and a bare name (no equals sign) yields ``None``;
    otherwise empty values are dropped entirely.
    """
    result = []
    for chunk in qs.split('&'):
        name, eq, value = chunk.partition('=')
        if keep_blank_values:
            # Distinguish "a=" (empty string) from bare "b" (None).
            result.append((name, value if eq else None))
        elif value:
            result.append((name, value))
    return result
Split the query string. Note for empty values: If an equal sign (``=``) is present, the value will be an empty string (``''``). Otherwise, the value will be ``None``:: >>> list(split_query('a=&b', keep_blank_values=True)) [('a', ''), ('b', None)] No processing is done on the actual values.
def get_relation_routes(self, viewset):
    """Generate routes that serve relational objects.

    Adds one sub-URL per link field exposed by the viewset's serializer,
    each mapped to the viewset's ``list_related`` method. Returns an empty
    list when the viewset has no serializer or no ``list_related`` handler.
    """
    if not hasattr(viewset, 'serializer_class'):
        return []
    if not hasattr(viewset, 'list_related'):
        return []
    serializer = viewset.serializer_class()
    link_fields = getattr(serializer, 'get_link_fields', lambda: [])()
    name_template = '{basename}-{methodnamehyphen}'
    routes = []
    for field_name, _field in six.iteritems(link_fields):
        pattern = (
            r'^{prefix}/{lookup}/(?P<field_name>%s)'
            '{trailing_slash}$' % field_name
        )
        routes.append(Route(
            url=pattern,
            mapping={'get': 'list_related'},
            name=replace_methodname(name_template, field_name),
            initkwargs={},
        ))
    return routes
Generate routes to serve relational objects. This method will add a sub-URL for each relational field. e.g. A viewset for the following serializer: class UserSerializer(..): events = DynamicRelationField(EventSerializer, many=True) groups = DynamicRelationField(GroupSerializer, many=True) location = DynamicRelationField(LocationSerializer) will have the following URLs added: /users/<pk>/events/ /users/<pk>/groups/ /users/<pk>/location/
def interpolate(self, factor, minGlyph, maxGlyph, round=True, suppressError=True):
    """Interpolate this glyph's contents at ``factor`` between ``minGlyph``
    (location 0.0) and ``maxGlyph`` (location 1.0).

    ``factor`` may be a number or an (x, y) tuple. When ``round`` is true
    the result is rounded to integers; when ``suppressError`` is true,
    incompatibility errors are silently ignored.
    """
    factor = normalizers.normalizeInterpolationFactor(factor)
    # Both endpoints must be BaseGlyph instances; check them uniformly.
    for other in (minGlyph, maxGlyph):
        if not isinstance(other, BaseGlyph):
            raise TypeError(("Interpolation to an instance of %r can not be "
                             "performed from an instance of %r.")
                            % (self.__class__.__name__,
                               other.__class__.__name__))
    round = normalizers.normalizeBoolean(round)
    suppressError = normalizers.normalizeBoolean(suppressError)
    self._interpolate(factor, minGlyph, maxGlyph, round=round,
                      suppressError=suppressError)
Interpolate the contents of this glyph at location ``factor`` in a linear interpolation between ``minGlyph`` and ``maxGlyph``. >>> glyph.interpolate(0.5, otherGlyph1, otherGlyph2) ``factor`` may be a :ref:`type-int-float` or a tuple containing two :ref:`type-int-float` values representing x and y factors. >>> glyph.interpolate((0.5, 1.0), otherGlyph1, otherGlyph2) ``minGlyph`` must be a :class:`BaseGlyph` and will be located at 0.0 in the interpolation range. ``maxGlyph`` must be a :class:`BaseGlyph` and will be located at 1.0 in the interpolation range. If ``round`` is ``True``, the contents of the glyph will be rounded to integers after the interpolation is performed. >>> glyph.interpolate(0.5, otherGlyph1, otherGlyph2, round=True) This method assumes that ``minGlyph`` and ``maxGlyph`` are completely compatible with each other for interpolation. If not, any errors encountered will raise a :class:`FontPartsError`. If ``suppressError`` is ``True``, no exception will be raised and errors will be silently ignored.
def copy_signal(signal_glob, source_db, target_db):
    """Copy signals matching *signal_glob* from the source CAN matrix into
    the target CAN matrix.

    Signals are added to the target at top level, without a frame.

    :param signal_glob: Signal glob pattern
    :param source_db: Source CAN matrix
    :param target_db: Destination CAN matrix
    """
    matching = (signal
                for frame in source_db.frames
                for signal in frame.glob_signals(signal_glob))
    for signal in matching:
        target_db.add_signal(signal)
Copy Signals identified by name from source CAN matrix to target CAN matrix. In target CanMatrix the signal is put without frame, just on top level. :param signal_glob: Signal glob pattern :param source_db: Source CAN matrix :param target_db: Destination CAN matrix
def to_bool(val):
    """Return the logical (boolean) value of *val*.

    None -> False; bools pass through; strings are true only for
    'yes'/'1'/'true' (case-insensitive); integers are true when positive;
    unhashable containers fall back to their truthiness; anything else
    is False.
    """
    # ``collections.Hashable`` was removed in Python 3.10; import the ABC
    # from its canonical location, falling back for very old interpreters.
    try:
        from collections.abc import Hashable
    except ImportError:  # pragma: no cover - Python 2
        from collections import Hashable

    if val is None:
        return False
    if isinstance(val, bool):
        return val
    if isinstance(val, (six.text_type, six.string_types)):
        return val.lower() in ('yes', '1', 'true')
    if isinstance(val, six.integer_types):
        return val > 0
    if not isinstance(val, Hashable):
        # Unhashable values (lists, dicts, sets): truthiness decides.
        return bool(val)
    return False
Returns the logical value. .. code-block:: jinja {{ 'yes' | to_bool }} will be rendered as: .. code-block:: text True
def visit_Tuple(self, node):
    """A tuple is abstracted as an ordered container of its values.

    Each element's aliases are wrapped in a ``ContainerOf`` carrying the
    element's index; an empty tuple is recorded with no aliases.
    """
    if not node.elts:
        return self.add(node, None)
    aliases = set()
    for index, element in enumerate(node.elts):
        for alias in self.visit(element):
            aliases.add(ContainerOf(alias, index))
    return self.add(node, aliases)
A tuple is abstracted as an ordered container of its values >>> from pythran import passmanager >>> pm = passmanager.PassManager('demo') >>> module = ast.parse('def foo(a, b): return a, b') >>> result = pm.gather(Aliases, module) >>> Aliases.dump(result, filter=ast.Tuple) (a, b) => ['|[0]=a|', '|[1]=b|'] where the |[i]=id| notation means something that may contain ``id`` at index ``i``.
def render_to_string(template_name, context=None, request=None, using=None):
    """Load a template and render it with *context*, returning a string.

    ``template_name`` may be a single name or a list/tuple of candidate
    names, in which case the first template found is used.
    """
    wants_selection = isinstance(template_name, (list, tuple))
    loader = select_template if wants_selection else get_template
    template = loader(template_name, using=using)
    return template.render(context, request)
Loads a template and renders it with a context. Returns a string. template_name may be a string or a list of strings.
def connect(self, address, rack, slot, tcpport=102):
    """Connect to an S7 server.

    :param address: IP address of server
    :param rack: rack on server
    :param slot: slot on server.
    """
    logger.info("connecting to %s:%s rack %s slot %s" % (address, tcpport, rack, slot))
    self.set_param(snap7.snap7types.RemotePort, tcpport)
    encoded_address = c_char_p(six.b(address))
    return self.library.Cli_ConnectTo(
        self.pointer, encoded_address, c_int(rack), c_int(slot))
Connect to a S7 server. :param address: IP address of server :param rack: rack on server :param slot: slot on server.
def ResolveForCreate(self, document):
    """Resolve the collection for creating *document* based on its
    partition key.

    :param dict document: The document to be created.
    :return: Collection self link or name-based link that should handle
        the Create operation.
    :rtype: str
    :raises ValueError: if *document* is None or no range contains its key.
    """
    if document is None:
        raise ValueError("document is None.")
    key = self.partition_key_extractor(document)
    target_range = self._GetContainingRange(key)
    if target_range is not None:
        return self.partition_map.get(target_range)
    raise ValueError("A containing range for " + str(key) +
                     " doesn't exist in the partition map.")
Resolves the collection for creating the document based on the partition key. :param dict document: The document to be created. :return: Collection Self link or Name based link which should handle the Create operation. :rtype: str
def safe_for_serialization(value):
    """Transform a value in preparation for serializing as JSON.

    Strings pass through unchanged; mappings and iterables have their
    entries made safe recursively; all other values are stringified,
    with a fallback marker if stringification fails.
    """
    # ``collections.Iterable`` was removed in Python 3.10; import the ABC
    # from its canonical location, falling back for very old interpreters.
    try:
        from collections.abc import Iterable
    except ImportError:  # pragma: no cover - Python 2
        from collections import Iterable

    if isinstance(value, six.string_types):
        return value
    if isinstance(value, dict):
        return {
            safe_for_serialization(key): safe_for_serialization(val)
            for key, val in six.iteritems(value)
        }
    if isinstance(value, Iterable):
        return list(map(safe_for_serialization, value))
    try:
        return six.text_type(value)
    except Exception:
        return '[__unicode__ failed]'
Transform a value in preparation for serializing as json no-op for strings, mappings and iterables have their entries made safe, and all other values are stringified, with a fallback value if that fails
def WriteGraphSeries(graph_series, label, token = None):
    """Writes graph series for a particular client label to the DB.

    Writes to the relational DB when enabled, and additionally to the
    legacy AFF4 datastore when the legacy path is still in use.

    Args:
      graph_series: A series of rdf_stats.Graphs containing aggregated data
        for a particular report-type.
      label: Client label by which data in the graph_series was aggregated.
      token: ACL token to use for writing to the legacy (non-relational)
        datastore.

    Raises:
      AFF4AttributeTypeError: If, when writing to the legacy DB, an
        unexpected report-data type is encountered.
    """
    if data_store.RelationalDBEnabled():
        data_store.REL_DB.WriteClientGraphSeries(graph_series, label)
    if _ShouldUseLegacyDatastore():
        # Instantiate (note the trailing call) the AFF4 attribute class
        # registered for this report type.
        aff4_attr = _GetAFF4AttributeForReportType(graph_series.report_type)()
        if isinstance(aff4_attr, rdf_stats.GraphSeries):
            # Multi-graph attribute: append every graph in the series.
            for graph in graph_series.graphs:
                aff4_attr.Append(graph)
        elif isinstance(aff4_attr, rdf_stats.Graph):
            # Single-graph attribute: copy the samples of the first (and
            # presumably only — TODO confirm) graph in the series.
            for sample in graph_series.graphs[0]:
                aff4_attr.Append(x_value=sample.x_value, y_value=sample.y_value)
        else:
            raise AFF4AttributeTypeError(aff4_attr.__class__)
        # Persist the populated attribute on the per-label fleet-stats object.
        with aff4.FACTORY.Create(
            GetAFF4ClientReportsURN().Add(label),
            aff4_type=aff4_stats.ClientFleetStats,
            mode="w",
            token=token) as stats_for_label:
            stats_for_label.AddAttribute(aff4_attr)
Writes graph series for a particular client label to the DB. Args: graph_series: A series of rdf_stats.Graphs containing aggregated data for a particular report-type. label: Client label by which data in the graph_series was aggregated. token: ACL token to use for writing to the legacy (non-relational) datastore. Raises: AFF4AttributeTypeError: If, when writing to the legacy DB, an unexpected report-data type is encountered.
def _row_from_frevent(frevent, columns, selection):
    """Generate a table row from an FrEvent.

    Filtering (``selection``) is done here, rather than in the table
    reader, to enable filtering on columns that aren't being returned.
    Returns ``None`` when the event fails any selection test.
    """
    params = dict(frevent.GetParam())
    params.update({
        'time': float(LIGOTimeGPS(*frevent.GetGTime())),
        'amplitude': frevent.GetAmplitude(),
        'probability': frevent.GetProbability(),
        'timeBefore': frevent.GetTimeBefore(),
        'timeAfter': frevent.GetTimeAfter(),
        'comment': frevent.GetComment(),
    })
    # Reject the event as soon as any (column, operator, threshold) fails.
    for name, operator_, threshold in selection:
        if not operator_(params[name], threshold):
            return None
    return [params[name] for name in columns]
Generate a table row from an FrEvent Filtering (``selection``) is done here, rather than in the table reader, to enable filtering on columns that aren't being returned.
def git_ls_remote(self, uri, ref):
    """Determine the latest commit id for a given ref.

    Args:
        uri (string): git URI
        ref (string): git ref

    Returns:
        str: A commit id

    Raises:
        ValueError: if the ref is not found in the repository.
    """
    logger.debug("Invoking git to retrieve commit id for repo %s...", uri)
    output = subprocess.check_output(['git', 'ls-remote', uri, ref])
    # A matching ref produces "<sha>\t<refname>"; no tab means no match.
    if b"\t" not in output:
        raise ValueError("Ref \"%s\" not found for repo %s." % (ref, uri))
    commit_id = output.split(b"\t", 1)[0]
    logger.debug("Matching commit id found: %s", commit_id)
    return commit_id
Determine the latest commit id for a given ref. Args: uri (string): git URI ref (string): git ref Returns: str: A commit id
def _aggregations(search, definitions): if definitions: for name, agg in definitions.items(): search.aggs[name] = agg if not callable(agg) else agg() return search
Add aggregations to query.
def init_nvidia(self):
    """Init the NVIDIA API.

    Returns:
        bool: True if pynvml was imported and initialized successfully.
    """
    if import_error_tag:
        # pynvml failed to import at module load time. Return early:
        # the original fell through to the try-block below, which only
        # "worked" because the NameError on pynvml was swallowed by the
        # broad except clause.
        self.nvml_ready = False
        return self.nvml_ready
    try:
        pynvml.nvmlInit()
        self.device_handles = get_device_handles()
        self.nvml_ready = True
    except Exception:
        logger.debug("pynvml could not be initialized.")
        self.nvml_ready = False
    return self.nvml_ready
Init the NVIDIA API.
def set_completion(self, completion):
    """Sets the completion percentage.

    arg:    completion (decimal): the completion percentage
    raise:  InvalidArgument - ``completion`` is invalid
    raise:  NoAccess - ``Metadata.isReadOnly()`` is ``true``
    *compliance: mandatory -- This method must be implemented.*
    """
    if self.get_completion_metadata().is_read_only():
        raise errors.NoAccess()
    try:
        value = float(completion)
    except ValueError:
        raise errors.InvalidArgument()
    if not self._is_valid_decimal(value, self.get_completion_metadata()):
        raise errors.InvalidArgument()
    self._my_map['completion'] = value
Sets the completion percentage. arg: completion (decimal): the completion percentage raise: InvalidArgument - ``completion`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.*
def StartHunt(hunt_id):
    """Starts a hunt with a given id.

    Only a hunt in the PAUSED state can be started. The hunt object is
    transitioned to STARTED (recording start time and the current client
    count), re-read, and then scheduled according to its hunt type.

    Args:
      hunt_id: Id of the hunt to start.

    Returns:
      The freshly re-read hunt object after the state update.

    Raises:
      OnlyPausedHuntCanBeStartedError: if the hunt is not PAUSED.
      UnknownHuntTypeError: if the hunt's type is neither STANDARD nor
        VARIABLE.
    """
    hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
    # Number of flows already associated with the hunt; recorded as the
    # client count at start time.
    num_hunt_clients = data_store.REL_DB.CountHuntFlows(hunt_id)
    if hunt_obj.hunt_state != hunt_obj.HuntState.PAUSED:
        raise OnlyPausedHuntCanBeStartedError(hunt_obj)
    data_store.REL_DB.UpdateHuntObject(
        hunt_id,
        hunt_state=hunt_obj.HuntState.STARTED,
        start_time=rdfvalue.RDFDatetime.Now(),
        num_clients_at_start_time=num_hunt_clients,
    )
    # Re-read so that the returned/scheduled object reflects the update.
    hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
    if hunt_obj.args.hunt_type == hunt_obj.args.HuntType.STANDARD:
        _ScheduleGenericHunt(hunt_obj)
    elif hunt_obj.args.hunt_type == hunt_obj.args.HuntType.VARIABLE:
        _ScheduleVariableHunt(hunt_obj)
    else:
        raise UnknownHuntTypeError("Invalid hunt type for hunt %s: %r" %
                                   (hunt_id, hunt_obj.args.hunt_type))
    return hunt_obj
Starts a hunt with a given id.
def as_ul(self, show_leaf=True, current_linkable=False, class_current="active_link"):
    """Return the breadcrumb rendered as an HTML ``ul``.

    Delegates to the internal menu renderer with the ``as_ul`` format.
    """
    options = (show_leaf, current_linkable, class_current)
    return self.__do_menu("as_ul", *options)
Return the breadcrumb rendered as an HTML ``ul`` element.
def ssh_known_host_key(host, application_name, user=None):
    """Return the first entry in known_hosts for host.

    :param host: hostname to lookup in file.
    :type host: str
    :param application_name: Name of application eg nova-compute-something
    :type application_name: str
    :param user: The user that the ssh asserts are for.
    :type user: str
    :returns: Host key
    :rtype: str or None
    """
    cmd = [
        'ssh-keygen',
        '-f', known_hosts(application_name, user),
        '-H',
        '-F', host]
    try:
        output = subprocess.check_output(cmd)
    except subprocess.CalledProcessError as e:
        # ssh-keygen returns 1 when no matching key is found; treat that
        # as "no entry found" rather than a hard failure.
        if e.returncode == 1:
            output = e.output
        else:
            raise
    if not isinstance(output, str):
        # check_output returns bytes on Python 3; the original code called
        # bytes.split('\n') which raises TypeError. Decode before splitting.
        output = output.decode('utf-8')
    output = output.strip()
    if output:
        return output.split('\n')[0]
    return None
Return the first entry in known_hosts for host. :param host: hostname to lookup in file. :type host: str :param application_name: Name of application eg nova-compute-something :type application_name: str :param user: The user that the ssh asserts are for. :type user: str :returns: Host key :rtype: str or None
def blockmix_salsa8(BY, Yi, r):
    """Blockmix; Used by SMix.

    Operates in place on the flat word-array ``BY``. ``Yi`` is the word
    offset of the scratch area within ``BY``; ``r`` is the scrypt block
    size parameter (each block is 16 32-bit words).
    """
    # X starts as the last 16-word block of the input region.
    start = (2 * r - 1) * 16
    X = BY[start:start+16]
    tmp = [0]*16
    # Hash each of the 2r input blocks into the scratch area at Yi.
    # NOTE(review): presumably salsa20_8 mixes X with the input block and
    # writes the result to the output offset, chaining X — confirm against
    # salsa20_8's signature.
    for i in xrange(2 * r):
        salsa20_8(X, tmp, BY, i * 16, BY, Yi + i*16)
    # Shuffle the hashed blocks back: even-indexed blocks go to the first
    # half, odd-indexed blocks to the second half (per the scrypt spec).
    for i in xrange(r):
        BY[i * 16:(i * 16)+(16)] = BY[Yi + (i * 2) * 16:(Yi + (i * 2) * 16)+(16)]
        BY[(i + r) * 16:((i + r) * 16)+(16)] = BY[Yi + (i*2 + 1) * 16:(Yi + (i*2 + 1) * 16)+(16)]
Blockmix; Used by SMix
def count_rows_duplicates(self, table, cols='*'):
    """Get the number of rows that do not contain distinct values."""
    total = self.count_rows(table, '*')
    distinct = self.count_rows_distinct(table, cols)
    return total - distinct
Get the number of rows that do not contain distinct values.
def reverseComplement(self, isRNA=None):
    """Reverse complement this sequence in-place.

    :param isRNA: if True, treat this sequence as RNA. If False, treat it
                  as DNA. If None (default), inspect the sequence and make
                  a guess as to whether it is RNA or DNA.
    """
    isRNA_l = self.isRNA() if isRNA is None else isRNA
    complements = RNA_COMPLEMENTS if isRNA_l else DNA_COMPLEMENTS
    # Build the complement with a single join (linear time) instead of
    # repeated string concatenation (quadratic), then reverse it.
    self.sequenceData = "".join(
        complements[n] for n in self.sequenceData)[::-1]
Reverse complement this sequence in-place. :param isRNA: if True, treat this sequence as RNA. If False, treat it as DNA. If None (default), inspect the sequence and make a guess as to whether it is RNA or DNA.
def catchable_exceptions(exceptions):
    """Return True if *exceptions* can be caught in an ``except`` clause.

    A value is catchable when it is an exception type or a non-empty
    tuple of exception types.
    """
    if isinstance(exceptions, type):
        return issubclass(exceptions, BaseException)
    if not isinstance(exceptions, tuple) or not exceptions:
        return False
    return all(issubclass(item, BaseException) for item in exceptions)
Returns True if exceptions can be caught in the except clause. The exception can be caught if it is an Exception type or a tuple of exception types.
def patch_django_for_autodoc():
    """Fix the appearance of some classes in autodoc.

    This avoids query evaluation.
    """
    def _return_manager(self, *args, **kwargs):
        return self.manager

    def _queryset_repr(self):
        return self.__class__.__name__

    # Accessing a manager descriptor yields the manager itself, and a
    # QuerySet reprs as its class name, so autodoc never hits the DB.
    ManagerDescriptor.__get__ = _return_manager
    models.QuerySet.__repr__ = _queryset_repr
Fix the appearance of some classes in autodoc. This avoids query evaluation.
def listen_for_updates(self):
    """Attach the instance callback to the group pub/sub channel."""
    channel = self.group_pubsub
    self.toredis.subscribe(channel, callback=self.callback)
Attach a callback on the group pubsub
def component_for_entity(self, entity: int, component_type: Type[C]) -> C:
    """Retrieve the Component instance of *component_type* attached to
    *entity*.

    Raises a KeyError if the given Entity or Component does not exist.

    :param entity: The Entity ID to retrieve the Component for.
    :param component_type: The Component type you wish to retrieve.
    :return: The Component instance requested for the given Entity ID.
    """
    components = self._entities[entity]
    return components[component_type]
Retrieve a Component instance for a specific Entity. In some cases, it may be necessary to access a specific Component instance. For example: directly modifying a Component to handle user input. Raises a KeyError if the given Entity and Component do not exist. :param entity: The Entity ID to retrieve the Component for. :param component_type: The Component instance you wish to retrieve. :return: The Component instance requested for the given Entity ID.
def _default_logfile(exe_name):
    """Retrieve the default logfile path for *exe_name*.

    On Windows a uniquely-named file is created under the salt cache dir;
    elsewhere the conventional ``/var/log/<exe_name>.log`` path is used.
    """
    if not salt.utils.platform.is_windows():
        return salt.utils.path.join('/var/log', '{0}.log'.format(exe_name))
    tmp_dir = os.path.join(__opts__['cachedir'], 'tmp')
    if not os.path.isdir(tmp_dir):
        os.mkdir(tmp_dir)
    # delete=False: we only want the unique name; the caller writes to it.
    handle = tempfile.NamedTemporaryFile(dir=tmp_dir,
                                         prefix=exe_name,
                                         suffix='.log',
                                         delete=False)
    try:
        return handle.name
    finally:
        handle.close()
Retrieve the logfile name
def check_webserver_running(url="http://localhost:8800", max_retries=30):
    """Return True if *url* accepts a HEAD request within *max_retries* attempts.

    Retries once per second after a failed connection. As in the original
    implementation, any completed HEAD request (regardless of status code)
    counts as success, and polling stops early on HTTP 200.
    """
    success = False
    # Bound the loop by attempts: the original only incremented the retry
    # counter on exceptions, so a server returning non-200 responses made
    # it spin forever.
    for _ in range(max_retries):
        try:
            status = requests.head(url, allow_redirects=True).status_code
            success = True
            if status == requests.codes.ok:
                break
        except requests.exceptions.RequestException:
            # Narrowed from a bare except, which also swallowed
            # KeyboardInterrupt/SystemExit.
            sleep(1)
    if not success:
        logging.warning('Unable to connect to %s within %s retries' % (url, max_retries))
    return success
Returns True if a given URL is responding within a given number of retries.