def get_corpus_path(name: str) -> Union[str, None]:
    """
    Get corpus path

    :param str name: corpus name
    """
    db = TinyDB(corpus_db_path())
    temp = Query()
    if len(db.search(temp.name == name)) > 0:
        path = get_full_data_path(db.search(temp.name == name)[0]["file"])
        db.close()
        if not os.path.exists(path):
            download(name)
        return path
    db.close()
    return None
def completion_acd(edm, X0, W=None, tol=1e-6, sweeps=3):
    """ Complete and denoise an EDM using alternating descent.

    The idea here is to simply run reconstruct_acd for a few iterations,
    yielding a position estimate, which can in turn be used
    to get a completed and denoised edm.

    :param edm: noisy matrix (NxN)
    :param X0: starting points (Nxd)
    :param W: optional weight matrix.
    :param tol: Stopping criterion of iterative algorithm.
    :param sweeps: Maximum number of sweeps.
    """
    from .algorithms import reconstruct_acd
    Xhat, costs = reconstruct_acd(edm, X0, W, tol=tol, sweeps=sweeps)
    return get_edm(Xhat)
def post(self, url, headers=None, params=None, **kwargs):
    """Send a JSON POST request with the given request headers, additional
    URL query parameters, and the given JSON in the request body.  The
    extra query parameters are merged with any which already exist in the
    URL.  The 'json' and 'data' parameters may not both be given.

    Args:
        url (str): URL to retrieve
        headers (dict): Any other headers to be added to the request.
        params: dictionary or bytes to be sent in the query string for the
            request. (optional)
        json: json to send in the body of the Request.  This must be a
            JSON-serializable object. (optional)
        data: raw request body data.  May be a dictionary, list of tuples,
            bytes, or file-like object to send in the body of the Request.
            (optional)
    """
    if len(kwargs) > 1:
        raise InvalidArgumentsError("Too many extra args ({} > 1)".format(
            len(kwargs)))

    if kwargs:
        kwarg = next(iter(kwargs))
        if kwarg not in ("json", "data"):
            raise InvalidArgumentsError("Invalid kwarg: " + kwarg)

    resp = self.session.post(url, headers=headers, params=params, **kwargs)
    resp.raise_for_status()
    return _to_json(resp)
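A hypothetical call sketch (the client object and endpoint URL are invented for illustration; only the post() contract above is real):

# Hypothetical usage -- assumes some client instance exposing the post() above.
result = client.post(
    'https://api.example.com/items',
    headers={'Authorization': 'Bearer TOKEN'},
    params={'verbose': '1'},
    json={'name': 'widget'},  # exactly one of json= or data= may be given
)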
def is_dtype(cls, dtype):
    """Check if we match 'dtype'.

    Parameters
    ----------
    dtype : object
        The object to check.

    Returns
    -------
    is_dtype : bool

    Notes
    -----
    The default implementation is True if

    1. ``cls.construct_from_string(dtype)`` is an instance of ``cls``.
    2. ``dtype`` is an object and is an instance of ``cls``
    3. ``dtype`` has a ``dtype`` attribute, and any of the above
       conditions is true for ``dtype.dtype``.
    """
    dtype = getattr(dtype, 'dtype', dtype)

    if isinstance(dtype, (ABCSeries, ABCIndexClass, ABCDataFrame, np.dtype)):
        # https://github.com/pandas-dev/pandas/issues/22960
        # avoid passing data to `construct_from_string`. This could
        # cause a FutureWarning from numpy about failing elementwise
        # comparison from, e.g., comparing DataFrame == 'category'.
        return False
    elif dtype is None:
        return False
    elif isinstance(dtype, cls):
        return True
    try:
        return cls.construct_from_string(dtype) is not None
    except TypeError:
        return False
def start(self):
    """
    Prepare the server to start serving connections.

    Configure the server socket handler and establish a TLS wrapping
    socket from which all client connections descend. Bind this TLS
    socket to the specified network address for the server.

    Raises:
        NetworkingError: Raised if the TLS socket cannot be bound to the
            network address.
    """
    self.manager = multiprocessing.Manager()
    self.policies = self.manager.dict()
    policies = copy.deepcopy(operation_policy.policies)
    for policy_name, policy_set in six.iteritems(policies):
        self.policies[policy_name] = policy_set

    self.policy_monitor = monitor.PolicyDirectoryMonitor(
        self.config.settings.get('policy_path'),
        self.policies,
        self.live_policies
    )

    def interrupt_handler(trigger, frame):
        self.policy_monitor.stop()
    signal.signal(signal.SIGINT, interrupt_handler)
    signal.signal(signal.SIGTERM, interrupt_handler)

    self.policy_monitor.start()

    self._engine = engine.KmipEngine(
        policies=self.policies,
        database_path=self.config.settings.get('database_path')
    )

    self._logger.info("Starting server socket handler.")

    # Create a TCP stream socket and configure it for immediate reuse.
    socket.setdefaulttimeout(10)
    self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

    self._logger.debug(
        "Configured cipher suites: {0}".format(
            len(self.config.settings.get('tls_cipher_suites'))
        )
    )
    for cipher in self.config.settings.get('tls_cipher_suites'):
        self._logger.debug(cipher)
    auth_suite_ciphers = self.auth_suite.ciphers.split(':')
    self._logger.debug(
        "Authentication suite ciphers to use: {0}".format(
            len(auth_suite_ciphers)
        )
    )
    for cipher in auth_suite_ciphers:
        self._logger.debug(cipher)

    self._socket = ssl.wrap_socket(
        self._socket,
        keyfile=self.config.settings.get('key_path'),
        certfile=self.config.settings.get('certificate_path'),
        server_side=True,
        cert_reqs=ssl.CERT_REQUIRED,
        ssl_version=self.auth_suite.protocol,
        ca_certs=self.config.settings.get('ca_path'),
        do_handshake_on_connect=False,
        suppress_ragged_eofs=True,
        ciphers=self.auth_suite.ciphers
    )

    try:
        self._socket.bind(
            (
                self.config.settings.get('hostname'),
                int(self.config.settings.get('port'))
            )
        )
    except Exception as e:
        self._logger.exception(e)
        raise exceptions.NetworkingError(
            "Server failed to bind socket handler to {0}:{1}".format(
                self.config.settings.get('hostname'),
                self.config.settings.get('port')
            )
        )
    else:
        self._logger.info(
            "Server successfully bound socket handler to {0}:{1}".format(
                self.config.settings.get('hostname'),
                self.config.settings.get('port')
            )
        )
        self._is_serving = True
def combining_search(self):
    """
    Search for the path that combines the pair.
    """
    start = (
        self.get_pair(),
        (
            self.cube["L"],
            self.cube["U"],
            self.cube["F"],
            self.cube["D"],
            self.cube["R"],
            self.cube["B"],
        ),
    )
    return sum(
        path_actions(a_star_search(
            start,
            self.combining_successors,
            lambda x: len(x),
            self.combining_goal,
        )),
        Formula(),
    )
def run_procedure(self, process_number, std_vs_mfg, params=''):
    """
    Initiate a C1219 procedure, the request is written to table 7 and
    the response is read from table 8.

    :param int process_number: The numeric procedure identifier
        (0 <= process_number <= 2047).
    :param bool std_vs_mfg: Whether the procedure is manufacturer
        specified or not. True is manufacturer specified.
    :param bytes params: The parameters to pass to the procedure
        initiation request.
    :return: A tuple of the result code and the response data.
    :rtype: tuple
    """
    seqnum = random.randint(2, 254)
    self.logger.info(
        'starting procedure: ' + str(process_number) +
        ' (' + hex(process_number) + ') sequence number: ' +
        str(seqnum) + ' (' + hex(seqnum) + ')'
    )
    procedure_request = C1219ProcedureInit(
        self.c1219_endian, process_number, std_vs_mfg, 0, seqnum, params
    ).build()
    self.set_table_data(7, procedure_request)

    response = self.get_table_data(8)
    if response[:3] == procedure_request[:3]:
        return response[3], response[4:]
    else:
        self.logger.error('invalid response from procedure response table (table #8)')
        raise C1219ProcedureError('invalid response from procedure response table (table #8)')
def _nonzero(self):
    """Equivalent to numpy's nonzero but returns a tuple of Variables."""
    # TODO we should replace dask's native nonzero
    # after https://github.com/dask/dask/issues/1076 is implemented.
    nonzeros = np.nonzero(self.data)
    return tuple(Variable((dim), nz) for nz, dim in
                 zip(nonzeros, self.dims))
def println(msg):
    """
    Convenience function to print messages on a single line in the terminal
    """
    sys.stdout.write(msg)
    sys.stdout.flush()
    sys.stdout.write('\x08' * len(msg))
    sys.stdout.flush()
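A minimal sketch of the overwrite behavior, assuming the println above is in scope: the trailing '\x08' backspaces leave the cursor at the start of the message, so the next call writes on top of it.

import sys
import time

for i in range(5):
    println('progress: {}/5'.format(i + 1))  # each call overwrites the last
    time.sleep(0.2)
sys.stdout.write('\n')  # step past the final message when done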
def get_views_traffic(self, per=github.GithubObject.NotSet):
    """
    :calls: `GET /repos/:owner/:repo/traffic/views <https://developer.github.com/v3/repos/traffic/>`_
    :param per: string, must be one of day or week, day by default
    :rtype: None or list of :class:`github.View.View`
    """
    assert per is github.GithubObject.NotSet or (
        isinstance(per, (str, unicode)) and per in ("day", "week")
    ), "per must be day or week, day by default"
    url_parameters = dict()
    if per is not github.GithubObject.NotSet:
        url_parameters["per"] = per
    headers, data = self._requester.requestJsonAndCheck(
        "GET",
        self.url + "/traffic/views",
        parameters=url_parameters
    )
    if isinstance(data, dict) and "views" in data and isinstance(data["views"], list):
        data["views"] = [
            github.View.View(self._requester, headers, item, completed=True)
            for item in data["views"]
        ]
    return data
async def delete(self, turn_context: TurnContext) -> None:
    """
    Delete any state currently stored in this state scope.

    :param turn_context: The context object for this turn.
    :return: None
    """
    if turn_context is None:
        raise TypeError('BotState.delete(): turn_context cannot be None.')

    turn_context.turn_state.pop(self._context_service_key)

    storage_key = self.get_storage_key(turn_context)
    await self._storage.delete({storage_key})
def dry_run_from_args(args: argparse.Namespace):
    """
    Just converts from an ``argparse.Namespace`` object to params.
    """
    parameter_path = args.param_path
    serialization_dir = args.serialization_dir
    overrides = args.overrides

    params = Params.from_file(parameter_path, overrides)

    dry_run_from_params(params, serialization_dir)
def _compute_ll(self):
    """
    m._compute_ll() -- [utility] Compute the log-likelihood matrix from
    the count matrix
    """
    self.fracs = []
    self.logP = []
    self.ll = []
    for i in range(self.width):
        Dll = {'A': 0, 'C': 0, 'T': 0, 'G': 0}
        Df = {'A': 0, 'C': 0, 'T': 0, 'G': 0}
        DlogP = {'A': 0, 'C': 0, 'T': 0, 'G': 0}
        for key in self.counts[i].keys():
            # print i, key, self.counts[i][key], self.nseqs
            Pij = self.counts[i][key] / float(self.nseqs)
            Df[key] = Pij
            Dll[key] = (math.log(
                (self.counts[i][key] + self.bgscale * self.background[key]) /
                ((self.nseqs + self.bgscale) * self.background[key])
            ) / math.log(2))
            if Pij > 0:
                DlogP[key] = math.log(Pij) / math.log(2)
            else:
                DlogP[key] = -100  # near zero
        self.fracs.append(Df)
        self.logP.append(DlogP)
        self.ll.append(Dll)
    self.P = self.fracs
    self._compute_bits()
    self._compute_ambig_ll()
    self._maxscore()
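For a single cell, the smoothed score above reduces to log2((count + bgscale*bg) / ((nseqs + bgscale)*bg)). A standalone check with toy numbers (nothing from a real motif):

import math

count, nseqs = 7, 10      # 7 of 10 sequences have an 'A' at this position
bg, bgscale = 0.25, 1.0   # uniform background, unit pseudocount scale

ll = math.log((count + bgscale * bg) / ((nseqs + bgscale) * bg)) / math.log(2)
print(round(ll, 3))       # ~1.399 bits: 'A' is strongly enriched here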
def set_options(self, **kw):
    r"""Set Parser options.

    .. seealso::
        The ``kw`` argument has the same meaning as in :func:`lazyxml.loads`
    """
    for k, v in kw.iteritems():
        if k in self.__options:
            self.__options[k] = v
r"""Set Parser options. .. seealso:: ``kw`` argument have the same meaning as in :func:`lazyxml.loads`
def create_job_flow(self, job_flow_overrides):
    """
    Creates a job flow using the config from the EMR connection.
    The keys of the connection's extra JSON may be any of the arguments
    of the boto3 run_job_flow method. Overrides for this config may be
    passed as the job_flow_overrides.
    """
    if not self.emr_conn_id:
        raise AirflowException('emr_conn_id must be present to use create_job_flow')

    emr_conn = self.get_connection(self.emr_conn_id)

    config = emr_conn.extra_dejson.copy()
    config.update(job_flow_overrides)

    response = self.get_conn().run_job_flow(**config)

    return response
def check_py(self, version, name, original, loc, tokens):
    """Check for Python-version-specific syntax."""
    internal_assert(len(tokens) == 1, "invalid " + name + " tokens", tokens)
    if self.target_info < get_target_info(version):
        raise self.make_err(
            CoconutTargetError,
            "found Python " + ".".join(version) + " " + name,
            original,
            loc,
            target=version,
        )
    else:
        return tokens[0]
def default_sort_key(item, order=None):
    """Return a key that can be used for sorting.

    The key has the structure:

    (class_key, (len(args), args), exponent.sort_key(), coefficient)

    This key is supplied by the sort_key routine of Basic objects when
    ``item`` is a Basic object or an object (other than a string) that
    sympifies to a Basic object. Otherwise, this function produces the
    key.

    The ``order`` argument is passed along to the sort_key routine and is
    used to determine how the terms *within* an expression are ordered.
    (See examples below) ``order`` options are: 'lex', 'grlex', 'grevlex',
    and reversed values of the same (e.g. 'rev-lex'). The default order
    value is None (which translates to 'lex').

    Examples
    ========

    >>> from sympy import S, I, default_sort_key
    >>> from sympy.core.function import UndefinedFunction
    >>> from sympy.abc import x

    The following are equivalent ways of getting the key for an object:

    >>> x.sort_key() == default_sort_key(x)
    True

    Here are some examples of the key that is produced:

    >>> default_sort_key(UndefinedFunction('f'))
    ((0, 0, 'UndefinedFunction'), (1, ('f',)), ((1, 0, 'Number'), (0, ()), (), 1), 1)

    >>> default_sort_key('1')
    ((0, 0, 'str'), (1, ('1',)), ((1, 0, 'Number'), (0, ()), (), 1), 1)

    >>> default_sort_key(S.One)
    ((1, 0, 'Number'), (0, ()), (), 1)

    >>> default_sort_key(2)
    ((1, 0, 'Number'), (0, ()), (), 2)

    While sort_key is a method only defined for SymPy objects,
    default_sort_key will accept anything as an argument so it is more
    robust as a sorting key. For the following, using key=lambda i:
    i.sort_key() would fail because 2 doesn't have a sort_key method; that's
    why default_sort_key is used. Note, that it also handles sympification
    of non-string items like ints:

    >>> a = [2, I, -I]
    >>> sorted(a, key=default_sort_key)
    [2, -I, I]

    The returned key can be used anywhere that a key can be specified for
    a function, e.g. sort, min, max, etc...:

    >>> a.sort(key=default_sort_key); a[0]
    2
    >>> min(a, key=default_sort_key)
    2

    Note
    ----

    The key returned is useful for getting items into a canonical order
    that will be the same across platforms. It is not directly useful for
    sorting lists of expressions:

    >>> a, b = x, 1/x

    Since ``a`` has only 1 term, its value of sort_key is unaffected by
    ``order``:

    >>> a.sort_key() == a.sort_key('rev-lex')
    True

    If ``a`` and ``b`` are combined then the key will differ because there
    are terms that can be ordered:

    >>> eq = a + b
    >>> eq.sort_key() == eq.sort_key('rev-lex')
    False
    >>> eq.as_ordered_terms()
    [x, 1/x]
    >>> eq.as_ordered_terms('rev-lex')
    [1/x, x]

    But since the keys for each of these terms are independent of ``order``'s
    value, they don't sort differently when they appear separately in a list:

    >>> sorted(eq.args, key=default_sort_key)
    [1/x, x]
    >>> sorted(eq.args, key=lambda i: default_sort_key(i, order='rev-lex'))
    [1/x, x]

    The order of terms obtained when using these keys is the order that
    would be obtained if those terms were *factors* in a product.

    See Also
    ========

    sympy.core.expr.as_ordered_factors, sympy.core.expr.as_ordered_terms

    """

    from sympy.core import S, Basic
    from sympy.core.sympify import sympify, SympifyError
    from sympy.core.compatibility import iterable

    if isinstance(item, Basic):
        return item.sort_key(order=order)

    if iterable(item, exclude=string_types):
        if isinstance(item, dict):
            args = item.items()
            unordered = True
        elif isinstance(item, set):
            args = item
            unordered = True
        else:
            # e.g. tuple, list
            args = list(item)
            unordered = False

        args = [default_sort_key(arg, order=order) for arg in args]

        if unordered:
            # e.g. dict, set
            args = sorted(args)

        cls_index, args = 10, (len(args), tuple(args))
    else:
        if not isinstance(item, string_types):
            try:
                item = sympify(item)
            except SympifyError:
                # e.g. lambda x: x
                pass
            else:
                if isinstance(item, Basic):
                    # e.g. int -> Integer
                    return default_sort_key(item)
                # e.g. UndefinedFunction

        # e.g. str
        cls_index, args = 0, (1, (str(item),))

    return (cls_index, 0, item.__class__.__name__
            ), args, S.One.sort_key(), S.One
def show_condition_operators(self, condition):
    """ Show available operators for a given saved search condition """
    # dict keys of allowed operators for the current condition
    permitted_operators = self.savedsearch.conditions_operators.get(condition)
    # transform these into values
    permitted_operators_list = set(
        [self.savedsearch.operators.get(op) for op in permitted_operators]
    )
    return permitted_operators_list
def _create_at(self, timestamp=None, id=None, forced_identity=None, **kwargs):
    """
    WARNING: Only for internal use and testing.

    Create a Versionable having a version_start_date and
    version_birth_date set to some pre-defined timestamp

    :param timestamp: point in time at which the instance has to be created
    :param id: version 4 UUID unicode object.  Usually this is not
        specified, it will be automatically created.
    :param forced_identity: version 4 UUID unicode object.  For internal
        use only.
    :param kwargs: arguments needed for initializing the instance
    :return: an instance of the class
    """
    id = Versionable.uuid(id)
    if forced_identity:
        ident = Versionable.uuid(forced_identity)
    else:
        ident = id

    if timestamp is None:
        timestamp = get_utc_now()

    kwargs['id'] = id
    kwargs['identity'] = ident
    kwargs['version_start_date'] = timestamp
    kwargs['version_birth_date'] = timestamp
    return super(VersionManager, self).create(**kwargs)
def logout(self):
    """Logout of a vSphere server."""
    if self._logged_in is True:
        self.si.flush_cache()
        self.sc.sessionManager.Logout()
        self._logged_in = False
def extract(group_id, access_token, fields=None):
    '''
    FIXME: DOCS...

    Links:
     * https://developers.facebook.com/tools/explorer/
    '''
    fields = fields or ['id', 'owner', 'email', 'name', 'members']
    # TEST that fields are a subset of valid fields
    assert set(fields).issubset(VALID_FIELDS)

    get_args = {'fields': ','.join(fields), 'access_token': access_token}
    get_args_str = '&'.join(
        ['{}={}'.format(x, y) for x, y in get_args.items()])
    base_url = 'https://graph.facebook.com/{}/?{}'.format(
        group_id, get_args_str)

    logr.debug(' LOADING URL: {}'.format(base_url))
    response = rq.get(base_url)
    # FIXME: check for errors
    # probably want to validate here somehow...?
    r_json = response.json()
    subscribers = r_json.get('members')
    return subscribers
def remove_child_family(self, family_id, child_id):
    """Removes a child from a family.

    arg:    family_id (osid.id.Id): the ``Id`` of a family
    arg:    child_id (osid.id.Id): the ``Id`` of the new child
    raise:  NotFound - ``family_id`` not a parent of ``child_id``
    raise:  NullArgument - ``family_id`` or ``child_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.BinHierarchyDesignSession.remove_child_bin_template
    if self._catalog_session is not None:
        return self._catalog_session.remove_child_catalog(catalog_id=family_id,
                                                          child_id=child_id)
    return self._hierarchy_session.remove_child(id_=family_id, child_id=child_id)
def apply_templates(toks, templates):
    """
    Generate features for an item sequence by applying feature templates.
    A feature template consists of a tuple of (name, offset) pairs,
    where name and offset specify a field name and offset from which
    the template extracts a feature value. Generated features are stored
    in the 'F' field of each item in the sequence.

    Parameters
    ----------
    toks: list of tokens
        A list of processed tokens.
    templates: list of template tuples (str, int)
        A feature template consists of a tuple of (name, offset) pairs,
        where name and offset specify a field name and offset from which
        the template extracts a feature value.
    """
    for template in templates:
        name = '|'.join(['%s[%d]' % (f, o) for f, o in template])
        for t in range(len(toks)):
            values_list = []
            for field, offset in template:
                p = t + offset
                if p < 0 or p >= len(toks):
                    values_list = []
                    break
                if field in toks[p]:
                    value = toks[p][field]
                    values_list.append(value if isinstance(value, (set, list)) else [value])
            if len(template) == len(values_list):
                for values in product(*values_list):
                    toks[t]['F'].append('%s=%s' % (name, '|'.join(values)))
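A minimal, self-contained run with toy tokens and a single bigram template (field names are illustrative only; `product` is the itertools import the function body relies on):

from itertools import product  # used inside apply_templates

toks = [
    {'w': 'San', 'F': []},
    {'w': 'Francisco', 'F': []},
]
templates = [(('w', -1), ('w', 0))]  # previous word paired with current word

apply_templates(toks, templates)
print(toks[1]['F'])  # ['w[-1]|w[0]=San|Francisco']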
def commit_branches(sha1):
    # type: (str) -> List[str]
    """ Get the name of the branches that this commit belongs to. """
    cmd = 'git branch --contains {}'.format(sha1)
    out = shell.run(
        cmd,
        capture=True,
        never_pretend=True
    ).stdout.strip()
    # Parse per line and strip the '* ' marker git places next to the
    # current branch; a bare .split() would return it as a separate entry.
    return [line.strip('* ') for line in out.splitlines()]
def qhalfx(self):
    """get the half normal matrix attribute. Create the attribute if
    it has not yet been created

    Returns
    -------
    qhalfx : pyemu.Matrix
    """
    if self.__qhalfx is None:
        self.log("qhalfx")
        self.__qhalfx = self.qhalf * self.jco
        self.log("qhalfx")
    return self.__qhalfx
def get_end(pos, alt, category, snvend=None, svend=None, svlen=None):
    """Return the end coordinate for a variant

    Args:
        pos(int)
        alt(str)
        category(str)
        snvend(str)
        svend(int)
        svlen(int)

    Returns:
        end(int)
    """
    # If nothing is known we set end to be same as start
    end = pos
    # If variant is snv or indel we know that cyvcf2 can handle end pos
    if category in ('snv', 'indel', 'cancer'):
        end = snvend
    # With SVs we have to be a bit more careful
    elif category == 'sv':
        # The END field from INFO usually works fine
        end = svend

        # For some cases like insertions the callers set end to same as pos
        # In those cases we can hope that there is a svlen...
        if svend == pos:
            if svlen:
                end = pos + svlen
        # If variant is 'BND' they have ':' in alt field
        # Information about the other end is in the alt field
        if ':' in alt:
            match = BND_ALT_PATTERN.match(alt)
            if match:
                end = int(match.group(2))
    return end
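Two illustrative calls with invented coordinates (the BND branch needs the module-level BND_ALT_PATTERN regex, so it is not exercised here):

# SNV: the caller-supplied end coordinate is trusted as-is.
assert get_end(pos=100, alt='T', category='snv', snvend=100) == 100

# Insertion-style SV where the caller set END == POS: fall back to POS + SVLEN.
assert get_end(pos=100, alt='<INS>', category='sv', svend=100, svlen=50) == 150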
def create_or_update_record(data, pid_type, id_key, minter):
    """Register a funder or grant."""
    resolver = Resolver(
        pid_type=pid_type, object_type='rec', getter=Record.get_record)
    try:
        pid, record = resolver.resolve(data[id_key])
        data_c = deepcopy(data)
        del data_c['remote_modified']
        record_c = deepcopy(record)
        del record_c['remote_modified']
        # All grants on OpenAIRE are modified periodically even if nothing
        # has changed. We need to check for actual differences in the metadata
        if data_c != record_c:
            record.update(data)
            record.commit()
            record_id = record.id
            db.session.commit()
            RecordIndexer().index_by_id(str(record_id))
    except PIDDoesNotExistError:
        record = Record.create(data)
        record_id = record.id
        minter(record.id, data)
        db.session.commit()
        RecordIndexer().index_by_id(str(record_id))
def get_versioned_delete_collector_class():
    """
    Gets the class to use for deletion collection.

    :return: class
    """
    key = 'VERSIONED_DELETE_COLLECTOR'
    try:
        cls = _cache[key]
    except KeyError:
        collector_class_string = getattr(settings, key)
        cls = import_from_string(collector_class_string, key)
        _cache[key] = cls
    return cls
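A plausible shape for the import_from_string helper the snippet relies on (an assumption for illustration; the real helper may differ in its error handling):

import importlib

def import_from_string(dotted_path, setting_name):
    # 'pkg.module.ClassName' -> the ClassName attribute of pkg.module;
    # setting_name only feeds the error message.
    module_path, _, class_name = dotted_path.rpartition('.')
    try:
        module = importlib.import_module(module_path)
        return getattr(module, class_name)
    except (ImportError, AttributeError) as e:
        raise ImportError('{}: could not import {!r}: {}'.format(
            setting_name, dotted_path, e))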
def check_marker_kwargs(self, kwargs):
    """
    Check the types of the keyword arguments for marker creation

    :param kwargs: dictionary of options for marker creation
    :type kwargs: dict
    :raises: TypeError, ValueError
    """
    text = kwargs.get("text", "")
    if not isinstance(text, str) and text is not None:
        raise TypeError("text argument is not of str type")
    for color in (prefix + color for prefix in ["active_", "hover_", ""]
                  for color in ["background", "foreground", "outline"]):
        value = kwargs.get(color, "")
        if value == "default":
            continue
        if not isinstance(value, str):
            raise TypeError("{} argument not of str type".format(color))
    font = kwargs.get("font", ("default", 10))
    if (not isinstance(font, tuple) or not len(font) > 0 or
            not isinstance(font[0], str)) and font != "default":
        raise ValueError("font argument is not a valid font tuple")
    for border in (prefix + "border" for prefix in ["active_", "hover_", ""]):
        border_v = kwargs.get(border, 0)
        if border_v == "default":
            continue
        if not isinstance(border_v, int) or border_v < 0:
            raise ValueError("{} argument is not of int type or smaller than zero".format(border))
    iid = kwargs.get("iid", "-1")
    if not isinstance(iid, str):
        raise TypeError("iid argument not of str type")
    if iid == "":
        raise ValueError("iid argument empty string")
    for boolean_arg in ["move", "category_change", "allow_overlap", "snap_to_ticks"]:
        value = kwargs.get(boolean_arg, False)
        if value == "default":
            continue
        if not isinstance(value, bool):
            raise TypeError("{} argument is not of bool type".format(boolean_arg))
    tags = kwargs.get("tags", ())
    if not isinstance(tags, tuple):
        raise TypeError("tags argument is not of tuple type")
    for tag in tags:
        if not isinstance(tag, str):
            raise TypeError("one or more values in tags argument is not of str type")
        if tag not in self._tags:
            raise ValueError("unknown tag in tags argument")
def validate_extra_link(self, extra_link):
    """validate extra link"""
    if EXTRA_LINK_NAME_KEY not in extra_link or EXTRA_LINK_FORMATTER_KEY not in extra_link:
        raise Exception("Invalid extra.links format. " +
                        "Extra link must include a 'name' and 'formatter' field")

    self.validated_formatter(extra_link[EXTRA_LINK_FORMATTER_KEY])
    return extra_link
def blogroll(request, btype):
    'View that handles the generation of blogrolls.'
    response, site, cachekey = initview(request)
    if response:
        return response[0]
    template = loader.get_template('feedjack/{0}.xml'.format(btype))
    ctx = dict()
    fjlib.get_extra_context(site, ctx)
    ctx = Context(ctx)
    response = HttpResponse(
        template.render(ctx), content_type='text/xml; charset=utf-8')
    patch_vary_headers(response, ['Host'])
    fjcache.cache_set(
        site, cachekey,
        (response, ctx_get(ctx, 'last_modified')))
    return response
def visualize_detection(self, img, dets, classes=[], thresh=0.6):
    """
    visualize detections in one image

    Parameters
    ----------
    img : numpy.array
        image, in bgr format
    dets : numpy.array
        ssd detections, numpy.array([[id, score, x1, y1, x2, y2]...])
        each row is one object
    classes : tuple or list of str
        class names
    thresh : float
        score threshold
    """
    import matplotlib.pyplot as plt
    import random

    plt.imshow(img)
    height = img.shape[0]
    width = img.shape[1]
    colors = dict()
    for det in dets:
        (klass, score, x0, y0, x1, y1) = det
        if score < thresh:
            continue
        cls_id = int(klass)
        if cls_id not in colors:
            colors[cls_id] = (random.random(), random.random(), random.random())
        xmin = int(x0 * width)
        ymin = int(y0 * height)
        xmax = int(x1 * width)
        ymax = int(y1 * height)
        rect = plt.Rectangle((xmin, ymin), xmax - xmin,
                             ymax - ymin, fill=False,
                             edgecolor=colors[cls_id],
                             linewidth=3.5)
        plt.gca().add_patch(rect)
        class_name = str(cls_id)
        if classes and len(classes) > cls_id:
            class_name = classes[cls_id]
        plt.gca().text(xmin, ymin - 2,
                       '{:s} {:.3f}'.format(class_name, score),
                       bbox=dict(facecolor=colors[cls_id], alpha=0.5),
                       fontsize=12, color='white')
    plt.show()
def bgc(mag_file, dir_path=".", input_dir_path="",
        meas_file='measurements.txt', spec_file='specimens.txt',
        samp_file='samples.txt', site_file='sites.txt',
        loc_file='locations.txt', append=False, location="unknown",
        site="", samp_con='1', specnum=0, meth_code="LP-NO",
        volume=12, user="", timezone='US/Pacific', noave=False):
    """
    Convert BGC format file to MagIC file(s)

    Parameters
    ----------
    mag_file : str
        input file name
    dir_path : str
        working directory, default "."
    input_dir_path : str
        input file directory IF different from dir_path, default ""
    meas_file : str
        output measurement file name, default "measurements.txt"
    spec_file : str
        output specimen file name, default "specimens.txt"
    samp_file: str
        output sample file name, default "samples.txt"
    site_file : str
        output site file name, default "sites.txt"
    loc_file : str
        output location file name, default "locations.txt"
    append : bool
        append output files to existing files instead of overwrite, default False
    location : str
        location name, default "unknown"
    site : str
        site name, default ""
    samp_con : str
        sample/site naming convention, default '1', see info below
    specnum : int
        number of characters to designate a specimen, default 0
    meth_code : str
        orientation method codes, default "LP-NO"
        e.g. [SO-MAG, SO-SUN, SO-SIGHT, ...]
    volume : float
        volume in ccs, default 12.
    user : str
        user name, default ""
    timezone : str
        timezone in pytz library format, default "US/Pacific"
        list of timezones can be found at http://pytz.sourceforge.net/
    noave : bool
        do not average duplicate measurements, default False
        (so by default, DO average)

    Returns
    -------
    Tuple : (True or False indicating if conversion was successful,
        meas_file name written)

    Info
    ----
    Sample naming convention:
        [1] XXXXY: where XXXX is an arbitrary length site designation and Y
            is the single character sample designation.  e.g., TG001a is the
            first sample from site TG001.  [default]
        [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
        [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
        [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
        [5] site name same as sample
        [6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
        [7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
    """
    version_num = pmag.get_version()
    input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, dir_path)

    samp_con = str(samp_con)
    specnum = - int(specnum)
    volume *= 1e-6  # convert cc to m^3

    # parse the optional Z out of naming conventions "4-Z" and "7-Z"
    Z = 1
    if "4" in samp_con:
        if "-" not in samp_con:
            print("option [4] must be in form 4-Z where Z is an integer")
            return False, "option [4] must be in form 4-Z where Z is an integer"
        Z = int(samp_con.split("-")[1])
        samp_con = "4"
    elif "7" in samp_con:
        if "-" not in samp_con:
            print("option [7] must be in form 7-Z where Z is an integer")
            return False, "option [7] must be in form 7-Z where Z is an integer"
        Z = int(samp_con.split("-")[1])
        samp_con = "7"

    # format variables
    mag_file = os.path.join(input_dir_path, mag_file)
    if not os.path.isfile(mag_file):
        print("%s is not a BGC file" % mag_file)
        return False, 'You must provide a BGC format file'

    # Open up the BGC file and read the header information
    print('mag_file in bgc_magic', mag_file)
    pre_data = open(mag_file, 'r')
    line = pre_data.readline()
    line_items = line.split(' ')
    specimen = line_items[2]
    specimen = specimen.replace('\n', '')
    line = pre_data.readline()
    line = pre_data.readline()
    line_items = line.split('\t')
    azimuth = float(line_items[1])
    dip = float(line_items[2])
    bed_dip = line_items[3]
    sample_bed_azimuth = line_items[4]
    lon = line_items[5]
    lat = line_items[6]
    tmp_volume = line_items[7]
    if tmp_volume != 0.0:
        volume = float(tmp_volume) * 1e-6
    pre_data.close()

    data = pd.read_csv(mag_file, sep='\t', header=3, index_col=False)

    cart = np.array([data['X'], data['Y'], data['Z']]).transpose()
    direction = pmag.cart2dir(cart).transpose()

    data['dir_dec'] = direction[0]
    data['dir_inc'] = direction[1]
    # the data are in EMU - this converts to Am^2
    data['magn_moment'] = direction[2] / 1000
    # EMU - data converted to A/m
    data['magn_volume'] = (direction[2] / 1000) / volume

    # Configure the magic_measurements table
    MeasRecs, SpecRecs, SampRecs, SiteRecs, LocRecs = [], [], [], [], []
    for rowNum, row in data.iterrows():
        MeasRec, SpecRec, SampRec, SiteRec, LocRec = {}, {}, {}, {}, {}

        if specnum != 0:
            sample = specimen[:specnum]
        else:
            sample = specimen
        if site == '':
            site = pmag.parse_site(sample, samp_con, Z)

        if specimen != "" and specimen not in [x['specimen'] if 'specimen' in list(x.keys()) else "" for x in SpecRecs]:
            SpecRec['specimen'] = specimen
            SpecRec['sample'] = sample
            SpecRec['volume'] = volume
            SpecRec['analysts'] = user
            SpecRec['citations'] = 'This study'
            SpecRecs.append(SpecRec)
        if sample != "" and sample not in [x['sample'] if 'sample' in list(x.keys()) else "" for x in SampRecs]:
            SampRec['sample'] = sample
            SampRec['site'] = site
            SampRec['azimuth'] = azimuth
            SampRec['dip'] = dip
            SampRec['bed_dip_direction'] = sample_bed_azimuth
            SampRec['bed_dip'] = bed_dip
            SampRec['method_codes'] = meth_code
            SampRec['analysts'] = user
            SampRec['citations'] = 'This study'
            SampRecs.append(SampRec)
        if site != "" and site not in [x['site'] if 'site' in list(x.keys()) else "" for x in SiteRecs]:
            SiteRec['site'] = site
            SiteRec['location'] = location
            SiteRec['lat'] = lat
            SiteRec['lon'] = lon
            SiteRec['analysts'] = user
            SiteRec['citations'] = 'This study'
            SiteRecs.append(SiteRec)
        if location != "" and location not in [x['location'] if 'location' in list(x.keys()) else "" for x in LocRecs]:
            LocRec['location'] = location
            LocRec['analysts'] = user
            LocRec['citations'] = 'This study'
            LocRec['lat_n'] = lat
            LocRec['lon_e'] = lon
            LocRec['lat_s'] = lat
            LocRec['lon_w'] = lon
            LocRecs.append(LocRec)

        MeasRec['description'] = 'Date: ' + \
            str(row['Date']) + ' Time: ' + str(row['Time'])
        if '.' in row['Date']:
            datelist = row['Date'].split('.')
        elif '/' in row['Date']:
            datelist = row['Date'].split('/')
        elif '-' in row['Date']:
            datelist = row['Date'].split('-')
        else:
            print("unrecognized date formatting on one of the measurement entries for specimen %s" % specimen)
            datelist = ['', '', '']
        if ':' in row['Time']:
            timelist = row['Time'].split(':')
        else:
            print("unrecognized time formatting on one of the measurement entries for specimen %s" % specimen)
            timelist = ['', '', '']
        datelist[2] = '19' + \
            datelist[2] if len(datelist[2]) <= 2 else datelist[2]
        dt = ":".join([datelist[1], datelist[0], datelist[2],
                       timelist[0], timelist[1], timelist[2]])
        local = pytz.timezone(timezone)
        naive = datetime.datetime.strptime(dt, "%m:%d:%Y:%H:%M:%S")
        local_dt = local.localize(naive, is_dst=None)
        utc_dt = local_dt.astimezone(pytz.utc)
        timestamp = utc_dt.strftime("%Y-%m-%dT%H:%M:%S") + "Z"

        MeasRec["timestamp"] = timestamp
        MeasRec["citations"] = "This study"
        MeasRec['software_packages'] = version_num
        MeasRec["treat_temp"] = '%8.3e' % (273)  # room temp in kelvin
        MeasRec["meas_temp"] = '%8.3e' % (273)  # room temp in kelvin
        MeasRec["quality"] = 'g'
        MeasRec["standard"] = 'u'
        MeasRec["treat_step_num"] = rowNum
        MeasRec["specimen"] = specimen
        MeasRec["treat_ac_field"] = '0'
        if row['DM Val'] == '0':
            meas_type = "LT-NO"
        elif int(row['DM Type']) > 0.0:
            meas_type = "LT-AF-Z"
            treat = float(row['DM Val'])
            MeasRec["treat_ac_field"] = '%8.3e' % (treat * 1e-3)  # convert from mT to tesla
        elif int(row['DM Type']) == -1:
            meas_type = "LT-T-Z"
            treat = float(row['DM Val'])
            MeasRec["treat_temp"] = '%8.3e' % (treat + 273.)  # temp in kelvin
        else:
            print("measurement type unknown:", row['DM Type'], " in row ", rowNum)
        MeasRec["magn_moment"] = str(row['magn_moment'])
        MeasRec["magn_volume"] = str(row['magn_volume'])
        MeasRec["dir_dec"] = str(row['dir_dec'])
        MeasRec["dir_inc"] = str(row['dir_inc'])
        MeasRec['method_codes'] = meas_type
        MeasRec['dir_csd'] = '0.0'  # added due to magic.write error
        MeasRec['meas_n_orient'] = '1'  # added due to magic.write error
        MeasRecs.append(MeasRec.copy())

    con = cb.Contribution(output_dir_path, read_tables=[])

    con.add_magic_table_from_data(dtype='specimens', data=SpecRecs)
    con.add_magic_table_from_data(dtype='samples', data=SampRecs)
    con.add_magic_table_from_data(dtype='sites', data=SiteRecs)
    con.add_magic_table_from_data(dtype='locations', data=LocRecs)
    MeasOuts = pmag.measurements_methods3(MeasRecs, noave)
    con.add_magic_table_from_data(dtype='measurements', data=MeasOuts)

    con.write_table_to_file('specimens', custom_name=spec_file, append=append)
    con.write_table_to_file('samples', custom_name=samp_file, append=append)
    con.write_table_to_file('sites', custom_name=site_file, append=append)
    con.write_table_to_file('locations', custom_name=loc_file, append=append)
    meas_file = con.write_table_to_file(
        'measurements', custom_name=meas_file, append=append)

    return True, meas_file
def get_calc_id(db, datadir, job_id=None):
    """
    Return the latest calc_id by looking both at the datastore
    and the database.

    :param db: a :class:`openquake.server.dbapi.Db` instance
    :param datadir: the directory containing the datastores
    :param job_id: a job ID; if None, returns the latest job ID
    """
    calcs = datastore.get_calc_ids(datadir)
    calc_id = 0 if not calcs else calcs[-1]
    if job_id is None:
        try:
            job_id = db('SELECT seq FROM sqlite_sequence WHERE name="job"',
                        scalar=True)
        except NotFound:
            job_id = 0
    return max(calc_id, job_id)
def part(z, s):
    r"""Get the real or imaginary part of a complex number."""
    if sage_included:
        if s == 1:
            return np.real(z)
        elif s == -1:
            return np.imag(z)
        elif s == 0:
            return z
    else:
        if s == 1:
            return z.real
        elif s == -1:
            return z.imag
        elif s == 0:
            return z
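A quick sanity check, assuming the module-level sage_included flag is False in this environment:

z = 3 + 4j
assert part(z, 1) == 3.0   # real part
assert part(z, -1) == 4.0  # imaginary part
assert part(z, 0) == z     # the unmodified number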
r"""Get the real or imaginary part of a complex number.
def convert_basis(basis_dict, fmt, header=None):
    '''
    Returns the basis set data as a string representing
    the data in the specified output format
    '''

    # make converters case insensitive
    fmt = fmt.lower()
    if fmt not in _converter_map:
        raise RuntimeError('Unknown basis set format "{}"'.format(fmt))

    converter = _converter_map[fmt]

    # Determine if the converter supports all the types in the basis_dict
    if converter['valid'] is not None:
        ftypes = set(basis_dict['function_types'])
        if ftypes > converter['valid']:
            raise RuntimeError('Converter {} does not support all function types: {}'.format(fmt, str(ftypes)))

    # Actually do the conversion
    ret_str = converter['function'](basis_dict)

    if header is not None and fmt != 'json':
        comment_str = _converter_map[fmt]['comment']
        header_str = comment_str + comment_str.join(header.splitlines(True))
        ret_str = header_str + '\n\n' + ret_str

    # HACK - Psi4 requires the first non-comment line be spherical/cartesian
    # so we have to add that before the header
    if fmt == 'psi4':
        types = basis_dict['function_types']
        harm_type = 'spherical' if 'spherical_gto' in types else 'cartesian'
        ret_str = harm_type + '\n\n' + ret_str

    return ret_str
def get_performance_signatures(self, project, **params):
    '''
    Gets a set of performance signatures associated with a project and time range
    '''
    results = self._get_json(self.PERFORMANCE_SIGNATURES_ENDPOINT, project, **params)
    return PerformanceSignatureCollection(results)
def args_to_inject(self, function, bindings, owner_key):
    """Inject arguments into a function.

    :param function: The function.
    :param bindings: Map of argument name to binding key to inject.
    :param owner_key: A key uniquely identifying the *scope* of this
        function.  For a method this will be the owning class.
    :returns: Dictionary of resolved arguments.
    """
    dependencies = {}

    key = (owner_key, function, tuple(sorted(bindings.items())))

    def repr_key(k):
        owner_key, function, bindings = k
        return '%s.%s(injecting %s)' % (
            tuple(map(_describe, k[:2])) + (dict(k[2]),))

    log.debug('%sProviding %r for %r', self._log_prefix, bindings, function)

    if key in self._stack:
        raise CircularDependency(
            'circular dependency detected: %s -> %s' %
            (' -> '.join(map(repr_key, self._stack)), repr_key(key)))

    self._stack += (key,)
    try:
        for arg, key in bindings.items():
            try:
                instance = self.get(key.interface)
            except UnsatisfiedRequirement as e:
                if not e.args[0]:
                    e = UnsatisfiedRequirement(owner_key, e.args[1])
                raise e
            dependencies[arg] = instance
    finally:
        self._stack = tuple(self._stack[:-1])

    return dependencies
def get_newsentry_meta_description(newsentry):
    """Returns the meta description for the given entry."""
    if newsentry.meta_description:
        return newsentry.meta_description

    # If there is no seo addon found, take the info from the placeholders
    text = newsentry.get_description()
    if len(text) > 160:
        return u'{}...'.format(text[:160])
    return text
def main(search, query):
    """main function that does the search"""
    url = search.search(query)
    print(url)
    search.open_page(url)
def get_um(method_name, response=False):
    """Get protobuf for given method name

    :param method_name: full method name (e.g. ``Player.GetGameBadgeLevels#1``)
    :type method_name: :class:`str`
    :param response: whether to return proto for response or request
    :type response: :class:`bool`
    :return: protobuf message
    """
    key = (method_name, response)

    if key not in method_lookup:
        match = re.findall(r'^([a-z]+)\.([a-z]+)#(\d)?$', method_name, re.I)
        if not match:
            return None

        interface, method, version = match[0]

        if interface not in service_lookup:
            return None

        package = import_module(service_lookup[interface])

        service = getattr(package, interface, None)
        if service is None:
            return None

        for method_desc in service.GetDescriptor().methods:
            name = "%s.%s#%d" % (interface, method_desc.name, 1)

            method_lookup[(name, False)] = getattr(package, method_desc.input_type.full_name, None)
            method_lookup[(name, True)] = getattr(package, method_desc.output_type.full_name, None)

    return method_lookup[key]
def thumbnail(self):
    """Path to the thumbnail image (relative to the album directory)."""
    if not isfile(self.thumb_path):
        # if thumbnail is missing (if settings['make_thumbs'] is False)
        self.logger.debug('Generating thumbnail for %r', self)
        path = (self.dst_path if os.path.exists(self.dst_path)
                else self.src_path)
        try:
            s = self.settings
            if self.type == 'image':
                image.generate_thumbnail(
                    path, self.thumb_path, s['thumb_size'],
                    fit=s['thumb_fit'])
            elif self.type == 'video':
                video.generate_thumbnail(
                    path, self.thumb_path, s['thumb_size'],
                    s['thumb_video_delay'], fit=s['thumb_fit'],
                    converter=s['video_converter'])
        except Exception as e:
            self.logger.error('Failed to generate thumbnail: %s', e)
            return
    return url_from_path(self.thumb_name)
def Parse(conditions):
    """Parses the file finder condition types into the condition objects.

    Args:
        conditions: An iterator over `FileFinderCondition` objects.

    Yields:
        `MetadataCondition` objects that correspond to the file-finder
        conditions.
    """
    kind = rdf_file_finder.FileFinderCondition.Type
    classes = {
        kind.MODIFICATION_TIME: ModificationTimeCondition,
        kind.ACCESS_TIME: AccessTimeCondition,
        kind.INODE_CHANGE_TIME: InodeChangeTimeCondition,
        kind.SIZE: SizeCondition,
        kind.EXT_FLAGS: ExtFlagsCondition,
    }

    for condition in conditions:
        try:
            yield classes[condition.condition_type](condition)
        except KeyError:
            pass
def parse(self, data, extent):
    # type: (bytes, int) -> None
    '''
    Parse the passed in data into a UDF Descriptor tag.

    Parameters:
     data - The data to parse.
     extent - The extent to compare against for the tag location.
    Returns:
     Nothing.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('UDF Tag already initialized')

    (self.tag_ident, self.desc_version, tag_checksum, reserved,
     self.tag_serial_number, desc_crc, self.desc_crc_length,
     self.tag_location) = struct.unpack_from(self.FMT, data, 0)

    if reserved != 0:
        raise pycdlibexception.PyCdlibInvalidISO('Reserved data not 0!')

    if _compute_csum(data[:16]) != tag_checksum:
        raise pycdlibexception.PyCdlibInvalidISO('Tag checksum does not match!')

    if self.tag_location != extent:
        # In theory, we should abort (throw an exception) if we see a tag
        # location that doesn't match an actual location.  However, we
        # have seen UDF ISOs in the wild (most notably PS2 GT4 ISOs) that
        # have an invalid tag location for the second anchor and File Set
        # Terminator.  So that we can support those ISOs, just silently
        # fix it up.  We lose a little bit of detection of whether this is
        # "truly" a UDFTag, but it is really not a big risk.
        self.tag_location = extent

    if self.desc_version not in (2, 3):
        raise pycdlibexception.PyCdlibInvalidISO('Tag version not 2 or 3')

    if (len(data) - 16) < self.desc_crc_length:
        raise pycdlibexception.PyCdlibInternalError(
            'Not enough CRC bytes to compute (expected at least %d, got %d)' %
            (self.desc_crc_length, len(data) - 16))

    if desc_crc != crc_ccitt(data[16:16 + self.desc_crc_length]):
        raise pycdlibexception.PyCdlibInvalidISO('Tag CRC does not match!')

    self._initialized = True
def GetPatternIdTripDict(self):
    """Return a dictionary that maps pattern_id to a list of Trip objects."""
    d = {}
    for t in self._trips:
        d.setdefault(t.pattern_id, []).append(t)
    return d
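The setdefault idiom above groups items in a single pass; a standalone illustration with plain tuples (field names invented):

trips = [('p1', 'trip_a'), ('p2', 'trip_b'), ('p1', 'trip_c')]

d = {}
for pattern_id, trip in trips:
    # create the list on first sight of a pattern_id, then append
    d.setdefault(pattern_id, []).append(trip)

print(d)  # {'p1': ['trip_a', 'trip_c'], 'p2': ['trip_b']}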
def batch_get_documents(
    self,
    database,
    documents,
    mask=None,
    transaction=None,
    new_transaction=None,
    read_time=None,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """
    Gets multiple documents.

    Documents returned by this method are not guaranteed to be returned in the
    same order that they were requested.

    Example:
        >>> from google.cloud import firestore_v1beta1
        >>>
        >>> client = firestore_v1beta1.FirestoreClient()
        >>>
        >>> database = client.database_root_path('[PROJECT]', '[DATABASE]')
        >>>
        >>> # TODO: Initialize `documents`:
        >>> documents = []
        >>>
        >>> for element in client.batch_get_documents(database, documents):
        ...     # process element
        ...     pass

    Args:
        database (str): The database name. In the format:
            ``projects/{project_id}/databases/{database_id}``.
        documents (list[str]): The names of the documents to retrieve. In the format:
            ``projects/{project_id}/databases/{database_id}/documents/{document_path}``.
            The request will fail if any of the document is not a child resource of
            the given ``database``. Duplicate names will be elided.
        mask (Union[dict, ~google.cloud.firestore_v1beta1.types.DocumentMask]):
            The fields to return. If not set, returns all fields.
            If a document has a field that is not present in this mask, that
            field will not be returned in the response.
            If a dict is provided, it must be of the same form as the protobuf
            message :class:`~google.cloud.firestore_v1beta1.types.DocumentMask`
        transaction (bytes): Reads documents in a transaction.
        new_transaction (Union[dict, ~google.cloud.firestore_v1beta1.types.TransactionOptions]):
            Starts a new transaction and reads the documents. Defaults to a
            read-only transaction. The new transaction ID will be returned as
            the first response in the stream.
            If a dict is provided, it must be of the same form as the protobuf
            message :class:`~google.cloud.firestore_v1beta1.types.TransactionOptions`
        read_time (Union[dict, ~google.cloud.firestore_v1beta1.types.Timestamp]):
            Reads documents as they were at the given time. This may not be
            older than 60 seconds.
            If a dict is provided, it must be of the same form as the protobuf
            message :class:`~google.cloud.firestore_v1beta1.types.Timestamp`
        retry (Optional[google.api_core.retry.Retry]): A retry object used
            to retry requests. If ``None`` is specified, requests will not
            be retried.
        timeout (Optional[float]): The amount of time, in seconds, to wait
            for the request to complete. Note that if ``retry`` is
            specified, the timeout applies to each individual attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            that is provided to the method.

    Returns:
        Iterable[~google.cloud.firestore_v1beta1.types.BatchGetDocumentsResponse].

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Wrap the transport method to add retry and timeout logic.
    if "batch_get_documents" not in self._inner_api_calls:
        self._inner_api_calls[
            "batch_get_documents"
        ] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.batch_get_documents,
            default_retry=self._method_configs["BatchGetDocuments"].retry,
            default_timeout=self._method_configs["BatchGetDocuments"].timeout,
            client_info=self._client_info,
        )

    # Sanity check: We have some fields which are mutually exclusive;
    # raise ValueError if more than one is sent.
    google.api_core.protobuf_helpers.check_oneof(
        transaction=transaction,
        new_transaction=new_transaction,
        read_time=read_time,
    )

    request = firestore_pb2.BatchGetDocumentsRequest(
        database=database,
        documents=documents,
        mask=mask,
        transaction=transaction,
        new_transaction=new_transaction,
        read_time=read_time,
    )
    if metadata is None:
        metadata = []
    metadata = list(metadata)
    try:
        routing_header = [("database", database)]
    except AttributeError:
        pass
    else:
        routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
            routing_header
        )
        metadata.append(routing_metadata)

    return self._inner_api_calls["batch_get_documents"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
Gets multiple documents. Documents returned by this method are not guaranteed to be returned in the same order that they were requested. Example: >>> from google.cloud import firestore_v1beta1 >>> >>> client = firestore_v1beta1.FirestoreClient() >>> >>> database = client.database_root_path('[PROJECT]', '[DATABASE]') >>> >>> # TODO: Initialize `documents`: >>> documents = [] >>> >>> for element in client.batch_get_documents(database, documents): ... # process element ... pass Args: database (str): The database name. In the format: ``projects/{project_id}/databases/{database_id}``. documents (list[str]): The names of the documents to retrieve. In the format: ``projects/{project_id}/databases/{database_id}/documents/{document_path}``. The request will fail if any of the document is not a child resource of the given ``database``. Duplicate names will be elided. mask (Union[dict, ~google.cloud.firestore_v1beta1.types.DocumentMask]): The fields to return. If not set, returns all fields. If a document has a field that is not present in this mask, that field will not be returned in the response. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.DocumentMask` transaction (bytes): Reads documents in a transaction. new_transaction (Union[dict, ~google.cloud.firestore_v1beta1.types.TransactionOptions]): Starts a new transaction and reads the documents. Defaults to a read-only transaction. The new transaction ID will be returned as the first response in the stream. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.TransactionOptions` read_time (Union[dict, ~google.cloud.firestore_v1beta1.types.Timestamp]): Reads documents as they were at the given time. This may not be older than 60 seconds. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.Timestamp` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: Iterable[~google.cloud.firestore_v1beta1.types.BatchGetDocumentsResponse]. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
def is_micropython_usb_device(port):
    """Checks a USB device to see if it looks like a MicroPython device.
    """
    if type(port).__name__ == 'Device':
        # Assume it's a pyudev.device.Device
        if ('ID_BUS' not in port or port['ID_BUS'] != 'usb' or
                'SUBSYSTEM' not in port or port['SUBSYSTEM'] != 'tty'):
            return False
        usb_id = 'usb vid:pid={}:{}'.format(port['ID_VENDOR_ID'], port['ID_MODEL_ID'])
    else:
        # Assume it's a port from serial.tools.list_ports.comports()
        usb_id = port[2].lower()
    # We don't check the last digit of the PID since there are 3 possible
    # values.
    if usb_id.startswith('usb vid:pid=f055:980'):
        return True
    # Check for Teensy VID:PID
    if usb_id.startswith('usb vid:pid=16c0:0483'):
        return True
    return False
Checks a USB device to see if it looks like a MicroPython device.
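A quick illustrative call, using a hypothetical serial.tools.list_ports-style tuple (device, description, hwid); the VID:PID below matches the pyboard prefix the function checks for:

port = ('/dev/ttyACM0', 'Pyboard Virtual Comm Port', 'USB VID:PID=F055:9800 SER=123456')
print(is_micropython_usb_device(port))  # True: lowercased hwid starts with 'usb vid:pid=f055:980'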
def print_validation_errors(result):
    """ Accepts validation result object and prints report (in red)"""
    click.echo(red('\nValidation failed:'))
    click.echo(red('-' * 40))
    messages = result.get_messages()
    for prop in messages.keys():  # 'prop' avoids shadowing the builtin 'property'
        click.echo(yellow(prop + ':'))
        for error in messages[prop]:
            click.echo(red('* ' + error))
    click.echo('')
Accepts validation result object and prints report (in red)
def remove_widget(self, widget): """Remove the given widget from the tooltip :param widget: the widget to remove :type widget: QtGui.QWidget :returns: None :rtype: None :raises: KeyError """ button = self._buttons.pop(widget) self.layout().removeWidget(button) button.deleteLater()
Remove the given widget from the tooltip :param widget: the widget to remove :type widget: QtGui.QWidget :returns: None :rtype: None :raises: KeyError
def _traverse_repos(self, callback, repo_name=None):
    '''
    Traverse through all repo files and apply the functionality provided in
    the callback to them
    '''
    repo_files = []
    if os.path.exists(self.opts['spm_repos_config']):
        repo_files.append(self.opts['spm_repos_config'])

    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk('{0}.d'.format(self.opts['spm_repos_config'])):
        for repo_file in filenames:
            if not repo_file.endswith('.repo'):
                continue
            repo_files.append(repo_file)

    for repo_file in repo_files:
        # The main config file is already a full path; files found in the
        # ``.d`` directory are bare names and need the directory prefix.
        if repo_file == self.opts['spm_repos_config']:
            repo_path = repo_file
        else:
            repo_path = '{0}.d/{1}'.format(self.opts['spm_repos_config'], repo_file)
        with salt.utils.files.fopen(repo_path) as rph:
            repo_data = salt.utils.yaml.safe_load(rph)
            for repo in repo_data:
                if repo_data[repo].get('enabled', True) is False:
                    continue
                if repo_name is not None and repo != repo_name:
                    continue
                callback(repo, repo_data[repo])
Traverse through all repo files and apply the functionality provided in the callback to them
def login_failures(user): ''' Query for all accounts which have 3 or more login failures. CLI Example: .. code-block:: bash salt <minion_id> shadow.login_failures ALL ''' cmd = 'lsuser -a unsuccessful_login_count {0}'.format(user) cmd += " | grep -E 'unsuccessful_login_count=([3-9]|[0-9][0-9]+)'" out = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=True) ret = [] lines = out['stdout'].splitlines() for line in lines: ret.append(line.split()[0]) return ret
Query for all accounts which have 3 or more login failures. CLI Example: .. code-block:: bash salt <minion_id> shadow.login_failures ALL
def lookup(self, mbid, include=()): """ Lookup an entity directly from a specified :term:`MBID`\ . """ if include: for included in include: if included not in self.available_includes: raise ValueError( "{0!r} is not an includable entity for {1}".format( included, self.path, ), ) query_string = "?" + urlencode([("inc", " ".join(include))]) else: query_string = "" path = "{0}/{1}{2}".format(self.path, mbid, query_string) return self.client.request(path)
Lookup an entity directly from a specified :term:`MBID`\ .
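For reference, the query string built from `include` is plain urlencode, which joins the space-separated includes with '+'; a standalone sketch:

from urllib.parse import urlencode

include = ('recordings', 'releases')
query_string = "?" + urlencode([("inc", " ".join(include))])
print(query_string)  # ?inc=recordings+releases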
def listDataTypes(self, datatype="", dataset=""): """ API to list data types known to dbs (when no parameter supplied). :param dataset: Returns data type (of primary dataset) of the dataset (Optional) :type dataset: str :param datatype: List specific data type :type datatype: str :returns: List of dictionaries containing the following keys (primary_ds_type_id, data_type) :rtype: list of dicts """ try: return self.dbsDataType.listDataType(dataType=datatype, dataset=dataset) except dbsException as de: dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError) except Exception as ex: sError = "DBSReaderModel/listDataTypes. %s\n. Exception trace: \n %s" \ % (ex, traceback.format_exc()) dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
API to list data types known to dbs (when no parameter supplied). :param dataset: Returns data type (of primary dataset) of the dataset (Optional) :type dataset: str :param datatype: List specific data type :type datatype: str :returns: List of dictionaries containing the following keys (primary_ds_type_id, data_type) :rtype: list of dicts
def _freeze_relations(self, relations): """Freeze relation.""" if relations: sel = relations[0] sel.relations.extend(relations[1:]) return ct.SelectorList([sel.freeze()]) else: return ct.SelectorList()
Freeze relation.
def addports(self):
    """
    Look through the list of service ports and construct a list of tuples
    where each tuple is used to describe a port and its list of methods as:
    (port, [method]).  Each method is a tuple: (name, [pdef,..]) where each
    pdef is a tuple: (param-name, type).
    """
    timer = metrics.Timer()
    timer.start()
    for port in self.service.ports:
        p = self.findport(port)
        for op in port.binding.operations.values():
            m = p[0].method(op.name)
            binding = m.binding.input
            method = (m.name, binding.param_defs(m))
            p[1].append(method)
            metrics.log.debug("method '%s' created: %s", m.name, timer)
        p[1].sort()
    timer.stop()
Look through the list of service ports and construct a list of tuples where each tuple is used to describe a port and its list of methods as: (port, [method]). Each method is a tuple: (name, [pdef,..]) where each pdef is a tuple: (param-name, type).
def discover(self, metafile): """ Determine what summary stats, time series, and CDF csv exist for the reports that need to be diffed. :return: boolean: return whether the summary stats / time series / CDF csv summary was successfully located """ for report in self.reports: if report.remote_location == 'local': if naarad.utils.is_valid_file(os.path.join(os.path.join(report.location, self.resource_path), metafile)): with open(os.path.join(os.path.join(report.location, self.resource_path), metafile), 'r') as meta_file: if metafile == CONSTANTS.STATS_CSV_LIST_FILE: report.stats = meta_file.readlines()[0].split(',') elif metafile == CONSTANTS.PLOTS_CSV_LIST_FILE: report.datasource = meta_file.readlines()[0].split(',') elif metafile == CONSTANTS.CDF_PLOTS_CSV_LIST_FILE: report.cdf_datasource = meta_file.readlines()[0].split(',') else: report.status = 'NO_SUMMARY_STATS' self.status = 'ERROR' logger.error('Unable to access summary stats file for report :%s', report.label) return False else: stats_url = report.remote_location + '/' + self.resource_path + '/' + metafile meta_file_data = naarad.httpdownload.stream_url(stats_url) if meta_file_data: if metafile == CONSTANTS.STATS_CSV_LIST_FILE: report.stats = meta_file_data.split(',') elif metafile == CONSTANTS.PLOTS_CSV_LIST_FILE: report.datasource = meta_file_data.split(',') elif metafile == CONSTANTS.CDF_PLOTS_CSV_LIST_FILE: report.cdf_datasource = meta_file_data.split(',') else: report.status = 'NO_SUMMARY_STATS' self.status = 'ERROR' logger.error('No summary stats available for report :%s', report.label) return False return True
Determine what summary stats, time series, and CDF csv exist for the reports that need to be diffed. :return: boolean: return whether the summary stats / time series / CDF csv summary was successfully located
def uimports(code): """ converts CPython module names into MicroPython equivalents """ for uimport in UIMPORTLIST: uimport = bytes(uimport, 'utf8') code = code.replace(uimport, b'u' + uimport) return code
converts CPython module names into MicroPython equivalents
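A small sketch of the rewrite with an assumed two-entry UIMPORTLIST (the real list lives at module level); note the naive byte replace would also rewrite matches inside longer identifiers:

UIMPORTLIST = ('json', 'os')  # hypothetical subset of the real list
code = b"import json\nimport os\n"
for uimport in UIMPORTLIST:
    uimport = bytes(uimport, 'utf8')
    code = code.replace(uimport, b'u' + uimport)
print(code)  # b'import ujson\nimport uos\n'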
def bios_image(self, bios_image): """ Sets the bios image for this QEMU VM. :param bios_image: QEMU bios image path """ self._bios_image = self.manager.get_abs_image_path(bios_image) log.info('QEMU VM "{name}" [{id}] has set the QEMU bios image path to {bios_image}'.format(name=self._name, id=self._id, bios_image=self._bios_image))
Sets the bios image for this QEMU VM. :param bios_image: QEMU bios image path
def names(self): """Names, by which the instance can be retrieved.""" if getattr(self, 'key', None) is None: result = [] else: result = [self.key] if hasattr(self, 'aliases'): result.extend(self.aliases) return result
Names, by which the instance can be retrieved.
def url(self): """The url for this collection.""" if self.parent is None: # TODO: differing API Versions? pieces = [self.client.base_url, 'api', 'atlas', 'v2'] else: pieces = [self.parent.url] pieces.append(self.model_class.path) return '/'.join(pieces)
The url for this collection.
def translate_latex2unicode(text, kb_file=None):
    """Translate latex text to unicode.

    This function will take given text, presumably containing LaTeX symbols,
    and attempts to translate it to Unicode using the given or default KB
    translation table located under CFG_ETCDIR/bibconvert/KB/latex-to-unicode.kb.
    The translated Unicode string will then be returned.

    If the translation table and compiled regular expression object are not
    previously generated in the current session, they will be.

    :param text: a text presumably containing LaTeX symbols.
    :type text: string

    :param kb_file: full path to file containing latex2unicode translations.
                    Defaults to CFG_ETCDIR/bibconvert/KB/latex-to-unicode.kb
    :type kb_file: string

    :return: Unicode representation of translated text
    :rtype: unicode
    """
    if kb_file is None:
        kb_file = get_kb_filename()

    # First decode input text to Unicode
    try:
        text = decode_to_unicode(text)
    except UnicodeDecodeError:
        text = unicode(wash_for_utf8(text))
    # Load translation table, if required
    if CFG_LATEX_UNICODE_TRANSLATION_CONST == {}:
        _load_latex2unicode_constants(kb_file)
    # Find all matches and replace text
    for match in CFG_LATEX_UNICODE_TRANSLATION_CONST['regexp_obj'] \
            .finditer(text):
        # If LaTeX style markers {, } and $ are before or after the
        # matching text, it will replace those as well
        text = re.sub(r"[\{\$]?%s[\}\$]?" % (re.escape(match.group()),),
                      CFG_LATEX_UNICODE_TRANSLATION_CONST[
                          'table'][match.group()],
                      text)
    # Return Unicode representation of translated text
    return text
Translate latex text to unicode.

This function will take given text, presumably containing LaTeX symbols, and attempts to translate it to Unicode using the given or default KB translation table located under CFG_ETCDIR/bibconvert/KB/latex-to-unicode.kb. The translated Unicode string will then be returned.

If the translation table and compiled regular expression object are not previously generated in the current session, they will be.

:param text: a text presumably containing LaTeX symbols.
:type text: string

:param kb_file: full path to file containing latex2unicode translations. Defaults to CFG_ETCDIR/bibconvert/KB/latex-to-unicode.kb
:type kb_file: string

:return: Unicode representation of translated text
:rtype: unicode
def extendedEuclid(a, b): """return a tuple of three values: x, y and z, such that x is the GCD of a and b, and x = y * a + z * b""" if a == 0: return b, 0, 1 else: g, y, x = extendedEuclid(b % a, a) return g, x - (b // a) * y, y
return a tuple of three values: x, y and z, such that x is the GCD of a and b, and x = y * a + z * b
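A quick sanity check of the Bezout identity from the docstring, for a=240 and b=46 (gcd 2):

g, y, z = extendedEuclid(240, 46)
print(g)                       # 2
print(y, z)                    # -9 47
print(y * 240 + z * 46 == g)   # True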
def soma_points(self): '''Get the soma points''' db = self.data_block return db[db[:, COLS.TYPE] == POINT_TYPE.SOMA]
Get the soma points
def decrypt(self, ciphertext): """Return plaintext for given ciphertext.""" # String to bytes. cipherbytes = ciphertext.encode('utf8') # Decode from Base64. try: combined = base64.b64decode(cipherbytes) except (base64.binascii.Error, TypeError) as e: # base64.binascii.Error for Python 3. # TypeError for Python 2. raise DataIntegrityError("Cipher text is damaged: {}".format(e)) # Split out the nonce, tag, and encrypted data. nonce = combined[:12] if len(nonce) != 12: raise DataIntegrityError("Cipher text is damaged: invalid nonce length") tag = combined[12:28] if len(tag) != 16: raise DataIntegrityError("Cipher text is damaged: invalid tag length") encrypted = combined[28:] # Construct AES cipher, with old nonce. cipher = AES.new(self.cipher_key, AES.MODE_GCM, nonce) # Decrypt and verify. try: compressed = cipher.decrypt_and_verify(encrypted, tag) except ValueError as e: raise DataIntegrityError("Cipher text is damaged: {}".format(e)) # Decompress plaintext bytes. plainbytes = zlib.decompress(compressed) # Bytes to string. plaintext = plainbytes.decode('utf8') # Return plaintext. return plaintext
Return plaintext for given ciphertext.
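For orientation, a minimal sketch of the matching encrypt step this decrypt expects: zlib-compressed payload, AES-GCM with a 12-byte nonce and 16-byte tag, packed as nonce + tag + data and Base64-wrapped. The standalone `encrypt_sketch` and its `cipher_key` argument are assumptions for illustration; the real class supplies its own key and method.

import base64
import zlib
from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes

def encrypt_sketch(cipher_key, plaintext):
    # Compress, encrypt with AES-GCM, then pack nonce + tag + ciphertext.
    compressed = zlib.compress(plaintext.encode('utf8'))
    nonce = get_random_bytes(12)  # decrypt() above slices exactly 12 bytes
    cipher = AES.new(cipher_key, AES.MODE_GCM, nonce=nonce)
    encrypted, tag = cipher.encrypt_and_digest(compressed)  # tag is 16 bytes
    return base64.b64encode(nonce + tag + encrypted).decode('utf8')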
def fetch_access_token_by_client_credentials(self):
    '''
    There are three ways to let you start using KKBOX's Open/Partner
    API. The first way among them is to generate a client credential
    to fetch an access token to let KKBOX identify you. It allows you
    to access public data from KKBOX such as public albums, playlists
    and so on.

    However, you cannot use client credentials to access private data
    of a user. You have to let users log in to KKBOX and grant
    permissions for you to do so. You cannot use client credentials
    to do media playback either, since it requires a Premium
    Membership.

    :return: an access token
    :rtype: :class:`kkbox_sdk.KKBOXAccessToken`

    See `https://docs-en.kkbox.codes/docs/appendix-a`.
    '''
    client_credential_base = '%s:%s' % (self.client_id, self.client_secret)
    try:
        client_credentials = base64.b64encode(
            bytes(client_credential_base, 'utf-8'))
    except TypeError:
        # Python 2: bytes() takes no encoding argument.
        client_credentials = base64.b64encode(client_credential_base)
    client_credentials = client_credentials.decode('utf-8')
    headers = {'Authorization': 'Basic ' + client_credentials,
               'Content-type': 'application/x-www-form-urlencoded'}
    post_parameters = {'grant_type': 'client_credentials',
                       'scope': 'user_profile user_territory'}
    json_object = self.http._post_data(KKBOXOAuth.OAUTH_TOKEN_URL,
                                       post_parameters, headers)
    self.access_token = KKBOXAccessToken(**json_object)
    return self.access_token
There are three ways to let you start using KKBOX's Open/Partner API. The first way among them is to generate a client credential to fetch an access token to let KKBOX identify you. It allows you to access public data from KKBOX such as public albums, playlists and so on.

However, you cannot use client credentials to access private data of a user. You have to let users log in to KKBOX and grant permissions for you to do so. You cannot use client credentials to do media playback either, since it requires a Premium Membership.

:return: an access token
:rtype: :class:`kkbox_sdk.KKBOXAccessToken`

See `https://docs-en.kkbox.codes/docs/appendix-a`.
def pseudosection(self, column='r', filename=None, log10=False, **kwargs):
    """Plot a pseudosection of the given column. Note that this function
    only works with dipole-dipole data at the moment.

    Parameters
    ----------
    column : string, optional
        Column to plot into the pseudosection, default: r
    filename : string, optional
        if not None, save the resulting figure directly to disk
    log10 : bool, optional
        if True, then plot values in log10, default: False
    **kwargs : dict
        all additional parameters are directly provided to
        :py:func:`reda.plotters.pseudoplots.PS.plot_pseudosection_type2`

    Returns
    -------
    fig : :class:`matplotlib.Figure`
        matplotlib figure object
    ax : :class:`matplotlib.axes`
        matplotlib axes object
    cb : colorbar object
        matplotlib colorbar object
    """
    fig, ax, cb = PS.plot_pseudosection_type2(
        self.data, column=column, log10=log10, **kwargs
    )
    if filename is not None:
        fig.savefig(filename, dpi=300)
    return fig, ax, cb
Plot a pseudosection of the given column. Note that this function only works with dipole-dipole data at the moment.

Parameters
----------
column : string, optional
    Column to plot into the pseudosection, default: r
filename : string, optional
    if not None, save the resulting figure directly to disk
log10 : bool, optional
    if True, then plot values in log10, default: False
**kwargs : dict
    all additional parameters are directly provided to
    :py:func:`reda.plotters.pseudoplots.PS.plot_pseudosection_type2`

Returns
-------
fig : :class:`matplotlib.Figure`
    matplotlib figure object
ax : :class:`matplotlib.axes`
    matplotlib axes object
cb : colorbar object
    matplotlib colorbar object
def content_children(self): """ A sequence containing the text-container child elements of this ``<a:p>`` element, i.e. (a:r|a:br|a:fld). """ text_types = {CT_RegularTextRun, CT_TextLineBreak, CT_TextField} return tuple(elm for elm in self if type(elm) in text_types)
A sequence containing the text-container child elements of this ``<a:p>`` element, i.e. (a:r|a:br|a:fld).
def get_pool_details(self, pool_id): """ Method to return object pool by id Param pool_id: pool id Returns object pool """ uri = 'api/v3/pool/details/%s/' % pool_id return super(ApiPool, self).get(uri)
Method to return object pool by id Param pool_id: pool id Returns object pool
def MatrixTriangularSolve(a, rhs, lower, adj):
    """
    Matrix triangular solve op.
    """
    trans = 0 if not adj else 2
    r = np.empty(rhs.shape).astype(a.dtype)
    for coord in np.ndindex(a.shape[:-2]):
        pos = coord + (Ellipsis,)
        # trans=2 already solves the conjugate-transpose system a^H x = rhs,
        # so a is passed unconjugated in the adjoint case; conjugating it as
        # well would solve the plain transpose system instead.
        r[pos] = sp.linalg.solve_triangular(a[pos], rhs[pos], trans=trans, lower=lower)
    return r,
Matrix triangular solve op.
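A quick forward-substitution check on a 2x2 lower-triangular system (no batch dimensions, real dtype, so the adjoint branch is not exercised); assumes the function above and its numpy/scipy imports are in scope:

import numpy as np

a = np.array([[2.0, 0.0],
              [1.0, 3.0]])
rhs = np.array([[4.0], [10.0]])
(x,) = MatrixTriangularSolve(a, rhs, lower=True, adj=False)
print(np.allclose(a @ x, rhs))  # True; x == [[2.0], [2.666...]]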
def print_file(self, f=sys.stdout, file_format="mwtab"):
    """Print :class:`~mwtab.mwtab.MWTabFile` into a file or stdout.

    :param io.StringIO f: writable file-like stream.
    :param str file_format: Format to use: `mwtab` or `json`.
    :return: None
    :rtype: :py:obj:`None`
    """
    if file_format == "mwtab":
        for key in self:
            if key == "SUBJECT_SAMPLE_FACTORS":
                print("#SUBJECT_SAMPLE_FACTORS: \tSUBJECT(optional)[tab]SAMPLE[tab]FACTORS(NAME:VALUE pairs separated by |)[tab]Additional sample data", file=f)
            elif key == "METABOLOMICS WORKBENCH":
                print(self.header, file=f)
            else:
                print("#{}".format(key), file=f)
            self.print_block(key, f=f, file_format=file_format)
        print("#END", file=f)
    elif file_format == "json":
        print(self._to_json(), file=f)
Print :class:`~mwtab.mwtab.MWTabFile` into a file or stdout.

:param io.StringIO f: writable file-like stream.
:param str file_format: Format to use: `mwtab` or `json`.
:return: None
:rtype: :py:obj:`None`
def unindex_template(self, tpl): """ Unindex a template from the `templates` container. :param tpl: The template to un-index :type tpl: alignak.objects.item.Item :return: None """ name = getattr(tpl, 'name', '') try: del self.name_to_template[name] except KeyError: # pragma: no cover, simple protection pass
Unindex a template from the `templates` container. :param tpl: The template to un-index :type tpl: alignak.objects.item.Item :return: None
def avl_release_parent(node): """ removes the parent of a child """ parent = node.parent if parent is not None: if parent.right is node: parent.right = None elif parent.left is node: parent.left = None else: raise AssertionError('impossible state') node.parent = None parent.balance = max(height(parent.right), height(parent.left)) + 1 return node, parent
removes the parent of a child
def get_K(rho, z, alpha=1.0, zint=100.0, n2n1=0.95, get_hdet=False, K=1, Kprefactor=None, return_Kprefactor=False, npts=20, **kwargs): """ Calculates one of three electric field integrals. Internal function for calculating point spread functions. Returns one of three electric field integrals that describe the electric field near the focus of a lens; these integrals appear in Hell's psf calculation. Parameters ---------- rho : numpy.ndarray Rho in cylindrical coordinates, in units of 1/k. z : numpy.ndarray Z in cylindrical coordinates, in units of 1/k. `rho` and `z` must be the same shape alpha : Float, optional The acceptance angle of the lens, on (0,pi/2). Default is 1. zint : Float, optional The distance of the len's unaberrated focal point from the optical interface, in units of 1/k. Default is 100. n2n1 : Float, optional The ratio n2/n1 of the index mismatch between the sample (index n2) and the optical train (index n1). Must be on [0,inf) but should be near 1. Default is 0.95 get_hdet : Bool, optional Set to True to get the detection portion of the psf; False to get the illumination portion of the psf. Default is True K : {1, 2, 3}, optional Which of the 3 integrals to evaluate. Default is 1 Kprefactor : numpy.ndarray or None This array is calculated internally and optionally returned; pass it back to avoid recalculation and increase speed. Default is None, i.e. calculate it internally. return_Kprefactor : Bool, optional Set to True to also return the Kprefactor (parameter above) to speed up the calculation for the next values of K. Default is False npts : Int, optional The number of points to use for Gauss-Legendre quadrature of the integral. Default is 20, which is a good number for x,y,z less than 100 or so. Returns ------- kint : numpy.ndarray The integral K_i; rho.shape numpy.array [, Kprefactor] : numpy.ndarray The prefactor that is independent of which integral is being calculated but does depend on the parameters; can be passed back to the function for speed. Notes ----- npts=20 gives double precision (no difference between 20, 30, and doing all the integrals with scipy.quad). The integrals are only over the acceptance angle of the lens, so for moderate x,y,z they don't vary too rapidly. For x,y,z, zint large compared to 100, a higher npts might be necessary. """ # Comments: # This is the only function that relies on rho,z being numpy.arrays, # and it's just in a flag that I've added.... move to psf? 
if type(rho) != np.ndarray or type(z) != np.ndarray or (rho.shape != z.shape):
        raise ValueError('rho and z must be np.arrays of same shape.')

    pts, wts = np.polynomial.legendre.leggauss(npts)
    n1n2 = 1.0/n2n1

    rr = np.ravel(rho)
    zr = np.ravel(z)

    #Getting the array of points to quad at
    cos_theta = 0.5*(1-np.cos(alpha))*pts+0.5*(1+np.cos(alpha))
    #[cos_theta,rho,z]

    if Kprefactor is None:
        Kprefactor = get_Kprefactor(z, cos_theta, zint=zint, \
            n2n1=n2n1, get_hdet=get_hdet, **kwargs)

    if K==1:
        part_1 = j0(np.outer(rr,np.sqrt(1-cos_theta**2)))*\
            np.outer(np.ones_like(rr), 0.5*(get_taus(cos_theta,n2n1=n2n1)+\
            get_taup(cos_theta,n2n1=n2n1)*csqrt(1-n1n2**2*(1-cos_theta**2))))
        integrand = Kprefactor * part_1
    elif K==2:
        part_2=j2(np.outer(rr,np.sqrt(1-cos_theta**2)))*\
            np.outer(np.ones_like(rr),0.5*(get_taus(cos_theta,n2n1=n2n1)-\
            get_taup(cos_theta,n2n1=n2n1)*csqrt(1-n1n2**2*(1-cos_theta**2))))
        integrand = Kprefactor * part_2
    elif K==3:
        # Use the raveled rr, consistent with the K=1,2 branches (np.outer
        # flattens its inputs, so the numerical result is unchanged).
        part_3=j1(np.outer(rr,np.sqrt(1-cos_theta**2)))*\
            np.outer(np.ones_like(rr), n1n2*get_taup(cos_theta,n2n1=n2n1)*\
            np.sqrt(1-cos_theta**2))
        integrand = Kprefactor * part_3
    else:
        raise ValueError('K=1,2,3 only...')

    big_wts=np.outer(np.ones_like(rr), wts)
    kint = (big_wts*integrand).sum(axis=1) * 0.5*(1-np.cos(alpha))

    if return_Kprefactor:
        return kint.reshape(rho.shape), Kprefactor
    else:
        return kint.reshape(rho.shape)
Calculates one of three electric field integrals. Internal function for calculating point spread functions. Returns one of three electric field integrals that describe the electric field near the focus of a lens; these integrals appear in Hell's psf calculation. Parameters ---------- rho : numpy.ndarray Rho in cylindrical coordinates, in units of 1/k. z : numpy.ndarray Z in cylindrical coordinates, in units of 1/k. `rho` and `z` must be the same shape alpha : Float, optional The acceptance angle of the lens, on (0,pi/2). Default is 1. zint : Float, optional The distance of the len's unaberrated focal point from the optical interface, in units of 1/k. Default is 100. n2n1 : Float, optional The ratio n2/n1 of the index mismatch between the sample (index n2) and the optical train (index n1). Must be on [0,inf) but should be near 1. Default is 0.95 get_hdet : Bool, optional Set to True to get the detection portion of the psf; False to get the illumination portion of the psf. Default is True K : {1, 2, 3}, optional Which of the 3 integrals to evaluate. Default is 1 Kprefactor : numpy.ndarray or None This array is calculated internally and optionally returned; pass it back to avoid recalculation and increase speed. Default is None, i.e. calculate it internally. return_Kprefactor : Bool, optional Set to True to also return the Kprefactor (parameter above) to speed up the calculation for the next values of K. Default is False npts : Int, optional The number of points to use for Gauss-Legendre quadrature of the integral. Default is 20, which is a good number for x,y,z less than 100 or so. Returns ------- kint : numpy.ndarray The integral K_i; rho.shape numpy.array [, Kprefactor] : numpy.ndarray The prefactor that is independent of which integral is being calculated but does depend on the parameters; can be passed back to the function for speed. Notes ----- npts=20 gives double precision (no difference between 20, 30, and doing all the integrals with scipy.quad). The integrals are only over the acceptance angle of the lens, so for moderate x,y,z they don't vary too rapidly. For x,y,z, zint large compared to 100, a higher npts might be necessary.
def _process_state_embryo(self, job_record): """ method that takes care of processing job records in STATE_EMBRYO state""" uow, is_duplicate = self.insert_and_publish_uow(job_record, 0, 0) self.update_job(job_record, uow, job.STATE_IN_PROGRESS)
method that takes care of processing job records in STATE_EMBRYO state
def add(self, field, op=None, val=None): """Update report fields to include new one, if it doesn't already. :param field: The field to include :type field: Field :param op: Operation :type op: ConstraintOperator :return: None """ if field.has_subfield(): self._fields[field.full_name] = 1 else: self._fields[field.name] = 1 if op and op.is_size() and not op.is_variable(): # get minimal part of array with slicing, # but cannot use slice with variables self._slices[field.name] = val + 1 if op and op.is_variable(): # add the variable too self._fields[val] = 1
Update report fields to include new one, if it doesn't already. :param field: The field to include :type field: Field :param op: Operation :type op: ConstraintOperator :return: None
def month_days(year, month):
    '''How many days are in a given month of a given year'''
    if not 1 <= month <= 13:
        raise ValueError("Incorrect month index")

    # First of all, dispose of fixed-length 29 day months
    if month in (IYYAR, TAMMUZ, ELUL, TEVETH, VEADAR):
        return 29

    # If it's not a leap year, Adar has 29 days
    if month == ADAR and not leap(year):
        return 29

    # If it's Heshvan, days depend on length of year
    if month == HESHVAN and (year_days(year) % 10) != 5:
        return 29

    # Similarly, Kislev varies with the length of year
    if month == KISLEV and (year_days(year) % 10) == 3:
        return 29

    # Nope, it's a 30 day month
    return 30
How many days are in a given month of a given year
def transforms(self) -> Mapping[Type, Iterable[Type]]: """The available data transformers.""" try: return getattr(self.__class__, "transform")._transforms except AttributeError: return {}
The available data transformers.
def get_template_names(self): """Returns the template name to use for this request.""" if self.request.is_ajax(): template = self.ajax_template_name else: template = self.template_name return template
Returns the template name to use for this request.
def _slice_area_from_bbox(self, src_area, dst_area, ll_bbox=None, xy_bbox=None): """Slice the provided area using the bounds provided.""" if ll_bbox is not None: dst_area = AreaDefinition( 'crop_area', 'crop_area', 'crop_latlong', {'proj': 'latlong'}, 100, 100, ll_bbox) elif xy_bbox is not None: dst_area = AreaDefinition( 'crop_area', 'crop_area', 'crop_xy', src_area.proj_dict, src_area.x_size, src_area.y_size, xy_bbox) x_slice, y_slice = src_area.get_area_slices(dst_area) return src_area[y_slice, x_slice], y_slice, x_slice
Slice the provided area using the bounds provided.
def configure(args, parser): """ Color guide - red: Error and warning messages - green: Welcome messages (use sparingly) - blue: Default values - bold_magenta: Action items - bold_black: Parts of code to be run or copied that should be modified """ if not args.force and on_travis(): parser.error(red("doctr appears to be running on Travis. Use " "doctr configure --force to run anyway.")) if not args.authenticate: args.upload_key = False if args.travis_tld: if args.travis_tld in ['c', 'com', '.com', 'travis-ci.com']: args.travis_tld = 'travis-ci.com' else: args.travis_tld = 'travis-ci.org' print(green(dedent("""\ Welcome to Doctr. We need to ask you a few questions to get you on your way to automatically deploying from Travis CI to GitHub pages. """))) login_kwargs = {} if args.authenticate: while not login_kwargs: try: login_kwargs = GitHub_login() except AuthenticationFailed as e: print(red(e)) else: login_kwargs = {'auth': None, 'headers': None} GitHub_token = None get_build_repo = False default_repo = guess_github_repo() while not get_build_repo: try: if default_repo: build_repo = input("What repo do you want to build the docs for? [{default_repo}] ".format(default_repo=blue(default_repo))) if not build_repo: build_repo = default_repo else: build_repo = input("What repo do you want to build the docs for (org/reponame, like 'drdoctr/doctr')? ") is_private = check_repo_exists(build_repo, service='github', **login_kwargs)['private'] if is_private and not args.authenticate: sys.exit(red("--no-authenticate is not supported for private repositories.")) headers = {} travis_token = None if is_private: if args.token: GitHub_token = generate_GitHub_token(note="Doctr token for pushing to gh-pages from Travis (for {build_repo}).".format(build_repo=build_repo), scopes=["read:org", "user:email", "repo"], **login_kwargs)['token'] travis_token = get_travis_token(GitHub_token=GitHub_token, **login_kwargs) headers['Authorization'] = "token {}".format(travis_token) service = args.travis_tld if args.travis_tld else 'travis' c = check_repo_exists(build_repo, service=service, ask=True, headers=headers) tld = c['service'][-4:] is_private = c['private'] or is_private if is_private and not args.authenticate: sys.exit(red("--no-authenticate is not supported for private repos.")) get_build_repo = True except GitHubError: raise except RuntimeError as e: print(red('\n{!s:-^{}}\n'.format(e, 70))) get_deploy_repo = False while not get_deploy_repo: try: deploy_repo = input("What repo do you want to deploy the docs to? [{build_repo}] ".format(build_repo=blue(build_repo))) if not deploy_repo: deploy_repo = build_repo if deploy_repo != build_repo: check_repo_exists(deploy_repo, service='github', **login_kwargs) get_deploy_repo = True except GitHubError: raise except RuntimeError as e: print(red('\n{!s:-^{}}\n'.format(e, 70))) N = IncrementingInt(1) header = green("\n================== You should now do the following ==================\n") if args.token: if not GitHub_token: GitHub_token = generate_GitHub_token(**login_kwargs)['token'] encrypted_variable = encrypt_variable("GH_TOKEN={GitHub_token}".format(GitHub_token=GitHub_token).encode('utf-8'), build_repo=build_repo, tld=tld, travis_token=travis_token, **login_kwargs) print(dedent(""" A personal access token for doctr has been created. 
You can go to https://github.com/settings/tokens to revoke it.""")) print(header) else: deploy_key_repo, env_name, keypath = get_deploy_key_repo(deploy_repo, args.key_path) private_ssh_key, public_ssh_key = generate_ssh_key() key = encrypt_to_file(private_ssh_key, keypath + '.enc') del private_ssh_key # Prevent accidental use below public_ssh_key = public_ssh_key.decode('ASCII') encrypted_variable = encrypt_variable(env_name.encode('utf-8') + b"=" + key, build_repo=build_repo, tld=tld, travis_token=travis_token, **login_kwargs) deploy_keys_url = 'https://github.com/{deploy_repo}/settings/keys'.format(deploy_repo=deploy_key_repo) if args.upload_key: upload_GitHub_deploy_key(deploy_key_repo, public_ssh_key, **login_kwargs) print(dedent(""" The deploy key has been added for {deploy_repo}. You can go to {deploy_keys_url} to revoke the deploy key.\ """.format(deploy_repo=deploy_key_repo, deploy_keys_url=deploy_keys_url, keypath=keypath))) print(header) else: print(header) print(dedent("""\ {N}. {BOLD_MAGENTA}Go to {deploy_keys_url} and add the following as a new key:{RESET} {ssh_key} {BOLD_MAGENTA}Be sure to allow write access for the key.{RESET} """.format(ssh_key=public_ssh_key, deploy_keys_url=deploy_keys_url, N=N, BOLD_MAGENTA=BOLD_MAGENTA, RESET=RESET))) print(dedent("""\ {N}. {BOLD_MAGENTA}Add the file {keypath}.enc to be staged for commit:{RESET} git add {keypath}.enc """.format(keypath=keypath, N=N, BOLD_MAGENTA=BOLD_MAGENTA, RESET=RESET))) options = '--built-docs ' + bold_black('<path/to/built/html/>') if args.key_path: options += ' --key-path {keypath}.enc'.format(keypath=keypath) if deploy_repo != build_repo: options += ' --deploy-repo {deploy_repo}'.format(deploy_repo=deploy_repo) key_type = "deploy key" if args.token: options += ' --token' key_type = "personal access token" print(dedent("""\ {N}. {BOLD_MAGENTA}Add these lines to your `.travis.yml` file:{RESET} env: global: # Doctr {key_type} for {deploy_repo} - secure: "{encrypted_variable}" script: - set -e - {BOLD_BLACK}<Command to build your docs>{RESET} - pip install doctr - doctr deploy {options} {BOLD_BLACK}<target-directory>{RESET} """.format(options=options, N=N, key_type=key_type, encrypted_variable=encrypted_variable.decode('utf-8'), deploy_repo=deploy_repo, BOLD_MAGENTA=BOLD_MAGENTA, BOLD_BLACK=BOLD_BLACK, RESET=RESET))) print(dedent("""\ Replace the text in {BOLD_BLACK}<angle brackets>{RESET} with the relevant things for your repository. """.format(BOLD_BLACK=BOLD_BLACK, RESET=RESET))) print(dedent("""\ Note: the `set -e` prevents doctr from running when the docs build fails. We put this code under `script:` so that if doctr fails it causes the build to fail. """)) print(dedent("""\ {N}. {BOLD_MAGENTA}Commit and push these changes to your GitHub repository.{RESET} The docs should now build automatically on Travis. """.format(N=N, BOLD_MAGENTA=BOLD_MAGENTA, RESET=RESET))) print("See the documentation at https://drdoctr.github.io/ for more information.")
Color guide - red: Error and warning messages - green: Welcome messages (use sparingly) - blue: Default values - bold_magenta: Action items - bold_black: Parts of code to be run or copied that should be modified
def create_resource_quota(self, name, quota_json):
    """
    Create a resource quota, replacing any existing quota of the same
    name. Used to prevent builds being scheduled and to wait for
    running builds to finish.

    :return: response
    """
    url = self._build_k8s_url("resourcequotas/")
    response = self._post(url, data=json.dumps(quota_json),
                          headers={"Content-Type": "application/json"})
    if response.status_code == http_client.CONFLICT:
        url = self._build_k8s_url("resourcequotas/%s" % name)
        response = self._put(url, data=json.dumps(quota_json),
                             headers={"Content-Type": "application/json"})

    check_response(response)
    return response
Create a resource quota, replacing any existing quota of the same name. Used to prevent builds being scheduled and to wait for running builds to finish.

:return: response
def __roll(self, unrolled): """Converts parameter array back into matrices.""" rolled = [] index = 0 for count in range(len(self.__sizes) - 1): in_size = self.__sizes[count] out_size = self.__sizes[count+1] theta_unrolled = np.matrix(unrolled[index:index+(in_size+1)*out_size]) theta_rolled = theta_unrolled.reshape((out_size, in_size+1)) rolled.append(theta_rolled) index += (in_size + 1) * out_size return rolled
Converts parameter array back into matrices.
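Since `__roll` is name-mangled, here is a standalone sketch of the same bookkeeping for hypothetical layer sizes [2, 3, 1]: the 13 unrolled parameters come back as a 3x3 and a 1x4 matrix (each layer gets a bias column, hence the +1):

import numpy as np

sizes = [2, 3, 1]           # hypothetical network layer sizes
unrolled = np.arange(13.0)  # (2+1)*3 + (3+1)*1 = 13 parameters
rolled, index = [], 0
for count in range(len(sizes) - 1):
    in_size, out_size = sizes[count], sizes[count + 1]
    chunk = unrolled[index:index + (in_size + 1) * out_size]
    rolled.append(np.matrix(chunk).reshape((out_size, in_size + 1)))
    index += (in_size + 1) * out_size
print([t.shape for t in rolled])  # [(3, 3), (1, 4)]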
def attribute(name, value, getter=None, setter=None, deleter=None, label=None, desc=None, meta=None): """ Annotates a model attribute. @param name: attribute name, unique for a model. @type name: str or unicode @param value: attribute type information. @type value: implementer of L{src.feat.models.interface.IValueInfo} @param getter: an effect or None if the attribute is write-only; the retrieved value that will be validated; see feat.models.call for effect information. @type getter: callable or None @param setter: an effect or None if the attribute is read-only; the new value will be validated, possibly converted and returned; see feat.models.call for effect information. @type setter: callable or None @param deleter: an effect or None if the attribute cannot be deleted; @type deleter: callable or None @param label: the attribute label or None. @type label: str or unicode or None @param desc: the description of the attribute or None if not documented. @type desc: str or unicode or None @param meta: model item metadata atoms. @type meta: list of tuple """ _annotate("attribute", name, value, getter=getter, setter=setter, deleter=deleter, label=label, desc=desc, meta=meta)
Annotates a model attribute. @param name: attribute name, unique for a model. @type name: str or unicode @param value: attribute type information. @type value: implementer of L{src.feat.models.interface.IValueInfo} @param getter: an effect or None if the attribute is write-only; the retrieved value that will be validated; see feat.models.call for effect information. @type getter: callable or None @param setter: an effect or None if the attribute is read-only; the new value will be validated, possibly converted and returned; see feat.models.call for effect information. @type setter: callable or None @param deleter: an effect or None if the attribute cannot be deleted; @type deleter: callable or None @param label: the attribute label or None. @type label: str or unicode or None @param desc: the description of the attribute or None if not documented. @type desc: str or unicode or None @param meta: model item metadata atoms. @type meta: list of tuple
def getMonitor(self): """ Returns an instance of the ``Screen`` object this Location is inside. Returns the primary screen if the Location isn't positioned in any screen. """ from .RegionMatching import Screen scr = self.getScreen() return scr if scr is not None else Screen(0)
Returns an instance of the ``Screen`` object this Location is inside. Returns the primary screen if the Location isn't positioned in any screen.
def remove_isolated_nodes(graph): """Remove isolated nodes from the network, in place. :param pybel.BELGraph graph: A BEL graph """ nodes = list(nx.isolates(graph)) graph.remove_nodes_from(nodes)
Remove isolated nodes from the network, in place. :param pybel.BELGraph graph: A BEL graph
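The mechanism is plain networkx, so a tiny nx.Graph demo shows the effect (a BELGraph is a networkx graph subclass):

import networkx as nx

g = nx.Graph()
g.add_edge('a', 'b')
g.add_node('c')  # isolated
g.remove_nodes_from(list(nx.isolates(g)))
print(sorted(g.nodes()))  # ['a', 'b']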
def hasLogger(self, logger):
    """
    Returns whether or not the given logger is tracked by this widget.

    :param      logger | <str> || <logging.Logger>
    """
    if isinstance(logger, logging.Logger):
        logger = logger.name

    return logger in self._loggers
Returns whether or not the given logger is tracked by this widget.

:param      logger | <str> || <logging.Logger>
def get_exception(self): """ Return any exception that happened during the last server request. This can be used to fetch more specific error information after using calls like `start_client`. The exception (if any) is cleared after this call. :return: an exception, or ``None`` if there is no stored exception. .. versionadded:: 1.1 """ self.lock.acquire() try: e = self.saved_exception self.saved_exception = None return e finally: self.lock.release()
Return any exception that happened during the last server request. This can be used to fetch more specific error information after using calls like `start_client`. The exception (if any) is cleared after this call. :return: an exception, or ``None`` if there is no stored exception. .. versionadded:: 1.1
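Typical paramiko usage, sketched with a hypothetical host: after a failed negotiation, `get_exception()` yields the underlying error saved during the request.

import socket
import paramiko

sock = socket.create_connection(('ssh.example.com', 22), timeout=5)  # hypothetical host
transport = paramiko.Transport(sock)
try:
    transport.start_client()
except paramiko.SSHException:
    # More specific error information saved during the failed request:
    print(transport.get_exception())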
def light_3d(self, r, kwargs_list, k=None):
    """
    computes 3d density at radius r

    :param r: coordinate in units of arcsec relative to the center of the image
    :type r: set or single 1d numpy array
    """
    r = np.array(r, dtype=float)
    flux = np.zeros_like(r)
    for i, func in enumerate(self.func_list):
        if k is None or k == i:
            # drop centroid kwargs; distinct loop names avoid shadowing the
            # model-index argument k
            kwargs = {key: val for key, val in kwargs_list[i].items()
                      if key not in ['center_x', 'center_y']}
            if self.profile_type_list[i] in ['HERNQUIST', 'HERNQUIST_ELLIPSE', 'PJAFFE',
                                             'PJAFFE_ELLIPSE', 'GAUSSIAN', 'GAUSSIAN_ELLIPSE',
                                             'MULTI_GAUSSIAN', 'MULTI_GAUSSIAN_ELLIPSE',
                                             'POWER_LAW']:
                flux += func.light_3d(r, **kwargs)
            else:
                raise ValueError('Light model %s does not support a 3d light distribution!'
                                 % self.profile_type_list[i])
    return flux
computes 3d density at radius r

:param r: coordinate in units of arcsec relative to the center of the image
:type r: set or single 1d numpy array
def _delete(self, identifier=None): """ Deletes given identifier from index. Args: identifier (str): identifier of the document to delete. """ assert identifier is not None, 'identifier argument can not be None.' writer = self.index.writer() writer.delete_by_term('identifier', identifier) writer.commit()
Deletes given identifier from index. Args: identifier (str): identifier of the document to delete.
def cosine(brands, exemplars, weighted_avg=False, sqrt=False):
    """ Return the cosine similarity between a brand's followers and the exemplars. """
    scores = {}
    for brand, followers in brands:
        if weighted_avg:
            scores[brand] = np.average([_cosine(followers, others) for others in exemplars.values()],
                                       weights=[1. / len(others) for others in exemplars.values()])
        else:
            scores[brand] = 1. * sum(_cosine(followers, others) for others in exemplars.values()) / len(exemplars)
    if sqrt:
        scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
    return scores
Return the cosine similarity between a brand's followers and the exemplars.
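The `_cosine` helper is not shown here; a minimal sketch that assumes it is a set-based cosine (|A intersect B| / sqrt(|A||B|)) reproduces the unweighted branch on toy data:

import math

def _cosine(a, b):
    # Assumed set-based cosine similarity; the real helper may differ.
    a, b = set(a), set(b)
    return len(a & b) / math.sqrt(len(a) * len(b)) if a and b else 0.0

brands = [('acme', {1, 2, 3})]
exemplars = {'ex1': {2, 3, 4}, 'ex2': {1, 2}}
followers = brands[0][1]
score = sum(_cosine(followers, o) for o in exemplars.values()) / len(exemplars)
print(round(score, 3))  # ~0.742 under these toy sets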
def process_tokens(self, tokens): """process tokens from the current module to search for module/block level options """ control_pragmas = {"disable", "enable"} for (tok_type, content, start, _, _) in tokens: if tok_type != tokenize.COMMENT: continue match = OPTION_RGX.search(content) if match is None: continue first_group = match.group(1) if ( first_group.strip() == "disable-all" or first_group.strip() == "skip-file" ): if first_group.strip() == "disable-all": self.add_message( "deprecated-pragma", line=start[0], args=("disable-all", "skip-file"), ) self.add_message("file-ignored", line=start[0]) self._ignore_file = True return try: opt, value = first_group.split("=", 1) except ValueError: self.add_message( "bad-inline-option", args=first_group.strip(), line=start[0] ) continue opt = opt.strip() if opt in self._options_methods or opt in self._bw_options_methods: try: meth = self._options_methods[opt] except KeyError: meth = self._bw_options_methods[opt] # found a "(dis|en)able-msg" pragma deprecated suppression self.add_message( "deprecated-pragma", line=start[0], args=(opt, opt.replace("-msg", "")), ) for msgid in utils._splitstrip(value): # Add the line where a control pragma was encountered. if opt in control_pragmas: self._pragma_lineno[msgid] = start[0] try: if (opt, msgid) == ("disable", "all"): self.add_message( "deprecated-pragma", line=start[0], args=("disable=all", "skip-file"), ) self.add_message("file-ignored", line=start[0]) self._ignore_file = True return meth(msgid, "module", start[0]) except exceptions.UnknownMessageError: self.add_message("bad-option-value", args=msgid, line=start[0]) else: self.add_message("unrecognized-inline-option", args=opt, line=start[0])
process tokens from the current module to search for module/block level options
def get_group_target(self): """Returns the current destination surface for the context. This is either the original target surface as passed to :class:`Context` or the target surface for the current group as started by the most recent call to :meth:`push_group` or :meth:`push_group_with_content`. """ return Surface._from_pointer( cairo.cairo_get_group_target(self._pointer), incref=True)
Returns the current destination surface for the context. This is either the original target surface as passed to :class:`Context` or the target surface for the current group as started by the most recent call to :meth:`push_group` or :meth:`push_group_with_content`.
def wsp(word): '''Return the number of unstressed superheavy syllables.''' violations = 0 unstressed = [] for w in extract_words(word): unstressed += w.split('.')[1::2] # even syllables # include extrametrical odd syllables as potential WSP violations if w.count('.') % 2 == 0: unstressed += [w.rsplit('.', 1)[-1], ] # SHSP for syll in unstressed: if re.search(r'[ieaouäöy]{2}[^$ieaouäöy]+', syll, flags=FLAGS): violations += 1 # # WSP (CVV = heavy) # for syll in unstressed: # if re.search( # ur'[ieaouäöy]{2}|[ieaouäöy]+[^ieaouäöy]+', # syll, flags=re.I | re.U): # violations += 1 return violations
Return the number of unstressed superheavy syllables.
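The superheavy test is the regex in the SHSP loop: two vowel characters followed by at least one coda consonant. A standalone check (FLAGS is assumed to be re.I | re.U, as in the commented-out variant above):

import re

pattern = r'[ieaouäöy]{2}[^$ieaouäöy]+'
print(bool(re.search(pattern, 'kaup', flags=re.I | re.U)))  # True: CVVC is superheavy
print(bool(re.search(pattern, 'ka', flags=re.I | re.U)))    # False: CV is light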
def db_downgrade(version): """Downgrade the database""" v1 = get_db_version() migrate_api.downgrade(url=db_url, repository=db_repo, version=version) v2 = get_db_version() if v1 == v2: print 'No changes made.' else: print 'Downgraded: %s ... %s' % (v1, v2)
Downgrade the database
def _fixpath(self, p): """Apply tilde expansion and absolutization to a path.""" return os.path.abspath(os.path.expanduser(p))
Apply tilde expansion and absolutization to a path.
def __select_builder(lxml_builder, libxml2_builder, cmdline_builder): """ Selects a builder, based on which Python modules are present. """ if prefer_xsltproc: return cmdline_builder if not has_libxml2: # At the moment we prefer libxml2 over lxml, the latter can lead # to conflicts when installed together with libxml2. if has_lxml: return lxml_builder else: return cmdline_builder return libxml2_builder
Selects a builder, based on which Python modules are present.
def hardmask(self): """ Mask all lowercase nucleotides with N's """ p = re.compile("a|c|g|t|n") for seq_id in self.fasta_dict.keys(): self.fasta_dict[seq_id] = p.sub("N", self.fasta_dict[seq_id]) return self
Mask all lowercase nucleotides with N's
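The masking is a plain regex substitution; note the pattern is case-sensitive, so uppercase bases survive:

import re

p = re.compile("a|c|g|t|n")
print(p.sub("N", "ACGTacgtn"))  # 'ACGTNNNNN'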
def log_request_data_send(self, target_system, target_component, id, ofs, count, force_mavlink1=False): ''' Request a chunk of a log target_system : System ID (uint8_t) target_component : Component ID (uint8_t) id : Log id (from LOG_ENTRY reply) (uint16_t) ofs : Offset into the log (uint32_t) count : Number of bytes (uint32_t) ''' return self.send(self.log_request_data_encode(target_system, target_component, id, ofs, count), force_mavlink1=force_mavlink1)
Request a chunk of a log target_system : System ID (uint8_t) target_component : Component ID (uint8_t) id : Log id (from LOG_ENTRY reply) (uint16_t) ofs : Offset into the log (uint32_t) count : Number of bytes (uint32_t)
def jsonRender(self, def_buf):
    """
    Translate the passed serial block into string only JSON.

    Args:
        def_buf (SerialBlock): Any :class:`~ekmmeters.SerialBlock` object.

    Returns:
        str: JSON rendering of meter record.
    """
    try:
        ret_dict = SerialBlock()
        ret_dict[Field.Meter_Address] = self.getMeterAddress()
        for fld in def_buf:
            compare_fld = fld.upper()
            if "RESERVED" not in compare_fld and "CRC" not in compare_fld:
                ret_dict[str(fld)] = def_buf[fld][MeterData.StringValue]
    except Exception:
        ekm_log(traceback.format_exc())
        return ""

    return json.dumps(ret_dict, indent=4)
Translate the passed serial block into string only JSON. Args: def_buf (SerialBlock): Any :class:`~ekmmeters.SerialBlock` object. Returns: str: JSON rendering of meter record.
def cleanup(self): """Cleanup all the expired keys""" keys = self.client.smembers(self.keys_container) for key in keys: entry = self.client.get(key) if entry: entry = pickle.loads(entry) if self._is_expired(entry, self.timeout): self.delete_entry(key)
Cleanup all the expired keys