code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def _config_win32_search(self, search):
    """Configure a Search registry entry."""
    search = str(search)
    # The registry value may be delimited by comma or space; detect which.
    split_char = self._determine_split_char(search)
    for token in search.split(split_char):
        if token not in self.search:
            self.search.add(dns.name.from_text(token))
Configure a Search registry entry.
def rewrite_url(self, url_info: URLInfo) -> URLInfo:
    """Return a rewritten URL such as escaped fragment."""
    # Without a configured rewriter the URL passes through untouched.
    if not self._url_rewriter:
        return url_info
    return self._url_rewriter.rewrite(url_info)
Return a rewritten URL such as escaped fragment.
def remove_identity(cls, sh_db, ident_id):
    """Delete an identity from SortingHat.

    :param sh_db: SortingHat database
    :param ident_id: identity identifier
    :returns: True when the identity was deleted, False otherwise
    """
    try:
        api.delete_identity(sh_db, ident_id)
        logger.debug("Identity %s deleted", ident_id)
        return True
    except Exception as exc:
        # Best-effort delete: log and report failure instead of raising.
        logger.debug("Identity not deleted due to %s", str(exc))
        return False
Delete an identity from SortingHat. :param sh_db: SortingHat database :param ident_id: identity identifier
def _cleanup(self, domains):
    """Remove the temporary '.pot' files that were created for the domains."""
    for domain in domains.values():
        try:
            os.remove(domain['pot'])
        except (IOError, OSError):
            # The file may already be gone; nothing to clean up then.
            pass
Remove the temporary '.pot' files that were created for the domains.
def set_rating(self, grade_id):
    """Set the rating.

    :param grade_id: the new rating Id
    :raises NoAccess: if the rating metadata is read only
    :raises InvalidArgument: if ``grade_id`` is not a valid Id
    """
    if self.get_rating_metadata().is_read_only():
        raise errors.NoAccess()
    if not self._is_valid_id(grade_id):
        raise errors.InvalidArgument()
    self._my_map['ratingId'] = str(grade_id)
Sets the rating. arg: grade_id (osid.id.Id): the new rating raise: InvalidArgument - ``grade_id`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` raise: NullArgument - ``grade_id`` is ``null`` *compliance: mandatory -- This method must be implemented.*
def list_user_page_views(self, user_id, end_time=None, start_time=None):
    """List user page views, newest to oldest, via the paginated API."""
    path = {"user_id": user_id}
    data = {}
    params = {}
    if start_time is not None:
        params["start_time"] = start_time
    if end_time is not None:
        params["end_time"] = end_time
    self.logger.debug(
        "GET /api/v1/users/{user_id}/page_views with query params: {params}"
        " and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request(
        "GET",
        "/api/v1/users/{user_id}/page_views".format(**path),
        data=data, params=params, all_pages=True)
List user page views. Return the user's page view history in json format, similar to the available CSV download. Pagination is used as described in API basics section. Page views are returned in descending order, newest to oldest.
def get_ngroups(self, field=None):
    """Return the ngroups count for a grouped query.

    Raises ValueError unless group.ngroups was specified in the query.
    """
    group_field = field or self._determine_group_field(field)
    grouped = self.data['grouped'][group_field]
    if 'ngroups' not in grouped:
        raise ValueError("ngroups not found in response. specify group.ngroups in the query.")
    return grouped['ngroups']
Returns ngroups count if it was specified in the query, otherwise ValueError. If grouping on more than one field, provide the field argument to specify which count you are looking for.
def get(cls, id):
    """Get the pool with id 'id', consulting the module cache when enabled."""
    if CACHE:
        if id in _cache['Pool']:
            log.debug('cache hit for pool %d' % id)
            return _cache['Pool'][id]
        log.debug('cache miss for pool %d' % id)
    try:
        pool = Pool.list({'id': id})[0]
    except (IndexError, KeyError):
        raise NipapNonExistentError('no pool with ID ' + str(id) + ' found')
    # Cache the freshly-fetched pool for subsequent lookups.
    _cache['Pool'][id] = pool
    return pool
Get the pool with id 'id'.
def _request(self, resource, rtype, action=None, payload=None, offset=None,
             limit=None, requestId=None, is_crud=False):
    """_request amqp queue publish helper

    return: RequestEvent object or None for failed to publish
    """
    end = self.__end
    if end.is_set():
        raise LinkShutdownException('Client stopped')
    rng = None
    if offset is not None and limit is not None:
        Validation.limit_offset_check(limit, offset)
        rng = "%d/%d" % (offset, limit)
    with self.__requests:
        if requestId is None:
            requestId = self.__new_request_id()
        elif requestId in self.__requests:
            raise ValueError('requestId %s already in use' % requestId)
        inner_msg = self.__make_innermsg(resource, rtype, requestId, action,
                                         payload, rng)
        self.__requests[requestId] = ret = RequestEvent(
            requestId, inner_msg, is_crud=is_crud)
        if not self.__retry_enqueue(PreparedMessage(inner_msg, requestId)):
            raise LinkShutdownException('Client stopping')
        return ret
_request amqp queue publish helper return: RequestEvent object or None for failed to publish
def response_builder(self, response):
    """Try to return a pretty formatted response object."""
    try:
        parsed = response.json()
        return {
            'num_result': parsed['query']['count'],
            'result': parsed['query']['results'],
        }
    except Exception as exc:
        # Fall back to the raw body when the payload is not as expected.
        print(exc)
        return response.content
Try to return a pretty formatted response object
def particle_covariance_mtx(weights, locations):
    """Estimate the covariance of an SMC particle distribution.

    :param weights: weights of each particle
    :param locations: locations of each particle
    :returns: estimated covariance matrix
    """
    warnings.warn(
        'particle_covariance_mtx is deprecated, please use '
        'distributions.ParticleDistribution',
        DeprecationWarning)
    mu = particle_meanfn(weights, locations)
    xs = locations.transpose([1, 0])
    cov = (
        np.einsum('i,mi,ni', weights, xs, xs)
        - np.dot(mu[..., np.newaxis], mu[np.newaxis, ...])
    )
    assert np.all(np.isfinite(cov))
    # Warn when numerical error breaks positive semidefiniteness.
    if not np.all(la.eig(cov)[0] >= 0):
        warnings.warn(
            'Numerical error in covariance estimation causing positive semidefinite violation.',
            ApproximationWarning)
    return cov
Returns an estimate of the covariance of a distribution represented by a given set of SMC particles. :param weights: An array containing the weights of each particle. :param locations: An array containing the locations of each particle. :rtype: :class:`numpy.ndarray`, shape ``(n_modelparams, n_modelparams)``. :returns: An array containing the estimated covariance matrix.
def find_project_config_file(project_root: str) -> str:
    """Return absolute path to the project config file, or None.

    :param project_root: absolute path to the project root directory
    """
    if not project_root:
        return None
    candidate = os.path.join(project_root, YCONFIG_FILE)
    if os.path.isfile(candidate):
        return candidate
    return None
Return absolute path to project-specific config file, if it exists. :param project_root: Absolute path to project root directory. A project config file is a file named `YCONFIG_FILE` found at the top level of the project root dir. Return `None` if project root dir is not specified, or if no such file is found.
def add_to(self, parent, additions):
    """Modify parent to include all elements in additions."""
    for element in additions:
        if element not in parent:
            parent.append(element)
    # Record that the owning object was mutated.
    self.changed()
Modify parent to include all elements in additions
def _write(self, item, labels, features):
    """Write the given item (with labels and features) to the owned file."""
    record = Data([item], [labels], [features])
    self._writer.write(record, self.groupname, append=True)
Writes the given item to the owned file.
def semantic_version(tag):
    """Get a valid semantic version for tag.

    :param tag: version string in ``MAJOR.MINOR.PATCH`` form
    :returns: tuple of three ints (major, minor, patch)
    :raises CommandError: if tag cannot be parsed
    """
    try:
        version = tuple(int(part) for part in tag.split('.'))
        # The original used ``assert`` here, which is stripped under
        # ``python -O`` and would silently accept malformed tags.
        if len(version) != 3:
            raise ValueError('expected exactly three version components')
        return version
    except Exception as exc:
        raise CommandError(
            'Could not parse "%s", please use '
            'MAJOR.MINOR.PATCH' % tag
        ) from exc
Get a valid semantic version for tag
def status(self, value):
    """Setter for the bug status; requires a bug id and a valid status."""
    if not self._bug.get('id', None):
        raise BugException("Can not set status unless there is a bug id."
                           " Please call Update() before setting")
    if value not in VALID_STATUS:
        raise BugException("Invalid status type was used")
    self._bug['status'] = value
Property for getting or setting the bug status >>> bug.status = "REOPENED"
def check_ts_data_with_ts_target(X, y=None):
    """Check time series data against a time series target.

    Raises ValueError when series counts or per-series lengths disagree.
    """
    if y is None:
        return
    n_series = len(X)
    if n_series != len(y):
        raise ValueError("Number of time series different in X (%d) and y (%d)"
                         % (n_series, len(y)))
    Xt, _ = get_ts_data_parts(X)
    len_x = np.array([len(Xt[i]) for i in np.arange(n_series)])
    len_y = np.array([len(np.atleast_1d(y[i])) for i in np.arange(n_series)])
    if np.count_nonzero(len_y == len_x) != n_series:
        raise ValueError("Invalid time series lengths.\n"
                         "Ns: ", n_series, "Ntx: ", len_x, "Nty: ", len_y)
Checks time series data with time series target is good. If not raises value error. Parameters ---------- X : array-like, shape [n_series, ...] Time series data and (optionally) contextual data y : array-like, shape [n_series, ...] target data
def genCaCert(self, name, signas=None, outp=None, save=True):
    """Generate a CA keypair.

    :param name: the name of the CA keypair
    :param signas: the CA keypair to sign the new CA with (self-signed if None)
    :param outp: optional output buffer for progress messages
    :param save: persist the key and certificate to disk
    :returns: (private key, certificate) tuple
    """
    pkey, cert = self._genBasePkeyCert(name)
    cert.add_extensions([
        crypto.X509Extension(b'basicConstraints', False, b'CA:TRUE'),
    ])
    if signas is None:
        self.selfSignCert(cert, pkey)
    else:
        self.signCertAs(cert, signas)
    if save:
        keypath = self._savePkeyTo(pkey, 'cas', '%s.key' % name)
        if outp is not None:
            outp.printf('key saved: %s' % (keypath,))
        crtpath = self._saveCertTo(cert, 'cas', '%s.crt' % name)
        if outp is not None:
            outp.printf('cert saved: %s' % (crtpath,))
    return pkey, cert
Generates a CA keypair. Args: name (str): The name of the CA keypair. signas (str): The CA keypair to sign the new CA with. outp (synapse.lib.output.Output): The output buffer. Examples: Make a CA named "myca": mycakey, mycacert = cdir.genCaCert('myca') Returns: ((OpenSSL.crypto.PKey, OpenSSL.crypto.X509)): Tuple containing the private key and certificate objects.
def from_clauses(self, clauses):
    """Copy a list of clauses into this CNF object, updating self.nv."""
    self.clauses = copy.deepcopy(clauses)
    # Track the highest variable index seen across all literals.
    for clause in self.clauses:
        self.nv = max([abs(lit) for lit in clause] + [self.nv])
This method copies a list of clauses into a CNF object. :param clauses: a list of clauses. :type clauses: list(list(int)) Example: .. code-block:: python >>> from pysat.formula import CNF >>> cnf = CNF(from_clauses=[[-1, 2], [1, -2], [5]]) >>> print cnf.clauses [[-1, 2], [1, -2], [5]] >>> print cnf.nv 5
def link_callback(uri, rel):
    """Convert HTML URIs to absolute system paths so xhtml2pdf can access them."""
    sUrl = settings.STATIC_URL
    sRoot = settings.STATIC_ROOT
    mUrl = settings.MEDIA_URL
    mRoot = settings.MEDIA_ROOT
    if uri.startswith(mUrl):
        path = os.path.join(mRoot, uri.replace(mUrl, ""))
    elif uri.startswith(sUrl):
        path = os.path.join(sRoot, uri.replace(sUrl, ""))
    else:
        # Not a media/static URI; hand it back unchanged.
        return uri
    if not os.path.isfile(path):
        raise Exception('media URI must start with %s or %s' % (sUrl, mUrl))
    return path
Convert HTML URIs to absolute system paths so xhtml2pdf can access those resources
def dict_of_lists(self):
    """Return a dict mapping each key to the list of its values."""
    result = {}
    for key, value in self._items:
        result.setdefault(key, []).append(value)
    return result
Returns a dictionary where each key is associated with a list of values.
def is_known_scalar(value):
    """Return True if value is a scalar type we expect in a dataframe."""
    def _is_datetime_or_timedelta(v):
        # 'M' = datetime64, 'm' = timedelta64 dtype kinds.
        return pd.Series(v).dtype.kind in ('M', 'm')

    if np.iterable(value):
        return False
    return isinstance(value, numbers.Number) or _is_datetime_or_timedelta(value)
Return True if value is a type we expect in a dataframe
def register(cls, name, type_):
    """Register a new entry type; type_ must be a subclass of Entry."""
    if not issubclass(type_, Entry):
        raise exceptions.InvalidEntryType(
            "%s is not a subclass of Entry" % str(type_))
    # Names are stored lowercase so lookups are case-insensitive.
    cls._registry[name.lower()] = type_
Register a new type for an entry-type. The 2nd argument has to be a subclass of structures.Entry.
def _process(self, metric): if not self.enabled: return try: try: self.lock.acquire() self.process(metric) except Exception: self.log.error(traceback.format_exc()) finally: if self.lock.locked(): self.lock.release()
Decorator for processing handlers with a lock, catching exceptions
def get_privkey(self, address: AddressHex, password: str) -> PrivateKey:
    """Find the keystore file for an account, unlock it, return the private key.

    :param address: Ethereum address whose keyfile to look up
    :param password: password used to unlock the keystore
    :raises ValueError: when no keystore file exists for the address
    """
    address = add_0x_prefix(address).lower()
    if not self.address_in_keystore(address):
        raise ValueError('Keystore file not found for %s' % address)
    with open(self.accounts[address]) as keyfile:
        keystore = json.load(keyfile)
    account = Account(keystore, password, self.accounts[address])
    return account.privkey
Find the keystore file for an account, unlock it and get the private key Args: address: The Ethereum address for which to find the keyfile in the system password: Mostly for testing purposes. A password can be provided as the function argument here. If it's not then the user is interactively queried for one. Returns The private key associated with the address
def create_language_model(self, name, base_model_name, dialect=None,
                          description=None, **kwargs):
    """Create a custom language model for the given base model.

    :param name: user-defined name for the new custom model
    :param base_model_name: base language model to customize
    :param dialect: optional dialect (meaningful for Spanish models)
    :param description: optional description of the model
    :returns: a DetailedResponse with the service result
    """
    if name is None:
        raise ValueError('name must be provided')
    if base_model_name is None:
        raise ValueError('base_model_name must be provided')
    headers = {}
    if 'headers' in kwargs:
        headers.update(kwargs.get('headers'))
    headers.update(
        get_sdk_headers('speech_to_text', 'V1', 'create_language_model'))
    data = {
        'name': name,
        'base_model_name': base_model_name,
        'dialect': dialect,
        'description': description,
    }
    return self.request(
        method='POST',
        url='/v1/customizations',
        headers=headers,
        json=data,
        accept_json=True)
Create a custom language model. Creates a new custom language model for a specified base model. The custom language model can be used only with the base model for which it is created. The model is owned by the instance of the service whose credentials are used to create it. **See also:** [Create a custom language model](https://cloud.ibm.com/docs/services/speech-to-text/language-create.html#createModel-language). :param str name: A user-defined name for the new custom language model. Use a name that is unique among all custom language models that you own. Use a localized name that matches the language of the custom model. Use a name that describes the domain of the custom model, such as `Medical custom model` or `Legal custom model`. :param str base_model_name: The name of the base language model that is to be customized by the new custom language model. The new custom model can be used only with the base model that it customizes. To determine whether a base model supports language model customization, use the **Get a model** method and check that the attribute `custom_language_model` is set to `true`. You can also refer to [Language support for customization](https://cloud.ibm.com/docs/services/speech-to-text/custom.html#languageSupport). :param str dialect: The dialect of the specified language that is to be used with the custom language model. The parameter is meaningful only for Spanish models, for which the service creates a custom language model that is suited for speech in one of the following dialects: * `es-ES` for Castilian Spanish (the default) * `es-LA` for Latin American Spanish * `es-US` for North American (Mexican) Spanish A specified dialect must be valid for the base model. By default, the dialect matches the language of the base model; for example, `en-US` for either of the US English language models. :param str description: A description of the new custom language model. Use a localized description that matches the language of the custom model. 
:param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse
def _check_cygwin_installed(cyg_arch='x86_64'):
    """Return True if cygwin is installed, checked via cygcheck.exe."""
    path_to_cygcheck = os.sep.join(
        ['C:', _get_cyg_dir(cyg_arch), 'bin', 'cygcheck.exe'])
    LOG.debug('Path to cygcheck.exe: %s', path_to_cygcheck)
    if os.path.exists(path_to_cygcheck):
        return True
    LOG.debug('Could not find cygcheck.exe')
    return False
Return True or False if cygwin is installed. Use the cygcheck executable to check install. It is installed as part of the base package, and we use it to check packages
def load_data(directory, num):
    """Load ``0.npy`` .. ``<num - 1>.npy`` from ``data/<directory>``."""
    root = os.path.abspath(os.path.dirname(__file__))
    data_dir = os.path.join(root, 'data', directory)
    return [np.load(os.path.join(data_dir, str(i) + '.npy'))
            for i in range(num)]
Load numpy data from the data directory. The files should be stored in ``../data/<dir>`` and named ``0.npy, 1.npy, ... <num - 1>.npy``. Returns: list: A list of loaded data, such that ``list[i]`` contains the contents of ``i.npy``.
def compute_update_ratio(weight_tensors, before_weights, after_weights):
    """Compute the per-tensor ratio of update norm to weight norm as TF summaries."""
    deltas = [a - b for a, b in zip(after_weights, before_weights)]
    delta_norms = [np.linalg.norm(d.ravel()) for d in deltas]
    weight_norms = [np.linalg.norm(w.ravel()) for w in before_weights]
    ratios = [d / w for d, w in zip(delta_norms, weight_norms)]
    values = [
        tf.Summary.Value(tag='update_ratios/' + tensor.name, simple_value=ratio)
        for tensor, ratio in zip(weight_tensors, ratios)
    ]
    return tf.Summary(value=values)
Compute the ratio of gradient norm to weight norm.
def collection_list(self, resource_id, resource_type="collection"):
    """Fetch the slugs of all descendants of the given description.

    :param resource_id: slug of the description to fetch children from
    :param resource_type: no-op, kept for interface compatibility
    :returns: list of slug strings
    """
    def walk(children):
        slugs = []
        for child in children:
            slugs.append(child["slug"])
            if "children" in child:
                slugs.extend(walk(child["children"]))
        return slugs

    response = self._get(
        urljoin(self.base_url,
                "informationobjects/tree/{}".format(resource_id)))
    return walk(response.json()["children"])
Fetches a list of slug representing descriptions within the specified parent description. :param resource_id str: The slug of the description to fetch children from. :param resource_type str: no-op; not required or used in this implementation. :return: A list of strings representing the slugs for all children of the requested description. :rtype list:
def same_page(c):
    """Return True if all components of c are visual and share c[0]'s page."""
    # c[0]'s page is looked up lazily inside the generator so an empty
    # candidate still evaluates to True (all of nothing) without indexing.
    return all(
        _to_span(component).sentence.is_visual()
        and bbox_from_span(_to_span(component)).page
        == bbox_from_span(_to_span(c[0])).page
        for component in c
    )
Return true if all the components of c are on the same page of the document. Page numbers are based on the PDF rendering of the document. If a PDF file is provided, it is used. Otherwise, if only a HTML/XML document is provided, a PDF is created and then used to determine the page number of a Mention. :param c: The candidate to evaluate :rtype: boolean
def expose_event(self, widget, event):
    """Copy the exposed window area out of the off-screen surface."""
    x, y, width, height = event.area
    self.logger.debug("surface is %s" % self.surface)
    if self.surface is not None:
        win = widget.get_window()
        ctx = win.cairo_create()
        # Restrict painting to the exposed rectangle only.
        ctx.rectangle(x, y, width, height)
        ctx.clip()
        ctx.set_source_surface(self.surface, 0, 0)
        ctx.set_operator(cairo.OPERATOR_SOURCE)
        ctx.paint()
    return False
When an area of the window is exposed, we just copy out of the server-side, off-screen surface to that area.
def _urls_for_js(urls=None):
    """Return templated URLs prepared for javascript."""
    if urls is None:
        from .urls import urlpatterns
        urls = [u.name for u in urlpatterns if getattr(u, 'name', None)]
    mapping = {name: get_uri_template(name) for name in urls}
    mapping.update(getattr(settings, 'LEAFLET_STORAGE_EXTRA_URLS', {}))
    return mapping
Return templated URLs prepared for javascript.
def _parse_hunk_line(self, line): components = line.split('@@') if len(components) >= 2: hunk_info = components[1] groups = self.HUNK_LINE_RE.findall(hunk_info) if len(groups) == 1: try: return int(groups[0]) except ValueError: msg = "Could not parse '{}' as a line number".format(groups[0]) raise GitDiffError(msg) else: msg = "Could not find start of hunk in line '{}'".format(line) raise GitDiffError(msg) else: msg = "Could not parse hunk in line '{}'".format(line) raise GitDiffError(msg)
Given a hunk line in `git diff` output, return the line number at the start of the hunk. A hunk is a segment of code that contains changes. The format of the hunk line is: @@ -k,l +n,m @@ TEXT where `k,l` represent the start line and length before the changes and `n,m` represent the start line and length after the changes. `git diff` will sometimes put a code excerpt from within the hunk in the `TEXT` section of the line.
def query_array(ncfile, name) -> numpy.ndarray:
    """Return the named variable's data with fill values replaced by nan."""
    variable = query_variable(ncfile, name)
    maskedarray = variable[:]
    fillvalue = getattr(variable, '_FillValue', numpy.nan)
    # Only remap when the variable declares a non-nan fill value.
    if not numpy.isnan(fillvalue):
        maskedarray[maskedarray.mask] = numpy.nan
    return maskedarray.data
Return the data of the variable with the given name from the given NetCDF file. The following example shows that |query_array| returns |nan| entries to represent missing values even when the respective NetCDF variable defines a different fill value: >>> from hydpy import TestIO >>> from hydpy.core.netcdftools import netcdf4 >>> from hydpy.core import netcdftools >>> netcdftools.fillvalue = -999.0 >>> with TestIO(): ... with netcdf4.Dataset('test.nc', 'w') as ncfile: ... netcdftools.create_dimension(ncfile, 'dim1', 5) ... netcdftools.create_variable(ncfile, 'var1', 'f8', ('dim1',)) ... ncfile = netcdf4.Dataset('test.nc', 'r') >>> netcdftools.query_variable(ncfile, 'var1')[:].data array([-999., -999., -999., -999., -999.]) >>> netcdftools.query_array(ncfile, 'var1') array([ nan, nan, nan, nan, nan]) >>> import numpy >>> netcdftools.fillvalue = numpy.nan
def nodes(self, t=None, data=False):
    """Return a list of the graph's nodes at snapshot t (all if t is None)."""
    return list(self.nodes_iter(t=t, data=data))
Return a list of the nodes in the graph at a given snapshot. Parameters ---------- t : snapshot id (default=None) If None the the method returns all the nodes of the flattened graph. data : boolean, optional (default=False) If False return a list of nodes. If True return a two-tuple of node and node data dictionary Returns ------- nlist : list A list of nodes. If data=True a list of two-tuples containing (node, node data dictionary). Examples -------- >>> G = dn.DynGraph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2], 0) >>> G.nodes(t=0) [0, 1, 2] >>> G.add_edge(1, 4, t=1) >>> G.nodes(t=0) [0, 1, 2]
def get_root_gradebook_ids(self):
    """Get the root gradebook Ids in this hierarchy.

    Delegates to the catalog session when available, otherwise to the
    hierarchy session.
    """
    if self._catalog_session is not None:
        return self._catalog_session.get_root_catalog_ids()
    return self._hierarchy_session.get_roots()
Gets the root gradebook ``Ids`` in this hierarchy. return: (osid.id.IdList) - the root gradebook ``Ids`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def get_elements(self, filter_cls, elem_id=None):
    """Get elements from the result filtered by class, optionally by id.

    :param filter_cls: class used as the collection key
    :param elem_id: optional element id; missing ids yield an empty list
    :returns: list of matching elements
    """
    if elem_id is None:
        return list(self._class_collection_map[filter_cls].values())
    try:
        return [self._class_collection_map[filter_cls][elem_id]]
    except KeyError:
        # Unknown id (or class, on this path) means no matches.
        return []
Get a list of elements from the result and filter the element type by a class. :param filter_cls: :param elem_id: ID of the object :type elem_id: Integer :return: List of available elements :rtype: List
def _setup_converter_graph(self, converter_list, prune_converters):
    """Set up the directed conversion graph, pruning unavailable converters."""
    for converter in converter_list:
        if prune_converters:
            try:
                converter.configure()
            except ConverterUnavailable as e:
                log.warning('%s unavailable: %s'
                            % (converter.__class__.__name__, str(e)))
                continue
        # Register one edge per (input, output) pair this converter handles.
        for source in converter.inputs:
            for target in converter.outputs:
                self.dgraph.add_edge(source, target, converter.cost)
                self.converters[(source, target)] = converter
        if hasattr(converter, 'direct_outputs'):
            self._setup_direct_converter(converter)
Set up directed conversion graph, pruning unavailable converters as necessary
def get_value(self):
    """Retrieve the value to inject in the component, or None.

    Each list value is copied so callers cannot mutate internal state.
    """
    with self._lock:
        if self._future_value is None:
            return None
        return {key: values[:]
                for key, values in self._future_value.items()}
Retrieves the value to inject in the component :return: The value to inject
def get_news(self):
    """Get all the news articles from the first page.

    :returns: list of article text strings
    """
    headers = {
        "Content-type": "application/x-www-form-urlencoded",
        "Accept": "text/plain",
        'Referer': 'http://' + self.domain + '/login.phtml',
        "User-Agent": user_agent,
    }
    page = self.session.get('http://' + self.domain + '/team_news.phtml',
                            headers=headers).content
    soup = BeautifulSoup(page)
    # Bug fix: the original passed the *set* {'class', 'article_content_text'}
    # as the attrs argument; BeautifulSoup expects a dict mapping the
    # attribute name to its value.
    news = []
    for article in soup.find_all('div', {'class': 'article_content_text'}):
        news.append(article.text)
    return news
Get all the news from the first page
def get_instance(self, payload):
    """Build an AssistantFallbackActionsInstance from an API payload.

    :param payload: payload response from the API
    """
    return AssistantFallbackActionsInstance(
        self._version,
        payload,
        assistant_sid=self._solution['assistant_sid'],
    )
Build an instance of AssistantFallbackActionsInstance :param dict payload: Payload response from the API :returns: twilio.rest.preview.understand.assistant.assistant_fallback_actions.AssistantFallbackActionsInstance :rtype: twilio.rest.preview.understand.assistant.assistant_fallback_actions.AssistantFallbackActionsInstance
def total_vat(self):
    """Return the sum of all Vat amounts for this receipt (0 when none)."""
    aggregate = Vat.objects.filter(receipt=self).aggregate(total=Sum('amount'))
    return aggregate['total'] or 0
Returns the sum of all Vat objects.
def astensor(array: TensorLike) -> BKTensor:
    """Convert to product tensor of shape (2, 2, ..., 2)."""
    tensor = tf.convert_to_tensor(array, dtype=CTYPE)
    if DEVICE == 'gpu':
        tensor = tensor.gpu()
    # Rank is log2 of the element count: one axis of size 2 per qubit.
    rank = int(math.log2(size(tensor)))
    return tf.reshape(tensor, [2] * rank)
Convert to product tensor
def ListFiles(self, ext_attrs=False):
    """Yield stat responses for every file in the directory.

    :raises IOError: if this pathspec is not a directory
    """
    if not self.IsDirectory():
        raise IOError("%s is not a directory." % self.path)
    for name in self.files:
        try:
            response = self._Stat(utils.JoinPath(self.path, name),
                                  ext_attrs=ext_attrs)
            pathspec = self.pathspec.Copy()
            pathspec.last.path = utils.JoinPath(pathspec.last.path, name)
            response.pathspec = pathspec
            yield response
        except OSError:
            # Files that vanish or cannot be stat'ed are skipped silently.
            pass
List all files in the dir.
def _valid_directory(self, path): abspath = os.path.abspath(path) if not os.path.isdir(abspath): raise argparse.ArgumentTypeError('Not a valid directory: {}'.format(abspath)) return abspath
Ensure that the given path is valid. :param str path: A valid directory path. :raises: :py:class:`argparse.ArgumentTypeError` :returns: An absolute directory path.
def schema_remove(dbname, name, user=None,
                  db_user=None, db_password=None,
                  db_host=None, db_port=None):
    """Removes a schema from the Postgres server.

    dbname: database we work on
    name: name of the schema to remove
    user: system user the operations run as (runas)
    db_user / db_password / db_host / db_port: connection overrides

    Returns True when the schema was removed, False otherwise.
    """
    # Bug fix: the existence check previously hard-coded ``user=None``,
    # silently ignoring the runas ``user`` argument that the drop and the
    # post-check honor.
    if not schema_exists(dbname, name, user=user, db_user=db_user,
                         db_password=db_password, db_host=db_host,
                         db_port=db_port):
        log.info('Schema \'%s\' does not exist in \'%s\'', name, dbname)
        return False
    sub_cmd = 'DROP SCHEMA "{0}"'.format(name)
    _psql_prepare_and_run(
        ['-c', sub_cmd],
        runas=user, maintenance_db=dbname, host=db_host,
        user=db_user, port=db_port, password=db_password)
    if schema_exists(dbname, name, user, db_user=db_user,
                     db_password=db_password, db_host=db_host,
                     db_port=db_port):
        log.info('Failed to delete schema \'%s\'.', name)
        return False
    return True
Removes a schema from the Postgres server. CLI Example: .. code-block:: bash salt '*' postgres.schema_remove dbname schemaname dbname Database name we work on schemaname The schema's name we'll remove user System user all operations should be performed on behalf of db_user database username if different from config or default db_password user password if any password for a specified user db_host Database host if different from config or default db_port Database port if different from config or default
def _include_environment_variables(self, program, executor_vars): env_vars = { 'RESOLWE_HOST_URL': self.settings_actual.get('RESOLWE_HOST_URL', 'localhost'), } set_env = self.settings_actual.get('FLOW_EXECUTOR', {}).get('SET_ENV', {}) env_vars.update(executor_vars) env_vars.update(set_env) export_commands = ['export {}={}'.format(key, shlex.quote(value)) for key, value in env_vars.items()] return os.linesep.join(export_commands) + os.linesep + program
Define environment variables.
def user_list(database=None, user=None, password=None, host=None, port=None):
    """List cluster admins, or users of a database when one is given."""
    client = _client(user=user, password=password, host=host, port=port)
    if not database:
        # No database means list the cluster-level admins.
        return client.get_list_cluster_admins()
    client.switch_database(database)
    return client.get_list_users()
List cluster admins or database users. If a database is specified: it will return database users list. If a database is not specified: it will return cluster admins list. database The database to list the users from user The user to connect as password The password of the user host The host to connect to port The port to connect to CLI Example: .. code-block:: bash salt '*' influxdb08.user_list salt '*' influxdb08.user_list <database> salt '*' influxdb08.user_list <database> <user> <password> <host> <port>
def _exchange_refresh_tokens(self): 'Exchanges a refresh token for an access token' if self.token_cache is not None and 'refresh' in self.token_cache: refresh_form = { 'grant_type': 'refresh_token', 'refresh_token': self.token_cache['refresh'], 'client_id': self.client_id, 'client_secret': self.client_secret, } try: tokens = self._request_tokens_from_token_endpoint(refresh_form) tokens['refresh'] = self.token_cache['refresh'] return tokens except OAuth2Exception: logging.exception( 'Encountered an exception during refresh token flow.') return None
Exchanges a refresh token for an access token
def _parse_domain_id(self, config): match = re.search(r'domain-id (.+)$', config) value = match.group(1) if match else None return dict(domain_id=value)
Scans the config block and parses the domain-id value Args: config (str): The config block to scan Returns: dict: A dict object that is intended to be merged into the resource dict
def clean_new(self, value):
    """Return a new object instantiated with cleaned data."""
    cleaned = self.schema_class(value).full_clean()
    return self.object_class(**cleaned)
Return a new object instantiated with cleaned data.
def create_user_deliveryserver(self, domainid, data):
    """Create a user delivery server for the given domain."""
    endpoint = ENDPOINTS['userdeliveryservers']['new']
    return self.api_call(endpoint, dict(domainid=domainid), body=data)
Create a user delivery server
def rewind(self):
    """Rewind this cursor to its unevaluated state and return it.

    Options set on the cursor remain in effect; future iteration sends
    fresh queries to the server.
    """
    self.__data = deque()
    self.__id = None
    self.__address = None
    self.__retrieved = 0
    self.__killed = False
    return self
Rewind this cursor to its unevaluated state. Reset this cursor if it has been partially or completely evaluated. Any options that are present on the cursor will remain in effect. Future iterating performed on this cursor will cause new queries to be sent to the server, even if the resultant data has already been retrieved by this cursor.
def swap(self, a, b):
    """Swap mem and asm entries at positions a and b."""
    for seq in (self.mem, self.asm):
        seq[a], seq[b] = seq[b], seq[a]
Swaps mem positions a and b
def pformat(tree):
    """Recursively format a tree into a nice string representation."""
    if tree.empty():
        return ''
    return "\n".join(_pformat(tree.root, 0)).strip()
Recursively formats a tree into a nice string representation. Example Input: yahoo = tt.Tree(tt.Node("CEO")) yahoo.root.add(tt.Node("Infra")) yahoo.root[0].add(tt.Node("Boss")) yahoo.root[0][0].add(tt.Node("Me")) yahoo.root.add(tt.Node("Mobile")) yahoo.root.add(tt.Node("Mail")) Example Output: CEO |__Infra | |__Boss | |__Me |__Mobile |__Mail
def create_server(cloud, **kwargs):
    """Create a new instance on the named cloud ('ec2', 'rackspace', 'gce')."""
    # Lambdas keep the provider helpers unreferenced until actually chosen.
    actions = {
        'ec2': lambda: _create_server_ec2(**kwargs),
        'rackspace': lambda: _create_server_rackspace(**kwargs),
        'gce': lambda: _create_server_gce(**kwargs),
    }
    if cloud not in actions:
        raise ValueError("Unknown cloud type: {}".format(cloud))
    actions[cloud]()
Create a new instance
def wait_for_jobs(self, job_ids, timeout, delay):
    """Wait until the given jobs appear in the completed job queue.

    :param job_ids: iterable of job ids to wait for
    :param timeout: total seconds to wait
    :param delay: seconds between polls
    :returns: list of completed job dicts, or None on skip/timeout
    """
    if self.skip:
        return
    logger.debug("Waiting up to %d sec for completion of the job IDs %s",
                 timeout, job_ids)
    remaining = set(job_ids)
    found = []
    countdown = timeout
    while countdown > 0:
        matched = self.find_jobs(remaining)
        if matched:
            remaining.difference_update({job["id"] for job in matched})
            found.extend(matched)
            if not remaining:
                return found
        time.sleep(delay)
        countdown -= delay
    logger.error(
        "Timed out while waiting for completion of the job IDs %s. Results not updated.",
        list(remaining),
    )
Waits until the jobs appears in the completed job queue.
def S_star(u, dfs_data):
    """Return the set of all descendants of u, with u itself added."""
    descendants = S(u, dfs_data)
    if u not in descendants:
        descendants.append(u)
    return descendants
The set of all descendants of u, with u added.
def _gen_full_path(self, filename, file_system=None): if file_system is None: return '{}/{}'.format(self.dest_file_system, filename) else: if ":" not in file_system: raise ValueError("Invalid file_system specified: {}".format(file_system)) return '{}/{}'.format(file_system, filename)
Generate full file path on remote device.
def setUser(self, *args, **kwargs):
    """Adds the user for this loan to a 'user' field.

    User is a MambuUser object. Returns the number of requests done to
    Mambu.
    """
    try:
        user = self.mambuuserclass(entid=self['assignedUserKey'],
                                   *args, **kwargs)
    except KeyError:
        err = MambuError(
            "La cuenta %s no tiene asignado un usuario" % self['id'])
        err.noUser = True
        raise err
    except AttributeError:
        # mambuuserclass not yet bound; import lazily and retry once.
        from .mambuuser import MambuUser
        self.mambuuserclass = MambuUser
        try:
            user = self.mambuuserclass(entid=self['assignedUserKey'],
                                       *args, **kwargs)
        except KeyError:
            err = MambuError(
                "La cuenta %s no tiene asignado un usuario" % self['id'])
            err.noUser = True
            raise err
    self['user'] = user
    return 1
Adds the user for this loan to a 'user' field. User is a MambuUser object. Returns the number of requests done to Mambu.
def get_xpath(stmt, qualified=False, prefix_to_module=False):
    """Get the XPath of the statement.

    qualified adds a prefix to each node; prefix_to_module resolves
    prefixes to module names instead.
    """
    return mk_path_str(stmt,
                       with_prefixes=qualified,
                       prefix_onchange=True,
                       prefix_to_module=prefix_to_module)
Gets the XPath of the statement. Unless qualified=True, does not include prefixes unless the prefix changes mid-XPath. qualified will add a prefix to each node. prefix_to_module will resolve prefixes to module names instead. For RFC 8040, set prefix_to_module=True: /prefix:root/node/prefix:node/... qualified=True: /prefix:root/prefix:node/prefix:node/... qualified=True, prefix_to_module=True: /module:root/module:node/module:node/... prefix_to_module=True: /module:root/node/module:node/...
def clean_markup(self, markup, parser=None):
    """Apply this ``Cleaner`` to a markup string or document.

    The result mirrors the input type: bytes in, bytes out; text in,
    text out; parsed document in, (deep-copied) cleaned document out.
    """
    input_type = type(markup)
    if not isinstance(markup, six.string_types):
        # Already a document: work on a copy so the caller's tree is
        # left untouched.
        doc = copy.deepcopy(markup)
    else:
        doc = fromstring(markup, parser=parser)
    self(doc)
    if issubclass(input_type, six.text_type):
        return tostring(doc, encoding='unicode')
    if issubclass(input_type, six.binary_type):
        return tostring(doc, encoding='utf-8')
    return doc
Apply ``Cleaner`` to markup string or document and return a cleaned string or document.
def _read_provenance_from_xml(self, root):
    """Read metadata provenance steps from an XML tree into this object.

    :param root: container in which we search
    :type root: ElementTree.Element
    """
    # Locate the provenance container via the pre-configured XPath.
    path = self._special_properties['provenance']
    provenance = root.find(path, XML_NS)
    for step in provenance.iter('provenance_step'):
        title = step.find('title').text
        description = step.find('description').text
        timestamp = step.get('timestamp')
        if 'IF Provenance' in title:
            # Impact-function steps carry extra per-field data; any
            # missing field is recorded as an empty string.
            data = {}
            from safe.metadata35.provenance import IFProvenanceStep
            keys = IFProvenanceStep.impact_functions_fields
            for key in keys:
                value = step.find(key)
                if value is not None:
                    data[key] = value.text
                else:
                    data[key] = ''
            self.append_if_provenance_step(
                title, description, timestamp, data)
        else:
            self.append_provenance_step(title, description, timestamp)
read metadata provenance from xml. :param root: container in which we search :type root: ElementTree.Element
def empty_tree(input_list):
    """Return True when ``input_list`` is a (possibly nested) list whose
    every leaf is an empty list, i.e. it contains no non-list values."""
    return all(isinstance(node, list) and empty_tree(node) for node in input_list)
Recursively iterate through values in nested lists.
def stp(br=None, state='disable', iface=None):
    """Set the Spanning Tree Protocol state for a bridge.

    For BSD-like operating systems the interface on which to toggle STP
    must also be supplied. Returns False on unsupported kernels.

    CLI Example:

    .. code-block:: bash

        salt '*' bridge.stp br0 enable
        salt '*' bridge.stp bridge0 enable fxp0
    """
    os_kernel = __grains__['kernel']
    if os_kernel == 'Linux':
        flags = {'enable': 'on', 'disable': 'off'}
        return _os_dispatch('stp', br, flags[state])
    if os_kernel in SUPPORTED_BSD_LIKE:
        flags = {'enable': 'stp', 'disable': '-stp'}
        return _os_dispatch('stp', br, flags[state], iface)
    return False
Sets Spanning Tree Protocol state for a bridge CLI Example: .. code-block:: bash salt '*' bridge.stp br0 enable salt '*' bridge.stp br0 disable For BSD-like operating systems, it is required to add the interface on which to enable the STP. CLI Example: .. code-block:: bash salt '*' bridge.stp bridge0 enable fxp0 salt '*' bridge.stp bridge0 disable fxp0
def getCanonicalID(iname, xrd_tree):
    """Return the CanonicalID from this XRDS document.

    @param iname: the XRI being resolved.
    @type iname: unicode

    @param xrd_tree: The XRDS output from the resolver.
    @type xrd_tree: ElementTree

    @returns: The XRI CanonicalID or None.
    @returntype: unicode or None
    """
    # Walk the XRD elements from the last (most specific) upward,
    # verifying that each CanonicalID is properly delegated by its
    # parent authority.
    xrd_list = xrd_tree.findall(xrd_tag)
    xrd_list.reverse()
    try:
        canonicalID = xri.XRI(xrd_list[0].findall(canonicalID_tag)[0].text)
    except IndexError:
        # No CanonicalID present in the final XRD.
        return None
    childID = canonicalID.lower()
    for xrd in xrd_list[1:]:
        # The expected parent is the child ID with its last '!'-segment
        # removed.
        parent_sought = childID[:childID.rindex('!')]
        parent = xri.XRI(xrd.findtext(canonicalID_tag))
        if parent_sought != parent.lower():
            raise XRDSFraud("%r can not come from %s" % (childID, parent))
        childID = parent_sought
    # Finally, the root authority of the resolved iname must be
    # authoritative for the topmost ID in the chain.
    root = xri.rootAuthority(iname)
    if not xri.providerIsAuthoritative(root, childID):
        raise XRDSFraud("%r can not come from root %r" % (childID, root))
    return canonicalID
Return the CanonicalID from this XRDS document. @param iname: the XRI being resolved. @type iname: unicode @param xrd_tree: The XRDS output from the resolver. @type xrd_tree: ElementTree @returns: The XRI CanonicalID or None. @returntype: unicode or None
def _build_ds_from_instruction(instruction, ds_from_file_fn):
    """Map an instruction to a real dataset for one particular shard.

    Args:
        instruction: A `dict` of `tf.Tensor` containing the instruction to
            load the particular shard (filename, mask, ...)
        ds_from_file_fn: `fct`, function which returns the dataset
            associated to the filename

    Returns:
        dataset: `tf.data.Dataset`, the shard loaded from the instruction
    """
    examples_ds = ds_from_file_fn(instruction["filepath"])
    mask_ds = _build_mask_ds(
        mask_offset=instruction["mask_offset"],
        mask=instruction["mask"],
    )
    # Pair each example with its mask bit, keep only masked-in examples,
    # then drop the mask component again.
    ds = tf.data.Dataset.zip((examples_ds, mask_ds))
    ds = ds.filter(lambda example, mask: mask)
    ds = ds.map(lambda example, mask: example)
    return ds
Map an instruction to a real datasets for one particular shard. Args: instruction: A `dict` of `tf.Tensor` containing the instruction to load the particular shard (filename, mask,...) ds_from_file_fn: `fct`, function which returns the dataset associated to the filename Returns: dataset: `tf.data.Dataset`, The shard loaded from the instruction
def main():
    """Fetch a programme tracklisting and write it to an audio file or text."""
    opts = parse_arguments()
    pid = opts.pid
    title = get_programme_title(pid)
    broadcast_date = get_broadcast_date(pid)
    listing = extract_listing(pid)
    out_name = get_output_filename(opts)
    tracklisting = generate_output(listing, title, broadcast_date)
    output_to_file(out_name, tracklisting, opts.action)
    print("Done!")
Get a tracklisting, write to audio file or text.
def copy_file_links(self, src):
    """Copy the POSIX file-link count from ``src`` into this entry.

    The PX record may live either in the directory-record entries or in
    the continuation entries, on both sides.

    Parameters:
     src - The source Rock Ridge entry to copy from.
    Returns:
     Nothing.
    """
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('Rock Ridge extension not yet initialized')

    src_px = src.dr_entries.px_record
    if src_px is None:
        src_px = src.ce_entries.px_record
        if src_px is None:
            raise pycdlibexception.PyCdlibInvalidInput('No Rock Ridge file links')
    num_links = src_px.posix_file_links

    dest_px = self.dr_entries.px_record
    if dest_px is None:
        dest_px = self.ce_entries.px_record
        if dest_px is None:
            raise pycdlibexception.PyCdlibInvalidInput('No Rock Ridge file links')
    dest_px.posix_file_links = num_links
Copy the number of file links from the source Rock Ridge entry into this Rock Ridge entry. Parameters: src - The source Rock Ridge entry to copy from. Returns: Nothing.
def __fill_buffer(self, size=0):
    """Refill the internal buffer from the blob.

    Args:
      size: Number of bytes to read. The actual fetch size is clamped to
        [self.__buffer_size, MAX_BLOB_FETCH_SIZE].
    """
    want = max(size, self.__buffer_size)
    read_size = min(want, MAX_BLOB_FETCH_SIZE)
    start = self.__position
    self.__buffer = fetch_data(self.__blob_key, start, start + read_size - 1)
    self.__buffer_position = 0
    # A short read means we hit the end of the blob.
    self.__eof = len(self.__buffer) < read_size
Fills the internal buffer. Args: size: Number of bytes to read. Will be clamped to [self.__buffer_size, MAX_BLOB_FETCH_SIZE].
def _cast_to_type(self, value): if value in (True, False): return bool(value) if value in ('t', 'True', '1'): return True if value in ('f', 'False', '0'): return False self.fail('invalid', value=value)
Convert the value to a boolean and raise a validation error on failure.
def _parse_config(self, requires_cfg=True):
    """Parse the configuration file, if one is configured, and add it to
    the `Bison` state.

    Args:
        requires_cfg (bool): Specify whether or not parsing should fail
            if a config file is not found. (default: True)
    """
    if not self.config_paths:
        # Nothing configured; nothing to do.
        return
    try:
        self._find_config()
    except BisonError:
        if requires_cfg:
            raise
        return
    try:
        with open(self.config_file, 'r') as f:
            parsed = self._fmt_to_parser[self.config_format](f)
    except Exception as e:
        raise BisonError(
            'Failed to parse config file: {}'.format(self.config_file)
        ) from e
    # Invalidate the cached merged view before installing the new config.
    self._full_config = None
    self._config = parsed
Parse the configuration file, if one is configured, and add it to the `Bison` state. Args: requires_cfg (bool): Specify whether or not parsing should fail if a config file is not found. (default: True)
def op_paths(self, path_prefix=None):
    """Yield the single ``(url_path, operation)`` pair for this operation,
    optionally prefixed with ``path_prefix``."""
    full_path = (path_prefix + self.path) if path_prefix else self.path
    yield full_path, self
Yield operations paths stored in containers.
def BoolEncoder(field_number, is_repeated, is_packed):
    """Returns an encoder closure for a boolean protobuf field.

    The returned function writes the wire-format bytes for the field in
    one of three layouts: packed-repeated (one length-delimited tag then
    one byte per element), plain repeated (tag + byte per element), or
    singular (one tag + one byte).
    """
    false_byte = b'\x00'
    true_byte = b'\x01'
    if is_packed:
        # Packed: single LENGTH_DELIMITED tag, then a varint byte count,
        # then one byte per element.
        tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
        local_EncodeVarint = _EncodeVarint
        def EncodePackedField(write, value):
            write(tag_bytes)
            local_EncodeVarint(write, len(value))
            for element in value:
                if element:
                    write(true_byte)
                else:
                    write(false_byte)
        return EncodePackedField
    elif is_repeated:
        # Repeated (non-packed): a VARINT tag before every element.
        tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT)
        def EncodeRepeatedField(write, value):
            for element in value:
                write(tag_bytes)
                if element:
                    write(true_byte)
                else:
                    write(false_byte)
        return EncodeRepeatedField
    else:
        # Singular: one tag plus one value byte.
        tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT)
        def EncodeField(write, value):
            write(tag_bytes)
            if value:
                return write(true_byte)
            return write(false_byte)
        return EncodeField
Returns an encoder for a boolean field.
def get_python(self):
    """Return the cursor instance only when configured for multiselect;
    otherwise return the single stored value."""
    if not self.multiselect:
        return self._get()
    return super(MultiSelectField, self).get_python()
Only return cursor instance if configured for multiselect
def update(self, sequence=None, **mapping):
    """Add multiple elements to the fact.

    Mirrors ``dict.update``: ``sequence`` may be a dict or an iterable
    of ``(slot, value)`` pairs, and keyword arguments are applied as
    additional slot assignments.
    """
    if sequence is not None:
        if isinstance(sequence, dict):
            for slot in sequence:
                self[slot] = sequence[slot]
        else:
            for slot, value in sequence:
                self[slot] = value
    # Bug fix: the keyword branch previously iterated ``sequence``
    # instead of ``mapping`` — it ignored the keyword arguments and
    # crashed when ``sequence`` was None.
    for slot, value in mapping.items():
        self[slot] = value
Add multiple elements to the fact.
def OnUpdateFigurePanel(self, event):
    """Redraw event handler for the figure panel.

    The ``updating`` flag guards against re-entrant redraws.
    """
    if self.updating:
        return
    self.updating = True
    figure = self.get_figure(self.code)
    self.figure_panel.update(figure)
    self.updating = False
Redraw event handler for the figure panel
def triplify(binding):
    """Recursively generate RDF statement triples from the data and schema
    supplied to the application.

    Returns a ``(subject, triples)`` pair; subject is None for empty,
    array, and object-delegated bindings.
    """
    if binding.data is None:
        return None, []
    if binding.is_object:
        return triplify_object(binding)
    if binding.is_array:
        collected = []
        for child in binding.items:
            _, child_triples = triplify(child)
            collected.extend(child_triples)
        return None, collected
    # Leaf value: emit the forward triple, plus the inverse one when a
    # reverse predicate is declared.
    subject = binding.parent.subject
    statements = [(subject, binding.predicate, binding.object)]
    if binding.reverse is not None:
        statements.append((binding.object, binding.reverse, subject))
    return subject, statements
Recursively generate RDF statement triples from the data and schema supplied to the application.
def plug(self):
    """Register this actor's message-callback methods in the global
    callback registry (idempotent)."""
    if self.__plugged:
        return
    for _name, method in inspect.getmembers(self, predicate=inspect.ismethod):
        messages = getattr(method, '_callback_messages', None)
        if messages is None:
            continue
        for message in messages:
            global_callbacks[message].add(method)
    self.__plugged = True
Add the actor's methods to the callback registry.
def validate_create_package(package_format, owner, repo, **kwargs):
    """Validate parameters for creating a package of the given format.

    Returns True when the server accepts the payload; API errors are
    translated by ``catch_raise_api_exception``.
    """
    client = get_packages_api()
    validator_name = "packages_validate_upload_%s_with_http_info" % package_format
    with catch_raise_api_exception():
        validator = getattr(client, validator_name)
        _, _, headers = validator(
            owner=owner, repo=repo, data=make_create_payload(**kwargs)
        )
    ratelimits.maybe_rate_limit(client, headers)
    return True
Validate parameters for creating a package.
def main(args):
    """Main entry point for the GenomicIntJaccard script.

    :param args: the arguments for this script, as a list of strings.
        Should already have had things like the script name stripped;
        if there are no args provided, this should be an empty list.
    """
    ui = getUI(args)
    if ui.optionIsSet("test"):
        unittest.main(argv=[sys.argv[0]])
    elif ui.optionIsSet("help"):
        ui.usage()
    else:
        verbose = ui.optionIsSet("verbose")
        if ui.optionIsSet("stranded"):
            sys.stderr.write("Sorry, stranded mode hasn't been implemented yet.")
            sys.exit()
        regions_1 = list(BEDIterator(ui.getArgument(0), verbose=verbose))
        regions_2 = list(BEDIterator(ui.getArgument(1), verbose=verbose))
        # Bug fix: this was a Python 2 print statement, a syntax error
        # under Python 3.
        print(jaccardIndex(regions_1, regions_2))
main entry point for the GenomicIntJaccard script. :param args: the arguments for this script, as a list of string. Should already have had things like the script name stripped. That is, if there are no args provided, this should be an empty list.
def get_assignable_objective_bank_ids(self, objective_bank_id):
    """Gets a list of objective banks including and under the given
    objective bank node in which any objective can be assigned.

    arg:    objective_bank_id (osid.id.Id): the ``Id`` of the
            ``ObjectiveBank``
    return: (osid.id.IdList) - list of assignable objective bank ``Ids``
    raise:  NullArgument - ``objective_bank_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    """
    mgr = self._get_provider_manager('LEARNING', local=True)
    lookup_session = mgr.get_objective_bank_lookup_session(proxy=self._proxy)
    bank_ids = [bank.get_id() for bank in lookup_session.get_objective_banks()]
    return IdList(bank_ids)
Gets a list of objective banks including and under the given objective bank node in which any objective can be assigned. arg: objective_bank_id (osid.id.Id): the ``Id`` of the ``ObjectiveBank`` return: (osid.id.IdList) - list of assignable objective bank ``Ids`` raise: NullArgument - ``objective_bank_id`` is ``null`` raise: OperationFailed - unable to complete request *compliance: mandatory -- This method must be implemented.*
def do_types_conflict(type1: GraphQLOutputType, type2: GraphQLOutputType) -> bool:
    """Check whether two output types conflict.

    Two types conflict if both types could not apply to a value
    simultaneously. Composite types are ignored here as their field
    types are compared later recursively; List and Non-Null wrappers
    must match on both sides.
    """
    # A list conflicts with anything that is not a list; two lists are
    # compared by their inner types.
    if is_list_type(type1) or is_list_type(type2):
        if is_list_type(type1) and is_list_type(type2):
            return do_types_conflict(
                cast(GraphQLList, type1).of_type, cast(GraphQLList, type2).of_type
            )
        return True
    # Same reasoning for Non-Null wrappers.
    if is_non_null_type(type1) or is_non_null_type(type2):
        if is_non_null_type(type1) and is_non_null_type(type2):
            return do_types_conflict(
                cast(GraphQLNonNull, type1).of_type,
                cast(GraphQLNonNull, type2).of_type,
            )
        return True
    # Leaf (scalar/enum) types conflict unless they are the same type.
    if is_leaf_type(type1) or is_leaf_type(type2):
        return type1 is not type2
    return False
Check whether two types conflict Two types conflict if both types could not apply to a value simultaneously. Composite types are ignored as their individual field types will be compared later recursively. However List and Non-Null types must match.
def plan(self):
    """Gets the associated plan for this invoice.

    The plan is taken from the first invoice item that has one (invoice
    items are immutable snapshots), falling back to the subscription's
    currently active plan; returns None when neither is available.

    :rtype: ``djstripe.Plan``
    """
    for invoiceitem in self.invoiceitems.all():
        item_plan = invoiceitem.plan
        if item_plan:
            return item_plan
    if self.subscription:
        return self.subscription.plan
Gets the associated plan for this invoice. In order to provide a consistent view of invoices, the plan object should be taken from the first invoice item that has one, rather than using the plan associated with the subscription. Subscriptions (and their associated plan) are updated by the customer and represent what is current, but invoice items are immutable within the invoice and stay static/unchanged. In other words, a plan retrieved from an invoice item will represent the plan as it was at the time an invoice was issued. The plan retrieved from the subscription will be the currently active plan. :returns: The associated plan for the invoice. :rtype: ``djstripe.Plan``
def rotate_concurrent(self, *locations, **kw):
    """Rotate the backups in the given locations concurrently.

    :param locations: One or more values accepted by ``coerce_location()``.
    :param kw: Keyword arguments are passed on to ``rotate_backups()``.

    Removal commands are first collected per location (``prepare=True``)
    and then executed through a command pool so that removals on
    different devices can proceed in parallel.
    """
    timer = Timer()
    pool = CommandPool(concurrency=10)
    logger.info("Scanning %s ..", pluralize(len(locations), "backup location"))
    for location in locations:
        # prepare=True collects the removal commands without running them.
        for cmd in self.rotate_backups(location, prepare=True, **kw):
            pool.add(cmd)
    if pool.num_commands > 0:
        backups = pluralize(pool.num_commands, "backup")
        logger.info("Preparing to rotate %s (in parallel) ..", backups)
        pool.run()
        logger.info("Successfully rotated %s in %s.", backups, timer)
Rotate the backups in the given locations concurrently. :param locations: One or more values accepted by :func:`coerce_location()`. :param kw: Any keyword arguments are passed on to :func:`rotate_backups()`. This function uses :func:`rotate_backups()` to prepare rotation commands for the given locations and then it removes backups in parallel, one backup per mount point at a time. The idea behind this approach is that parallel rotation is most useful when the files to be removed are on different disks and so multiple devices can be utilized at the same time. Because mount points are per system :func:`rotate_concurrent()` will also parallelize over backups located on multiple remote systems.
def join(self):
    """Wait for every worker thread to finish, then join this
    coordinator thread itself."""
    for worker in self.worker_threads:
        worker.join()
    WorkerThread.join(self)
Joins the coordinator thread and all worker threads.
def track(cls, obj, ptr):
    """Track an object which needs destruction when it is garbage
    collected, by registering a tracker instance for it."""
    tracker = cls(obj, ptr)
    cls._objects.add(tracker)
Track an object which needs destruction when it is garbage collected.
def properties_for(self, index):
    """Return, for each element of ``index``, the list of properties
    whose value collection contains that element.

    Example: with properties 'one': [1, 2, 3, 4] and 'two': [3, 5, 6]::

        >>> properties_for([2, 3, 5])
        [['one'], ['one', 'two'], ['two']]
    """
    # NOTE(review): ``vectorize`` is presumably numpy.vectorize (maps the
    # lookup element-wise over ``index``) — confirm against the file's
    # imports. otypes=[list] keeps each output cell a Python list.
    return vectorize(lambda i: [prop for prop in self.properties() if i in self[prop]], otypes=[list])(index)
Returns a list of properties, such that each entry in the list corresponds to the element of the index given. Example: let properties: 'one':[1,2,3,4], 'two':[3,5,6] >>> properties_for([2,3,5]) [['one'], ['one', 'two'], ['two']]
def schedule(
        time: Union[datetime.time, datetime.datetime],
        callback: Callable, *args):
    """Schedule the callback to be run at the given time with the given
    arguments.

    Args:
        time: Time to run callback. If given as :py:class:`datetime.time`
            then today's date is used.
        callback: Callable scheduled to run.
        args: Arguments to call callback with.
    """
    when = _fillDate(time)
    now = datetime.datetime.now(when.tzinfo)
    delay = (when - now).total_seconds()
    asyncio.get_event_loop().call_later(delay, callback, *args)
Schedule the callback to be run at the given time with the given arguments. Args: time: Time to run callback. If given as :py:class:`datetime.time` then use today as date. callback: Callable scheduled to run. args: Arguments for to call callback with.
def url_read(url, verbose=True):
    r"""Directly read and return the raw response body from ``url``.

    Prepends ``http://`` when no scheme is present. The response handle
    is now always closed: the original ``except IOError: raise`` was a
    no-op, and the handle leaked when ``read()`` failed.
    """
    if '://' not in url:
        url = 'http://' + url
    if verbose:
        print('Reading data from url=%r' % (url,))
    file_ = _urllib.request.urlopen(url)
    try:
        data = file_.read()
    finally:
        file_.close()
    return data
Directly reads data from the given URL.
def remove_stale_sockets(self):
    """Removes stale sockets then adds new ones if pool is too small.

    Lock discipline: the pool lock is held only while mutating the
    socket queue, never across ``connect()`` (which may block on I/O).
    """
    if self.opts.max_idle_time_seconds is not None:
        with self.lock:
            # Oldest-idle sockets sit at the right end (fresh ones are
            # pushed with appendleft below); pop while they exceed the
            # allowed idle time.
            while (self.sockets and
                   self.sockets[-1].idle_time_seconds() > self.opts.max_idle_time_seconds):
                sock_info = self.sockets.pop()
                sock_info.close()

    while True:
        with self.lock:
            if (len(self.sockets) + self.active_sockets >=
                    self.opts.min_pool_size):
                # The pool is already at or above its minimum size.
                break
        # Respect max_pool_size: stop if no connection slot is free.
        if not self._socket_semaphore.acquire(False):
            break
        try:
            # Connect outside the lock, then publish the new socket.
            sock_info = self.connect()
            with self.lock:
                self.sockets.appendleft(sock_info)
        finally:
            self._socket_semaphore.release()
Removes stale sockets then adds new ones if pool is too small.
def reset(self):
    """Stop the timer: zero the elapsed/delta counters, restart the
    clock, and refresh the display."""
    zero = datetime.timedelta()
    self._elapsed = zero
    self._delta = zero
    self._starttime = datetime.datetime.now()
    self.refresh()
Stops the timer and resets its values to 0.
def seek(self, offset, whence=os.SEEK_SET):
    """Seek to a position within this chunk's area; see ``file.seek``.

    Raises ValueError for an unknown ``whence`` or for a target outside
    the chunk boundaries.
    """
    if whence == os.SEEK_SET:
        target = self.offset + offset
    elif whence == os.SEEK_CUR:
        target = self.tell() + offset
    elif whence == os.SEEK_END:
        target = self.offset + self.len + offset
    else:
        raise ValueError("invalid whence {}".format(whence))
    if not (self.offset <= target <= self.offset + self.len):
        raise ValueError("seek position beyond chunk area")
    self.parent_fd.seek(target, os.SEEK_SET)
Seek to position in stream, see file.seek
def javadoc_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """Docutils/Sphinx role for linking to external Javadoc.

    Supports explicit titles (``:role:`title <target>```) and a leading
    ``~`` that shortens the displayed title to its last dotted
    component.

    NOTE(review): the mutable ``options={}``/``content=[]`` defaults
    follow the standard docutils role-function signature; they are not
    mutated here.
    """
    has_explicit_title, title, target = split_explicit_title(text)
    title = utils.unescape(title)
    target = utils.unescape(target)
    if not has_explicit_title:
        # '~pkg.Class' links to pkg.Class but displays just 'Class'.
        target = target.lstrip('~')
        if title[0] == '~':
            title = title[1:].rpartition('.')[2]
    app = inliner.document.settings.env.app
    ref = get_javadoc_ref(app, rawtext, target)
    if not ref:
        raise ValueError("no Javadoc source found for %s in javadoc_url_map" % (target,))
    ref.append(nodes.Text(title, title))
    return [ref], []
Role for linking to external Javadoc
def _get_graph(graph, filename): try: rendered = graph.rendered_file except AttributeError: try: graph.render(os.path.join(server.tmpdir, filename), format='png') rendered = filename except OSError: rendered = None graph.rendered_file = rendered return rendered
Retrieve or render a graph.
def read(self, size=None):
    """Reads up to ``size`` bytes from the internal buffer.

    Unbounded reads (``size`` is None or negative) are not supported on
    a BufferedStream and raise ``NotYetImplementedError``. May return
    fewer bytes than requested once the buffer is nearly exhausted, and
    an empty string when nothing remains.
    """
    if size is None or size < 0:
        raise exceptions.NotYetImplementedError(
            'Illegal read of size %s requested on BufferedStream. '
            'Wrapped stream %s is at position %s-%s, '
            '%s bytes remaining.' %
            (size, self.__stream, self.__start_pos, self.__end_pos,
             self._bytes_remaining))
    data = ''
    if self._bytes_remaining:
        # Clamp the request to what is actually left in the buffer.
        size = min(size, self._bytes_remaining)
        data = self.__buffered_data[
            self.__buffer_pos:self.__buffer_pos + size]
        self.__buffer_pos += size
    return data
Reads from the buffer.
def get_file(self, filename):
    """Get a file from the repo's HEAD commit.

    Returns a file-like stream with the data; raises GitError when the
    path does not exist in the tree.
    """
    log.debug('[%s]: reading: //%s/%s', self.name, self.name, filename)
    try:
        return (self.repo.head.commit.tree / filename).data_stream
    except KeyError as err:
        raise GitError(err)
Get a file from the repo. Returns a file-like stream with the data.
def is_port_default(self):
    """Return whether the URL uses its scheme's default port.

    Returns ``None`` (implicitly) when the scheme has no known default
    port, preserving the original three-state contract.
    """
    default_port = RELATIVE_SCHEME_DEFAULT_PORTS.get(self.scheme)
    if default_port is not None:
        return default_port == self.port
Return whether the URL is using the default port.
def jsonload(model, fp):
    """Load a Versa model dumped into JSON form, either raw or canonical.

    :param model: Versa model populated via ``model.add``
    :param fp: file-like object containing the JSON dump
    """
    dumped_list = json.load(fp)
    for link in dumped_list:
        # Canonical form: [statement_id, [s, p, o, attrs]];
        # raw form: [s, p, o, attrs].
        if len(link) == 2:
            sid, (s, p, o, a) = link
        elif len(link) == 4:
            (s, p, o, a) = link
        # NOTE(review): a link of any other length leaves s/p/o/a unbound
        # and would raise NameError below — confirm inputs are always
        # 2- or 4-element entries.
        tt = a.get('@target-type')
        if tt == '@iri-ref':
            # Rebuild the object as an IRI reference and drop the marker.
            o = I(o)
            a.pop('@target-type', None)
        else:
            # NOTE(review): links whose '@target-type' is absent or not
            # '@iri-ref' are skipped entirely — verify this is intended
            # rather than adding them as plain literals.
            continue
        model.add(s, p, o, a)
    return
Load Versa model dumped into JSON form, either raw or canonical