code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def _config_win32_search(self, search):
    """Configure a Search registry entry.

    Splits the registry value into individual search-suffix tokens and
    adds each one (parsed as a ``dns.name``) to ``self.search``.
    """
    search = str(search)
    split_char = self._determine_split_char(search)
    for token in search.split(split_char):
        name = dns.name.from_text(token)
        # BUG FIX: the original tested `token in self.search`, comparing a
        # raw string against the stored dns.name objects, which never
        # matches; compare the parsed name so duplicates are skipped.
        if name not in self.search:
            self.search.add(name)
def rewrite_url(self, url_info: URLInfo) -> URLInfo:
    """Return a rewritten URL such as escaped fragment."""
    rewriter = self._url_rewriter
    if not rewriter:
        return url_info
    return rewriter.rewrite(url_info)
def remove_identity(cls, sh_db, ident_id): success = False try: api.delete_identity(sh_db, ident_id) logger.debug("Identity %s deleted", ident_id) success = True except Exception as e: logger.debug("Identity not deleted due to %s", str(e)) ...
Delete an identity from SortingHat. :param sh_db: SortingHat database :param ident_id: identity identifier
def _cleanup(self, domains): for option in domains.values(): try: os.remove(option['pot']) except (IOError, OSError): pass
Remove the temporary '.pot' files that were created for the domains.
def set_rating(self, grade_id):
    """Sets the rating.

    arg:    grade_id (osid.id.Id): the new rating
    raise:  InvalidArgument - ``grade_id`` is invalid
    raise:  NoAccess - ``Metadata.isReadOnly()`` is ``true``
    raise:  NullArgument - ``grade_id`` is ``null``
    *compliance: mandatory -- This method must be implemented.*
    """
    metadata = self.get_rating_metadata()
    if metadata.is_read_only():
        raise errors.NoAccess()
    if not self._is_valid_id(grade_id):
        raise errors.InvalidArgument()
    self._my_map['ratingId'] = str(grade_id)
def list_user_page_views(self, user_id, end_time=None, start_time=None): path = {} data = {} params = {} path["user_id"] = user_id if start_time is not None: params["start_time"] = start_time if end_time is not None: params["end_time"] = en...
List user page views. Return the user's page view history in json format, similar to the available CSV download. Pagination is used as described in API basics section. Page views are returned in descending order, newest to oldest.
def get_ngroups(self, field=None):
    """Return the ngroups count if it was requested in the query.

    If grouping on more than one field, provide the field argument to
    specify which count you are looking for; otherwise the group field
    is determined automatically.

    :raises ValueError: if ngroups was not requested in the query.
    """
    group_field = field or self._determine_group_field(field)
    grouped = self.data['grouped'][group_field]
    if 'ngroups' not in grouped:
        raise ValueError("ngroups not found in response. specify group.ngroups in the query.")
    return grouped['ngroups']
def get(cls, id): if CACHE: if id in _cache['Pool']: log.debug('cache hit for pool %d' % id) return _cache['Pool'][id] log.debug('cache miss for pool %d' % id) try: pool = Pool.list({'id': id})[0] except (IndexError, KeyError): ...
Get the pool with id 'id'.
def _request(self, resource, rtype, action=None, payload=None, offset=None, limit=None, requestId=None, is_crud=False): end = self.__end if end.is_set(): raise LinkShutdownException('Client stopped') rng = None if offset is not None and limit is not None: ...
_request amqp queue publish helper return: RequestEvent object or None for failed to publish
def response_builder(self, response): try: r = response.json() result = r['query']['results'] response = { 'num_result': r['query']['count'] , 'result': result } except (Exception,) as e: print(e) ret...
Try to return a pretty formatted response object
def particle_covariance_mtx(weights,locations): warnings.warn('particle_covariance_mtx is deprecated, please use distributions.ParticleDistribution', DeprecationWarning) mu = particle_meanfn(weights, locations) xs = locations.transpose([1, 0]) ws = weights cov = ( np.einsum...
Returns an estimate of the covariance of a distribution represented by a given set of SMC particle. :param weights: An array containing the weights of each particle. :param location: An array containing the locations of each particle. :rtype: :class:`numpy.ndarray`, shape ``(n_m...
def find_project_config_file(project_root: str) -> str:
    """Return absolute path to project-specific config file, if it exists.

    :param project_root: Absolute path to project root directory.

    A project config file is a file named `YCONFIG_FILE` found at the top
    level of the project root dir. Return `None` if project root dir is
    not specified, or if no such file exists.
    """
    if not project_root:
        return None
    candidate = os.path.join(project_root, YCONFIG_FILE)
    # Explicit None (was an implicit fall-through) keeps the documented
    # "no config" contract visible to readers and linters.
    return candidate if os.path.isfile(candidate) else None
def add_to(self, parent, additions):
    """Modify parent to include all elements in additions"""
    for element in additions:
        if element in parent:
            continue
        parent.append(element)
    self.changed()
def _write(self, item, labels, features):
    """Writes the given item to the owned file."""
    payload = Data([item], [labels], [features])
    self._writer.write(payload, self.groupname, append=True)
def semantic_version(tag):
    """Get a valid semantic version for tag.

    :param tag: string of the form ``MAJOR.MINOR.PATCH``.
    :returns: a ``(major, minor, patch)`` tuple of ints.
    :raises CommandError: if *tag* cannot be parsed.
    """
    try:
        version = tuple(int(part) for part in tag.split('.'))
        # Validate explicitly instead of with `assert`, which is stripped
        # when Python runs with -O and would silently accept bad tags.
        if len(version) != 3:
            raise ValueError('expected exactly three components')
        return version
    except Exception as exc:
        raise CommandError(
            'Could not parse "%s", please use '
            'MAJOR.MINOR.PATCH' % tag
        ) from exc
def status(self, value): if self._bug.get('id', None): if value in VALID_STATUS: self._bug['status'] = value else: raise BugException("Invalid status type was used") else: raise BugException("Can not set status unless there is a bug id....
Property for getting or setting the bug status >>> bug.status = "REOPENED"
def check_ts_data_with_ts_target(X, y=None): if y is not None: Nx = len(X) Ny = len(y) if Nx != Ny: raise ValueError("Number of time series different in X (%d) and y (%d)" % (Nx, Ny)) Xt, _ = get_ts_data_parts(X) Ntx = np.array([len(Xt...
Checks time series data with time series target is good. If not raises value error. Parameters ---------- X : array-like, shape [n_series, ...] Time series data and (optionally) contextual data y : array-like, shape [n_series, ...] target data
def genCaCert(self, name, signas=None, outp=None, save=True): pkey, cert = self._genBasePkeyCert(name) ext0 = crypto.X509Extension(b'basicConstraints', False, b'CA:TRUE') cert.add_extensions([ext0]) if signas is not None: self.signCertAs(cert, signas) else: ...
Generates a CA keypair. Args: name (str): The name of the CA keypair. signas (str): The CA keypair to sign the new CA with. outp (synapse.lib.output.Output): The output buffer. Examples: Make a CA named "myca": mycakey, mycacert = cdir.g...
def from_clauses(self, clauses):
    """Copy a list of clauses into this CNF object.

    :param clauses: a list of clauses (lists of ints).
    :type clauses: list(list(int))
    """
    self.clauses = copy.deepcopy(clauses)
    # Track the highest variable index seen across all literals.
    for clause in self.clauses:
        for literal in clause:
            self.nv = max(self.nv, abs(literal))
def link_callback(uri, rel): sUrl = settings.STATIC_URL sRoot = settings.STATIC_ROOT mUrl = settings.MEDIA_URL mRoot = settings.MEDIA_ROOT if uri.startswith(mUrl): path = os.path.join(mRoot, uri.replace(mUrl, "")) elif uri.startswith(sUrl): path = os.path.join(sRoot, uri.replace(...
Convert HTML URIs to absolute system paths so xhtml2pdf can access those resources
def dict_of_lists(self):
    """Return a dictionary mapping each key to the list of its values."""
    out = {}
    for key, value in self._items:
        out.setdefault(key, []).append(value)
    return out
def is_known_scalar(value):
    """Return True if value is a type we expect in a dataframe."""
    def _looks_temporal(v):
        # Wrapping in a Series lets pandas detect datetime64 ('M') and
        # timedelta64 ('m') dtype kinds.
        return pd.Series(v).dtype.kind in ('M', 'm')

    if np.iterable(value):
        return False
    return isinstance(value, numbers.Number) or _looks_temporal(value)
def register(cls, name, type_):
    """Register a new type for an entry-type.

    The 2nd argument has to be a subclass of structures.Entry.
    """
    if not issubclass(type_, Entry):
        raise exceptions.InvalidEntryType(
            "%s is not a subclass of Entry" % str(type_))
    key = name.lower()
    cls._registry[key] = type_
def _process(self, metric): if not self.enabled: return try: try: self.lock.acquire() self.process(metric) except Exception: self.log.error(traceback.format_exc()) finally: if self.lock.locked(): ...
Decorator for processing handlers with a lock, catching exceptions
def get_privkey(self, address: AddressHex, password: str) -> PrivateKey: address = add_0x_prefix(address).lower() if not self.address_in_keystore(address): raise ValueError('Keystore file not found for %s' % address) with open(self.accounts[address]) as data_file: data = ...
Find the keystore file for an account, unlock it and get the private key Args: address: The Ethereum address for which to find the keyfile in the system password: Mostly for testing purposes. A password can be provided as the function argument here. If it's no...
def create_language_model(self, name, base_model_name, dialect=None, description=None, **kwargs): if name is None: raise ValueError('name must be prov...
Create a custom language model. Creates a new custom language model for a specified base model. The custom language model can be used only with the base model for which it is created. The model is owned by the instance of the service whose credentials are used to create it. **Se...
def _check_cygwin_installed(cyg_arch='x86_64'): path_to_cygcheck = os.sep.join(['C:', _get_cyg_dir(cyg_arch), 'bin', 'cygcheck.exe']) LOG.debug('Path to cygcheck.exe: %s', path_to_cygcheck) if not os.path.exists(path_to_cygcheck): ...
Return True or False if cygwin is installed. Use the cygcheck executable to check install. It is installed as part of the base package, and we use it to check packages
def load_data(directory, num):
    """Load numpy data from the data directory.

    The files should be stored in ``../data/<dir>`` and named
    ``0.npy, 1.npy, ... <num - 1>.npy``.

    Returns:
        list: loaded arrays, such that ``list[i]`` holds ``i.npy``.
    """
    root = os.path.abspath(os.path.dirname(__file__))
    data_dir = os.path.join(root, 'data', directory)
    return [np.load(os.path.join(data_dir, '%d.npy' % i)) for i in range(num)]
def compute_update_ratio(weight_tensors, before_weights, after_weights): deltas = [after - before for after, before in zip(after_weights, before_weights)] delta_norms = [np.linalg.norm(d.ravel()) for d in deltas] weight_norms = [np.linalg.norm(w.ravel()) for w in before_weights] ratios = [...
Compute the ratio of gradient norm to weight norm.
def collection_list(self, resource_id, resource_type="collection"): def fetch_children(children): results = [] for child in children: results.append(child["slug"]) if "children" in child: results.extend(fetch_children(child["children"])...
Fetches a list of slug representing descriptions within the specified parent description. :param resource_id str: The slug of the description to fetch children from. :param resource_type str: no-op; not required or used in this implementation. :return: A list of strings representing the slugs ...
def same_page(c):
    """Return true if all the components of c are on the same page of the document.

    Page numbers are based on the PDF rendering of the document.
    """
    # Use a generator so `all` can short-circuit on the first mismatch
    # instead of first materialising every comparison in a list.
    return all(
        _to_span(c[i]).sentence.is_visual()
        and bbox_from_span(_to_span(c[i])).page
        == bbox_from_span(_to_span(c[0])).page
        for i in range(len(c))
    )
def expose_event(self, widget, event): x, y, width, height = event.area self.logger.debug("surface is %s" % self.surface) if self.surface is not None: win = widget.get_window() cr = win.cairo_create() cr.rectangle(x, y, width, height) cr.clip() ...
When an area of the window is exposed, we just copy out of the server-side, off-screen surface to that area.
def _urls_for_js(urls=None):
    """Return templated URLs prepared for javascript."""
    if urls is None:
        from .urls import urlpatterns
        urls = [u.name for u in urlpatterns if getattr(u, 'name', None)]
    mapping = {name: get_uri_template(name) for name in urls}
    mapping.update(getattr(settings, 'LEAFLET_STORAGE_EXTRA_URLS', {}))
    return mapping
def _parse_hunk_line(self, line): components = line.split('@@') if len(components) >= 2: hunk_info = components[1] groups = self.HUNK_LINE_RE.findall(hunk_info) if len(groups) == 1: try: return int(groups[0]) except ...
Given a hunk line in `git diff` output, return the line number at the start of the hunk. A hunk is a segment of code that contains changes. The format of the hunk line is: @@ -k,l +n,m @@ TEXT where `k,l` represent the start line and length before the changes and ...
def query_array(ncfile, name) -> numpy.ndarray:
    """Return the data of the variable with the given name from the given
    NetCDF file, with masked (missing) entries replaced by ``nan``.

    NOTE(review): assumes the variable holds float data — assigning ``nan``
    into an integer-typed masked array would fail; confirm with callers.
    """
    variable = query_variable(ncfile, name)
    # Slicing the variable yields a numpy masked array.
    maskedarray = variable[:]
    # Default to nan so the rewrite below is skipped when the variable
    # defines no explicit _FillValue attribute.
    fillvalue_ = getattr(variable, '_FillValue', numpy.nan)
    if not numpy.isnan(fillvalue_):
        # Overwrite masked (missing) entries with nan before unwrapping.
        maskedarray[maskedarray.mask] = numpy.nan
    return maskedarray.data
def nodes(self, t=None, data=False):
    """Return a list of the nodes in the graph at a given snapshot.

    Parameters
    ----------
    t : snapshot id (default=None)
        If None the method returns all nodes of the flattened graph.
    data : boolean, optional (default=False)
        If False return a list of nodes; otherwise include node data.
    """
    return [node for node in self.nodes_iter(t=t, data=data)]
def get_root_gradebook_ids(self):
    """Gets the root gradebook ``Ids`` in this hierarchy.

    return: (osid.id.IdList) - the root gradebook ``Ids``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    if self._catalog_session is None:
        return self._hierarchy_session.get_roots()
    return self._catalog_session.get_root_catalog_ids()
def get_elements(self, filter_cls, elem_id=None): result = [] if elem_id is not None: try: result = [self._class_collection_map[filter_cls][elem_id]] except KeyError: result = [] else: for e in self._class_collection_map[filter_...
Get a list of elements from the result and filter the element type by a class. :param filter_cls: :param elem_id: ID of the object :type elem_id: Integer :return: List of available elements :rtype: List
def _setup_converter_graph(self, converter_list, prune_converters): for converter in converter_list: if prune_converters: try: converter.configure() except ConverterUnavailable as e: log.warning('%s unavailable: %s' % ...
Set up directed conversion graph, pruning unavailable converters as necessary
def get_value(self):
    """Retrieve the value to inject in the component.

    :return: a shallow per-entry copy of the cached dict, or None when
             nothing has been cached yet
    """
    with self._lock:
        snapshot = self._future_value
        if snapshot is None:
            return None
        # Copy each stored sequence so callers cannot mutate our cache.
        return {key: value[:] for key, value in snapshot.items()}
def get_news(self): headers = {"Content-type": "application/x-www-form-urlencoded","Accept": "text/plain",'Referer': 'http://'+self.domain+'/login.phtml',"User-Agent": user_agent} req = self.session.get('http://'+self.domain+'/team_news.phtml',headers=headers).content soup = BeautifulSoup(req) ...
Get all the news from first page
def get_instance(self, payload):
    """Build an instance of AssistantFallbackActionsInstance

    :param dict payload: Payload response from the API
    :returns: the constructed AssistantFallbackActionsInstance
    """
    assistant_sid = self._solution['assistant_sid']
    return AssistantFallbackActionsInstance(
        self._version,
        payload,
        assistant_sid=assistant_sid,
    )
def total_vat(self):
    """Returns the sum of all Vat objects."""
    aggregated = Vat.objects.filter(receipt=self).aggregate(total=Sum('amount'))
    return aggregated['total'] or 0
def astensor(array: TensorLike) -> BKTensor:
    """Convert to product tensor.

    Converts *array* to a complex TensorFlow tensor and reshapes it to a
    rank-N tensor of shape (2, 2, ..., 2), one axis per qubit, where
    N = log2 of the element count.
    """
    tensor = tf.convert_to_tensor(array, dtype=CTYPE)
    # NOTE(review): `.gpu()` is the TF1.x-era eager API for copying a
    # tensor to GPU memory — confirm it exists in the TF version in use.
    if DEVICE == 'gpu':
        tensor = tensor.gpu()
    N = int(math.log2(size(tensor)))
    tensor = tf.reshape(tensor, ([2]*N))
    return tensor
def ListFiles(self, ext_attrs=False): if not self.IsDirectory(): raise IOError("%s is not a directory." % self.path) for path in self.files: try: filepath = utils.JoinPath(self.path, path) response = self._Stat(filepath, ext_attrs=ext_attrs) pathspec = self.pathspec.Copy() ...
List all files in the dir.
def _valid_directory(self, path): abspath = os.path.abspath(path) if not os.path.isdir(abspath): raise argparse.ArgumentTypeError('Not a valid directory: {}'.format(abspath)) return abspath
Ensure that the given path is valid. :param str path: A valid directory path. :raises: :py:class:`argparse.ArgumentTypeError` :returns: An absolute directory path.
def schema_remove(dbname, name, user=None, db_user=None, db_password=None, db_host=None, db_port=None): if not schema_exists(dbname, name, user=None, db_user=db_user, db_password=db_password, db_host=db_host, db_...
Removes a schema from the Postgres server. CLI Example: .. code-block:: bash salt '*' postgres.schema_remove dbname schemaname dbname Database name we work on schemaname The schema's name we'll remove user System user all operations should be performed on behalf...
def _include_environment_variables(self, program, executor_vars): env_vars = { 'RESOLWE_HOST_URL': self.settings_actual.get('RESOLWE_HOST_URL', 'localhost'), } set_env = self.settings_actual.get('FLOW_EXECUTOR', {}).get('SET_ENV', {}) env_vars.update(executor_vars) en...
Define environment variables.
def user_list(database=None, user=None, password=None, host=None, port=None):
    """List cluster admins or database users.

    If a database is specified: return that database's users list.
    If a database is not specified: return the cluster admins list.
    """
    client = _client(user=user, password=password, host=host, port=port)
    if database:
        client.switch_database(database)
        return client.get_list_users()
    return client.get_list_cluster_admins()
def _exchange_refresh_tokens(self): 'Exchanges a refresh token for an access token' if self.token_cache is not None and 'refresh' in self.token_cache: refresh_form = { 'grant_type': 'refresh_token', 'refresh_token': self.token_cache['refresh'], ...
Exchanges a refresh token for an access token
def _parse_domain_id(self, config): match = re.search(r'domain-id (.+)$', config) value = match.group(1) if match else None return dict(domain_id=value)
Scans the config block and parses the domain-id value Args: config (str): The config block to scan Returns: dict: A dict object that is intended to be merged into the resource dict
def clean_new(self, value):
    """Return a new object instantiated with cleaned data."""
    cleaned = self.schema_class(value).full_clean()
    return self.object_class(**cleaned)
def create_user_deliveryserver(self, domainid, data):
    """Create a user delivery server"""
    endpoint = ENDPOINTS['userdeliveryservers']['new']
    return self.api_call(endpoint, dict(domainid=domainid), body=data)
def rewind(self):
    """Rewind this cursor to its unevaluated state.

    Reset this cursor if it has been partially or completely evaluated.
    Any options that are present on the cursor will remain in effect;
    future iteration will cause new queries to be sent to the server.

    Returns this cursor, allowing chained calls.
    """
    self.__data = deque()  # buffered, not-yet-consumed results
    self.__id = None  # presumably the server-side cursor id — verify
    self.__address = None
    self.__retrieved = 0  # reset the count of results retrieved so far
    self.__killed = False
    return self
def swap(self, a, b):
    """Swaps mem positions a and b, keeping asm in sync."""
    for table in (self.mem, self.asm):
        table[a], table[b] = table[b], table[a]
def pformat(tree):
    """Recursively formats a tree into a nice string representation.

    Returns the empty string for an empty tree; otherwise one line per
    node as produced by ``_pformat``, without trailing whitespace.
    """
    if tree.empty():
        return ''
    return "\n".join(_pformat(tree.root, 0)).strip()
def create_server(cloud, **kwargs):
    """Create a new instance on the named cloud provider."""
    if cloud == 'ec2':
        _create_server_ec2(**kwargs)
        return
    if cloud == 'rackspace':
        _create_server_rackspace(**kwargs)
        return
    if cloud == 'gce':
        _create_server_gce(**kwargs)
        return
    raise ValueError("Unknown cloud type: {}".format(cloud))
def wait_for_jobs(self, job_ids, timeout, delay): if self.skip: return logger.debug("Waiting up to %d sec for completion of the job IDs %s", timeout, job_ids) remaining_job_ids = set(job_ids) found_jobs = [] countdown = timeout while countdown > 0: ...
Waits until the jobs appears in the completed job queue.
def S_star(u, dfs_data):
    """The set of all descendants of u, with u added."""
    descendants = S(u, dfs_data)
    if u not in descendants:
        descendants.append(u)
    return descendants
def _gen_full_path(self, filename, file_system=None): if file_system is None: return '{}/{}'.format(self.dest_file_system, filename) else: if ":" not in file_system: raise ValueError("Invalid file_system specified: {}".format(file_system)) return '{}/{...
Generate full file path on remote device.
def setUser(self, *args, **kwargs): try: user = self.mambuuserclass(entid=self['assignedUserKey'], *args, **kwargs) except KeyError as kerr: err = MambuError("La cuenta %s no tiene asignado un usuario" % self['id']) err.noUser = True raise err exce...
Adds the user for this loan to a 'user' field. User is a MambuUser object. Returns the number of requests done to Mambu.
def get_xpath(stmt, qualified=False, prefix_to_module=False):
    """Gets the XPath of the statement.

    Unless qualified=True, prefixes are only included where the prefix
    changes mid-XPath. prefix_to_module resolves prefixes to module
    names instead (set True for RFC 8040-style paths).
    """
    return mk_path_str(
        stmt,
        with_prefixes=qualified,
        prefix_onchange=True,
        prefix_to_module=prefix_to_module,
    )
def clean_markup(self, markup, parser=None): result_type = type(markup) if isinstance(markup, six.string_types): doc = fromstring(markup, parser=parser) else: doc = copy.deepcopy(markup) self(doc) if issubclass(result_type, six.binary_type): re...
Apply ``Cleaner`` to markup string or document and return a cleaned string or document.
def _read_provenance_from_xml(self, root): path = self._special_properties['provenance'] provenance = root.find(path, XML_NS) for step in provenance.iter('provenance_step'): title = step.find('title').text description = step.find('description').text timestamp ...
read metadata provenance from xml. :param root: container in which we search :type root: ElementTree.Element
def empty_tree(input_list):
    """Return True when input_list contains only (arbitrarily nested) lists."""
    return all(
        isinstance(item, list) and empty_tree(item)
        for item in input_list
    )
def stp(br=None, state='disable', iface=None): kernel = __grains__['kernel'] if kernel == 'Linux': states = {'enable': 'on', 'disable': 'off'} return _os_dispatch('stp', br, states[state]) elif kernel in SUPPORTED_BSD_LIKE: states = {'enable': 'stp', 'disable': '-stp'} return...
Sets Spanning Tree Protocol state for a bridge CLI Example: .. code-block:: bash salt '*' bridge.stp br0 enable salt '*' bridge.stp br0 disable For BSD-like operating systems, it is required to add the interface on which to enable the STP. CLI Example: .. code-block:: bash ...
def getCanonicalID(iname, xrd_tree): xrd_list = xrd_tree.findall(xrd_tag) xrd_list.reverse() try: canonicalID = xri.XRI(xrd_list[0].findall(canonicalID_tag)[0].text) except IndexError: return None childID = canonicalID.lower() for xrd in xrd_list[1:]: parent_sought = chil...
Return the CanonicalID from this XRDS document. @param iname: the XRI being resolved. @type iname: unicode @param xrd_tree: The XRDS output from the resolver. @type xrd_tree: ElementTree @returns: The XRI CanonicalID or None. @returntype: unicode or None
def _build_ds_from_instruction(instruction, ds_from_file_fn): examples_ds = ds_from_file_fn(instruction["filepath"]) mask_ds = _build_mask_ds( mask_offset=instruction["mask_offset"], mask=instruction["mask"], ) ds = tf.data.Dataset.zip((examples_ds, mask_ds)) ds = ds.filter(lambda example, mask: m...
Map an instruction to a real datasets for one particular shard. Args: instruction: A `dict` of `tf.Tensor` containing the instruction to load the particular shard (filename, mask,...) ds_from_file_fn: `fct`, function which returns the dataset associated to the filename Returns: dataset: `t...
def main(): args = parse_arguments() pid = args.pid title = get_programme_title(pid) broadcast_date = get_broadcast_date(pid) listing = extract_listing(pid) filename = get_output_filename(args) tracklisting = generate_output(listing, title, broadcast_date) output_to_file(filename, trackl...
Get a tracklisting, write to audio file or text.
def copy_file_links(self, src): if not self._initialized: raise pycdlibexception.PyCdlibInternalError('Rock Ridge extension not yet initialized') if src.dr_entries.px_record is None: if src.ce_entries.px_record is None: raise pycdlibexception.PyCdlibInvalidInput('...
Copy the number of file links from the source Rock Ridge entry into this Rock Ridge entry. Parameters: src - The source Rock Ridge entry to copy from. Returns: Nothing.
def __fill_buffer(self, size=0):
    """Fills the internal buffer.

    Args:
      size: Number of bytes to read. Will be clamped to
        [self.__buffer_size, MAX_BLOB_FETCH_SIZE].
    """
    # Clamp: fetch at least one full buffer, at most the API's limit.
    read_size = min(max(size, self.__buffer_size), MAX_BLOB_FETCH_SIZE)
    self.__buffer = fetch_data(self.__blob_key, self.__position,
                               self.__position + read_size - 1)
    self.__buffer_position = 0
    # A short read means the blob ended before the requested range.
    self.__eof = len(self.__buffer) < read_size
def _cast_to_type(self, value):
    """Convert the value to a boolean and raise error on failures.

    NOTE: ``1``/``0`` (and ``1.0``/``0.0``) compare equal to ``True``/
    ``False``, so numeric inputs are accepted by the first test too.
    """
    if value in (True, False):
        return bool(value)
    if value in ('t', 'True', '1'):
        return True
    if value in ('f', 'False', '0'):
        return False
    # `fail` is expected to raise a validation error; nothing is returned.
    self.fail('invalid', value=value)
def _parse_config(self, requires_cfg=True): if len(self.config_paths) > 0: try: self._find_config() except BisonError: if not requires_cfg: return raise try: with open(self.config_file, 'r') a...
Parse the configuration file, if one is configured, and add it to the `Bison` state. Args: requires_cfg (bool): Specify whether or not parsing should fail if a config file is not found. (default: True)
def op_paths(self, path_prefix=None):
    """Yield operation paths stored in containers."""
    full_path = (path_prefix + self.path) if path_prefix else self.path
    yield full_path, self
def BoolEncoder(field_number, is_repeated, is_packed): false_byte = b'\x00' true_byte = b'\x01' if is_packed: tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) local_EncodeVarint = _EncodeVarint def EncodePackedField(write, value): write(tag_bytes) local_EncodeVarin...
Returns an encoder for a boolean field.
def get_python(self):
    """Only return cursor instance if configured for multiselect"""
    if not self.multiselect:
        return self._get()
    return super(MultiSelectField, self).get_python()
def update(self, sequence=None, **mapping): if sequence is not None: if isinstance(sequence, dict): for slot in sequence: self[slot] = sequence[slot] else: for slot, value in sequence: self[slot] = value if m...
Add multiple elements to the fact.
def OnUpdateFigurePanel(self, event):
    """Redraw event handler for the figure panel.

    The `updating` flag guards against re-entrant redraws.
    """
    if self.updating:
        return
    self.updating = True
    figure = self.get_figure(self.code)
    self.figure_panel.update(figure)
    self.updating = False
def triplify(binding): triples = [] if binding.data is None: return None, triples if binding.is_object: return triplify_object(binding) elif binding.is_array: for item in binding.items: _, item_triples = triplify(item) triples.extend(item_triples) ...
Recursively generate RDF statement triples from the data and schema supplied to the application.
def plug(self): if self.__plugged: return for _, method in inspect.getmembers(self, predicate=inspect.ismethod): if hasattr(method, '_callback_messages'): for message in method._callback_messages: global_callbacks[message].add(method) s...
Add the actor's methods to the callback registry.
def validate_create_package(package_format, owner, repo, **kwargs): client = get_packages_api() with catch_raise_api_exception(): check = getattr( client, "packages_validate_upload_%s_with_http_info" % package_format ) _, _, headers = check( owner=owner, repo=repo...
Validate parameters for creating a package.
def main(args): ui = getUI(args) if ui.optionIsSet("test"): unittest.main(argv=[sys.argv[0]]) elif ui.optionIsSet("help"): ui.usage() else: verbose = ui.optionIsSet("verbose") stranded = ui.optionIsSet("stranded") if stranded: sys.stderr.write("Sorry, stranded mode hasn't been implemen...
main entry point for the GenomicIntJaccard script. :param args: the arguments for this script, as a list of string. Should already have had things like the script name stripped. That is, if there are no args provided, this should be an empty list.
def get_assignable_objective_bank_ids(self, objective_bank_id): mgr = self._get_provider_manager('LEARNING', local=True) lookup_session = mgr.get_objective_bank_lookup_session(proxy=self._proxy) objective_banks = lookup_session.get_objective_banks() id_list = [] for objective_ban...
Gets a list of objective banks including and under the given objective bank node in which any objective can be assigned. arg: objective_bank_id (osid.id.Id): the ``Id`` of the ``ObjectiveBank`` return: (osid.id.IdList) - list of assignable objective bank ``Ids`` ...
def do_types_conflict(type1: GraphQLOutputType, type2: GraphQLOutputType) -> bool: if is_list_type(type1): return ( do_types_conflict( cast(GraphQLList, type1).of_type, cast(GraphQLList, type2).of_type ) if is_list_type(type2) else True ...
Check whether two types conflict Two types conflict if both types could not apply to a value simultaneously. Composite types are ignored as their individual field types will be compared later recursively. However List and Non-Null types must match.
def plan(self):
    """Gets the associated plan for this invoice.

    The plan is taken from the first invoice item that has one, falling
    back to the subscription's plan, so the view stays consistent even
    when the subscription has since been updated.
    """
    item_plans = (item.plan for item in self.invoiceitems.all() if item.plan)
    found = next(item_plans, None)
    if found:
        return found
    if self.subscription:
        return self.subscription.plan
def rotate_concurrent(self, *locations, **kw): timer = Timer() pool = CommandPool(concurrency=10) logger.info("Scanning %s ..", pluralize(len(locations), "backup location")) for location in locations: for cmd in self.rotate_backups(location, prepare=True, **kw): ...
Rotate the backups in the given locations concurrently. :param locations: One or more values accepted by :func:`coerce_location()`. :param kw: Any keyword arguments are passed on to :func:`rotate_backups()`. This function uses :func:`rotate_backups()` to prepare rotation commands for t...
def join(self):
    """Joins all worker threads, then the coordinator thread itself."""
    for worker in self.worker_threads:
        worker.join()
    WorkerThread.join(self)
def track(cls, obj, ptr):
    """Track an object which needs destruction when it is garbage collected."""
    wrapper = cls(obj, ptr)
    cls._objects.add(wrapper)
def properties_for(self, index):
    """Returns a list of properties, one entry per element of *index*.

    Example: with properties 'one': [1, 2, 3, 4] and 'two': [3, 5, 6],
    ``properties_for([2, 3, 5])`` gives ``[['one'], ['one', 'two'], ['two']]``.
    """
    # numpy.vectorize maps the membership lookup over every element of
    # `index`; otypes=[list] keeps each per-element result a Python list.
    return vectorize(lambda i: [prop for prop in self.properties() if i in self[prop]], otypes=[list])(index)
def schedule(
        time: Union[datetime.time, datetime.datetime],
        callback: Callable,
        *args):
    """Schedule the callback to be run at the given time with the given arguments.

    Args:
        time: Time to run callback. If given as :py:class:`datetime.time`
            then use today as date.
        callback: Callable scheduled to run.
        args: Arguments to call callback with.
    """
    when = _fillDate(time)
    now = datetime.datetime.now(when.tzinfo)
    delay_seconds = (when - now).total_seconds()
    asyncio.get_event_loop().call_later(delay_seconds, callback, *args)
def url_read(url, verbose=True):
    r"""Directly reads data from url.

    Prepends ``http://`` when *url* carries no scheme. Returns the raw
    response bytes.
    """
    if url.find('://') == -1:
        url = 'http://' + url
    if verbose:
        print('Reading data from url=%r' % (url,))
    # The original wrapped urlopen in a no-op `except IOError: raise`
    # and leaked the response object if read() raised; close it always.
    file_ = _urllib.request.urlopen(url)
    try:
        data = file_.read()
    finally:
        file_.close()
    return data
def remove_stale_sockets(self): if self.opts.max_idle_time_seconds is not None: with self.lock: while (self.sockets and self.sockets[-1].idle_time_seconds() > self.opts.max_idle_time_seconds): sock_info = self.sockets.pop() ...
Removes stale sockets then adds new ones if pool is too small.
def reset(self):
    """Stops the timer and resets its values to 0."""
    zero = datetime.timedelta()
    self._elapsed = zero
    self._delta = zero
    self._starttime = datetime.datetime.now()
    self.refresh()
def seek(self, offset, whence=os.SEEK_SET): pos = None if whence == os.SEEK_SET: pos = self.offset + offset elif whence == os.SEEK_CUR: pos = self.tell() + offset elif whence == os.SEEK_END: pos = self.offset + self.len + offset else: ...
Seek to position in stream, see file.seek
def javadoc_role(name, rawtext, text, lineno, inliner, options={}, content=[]): has_explicit_title, title, target = split_explicit_title(text) title = utils.unescape(title) target = utils.unescape(target) if not has_explicit_title: target = target.lstrip('~') if title[0] == '~': ...
Role for linking to external Javadoc
def _get_graph(graph, filename): try: rendered = graph.rendered_file except AttributeError: try: graph.render(os.path.join(server.tmpdir, filename), format='png') rendered = filename except OSError: rendered = None graph.rendered_file = rendered ...
Retrieve or render a graph.
def read(self, size=None): if size is None or size < 0: raise exceptions.NotYetImplementedError( 'Illegal read of size %s requested on BufferedStream. ' 'Wrapped stream %s is at position %s-%s, ' '%s bytes remaining.' % (size, self.__st...
Reads from the buffer.
def get_file(self, filename):
    """Get a file from the repo. Returns a file-like stream with the data."""
    log.debug('[%s]: reading: //%s/%s', self.name, self.name, filename)
    try:
        blob = self.repo.head.commit.tree/filename
        return blob.data_stream
    except KeyError as err:
        raise GitError(err)
def is_port_default(self):
    """Return whether the URL is using its scheme's default port.

    Returns ``None`` (falsy) when the scheme has no known default port,
    preserving the original three-valued contract; the single ``.get``
    replaces the original's double dictionary lookup.
    """
    default_port = RELATIVE_SCHEME_DEFAULT_PORTS.get(self.scheme)
    if default_port is None:
        return None
    return default_port == self.port
def jsonload(model, fp): dumped_list = json.load(fp) for link in dumped_list: if len(link) == 2: sid, (s, p, o, a) = link elif len(link) == 4: (s, p, o, a) = link tt = a.get('@target-type') if tt == '@iri-ref': o = I(o) ...
Load Versa model dumped into JSON form, either raw or canonical