def get_config_section(self, name):
    """ Get a section of a configuration """
    if self.config.has_section(name):
        return self.config.items(name)
    return []
def _sleep(current_sleep, max_sleep=_MAX_SLEEP, multiplier=_MULTIPLIER):
    """Sleep and produce a new sleep time.

    .. _Exponential Backoff And Jitter: https://www.awsarchitectureblog.com/\
        2015/03/backoff.html

    Select a duration between zero and ``current_sleep``. It might seem
    counterintuitive to have so much jitter, but
    `Exponential Backoff And Jitter`_ argues that "full jitter" is the best
    strategy.

    Args:
        current_sleep (float): The current "max" for sleep interval.
        max_sleep (Optional[float]): Eventual "max" sleep time.
        multiplier (Optional[float]): Multiplier for exponential backoff.

    Returns:
        float: Newly doubled ``current_sleep`` or ``max_sleep`` (whichever
        is smaller).
    """
    actual_sleep = random.uniform(0.0, current_sleep)
    time.sleep(actual_sleep)
    return min(multiplier * current_sleep, max_sleep)
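A minimal retry loop built around this backoff helper could look like the following sketch. The `_MAX_SLEEP` and `_MULTIPLIER` constants and the `flaky_call` argument are hypothetical stand-ins, not part of the original module.

import random
import time

_MAX_SLEEP = 32.0   # hypothetical cap, in seconds
_MULTIPLIER = 2.0   # hypothetical backoff factor

def retry_with_full_jitter(flaky_call, attempts=5, initial_sleep=1.0):
    # Retry `flaky_call`, sleeping with full-jitter exponential backoff
    # between attempts; re-raise the last failure.
    current_sleep = initial_sleep
    for attempt in range(attempts):
        try:
            return flaky_call()
        except Exception:
            if attempt == attempts - 1:
                raise
            current_sleep = _sleep(current_sleep)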
def _max_width_formatter(string, cols, separator='\n'):
    """Returns a freshly formatted
    :param string: string to be formatted
    :type string: basestring or clint.textui.colored.ColoredString
    :param cols: max width the text to be formatted
    :type cols: int
    :param separator: separator to break rows
    :type separator: basestring
    """
    is_color = isinstance(string, ColoredString)
    if is_color:
        string_copy = string._new('')
        string = string.s

    stack = tsplit(string, NEWLINES)
    for i, substring in enumerate(stack):
        stack[i] = substring.split()

    _stack = []

    for row in stack:
        _row = ['', ]
        _row_i = 0

        for word in row:
            if (len(_row[_row_i]) + len(word)) <= cols:
                _row[_row_i] += word
                _row[_row_i] += ' '

            elif len(word) > cols:

                # ensure empty row
                if len(_row[_row_i]):
                    _row[_row_i] = _row[_row_i].rstrip()
                    _row.append('')
                    _row_i += 1

                chunks = schunk(word, cols)
                for i, chunk in enumerate(chunks):
                    if not (i + 1) == len(chunks):
                        _row[_row_i] += chunk
                        _row[_row_i] = _row[_row_i].rstrip()
                        _row.append('')
                        _row_i += 1
                    else:
                        _row[_row_i] += chunk
                        _row[_row_i] += ' '
            else:
                _row[_row_i] = _row[_row_i].rstrip()
                _row.append('')
                _row_i += 1
                _row[_row_i] += word
                _row[_row_i] += ' '
        else:
            _row[_row_i] = _row[_row_i].rstrip()

        _row = map(str, _row)
        _stack.append(separator.join(_row))

    _s = '\n'.join(_stack)
    if is_color:
        _s = string_copy._new(_s)
    return _s
def xoffset(self, value):
    """gets/sets the xoffset"""
    if self._xoffset != value and \
       isinstance(value, (int, float, long)):
        self._xoffset = value
def document_delete(index, doc_type, id, hosts=None, profile=None):
    '''
    Delete a document from an index

    index
        Index name where the document resides
    doc_type
        Type of the document
    id
        Document identifier

    CLI example::

        salt myminion elasticsearch.document_delete testindex doctype1 AUx-384m0Bug_8U80wQZ
    '''
    es = _get_instance(hosts, profile)
    try:
        return es.delete(index=index, doc_type=doc_type, id=id)
    except elasticsearch.exceptions.NotFoundError:
        return None
    except elasticsearch.TransportError as e:
        raise CommandExecutionError(
            "Cannot delete document {0} in index {1}, server returned code {2} with message {3}".format(
                id, index, e.status_code, e.error))
def unsafe_peek(init):
    """
    Deserialize all the attributes available in the container and pass them
    in the same order as they come in the container.

    This is a factory function; returns the actual `peek` routine.

    Arguments:
        init: type constructor.

    Returns:
        callable: deserializer (`peek` routine).
    """
    def peek(store, container, _stack=None):
        return init(*[
            store.peek(attr, container, _stack=_stack)
            for attr in container
        ])
    return peek
def set_distribute_verbatim(self, distribute_verbatim=None):
    """Sets the distribution rights.

    :param distribute_verbatim: right to distribute verbatim copies
    :type distribute_verbatim: ``boolean``
    :raise: ``InvalidArgument`` -- ``distribute_verbatim`` is invalid
    :raise: ``NoAccess`` -- authorization failure

    *compliance: mandatory -- This method must be implemented.*
    """
    if distribute_verbatim is None:
        raise NullArgument()
    metadata = Metadata(**settings.METADATA['distribute_verbatim'])
    if metadata.is_read_only():
        raise NoAccess()
    if self._is_valid_input(distribute_verbatim, metadata, array=False):
        self._my_map['canDistributeVerbatim'] = distribute_verbatim
    else:
        raise InvalidArgument()
def decode(cls, bytes, cmddict=None):
    """Decodes sequence command attributes from an array of bytes and
    returns a new SeqCmdAttrs.
    """
    byte = struct.unpack('B', bytes)[0]
    self = cls()
    defval = self.default
    for bit, name, value0, value1, default in SeqCmdAttrs.Table:
        mask = 1 << bit
        bitset = mask & byte
        defset = mask & defval
        if bitset != defset:
            if bitset:
                self.attrs[name] = value1
            else:
                self.attrs[name] = value0
    return self
def make_shell_logfiles_url(host, shell_port, _, instance_id=None):
    """
    Make the url for log-files in heron-shell from the info stored in stmgr.
    If no instance_id is provided, the link will be to the dir for the whole
    container. If shell port is not present, it returns None.
    """
    if not shell_port:
        return None
    if not instance_id:
        return "http://%s:%d/browse/log-files" % (host, shell_port)
    else:
        return "http://%s:%d/file/log-files/%s.log.0" % (host, shell_port, instance_id)
def get_space_information(self, space_key, expand=None, callback=None):
    """
    Returns information about a space.

    :param space_key (string): A string containing the key of the space.
    :param expand (string): OPTIONAL: A comma separated list of properties to
        expand on the space. Default: Empty.
    :param callback: OPTIONAL: The callback to execute on the resulting data,
        before the method returns. Default: None (no callback, raw data
        returned).
    :return: The JSON data returned from the space/{spaceKey} endpoint, or
        the results of the callback. Will raise requests.HTTPError on bad
        input, potentially.
    """
    params = {}
    if expand:
        params["expand"] = expand
    return self._service_get_request("rest/api/space/{key}".format(key=space_key),
                                     params=params, callback=callback)
def refresh(self):
    """
    Security endpoint for the refresh token, so we can obtain a new
    token without forcing the user to login again
    ---
    post:
      responses:
        200:
          description: Refresh Successful
          content:
            application/json:
              schema:
                type: object
                properties:
                  refresh_token:
                    type: string
        401:
          $ref: '#/components/responses/401'
        500:
          $ref: '#/components/responses/500'
    """
    resp = {
        API_SECURITY_REFRESH_TOKEN_KEY: create_access_token(
            identity=get_jwt_identity(), fresh=False
        )
    }
    return self.response(200, **resp)
def parseprofile(profilelog, out):
    ''' Parse a profile log and print the result on screen '''
    file = open(out, 'w')  # opening the output file
    print('Opening the profile in %s...' % profilelog)
    # parsing the profile with pstats, and output everything to the file
    p = pstats.Stats(profilelog, stream=file)
    print('Generating the stats, please wait...')
    file.write("=== All stats:\n")
    p.strip_dirs().sort_stats(-1).print_stats()
    file.write("=== Cumulative time:\n")
    p.sort_stats('cumulative').print_stats(100)
    file.write("=== Time:\n")
    p.sort_stats('time').print_stats(100)
    file.write("=== Time + cumulative time:\n")
    p.sort_stats('time', 'cum').print_stats(.5, 'init')
    file.write("=== Callees:\n")
    p.print_callees()
    file.write("=== Callers:\n")
    p.print_callers()
    #p.print_callers(.5, 'init')
    #p.add('fooprof')
    file.close()
    print('Stats generated and saved to %s.' % out)
    print('Everything is done. Exiting')
def write_summary_cnts_all(self):
    """Write summary of level and depth counts for all active GO Terms."""
    cnts = self.get_cnts_levels_depths_recs(set(self.obo.values()))
    self._write_summary_cnts(cnts)
def get_snippet(self, snippet_key=None):
    '''Get all/one specific snippet by its key

    Args:
        key    snippet key (default: None, i.e. ALL)

    return (status code, snippet dict or list thereof)
    '''
    uri = '/'.join([
        self.api_uri,
        self.snippets_suffix
    ])
    if snippet_key:
        uri = '/'.join([uri, snippet_key])
    code, data = self._req('get', uri)
    return code, data
def generic_find_constraint_name(table, columns, referenced, db):
    """Utility to find a constraint name in alembic migrations"""
    t = sa.Table(table, db.metadata, autoload=True, autoload_with=db.engine)
    for fk in t.foreign_key_constraints:
        if fk.referred_table.name == referenced and set(fk.column_keys) == columns:
            return fk.name
def add_default_import(cls, module: str):
    """Add a gated default import to the default imports.

    In particular, we need to avoid importing 'basilisp.core' before we
    have finished macro-expanding."""
    if module in cls.GATED_IMPORTS:
        cls.DEFAULT_IMPORTS.swap(lambda s: s.cons(sym.symbol(module)))
def blob(self, nodeid, tag, start=0, end=0xFFFFFFFF):
    """
    Blobs are stored in sequential nodes with increasing index values.
    Most blobs, like scripts, start at index 0; long names start at a
    specified offset.
    """
    startkey = self.makekey(nodeid, tag, start)
    endkey = self.makekey(nodeid, tag, end)
    cur = self.btree.find('ge', startkey)
    data = b''
    while cur.getkey() <= endkey:
        data += cur.getval()
        cur.next()
    return data
def GetArtifactPathDependencies(rdf_artifact):
    """Return a set of knowledgebase path dependencies.

    Args:
      rdf_artifact: RDF artifact object.

    Returns:
      A set of strings for the required kb objects e.g.
      ["users.appdata", "systemroot"]
    """
    deps = set()
    for source in rdf_artifact.sources:
        for arg, value in iteritems(source.attributes):
            paths = []
            if arg in ["path", "query"]:
                paths.append(value)
            if arg == "key_value_pairs":
                # This is a REGISTRY_VALUE {key:blah, value:blah} dict.
                paths.extend([x["key"] for x in value])
            if arg in ["keys", "paths", "path_list", "content_regex_list"]:
                paths.extend(value)
            for path in paths:
                for match in artifact_utils.INTERPOLATED_REGEX.finditer(path):
                    deps.add(match.group()[2:-2])  # Strip off %%.
    deps.update(GetArtifactParserDependencies(rdf_artifact))
    return deps
def set_permissions(self, object, replace=False):
    """
    Sets the S3 ACL grants for the given object to the appropriate
    value based on the type of Distribution. If the Distribution
    is serving private content the ACL will be set to include the
    Origin Access Identity associated with the Distribution. If
    the Distribution is serving public content the content will
    be set up with "public-read".

    :type object: :class:`boto.cloudfront.object.Object`
    :param object: The Object whose ACL is being set

    :type replace: bool
    :param replace: If False, the Origin Access Identity will be
                    appended to the existing ACL for the object.
                    If True, the ACL for the object will be
                    completely replaced with one that grants
                    READ permission to the Origin Access Identity.
    """
    if isinstance(self.config.origin, S3Origin):
        if self.config.origin.origin_access_identity:
            id = self.config.origin.origin_access_identity.split('/')[-1]
            oai = self.connection.get_origin_access_identity_info(id)
            policy = object.get_acl()
            if replace:
                policy.acl = ACL()
            policy.acl.add_user_grant('READ', oai.s3_user_id)
            object.set_acl(policy)
        else:
            object.set_canned_acl('public-read')
def _initURL(self, org_url=None, token_url=None, referer_url=None):
    """ sets proper URLs for AGOL """
    if org_url is not None and org_url != '':
        if not org_url.startswith('http://') and not org_url.startswith('https://'):
            org_url = 'http://' + org_url
        self._org_url = org_url
    if not self._org_url.startswith('http://') and not self._org_url.startswith('https://'):
        self._org_url = 'http://' + self._org_url
    if self._org_url.lower().find('/sharing/rest') > -1:
        self._url = self._org_url
    else:
        self._url = self._org_url + "/sharing/rest"
    if self._url.startswith('http://'):
        self._surl = self._url.replace('http://', 'https://')
    else:
        self._surl = self._url
    if token_url is None:
        results = self._get(url=self._surl + '/info',
                            param_dict={'f': 'json'},
                            proxy_port=self._proxy_port,
                            proxy_url=self._proxy_url)
        if 'authInfo' in results and 'tokenServicesUrl' in results['authInfo']:
            self._token_url = results['authInfo']['tokenServicesUrl']
        else:
            self._token_url = self._surl + '/generateToken'
    else:
        self._token_url = token_url
    parsed_url = urlparse(self._org_url)
    self._parsed_org_url = urlunparse((parsed_url[0], parsed_url[1], "", "", "", ""))
    if referer_url is None:
        self._referer_url = parsed_url.netloc
def count_year(year, **kwargs):
    '''
    Lists occurrence counts by year

    :param year: [int] year range, e.g., ``1990,2000``. Does not support
        ranges like ``asterisk,2010``

    :return: dict

    Usage::

        from pygbif import occurrences
        occurrences.count_year(year = '1990,2000')
    '''
    url = gbif_baseurl + 'occurrence/counts/year'
    out = gbif_GET(url, {'year': year}, **kwargs)
    return out
def handle_connection_repl(client):
    """
    Handles connection.
    """
    client.settimeout(None)

    # # disable this till we have evidence that it's needed
    # client.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 0)
    # # Note: setting SO_RCVBUF on UDS has no effect, see:
    # # http://man7.org/linux/man-pages/man7/unix.7.html

    backup = []
    old_interval = getinterval()
    patches = [('r', ('stdin', '__stdin__')), ('w', ('stdout', '__stdout__'))]
    if _MANHOLE.redirect_stderr:
        patches.append(('w', ('stderr', '__stderr__')))
    try:
        client_fd = client.fileno()
        for mode, names in patches:
            for name in names:
                backup.append((name, getattr(sys, name)))
                setattr(sys, name, _ORIGINAL_FDOPEN(client_fd, mode, 1 if PY3 else 0))
        try:
            handle_repl(_MANHOLE.locals)
        except Exception as exc:
            _LOG("REPL failed with %r." % exc)
        _LOG("DONE.")
    finally:
        try:
            # Change the switch/check interval to something ridiculous. We
            # don't want to have other thread try to write to the redirected
            # sys.__std*/sys.std* - it would fail horribly.
            setinterval(2147483647)
            try:
                client.close()  # close before it's too late. it may already be dead
            except IOError:
                pass
            junk = []  # keep the old file objects alive for a bit
            for name, fh in backup:
                junk.append(getattr(sys, name))
                setattr(sys, name, fh)
            del backup
            for fh in junk:
                try:
                    if hasattr(fh, 'detach'):
                        fh.detach()
                    else:
                        fh.close()
                except IOError:
                    pass
                del fh
            del junk
        finally:
            setinterval(old_interval)
            _LOG("Cleaned up.")
def encode(data, checksum=True):
    """Convert binary to base58 using BASE58_ALPHABET."""
    if checksum:
        data = data + utils.hash256(data)[:4]
    v, prefix = to_long(256, lambda x: x, iter(data))
    data = from_long(v, prefix, BASE58_BASE, lambda v: BASE58_ALPHABET[v])
    return data.decode("utf8")
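For readers without the surrounding `to_long`/`from_long`/`utils.hash256` helpers, here is a minimal self-contained sketch of the same Base58Check idea, assuming the standard Bitcoin alphabet and SHA-256 from the standard library.

import hashlib

BASE58_ALPHABET = b'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'

def base58check_encode(data: bytes) -> str:
    # Append a 4-byte double-SHA-256 checksum, then convert base 256 -> base 58.
    data = data + hashlib.sha256(hashlib.sha256(data).digest()).digest()[:4]
    value = int.from_bytes(data, 'big')
    encoded = b''
    while value:
        value, rem = divmod(value, 58)
        encoded = BASE58_ALPHABET[rem:rem + 1] + encoded
    # Each leading zero byte maps to the alphabet's first character ('1').
    pad = len(data) - len(data.lstrip(b'\x00'))
    return (BASE58_ALPHABET[:1] * pad + encoded).decode('ascii')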
def enrich_variants(graph: BELGraph, func: Union[None, str, Iterable[str]] = None):
    """Add the reference nodes for all variants of the given function.

    :param graph: The target BEL graph to enrich
    :param func: The function by which the subject of each triple is
        filtered. Defaults to the set of protein, rna, mirna, and gene.
    """
    if func is None:
        func = {PROTEIN, RNA, MIRNA, GENE}

    nodes = list(get_nodes_by_function(graph, func))
    for u in nodes:
        parent = u.get_parent()

        if parent is None:
            continue

        if parent not in graph:
            graph.add_has_variant(parent, u)
def json2py(json_obj):
    """
    Converts the inputted JSON object to a python value.

    :param json_obj | <variant>
    """
    for key, value in json_obj.items():
        if type(value) not in (str, unicode):
            continue

        # restore a datetime
        if re.match('^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}:\d+$', value):
            value = datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S:%f')
        elif re.match('^\d{4}-\d{2}-\d{2}$', value):
            year, month, day = map(int, value.split('-'))
            value = datetime.date(year, month, day)
        elif re.match('^\d{2}:\d{2}:\d{2}:\d+$', value):
            hour, minute, second, micro = map(int, value.split(':'))
            value = datetime.time(hour, minute, second, micro)
        else:
            found = False
            for decoder in _decoders:
                success, new_value = decoder(value)
                if success:
                    value = new_value
                    found = True
                    break

            if not found:
                continue

        json_obj[key] = value
    return json_obj
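The date/time formats this function recognizes can be read off its regexes. A small illustration (note that the dict is mutated in place; the `record` dict here is hypothetical):

record = {'when': '2015-06-01 12:30:45:500000', 'day': '2015-06-01', 'note': 'hi'}
json2py(record)
# record['when'] -> datetime.datetime(2015, 6, 1, 12, 30, 45, 500000)
# record['day']  -> datetime.date(2015, 6, 1)
# record['note'] stays a plain string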
def parse_class_names(args):
    """ parse # classes and class_names if applicable """
    num_class = args.num_class
    if len(args.class_names) > 0:
        if os.path.isfile(args.class_names):
            # try to open it to read class names
            with open(args.class_names, 'r') as f:
                class_names = [l.strip() for l in f.readlines()]
        else:
            class_names = [c.strip() for c in args.class_names.split(',')]
        assert len(class_names) == num_class, str(len(class_names))
        for name in class_names:
            assert len(name) > 0
    else:
        class_names = None
    return class_names
def get_ips(v6=False):
    """Returns all available IPs matching to interfaces, using the windows
    system. Should only be used as a WinPcapy fallback."""
    res = {}
    for iface in six.itervalues(IFACES):
        ips = []
        for ip in iface.ips:
            if v6 and ":" in ip:
                ips.append(ip)
            elif not v6 and ":" not in ip:
                ips.append(ip)
        res[iface] = ips
    return res
def traverse_commits(self) -> Generator[Commit, None, None]:
    """
    Analyze all the specified commits (all of them by default), returning
    a generator of commits.
    """
    if isinstance(self._path_to_repo, str):
        self._path_to_repo = [self._path_to_repo]

    for path_repo in self._path_to_repo:
        # if it is a remote repo, clone it first in a temporary folder!
        if self._isremote(path_repo):
            tmp_folder = tempfile.TemporaryDirectory()
            path_repo = self._clone_remote_repos(tmp_folder.name, path_repo)

        git_repo = GitRepository(path_repo)
        self._sanity_check_filters(git_repo)
        self._check_timezones()

        logger.info('Analyzing git repository in %s', git_repo.path)

        if self._filepath is not None:
            self._filepath_commits = git_repo.get_commits_modified_file(
                self._filepath)

        for commit in git_repo.get_list_commits(self._only_in_branch,
                                                not self._reversed_order):
            logger.info('Commit #%s in %s from %s', commit.hash,
                        commit.committer_date, commit.author.name)

            if self._is_commit_filtered(commit):
                logger.info('Commit #%s filtered', commit.hash)
                continue

            yield commit
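This generator is typically consumed from the library's mining entry point; a sketch, assuming PyDriller's pre-2.0 `RepositoryMining` class (which the `GitRepository` usage above suggests) and a hypothetical repository path:

from pydriller import RepositoryMining  # assumed import path for this era of PyDriller

for commit in RepositoryMining('path/to/repo').traverse_commits():
    print(commit.hash, commit.author.name, commit.committer_date)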
def process_post_tag(self, bulk_mode, api_tag):
    """
    Create or update a Tag related to a post.

    :param bulk_mode: If True, minimize db operations by bulk creating post objects
    :param api_tag: the API data for the Tag
    :return: the Tag object
    """
    tag = None

    # try to get from the ref data map if in bulk mode
    if bulk_mode:
        tag = self.ref_data_map["tags"].get(api_tag["ID"])

    # double check the db before giving up, we may have sync'd it in a previous run
    if not tag:
        tag, created = Tag.objects.get_or_create(
            site_id=self.site_id,
            wp_id=api_tag["ID"],
            defaults=self.api_object_data("tag", api_tag))
        if tag and not created:
            self.update_existing_tag(tag, api_tag)

    # add to ref data map so later lookups work
    if tag:
        self.ref_data_map["tags"][api_tag["ID"]] = tag

    return tag
def plot(x, y, z, ax=None, **kwargs):
    r"""
    Plot iso-probability mass function, converted to sigmas.

    Parameters
    ----------
    x, y, z : numpy arrays
        Same as arguments to :func:`matplotlib.pyplot.contour`

    ax: axes object, optional
        :class:`matplotlib.axes._subplots.AxesSubplot` to plot the contours
        onto. If unsupplied, then :func:`matplotlib.pyplot.gca()` is used to
        get the last axis used, or create a new one.

    colors: color scheme, optional
        :class:`matplotlib.colors.LinearSegmentedColormap`
        Color scheme to plot with. Recommend plotting in reverse
        (Default: :class:`matplotlib.pyplot.cm.Reds_r`)

    smooth: float, optional
        Percentage by which to smooth the contours.
        (Default: no smoothing)

    contour_line_levels: List[float], optional
        Contour lines to be plotted. (Default: [1, 2, 3])

    linewidths: float, optional
        Thickness of contour lines. (Default: 0.3)

    contour_color_levels: List[float], optional
        Contour color levels.
        (Default: `numpy.arange(0, contour_line_levels[-1] + 1, fineness)`)

    fineness: float, optional
        Spacing of contour color levels. (Default: 0.5)

    lines: bool, optional
        (Default: True)

    rasterize_contours: bool, optional
        Rasterize the contours while keeping the lines, text etc in vector
        format. Useful for reducing file size bloat and making printing
        easier when you have dense contours. (Default: False)

    Returns
    -------
    cbar: color bar
        :class:`matplotlib.contour.QuadContourSet`
        Colors to create a global colour bar
    """
    if ax is None:
        ax = matplotlib.pyplot.gca()

    # Get inputs
    colors = kwargs.pop('colors', matplotlib.pyplot.cm.Reds_r)
    smooth = kwargs.pop('smooth', False)
    linewidths = kwargs.pop('linewidths', 0.3)
    contour_line_levels = kwargs.pop('contour_line_levels', [1, 2, 3])
    fineness = kwargs.pop('fineness', 0.5)
    default_color_levels = numpy.arange(0, contour_line_levels[-1] + 1,
                                        fineness)
    contour_color_levels = kwargs.pop('contour_color_levels',
                                      default_color_levels)
    rasterize_contours = kwargs.pop('rasterize_contours', False)
    lines = kwargs.pop('lines', True)
    if kwargs:
        raise TypeError('Unexpected **kwargs: %r' % kwargs)

    # Convert to sigmas
    z = numpy.sqrt(2) * scipy.special.erfinv(1 - z)

    # Gaussian filter if desired the sigmas by a factor of smooth%
    if smooth:
        sigma = smooth * numpy.array(z.shape) / 100.0
        z = scipy.ndimage.gaussian_filter(z, sigma=sigma, order=0)

    # Plot the filled contours onto the axis ax
    cbar = ax.contourf(x, y, z, cmap=colors, levels=contour_color_levels)

    # Rasterize contours (the rest of the figure stays in vector format)
    if rasterize_contours:
        for c in cbar.collections:
            c.set_rasterized(True)

    # Remove those annoying white lines
    for c in cbar.collections:
        c.set_edgecolor("face")

    # Plot some sigma-based contour lines
    if lines:
        ax.contour(x, y, z, colors='k', linewidths=linewidths,
                   levels=contour_line_levels)

    # Return the contours for use as a colourbar later
    return cbar
def working_directory(self):
    """
    Get the current working directory.

    :rtype: str
    :return: current working directory
    """
    _complain_ifclosed(self.closed)
    wd = self.fs.get_working_directory()
    return wd
def findOrLoadRenderModel(self, pchRenderModelName):
    "Purpose: Finds a render model we've already loaded or loads a new one"
    pRenderModel = None
    for model in self.m_vecRenderModels:
        if model.getName() == pchRenderModelName:
            pRenderModel = model
            break

    # load the model if we didn't find one
    if pRenderModel is None:
        error = openvr.EVRRenderModelError()
        while True:
            error, pModel = openvr.VRRenderModels().loadRenderModel_Async(pchRenderModelName)
            if error != openvr.VRRenderModelError_Loading:
                break
            threadSleep(1)
        if error != openvr.VRRenderModelError_None:
            dprintf("Unable to load render model %s - %s\n" % (
                pchRenderModelName,
                openvr.VRRenderModels().getRenderModelErrorNameFromEnum(error)))
            return None  # move on to the next tracked device

        while True:
            error, pTexture = openvr.VRRenderModels().loadTexture_Async(pModel.contents.diffuseTextureId)
            if error != openvr.VRRenderModelError_Loading:
                break
            threadSleep(1)
        if error != openvr.VRRenderModelError_None:
            dprintf("Unable to load render texture id:%d for render model %s\n" % (
                pModel.contents.diffuseTextureId, pchRenderModelName))
            openvr.VRRenderModels().freeRenderModel(pModel)
            return None  # move on to the next tracked device

        pRenderModel = CGLRenderModel(pchRenderModelName)
        if not pRenderModel.bInit(pModel.contents, pTexture.contents):
            dprintf("Unable to create GL model from render model %s\n" % pchRenderModelName)
            # delete pRenderModel
            pRenderModel = None
        else:
            self.m_vecRenderModels.append(pRenderModel)
        openvr.VRRenderModels().freeRenderModel(pModel)
        openvr.VRRenderModels().freeTexture(pTexture)

    return pRenderModel
def set_font_size(self, size):
    """Convenience method for just changing font size."""
    if self.font.font_size != size:
        self.font._set_size(size)
def login_checking_email(pending_id, ticket, response,
                         detail_url='https://pswdless.appspot.com/rest/detail'):
    """
    Log user in using Passwordless service

    :param pending_id: PendingExternalToMainUser's id
    :param ticket: ticket returned from Passwordless
    :param response: Response object from webapp2
    :param detail_url: url to check ticket and user data
    :return: a Command that logs the user in when executed
    """
    return LoginCheckingEmail(pending_id, ticket, response, USER_COOKIE_NAME, detail_url)
def _delete_entity(self):
    """Delete entity from datastore.

    Attempts to delete using the key_name stored on the object, whether or
    not the given key is in the datastore.
    """
    if self._is_ndb():
        _NDB_KEY(self._model, self._key_name).delete()
    else:
        entity_key = db.Key.from_path(self._model.kind(), self._key_name)
        db.delete(entity_key)
def _copy_each_include_files_to_include_dir(self):
    """Copy include header files for each directory to include directory.

    Copy include header files
    from
        rpm/
            rpmio/*.h
            lib/*.h
            build/*.h
            sign/*.h
    to
        rpm/
            include/
                rpm/*.h
    . This is a status after running "make" on actual rpm build process.
    """
    src_header_dirs = [
        'rpmio',
        'lib',
        'build',
        'sign',
    ]
    with Cmd.pushd('..'):
        src_include_dir = os.path.abspath('./include')
        for header_dir in src_header_dirs:
            if not os.path.isdir(header_dir):
                message_format = "Skip not existing header directory '{0}'"
                Log.debug(message_format.format(header_dir))
                continue
            header_files = Cmd.find(header_dir, '*.h')
            for header_file in header_files:
                pattern = '^{0}/'.format(header_dir)
                (dst_header_file, subs_num) = re.subn(
                    pattern, '', header_file)
                if subs_num == 0:
                    message = 'Failed to replace header_file: {0}'.format(
                        header_file)
                    raise ValueError(message)
                dst_header_file = os.path.abspath(
                    os.path.join(src_include_dir, 'rpm', dst_header_file)
                )
                dst_dir = os.path.dirname(dst_header_file)
                if not os.path.isdir(dst_dir):
                    Cmd.mkdir_p(dst_dir)
                shutil.copyfile(header_file, dst_header_file)
def event_gen(
        self, timeout_s=None, yield_nones=True, filter_predicate=None,
        terminal_events=_DEFAULT_TERMINAL_EVENTS):
    """Yield one event after another. If `timeout_s` is provided, we'll
    break when no event is received for that many seconds.
    """
    # We will either return due to the optional filter or because of a
    # timeout. The former will always set this. The latter will never set
    # this.
    self.__last_success_return = None

    last_hit_s = time.time()
    while True:
        block_duration_s = self.__get_block_duration()

        # Poll, but manage signal-related errors.
        try:
            events = self.__epoll.poll(block_duration_s)
        except IOError as e:
            if e.errno != EINTR:
                raise

            if timeout_s is not None:
                time_since_event_s = time.time() - last_hit_s
                if time_since_event_s > timeout_s:
                    break

            continue

        # Process events.
        for fd, event_type in events:
            # (fd) looks to always match the inotify FD.
            names = self._get_event_names(event_type)
            _LOGGER.debug("Events received from epoll: {}".format(names))

            for (header, type_names, path, filename) \
                    in self._handle_inotify_event(fd):
                last_hit_s = time.time()

                e = (header, type_names, path, filename)
                for type_name in type_names:
                    if filter_predicate is not None and \
                       filter_predicate(type_name, e) is False:
                        self.__last_success_return = (type_name, e)
                        return
                    elif type_name in terminal_events:
                        raise TerminalEventException(type_name, e)

                yield e

        if timeout_s is not None:
            time_since_event_s = time.time() - last_hit_s
            if time_since_event_s > timeout_s:
                break

        if yield_nones is True:
            yield None
def function(self, p):
    """Selects and returns one of the patterns in the list."""
    pg = p.generator
    motion_orientation = p.orientation + pi / 2.0

    new_x = p.x + p.size * pg.x
    new_y = p.y + p.size * pg.y

    image_array = pg(xdensity=p.xdensity, ydensity=p.ydensity, bounds=p.bounds,
                     x=new_x + p.speed * p.step * np.cos(motion_orientation),
                     y=new_y + p.speed * p.step * np.sin(motion_orientation),
                     orientation=p.orientation,
                     scale=pg.scale * p.scale, offset=pg.offset + p.offset)
    return image_array
def add_event(self, event):
    """Adds events to the queue. Will ignore events that occur before the
    settle time for that pin/direction. Such events are assumed to be
    bouncing.
    """
    # print("Trying to add event:")
    # print(event)

    # find out the pin settle time
    for pin_function_map in self.pin_function_maps:
        if _event_matches_pin_function_map(event, pin_function_map):
            # if pin_function_map.pin_num == event.pin_num and (
            #         pin_function_map.direction == event.direction or
            #         pin_function_map.direction == IODIR_BOTH):
            pin_settle_time = pin_function_map.settle_time
            # print("EventQueue: Found event in map.")
            break
    else:
        # Couldn't find event in map, don't bother adding it to the queue
        # print("EventQueue: Couldn't find event in map:")
        # for pin_function_map in self.pin_function_maps:
        #     print(pin_function_map)
        return

    threshold_time = self.last_event_time[event.pin_num] + pin_settle_time
    if event.timestamp > threshold_time:
        self.put(event)
        self.last_event_time[event.pin_num] = event.timestamp
def _run(self):
    """ Runs the interval loop. """

    def get_next_interval():
        start_time = time.time()
        start = 0 if self.eager else 1
        for count in itertools.count(start=start):
            yield max(start_time + count * self.interval - time.time(), 0)

    interval = get_next_interval()
    sleep_time = next(interval)
    while True:
        # sleep for `sleep_time`, unless `should_stop` fires, in which
        # case we leave the while loop and stop entirely
        with Timeout(sleep_time, exception=False):
            self.should_stop.wait()
            break

        self.handle_timer_tick()

        self.worker_complete.wait()
        self.worker_complete.reset()

        sleep_time = next(interval)
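The `get_next_interval` generator anchors tick N at `start_time + N * interval` rather than at the end of the previous tick, so a slow handler shortens the next sleep instead of pushing every later tick back. A standalone sketch of the same arithmetic, with hypothetical timings:

import itertools
import time

def drift_free_sleeps(interval, eager=False):
    # Yield sleep durations so tick N fires at start_time + N * interval,
    # regardless of how long each tick's work takes.
    start_time = time.time()
    for count in itertools.count(start=0 if eager else 1):
        yield max(start_time + count * interval - time.time(), 0)

for sleep_s, _ in zip(drift_free_sleeps(1.0), range(3)):
    time.sleep(sleep_s)
    time.sleep(0.3)  # simulated slow work; the next sleep shrinks to ~0.7s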
def get_bidi_paired_bracket_type_property(value, is_bytes=False):
    """Get `BPT` property."""
    obj = unidata.ascii_bidi_paired_bracket_type if is_bytes else unidata.unicode_bidi_paired_bracket_type

    if value.startswith('^'):
        negated = value[1:]
        value = '^' + unidata.unicode_alias['bidipairedbrackettype'].get(negated, negated)
    else:
        value = unidata.unicode_alias['bidipairedbrackettype'].get(value, value)

    return obj[value]
def to_project_config(self, with_packages=False):
    """Return a dict representation of the config that could be written to
    disk with `yaml.safe_dump` to get this configuration.

    :param with_packages bool: If True, include the serialized packages
        file in the root.
    :returns dict: The serialized profile.
    """
    result = deepcopy({
        'name': self.project_name,
        'version': self.version,
        'project-root': self.project_root,
        'profile': self.profile_name,
        'source-paths': self.source_paths,
        'macro-paths': self.macro_paths,
        'data-paths': self.data_paths,
        'test-paths': self.test_paths,
        'analysis-paths': self.analysis_paths,
        'docs-paths': self.docs_paths,
        'target-path': self.target_path,
        'archive-paths': self.archive_paths,
        'clean-targets': self.clean_targets,
        'log-path': self.log_path,
        'quoting': self.quoting,
        'models': self.models,
        'on-run-start': self.on_run_start,
        'on-run-end': self.on_run_end,
        'archive': self.archive,
        'seeds': self.seeds,
        'require-dbt-version': [
            v.to_version_string() for v in self.dbt_version
        ],
    })
    if with_packages:
        result.update(self.packages.serialize())
    return result
def _collect_paths(element):
    """
    Collect all possible path which leads to `element`.

    Function returns standard path from root element to this, reverse path,
    which uses negative indexes for path, also some pattern matches, like
    "this is element, which has neighbour with id 7" and so on.

    Args:
        element (obj): HTMLElement instance.

    Returns:
        list: List of :class:`.PathCall` and :class:`.Chained` objects.
    """
    output = []

    # look for element by parameters - sometimes the ID is unique
    path = vectors.el_to_path_vector(element)
    root = path[0]
    params = element.params if element.params else None

    match = root.find(element.getTagName(), params)
    if len(match) == 1:
        output.append(
            PathCall("find", 0, [element.getTagName(), params])
        )

    # look for element by neighbours
    output.extend(path_patterns.neighbours_pattern(element))

    # look for elements by patterns - element, which parent has tagname, and
    # which parent has tagname ..
    output.extend(path_patterns.predecesors_pattern(element, root))

    index_backtrack = []
    last_index_backtrack = []
    params_backtrack = []
    last_params_backtrack = []

    # look for element by paths from root to element
    for el in reversed(path):
        # skip root elements
        if not el.parent:
            continue

        tag_name = el.getTagName()
        match = el.parent.wfind(tag_name).childs
        index = match.index(el)

        index_backtrack.append(
            PathCall("wfind", index, [tag_name])
        )
        last_index_backtrack.append(
            PathCall("wfind", index - len(match), [tag_name])
        )

        # if element has some parameters, use them for lookup
        if el.params:
            match = el.parent.wfind(tag_name, el.params).childs
            index = match.index(el)

            params_backtrack.append(
                PathCall("wfind", index, [tag_name, el.params])
            )
            last_params_backtrack.append(
                PathCall("wfind", index - len(match), [tag_name, el.params])
            )
        else:
            params_backtrack.append(
                PathCall("wfind", index, [tag_name])
            )
            last_params_backtrack.append(
                PathCall("wfind", index - len(match), [tag_name])
            )

    output.extend([
        Chained(reversed(params_backtrack)),
        Chained(reversed(last_params_backtrack)),
        Chained(reversed(index_backtrack)),
        Chained(reversed(last_index_backtrack)),
    ])

    return output
def check_version(version: str):
    """
    Checks given version against code version and determines compatibility.
    Throws if versions are incompatible.

    :param version: Given version.
    """
    code_version = parse_version(__version__)
    given_version = parse_version(version)
    check_condition(code_version[0] == given_version[0],
                    "Given release version (%s) does not match release code version (%s)" % (version, __version__))
    check_condition(code_version[1] == given_version[1],
                    "Given major version (%s) does not match major code version (%s)" % (version, __version__))
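Only the first two version components are compared, so versions differing only in the last component are treated as compatible. Assuming `parse_version` splits the string on dots and a hypothetical `__version__ = '1.18.3'`:

check_version('1.18.99')  # passes: release and major components match
check_version('1.17.3')   # raises: major component differs
check_version('2.18.3')   # raises: release component differs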
def find_in_line(line):  # type: (str) -> Optional[str]
    """
    Find a version in a line.

    :param line:
    :return:
    """
    if not line:
        return None
    for method in [find_by_ast, find_version_by_string_lib, find_version_by_regex]:
        by = method(line)
        by = validate_string(by)
        if by:
            return by
    return None
def add_group(data_api, data_setters, group_index):
    """Add the data for a whole group.

    :param data_api: the data api from where to get the data
    :param data_setters: the class to push the data to
    :param group_index: the index for this group
    """
    group_type_ind = data_api.group_type_list[group_index]
    atom_count = len(data_api.group_list[group_type_ind]["atomNameList"])
    insertion_code = data_api.ins_code_list[group_index]
    data_setters.set_group_info(data_api.group_list[group_type_ind]["groupName"],
                                data_api.group_id_list[group_index],
                                insertion_code,
                                data_api.group_list[group_type_ind]["chemCompType"],
                                atom_count,
                                data_api.num_bonds,
                                data_api.group_list[group_type_ind]["singleLetterCode"],
                                data_api.sequence_index_list[group_index],
                                data_api.sec_struct_list[group_index])
    for group_atom_ind in range(atom_count):
        add_atom_data(data_api, data_setters,
                      data_api.group_list[group_type_ind]["atomNameList"],
                      data_api.group_list[group_type_ind]["elementList"],
                      data_api.group_list[group_type_ind]["formalChargeList"],
                      group_atom_ind)
        data_api.atom_counter += 1
    add_group_bonds(data_setters,
                    data_api.group_list[group_type_ind]["bondAtomList"],
                    data_api.group_list[group_type_ind]["bondOrderList"])
    return atom_count
def require_metadata():
    "Prevent improper installs without necessary metadata. See #659"
    egg_info_dir = os.path.join(here, 'setuptools.egg-info')
    if not os.path.exists(egg_info_dir):
        msg = (
            "Cannot build setuptools without metadata. "
            "Run `bootstrap.py`."
        )
        raise RuntimeError(msg)
def revert(self):
    """Revert file from disk"""
    index = self.get_stack_index()
    finfo = self.data[index]
    filename = finfo.filename
    if finfo.editor.document().isModified():
        self.msgbox = QMessageBox(
            QMessageBox.Warning,
            self.title,
            _("All changes to <b>%s</b> will be lost."
              "<br>Do you want to revert file from disk?"
              ) % osp.basename(filename),
            QMessageBox.Yes | QMessageBox.No,
            self)
        answer = self.msgbox.exec_()
        if answer != QMessageBox.Yes:
            return
    self.reload(index)
def magic_mprun(self, parameter_s=''):
    """ Execute a statement under the line-by-line memory profiler from the
    memory_profiler module.

    Usage:
      %mprun -f func1 -f func2 <statement>

    The given statement (which doesn't require quote marks) is run via the
    LineProfiler. Profiling is enabled for the functions specified by the -f
    options. The statistics will be shown side-by-side with the code through
    the pager once the statement has completed.

    Options:

    -f <function>: LineProfiler only profiles functions and methods it is
    told to profile. This option tells the profiler about these functions.
    Multiple -f options may be used. The argument may be any expression that
    gives a Python function or method object. However, one must be careful
    to avoid spaces that may confuse the option parser. Additionally,
    functions defined in the interpreter at the In[] prompt or via %run
    currently cannot be displayed. Write these functions out to a separate
    file and import them.

    One or more -f options are required to get any useful results.

    -T <filename>: dump the text-formatted statistics with the code
    side-by-side out to a text file.

    -r: return the LineProfiler object after it has completed profiling.

    -c: If present, add the memory usage of any children process to the
    report.
    """
    try:
        from StringIO import StringIO
    except ImportError:  # Python 3.x
        from io import StringIO

    # Local imports to avoid hard dependency.
    from distutils.version import LooseVersion
    import IPython
    ipython_version = LooseVersion(IPython.__version__)
    if ipython_version < '0.11':
        from IPython.genutils import page
        from IPython.ipstruct import Struct
        from IPython.ipapi import UsageError
    else:
        from IPython.core.page import page
        from IPython.utils.ipstruct import Struct
        from IPython.core.error import UsageError

    # Escape quote markers.
    opts_def = Struct(T=[''], f=[])
    parameter_s = parameter_s.replace('"', r'\"').replace("'", r"\'")
    opts, arg_str = self.parse_options(parameter_s, 'rf:T:c', list_all=True)
    opts.merge(opts_def)
    global_ns = self.shell.user_global_ns
    local_ns = self.shell.user_ns

    # Get the requested functions.
    funcs = []
    for name in opts.f:
        try:
            funcs.append(eval(name, global_ns, local_ns))
        except Exception as e:
            raise UsageError('Could not find function %r.\n%s: %s' % (
                name, e.__class__.__name__, e))

    include_children = 'c' in opts
    profile = LineProfiler(include_children=include_children)
    for func in funcs:
        profile(func)

    # Add the profiler to the builtins for @profile.
    try:
        import builtins
    except ImportError:  # Python 2.x
        import __builtin__ as builtins

    if 'profile' in builtins.__dict__:
        had_profile = True
        old_profile = builtins.__dict__['profile']
    else:
        had_profile = False
        old_profile = None
    builtins.__dict__['profile'] = profile

    try:
        try:
            profile.runctx(arg_str, global_ns, local_ns)
            message = ''
        except SystemExit:
            message = "*** SystemExit exception caught in code being profiled."
        except KeyboardInterrupt:
            message = ("*** KeyboardInterrupt exception caught in code being "
                       "profiled.")
    finally:
        if had_profile:
            builtins.__dict__['profile'] = old_profile

    # Trap text output.
    stdout_trap = StringIO()
    show_results(profile, stdout_trap)
    output = stdout_trap.getvalue()
    output = output.rstrip()

    if ipython_version < '0.11':
        page(output, screen_lines=self.shell.rc.screen_length)
    else:
        page(output)
    print(message,)

    text_file = opts.T[0]
    if text_file:
        with open(text_file, 'w') as pfile:
            pfile.write(output)
        print('\n*** Profile printout saved to text file %s. %s' % (
            text_file, message))

    return_value = None
    if 'r' in opts:
        return_value = profile

    return return_value
def try_read(self, address, size):
    """Try to read memory content at specified address.

    If any location was not written before, it returns a tuple
    (False, None). Otherwise, it returns (True, memory content).
    """
    value = 0x0
    for i in range(0, size):
        addr = address + i
        if addr in self._memory:
            value |= self._read_byte(addr) << (i * 8)
        else:
            return False, None
    return True, value
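The shift by `i * 8` assembles the bytes little-endian: the lowest address becomes the least significant byte. A standalone illustration of the same arithmetic, with a hypothetical memory dict:

memory = {0x1000: 0x78, 0x1001: 0x56, 0x1002: 0x34, 0x1003: 0x12}

value = 0
for i in range(4):
    value |= memory[0x1000 + i] << (i * 8)

assert value == 0x12345678  # little-endian reconstruction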
def release(self, *args, **kwargs):
    """
    Really release the lock only if it's not a sub-lock. Then save the
    sub-lock status and mark the model as unlocked.
    """
    if not self.field.lockable:
        return
    if self.sub_lock_mode:
        return
    super(FieldLock, self).release(*args, **kwargs)
    self.already_locked_by_model = self.sub_lock_mode = False
def create(self, company, timezone, country):
    """Creates a client."""
    body = {
        "CompanyName": company,
        "TimeZone": timezone,
        "Country": country}
    response = self._post("/clients.json", json.dumps(body))
    self.client_id = json_to_py(response)
    return self.client_id
def generate_private_key(self):
    """
    Generates a private key from 4096 bytes of OS-provided randomness.

    SHA-256 is a member of the SHA-2 cryptographic hash functions designed
    by the NSA. SHA stands for Secure Hash Algorithm. The random data is
    base64-encoded, converted to bytes and hashed with SHA-256. The binary
    output is converted to a hex representation.

    Returns:
        bytes: The hexadecimal representation of the hashed binary data.
    """
    random_string = base64.b64encode(os.urandom(4096)).decode('utf-8')
    binary_data = bytes(random_string, 'utf-8')
    hash_object = hashlib.sha256(binary_data)
    message_digest_bin = hash_object.digest()
    message_digest_hex = binascii.hexlify(message_digest_bin)
    return message_digest_hex
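The digest-then-hexlify pair is equivalent to `hashlib`'s built-in `hexdigest()` (modulo bytes vs. str), and the base64 round-trip adds no entropy; a condensed sketch of the same result:

import hashlib
import os

def generate_private_key_hex() -> str:
    # Hash 4096 random bytes and return the hex digest directly.
    return hashlib.sha256(os.urandom(4096)).hexdigest()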
def remove_segments(self, segments_to_remove):
    '''
    Remove the faces and vertices for given segments, keeping all others.

    Args:
        segments_to_remove: a list of segments whose vertices will be removed
    '''
    v_ind = self.vertex_indices_in_segments(segments_to_remove)
    self.segm = {name: faces for name, faces in self.segm.iteritems()
                 if name not in segments_to_remove}
    self.remove_vertices(v_ind)
def logical_cores(self):
    """Return the number of cpu cores as reported to the os.

    May be different from physical_cores if, ie, intel's hyperthreading is
    enabled.
    """
    try:
        return self._logical_cores()
    except Exception as e:
        from rez.utils.logging_ import print_error
        print_error("Error detecting logical core count, defaulting to 1: %s"
                    % str(e))
        return 1
def _get_rename_function(mapper):
    """
    Returns a function that will map names/labels, dependent if mapper
    is a dict, Series or just a function.
    """
    if isinstance(mapper, (abc.Mapping, ABCSeries)):
        def f(x):
            if x in mapper:
                return mapper[x]
            else:
                return x
    else:
        f = mapper
    return f
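Used on a plain dict, the returned function leaves unmapped labels untouched, which is what makes partial renames safe; a callable passes through unchanged:

f = _get_rename_function({'a': 'alpha'})
[f(x) for x in ['a', 'b']]   # -> ['alpha', 'b']

g = _get_rename_function(str.upper)
[g(x) for x in ['a', 'b']]   # -> ['A', 'B']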
def get_block_info(self, block):
    """
    Args:
        block: block number (eg: 223212)
               block hash (eg: 0000000000000000210b10d620600dc1cc2380bb58eb2408f9767eb792ed31fa)
               word "last" - this will always return the latest block
               word "first" - this will always return the first block

    Returns:
        basic block data
    """
    url = '{}/block/info/{}'.format(self._url, block)
    return self.make_request(url)
def get_col_sep(self):
    """Return the column separator"""
    if self.tab_btn.isChecked():
        return u"\t"
    elif self.ws_btn.isChecked():
        return None
    return to_text_string(self.line_edt.text())
def set_value(self, value):
    """Set value of the checkbox.

    Parameters
    ----------
    value : bool
        value for the checkbox
    """
    if value:
        self.setCheckState(Qt.Checked)
    else:
        self.setCheckState(Qt.Unchecked)
def parse_DID(did, name_type=None):
    """
    Given a DID string, parse it into
    {'address': ..., 'index': ..., 'name_type': ...}
    Raise on invalid DID
    """
    did_pattern = '^did:stack:v0:({}{{25,35}})-([0-9]+)$'.format(OP_BASE58CHECK_CLASS)
    m = re.match(did_pattern, did)
    assert m, 'Invalid DID: {}'.format(did)

    original_address = str(m.groups()[0])
    name_index = int(m.groups()[1])

    vb = keylib.b58check.b58check_version_byte(original_address)
    name_type = None
    if vb in [SUBDOMAIN_ADDRESS_VERSION_BYTE, SUBDOMAIN_ADDRESS_MULTISIG_VERSION_BYTE]:
        name_type = 'subdomain'

        # decode version
        if vb == SUBDOMAIN_ADDRESS_VERSION_BYTE:
            vb = bitcoin_blockchain.version_byte
        else:
            vb = bitcoin_blockchain.multisig_version_byte

        original_address = virtualchain.address_reencode(original_address, version_byte=vb)
    else:
        name_type = 'name'
        original_address = virtualchain.address_reencode(original_address)

    return {'address': original_address, 'index': name_index, 'name_type': name_type}
def _handle_template(self, token):
    """Handle a case where a template is at the head of the tokens."""
    params = []
    default = 1
    self._push()
    while self._tokens:
        token = self._tokens.pop()
        if isinstance(token, tokens.TemplateParamSeparator):
            if not params:
                name = self._pop()
            param = self._handle_parameter(default)
            params.append(param)
            if not param.showkey:
                default += 1
        elif isinstance(token, tokens.TemplateClose):
            if not params:
                name = self._pop()
            return Template(name, params)
        else:
            self._write(self._handle_token(token))
    raise ParserError("_handle_template() missed a close token")
def setNetworkName(self, networkName='GRL'):
    """set Thread Network name

    Args:
        networkName: the networkname string to be set

    Returns:
        True: successful to set the Thread Networkname
        False: fail to set the Thread Networkname
    """
    print '%s call setNetworkName' % self.port
    print networkName
    try:
        cmd = 'networkname %s' % networkName
        datasetCmd = 'dataset networkname %s' % networkName
        self.hasActiveDatasetToCommit = True
        return self.__sendCommand(cmd)[0] == 'Done' and self.__sendCommand(datasetCmd)[0] == 'Done'
    except Exception, e:
        ModuleHelper.WriteIntoDebugLogger("setNetworkName() Error: " + str(e))
def setAttributeNS(self, namespaceURI, localName, value):
    '''Keyword arguments:
    namespaceURI -- namespace of attribute to create, None is for
        attributes in no namespace.
    localName -- local name of new attribute
    value -- value of new attribute
    '''
    prefix = None
    if namespaceURI:
        try:
            prefix = self.getPrefix(namespaceURI)
        except KeyError, ex:
            prefix = 'ns2'
            self.setNamespaceAttribute(prefix, namespaceURI)
    qualifiedName = localName
    if prefix:
        qualifiedName = '%s:%s' % (prefix, localName)
    self._setAttributeNS(namespaceURI, qualifiedName, value)
def get_ecommerce_client(url_postfix='', site_code=None): """ Get client for fetching data from ecommerce API. Arguments: site_code (str): (Optional) The SITE_OVERRIDES key to inspect for site-specific values url_postfix (str): (Optional) The URL postfix value to append to the ECOMMERCE_API_ROOT value. Returns: EdxRestApiClient object """ ecommerce_api_root = get_configuration('ECOMMERCE_API_ROOT', site_code=site_code) signing_key = get_configuration('JWT_SECRET_KEY', site_code=site_code) issuer = get_configuration('JWT_ISSUER', site_code=site_code) service_username = get_configuration('ECOMMERCE_SERVICE_USERNAME', site_code=site_code) return EdxRestApiClient( ecommerce_api_root + url_postfix, signing_key=signing_key, issuer=issuer, username=service_username)
Get client for fetching data from ecommerce API. Arguments: site_code (str): (Optional) The SITE_OVERRIDES key to inspect for site-specific values url_postfix (str): (Optional) The URL postfix value to append to the ECOMMERCE_API_ROOT value. Returns: EdxRestApiClient object
def namespace_splitter(self, value):
    """
    Setter for **self.__namespace_splitter** attribute.

    :param value: Attribute value.
    :type value: unicode
    """

    if value is not None:
        assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
            "namespace_splitter", value)
        assert len(value) == 1, "'{0}' attribute: '{1}' has multiple characters!".format("namespace_splitter",
                                                                                         value)
        assert not re.search(r"\w", value), "'{0}' attribute: '{1}' is an alphanumeric character!".format(
            "namespace_splitter", value)
    self.__namespace_splitter = value
Setter for **self.__namespace_splitter** attribute. :param value: Attribute value. :type value: unicode
def capture(returns, factor_returns, period=DAILY): """ Compute capture ratio. Parameters ---------- returns : pd.Series or np.ndarray Returns of the strategy, noncumulative. - See full explanation in :func:`~empyrical.stats.cum_returns`. factor_returns : pd.Series or np.ndarray Noncumulative returns of the factor to which beta is computed. Usually a benchmark such as the market. - This is in the same style as returns. period : str, optional Defines the periodicity of the 'returns' data for purposes of annualizing. Value ignored if `annualization` parameter is specified. Defaults are:: 'monthly':12 'weekly': 52 'daily': 252 Returns ------- capture_ratio : float Note ---- See http://www.investopedia.com/terms/u/up-market-capture-ratio.asp for details. """ return (annual_return(returns, period=period) / annual_return(factor_returns, period=period))
Compute capture ratio. Parameters ---------- returns : pd.Series or np.ndarray Returns of the strategy, noncumulative. - See full explanation in :func:`~empyrical.stats.cum_returns`. factor_returns : pd.Series or np.ndarray Noncumulative returns of the factor to which beta is computed. Usually a benchmark such as the market. - This is in the same style as returns. period : str, optional Defines the periodicity of the 'returns' data for purposes of annualizing. Value ignored if `annualization` parameter is specified. Defaults are:: 'monthly':12 'weekly': 52 'daily': 252 Returns ------- capture_ratio : float Note ---- See http://www.investopedia.com/terms/u/up-market-capture-ratio.asp for details.
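Since the ratio is just a quotient of annualized returns, the arithmetic is easy to check by hand; a minimal NumPy sketch follows (it assumes daily data with 252 periods per year and a geometric annualization, not empyrical's exact implementation):

import numpy as np

def annualized_return(returns, periods_per_year=252):
    # geometric mean growth, annualized
    total_growth = np.prod(1 + np.asarray(returns))
    return total_growth ** (periods_per_year / len(returns)) - 1

np.random.seed(0)
strategy = np.random.normal(0.0005, 0.01, 252)   # hypothetical daily returns
benchmark = np.random.normal(0.0004, 0.01, 252)

print(annualized_return(strategy) / annualized_return(benchmark))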
def _add_thread(self, aThread): """ Private method to add a thread object to the snapshot. @type aThread: L{Thread} @param aThread: Thread object. """ ## if not isinstance(aThread, Thread): ## if hasattr(aThread, '__class__'): ## typename = aThread.__class__.__name__ ## else: ## typename = str(type(aThread)) ## msg = "Expected Thread, got %s instead" % typename ## raise TypeError(msg) dwThreadId = aThread.dwThreadId ## if dwThreadId in self.__threadDict: ## msg = "Already have a Thread object with ID %d" % dwThreadId ## raise KeyError(msg) aThread.set_process(self) self.__threadDict[dwThreadId] = aThread
Private method to add a thread object to the snapshot. @type aThread: L{Thread} @param aThread: Thread object.
def backprop(self, input_data, targets, cache=None):
    """ Backpropagate through the logistic layer.

    **Parameters:**

    input_data : ``GPUArray``
        Input data to compute activations for.

    targets : ``GPUArray``
        The target values of the units.

    cache : list of ``GPUArray``
        Cache obtained from forward pass. If the cache is
        provided, then the activations are not recalculated.

    **Returns:**

    gradients : tuple of ``GPUArray``
        Gradients with respect to the weights and biases in the
        form ``(df_weights, df_biases)``.

    df_input : ``GPUArray``
        Gradients with respect to the input.
    """

    if cache is not None:
        activations = cache
    else:
        activations = self.feed_forward(input_data, prediction=False)

    if activations.shape != targets.shape:
        raise ValueError('Activations (shape = %s) and targets (shape = %s) '
                         'are different sizes' % (activations.shape, targets.shape))

    delta = substract_matrix(activations, targets)
    nan_to_zeros(delta, delta)

    # Gradient wrt weights
    df_W = linalg.dot(input_data, delta, transa='T')
    # Gradient wrt bias
    df_b = matrix_sum_out_axis(delta, 0)

    # Gradient wrt input
    df_input = linalg.dot(delta, self.W, transb='T')

    # L1 penalty
    if self.l1_penalty_weight:
        df_W += self.l1_penalty_weight * sign(self.W)

    # L2 penalty
    if self.l2_penalty_weight:
        df_W += self.l2_penalty_weight * self.W

    return (df_W, df_b), df_input
Backpropagate through the logistic layer.

**Parameters:**

input_data : ``GPUArray``
    Input data to compute activations for.

targets : ``GPUArray``
    The target values of the units.

cache : list of ``GPUArray``
    Cache obtained from forward pass. If the cache is
    provided, then the activations are not recalculated.

**Returns:**

gradients : tuple of ``GPUArray``
    Gradients with respect to the weights and biases in the
    form ``(df_weights, df_biases)``.

df_input : ``GPUArray``
    Gradients with respect to the input.
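The gradients above are the standard ones for a logistic output layer whose error signal reduces to activations minus targets; a plain NumPy sketch of the same algebra (shapes and names are illustrative, not hebel's GPU API):

import numpy as np

def logistic_backprop(X, activations, targets, W, l2_penalty=0.0):
    delta = activations - targets        # error signal, shape (n_samples, n_out)
    df_W = X.T @ delta                   # gradient wrt weights
    df_b = delta.sum(axis=0)             # gradient wrt biases
    df_input = delta @ W.T               # gradient wrt layer input
    if l2_penalty:
        df_W += l2_penalty * W           # L2 weight decay
    return (df_W, df_b), df_input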
def set_edge_label(self, edge, label):
    """
    Set the label of an edge.

    @type  edge: edge
    @param edge: One edge.

    @type  label: string
    @param label: Edge label.
    """
    self.set_edge_properties(edge, label=label)
    if not self.DIRECTED:
        self.set_edge_properties((edge[1], edge[0]), label=label)
Set the label of an edge. @type edge: edge @param edge: One edge. @type label: string @param label: Edge label.
def shell(self):
    """
    Opens a Django-focused Python shell.
    Essentially the equivalent of running `manage.py shell`.
    """
    r = self.local_renderer
    if '@' in self.genv.host_string:
        r.env.shell_host_string = self.genv.host_string
    else:
        r.env.shell_host_string = '{user}@{host_string}'
    r.env.shell_default_dir = self.genv.shell_default_dir_template
    r.env.shell_interactive_djshell_str = self.genv.interactive_shell_template
    r.run_or_local('ssh -t -i {key_filename} {shell_host_string} "{shell_interactive_djshell_str}"')
Opens a Django-focused Python shell.
Essentially the equivalent of running `manage.py shell`.
def extract_tar(url, target_dir, additional_compression="", remove_common_prefix=False, overwrite=False):
    """ Extract a tar archive and install it to the target directory """
    try:
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)

        tf = tarfile.TarFile.open(fileobj=download_to_bytesio(url))

        common_prefix = os.path.commonprefix(tf.getnames())
        if not common_prefix.endswith('/'):
            common_prefix += "/"
        for tfile in tf.getmembers():
            if remove_common_prefix:
                tfile.name = tfile.name.replace(common_prefix, "", 1)
            if tfile.name != "":
                target_path = os.path.join(target_dir, tfile.name)
                if target_path != target_dir and os.path.exists(target_path):
                    if overwrite:
                        remove_path(target_path)
                    else:
                        continue
                tf.extract(tfile, target_dir)
    except OSError:
        e = sys.exc_info()[1]
        raise ExtractException(str(e))
    except IOError:
        e = sys.exc_info()[1]
        raise ExtractException(str(e))
Extract a tar archive and install it to the target directory
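For reference, a minimal standard-library sketch of the common-prefix stripping that the function performs; the archive path is hypothetical, and the overwrite/continue handling above is omitted:

import os
import tarfile

def extract_strip_prefix(archive_path, target_dir):
    with tarfile.open(archive_path) as tf:
        prefix = os.path.commonprefix(tf.getnames())
        if not prefix.endswith('/'):
            prefix += '/'
        for member in tf.getmembers():
            # drop the shared leading directory from every entry
            member.name = member.name.replace(prefix, '', 1)
            if member.name:
                tf.extract(member, target_dir)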
def find_bind_module(name, verbose=False): """Find the bind module matching the given name. Args: name (str): Name of package to find bind module for. verbose (bool): If True, print extra output. Returns: str: Filepath to bind module .py file, or None if not found. """ bindnames = get_bind_modules(verbose=verbose) bindfile = bindnames.get(name) if bindfile: return bindfile if not verbose: return None # suggest close matches fuzzy_matches = get_close_pkgs(name, bindnames.keys()) if fuzzy_matches: rows = [(x[0], bindnames[x[0]]) for x in fuzzy_matches] print "'%s' not found. Close matches:" % name print '\n'.join(columnise(rows)) else: print "No matches." return None
Find the bind module matching the given name. Args: name (str): Name of package to find bind module for. verbose (bool): If True, print extra output. Returns: str: Filepath to bind module .py file, or None if not found.
def train(cls, new_data, old=None): """ Train a continuous scale Parameters ---------- new_data : array_like New values old : array_like Old range. Most likely a tuple of length 2. Returns ------- out : tuple Limits(range) of the scale """ if not len(new_data): return old if not hasattr(new_data, 'dtype'): new_data = np.asarray(new_data) if new_data.dtype.kind not in CONTINUOUS_KINDS: raise TypeError( "Discrete value supplied to continuous scale") if old is not None: new_data = np.hstack([new_data, old]) return min_max(new_data, na_rm=True, finite=True)
Train a continuous scale Parameters ---------- new_data : array_like New values old : array_like Old range. Most likely a tuple of length 2. Returns ------- out : tuple Limits(range) of the scale
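A worked example of the training step: folding the old limits into the new data reduces to a NaN-aware min/max over the union (np.nanmin/np.nanmax stand in for the min_max helper used above):

import numpy as np

old = (0.0, 10.0)
new_data = np.array([-2.5, 4.0, np.nan, 7.5])

combined = np.hstack([new_data, old])
print((np.nanmin(combined), np.nanmax(combined)))  # (-2.5, 10.0)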
def setPotential(self, columnIndex, potential):
    """
    Sets the potential mapping for a given column. ``potential`` size
    must match the number of inputs, and must be greater than
    ``stimulusThreshold``.

    :param columnIndex: (int) column index to set potential for.
    :param potential: (list) value to set.
    """
    assert(columnIndex < self._numColumns)
    potentialSparse = numpy.where(potential > 0)[0]
    if len(potentialSparse) < self._stimulusThreshold:
        raise Exception("The potential pool must have at least stimulusThreshold "
                        "non-zero entries. This is likely due to a "
                        "value of stimulusThreshold that is too large relative "
                        "to the input size.")

    self._potentialPools.replace(columnIndex, potentialSparse)
Sets the potential mapping for a given column. ``potential`` size must match the number of inputs, and must be greater than ``stimulusThreshold``. :param columnIndex: (int) column index to set potential for. :param potential: (list) value to set.
def sum(cls, iresults): """ Sum the data transfer information of a set of results """ res = object.__new__(cls) res.received = [] res.sent = 0 for iresult in iresults: res.received.extend(iresult.received) res.sent += iresult.sent name = iresult.name.split('#', 1)[0] if hasattr(res, 'name'): assert res.name.split('#', 1)[0] == name, (res.name, name) else: res.name = iresult.name.split('#')[0] return res
Sum the data transfer information of a set of results
def clean_all(self):
    """
    *clean and sync all the bookkeeping tables*

    **Return:**
        - ``bookkeeper``

    **Usage:**

        .. code-block:: python

            from rockAtlas.bookkeeping import bookkeeper
            bk = bookkeeper(
                log=log,
                settings=settings,
                fullUpdate=False
            )
            bk.clean_all()
    """
    self.log.info('starting the ``clean_all`` method')

    if self.fullUpdate:
        recent = False
    else:
        recent = True

    self.import_new_atlas_pointings(recent)
    self._run_bookkeeping_sql_scripts()

    self.log.info('completed the ``clean_all`` method')
    return self
*clean and sync all the bookkeeping tables* **Return:** - ``bookkeeper`` **Usage:** .. code-block:: python from rockAtlas.bookkeeping import bookkeeper bk = bookkeeper( log=log, settings=settings, fullUpdate=False ) bk.clean_all()
def _debug_off():
    """ turns off debugging by removing hidden tmp file """
    global __loglevel__
    if _os.path.exists(__debugflag__):
        _os.remove(__debugflag__)
    __loglevel__ = "ERROR"
    _LOGGER.info("debugging turned off")
    _set_debug_dict(__loglevel__)
turns off debugging by removing hidden tmp file
def federation_payment(self,
                       fed_address,
                       amount,
                       asset_code='XLM',
                       asset_issuer=None,
                       source=None,
                       allow_http=False):
    """Append a :class:`Payment <stellar_base.operation.Payment>` operation
    to the list of operations using federation on the destination address.

    Translates the destination stellar address to an account ID via
    :func:`federation <stellar_base.federation.federation>`, before
    creating a new payment operation via :meth:`append_payment_op`.

    :param str fed_address: A Stellar Address that needs to be translated
        into a valid account ID via federation.
    :param str amount: The amount of the currency to send in the payment.
    :param str asset_code: The asset code for the asset to send.
    :param str asset_issuer: The address of the issuer of the asset.
    :param str source: The source address of the payment.
    :param bool allow_http: When set to `True`, connections to insecure
        http protocol federation servers will be allowed. Must be set to
        `False` in production. Default: `False`.
    :return: This builder instance.

    """
    fed_info = federation(
        address_or_id=fed_address, fed_type='name', allow_http=allow_http)
    if not fed_info or not fed_info.get('account_id'):
        raise FederationError(
            'Cannot determine Stellar Address to Account ID translation '
            'via Federation server.')
    self.append_payment_op(fed_info['account_id'], amount, asset_code,
                           asset_issuer, source)
    memo_type = fed_info.get('memo_type')
    if memo_type is not None and memo_type in ('text', 'id', 'hash'):
        getattr(self, 'add_' + memo_type.lower() + '_memo')(fed_info['memo'])
    return self
Append a :class:`Payment <stellar_base.operation.Payment>` operation to the list of operations using federation on the destination address. Translates the destination stellar address to an account ID via :func:`federation <stellar_base.federation.federation>`, before creating a new payment operation via :meth:`append_payment_op`. :param str fed_address: A Stellar Address that needs to be translated into a valid account ID via federation. :param str amount: The amount of the currency to send in the payment. :param str asset_code: The asset code for the asset to send. :param str asset_issuer: The address of the issuer of the asset. :param str source: The source address of the payment. :param bool allow_http: When set to `True`, connections to insecure http protocol federation servers will be allowed. Must be set to `False` in production. Default: `False`. :return: This builder instance.
def getGraphFieldList(self, graph_name): """Returns list of names of fields for graph with name graph_name. @param graph_name: Graph Name @return: List of field names for graph. """ graph = self._getGraph(graph_name, True) return graph.getFieldList()
Returns list of names of fields for graph with name graph_name. @param graph_name: Graph Name @return: List of field names for graph.
def create_for_object_attributes(item_type, faulty_attribute_name: str, hint):
    """
    Helper method for constructor attributes

    :param item_type: the type whose constructor is being inspected
    :param faulty_attribute_name: the name of the constructor attribute with the invalid hint
    :param hint: the invalid PEP484 type hint
    :return: a TypeInformationRequiredError describing the problem
    """
    # this leads to infinite loops
    # try:
    #     prt_type = get_pretty_type_str(item_type)
    # except:
    #     prt_type = str(item_type)
    return TypeInformationRequiredError("Cannot create instances of type {t}: constructor attribute '{a}' has an"
                                        " invalid PEP484 type hint: {h}.".format(t=str(item_type),
                                                                                 a=faulty_attribute_name, h=hint))
Helper method for constructor attributes

:param item_type: the type whose constructor is being inspected
:param faulty_attribute_name: the name of the constructor attribute with the invalid hint
:param hint: the invalid PEP484 type hint
:return: a TypeInformationRequiredError describing the problem
def extractPrintSaveIntermittens(): """ This function will print out the intermittents onto the screen for casual viewing. It will also print out where the giant summary dictionary is going to be stored. :return: None """ # extract intermittents from collected failed tests global g_summary_dict_intermittents localtz = time.tzname[0] for ind in range(len(g_summary_dict_all["TestName"])): if g_summary_dict_all["TestInfo"][ind]["FailureCount"] >= g_threshold_failure: addFailedTests(g_summary_dict_intermittents, g_summary_dict_all, ind) # save dict in file if len(g_summary_dict_intermittents["TestName"]) > 0: json.dump(g_summary_dict_intermittents, open(g_summary_dict_name, 'w')) with open(g_summary_csv_filename, 'w') as summaryFile: for ind in range(len(g_summary_dict_intermittents["TestName"])): testName = g_summary_dict_intermittents["TestName"][ind] numberFailure = g_summary_dict_intermittents["TestInfo"][ind]["FailureCount"] firstFailedTS = parser.parse(time.ctime(min(g_summary_dict_intermittents["TestInfo"][ind]["Timestamp"]))+ ' '+localtz) firstFailedStr = firstFailedTS.strftime("%a %b %d %H:%M:%S %Y %Z") recentFail = parser.parse(time.ctime(max(g_summary_dict_intermittents["TestInfo"][ind]["Timestamp"]))+ ' '+localtz) recentFailStr = recentFail.strftime("%a %b %d %H:%M:%S %Y %Z") eachTest = "{0}, {1}, {2}, {3}\n".format(testName, recentFailStr, numberFailure, g_summary_dict_intermittents["TestInfo"][ind]["TestCategory"][0]) summaryFile.write(eachTest) print("Intermittent: {0}, Last failed: {1}, Failed {2} times since " "{3}".format(testName, recentFailStr, numberFailure, firstFailedStr))
This function will print out the intermittents onto the screen for casual viewing. It will also print out where the giant summary dictionary is going to be stored. :return: None
def set_window_override_redirect(self, window, override_redirect): """ Set the override_redirect value for a window. This generally means whether or not a window manager will manage this window. If you set it to 1, the window manager will usually not draw borders on the window, etc. If you set it to 0, the window manager will see it like a normal application window. """ _libxdo.xdo_set_window_override_redirect( self._xdo, window, override_redirect)
Set the override_redirect value for a window. This generally means whether or not a window manager will manage this window. If you set it to 1, the window manager will usually not draw borders on the window, etc. If you set it to 0, the window manager will see it like a normal application window.
def setAutoRaise(self, state): """ Sets whether or not this combo box should automatically raise up. :param state | <bool> """ self._autoRaise = state self.setMouseTracking(state) try: self.lineEdit().setVisible(not state) except AttributeError: pass
Sets whether or not this combo box should automatically raise up. :param state | <bool>
def dfs(args=None, properties=None, hadoop_conf_dir=None): """ Run the Hadoop file system shell. All arguments are passed to :func:`run_class`. """ # run FsShell directly (avoids "hadoop dfs" deprecation) return run_class( "org.apache.hadoop.fs.FsShell", args, properties, hadoop_conf_dir=hadoop_conf_dir, keep_streams=True )
Run the Hadoop file system shell. All arguments are passed to :func:`run_class`.
def make_compare(key, value, obj): "Map a key name to a specific comparison function" if '__' not in key: # If no __ exists, default to doing an "exact" comparison key, comp = key, 'exact' else: key, comp = key.rsplit('__', 1) # Check if comp is valid if hasattr(Compare, comp): return getattr(Compare, comp)(key, value, obj) raise AttributeError("No comparison '%s'" % comp)
Map a key name to a specific comparison function
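A self-contained sketch of the double-underscore dispatch; the Compare class here is a made-up stand-in with two lookups, operating on plain dicts:

class Compare(object):
    @staticmethod
    def exact(key, value, obj):
        return obj.get(key) == value

    @staticmethod
    def gte(key, value, obj):
        return obj.get(key) >= value

def make_compare(key, value, obj):
    if '__' not in key:
        key, comp = key, 'exact'   # no __ means an "exact" comparison
    else:
        key, comp = key.rsplit('__', 1)
    if hasattr(Compare, comp):
        return getattr(Compare, comp)(key, value, obj)
    raise AttributeError("No comparison '%s'" % comp)

print(make_compare('age__gte', 18, {'age': 21}))       # True
print(make_compare('name', 'bob', {'name': 'alice'}))  # False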
def generate(self, *args, **kwargs): """For very large templates it can be useful to not render the whole template at once but evaluate each statement after another and yield piece for piece. This method basically does exactly that and returns a generator that yields one item after another as unicode strings. It accepts the same arguments as :meth:`render`. """ vars = dict(*args, **kwargs) try: for event in self.root_render_func(self.new_context(vars)): yield event except: exc_info = sys.exc_info() else: return yield self.environment.handle_exception(exc_info, True)
For very large templates it can be useful to not render the whole template at once but evaluate each statement after another and yield piece for piece. This method basically does exactly that and returns a generator that yields one item after another as unicode strings. It accepts the same arguments as :meth:`render`.
def _create_id(self): """Returns a freshly created DB-wide unique ID.""" cursor = self._db.cursor() cursor.execute('INSERT INTO Ids DEFAULT VALUES') return cursor.lastrowid
Returns a freshly created DB-wide unique ID.
def xpathRegisterVariable(self, name, ns_uri, value): """Register a variable with the XPath context """ ret = libxml2mod.xmlXPathRegisterVariable(self._o, name, ns_uri, value) return ret
Register a variable with the XPath context
def format_number(number): """ >>> format_number(1) 1 >>> format_number(22) 22 >>> format_number(333) 333 >>> format_number(4444) '4,444' >>> format_number(55555) '55,555' >>> format_number(666666) '666,666' >>> format_number(7777777) '7,777,777' """ char_list = list(str(number)) length = len(char_list) if length <= 3: return number result = '' if length % 3 != 0: while len(char_list) % 3 != 0: c = char_list[0] result += c char_list.remove(c) result += ',' i = 0 while len(char_list) > 0: c = char_list[0] result += c char_list.remove(c) i += 1 if i % 3 == 0: result += ',' return result[0:-1] if result[-1] == ',' else result
>>> format_number(1) 1 >>> format_number(22) 22 >>> format_number(333) 333 >>> format_number(4444) '4,444' >>> format_number(55555) '55,555' >>> format_number(666666) '666,666' >>> format_number(7777777) '7,777,777'
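Worth noting: when a string result is always acceptable, Python's format mini-language already performs this grouping, so the manual loop is only needed to preserve the bare int return for short inputs:

print('{:,}'.format(7777777))  # 7,777,777
print(format(4444, ','))       # 4,444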
def image_mime_type(data): """Return the MIME type of the image data (a bytestring). """ # This checks for a jpeg file with only the magic bytes (unrecognized by # imghdr.what). imghdr.what returns none for that type of file, so # _wider_test_jpeg is run in that case. It still returns None if it didn't # match such a jpeg file. kind = _imghdr_what_wrapper(data) if kind in ['gif', 'jpeg', 'png', 'tiff', 'bmp']: return 'image/{0}'.format(kind) elif kind == 'pgm': return 'image/x-portable-graymap' elif kind == 'pbm': return 'image/x-portable-bitmap' elif kind == 'ppm': return 'image/x-portable-pixmap' elif kind == 'xbm': return 'image/x-xbitmap' else: return 'image/x-{0}'.format(kind)
Return the MIME type of the image data (a bytestring).
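A minimal sketch of the imghdr-based detection the helper wraps; note that imghdr is deprecated since Python 3.11 (removed in 3.13), so this assumes an older interpreter, and the byte string is just the PNG signature:

import imghdr

data = b'\x89PNG\r\n\x1a\n' + b'\x00' * 16  # PNG signature plus padding
kind = imghdr.what(None, h=data)            # pass bytes via h, no file needed
print('image/{0}'.format(kind))             # image/png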
def _clean_intenum(obj):
    """Recursively replace IntEnum values with their integer values in a
    dict or list."""
    if isinstance(obj, dict):
        for key, value in obj.items():
            if isinstance(value, IntEnum):
                obj[key] = value.value
            elif isinstance(value, (dict, list)):
                obj[key] = _clean_intenum(value)
    elif isinstance(obj, list):
        for i, value in enumerate(obj):
            if isinstance(value, IntEnum):
                obj[i] = value.value
            elif isinstance(value, (dict, list)):
                obj[i] = _clean_intenum(value)

    return obj
Recursively replace IntEnum values with their integer values in a dict or list.
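A quick demonstration of the recursion on a nested structure, assuming the _clean_intenum above is in scope (the enum is made up for illustration):

from enum import IntEnum

class Color(IntEnum):
    RED = 1
    BLUE = 2

payload = {'fg': Color.RED, 'items': [Color.BLUE, {'bg': Color.RED}]}
print(_clean_intenum(payload))  # {'fg': 1, 'items': [2, {'bg': 1}]}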
def delete_record_set(self, record_set): """Append a record set to the 'deletions' for the change set. :type record_set: :class:`google.cloud.dns.resource_record_set.ResourceRecordSet` :param record_set: the record set to append. :raises: ``ValueError`` if ``record_set`` is not of the required type. """ if not isinstance(record_set, ResourceRecordSet): raise ValueError("Pass a ResourceRecordSet") self._deletions += (record_set,)
Append a record set to the 'deletions' for the change set. :type record_set: :class:`google.cloud.dns.resource_record_set.ResourceRecordSet` :param record_set: the record set to append. :raises: ``ValueError`` if ``record_set`` is not of the required type.
def read(self): """ Read and return the contents of the file. """ with open(self.path) as f: d = f.read() return d
Read and return the contents of the file.
def get_ref(self): """ Returns a `DBRef` for this object or ``None``. """ _id = self.id if _id is None: return None else: return DBRef(self.collection, _id)
Returns a `DBRef` for this object or ``None``.
def set_object_metadata(self, container, obj, metadata, clear=False,
        extra_info=None, prefix=None):
    """
    Accepts a dictionary of metadata key/value pairs and updates the
    specified object metadata with them.

    If 'clear' is True, any existing metadata is deleted and only the
    passed metadata is retained. Otherwise, the values passed here update
    the object's metadata.

    'extra_info' is an optional dictionary which will be populated with
    'status', 'reason', and 'headers' keys from the underlying swiftclient
    call.

    By default, the standard object metadata prefix ('X-Object-Meta-') is
    prepended to the header name if it isn't present. For non-standard
    headers, you must include a non-None prefix, such as an empty string.
    """
    return container.set_object_metadata(obj, metadata, clear=clear,
            prefix=prefix)
Accepts a dictionary of metadata key/value pairs and updates the
specified object metadata with them.

If 'clear' is True, any existing metadata is deleted and only the
passed metadata is retained. Otherwise, the values passed here update
the object's metadata.

'extra_info' is an optional dictionary which will be populated with
'status', 'reason', and 'headers' keys from the underlying swiftclient
call.

By default, the standard object metadata prefix ('X-Object-Meta-') is
prepended to the header name if it isn't present. For non-standard
headers, you must include a non-None prefix, such as an empty string.
def save_object(self, obj): """ Save object to disk as JSON. Generally shouldn't be called directly. """ obj.pre_save(self.jurisdiction.jurisdiction_id) filename = '{0}_{1}.json'.format(obj._type, obj._id).replace('/', '-') self.info('save %s %s as %s', obj._type, obj, filename) self.debug(json.dumps(OrderedDict(sorted(obj.as_dict().items())), cls=utils.JSONEncoderPlus, indent=4, separators=(',', ': '))) self.output_names[obj._type].add(filename) with open(os.path.join(self.datadir, filename), 'w') as f: json.dump(obj.as_dict(), f, cls=utils.JSONEncoderPlus) # validate after writing, allows for inspection on failure try: obj.validate() except ValueError as ve: if self.strict_validation: raise ve else: self.warning(ve) # after saving and validating, save subordinate objects for obj in obj._related: self.save_object(obj)
Save object to disk as JSON. Generally shouldn't be called directly.
def ocsp_urls(self): """ :return: A list of zero or more unicode strings of the OCSP URLs for this cert """ if not self.authority_information_access_value: return [] output = [] for entry in self.authority_information_access_value: if entry['access_method'].native == 'ocsp': location = entry['access_location'] if location.name != 'uniform_resource_identifier': continue url = location.native if url.lower().startswith(('http://', 'https://', 'ldap://', 'ldaps://')): output.append(url) return output
:return: A list of zero or more unicode strings of the OCSP URLs for this cert
def translate_labels(val): ''' Can either be a list of label names, or a list of name=value pairs. The API can accept either a list of label names or a dictionary mapping names to values, so the value we translate will be different depending on the input. ''' if not isinstance(val, dict): if not isinstance(val, list): val = split(val) new_val = {} for item in val: if isinstance(item, dict): if len(item) != 1: raise SaltInvocationError('Invalid label(s)') key = next(iter(item)) val = item[key] else: try: key, val = split(item, '=', 1) except ValueError: key = item val = '' if not isinstance(key, six.string_types): key = six.text_type(key) if not isinstance(val, six.string_types): val = six.text_type(val) new_val[key] = val val = new_val return val
Can either be a list of label names, or a list of name=value pairs. The API can accept either a list of label names or a dictionary mapping names to values, so the value we translate will be different depending on the input.
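To make the accepted input shapes concrete, a trimmed standalone sketch of the same normalization; str.split and str stand in for Salt's split helper and six coercions:

def normalize_labels(val):
    if not isinstance(val, dict):
        if not isinstance(val, list):
            val = val.split()
        new_val = {}
        for item in val:
            if isinstance(item, dict):
                # one-entry dict: name -> value
                key = next(iter(item))
                new_val[str(key)] = str(item[key])
            else:
                # 'name=value' string, or a bare name with empty value
                key, _, value = str(item).partition('=')
                new_val[key] = value
        val = new_val
    return val

print(normalize_labels(['app=web', 'tier']))  # {'app': 'web', 'tier': ''}
print(normalize_labels('app=web tier'))       # {'app': 'web', 'tier': ''}
print(normalize_labels([{'app': 'web'}]))     # {'app': 'web'}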
def contains_field_list(self, path, name): """ Returns True if a multi-valued field exists at the specified path, otherwise False. :param path: str or Path instance :param name: :type name: str :return: :raises ValueError: A component of path is a field name. :raises TypeError: The field name is a component of a path. """ try: self.get_field_list(path, name) return True except KeyError: return False
Returns True if a multi-valued field exists at the specified path, otherwise False. :param path: str or Path instance :param name: :type name: str :return: :raises ValueError: A component of path is a field name. :raises TypeError: The field name is a component of a path.
def is_user_id_available(self,
                         user_id,
                         note=None,
                         loglevel=logging.DEBUG):
    """Determine whether the specified user_id is available.

    @param user_id:  User id to be checked.
    @param note:     See send()

    @type user_id:   integer

    @rtype:          boolean
    @return:         True if the specified user id is not used yet, False if
                     it's already been assigned to a user.
    """
    shutit = self.shutit
    shutit.handle_note(note)
    # v the space is intentional, to avoid polluting bash history.
    self.send(ShutItSendSpec(self,
                             send=' command cut -d: -f3 /etc/passwd | grep -w ^' + user_id + '$ | wc -l',
                             expect=self.default_expect,
                             echo=False,
                             loglevel=loglevel,
                             ignore_background=True))
    shutit.handle_note_after(note=note)
    if shutit.match_string(self.pexpect_child.before, '^([0-9]+)$') == '1':
        return False
    return True
Determine whether the specified user_id is available.

@param user_id:  User id to be checked.
@param note:     See send()

@type user_id:   integer

@rtype:          boolean
@return:         True if the specified user id is not used yet, False if
                 it's already been assigned to a user.
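On the local host, the same check can be done without shelling out by walking the account database with the standard pwd module; a Unix-only sketch, not the remote pexpect-driven path used above:

import pwd

def uid_available(uid):
    # True if no existing account uses this numeric user id
    return all(entry.pw_uid != uid for entry in pwd.getpwall())

print(uid_available(12345))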