code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def rst_to_html(in_rst, stderr):
    """Render HTML from an RST fragment.

    :param in_rst: an RST-formatted string; empty input yields ('', 0).
    :param stderr: an open stream used for docutils stderr output.
    :returns: tuple of (rendered HTML string, docutils exit status).
    """
    if not in_rst:
        return '', 0
    # docutils may call sys.exit() and write to sys.stderr on errors;
    # temporarily capture both so a bad fragment cannot kill the process.
    orig_sys_exit = sys.exit
    orig_sys_stderr = sys.stderr
    returncodes = []
    try:
        sys.exit = returncodes.append  # record exit codes instead of exiting
        sys.stderr = stderr
        pp = publish_parts(in_rst, writer_name='html',
                           settings_overrides=dict(exit_status_level=2, report_level=2),
                           enable_exit_status=True)
    finally:
        sys.exit = orig_sys_exit
        sys.stderr = orig_sys_stderr
    return_value = ''
    if 'title' in pp and pp['title']:
        return_value += '<title>{0}</title>\n<p style="font: 200% bold">{0}</p>\n'.format(pp['title'])
    return_value += pp['body'].strip()
    # any exit status recorded via the patched sys.exit becomes the return code
    return return_value, returncodes.pop() if returncodes else 0
Renders HTML from an RST fragment. :param string in_rst: An rst formatted string. :param stderr: An open stream to use for docutils stderr output. :returns: A tuple of (html rendered rst, return code)
def in_(self, qfield, *values):
    """Filter for documents where the value of ``qfield`` is one of ``values``.

    :param qfield: a query field (resolved against ``self.type``).
    :param values: plain python values which ``qfield`` can wrap.
    :returns: self, for chaining.
    """
    qfield = resolve_name(self.type, qfield)
    self.filter(QueryExpression({ qfield : { '$in' : [qfield.wrap_value(value) for value in values]}}))
    return self
Check to see that the value of ``qfield`` is one of ``values`` :param qfield: Instance of :class:`ommongo.query_expression.QueryExpression` :param values: Values should be python values which ``qfield`` understands
def hex2pub(pub_hex: str) -> PublicKey:
    """Convert an ethereum public-key hex string to a coincurve PublicKey.

    Ethereum public keys are 64 bytes; the uncompressed secp256k1 form
    needs a leading 0x04 byte, which is prepended here when missing.
    """
    uncompressed = decode_hex(pub_hex)
    if len(uncompressed) == 64:
        uncompressed = b"\x04" + uncompressed
    return PublicKey(uncompressed)
Convert ethereum hex to EllipticCurvePublicKey The hex should be 65 bytes, but ethereum public key only has 64 bytes So have to add \x04 Parameters ---------- pub_hex: str Ethereum public key hex string Returns ------- coincurve.PublicKey A secp256k1 public key calculated from ethereum public key hex string >>> data = b'0'*32 >>> data_hash = sha256(data) >>> eth_prv = generate_eth_key() >>> cc_prv = hex2prv(eth_prv.to_hex()) >>> eth_prv.sign_msg_hash(data_hash).to_bytes() == cc_prv.sign_recoverable(data) True >>> pubhex = eth_prv.public_key.to_hex() >>> computed_pub = hex2pub(pubhex) >>> computed_pub == cc_prv.public_key True
def _getNextArticleBatch(self):
    """Download the next page of articles for the current query and
    append the results to ``self._articleList``.

    Stops silently once all known pages have been fetched; prints the
    error message when the service reports one.
    """
    self._articlePage += 1
    # stop if we already fetched all available pages
    if self._totalPages is not None and self._articlePage > self._totalPages:
        return
    self.setRequestedResult(RequestArticlesInfo(
        page=self._articlePage,
        sortBy=self._sortBy, sortByAsc=self._sortByAsc,
        returnInfo=self._returnInfo))
    if self._er._verboseOutput:
        print("Downloading article page %d..." % (self._articlePage))
    res = self._er.execQuery(self)
    if "error" in res:
        print("Error while obtaining a list of articles: " + res["error"])
    else:
        articles = res.get("articles", {})
        self._totalPages = articles.get("pages", 0)
        self._articleList.extend(articles.get("results", []))
download next batch of articles based on the article uris in the uri list
def _resolve_base_image(self, build_json):
    """Return the base image to use for this build.

    If the build was triggered by an image change (auto-rebuild), use the
    triggering imageID from the build spec; otherwise fall back to the
    builder's configured base image.
    """
    spec = build_json.get("spec")
    try:
        image_id = spec['triggeredBy'][0]['imageChangeBuild']['imageID']
    except (TypeError, KeyError, IndexError):
        # not an auto-rebuild: spec is missing or has no triggeredBy entry
        base_image = self.workflow.builder.base_image
        self.log.info("using %s as base image.", base_image)
    else:
        self.log.info("using %s from build spec[triggeredBy] as base image.", image_id)
        base_image = ImageName.parse(image_id)
    return base_image
If this is an auto-rebuild, adjust the base image to use the triggering build
def _serialize_datetime(value):
    """Serialize a datetime (or arrow.Arrow) object to its ISO-8601 string.

    :raises ValueError: if ``value`` is not a datetime/Arrow instance.
    """
    if not isinstance(value, (datetime, arrow.Arrow)):
        raise ValueError(u'The received object was not a datetime: '
                         u'{} {}'.format(type(value), value))
    return value.isoformat()
Serialize a DateTime object to its proper ISO-8601 representation.
def go_to_parent_directory(self):
    """Change the working directory to the parent of the current one."""
    self.chdir(osp.abspath(osp.join(getcwd_or_home(), os.pardir)))
Go to parent directory
def get_search_fields(self):
    """Return the list of lookup names used for searching.

    :raises NotImplementedError: when the subclass did not define
        ``search_fields``.
    """
    fields = self.search_fields
    if fields:
        return fields
    raise NotImplementedError(
        '%s, must implement "search_fields".' % self.__class__.__name__)
Return list of lookup names.
def make_node(
        op_type, inputs, outputs,
        name=None, doc_string=None, domain=None,
        **kwargs
):
    """Construct a NodeProto.

    Arguments:
        op_type (string): name of the operator to construct
        inputs (list of string): list of input names
        outputs (list of string): list of output names
        name (string, default None): optional unique identifier
        doc_string (string, default None): optional documentation string
        domain (string, default None): optional domain; None means the
            default (empty) domain
        **kwargs: node attributes; values accepted by make_attribute
    """
    node = NodeProto()
    node.op_type = op_type
    node.input.extend(inputs)
    node.output.extend(outputs)
    if name:
        node.name = name
    if doc_string:
        node.doc_string = doc_string
    if domain is not None:
        # empty-string domain is meaningful, so only skip when not given
        node.domain = domain
    if kwargs:
        # sorted() keeps attribute order deterministic
        node.attribute.extend(
            make_attribute(key, value)
            for key, value in sorted(kwargs.items()))
    return node
Construct a NodeProto. Arguments: op_type (string): The name of the operator to construct inputs (list of string): list of input names outputs (list of string): list of output names name (string, default None): optional unique identifier for NodeProto doc_string (string, default None): optional documentation string for NodeProto domain (string, default None): optional domain for NodeProto. If it's None, we will just use default domain (which is empty) **kwargs (dict): the attributes of the node. The acceptable values are documented in :func:`make_attribute`.
def add(self, item):
    """Add the specified item to the end of this list.

    :param item: (object), the item to append; must not be None.
    :return: invocation result, (bool) ``true`` if the item is added.
    """
    check_not_none(item, "Value can't be None")
    element_data = self._to_data(item)
    return self._encode_invoke(list_add_codec, value=element_data)
Adds the specified item to the end of this list. :param item: (object), the specified item to be appended to this list. :return: (bool), ``true`` if item is added, ``false`` otherwise.
def iter_followers(self, login=None, number=-1, etag=None):
    """Iterate over followers of ``login``, or of the authenticated user.

    :param str login: (optional) login of the user to check
    :param int number: (optional) number of followers; -1 returns all
    :param str etag: (optional) ETag from a previous request
    :returns: generator of users
    """
    if login:
        # NOTE(review): number and etag are not forwarded when a login
        # is given -- confirm this is intended
        return self.user(login).iter_followers()
    return self._iter_follow('followers', int(number), etag=etag)
If login is provided, iterate over a generator of followers of that login name; otherwise return a generator of followers of the authenticated user. :param str login: (optional), login of the user to check :param int number: (optional), number of followers to return. Default: -1 returns all followers :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`User <github3.users.User>`\ s
def _SetYaraRules(self, yara_rules_string):
    """Set the Yara rules on a new 'yara' analyzer and register it.

    Args:
        yara_rules_string (str): unparsed Yara rule definitions;
            falsy input is a no-op.
    """
    if not yara_rules_string:
        return
    analyzer_object = analyzers_manager.AnalyzersManager.GetAnalyzerInstance(
        'yara')
    analyzer_object.SetRules(yara_rules_string)
    self._analyzers.append(analyzer_object)
Sets the Yara rules. Args: yara_rules_string (str): unparsed Yara rule definitions.
def _strip_colors(self, message: str) -> str: for c in self.COLORS: message = message.replace(c, "") return message
Remove all of the color tags from this message.
def to_dict(self):
    """Convert the table to a dict holding its name, kind and row data."""
    return {
        "name": self.table_name,
        "kind": self.table_kind,
        "data": [row.to_dict() for row in self],
    }
Converts the table to a dict.
def remove_record(self, record):
    """Remove a record from the OAISet by filtering this set's spec out
    of ``record['_oai']['sets']``.

    :param record: Record to be removed; must currently be in the set.
    """
    assert self.has_record(record)
    remaining = [spec for spec in record['_oai']['sets'] if spec != self.spec]
    record['_oai']['sets'] = remaining
Remove a record from the OAISet. :param record: Record to be removed. :type record: `invenio_records.api.Record` or derivative.
def union(cls):
    """Class decorator marking *cls* as a union base.

    Returns a subclass of *cls* (same name) carrying ``_is_union = True``
    so other classes can register against it with a ``UnionRule``.
    """
    assert isinstance(cls, type)
    marker = {'_is_union': True}
    return type(cls.__name__, (cls,), marker)
A class decorator which other classes can specify that they can resolve to with `UnionRule`. Annotating a class with @union allows other classes to use a UnionRule() instance to indicate that they can be resolved to this base union class. This class will never be instantiated, and should have no members -- it is used as a tag only, and will be replaced with whatever object is passed in as the subject of a `yield Get(...)`. See the following example: @union class UnionBase(object): pass @rule(B, [X]) def get_some_union_type(x): result = yield Get(ResultType, UnionBase, x.f()) # ... If there exists a single path from (whatever type the expression `x.f()` returns) -> `ResultType` in the rule graph, the engine will retrieve and execute that path to produce a `ResultType` from `x.f()`. This requires also that whatever type `x.f()` returns was registered as a union member of `UnionBase` with a `UnionRule`. Unions allow @rule bodies to be written without knowledge of what types may eventually be provided as input -- rather, they let the engine check that there is a valid path to the desired result.
def create_client():
    """Report whether a driver already exists for the requesting client.

    The driver itself is created in the before_request function; this
    endpoint only returns whether the client's driver is registered.
    """
    exists = g.client_id in drivers
    return jsonify({'Success': exists})
Create a new client driver. The driver is automatically created in before_request function.
def provider(self, value):
    """Validate and set the WMI provider architecture.

    Falls back to ``ProviderArchitecture.DEFAULT`` (with an error log)
    when ``value`` is missing or not a valid member.
    """
    result = None
    # treat falsy input (None, empty string) as "use the default"
    defaulted_value = value or ProviderArchitecture.DEFAULT
    try:
        parsed_value = int(defaulted_value)
    except ValueError:
        # non-numeric input: result stays None and is logged below
        pass
    else:
        if parsed_value in ProviderArchitecture:
            result = parsed_value
    if result is None:
        self.logger.error(u"Invalid '%s' WMI Provider Architecture. The parameter is ignored.", value)
    # NOTE(review): ``result or DEFAULT`` would also map a valid falsy
    # member (value 0) to DEFAULT -- confirm members are non-zero or
    # that 0 is DEFAULT itself.
    self._provider = result or ProviderArchitecture.DEFAULT
Validate and set a WMI provider. Default to `ProviderArchitecture.DEFAULT`
def get_if_set(self, addresses):
    """Return, for each address, the value set in this context or None.

    Args:
        addresses (list of str): addresses to look up.

    Returns:
        list: value set at each address, or None when unset.
    """
    with self._lock:
        return [self._get_if_set(address) for address in addresses]
Returns the value set in this context, or None, for each address in addresses. Args: addresses (list of str): The addresses to return values for, if set within this context. Returns: (list): bytes set at the address or None
def remove(self, oid):
    """Remove a faked Port resource and update the parent Adapter.

    The port URI is removed from the adapter's 'network-port-uris' or
    'storage-port-uris' property before delegating the actual removal.

    Parameters:
        oid (string): object ID of the faked Port resource.
    """
    port = self.lookup_by_oid(oid)
    adapter = self.parent
    if 'network-port-uris' in adapter.properties:
        port_uris = adapter.properties['network-port-uris']
        port_uris.remove(port.uri)
    if 'storage-port-uris' in adapter.properties:
        # NOTE(review): if both properties ever existed, remove() would
        # run on both lists -- presumably an adapter has only one kind.
        port_uris = adapter.properties['storage-port-uris']
        port_uris.remove(port.uri)
    super(FakedPortManager, self).remove(oid)
Remove a faked Port resource. This method also updates the 'network-port-uris' or 'storage-port-uris' property in the parent Adapter resource, by removing the URI for the faked Port resource. Parameters: oid (string): The object ID of the faked Port resource.
def sort_aliases(self, aliases):
    """Sort ``aliases`` according to the cached parent-alias order.

    :param list aliases:
    :return: the aliases present in the parent cache, in cache order;
        the input is returned unchanged when empty.
    """
    self._cache_init()
    if not aliases:
        return aliases
    ordered_parents = self._cache_get_entry(self.CACHE_NAME_PARENTS).keys()
    return [alias for alias in ordered_parents if alias in aliases]
Sorts the given aliases list, returns a sorted list. :param list aliases: :return: sorted aliases list
def get_instance(self, payload):
    """Build a DocumentInstance from an API payload dict.

    :param dict payload: Payload response from the API
    :returns: the constructed DocumentInstance
    """
    return DocumentInstance(self._version, payload, service_sid=self._solution['service_sid'], )
Build an instance of DocumentInstance :param dict payload: Payload response from the API :returns: twilio.rest.preview.sync.service.document.DocumentInstance :rtype: twilio.rest.preview.sync.service.document.DocumentInstance
def sbo_case_insensitive(self):
    """Resolve ``self.name`` case-insensitively against sbo repository names.

    Only active when the '--case-ins' flag is set.
    """
    if "--case-ins" in self.flag:
        data = SBoGrep(name="").names()
        # presumably a mapping of lowercase name -> original-case name;
        # confirm against Utils().case_sensitive
        data_dict = Utils().case_sensitive(data)
        for key, value in data_dict.iteritems():  # Python 2 dict API
            if key == self.name.lower():
                self.name = value
Match package names case-insensitively against the sbo repository, which distinguishes between uppercase and lowercase.
def release(self): "increment the counter, waking up a waiter if there was any" if self._waiters: scheduler.state.awoken_from_events.add(self._waiters.popleft()) else: self._value += 1
increment the counter, waking up a waiter if there was any
def _center_tile(self, position, size): x, y = position w, h = size return x + (self.cell_width - w) / 2, y + (self.cell_height - h) / 2
Calculate the centre of a tile given the top-left corner and the size of the image.
def subgraph(graph, nodes: Iterable[BaseEntity]):
    """Induce a sub-graph over the given nodes.

    Node data and keyed multi-edge data are copied into a fresh graph so
    the result does not share mutable state with ``graph``.

    :rtype: BELGraph
    """
    sg = graph.subgraph(nodes)
    result = graph.fresh_copy()
    result.graph.update(sg.graph)
    for node, data in sg.nodes(data=True):
        result.add_node(node, **data)
    result.add_edges_from(
        (u, v, key, datadict.copy())
        for u, v, key, datadict in sg.edges(keys=True, data=True)
    )
    return result
Induce a sub-graph over the given nodes. :rtype: BELGraph
def get_task(task_id, completed=True):
    """Get a single task by task id.

    :param task_id: task ID (required)
    :param completed: include completed tasks?
    :return: the matching task, or None when no task matches
    """
    tasks = get_tasks(task_id=task_id, completed=completed)
    if len(tasks) == 0:
        return None
    # internal invariant: task ids are unique
    assert len(tasks) == 1, 'get_task should return at max 1 task for a task id'
    return tasks[0]
Get a task by task id where a task_id is required. :param task_id: task ID :type task_id: str :param completed: include completed tasks? :type completed: bool :return: a task :rtype: obj
def get_session_identifiers(cls, folder=None, inputfile=None):
    """Retrieve the list of session identifiers found in ``folder``.

    Identifiers are taken from files named ``Session <id>...mqo`` found
    anywhere under ``folder`` (sub-folders included).

    :kwarg folder: path to the folder to scan; missing/None yields [].
    :kwarg inputfile: path to an input file (unused here).
    """
    identifiers = []
    if folder is None or not os.path.isdir(folder):
        return identifiers
    for _root, _dirs, filenames in os.walk(folder):
        for entry in filenames:
            if entry.startswith('Session ') and entry.endswith('.mqo'):
                identifier = entry.split()[1]
                if identifier not in identifiers:
                    identifiers.append(identifier)
    return identifiers
Retrieve the list of session identifiers contained in the data on the folder. :kwarg folder: the path to the folder containing the files to check. This folder may contain sub-folders. :kwarg inputfile: the path to the input file to use
def AddLeafNodes(self, prefix, node):
    """Recursively add dotted paths for the leaves of ``node`` under
    ``prefix``; an empty node makes ``prefix`` itself a leaf."""
    if not node:
        self.AddPath(prefix)
    for child_name in node:
        self.AddLeafNodes(prefix + '.' + child_name, node[child_name])
Adds leaf nodes begin with prefix to this tree.
def set_window_title(self):
    """Compose and set the main window title from the version, debug
    mode, custom title and active project path."""
    if DEV is not None:
        # development build: include Spyder's own version number
        title = u"Spyder %s (Python %s.%s)" % (__version__,
                                               sys.version_info[0],
                                               sys.version_info[1])
    else:
        title = u"Spyder (Python %s.%s)" % (sys.version_info[0],
                                            sys.version_info[1])
    if get_debug_level():
        title += u" [DEBUG MODE %d]" % get_debug_level()
    if self.window_title is not None:
        title += u' -- ' + to_text_string(self.window_title)
    if self.projects is not None:
        path = self.projects.get_active_project_path()
        if path:
            # abbreviate the home directory to ~ for display
            path = path.replace(get_home_dir(), u'~')
            title = u'{0} - {1}'.format(path, title)
    self.base_title = title
    self.setWindowTitle(self.base_title)
Set window title.
def _file_write(path, content):
    '''
    Write ``content`` to the file at ``path``.

    :param path: destination file path
    :param content: text to write (converted with to_str)
    '''
    # fopen is a context manager, so the file is closed automatically on
    # exit; the original explicit fp_.close() inside the with-block was
    # redundant and has been removed.
    with salt.utils.files.fopen(path, 'w+') as fp_:
        fp_.write(salt.utils.stringutils.to_str(content))
Write content to a file
def list(self):
    """Print an overview of the report content list by delegating to
    each section, passing its 1-based position as the walk trace."""
    # enumerate replaces the index-based range(len(...)) loop
    for position, section in enumerate(self.sections, start=1):
        section.list(walkTrace=(position,))
Get an overview of the report content list
def AdaptiveOpticsCorrect(pupils, diameter, maxRadial, numRemove=None):
    """Correct wavefronts by rejecting Zernike modes up to maxRadial.

    Operates on multiple pupil screens in parallel (the leading axes of
    ``pupils``). Note that the piston mode (index 0) is removed as well.
    """
    gridSize = pupils.shape[-1]
    # flatten the screens so each row is one pupil grid
    pupilsVector = np.reshape(pupils, (-1, gridSize**2))
    zernikes = np.reshape(ZernikeGrid(gridSize, maxRadial, diameter), (-1, gridSize**2))
    if numRemove is None:
        numRemove = zernikes.shape[0]
    numScreen = pupilsVector.shape[0]  # NOTE(review): unused
    normalisation = 1.0/np.sum(zernikes[0])
    # the trailing [0,] removes the piston mode one final time after
    # the other modes have been projected out
    for i in list(range(numRemove))+[0,]:
        amplitudes = np.inner(zernikes[i], pupilsVector)*normalisation
        pupilsVector = pupilsVector-zernikes[i]*amplitudes[:, np.newaxis]
    return np.reshape(pupilsVector, pupils.shape)
Correct a wavefront using Zernike rejection up to some maximal order. Can operate on multiple telescopes in parallel. Note that this version removes the piston mode as well
def add_tokens_for_group(self, with_pass=False):
    """Add the tokens for the group signature (a describe block),
    optionally followed by a ``pass`` body, then close the signature."""
    kls = self.groups.super_kls
    name = self.groups.kls_name
    self.reset_indentation('')
    self.result.extend(self.tokens.make_describe(kls, name))
    if with_pass:
        self.add_tokens_for_pass()
    self.groups.finish_signature()
Add the tokens for the group signature
def _append_base_arguments(self):
    """Append base arguments (-except/-only, -var, -var-file) to the
    packer command.

    :raises PackerException: if both "except" and "only" were provided.
    """
    if self.exc and self.only:
        raise PackerException('Cannot provide both "except" and "only"')
    elif self.exc:
        self._add_opt('-except={0}'.format(self._join_comma(self.exc)))
    elif self.only:
        self._add_opt('-only={0}'.format(self._join_comma(self.only)))
    for var, value in self.vars.items():
        # each variable becomes a "-var name=value" option pair
        self._add_opt("-var")
        self._add_opt("{0}={1}".format(var, value))
    if self.var_file:
        self._add_opt('-var-file={0}'.format(self.var_file))
Appends base arguments to packer commands. -except, -only, -var and -var-file are appended to almost all subcommands in packer. As such this can be called to add these flags to the subcommand.
def remove_profile(self):
    """Remove the currently selected profile after user confirmation."""
    profile_name = self.profile_combo.currentText()
    button_selected = QMessageBox.warning(
        None, 'Remove Profile', self.tr('Remove %s.') % profile_name,
        QMessageBox.Ok, QMessageBox.Cancel
    )
    if button_selected == QMessageBox.Ok:
        self.profile_combo.removeItem(
            self.profile_combo.currentIndex()
        )
        self.minimum_needs.remove_profile(profile_name)
        # re-select whatever profile is now at the current index
        self.select_profile(self.profile_combo.currentIndex())
Remove the current profile. Make sure the user is sure.
def max_spline_jump(self):
    """Return the maximum difference between the energies and their
    spline fit (largest deviation above the spline)."""
    sp = self.spline()
    return max(self.energies - sp(range(len(self.energies))))
Get maximum difference between spline and energy trend.
def status(self, name=''):
    """Return statuses of the ``name`` service, or of all services when
    ``name`` is omitted, for this init system.

    Updates and returns ``self.services`` (set up in base.py).
    """
    super(SystemD, self).status(name=name)
    svc_list = sh.systemctl('--no-legend', '--no-pager', t='service')
    svcs_info = [self._parse_service_info(svc) for svc in svc_list]
    if name:
        # systemctl reports units with a .service suffix; accept both
        names = (name, name + '.service')
        svcs_info = [s for s in svcs_info if s['name'] in names]
    self.services['services'] = svcs_info
    return self.services
Return a list of the statuses of the `name` service, or if name is omitted, a list of the status of all services for this specific init system. There should be a standardization around the status fields. There currently isn't. `self.services` is set in `base.py`
def get_xsession(self, item):
    """Return the XNAT session corresponding to ``item``, creating the
    subject and session on the server when they do not exist yet."""
    subj_label, sess_label = self._get_item_labels(item)
    with self:
        xproject = self._login.projects[self.project_id]
        try:
            xsubject = xproject.subjects[subj_label]
        except KeyError:
            # subject not present yet: create it under the project
            xsubject = self._login.classes.SubjectData(
                label=subj_label, parent=xproject)
        try:
            xsession = xsubject.experiments[sess_label]
        except KeyError:
            xsession = self._login.classes.MrSessionData(
                label=sess_label, parent=xsubject)
        if item.derived:
            # record which session this derived item came from
            xsession.fields[
                self.DERIVED_FROM_FIELD] = self._get_item_labels(
                    item, no_from_study=True)[1]
    return xsession
Returns the XNAT session and cache dir corresponding to the item.
def find_entry(self, entry, exact=True):
    """Return the full path of the first entry matching ``entry``.

    With ``exact`` True the entry path must match exactly; otherwise
    only the file name is compared. Returns None when not found.
    """
    if exact:
        self.log([u"Finding entry '%s' with exact=True", entry])
        if entry in self.entries:
            self.log([u"Found entry '%s'", entry])
            return entry
    else:
        self.log([u"Finding entry '%s' with exact=False", entry])
        for candidate in self.entries:
            if os.path.basename(candidate) == entry:
                self.log([u"Found entry '%s'", candidate])
                return candidate
    self.log([u"Entry '%s' not found", entry])
    return None
Return the full path to the first entry whose file name equals the given ``entry`` path. Return ``None`` if the entry cannot be found. If ``exact`` is ``True``, the path must be exact, otherwise the comparison is done only on the file name. Example: :: entry = "config.txt" matches: :: config.txt (if exact == True or exact == False) foo/config.txt (if exact == False) foo/bar/config.txt (if exact == False) :param string entry: the entry name to be searched for :param bool exact: look for the exact entry path :rtype: string :raises: same as :func:`~aeneas.container.Container.entries`
def from_pb(cls, cell_pb):
    """Create a new cell from a Cell protobuf.

    :param cell_pb: the protobuf to convert; labels are passed through
        only when present.
    :returns: the cell corresponding to the protobuf.
    """
    if cell_pb.labels:
        return cls(cell_pb.value, cell_pb.timestamp_micros, labels=cell_pb.labels)
    else:
        return cls(cell_pb.value, cell_pb.timestamp_micros)
Create a new cell from a Cell protobuf. :type cell_pb: :class:`._generated.data_pb2.Cell` :param cell_pb: The protobuf to convert. :rtype: :class:`Cell` :returns: The cell corresponding to the protobuf.
def search_files(source: str, extensions: List[str]) -> List[Path]:
    """Retrieve (shuffled) files under ``source`` and its subdirectories
    whose extension is one of ``extensions``.

    :raise GuesslangError: when fewer than NB_FILES_MIN files are found
    :param source: directory name
    :param extensions: list of file extensions (without leading dot)
    :return: list of file paths
    """
    files = [
        path for path in Path(source).glob('**/*')
        if path.is_file() and path.suffix.lstrip('.') in extensions]
    nb_files = len(files)
    LOGGER.debug("Total files found: %d", nb_files)
    if nb_files < NB_FILES_MIN:
        LOGGER.error("Too few source files")
        raise GuesslangError(
            '{} source files found in {}. {} files minimum is required'.format(
                nb_files, source, NB_FILES_MIN))
    # shuffle so downstream consumers are not order-biased
    random.shuffle(files)
    return files
Retrieve files located the source directory and its subdirectories, whose extension match one of the listed extensions. :raise GuesslangError: when there is not enough files in the directory :param source: directory name :param extensions: list of file extensions :return: filenames
def getAttribute(self, attr: str) -> _AttrValueType:
    """Get an attribute of this node in string format.

    Returns None when the node does not have ``attr``. The 'class'
    attribute is served from ``classList`` rather than an attribute node.
    """
    if attr == 'class':
        if self.classList:
            return self.classList.toString()
        return None
    attr_node = self.getAttributeNode(attr)
    if attr_node is None:
        return None
    return attr_node.value
Get attribute of this node as string format. If this node does not have ``attr``, return None.
def save_function(elements, module_path):
    """Recursively collect static functions into the module-level
    ``functions`` table, keyed by name, with their module path and
    signature."""
    for elem, signature in elements.items():
        if isinstance(signature, dict):
            # nested module: recurse with the extended module path
            save_function(signature, module_path + (elem,))
        elif signature.isstaticfunction():
            functions.setdefault(elem, []).append((module_path, signature,))
        elif isinstance(signature, Class):
            # classes are scanned for their static members
            save_function(signature.fields, module_path + (elem,))
Recursively save functions with module name and signature.
def format_time(x):
    """Format datetime/timedelta values (python or numpy variants).

    Non-time values are returned unchanged; ndarrays are converted to a
    list (or unwrapped to their scalar when 0-dimensional).
    """
    if isinstance(x, (datetime64, datetime)):
        return format_timestamp(x)
    elif isinstance(x, (timedelta64, timedelta)):
        return format_timedelta(x)
    elif isinstance(x, ndarray):
        # 0-d arrays are unwrapped with x[()], others listed
        return list(x) if x.ndim else x[()]
    return x
Formats date values This function formats :class:`datetime.datetime` and :class:`datetime.timedelta` objects (and the corresponding numpy objects) using the :func:`xarray.core.formatting.format_timestamp` and the :func:`xarray.core.formatting.format_timedelta` functions. Parameters ---------- x: object The value to format. If not a time object, the value is returned Returns ------- str or `x` Either the formatted time object or the initial `x`
def _generate_arg_types(coordlist_length, shape_name):
    """Find coordinate argument types for a shape, expanding the
    repeatable section of the shape definition to cover
    ``coordlist_length`` arguments.

    Returns a list of argument-type objects of length coordlist_length.
    """
    from .ds9_region_parser import ds9_shape_defs
    from .ds9_attr_parser import ds9_shape_in_comment_defs
    if shape_name in ds9_shape_defs:
        shape_def = ds9_shape_defs[shape_name]
    else:
        shape_def = ds9_shape_in_comment_defs[shape_name]
    initial_arg_types = shape_def.args_list
    arg_repeats = shape_def.args_repeat
    if arg_repeats is None:
        return initial_arg_types
    # the slice n1:n2 marks the repeatable part of the argument list
    n1, n2 = arg_repeats
    arg_types = list(initial_arg_types[:n1])
    num_of_repeats = coordlist_length - (len(initial_arg_types) - n2)
    arg_types.extend((num_of_repeats - n1) // (n2 - n1) * initial_arg_types[n1:n2])
    arg_types.extend(initial_arg_types[n2:])
    return arg_types
Find coordinate types based on shape name and coordlist length This function returns a list of coordinate types based on which coordinates can be repeated for a given type of shap Parameters ---------- coordlist_length : int The number of coordinates or arguments used to define the shape. shape_name : str One of the names in `pyregion.ds9_shape_defs`. Returns ------- arg_types : list A list of objects from `pyregion.region_numbers` with a length equal to coordlist_length.
def _assign_as_root(self, id_):
    """Create the relationship that makes ``id_`` a root object (a child
    of the phantom root) in the hierarchy."""
    rfc = self._ras.get_relationship_form_for_create(self._phantom_root_id, id_, [])
    rfc.set_display_name('Implicit Root to ' + str(id_) + ' Parent-Child Relationship')
    rfc.set_description(self._relationship_type.get_display_name().get_text() + ' relationship for implicit root and child: ' + str(id_))
    rfc.set_genus_type(self._relationship_type)
    self._ras.create_relationship(rfc)
Assign an id_ a root object in the hierarchy
def j9urlGenerator(nameDict=False):
    """Build the WOS Journal Title Abbreviations help-page urls.

    The pages are "0-9" plus one per capital letter, all of the form
    "https://images.webofknowledge.com/images/help/WOS/{VAL}_abrvjt.html".

    Returns a list of url strings, or (when nameDict is True) a dict
    mapping each letter/"0-9" to its url.
    """
    start = "https://images.webofknowledge.com/images/help/WOS/"
    end = "_abrvjt.html"
    sections = ["0-9"] + [letter for letter in string.ascii_uppercase]
    if nameDict:
        return {section: start + section + end for section in sections}
    return [start + section + end for section in sections]
How to get all the urls for the WOS Journal Title Abbreviations. Each is varies by only a few characters. These are the currently in use urls they may change. They are of the form: > "https://images.webofknowledge.com/images/help/WOS/{VAL}_abrvjt.html" > Where {VAL} is a capital letter or the string "0-9" # Returns `list[str]` > A list of all the url's strings
def translate(self, text):
    """Translate ``text``, returning the modified text.

    Resets the match counter, then substitutes using the instance's
    regex; the instance itself serves as the replacement callable.
    """
    self.count = 0
    return self._make_regex().sub(self, text)
Translate text, returns the modified text.
def parse_access_token(self):
    """Extract the secret and token values from the access_token file.

    Each line is expected to look like ``name=value``; the first line
    holds the secret, the second the token. When the file is missing,
    new request and access tokens are fetched instead.
    """
    access_file = os.path.join(self.file_path, 'access_token')
    if os.path.isfile(access_file):
        access_list = list()
        with open(access_file, 'r') as access_token:
            for line in access_token:
                value, data = line.split('=')
                access_list.append(data.rstrip())
        self.access_secret = access_list[0]
        self.access_token = access_list[1]
    else:
        print('Missing access_token')
        self.get_request_token()
        self.get_access_token()
Extract the secret and token values from the access_token file
def parse_string(self):
    """Parse a regular unquoted string from the token stream.

    Known literal aliases (matched case-insensitively) are resolved to
    their aliased values; everything else becomes a String.
    """
    aliased_value = LITERAL_ALIASES.get(self.current_token.value.lower())
    if aliased_value is not None:
        return aliased_value
    return String(self.current_token.value)
Parse a regular unquoted string from the token stream.
async def create_server(self, worker, protocol_factory, address=None,
                        sockets=None, idx=0):
    """Create and start the server which will listen for requests.

    :return: the started server built by ``self.server_factory``.
    """
    cfg = self.cfg
    max_requests = cfg.max_requests
    if max_requests:
        # jitter the per-worker request limit so workers do not all
        # recycle at the same time
        max_requests = int(lognormvariate(log(max_requests), 0.2))
    server = self.server_factory(
        protocol_factory,
        loop=worker._loop,
        max_requests=max_requests,
        keep_alive=cfg.keep_alive,
        name=self.name,
        logger=self.logger,
        server_software=cfg.server_software,
        cfg=cfg,
        idx=idx
    )
    # wire configured event callbacks, skipping the pass-through default
    for event in ('connection_made', 'pre_request', 'post_request',
                  'connection_lost'):
        callback = getattr(cfg, event)
        if callback != pass_through:
            server.event(event).bind(callback)
    await server.start_serving(
        sockets=sockets,
        address=address,
        backlog=cfg.backlog,
        sslcontext=self.sslcontext()
    )
    return server
Create the Server which will listen for requests. :return: a :class:`.TcpServer`.
def _lincomb(self, a, x1, b, x2, out):
    """Compute ``out = a*x1 + b*x2`` by delegating to the optimized
    implementation. Part of the subclassing API; do not call directly."""
    _lincomb_impl(a, x1, b, x2, out)
Implement the linear combination of ``x1`` and ``x2``. Compute ``out = a*x1 + b*x2`` using optimized BLAS routines if possible. This function is part of the subclassing API. Do not call it directly. Parameters ---------- a, b : `TensorSpace.field` element Scalars to multiply ``x1`` and ``x2`` with. x1, x2 : `NumpyTensor` Summands in the linear combination. out : `NumpyTensor` Tensor to which the result is written. Examples -------- >>> space = odl.rn(3) >>> x = space.element([0, 1, 1]) >>> y = space.element([0, 0, 1]) >>> out = space.element() >>> result = space.lincomb(1, x, 2, y, out) >>> result rn(3).element([ 0., 1., 3.]) >>> result is out True
def cached(func):
    """Decorator that caches results keyed on the function's arguments.

    The key combines the function name, the positional arguments (in
    call order) and the sorted keyword arguments.
    """
    cache = {}

    @f.wraps(func)
    def wrapper(*args, **kwargs):
        # Positional arguments are order-sensitive, so they must not be
        # sorted: the original str(sorted(args)) made f(1, 2) and
        # f(2, 1) share a cache entry, and raised TypeError for
        # non-comparable argument types.
        key = func.__name__ + str(args) + str(sorted(kwargs.items()))
        if key not in cache:
            cache[key] = func(*args, **kwargs)
        return cache[key]
    return wrapper
A decorator function to cache values. It uses the decorated function's arguments as the keys to determine if the function has been called previously.
def _list_nodes(call=None):
    """List vagrant nodes by asking all minions carrying the
    salt-cloud:driver:vagrant grain for their grains; returns a dict of
    grains keyed by minion."""
    local = salt.client.LocalClient()
    ret = local.cmd('salt-cloud:driver:vagrant', 'grains.items', '', tgt_type='grain')
    return ret
List the nodes, ask all 'vagrant' minions, return dict of grains.
def get_all_importing_namespace_hashes(self):
    """Return the set of all preordered and revealed namespace hashes
    that have not expired, as of the last processed block."""
    cur = self.db.cursor()
    namespace_hashes = namedb_get_all_importing_namespace_hashes(cur, self.lastblock)
    return namespace_hashes
Get the set of all preordered and revealed namespace hashes that have not expired.
def get(self, url, status):
    """Perform a curl GET of ``url``, printing ``status`` to stderr.

    Returns the curl result, or None when requests are DISABLED.
    """
    crl = self.cobj
    try:
        crl.setopt(pycurl.URL, url)
    except UnicodeEncodeError:
        # pycurl needs encoded bytes for non-ascii URLs
        crl.setopt(pycurl.URL, url.encode('utf-8'))
    if not self.silent:
        print(status, file=sys.stderr)
    if self.DISABLED:
        print("Requests DISABLED", file=sys.stderr)
    else:
        return self.curl_perform(crl)
in favor of python-requests for speed
def _include_term_list(self, termlist): ref_needed = False for term in termlist: ref_needed = ref_needed or self._include_term(term) return ref_needed
Add terms from a TermList to the ontology.
def __remove_service(self, key, service):
    """Remove ``service`` from the future dictionary entry at ``key``,
    dropping the entry entirely when it becomes empty.

    :param key: dictionary key
    :param service: service to remove from the dictionary
    """
    try:
        prop_services = self._future_value[key]
        # NOTE(review): if prop_services is a list, a missing service
        # raises ValueError (not caught here) -- presumably it is a set,
        # whose remove() raises the KeyError handled below.
        prop_services.remove(service)
        if not prop_services:
            # no services left for this key
            del self._future_value[key]
    except KeyError:
        # unknown key or service: nothing to remove
        pass
Removes the given service from the future dictionary :param key: Dictionary key :param service: Service to remove from the dictionary
def isdir(self, path):
    """Return True if a directory exists at ``path`` on the remote host,
    False otherwise."""
    try:
        self.remote_context.check_output(["test", "-d", path])
    except subprocess.CalledProcessError as err:
        # ``test`` exits 1 when the path is not a directory; any other
        # code is a real failure and is re-raised.
        if err.returncode == 1:
            return False
        raise
    return True
Return `True` if directory at `path` exist, False otherwise.
def get_currency_symbol(self):
    """Return the symbol of the currency configured in setup."""
    locale = locales.getLocale('en')
    setup = api.get_setup()
    currency = setup.getCurrency()
    return locale.numbers.currencies[currency].symbol
Get the currency Symbol
def _make_summary_tables(self):
    """Print a summary of the regression: waveform metadata, fit
    diagnostics, and hypothesis-test results for each comparison in the
    design matrix. Requires .fit() to have been run (Python 2 code)."""
    try:
        self._Bhat
    except:
        # NOTE(review): bare except; catching AttributeError would be
        # more precise
        raise Exception("Regression hasn't been fit yet. run .fit()")
    else:
        num_pcs = self._basis_object.get_params()['num_components']
        # residual degrees of freedom: observations - predictors - PCs
        total_dof = self._X.shape[0] - self._X.shape[1] - num_pcs
        if total_dof <= 0.0:
            raise ValueError("degrees of freedom <= 0, Hotellings T2 not defined")
        cat_table = self._catalog_object.get_params().items()
        bas_table = self._basis_object.get_params().items()
        print tabulate(cat_table+bas_table,tablefmt='plain')
        headers = self._results[0]
        table = self._results[1:]
        print tabulate(table, headers, tablefmt="rst")
        print "Formula Used: %s" % self._designmatrix_object._formula
        print "Degrees of Freedom (n - p - k): %s" % str(total_dof)
        print "Condition Number of X^T*X: %.2f" % np.linalg.cond(np.dot(self._X.T, self._X))
prints the summary of the regression. It shows the waveform metadata, diagnostics of the fit, and results of the hypothesis tests for each comparison encoded in the design matrix
def _get_full_paths(fastq_dir, config, config_file):
    """Retrieve full paths for directories given possibly relative
    locations.

    Returns (fastq_dir, galaxy config directory, config directory).
    """
    if fastq_dir:
        fastq_dir = utils.add_full_path(fastq_dir)
    config_dir = utils.add_full_path(os.path.dirname(config_file))
    # the galaxy config location is resolved relative to the config dir
    galaxy_config_file = utils.add_full_path(config.get("galaxy_config", "universe_wsgi.ini"), config_dir)
    return fastq_dir, os.path.dirname(galaxy_config_file), config_dir
Retrieve full paths for directories in the case of relative locations.
def to_glyphs_family_user_data_from_ufo(self, ufo):
    """Copy UFO family-wide lib data into the GSFont userData without
    overwriting keys that are already set."""
    target_user_data = self.font.userData
    try:
        for key, value in ufo.lib[FONT_USER_DATA_KEY].items():
            # existing keys win; only fill in missing ones
            if key not in target_user_data.keys():
                target_user_data[key] = value
    except KeyError:
        # the UFO has no family user data lib key
        pass
Set the GSFont userData from the UFO family-wide lib data.
def sentinel2_toa_cloud_mask(input_img):
    """Extract a cloud mask from the Sentinel 2 TOA QA60 band.

    QA60 bits 10 (opaque clouds) and 11 (cirrus) mark cloudy pixels.
    The returned image is 0 for cloud and 1 for cloud free, structured
    for direct use with updateMask().
    """
    qa_img = input_img.select(['QA60'])
    cloud_mask = qa_img.rightShift(10).bitwiseAnd(1).neq(0)\
        .Or(qa_img.rightShift(11).bitwiseAnd(1).neq(0))
    return cloud_mask.Not()
Extract cloud mask from the Sentinel 2 TOA QA60 band Parameters ---------- input_img : ee.Image Image from the COPERNICUS/S2 collection with a QA60 band. Returns ------- ee.Image Notes ----- Output image is structured to be applied directly with updateMask() i.e. 0 is cloud, 1 is cloud free Bits 10: Opaque clouds present 11: Cirrus clouds present The Sentinel 2 TOA and SR cloud masks functions are currently identical References ---------- https://sentinel.esa.int/documents/247904/685211/Sentinel-2_User_Handbook https://sentinel.esa.int/web/sentinel/technical-guides/sentinel-2-msi/level-1c/cloud-masks
def list_route_advertised_from_bgp_speaker(self, speaker_id, **_params):
    """Fetch the list of all routes advertised by a BGP speaker."""
    return self.get((self.bgp_speaker_path % speaker_id) + "/get_advertised_routes", params=_params)
Fetches a list of all routes advertised by BGP speaker.
def generate_airflow_spec(name, pipeline_spec):
    """Generate the Airflow python spec for a Pipeline object.

    :param name: name of the DAG.
    :param pipeline_spec: dict with 'tasks' and optional 'parameters',
        'schedule' and 'emails' entries.
    :returns: the concatenated python source for the Airflow DAG.
    """
    task_definitions = ''
    up_stream_statements = ''  # renamed from the 'up_steam' typo
    parameters = pipeline_spec.get('parameters')
    for task_id, task_details in sorted(pipeline_spec['tasks'].items()):
        task_definitions += PipelineGenerator._get_operator_definition(
            task_id, task_details, parameters)
        up_stream_statements += PipelineGenerator._get_dependency_definition(
            task_id, task_details.get('up_stream', []))
    schedule_config = pipeline_spec.get('schedule', {})
    default_args = PipelineGenerator._get_default_args(
        schedule_config, pipeline_spec.get('emails', {}))
    dag_definition = PipelineGenerator._get_dag_definition(
        name, schedule_config.get('interval', '@once'),
        schedule_config.get('catchup', False))
    return (PipelineGenerator._imports + default_args + dag_definition +
            task_definitions + up_stream_statements)
Gets the airflow python spec for the Pipeline object.
def pattern_logic_srt():
    """Return the patterns used for searching srt subtitles, depending
    on which of the pattern-file / regex options are set."""
    if Config.options.pattern_files and Config.options.regex:
        # both given: build a regex from the pattern files
        return prep_regex(prep_patterns(Config.options.pattern_files))
    elif Config.options.pattern_files:
        return prep_patterns(Config.options.pattern_files)
    elif Config.options.regex:
        # NOTE(review): uses Config.REGEX here, not options.regex --
        # confirm this is intended
        return prep_regex(Config.REGEX)
    else:
        return Config.TERMS
Return patterns to be used for searching srt subtitles.
def dereference(self, session, ref, allow_none=False):
    """Dereference a pymongo DBRef to this field's underlying type.

    The ref's type is resolved from the global collection registry
    before the session performs the actual dereference.
    """
    from ommongo.document import collection_registry
    ref.type = collection_registry['global'][ref.collection]
    obj = session.dereference(ref, allow_none=allow_none)
    return obj
Dereference a pymongo "DBRef" to this field's underlying type
def _set_k8s_attribute(obj, attribute, value):
    """Set a camelCase Kubernetes *attribute* on *obj* to *value*.

    *obj* is a Kubernetes Python API client object; *attribute* is the
    JSON-style (camelCase) name, translated to the client's python
    attribute via ``obj.attribute_map``.  An existing dict value is
    deep-merged with *value*, an existing list is extended; anything
    else is replaced outright.

    :raises ValueError: when *attribute* is not in the attribute map.
    """
    current_value = None
    attribute_name = None
    # Translate the camelCase JSON name to the python attribute name.
    for python_attribute, json_attribute in obj.attribute_map.items():
        if json_attribute == attribute:
            attribute_name = python_attribute
            break
    else:
        raise ValueError('Attribute must be one of {}'.format(obj.attribute_map.values()))
    if hasattr(obj, attribute_name):
        current_value = getattr(obj, attribute_name)
    if current_value is not None:
        # Normalize the existing value to plain JSON-ish python types so
        # it can be merged with the incoming value.
        current_value = SERIALIZATION_API_CLIENT.sanitize_for_serialization(
            current_value
        )
    if isinstance(current_value, dict):
        # Deep-merge dicts rather than clobbering existing keys.
        setattr(obj, attribute_name, merge_dictionaries(current_value, value))
    elif isinstance(current_value, list):
        setattr(obj, attribute_name, current_value + value)
    else:
        setattr(obj, attribute_name, value)
Set a specific value on a kubernetes object's attribute obj an object from Kubernetes Python API client attribute Should be a Kubernetes API style attribute (with camelCase) value Can be anything (string, list, dict, k8s objects) that can be accepted by the k8s python client
def save_texts(fname:PathOrStr, texts:Collection[str]):
    "Write every string in `texts` to `fname`, one per line."
    with open(fname, 'w') as out:
        out.writelines(f'{t}\n' for t in texts)
Save in `fname` the content of `texts`.
def init_farm(farm_name):
    """Select *farm_name* as the active farm on the cloud server.

    Requires a configured cloud server and a logged-in cloud user.  If a
    different farm is already initialized the caller must deinitialize
    it first.  When a local server URL is configured, per-farm database
    replication is set up before the farm name is recorded.

    :raises click.ClickException: when another farm is already active.
    """
    utils.check_for_cloud_server()
    utils.check_for_cloud_user()
    old_farm_name = config["cloud_server"]["farm_name"]
    if old_farm_name and old_farm_name != farm_name:
        raise click.ClickException(
            "Farm \"{}\" already initialized. Run `openag cloud deinit_farm` "
            "to deinitialize it".format(old_farm_name)
        )
    if config["local_server"]["url"]:
        # A local database exists -- mirror the per-farm databases to it.
        utils.replicate_per_farm_dbs(farm_name=farm_name)
    config["cloud_server"]["farm_name"] = farm_name
Select a farm to use. This command sets up the replication between your local database and the selected cloud server if you have already initialized your local database with the `openag db init` command.
def mkdirs(filename, mode=0o777):
    """Recursively create directories up to the path of ``filename``.

    Existing directories are left untouched; nothing happens when
    ``filename`` carries no directory component.

    :param filename: path whose parent directories should exist.
    :param mode: permission bits for any directory created (subject to
        the process umask).
    """
    dirname = os.path.dirname(filename)
    if not dirname:
        # Bare filename in the current directory -- nothing to create.
        return
    # os.makedirs supports exist_ok natively since Python 3.3, so the
    # _compat shim is no longer needed here.
    os.makedirs(dirname, mode=mode, exist_ok=True)
Recursively create directories up to the path of ``filename`` as needed.
def hist(hist_function, *, options={}, **interact_params):
    """Generate an interactive histogram driven by ``hist_function``.

    ``hist_function`` is either an array of numbers or a function whose
    keyword arguments come from the ``interact_params`` widget specs and
    which returns the sample array to plot.

    NOTE(review): ``options={}`` is a mutable default; it is only read
    here (``options.get``) but confirm the helper functions it is passed
    to do not mutate it.

    :returns: a VBox with the interactive controls and the figure.
    """
    params = {
        'marks': [{
            'sample': _array_or_placeholder(hist_function),
            'bins': _get_option('bins'),
            'normalized': _get_option('normalized'),
            'scales': (
                lambda opts: {'sample': opts['x_sc'], 'count': opts['y_sc']}
            ),
        }],
    }
    # Reuse a caller-supplied figure when present, otherwise create one.
    fig = options.get('_fig', False) or _create_fig(options=options)
    [hist] = _create_marks(
        fig=fig, marks=[bq.Hist], options=options, params=params
    )
    _add_marks(fig, [hist])

    def wrapped(**interact_params):
        # Re-evaluate the sample whenever an interact control changes.
        hist.sample = util.maybe_call(hist_function, interact_params)

    controls = widgets.interactive(wrapped, **interact_params)
    return widgets.VBox([controls, fig])
Generates an interactive histogram that allows users to change the parameters of the input hist_function. Args: hist_function (Array | (*args -> Array int | Array float)): Function that takes in parameters to interact with and returns an array of numbers. These numbers will be plotted in the resulting histogram. Kwargs: {options} interact_params (dict): Keyword arguments in the same format as `ipywidgets.interact`. One argument is required for each argument of `hist_function`. Returns: VBox with two children: the interactive controls and the figure. >>> def gen_random(n_points): ... return np.random.normal(size=n_points) >>> hist(gen_random, n_points=(0, 1000, 10)) VBox(...)
def compose(self, parser: Any, grammar: Any = None, attr_of: str = None) -> str:
    """Render this condition as a string via *parser*.

    Nested ``Condition`` operands are wrapped in parentheses so operator
    precedence survives the round-trip.

    :param parser: parser instance used to compose each operand.
    :param grammar: grammar forwarded to the parser.
    :param attr_of: attribute context forwarded to the parser.
    """
    def _operand(node):
        text = parser.compose(node, grammar=grammar, attr_of=attr_of)
        return "({0})".format(text) if type(node) is Condition else text

    left = _operand(self.left)
    if not getattr(self, 'op', None):
        # Unary condition: just the left operand.
        return left
    right = _operand(self.right)
    op = parser.compose(self.op, grammar=grammar, attr_of=attr_of)
    return "{0} {1} {2}".format(left, op, right)
Return the Condition rendered as a string. :param parser: Parser instance used to compose each operand :param grammar: Grammar forwarded to the parser :param attr_of: Attribute context forwarded to the parser
def ConvBPDNMaskOptionsDefaults(method='admm'):
    """Return the defaults dict for the ConvBPDNMask solver class
    selected by ``method``, tuned for single-iteration stepping.
    """
    dflt = copy.deepcopy(cbpdnmsk_class_label_lookup(method).Options.defaults)
    dflt['MaxMainIter'] = 1
    if method == 'admm':
        dflt['AutoRho'] = {'Period': 10, 'AutoScaling': False,
                           'RsdlRatio': 10.0, 'Scaling': 2.0,
                           'RsdlTarget': 1.0}
    else:
        dflt['BackTrack'] = {'gamma_u': 1.2, 'MaxIter': 50}
    return dflt
Get defaults dict for the ConvBPDNMask class specified by the ``method`` parameter.
def moment(expr, order, central=False):
    """Calculate the ``order``-th moment of the sequence.

    :param expr: the collection expression to reduce.
    :param order: moment order; must be a non-negative integer.
    :param central: compute central (mean-subtracted) moments when True.
    :return: a reduction expression for the requested moment.
    :raises ValueError: for non-integer or negative orders.
    """
    if not isinstance(order, six.integer_types):
        raise ValueError('Only integer-ordered moments are supported.')
    if order < 0:
        raise ValueError('Only non-negative orders are supported.')
    return _reduction(expr, Moment, _stats_type(expr),
                      _order=order, _center=central)
Calculate the n-th order moment of the sequence :param expr: the collection expression to compute the moment over :param order: moment order, must be a non-negative integer :param central: if central (mean-subtracted) moments are to be computed. :return: a reduction expression for the requested moment
def cf_t(self, temp):
    """Compute the linear-interpolated temperature compensation factor.

    Table values are tabulated every ``__INTERVAL`` degrees starting at
    ``__MIN_TEMP``; exact table points are returned directly, otherwise
    the two bracketing entries are interpolated.
    """
    # Index of the table entry at or below temp.
    index = int((temp - A4TempComp.__MIN_TEMP) // A4TempComp.__INTERVAL)
    if temp % A4TempComp.__INTERVAL == 0:
        # temp sits exactly on a table point - no interpolation needed.
        return self.__values[index]
    y1 = self.__values[index]
    y2 = self.__values[index + 1]
    delta_y = y2 - y1
    # Fractional position of temp within the bracketing interval.
    delta_x = float(temp % A4TempComp.__INTERVAL) / A4TempComp.__INTERVAL
    cf_t = y1 + (delta_y * delta_x)
    return cf_t
Compute the linear-interpolated temperature compensation factor.
def get_changed_columns(self):
    """Return the names of columns whose values changed since
    instantiation or the last save."""
    changed = []
    for name, value_mgr in self._values.items():
        if value_mgr.changed:
            changed.append(name)
    return changed
Returns a list of the columns that have been updated since instantiation or save
def curl_couchdb(url, method='GET', base_url=BASE_URL, data=None):
    """Issue an HTTP request against the CouchDB instance.

    Admin credentials are attached when configured.

    :raises HTTPError: for any status other than 200/201.
    :returns: the requests response object.
    """
    username, password = get_admin()
    auth = None if username is None else (username, password)
    full_url = '{}{}'.format(base_url, url)
    if method == 'PUT':
        response = requests.put(full_url, auth=auth, data=data)
    elif method == 'DELETE':
        response = requests.delete(full_url, auth=auth)
    else:
        response = requests.get(full_url, auth=auth)
    if response.status_code not in (200, 201):
        raise HTTPError('{}: {}'.format(response.status_code, response.text))
    return response
Launch a curl on CouchDB instance
def _urls(self):
    """URL patterns wiring this tool's ``_view`` method into the site."""
    opts = self.model._meta
    url_name = '%s_%s_%s' % (opts.app_label, opts.model_name, self.name)
    return [url(r'^%s/$' % self.name, self._view, name=url_name)]
URL patterns for tool linked to _view method.
def parse_json_body(req):
    """Return the decoded JSON body from the request, or {} when absent.

    Requires a JSON content type.  An empty document yields the
    framework's ``missing`` sentinel; malformed JSON re-raises the
    decode error; a non-str/bytes body falls through to {}.
    """
    content_type = req.headers.get("Content-Type")
    if content_type and core.is_json(content_type):
        try:
            return core.parse_json(req.body)
        except TypeError:
            # Body was not a str/bytes payload -- treat as no body.
            pass
        except json.JSONDecodeError as e:
            if e.doc == "":
                # Distinguish "empty body" from "bad JSON".
                return core.missing
            else:
                raise
    return {}
Return the decoded JSON body from the request.
def run_in_subprocess(code, filename_suffix, arguments, working_directory):
    """Run *code* through an external command in a subprocess.

    *code* is written to a temporary file (named with *filename_suffix*)
    and ``arguments + [tempfile]`` is launched asynchronously in
    *working_directory*.  Returns a ``run`` callable that waits for the
    process: ``run()`` returns None on success, or a tuple of
    (decoded stderr, temporary file name) on a non-zero exit.
    """
    temporary_file = tempfile.NamedTemporaryFile(mode='wb', suffix=filename_suffix)
    temporary_file.write(code.encode('utf-8'))
    temporary_file.flush()
    process = subprocess.Popen(arguments + [temporary_file.name],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               cwd=working_directory)

    def run():
        # Blocks until the subprocess finishes.  The closure keeps
        # `temporary_file` alive (and thus on disk) until after the run.
        raw_result = process.communicate()
        if process.returncode != 0:
            return (raw_result[1].decode(get_encoding()), temporary_file.name)

    return run
Write *code* to a temporary file and start running it through the given command in a subprocess; return a callable that waits for completion. The callable returns None on success, or a tuple of (decoded stderr, temporary file name) on a non-zero exit.
def _FetchLinuxFlags(self):
    """Fetch Linux extended file flags via the FS_IOC_GETFLAGS ioctl.

    Returns 0 on non-Linux platforms, for symlinks, and on any failure
    to open or query the file.
    """
    if platform.system() != "Linux":
        return 0
    # Symlinks cannot carry these flags (and opening one would follow it).
    if self.IsSymlink():
        return 0
    try:
        fd = os.open(self._path, os.O_RDONLY)
    except (IOError, OSError):
        return 0
    try:
        # fcntl is Unix-only, hence the late import.
        import fcntl
        buf = array.array(compatibility.NativeStr("l"), [0])
        fcntl.ioctl(fd, self.FS_IOC_GETFLAGS, buf)
        return buf[0]
    except (IOError, OSError):
        # Filesystem without flag support, or ioctl failure.
        return 0
    finally:
        os.close(fd)
Fetches Linux extended file flags.
def create_host(self, host_id, name, ipaddr, rack_id = None):
    """Create a host by delegating to the hosts API.

    :param host_id: The host id.
    :param name: Host name.
    :param ipaddr: IP address.
    :param rack_id: Rack id, defaults to None.
    :return: whatever ``hosts.create_host`` returns for the new host.
    """
    return hosts.create_host(self, host_id, name, ipaddr, rack_id)
Create a host. @param host_id: The host id. @param name: Host name @param ipaddr: IP address @param rack_id: Rack id. Default None. @return: An ApiHost object
def wait_for_postgres(database, host, port, username, password):
    """Block until the PostgreSQL server accepts a connection.

    Retries once per second, indefinitely, until a connection (with a
    3-second connect timeout) succeeds; the probe connection is closed
    immediately.

    :param database: database name
    :param host: database host
    :param port: database port (int, str, or None)
    :param username: login user
    :param password: login password
    """
    connecting_string = 'Checking for PostgreSQL...'
    if port is not None:
        # psycopg2 expects a numeric port.
        port = int(port)
    while True:
        try:
            logger.info(connecting_string)
            connection = psycopg2.connect(
                database=database,
                host=host,
                port=port,
                user=username,
                password=password,
                connect_timeout=3
            )
            connection.close()
            logger.info('PostgreSQL is running!')
            break
        except psycopg2.OperationalError:
            # Server not ready yet -- wait a moment and retry forever.
            time.sleep(1)
Waits for PostgreSQL database to be up Args: database (Optional[str]): Database name host (Optional[str]): Host where database is located port (Union[int, str, None]): Database port username (Optional[str]): Username to log into database password (Optional[str]): Password to log into database Returns: None
def set_state(self, newState, timer=0):
    """Transition this state machine to *newState*.

    The current state timer is always stopped before the transition; a
    new timer is started when *timer* is non-zero.

    :raises RuntimeError: when called from a terminal state
        (COMPLETED or ABORTED).
    """
    if _debug: SSM._debug("set_state %r (%s) timer=%r", newState, SSM.transactionLabels[newState], timer)
    # Terminal states admit no further transitions.
    if (self.state == COMPLETED) or (self.state == ABORTED):
        e = RuntimeError("invalid state transition from %s to %s" % (SSM.transactionLabels[self.state], SSM.transactionLabels[newState]))
        SSM._exception(e)
        raise e
    # Cancel any timer tied to the state being left.
    self.stop_timer()
    self.state = newState
    if timer:
        self.start_timer(timer)
This function is called when the derived class wants to change state.
def merge(self, other):
    """Recursively merge tags from another compound.

    Nested compound/dict pairs are merged in place; any other value from
    *other* overwrites (or creates) the entry.
    """
    for key, value in other.items():
        nested_merge = (key in self
                        and isinstance(self[key], Compound)
                        and isinstance(value, dict))
        if nested_merge:
            self[key].merge(value)
        else:
            self[key] = value
Recursively merge tags from another compound.
def _resolve_categorical(self):
    """Return the specific categorical member of DIMENSION_TYPE.

    Distinguishes CAT, CA_CAT, MR_CAT and LOGICAL for dimensions whose
    base type is 'categorical': array-typed categoricals resolve to
    MR_CAT/CA_CAT and plain ones to LOGICAL/CAT, depending on whether a
    selected category is present.
    """
    if self._is_array_cat:
        return DT.MR_CAT if self._has_selected_category else DT.CA_CAT
    if self._has_selected_category:
        return DT.LOGICAL
    return DT.CAT
Return one of the categorical members of DIMENSION_TYPE. This method distinguishes between CAT, CA_CAT, MR_CAT, and LOGICAL dimension types, all of which have the base type 'categorical'. The return value is only meaningful if the dimension is known to be one of the categorical types (has base-type 'categorical').
def estimate_rotation(self, camera, ransac_threshold=7.0):
    """Estimate the rotation between the first and last frame.

    Runs a RANSAC Procrustes fit (rotation only, no translation) between
    the tracked points of the first and last frames; on success stores
    the axis/angle (angle normalized to be non-negative) and the inlier
    indices.  A previously computed axis is kept as-is.

    :param camera: CameraModel used for reprojection error.
    :param ransac_threshold: reprojection-error inlier threshold (pixels).
    :return: True when a rotation is available after the call.
    """
    if self.axis is None:
        # Assumes self.points is (num_tracks, num_frames, 2) -- TODO
        # confirm; first/last frame observations, transposed to 2xN.
        x = self.points[:, 0, :].T
        y = self.points[:, -1, :].T
        inlier_ratio = 0.5
        R, t, dist, idx = rotations.estimate_rotation_procrustes_ransac(x, y, camera, ransac_threshold, inlier_ratio=inlier_ratio, do_translation=False)
        if R is not None:
            self.axis, self.angle = rotations.rotation_matrix_to_axis_angle(R)
            if self.angle < 0:
                # Normalize to a positive angle by flipping the axis.
                self.angle = -self.angle
                self.axis = -self.axis
            self.inliers = idx
    return self.axis is not None
Estimate the rotation between first and last frame It uses RANSAC where the error metric is the reprojection error of the points from the last frame to the first frame. Parameters ----------------- camera : CameraModel Camera model ransac_threshold : float Distance threshold (in pixels) for a reprojected point to count as an inlier
def enable_cell_picking(self, mesh=None, callback=None):
    """Enable rubber-band cell picking on the render window.

    Press "r" to toggle rectangle selection.  Picked cells are stored in
    ``self.picked_cells`` and, when given, *callback* is invoked with
    them after each selection.

    :param mesh: dataset to pick cells from; defaults to the plotter's
        last input mesh (``self.mesh``).
    :param callback: optional function called with the picked cells.
    :raises Exception: when no mesh is available.
    """
    if mesh is None:
        if not hasattr(self, 'mesh'):
            raise Exception('Input a mesh into the Plotter class first or ' +
                            'or set it in this function')
        mesh = self.mesh

    def pick_call_back(picker, event_id):
        # Extract the cells inside the picked frustum, tagging each cell
        # with its original id for traceability.
        extract = vtk.vtkExtractGeometry()
        mesh.cell_arrays['orig_extract_id'] = np.arange(mesh.n_cells)
        extract.SetInputData(mesh)
        extract.SetImplicitFunction(picker.GetFrustum())
        extract.Update()
        self.picked_cells = vtki.wrap(extract.GetOutput())
        if callback is not None:
            callback(self.picked_cells)

    area_picker = vtk.vtkAreaPicker()
    area_picker.AddObserver(vtk.vtkCommand.EndPickEvent, pick_call_back)
    self.enable_rubber_band_style()
    self.iren.SetPicker(area_picker)
Enables picking of cells. Press r to enable retangle based selection. Press "r" again to turn it off. Selection will be saved to self.picked_cells. Uses last input mesh for input Parameters ---------- mesh : vtk.UnstructuredGrid, optional UnstructuredGrid grid to select cells from. Uses last input grid by default. callback : function, optional When input, calls this function after a selection is made. The picked_cells are input as the first parameter to this function.
def value(self, t):
    """Linearly interpolate from ``initial_p`` to ``final_p`` over
    ``schedule_timesteps`` steps, clamping at ``final_p`` afterwards.

    See Schedule.value.
    """
    steps = max(1, self.schedule_timesteps)
    frac = min(float(t) / steps, 1.0)
    return self.initial_p + frac * (self.final_p - self.initial_p)
See Schedule.value
def cuid(self):
    """Generate a full-length cuid as a string.

    Layout: 'c' + base36 millisecond timestamp + padded base36 counter
    + host fingerprint + two random blocks.

    NOTE(review): ``self.counter`` is read but not incremented here --
    confirm the counter is advanced elsewhere, otherwise consecutive
    calls within the same millisecond differ only in the random blocks.
    """
    identifier = "c"
    millis = int(time.time() * 1000)
    identifier += _to_base36(millis)
    count = _pad(_to_base36(self.counter), BLOCK_SIZE)
    identifier += count
    identifier += self.fingerprint
    identifier += _random_block()
    identifier += _random_block()
    return identifier
Generate a full-length cuid as a string.
def ts_to_dt(jwt_dict):
    """Return a copy of *jwt_dict* with timestamp claims converted to
    datetime objects.

    Only the claims flagged as timestamps in CLAIM_LIST are converted;
    all other entries are copied unchanged.
    """
    result = jwt_dict.copy()
    # CLAIM_LIST entries are (claim_name, ..., is_timestamp) tuples.
    timestamp_claims = [entry[0] for entry in CLAIM_LIST if entry[2]]
    for claim_name in timestamp_claims:
        if claim_name in jwt_dict:
            result[claim_name] = d1_common.date_time.dt_from_ts(jwt_dict[claim_name])
    return result
Convert timestamps in JWT to datetime objects. Args: jwt_dict: dict JWT with some keys containing timestamps. Returns: dict: Copy of input dict where timestamps have been replaced with datetime.datetime() objects.
def _list_object_parts(self, bucket_name, object_name, upload_id):
    """Yield all uploaded parts of an in-progress multipart upload.

    Pages through the ListParts API (1000 parts per request) until the
    listing is no longer truncated.

    :param bucket_name: bucket of the multipart upload.
    :param object_name: object key of the multipart upload.
    :param upload_id: id of the previously initiated upload.
    """
    is_valid_bucket_name(bucket_name)
    is_non_empty_string(object_name)
    is_non_empty_string(upload_id)
    query = {
        'uploadId': upload_id,
        'max-parts': '1000'
    }
    is_truncated = True
    part_number_marker = ''
    while is_truncated:
        if part_number_marker:
            # Resume the listing after the last part already returned.
            query['part-number-marker'] = str(part_number_marker)
        response = self._url_open('GET',
                                  bucket_name=bucket_name,
                                  object_name=object_name,
                                  query=query)
        parts, is_truncated, part_number_marker = parse_list_parts(
            response.data,
            bucket_name=bucket_name,
            object_name=object_name,
            upload_id=upload_id
        )
        for part in parts:
            yield part
List all parts. :param bucket_name: Bucket name to list parts for. :param object_name: Object name to list parts for. :param upload_id: Upload id of the previously uploaded object name.
def population_analysis_summary_report(feature, parent):
    """Retrieve the HTML population analysis report from a multi-exposure
    analysis, or None when no analysis directory is available.
    """
    _ = feature, parent  # unused; required by the expression-function signature
    analysis_dir = get_analysis_dir(exposure_population['key'])
    if not analysis_dir:
        return None
    return get_impact_report_as_string(analysis_dir)
Retrieve an HTML population analysis table report from a multi exposure analysis.
def getRvaFromOffset(self, offset):
    """Convert a file offset into an RVA using its containing section.

    :param offset: file offset to convert.
    :return: the RVA, or -1 when no section contains the offset.
    """
    section = self.getSectionByOffset(offset)
    # NOTE(review): a falsy section key (e.g. index 0) is treated as
    # "not found", matching the original behaviour -- confirm
    # getSectionByOffset never returns 0 for a valid section.
    if not section:
        return -1
    header = self.sectionHeaders[section]
    return (offset - header.pointerToRawData.value) + header.virtualAddress.value
Converts an offset to an RVA. @type offset: int @param offset: The offset value to be converted to RVA. @rtype: int @return: The RVA obtained from the given offset, or -1 if no section contains it.
def _ring_2d(m, n): if m == 1: return [(0, i) for i in range(n)] if n == 1: return [(i, 0) for i in range(m)] if m % 2 != 0: tf.logging.warning("Odd dimension") return [(i % m, i // m) for i in range(n * m)] ret = [(0, 0)] for i in range(m // 2): for j in range(1, n): ret.append((2 * i, j)) for j in range(n-1, 0, -1): ret.append((2 * i + 1, j)) for i in range(m-1, 0, -1): ret.append((i, 0)) return ret
Ring-order of a mxn mesh. Args: m: an integer n: an integer Returns: a list of mxn pairs
def translate_file(self, root, file_name, target_language):
    """Translate a pot/po file in place and save it.

    Loads ``root/file_name`` with polib, machine-translates its English
    strings into *target_language*, writes the translations back into
    the catalog and saves the file.

    :param root: absolute path of the folder containing the file.
    :param file_name: name of the file to translate (a pot file).
    :param target_language: locale code to translate into.
    """
    logger.info('filling up translations for locale `{}`'.format(target_language))
    po = polib.pofile(os.path.join(root, file_name))
    strings = self.get_strings_to_translate(po)
    tl = get_translator()
    # Source language is 'en'; NOTE(review): confirm the meaning of the
    # final False flag on translate_strings.
    translated_strings = tl.translate_strings(strings, target_language, 'en', False)
    self.update_translations(po, translated_strings)
    po.save()
convenience method for translating a pot file :param root: the absolute path of folder where the file is present :param file_name: name of the file to be translated (it should be a pot file) :param target_language: language in which the file needs to be translated
def list_containers(self, page_size=None):
    """Iterate over the containers visible to this client.

    Containers are returned in lexicographical order.

    :param page_size: optional server-side page size ('limit' param).
    :rtype: :class:`.Container` iterator
    """
    query = {}
    if page_size is not None:
        query['limit'] = page_size
    return pagination.Iterator(
        client=self._client,
        path='/mdb/{}/containers'.format(self._instance),
        params=query,
        response_class=mdb_pb2.ListContainersResponse,
        items_key='container',
        item_mapper=Container,
    )
Lists the containers visible to this client. Containers are returned in lexicographical order. :rtype: :class:`.Container` iterator