Dataset columns:

_id               string, length 2-7
title             string, length 1-88
partition         string, 3 classes
text              string, length 31-13.1k
language          string, 1 class
meta_information  dict
q265000
_get_column_nums_from_args
validation
def _get_column_nums_from_args(columns): """Turn column inputs from user into list of simple numbers. Inputs can be: - individual number: 1 - range: 1-3 - comma separated list: 1,2,3,4-6 """ nums = [] for c in columns: for p in c.split(','): p = p.strip() try: c = int(p) nums.append(c) except (TypeError, ValueError): start, ignore, end = p.partition('-') try: start = int(start) end = int(end) except (TypeError, ValueError): raise ValueError(
python
{ "resource": "" }
q265001
_get_printable_columns
validation
def _get_printable_columns(columns, row): """Return only the part of the row which should be printed. """ if not columns:
python
{ "resource": "" }
q265002
VisualFormatWriter.writerow
validation
def writerow(self, observation_data): """ Writes a single observation to the output file. If the ``observation_data`` parameter is a dictionary, it is converted to a list to keep a consistent field order (as described in the format specification). Otherwise it is assumed that the data is a raw record ready to be written to the file. :param
python
{ "resource": "" }
q265003
VisualFormatWriter.dict_to_row
validation
def dict_to_row(cls, observation_data): """ Takes a dictionary of observation data and converts it to a list of fields according to the AAVSO visual format specification. :param cls: current class :param observation_data: a single observation as a dictionary """ row = [] row.append(observation_data['name']) row.append(observation_data['date']) row.append(observation_data['magnitude']) comment_code = observation_data.get('comment_code', 'na') if not comment_code: comment_code = 'na' row.append(comment_code) comp1 = observation_data.get('comp1', 'na') if not comp1:
python
{ "resource": "" }
q265004
VisualFormatReader.row_to_dict
validation
def row_to_dict(cls, row): """ Converts a raw input record to a dictionary of observation data. :param cls: current class :param row: a single observation as a list or tuple """ comment_code = row[3] if comment_code.lower() == 'na': comment_code = '' comp1 = row[4] if comp1.lower() == 'na':
python
{ "resource": "" }
q265005
get_default_tag
validation
def get_default_tag(app): '''Get the name of the view function used to prevent having to set the tag
python
{ "resource": "" }
q265006
download_observations
validation
def download_observations(observer_code): """ Downloads all variable star observations by a given observer. Performs a series of HTTP requests to AAVSO's WebObs search and downloads the results page by page. Each page is then passed to :py:class:`~pyaavso.parsers.webobs.WebObsResultsParser` and the parsed results are added to the final observation list. """ page_number = 1 observations = [] while True: logger.info('Downloading page %d...', page_number) response = requests.get(WEBOBS_RESULTS_URL, params={ 'obscode': observer_code, 'num_results': 200, 'obs_types':
python
{ "resource": "" }
q265007
image_path
validation
def image_path(instance, filename): """Generates a likely-unique image path using MD5 hashes""" filename, ext = os.path.splitext(filename.lower()) instance_id_hash = hashlib.md5(str(instance.id)).hexdigest()
python
{ "resource": "" }
q265008
process_lander_page
validation
async def process_lander_page(session, github_api_token, ltd_product_data, mongo_collection=None): """Extract, transform, and load metadata from Lander-based projects. Parameters ---------- session : `aiohttp.ClientSession` Your application's aiohttp client session. See http://aiohttp.readthedocs.io/en/stable/client.html. github_api_token : `str` A GitHub personal API token. See the `GitHub personal access token guide`_. ltd_product_data : `dict` Data for this technote from the LTD Keeper API (``GET /products/<slug>``). Usually obtained via `lsstprojectmeta.ltd.get_ltd_product`. mongo_collection : `motor.motor_asyncio.AsyncIOMotorCollection`, optional MongoDB collection. This should be the common MongoDB collection for LSST projectmeta JSON-LD records. If provided, this JSON-LD is upserted into the MongoDB collection. Returns ------- metadata : `dict` JSON-LD-formatted dictionary. Raises ------ NotLanderPageError Raised when the LTD product cannot be interpreted as a Lander page because the ``/metadata.jsonld`` file is absent. This implies that the LTD product *could* be of a different format. .. _`GitHub personal access token guide`: https://ls.st/41d """
python
{ "resource": "" }
q265009
_upload_to_mongodb
validation
async def _upload_to_mongodb(collection, jsonld): """Upsert the technote resource into the projectmeta MongoDB collection. Parameters ---------- collection : `motor.motor_asyncio.AsyncIOMotorCollection` The MongoDB collection. jsonld : `dict` The JSON-LD document that represents the document resource.
python
{ "resource": "" }
q265010
json_doc_to_xml
validation
def json_doc_to_xml(json_obj, lang='en', custom_namespace=None): """Converts an Open511 JSON document to XML. lang: the appropriate language code Takes a dict deserialized from JSON, returns an lxml Element. Accepts only the full root-level JSON object from an Open511 response.""" if 'meta' not in json_obj: raise Exception("This function requires a conforming Open511 JSON document with a 'meta' section.") json_obj = dict(json_obj) meta = json_obj.pop('meta') elem = get_base_open511_element(lang=lang, version=meta.pop('version'))
python
{ "resource": "" }
q265011
json_struct_to_xml
validation
def json_struct_to_xml(json_obj, root, custom_namespace=None): """Converts an Open511 JSON fragment to XML. Takes a dict deserialized from JSON, returns an lxml Element. This won't provide a conforming document if you pass in a full JSON document; it's for translating little fragments, and is mostly used internally.""" if isinstance(root, (str, unicode)): if root.startswith('!'): root = etree.Element('{%s}%s' % (NS_PROTECTED, root[1:])) elif root.startswith('+'): if not custom_namespace: raise Exception("JSON field starts with '+', but no custom namespace provided") root = etree.Element('{%s}%s' % (custom_namespace, root[1:])) else: root = etree.Element(root) if root.tag in ('attachments', 'grouped_events', 'media_files'): for link in json_obj: root.append(json_link_to_xml(link)) elif isinstance(json_obj, (str, unicode)): root.text = json_obj elif isinstance(json_obj, (int, float)): root.text = unicode(json_obj) elif isinstance(json_obj, dict): if frozenset(json_obj.keys()) == frozenset(('type', 'coordinates')): root.append(geojson_to_gml(json_obj)) else: for key, val in json_obj.items(): if key == 'url' or key.endswith('_url'):
python
{ "resource": "" }
q265012
geojson_to_gml
validation
def geojson_to_gml(gj, set_srs=True): """Given a dict deserialized from a GeoJSON object, returns an lxml Element of the corresponding GML geometry.""" tag = G(gj['type']) if set_srs: tag.set('srsName', 'urn:ogc:def:crs:EPSG::4326') if gj['type'] == 'Point': tag.append(G.pos(_reverse_geojson_coords(gj['coordinates']))) elif gj['type'] == 'LineString': tag.append(G.posList(' '.join(_reverse_geojson_coords(ll) for ll in gj['coordinates']))) elif gj['type'] == 'Polygon': rings = [ G.LinearRing( G.posList(' '.join(_reverse_geojson_coords(ll) for ll in ring)) ) for ring in gj['coordinates']
python
{ "resource": "" }
q265013
geom_to_xml_element
validation
def geom_to_xml_element(geom): """Transform a GEOS or OGR geometry object into an lxml Element for the GML geometry.""" if geom.srs.srid != 4326: raise NotImplementedError("Only WGS 84 lat/long geometries (SRID 4326) are
python
{ "resource": "" }
q265014
remove_comments
validation
def remove_comments(tex_source): """Delete LaTeX comments from TeX source. Parameters ---------- tex_source : str TeX source content. Returns ------- tex_source : str TeX source without comments. """
python
{ "resource": "" }
q265015
replace_macros
validation
def replace_macros(tex_source, macros): r"""Replace macros in the TeX source with their content. Parameters ---------- tex_source : `str` TeX source content. macros : `dict` Keys are macro names (including leading ``\``) and values are the content (as `str`) of the macros. See `lsstprojectmeta.tex.scraper.get_macros`. Returns ------- tex_source : `str` TeX source with known macros replaced. Notes ----- Macros with arguments are not supported. Examples -------- >>> macros = {r'\handle': 'LDM-nnn'} >>> sample = r'This is document \handle.' >>> replace_macros(sample, macros) 'This is document LDM-nnn.' Any trailing slash after the macro command is also replaced by this function. >>> macros = {r'\product': 'Data Management'} >>>
python
{ "resource": "" }
q265016
ensure_format
validation
def ensure_format(doc, format): """ Ensures that the provided document is an lxml Element or json dict. """ assert format in ('xml', 'json') if getattr(doc, 'tag', None) == 'open511': if format == 'json': return xml_to_json(doc) elif isinstance(doc, dict) and
python
{ "resource": "" }
q265017
open511_convert
validation
def open511_convert(input_doc, output_format, serialize=True, **kwargs): """ Convert an Open511 document between formats. input_doc - either an lxml open511 Element or a deserialized JSON dict output_format - short string name of a valid output format, as listed above """ try: output_format_info = FORMATS[output_format] except KeyError:
python
{ "resource": "" }
q265018
LsstLatexDoc.read
validation
def read(cls, root_tex_path): """Construct an `LsstLatexDoc` instance by reading and parsing the LaTeX source. Parameters ---------- root_tex_path : `str` Path to the LaTeX source on the filesystem. For multi-file LaTeX projects this should be the path to the root document. Notes ----- This method implements the following pipeline:
python
{ "resource": "" }
q265019
LsstLatexDoc.format_content
validation
def format_content(self, format='plain', mathjax=False, smart=True, extra_args=None): """Get the document content in the specified markup format. Parameters ---------- format : `str`, optional Output format (such as ``'html5'`` or ``'plain'``). mathjax : `bool`, optional Allow pandoc to use MathJax math markup. smart : `bool`, optional Allow pandoc to create "smart" unicode punctuation. extra_args : `list`, optional Additional command line flags to pass to Pandoc. See
python
{ "resource": "" }
q265020
LsstLatexDoc.format_title
validation
def format_title(self, format='html5', deparagraph=True, mathjax=False, smart=True, extra_args=None): """Get the document title in the specified markup format. Parameters ---------- format : `str`, optional Output format (such as ``'html5'`` or ``'plain'``). deparagraph : `bool`, optional Remove the paragraph tags from single paragraph content. mathjax : `bool`, optional Allow pandoc to use MathJax math markup. smart : `bool`, optional Allow pandoc to create "smart" unicode punctuation. extra_args : `list`, optional Additional command line flags to pass to Pandoc. See
python
{ "resource": "" }
q265021
LsstLatexDoc.format_short_title
validation
def format_short_title(self, format='html5', deparagraph=True, mathjax=False, smart=True, extra_args=None): """Get the document short title in the specified markup format. Parameters ---------- format : `str`, optional Output format (such as ``'html5'`` or ``'plain'``). deparagraph : `bool`, optional Remove the paragraph tags from single paragraph content. mathjax : `bool`, optional Allow pandoc to use MathJax math markup. smart : `bool`, optional Allow pandoc to create "smart" unicode punctuation. extra_args : `list`, optional Additional command line flags to pass to Pandoc. See
python
{ "resource": "" }
q265022
LsstLatexDoc.format_abstract
validation
def format_abstract(self, format='html5', deparagraph=False, mathjax=False, smart=True, extra_args=None): """Get the document abstract in the specified markup format. Parameters ---------- format : `str`, optional Output format (such as ``'html5'`` or ``'plain'``). deparagraph : `bool`, optional Remove the paragraph tags from single paragraph content. mathjax : `bool`, optional Allow pandoc to use MathJax math markup.
python
{ "resource": "" }
q265023
LsstLatexDoc.format_authors
validation
def format_authors(self, format='html5', deparagraph=True, mathjax=False, smart=True, extra_args=None): """Get the document authors in the specified markup format. Parameters ---------- format : `str`, optional Output format (such as ``'html5'`` or ``'plain'``). deparagraph : `bool`, optional Remove the paragraph tags from single paragraph content. mathjax : `bool`, optional Allow pandoc to use MathJax math markup. smart : `bool`, optional Allow pandoc to create "smart" unicode punctuation.
python
{ "resource": "" }
q265024
LsstLatexDoc._parse_documentclass
validation
def _parse_documentclass(self): """Parse documentclass options. Sets the ``_document_options`` attribute. """ command = LatexCommand( 'documentclass', {'name': 'options', 'required': False, 'bracket': '['}, {'name': 'class_name', 'required': True, 'bracket': '{'}) try: parsed = next(command.parse(self._tex)) except StopIteration: self._logger.warning('lsstdoc has no documentclass') self._document_options = [] try:
python
{ "resource": "" }
q265025
LsstLatexDoc._parse_title
validation
def _parse_title(self): """Parse the title from TeX source. Sets these attributes: - ``_title`` - ``_short_title`` """ command = LatexCommand( 'title', {'name': 'short_title', 'required': False, 'bracket': '['}, {'name': 'long_title', 'required': True, 'bracket': '{'}) try: parsed = next(command.parse(self._tex)) except StopIteration: self._logger.warning('lsstdoc has no title') self._title = None
python
{ "resource": "" }
q265026
LsstLatexDoc._parse_doc_ref
validation
def _parse_doc_ref(self): """Parse the document handle. Sets the ``_series``, ``_serial``, and ``_handle`` attributes. """ command = LatexCommand( 'setDocRef', {'name': 'handle', 'required': True, 'bracket': '{'}) try: parsed = next(command.parse(self._tex)) except StopIteration: self._logger.warning('lsstdoc has no setDocRef') self._handle = None self._series = None self._serial = None
python
{ "resource": "" }
q265027
LsstLatexDoc._parse_author
validation
def _parse_author(self): r"""Parse the author from TeX source. Sets the ``_authors`` attribute. Goal is to parse:: \author{ A.~Author, B.~Author, and C.~Author} Into:: ['A. Author', 'B. Author', 'C. Author'] """ command = LatexCommand( 'author', {'name': 'authors', 'required': True, 'bracket': '{'}) try: parsed = next(command.parse(self._tex)) except StopIteration: self._logger.warning('lsstdoc has no author') self._authors = [] return try: content = parsed['authors'] except KeyError: self._logger.warning('lsstdoc has no author') self._authors = [] return # Clean content content = content.replace('\n', '
python
{ "resource": "" }
q265028
LsstLatexDoc._parse_abstract
validation
def _parse_abstract(self): """Parse the abstract from the TeX source. Sets the ``_abstract`` attribute. """ command = LatexCommand( 'setDocAbstract', {'name': 'abstract', 'required': True, 'bracket': '{'}) try: parsed = next(command.parse(self._tex)) except StopIteration: self._logger.warning('lsstdoc has no abstract') self._abstract = None return try:
python
{ "resource": "" }
q265029
LsstLatexDoc._prep_snippet_for_pandoc
validation
def _prep_snippet_for_pandoc(self, latex_text): """Process a LaTeX snippet of content for better transformation with pandoc. Currently runs the CitationLinker to convert BibTeX citations to href links. """
python
{ "resource": "" }
q265030
LsstLatexDoc._load_bib_db
validation
def _load_bib_db(self): r"""Load the BibTeX bibliography referenced by the document. This method is triggered by the `bib_db` attribute and populates the `_bib_db` private attribute. The ``\bibliography`` command is parsed to identify the bibliographies referenced by the document. """ # Get the names of custom bibtex files by parsing the # \bibliography command and filtering out the default lsstdoc # bibliographies. command = LatexCommand( 'bibliography', {'name': 'bib_names', 'required': True, 'bracket': '{'}) try: parsed = next(command.parse(self._tex)) bib_names = [n.strip() for n in parsed['bib_names'].split(',')] except StopIteration: self._logger.warning('lsstdoc has no bibliography command') bib_names = [] custom_bib_names = [n for n in bib_names if n not in KNOWN_LSSTTEXMF_BIB_NAMES] # Read custom bibliographies. custom_bibs = [] for custom_bib_name in custom_bib_names: custom_bib_path = os.path.join(
python
{ "resource": "" }
q265031
LsstLatexDoc._parse_revision_date
validation
def _parse_revision_date(self): r"""Parse the ``\date`` command, falling back to getting the most recent Git commit date and the current datetime. Result is available from the `revision_datetime` attribute. """ doc_datetime = None # First try to parse the \date command in the latex. # \date is ignored for draft documents. if not self.is_draft: date_command = LatexCommand( 'date', {'name': 'content', 'required': True, 'bracket': '{'}) try: parsed = next(date_command.parse(self._tex)) command_content = parsed['content'].strip() except StopIteration: command_content = None self._logger.warning('lsstdoc has no date command') # Try to parse a date from the \date command if command_content is not None and command_content != r'\today': try: doc_datetime = datetime.datetime.strptime(command_content, '%Y-%m-%d') # Assume LSST project time (Pacific) project_tz = timezone('US/Pacific') localized_datetime = project_tz.localize(doc_datetime) # Normalize to UTC doc_datetime = localized_datetime.astimezone(pytz.utc) self._revision_datetime_source = 'tex' except ValueError: self._logger.warning('Could not parse a datetime from '
python
{ "resource": "" }
q265032
LsstLatexDoc.build_jsonld
validation
def build_jsonld(self, url=None, code_url=None, ci_url=None, readme_url=None, license_id=None): """Create a JSON-LD representation of this LSST LaTeX document. Parameters ---------- url : `str`, optional URL where this document is published to the web. Prefer the LSST the Docs URL if possible. Example: ``'https://ldm-151.lsst.io'``. code_url : `str`, optional Path to the document's repository, typically on GitHub. Example: ``'https://github.com/lsst/LDM-151'``. ci_url : `str`, optional Path to the continuous integration service dashboard for this document's repository. Example: ``'https://travis-ci.org/lsst/LDM-151'``. readme_url : `str`, optional URL to the document repository's README file. Example: ``https://raw.githubusercontent.com/lsst/LDM-151/master/README.rst``. license_id : `str`, optional License identifier, if known. The identifier should be from the listing at https://spdx.org/licenses/. Example: ``CC-BY-4.0``. Returns ------- jsonld : `dict` JSON-LD-formatted dictionary. """ jsonld = { '@context': [ "https://raw.githubusercontent.com/codemeta/codemeta/2.0-rc/" "codemeta.jsonld", "http://schema.org"], '@type': ['Report', 'SoftwareSourceCode'], 'language': 'TeX', 'reportNumber': self.handle, 'name': self.plain_title, 'description': self.plain_abstract, 'author': [{'@type': 'Person', 'name': author_name} for author_name in self.plain_authors], # This is a datetime.datetime, not a string. If writing to a file, # it needs to be converted to an ISO 8601 string first. 'dateModified': self.revision_datetime } try: jsonld['articleBody'] = self.plain_content
python
{ "resource": "" }
q265033
PostgresDB.rename
validation
def rename(self, from_name, to_name): """Renames an existing database.""" log.info('renaming database
python
{ "resource": "" }
q265034
PostgresDB.available
validation
def available(self, timeout=5): """Returns True if database server is running, False otherwise.""" host = self._connect_args['host'] port = self._connect_args['port'] try:
python
{ "resource": "" }
q265035
PostgresDB.dump
validation
def dump(self, name, filename): """ Saves the state of a database to a file. Parameters ---------- name: str the database to be backed up. filename: str path to a file where database backup will be written. """ if not self.exists(name):
python
{ "resource": "" }
q265036
PostgresDB.restore
validation
def restore(self, name, filename): """ Loads the state of a backup file into a database. Note ---- If the database name does not exist, it will be created. Parameters ---------- name: str the database to which the backup will be restored. filename: str path to a file containing a postgres database backup. """ if not self.exists(name): self.create(name)
python
{ "resource": "" }
q265037
PostgresDB.connection_dsn
validation
def connection_dsn(self, name=None): """ Provides a connection string for database. Parameters ---------- name: str, optional an override database name for the connection string. Returns ------- str: the connection
python
{ "resource": "" }
q265038
PostgresDB.connection_url
validation
def connection_url(self, name=None): """ Provides a connection string for database as a sqlalchemy compatible URL. NB - this doesn't include special arguments related to SSL connectivity (which are outside the scope of the connection URL format). Parameters ---------- name: str, optional an override database name for the connection string. Returns -------
python
{ "resource": "" }
q265039
PostgresDB.shell
validation
def shell(self, expect=pexpect): """ Connects the database client shell to the database. Parameters ---------- expect: module, optional a pexpect-compatible module used to drive the interactive client (default: pexpect). """ dsn = self.connection_dsn() log.debug('connection string: %s' % dsn)
python
{ "resource": "" }
q265040
PostgresDB.settings
validation
def settings(self): """Returns settings from the server.""" stmt = "select {fields} from pg_settings".format(fields=', '.join(SETTINGS_FIELDS)) settings = [] for row in self._iter_results(stmt): row['setting']
python
{ "resource": "" }
q265041
Food.breakfast
validation
def breakfast(self, message="Breakfast is ready", shout: bool = False): """Say
python
{ "resource": "" }
q265042
Food.lunch
validation
def lunch(self, message="Time for lunch", shout: bool = False):
python
{ "resource": "" }
q265043
Food.dinner
validation
def dinner(self, message="Dinner is served", shout: bool = False):
python
{ "resource": "" }
q265044
main
validation
def main(): """Command line entrypoint to reduce technote metadata. """ parser = argparse.ArgumentParser( description='Discover and ingest metadata from document sources, ' 'including lsstdoc-based LaTeX documents and ' 'reStructuredText-based technotes. Metadata can be ' 'upserted into the LSST Projectmeta MongoDB.') parser.add_argument( '--ltd-product', dest='ltd_product_url', help='URL of an LSST the Docs product ' '(https://keeper.lsst.codes/products/<slug>). If provided, ' 'only this document will be ingested.') parser.add_argument( '--github-token', help='GitHub personal access token.') parser.add_argument( '--mongodb-uri', help='MongoDB connection URI. If provided, metadata will be loaded ' 'into the Projectmeta database. Omit this argument to just ' 'test the ingest pipeline.') parser.add_argument( '--mongodb-db', default='lsstprojectmeta', help='Name of MongoDB database') parser.add_argument( '--mongodb-collection', default='resources', help='Name of the MongoDB collection for projectmeta resources') args = parser.parse_args() # Configure the root logger stream_handler = logging.StreamHandler() stream_formatter = logging.Formatter( '%(asctime)s %(levelname)8s %(name)s | %(message)s') stream_handler.setFormatter(stream_formatter) root_logger = logging.getLogger() root_logger.addHandler(stream_handler) root_logger.setLevel(logging.WARNING) # Configure
python
{ "resource": "" }
q265045
process_ltd_doc_products
validation
async def process_ltd_doc_products(session, product_urls, github_api_token, mongo_collection=None): """Run a pipeline to extract, transform, and load metadata for multiple LSST the Docs-hosted projects. Parameters ---------- session : `aiohttp.ClientSession` Your application's aiohttp client session. See http://aiohttp.readthedocs.io/en/stable/client.html. product_urls : `list` of `str` List of LSST the Docs product URLs. github_api_token : `str` A GitHub personal API token. See the `GitHub personal access token guide`_. mongo_collection : `motor.motor_asyncio.AsyncIOMotorCollection`, optional MongoDB collection. This
python
{ "resource": "" }
q265046
process_ltd_doc
validation
async def process_ltd_doc(session, github_api_token, ltd_product_url, mongo_collection=None): """Ingest any kind of LSST document hosted on LSST the Docs from its source. Parameters ---------- session : `aiohttp.ClientSession` Your application's aiohttp client session. See http://aiohttp.readthedocs.io/en/stable/client.html. github_api_token : `str` A GitHub personal API token. See the `GitHub personal access token guide`_. ltd_product_url : `str` URL of the technote's product resource in the LTD Keeper API. mongo_collection : `motor.motor_asyncio.AsyncIOMotorCollection`, optional MongoDB collection. This should be the common MongoDB collection for LSST projectmeta JSON-LD records. If provided, this JSON-LD is upserted into the MongoDB collection. Returns ------- metadata : `dict` JSON-LD-formatted dictionary. .. _`GitHub personal access token guide`: https://ls.st/41d """ logger = logging.getLogger(__name__) ltd_product_data = await get_ltd_product(session, url=ltd_product_url) # Ensure the LTD product is a document product_name = ltd_product_data['slug']
python
{ "resource": "" }
q265047
decorator
validation
def decorator(decorator_func): """Allows a decorator to be called with or without keyword arguments.""" assert callable(decorator_func), type(decorator_func) def _decorator(func=None, **kwargs): assert func is None or callable(func), type(func) if func:
python
{ "resource": "" }
q265048
get_installation_token
validation
def get_installation_token(installation_id, integration_jwt): """Create a GitHub token for an integration installation. Parameters ---------- installation_id : `int` Installation ID. This is available in the URL of the integration's **installation** page. integration_jwt : `bytes` The integration's JSON Web Token (JWT). You can create this with `create_jwt`. Returns ------- token_obj : `dict` GitHub token object. Includes the fields: - ``token``: the token string itself. - ``expires_at``: date time string when the token expires. Example ------- The typical workflow for authenticating to an integration installation is: .. code-block:: python from dochubadapter.github import auth jwt = auth.create_jwt(integration_id, private_key_path) token_obj = auth.get_installation_token(installation_id, jwt)
python
{ "resource": "" }
q265049
create_jwt
validation
def create_jwt(integration_id, private_key_path): """Create a JSON Web Token to authenticate a GitHub Integration or installation. Parameters ---------- integration_id : `int` Integration ID. This is available from the GitHub integration's homepage. private_key_path : `str` Path to the integration's private key (a ``.pem`` file). Returns ------- jwt : `bytes` JSON Web Token that is good for 9 minutes. Notes ----- The JWT is encoded with the RS256 algorithm. It includes a payload with fields: - ``'iat'``: The current time, as an `int` timestamp. - ``'exp'``: Expiration time, as an `int` timestamp. The expiration time is set to 9 minutes in the future (maximum allowance is 10 minutes). - ``'iss'``: The integration ID (`int`). For more information, see https://developer.github.com/early-access/integrations/authentication/. """
python
{ "resource": "" }
q265050
get_macros
validation
def get_macros(tex_source): r"""Get all macro definitions from TeX source, supporting multiple declaration patterns. Parameters ---------- tex_source : `str` TeX source content. Returns ------- macros : `dict` Keys are macro names (including leading ``\``) and values are the content (as `str`) of the
python
{ "resource": "" }
q265051
get_def_macros
validation
def get_def_macros(tex_source): r"""Get all ``\def`` macro definitions from TeX source. Parameters ---------- tex_source : `str` TeX source content. Returns ------- macros : `dict` Keys are macro names (including leading ``\``) and values are the content (as `str`) of the macros. Notes
python
{ "resource": "" }
q265052
get_newcommand_macros
validation
def get_newcommand_macros(tex_source): r"""Get all ``\newcommand`` macro definitions from TeX source. Parameters ---------- tex_source : `str` TeX source content. Returns ------- macros : `dict` Keys are macro names (including leading ``\``) and values are the content (as `str`) of the macros. Notes ----- ``\newcommand`` macros with arguments are not supported. """ macros = {} command = LatexCommand( 'newcommand',
python
{ "resource": "" }
q265053
load
validation
def load(directory_name, module_name): """Try to load and return a module. Will add DIRECTORY_NAME to sys.path and try to import MODULE_NAME. For example: load("~/.yaz", "yaz_extension") """ directory_name = os.path.expanduser(directory_name) if os.path.isdir(directory_name)
python
{ "resource": "" }
q265054
make_aware
validation
def make_aware(value, timezone): """ Makes a naive datetime.datetime in a given time zone aware. """ if hasattr(timezone, 'localize') and value not in (datetime.datetime.min, datetime.datetime.max): # available for pytz time zones
python
{ "resource": "" }
q265055
make_naive
validation
def make_naive(value, timezone): """ Makes an aware datetime.datetime naive in a given time zone. """ value = value.astimezone(timezone) if hasattr(timezone, 'normalize'): # available
python
{ "resource": "" }
q265056
Schedule.to_timezone
validation
def to_timezone(self, dt): """Converts a datetime to the timezone of this Schedule.""" if timezone.is_aware(dt): return
python
{ "resource": "" }
q265057
Schedule.next_interval
validation
def next_interval(self, after=None): """Returns the next Period this event is in effect, or None if the event has no remaining periods.""" if after is None: after = timezone.now()
python
{ "resource": "" }
q265058
_ScheduleRecurring._daily_periods
validation
def _daily_periods(self, range_start, range_end): """Returns an iterator of Period tuples for every day this event is in effect, between range_start and range_end.""" specific = set(self.exceptions.keys()) return heapq.merge(self.exception_periods(range_start, range_end), *[
python
{ "resource": "" }
q265059
_ScheduleRecurring.intervals
validation
def intervals(self, range_start=datetime.datetime.min, range_end=datetime.datetime.max): """Returns an iterator of Period tuples for continuous stretches of time during which this event is in effect, between range_start and range_end.""" # At the moment the algorithm works on periods split by calendar day, one at a time, # merging them if they're continuous; to avoid looping infinitely for infinitely long # periods, it splits periods as soon as they reach 60 days. # This algorithm could likely be improved to get rid of this restriction and improve # efficiency, so code should not rely on this behaviour. current_period = None max_continuous_days = 60 range_start = self.to_timezone(range_start) range_end = self.to_timezone(range_end) for period in self._daily_periods(range_start.date(), range_end.date()): if period.end < range_start or period.start > range_end: continue if current_period is None: current_period = period else:
python
{ "resource": "" }
q265060
RecurringScheduleComponent.includes
validation
def includes(self, query_date, query_time=None): """Does this schedule include the provided time? query_date and query_time are date and time objects, interpreted in this schedule's timezone""" if self.start_date and query_date < self.start_date:
python
{ "resource": "" }
q265061
RecurringScheduleComponent.daily_periods
validation
def daily_periods(self, range_start=datetime.date.min, range_end=datetime.date.max, exclude_dates=tuple()): """Returns an iterator of Period tuples for every day this schedule is in effect, between range_start and range_end.""" tz = self.timezone period = self.period weekdays = self.weekdays current_date = max(range_start, self.start_date) end_date = range_end if self.end_date: end_date = min(end_date, self.end_date) while current_date <= end_date: if current_date.weekday() in weekdays and current_date not in exclude_dates:
python
{ "resource": "" }
q265062
RecurringScheduleComponent.period
validation
def period(self): """A Period tuple representing the daily start and end time.""" start_time = self.root.findtext('daily_start_time') if start_time:
python
{ "resource": "" }
q265063
RecurringScheduleComponent.weekdays
validation
def weekdays(self): """A set of integers representing the weekdays the schedule recurs on, with Monday = 0 and Sunday = 6.""" if not self.root.xpath('days'):
python
{ "resource": "" }
q265064
temp_db
validation
def temp_db(db, name=None): """ A context manager that creates a temporary database. Useful for automated tests. Parameters ---------- db: object a preconfigured DB object name: str, optional name of the database to be created. (default: globally unique name) """ if name is None: name = temp_name()
python
{ "resource": "" }
q265065
_download_text
validation
async def _download_text(url, session): """Asynchronously request a URL and get the encoded text content of the body. Parameters ---------- url : `str` URL to download. session : `aiohttp.ClientSession` An open aiohttp session. Returns ------- content : `str`
python
{ "resource": "" }
q265066
_download_lsst_bibtex
validation
async def _download_lsst_bibtex(bibtex_names): """Asynchronously download a set of lsst-texmf BibTeX bibliographies from GitHub. Parameters ---------- bibtex_names : sequence of `str` Names of lsst-texmf BibTeX files to download. For example: .. code-block:: python ['lsst', 'lsst-dm', 'refs', 'books', 'refs_ads'] Returns ------- bibtexs : `list` of `str` List of BibTeX file content, in the same order as ``bibtex_names``. """ blob_url_template = ( 'https://raw.githubusercontent.com/lsst/lsst-texmf/master/texmf/' 'bibtex/bib/{name}.bib' ) urls =
python
{ "resource": "" }
q265067
get_lsst_bibtex
validation
def get_lsst_bibtex(bibtex_filenames=None): """Get content of lsst-texmf bibliographies. BibTeX content is downloaded from GitHub (``master`` branch of https://github.com/lsst/lsst-texmf) or retrieved from an in-memory cache. Parameters ---------- bibtex_filenames : sequence of `str`, optional List of lsst-texmf BibTeX files to retrieve. These can be the filenames of lsst-bibtex files (for example, ``['lsst.bib', 'lsst-dm.bib']``) or names without an extension (``['lsst', 'lsst-dm']``). The default (recommended) is to get *all* lsst-texmf bibliographies: .. code-block:: python ['lsst', 'lsst-dm', 'refs', 'books', 'refs_ads'] Returns ------- bibtex : `dict` Dictionary with keys that are bibtex file names (such as ``'lsst'``, ``'lsst-dm'``). Values are the corresponding bibtex file content (`str`). """ logger = logging.getLogger(__name__) if bibtex_filenames is None: # Default lsst-texmf bibliography files bibtex_names = KNOWN_LSSTTEXMF_BIB_NAMES else: # Sanitize filenames (remove extensions, path) bibtex_names = [] for filename in bibtex_filenames: name = os.path.basename(os.path.splitext(filename)[0]) if name not in KNOWN_LSSTTEXMF_BIB_NAMES:
python
{ "resource": "" }
q265068
get_bibliography
validation
def get_bibliography(lsst_bib_names=None, bibtex=None): """Make a pybtex BibliographyData instance from standard lsst-texmf bibliography files and user-supplied bibtex content. Parameters ---------- lsst_bib_names : sequence of `str`, optional Names of lsst-texmf BibTeX files to include. For example: .. code-block:: python ['lsst', 'lsst-dm', 'refs', 'books', 'refs_ads'] Default is `None`, which includes all lsst-texmf bibtex files. bibtex : `str` BibTeX source content not included in lsst-texmf. This can be content from a ``local.bib`` file. Returns ------- bibliography : `pybtex.database.BibliographyData` A pybtex bibliography database that includes all given sources: lsst-texmf bibliographies and ``bibtex``. """ bibtex_data = get_lsst_bibtex(bibtex_filenames=lsst_bib_names)
python
{ "resource": "" }
q265069
get_url_from_entry
validation
def get_url_from_entry(entry): """Get a usable URL from a pybtex entry. Parameters ---------- entry : `pybtex.database.Entry` A pybtex bibliography entry. Returns ------- url : `str` Best available URL from the ``entry``. Raises ------ NoEntryUrlError Raised when no URL can be made from the bibliography entry. Notes ----- The order of priority is: 1. ``url`` field 2. ``ls.st`` URL from the handle for ``@docushare`` entries. 3. ``adsurl`` 4. DOI """ if 'url' in entry.fields:
python
{ "resource": "" }
q265070
get_authoryear_from_entry
validation
def get_authoryear_from_entry(entry, paren=False): """Get and format author-year text from a pybtex entry to emulate natbib citations. Parameters ---------- entry : `pybtex.database.Entry` A pybtex bibliography entry. paren : `bool`, optional Whether to add parentheses around the year. Default is `False`. Returns ------- authoryear : `str` The author-year citation text. """ def _format_last(person): """Reformat a pybtex Person into a last name. Joins all parts of a last name and strips "{}" wrappers. """ return ' '.join([n.strip('{}') for n in person.last_names]) if len(entry.persons['author']) > 0: # Grab author list persons = entry.persons['author'] elif len(entry.persons['editor']) > 0: # Grab editor list persons = entry.persons['editor'] else: raise AuthorYearError try: year = entry.fields['year'] except KeyError: raise AuthorYearError if paren and len(persons) == 1: template = '{author} ({year})' return template.format(author=_format_last(persons[0]), year=year) elif not paren and len(persons) == 1: template = '{author} {year}' return template.format(author=_format_last(persons[0]),
python
{ "resource": "" }
q265071
process_sphinx_technote
validation
async def process_sphinx_technote(session, github_api_token, ltd_product_data, mongo_collection=None): """Extract, transform, and load Sphinx-based technote metadata. Parameters ---------- session : `aiohttp.ClientSession` Your application's aiohttp client session. See http://aiohttp.readthedocs.io/en/stable/client.html. github_api_token : `str` A GitHub personal API token. See the `GitHub personal access token guide`_. ltd_product_data : `dict` Data for this technote from the LTD Keeper API (``GET /products/<slug>``). Usually obtained via `lsstprojectmeta.ltd.get_ltd_product`. mongo_collection : `motor.motor_asyncio.AsyncIOMotorCollection`, optional MongoDB collection. This should be the common MongoDB collection for LSST projectmeta JSON-LD records. If provided, this JSON-LD is upserted into the MongoDB collection. Returns ------- metadata : `dict` JSON-LD-formatted dictionary. Raises ------ NotSphinxTechnoteError Raised when the LTD product cannot be interpreted as a Sphinx-based technote project because it's missing a metadata.yaml file in its GitHub repository. This implies that the LTD product *could* be of a different format. .. _`GitHub personal access token guide`: https://ls.st/41d """ logger = logging.getLogger(__name__) github_url = ltd_product_data['doc_repo'] github_url = normalize_repo_root_url(github_url) repo_slug = parse_repo_slug_from_url(github_url) try: metadata_yaml = await download_metadata_yaml(session, github_url) except aiohttp.ClientResponseError as err:
python
{ "resource": "" }
q265072
reduce_technote_metadata
validation
def reduce_technote_metadata(github_url, metadata, github_data, ltd_product_data): """Reduce a technote project's metadata from multiple sources into a single JSON-LD resource. Parameters ---------- github_url : `str` URL of the technote's GitHub repository. metadata : `dict` The parsed contents of ``metadata.yaml`` found in a technote's repository. github_data : `dict` The contents of the ``technote_repo`` GitHub GraphQL API query. ltd_product_data : `dict` JSON dataset for the technote corresponding to the ``/products/<product>`` resource of LTD Keeper. Returns ------- metadata : `dict` JSON-LD-formatted dictionary. .. _`GitHub personal access token guide`: https://ls.st/41d """ repo_slug = parse_repo_slug_from_url(github_url) # Initialize a schema.org/Report and schema.org/SoftwareSourceCode # linked data resource jsonld = { '@context': [ "https://raw.githubusercontent.com/codemeta/codemeta/2.0-rc/" "codemeta.jsonld", "http://schema.org"], '@type': ['Report', 'SoftwareSourceCode'], 'codeRepository': github_url } if 'url' in metadata: url = metadata['url'] elif 'published_url' in ltd_product_data: url = ltd_product_data['published_url'] else: raise RuntimeError('No identifying url could be found: ' '{}'.format(github_url)) jsonld['@id'] = url jsonld['url'] = url if 'series' in metadata and 'serial_number' in metadata: jsonld['reportNumber'] = '{series}-{serial_number}'.format(**metadata) else: raise RuntimeError('No reportNumber: {}'.format(github_url)) if 'doc_title' in metadata: jsonld['name'] = metadata['doc_title'] if 'description' in metadata: jsonld['description'] = metadata['description'] if 'authors' in metadata: jsonld['author'] = [{'@type': 'Person', 'name': author_name} for author_name in metadata['authors']] if 'last_revised' in metadata: # Prefer getting the 'last_revised' date from metadata.yaml # since it's considered an override. jsonld['dateModified'] = datetime.datetime.strptime( metadata['last_revised'], '%Y-%m-%d') else: # Fall back to parsing the date of the last commit to the # default branch on GitHub (usually `master`). try: _repo_data = github_data['data']['repository'] _master_data = _repo_data['defaultBranchRef']
python
{ "resource": "" }
q265073
download_metadata_yaml
validation
async def download_metadata_yaml(session, github_url): """Download the metadata.yaml file from a technote's GitHub repository. """ metadata_yaml_url = _build_metadata_yaml_url(github_url) async with session.get(metadata_yaml_url)
python
{ "resource": "" }
q265074
DayOneEntry.tz
validation
def tz(self): """Return the timezone. If none is set, use the system timezone.""" if not self._tz:
python
{ "resource": "" }
q265075
DayOneEntry.time
validation
def time(self, t): """Convert any timestamp into a datetime and save as _time""" _time = arrow.get(t).format('YYYY-MM-DDTHH:mm:ss')
python
{ "resource": "" }
q265076
DayOneEntry.as_dict
validation
def as_dict(self): """Return a dict that represents the DayOneEntry""" entry_dict = {} entry_dict['UUID'] = self.uuid entry_dict['Creation Date'] = self.time entry_dict['Time Zone'] = self.tz if self.tags: entry_dict['Tags'] =
python
{ "resource": "" }
q265077
DayOne.save
validation
def save(self, entry, with_location=True, debug=False): """Saves a DayOneEntry as a plist""" entry_dict = {} if isinstance(entry, DayOneEntry): # Get a dict of the DayOneEntry entry_dict = entry.as_dict() else: entry_dict = entry # Set the UUID entry_dict['UUID'] = uuid.uuid4().get_hex() if with_location and not entry_dict['Location']: entry_dict['Location'] = self.get_location() # Do we have everything needed? if not all ((entry_dict['UUID'], entry_dict['Time Zone'], entry_dict['Entry Text'])):
python
{ "resource": "" }
q265078
DayOne._file_path
validation
def _file_path(self, uid): """Create and return full file path for DayOne entry""" file_name = '%s.doentry' % (uid)
python
{ "resource": "" }
q265079
Collection.combine
validation
def combine(self, members, output_file, dimension=None, start_index=None, stop_index=None, stride=None): """ Combine many files into a single file on disk. Defaults to using the 'time' dimension. """ nco = None try: nco = Nco() except BaseException: # This is not necessarily an import error (could be wrong PATH) raise ImportError("NCO not found. The NCO python bindings are required to use 'Collection.combine'.") if len(members) > 0 and hasattr(members[0], 'path'): # A member DotDict was passed in; we only need the paths members = [ m.path for m in members ] options = ['-4'] # NetCDF4
python
{ "resource": "" }
q265080
main
validation
def main(argv=None, white_list=None, load_yaz_extension=True): """The entry point for a yaz script. This will almost always be called from a python script in the following manner: if __name__ == "__main__": yaz.main() This function will perform the following steps: 1. It will load any additional python code from the yaz_extension python module located in the ~/.yaz directory when LOAD_YAZ_EXTENSION is True and the yaz_extension module exists 2. It collects all yaz tasks and plugins. When WHITE_LIST is a non-empty list, only the tasks and plugins located therein will be considered 3. It will parse arguments from ARGV, or the command line when ARGV is not given, resulting in a yaz task or a parser help message. 4. When a suitable task is found, this task is executed. When the task is part of a plugin, i.e. a class, that plugin is initialized, possibly causing other plugins to be initialized as well if they are marked with `@yaz.dependency`. """ assert argv is None or isinstance(argv, list), type(argv) assert white_list is None or isinstance(white_list, list), type(white_list) assert isinstance(load_yaz_extension, bool), type(load_yaz_extension) argv = sys.argv if argv is None else argv assert len(argv) > 0, len(argv) if load_yaz_extension: load("~/.yaz", "yaz_extension") parser = Parser(prog=argv[0]) parser.add_task_tree(get_task_tree(white_list)) task, kwargs = parser.parse_arguments(argv) if task: try: result = task(**kwargs) # when the
python
{ "resource": "" }
q265081
get_task_tree
validation
def get_task_tree(white_list=None): """Returns a tree of Task instances. The tree consists of dictionaries containing strings for keys and either dictionaries or Task instances for values. When WHITE_LIST is given, only the tasks and plugins in this list will become part of the task tree. The WHITE_LIST may contain either strings, corresponding to the task or plugin __qualname__, or, preferably, links to the task function or plugin class itself. """ assert white_list is None or isinstance(white_list, list), type(white_list) if white_list is not None: white_list = set(item if isinstance(item, str) else item.__qualname__ for item in white_list) tree = dict((task.qualified_name, task) for task in _task_list.values() if white_list is None or task.qualified_name in white_list) plugins = get_plugin_list() for plugin in [plugin for plugin in plugins.values() if white_list is None or plugin.__qualname__ in white_list]: tasks = [func for _, func
python
{ "resource": "" }
q265082
task
validation
def task(func, **config): """Declare a function or method to be a Yaz task @yaz.task def talk(message: str = "Hello World!"): return message Or... group multiple tasks together class Tools(yaz.Plugin): @yaz.task def say(self, message: str = "Hello World!"): return message
python
{ "resource": "" }
q265083
Task.get_parameters
validation
def get_parameters(self): """Returns a list of parameters""" if self.plugin_class is None: sig = inspect.signature(self.func) for index, parameter in enumerate(sig.parameters.values()): if not parameter.kind in [parameter.POSITIONAL_ONLY, parameter.KEYWORD_ONLY, parameter.POSITIONAL_OR_KEYWORD]: raise RuntimeError("Task {} contains an unsupported {} parameter".format(parameter, parameter.kind)) yield parameter else: var_keyword_seen = set() for cls in inspect.getmro(self.plugin_class): if issubclass(cls, BasePlugin) and hasattr(cls, self.func.__name__): func = getattr(cls, self.func.__name__) logger.debug("Found method %s from class %s", func, cls) var_keyword_found = False sig = inspect.signature(func) for index, parameter in enumerate(sig.parameters.values()): if index == 0: # skip "self" parameter continue if parameter.kind == inspect.Parameter.VAR_KEYWORD: # found "**kwargs" parameter. we will continue to the next class in the mro # to add any keyword parameters we have not yet used (i.e. whose name # we have not yet seen)
python
{ "resource": "" }
q265084
Task.get_configuration
validation
def get_configuration(self, key, default=None): """Returns the configuration for KEY""" if key in self.config:
python
{ "resource": "" }
q265085
get_plugin_instance
validation
def get_plugin_instance(plugin_class, *args, **kwargs): """Returns an instance of a fully initialized plugin class Every plugin class is kept in a plugin cache, effectively making every plugin into a singleton object. When a plugin has a yaz.dependency decorator, it will be called as well, before the instance is returned. """ assert issubclass(plugin_class, BasePlugin), type(plugin_class) global _yaz_plugin_instance_cache qualname = plugin_class.__qualname__ if not qualname in _yaz_plugin_instance_cache: plugin_class = get_plugin_list()[qualname]
python
{ "resource": "" }
q265086
xml_to_json
validation
def xml_to_json(root): """Convert an Open511 XML document or document fragment to JSON. Takes an lxml Element object. Returns a dict ready to be JSON-serialized.""" j = {} if len(root) == 0: # Tag with no children, return str/int return _maybe_intify(root.text) if len(root) == 1 and root[0].tag.startswith('{' + NS_GML): # GML return gml_to_geojson(root[0]) if root.tag == 'open511': j['meta'] = {'version': root.get('version')} for elem in root: name = elem.tag if name == 'link' and elem.get('rel'): name = elem.get('rel') + '_url' if name == 'self_url': name = 'url' if root.tag == 'open511': j['meta'][name] = elem.get('href') continue elif name.startswith('{' + NS_PROTECTED): name = '!' + name[name.index('}') + 1:] elif name[0] == '{': # Namespace! name = '+' + name[name.index('}') + 1:] if name in j: continue # duplicate elif elem.tag == 'link' and not elem.text: j[name] = elem.get('href') elif len(elem): if name == 'grouped_events':
python
{ "resource": "" }
q265087
gml_to_geojson
validation
def gml_to_geojson(el): """Given an lxml Element of a GML geometry, returns a dict in GeoJSON format.""" if el.get('srsName') not in ('urn:ogc:def:crs:EPSG::4326', None): if el.get('srsName') == 'EPSG:4326': return _gmlv2_to_geojson(el) else: raise NotImplementedError("Unrecognized srsName %s" % el.get('srsName')) tag = el.tag.replace('{%s}' % NS_GML, '') if tag == 'Point': coordinates = _reverse_gml_coords(el.findtext('{%s}pos' % NS_GML))[0] elif tag == 'LineString': coordinates = _reverse_gml_coords(el.findtext('{%s}posList' % NS_GML)) elif tag == 'Polygon': coordinates = [] for ring in el.xpath('gml:exterior/gml:LinearRing/gml:posList', namespaces=NSMAP) \ + el.xpath('gml:interior/gml:LinearRing/gml:posList', namespaces=NSMAP): coordinates.append(_reverse_gml_coords(ring.text))
python
{ "resource": "" }
q265088
_gmlv2_to_geojson
validation
def _gmlv2_to_geojson(el): """Translates a deprecated GML 2.0 geometry to GeoJSON""" tag = el.tag.replace('{%s}' % NS_GML, '') if tag == 'Point': coordinates = [float(c) for c in el.findtext('{%s}coordinates' % NS_GML).split(',')] elif tag == 'LineString': coordinates = [ [float(x) for x in pair.split(',')] for pair in el.findtext('{%s}coordinates' % NS_GML).split(' ') ] elif tag == 'Polygon': coordinates = [] for ring in el.xpath('gml:outerBoundaryIs/gml:LinearRing/gml:coordinates', namespaces=NSMAP) \
python
{ "resource": "" }
q265089
deparagraph
validation
def deparagraph(element, doc): """Panflute filter function that converts content wrapped in a Para to Plain. Use this filter with pandoc as:: pandoc [..] --filter=lsstprojectmeta-deparagraph Only lone paragraphs are affected. Para elements with siblings (like a second Para) are left unaffected. This filter is useful for processing strings like titles or author names so that the output isn't wrapped in paragraph tags. For example, without this filter, pandoc converts a string ``"The title"`` to ``<p>The title</p>`` in HTML. These ``<p>`` tags aren't useful if you intend to put the title text in ``<h1>`` tags using your own templating system. """ if isinstance(element, Para):
python
{ "resource": "" }
q265090
all_subclasses
validation
def all_subclasses(cls): """ Recursively generate all the subclasses of class cls. """ for subclass in cls.__subclasses__(): yield subclass
python
{ "resource": "" }
q265091
unique_justseen
validation
def unique_justseen(iterable, key=None): "List unique elements, preserving order. Remember only the element just seen." # unique_justseen('AAAABBBCCDAABBB') --> A B C D A B # unique_justseen('ABBCcAD', str.lower) --> A B C A D try: # PY2 support
python
{ "resource": "" }
q265092
generic_masked
validation
def generic_masked(arr, attrs=None, minv=None, maxv=None, mask_nan=True): """ Returns a masked array with anything outside of the valid values masked. The minv and maxv parameters take precedence over any dict values. The valid_range attribute takes precedence over the valid_min and valid_max attributes. """ attrs = attrs or {} if 'valid_min' in attrs: minv = safe_attribute_typing(arr.dtype, attrs['valid_min']) if 'valid_max' in attrs: maxv = safe_attribute_typing(arr.dtype, attrs['valid_max']) if 'valid_range' in attrs: vr = attrs['valid_range'] minv = safe_attribute_typing(arr.dtype, vr[0]) maxv = safe_attribute_typing(arr.dtype, vr[1]) # Get the min/max of values that the
python
{ "resource": "" }
q265093
BasicNumpyEncoder.default
validation
def default(self, obj): """If input object is an ndarray it will be converted into a list """ if isinstance(obj, np.ndarray): return obj.tolist() elif isinstance(obj, np.generic):
python
{ "resource": "" }
q265094
NumpyEncoder.default
validation
def default(self, obj): """If input object is an ndarray it will be converted into a dict holding dtype, shape and the data, base64 encoded. """ if isinstance(obj, np.ndarray): if obj.flags['C_CONTIGUOUS']: obj_data = obj.data else: cont_obj = np.ascontiguousarray(obj) assert(cont_obj.flags['C_CONTIGUOUS']) obj_data = cont_obj.data data_b64 = base64.b64encode(obj_data) return dict(__ndarray__=data_b64,
python
{ "resource": "" }
q265095
update_desc_lsib_path
validation
def update_desc_lsib_path(desc): ''' leftSibling, previousSibling, leftSib, prevSib, lsib, psib: have the same parent, and on the left ''' if(desc['sib_seq']>0):
python
{ "resource": "" }
q265096
update_desc_rsib_path
validation
def update_desc_rsib_path(desc,sibs_len): ''' rightSibling, nextSibling, rightSib, nextSib, rsib, nsib: have the same parent, and on the right ''' if(desc['sib_seq']<(sibs_len-1)):
python
{ "resource": "" }
q265097
update_desc_lcin_path
validation
def update_desc_lcin_path(desc,pdesc_level): ''' leftCousin, previousCousin, leftCin, prevCin, lcin, pcin: parents are neighbors, and on the left ''' parent_breadth = desc['parent_breadth_path'][-1] if(desc['sib_seq']==0): if(parent_breadth==0): pass else: parent_lsib_breadth = parent_breadth - 1 plsib_desc = pdesc_level[parent_lsib_breadth] if(plsib_desc['leaf']):
python
{ "resource": "" }
q265098
update_desc_rcin_path
validation
def update_desc_rcin_path(desc,sibs_len,pdesc_level): ''' rightCousin, nextCousin, rightCin, nextCin, rcin, ncin: parents are neighbors, and on the right ''' psibs_len = pdesc_level.__len__() parent_breadth = desc['parent_breadth_path'][-1] if(desc['sib_seq']==(sibs_len - 1)): if(parent_breadth==(psibs_len -1)): pass else: parent_rsib_breadth = parent_breadth + 1 prsib_desc = pdesc_level[parent_rsib_breadth] #because from left to right to
python
{ "resource": "" }
q265099
PointerCache.child_begin_handler
validation
def child_begin_handler(self,scache,*args): ''' _creat_child_desc update depth, parent_breadth_path, parent_path, sib_seq, path, lsib_path, rsib_path, lcin_path, rcin_path ''' pdesc = self.pdesc depth = scache.depth sib_seq = self.sib_seq sibs_len = self.sibs_len pdesc_level = scache.pdesc_level desc = copy.deepcopy(pdesc)
python
{ "resource": "" }