| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q265000 | _get_column_nums_from_args | validation | def _get_column_nums_from_args(columns):
"""Turn column inputs from user into list of simple numbers.
Inputs can be:
- individual number: 1
- range: 1-3
- comma separated list: 1,2,3,4-6
"""
nums = []
for c in columns:
for p in c.split(','):
p = p.strip()
try:
c = int(p)
nums.append(c)
except (TypeError, ValueError):
start, ignore, end = p.partition('-')
try:
start = int(start)
end = int(end)
except (TypeError, ValueError):
raise ValueError(
'Did not understand %r, expected digit-digit' % p
)
inc = 1 if start < end else -1
nums.extend(range(start, end + inc, inc))
# The user will pass us 1-based indexes, but we need to use
# 0-based indexing with the row.
return [n - 1 for n in nums] | python | {
"resource": ""
} |
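A quick usage sketch of the parsing rules described in the docstring; it assumes `_get_column_nums_from_args` from the row above is in scope:

```python
# Mixed forms are accepted, and ranges may also run backwards.
print(_get_column_nums_from_args(['1,2,4-6']))  # -> [0, 1, 3, 4, 5]
print(_get_column_nums_from_args(['3-1']))      # -> [2, 1, 0]
```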
q265001 | _get_printable_columns | validation | def _get_printable_columns(columns, row):
"""Return only the part of the row which should be printed.
"""
if not columns:
return row
# Extract the column values, in the order specified.
return tuple(row[c] for c in columns) | python | {
"resource": ""
} |
q265002 | VisualFormatWriter.writerow | validation | def writerow(self, observation_data):
"""
Writes a single observation to the output file.
If the ``observation_data`` parameter is a dictionary, it is
converted to a list to keep a consistent field order (as described
in format specification). Otherwise it is assumed that the data
is a raw record ready to be written to file.
:param observation_data: a single observation as a dictionary or list
"""
if isinstance(observation_data, (list, tuple)):
row = observation_data
else:
row = self.dict_to_row(observation_data)
self.writer.writerow(row) | python | {
"resource": ""
} |
q265003 | VisualFormatWriter.dict_to_row | validation | def dict_to_row(cls, observation_data):
"""
Takes a dictionary of observation data and converts it to a list
of fields according to AAVSO visual format specification.
:param cls: current class
:param observation_data: a single observation as a dictionary
"""
row = []
row.append(observation_data['name'])
row.append(observation_data['date'])
row.append(observation_data['magnitude'])
comment_code = observation_data.get('comment_code', 'na')
if not comment_code:
comment_code = 'na'
row.append(comment_code)
comp1 = observation_data.get('comp1', 'na')
if not comp1:
comp1 = 'na'
row.append(comp1)
comp2 = observation_data.get('comp2', 'na')
if not comp2:
comp2 = 'na'
row.append(comp2)
chart = observation_data.get('chart', 'na')
if not chart:
chart = 'na'
row.append(chart)
notes = observation_data.get('notes', 'na')
if not notes:
notes = 'na'
row.append(notes)
return row | python | {
"resource": ""
} |
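A small sketch of the 'na' placeholder behaviour, assuming `dict_to_row` is exposed as a classmethod (as its `cls` parameter suggests) and using made-up observation values:

```python
observation = {
    'name': 'SS CYG',
    'date': '2450702.1234',
    'magnitude': '<11.0',
    'comment_code': '',   # empty field -> written as 'na'
    'comp1': '110',
    'comp2': '113',
    'chart': '070613',
    'notes': 'Test note',
}
row = VisualFormatWriter.dict_to_row(observation)
# ['SS CYG', '2450702.1234', '<11.0', 'na', '110', '113', '070613', 'Test note']
```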
q265004 | VisualFormatReader.row_to_dict | validation | def row_to_dict(cls, row):
"""
Converts a raw input record to a dictionary of observation data.
:param cls: current class
:param row: a single observation as a list or tuple
"""
comment_code = row[3]
if comment_code.lower() == 'na':
comment_code = ''
comp1 = row[4]
if comp1.lower() == 'na':
comp1 = ''
comp2 = row[5]
if comp2.lower() == 'na':
comp2 = ''
chart = row[6]
if chart.lower() == 'na':
chart = ''
notes = row[7]
if notes.lower() == 'na':
notes = ''
return {
'name': row[0],
'date': row[1],
'magnitude': row[2],
'comment_code': comment_code,
'comp1': comp1,
'comp2': comp2,
'chart': chart,
'notes': notes,
} | python | {
"resource": ""
} |
q265005 | get_default_tag | validation | def get_default_tag(app):
'''Get the name of the view function used to prevent having to set the tag
manually for every endpoint'''
view_func = get_view_function(app, request.path, request.method)
if view_func:
return view_func.__name__ | python | {
"resource": ""
} |
q265006 | download_observations | validation | def download_observations(observer_code):
"""
Downloads all variable star observations by a given observer.
Performs a series of HTTP requests to AAVSO's WebObs search and
downloads the results page by page. Each page is then passed to
:py:class:`~pyaavso.parsers.webobs.WebObsResultsParser` and the parsed results
are added to the final observation list.
"""
page_number = 1
observations = []
while True:
logger.info('Downloading page %d...', page_number)
response = requests.get(WEBOBS_RESULTS_URL, params={
'obscode': observer_code,
'num_results': 200,
'obs_types': 'all',
'page': page_number,
})
logger.debug(response.request.url)
parser = WebObsResultsParser(response.text)
observations.extend(parser.get_observations())
# kinda silly, but there's no need for lxml machinery here
if '>Next</a>' not in response.text:
break
page_number += 1
return observations | python | {
"resource": ""
} |
q265007 | image_path | validation | def image_path(instance, filename):
"""Generates likely unique image path using md5 hashes"""
filename, ext = os.path.splitext(filename.lower())
instance_id_hash = hashlib.md5(str(instance.id).encode('utf-8')).hexdigest()
filename_hash = ''.join(random.sample(hashlib.md5(filename.encode('utf-8')).hexdigest(), 8))
return '{}/{}{}'.format(instance_id_hash, filename_hash, ext) | python | {
"resource": ""
} |
q265008 | process_lander_page | validation | async def process_lander_page(session, github_api_token, ltd_product_data,
mongo_collection=None):
"""Extract, transform, and load metadata from Lander-based projects.
Parameters
----------
session : `aiohttp.ClientSession`
Your application's aiohttp client session.
See http://aiohttp.readthedocs.io/en/stable/client.html.
github_api_token : `str`
A GitHub personal API token. See the `GitHub personal access token
guide`_.
ltd_product_data : `dict`
Data for this technote from the LTD Keeper API
(``GET /products/<slug>``). Usually obtained via
`lsstprojectmeta.ltd.get_ltd_product`.
mongo_collection : `motor.motor_asyncio.AsyncIOMotorCollection`, optional
MongoDB collection. This should be the common MongoDB collection for
LSST projectmeta JSON-LD records. If provided, the JSON-LD is upserted
into the MongoDB collection.
Returns
-------
metadata : `dict`
JSON-LD-formatted dictionary.
Raises
------
NotLanderPageError
Raised when the LTD product cannot be interpreted as a Lander page
because the ``/metadata.jsonld`` file is absent. This implies that
the LTD product *could* be of a different format.
.. _GitHub personal access token guide: https://ls.st/41d
"""
logger = logging.getLogger(__name__)
# Try to download metadata.jsonld from the Landing page site.
published_url = ltd_product_data['published_url']
jsonld_url = urljoin(published_url, '/metadata.jsonld')
try:
async with session.get(jsonld_url) as response:
logger.debug('%s response status %r', jsonld_url, response.status)
response.raise_for_status()
json_data = await response.text()
except aiohttp.ClientResponseError as err:
logger.debug('Tried to download %s, got status %d',
jsonld_url, err.code)
raise NotLanderPageError()
# Use our own json parser to get datetimes
metadata = decode_jsonld(json_data)
if mongo_collection is not None:
await _upload_to_mongodb(mongo_collection, metadata)
return metadata | python | {
"resource": ""
} |
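A minimal driver sketch for this coroutine; the token and product dict here are placeholders, and in practice the product data would come from `lsstprojectmeta.ltd.get_ltd_product`:

```python
import asyncio

import aiohttp


async def demo():
    async with aiohttp.ClientSession() as session:
        product = {'published_url': 'https://ldm-151.lsst.io'}  # hypothetical
        metadata = await process_lander_page(session, 'github-token', product)
        print(metadata.get('reportNumber'))

asyncio.get_event_loop().run_until_complete(demo())
```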
q265009 | _upload_to_mongodb | validation | async def _upload_to_mongodb(collection, jsonld):
"""Upsert the technote resource into the projectmeta MongoDB collection.
Parameters
----------
collection : `motor.motor_asyncio.AsyncIOMotorCollection`
The MongoDB collection.
jsonld : `dict`
The JSON-LD document that represents the document resource.
"""
document = {
'data': jsonld
}
query = {
'data.reportNumber': jsonld['reportNumber']
}
await collection.update(query, document, upsert=True, multi=False) | python | {
"resource": ""
} |
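`Collection.update` is deprecated in recent PyMongo/Motor releases; a sketch of the equivalent upsert with the modern API would look like this (an assumption about the caller's Motor version, not the project's actual code):

```python
async def _upload_to_mongodb_modern(collection, jsonld):
    # replace_one(..., upsert=True) mirrors update(..., upsert=True, multi=False)
    await collection.replace_one(
        {'data.reportNumber': jsonld['reportNumber']},
        {'data': jsonld},
        upsert=True)
```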
q265010 | json_doc_to_xml | validation | def json_doc_to_xml(json_obj, lang='en', custom_namespace=None):
"""Converts a Open511 JSON document to XML.
lang: the appropriate language code
Takes a dict deserialized from JSON, returns an lxml Element.
Accepts only the full root-level JSON object from an Open511 response."""
if 'meta' not in json_obj:
raise Exception("This function requires a conforming Open511 JSON document with a 'meta' section.")
json_obj = dict(json_obj)
meta = json_obj.pop('meta')
elem = get_base_open511_element(lang=lang, version=meta.pop('version'))
pagination = json_obj.pop('pagination', None)
json_struct_to_xml(json_obj, elem, custom_namespace=custom_namespace)
if pagination:
elem.append(json_struct_to_xml(pagination, 'pagination', custom_namespace=custom_namespace))
json_struct_to_xml(meta, elem)
return elem | python | {
"resource": ""
} |
q265011 | json_struct_to_xml | validation | def json_struct_to_xml(json_obj, root, custom_namespace=None):
"""Converts a Open511 JSON fragment to XML.
Takes a dict deserialized from JSON, returns an lxml Element.
This won't provide a conforming document if you pass in a full JSON document;
it's for translating little fragments, and is mostly used internally."""
if isinstance(root, (str, unicode)):
if root.startswith('!'):
root = etree.Element('{%s}%s' % (NS_PROTECTED, root[1:]))
elif root.startswith('+'):
if not custom_namespace:
raise Exception("JSON fields starts with +, but no custom namespace provided")
root = etree.Element('{%s}%s' % (custom_namespace, root[1:]))
else:
root = etree.Element(root)
if root.tag in ('attachments', 'grouped_events', 'media_files'):
for link in json_obj:
root.append(json_link_to_xml(link))
elif isinstance(json_obj, (str, unicode)):
root.text = json_obj
elif isinstance(json_obj, (int, float)):
root.text = unicode(json_obj)
elif isinstance(json_obj, dict):
if frozenset(json_obj.keys()) == frozenset(('type', 'coordinates')):
root.append(geojson_to_gml(json_obj))
else:
for key, val in json_obj.items():
if key == 'url' or key.endswith('_url'):
el = json_link_to_xml(val, json_link_key_to_xml_rel(key))
else:
el = json_struct_to_xml(val, key, custom_namespace=custom_namespace)
if el is not None:
root.append(el)
elif isinstance(json_obj, list):
tag_name = root.tag
if tag_name.endswith('ies'):
tag_name = tag_name[:-3] + 'y'
elif tag_name.endswith('s'):
tag_name = tag_name[:-1]
for val in json_obj:
el = json_struct_to_xml(val, tag_name, custom_namespace=custom_namespace)
if el is not None:
root.append(el)
elif json_obj is None:
return None
else:
raise NotImplementedError
return root | python | {
"resource": ""
} |
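To illustrate the pluralization rule (a list under a plural key becomes repeated singular children), here is a small sketch; it assumes `json_struct_to_xml` from the row above is in scope and runs under Python 2, where the snippet's `unicode` references resolve:

```python
from lxml import etree

el = json_struct_to_xml(
    {'areas': [{'name': 'Downtown'}, {'name': 'Airport'}]}, 'root')
print(etree.tostring(el))
# <root><areas><area><name>Downtown</name></area>
#              <area><name>Airport</name></area></areas></root>
```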
q265012 | geojson_to_gml | validation | def geojson_to_gml(gj, set_srs=True):
"""Given a dict deserialized from a GeoJSON object, returns an lxml Element
of the corresponding GML geometry."""
tag = G(gj['type'])
if set_srs:
tag.set('srsName', 'urn:ogc:def:crs:EPSG::4326')
if gj['type'] == 'Point':
tag.append(G.pos(_reverse_geojson_coords(gj['coordinates'])))
elif gj['type'] == 'LineString':
tag.append(G.posList(' '.join(_reverse_geojson_coords(ll) for ll in gj['coordinates'])))
elif gj['type'] == 'Polygon':
rings = [
G.LinearRing(
G.posList(' '.join(_reverse_geojson_coords(ll) for ll in ring))
) for ring in gj['coordinates']
]
tag.append(G.exterior(rings.pop(0)))
for ring in rings:
tag.append(G.interior(ring))
elif gj['type'] in ('MultiPoint', 'MultiLineString', 'MultiPolygon'):
single_type = gj['type'][5:]
member_tag = single_type[0].lower() + single_type[1:] + 'Member'
for coord in gj['coordinates']:
tag.append(
G(member_tag, geojson_to_gml({'type': single_type, 'coordinates': coord}, set_srs=False))
)
else:
raise NotImplementedError
return tag | python | {
"resource": ""
} |
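A sketch of the coordinate-order reversal for a Point, assuming `geojson_to_gml` and its GML ElementMaker `G` from this module; GeoJSON coordinates are lon/lat while GML `pos` is lat/lon:

```python
from lxml import etree

gml = geojson_to_gml({'type': 'Point', 'coordinates': [-75.69, 45.42]})
print(etree.tostring(gml))
# roughly: <Point srsName="urn:ogc:def:crs:EPSG::4326"><pos>45.42 -75.69</pos></Point>
```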
q265013 | geom_to_xml_element | validation | def geom_to_xml_element(geom):
"""Transform a GEOS or OGR geometry object into an lxml Element
for the GML geometry."""
if geom.srs.srid != 4326:
raise NotImplementedError("Only WGS 84 lat/long geometries (SRID 4326) are supported.")
# GeoJSON output is far more standard than GML, so go through that
return geojson_to_gml(json.loads(geom.geojson)) | python | {
"resource": ""
} |
q265014 | remove_comments | validation | def remove_comments(tex_source):
"""Delete latex comments from TeX source.
Parameters
----------
tex_source : str
TeX source content.
Returns
-------
tex_source : str
TeX source without comments.
"""
# Expression via http://stackoverflow.com/a/13365453
return re.sub(r'(?<!\\)%.*$', r'', tex_source, flags=re.M) | python | {
"resource": ""
} |
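A short demonstration of the negative-lookbehind behaviour: unescaped `%` comments are stripped while `\%` literals survive (assumes `remove_comments` from the row above is in scope):

```python
sample = "Weak lensing % TODO: add citation\n" + r"100\% complete"
print(remove_comments(sample))
# Weak lensing
# 100\% complete
```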
q265015 | replace_macros | validation | def replace_macros(tex_source, macros):
r"""Replace macros in the TeX source with their content.
Parameters
----------
tex_source : `str`
TeX source content.
macros : `dict`
Keys are macro names (including leading ``\``) and values are the
content (as `str`) of the macros. See
`lsstprojectmeta.tex.scraper.get_macros`.
Returns
-------
tex_source : `str`
TeX source with known macros replaced.
Notes
-----
Macros with arguments are not supported.
Examples
--------
>>> macros = {r'\handle': 'LDM-nnn'}
>>> sample = r'This is document \handle.'
>>> replace_macros(sample, macros)
'This is document LDM-nnn.'
Any trailing slash after the macro command is also replaced by this
function.
>>> macros = {r'\product': 'Data Management'}
>>> sample = r'\title [Test Plan] { \product\ Test Plan}'
>>> replace_macros(sample, macros)
'\\title [Test Plan] { Data Management Test Plan}'
"""
for macro_name, macro_content in macros.items():
# '\\?' suffix matches an optional trailing '\' that might be used
# for spacing.
pattern = re.escape(macro_name) + r"\\?"
# Wrap macro_content in lambda to avoid processing escapes
tex_source = re.sub(pattern, lambda _: macro_content, tex_source)
return tex_source | python | {
"resource": ""
} |
q265016 | ensure_format | validation | def ensure_format(doc, format):
"""
Ensures that the provided document is an lxml Element or json dict.
"""
assert format in ('xml', 'json')
if getattr(doc, 'tag', None) == 'open511':
if format == 'json':
return xml_to_json(doc)
elif isinstance(doc, dict) and 'meta' in doc:
if format == 'xml':
return json_doc_to_xml(doc)
else:
raise ValueError("Unrecognized input document")
return doc | python | {
"resource": ""
} |
q265017 | open511_convert | validation | def open511_convert(input_doc, output_format, serialize=True, **kwargs):
"""
Convert an Open511 document between formats.
input_doc - either an lxml open511 Element or a deserialized JSON dict
output_format - short string name of a valid output format, as listed above
"""
try:
output_format_info = FORMATS[output_format]
except KeyError:
raise ValueError("Unrecognized output format %s" % output_format)
input_doc = ensure_format(input_doc, output_format_info.input_format)
result = output_format_info.func(input_doc, **kwargs)
if serialize:
result = output_format_info.serializer(result)
return result | python | {
"resource": ""
} |
q265018 | LsstLatexDoc.read | validation | def read(cls, root_tex_path):
"""Construct an `LsstLatexDoc` instance by reading and parsing the
LaTeX source.
Parameters
----------
root_tex_path : `str`
Path to the LaTeX source on the filesystem. For multi-file LaTeX
projects this should be the path to the root document.
Notes
-----
This method implements the following pipeline:
1. `lsstprojectmeta.tex.normalizer.read_tex_file`
2. `lsstprojectmeta.tex.scraper.get_macros`
3. `lsstprojectmeta.tex.normalizer.replace_macros`
Thus ``input`` and ``includes`` are resolved along with simple macros.
"""
# Read and normalize the TeX source, replacing macros with content
root_dir = os.path.dirname(root_tex_path)
tex_source = read_tex_file(root_tex_path)
tex_macros = get_macros(tex_source)
tex_source = replace_macros(tex_source, tex_macros)
return cls(tex_source, root_dir=root_dir) | python | {
"resource": ""
} |
q265019 | LsstLatexDoc.format_content | validation | def format_content(self, format='plain', mathjax=False,
smart=True, extra_args=None):
"""Get the document content in the specified markup format.
Parameters
----------
format : `str`, optional
Output format (such as ``'html5'`` or ``'plain'``).
mathjax : `bool`, optional
Allow pandoc to use MathJax math markup.
smart : `bool`, optional
Allow pandoc to create "smart" unicode punctuation.
extra_args : `list`, optional
Additional command line flags to pass to Pandoc. See
`lsstprojectmeta.pandoc.convert.convert_text`.
Returns
-------
output_text : `str`
Converted content.
"""
output_text = convert_lsstdoc_tex(
self._tex, format,
mathjax=mathjax,
smart=smart,
extra_args=extra_args)
return output_text | python | {
"resource": ""
} |
q265020 | LsstLatexDoc.format_title | validation | def format_title(self, format='html5', deparagraph=True, mathjax=False,
smart=True, extra_args=None):
"""Get the document title in the specified markup format.
Parameters
----------
format : `str`, optional
Output format (such as ``'html5'`` or ``'plain'``).
deparagraph : `bool`, optional
Remove the paragraph tags from single paragraph content.
mathjax : `bool`, optional
Allow pandoc to use MathJax math markup.
smart : `bool`, optional
Allow pandoc to create "smart" unicode punctuation.
extra_args : `list`, optional
Additional command line flags to pass to Pandoc. See
`lsstprojectmeta.pandoc.convert.convert_text`.
Returns
-------
output_text : `str`
Converted content or `None` if the title is not available in
the document.
"""
if self.title is None:
return None
output_text = convert_lsstdoc_tex(
self.title, format,
deparagraph=deparagraph,
mathjax=mathjax,
smart=smart,
extra_args=extra_args)
return output_text | python | {
"resource": ""
} |
q265021 | LsstLatexDoc.format_short_title | validation | def format_short_title(self, format='html5', deparagraph=True,
mathjax=False, smart=True, extra_args=None):
"""Get the document short title in the specified markup format.
Parameters
----------
format : `str`, optional
Output format (such as ``'html5'`` or ``'plain'``).
deparagraph : `bool`, optional
Remove the paragraph tags from single paragraph content.
mathjax : `bool`, optional
Allow pandoc to use MathJax math markup.
smart : `bool`, optional
Allow pandoc to create "smart" unicode punctuation.
extra_args : `list`, optional
Additional command line flags to pass to Pandoc. See
`lsstprojectmeta.pandoc.convert.convert_text`.
Returns
-------
output_text : `str`
Converted content or `None` if the short title is not available in
the document.
"""
if self.short_title is None:
return None
output_text = convert_lsstdoc_tex(
self.short_title, format,
deparagraph=deparagraph,
mathjax=mathjax,
smart=smart,
extra_args=extra_args)
return output_text | python | {
"resource": ""
} |
q265022 | LsstLatexDoc.format_abstract | validation | def format_abstract(self, format='html5', deparagraph=False, mathjax=False,
smart=True, extra_args=None):
"""Get the document abstract in the specified markup format.
Parameters
----------
format : `str`, optional
Output format (such as ``'html5'`` or ``'plain'``).
deparagraph : `bool`, optional
Remove the paragraph tags from single paragraph content.
mathjax : `bool`, optional
Allow pandoc to use MathJax math markup.
smart : `bool`, optional
Allow pandoc to create "smart" unicode punctuation.
extra_args : `list`, optional
Additional command line flags to pass to Pandoc. See
`lsstprojectmeta.pandoc.convert.convert_text`.
Returns
-------
output_text : `str`
Converted content or `None` if the abstract is not available in
the document.
"""
if self.abstract is None:
return None
abstract_latex = self._prep_snippet_for_pandoc(self.abstract)
output_text = convert_lsstdoc_tex(
abstract_latex, format,
deparagraph=deparagraph,
mathjax=mathjax,
smart=smart,
extra_args=extra_args)
return output_text | python | {
"resource": ""
} |
q265023 | LsstLatexDoc.format_authors | validation | def format_authors(self, format='html5', deparagraph=True, mathjax=False,
smart=True, extra_args=None):
"""Get the document authors in the specified markup format.
Parameters
----------
format : `str`, optional
Output format (such as ``'html5'`` or ``'plain'``).
deparagraph : `bool`, optional
Remove the paragraph tags from single paragraph content.
mathjax : `bool`, optional
Allow pandoc to use MathJax math markup.
smart : `bool`, optional
Allow pandoc to create "smart" unicode punctuation.
extra_args : `list`, optional
Additional command line flags to pass to Pandoc. See
`lsstprojectmeta.pandoc.convert.convert_text`.
Returns
-------
output_text : `list` of `str`
Sequence of author names in the specified output markup format.
"""
formatted_authors = []
for latex_author in self.authors:
formatted_author = convert_lsstdoc_tex(
latex_author, format,
deparagraph=deparagraph,
mathjax=mathjax,
smart=smart,
extra_args=extra_args)
# removes Pandoc's terminal newlines
formatted_author = formatted_author.strip()
formatted_authors.append(formatted_author)
return formatted_authors | python | {
"resource": ""
} |
q265024 | LsstLatexDoc._parse_documentclass | validation | def _parse_documentclass(self):
"""Parse documentclass options.
Sets the the ``_document_options`` attribute.
"""
command = LatexCommand(
'documentclass',
{'name': 'options', 'required': False, 'bracket': '['},
{'name': 'class_name', 'required': True, 'bracket': '{'})
try:
parsed = next(command.parse(self._tex))
except StopIteration:
self._logger.warning('lsstdoc has no documentclass')
self._document_options = []
return
try:
content = parsed['options']
self._document_options = [opt.strip()
for opt in content.split(',')]
except KeyError:
self._logger.warning('lsstdoc has no documentclass options')
self._document_options = [] | python | {
"resource": ""
} |
q265025 | LsstLatexDoc._parse_title | validation | def _parse_title(self):
"""Parse the title from TeX source.
Sets these attributes:
- ``_title``
- ``_short_title``
"""
command = LatexCommand(
'title',
{'name': 'short_title', 'required': False, 'bracket': '['},
{'name': 'long_title', 'required': True, 'bracket': '{'})
try:
parsed = next(command.parse(self._tex))
except StopIteration:
self._logger.warning('lsstdoc has no title')
self._title = None
self._short_title = None
return
self._title = parsed['long_title']
try:
self._short_title = parsed['short_title']
except KeyError:
self._logger.warning('lsstdoc has no short title')
self._short_title = None | python | {
"resource": ""
} |
q265026 | LsstLatexDoc._parse_doc_ref | validation | def _parse_doc_ref(self):
"""Parse the document handle.
Sets the ``_series``, ``_serial``, and ``_handle`` attributes.
"""
command = LatexCommand(
'setDocRef',
{'name': 'handle', 'required': True, 'bracket': '{'})
try:
parsed = next(command.parse(self._tex))
except StopIteration:
self._logger.warning('lsstdoc has no setDocRef')
self._handle = None
self._series = None
self._serial = None
return
self._handle = parsed['handle']
try:
self._series, self._serial = self._handle.split('-', 1)
except ValueError:
self._logger.warning('lsstdoc handle cannot be parsed into '
'series and serial: %r', self._handle)
self._series = None
self._serial = None | python | {
"resource": ""
} |
q265027 | LsstLatexDoc._parse_author | validation | def _parse_author(self):
r"""Parse the author from TeX source.
Sets the ``_authors`` attribute.
Goal is to parse::
\author{
A.~Author,
B.~Author,
and
C.~Author}
Into::
['A. Author', 'B. Author', 'C. Author']
"""
command = LatexCommand(
'author',
{'name': 'authors', 'required': True, 'bracket': '{'})
try:
parsed = next(command.parse(self._tex))
except StopIteration:
self._logger.warning('lsstdoc has no author')
self._authors = []
return
try:
content = parsed['authors']
except KeyError:
self._logger.warning('lsstdoc has no author')
self._authors = []
return
# Clean content
content = content.replace('\n', ' ')
content = content.replace('~', ' ')
content = content.strip()
# Split content into list of individual authors
authors = []
for part in content.split(','):
part = part.strip()
for split_part in part.split('and '):
split_part = split_part.strip()
if len(split_part) > 0:
authors.append(split_part)
self._authors = authors | python | {
"resource": ""
} |
q265028 | LsstLatexDoc._parse_abstract | validation | def _parse_abstract(self):
"""Parse the abstract from the TeX source.
Sets the ``_abstract`` attribute.
"""
command = LatexCommand(
'setDocAbstract',
{'name': 'abstract', 'required': True, 'bracket': '{'})
try:
parsed = next(command.parse(self._tex))
except StopIteration:
self._logger.warning('lsstdoc has no abstract')
self._abstract = None
return
try:
content = parsed['abstract']
except KeyError:
self._logger.warning('lsstdoc has no abstract')
self._abstract = None
return
content = content.strip()
self._abstract = content | python | {
"resource": ""
} |
q265029 | LsstLatexDoc._prep_snippet_for_pandoc | validation | def _prep_snippet_for_pandoc(self, latex_text):
"""Process a LaTeX snippet of content for better transformation
with pandoc.
Currently runs the CitationLinker to convert BibTeX citations to
href links.
"""
replace_cite = CitationLinker(self.bib_db)
latex_text = replace_cite(latex_text)
return latex_text | python | {
"resource": ""
} |
q265030 | LsstLatexDoc._load_bib_db | validation | def _load_bib_db(self):
r"""Load the BibTeX bibliography referenced by the document.
This method triggered by the `bib_db` attribute and populates the
`_bib_db` private attribute.
The ``\bibliography`` command is parsed to identify the bibliographies
referenced by the document.
"""
# Get the names of custom bibtex files by parsing the
# \bibliography command and filtering out the default lsstdoc
# bibliographies.
command = LatexCommand(
'bibliography',
{'name': 'bib_names', 'required': True, 'bracket': '{'})
try:
parsed = next(command.parse(self._tex))
bib_names = [n.strip() for n in parsed['bib_names'].split(',')]
except StopIteration:
self._logger.warning('lsstdoc has no bibliography command')
bib_names = []
custom_bib_names = [n for n in bib_names
if n not in KNOWN_LSSTTEXMF_BIB_NAMES]
# Read custom bibliographies.
custom_bibs = []
for custom_bib_name in custom_bib_names:
custom_bib_path = os.path.join(
self._root_dir,
custom_bib_name + '.bib'
)
if not os.path.exists(custom_bib_path):
self._logger.warning('Could not find bibliography %r',
custom_bib_path)
continue
with open(custom_bib_path, 'r') as file_handle:
custom_bibs.append(file_handle.read())
if len(custom_bibs) > 0:
custom_bibtex = '\n\n'.join(custom_bibs)
else:
custom_bibtex = None
# Get the combined pybtex bibliography
db = get_bibliography(bibtex=custom_bibtex)
self._bib_db = db | python | {
"resource": ""
} |
q265031 | LsstLatexDoc._parse_revision_date | validation | def _parse_revision_date(self):
r"""Parse the ``\date`` command, falling back to getting the
most recent Git commit date and the current datetime.
Result is available from the `revision_datetime` attribute.
"""
doc_datetime = None
# First try to parse the \date command in the latex.
# \date is ignored for draft documents.
if not self.is_draft:
date_command = LatexCommand(
'date',
{'name': 'content', 'required': True, 'bracket': '{'})
try:
parsed = next(date_command.parse(self._tex))
command_content = parsed['content'].strip()
except StopIteration:
command_content = None
self._logger.warning('lsstdoc has no date command')
# Try to parse a date from the \date command
if command_content is not None and command_content != r'\today':
try:
doc_datetime = datetime.datetime.strptime(command_content,
'%Y-%m-%d')
# Assume LSST project time (Pacific)
project_tz = timezone('US/Pacific')
localized_datetime = project_tz.localize(doc_datetime)
# Normalize to UTC
doc_datetime = localized_datetime.astimezone(pytz.utc)
self._revision_datetime_source = 'tex'
except ValueError:
self._logger.warning('Could not parse a datetime from '
'lsstdoc date command: %r',
command_content)
# Fallback to getting the datetime from Git
if doc_datetime is None:
content_extensions = ('tex', 'bib', 'pdf', 'png', 'jpg')
try:
doc_datetime = get_content_commit_date(
content_extensions,
root_dir=self._root_dir)
self._revision_datetime_source = 'git'
except RuntimeError:
self._logger.warning('Could not get a datetime from the Git '
'repository at %r',
self._root_dir)
# Final fallback to the current datetime
if doc_datetime is None:
doc_datetime = pytz.utc.localize(datetime.datetime.now())
self._revision_datetime_source = 'now'
self._datetime = doc_datetime | python | {
"resource": ""
} |
q265032 | LsstLatexDoc.build_jsonld | validation | def build_jsonld(self, url=None, code_url=None, ci_url=None,
readme_url=None, license_id=None):
"""Create a JSON-LD representation of this LSST LaTeX document.
Parameters
----------
url : `str`, optional
URL where this document is published to the web. Prefer
the LSST the Docs URL if possible.
Example: ``'https://ldm-151.lsst.io'``.
code_url : `str`, optional
Path to the document's repository, typically on GitHub.
Example: ``'https://github.com/lsst/LDM-151'``.
ci_url : `str`, optional
Path to the continuous integration service dashboard for this
document's repository.
Example: ``'https://travis-ci.org/lsst/LDM-151'``.
readme_url : `str`, optional
URL to the document repository's README file. Example:
``https://raw.githubusercontent.com/lsst/LDM-151/master/README.rst``.
license_id : `str`, optional
License identifier, if known. The identifier should be from the
listing at https://spdx.org/licenses/. Example: ``CC-BY-4.0``.
Returns
-------
jsonld : `dict`
JSON-LD-formatted dictionary.
"""
jsonld = {
'@context': [
"https://raw.githubusercontent.com/codemeta/codemeta/2.0-rc/"
"codemeta.jsonld",
"http://schema.org"],
'@type': ['Report', 'SoftwareSourceCode'],
'language': 'TeX',
'reportNumber': self.handle,
'name': self.plain_title,
'description': self.plain_abstract,
'author': [{'@type': 'Person', 'name': author_name}
for author_name in self.plain_authors],
# This is a datetime.datetime, not a string. When writing to a file,
# convert it to an ISO 8601 string.
'dateModified': self.revision_datetime
}
try:
jsonld['articleBody'] = self.plain_content
jsonld['fileFormat'] = 'text/plain' # MIME type of articleBody
except RuntimeError:
# raised by pypandoc when it can't convert the tex document
self._logger.exception('Could not convert latex body to plain '
'text for articleBody.')
self._logger.warning('Falling back to tex source for articleBody')
jsonld['articleBody'] = self._tex
jsonld['fileFormat'] = 'text/plain' # no mimetype for LaTeX?
if url is not None:
jsonld['@id'] = url
jsonld['url'] = url
else:
# Fallback to using the document handle as the ID. This isn't
# entirely ideal from a linked data perspective.
jsonld['@id'] = self.handle
if code_url is not None:
jsonld['codeRepository'] = code_url
if ci_url is not None:
jsonld['contIntegration'] = ci_url
if readme_url is not None:
jsonld['readme'] = readme_url
if license_id is not None:
jsonld['license_id'] = license_id
return jsonld | python | {
"resource": ""
} |
q265033 | PostgresDB.rename | validation | def rename(self, from_name, to_name):
"""Renames an existing database."""
log.info('renaming database from %s to %s' % (from_name, to_name))
self._run_stmt('alter database %s rename to %s' % (from_name, to_name)) | python | {
"resource": ""
} |
q265034 | PostgresDB.available | validation | def available(self, timeout=5):
"""Returns True if database server is running, False otherwise."""
host = self._connect_args['host']
port = self._connect_args['port']
try:
sock = socket.create_connection((host, port), timeout=timeout)
sock.close()
return True
except socket.error:
pass
return False | python | {
"resource": ""
} |
q265035 | PostgresDB.dump | validation | def dump(self, name, filename):
"""
Saves the state of a database to a file.
Parameters
----------
name: str
the database to be backed up.
filename: str
path to a file where database backup will be written.
"""
if not self.exists(name):
raise DatabaseError('database %s does not exist!' % name)
log.info('dumping %s to %s' % (name, filename))
self._run_cmd('pg_dump', '--verbose', '--blobs', '--format=custom',
'--file=%s' % filename, name) | python | {
"resource": ""
} |
q265036 | PostgresDB.restore | validation | def restore(self, name, filename):
"""
Loads state of a backup file to a database.
Note
----
If database name does not exist, it will be created.
Parameters
----------
name: str
the database to which backup will be restored.
filename: str
path to a file contain a postgres database backup.
"""
if not self.exists(name):
self.create(name)
else:
log.warning('overwriting contents of database %s' % name)
log.info('restoring %s from %s' % (name, filename))
self._run_cmd('pg_restore', '--verbose', '--dbname=%s' % name, filename) | python | {
"resource": ""
} |
q265037 | PostgresDB.connection_dsn | validation | def connection_dsn(self, name=None):
"""
Provides a connection string for database.
Parameters
----------
name: str, optional
an override database name for the connection string.
Returns
-------
str: the connection string (e.g. 'dbname=db1 user=user1 host=localhost port=5432')
"""
return ' '.join("%s=%s" % (param, value) for param, value in self._connect_options(name)) | python | {
"resource": ""
} |
q265038 | PostgresDB.connection_url | validation | def connection_url(self, name=None):
"""
Provides a connection string for database as a sqlalchemy compatible URL.
NB - this doesn't include special arguments related to SSL connectivity (which are outside the scope
of the connection URL format).
Parameters
----------
name: str, optional
an override database name for the connection string.
Returns
-------
str: the connection URL (e.g. postgresql://user1@localhost:5432/db1)
"""
return 'postgresql://{user}@{host}:{port}/{dbname}'.format(**{k: v for k, v in self._connect_options(name)}) | python | {
"resource": ""
} |
q265039 | PostgresDB.shell | validation | def shell(self, expect=pexpect):
"""
Connects the database client shell to the database.
Parameters
----------
expect: module, optional
a pexpect-compatible module used to drive the interactive psql
session (default: pexpect).
"""
dsn = self.connection_dsn()
log.debug('connection string: %s' % dsn)
child = expect.spawn('psql "%s"' % dsn)
if self._connect_args['password'] is not None:
child.expect('Password: ')
child.sendline(self._connect_args['password'])
child.interact() | python | {
"resource": ""
} |
q265040 | PostgresDB.settings | validation | def settings(self):
"""Returns settings from the server."""
stmt = "select {fields} from pg_settings".format(fields=', '.join(SETTINGS_FIELDS))
settings = []
for row in self._iter_results(stmt):
row['setting'] = self._vartype_map[row['vartype']](row['setting'])
settings.append(Settings(**row))
return settings | python | {
"resource": ""
} |
q265041 | Food.breakfast | validation | def breakfast(self, message="Breakfast is ready", shout: bool = False):
"""Say something in the morning"""
return self.helper.output(message, shout) | python | {
"resource": ""
} |
q265042 | Food.lunch | validation | def lunch(self, message="Time for lunch", shout: bool = False):
"""Say something in the afternoon"""
return self.helper.output(message, shout) | python | {
"resource": ""
} |
q265043 | Food.dinner | validation | def dinner(self, message="Dinner is served", shout: bool = False):
"""Say something in the evening"""
return self.helper.output(message, shout) | python | {
"resource": ""
} |
q265044 | main | validation | def main():
"""Command line entrypoint to reduce technote metadata.
"""
parser = argparse.ArgumentParser(
description='Discover and ingest metadata from document sources, '
'including lsstdoc-based LaTeX documents and '
'reStructuredText-based technotes. Metadata can be '
'upserted into the LSST Projectmeta MongoDB.')
parser.add_argument(
'--ltd-product',
dest='ltd_product_url',
help='URL of an LSST the Docs product '
'(https://keeper.lsst.codes/products/<slug>). If provided, '
'only this document will be ingested.')
parser.add_argument(
'--github-token',
help='GitHub personal access token.')
parser.add_argument(
'--mongodb-uri',
help='MongoDB connection URI. If provided, metadata will be loaded '
'into the Projectmeta database. Omit this argument to just '
'test the ingest pipeline.')
parser.add_argument(
'--mongodb-db',
default='lsstprojectmeta',
help='Name of MongoDB database')
parser.add_argument(
'--mongodb-collection',
default='resources',
help='Name of the MongoDB collection for projectmeta resources')
args = parser.parse_args()
# Configure the root logger
stream_handler = logging.StreamHandler()
stream_formatter = logging.Formatter(
'%(asctime)s %(levelname)8s %(name)s | %(message)s')
stream_handler.setFormatter(stream_formatter)
root_logger = logging.getLogger()
root_logger.addHandler(stream_handler)
root_logger.setLevel(logging.WARNING)
# Configure app logger
app_logger = logging.getLogger('lsstprojectmeta')
app_logger.setLevel(logging.DEBUG)
if args.mongodb_uri is not None:
mongo_client = AsyncIOMotorClient(args.mongodb_uri, ssl=True)
collection = mongo_client[args.mongodb_db][args.mongodb_collection]
else:
collection = None
loop = asyncio.get_event_loop()
if args.ltd_product_url is not None:
# Run single technote
loop.run_until_complete(run_single_ltd_doc(args.ltd_product_url,
args.github_token,
collection))
else:
# Run bulk technote processing
loop.run_until_complete(run_bulk_etl(args.github_token,
collection)) | python | {
"resource": ""
} |
q265045 | process_ltd_doc_products | validation | async def process_ltd_doc_products(session, product_urls, github_api_token,
mongo_collection=None):
"""Run a pipeline to process extract, transform, and load metadata for
multiple LSST the Docs-hosted projects
Parameters
----------
session : `aiohttp.ClientSession`
Your application's aiohttp client session.
See http://aiohttp.readthedocs.io/en/stable/client.html.
product_urls : `list` of `str`
List of LSST the Docs product URLs.
github_api_token : `str`
A GitHub personal API token. See the `GitHub personal access token
guide`_.
mongo_collection : `motor.motor_asyncio.AsyncIOMotorCollection`, optional
MongoDB collection. This should be the common MongoDB collection for
LSST projectmeta JSON-LD records.
"""
tasks = [asyncio.ensure_future(
process_ltd_doc(session, github_api_token,
product_url,
mongo_collection=mongo_collection))
for product_url in product_urls]
await asyncio.gather(*tasks) | python | {
"resource": ""
} |
q265046 | process_ltd_doc | validation | async def process_ltd_doc(session, github_api_token, ltd_product_url,
mongo_collection=None):
"""Ingest any kind of LSST document hosted on LSST the Docs from its
source.
Parameters
----------
session : `aiohttp.ClientSession`
Your application's aiohttp client session.
See http://aiohttp.readthedocs.io/en/stable/client.html.
github_api_token : `str`
A GitHub personal API token. See the `GitHub personal access token
guide`_.
ltd_product_url : `str`
URL of the technote's product resource in the LTD Keeper API.
mongo_collection : `motor.motor_asyncio.AsyncIOMotorCollection`, optional
MongoDB collection. This should be the common MongoDB collection for
LSST projectmeta JSON-LD records. If provided, the JSON-LD is upserted
into the MongoDB collection.
Returns
-------
metadata : `dict`
JSON-LD-formatted dictionary.
.. _GitHub personal access token guide: https://ls.st/41d
"""
logger = logging.getLogger(__name__)
ltd_product_data = await get_ltd_product(session, url=ltd_product_url)
# Ensure the LTD product is a document
product_name = ltd_product_data['slug']
doc_handle_match = DOCUMENT_HANDLE_PATTERN.match(product_name)
if doc_handle_match is None:
logger.debug('%s is not a document repo', product_name)
return
# Figure out the format of the document by probing for metadata files.
# reStructuredText-based Sphinx documents have metadata.yaml file.
try:
return await process_sphinx_technote(session,
github_api_token,
ltd_product_data,
mongo_collection=mongo_collection)
except NotSphinxTechnoteError:
# Catch error so we can try the next format
logger.debug('%s is not a Sphinx-based technote.', product_name)
except Exception:
# Something bad happened trying to process the technote.
# Log and just move on.
logger.exception('Unexpected error trying to process %s', product_name)
return
# Try interpreting it as a Lander page with a /metadata.jsonld document
try:
return await process_lander_page(session,
github_api_token,
ltd_product_data,
mongo_collection=mongo_collection)
except NotLanderPageError:
# Catch error so we can try the next format
logger.debug('%s is not a Lander page with a metadata.jsonld file.',
product_name)
except Exception:
# Something bad happened; log and move on
logger.exception('Unexpected error trying to process %s', product_name)
return | python | {
"resource": ""
} |
q265047 | decorator | validation | def decorator(decorator_func):
"""Allows a decorator to be called with or without keyword arguments."""
assert callable(decorator_func), type(decorator_func)
def _decorator(func=None, **kwargs):
assert func is None or callable(func), type(func)
if func:
return decorator_func(func, **kwargs)
else:
def _decorator_helper(func):
return decorator_func(func, **kwargs)
return _decorator_helper
return _decorator | python | {
"resource": ""
} |
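A worked example of both call styles this decorator enables, using a hypothetical `tag` decorator:

```python
@decorator
def tag(func, label='default'):
    func.label = label
    return func

@tag                  # bare form: no arguments
def first():
    pass

@tag(label='custom')  # keyword-argument form
def second():
    pass

print(first.label, second.label)  # -> default custom
```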
q265048 | get_installation_token | validation | def get_installation_token(installation_id, integration_jwt):
"""Create a GitHub token for an integration installation.
Parameters
----------
installation_id : `int`
Installation ID. This is available in the URL of the integration's
**installation** page.
integration_jwt : `bytes`
The integration's JSON Web Token (JWT). You can create this with
`create_jwt`.
Returns
-------
token_obj : `dict`
GitHub token object. Includes the fields:
- ``token``: the token string itself.
- ``expires_at``: date time string when the token expires.
Example
-------
The typical workflow for authenticating to an integration installation is:
.. code-block:: python
from dochubadapter.github import auth
jwt = auth.create_jwt(integration_id, private_key_path)
token_obj = auth.get_installation_token(installation_id, jwt)
print(token_obj['token'])
Notes
-----
See
https://developer.github.com/early-access/integrations/authentication/#as-an-installation
for more information
"""
api_root = 'https://api.github.com'
url = '{api_root}/installations/{id_:d}/access_tokens'.format(
api_root=api_root,
id_=installation_id)
headers = {
'Authorization': 'Bearer {0}'.format(integration_jwt.decode('utf-8')),
'Accept': 'application/vnd.github.machine-man-preview+json'
}
resp = requests.post(url, headers=headers)
resp.raise_for_status()
return resp.json() | python | {
"resource": ""
} |
q265049 | create_jwt | validation | def create_jwt(integration_id, private_key_path):
"""Create a JSON Web Token to authenticate a GitHub Integration or
installation.
Parameters
----------
integration_id : `int`
Integration ID. This is available from the GitHub integration's
homepage.
private_key_path : `str`
Path to the integration's private key (a ``.pem`` file).
Returns
-------
jwt : `bytes`
JSON Web Token that is good for 9 minutes.
Notes
-----
The JWT is encoded with the RS256 algorithm. It includes a payload with
fields:
- ``'iat'``: The current time, as an `int` timestamp.
- ``'exp'``: Expiration time, as an `int` timestamp. The expiration
time is set to 9 minutes in the future (maximum allowance is 10 minutes).
- ``'iss'``: The integration ID (`int`).
For more information, see
https://developer.github.com/early-access/integrations/authentication/.
"""
integration_id = int(integration_id)
with open(private_key_path, 'rb') as f:
cert_bytes = f.read()
now = datetime.datetime.now()
expiration_time = now + datetime.timedelta(minutes=9)
payload = {
# Issued at time
'iat': int(now.timestamp()),
# JWT expiration time (10 minute maximum)
'exp': int(expiration_time.timestamp()),
# Integration's GitHub identifier
'iss': integration_id
}
return jwt.encode(payload, cert_bytes, algorithm='RS256') | python | {
"resource": ""
} |
q265050 | get_macros | validation | def get_macros(tex_source):
r"""Get all macro definitions from TeX source, supporting multiple
declaration patterns.
Parameters
----------
tex_source : `str`
TeX source content.
Returns
-------
macros : `dict`
Keys are macro names (including leading ``\``) and values are the
content (as `str`) of the macros.
Notes
-----
This function uses the following functions to scrape macros of different
types:
- `get_def_macros`
- `get_newcommand_macros`
This macro scraping has the following caveats:
- Macro definition (including content) must all occur on one line.
- Macros with arguments are not supported.
"""
macros = {}
macros.update(get_def_macros(tex_source))
macros.update(get_newcommand_macros(tex_source))
return macros | python | {
"resource": ""
} |
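A sketch combining the scraper with `replace_macros` from earlier in this set; the sample source is made up, and it assumes the module's `DEF_PATTERN`-based scraping works as documented:

```python
sample = "\n".join([
    r"\def\product{Data Management}",
    r"\newcommand{\handle}{LDM-151}",
])
macros = get_macros(sample)
# {'\\product': 'Data Management', '\\handle': 'LDM-151'}
print(replace_macros(r"\handle: the \product\ plan", macros))
# -> 'LDM-151: the Data Management plan'
```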
q265051 | get_def_macros | validation | def get_def_macros(tex_source):
r"""Get all ``\def`` macro definition from TeX source.
Parameters
----------
tex_source : `str`
TeX source content.
Returns
-------
macros : `dict`
Keys are macro names (including leading ``\``) and values are the
content (as `str`) of the macros.
Notes
-----
``\def`` macros with arguments are not supported.
"""
macros = {}
for match in DEF_PATTERN.finditer(tex_source):
macros[match.group('name')] = match.group('content')
return macros | python | {
"resource": ""
} |
q265052 | get_newcommand_macros | validation | def get_newcommand_macros(tex_source):
r"""Get all ``\newcommand`` macro definition from TeX source.
Parameters
----------
tex_source : `str`
TeX source content.
Returns
-------
macros : `dict`
Keys are macro names (including leading ``\``) and values are the
content (as `str`) of the macros.
Notes
-----
``\newcommand`` macros with arguments are not supported.
"""
macros = {}
command = LatexCommand(
'newcommand',
{'name': 'name', 'required': True, 'bracket': '{'},
{'name': 'content', 'required': True, 'bracket': '{'})
for macro in command.parse(tex_source):
macros[macro['name']] = macro['content']
return macros | python | {
"resource": ""
} |
q265053 | load | validation | def load(directory_name, module_name):
"""Try to load and return a module
Will add DIRECTORY_NAME to sys.path and tries to import MODULE_NAME.
For example:
load("~/.yaz", "yaz_extension")
"""
directory_name = os.path.expanduser(directory_name)
if os.path.isdir(directory_name) and directory_name not in sys.path:
sys.path.append(directory_name)
try:
return importlib.import_module(module_name)
except ImportError:
pass | python | {
"resource": ""
} |
q265054 | make_aware | validation | def make_aware(value, timezone):
"""
Makes a naive datetime.datetime in a given time zone aware.
"""
if hasattr(timezone, 'localize') and value not in (datetime.datetime.min, datetime.datetime.max):
# available for pytz time zones
return timezone.localize(value, is_dst=None)
else:
# may be wrong around DST changes
return value.replace(tzinfo=timezone) | python | {
"resource": ""
} |
q265055 | make_naive | validation | def make_naive(value, timezone):
"""
Makes an aware datetime.datetime naive in a given time zone.
"""
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# available for pytz time zones
value = timezone.normalize(value)
return value.replace(tzinfo=None) | python | {
"resource": ""
} |
q265056 | Schedule.to_timezone | validation | def to_timezone(self, dt):
"""Converts a datetime to the timezone of this Schedule."""
if timezone.is_aware(dt):
return dt.astimezone(self.timezone)
else:
return timezone.make_aware(dt, self.timezone) | python | {
"resource": ""
} |
q265057 | Schedule.next_interval | validation | def next_interval(self, after=None):
"""Returns the next Period this event is in effect, or None if the event
has no remaining periods."""
if after is None:
after = timezone.now()
after = self.to_timezone(after)
return next(self.intervals(range_start=after), None) | python | {
"resource": ""
} |
q265058 | _ScheduleRecurring._daily_periods | validation | def _daily_periods(self, range_start, range_end):
"""Returns an iterator of Period tuples for every day this event is in effect, between range_start
and range_end."""
specific = set(self.exceptions.keys())
return heapq.merge(self.exception_periods(range_start, range_end), *[
sched.daily_periods(range_start=range_start, range_end=range_end, exclude_dates=specific)
for sched in self._recurring_schedules
]) | python | {
"resource": ""
} |
q265059 | _ScheduleRecurring.intervals | validation | def intervals(self, range_start=datetime.datetime.min, range_end=datetime.datetime.max):
"""Returns an iterator of Period tuples for continuous stretches of time during
which this event is in effect, between range_start and range_end."""
# At the moment the algorithm works on periods split by calendar day, one at a time,
# merging them if they're continuous; to avoid looping infinitely for infinitely long
# periods, it splits periods as soon as they reach 60 days.
# This algorithm could likely be improved to get rid of this restriction and improve
# efficiency, so code should not rely on this behaviour.
current_period = None
max_continuous_days = 60
range_start = self.to_timezone(range_start)
range_end = self.to_timezone(range_end)
for period in self._daily_periods(range_start.date(), range_end.date()):
if period.end < range_start or period.start > range_end:
continue
if current_period is None:
current_period = period
else:
if ( ((period.start < current_period.end)
or (period.start - current_period.end) <= datetime.timedelta(minutes=1))
and (current_period.end - current_period.start) < datetime.timedelta(days=max_continuous_days)):
# Merge
current_period = Period(current_period.start, period.end)
else:
yield current_period
current_period = period
if current_period:
yield current_period | python | {
"resource": ""
} |
q265060 | RecurringScheduleComponent.includes | validation | def includes(self, query_date, query_time=None):
"""Does this schedule include the provided time?
query_date and query_time are date and time objects, interpreted
in this schedule's timezone"""
if self.start_date and query_date < self.start_date:
return False
if self.end_date and query_date > self.end_date:
return False
if query_date.weekday() not in self.weekdays:
return False
if not query_time:
return True
if query_time >= self.period.start and query_time <= self.period.end:
return True
return False | python | {
"resource": ""
} |
q265061 | RecurringScheduleComponent.daily_periods | validation | def daily_periods(self, range_start=datetime.date.min, range_end=datetime.date.max, exclude_dates=tuple()):
"""Returns an iterator of Period tuples for every day this schedule is in effect, between range_start
and range_end."""
tz = self.timezone
period = self.period
weekdays = self.weekdays
current_date = max(range_start, self.start_date)
end_date = range_end
if self.end_date:
end_date = min(end_date, self.end_date)
while current_date <= end_date:
if current_date.weekday() in weekdays and current_date not in exclude_dates:
yield Period(
tz.localize(datetime.datetime.combine(current_date, period.start)),
tz.localize(datetime.datetime.combine(current_date, period.end))
)
current_date += datetime.timedelta(days=1) | python | {
"resource": ""
} |
q265062 | RecurringScheduleComponent.period | validation | def period(self):
"""A Period tuple representing the daily start and end time."""
start_time = self.root.findtext('daily_start_time')
if start_time:
return Period(text_to_time(start_time), text_to_time(self.root.findtext('daily_end_time')))
return Period(datetime.time(0, 0), datetime.time(23, 59)) | python | {
"resource": ""
} |
q265063 | RecurringScheduleComponent.weekdays | validation | def weekdays(self):
"""A set of integers representing the weekdays the schedule recurs on,
with Monday = 0 and Sunday = 6."""
if not self.root.xpath('days'):
return set(range(7))
return set(int(d) - 1 for d in self.root.xpath('days/day/text()')) | python | {
"resource": ""
} |
q265064 | temp_db | validation | def temp_db(db, name=None):
"""
A context manager that creates a temporary database.
Useful for automated tests.
Parameters
----------
db: object
a preconfigured DB object
name: str, optional
name of the database to be created. (default: globally unique name)
"""
if name is None:
name = temp_name()
db.create(name)
if not db.exists(name):
raise DatabaseError('failed to create database %s!' % name)
try:
yield name
finally:
db.drop(name)
if db.exists(name):
raise DatabaseError('failed to drop database %s!' % name)
"resource": ""
} |
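Typical use in an automated test, assuming a configured `PostgresDB` instance named `db` from this module (a sketch, not runnable on its own):

```python
with temp_db(db) as name:
    # run migrations or integration tests against the throwaway database
    print('using temporary database', name)
# on exit the database is dropped, even if the block raised
```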
q265065 | _download_text | validation | async def _download_text(url, session):
"""Asynchronously request a URL and get the encoded text content of the
body.
Parameters
----------
url : `str`
URL to download.
session : `aiohttp.ClientSession`
An open aiohttp session.
Returns
-------
content : `str`
Content downloaded from the URL.
"""
logger = logging.getLogger(__name__)
async with session.get(url) as response:
# aiohttp decodes the content to a Python string
logger.info('Downloading %r', url)
return await response.text() | python | {
"resource": ""
} |
q265066 | _download_lsst_bibtex | validation | async def _download_lsst_bibtex(bibtex_names):
"""Asynchronously download a set of lsst-texmf BibTeX bibliographies from
GitHub.
Parameters
----------
bibtex_names : sequence of `str`
Names of lsst-texmf BibTeX files to download. For example:
.. code-block:: python
['lsst', 'lsst-dm', 'refs', 'books', 'refs_ads']
Returns
-------
bibtexs : `list` of `str`
List of BibTeX file content, in the same order as ``bibtex_names``.
"""
blob_url_template = (
'https://raw.githubusercontent.com/lsst/lsst-texmf/master/texmf/'
'bibtex/bib/{name}.bib'
)
urls = [blob_url_template.format(name=name) for name in bibtex_names]
tasks = []
async with ClientSession() as session:
for url in urls:
task = asyncio.ensure_future(_download_text(url, session))
tasks.append(task)
return await asyncio.gather(*tasks) | python | {
"resource": ""
} |
q265067 | get_lsst_bibtex | validation | def get_lsst_bibtex(bibtex_filenames=None):
"""Get content of lsst-texmf bibliographies.
BibTeX content is downloaded from GitHub (``master`` branch of
https://github.com/lsst/lsst-texmf) or retrieved from an in-memory cache.
Parameters
----------
bibtex_filenames : sequence of `str`, optional
List of lsst-texmf BibTeX files to retrieve. These can be the filenames
of lsst-bibtex files (for example, ``['lsst.bib', 'lsst-dm.bib']``)
or names without an extension (``['lsst', 'lsst-dm']``). The default
(recommended) is to get *all* lsst-texmf bibliographies:
.. code-block:: python
['lsst', 'lsst-dm', 'refs', 'books', 'refs_ads']
Returns
-------
bibtex : `dict`
Dictionary with keys that are bibtex file names (such as ``'lsst'``,
``'lsst-dm'``). Values are the corresponding bibtex file content
(`str`).
"""
logger = logging.getLogger(__name__)
if bibtex_filenames is None:
# Default lsst-texmf bibliography files
bibtex_names = KNOWN_LSSTTEXMF_BIB_NAMES
else:
# Sanitize filenames (remove extensions, path)
bibtex_names = []
for filename in bibtex_filenames:
name = os.path.basename(os.path.splitext(filename)[0])
if name not in KNOWN_LSSTTEXMF_BIB_NAMES:
logger.warning('%r is not a known lsst-texmf bib file',
name)
continue
bibtex_names.append(name)
# names of bibtex files not in cache
uncached_names = [name for name in bibtex_names
if name not in _LSSTTEXMF_BIB_CACHE]
if len(uncached_names) > 0:
# Download bibtex and put into the cache
loop = asyncio.get_event_loop()
future = asyncio.ensure_future(_download_lsst_bibtex(uncached_names))
loop.run_until_complete(future)
for name, text in zip(uncached_names, future.result()):
_LSSTTEXMF_BIB_CACHE[name] = text
return {name: _LSSTTEXMF_BIB_CACHE[name] for name in bibtex_names} | python | {
"resource": ""
} |
q265068 | get_bibliography | validation | def get_bibliography(lsst_bib_names=None, bibtex=None):
"""Make a pybtex BibliographyData instance from standard lsst-texmf
bibliography files and user-supplied bibtex content.
Parameters
----------
lsst_bib_names : sequence of `str`, optional
Names of lsst-texmf BibTeX files to include. For example:
.. code-block:: python
['lsst', 'lsst-dm', 'refs', 'books', 'refs_ads']
Default is `None`, which includes all lsst-texmf bibtex files.
bibtex : `str`
BibTeX source content not included in lsst-texmf. This can be content
from a ``local.bib`` file.
Returns
-------
bibliography : `pybtex.database.BibliographyData`
A pybtex bibliography database that includes all given sources:
lsst-texmf bibliographies and ``bibtex``.
"""
bibtex_data = get_lsst_bibtex(bibtex_filenames=lsst_bib_names)
# Parse with pybtex into BibliographyData instances
pybtex_data = [pybtex.database.parse_string(_bibtex, 'bibtex')
for _bibtex in bibtex_data.values()]
# Also parse local bibtex content
if bibtex is not None:
pybtex_data.append(pybtex.database.parse_string(bibtex, 'bibtex'))
# Merge BibliographyData
bib = pybtex_data[0]
if len(pybtex_data) > 1:
for other_bib in pybtex_data[1:]:
for key, entry in other_bib.entries.items():
bib.add_entry(key, entry)
return bib | python | {
"resource": ""
} |
q265069 | get_url_from_entry | validation | def get_url_from_entry(entry):
"""Get a usable URL from a pybtex entry.
Parameters
----------
entry : `pybtex.database.Entry`
A pybtex bibliography entry.
Returns
-------
url : `str`
Best available URL from the ``entry``.
Raises
------
NoEntryUrlError
Raised when no URL can be made from the bibliography entry.
Notes
-----
The order of priority is:
1. ``url`` field
2. ``ls.st`` URL from the handle for ``@docushare`` entries.
3. ``adsurl``
4. DOI
"""
if 'url' in entry.fields:
return entry.fields['url']
elif entry.type.lower() == 'docushare':
return 'https://ls.st/' + entry.fields['handle']
elif 'adsurl' in entry.fields:
return entry.fields['adsurl']
elif 'doi' in entry.fields:
return 'https://doi.org/' + entry.fields['doi']
else:
raise NoEntryUrlError() | python | {
"resource": ""
} |
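A quick sketch of the URL priority; the entry below is hypothetical and parsed with pybtex:

import pybtex.database

data = pybtex.database.parse_string("""
@misc{Hypothetical2020,
    title = {A hypothetical entry},
    adsurl = {https://ui.adsabs.harvard.edu/abs/2020arXiv},
    doi = {10.1000/xyz123}
}
""", 'bibtex')
entry = data.entries['Hypothetical2020']
print(get_url_from_entry(entry))  # prints the adsurl, which outranks the doi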
q265070 | get_authoryear_from_entry | validation | def get_authoryear_from_entry(entry, paren=False):
"""Get and format author-year text from a pybtex entry to emulate
natbib citations.
Parameters
----------
entry : `pybtex.database.Entry`
A pybtex bibliography entry.
paren : `bool`, optional
Whether to add parentheses around the year. Default is `False`.
Returns
-------
authoryear : `str`
The author-year citation text.
"""
def _format_last(person):
"""Reformat a pybtex Person into a last name.
Joins all parts of a last name and strips "{}" wrappers.
"""
return ' '.join([n.strip('{}') for n in person.last_names])
if len(entry.persons['author']) > 0:
# Grab author list
persons = entry.persons['author']
elif len(entry.persons['editor']) > 0:
# Grab editor list
persons = entry.persons['editor']
else:
raise AuthorYearError
try:
year = entry.fields['year']
except KeyError:
raise AuthorYearError
if paren and len(persons) == 1:
template = '{author} ({year})'
return template.format(author=_format_last(persons[0]),
year=year)
elif not paren and len(persons) == 1:
template = '{author} {year}'
return template.format(author=_format_last(persons[0]),
year=year)
elif paren and len(persons) == 2:
template = '{author1} and {author2} ({year})'
return template.format(author1=_format_last(persons[0]),
author2=_format_last(persons[1]),
year=year)
elif not paren and len(persons) == 2:
template = '{author1} and {author2} {year}'
return template.format(author1=_format_last(persons[0]),
author2=_format_last(persons[1]),
year=year)
elif not paren and len(persons) > 2:
template = '{author} et al {year}'
return template.format(author=_format_last(persons[0]),
year=year)
elif paren and len(persons) > 2:
template = '{author} et al ({year})'
return template.format(author=_format_last(persons[0]),
year=year) | python | {
"resource": ""
} |
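A quick sketch of the natbib-style output for a two-author entry (hypothetical names):

import pybtex.database

data = pybtex.database.parse_string("""
@article{Hypothetical2020,
    author = {Doe, Jane and Roe, Richard},
    title = {A hypothetical article},
    year = {2020}
}
""", 'bibtex')
entry = data.entries['Hypothetical2020']
print(get_authoryear_from_entry(entry))              # Doe and Roe 2020
print(get_authoryear_from_entry(entry, paren=True))  # Doe and Roe (2020)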
q265071 | process_sphinx_technote | validation | async def process_sphinx_technote(session, github_api_token, ltd_product_data,
mongo_collection=None):
"""Extract, transform, and load Sphinx-based technote metadata.
Parameters
----------
session : `aiohttp.ClientSession`
Your application's aiohttp client session.
See http://aiohttp.readthedocs.io/en/stable/client.html.
github_api_token : `str`
A GitHub personal API token. See the `GitHub personal access token
guide`_.
ltd_product_data : `dict`
Data for this technote from the LTD Keeper API
(``GET /products/<slug>``). Usually obtained via
`lsstprojectmeta.ltd.get_ltd_product`.
mongo_collection : `motor.motor_asyncio.AsyncIOMotorCollection`, optional
MongoDB collection. This should be the common MongoDB collection for
LSST projectmeta JSON-LD records. If provided, the JSON-LD is upserted
into the MongoDB collection.
Returns
-------
metadata : `dict`
JSON-LD-formatted dictionary.
Raises
------
NotSphinxTechnoteError
Raised when the LTD product cannot be interpreted as a Sphinx-based
technote project because it's missing a metadata.yaml file in its
GitHub repository. This implies that the LTD product *could* be of a
different format.
.. _GitHub personal access token guide: https://ls.st/41d
"""
logger = logging.getLogger(__name__)
github_url = ltd_product_data['doc_repo']
github_url = normalize_repo_root_url(github_url)
repo_slug = parse_repo_slug_from_url(github_url)
try:
metadata_yaml = await download_metadata_yaml(session, github_url)
except aiohttp.ClientResponseError as err:
# metadata.yaml not found; probably not a Sphinx technote
logger.debug('Tried to download %s\'s metadata.yaml, got status %d',
                 ltd_product_data['slug'], err.status)
raise NotSphinxTechnoteError()
# Extract data from the GitHub API
github_query = GitHubQuery.load('technote_repo')
github_variables = {
"orgName": repo_slug.owner,
"repoName": repo_slug.repo
}
github_data = await github_request(session, github_api_token,
query=github_query,
variables=github_variables)
try:
jsonld = reduce_technote_metadata(
github_url, metadata_yaml, github_data, ltd_product_data)
except Exception as exception:
message = "Issue building JSON-LD for technote %s"
logger.exception(message, github_url)
raise
if mongo_collection is not None:
await _upload_to_mongodb(mongo_collection, jsonld)
logger.info('Ingested technote %s into MongoDB', github_url)
return jsonld | python | {
"resource": ""
} |
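A hedged driver sketch; the 'sqr-000' slug and the token are placeholders, and get_ltd_product is assumed to come from lsstprojectmeta.ltd as referenced in the docstring:

import asyncio
import aiohttp

async def _run(github_api_token):
    async with aiohttp.ClientSession() as session:
        product = await get_ltd_product(session, slug='sqr-000')
        return await process_sphinx_technote(session, github_api_token,
                                             product)

jsonld = asyncio.get_event_loop().run_until_complete(_run('<github-token>'))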
q265072 | reduce_technote_metadata | validation | def reduce_technote_metadata(github_url, metadata, github_data,
ltd_product_data):
"""Reduce a technote project's metadata from multiple sources into a
single JSON-LD resource.
Parameters
----------
github_url : `str`
URL of the technote's GitHub repository.
metadata : `dict`
The parsed contents of ``metadata.yaml`` found in a technote's
repository.
github_data : `dict`
The contents of the ``technote_repo`` GitHub GraphQL API query.
ltd_product_data : `dict`
JSON dataset for the technote corresponding to the
``/products/<product>`` of LTD Keeper.
Returns
-------
metadata : `dict`
JSON-LD-formatted dictionary.
"""
repo_slug = parse_repo_slug_from_url(github_url)
# Initialize a schema.org/Report and schema.org/SoftwareSourceCode
# linked data resource
jsonld = {
'@context': [
"https://raw.githubusercontent.com/codemeta/codemeta/2.0-rc/"
"codemeta.jsonld",
"http://schema.org"],
'@type': ['Report', 'SoftwareSourceCode'],
'codeRepository': github_url
}
if 'url' in metadata:
url = metadata['url']
elif 'published_url' in ltd_product_data:
url = ltd_product_data['published_url']
else:
raise RuntimeError('No identifying url could be found: '
'{}'.format(github_url))
jsonld['@id'] = url
jsonld['url'] = url
if 'series' in metadata and 'serial_number' in metadata:
jsonld['reportNumber'] = '{series}-{serial_number}'.format(**metadata)
else:
raise RuntimeError('No reportNumber: {}'.format(github_url))
if 'doc_title' in metadata:
jsonld['name'] = metadata['doc_title']
if 'description' in metadata:
jsonld['description'] = metadata['description']
if 'authors' in metadata:
jsonld['author'] = [{'@type': 'Person', 'name': author_name}
for author_name in metadata['authors']]
if 'last_revised' in metadata:
# Prefer getting the 'last_revised' date from metadata.yaml
# since it's considered an override.
jsonld['dateModified'] = datetime.datetime.strptime(
metadata['last_revised'],
'%Y-%m-%d')
else:
# Fallback to parsing the date of the last commit to the
# default branch on GitHub (usually `master`).
try:
_repo_data = github_data['data']['repository']
_master_data = _repo_data['defaultBranchRef']
jsonld['dateModified'] = datetime.datetime.strptime(
_master_data['target']['committedDate'],
'%Y-%m-%dT%H:%M:%SZ')
except KeyError:
pass
try:
_license_data = github_data['data']['repository']['licenseInfo']
_spdxId = _license_data['spdxId']
if _spdxId is not None:
_spdx_url = 'https://spdx.org/licenses/{}.html'.format(_spdxId)
jsonld['license'] = _spdx_url
except KeyError:
pass
try:
# Find the README(|.md|.rst|*) file in the repo root
_master_data = github_data['data']['repository']['defaultBranchRef']
_files = _master_data['target']['tree']['entries']
for _node in _files:
filename = _node['name']
normalized_filename = filename.lower()
if normalized_filename.startswith('readme'):
readme_url = make_raw_content_url(repo_slug, 'master',
filename)
jsonld['readme'] = readme_url
break
except KeyError:
pass
# Assume Travis is the CI service (always true at the moment)
travis_url = 'https://travis-ci.org/{}'.format(repo_slug.full)
jsonld['contIntegration'] = travis_url
return jsonld | python | {
"resource": ""
} |
q265073 | download_metadata_yaml | validation | async def download_metadata_yaml(session, github_url):
"""Download the metadata.yaml file from a technote's GitHub repository.
"""
metadata_yaml_url = _build_metadata_yaml_url(github_url)
async with session.get(metadata_yaml_url) as response:
response.raise_for_status()
yaml_data = await response.text()
return yaml.safe_load(yaml_data) | python | {
"resource": ""
} |
q265074 | DayOneEntry.tz | validation | def tz(self):
"""Return the timezone. If none is set use system timezone"""
if not self._tz:
self._tz = tzlocal.get_localzone().zone
return self._tz | python | {
"resource": ""
} |
q265075 | DayOneEntry.time | validation | def time(self, t):
"""Convert any timestamp into a datetime and save as _time"""
_time = arrow.get(t).format('YYYY-MM-DDTHH:mm:ss')
self._time = datetime.datetime.strptime(_time, '%Y-%m-%dT%H:%M:%S') | python | {
"resource": ""
} |
q265076 | DayOneEntry.as_dict | validation | def as_dict(self):
"""Return a dict that represents the DayOneEntry"""
entry_dict = {}
entry_dict['UUID'] = self.uuid
entry_dict['Creation Date'] = self.time
entry_dict['Time Zone'] = self.tz
if self.tags:
entry_dict['Tags'] = self.tags
entry_dict['Entry Text'] = self.text
entry_dict['Starred'] = self.starred
entry_dict['Location'] = self.location
return entry_dict | python | {
"resource": ""
} |
q265077 | DayOne.save | validation | def save(self, entry, with_location=True, debug=False):
"""Saves a DayOneEntry as a plist"""
entry_dict = {}
if isinstance(entry, DayOneEntry):
# Get a dict of the DayOneEntry
entry_dict = entry.as_dict()
else:
entry_dict = entry
# Set the UUID
entry_dict['UUID'] = uuid.uuid4().hex
if with_location and not entry_dict['Location']:
entry_dict['Location'] = self.get_location()
# Do we have everything needed?
if not all((entry_dict['UUID'], entry_dict['Time Zone'],
entry_dict['Creation Date'], entry_dict['Entry Text'])):
print("You must provide: Time Zone, UUID, Creation Date, Entry Text")
return False
if debug is False:
file_path = self._file_path(entry_dict['UUID'])
plistlib.writePlist(entry_dict, file_path)
else:
plist = plistlib.writePlistToString(entry_dict)
print(plist)
return True | python | {
"resource": ""
} |
q265078 | DayOne._file_path | validation | def _file_path(self, uid):
"""Create and return full file path for DayOne entry"""
file_name = '%s.doentry' % (uid)
return os.path.join(self.dayone_journal_path, file_name) | python | {
"resource": ""
} |
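A hedged usage sketch for the DayOne classes above; the no-argument constructors and the Location dict content are assumptions based on how as_dict() and save() read their inputs:

entry = DayOneEntry()
entry.text = 'Wrote some code today.'
entry.tags = ['coding']
entry.starred = False
entry.location = {'Place Name': 'Home'}
entry.time = '2020-01-01 12:00'  # any timestamp arrow can parse
journal = DayOne()
journal.save(entry, with_location=False, debug=True)  # prints the plist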
q265079 | Collection.combine | validation | def combine(self, members, output_file, dimension=None, start_index=None, stop_index=None, stride=None):
""" Combine many files into a single file on disk. Defaults to using the 'time' dimension. """
nco = None
try:
nco = Nco()
except BaseException:
# This is not necessarily an import error (could be wrong PATH)
raise ImportError("NCO not found. The NCO python bindings are required to use 'Collection.combine'.")
if len(members) > 0 and hasattr(members[0], 'path'):
# A member DotDict was passed in; we only need the paths
members = [m.path for m in members]
options = ['-4'] # NetCDF4
options += ['-L', '3'] # Level 3 compression
options += ['-h'] # Don't append to the history global attribute
if dimension is not None:
if start_index is None:
start_index = 0
if stop_index is None:
stop_index = ''
if stride is None:
stride = 1
options += ['-d', '{0},{1},{2},{3}'.format(dimension, start_index, stop_index, stride)]
nco.ncrcat(input=members, output=output_file, options=options) | python | {
"resource": ""
} |
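A hedged usage sketch for Collection.combine; the no-argument construction and the member file names are placeholders, and the NCO binaries plus the nco Python bindings must be installed:

collection = Collection()  # construction details are not shown in this snippet
collection.combine(
    members=['member_000.nc', 'member_001.nc'],
    output_file='combined.nc',
    dimension='time',
)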
q265080 | main | validation | def main(argv=None, white_list=None, load_yaz_extension=True):
"""The entry point for a yaz script
This will almost always be called from a python script in
the following manner:
if __name__ == "__main__":
yaz.main()
This function will perform the following steps:
1. It will load any additional python code from
the yaz_extension python module located in the
~/.yaz directory when LOAD_YAZ_EXTENSION is True
and the yaz_extension module exists
2. It collects all yaz tasks and plugins. When WHITE_LIST
is a non-empty list, only the tasks and plugins located
therein will be considered
3. It will parse arguments from ARGV, or the command line
when ARGV is not given, resulting in a yaz task or a parser
help message.
4. When a suitable task is found, this task is executed. If
the task is part of a plugin, i.e. a class, that plugin is
initialized first, possibly causing other plugins to be
initialized as well if they are marked as
`@yaz.dependency`.
"""
assert argv is None or isinstance(argv, list), type(argv)
assert white_list is None or isinstance(white_list, list), type(white_list)
assert isinstance(load_yaz_extension, bool), type(load_yaz_extension)
argv = sys.argv if argv is None else argv
assert len(argv) > 0, len(argv)
if load_yaz_extension:
load("~/.yaz", "yaz_extension")
parser = Parser(prog=argv[0])
parser.add_task_tree(get_task_tree(white_list))
task, kwargs = parser.parse_arguments(argv)
if task:
try:
result = task(**kwargs)
# when the result is a boolean, exit with 0 (success) or 1 (failure)
if isinstance(result, bool):
code = 0 if result else 1
output = None
# when the result is an integer, exit with that integer value
elif isinstance(result, int):
code = result % 256
output = None
# otherwise exit with 0 (success) and print the result
else:
code = 0
output = result
# when yaz.Error occurs, exit with the given return code and print the error message
# when any other error occurs, let python handle the exception (i.e. exit(1) and print call stack)
except Error as error:
code = error.return_code
output = error
else:
# when no task is found to execute, exit with 1 (failure) and print the help text
code = 1
output = parser.format_help().rstrip()
if output is not None:
print(output)
sys.exit(code) | python | {
"resource": ""
} |
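A hedged end-to-end sketch; the greet task is hypothetical, and the --name option is assumed to be derived from the keyword argument by the parser:

import yaz

@yaz.task
def greet(name: str = "world"):
    return "Hello {}!".format(name)

if __name__ == "__main__":
    # Prints "Hello yaz!" and exits with code 0.
    yaz.main(argv=["prog", "greet", "--name", "yaz"], white_list=[greet])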
q265081 | get_task_tree | validation | def get_task_tree(white_list=None):
"""Returns a tree of Task instances
The tree is comprised of dictionaries containing strings for
keys and either dictionaries or Task instances for values.
When WHITE_LIST is given, only the tasks and plugins in this
list will become part of the task tree. The WHITE_LIST may
contain either strings, corresponding to the task of plugin
__qualname__, or, preferable, the WHITE_LIST contains
links to the task function or plugin class instead.
"""
assert white_list is None or isinstance(white_list, list), type(white_list)
if white_list is not None:
white_list = set(item if isinstance(item, str) else item.__qualname__ for item in white_list)
tree = dict((task.qualified_name, task)
for task
in _task_list.values()
if white_list is None or task.qualified_name in white_list)
plugins = get_plugin_list()
for plugin in [plugin for plugin in plugins.values() if white_list is None or plugin.__qualname__ in white_list]:
tasks = [func
for _, func
in inspect.getmembers(plugin)
if inspect.isfunction(func) and hasattr(func, "yaz_task_config")]
if len(tasks) == 0:
continue
node = tree
for name in plugin.__qualname__.split("."):
if name not in node:
node[name] = {}
node = node[name]
for func in tasks:
logger.debug("Found task %s", func)
node[func.__name__] = Task(plugin_class=plugin, func=func, config=func.yaz_task_config)
return tree | python | {
"resource": ""
} |
q265082 | task | validation | def task(func, **config):
"""Declare a function or method to be a Yaz task
@yaz.task
def talk(message: str = "Hello World!"):
return message
Or... group multiple tasks together
class Tools(yaz.Plugin):
@yaz.task
def say(self, message: str = "Hello World!"):
return message
@yaz.task(option__choices=["A", "B", "C"])
def choose(self, option: str = "A"):
return option
"""
if func.__name__ == func.__qualname__:
assert func.__qualname__ not in _task_list, "Cannot define the same task \"{}\" twice".format(func.__qualname__)
logger.debug("Found task %s", func)
_task_list[func.__qualname__] = Task(plugin_class=None, func=func, config=config)
else:
func.yaz_task_config = config
return func | python | {
"resource": ""
} |
q265083 | Task.get_parameters | validation | def get_parameters(self):
"""Returns a list of parameters"""
if self.plugin_class is None:
sig = inspect.signature(self.func)
for index, parameter in enumerate(sig.parameters.values()):
if parameter.kind not in [parameter.POSITIONAL_ONLY, parameter.KEYWORD_ONLY, parameter.POSITIONAL_OR_KEYWORD]:
raise RuntimeError("Task {} contains an unsupported {} parameter".format(parameter, parameter.kind))
yield parameter
else:
var_keyword_seen = set()
for cls in inspect.getmro(self.plugin_class):
if issubclass(cls, BasePlugin) and hasattr(cls, self.func.__name__):
func = getattr(cls, self.func.__name__)
logger.debug("Found method %s from class %s", func, cls)
var_keyword_found = False
sig = inspect.signature(func)
for index, parameter in enumerate(sig.parameters.values()):
if index == 0:
# skip "self" parameter
continue
if parameter.kind == inspect.Parameter.VAR_KEYWORD:
# found "**kwargs" parameter. we will continue to the next class in the mro
# to add any keyword parameters we have not yet used (i.e. whose name
# we have not yet seen)
var_keyword_found = True
continue
if parameter.kind in [parameter.POSITIONAL_ONLY, parameter.VAR_POSITIONAL]:
raise RuntimeError("Task {} contains an unsupported parameter \"{}\"".format(func, parameter))
if parameter.name not in var_keyword_seen:
var_keyword_seen.add(parameter.name)
logger.debug("Found parameter %s (%s)", parameter, parameter.kind)
yield parameter
# we only need to look at the next class in the mro
# when "**kwargs" is found
if not var_keyword_found:
break | python | {
"resource": ""
} |
q265084 | Task.get_configuration | validation | def get_configuration(self, key, default=None):
"""Returns the configuration for KEY"""
if key in self.config:
return self.config.get(key)
else:
return default | python | {
"resource": ""
} |
q265085 | get_plugin_instance | validation | def get_plugin_instance(plugin_class, *args, **kwargs):
"""Returns an instance of a fully initialized plugin class
Every plugin class is kept in a plugin cache, effectively making
every plugin into a singleton object.
When a plugin has a yaz.dependency decorator, it will be called
as well, before the instance is returned.
"""
assert issubclass(plugin_class, BasePlugin), type(plugin_class)
global _yaz_plugin_instance_cache
qualname = plugin_class.__qualname__
if qualname not in _yaz_plugin_instance_cache:
plugin_class = get_plugin_list()[qualname]
_yaz_plugin_instance_cache[qualname] = plugin = plugin_class(*args, **kwargs)
# find any yaz.dependency decorators, and call them when necessary
funcs = [func
for _, func
in inspect.getmembers(plugin)
if inspect.ismethod(func) and hasattr(func, "yaz_dependency_config")]
for func in funcs:
signature = inspect.signature(func)
assert all(parameter.kind is parameter.POSITIONAL_OR_KEYWORD and issubclass(parameter.annotation, BasePlugin) for parameter in signature.parameters.values()), "All parameters for {} must type hint to a BasePlugin".format(func)
func(*[get_plugin_instance(parameter.annotation)
for parameter
in signature.parameters.values()])
return _yaz_plugin_instance_cache[qualname] | python | {
"resource": ""
} |
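A hedged sketch of the dependency wiring described above; yaz.dependency is assumed to attach the yaz_dependency_config attribute this function looks for, and the plugin classes are hypothetical:

import yaz

class Database(yaz.Plugin):
    def query(self):
        return 42

class App(yaz.Plugin):
    @yaz.dependency
    def set_database(self, database: Database):
        self.database = database

app = get_plugin_instance(App)  # Database is instantiated and injected first
assert app.database.query() == 42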
q265086 | xml_to_json | validation | def xml_to_json(root):
"""Convert an Open511 XML document or document fragment to JSON.
Takes an lxml Element object. Returns a dict ready to be JSON-serialized."""
j = {}
if len(root) == 0: # Tag with no children, return str/int
return _maybe_intify(root.text)
if len(root) == 1 and root[0].tag.startswith('{' + NS_GML): # GML
return gml_to_geojson(root[0])
if root.tag == 'open511':
j['meta'] = {'version': root.get('version')}
for elem in root:
name = elem.tag
if name == 'link' and elem.get('rel'):
name = elem.get('rel') + '_url'
if name == 'self_url':
name = 'url'
if root.tag == 'open511':
j['meta'][name] = elem.get('href')
continue
elif name.startswith('{' + NS_PROTECTED):
name = '!' + name[name.index('}') + 1:]
elif name[0] == '{':
# Namespace!
name = '+' + name[name.index('}') + 1:]
if name in j:
continue # duplicate
elif elem.tag == 'link' and not elem.text:
j[name] = elem.get('href')
elif len(elem):
if name == 'grouped_events':
# An array of URLs
j[name] = [xml_link_to_json(child, to_dict=False) for child in elem]
elif name in ('attachments', 'media_files'):
# An array of JSON objects
j[name] = [xml_link_to_json(child, to_dict=True) for child in elem]
elif all((name == pluralize(child.tag) for child in elem)):
# A plural container, e.g. <somethings><something>..., serializes to a JSON array
j[name] = [xml_to_json(child) for child in elem]
else:
j[name] = xml_to_json(elem)
else:
if root.tag == 'open511' and name.endswith('s') and not elem.text:
# Special case: an empty e.g. <events /> container at the root level
# should be serialized to [], not null
j[name] = []
else:
j[name] = _maybe_intify(elem.text)
return j | python | {
"resource": ""
} |
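A quick sketch with lxml; the element is a minimal Open511-style fragment:

from lxml import etree

elem = etree.fromstring(
    '<event><status>ACTIVE</status><severity>MAJOR</severity></event>')
print(xml_to_json(elem))  # {'status': 'ACTIVE', 'severity': 'MAJOR'}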
q265087 | gml_to_geojson | validation | def gml_to_geojson(el):
"""Given an lxml Element of a GML geometry, returns a dict in GeoJSON format."""
if el.get('srsName') not in ('urn:ogc:def:crs:EPSG::4326', None):
if el.get('srsName') == 'EPSG:4326':
return _gmlv2_to_geojson(el)
else:
raise NotImplementedError("Unrecognized srsName %s" % el.get('srsName'))
tag = el.tag.replace('{%s}' % NS_GML, '')
if tag == 'Point':
coordinates = _reverse_gml_coords(el.findtext('{%s}pos' % NS_GML))[0]
elif tag == 'LineString':
coordinates = _reverse_gml_coords(el.findtext('{%s}posList' % NS_GML))
elif tag == 'Polygon':
coordinates = []
for ring in el.xpath('gml:exterior/gml:LinearRing/gml:posList', namespaces=NSMAP) \
+ el.xpath('gml:interior/gml:LinearRing/gml:posList', namespaces=NSMAP):
coordinates.append(_reverse_gml_coords(ring.text))
elif tag in ('MultiPoint', 'MultiLineString', 'MultiPolygon'):
single_type = tag[5:]
member_tag = single_type[0].lower() + single_type[1:] + 'Member'
coordinates = [
gml_to_geojson(member)['coordinates']
for member in el.xpath('gml:%s/gml:%s' % (member_tag, single_type), namespaces=NSMAP)
]
else:
raise NotImplementedError
return {
'type': tag,
'coordinates': coordinates
} | python | {
"resource": ""
} |
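A hedged sketch for a GML point in the urn:ogc EPSG:4326 (lat-lon) axis order; it assumes NS_GML is the standard GML namespace and that _reverse_gml_coords swaps each pair into GeoJSON's lon-lat order, as its name suggests:

from lxml import etree

gml = etree.fromstring(
    '<gml:Point xmlns:gml="http://www.opengis.net/gml" '
    'srsName="urn:ogc:def:crs:EPSG::4326">'
    '<gml:pos>45.5 -73.6</gml:pos>'
    '</gml:Point>')
print(gml_to_geojson(gml))  # {'type': 'Point', 'coordinates': [-73.6, 45.5]}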
q265088 | _gmlv2_to_geojson | validation | def _gmlv2_to_geojson(el):
"""Translates a deprecated GML 2.0 geometry to GeoJSON"""
tag = el.tag.replace('{%s}' % NS_GML, '')
if tag == 'Point':
coordinates = [float(c) for c in el.findtext('{%s}coordinates' % NS_GML).split(',')]
elif tag == 'LineString':
coordinates = [
[float(x) for x in pair.split(',')]
for pair in el.findtext('{%s}coordinates' % NS_GML).split(' ')
]
elif tag == 'Polygon':
coordinates = []
for ring in el.xpath('gml:outerBoundaryIs/gml:LinearRing/gml:coordinates', namespaces=NSMAP) \
+ el.xpath('gml:innerBoundaryIs/gml:LinearRing/gml:coordinates', namespaces=NSMAP):
coordinates.append([
[float(x) for x in pair.split(',')]
for pair in ring.text.split(' ')
])
elif tag in ('MultiPoint', 'MultiLineString', 'MultiPolygon', 'MultiCurve'):
if tag == 'MultiCurve':
single_type = 'LineString'
member_tag = 'curveMember'
else:
single_type = tag[5:]
member_tag = single_type[0].lower() + single_type[1:] + 'Member'
coordinates = [
gml_to_geojson(member)['coordinates']
for member in el.xpath('gml:%s/gml:%s' % (member_tag, single_type), namespaces=NSMAP)
]
else:
raise NotImplementedError
return {
'type': tag,
'coordinates': coordinates
} | python | {
"resource": ""
} |
q265089 | deparagraph | validation | def deparagraph(element, doc):
"""Panflute filter function that converts content wrapped in a Para to
Plain.
Use this filter with pandoc as::
pandoc [..] --filter=lsstprojectmeta-deparagraph
Only lone paragraphs are affected. Para elements with siblings (like a
second Para) are left unaffected.
This filter is useful for processing strings like titles or author names so
that the output isn't wrapped in paragraph tags. For example, without
this filter, pandoc converts a string ``"The title"`` to
``<p>The title</p>`` in HTML. These ``<p>`` tags aren't useful if you
intend to put the title text in ``<h1>`` tags using your own templating
system.
"""
if isinstance(element, Para):
# Check if siblings exist; don't process the paragraph in that case.
if element.next is not None:
return element
elif element.prev is not None:
return element
# Remove the Para wrapper from the lone paragraph.
# `Plain` is a container that isn't rendered as a paragraph.
return Plain(*element.content) | python | {
"resource": ""
} |
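A hedged in-process sketch; panflute's walk is assumed to apply the filter function to every element of a document built by hand:

from panflute import Doc, Para, Str

doc = Doc(Para(Str('Title')))
doc = doc.walk(deparagraph)
print(type(doc.content[0]).__name__)  # Plain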
q265090 | all_subclasses | validation | def all_subclasses(cls):
""" Recursively generate of all the subclasses of class cls. """
for subclass in cls.__subclasses__():
yield subclass
for subc in all_subclasses(subclass):
yield subc | python | {
"resource": ""
} |
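A quick usage sketch:

class Base: pass
class Child(Base): pass
class GrandChild(Child): pass

print([cls.__name__ for cls in all_subclasses(Base)])  # ['Child', 'GrandChild']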
q265091 | unique_justseen | validation | def unique_justseen(iterable, key=None):
"List unique elements, preserving order. Remember only the element just seen."
# unique_justseen('AAAABBBCCDAABBB') --> A B C D A B
# unique_justseen('ABBCcAD', str.lower) --> A B C A D
try:
# PY2 support
from itertools import imap as map
except ImportError:
from builtins import map
return map(next, map(operator.itemgetter(1), itertools.groupby(iterable, key))) | python | {
"resource": ""
} |
q265092 | generic_masked | validation | def generic_masked(arr, attrs=None, minv=None, maxv=None, mask_nan=True):
"""
Returns a masked array with anything outside the valid values masked.
The minv and maxv parameters take precedence over any dict values.
The valid_range attribute takes precedence over the valid_min and
valid_max attributes.
"""
attrs = attrs or {}
if 'valid_min' in attrs:
minv = safe_attribute_typing(arr.dtype, attrs['valid_min'])
if 'valid_max' in attrs:
maxv = safe_attribute_typing(arr.dtype, attrs['valid_max'])
if 'valid_range' in attrs:
vr = attrs['valid_range']
minv = safe_attribute_typing(arr.dtype, vr[0])
maxv = safe_attribute_typing(arr.dtype, vr[1])
# Get the min/max of values that the hardware supports
try:
info = np.iinfo(arr.dtype)
except ValueError:
info = np.finfo(arr.dtype)
minv = minv if minv is not None else info.min
maxv = maxv if maxv is not None else info.max
if mask_nan is True:
arr = np.ma.fix_invalid(arr)
return np.ma.masked_outside(
arr,
minv,
maxv
) | python | {
"resource": ""
} |
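A quick sketch; safe_attribute_typing is assumed to cast the attribute values to the array's dtype:

import numpy as np

arr = np.array([0.5, 2.0, np.nan, 9.0])
masked = generic_masked(arr, attrs={'valid_range': [0.0, 5.0]})
print(masked)  # [0.5 2.0 -- --]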
q265093 | BasicNumpyEncoder.default | validation | def default(self, obj):
"""If input object is an ndarray it will be converted into a list
"""
if isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, np.generic):
return obj.item()
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, obj) | python | {
"resource": ""
} |
q265094 | NumpyEncoder.default | validation | def default(self, obj):
"""If input object is an ndarray it will be converted into a dict
holding dtype, shape and the data, base64 encoded.
"""
if isinstance(obj, np.ndarray):
if obj.flags['C_CONTIGUOUS']:
obj_data = obj.data
else:
cont_obj = np.ascontiguousarray(obj)
assert(cont_obj.flags['C_CONTIGUOUS'])
obj_data = cont_obj.data
# decode to str so the result is JSON-serializable on Python 3
data_b64 = base64.b64encode(obj_data).decode('ascii')
return dict(__ndarray__=data_b64,
dtype=str(obj.dtype),
shape=obj.shape)
elif isinstance(obj, np.generic):
return obj.item()
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, obj) | python | {
"resource": ""
} |
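A round-trip sketch for NumpyEncoder; the decoding hook is a hypothetical counterpart written inline:

import base64
import json

import numpy as np

payload = json.dumps(np.arange(4, dtype='float32'), cls=NumpyEncoder)

def decode_ndarray(dct):
    # Hypothetical inverse of NumpyEncoder.default.
    if '__ndarray__' in dct:
        data = base64.b64decode(dct['__ndarray__'])
        return np.frombuffer(data, dct['dtype']).reshape(dct['shape'])
    return dct

restored = json.loads(payload, object_hook=decode_ndarray)
assert np.array_equal(restored, np.arange(4, dtype='float32'))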
q265095 | update_desc_lsib_path | validation | def update_desc_lsib_path(desc):
'''
leftSibling
previousSibling
leftSib
prevSib
lsib
psib
these names all refer to the sibling that shares the same parent and sits immediately to the left
'''
if(desc['sib_seq']>0):
lsib_path = copy.deepcopy(desc['path'])
lsib_path[-1] = desc['sib_seq']-1
desc['lsib_path'] = lsib_path
else:
pass
return(desc) | python | {
"resource": ""
} |
q265096 | update_desc_rsib_path | validation | def update_desc_rsib_path(desc,sibs_len):
'''
rightSibling
nextSibling
rightSib
nextSib
rsib
nsib
these names all refer to the sibling that shares the same parent and sits immediately to the right
'''
if(desc['sib_seq']<(sibs_len-1)):
rsib_path = copy.deepcopy(desc['path'])
rsib_path[-1] = desc['sib_seq']+1
desc['rsib_path'] = rsib_path
else:
pass
return(desc) | python | {
"resource": ""
} |
q265097 | update_desc_lcin_path | validation | def update_desc_lcin_path(desc,pdesc_level):
'''
leftCousin
previousCousin
leftCin
prevCin
lcin
pcin
these names all refer to the cousin whose parent is the immediate left neighbor of this node's parent
'''
parent_breadth = desc['parent_breadth_path'][-1]
if(desc['sib_seq']==0):
if(parent_breadth==0):
pass
else:
parent_lsib_breadth = parent_breadth - 1
plsib_desc = pdesc_level[parent_lsib_breadth]
if(plsib_desc['leaf']):
pass
else:
lcin_path = copy.deepcopy(plsib_desc['path'])
lcin_path.append(plsib_desc['sons_count'] - 1)
desc['lcin_path'] = lcin_path
else:
pass
return(desc) | python | {
"resource": ""
} |
q265098 | update_desc_rcin_path | validation | def update_desc_rcin_path(desc,sibs_len,pdesc_level):
'''
rightCousin
nextCousin
rightCin
nextCin
rcin
ncin
these names all refer to the cousin whose parent is the immediate right neighbor of this node's parent
'''
psibs_len = pdesc_level.__len__()
parent_breadth = desc['parent_breadth_path'][-1]
if(desc['sib_seq']==(sibs_len - 1)):
if(parent_breadth==(psibs_len -1)):
pass
else:
parent_rsib_breadth = parent_breadth + 1
prsib_desc = pdesc_level[parent_rsib_breadth]
#because from left to right to handle each level
#sons_count will only be updated in the next-round
if(prsib_desc['leaf']):
pass
else:
rcin_path = copy.deepcopy(prsib_desc['path'])
rcin_path.append(0)
desc['rcin_path'] = rcin_path
else:
pass
return(desc) | python | {
"resource": ""
} |
q265099 | PointerCache.child_begin_handler | validation | def child_begin_handler(self,scache,*args):
'''
_creat_child_desc
update depth, parent_breadth_path, parent_path, sib_seq, path,
lsib_path, rsib_path, lcin_path, rcin_path
'''
pdesc = self.pdesc
depth = scache.depth
sib_seq = self.sib_seq
sibs_len = self.sibs_len
pdesc_level = scache.pdesc_level
desc = copy.deepcopy(pdesc)
desc = reset_parent_desc_template(desc)
desc['depth'] = depth
desc['parent_breadth_path'] = copy.deepcopy(desc['breadth_path'])
desc['sib_seq'] = sib_seq
desc['parent_path'] = copy.deepcopy(desc['path'])
desc['path'].append(sib_seq)
update_desc_lsib_path(desc)
update_desc_rsib_path(desc,sibs_len)
if(depth == 1):
pass
else:
update_desc_lcin_path(desc,pdesc_level)
update_desc_rcin_path(desc,sibs_len,pdesc_level)
return(desc) | python | {
"resource": ""
} |