| docstring (string, lengths 52–499) | function (string, lengths 67–35.2k) | __index_level_0__ (int64, 52.6k–1.16M) |
|---|---|---|
Generate the first value in each row.
Args:
cursor: the cursor
arraysize: split fetches into chunks of this many records
Yields:
the first value of each row
|
def genfirstvalues(cursor: Cursor, arraysize: int = 1000) \
-> Generator[Any, None, None]:
return (row[0] for row in genrows(cursor, arraysize))
| 730,894
|
Generate all rows from a cursor as :class:`OrderedDict` objects.
Args:
cursor: the cursor
arraysize: split fetches into chunks of this many records
Yields:
each row, as an :class:`OrderedDict` whose keys are column names
and whose values are the row values
|
def gendicts(cursor: Cursor, arraysize: int = 1000) \
-> Generator[Dict[str, Any], None, None]:
columns = get_fieldnames_from_cursor(cursor)
return (
OrderedDict(zip(columns, row))
for row in genrows(cursor, arraysize)
)
| 730,895
|
Return all rows from a cursor as a list of :class:`OrderedDict` objects.
Args:
cursor: the cursor
Returns:
a list (one item per row) of :class:`OrderedDict` objects whose keys are
column names and whose values are the row values
|
def dictfetchall(cursor: Cursor) -> List[Dict[str, Any]]:
columns = get_fieldnames_from_cursor(cursor)
return [
OrderedDict(zip(columns, row))
for row in cursor.fetchall()
]
| 730,896
|
Asks an SQLAlchemy ORM object: "what are your primary key(s)?"
Args:
obj: SQLAlchemy ORM object
Returns:
list of attribute names of primary-key columns
|
def get_pk_attrnames(obj) -> List[str]:
return [attrname
for attrname, column in gen_columns(obj)
if column.primary_key]
| 730,950
|
Asks an SQLAlchemy class how its attribute names correspond to database
column names.
Args:
cls: SQLAlchemy ORM class
Returns:
a dictionary mapping attribute names to database column names
|
def attrname_to_colname_dict(cls) -> Dict[str, str]:
attr_col = {} # type: Dict[str, str]
for attrname, column in gen_columns(cls):
attr_col[attrname] = column.name
return attr_col
| 730,952
|
Generates all subclasses of a class.
Args:
cls: a class
Yields:
all subclasses
|
def gen_all_subclasses(cls: Type) -> Generator[Type, None, None]:
for s1 in cls.__subclasses__():
yield s1
for s2 in gen_all_subclasses(s1):
yield s2
| 730,964
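A minimal usage sketch (assuming ``gen_all_subclasses`` is importable from the module above); each subclass is yielded before that subclass's own subclasses:

.. code-block:: python

    class A: pass
    class B(A): pass
    class C(B): pass
    class D(A): pass

    print([sub.__name__ for sub in gen_all_subclasses(A)])
    # ['B', 'C', 'D']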
|
Prints the filename of an OpenXML file, or deletes the file, depending on
whether it appears corrupt.
Args:
filename: filename to check
print_good: if ``True``, then prints the filename if the file
appears good.
delete_if_bad: if ``True``, then deletes the file if the file
appears corrupt.
|
def process_openxml_file(filename: str,
print_good: bool,
delete_if_bad: bool) -> None:
print_bad = not print_good
try:
file_good = is_openxml_good(filename)
file_bad = not file_good
if (print_good and file_good) or (print_bad and file_bad):
print(filename)
if delete_if_bad and file_bad:
log.warning("Deleting: {}", filename)
os.remove(filename)
except Exception as e:
# Must explicitly catch and report errors, since otherwise they vanish
# into the ether.
log.critical("Uncaught error in subprocess: {!r}\n{}", e,
traceback.format_exc())
raise
| 730,981
|
Produce CSV output without using ``csv.writer``, so the output can also be
sent to a log.
POOR; DEPRECATED.
Args:
filehandle: file to write to
fields: field names
values: values
|
def produce_csv_output(filehandle: TextIO,
fields: Sequence[str],
values: Iterable[str]) -> None:
output_csv(filehandle, fields)
for row in values:
output_csv(filehandle, row)
| 730,986
|
Write a line of CSV. POOR; does not escape things properly. DEPRECATED.
Args:
filehandle: file to write to
values: values
|
def output_csv(filehandle: TextIO, values: Iterable[str]) -> None:
line = ",".join(values)
filehandle.write(line + "\n")
| 730,987
|
Find the part of ``s`` that is after ``prefix``.
Args:
s: string to analyse
prefix: prefix to find
onlyatstart: only accept the prefix if it is right at the start of
``s``
stripwhitespace: remove whitespace from the result
Returns:
tuple: ``(found, result)``
|
def get_what_follows_raw(s: str,
prefix: str,
onlyatstart: bool = True,
stripwhitespace: bool = True) -> Tuple[bool, str]:
prefixstart = s.find(prefix)
if ((prefixstart == 0 and onlyatstart) or
(prefixstart != -1 and not onlyatstart)):
# substring found
resultstart = prefixstart + len(prefix)
result = s[resultstart:]
if stripwhitespace:
result = result.strip()
return True, result
return False, ""
| 730,988
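A short usage sketch (assuming the function above is importable), showing both the found and not-found cases:

.. code-block:: python

    found, result = get_what_follows_raw("Name: Alice", "Name:")
    print(found, repr(result))  # True 'Alice'

    found, result = get_what_follows_raw("Full Name: Alice", "Name:")
    print(found, repr(result))  # False '' (prefix is not at the start)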
|
Find a string as per :func:`get_what_follows`.
Args:
strings: see :func:`get_what_follows`
prefix: see :func:`get_what_follows`
ignoreleadingcolon: if ``True``, restrict the result to what comes
after its first colon (and whitespace-strip that)
precedingline: see :func:`get_what_follows`
Returns:
the line fragment
|
def get_string(strings: Sequence[str],
prefix: str,
ignoreleadingcolon: bool = False,
precedingline: str = "") -> Optional[str]:
s = get_what_follows(strings, prefix, precedingline=precedingline)
if ignoreleadingcolon:
f = s.find(":")
if f != -1:
s = s[f+1:].strip()
if len(s) == 0:
return None
return s
| 730,990
|
Extracts data from a list of CSV lines (starting with a defined header
line) embedded in a longer text block but ending with a blank line.
Args:
lines: CSV lines
csvheader: CSV header line
quotechar: ``quotechar`` parameter passed to :func:`csv.reader`
Returns:
list of dictionaries mapping fieldnames (from the header) to values
|
def csv_to_list_of_dicts(lines: List[str],
csvheader: str,
quotechar: str = '"') -> List[Dict[str, str]]:
data = [] # type: List[Dict[str, str]]
# an empty line marks the end of the block
csvlines = get_lines_from_to(lines, csvheader, [None])[1:]
# ... remove the CSV header
headerfields = csvheader.split(",")
reader = csv.reader(csvlines, quotechar=quotechar)
for fields in reader:
row = {} # type: Dict[str, str]
        for f, header in enumerate(headerfields):
            row[header] = fields[f]
data.append(row)
return data
| 731,004
|
Decorator to add caching to a function in Django.
Uses the Django default cache.
Args:
timeout: timeout in seconds; use None for "never expire", as 0 means
"do not cache".
cache_key: optional cache key to use (if falsy, we'll invent one)
debug_cache: show hits/misses?
|
def django_cache_function(timeout: int = 5 * 60,
cache_key: str = '',
debug_cache: bool = False):
cache_key = cache_key or None
def decorator(fn):
def wrapper(*args, **kwargs):
# - NOTE that Django returns None from cache.get() for "not in
# cache", so can't cache a None value;
# https://docs.djangoproject.com/en/1.10/topics/cache/#basic-usage # noqa
# - We need to store a bit more than just the function result
# anyway, to detect hash collisions when the user doesn't specify
# the cache_key, so we may as well use that format even if the
# user does specify the cache_key, and then we can store a None
# result properly as well.
if cache_key:
# User specified a cache key. This is easy.
call_sig = ''
_cache_key = cache_key
check_stored_call_sig = False
else:
# User didn't specify a cache key, so we'll do one
# automatically. Since we do this via a hash, there is a small
# but non-zero chance of a hash collision.
call_sig = get_call_signature(fn, args, kwargs)
_cache_key = make_cache_key(call_sig)
check_stored_call_sig = True
if debug_cache:
log.critical("Checking cache for key: " + _cache_key)
cache_result_tuple = cache.get(_cache_key) # TALKS TO CACHE HERE
if cache_result_tuple is None:
if debug_cache:
log.debug("Cache miss")
else:
if debug_cache:
log.debug("Cache hit")
cached_call_sig, func_result = cache_result_tuple
if (not check_stored_call_sig) or cached_call_sig == call_sig:
return func_result
log.warning(
"... Cache hit was due to hash collision; cached_call_sig "
"{} != call_sig {}".format(
repr(cached_call_sig), repr(call_sig)))
# If we get here, either it wasn't in the cache, or something
# was in the cache that matched by cache_key but was actually a
# hash collision. Either way, we must do the real work.
func_result = fn(*args, **kwargs)
cache_result_tuple = (call_sig, func_result)
cache.set(key=_cache_key, value=cache_result_tuple,
timeout=timeout) # TALKS TO CACHE HERE
return func_result
return wrapper
return decorator
| 731,015
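A usage sketch, assuming a configured Django default cache; ``Item`` is a hypothetical model used only for illustration:

.. code-block:: python

    @django_cache_function(timeout=60)
    def expensive_count(category_id):
        # Item is a hypothetical model; imagine a slow aggregation here.
        return Item.objects.filter(category_id=category_id).count()

    n1 = expensive_count(3)  # computed, then cached for 60 s
    n2 = expensive_count(3)  # served from the cache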
|
Describes how the tables found in the metadata depend on each other.
(If table B contains a foreign key to table A, for example, then B depends
on A.)
Args:
metadata: the metadata to inspect
extra_dependencies: additional table dependencies to specify manually
sort: sort into alphabetical order of (parent, child) table names?
Returns:
a list of :class:`TableDependency` objects
See :func:`sort_tables_and_constraints` for method.
|
def get_all_dependencies(metadata: MetaData,
extra_dependencies: List[TableDependency] = None,
sort: bool = True) \
-> List[TableDependency]:
extra_dependencies = extra_dependencies or [] # type: List[TableDependency] # noqa
for td in extra_dependencies:
td.set_metadata_if_none(metadata)
dependencies = set([td.sqla_tuple() for td in extra_dependencies])
tables = list(metadata.tables.values()) # type: List[Table]
for table in tables:
for fkc in table.foreign_key_constraints:
if fkc.use_alter is True:
# http://docs.sqlalchemy.org/en/latest/core/constraints.html#sqlalchemy.schema.ForeignKeyConstraint.params.use_alter # noqa
continue
dependent_on = fkc.referred_table
if dependent_on is not table:
dependencies.add((dependent_on, table))
if hasattr(table, "_extra_dependencies"):
# noinspection PyProtectedMember
dependencies.update(
(parent, table) for parent in table._extra_dependencies
)
dependencies = [
TableDependency(parent_table=parent, child_table=child)
for parent, child in dependencies
]
if sort:
dependencies.sort(key=lambda td_: (td_.parent_tablename,
td_.child_tablename))
return dependencies
| 731,048
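A minimal sketch of dependency discovery (assuming ``get_all_dependencies`` and ``TableDependency`` are importable); ``child`` carries a foreign key to ``parent``, so it depends on it:

.. code-block:: python

    from sqlalchemy import Column, ForeignKey, Integer, MetaData, Table

    metadata = MetaData()
    parent = Table("parent", metadata,
                   Column("id", Integer, primary_key=True))
    child = Table("child", metadata,
                  Column("id", Integer, primary_key=True),
                  Column("parent_id", Integer, ForeignKey("parent.id")))

    for td in get_all_dependencies(metadata):
        print(td)  # one dependency: child depends on parent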
|
Inspects a metadata object (optionally adding other specified dependencies)
and returns a list of objects describing their dependencies.
Args:
metadata: the :class:`MetaData` to inspect
extra_dependencies: additional dependencies
sort: sort the results by table name?
Returns:
list of :class:`TableDependencyClassification` objects, one for each
table
|
def classify_tables_by_dependency_type(
metadata: MetaData,
extra_dependencies: List[TableDependency] = None,
sort: bool = True) \
-> List[TableDependencyClassification]:
tables = list(metadata.tables.values()) # type: List[Table]
all_deps = get_all_dependencies(metadata, extra_dependencies)
tdcmap = {} # type: Dict[Table, TableDependencyClassification]
for table in tables:
parents = [td.parent_table for td in all_deps
if td.child_table == table]
children = [td.child_table for td in all_deps
if td.parent_table == table]
tdcmap[table] = TableDependencyClassification(
table, parents=parents, children=children
)
# Check for circularity
def parents_contain(start: Table,
probe: Table) -> Tuple[bool, List[Table]]:
tdc_ = tdcmap[start]
if probe in tdc_.parents:
return True, [start, probe]
for parent in tdc_.parents:
contains_, chain_ = parents_contain(start=parent, probe=probe)
if contains_:
return True, [start] + chain_
return False, []
def children_contain(start: Table,
probe: Table) -> Tuple[bool, List[Table]]:
tdc_ = tdcmap[start]
if probe in tdc_.children:
return True, [start, probe]
for child in tdc_.children:
contains_, chain_ = children_contain(start=child, probe=probe)
if contains_:
return True, [start] + chain_
return False, []
for table in tables:
tdc = tdcmap[table]
contains, chain = parents_contain(start=table, probe=table)
if contains:
tdc.set_circular(contains, chain)
else:
contains, chain = children_contain(start=table, probe=table)
if contains:
tdc.set_circular(contains, chain)
else:
tdc.set_circular(False)
classifications = list(tdcmap.values())
if sort:
classifications.sort(key=lambda c: c.tablename)
return classifications
| 731,049
|
Mark this table as circular (or not).
Args:
circular: is it circular?
chain: if it's circular, this should be the list of tables
participating in the circular chain
|
def set_circular(self, circular: bool, chain: List[Table] = None) -> None:
self.circular = circular
self.circular_chain = chain or []
| 731,055
|
Returns results and column names from a query.
Args:
session: SQLAlchemy :class:`Session`, :class:`Engine`, or
:class:`Connection` object
query: SQLAlchemy :class:`Query`
Returns:
``(rows, fieldnames)`` where ``rows`` is the usual set of results and
``fieldnames`` are the names of the result columns/fields.
|
def get_rows_fieldnames_from_query(
session: Union[Session, Engine, Connection],
query: Query) -> Tuple[Sequence[Sequence[Any]], Sequence[str]]:
# https://stackoverflow.com/questions/6455560/how-to-get-column-names-from-sqlalchemy-result-declarative-syntax # noqa
# No! Returns e.g. "User" for session.Query(User)...
# fieldnames = [cd['name'] for cd in query.column_descriptions]
result = session.execute(query) # type: ResultProxy
fieldnames = result.keys()
# ... yes! Comes out as "_table_field", which is how SQLAlchemy SELECTs
# things.
rows = result.fetchall()
return rows, fieldnames
| 731,113
|
Initialize with either ``tablename`` or ``table``, not both.
Args:
tablename: string name of the table
table: SQLAlchemy :class:`Table` object
metadata: optional :class:`MetaData` object
|
def __init__(self,
tablename: str = None,
table: Table = None,
metadata: MetaData = None) -> None:
assert table is not None or tablename, "No table information provided"
assert not (tablename and table is not None), (
"Specify either table or tablename, not both")
self._table = table
self._tablename = tablename
self._metadata = metadata
| 731,128
|
Assert that a specific PDF processor is available.
Args:
processor: a PDF processor type from :class:`Processors`
Raises:
AssertionError: if bad ``processor``
RuntimeError: if requested processor is unavailable
|
def assert_processor_available(processor: str) -> None:
if processor not in [Processors.XHTML2PDF,
Processors.WEASYPRINT,
Processors.PDFKIT]:
raise AssertionError("rnc_pdf.set_pdf_processor: invalid PDF processor"
" specified")
if processor == Processors.WEASYPRINT and not weasyprint:
raise RuntimeError("rnc_pdf: Weasyprint requested, but not available")
if processor == Processors.XHTML2PDF and not xhtml2pdf:
raise RuntimeError("rnc_pdf: xhtml2pdf requested, but not available")
if processor == Processors.PDFKIT and not pdfkit:
raise RuntimeError("rnc_pdf: pdfkit requested, but not available")
| 731,147
|
Appends a PDF (as bytes in memory) to a PyPDF2 writer.
Args:
input_pdf: the PDF, as ``bytes``
writer: the writer
start_recto: start a new right-hand page?
|
def append_memory_pdf_to_writer(input_pdf: bytes,
writer: PdfFileWriter,
start_recto: bool = True) -> None:
if not input_pdf:
return
if start_recto and writer.getNumPages() % 2 != 0:
writer.addBlankPage()
# ... suitable for double-sided printing
infile = io.BytesIO(input_pdf)
reader = PdfFileReader(infile)
for page_num in range(reader.numPages):
writer.addPage(reader.getPage(page_num))
| 731,151
|
Concatenates PDFs from disk and returns them as an in-memory binary PDF.
Args:
filenames: iterable of filenames of PDFs to concatenate
start_recto: start a new right-hand page for each new PDF?
Returns:
concatenated PDF, as ``bytes``
|
def get_concatenated_pdf_from_disk(filenames: Iterable[str],
start_recto: bool = True) -> bytes:
# http://stackoverflow.com/questions/17104926/pypdf-merging-multiple-pdf-files-into-one-pdf # noqa
# https://en.wikipedia.org/wiki/Recto_and_verso
if start_recto:
writer = PdfFileWriter()
for filename in filenames:
if filename:
if writer.getNumPages() % 2 != 0:
writer.addBlankPage()
writer.appendPagesFromReader(
PdfFileReader(open(filename, 'rb')))
return pdf_from_writer(writer)
else:
merger = PdfFileMerger()
for filename in filenames:
if filename:
merger.append(open(filename, 'rb'))
return pdf_from_writer(merger)
| 731,153
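A usage sketch with the legacy PyPDF2 API used above; the filenames are hypothetical:

.. code-block:: python

    pdf_bytes = get_concatenated_pdf_from_disk(
        ["cover.pdf", "report.pdf", "appendix.pdf"],  # hypothetical files
        start_recto=True)
    with open("combined.pdf", "wb") as f:
        f.write(pdf_bytes)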
|
Concatenates PDFs and returns them as an in-memory binary PDF.
Args:
pdf_plans: iterable of :class:`PdfPlan` objects
start_recto: start a new right-hand page for each new PDF?
Returns:
concatenated PDF, as ``bytes``
|
def get_concatenated_pdf_in_memory(
pdf_plans: Iterable[PdfPlan],
start_recto: bool = True) -> bytes:
writer = PdfFileWriter()
for pdfplan in pdf_plans:
pdfplan.add_to_writer(writer, start_recto=start_recto)
return pdf_from_writer(writer)
| 731,154
|
Add the PDF described by this class to a PDF writer.
Args:
writer: a :class:`PyPDF2.PdfFileWriter`
start_recto: start a new right-hand page?
|
def add_to_writer(self,
writer: PdfFileWriter,
start_recto: bool = True) -> None:
if self.is_html:
pdf = get_pdf_from_html(
html=self.html,
header_html=self.header_html,
footer_html=self.footer_html,
wkhtmltopdf_filename=self.wkhtmltopdf_filename,
wkhtmltopdf_options=self.wkhtmltopdf_options)
append_memory_pdf_to_writer(pdf, writer, start_recto=start_recto)
elif self.is_filename:
if start_recto and writer.getNumPages() % 2 != 0:
writer.addBlankPage()
writer.appendPagesFromReader(PdfFileReader(
open(self.filename, 'rb')))
else:
raise AssertionError("PdfPlan: shouldn't get here!")
| 731,156
|
Internal function to make a :func:`repr`-style representation of an object.
Args:
obj: object to display
elements: list of object ``attribute=value`` strings
with_addr: include the memory address of ``obj``
joiner: string with which to join the elements
Returns:
string: :func:`repr`-style representation
|
def repr_result(obj: Any, elements: List[str],
with_addr: bool = False, joiner: str = COMMA_SPACE) -> str:
if with_addr:
return "<{qualname}({elements}) at {addr}>".format(
qualname=obj.__class__.__qualname__,
elements=joiner.join(elements),
addr=hex(id(obj)))
else:
return "{qualname}({elements})".format(
qualname=obj.__class__.__qualname__,
elements=joiner.join(elements))
| 731,158
|
Convenience function for :func:`__repr__`.
Works its way through the object's ``__dict__`` and reports accordingly.
Args:
obj: object to display
with_addr: include the memory address of ``obj``
sort_attrs: sort the attributes into alphabetical order?
joiner: string with which to join the elements
Returns:
string: :func:`repr`-style representation
|
def auto_repr(obj: Any, with_addr: bool = False,
sort_attrs: bool = True, joiner: str = COMMA_SPACE) -> str:
if sort_attrs:
keys = sorted(obj.__dict__.keys())
else:
keys = obj.__dict__.keys()
elements = ["{}={}".format(k, repr(getattr(obj, k))) for k in keys]
return repr_result(obj, elements, with_addr=with_addr, joiner=joiner)
| 731,159
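A short example (assuming ``auto_repr`` is importable) of delegating ``__repr__`` to it:

.. code-block:: python

    class Point:
        def __init__(self, x, y):
            self.x = x
            self.y = y

        def __repr__(self):
            return auto_repr(self)

    print(Point(1, 2))  # Point(x=1, y=2)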
|
Convenience function for :func:`__repr__`.
Works its way through a list of attribute names, and creates a ``repr()``
representation assuming that parameters to the constructor have the same
names.
Args:
obj: object to display
attrnames: names of attributes to include
with_addr: include the memory address of ``obj``
joiner: string with which to join the elements
Returns:
string: :func:`repr`-style representation
|
def simple_repr(obj: Any, attrnames: List[str],
with_addr: bool = False, joiner: str = COMMA_SPACE) -> str:
elements = ["{}={}".format(name, repr(getattr(obj, name)))
for name in attrnames]
return repr_result(obj, elements, with_addr=with_addr, joiner=joiner)
| 731,160
|
Convenience function for :func:`__repr__`.
Takes attribute names and corresponding initialization parameter names
(parameters to :func:`__init__`).
Args:
obj: object to display
attributes: list of tuples, each ``(attr_name, init_param_name)``.
with_addr: include the memory address of ``obj``
joiner: string with which to join the elements
Returns:
string: :func:`repr`-style representation
|
def mapped_repr(obj: Any, attributes: List[Tuple[str, str]],
with_addr: bool = False, joiner: str = COMMA_SPACE) -> str:
elements = ["{}={}".format(init_param_name, repr(getattr(obj, attr_name)))
for attr_name, init_param_name in attributes]
return repr_result(obj, elements, with_addr=with_addr, joiner=joiner)
| 731,161
|
Convenience function for :func:`__repr__`.
Here, you pass a list of internal attributes, and it assumes that the
:func:`__init__` parameter names have the leading underscore dropped.
Args:
obj: object to display
attrnames: list of attribute names
with_addr: include the memory address of ``obj``
joiner: string with which to join the elements
Returns:
string: :func:`repr`-style representation
|
def mapped_repr_stripping_underscores(
obj: Any, attrnames: List[str],
with_addr: bool = False, joiner: str = COMMA_SPACE) -> str:
attributes = []
for attr_name in attrnames:
if attr_name.startswith('_'):
init_param_name = attr_name[1:]
else:
init_param_name = attr_name
attributes.append((attr_name, init_param_name))
return mapped_repr(obj, attributes, with_addr=with_addr, joiner=joiner)
| 731,162
|
Shortcut to make :func:`repr` functions ordered.
Define your :func:`__repr__` like this:
.. code-block:: python
def __repr__(self):
return ordered_repr(self, ["field1", "field2", "field3"])
Args:
obj: object to display
attrlist: iterable of attribute names
joiner: string with which to join the elements
Returns:
string: :func:`repr`-style representation
|
def ordered_repr(obj: object, attrlist: Iterable[str],
joiner: str = COMMA_SPACE) -> str:
return "<{classname}({kvp})>".format(
classname=type(obj).__name__,
kvp=joiner.join("{}={}".format(a, repr(getattr(obj, a)))
for a in attrlist)
)
| 731,163
|
Make a pretty :func:`str()` representation using :func:`pprint.pformat`
and the object's ``__dict__`` attribute.
Args:
obj: object to display
indent: see
https://docs.python.org/3/library/pprint.html#pprint.PrettyPrinter
width: as above
depth: as above
compact: as above
Returns:
string: :func:`str`-style representation
|
def auto_str(obj: Any, indent: int = 4, width: int = 80, depth: int = None,
compact: bool = False) -> str:
return pprint.pformat(obj.__dict__, indent=indent, width=width,
depth=depth, compact=compact)
| 731,164
|
Open a file-like object.
Guard the use of this function with ``with``.
Args:
filename: for specifying via a filename
blob: for specifying via an in-memory ``bytes`` object
Returns:
a :class:`BinaryIO` object
|
def get_filelikeobject(filename: str = None,
blob: bytes = None) -> BinaryIO:
if not filename and not blob:
raise ValueError("no filename and no blob")
if filename and blob:
raise ValueError("specify either filename or blob")
if filename:
return open(filename, 'rb')
else:
return io.BytesIO(blob)
| 731,173
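A usage sketch; as the docstring says, guard it with ``with`` (the ``data.bin`` filename is hypothetical):

.. code-block:: python

    with get_filelikeobject(blob=b"hello") as fp:
        print(fp.read())  # b'hello'

    with get_filelikeobject(filename="data.bin") as fp:  # hypothetical file
        first_kb = fp.read(1024)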
|
Generate XML files (as strings) from a DOCX file.
Args:
fp: :class:`BinaryIO` object for reading the ``.DOCX`` file
Yields:
the string contents of each individual XML file within the ``.DOCX``
file
Raises:
zipfile.BadZipFile: if the zip is unreadable (encrypted?)
|
def gen_xml_files_from_docx(fp: BinaryIO) -> Iterator[str]:
try:
z = zipfile.ZipFile(fp)
filelist = z.namelist()
for filename in filelist:
if DOCX_HEADER_FILE_REGEX.match(filename):
yield z.read(filename).decode("utf8")
        yield z.read(DOCX_DOC_FILE).decode("utf8")
for filename in filelist:
if DOCX_FOOTER_FILE_REGEX.match(filename):
yield z.read(filename).decode("utf8")
except zipfile.BadZipFile:
# Clarify the error:
raise zipfile.BadZipFile("File is not a zip file - encrypted DOCX?")
| 731,181
|
Converts an XML tree of a DOCX file to string contents.
Args:
xml: raw XML text
config: :class:`TextProcessingConfig` control object
Returns:
contents as a string
|
def docx_text_from_xml(xml: str, config: TextProcessingConfig) -> str:
root = ElementTree.fromstring(xml)
return docx_text_from_xml_node(root, 0, config)
| 731,182
|
Returns text from an XML node within a DOCX file.
Args:
node: an XML node
level: current level in XML hierarchy (used for recursion; start level
is 0)
config: :class:`TextProcessingConfig` control object
Returns:
contents as a string
|
def docx_text_from_xml_node(node: ElementTree.Element,
level: int,
config: TextProcessingConfig) -> str:
text = ''
# log.debug("Level {}, tag {}", level, node.tag)
if node.tag == DOCX_TEXT:
text += node.text or ''
elif node.tag == DOCX_TAB:
text += '\t'
elif node.tag in DOCX_NEWLINES:
text += '\n'
elif node.tag == DOCX_NEWPARA:
text += '\n\n'
if node.tag == DOCX_TABLE:
text += '\n\n' + docx_table_from_xml_node(node, level, config)
else:
for child in node:
text += docx_text_from_xml_node(child, level + 1, config)
return text
| 731,183
|
Converts an XML node representing a DOCX table into a textual
representation.
Args:
table_node: XML node
level: current level in XML hierarchy (used for recursion; start level
is 0)
config: :class:`TextProcessingConfig` control object
Returns:
string representation
|
def docx_table_from_xml_node(table_node: ElementTree.Element,
level: int,
config: TextProcessingConfig) -> str:
table = CustomDocxTable()
for row_node in table_node:
if row_node.tag != DOCX_TABLE_ROW:
continue
table.new_row()
for cell_node in row_node:
if cell_node.tag != DOCX_TABLE_CELL:
continue
table.new_cell()
for para_node in cell_node:
text = docx_text_from_xml_node(para_node, level, config)
if text:
table.add_paragraph(text)
return docx_process_table(table, config)
| 731,184
|
Word-wraps text.
Args:
text: text to process
width: width to word-wrap to (or 0 to skip word wrapping)
Returns:
wrapped text
|
def docx_process_simple_text(text: str, width: int) -> str:
if width:
return '\n'.join(textwrap.wrap(text, width=width))
else:
return text
| 731,185
|
Iterate through a DOCX file and yield text.
Args:
doc: DOCX document to process
config: :class:`TextProcessingConfig` control object
Yields:
pieces of text (paragraphs)
|
def docx_docx_gen_text(doc: DOCX_DOCUMENT_TYPE,
config: TextProcessingConfig) -> Iterator[str]:
    # only called if docx loaded
    in_order = True  # NOTE: undefined in the original snippet; assumed True
    if in_order:
for thing in docx_docx_iter_block_items(doc):
if isinstance(thing, docx.text.paragraph.Paragraph):
yield docx_process_simple_text(thing.text, config.width)
elif isinstance(thing, docx.table.Table):
yield docx_process_table(thing, config)
else:
for paragraph in doc.paragraphs:
yield docx_process_simple_text(paragraph.text, config.width)
for table in doc.tables:
yield docx_process_table(table, config)
| 731,188
|
Implements ``CREATE VIEW``.
Args:
operations: instance of ``alembic.operations.base.Operations``
operation: instance of :class:`.ReversibleOp`
Returns:
``None``
|
def create_view(operations, operation):
operations.execute("CREATE VIEW %s AS %s" % (
operation.target.name,
operation.target.sqltext
))
| 731,335
|
Implements ``CREATE FUNCTION``.
Args:
operations: instance of ``alembic.operations.base.Operations``
operation: instance of :class:`.ReversibleOp`
Returns:
``None``
|
def create_sp(operations, operation):
operations.execute(
"CREATE FUNCTION %s %s" % (
operation.target.name, operation.target.sqltext
)
)
| 731,336
|
Returns a Python object from an Alembic migration module (script).
Args:
operations: instance of ``alembic.operations.base.Operations``
ident: string of the format ``version.objname``
Returns:
the object whose name is ``objname`` within the Alembic migration
script identified by ``version``
|
def _get_object_from_version(cls, operations, ident):
version, objname = ident.split(".")
module_ = operations.get_context().script.get_revision(version).module
obj = getattr(module_, objname)
return obj
| 731,338
|
Returns a sorted list of every file extension found in a directory
and its subdirectories.
Args:
path: path to scan
reportevery: report directory progress after every *n* steps
Returns:
sorted list of every file extension found
|
def list_file_extensions(path: str, reportevery: int = 1) -> List[str]:
extensions = set()
count = 0
for root, dirs, files in os.walk(path):
count += 1
if count % reportevery == 0:
log.debug("Walking directory {}: {!r}", count, root)
for file in files:
filename, ext = os.path.splitext(file)
extensions.add(ext)
return sorted(list(extensions))
| 731,373
|
Check which of a list of Debian packages are installed, via ``dpkg-query``.
Args:
packages: list of Debian package names
Returns:
dict: mapping from package name to boolean ("present?")
|
def are_debian_packages_installed(packages: List[str]) -> Dict[str, bool]:
assert len(packages) >= 1
require_executable(DPKG_QUERY)
args = [
DPKG_QUERY,
"-W", # --show
# "-f='${Package} ${Status} ${Version}\n'",
"-f=${Package} ${Status}\n", # --showformat
] + packages
completed_process = subprocess.run(args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=False)
encoding = sys.getdefaultencoding()
stdout = completed_process.stdout.decode(encoding)
stderr = completed_process.stderr.decode(encoding)
present = OrderedDict()
for line in stdout.split("\n"):
if line: # e.g. "autoconf install ok installed"
words = line.split()
assert len(words) >= 2
package = words[0]
present[package] = "installed" in words[1:]
for line in stderr.split("\n"):
if line: # e.g. "dpkg-query: no packages found matching XXX"
words = line.split()
assert len(words) >= 2
package = words[-1]
present[package] = False
log.debug("Debian package presence: {}", present)
return present
| 731,375
|
Ensure specific packages are installed under Debian.
Args:
packages: list of packages
Raises:
ValueError: if any are missing
|
def require_debian_packages(packages: List[str]) -> None:
present = are_debian_packages_installed(packages)
missing_packages = [k for k, v in present.items() if not v]
if missing_packages:
missing_packages.sort()
msg = (
"Debian packages are missing, as follows. Suggest:\n\n"
"sudo apt install {}".format(" ".join(missing_packages))
)
log.critical(msg)
raise ValueError(msg)
| 731,376
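A quick usage sketch (Debian/Ubuntu only; the package names are illustrative):

.. code-block:: python

    present = are_debian_packages_installed(["wget", "curl"])
    print(present)  # e.g. OrderedDict([('wget', True), ('curl', False)])

    require_debian_packages(["wget"])  # raises ValueError if missing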
|
(Windows only.)
Create a system ODBC data source name (DSN).
Args:
driver: ODBC driver name
kw: Driver attributes
Returns:
bool: was the DSN created?
|
def create_sys_dsn(driver: str, **kw) -> bool:
attributes = [] # type: List[str]
for attr in kw.keys():
attributes.append("%s=%s" % (attr, kw[attr]))
return bool(
ctypes.windll.ODBCCP32.SQLConfigDataSource(0, ODBC_ADD_SYS_DSN,
driver,
nul.join(attributes))
)
| 731,395
|
(Windows only.)
Create a user ODBC data source name (DSN).
Args:
driver: ODBC driver name
kw: Driver attributes
Returns:
bool: was the DSN created?
|
def create_user_dsn(driver: str, **kw) -> bool:
attributes = [] # type: List[str]
for attr in kw.keys():
attributes.append("%s=%s" % (attr, kw[attr]))
return bool(
ctypes.windll.ODBCCP32.SQLConfigDataSource(0, ODBC_ADD_DSN, driver,
nul.join(attributes))
)
| 731,396
|
(Windows only.)
Registers a Microsoft Access database with ODBC.
Args:
fullfilename: filename of the existing database
dsn: ODBC data source name to create
description: description of the database
Returns:
bool: was the DSN created?
|
def register_access_db(fullfilename: str, dsn: str, description: str) -> bool:
directory = os.path.dirname(fullfilename)
return create_sys_dsn(
access_driver,
SERVER="",
DESCRIPTION=description,
DSN=dsn,
DBQ=fullfilename,
DefaultDir=directory
)
| 731,397
|
(Windows only.)
Creates a Microsoft Access 97 database and registers it with ODBC.
Args:
filename: filename of the database to create
dsn: ODBC data source name to create
description: description of the database
Returns:
bool: was the DSN created?
|
def create_and_register_access97_db(filename: str,
dsn: str,
description: str) -> bool:
fullfilename = os.path.abspath(filename)
create_string = fullfilename + " General"
# ... filename, space, sort order ("General" for English)
return (create_user_dsn(access_driver, CREATE_DB3=create_string) and
register_access_db(filename, dsn, description))
| 731,398
|
(Windows only.)
Creates a Microsoft Access 2000 database and registers it with ODBC.
Args:
filename: filename of the database to create
dsn: ODBC data source name to create
description: description of the database
Returns:
bool: was the DSN created?
|
def create_and_register_access2000_db(filename: str,
dsn: str,
description: str) -> bool:
fullfilename = os.path.abspath(filename)
create_string = fullfilename + " General"
# ... filename, space, sort order ("General" for English)
return (create_user_dsn(access_driver, CREATE_DB4=create_string) and
register_access_db(filename, dsn, description))
| 731,399
|
(Windows only.)
Creates a Microsoft Access database and registers it with ODBC.
Args:
filename: filename of the database to create
dsn: ODBC data source name to create
description: description of the database
Returns:
bool: was the DSN created?
|
def create_and_register_access_db(filename: str,
dsn: str,
description: str) -> bool:
fullfilename = os.path.abspath(filename)
create_string = fullfilename + " General"
# ... filename, space, sort order ("General" for English)
return (create_user_dsn(access_driver, CREATE_DB=create_string) and
register_access_db(filename, dsn, description))
| 731,400
|
From an SQLAlchemy ORM class, creates the database table via the specified
engine, using a ``CREATE TABLE`` SQL (DDL) statement.
Args:
engine: SQLAlchemy :class:`Engine` object
ormclass: SQLAlchemy ORM class
without_constraints: don't add foreign key constraints
|
def create_table_from_orm_class(engine: Engine,
ormclass: DeclarativeMeta,
without_constraints: bool = False) -> None:
table = ormclass.__table__ # type: Table
log.info("Creating table {} on engine {}{}",
table.name,
get_safe_url_from_engine(engine),
" (omitting constraints)" if without_constraints else "")
# https://stackoverflow.com/questions/19175311/how-to-create-only-one-table-with-sqlalchemy # noqa
if without_constraints:
include_foreign_key_constraints = []
else:
include_foreign_key_constraints = None # the default
creator = CreateTable(
table,
include_foreign_key_constraints=include_foreign_key_constraints
)
creator.execute(bind=engine)
| 731,431
|
Deals with an OpenXML file, even if it is potentially corrupted.
Args:
filename: filename to process
filetypes: list of filetypes that we care about, e.g.
``['docx', 'pptx', 'xlsx']``.
move_to: move matching files to this directory
delete_if_not_specified_file_type: if ``True``, and the file is **not**
a type specified in ``filetypes``, then delete the file.
show_zip_output: show the output from the external ``zip`` tool?
|
def process_file(filename: str,
filetypes: List[str],
move_to: str,
delete_if_not_specified_file_type: bool,
show_zip_output: bool) -> None:
# log.critical("process_file: start")
try:
reader = CorruptedOpenXmlReader(filename,
show_zip_output=show_zip_output)
if reader.file_type in filetypes:
log.info("Found {}: {}", reader.description, filename)
if move_to:
dest_file = os.path.join(move_to, os.path.basename(filename))
_, ext = os.path.splitext(dest_file)
if ext != reader.suggested_extension():
dest_file += reader.suggested_extension()
reader.move_to(destination_filename=dest_file)
else:
log.info("Unrecognized or unwanted contents: " + filename)
if delete_if_not_specified_file_type:
log.info("Deleting: " + filename)
os.remove(filename)
except Exception as e:
# Must explicitly catch and report errors, since otherwise they vanish
# into the ether.
log.critical("Uncaught error in subprocess: {!r}\n{}", e,
traceback.format_exc())
raise
| 731,445
|
Move the file to which this class refers to a new location.
The function will not overwrite existing files (but offers the option
to rename files slightly to avoid a clash).
Args:
destination_filename: filename to move to
alter_if_clash: if ``True`` (the default), appends numbers to
the filename if the destination already exists, so that the
move can proceed.
|
def move_to(self, destination_filename: str,
alter_if_clash: bool = True) -> None:
if not self.src_filename:
return
if alter_if_clash:
counter = 0
while os.path.exists(destination_filename):
root, ext = os.path.splitext(destination_filename)
destination_filename = "{r}_{c}{e}".format(
r=root, c=counter, e=ext)
counter += 1
# ... for example, "/a/b/c.txt" becomes "/a/b/c_0.txt", then
# "/a/b/c_1.txt", and so on.
else:
if os.path.exists(destination_filename):
src = self.rescue_filename or self.src_filename
log.warning("Destination exists; won't move {!r} to {!r}",
src, destination_filename)
return
if self.rescue_filename:
shutil.move(self.rescue_filename, destination_filename)
os.remove(self.src_filename)
log.info("Moved recovered file {!r} to {!r} and deleted corrupted "
"original {!r}",
self.rescue_filename,
destination_filename,
self.src_filename)
self.rescue_filename = ""
else:
shutil.move(self.src_filename, destination_filename)
log.info("Moved {!r} to {!r}", self.src_filename,
destination_filename)
self.src_filename = ""
| 731,449
|
Writes HTTP result to stdout.
Args:
contenttype_headers_content:
the tuple ``(contenttype, extraheaders, data)``
status:
HTTP status message (default ``"200 OK``)
|
def print_result_for_plain_cgi_script_from_tuple(
contenttype_headers_content: WSGI_TUPLE_TYPE,
status: str = '200 OK') -> None:
contenttype, headers, content = contenttype_headers_content
print_result_for_plain_cgi_script(contenttype, headers, content, status)
| 731,543
|
Simple WSGI app.
Args:
result: the data to be processed by ``handler``
handler: a function returning a ``(contenttype, extraheaders, data)``
tuple, e.g. ``text_result``, ``html_result``
start_response: standard WSGI ``start_response`` function
status: status code (default ``"200 OK"``)
extraheaders: optional extra HTTP headers
Returns:
WSGI application result
|
def wsgi_simple_responder(
result: Union[str, bytes],
handler: Callable[[Union[str, bytes]], WSGI_TUPLE_TYPE],
start_response: TYPE_WSGI_START_RESPONSE,
status: str = '200 OK',
extraheaders: TYPE_WSGI_RESPONSE_HEADERS = None) \
-> TYPE_WSGI_APP_RESULT:
extraheaders = extraheaders or []
(contenttype, extraheaders2, output) = handler(result)
response_headers = [('Content-Type', contenttype),
('Content-Length', str(len(output)))]
response_headers.extend(extraheaders)
if extraheaders2 is not None:
response_headers.extend(extraheaders2)
# noinspection PyArgumentList
start_response(status, response_headers)
return [output]
| 731,545
|
Underlines a heading for RST files.
Args:
heading: text to underline
underline_char: character to use
Returns:
underlined heading, over two lines (without a final terminating
newline)
|
def rst_underline(heading: str, underline_char: str) -> str:
assert "\n" not in heading
assert len(underline_char) == 1
return heading + "\n" + (underline_char * len(heading))
| 731,550
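A quick illustration of the output:

.. code-block:: python

    print(rst_underline("Introduction", "="))
    # Introduction
    # ============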
|
Writes the contents to a file, if permitted.
Args:
filename: filename to write
content: contents to write
overwrite: permit overwrites?
mock: pretend to write, but don't
Raises:
RuntimeError: if file exists but overwriting not permitted
|
def write_if_allowed(filename: str,
content: str,
overwrite: bool = False,
mock: bool = False) -> None:
# Check we're allowed
if not overwrite and exists(filename):
fail("File exists, not overwriting: {!r}".format(filename))
# Make the directory, if necessary
directory = dirname(filename)
if not mock:
mkdir_p(directory)
# Write the file
log.info("Writing to {!r}", filename)
if mock:
log.warning("Skipping writes as in mock mode")
else:
with open(filename, "wt") as outfile:
outfile.write(content)
| 731,551
|
Returns the text contents of an RST file that will automatically
document our source file.
Args:
prefix: prefix, e.g. RST copyright comment
suffix: suffix, after the part we're creating
heading_underline_char: RST character to use to underline the
heading
method: optional method to override ``self.method``; see
constructor
Returns:
the RST contents
|
def rst_content(self,
prefix: str = "",
suffix: str = "",
heading_underline_char: str = "=",
method: AutodocMethod = None) -> str:
spacer = " "
# Choose our final method
if method is None:
method = self.method
is_python = self.is_python
if method == AutodocMethod.BEST:
if is_python:
method = AutodocMethod.AUTOMODULE
else:
method = AutodocMethod.CONTENTS
elif method == AutodocMethod.AUTOMODULE:
if not is_python:
method = AutodocMethod.CONTENTS
# Write the instruction
if method == AutodocMethod.AUTOMODULE:
if self.source_rst_title_style_python:
title = self.python_module_name
else:
title = self.source_filename_rel_project_root
instruction = ".. automodule:: {modulename}\n :members:".format(
modulename=self.python_module_name
)
elif method == AutodocMethod.CONTENTS:
title = self.source_filename_rel_project_root
# Using ".. include::" with options like ":code: python" doesn't
# work properly; everything comes out as Python.
# Instead, see http://www.sphinx-doc.org/en/1.4.9/markup/code.html;
# we need ".. literalinclude::" with ":language: LANGUAGE".
instruction = (
".. literalinclude:: {filename}\n"
"{spacer}:language: {language}".format(
filename=self.source_filename_rel_rst_file,
spacer=spacer,
language=self.pygments_language
)
)
else:
raise ValueError("Bad method!")
        # Create the whole file.
        # NOTE: the original template string literal was lost in extraction;
        # the layout below is a plausible reconstruction from the format()
        # arguments.
        content = """
.. {filename}

{AUTOGENERATED_COMMENT}

{prefix}

{underlined_title}

{instruction}

{suffix}
""".format(
            filename=self.rst_filename_rel_project_root,
            AUTOGENERATED_COMMENT=AUTOGENERATED_COMMENT,
            prefix=prefix,
            underlined_title=rst_underline(
                title, underline_char=heading_underline_char),
            instruction=instruction,
            suffix=suffix,
        ).strip() + "\n"
return content
| 731,556
|
Writes the RST file to our destination RST filename, making any
necessary directories.
Args:
prefix: as for :func:`rst_content`
suffix: as for :func:`rst_content`
heading_underline_char: as for :func:`rst_content`
method: as for :func:`rst_content`
overwrite: overwrite the file if it exists already?
mock: pretend to write, but don't
|
def write_rst(self,
prefix: str = "",
suffix: str = "",
heading_underline_char: str = "=",
method: AutodocMethod = None,
overwrite: bool = False,
mock: bool = False) -> None:
content = self.rst_content(
prefix=prefix,
suffix=suffix,
heading_underline_char=heading_underline_char,
method=method
)
write_if_allowed(self.target_rst_filename, content,
overwrite=overwrite, mock=mock)
| 731,557
|
Returns a sorted list of filenames to process, from a filename,
a glob string, or a list of filenames/globs.
Args:
source_filenames_or_globs: filename/glob, or list of them
recursive: use :func:`glob.glob` in recursive mode?
Returns:
sorted list of files to process
|
def get_sorted_source_files(
self,
source_filenames_or_globs: Union[str, List[str]],
recursive: bool = True) -> List[str]:
if isinstance(source_filenames_or_globs, str):
source_filenames_or_globs = [source_filenames_or_globs]
final_filenames = [] # type: List[str]
for sfg in source_filenames_or_globs:
sfg_expanded = expanduser(sfg)
log.debug("Looking for: {!r}", sfg_expanded)
for filename in glob.glob(sfg_expanded, recursive=recursive):
log.debug("Trying: {!r}", filename)
if self.should_exclude(filename):
log.info("Skipping file {!r}", filename)
continue
final_filenames.append(filename)
final_filenames.sort()
return final_filenames
| 731,560
|
Writes both the individual RST files and the index.
Args:
overwrite: allow existing files to be overwritten?
mock: pretend to write, but don't
|
def write_index_and_rst_files(self, overwrite: bool = False,
mock: bool = False) -> None:
for f in self.files_to_index:
if isinstance(f, FileToAutodocument):
f.write_rst(
prefix=self.rst_prefix,
suffix=self.rst_suffix,
heading_underline_char=self.source_rst_heading_underline_char, # noqa
overwrite=overwrite,
mock=mock,
)
elif isinstance(f, AutodocIndex):
f.write_index_and_rst_files(overwrite=overwrite, mock=mock)
else:
fail("Unknown thing in files_to_index: {!r}".format(f))
self.write_index(overwrite=overwrite, mock=mock)
| 731,564
|
Returns the filename of this index, relative to the directory of another
index. (For inserting a reference to this index into ``other``.)
Args:
other: filename of the other index
Returns:
relative filename of our index
|
def index_filename_rel_other_index(self, other: str) -> str:
return relpath(self.index_filename, start=dirname(other))
| 731,565
|
Writes the index file, if permitted.
Args:
overwrite: allow existing files to be overwritten?
mock: pretend to write, but don't
|
def write_index(self, overwrite: bool = False, mock: bool = False) -> None:
write_if_allowed(self.index_filename, self.index_content(),
overwrite=overwrite, mock=mock)
| 731,567
|
Given an array ``yy(0:n-1)``, extirpolate (spread) a value ``y`` into
``m`` actual array elements that best approximate the "fictional"
(i.e., possibly noninteger) array element number ``x``. The weights
used are coefficients of the Lagrange interpolating polynomial.
Arguments:
y : value to spread
yy : array to spread into (modified in place)
n : length of ``yy``
x : fictional (possibly noninteger) array element number
m : number of actual array elements used
Returns:
``None``; ``yy`` is modified in place
|
def __spread__(y, yy, n, x, m):
    nfac = [0, 1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880]
    if m > 10:
        print('factorial table too small in spread')
        return
    ix = int(x)  # Python 3: int() replaces the old Python 2 long()
    if x == float(ix):
        yy[ix] = yy[ix] + y
    else:
        ilo = int(x - 0.5 * float(m) + 1.0)
        ilo = min(max(ilo, 1), n - m + 1)
        ihi = ilo + m - 1
        nden = nfac[m]
        fac = x - ilo
        for j in range(ilo + 1, ihi + 1):
            fac = fac * (x - j)
        yy[ihi] = yy[ihi] + y * fac / (nden * (x - ihi))
        for j in range(ihi - 1, ilo - 1, -1):
            nden = (nden / (j + 1 - ilo)) * (j - ihi)
            yy[j] = yy[j] + y * fac / (nden * (x - j))
| 731,641
|
A coroutine sink which prints received items to stdout.
Args:
sep: Optional separator to be printed between received items.
end: Optional terminator to be printed after the last item.
file: Optional stream to which to print.
flush: Optional flag to force flushing after each item.
|
def rprint(sep='\n', end='\n', file=sys.stdout, flush=False):
try:
first_item = (yield)
file.write(str(first_item))
if flush:
file.flush()
while True:
item = (yield)
file.write(sep)
file.write(str(item))
if flush:
file.flush()
except GeneratorExit:
file.write(end)
if flush:
file.flush()
| 731,810
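A usage sketch; like any coroutine sink, it must be primed with ``next()`` before items are sent:

.. code-block:: python

    sink = rprint(sep=", ", end="!\n")
    next(sink)  # prime the coroutine to its first yield
    for word in ["spam", "eggs", "ham"]:
        sink.send(word)
    sink.close()  # triggers GeneratorExit; prints the terminator
    # output: spam, eggs, ham!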
|
Convert an iterable into a stream of events.
Args:
iterable: A series of items which will be sent to the target one by one.
target: The target coroutine or sink.
Returns:
An iterator over any remaining items.
|
def iterable_source(iterable, target):
it = iter(iterable)
for item in it:
try:
target.send(item)
except StopIteration:
return prepend(item, it)
return empty_iter()
| 731,812
|
Send events at random times, with exponentially distributed inter-arrival
times (i.e., as a Poisson process).
Args:
rate: The average number of events to send per second.
iterable: A series of items which will be sent to the target one by one.
target: The target coroutine or sink.
Returns:
An iterator over any remaining items.
|
def poisson_source(rate, iterable, target):
if rate <= 0.0:
raise ValueError("poisson_source rate {} is not positive".format(rate))
it = iter(iterable)
for item in it:
duration = random.expovariate(rate)
sleep(duration)
try:
target.send(item)
except StopIteration:
return prepend(item, it)
return empty_iter()
| 731,813
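A usage sketch feeding the ``rprint`` sink defined earlier, at an average rate of two events per second:

.. code-block:: python

    sink = rprint()
    next(sink)  # prime the sink
    leftover = poisson_source(rate=2.0, iterable=["a", "b", "c"],
                              target=sink)
    # ``leftover`` iterates any items the target refused to accept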
|
Write a valid gentoo make.conf file to :path:.
Args:
path - The output path of the make.conf
|
def write_makeconfig(_path):
http_proxy = str(CFG["gentoo"]["http_proxy"])
ftp_proxy = str(CFG["gentoo"]["ftp_proxy"])
rsync_proxy = str(CFG["gentoo"]["rsync_proxy"])
path.mkfile_uchroot(local.path('/') / _path)
with open(_path, 'w') as makeconf:
        # NOTE: the original make.conf template string was lost in
        # extraction and is not reconstructed here.
        lines = ""
makeconf.write(lines)
mounts = CFG["container"]["mounts"].value
tmp_dir = str(CFG["tmp_dir"])
mounts.append({"src": tmp_dir, "tgt": "/mnt/distfiles"})
CFG["container"]["mounts"] = mounts
if http_proxy is not None:
http_s = "http_proxy={0}".format(http_proxy)
https_s = "https_proxy={0}".format(http_proxy)
makeconf.write(http_s + "\n")
makeconf.write(https_s + "\n")
if ftp_proxy is not None:
fp_s = "ftp_proxy={0}".format(ftp_proxy)
makeconf.write(fp_s + "\n")
if rsync_proxy is not None:
rp_s = "RSYNC_PROXY={0}".format(rsync_proxy)
makeconf.write(rp_s + "\n")
| 731,964
|
Write a valid gentoo bashrc file to :path:.
Args:
path - The output path of the bashrc
|
def write_bashrc(_path):
cfg_mounts = CFG["container"]["mounts"].value
cfg_prefix = CFG["container"]["prefixes"].value
path.mkfile_uchroot("/etc/portage/bashrc")
mounts = uchroot.mounts("mnt", cfg_mounts)
p_paths, p_libs = uchroot.env(cfg_prefix)
paths, libs = uchroot.env(mounts)
paths = paths + p_paths
libs = libs + p_libs
with open(_path, 'w') as bashrc:
        # NOTE: the original template string was lost in extraction; this
        # is a plausible reconstruction from the format() arguments.
        lines = ('export PATH="{0}:${{PATH}}"\n'
                 'export LD_LIBRARY_PATH="{1}:${{LD_LIBRARY_PATH}}"\n'
                 ).format(path.list_to_path(paths), path.list_to_path(libs))
bashrc.write(lines)
| 731,965
|
Write a valid gentoo layout file to :path:.
Args:
path - The output path of the layout.conf
|
def write_layout(_path):
path.mkdir_uchroot("/etc/portage/metadata")
path.mkfile_uchroot("/etc/portage/metadata/layout.conf")
with open(_path, 'w') as layoutconf:
        # NOTE: the original layout.conf template was lost in extraction;
        # "masters = gentoo" is a plausible reconstruction.
        lines = "masters = gentoo\n"
layoutconf.write(lines)
| 731,966
|
Write a valid gentoo wgetrc file to :path:.
Args:
path - The output path of the wgetrc
|
def write_wgetrc(_path):
http_proxy = str(CFG["gentoo"]["http_proxy"])
ftp_proxy = str(CFG["gentoo"]["ftp_proxy"])
path.mkfile_uchroot("/etc/wgetrc")
with open(_path, 'w') as wgetrc:
if http_proxy is not None:
http_s = "http_proxy = {0}".format(http_proxy)
https_s = "https_proxy = {0}".format(http_proxy)
wgetrc.write("use_proxy = on\n")
wgetrc.write(http_s + "\n")
wgetrc.write(https_s + "\n")
if ftp_proxy is not None:
fp_s = "ftp_proxy={0}".format(ftp_proxy)
wgetrc.write(fp_s + "\n")
| 731,967
|
Initialize context.
Args:
context_type (~libinput.constant.ContextType): If
:attr:`~libinput.constant.ContextType.UDEV` devices are
added/removed from udev seat. If
:attr:`~libinput.constant.ContextType.PATH` devices have to be
added/removed manually.
debug (bool): If false, only errors are printed.
|
def __init__(self, context_type=ContextType.PATH, debug=False):
self._selector = DefaultSelector()
self._interface = Interface()
if context_type == ContextType.UDEV:
self._udev = self._libudev.udev_new()
self._li = self._libinput.libinput_udev_create_context(
byref(self._interface), None, self._udev)
elif context_type == ContextType.PATH:
self._li = self._libinput.libinput_path_create_context(
byref(self._interface), None)
self._log_handler = lambda pr, strn: print(pr.name, ': ', strn)
self._set_default_log_handler()
if debug:
self._libinput.libinput_log_set_priority(
self._li, LogPriority.DEBUG)
self._selector.register(
self._libinput.libinput_get_fd(self._li), EVENT_READ)
| 732,010
|
Add a device to a libinput context.
If successful, the device will be added to the internal list and
re-opened on :meth:`~libinput.LibInput.resume`. The device can be
removed with :meth:`remove_device`.
If the device was successfully initialized, it is returned.
Args:
path (str): Path to an input device.
Returns:
~libinput.define.Device: A device object or :obj:`None`.
|
def add_device(self, path):
hdevice = self._libinput.libinput_path_add_device(
self._li, path.encode())
if hdevice:
return Device(hdevice, self._libinput)
return None
| 732,014
|
Convert a hex-formatted number (i.e., `"#RGB[A]"` or `"#RRGGBB[AA]"`) to an
RGBA tuple (i.e., `(<r>, <g>, <b>, <a>)`).
Args:
hex_color (str) : hex-formatted number (e.g., `"#2fc"`, `"#3c2f8611"`)
normalize_to (int, float) : Factor to normalize each channel by
Returns:
(tuple) : RGBA tuple (i.e., `(<r>, <g>, <b>, <a>)`), where range of
each channel in tuple is `[0, normalize_to]`.
|
def hex_color_to_rgba(hex_color, normalize_to=255):
color_pattern_one_digit = (r'#(?P<R>[\da-fA-F])(?P<G>[\da-fA-F])'
r'(?P<B>[\da-fA-F])(?P<A>[\da-fA-F])?')
color_pattern_two_digit = (r'#(?P<R>[\da-fA-F]{2})(?P<G>[\da-fA-F]{2})'
r'(?P<B>[\da-fA-F]{2})(?P<A>[\da-fA-F]{2})?')
# First try to match `#rrggbb[aa]`.
match = re.match(color_pattern_two_digit, hex_color)
if match:
channels = match.groupdict()
channel_scale = 255
else:
# Try to match `#rgb[a]`.
match = re.match(color_pattern_one_digit, hex_color)
if match:
channels = match.groupdict()
channel_scale = 15
else:
raise ValueError('Color string must be in format #RGB[A] or '
'#RRGGBB[AA] (i.e., alpha channel is optional)')
scale = normalize_to / channel_scale
return tuple(type(normalize_to)(int(channels[k], 16) * scale)
if channels[k] is not None else None
for k in 'RGBA')
| 732,048
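Some illustrative calls (assuming the function above is importable):

.. code-block:: python

    print(hex_color_to_rgba("#2fc"))       # (34, 255, 204, None)
    print(hex_color_to_rgba("#3c2f8611"))  # (60, 47, 134, 17)
    print(hex_color_to_rgba("#ffffff", normalize_to=1.0))
    # (1.0, 1.0, 1.0, None)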
|
Recursively hash the contents of the given directory.
Args:
directory (str): The root directory we want to hash.
Returns:
A hash of all the contents in the directory.
|
def get_hash_of_dirs(directory):
import hashlib
sha = hashlib.sha512()
if not os.path.exists(directory):
return -1
for root, _, files in os.walk(directory):
for name in files:
filepath = local.path(root) / name
if filepath.exists():
with open(filepath, 'rb') as next_file:
for line in next_file:
sha.update(line)
return sha.hexdigest()
| 732,123
|
Check if a download is required.
Args:
src_file: The file to check for.
Returns:
True, if we need to download something, False otherwise.
|
def source_required(src_file):
if not src_file.exists():
return True
required = True
hash_file = src_file.with_suffix(".hash", depth=0)
LOG.debug("Hash file location: %s", hash_file)
if hash_file.exists():
new_hash = get_hash_of_dirs(src_file)
with open(hash_file, 'r') as h_file:
old_hash = h_file.readline()
required = not new_hash == old_hash
if required:
from benchbuild.utils.cmd import rm
rm("-r", src_file)
rm(hash_file)
if required:
LOG.info("Source required for: %s", src_file)
LOG.debug("Reason: src-exists: %s hash-exists: %s", src_file.exists(),
hash_file.exists())
return required
| 732,124
|
Update the hash for the given file.
Args:
src_file: The file name.
Returns:
The new hash.
|
def update_hash(src_file):
hash_file = local.path(src_file) + ".hash"
new_hash = 0
with open(hash_file, 'w') as h_file:
new_hash = get_hash_of_dirs(src_file)
h_file.write(str(new_hash))
return new_hash
| 732,125
|
Small copy wrapper.
Args:
From (str): Path to the SOURCE.
To (str): Path to the TARGET.
|
def Copy(From, To):
from benchbuild.utils.cmd import cp
cp("-ar", "--reflink=auto", From, To)
| 732,126
|
Just copy ``src`` into the current working directory, if it exists.
No action is executed if ``src`` does not exist. No hash is checked.
Args:
src: The filename we want to copy to '.'.
root: The optional source dir we should pull ``src`` from. Defaults
to ``benchbuild.settings.CFG["tmp_dir"]``.
Returns:
True, if we copied something.
|
def CopyNoFail(src, root=None):
if root is None:
root = str(CFG["tmp_dir"])
src_path = local.path(root) / src
if src_path.exists():
Copy(src_path, '.')
return True
return False
| 732,127
|
Download url, if required.
Args:
src_url (str): Our SOURCE url.
tgt_name (str): The filename we want to have on disk.
tgt_root (str): The TARGET directory for the download.
Defaults to ``CFG["tmp_dir"]``.
|
def Wget(src_url, tgt_name, tgt_root=None):
if tgt_root is None:
tgt_root = str(CFG["tmp_dir"])
from benchbuild.utils.cmd import wget
tgt_file = local.path(tgt_root) / tgt_name
if not source_required(tgt_file):
Copy(tgt_file, ".")
return
wget(src_url, "-O", tgt_file)
update_hash(tgt_file)
Copy(tgt_file, ".")
| 732,128
|
Get a clone of the given repo.
Args:
repository (str): Git URL of the SOURCE repo.
directory (str): Name of the repo folder on disk.
rev (str): Optional revision to check out after cloning.
prefix (str): TARGET folder for the git repo.
Defaults to ``CFG["tmp_dir"]``.
shallow_clone (bool): Only clone the repository shallowly.
Defaults to True.
|
def Git(repository, directory, rev=None, prefix=None, shallow_clone=True):
repository_loc = str(prefix)
if prefix is None:
repository_loc = str(CFG["tmp_dir"])
from benchbuild.utils.cmd import git
src_dir = local.path(repository_loc) / directory
if not source_required(src_dir):
Copy(src_dir, ".")
return
extra_param = []
if shallow_clone:
extra_param.append("--depth")
extra_param.append("1")
git("clone", extra_param, repository, src_dir)
if rev:
with local.cwd(src_dir):
git("checkout", rev)
update_hash(src_dir)
Copy(src_dir, ".")
return repository_loc
| 732,130
|
Checkout the SVN repo.
Args:
url (str): The SVN SOURCE repo.
fname (str): The name of the repo on disk.
to (str): The name of the TARGET folder on disk.
Defaults to ``CFG["tmp_dir"]``.
|
def Svn(url, fname, to=None):
if to is None:
to = str(CFG["tmp_dir"])
src_dir = local.path(to) / fname
if not source_required(src_dir):
Copy(src_dir, ".")
return
from benchbuild.utils.cmd import svn
svn("co", url, src_dir)
update_hash(src_dir)
Copy(src_dir, ".")
| 732,132
|
RSync a folder.
Args:
url (str): The url of the SOURCE location.
tgt_name (str): The name of the TARGET.
tgt_root (str): Path of the TARGET location.
Defaults to ``CFG["tmp_dir"]``.
|
def Rsync(url, tgt_name, tgt_root=None):
if tgt_root is None:
tgt_root = str(CFG["tmp_dir"])
from benchbuild.utils.cmd import rsync
tgt_dir = local.path(tgt_root) / tgt_name
if not source_required(tgt_dir):
Copy(tgt_dir, ".")
return
rsync("-a", url, tgt_dir)
update_hash(tgt_dir)
Copy(tgt_dir, ".")
| 732,133
|
Prepare a slurm script that executes the experiment for a given project.
Args:
experiment: The experiment we want to execute
projects: All projects we generate an array job for.
|
def script(experiment, projects):
benchbuild_c = local[local.path(sys.argv[0])]
slurm_script = local.cwd / experiment.name + "-" + str(
CFG['slurm']['script'])
srun = local["srun"]
srun_args = []
if not CFG["slurm"]["multithread"]:
srun_args.append("--hint=nomultithread")
if not CFG["slurm"]["turbo"]:
srun_args.append("--pstate-turbo=off")
srun = srun[srun_args]
srun = srun[benchbuild_c["run"]]
return __save__(slurm_script, srun, experiment, projects)
| 732,295
|
Dump a bash script that can be given to SLURM.
Args:
script_name (str): name of the bash script.
benchbuild: the benchbuild command to invoke on each node.
experiment: the experiment to execute.
projects: the projects to run as the array job's work items.
|
def __save__(script_name, benchbuild, experiment, projects):
from jinja2 import Environment, PackageLoader
logs_dir = os.path.dirname(CFG['slurm']['logs'].value)
node_command = str(benchbuild["-E", experiment.name, "$_project"])
env = Environment(
trim_blocks=True,
lstrip_blocks=True,
loader=PackageLoader('benchbuild', 'utils/templates'))
template = env.get_template('slurm.sh.inc')
with open(script_name, 'w') as slurm2:
slurm2.write(
template.render(
config=["export " + x for x in repr(CFG).split('\n')],
clean_lockdir=str(CFG["slurm"]["node_dir"]),
clean_lockfile=str(CFG["slurm"]["node_dir"]) + \
".clean-in-progress.lock",
cpus=int(CFG['slurm']['cpus_per_task']),
exclusive=bool(CFG['slurm']['exclusive']),
lockfile=str(CFG['slurm']["node_dir"]) + ".lock",
log=local.path(logs_dir) / str(experiment.id),
max_running=int(CFG['slurm']['max_running']),
name=experiment.name,
nice=int(CFG['slurm']['nice']),
nice_clean=int(CFG["slurm"]["nice_clean"]),
node_command=node_command,
no_multithreading=not CFG['slurm']['multithread'],
ntasks=1,
prefix=str(CFG["slurm"]["node_dir"]),
projects=projects,
slurm_account=str(CFG["slurm"]["account"]),
slurm_partition=str(CFG["slurm"]["partition"]),
timelimit=str(CFG['slurm']['timelimit']),
)
)
chmod("+x", script_name)
if not __verify__(script_name):
LOG.error("SLURM script failed verification.")
print("SLURM script written to {0}".format(script_name))
return script_name
| 732,297
|
Print a small summary of the executed plan.
Args:
num_actions (int): Total size of the executed plan.
failed (:obj:`list` of :obj:`actions.Step`): List of failed actions.
duration: Time we spent executing the plan.
|
def print_summary(num_actions, failed, duration):
num_failed = len(failed)
    # NOTE: the original message string was lost; the text below is a
    # reconstruction that keeps the same named format fields.
    print("Executed {num_total} actions, {num_failed} failed "
          "(took {elapsed_time}s).".format(
              num_total=num_actions, num_failed=num_failed,
              elapsed_time=duration))
| 732,314
|
Execute the plan.
Args:
plan (:obj:`list` of :obj:`actions.Step`): The plan we want to execute.
Returns:
(:obj:`list` of :obj:`actions.Step`): A list of failed actions.
|
def execute_plan(plan):
results = [action() for action in plan]
return [result for result in results if actns.step_has_failed(result)]
| 732,387
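Together with print_summary above, a run might look like this (the plan
itself is assumed to be built elsewhere):

    import time

    start = time.time()
    failed = execute_plan(plan)
    print_summary(len(plan), failed, time.time() - start)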
|
Convert a function's return value to a list of StepResults.
All Step subclasses automatically wrap the result of their
__call__ method with this wrapper.
A falsy result is replaced by the default of `[StepResult.OK]`;
a result that is not iterable is wrapped in a list.
Args:
func: The function to wrap.
|
def to_step_result(func):
@ft.wraps(func)
def wrapper(*args, **kwargs):
res = func(*args, **kwargs)
if not res:
res = [StepResult.OK]
if not hasattr(res, "__iter__"):
res = [res]
return res
return wrapper
| 732,457
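A minimal sketch of the wrapper's behavior (the decorated function is
hypothetical):

    @to_step_result
    def clean():
        pass  # returns None

    assert clean() == [StepResult.OK]  # falsy result -> default OK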
|
Check if this device has a given button.
Args:
button (int): Button to check for, see ``input.h`` for button
definitions.
Returns:
bool: :obj:`True` if the device has this button, :obj:`False` if
it does not.
Raises:
AssertionError
|
def has_button(self, button):
rc = self._libinput.libinput_device_pointer_has_button(
self._handle, button)
assert rc >= 0, 'This device is not a pointer device'
return bool(rc)
| 732,539
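A hedged sketch, assuming `pointer` is an object exposing this method
(the constant value comes from linux/input-event-codes.h):

    BTN_LEFT = 0x110
    if pointer.has_button(BTN_LEFT):
        print("device reports a left button")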
|
Check if a :attr:`~libinput.constant.DeviceCapability.KEYBOARD`
device has a given key.
Args:
key (int): Key to check for, see ``input.h`` for key definitions.
Returns:
bool: :obj:`True` if the device has this key, :obj:`False` if
it does not.
Raises:
AssertionError
|
def has_key(self, key):
rc = self._libinput.libinput_device_keyboard_has_key(self._handle, key)
assert rc >= 0, 'This device is not a keyboard device'
return bool(rc)
| 732,540
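The keyboard variant follows the same pattern (KEY_A is 30 in
linux/input-event-codes.h; `keyboard` is a hypothetical device object):

    KEY_A = 30
    if keyboard.has_key(KEY_A):
        print("device has an 'A' key")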
|
Return the mode group with the given index.
While a reference is kept by the caller, the returned mode group
will compare equal with the mode group returned by each subsequent
call of this method with the same index, and with the mode group
returned from :attr:`~libinput.event.TabletPadEvent.mode_group`,
provided the event was generated by this mode group.
Args:
group (int): A mode group index.
Returns:
~libinput.define.TabletPadModeGroup: The mode group with the given
index or :obj:`None` if an invalid index is given.
|
def get_mode_group(self, group):
hmodegroup = self._libinput.libinput_device_tablet_pad_get_mode_group(
self._handle, group)
if hmodegroup:
return TabletPadModeGroup(hmodegroup, self._libinput)
return None
| 732,546
|
Creates a version for a project out of the hash.
The hash is taken from the directory of the source file.
Args:
src_file: The source file of the project using this function.
Returns:
    Either the first 7 characters of the hash as a string, the
    entire hash as a string if it consists of 7 or fewer
    characters, or None if the path is incorrect.
|
def get_version_from_cache_dir(src_file):
if src_file is None:
return None
tmp_dir = local.path(str(CFG["tmp_dir"]))
if tmp_dir.exists():
cache_file = tmp_dir / src_file
dir_hash = get_hash_of_dirs(cache_file)
if dir_hash is None:
return None
if len(str(dir_hash)) <= 7:
return str(dir_hash)
return str(dir_hash)[:7]
return None
| 732,649
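A short sketch (the source file name is hypothetical):

    version = get_version_from_cache_dir("foo-1.0.tar.gz")
    if version:
        print("cached source version:", version)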
|
Get the git commit hash of HEAD from ``from_url``.
Args:
from_url: The file system url of our git repository.
Returns:
git commit hash of HEAD, or empty string.
|
def get_git_hash(from_url):
from benchbuild.utils.cmd import git
if from_url is None:
return ""
if not path.exists(from_url):
return ""
with local.cwd(from_url):
return git("rev-parse", "HEAD", retcode=None)
| 732,650
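A short sketch (the repository path is hypothetical):

    head = get_git_hash("/tmp/benchbuild/foo.git")
    if head:
        print("HEAD is at", head.strip())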
|
Fetch the output of /usr/bin/time from a list of lines.
Args:
    marker: The marker that identifies the time output lines.
    format_s: The format string used to parse the timings.
    ins: A list of lines we search for the output in.
Returns:
A list of timing tuples
|
def fetch_time_output(marker, format_s, ins):
from parse import parse
timings = [x for x in ins if marker in x]
res = [parse(format_s, t) for t in timings]
return [_f for _f in res if _f]
| 732,653
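A minimal sketch, assuming /usr/bin/time was run with a custom format
such as -f "BB-TIME: %U %S %e" (marker and format string are
hypothetical):

    lines = ["unrelated output", "BB-TIME: 1.23 0.45 2.00"]
    res = fetch_time_output("BB-TIME:", "BB-TIME: {:g} {:g} {:g}", lines)
    # res[0] is a parse.Result; res[0][0] == 1.23 (user time)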
|
Return a list of filenames found at ``path``.
The list of filenames can be filtered by extensions.
Arguments:
path: Existing filepath we want to list.
exts: List of extensions to filter by.
Returns:
A list of filenames found in the path.
|
def template_files(path, exts=None):
    # Resolve relative paths against the benchbuild installation path.
    _path = path
    if not os.path.isabs(path):
        _path = os.path.join(determine_path(), path)
    if not (os.path.exists(_path) and os.path.isdir(_path)):
        return []
    files = os.listdir(_path)
    # An empty/None exts list means: do not filter at all.
    if exts:
        files = [f for f in files if os.path.splitext(f)[-1] in exts]
files = [os.path.join(path, f) for f in files]
return files
| 732,658
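A hedged example (the relative path and extension are illustrative):

    # List all *.inc templates relative to the installation path.
    incs = template_files("utils/templates", exts=[".inc"])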
|
Create a file inside a uchroot env.
You will want to use this when you need to create a file with appropriate
rights inside a uchroot container with subuid/subgid handling enabled.
Args:
filepath:
The filepath that should be created. Absolute inside the
uchroot container.
root:
The root PATH of the container filesystem as seen outside of
the container.
|
def mkfile_uchroot(filepath, root="."):
from benchbuild.utils.uchroot import no_args, uretry
uchroot = no_args()
uchroot = uchroot["-E", "-A", "-C", "-w", "/", "-r"]
uchroot = uchroot[os.path.abspath(root)]
uretry(uchroot["--", "/bin/touch", filepath])
| 732,660
|
Create a directory inside a uchroot env.
You will want to use this when you need to create a directory with
appropriate rights inside a uchroot container with subuid/subgid
handling enabled.
Args:
dirpath:
The dirpath that should be created. Absolute inside the
uchroot container.
root:
The root PATH of the container filesystem as seen outside of
the container.
|
def mkdir_uchroot(dirpath, root="."):
from benchbuild.utils.uchroot import no_args, uretry
uchroot = no_args()
uchroot = uchroot["-E", "-A", "-C", "-w", "/", "-r"]
uchroot = uchroot[os.path.abspath(root)]
uretry(uchroot["--", "/bin/mkdir", "-p", dirpath])
| 732,661
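A sketch combining both helpers (paths are hypothetical):

    mkdir_uchroot("/etc/portage", root="container-root")
    mkfile_uchroot("/etc/portage/make.conf", root="container-root")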
|
Create a directory if required.
This will query the user for a confirmation.
Args:
dirpath: The path to create.
|
def mkdir_interactive(dirpath):
from benchbuild.utils.cmd import mkdir
if os.path.exists(dirpath):
return
response = ui.ask(
"The directory {dirname} does not exist yet. "
"Should I create it?".format(dirname=dirpath),
default_answer=True,
default_answer_str="yes")
if response:
mkdir("-p", dirpath)
print("Created directory {0}.".format(dirpath))
| 732,662
|
From a MeasYaps XML root, find the next sibling of the node matching 'search'.
MeasYaps looks like:
<value>Key</value>
<value>Value</value>
Thus 'search' is the Key and we want to find the node that has the Value.
We return the node containing the desired Value.
Arguments:
root (Element): root XML node (xml.etree.ElementTree Element)
search (str): String to match against Element.text
|
def get_val_by_text(root, search):
found_flag = False
for el in root.iter():
if found_flag:
            return el
if el.text == search:
# We want the next el
found_flag = True
| 732,663
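A self-contained sketch (the key/value pair is a hypothetical MeasYaps
entry):

    import xml.etree.ElementTree as ET

    root = ET.fromstring(
        "<root><value>tSequenceFileName</value>"
        "<value>gre_field_mapping</value></root>")
    node = get_val_by_text(root, "tSequenceFileName")
    assert node.text == "gre_field_mapping"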
|
Add the new path to the list of paths to clean up afterwards.
Args:
new_path: Path to the directory that need to be cleaned up.
|
def __update_cleanup_paths(new_path):
cleanup_dirs = settings.CFG["cleanup_paths"].value
cleanup_dirs = set(cleanup_dirs)
cleanup_dirs.add(new_path)
cleanup_dirs = list(cleanup_dirs)
settings.CFG["cleanup_paths"] = cleanup_dirs
| 732,676
|
Setup a unionfs via unionfs-fuse.
Args:
ro_dir: The read-only base directory of the project.
rw_dir: The writable image directory that captures all changes.
mount_dir: The location where ro_dir and rw_dir are merged.
|
def __unionfs_set_up(ro_dir, rw_dir, mount_dir):
mount_dir.mkdir()
rw_dir.mkdir()
if not ro_dir.exists():
LOG.error("Base dir does not exist: '%s'", ro_dir)
raise ValueError("Base directory does not exist")
from benchbuild.utils.cmd import unionfs as unionfs_cmd
LOG.debug("Mounting UnionFS on %s with RO:%s RW:%s", mount_dir, ro_dir,
rw_dir)
return unionfs_cmd["-f", "-o", "auto_unmount,allow_other,cow", rw_dir +
"=RW:" + ro_dir + "=RO", mount_dir]
| 732,679
|