repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
frnmst/md-toc | md_toc/api.py | build_toc | python | def build_toc(filename: str,
ordered: bool = False,
no_links: bool = False,
no_indentation: bool = False,
no_list_coherence: bool = False,
keep_header_levels: int = 3,
parser: str = 'github',
list_marker: str = '-') -> str:
r"""Build the table of contents of a single file.
:parameter filename: the file that needs to be read.
:parameter ordered: decides whether to build an ordered list or not.
Defaults to ``False``.
:parameter no_links: disables the use of links.
Defaults to ``False``.
:parameter no_indentation: disables indentation in the list.
Defaults to ``False``.
:parameter keep_header_levels: the maximum level of headers to be
considered as such when building the table of contents.
Defaults to ``3``.
:parameter parser: decides rules on how to generate anchor links.
Defaults to ``github``.
:type filename: str
:type ordered: bool
:type no_links: bool
:type no_indentation: bool
:type keep_header_levels: int
:type parser: str
:returns: toc, the corresponding table of contents of the file.
:rtype: str
:raises: a built-in exception.
"""
toc = str()
header_type_counter = dict()
header_type_curr = 0
header_type_prev = 0
header_duplicate_counter = dict()
no_of_indentation_spaces_prev = 0
if ordered:
list_marker_log = build_list_marker_log(parser, list_marker)
if filename == '-':
f = sys.stdin
else:
f = open(filename, 'r')
line = f.readline()
if ordered:
list_marker_log = build_list_marker_log(parser, list_marker)
else:
list_marker_log = list()
is_within_code_fence = False
code_fence = None
is_document_end = False
if not no_indentation and not no_list_coherence:
# if indentation and list coherence.
indentation_list = build_indentation_list(parser)
while line:
# Document ending detection.
#
# This changes the state of is_within_code_fence if the
# file has no closing fence markers. This serves no practial
# purpose since the code would run correctly anyway. It is
# however more sematically correct.
#
# See the unit tests (examples 95 and 96 of the github parser)
# and the is_closing_code_fence function.
if filename != '-':
# stdin is not seekable.
file_pointer_pos = f.tell()
if f.readline() == str():
is_document_end = True
f.seek(file_pointer_pos)
# Code fence detection.
if is_within_code_fence:
is_within_code_fence = not is_closing_code_fence(
line, code_fence, is_document_end, parser)
line = f.readline()
else:
code_fence = is_opening_code_fence(line, parser)
if code_fence is not None:
# Update the status of the next line.
is_within_code_fence = True
line = f.readline()
if not is_within_code_fence or code_fence is None:
# Header detection and gathering.
header = get_md_header(line, header_duplicate_counter,
keep_header_levels, parser, no_links)
if header is not None:
header_type_curr = header['type']
# Take care of the ordered TOC.
if ordered:
increase_index_ordered_list(header_type_counter,
header_type_prev,
header_type_curr, parser)
index = header_type_counter[header_type_curr]
else:
index = 1
# Take care of list indentations.
if no_indentation:
no_of_indentation_spaces_curr = 0
# TOC list coherence checks are not necessary
# without indentation.
else:
if not no_list_coherence:
if not toc_renders_as_coherent_list(
header_type_curr, indentation_list, parser):
raise TocDoesNotRenderAsCoherentList
no_of_indentation_spaces_curr = compute_toc_line_indentation_spaces(
header_type_curr, header_type_prev,
no_of_indentation_spaces_prev, parser, ordered,
list_marker, list_marker_log, index)
# Build a single TOC line.
toc_line_no_indent = build_toc_line_without_indentation(
header, ordered, no_links, index, parser, list_marker)
# Save the TOC line with the indentation.
toc += build_toc_line(toc_line_no_indent,
no_of_indentation_spaces_curr) + '\n'
header_type_prev = header_type_curr
no_of_indentation_spaces_prev = no_of_indentation_spaces_curr
# endif
# endif
line = f.readline()
# endwhile
f.close()
return toc | r"""Build the table of contents of a single file.
:parameter filename: the file that needs to be read.
:parameter ordered: decides whether to build an ordered list or not.
Defaults to ``False``.
:parameter no_links: disables the use of links.
Defaults to ``False``.
:parameter no_indentation: disables indentation in the list.
Defaults to ``False``.
:parameter keep_header_levels: the maximum level of headers to be
considered as such when building the table of contents.
Defaults to ``3``.
:parameter parser: decides rules on how to generate anchor links.
Defaults to ``github``.
:type filename: str
:type ordered: bool
:type no_links: bool
:type no_indentation: bool
:type keep_header_levels: int
:type parser: str
:returns: toc, the corresponding table of contents of the file.
:rtype: str
:raises: a built-in exception. | train | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/api.py#L103-L240 | [
"def get_md_header(header_text_line: str,\n header_duplicate_counter: dict,\n keep_header_levels: int = 3,\n parser: str = 'github',\n no_links: bool = False) -> dict:\n r\"\"\"Build a data structure with the elements needed to create a TOC line.\n\n :parameter header_text_line: a single markdown line that needs to be\n transformed into a TOC line.\n :parameter header_duplicate_counter: a data structure that contains the\n number of occurrencies of each header anchor link. This is used to\n avoid duplicate anchor links and it is meaningful only for certain\n values of parser.\n :parameter keep_header_levels: the maximum level of headers to be\n considered as such when building the table of contents.\n Defaults to ``3``.\n :parameter parser: decides rules on how to generate anchor links.\n Defaults to ``github``.\n :type header_text_line: str\n :type header_duplicate_counter: dict\n :type keep_header_levels: int\n :type parser: str\n :returns: None if the input line does not correspond to one of the\n designated cases or a data structure containing the necessary\n components to create a table of contents line, otherwise.\n :rtype: dict\n :raises: a built-in exception.\n\n .. note::\n This works like a wrapper to other functions.\n \"\"\"\n result = get_atx_heading(header_text_line, keep_header_levels, parser,\n no_links)\n if result is None:\n return result\n else:\n header_type, header_text_trimmed = result\n header = {\n 'type':\n header_type,\n 'text_original':\n header_text_trimmed,\n 'text_anchor_link':\n build_anchor_link(header_text_trimmed, header_duplicate_counter,\n parser)\n }\n return header\n",
"def build_toc_line(toc_line_no_indent: str,\n no_of_indentation_spaces: int = 0) -> str:\n r\"\"\"Build the TOC line.\n\n :parameter toc_line_no_indent: the TOC line without indentation.\n :parameter no_of_indentation_spaces: the number of indentation spaces.\n Defaults to ``0``.\n :type toc_line_no_indent: str\n :type no_of_indentation_spaces: int\n :returns: toc_line, a single line of the table of contents.\n :rtype: str\n :raises: a built-in exception.\n \"\"\"\n assert no_of_indentation_spaces >= 0\n\n indentation = no_of_indentation_spaces * ' '\n toc_line = indentation + toc_line_no_indent\n\n return toc_line\n",
"def increase_index_ordered_list(header_type_count: dict,\n header_type_prev: int,\n header_type_curr: int,\n parser: str = 'github'):\n r\"\"\"Compute the current index for ordered list table of contents.\n\n :parameter header_type_count: the count of each header type.\n :parameter header_type_prev: the previous type of header (h[1-Inf]).\n :parameter header_type_curr: the current type of header (h[1-Inf]).\n :parameter parser: decides rules on how to generate ordered list markers.\n Defaults to ``github``.\n :type header_type_count: dict\n :type header_type_prev: int\n :type header_type_curr: int\n :type parser: str\n :returns: None\n :rtype: None\n :raises: GithubOverflowOrderedListMarker or a built-in exception.\n \"\"\"\n # header_type_prev might be 0 while header_type_curr can't.\n assert header_type_prev >= 0\n assert header_type_curr >= 1\n\n # Base cases for a new table of contents or a new index type.\n if header_type_prev == 0:\n header_type_prev = header_type_curr\n if (header_type_curr not in header_type_count\n or header_type_prev < header_type_curr):\n header_type_count[header_type_curr] = 0\n\n header_type_count[header_type_curr] += 1\n\n if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'\n or parser == 'commonmarker'):\n if header_type_count[header_type_curr] > md_parser['github']['list'][\n 'ordered']['max_marker_number']:\n raise GithubOverflowOrderedListMarker\n",
"def build_list_marker_log(parser: str = 'github',\n list_marker: str = '.') -> list:\n r\"\"\"Create a data structure that holds list marker information.\n\n :parameter parser: decides rules on how compute indentations.\n Defaults to ``github``.\n :parameter list_marker: a string that contains some of the first\n characters of the list element. Defaults to ``-``.\n :type parser: str\n :type list_marker: str\n :returns: list_marker_log, the data structure.\n :rtype: list\n :raises: a built-in exception.\n\n .. note::\n This function makes sense for ordered lists only.\n \"\"\"\n if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'\n or parser == 'commonmarker' or parser == 'redcarpet'):\n assert list_marker in md_parser[parser]['list']['ordered'][\n 'closing_markers']\n\n list_marker_log = list()\n\n if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'\n or parser == 'commonmarker'):\n list_marker_log = [\n str(md_parser['github']['list']['ordered']['min_marker_number']) +\n list_marker\n for i in range(0, md_parser['github']['header']['max_levels'])\n ]\n\n elif parser == 'redcarpet':\n pass\n\n return list_marker_log\n",
"def compute_toc_line_indentation_spaces(\n header_type_curr: int = 1,\n header_type_prev: int = 0,\n no_of_indentation_spaces_prev: int = 0,\n parser: str = 'github',\n ordered: bool = False,\n list_marker: str = '-',\n list_marker_log: list = build_list_marker_log('github', '.'),\n index: int = 1) -> int:\n r\"\"\"Compute the number of indentation spaces for the TOC list element.\n\n :parameter header_type_curr: the current type of header (h[1-Inf]).\n Defaults to ``1``.\n :parameter header_type_prev: the previous type of header (h[1-Inf]).\n Defaults to ``0``.\n :parameter no_of_indentation_spaces_prev: the number of previous indentation spaces.\n Defaults to ``0``.\n :parameter parser: decides rules on how compute indentations.\n Defaults to ``github``.\n :parameter ordered: if set to ``True``, numbers will be used\n as list ids or otherwise a dash character, otherwise.\n Defaults to ``False``.\n :parameter list_marker: a string that contains some of the first\n characters of the list element.\n Defaults to ``-``.\n :parameter list_marker_log: a data structure that holds list marker\n information for ordered lists.\n Defaults to ``build_list_marker_log('github', '.')``.\n :parameter index: a number that will be used as list id in case of an\n ordered table of contents. Defaults to ``1``.\n :type header_type_curr: int\n :type header_type_prev: int\n :type no_of_indentation_spaces_prev: int\n :type parser: str\n :type ordered: bool\n :type list_marker: str\n :type list_marker_log: list\n :type index: int\n :returns: no_of_indentation_spaces_curr, the number of indentation spaces\n for the list element.\n :rtype: int\n :raises: a built-in exception.\n\n .. 
note::\n Please note that this function\n assumes that no_of_indentation_spaces_prev contains the correct\n number of spaces.\n \"\"\"\n assert header_type_curr >= 1\n assert header_type_prev >= 0\n assert no_of_indentation_spaces_prev >= 0\n if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'\n or parser == 'commonmarker' or parser == 'redcarpet'):\n if ordered:\n assert list_marker in md_parser[parser]['list']['ordered'][\n 'closing_markers']\n else:\n assert list_marker in md_parser[parser]['list']['unordered'][\n 'bullet_markers']\n if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'\n or parser == 'commonmarker'):\n if ordered:\n assert len(\n list_marker_log) == md_parser['github']['header']['max_levels']\n for e in list_marker_log:\n assert isinstance(e, str)\n assert index >= 1\n\n if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'\n or parser == 'commonmarker'):\n if header_type_prev == 0:\n # Base case for the first toc line.\n no_of_indentation_spaces_curr = 0\n elif header_type_curr == header_type_prev:\n # Base case for same indentation.\n no_of_indentation_spaces_curr = no_of_indentation_spaces_prev\n else:\n if ordered:\n list_marker_prev = str(list_marker_log[header_type_curr - 1])\n else:\n # list_marker for unordered lists will always be 1 character.\n list_marker_prev = list_marker\n\n # Generic cases.\n if header_type_curr > header_type_prev:\n # More indentation.\n no_of_indentation_spaces_curr = (\n no_of_indentation_spaces_prev + len(list_marker_prev) +\n len(' '))\n elif header_type_curr < header_type_prev:\n # Less indentation.\n no_of_indentation_spaces_curr = (\n no_of_indentation_spaces_prev -\n (len(list_marker_prev) + len(' ')))\n\n # Reset older nested list indices. 
If this is not performed then\n # future nested ordered lists will rely on incorrect data to\n # compute indentations.\n if ordered:\n for i in range((header_type_curr - 1) + 1,\n md_parser['github']['header']['max_levels']):\n list_marker_log[i] = str(\n md_parser['github']['list']['ordered']\n ['min_marker_number']) + list_marker\n\n # Update the data structure.\n if ordered:\n list_marker_log[header_type_curr - 1] = str(index) + list_marker\n\n elif parser == 'redcarpet':\n no_of_indentation_spaces_curr = 4 * (header_type_curr - 1)\n\n return no_of_indentation_spaces_curr\n",
"def build_toc_line_without_indentation(header: dict,\n ordered: bool = False,\n no_links: bool = False,\n index: int = 1,\n parser: str = 'github',\n list_marker: str = '-') -> str:\n r\"\"\"Return a list element of the table of contents.\n\n :parameter header: a data structure that contains the original\n text, the trimmed text and the type of header.\n :parameter ordered: if set to ``True``, numbers will be used\n as list ids, otherwise a dash character. Defaults\n to ``False``.\n :parameter no_links: disables the use of links. Defaults to ``False``.\n :parameter index: a number that will be used as list id in case of an\n ordered table of contents. Defaults to ``1``.\n :parameter parser: decides rules on how compute indentations.\n Defaults to ``github``.\n :parameter list_marker: a string that contains some of the first\n characters of the list element. Defaults to ``-``.\n :type header: dict\n :type ordered: bool\n :type no_links: bool\n :type index: int\n :type parser: str\n :type list_marker: str\n :returns: toc_line_no_indent, a single line of the table of contents\n without indentation.\n :rtype: str\n :raises: a built-in exception.\n \"\"\"\n assert 'type' in header\n assert 'text_original' in header\n assert 'text_anchor_link' in header\n assert isinstance(header['type'], int)\n assert isinstance(header['text_original'], str)\n assert isinstance(header['text_anchor_link'], str)\n assert header['type'] >= 1\n assert index >= 1\n if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'\n or parser == 'commonmarker' or parser == 'redcarpet'):\n if ordered:\n assert list_marker in md_parser[parser]['list']['ordered'][\n 'closing_markers']\n else:\n assert list_marker in md_parser[parser]['list']['unordered'][\n 'bullet_markers']\n\n toc_line_no_indent = str()\n\n if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'\n or parser == 'commonmarker' or parser == 'redcarpet'):\n if ordered:\n list_marker = str(index) + list_marker\n\n # 
FIXME: is this always correct?\n if no_links:\n line = header['text_original']\n else:\n line = '[' + header['text_original'] + ']' + '(#' + header[\n 'text_anchor_link'] + ')'\n toc_line_no_indent = list_marker + ' ' + line\n\n return toc_line_no_indent\n",
"def is_opening_code_fence(line: str, parser: str = 'github'):\n r\"\"\"Determine if the given line is possibly the opening of a fenced code block.\n\n :parameter line: a single markdown line to evaluate.\n :parameter parser: decides rules on how to generate the anchor text.\n Defaults to ``github``.\n :type line: str\n :type parser: str\n :returns: None if the input line is not an opening code fence. Otherwise,\n returns the string which will identify the closing code fence\n according to the input parsers' rules.\n :rtype: typing.Optional[str]\n :raises: a built-in exception.\n \"\"\"\n if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'\n or parser == 'commonmarker'):\n markers = md_parser['github']['code fence']['marker']\n marker_min_length = md_parser['github']['code fence'][\n 'min_marker_characters']\n\n if not is_valid_code_fence_indent(line):\n return None\n\n line = line.lstrip(' ').rstrip('\\n')\n if not line.startswith(\n (markers[0] * marker_min_length, markers[1] * marker_min_length)):\n return None\n\n if line == len(line) * line[0]:\n info_string = str()\n else:\n info_string = line.lstrip(line[0])\n # Backticks or tildes in info string are explicitly forbidden.\n if markers[0] in info_string or markers[1] in info_string:\n return None\n # Solves example 107. See:\n # https://github.github.com/gfm/#example-107\n if line.rstrip(markers[0]) != line and line.rstrip(markers[1]) != line:\n return None\n\n return line.rstrip(info_string)\n elif parser == 'redcarpet':\n # TODO.\n return None\n",
"def is_closing_code_fence(line: str,\n fence: str,\n is_document_end: bool = False,\n parser: str = 'github') -> bool:\n r\"\"\"Determine if the given line is the end of a fenced code block.\n\n :parameter line: a single markdown line to evaluate.\n :paramter fence: a sequence of backticks or tildes marking the start of\n the current code block. This is usually the return value of the\n is_opening_code_fence function.\n :parameter is_document_end: This variable tells the function that the\n end of the file is reached.\n Defaults to ``False``.\n :parameter parser: decides rules on how to generate the anchor text.\n Defaults to ``github``.\n :type line: str\n :type fence: str\n :type is_document_end: bool\n :type parser: str\n :returns: True if the line ends the current code block. False otherwise.\n :rtype: bool\n :raises: a built-in exception.\n \"\"\"\n if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'\n or parser == 'commonmarker'):\n markers = md_parser['github']['code fence']['marker']\n marker_min_length = md_parser['github']['code fence'][\n 'min_marker_characters']\n\n if not is_valid_code_fence_indent(line):\n return False\n\n # Remove opening fence indentation after it is known to be valid.\n fence = fence.lstrip(' ')\n # Check if fence uses valid characters.\n if not fence.startswith((markers[0], markers[1])):\n return False\n\n if len(fence) < marker_min_length:\n return False\n\n # Additional security.\n fence = fence.rstrip('\\n').rstrip(' ')\n # Check that all fence characters are equal.\n if fence != len(fence) * fence[0]:\n return False\n\n # We might be inside a code block if this is not closed\n # by the end of the document, according to example 95 and 96.\n # This means that the end of the document corresponds to\n # a closing code fence.\n # Of course we first have to check that fence is a valid opening\n # code fence marker.\n # See:\n # https://github.github.com/gfm/#example-95\n # https://github.github.com/gfm/#example-96\n 
if is_document_end:\n return True\n\n # Check if line uses the same character as fence.\n line = line.lstrip(' ')\n if not line.startswith(fence):\n return False\n\n line = line.rstrip('\\n').rstrip(' ')\n # Solves example 93 and 94. See:\n # https://github.github.com/gfm/#example-93\n # https://github.github.com/gfm/#example-94\n if len(line) < len(fence):\n return False\n\n # Closing fence must not have alien characters.\n if line != len(line) * line[0]:\n return False\n\n return True\n elif parser == 'redcarpet':\n # TODO.\n return False\n",
"def build_indentation_list(parser: str = 'github'):\n r\"\"\"Create a data structure that holds the state of indentations.\n\n :parameter parser: decides the length of the list.\n Defaults to ``github``.\n :type parser: str\n :returns: indentation_list, a list that contains the state of\n indentations given a header type.\n :rtype: list\n :raises: a built-in exception.\n \"\"\"\n indentation_list = list()\n\n if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'\n or parser == 'commonmarker' or parser == 'redcarpet'):\n for i in range(0, md_parser[parser]['header']['max_levels']):\n indentation_list.append(False)\n\n return indentation_list\n",
"def toc_renders_as_coherent_list(\n header_type_curr: int = 1,\n indentation_list: list = build_indentation_list('github'),\n parser: str = 'github') -> bool:\n r\"\"\"Check if the TOC will render as a working list.\n\n :parameter header_type_curr: the current type of header (h[1-Inf]).\n :parameter parser: decides rules on how to generate ordered list markers\n :type header_type_curr: int\n :type indentation_list: list\n :type parser: str\n :returns: renders_as_list\n :rtype: bool\n :raises: a built-in exception.\n \"\"\"\n assert header_type_curr >= 1\n if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'\n or parser == 'commonmarker' or parser == 'redcarpet'):\n assert len(\n indentation_list) == md_parser[parser]['header']['max_levels']\n for e in indentation_list:\n assert isinstance(e, bool)\n\n renders_as_list = True\n if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'\n or parser == 'commonmarker' or parser == 'redcarpet'):\n # Update with current information.\n indentation_list[header_type_curr - 1] = True\n\n # Reset next cells to False, as a detection mechanism.\n for i in range(header_type_curr,\n md_parser['github']['header']['max_levels']):\n indentation_list[i] = False\n\n # Check for previous False cells. If there is a \"hole\" in the list\n # it means that the TOC will have \"wrong\" indentation spaces, thus\n # either not rendering as an HTML list or not as the user intended.\n i = header_type_curr - 1\n while i >= 0 and indentation_list[i]:\n i -= 1\n if i >= 0:\n renders_as_list = False\n\n return renders_as_list\n"
] | #
# api.py
#
# Copyright (C) 2017-2019 frnmst (Franco Masotti) <franco.masotti@live.com>
#
# This file is part of md-toc.
#
# md-toc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# md-toc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with md-toc. If not, see <http://www.gnu.org/licenses/>.
#
"""The main file."""
import fpyutils
import re
import curses.ascii
import sys
from .exceptions import (GithubOverflowCharsLinkLabel, GithubEmptyLinkLabel,
GithubOverflowOrderedListMarker,
StdinIsNotAFileToBeWritten,
TocDoesNotRenderAsCoherentList)
from .constants import common_defaults
from .constants import parser as md_parser
def write_string_on_file_between_markers(filename: str, string: str,
                                         marker: str):
    r"""Write the table of contents on a single file.

    :parameter filename: the file that needs to be read or modified.
    :parameter string: the string that will be written on the file.
    :parameter marker: a marker that will identify the start
         and the end of the string.
    :type filename: str
    :type string: str
    :type marker: str
    :returns: None
    :rtype: None
    :raises: StdinIsNotAFileToBeWritten or an fpyutils exception
         or a built-in exception.
    """
    if filename == '-':
        raise StdinIsNotAFileToBeWritten
    # Wrap the payload between two marker lines, separated by blank lines.
    final_string = marker + '\n\n' + string.rstrip() + '\n\n' + marker + '\n'
    # Locate up to two lines that loosely match the marker.
    positions = fpyutils.get_line_matches(
        filename, marker, 2, loose_matching=True)
    if 1 in positions:
        first = positions[1]
        # Drop the old content: the whole interval between the two markers
        # when both are present, otherwise just the single marker line.
        last = positions[2] if 2 in positions else first
        fpyutils.remove_line_interval(filename, first, last, filename)
        # Re-insert the freshly wrapped string at the first marker position.
        fpyutils.insert_string_at_line(
            filename, final_string, first, filename, append=False)
def write_strings_on_files_between_markers(filenames: list, strings: list,
                                           marker: str):
    r"""Write the table of contents on multiple files.

    :parameter filenames: the files that need to be read or modified.
    :parameter strings: the strings that will be written on the files. Each
         string is associated with one file.
    :parameter marker: a marker that will identify the start
         and the end of the string.
    :type filenames: list
    :type strings: list
    :type marker: str
    :returns: None
    :rtype: None
    :raises: an fpyutils exception or a built-in exception.
    """
    assert len(filenames) == len(strings)
    for f in filenames:
        assert isinstance(f, str)
    for s in strings:
        assert isinstance(s, str)
    # zip pairs each file with its string: no manual index bookkeeping.
    # (Iterating an empty list is a no-op, so no length guards are needed.)
    for f, s in zip(filenames, strings):
        write_string_on_file_between_markers(f, s, marker)
def build_multiple_tocs(filenames: list,
                        ordered: bool = False,
                        no_links: bool = False,
                        no_indentation: bool = False,
                        no_list_coherence: bool = False,
                        keep_header_levels: int = 3,
                        parser: str = 'github',
                        list_marker: str = '-') -> list:
    r"""Parse files by line and build the table of contents of each file.

    :parameter filenames: the files that need to be read. An empty list
         means that input is read from stdin.
    :parameter ordered: decides whether to build an ordered list or not.
         Defaults to ``False``.
    :parameter no_links: disables the use of links.
         Defaults to ``False``.
    :parameter no_indentation: disables indentation in the list.
         Defaults to ``False``.
    :parameter no_list_coherence: disables the TOC list coherence checks.
         Defaults to ``False``.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``-``.
    :type filenames: list
    :type ordered: bool
    :type no_links: bool
    :type no_indentation: bool
    :type no_list_coherence: bool
    :type keep_header_levels: int
    :type parser: str
    :type list_marker: str
    :returns: toc_struct, the corresponding table of contents for each
         input file.
    :rtype: list
    :raises: a built-in exception.
    """
    for f in filenames:
        assert isinstance(f, str)
    # Read from stdin when no file is given. Rebind a local name instead of
    # appending to the argument: the previous implementation mutated the
    # caller's list as a side effect.
    if len(filenames) == 0:
        filenames = ['-']
    return [
        build_toc(filename, ordered, no_links, no_indentation,
                  no_list_coherence, keep_header_levels, parser, list_marker)
        for filename in filenames
    ]
def increase_index_ordered_list(header_type_count: dict,
                                header_type_prev: int,
                                header_type_curr: int,
                                parser: str = 'github'):
    r"""Compute the current index for ordered list table of contents.

    :parameter header_type_count: the count of each header type.
    :parameter header_type_prev: the previous type of header (h[1-Inf]).
    :parameter header_type_curr: the current type of header (h[1-Inf]).
    :parameter parser: decides rules on how to generate ordered list markers.
         Defaults to ``github``.
    :type header_type_count: dict
    :type header_type_prev: int
    :type header_type_curr: int
    :type parser: str
    :returns: None
    :rtype: None
    :raises: GithubOverflowOrderedListMarker or a built-in exception.
    """
    # header_type_prev might be 0 while header_type_curr can't.
    assert header_type_prev >= 0
    assert header_type_curr >= 1

    # A brand new TOC: pretend the previous header had the current type.
    if header_type_prev == 0:
        header_type_prev = header_type_curr

    # Restart the counter for a header type seen for the first time or
    # when the list nesting gets deeper.
    restart = (header_type_curr not in header_type_count
               or header_type_prev < header_type_curr)
    if restart:
        header_type_count[header_type_curr] = 0
    header_type_count[header_type_curr] += 1

    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        limit = md_parser['github']['list']['ordered']['max_marker_number']
        if header_type_count[header_type_curr] > limit:
            raise GithubOverflowOrderedListMarker
def build_list_marker_log(parser: str = 'github',
                          list_marker: str = '.') -> list:
    r"""Create a data structure that holds list marker information.

    :parameter parser: decides rules on how compute indentations.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``.``.
    :type parser: str
    :type list_marker: str
    :returns: list_marker_log, the data structure.
    :rtype: list
    :raises: a built-in exception.

    .. note::
         This function makes sense for ordered lists only.
    """
    github_like = ('github', 'cmark', 'gitlab', 'commonmarker')
    if parser in github_like or parser == 'redcarpet':
        assert list_marker in md_parser[parser]['list']['ordered'][
            'closing_markers']

    log = list()
    if parser in github_like:
        # Every level starts from the minimum marker number.
        start = str(
            md_parser['github']['list']['ordered']['min_marker_number'])
        log = [
            start + list_marker
            for _ in range(md_parser['github']['header']['max_levels'])
        ]
    # redcarpet does not need a marker log: keep the list empty.
    return log
def compute_toc_line_indentation_spaces(
        header_type_curr: int = 1,
        header_type_prev: int = 0,
        no_of_indentation_spaces_prev: int = 0,
        parser: str = 'github',
        ordered: bool = False,
        list_marker: str = '-',
        list_marker_log: list = None,
        index: int = 1) -> int:
    r"""Compute the number of indentation spaces for the TOC list element.

    :parameter header_type_curr: the current type of header (h[1-Inf]).
         Defaults to ``1``.
    :parameter header_type_prev: the previous type of header (h[1-Inf]).
         Defaults to ``0``.
    :parameter no_of_indentation_spaces_prev: the number of previous
         indentation spaces. Defaults to ``0``.
    :parameter parser: decides rules on how compute indentations.
         Defaults to ``github``.
    :parameter ordered: if set to ``True``, numbers will be used
         as list ids, otherwise a dash character.
         Defaults to ``False``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element.
         Defaults to ``-``.
    :parameter list_marker_log: a data structure that holds list marker
         information for ordered lists. Defaults to ``None``, in which
         case a fresh ``build_list_marker_log('github', '.')`` structure
         is used.
    :parameter index: a number that will be used as list id in case of an
         ordered table of contents. Defaults to ``1``.
    :type header_type_curr: int
    :type header_type_prev: int
    :type no_of_indentation_spaces_prev: int
    :type parser: str
    :type ordered: bool
    :type list_marker: str
    :type list_marker_log: list
    :type index: int
    :returns: no_of_indentation_spaces_curr, the number of indentation spaces
         for the list element.
    :rtype: int
    :raises: a built-in exception.

    .. note::
         Please note that this function
         assumes that no_of_indentation_spaces_prev contains the correct
         number of spaces.
    """
    # Bug fix: the default used to be ``build_list_marker_log('github', '.')``
    # evaluated once at import time. Since this function mutates
    # list_marker_log in place, that shared mutable default leaked state
    # between calls relying on it. Use the None sentinel idiom instead.
    if list_marker_log is None:
        list_marker_log = build_list_marker_log('github', '.')
    assert header_type_curr >= 1
    assert header_type_prev >= 0
    assert no_of_indentation_spaces_prev >= 0
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        if ordered:
            assert list_marker in md_parser[parser]['list']['ordered'][
                'closing_markers']
        else:
            assert list_marker in md_parser[parser]['list']['unordered'][
                'bullet_markers']
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        if ordered:
            assert len(
                list_marker_log) == md_parser['github']['header']['max_levels']
            for e in list_marker_log:
                assert isinstance(e, str)
    assert index >= 1

    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        if header_type_prev == 0:
            # Base case for the first toc line.
            no_of_indentation_spaces_curr = 0
        elif header_type_curr == header_type_prev:
            # Base case for same indentation.
            no_of_indentation_spaces_curr = no_of_indentation_spaces_prev
        else:
            if ordered:
                list_marker_prev = str(list_marker_log[header_type_curr - 1])
            else:
                # list_marker for unordered lists will always be 1 character.
                list_marker_prev = list_marker

            # Generic cases.
            if header_type_curr > header_type_prev:
                # More indentation.
                no_of_indentation_spaces_curr = (
                    no_of_indentation_spaces_prev + len(list_marker_prev) +
                    len(' '))
            elif header_type_curr < header_type_prev:
                # Less indentation.
                no_of_indentation_spaces_curr = (
                    no_of_indentation_spaces_prev -
                    (len(list_marker_prev) + len(' ')))

            # Reset older nested list indices. If this is not performed then
            # future nested ordered lists will rely on incorrect data to
            # compute indentations.
            if ordered:
                for i in range((header_type_curr - 1) + 1,
                               md_parser['github']['header']['max_levels']):
                    list_marker_log[i] = str(
                        md_parser['github']['list']['ordered']
                        ['min_marker_number']) + list_marker

        # Update the data structure.
        if ordered:
            list_marker_log[header_type_curr - 1] = str(index) + list_marker

    elif parser == 'redcarpet':
        # redcarpet uses a fixed 4-space step per header level.
        no_of_indentation_spaces_curr = 4 * (header_type_curr - 1)

    return no_of_indentation_spaces_curr
def build_toc_line_without_indentation(header: dict,
                                       ordered: bool = False,
                                       no_links: bool = False,
                                       index: int = 1,
                                       parser: str = 'github',
                                       list_marker: str = '-') -> str:
    r"""Build a single, unindented line of the table of contents.

    :parameter header: a data structure that contains the original
         text, the trimmed text and the type of header.
    :parameter ordered: if set to ``True``, numbers will be used
         as list ids, otherwise a dash character. Defaults
         to ``False``.
    :parameter no_links: disables the use of links. Defaults to ``False``.
    :parameter index: a number that will be used as list id in case of an
         ordered table of contents. Defaults to ``1``.
    :parameter parser: decides rules on how compute indentations.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``-``.
    :type header: dict
    :type ordered: bool
    :type no_links: bool
    :type index: int
    :type parser: str
    :type list_marker: str
    :returns: toc_line_no_indent, a single line of the table of contents
         without indentation.
    :rtype: str
    :raises: a built-in exception.
    """
    # Validate the header data structure.
    for key in ('type', 'text_original', 'text_anchor_link'):
        assert key in header
    assert isinstance(header['type'], int)
    assert isinstance(header['text_original'], str)
    assert isinstance(header['text_anchor_link'], str)
    assert header['type'] >= 1
    assert index >= 1

    known_parsers = ('github', 'cmark', 'gitlab', 'commonmarker', 'redcarpet')
    if parser in known_parsers:
        # The list marker must be legal for the requested list flavour.
        if ordered:
            assert list_marker in md_parser[parser]['list']['ordered'][
                'closing_markers']
        else:
            assert list_marker in md_parser[parser]['list']['unordered'][
                'bullet_markers']

    toc_line_no_indent = str()
    if parser in known_parsers:
        # Ordered lists prepend the numeric index to the closing marker;
        # unordered lists use the bullet marker as-is.
        bullet = str(index) + list_marker if ordered else list_marker
        # FIXME: is this always correct?
        if no_links:
            label = header['text_original']
        else:
            label = '[{0}](#{1})'.format(header['text_original'],
                                         header['text_anchor_link'])
        toc_line_no_indent = bullet + ' ' + label
    return toc_line_no_indent
def build_toc_line(toc_line_no_indent: str,
                   no_of_indentation_spaces: int = 0) -> str:
    r"""Build the TOC line by prepending the indentation spaces.

    :parameter toc_line_no_indent: the TOC line without indentation.
    :parameter no_of_indentation_spaces: the number of indentation spaces.
         Defaults to ``0``.
    :type toc_line_no_indent: str
    :type no_of_indentation_spaces: int
    :returns: toc_line, a single line of the table of contents.
    :rtype: str
    :raises: a built-in exception.
    """
    assert no_of_indentation_spaces >= 0

    # Left-pad the line with the computed amount of spaces.
    return ' ' * no_of_indentation_spaces + toc_line_no_indent
def build_anchor_link(header_text_trimmed: str,
                      header_duplicate_counter: str,
                      parser: str = 'github') -> str:
    r"""Apply the specified slug rule to build the anchor link.

    :parameter header_text_trimmed: the text that needs to be transformed
         in a link.
    :parameter header_duplicate_counter: a data structure that keeps track of
         possible duplicate header links in order to avoid them. This is
         meaningful only for certain values of parser.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :type header_text_trimmed: str
    :type header_duplicate_counter: dict
    :type parser: str
    :returns: None if the specified parser is not recognized, or the anchor
         link, otherwise.
    :rtype: str
    :raises: a built-in exception.

    .. note::
         The licenses of each markdown parser algorithm are reported on
         the 'Markdown spec' documentation page.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        header_text_trimmed = header_text_trimmed.lower()
        # Remove punctuation: keep spaces, hyphens and "word characters"
        # only.
        header_text_trimmed = re.sub(r'[^\w\s\- ]', '', header_text_trimmed)
        header_text_trimmed = header_text_trimmed.replace(' ', '-')

        # Check for duplicates: like GitHub, the n-th duplicate anchor gets
        # a '-n' suffix.
        ht = header_text_trimmed
        # Set the initial value if we are examining the first occurrence.
        # The state of header_duplicate_counter is available to the caller
        # functions.
        if header_text_trimmed not in header_duplicate_counter:
            header_duplicate_counter[header_text_trimmed] = 0
        if header_duplicate_counter[header_text_trimmed] > 0:
            header_text_trimmed = header_text_trimmed + '-' + str(
                header_duplicate_counter[header_text_trimmed])
        header_duplicate_counter[ht] += 1
        return header_text_trimmed
    elif parser == 'redcarpet':
        # To ensure full compatibility what follows is a direct translation
        # of the rndr_header_anchor C function used in redcarpet.
        STRIPPED = " -&+$,/:;=?@\"#{}|^~[]`\\*()%.!'"
        header_text_trimmed_len = len(header_text_trimmed)
        inserted = 0
        stripped = 0
        header_text_trimmed_middle_stage = ''

        # A while loop is mandatory here: the index is advanced inside the
        # body to skip HTML tags ('<...>') and entities ('&...;'), exactly
        # like the C original does. A previous translation used
        # ``for i in range(...)``, but reassigning the loop variable of a
        # Python for loop has no effect on iteration, so tags and entities
        # were never actually skipped.
        i = 0
        while i < header_text_trimmed_len:
            if header_text_trimmed[i] == '<':
                # Skip an HTML tag up to its closing '>' (the final
                # increment below then moves past it).
                while (i < header_text_trimmed_len
                       and header_text_trimmed[i] != '>'):
                    i += 1
            elif header_text_trimmed[i] == '&':
                # Skip an HTML entity up to its terminating ';'.
                while (i < header_text_trimmed_len
                       and header_text_trimmed[i] != ';'):
                    i += 1
            # str.find() == -1 if character is not found in str.
            # https://docs.python.org/3.6/library/stdtypes.html?highlight=find#str.find
            elif not curses.ascii.isascii(
                    header_text_trimmed[i]) or STRIPPED.find(
                        header_text_trimmed[i]) != -1:
                # Collapse a run of stripped characters into a single
                # hyphen, and only between already-inserted characters.
                if inserted and not stripped:
                    header_text_trimmed_middle_stage += '-'
                stripped = 1
            else:
                header_text_trimmed_middle_stage += header_text_trimmed[
                    i].lower()
                stripped = 0
                inserted += 1
            i += 1

        if stripped > 0 and inserted > 0:
            # Drop the trailing hyphen produced by stripped characters at
            # the end of the input.
            header_text_trimmed_middle_stage = header_text_trimmed_middle_stage[
                0:-1]

        if inserted == 0 and header_text_trimmed_len > 0:
            # Everything was stripped: fall back to a djb2 hash of the raw
            # text rendered in hex, as redcarpet does.
            # NOTE(review): C truncates the hash to the width of unsigned
            # long while Python integers are unbounded; this keeps the
            # behavior of the previous Python implementation.
            hash_value = 5381
            for i in range(0, header_text_trimmed_len):
                # Get the unicode representation with ord.
                # Unicode should be equal to ASCII in ASCII's range of
                # characters.
                hash_value = ((hash_value << 5) + hash_value) + ord(
                    header_text_trimmed[i])
            # This is equivalent to %x in C. In Python we don't have
            # the length problem so %x is equal to %lx in this case.
            header_text_trimmed_middle_stage = 'part-' + '{0:x}'.format(
                hash_value)
        return header_text_trimmed_middle_stage
def get_atx_heading(line: str,
                    keep_header_levels: int = 3,
                    parser: str = 'github',
                    no_links: bool = False):
    r"""Given a line extract the link label and its type.

    :parameter line: the line to be examined.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :parameter no_links: disables the use of links.
    :type line: str
    :type keep_header_levels: int
    :type parser: str
    :type no_links: bool
    :returns: None if the line does not contain header elements according to
         the rules of the selected markdown parser, or a tuple containing the
         header type and the trimmed header text, according to the selected
         parser rules, otherwise.
    :rtype: typing.Optional[tuple]
    :raises: GithubEmptyLinkLabel or GithubOverflowCharsLinkLabel or a
         built-in exception.
    """
    assert keep_header_levels >= 1

    if len(line) == 0:
        return None

    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        # A line starting with a backslash escapes the '#' sequence, so it
        # cannot be an ATX heading.
        if line[0] == '\u005c':
            return None

        # Consume the optional leading spaces, at most
        # max_space_indentation of them.
        i = 0
        while i < len(line) and line[i] == ' ' and i <= md_parser['github'][
                'header']['max_space_indentation']:
            i += 1
        if i > md_parser['github']['header']['max_space_indentation']:
            return None

        # Consume the '#' characters: their count is the header level.
        offset = i
        while i < len(line) and line[i] == '#' and i <= md_parser['github'][
                'header']['max_levels'] + offset:
            i += 1

        # Reject levels above the parser maximum, above the user-requested
        # maximum, and lines with no '#' at all.
        if i - offset > md_parser['github']['header'][
                'max_levels'] or i - offset > keep_header_levels or i - offset == 0:
            return None

        current_headers = i - offset

        # Include special cases for line endings which should not be
        # discarded as non-ATX headers.
        if i < len(line) and (line[i] != ' ' and line[i] != '\u000a'
                              and line[i] != '\u000d'):
            return None

        i += 1
        # Exclude leading whitespaces after the ATX header identifier.
        while i < len(line) and line[i] == ' ':
            i += 1

        # An algorithm to find the start and the end of the closing sequence.
        # The closing sequence includes all the significant part of the
        # string. This algorithm has a complexity of O(n) with n being the
        # length of the line.
        cs_start = i
        cs_end = cs_start
        # Scan the line backwards (line_prime is the reversed line) so an
        # optional closing run of spaces and '#' characters can be dropped.
        line_prime = line[::-1]
        hash_char_rounds = 0
        go_on = True
        i = 0
        i_prev = i
        while i < len(line) - cs_start - 1 and go_on:
            # Stop at the first character that is neither a space nor '#',
            # or once more than one '#'-run has been seen: everything
            # before it (in original order) is significant text.
            if ((line_prime[i] != ' ' and line_prime[i] != '#')
                    or hash_char_rounds > 1):
                if i > i_prev:
                    cs_end = len(line_prime) - i_prev
                else:
                    cs_end = len(line_prime) - i
                go_on = False
            while go_on and line_prime[i] == ' ':
                i += 1
            i_prev = i
            while go_on and line_prime[i] == '#':
                i += 1
            if i > i_prev:
                hash_char_rounds += 1

        # Instead of changing the whole algorithm to check for line
        # endings, this seems cleaner.
        find_newline = line.find('\u000a')
        find_carriage_return = line.find('\u000d')
        if find_newline != -1:
            cs_end = min(cs_end, find_newline)
        if find_carriage_return != -1:
            cs_end = min(cs_end, find_carriage_return)

        final_line = line[cs_start:cs_end]

        if not no_links:
            # A trailing backslash would escape the closing ']' of the
            # link label: neutralize it with a space.
            if len(final_line) > 0 and final_line[-1] == '\u005c':
                final_line += ' '
            # A link label made only of whitespace is invalid on GitHub.
            if len(
                    final_line.strip('\u0020').strip('\u0009').strip('\u000a').
                    strip('\u000b').strip('\u000c').strip('\u000d')) == 0:
                raise GithubEmptyLinkLabel
            if len(final_line
                   ) > md_parser['github']['link']['max_chars_label']:
                raise GithubOverflowCharsLinkLabel

            # Escape square brackets if not already escaped.
            i = 0
            while i < len(final_line):
                if (final_line[i] == '[' or final_line[i] == ']'):
                    # Count the backslashes immediately preceding the
                    # bracket: an even number (including zero) means the
                    # bracket itself is unescaped and needs escaping.
                    j = i - 1
                    consecutive_escape_characters = 0
                    while j >= 0 and final_line[j] == '\u005c':
                        consecutive_escape_characters += 1
                        j -= 1
                    if ((consecutive_escape_characters > 0
                         and consecutive_escape_characters % 2 == 0)
                            or consecutive_escape_characters == 0):
                        tmp = '\u005c'
                    else:
                        tmp = str()
                    final_line = final_line[0:i] + tmp + final_line[i:len(
                        final_line)]
                    # Skip past the (possibly) inserted escape character
                    # and the bracket itself.
                    i += 1 + len(tmp)
                else:
                    i += 1
    elif parser == 'redcarpet':
        # Redcarpet does not allow leading spaces before the '#' sequence.
        if line[0] != '#':
            return None

        i = 0
        while (i < len(line)
               and i < md_parser['redcarpet']['header']['max_levels']
               and line[i] == '#'):
            i += 1
        current_headers = i

        if i < len(line) and line[i] != ' ':
            return None

        while i < len(line) and line[i] == ' ':
            i += 1

        # Find the end of the significant text: stop at the newline, then
        # back off an optional closing '#'-run and the spaces before it.
        end = i
        while end < len(line) and line[end] != '\n':
            end += 1

        while end > 0 and line[end - 1] == '#':
            end -= 1

        while end > 0 and line[end - 1] == ' ':
            end -= 1

        if end > i:
            final_line = line
            # As above: a trailing backslash would escape the closing ']'
            # of the link label.
            if not no_links and len(final_line) > 0 and final_line[-1] == '\\':
                final_line += ' '
                end += 1
            final_line = final_line[i:end]
        else:
            return None

    # TODO: escape or remove '[', ']', '(', ')' in inline links for redcarpet,
    # TODO: check link label rules for redcarpet.

    return current_headers, final_line
def get_md_header(header_text_line: str,
                  header_duplicate_counter: dict,
                  keep_header_levels: int = 3,
                  parser: str = 'github',
                  no_links: bool = False) -> dict:
    r"""Build a data structure with the elements needed to create a TOC line.

    :parameter header_text_line: a single markdown line that needs to be
         transformed into a TOC line.
    :parameter header_duplicate_counter: a data structure that contains the
         number of occurrencies of each header anchor link. This is used to
         avoid duplicate anchor links and it is meaningful only for certain
         values of parser.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :parameter no_links: disables the use of links.
    :type header_text_line: str
    :type header_duplicate_counter: dict
    :type keep_header_levels: int
    :type parser: str
    :type no_links: bool
    :returns: None if the input line does not correspond to one of the
         designated cases or a data structure containing the necessary
         components to create a table of contents line, otherwise.
    :rtype: dict
    :raises: a built-in exception.

    .. note::
         This works like a wrapper to other functions.
    """
    parsed = get_atx_heading(header_text_line, keep_header_levels, parser,
                             no_links)
    # Not a heading according to the selected parser.
    if parsed is None:
        return None

    header_type, header_text_trimmed = parsed
    return {
        'type':
        header_type,
        'text_original':
        header_text_trimmed,
        'text_anchor_link':
        build_anchor_link(header_text_trimmed, header_duplicate_counter,
                          parser),
    }
def is_valid_code_fence_indent(line: str, parser: str = 'github') -> bool:
    r"""Determine if the given line has valid indentation for a code block fence.

    :parameter line: a single markdown line to evaluate.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :type line: str
    :type parser: str
    :returns: True if the given line has valid indentation or False
         otherwise.
    :rtype: bool
    :raises: a built-in exception.
    """
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        # Number of leading space characters before the fence marker.
        leading_spaces = len(line) - len(line.lstrip(' '))
        return (leading_spaces <=
                md_parser['github']['code fence']['min_marker_characters'])
    elif parser == 'redcarpet':
        # TODO.
        return False
def is_opening_code_fence(line: str, parser: str = 'github'):
    r"""Determine if the given line is possibly the opening of a fenced code block.

    :parameter line: a single markdown line to evaluate.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :type line: str
    :type parser: str
    :returns: None if the input line is not an opening code fence. Otherwise,
         returns the string which will identify the closing code fence
         according to the input parsers' rules.
    :rtype: typing.Optional[str]
    :raises: a built-in exception.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        markers = md_parser['github']['code fence']['marker']
        marker_min_length = md_parser['github']['code fence'][
            'min_marker_characters']

        if not is_valid_code_fence_indent(line):
            return None

        line = line.lstrip(' ').rstrip('\n')
        # The line must start with at least marker_min_length backticks or
        # tildes.
        if not line.startswith(
            (markers[0] * marker_min_length, markers[1] * marker_min_length)):
            return None

        # A line made up of a single repeated character is a bare fence:
        # its info string is empty.
        if line == len(line) * line[0]:
            info_string = str()
        else:
            info_string = line.lstrip(line[0])

        # Backticks or tildes in info string are explicitly forbidden.
        if markers[0] in info_string or markers[1] in info_string:
            return None

        # Solves example 107. See:
        # https://github.github.com/gfm/#example-107
        if line.rstrip(markers[0]) != line and line.rstrip(markers[1]) != line:
            return None

        # str.rstrip treats info_string as a *set* of characters; this
        # leaves exactly the fence because fence characters cannot appear
        # in the info string (checked above).
        return line.rstrip(info_string)
    elif parser == 'redcarpet':
        # TODO.
        return None
def is_closing_code_fence(line: str,
                          fence: str,
                          is_document_end: bool = False,
                          parser: str = 'github') -> bool:
    r"""Determine if the given line is the end of a fenced code block.

    :parameter line: a single markdown line to evaluate.
    :parameter fence: a sequence of backticks or tildes marking the start of
         the current code block. This is usually the return value of the
         is_opening_code_fence function.
    :parameter is_document_end: This variable tells the function that the
         end of the file is reached.
         Defaults to ``False``.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :type line: str
    :type fence: str
    :type is_document_end: bool
    :type parser: str
    :returns: True if the line ends the current code block. False otherwise.
    :rtype: bool
    :raises: a built-in exception.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        markers = md_parser['github']['code fence']['marker']
        marker_min_length = md_parser['github']['code fence'][
            'min_marker_characters']

        if not is_valid_code_fence_indent(line):
            return False

        # Remove opening fence indentation after it is known to be valid.
        fence = fence.lstrip(' ')
        # Check if fence uses valid characters.
        if not fence.startswith((markers[0], markers[1])):
            return False

        if len(fence) < marker_min_length:
            return False

        # Additional security: normalize the fence before comparing.
        fence = fence.rstrip('\n').rstrip(' ')

        # Check that all fence characters are equal.
        if fence != len(fence) * fence[0]:
            return False

        # We might be inside a code block if this is not closed
        # by the end of the document, according to example 95 and 96.
        # This means that the end of the document corresponds to
        # a closing code fence.
        # Of course we first have to check that fence is a valid opening
        # code fence marker.
        # See:
        # https://github.github.com/gfm/#example-95
        # https://github.github.com/gfm/#example-96
        if is_document_end:
            return True

        # Check if line uses the same character as fence.
        line = line.lstrip(' ')
        if not line.startswith(fence):
            return False

        line = line.rstrip('\n').rstrip(' ')

        # Solves example 93 and 94. See:
        # https://github.github.com/gfm/#example-93
        # https://github.github.com/gfm/#example-94
        if len(line) < len(fence):
            return False

        # Closing fence must not have alien characters.
        if line != len(line) * line[0]:
            return False

        return True
    elif parser == 'redcarpet':
        # TODO.
        return False
def build_indentation_list(parser: str = 'github'):
    r"""Create a data structure that holds the state of indentations.

    :parameter parser: decides the length of the list.
         Defaults to ``github``.
    :type parser: str
    :returns: indentation_list, a list that contains the state of
         indentations given a header type.
    :rtype: list
    :raises: a built-in exception.
    """
    indentation_list = list()
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker', 'redcarpet'):
        # One cleared flag per possible header level.
        indentation_list = [
            False for _ in range(md_parser[parser]['header']['max_levels'])
        ]
    return indentation_list
def toc_renders_as_coherent_list(
        header_type_curr: int = 1,
        indentation_list: list = None,
        parser: str = 'github') -> bool:
    r"""Check if the TOC will render as a working list.

    :parameter header_type_curr: the current type of header (h[1-Inf]).
         Defaults to ``1``.
    :parameter indentation_list: a list that contains the state of
         indentations given a header type. If not provided, a fresh list is
         built with build_indentation_list.
    :parameter parser: decides rules on how to generate ordered list markers.
         Defaults to ``github``.
    :type header_type_curr: int
    :type indentation_list: list
    :type parser: str
    :returns: renders_as_list
    :rtype: bool
    :raises: a built-in exception.
    """
    # The previous signature used a mutable default argument
    # (``build_indentation_list('github')``) which is evaluated only once,
    # at function definition time. Since this function mutates
    # indentation_list below, calls relying on the default would share and
    # accumulate state across invocations. Build a fresh list per call
    # instead.
    if indentation_list is None:
        indentation_list = build_indentation_list(parser)
    assert header_type_curr >= 1
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        assert len(
            indentation_list) == md_parser[parser]['header']['max_levels']
        for e in indentation_list:
            assert isinstance(e, bool)

    renders_as_list = True
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        # Update with current information.
        indentation_list[header_type_curr - 1] = True

        # Reset next cells to False, as a detection mechanism.
        for i in range(header_type_curr,
                       md_parser['github']['header']['max_levels']):
            indentation_list[i] = False

        # Check for previous False cells. If there is a "hole" in the list
        # it means that the TOC will have "wrong" indentation spaces, thus
        # either not rendering as an HTML list or not as the user intended.
        i = header_type_curr - 1
        while i >= 0 and indentation_list[i]:
            i -= 1
        if i >= 0:
            renders_as_list = False

    return renders_as_list
# This module is meant to be imported as a library; running it directly is
# a deliberate no-op.
if __name__ == '__main__':
    pass
|
frnmst/md-toc | md_toc/api.py | build_multiple_tocs | python | def build_multiple_tocs(filenames: list,
ordered: bool = False,
no_links: bool = False,
no_indentation: bool = False,
no_list_coherence: bool = False,
keep_header_levels: int = 3,
parser: str = 'github',
list_marker: str = '-') -> list:
r"""Parse files by line and build the table of contents of each file.
:parameter filenames: the files that needs to be read.
:parameter ordered: decides whether to build an ordered list or not.
Defaults to ``False``.
:parameter no_links: disables the use of links.
Defaults to ``False``.
:parameter no_indentation: disables indentation in the list.
Defaults to ``False``.
:parameter keep_header_levels: the maximum level of headers to be
considered as such when building the table of contents.
Defaults to ``3``.
:parameter parser: decides rules on how to generate anchor links.
Defaults to ``github``.
:type filenames: list
:type ordered: bool
:type no_links: bool
:type no_indentation: bool
:type keep_header_levels: int
:type parser: str
:returns: toc_struct, the corresponding table of contents for each input
file.
:rtype: list
:raises: a built-in exception.
"""
if len(filenames) > 0:
for f in filenames:
assert isinstance(f, str)
if len(filenames) == 0:
filenames.append('-')
file_id = 0
toc_struct = list()
while file_id < len(filenames):
toc_struct.append(
build_toc(filenames[file_id], ordered, no_links, no_indentation,
no_list_coherence, keep_header_levels, parser,
list_marker))
file_id += 1
return toc_struct | r"""Parse files by line and build the table of contents of each file.
:parameter filenames: the files that needs to be read.
:parameter ordered: decides whether to build an ordered list or not.
Defaults to ``False``.
:parameter no_links: disables the use of links.
Defaults to ``False``.
:parameter no_indentation: disables indentation in the list.
Defaults to ``False``.
:parameter keep_header_levels: the maximum level of headers to be
considered as such when building the table of contents.
Defaults to ``3``.
:parameter parser: decides rules on how to generate anchor links.
Defaults to ``github``.
:type filenames: list
:type ordered: bool
:type no_links: bool
:type no_indentation: bool
:type keep_header_levels: int
:type parser: str
:returns: toc_struct, the corresponding table of contents for each input
file.
:rtype: list
:raises: a built-in exception. | train | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/api.py#L243-L291 | [
"def build_toc(filename: str,\n ordered: bool = False,\n no_links: bool = False,\n no_indentation: bool = False,\n no_list_coherence: bool = False,\n keep_header_levels: int = 3,\n parser: str = 'github',\n list_marker: str = '-') -> str:\n r\"\"\"Build the table of contents of a single file.\n\n :parameter filename: the file that needs to be read.\n :parameter ordered: decides whether to build an ordered list or not.\n Defaults to ``False``.\n :parameter no_links: disables the use of links.\n Defaults to ``False``.\n :parameter no_indentation: disables indentation in the list.\n Defaults to ``False``.\n :parameter keep_header_levels: the maximum level of headers to be\n considered as such when building the table of contents.\n Defaults to ``3``.\n :parameter parser: decides rules on how to generate anchor links.\n Defaults to ``github``.\n :type filename: str\n :type ordered: bool\n :type no_links: bool\n :type no_indentation: bool\n :type keep_header_levels: int\n :type parser: str\n :returns: toc, the corresponding table of contents of the file.\n :rtype: str\n :raises: a built-in exception.\n \"\"\"\n toc = str()\n header_type_counter = dict()\n header_type_curr = 0\n header_type_prev = 0\n header_duplicate_counter = dict()\n no_of_indentation_spaces_prev = 0\n if ordered:\n list_marker_log = build_list_marker_log(parser, list_marker)\n if filename == '-':\n f = sys.stdin\n else:\n f = open(filename, 'r')\n line = f.readline()\n if ordered:\n list_marker_log = build_list_marker_log(parser, list_marker)\n else:\n list_marker_log = list()\n is_within_code_fence = False\n code_fence = None\n is_document_end = False\n if not no_indentation and not no_list_coherence:\n # if indentation and list coherence.\n indentation_list = build_indentation_list(parser)\n while line:\n # Document ending detection.\n #\n # This changes the state of is_within_code_fence if the\n # file has no closing fence markers. 
This serves no practial\n # purpose since the code would run correctly anyway. It is\n # however more sematically correct.\n #\n # See the unit tests (examples 95 and 96 of the github parser)\n # and the is_closing_code_fence function.\n if filename != '-':\n # stdin is not seekable.\n file_pointer_pos = f.tell()\n if f.readline() == str():\n is_document_end = True\n f.seek(file_pointer_pos)\n\n # Code fence detection.\n if is_within_code_fence:\n is_within_code_fence = not is_closing_code_fence(\n line, code_fence, is_document_end, parser)\n line = f.readline()\n else:\n code_fence = is_opening_code_fence(line, parser)\n if code_fence is not None:\n # Update the status of the next line.\n is_within_code_fence = True\n line = f.readline()\n\n if not is_within_code_fence or code_fence is None:\n\n # Header detection and gathering.\n header = get_md_header(line, header_duplicate_counter,\n keep_header_levels, parser, no_links)\n if header is not None:\n header_type_curr = header['type']\n\n # Take care of the ordered TOC.\n if ordered:\n increase_index_ordered_list(header_type_counter,\n header_type_prev,\n header_type_curr, parser)\n index = header_type_counter[header_type_curr]\n else:\n index = 1\n\n # Take care of list indentations.\n if no_indentation:\n no_of_indentation_spaces_curr = 0\n # TOC list coherence checks are not necessary\n # without indentation.\n else:\n if not no_list_coherence:\n if not toc_renders_as_coherent_list(\n header_type_curr, indentation_list, parser):\n raise TocDoesNotRenderAsCoherentList\n no_of_indentation_spaces_curr = compute_toc_line_indentation_spaces(\n header_type_curr, header_type_prev,\n no_of_indentation_spaces_prev, parser, ordered,\n list_marker, list_marker_log, index)\n\n # Build a single TOC line.\n toc_line_no_indent = build_toc_line_without_indentation(\n header, ordered, no_links, index, parser, list_marker)\n\n # Save the TOC line with the indentation.\n toc += build_toc_line(toc_line_no_indent,\n 
no_of_indentation_spaces_curr) + '\\n'\n\n header_type_prev = header_type_curr\n no_of_indentation_spaces_prev = no_of_indentation_spaces_curr\n\n # endif\n\n # endif\n\n line = f.readline()\n\n # endwhile\n\n f.close()\n\n return toc\n"
] | #
# api.py
#
# Copyright (C) 2017-2019 frnmst (Franco Masotti) <franco.masotti@live.com>
#
# This file is part of md-toc.
#
# md-toc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# md-toc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with md-toc. If not, see <http://www.gnu.org/licenses/>.
#
"""The main file."""
import fpyutils
import re
import curses.ascii
import sys
from .exceptions import (GithubOverflowCharsLinkLabel, GithubEmptyLinkLabel,
GithubOverflowOrderedListMarker,
StdinIsNotAFileToBeWritten,
TocDoesNotRenderAsCoherentList)
from .constants import common_defaults
from .constants import parser as md_parser
def write_string_on_file_between_markers(filename: str, string: str,
marker: str):
r"""Write the table of contents on a single file.
:parameter filename: the file that needs to be read or modified.
:parameter string: the string that will be written on the file.
:parameter marker: a marker that will identify the start
and the end of the string.
:type filenames: str
:type string: str
:type marker: str
:returns: None
:rtype: None
:raises: StdinIsNotAFileToBeWritten or an fpyutils exception
or a built-in exception.
"""
if filename == '-':
raise StdinIsNotAFileToBeWritten
final_string = marker + '\n\n' + string.rstrip() + '\n\n' + marker + '\n'
marker_line_positions = fpyutils.get_line_matches(
filename, marker, 2, loose_matching=True)
if 1 in marker_line_positions:
if 2 in marker_line_positions:
fpyutils.remove_line_interval(filename, marker_line_positions[1],
marker_line_positions[2], filename)
else:
fpyutils.remove_line_interval(filename, marker_line_positions[1],
marker_line_positions[1], filename)
fpyutils.insert_string_at_line(
filename,
final_string,
marker_line_positions[1],
filename,
append=False)
def write_strings_on_files_between_markers(filenames: list, strings: list,
marker: str):
r"""Write the table of contents on multiple files.
:parameter filenames: the files that needs to be read or modified.
:parameter strings: the strings that will be written on the file. Each
string is associated with one file.
:parameter marker: a marker that will identify the start
and the end of the string.
:type filenames: list
:type string: list
:type marker: str
:returns: None
:rtype: None
:raises: an fpyutils exception or a built-in exception.
"""
assert len(filenames) == len(strings)
if len(filenames) > 0:
for f in filenames:
assert isinstance(f, str)
if len(strings) > 0:
for s in strings:
assert isinstance(s, str)
file_id = 0
for f in filenames:
write_string_on_file_between_markers(f, strings[file_id], marker)
file_id += 1
def build_toc(filename: str,
ordered: bool = False,
no_links: bool = False,
no_indentation: bool = False,
no_list_coherence: bool = False,
keep_header_levels: int = 3,
parser: str = 'github',
list_marker: str = '-') -> str:
r"""Build the table of contents of a single file.
:parameter filename: the file that needs to be read.
:parameter ordered: decides whether to build an ordered list or not.
Defaults to ``False``.
:parameter no_links: disables the use of links.
Defaults to ``False``.
:parameter no_indentation: disables indentation in the list.
Defaults to ``False``.
:parameter keep_header_levels: the maximum level of headers to be
considered as such when building the table of contents.
Defaults to ``3``.
:parameter parser: decides rules on how to generate anchor links.
Defaults to ``github``.
:type filename: str
:type ordered: bool
:type no_links: bool
:type no_indentation: bool
:type keep_header_levels: int
:type parser: str
:returns: toc, the corresponding table of contents of the file.
:rtype: str
:raises: a built-in exception.
"""
toc = str()
header_type_counter = dict()
header_type_curr = 0
header_type_prev = 0
header_duplicate_counter = dict()
no_of_indentation_spaces_prev = 0
if ordered:
list_marker_log = build_list_marker_log(parser, list_marker)
if filename == '-':
f = sys.stdin
else:
f = open(filename, 'r')
line = f.readline()
if ordered:
list_marker_log = build_list_marker_log(parser, list_marker)
else:
list_marker_log = list()
is_within_code_fence = False
code_fence = None
is_document_end = False
if not no_indentation and not no_list_coherence:
# if indentation and list coherence.
indentation_list = build_indentation_list(parser)
while line:
# Document ending detection.
#
# This changes the state of is_within_code_fence if the
# file has no closing fence markers. This serves no practial
# purpose since the code would run correctly anyway. It is
# however more sematically correct.
#
# See the unit tests (examples 95 and 96 of the github parser)
# and the is_closing_code_fence function.
if filename != '-':
# stdin is not seekable.
file_pointer_pos = f.tell()
if f.readline() == str():
is_document_end = True
f.seek(file_pointer_pos)
# Code fence detection.
if is_within_code_fence:
is_within_code_fence = not is_closing_code_fence(
line, code_fence, is_document_end, parser)
line = f.readline()
else:
code_fence = is_opening_code_fence(line, parser)
if code_fence is not None:
# Update the status of the next line.
is_within_code_fence = True
line = f.readline()
if not is_within_code_fence or code_fence is None:
# Header detection and gathering.
header = get_md_header(line, header_duplicate_counter,
keep_header_levels, parser, no_links)
if header is not None:
header_type_curr = header['type']
# Take care of the ordered TOC.
if ordered:
increase_index_ordered_list(header_type_counter,
header_type_prev,
header_type_curr, parser)
index = header_type_counter[header_type_curr]
else:
index = 1
# Take care of list indentations.
if no_indentation:
no_of_indentation_spaces_curr = 0
# TOC list coherence checks are not necessary
# without indentation.
else:
if not no_list_coherence:
if not toc_renders_as_coherent_list(
header_type_curr, indentation_list, parser):
raise TocDoesNotRenderAsCoherentList
no_of_indentation_spaces_curr = compute_toc_line_indentation_spaces(
header_type_curr, header_type_prev,
no_of_indentation_spaces_prev, parser, ordered,
list_marker, list_marker_log, index)
# Build a single TOC line.
toc_line_no_indent = build_toc_line_without_indentation(
header, ordered, no_links, index, parser, list_marker)
# Save the TOC line with the indentation.
toc += build_toc_line(toc_line_no_indent,
no_of_indentation_spaces_curr) + '\n'
header_type_prev = header_type_curr
no_of_indentation_spaces_prev = no_of_indentation_spaces_curr
# endif
# endif
line = f.readline()
# endwhile
f.close()
return toc
def increase_index_ordered_list(header_type_count: dict,
                                header_type_prev: int,
                                header_type_curr: int,
                                parser: str = 'github'):
    r"""Compute the current index for ordered list table of contents.

    :parameter header_type_count: the count of each header type.
    :parameter header_type_prev: the previous type of header (h[1-Inf]).
    :parameter header_type_curr: the current type of header (h[1-Inf]).
    :parameter parser: decides rules on how to generate ordered list markers.
         Defaults to ``github``.
    :type header_type_count: dict
    :type header_type_prev: int
    :type header_type_curr: int
    :type parser: str
    :returns: None
    :rtype: None
    :raises: GithubOverflowOrderedListMarker or a built-in exception.
    """
    # header_type_prev may be 0 (no previous header) but header_type_curr
    # cannot.
    assert header_type_prev >= 0
    assert header_type_curr >= 1
    # A fresh table of contents behaves as if the previous header had the
    # same type as the current one.
    if header_type_prev == 0:
        header_type_prev = header_type_curr
    # A brand new header type, or a descent into a deeper nesting level,
    # restarts the counter for that type.
    if (header_type_curr not in header_type_count
            or header_type_prev < header_type_curr):
        header_type_count[header_type_curr] = 0
    header_type_count[header_type_curr] += 1
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        marker_limit = md_parser['github']['list']['ordered'][
            'max_marker_number']
        if header_type_count[header_type_curr] > marker_limit:
            raise GithubOverflowOrderedListMarker
def build_list_marker_log(parser: str = 'github',
                          list_marker: str = '.') -> list:
    r"""Create a data structure that holds list marker information.

    :parameter parser: decides rules on how compute indentations.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``.``.
    :type parser: str
    :type list_marker: str
    :returns: list_marker_log, the data structure.
    :rtype: list
    :raises: a built-in exception.

    .. note::
         This function makes sense for ordered lists only.
    """
    cmark_like = ('github', 'cmark', 'gitlab', 'commonmarker')
    if parser in cmark_like or parser == 'redcarpet':
        assert list_marker in md_parser[parser]['list']['ordered'][
            'closing_markers']

    list_marker_log = list()
    if parser in cmark_like:
        # One slot per possible header level, all starting from the
        # minimum ordered-list marker number.
        first_marker = str(
            md_parser['github']['list']['ordered']['min_marker_number'])
        list_marker_log = [
            first_marker + list_marker
            for _ in range(md_parser['github']['header']['max_levels'])
        ]
    elif parser == 'redcarpet':
        # Redcarpet does not need marker bookkeeping.
        pass
    return list_marker_log
def compute_toc_line_indentation_spaces(
        header_type_curr: int = 1,
        header_type_prev: int = 0,
        no_of_indentation_spaces_prev: int = 0,
        parser: str = 'github',
        ordered: bool = False,
        list_marker: str = '-',
        list_marker_log: list = None,
        index: int = 1) -> int:
    r"""Compute the number of indentation spaces for the TOC list element.

    :parameter header_type_curr: the current type of header (h[1-Inf]).
         Defaults to ``1``.
    :parameter header_type_prev: the previous type of header (h[1-Inf]).
         Defaults to ``0``.
    :parameter no_of_indentation_spaces_prev: the number of previous
         indentation spaces. Defaults to ``0``.
    :parameter parser: decides rules on how compute indentations.
         Defaults to ``github``.
    :parameter ordered: if set to ``True``, numbers will be used
         as list ids, otherwise a dash character. Defaults to ``False``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``-``.
    :parameter list_marker_log: a data structure that holds list marker
         information for ordered lists. If ``None`` (the default), a fresh
         log equivalent to ``build_list_marker_log('github', '.')`` is
         built for this call.
    :parameter index: a number that will be used as list id in case of an
         ordered table of contents. Defaults to ``1``.
    :type header_type_curr: int
    :type header_type_prev: int
    :type no_of_indentation_spaces_prev: int
    :type parser: str
    :type ordered: bool
    :type list_marker: str
    :type list_marker_log: list
    :type index: int
    :returns: no_of_indentation_spaces_curr, the number of indentation spaces
         for the list element.
    :rtype: int
    :raises: a built-in exception.

    .. note::
         Please note that this function
         assumes that no_of_indentation_spaces_prev contains the correct
         number of spaces.
    """
    if list_marker_log is None:
        # Build the log lazily instead of using a list as the default
        # argument value: a default list is created once at function
        # definition time and, since this function mutates list_marker_log,
        # it would be shared and corrupted across calls.
        list_marker_log = build_list_marker_log('github', '.')
    assert header_type_curr >= 1
    assert header_type_prev >= 0
    assert no_of_indentation_spaces_prev >= 0
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        if ordered:
            assert list_marker in md_parser[parser]['list']['ordered'][
                'closing_markers']
        else:
            assert list_marker in md_parser[parser]['list']['unordered'][
                'bullet_markers']
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        if ordered:
            assert len(
                list_marker_log) == md_parser['github']['header']['max_levels']
            for e in list_marker_log:
                assert isinstance(e, str)
    assert index >= 1

    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        if header_type_prev == 0:
            # Base case for the first toc line.
            no_of_indentation_spaces_curr = 0
        elif header_type_curr == header_type_prev:
            # Base case for same indentation.
            no_of_indentation_spaces_curr = no_of_indentation_spaces_prev
        else:
            if ordered:
                # The width of the marker logged for this level determines
                # the indentation step.
                list_marker_prev = str(list_marker_log[header_type_curr - 1])
            else:
                # list_marker for unordered lists will always be 1 character.
                list_marker_prev = list_marker
            # Generic cases.
            if header_type_curr > header_type_prev:
                # More indentation.
                no_of_indentation_spaces_curr = (
                    no_of_indentation_spaces_prev + len(list_marker_prev) +
                    len(' '))
            elif header_type_curr < header_type_prev:
                # Less indentation.
                no_of_indentation_spaces_curr = (
                    no_of_indentation_spaces_prev -
                    (len(list_marker_prev) + len(' ')))
            # Reset older nested list indices. If this is not performed then
            # future nested ordered lists will rely on incorrect data to
            # compute indentations.
            if ordered:
                for i in range((header_type_curr - 1) + 1,
                               md_parser['github']['header']['max_levels']):
                    list_marker_log[i] = str(
                        md_parser['github']['list']['ordered']
                        ['min_marker_number']) + list_marker
        # Update the data structure.
        if ordered:
            list_marker_log[header_type_curr - 1] = str(index) + list_marker
    elif parser == 'redcarpet':
        # Redcarpet uses a fixed 4-space indentation step per header level.
        no_of_indentation_spaces_curr = 4 * (header_type_curr - 1)
    return no_of_indentation_spaces_curr
def build_toc_line_without_indentation(header: dict,
                                       ordered: bool = False,
                                       no_links: bool = False,
                                       index: int = 1,
                                       parser: str = 'github',
                                       list_marker: str = '-') -> str:
    r"""Return a list element of the table of contents.

    :parameter header: a data structure that contains the original
         text, the trimmed text and the type of header.
    :parameter ordered: if set to ``True``, numbers will be used
         as list ids, otherwise a dash character. Defaults to ``False``.
    :parameter no_links: disables the use of links. Defaults to ``False``.
    :parameter index: a number that will be used as list id in case of an
         ordered table of contents. Defaults to ``1``.
    :parameter parser: decides rules on how compute indentations.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``-``.
    :type header: dict
    :type ordered: bool
    :type no_links: bool
    :type index: int
    :type parser: str
    :type list_marker: str
    :returns: toc_line_no_indent, a single line of the table of contents
         without indentation.
    :rtype: str
    :raises: a built-in exception.
    """
    assert 'type' in header
    assert 'text_original' in header
    assert 'text_anchor_link' in header
    assert isinstance(header['type'], int)
    assert isinstance(header['text_original'], str)
    assert isinstance(header['text_anchor_link'], str)
    assert header['type'] >= 1
    assert index >= 1

    supported = ('github', 'cmark', 'gitlab', 'commonmarker', 'redcarpet')
    if parser in supported:
        marker_class = 'ordered' if ordered else 'unordered'
        marker_key = 'closing_markers' if ordered else 'bullet_markers'
        assert list_marker in md_parser[parser]['list'][marker_class][
            marker_key]

    toc_line_no_indent = str()
    if parser in supported:
        if ordered:
            # Prefix the numeric id to the closing marker, e.g. '1.'.
            list_marker = str(index) + list_marker
        # FIXME: is this always correct?
        if no_links:
            label = header['text_original']
        else:
            label = '[' + header['text_original'] + ']' + '(#' + header[
                'text_anchor_link'] + ')'
        toc_line_no_indent = list_marker + ' ' + label
    return toc_line_no_indent
def build_toc_line(toc_line_no_indent: str,
                   no_of_indentation_spaces: int = 0) -> str:
    r"""Build the TOC line.

    :parameter toc_line_no_indent: the TOC line without indentation.
    :parameter no_of_indentation_spaces: the number of indentation spaces.
         Defaults to ``0``.
    :type toc_line_no_indent: str
    :type no_of_indentation_spaces: int
    :returns: toc_line, a single line of the table of contents.
    :rtype: str
    :raises: a built-in exception.
    """
    assert no_of_indentation_spaces >= 0
    # Prepend the requested amount of spaces to the bare TOC line.
    return ' ' * no_of_indentation_spaces + toc_line_no_indent
def build_anchor_link(header_text_trimmed: str,
                      header_duplicate_counter: str,
                      parser: str = 'github') -> str:
    r"""Apply the specified slug rule to build the anchor link.

    :parameter header_text_trimmed: the text that needs to be transformed
         in a link.
    :parameter header_duplicate_counter: a data structure that keeps track of
         possible duplicate header links in order to avoid them. This is
         meaningful only for certain values of parser.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :type header_text_trimmed: str
    :type header_duplicate_counter: dict
    :type parser: str
    :returns: None if the specified parser is not recognized, or the anchor
         link, otherwise.
    :rtype: str
    :raises: a built-in exception.

    .. note::
         The licenses of each markdown parser algorithm are reported on
         the 'Markdown spec' documentation page.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        header_text_trimmed = header_text_trimmed.lower()
        # Remove punctuation: Keep spaces, hypens and "word characters"
        # only.
        header_text_trimmed = re.sub(r'[^\w\s\- ]', '', header_text_trimmed)
        header_text_trimmed = header_text_trimmed.replace(' ', '-')
        # Check for duplicates.
        ht = header_text_trimmed
        # Set the initial value if we are examining the first occurrency.
        # The state of header_duplicate_counter is available to the caller
        # functions.
        if header_text_trimmed not in header_duplicate_counter:
            header_duplicate_counter[header_text_trimmed] = 0
        if header_duplicate_counter[header_text_trimmed] > 0:
            # Append '-N' to disambiguate the N-th duplicate.
            header_text_trimmed = header_text_trimmed + '-' + str(
                header_duplicate_counter[header_text_trimmed])
        header_duplicate_counter[ht] += 1
        return header_text_trimmed
    elif parser == 'redcarpet':
        # To ensure full compatibility what follows is a direct translation
        # of the rndr_header_anchor C function used in redcarpet.
        STRIPPED = " -&+$,/:;=?@\"#{}|^~[]`\\*()%.!'"
        header_text_trimmed_len = len(header_text_trimmed)
        inserted = 0
        stripped = 0
        header_text_trimmed_middle_stage = ''
        # A while loop is required here: the previous implementation used
        # ``for i in range(...)`` with ``i += 1`` inside the skip loops,
        # but a for statement rebinds ``i`` on every iteration, so
        # '<...>' and '&...;' sequences were never actually skipped as
        # they are in the reference C implementation.
        i = 0
        while i < header_text_trimmed_len:
            if header_text_trimmed[i] == '<':
                # Skip an HTML-like tag up to (and, via the final
                # increment, past) the closing '>'.
                while (i < header_text_trimmed_len
                       and header_text_trimmed[i] != '>'):
                    i += 1
            elif header_text_trimmed[i] == '&':
                # Skip an HTML entity up to (and past) the closing ';'.
                while (i < header_text_trimmed_len
                       and header_text_trimmed[i] != ';'):
                    i += 1
            # str.find() == -1 if character is not found in str.
            # https://docs.python.org/3.6/library/stdtypes.html?highlight=find#str.find
            elif not curses.ascii.isascii(
                    header_text_trimmed[i]) or STRIPPED.find(
                        header_text_trimmed[i]) != -1:
                # Collapse any run of stripped characters into a single
                # '-' separator (but never a leading one).
                if inserted and not stripped:
                    header_text_trimmed_middle_stage += '-'
                stripped = 1
            else:
                header_text_trimmed_middle_stage += header_text_trimmed[
                    i].lower()
                stripped = 0
                inserted += 1
            i += 1
        if stripped > 0 and inserted > 0:
            # Drop the trailing '-' separator.
            header_text_trimmed_middle_stage = header_text_trimmed_middle_stage[
                0:-1]
        if inserted == 0 and header_text_trimmed_len > 0:
            # Nothing survived the stripping: fall back to a djb2 hash of
            # the original text, as redcarpet does.
            hash = 5381
            for j in range(0, header_text_trimmed_len):
                # Get the unicode representation with ord.
                # Unicode should be equal to ASCII in ASCII's range of
                # characters.
                hash = ((hash << 5) + hash) + ord(header_text_trimmed[j])
            # This is equivalent to %x in C. In Python we don't have
            # the length problem so %x is equal to %lx in this case.
            # Apparently there is no %l in Python...
            header_text_trimmed_middle_stage = 'part-' + '{0:x}'.format(hash)
        return header_text_trimmed_middle_stage
def get_atx_heading(line: str,
                    keep_header_levels: int = 3,
                    parser: str = 'github',
                    no_links: bool = False):
    r"""Given a line extract the link label and its type.

    :parameter line: the line to be examined.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :parameter no_links: disables the use of links.
    :type line: str
    :type keep_header_levels: int
    :type parser: str
    :type no_links: bool
    :returns: None if the line does not contain header elements according to
         the rules of the selected markdown parser, or a tuple containing the
         header type and the trimmed header text, according to the selected
         parser rules, otherwise.
    :rtype: typing.Optional[tuple]
    :raises: GithubEmptyLinkLabel or GithubOverflowCharsLinkLabel or a
         built-in exception.
    """
    assert keep_header_levels >= 1
    if len(line) == 0:
        return None
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        # A leading backslash escapes the ATX sequence: not a header.
        if line[0] == '\u005c':
            return None
        # Consume the allowed leading spaces; more than the maximum means
        # the line is indented code, not a header.
        i = 0
        while i < len(line) and line[i] == ' ' and i <= md_parser['github'][
                'header']['max_space_indentation']:
            i += 1
        if i > md_parser['github']['header']['max_space_indentation']:
            return None
        # Consume the '#' run; its length is the candidate header level.
        offset = i
        while i < len(line) and line[i] == '#' and i <= md_parser['github'][
                'header']['max_levels'] + offset:
            i += 1
        if i - offset > md_parser['github']['header'][
                'max_levels'] or i - offset > keep_header_levels or i - offset == 0:
            return None
        current_headers = i - offset
        # Include special cases for line endings which should not be
        # discarded as non-ATX headers.
        if i < len(line) and (line[i] != ' ' and line[i] != '\u000a'
                              and line[i] != '\u000d'):
            return None
        i += 1
        # Exclude leading whitespaces after the ATX header identifier.
        while i < len(line) and line[i] == ' ':
            i += 1
        # An algorithm to find the start and the end of the closing sequence.
        # The closing sequence includes all the significant part of the
        # string. This algorithm has a complexity of O(n) with n being the
        # length of the line.
        # The line is scanned in reverse (line_prime) to find where the
        # optional trailing '# ' closing sequence begins.
        cs_start = i
        cs_end = cs_start
        line_prime = line[::-1]
        hash_char_rounds = 0
        go_on = True
        i = 0
        i_prev = i
        while i < len(line) - cs_start - 1 and go_on:
            # Stop at the first character from the right that is neither a
            # space nor '#', or once more than one '#' run has been seen.
            if ((line_prime[i] != ' ' and line_prime[i] != '#')
                    or hash_char_rounds > 1):
                if i > i_prev:
                    cs_end = len(line_prime) - i_prev
                else:
                    cs_end = len(line_prime) - i
                go_on = False
            while go_on and line_prime[i] == ' ':
                i += 1
            i_prev = i
            while go_on and line_prime[i] == '#':
                i += 1
            if i > i_prev:
                hash_char_rounds += 1
        # Instead of changing the whole algorithm to check for line
        # endings, this seems cleaner.
        find_newline = line.find('\u000a')
        find_carriage_return = line.find('\u000d')
        if find_newline != -1:
            cs_end = min(cs_end, find_newline)
        if find_carriage_return != -1:
            cs_end = min(cs_end, find_carriage_return)
        final_line = line[cs_start:cs_end]
        if not no_links:
            # A trailing backslash would escape the closing ')' of the
            # link; pad with a space to neutralize it.
            if len(final_line) > 0 and final_line[-1] == '\u005c':
                final_line += ' '
            # Reject labels that are only whitespace characters.
            if len(
                    final_line.strip('\u0020').strip('\u0009').strip('\u000a').
                    strip('\u000b').strip('\u000c').strip('\u000d')) == 0:
                raise GithubEmptyLinkLabel
            if len(final_line
                   ) > md_parser['github']['link']['max_chars_label']:
                raise GithubOverflowCharsLinkLabel
            # Escape square brackets if not already escaped.
            i = 0
            while i < len(final_line):
                if (final_line[i] == '[' or final_line[i] == ']'):
                    # Count the backslashes immediately preceding the
                    # bracket: an even number means the bracket itself is
                    # unescaped and needs escaping.
                    j = i - 1
                    consecutive_escape_characters = 0
                    while j >= 0 and final_line[j] == '\u005c':
                        consecutive_escape_characters += 1
                        j -= 1
                    if ((consecutive_escape_characters > 0
                         and consecutive_escape_characters % 2 == 0)
                            or consecutive_escape_characters == 0):
                        tmp = '\u005c'
                    else:
                        tmp = str()
                    final_line = final_line[0:i] + tmp + final_line[i:len(
                        final_line)]
                    i += 1 + len(tmp)
                else:
                    i += 1
    elif parser == 'redcarpet':
        # Redcarpet allows no leading spaces before the '#' run.
        if line[0] != '#':
            return None
        i = 0
        while (i < len(line)
               and i < md_parser['redcarpet']['header']['max_levels']
               and line[i] == '#'):
            i += 1
        current_headers = i
        if i < len(line) and line[i] != ' ':
            return None
        while i < len(line) and line[i] == ' ':
            i += 1
        # Trim the trailing newline, closing '#' run and spaces from the
        # right-hand side of the label.
        end = i
        while end < len(line) and line[end] != '\n':
            end += 1
        while end > 0 and line[end - 1] == '#':
            end -= 1
        while end > 0 and line[end - 1] == ' ':
            end -= 1
        if end > i:
            final_line = line
            # Neutralize a trailing backslash, as in the github branch.
            if not no_links and len(final_line) > 0 and final_line[-1] == '\\':
                final_line += ' '
                end += 1
            final_line = final_line[i:end]
        else:
            return None
    # TODO: escape or remove '[', ']', '(', ')' in inline links for redcarpet,
    # TODO: check link label rules for redcarpet.
    return current_headers, final_line
def get_md_header(header_text_line: str,
                  header_duplicate_counter: dict,
                  keep_header_levels: int = 3,
                  parser: str = 'github',
                  no_links: bool = False) -> dict:
    r"""Build a data structure with the elements needed to create a TOC line.

    :parameter header_text_line: a single markdown line that needs to be
         transformed into a TOC line.
    :parameter header_duplicate_counter: a data structure that contains the
         number of occurrencies of each header anchor link. This is used to
         avoid duplicate anchor links and it is meaningful only for certain
         values of parser.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :type header_text_line: str
    :type header_duplicate_counter: dict
    :type keep_header_levels: int
    :type parser: str
    :returns: None if the input line does not correspond to one of the
         designated cases or a data structure containing the necessary
         components to create a table of contents line, otherwise.
    :rtype: dict
    :raises: a built-in exception.

    .. note::
         This works like a wrapper to other functions.
    """
    result = get_atx_heading(header_text_line, keep_header_levels, parser,
                             no_links)
    if result is None:
        return None
    header_type, header_text_trimmed = result
    return {
        'type': header_type,
        'text_original': header_text_trimmed,
        'text_anchor_link': build_anchor_link(header_text_trimmed,
                                              header_duplicate_counter,
                                              parser),
    }
def is_valid_code_fence_indent(line: str, parser: str = 'github') -> bool:
    r"""Determine if the given line has valid indentation for a code block fence.

    :parameter line: a single markdown line to evaluate.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :type line: str
    :type parser: str
    :returns: True if the given line has valid indentation or False
         otherwise.
    :rtype: bool
    :raises: a built-in exception.
    """
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        # Count the leading spaces and compare them against the maximum
        # allowed for a fence line.
        leading_spaces = len(line) - len(line.lstrip(' '))
        return leading_spaces <= md_parser['github']['code fence'][
            'min_marker_characters']
    elif parser == 'redcarpet':
        # TODO.
        return False
def is_opening_code_fence(line: str, parser: str = 'github'):
    r"""Determine if the given line is possibly the opening of a fenced code block.

    :parameter line: a single markdown line to evaluate.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :type line: str
    :type parser: str
    :returns: None if the input line is not an opening code fence. Otherwise,
         returns the string which will identify the closing code fence
         according to the input parsers' rules.
    :rtype: typing.Optional[str]
    :raises: a built-in exception.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        markers = md_parser['github']['code fence']['marker']
        marker_min_length = md_parser['github']['code fence'][
            'min_marker_characters']
        if not is_valid_code_fence_indent(line):
            return None
        # Drop indentation and the trailing newline before inspecting the
        # marker run.
        line = line.lstrip(' ').rstrip('\n')
        # The line must start with at least the minimum run of a single
        # fence character.
        if not line.startswith(
                (markers[0] * marker_min_length, markers[1] * marker_min_length)):
            return None
        if line == len(line) * line[0]:
            info_string = str()
        else:
            # NOTE(review): str.lstrip strips a *set* of characters, so
            # this removes the whole leading run of the fence character,
            # leaving the info string -- confirm against GFM spec.
            info_string = line.lstrip(line[0])
        # Backticks or tildes in info string are explicitly forbidden.
        if markers[0] in info_string or markers[1] in info_string:
            return None
        # Solves example 107. See:
        # https://github.github.com/gfm/#example-107
        if line.rstrip(markers[0]) != line and line.rstrip(markers[1]) != line:
            return None
        # NOTE(review): rstrip(info_string) strips any trailing characters
        # contained in the info string (a character set, not a suffix) --
        # presumably intended to leave only the fence marker run; verify.
        return line.rstrip(info_string)
    elif parser == 'redcarpet':
        # TODO.
        return None
def is_closing_code_fence(line: str,
                          fence: str,
                          is_document_end: bool = False,
                          parser: str = 'github') -> bool:
    r"""Determine if the given line is the end of a fenced code block.

    :parameter line: a single markdown line to evaluate.
    :parameter fence: a sequence of backticks or tildes marking the start of
         the current code block. This is usually the return value of the
         is_opening_code_fence function.
    :parameter is_document_end: tells the function that the end of the file
         has been reached. Defaults to ``False``.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :type line: str
    :type fence: str
    :type is_document_end: bool
    :type parser: str
    :returns: True if the line ends the current code block. False otherwise.
    :rtype: bool
    :raises: a built-in exception.
    """
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        valid_markers = md_parser['github']['code fence']['marker']
        min_fence_len = md_parser['github']['code fence'][
            'min_marker_characters']

        if not is_valid_code_fence_indent(line):
            return False

        # Validate the opening fence marker itself before comparing the
        # candidate line against it.
        opening = fence.lstrip(' ')
        if not opening.startswith((valid_markers[0], valid_markers[1])):
            return False
        if len(opening) < min_fence_len:
            return False
        opening = opening.rstrip('\n').rstrip(' ')
        # All fence characters must be identical.
        if opening != len(opening) * opening[0]:
            return False

        # An unterminated code block is implicitly closed by the end of the
        # document, according to GFM examples 95 and 96:
        # https://github.github.com/gfm/#example-95
        # https://github.github.com/gfm/#example-96
        if is_document_end:
            return True

        candidate = line.lstrip(' ')
        if not candidate.startswith(opening):
            return False
        candidate = candidate.rstrip('\n').rstrip(' ')
        # The closing fence may not be shorter than the opening one
        # (GFM examples 93 and 94):
        # https://github.github.com/gfm/#example-93
        # https://github.github.com/gfm/#example-94
        if len(candidate) < len(opening):
            return False
        # The closing fence must consist of fence characters only.
        return candidate == len(candidate) * candidate[0]
    elif parser == 'redcarpet':
        # TODO.
        return False
def build_indentation_list(parser: str = 'github'):
    r"""Create a data structure that holds the state of indentations.

    :parameter parser: decides the length of the list.
         Defaults to ``github``.
    :type parser: str
    :returns: indentation_list, a list that contains the state of
         indentations given a header type.
    :rtype: list
    :raises: a built-in exception.
    """
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker', 'redcarpet'):
        # One boolean slot per possible header level, all initially unseen.
        return [
            False for _ in range(md_parser[parser]['header']['max_levels'])
        ]
    return list()
def toc_renders_as_coherent_list(
        header_type_curr: int = 1,
        indentation_list: list = None,
        parser: str = 'github') -> bool:
    r"""Check if the TOC will render as a working list.

    :parameter header_type_curr: the current type of header (h[1-Inf]).
         Defaults to ``1``.
    :parameter indentation_list: a list that contains the state of
         indentations given a header type. If ``None`` (the default), a
         fresh list equivalent to ``build_indentation_list('github')`` is
         created for this call.
    :parameter parser: decides rules on how to generate ordered list markers.
         Defaults to ``github``.
    :type header_type_curr: int
    :type indentation_list: list
    :type parser: str
    :returns: renders_as_list
    :rtype: bool
    :raises: a built-in exception.
    """
    if indentation_list is None:
        # Create the list at call time instead of using a list as the
        # default argument value: a default list is built once at function
        # definition time and, since this function mutates indentation_list,
        # it would be shared and corrupted across calls.
        indentation_list = build_indentation_list('github')
    assert header_type_curr >= 1
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        assert len(
            indentation_list) == md_parser[parser]['header']['max_levels']
        for e in indentation_list:
            assert isinstance(e, bool)

    renders_as_list = True
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        # Update with current information.
        indentation_list[header_type_curr - 1] = True
        # Reset next cells to False, as a detection mechanism.
        for i in range(header_type_curr,
                       md_parser['github']['header']['max_levels']):
            indentation_list[i] = False
        # Check for previous False cells. If there is a "hole" in the list
        # it means that the TOC will have "wrong" indentation spaces, thus
        # either not rendering as an HTML list or not as the user intended.
        i = header_type_curr - 1
        while i >= 0 and indentation_list[i]:
            i -= 1
        if i >= 0:
            renders_as_list = False
    return renders_as_list
if __name__ == '__main__':
pass
|
frnmst/md-toc | md_toc/api.py | increase_index_ordered_list | python | def increase_index_ordered_list(header_type_count: dict,
header_type_prev: int,
header_type_curr: int,
parser: str = 'github'):
r"""Compute the current index for ordered list table of contents.
:parameter header_type_count: the count of each header type.
:parameter header_type_prev: the previous type of header (h[1-Inf]).
:parameter header_type_curr: the current type of header (h[1-Inf]).
:parameter parser: decides rules on how to generate ordered list markers.
Defaults to ``github``.
:type header_type_count: dict
:type header_type_prev: int
:type header_type_curr: int
:type parser: str
:returns: None
:rtype: None
:raises: GithubOverflowOrderedListMarker or a built-in exception.
"""
# header_type_prev might be 0 while header_type_curr can't.
assert header_type_prev >= 0
assert header_type_curr >= 1
# Base cases for a new table of contents or a new index type.
if header_type_prev == 0:
header_type_prev = header_type_curr
if (header_type_curr not in header_type_count
or header_type_prev < header_type_curr):
header_type_count[header_type_curr] = 0
header_type_count[header_type_curr] += 1
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker'):
if header_type_count[header_type_curr] > md_parser['github']['list'][
'ordered']['max_marker_number']:
raise GithubOverflowOrderedListMarker | r"""Compute the current index for ordered list table of contents.
:parameter header_type_count: the count of each header type.
:parameter header_type_prev: the previous type of header (h[1-Inf]).
:parameter header_type_curr: the current type of header (h[1-Inf]).
:parameter parser: decides rules on how to generate ordered list markers.
Defaults to ``github``.
:type header_type_count: dict
:type header_type_prev: int
:type header_type_curr: int
:type parser: str
:returns: None
:rtype: None
:raises: GithubOverflowOrderedListMarker or a built-in exception. | train | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/api.py#L294-L330 | null | #
# api.py
#
# Copyright (C) 2017-2019 frnmst (Franco Masotti) <franco.masotti@live.com>
#
# This file is part of md-toc.
#
# md-toc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# md-toc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with md-toc. If not, see <http://www.gnu.org/licenses/>.
#
"""The main file."""
import fpyutils
import re
import curses.ascii
import sys
from .exceptions import (GithubOverflowCharsLinkLabel, GithubEmptyLinkLabel,
GithubOverflowOrderedListMarker,
StdinIsNotAFileToBeWritten,
TocDoesNotRenderAsCoherentList)
from .constants import common_defaults
from .constants import parser as md_parser
def write_string_on_file_between_markers(filename: str, string: str,
marker: str):
r"""Write the table of contents on a single file.
:parameter filename: the file that needs to be read or modified.
:parameter string: the string that will be written on the file.
:parameter marker: a marker that will identify the start
and the end of the string.
:type filenames: str
:type string: str
:type marker: str
:returns: None
:rtype: None
:raises: StdinIsNotAFileToBeWritten or an fpyutils exception
or a built-in exception.
"""
if filename == '-':
raise StdinIsNotAFileToBeWritten
final_string = marker + '\n\n' + string.rstrip() + '\n\n' + marker + '\n'
marker_line_positions = fpyutils.get_line_matches(
filename, marker, 2, loose_matching=True)
if 1 in marker_line_positions:
if 2 in marker_line_positions:
fpyutils.remove_line_interval(filename, marker_line_positions[1],
marker_line_positions[2], filename)
else:
fpyutils.remove_line_interval(filename, marker_line_positions[1],
marker_line_positions[1], filename)
fpyutils.insert_string_at_line(
filename,
final_string,
marker_line_positions[1],
filename,
append=False)
def write_strings_on_files_between_markers(filenames: list, strings: list,
marker: str):
r"""Write the table of contents on multiple files.
:parameter filenames: the files that needs to be read or modified.
:parameter strings: the strings that will be written on the file. Each
string is associated with one file.
:parameter marker: a marker that will identify the start
and the end of the string.
:type filenames: list
:type string: list
:type marker: str
:returns: None
:rtype: None
:raises: an fpyutils exception or a built-in exception.
"""
assert len(filenames) == len(strings)
if len(filenames) > 0:
for f in filenames:
assert isinstance(f, str)
if len(strings) > 0:
for s in strings:
assert isinstance(s, str)
file_id = 0
for f in filenames:
write_string_on_file_between_markers(f, strings[file_id], marker)
file_id += 1
def build_toc(filename: str,
              ordered: bool = False,
              no_links: bool = False,
              no_indentation: bool = False,
              no_list_coherence: bool = False,
              keep_header_levels: int = 3,
              parser: str = 'github',
              list_marker: str = '-') -> str:
    r"""Build the table of contents of a single file.

    :parameter filename: the file that needs to be read. If it is ``-``
         the standard input is read instead.
    :parameter ordered: decides whether to build an ordered list or not.
         Defaults to ``False``.
    :parameter no_links: disables the use of links.
         Defaults to ``False``.
    :parameter no_indentation: disables indentation in the list.
         Defaults to ``False``.
    :parameter no_list_coherence: disables the check that the TOC renders
         as a coherent (HTML) list. Defaults to ``False``.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``-``.
    :type filename: str
    :type ordered: bool
    :type no_links: bool
    :type no_indentation: bool
    :type no_list_coherence: bool
    :type keep_header_levels: int
    :type parser: str
    :type list_marker: str
    :returns: toc, the corresponding table of contents of the file.
    :rtype: str
    :raises: TocDoesNotRenderAsCoherentList or a built-in exception.
    """
    toc = str()
    header_type_counter = dict()
    header_type_curr = 0
    header_type_prev = 0
    header_duplicate_counter = dict()
    no_of_indentation_spaces_prev = 0
    # Build the ordered-list marker log exactly once (a second, identical
    # computation was previously performed and was dead code).
    if ordered:
        list_marker_log = build_list_marker_log(parser, list_marker)
    else:
        list_marker_log = list()
    if filename == '-':
        f = sys.stdin
    else:
        f = open(filename, 'r')
    is_within_code_fence = False
    code_fence = None
    is_document_end = False
    if not no_indentation and not no_list_coherence:
        # if indentation and list coherence.
        indentation_list = build_indentation_list(parser)
    try:
        line = f.readline()
        while line:
            # Document ending detection.
            #
            # This changes the state of is_within_code_fence if the
            # file has no closing fence markers. This serves no practial
            # purpose since the code would run correctly anyway. It is
            # however more sematically correct.
            #
            # See the unit tests (examples 95 and 96 of the github parser)
            # and the is_closing_code_fence function.
            if filename != '-':
                # stdin is not seekable.
                file_pointer_pos = f.tell()
                if f.readline() == str():
                    is_document_end = True
                f.seek(file_pointer_pos)
            # Code fence detection.
            if is_within_code_fence:
                is_within_code_fence = not is_closing_code_fence(
                    line, code_fence, is_document_end, parser)
                line = f.readline()
            else:
                code_fence = is_opening_code_fence(line, parser)
                if code_fence is not None:
                    # Update the status of the next line.
                    is_within_code_fence = True
                    line = f.readline()
            # NOTE(review): while inside a code fence, ``line`` is advanced
            # both in the branch above and at the bottom of this loop;
            # verify that closing fences preceded by an even number of
            # fenced lines are still detected -- TODO confirm.
            if not is_within_code_fence or code_fence is None:
                # Header detection and gathering.
                header = get_md_header(line, header_duplicate_counter,
                                       keep_header_levels, parser, no_links)
                if header is not None:
                    header_type_curr = header['type']
                    # Take care of the ordered TOC.
                    if ordered:
                        increase_index_ordered_list(header_type_counter,
                                                    header_type_prev,
                                                    header_type_curr, parser)
                        index = header_type_counter[header_type_curr]
                    else:
                        index = 1
                    # Take care of list indentations.
                    if no_indentation:
                        no_of_indentation_spaces_curr = 0
                        # TOC list coherence checks are not necessary
                        # without indentation.
                    else:
                        if not no_list_coherence:
                            if not toc_renders_as_coherent_list(
                                    header_type_curr, indentation_list,
                                    parser):
                                raise TocDoesNotRenderAsCoherentList
                        no_of_indentation_spaces_curr = compute_toc_line_indentation_spaces(
                            header_type_curr, header_type_prev,
                            no_of_indentation_spaces_prev, parser, ordered,
                            list_marker, list_marker_log, index)
                    # Build a single TOC line.
                    toc_line_no_indent = build_toc_line_without_indentation(
                        header, ordered, no_links, index, parser, list_marker)
                    # Save the TOC line with the indentation.
                    toc += build_toc_line(toc_line_no_indent,
                                          no_of_indentation_spaces_curr) + '\n'
                    header_type_prev = header_type_curr
                    no_of_indentation_spaces_prev = no_of_indentation_spaces_curr
                # endif
            # endif
            line = f.readline()
        # endwhile
    finally:
        # Close only real files: the standard input is a shared stream the
        # caller may still need.
        if filename != '-':
            f.close()
    return toc
def build_multiple_tocs(filenames: list,
                        ordered: bool = False,
                        no_links: bool = False,
                        no_indentation: bool = False,
                        no_list_coherence: bool = False,
                        keep_header_levels: int = 3,
                        parser: str = 'github',
                        list_marker: str = '-') -> list:
    r"""Parse files by line and build the table of contents of each file.

    :parameter filenames: the files that needs to be read.
    :parameter ordered: decides whether to build an ordered list or not.
         Defaults to ``False``.
    :parameter no_links: disables the use of links.
         Defaults to ``False``.
    :parameter no_indentation: disables indentation in the list.
         Defaults to ``False``.
    :parameter no_list_coherence: disables the list coherence check.
         Defaults to ``False``.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``-``.
    :type filenames: list
    :type ordered: bool
    :type no_links: bool
    :type no_indentation: bool
    :type no_list_coherence: bool
    :type keep_header_levels: int
    :type parser: str
    :type list_marker: str
    :returns: toc_struct, the corresponding table of contents for each input
         file.
    :rtype: list
    :raises: a built-in exception.
    """
    for fname in filenames:
        assert isinstance(fname, str)
    # An empty input list means: read from the standard input.
    if len(filenames) == 0:
        filenames.append('-')
    # One TOC per file, in input order.
    toc_struct = [
        build_toc(fname, ordered, no_links, no_indentation, no_list_coherence,
                  keep_header_levels, parser, list_marker)
        for fname in filenames
    ]
    return toc_struct
def build_list_marker_log(parser: str = 'github',
                          list_marker: str = '.') -> list:
    r"""Create a data structure that holds list marker information.

    :parameter parser: decides rules on how compute indentations.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``.``.
    :type parser: str
    :type list_marker: str
    :returns: list_marker_log, the data structure.
    :rtype: list
    :raises: a built-in exception.

    .. note::
         This function makes sense for ordered lists only.
    """
    github_like = ('github', 'cmark', 'gitlab', 'commonmarker')
    if parser in github_like or parser == 'redcarpet':
        assert list_marker in md_parser[parser]['list']['ordered'][
            'closing_markers']

    list_marker_log = list()
    if parser in github_like:
        # Every header level starts at the minimum marker number.
        initial_marker = str(
            md_parser['github']['list']['ordered']['min_marker_number']
        ) + list_marker
        list_marker_log = [
            initial_marker
            for _ in range(md_parser['github']['header']['max_levels'])
        ]
    # redcarpet (and unknown parsers) keep an empty log.
    return list_marker_log
def compute_toc_line_indentation_spaces(
        header_type_curr: int = 1,
        header_type_prev: int = 0,
        no_of_indentation_spaces_prev: int = 0,
        parser: str = 'github',
        ordered: bool = False,
        list_marker: str = '-',
        list_marker_log: list = None,
        index: int = 1) -> int:
    r"""Compute the number of indentation spaces for the TOC list element.

    :parameter header_type_curr: the current type of header (h[1-Inf]).
         Defaults to ``1``.
    :parameter header_type_prev: the previous type of header (h[1-Inf]).
         Defaults to ``0``.
    :parameter no_of_indentation_spaces_prev: the number of previous
         indentation spaces. Defaults to ``0``.
    :parameter parser: decides rules on how compute indentations.
         Defaults to ``github``.
    :parameter ordered: if set to ``True``, numbers will be used
         as list ids, otherwise a dash character.
         Defaults to ``False``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element.
         Defaults to ``-``.
    :parameter list_marker_log: a data structure that holds list marker
         information for ordered lists. This function updates it in place.
         If ``None`` a fresh ``build_list_marker_log('github', '.')`` is
         used. Defaults to ``None``.
    :parameter index: a number that will be used as list id in case of an
         ordered table of contents. Defaults to ``1``.
    :type header_type_curr: int
    :type header_type_prev: int
    :type no_of_indentation_spaces_prev: int
    :type parser: str
    :type ordered: bool
    :type list_marker: str
    :type list_marker_log: list
    :type index: int
    :returns: no_of_indentation_spaces_curr, the number of indentation spaces
         for the list element.
    :rtype: int
    :raises: a built-in exception.

    .. note::
         Please note that this function
         assumes that no_of_indentation_spaces_prev contains the correct
         number of spaces.
    """
    assert header_type_curr >= 1
    assert header_type_prev >= 0
    assert no_of_indentation_spaces_prev >= 0
    # This function mutates list_marker_log. Building the default lazily
    # (instead of as a def-time default argument) prevents state from one
    # call leaking into the next when the caller omits the argument.
    if list_marker_log is None:
        list_marker_log = build_list_marker_log('github', '.')
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        if ordered:
            assert list_marker in md_parser[parser]['list']['ordered'][
                'closing_markers']
        else:
            assert list_marker in md_parser[parser]['list']['unordered'][
                'bullet_markers']
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        if ordered:
            assert len(
                list_marker_log) == md_parser['github']['header']['max_levels']
            for e in list_marker_log:
                assert isinstance(e, str)
    assert index >= 1

    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        if header_type_prev == 0:
            # Base case for the first toc line.
            no_of_indentation_spaces_curr = 0
        elif header_type_curr == header_type_prev:
            # Base case for same indentation.
            no_of_indentation_spaces_curr = no_of_indentation_spaces_prev
        else:
            if ordered:
                list_marker_prev = str(list_marker_log[header_type_curr - 1])
            else:
                # list_marker for unordered lists will always be 1 character.
                list_marker_prev = list_marker
            # Generic cases.
            if header_type_curr > header_type_prev:
                # More indentation.
                no_of_indentation_spaces_curr = (
                    no_of_indentation_spaces_prev + len(list_marker_prev) +
                    len(' '))
            elif header_type_curr < header_type_prev:
                # Less indentation.
                no_of_indentation_spaces_curr = (
                    no_of_indentation_spaces_prev -
                    (len(list_marker_prev) + len(' ')))
            # Reset older nested list indices. If this is not performed then
            # future nested ordered lists will rely on incorrect data to
            # compute indentations.
            if ordered:
                for i in range((header_type_curr - 1) + 1,
                               md_parser['github']['header']['max_levels']):
                    list_marker_log[i] = str(
                        md_parser['github']['list']['ordered']
                        ['min_marker_number']) + list_marker
        # Update the data structure.
        if ordered:
            list_marker_log[header_type_curr - 1] = str(index) + list_marker
    elif parser == 'redcarpet':
        # redcarpet uses a fixed 4-space indent per extra header level.
        no_of_indentation_spaces_curr = 4 * (header_type_curr - 1)
    return no_of_indentation_spaces_curr
def build_toc_line_without_indentation(header: dict,
                                       ordered: bool = False,
                                       no_links: bool = False,
                                       index: int = 1,
                                       parser: str = 'github',
                                       list_marker: str = '-') -> str:
    r"""Return a list element of the table of contents.

    :parameter header: a data structure that contains the original
         text, the trimmed text and the type of header.
    :parameter ordered: if set to ``True``, numbers will be used
         as list ids, otherwise a dash character. Defaults
         to ``False``.
    :parameter no_links: disables the use of links. Defaults to ``False``.
    :parameter index: a number that will be used as list id in case of an
         ordered table of contents. Defaults to ``1``.
    :parameter parser: decides rules on how compute indentations.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``-``.
    :type header: dict
    :type ordered: bool
    :type no_links: bool
    :type index: int
    :type parser: str
    :type list_marker: str
    :returns: toc_line_no_indent, a single line of the table of contents
         without indentation.
    :rtype: str
    :raises: a built-in exception.
    """
    # Validate the header structure.
    for key, expected_type in (('type', int), ('text_original', str),
                               ('text_anchor_link', str)):
        assert key in header
        assert isinstance(header[key], expected_type)
    assert header['type'] >= 1
    assert index >= 1

    supported = ('github', 'cmark', 'gitlab', 'commonmarker', 'redcarpet')
    if parser in supported:
        if ordered:
            assert list_marker in md_parser[parser]['list']['ordered'][
                'closing_markers']
        else:
            assert list_marker in md_parser[parser]['list']['unordered'][
                'bullet_markers']

    toc_line_no_indent = str()
    if parser in supported:
        if ordered:
            list_marker = str(index) + list_marker
        # FIXME: is this always correct?
        if no_links:
            line = header['text_original']
        else:
            line = ('[' + header['text_original'] + ']' + '(#' +
                    header['text_anchor_link'] + ')')
        toc_line_no_indent = list_marker + ' ' + line
    return toc_line_no_indent
def build_toc_line(toc_line_no_indent: str,
                   no_of_indentation_spaces: int = 0) -> str:
    r"""Build the TOC line.

    :parameter toc_line_no_indent: the TOC line without indentation.
    :parameter no_of_indentation_spaces: the number of indentation spaces.
         Defaults to ``0``.
    :type toc_line_no_indent: str
    :type no_of_indentation_spaces: int
    :returns: toc_line, a single line of the table of contents.
    :rtype: str
    :raises: a built-in exception.
    """
    assert no_of_indentation_spaces >= 0
    # Prefix the line with the requested amount of spaces.
    return ' ' * no_of_indentation_spaces + toc_line_no_indent
def build_anchor_link(header_text_trimmed: str,
                      header_duplicate_counter: str,
                      parser: str = 'github') -> str:
    r"""Apply the specified slug rule to build the anchor link.

    :parameter header_text_trimmed: the text that needs to be transformed
         in a link.
    :parameter header_duplicate_counter: a data structure that keeps track of
         possible duplicate header links in order to avoid them. This is
         meaningful only for certain values of parser.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :type header_text_trimmed: str
    :type header_duplicate_counter: dict
    :type parser: str
    :returns: None if the specified parser is not recognized, or the anchor
         link, otherwise.
    :rtype: str
    :raises: a built-in exception.

    .. note::
         The licenses of each markdown parser algorithm are reported on
         the 'Markdown spec' documentation page.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        header_text_trimmed = header_text_trimmed.lower()
        # Remove punctuation: Keep spaces, hypens and "word characters"
        # only.
        header_text_trimmed = re.sub(r'[^\w\s\- ]', '', header_text_trimmed)
        header_text_trimmed = header_text_trimmed.replace(' ', '-')

        # Check for duplicates.
        ht = header_text_trimmed
        # Set the initial value if we are examining the first occurrency.
        # The state of header_duplicate_counter is available to the caller
        # functions.
        if header_text_trimmed not in header_duplicate_counter:
            header_duplicate_counter[header_text_trimmed] = 0
        if header_duplicate_counter[header_text_trimmed] > 0:
            header_text_trimmed = header_text_trimmed + '-' + str(
                header_duplicate_counter[header_text_trimmed])
        header_duplicate_counter[ht] += 1
        return header_text_trimmed
    elif parser == 'redcarpet':
        # To ensure full compatibility what follows is a direct translation
        # of the rndr_header_anchor C function used in redcarpet.
        #
        # The original Python translation used ``for i in range(...)`` and
        # reassigned ``i`` in the inner loops: such reassignments do not
        # affect a range-based loop, so the skipping of ``<...>`` tags and
        # ``&...;`` entities never actually advanced the scan. A while loop
        # reproduces the C ``for (;; ++i)`` semantics correctly.
        STRIPPED = " -&+$,/:;=?@\"#{}|^~[]`\\*()%.!'"
        header_text_trimmed_len = len(header_text_trimmed)
        inserted = 0
        stripped = 0
        header_text_trimmed_middle_stage = ''
        i = 0
        while i < header_text_trimmed_len:
            if header_text_trimmed[i] == '<':
                # Skip an HTML tag.
                while (i < header_text_trimmed_len
                       and header_text_trimmed[i] != '>'):
                    i += 1
            elif header_text_trimmed[i] == '&':
                # Skip an HTML entity.
                while (i < header_text_trimmed_len
                       and header_text_trimmed[i] != ';'):
                    i += 1
            # str.find() == -1 if character is not found in str.
            # https://docs.python.org/3.6/library/stdtypes.html?highlight=find#str.find
            elif not curses.ascii.isascii(
                    header_text_trimmed[i]) or STRIPPED.find(
                        header_text_trimmed[i]) != -1:
                # Collapse runs of stripped characters into a single dash,
                # but only between inserted characters.
                if inserted and not stripped:
                    header_text_trimmed_middle_stage += '-'
                stripped = 1
            else:
                header_text_trimmed_middle_stage += header_text_trimmed[
                    i].lower()
                stripped = 0
                inserted += 1
            i += 1
        # Drop a trailing separator dash.
        if stripped > 0 and inserted > 0:
            header_text_trimmed_middle_stage = header_text_trimmed_middle_stage[
                0:-1]
        if inserted == 0 and header_text_trimmed_len > 0:
            # Nothing survived the stripping: fall back to a djb2 hash of
            # the original text, as redcarpet does.
            hash_value = 5381
            for i in range(0, header_text_trimmed_len):
                # Get the unicode representation with ord.
                # Unicode should be equal to ASCII in ASCII's range of
                # characters.
                hash_value = ((hash_value << 5) + hash_value) + ord(
                    header_text_trimmed[i])
            # This is equivalent to %x in C. In Python we don't have
            # the length problem so %x is equal to %lx in this case.
            # Apparently there is no %l in Python...
            header_text_trimmed_middle_stage = 'part-' + '{0:x}'.format(
                hash_value)
        return header_text_trimmed_middle_stage
def get_atx_heading(line: str,
                    keep_header_levels: int = 3,
                    parser: str = 'github',
                    no_links: bool = False):
    r"""Given a line extract the link label and its type.

    :parameter line: the line to be examined.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :parameter no_links: disables the use of links.
    :type line: str
    :type keep_header_levels: int
    :type parser: str
    :type no_links: bool
    :returns: None if the line does not contain header elements according to
         the rules of the selected markdown parser, or a tuple containing the
         header type and the trimmed header text, according to the selected
         parser rules, otherwise.
    :rtype: typing.Optional[tuple]
    :raises: GithubEmptyLinkLabel or GithubOverflowCharsLinkLabel or a
         built-in exception.
    """
    assert keep_header_levels >= 1

    # An empty line can never be a header.
    if len(line) == 0:
        return None
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        # A leading backslash escapes the hash characters: not a header.
        if line[0] == '\u005c':
            return None

        # Skip the leading spaces, up to the maximum allowed indentation.
        i = 0
        while i < len(line) and line[i] == ' ' and i <= md_parser['github'][
                'header']['max_space_indentation']:
            i += 1
        if i > md_parser['github']['header']['max_space_indentation']:
            return None

        # Count the run of '#' characters: it determines the header level.
        offset = i
        while i < len(line) and line[i] == '#' and i <= md_parser['github'][
                'header']['max_levels'] + offset:
            i += 1
        if i - offset > md_parser['github']['header'][
                'max_levels'] or i - offset > keep_header_levels or i - offset == 0:
            return None
        current_headers = i - offset

        # Include special cases for line endings which should not be
        # discarded as non-ATX headers: after the '#' run only a space,
        # a line feed or a carriage return (or end of string) is allowed.
        if i < len(line) and (line[i] != ' ' and line[i] != '\u000a'
                              and line[i] != '\u000d'):
            return None
        i += 1

        # Exclude leading whitespaces after the ATX header identifier.
        while i < len(line) and line[i] == ' ':
            i += 1

        # An algorithm to find the start and the end of the closing sequence.
        # The closing sequence includes all the significant part of the
        # string. This algorithm has a complexity of O(n) with n being the
        # length of the line.
        cs_start = i
        cs_end = cs_start
        # Scan the line right-to-left to detect an optional closing run of
        # '#' characters (possibly surrounded by spaces).
        line_prime = line[::-1]
        hash_char_rounds = 0
        go_on = True
        i = 0
        i_prev = i
        while i < len(line) - cs_start - 1 and go_on:
            if ((line_prime[i] != ' ' and line_prime[i] != '#')
                    or hash_char_rounds > 1):
                if i > i_prev:
                    cs_end = len(line_prime) - i_prev
                else:
                    cs_end = len(line_prime) - i
                go_on = False
            while go_on and line_prime[i] == ' ':
                i += 1
            i_prev = i
            while go_on and line_prime[i] == '#':
                i += 1
            if i > i_prev:
                hash_char_rounds += 1

        # Instead of changing the whole algorithm to check for line
        # endings, this seems cleaner.
        find_newline = line.find('\u000a')
        find_carriage_return = line.find('\u000d')
        if find_newline != -1:
            cs_end = min(cs_end, find_newline)
        if find_carriage_return != -1:
            cs_end = min(cs_end, find_carriage_return)

        final_line = line[cs_start:cs_end]

        if not no_links:
            # A trailing backslash would escape the closing link bracket:
            # neutralize it by appending a space.
            if len(final_line) > 0 and final_line[-1] == '\u005c':
                final_line += ' '
            # An all-whitespace label cannot be used as a link label.
            if len(
                    final_line.strip('\u0020').strip('\u0009').strip('\u000a').
                    strip('\u000b').strip('\u000c').strip('\u000d')) == 0:
                raise GithubEmptyLinkLabel
            if len(final_line
                   ) > md_parser['github']['link']['max_chars_label']:
                raise GithubOverflowCharsLinkLabel

            # Escape square brackets if not already escaped.
            i = 0
            while i < len(final_line):
                if (final_line[i] == '[' or final_line[i] == ']'):
                    # Count the backslashes immediately preceding the
                    # bracket: an even count means the bracket is unescaped.
                    j = i - 1
                    consecutive_escape_characters = 0
                    while j >= 0 and final_line[j] == '\u005c':
                        consecutive_escape_characters += 1
                        j -= 1
                    if ((consecutive_escape_characters > 0
                         and consecutive_escape_characters % 2 == 0)
                            or consecutive_escape_characters == 0):
                        tmp = '\u005c'
                    else:
                        tmp = str()
                    final_line = final_line[0:i] + tmp + final_line[i:len(
                        final_line)]
                    i += 1 + len(tmp)
                else:
                    i += 1
    elif parser == 'redcarpet':
        # redcarpet does not allow leading spaces before the '#' run.
        if line[0] != '#':
            return None

        i = 0
        while (i < len(line)
               and i < md_parser['redcarpet']['header']['max_levels']
               and line[i] == '#'):
            i += 1
        current_headers = i

        # A space must separate the '#' run from the header text.
        if i < len(line) and line[i] != ' ':
            return None
        while i < len(line) and line[i] == ' ':
            i += 1

        # Trim the trailing newline, closing '#' run and spaces.
        end = i
        while end < len(line) and line[end] != '\n':
            end += 1
        while end > 0 and line[end - 1] == '#':
            end -= 1
        while end > 0 and line[end - 1] == ' ':
            end -= 1
        if end > i:
            final_line = line
            # Neutralize a trailing backslash, as in the github branch.
            if not no_links and len(final_line) > 0 and final_line[-1] == '\\':
                final_line += ' '
                end += 1
            final_line = final_line[i:end]
        else:
            return None

    # TODO: escape or remove '[', ']', '(', ')' in inline links for redcarpet,
    # TODO: check link label rules for redcarpet.

    return current_headers, final_line
def get_md_header(header_text_line: str,
                  header_duplicate_counter: dict,
                  keep_header_levels: int = 3,
                  parser: str = 'github',
                  no_links: bool = False) -> dict:
    r"""Build a data structure with the elements needed to create a TOC line.

    :parameter header_text_line: a single markdown line that needs to be
         transformed into a TOC line.
    :parameter header_duplicate_counter: a data structure that contains the
         number of occurrencies of each header anchor link. This is used to
         avoid duplicate anchor links and it is meaningful only for certain
         values of parser.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :parameter no_links: disables the use of links. Defaults to ``False``.
    :type header_text_line: str
    :type header_duplicate_counter: dict
    :type keep_header_levels: int
    :type parser: str
    :type no_links: bool
    :returns: None if the input line does not correspond to one of the
         designated cases or a data structure containing the necessary
         components to create a table of contents line, otherwise.
    :rtype: dict
    :raises: a built-in exception.

    .. note::
         This works like a wrapper to other functions.
    """
    parsed = get_atx_heading(header_text_line, keep_header_levels, parser,
                             no_links)
    # Non-header lines propagate None to the caller.
    if parsed is None:
        return None
    header_type, header_text_trimmed = parsed
    anchor = build_anchor_link(header_text_trimmed, header_duplicate_counter,
                               parser)
    return {
        'type': header_type,
        'text_original': header_text_trimmed,
        'text_anchor_link': anchor,
    }
def is_valid_code_fence_indent(line: str, parser: str = 'github') -> bool:
    r"""Determine if the given line has valid indentation for a code block fence.

    :parameter line: a single markdown line to evaluate.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :type line: str
    :type parser: str
    :returns: True if the given line has valid indentation or False
         otherwise.
    :rtype: bool
    :raises: a built-in exception.
    """
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        # The number of leading spaces must not exceed the parser limit.
        leading_spaces = len(line) - len(line.lstrip(' '))
        return (leading_spaces <=
                md_parser['github']['code fence']['min_marker_characters'])
    elif parser == 'redcarpet':
        # TODO.
        return False
def is_opening_code_fence(line: str, parser: str = 'github'):
    r"""Determine if the given line is possibly the opening of a fenced code block.

    :parameter line: a single markdown line to evaluate.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :type line: str
    :type parser: str
    :returns: None if the input line is not an opening code fence. Otherwise,
         returns the string which will identify the closing code fence
         according to the input parsers' rules.
    :rtype: typing.Optional[str]
    :raises: a built-in exception.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        markers = md_parser['github']['code fence']['marker']
        marker_min_length = md_parser['github']['code fence'][
            'min_marker_characters']

        if not is_valid_code_fence_indent(line):
            return None

        line = line.lstrip(' ').rstrip('\n')
        # The line must start with at least the minimum number of backtick
        # or tilde marker characters.
        if not line.startswith(
            (markers[0] * marker_min_length, markers[1] * marker_min_length)):
            return None

        if line == len(line) * line[0]:
            # The whole line is made of marker characters: no info string.
            info_string = str()
        else:
            # Everything after the leading marker run is the info string.
            info_string = line.lstrip(line[0])

        # Backticks or tildes in info string are explicitly forbidden.
        if markers[0] in info_string or markers[1] in info_string:
            return None

        # Solves example 107. See:
        # https://github.github.com/gfm/#example-107
        if line.rstrip(markers[0]) != line and line.rstrip(markers[1]) != line:
            return None

        # NOTE(review): str.rstrip treats its argument as a character set,
        # not as a suffix. This works because the info string cannot contain
        # marker characters (checked above) -- confirm for corner cases such
        # as an info string sharing characters with the line's tail.
        return line.rstrip(info_string)
    elif parser == 'redcarpet':
        # TODO.
        return None
def is_closing_code_fence(line: str,
                          fence: str,
                          is_document_end: bool = False,
                          parser: str = 'github') -> bool:
    r"""Determine if the given line is the end of a fenced code block.

    :parameter line: a single markdown line to evaluate.
    :parameter fence: a sequence of backticks or tildes marking the start of
         the current code block. This is usually the return value of the
         is_opening_code_fence function.
    :parameter is_document_end: This variable tells the function that the
         end of the file is reached.
         Defaults to ``False``.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :type line: str
    :type fence: str
    :type is_document_end: bool
    :type parser: str
    :returns: True if the line ends the current code block. False otherwise.
    :rtype: bool
    :raises: a built-in exception.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        markers = md_parser['github']['code fence']['marker']
        marker_min_length = md_parser['github']['code fence'][
            'min_marker_characters']

        if not is_valid_code_fence_indent(line):
            return False

        # Remove opening fence indentation after it is known to be valid.
        fence = fence.lstrip(' ')
        # Check if fence uses valid characters.
        if not fence.startswith((markers[0], markers[1])):
            return False
        if len(fence) < marker_min_length:
            return False

        # Additional security.
        fence = fence.rstrip('\n').rstrip(' ')
        # Check that all fence characters are equal.
        if fence != len(fence) * fence[0]:
            return False

        # We might be inside a code block if this is not closed
        # by the end of the document, according to example 95 and 96.
        # This means that the end of the document corresponds to
        # a closing code fence.
        # Of course we first have to check that fence is a valid opening
        # code fence marker.
        # See:
        # https://github.github.com/gfm/#example-95
        # https://github.github.com/gfm/#example-96
        if is_document_end:
            return True

        # Check if line uses the same character as fence.
        line = line.lstrip(' ')
        if not line.startswith(fence):
            return False
        line = line.rstrip('\n').rstrip(' ')

        # Solves example 93 and 94. See:
        # https://github.github.com/gfm/#example-93
        # https://github.github.com/gfm/#example-94
        if len(line) < len(fence):
            return False

        # Closing fence must not have alien characters.
        if line != len(line) * line[0]:
            return False
        return True
    elif parser == 'redcarpet':
        # TODO.
        return False
def build_indentation_list(parser: str = 'github'):
    r"""Create a data structure that holds the state of indentations.

    :parameter parser: decides the length of the list.
         Defaults to ``github``.
    :type parser: str
    :returns: indentation_list, a list that contains the state of
         indentations given a header type.
    :rtype: list
    :raises: a built-in exception.
    """
    indentation_list = list()
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker', 'redcarpet'):
        # One boolean flag per possible header level, all initially unset.
        indentation_list = [
            False for _ in range(md_parser[parser]['header']['max_levels'])
        ]
    return indentation_list
def toc_renders_as_coherent_list(
        header_type_curr: int = 1,
        indentation_list: list = None,
        parser: str = 'github') -> bool:
    r"""Check if the TOC will render as a working list.

    :parameter header_type_curr: the current type of header (h[1-Inf]).
         Defaults to ``1``.
    :parameter indentation_list: a list that contains the state of
         indentations given a header type. This function updates it in
         place. If ``None`` a fresh ``build_indentation_list('github')``
         is used. Defaults to ``None``.
    :parameter parser: decides rules on how to generate ordered list markers.
         Defaults to ``github``.
    :type header_type_curr: int
    :type indentation_list: list
    :type parser: str
    :returns: renders_as_list
    :rtype: bool
    :raises: a built-in exception.
    """
    assert header_type_curr >= 1
    # This function mutates indentation_list. Building the default lazily
    # (instead of as a def-time default argument) prevents state from one
    # call leaking into the next when the caller omits the argument.
    if indentation_list is None:
        indentation_list = build_indentation_list('github')
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        assert len(
            indentation_list) == md_parser[parser]['header']['max_levels']
        for e in indentation_list:
            assert isinstance(e, bool)

    renders_as_list = True
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        # Update with current information.
        indentation_list[header_type_curr - 1] = True
        # Reset next cells to False, as a detection mechanism.
        for i in range(header_type_curr,
                       md_parser['github']['header']['max_levels']):
            indentation_list[i] = False
        # Check for previous False cells. If there is a "hole" in the list
        # it means that the TOC will have "wrong" indentation spaces, thus
        # either not rendering as an HTML list or not as the user intended.
        i = header_type_curr - 1
        while i >= 0 and indentation_list[i]:
            i -= 1
        if i >= 0:
            renders_as_list = False
    return renders_as_list
if __name__ == '__main__':
    # This module is a library: running it directly is a no-op.
    pass
|
frnmst/md-toc | md_toc/api.py | build_list_marker_log | python | def build_list_marker_log(parser: str = 'github',
list_marker: str = '.') -> list:
r"""Create a data structure that holds list marker information.
:parameter parser: decides rules on how compute indentations.
Defaults to ``github``.
:parameter list_marker: a string that contains some of the first
characters of the list element. Defaults to ``-``.
:type parser: str
:type list_marker: str
:returns: list_marker_log, the data structure.
:rtype: list
:raises: a built-in exception.
.. note::
This function makes sense for ordered lists only.
"""
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker' or parser == 'redcarpet'):
assert list_marker in md_parser[parser]['list']['ordered'][
'closing_markers']
list_marker_log = list()
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker'):
list_marker_log = [
str(md_parser['github']['list']['ordered']['min_marker_number']) +
list_marker
for i in range(0, md_parser['github']['header']['max_levels'])
]
elif parser == 'redcarpet':
pass
return list_marker_log | r"""Create a data structure that holds list marker information.
:parameter parser: decides rules on how compute indentations.
Defaults to ``github``.
:parameter list_marker: a string that contains some of the first
characters of the list element. Defaults to ``-``.
:type parser: str
:type list_marker: str
:returns: list_marker_log, the data structure.
:rtype: list
:raises: a built-in exception.
.. note::
This function makes sense for ordered lists only. | train | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/api.py#L333-L368 | null | #
# api.py
#
# Copyright (C) 2017-2019 frnmst (Franco Masotti) <franco.masotti@live.com>
#
# This file is part of md-toc.
#
# md-toc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# md-toc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with md-toc. If not, see <http://www.gnu.org/licenses/>.
#
"""The main file."""
import fpyutils
import re
import curses.ascii
import sys
from .exceptions import (GithubOverflowCharsLinkLabel, GithubEmptyLinkLabel,
GithubOverflowOrderedListMarker,
StdinIsNotAFileToBeWritten,
TocDoesNotRenderAsCoherentList)
from .constants import common_defaults
from .constants import parser as md_parser
def write_string_on_file_between_markers(filename: str, string: str,
                                         marker: str):
    r"""Replace (or insert) *string* between two *marker* lines of a file.

    :parameter filename: the file that needs to be read or modified.
        Must not be ``-``.
    :parameter string: the string that will be written on the file.
    :parameter marker: a marker that will identify the start
        and the end of the string.
    :type filename: str
    :type string: str
    :type marker: str
    :returns: None
    :rtype: None
    :raises: StdinIsNotAFileToBeWritten or an fpyutils exception
        or a built-in exception.
    """
    if filename == '-':
        raise StdinIsNotAFileToBeWritten

    final_string = marker + '\n\n' + string.rstrip() + '\n\n' + marker + '\n'
    marker_line_positions = fpyutils.get_line_matches(
        filename, marker, 2, loose_matching=True)

    # Nothing is written unless at least one marker already exists.
    if 1 in marker_line_positions:
        # Drop the old content: the whole first-to-second marker interval
        # when both markers exist, or just the single marker line.
        last_line = (marker_line_positions[2]
                     if 2 in marker_line_positions
                     else marker_line_positions[1])
        fpyutils.remove_line_interval(filename, marker_line_positions[1],
                                      last_line, filename)
        fpyutils.insert_string_at_line(
            filename,
            final_string,
            marker_line_positions[1],
            filename,
            append=False)
def write_strings_on_files_between_markers(filenames: list, strings: list,
                                           marker: str):
    r"""Write each table of contents on its corresponding file.

    :parameter filenames: the files that need to be read or modified.
    :parameter strings: the strings that will be written on the files. Each
        string is associated with one file.
    :parameter marker: a marker that will identify the start
        and the end of each string.
    :type filenames: list
    :type strings: list
    :type marker: str
    :returns: None
    :rtype: None
    :raises: an fpyutils exception or a built-in exception.
    """
    assert len(filenames) == len(strings)
    for filename in filenames:
        assert isinstance(filename, str)
    for string in strings:
        assert isinstance(string, str)

    # Both lists have the same length: process them pairwise.
    for filename, string in zip(filenames, strings):
        write_string_on_file_between_markers(filename, string, marker)
def build_toc(filename: str,
              ordered: bool = False,
              no_links: bool = False,
              no_indentation: bool = False,
              no_list_coherence: bool = False,
              keep_header_levels: int = 3,
              parser: str = 'github',
              list_marker: str = '-') -> str:
    r"""Build the table of contents of a single file.

    :parameter filename: the file that needs to be read. If ``-`` is
         passed, standard input is read instead.
    :parameter ordered: decides whether to build an ordered list or not.
         Defaults to ``False``.
    :parameter no_links: disables the use of links.
         Defaults to ``False``.
    :parameter no_indentation: disables indentation in the list.
         Defaults to ``False``.
    :parameter no_list_coherence: disables the TOC list coherence check.
         Defaults to ``False``.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``-``.
    :type filename: str
    :type ordered: bool
    :type no_links: bool
    :type no_indentation: bool
    :type no_list_coherence: bool
    :type keep_header_levels: int
    :type parser: str
    :type list_marker: str
    :returns: toc, the corresponding table of contents of the file.
    :rtype: str
    :raises: TocDoesNotRenderAsCoherentList or a built-in exception.
    """
    toc = str()
    header_type_counter = dict()
    header_type_curr = 0
    header_type_prev = 0
    header_duplicate_counter = dict()
    no_of_indentation_spaces_prev = 0

    # Build the ordered-list marker state exactly once (it was previously
    # computed twice when ordered was True).
    if ordered:
        list_marker_log = build_list_marker_log(parser, list_marker)
    else:
        list_marker_log = list()

    if filename == '-':
        f = sys.stdin
    else:
        f = open(filename, 'r')
    line = f.readline()

    is_within_code_fence = False
    code_fence = None
    is_document_end = False
    if not no_indentation and not no_list_coherence:
        # List coherence checks need the indentation state.
        indentation_list = build_indentation_list(parser)

    while line:
        # Document ending detection.
        #
        # This changes the state of is_within_code_fence if the
        # file has no closing fence markers. This serves no practical
        # purpose since the code would run correctly anyway. It is
        # however more semantically correct.
        #
        # See the unit tests (examples 95 and 96 of the github parser)
        # and the is_closing_code_fence function.
        if filename != '-':
            # stdin is not seekable.
            file_pointer_pos = f.tell()
            if f.readline() == str():
                is_document_end = True
            f.seek(file_pointer_pos)

        # Code fence detection. Exactly one line is consumed per
        # iteration (at the bottom of the loop), so the closing-fence
        # check runs on every line inside a fence.
        if is_within_code_fence:
            is_within_code_fence = not is_closing_code_fence(
                line, code_fence, is_document_end, parser)
        else:
            code_fence = is_opening_code_fence(line, parser)
            if code_fence is not None:
                # Update the status of the next line.
                is_within_code_fence = True

        if not is_within_code_fence or code_fence is None:
            # Header detection and gathering. A closing fence line is
            # never a valid ATX heading, so checking it here is harmless.
            header = get_md_header(line, header_duplicate_counter,
                                   keep_header_levels, parser, no_links)
            if header is not None:
                header_type_curr = header['type']

                # Take care of the ordered TOC.
                if ordered:
                    increase_index_ordered_list(header_type_counter,
                                                header_type_prev,
                                                header_type_curr, parser)
                    index = header_type_counter[header_type_curr]
                else:
                    index = 1

                # Take care of list indentations.
                if no_indentation:
                    no_of_indentation_spaces_curr = 0
                    # TOC list coherence checks are not necessary
                    # without indentation.
                else:
                    if not no_list_coherence:
                        if not toc_renders_as_coherent_list(
                                header_type_curr, indentation_list, parser):
                            raise TocDoesNotRenderAsCoherentList
                    no_of_indentation_spaces_curr = compute_toc_line_indentation_spaces(
                        header_type_curr, header_type_prev,
                        no_of_indentation_spaces_prev, parser, ordered,
                        list_marker, list_marker_log, index)

                # Build a single TOC line.
                toc_line_no_indent = build_toc_line_without_indentation(
                    header, ordered, no_links, index, parser, list_marker)

                # Save the TOC line with the indentation.
                toc += build_toc_line(toc_line_no_indent,
                                      no_of_indentation_spaces_curr) + '\n'

                header_type_prev = header_type_curr
                no_of_indentation_spaces_prev = no_of_indentation_spaces_curr

        line = f.readline()

    # Never close sys.stdin: the caller (or a later file in the same run)
    # may still need it.
    if filename != '-':
        f.close()
    return toc
def build_multiple_tocs(filenames: list,
                        ordered: bool = False,
                        no_links: bool = False,
                        no_indentation: bool = False,
                        no_list_coherence: bool = False,
                        keep_header_levels: int = 3,
                        parser: str = 'github',
                        list_marker: str = '-') -> list:
    r"""Parse files by line and build the table of contents of each file.

    :parameter filenames: the files that need to be read. An empty list
         means that standard input is read instead.
    :parameter ordered: decides whether to build an ordered list or not.
         Defaults to ``False``.
    :parameter no_links: disables the use of links.
         Defaults to ``False``.
    :parameter no_indentation: disables indentation in the list.
         Defaults to ``False``.
    :parameter no_list_coherence: disables the TOC list coherence check.
         Defaults to ``False``.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``-``.
    :type filenames: list
    :type ordered: bool
    :type no_links: bool
    :type no_indentation: bool
    :type no_list_coherence: bool
    :type keep_header_levels: int
    :type parser: str
    :type list_marker: str
    :returns: toc_struct, the corresponding table of contents for each input
         file.
    :rtype: list
    :raises: a built-in exception.
    """
    for f in filenames:
        assert isinstance(f, str)

    # Rebind instead of appending: the original code mutated the caller's
    # list by appending '-' to it, a surprising side effect.
    if len(filenames) == 0:
        filenames = ['-']

    toc_struct = [
        build_toc(filename, ordered, no_links, no_indentation,
                  no_list_coherence, keep_header_levels, parser, list_marker)
        for filename in filenames
    ]
    return toc_struct
def increase_index_ordered_list(header_type_count: dict,
                                header_type_prev: int,
                                header_type_curr: int,
                                parser: str = 'github'):
    r"""Compute the current index for ordered list table of contents.

    :parameter header_type_count: the count of each header type.
    :parameter header_type_prev: the previous type of header (h[1-Inf]).
    :parameter header_type_curr: the current type of header (h[1-Inf]).
    :parameter parser: decides rules on how to generate ordered list markers.
        Defaults to ``github``.
    :type header_type_count: dict
    :type header_type_prev: int
    :type header_type_curr: int
    :type parser: str
    :returns: None
    :rtype: None
    :raises: GithubOverflowOrderedListMarker or a built-in exception.
    """
    # header_type_prev might be 0 while header_type_curr can't.
    assert header_type_prev >= 0
    assert header_type_curr >= 1

    # A TOC that just started behaves as if the previous header had the
    # same type as the current one.
    if header_type_prev == 0:
        header_type_prev = header_type_curr

    # Restart the count when this header type appears for the first time
    # or when descending into a deeper nesting level.
    needs_reset = (header_type_curr not in header_type_count
                   or header_type_prev < header_type_curr)
    if needs_reset:
        header_type_count[header_type_curr] = 0
    header_type_count[header_type_curr] += 1

    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        max_marker = md_parser['github']['list']['ordered'][
            'max_marker_number']
        if header_type_count[header_type_curr] > max_marker:
            raise GithubOverflowOrderedListMarker
def compute_toc_line_indentation_spaces(
        header_type_curr: int = 1,
        header_type_prev: int = 0,
        no_of_indentation_spaces_prev: int = 0,
        parser: str = 'github',
        ordered: bool = False,
        list_marker: str = '-',
        list_marker_log: list = None,
        index: int = 1) -> int:
    r"""Compute the number of indentation spaces for the TOC list element.

    :parameter header_type_curr: the current type of header (h[1-Inf]).
         Defaults to ``1``.
    :parameter header_type_prev: the previous type of header (h[1-Inf]).
         Defaults to ``0``.
    :parameter no_of_indentation_spaces_prev: the number of previous
         indentation spaces. Defaults to ``0``.
    :parameter parser: decides rules on how compute indentations.
         Defaults to ``github``.
    :parameter ordered: if set to ``True``, numbers will be used
         as list ids, otherwise a dash character.
         Defaults to ``False``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element.
         Defaults to ``-``.
    :parameter list_marker_log: a data structure that holds list marker
         information for ordered lists. If ``None``, a fresh structure
         equivalent to ``build_list_marker_log('github', '.')`` is built.
         Defaults to ``None``.
    :parameter index: a number that will be used as list id in case of an
         ordered table of contents. Defaults to ``1``.
    :type header_type_curr: int
    :type header_type_prev: int
    :type no_of_indentation_spaces_prev: int
    :type parser: str
    :type ordered: bool
    :type list_marker: str
    :type list_marker_log: list
    :type index: int
    :returns: no_of_indentation_spaces_curr, the number of indentation spaces
         for the list element.
    :rtype: int
    :raises: a built-in exception.

    .. note::
         Please note that this function
         assumes that no_of_indentation_spaces_prev contains the correct
         number of spaces.
    """
    # This function mutates list_marker_log in place, so a def-time mutable
    # default value would silently share (and corrupt) marker state between
    # unrelated calls. Build a fresh default on each call instead.
    if list_marker_log is None:
        list_marker_log = build_list_marker_log('github', '.')

    assert header_type_curr >= 1
    assert header_type_prev >= 0
    assert no_of_indentation_spaces_prev >= 0
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        if ordered:
            assert list_marker in md_parser[parser]['list']['ordered'][
                'closing_markers']
        else:
            assert list_marker in md_parser[parser]['list']['unordered'][
                'bullet_markers']
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        if ordered:
            assert len(
                list_marker_log) == md_parser['github']['header']['max_levels']
            for e in list_marker_log:
                assert isinstance(e, str)
    assert index >= 1

    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        if header_type_prev == 0:
            # Base case for the first toc line.
            no_of_indentation_spaces_curr = 0
        elif header_type_curr == header_type_prev:
            # Base case for same indentation.
            no_of_indentation_spaces_curr = no_of_indentation_spaces_prev
        else:
            if ordered:
                list_marker_prev = str(list_marker_log[header_type_curr - 1])
            else:
                # list_marker for unordered lists will always be 1 character.
                list_marker_prev = list_marker
            # Generic cases.
            if header_type_curr > header_type_prev:
                # More indentation.
                no_of_indentation_spaces_curr = (
                    no_of_indentation_spaces_prev + len(list_marker_prev) +
                    len(' '))
            elif header_type_curr < header_type_prev:
                # Less indentation.
                no_of_indentation_spaces_curr = (
                    no_of_indentation_spaces_prev -
                    (len(list_marker_prev) + len(' ')))
                # Reset older nested list indices. If this is not performed
                # then future nested ordered lists will rely on incorrect
                # data to compute indentations.
                if ordered:
                    for i in range((header_type_curr - 1) + 1,
                                   md_parser['github']['header']
                                   ['max_levels']):
                        list_marker_log[i] = str(
                            md_parser['github']['list']['ordered']
                            ['min_marker_number']) + list_marker

        # Update the data structure with the marker used at this level.
        if ordered:
            list_marker_log[header_type_curr - 1] = str(index) + list_marker
    elif parser == 'redcarpet':
        # Redcarpet uses a fixed 4-space indentation step per header level.
        no_of_indentation_spaces_curr = 4 * (header_type_curr - 1)

    return no_of_indentation_spaces_curr
def build_toc_line_without_indentation(header: dict,
                                       ordered: bool = False,
                                       no_links: bool = False,
                                       index: int = 1,
                                       parser: str = 'github',
                                       list_marker: str = '-') -> str:
    r"""Return a list element of the table of contents.

    :parameter header: a data structure that contains the original
        text, the trimmed text and the type of header.
    :parameter ordered: if set to ``True``, numbers will be used
        as list ids, otherwise a dash character. Defaults
        to ``False``.
    :parameter no_links: disables the use of links. Defaults to ``False``.
    :parameter index: a number that will be used as list id in case of an
        ordered table of contents. Defaults to ``1``.
    :parameter parser: decides rules on how compute indentations.
        Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
        characters of the list element. Defaults to ``-``.
    :type header: dict
    :type ordered: bool
    :type no_links: bool
    :type index: int
    :type parser: str
    :type list_marker: str
    :returns: toc_line_no_indent, a single line of the table of contents
        without indentation.
    :rtype: str
    :raises: a built-in exception.
    """
    for key in ('type', 'text_original', 'text_anchor_link'):
        assert key in header
    assert isinstance(header['type'], int)
    assert isinstance(header['text_original'], str)
    assert isinstance(header['text_anchor_link'], str)
    assert header['type'] >= 1
    assert index >= 1

    known_parsers = ('github', 'cmark', 'gitlab', 'commonmarker', 'redcarpet')
    if parser in known_parsers:
        if ordered:
            assert list_marker in md_parser[parser]['list']['ordered'][
                'closing_markers']
        else:
            assert list_marker in md_parser[parser]['list']['unordered'][
                'bullet_markers']

    toc_line_no_indent = str()
    if parser in known_parsers:
        if ordered:
            list_marker = str(index) + list_marker
        # FIXME: is this always correct?
        if no_links:
            line = header['text_original']
        else:
            line = ('[' + header['text_original'] + ']' + '(#'
                    + header['text_anchor_link'] + ')')
        toc_line_no_indent = list_marker + ' ' + line
    return toc_line_no_indent
def build_toc_line(toc_line_no_indent: str,
                   no_of_indentation_spaces: int = 0) -> str:
    r"""Prefix a TOC line with the requested indentation.

    :parameter toc_line_no_indent: the TOC line without indentation.
    :parameter no_of_indentation_spaces: the number of indentation spaces.
        Defaults to ``0``.
    :type toc_line_no_indent: str
    :type no_of_indentation_spaces: int
    :returns: toc_line, a single line of the table of contents.
    :rtype: str
    :raises: a built-in exception.
    """
    assert no_of_indentation_spaces >= 0
    return ' ' * no_of_indentation_spaces + toc_line_no_indent
def build_anchor_link(header_text_trimmed: str,
                      header_duplicate_counter: str,
                      parser: str = 'github') -> str:
    r"""Apply the specified slug rule to build the anchor link.

    :parameter header_text_trimmed: the text that needs to be transformed
         in a link.
    :parameter header_duplicate_counter: a data structure that keeps track of
         possible duplicate header links in order to avoid them. This is
         meaningful only for certain values of parser.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :type header_text_trimmed: str
    :type header_duplicate_counter: dict
    :type parser: str
    :returns: None if the specified parser is not recognized, or the anchor
         link, otherwise.
    :rtype: str
    :raises: a built-in exception.

    .. note::
         The licenses of each markdown parser algorithm are reported on
         the 'Markdown spec' documentation page.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        header_text_trimmed = header_text_trimmed.lower()
        # Remove punctuation: keep spaces, hyphens and "word characters"
        # only.
        header_text_trimmed = re.sub(r'[^\w\s\- ]', '', header_text_trimmed)
        header_text_trimmed = header_text_trimmed.replace(' ', '-')

        # Check for duplicates.
        ht = header_text_trimmed
        # Set the initial value if we are examining the first occurrency.
        # The state of header_duplicate_counter is available to the caller
        # functions.
        if header_text_trimmed not in header_duplicate_counter:
            header_duplicate_counter[header_text_trimmed] = 0
        if header_duplicate_counter[header_text_trimmed] > 0:
            header_text_trimmed = header_text_trimmed + '-' + str(
                header_duplicate_counter[header_text_trimmed])
        header_duplicate_counter[ht] += 1
        return header_text_trimmed
    elif parser == 'redcarpet':
        # To ensure full compatibility what follows is a direct translation
        # of the rndr_header_anchor C function used in redcarpet.
        STRIPPED = " -&+$,/:;=?@\"#{}|^~[]`\\*()%.!'"
        header_text_trimmed_len = len(header_text_trimmed)
        inserted = 0
        stripped = 0
        header_text_trimmed_middle_stage = ''
        # A while loop is required here: the C original advances the index
        # inside the loop body to skip '<...>' and '&...;' sequences. A
        # Python "for i in range(...)" would silently undo those skips,
        # because reassigning the loop variable does not affect iteration.
        i = 0
        while i < header_text_trimmed_len:
            if header_text_trimmed[i] == '<':
                # Skip everything up to the closing '>'.
                while i < header_text_trimmed_len and header_text_trimmed[
                        i] != '>':
                    i += 1
            elif header_text_trimmed[i] == '&':
                # Skip everything up to the closing ';'.
                while i < header_text_trimmed_len and header_text_trimmed[
                        i] != ';':
                    i += 1
            # str.find() == -1 if character is not found in str.
            # https://docs.python.org/3.6/library/stdtypes.html?highlight=find#str.find
            elif not curses.ascii.isascii(
                    header_text_trimmed[i]) or STRIPPED.find(
                        header_text_trimmed[i]) != -1:
                if inserted and not stripped:
                    header_text_trimmed_middle_stage += '-'
                stripped = 1
            else:
                header_text_trimmed_middle_stage += header_text_trimmed[
                    i].lower()
                stripped = 0
                inserted += 1
            i += 1

        # Drop the trailing separator left behind by a stripped run.
        if stripped > 0 and inserted > 0:
            header_text_trimmed_middle_stage = header_text_trimmed_middle_stage[
                0:-1]

        if inserted == 0 and header_text_trimmed_len > 0:
            hash = 5381
            for i in range(0, header_text_trimmed_len):
                # Get the unicode representation with ord.
                # Unicode should be equal to ASCII in ASCII's range of
                # characters.
                hash = ((hash << 5) + hash) + ord(header_text_trimmed[i])
            # This is equivalent to %x in C. In Python we don't have
            # the length problem so %x is equal to %lx in this case.
            # Apparently there is no %l in Python...
            header_text_trimmed_middle_stage = 'part-' + '{0:x}'.format(hash)

        return header_text_trimmed_middle_stage
def get_atx_heading(line: str,
                    keep_header_levels: int = 3,
                    parser: str = 'github',
                    no_links: bool = False):
    r"""Given a line extract the link label and its type.

    :parameter line: the line to be examined.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :parameter no_links: disables the use of links.
    :type line: str
    :type keep_header_levels: int
    :type parser: str
    :type no_links: bool
    :returns: None if the line does not contain header elements according to
         the rules of the selected markdown parser, or a tuple containing the
         header type and the trimmed header text, according to the selected
         parser rules, otherwise.
    :rtype: typing.Optional[tuple]
    :raises: GithubEmptyLinkLabel or GithubOverflowCharsLinkLabel or a
         built-in exception.
    """
    assert keep_header_levels >= 1

    if len(line) == 0:
        return None

    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):

        # '\u005c' is a backslash: it escapes the first character, so the
        # line cannot start an ATX heading.
        if line[0] == '\u005c':
            return None

        # Consume the allowed leading spaces (at most
        # max_space_indentation of them).
        i = 0
        while i < len(line) and line[i] == ' ' and i <= md_parser['github'][
                'header']['max_space_indentation']:
            i += 1
        if i > md_parser['github']['header']['max_space_indentation']:
            return None

        # Count the run of '#' characters: its length is the header type.
        offset = i
        while i < len(line) and line[i] == '#' and i <= md_parser['github'][
                'header']['max_levels'] + offset:
            i += 1
        if i - offset > md_parser['github']['header'][
                'max_levels'] or i - offset > keep_header_levels or i - offset == 0:
            return None
        current_headers = i - offset

        # Include special cases for line endings which should not be
        # discarded as non-ATX headers.
        # '\u000a' is a line feed, '\u000d' is a carriage return.
        if i < len(line) and (line[i] != ' ' and line[i] != '\u000a'
                              and line[i] != '\u000d'):
            return None

        i += 1
        # Exclude leading whitespaces after the ATX header identifier.
        while i < len(line) and line[i] == ' ':
            i += 1

        # An algorithm to find the start and the end of the closing sequence.
        # The closing sequence includes all the significant part of the
        # string. This algorithm has a complexity of O(n) with n being the
        # length of the line.
        cs_start = i
        cs_end = cs_start
        # Scan the line backwards: the significant text ends where the
        # trailing run of spaces and '#' characters begins.
        line_prime = line[::-1]
        hash_char_rounds = 0
        go_on = True
        i = 0
        i_prev = i
        while i < len(line) - cs_start - 1 and go_on:
            # Stop when a non-space, non-'#' character is found, or after
            # more than one separate run of '#' characters.
            if ((line_prime[i] != ' ' and line_prime[i] != '#')
                    or hash_char_rounds > 1):
                if i > i_prev:
                    cs_end = len(line_prime) - i_prev
                else:
                    cs_end = len(line_prime) - i
                go_on = False
            while go_on and line_prime[i] == ' ':
                i += 1
            i_prev = i
            while go_on and line_prime[i] == '#':
                i += 1
            if i > i_prev:
                hash_char_rounds += 1

        # Instead of changing the whole algorithm to check for line
        # endings, this seems cleaner.
        find_newline = line.find('\u000a')
        find_carriage_return = line.find('\u000d')
        if find_newline != -1:
            cs_end = min(cs_end, find_newline)
        if find_carriage_return != -1:
            cs_end = min(cs_end, find_carriage_return)

        final_line = line[cs_start:cs_end]

        if not no_links:
            # A trailing backslash would escape the closing link bracket:
            # neutralise it with a space.
            if len(final_line) > 0 and final_line[-1] == '\u005c':
                final_line += ' '
            # A link label made only of whitespace characters is invalid.
            if len(
                    final_line.strip('\u0020').strip('\u0009').strip('\u000a').
                    strip('\u000b').strip('\u000c').strip('\u000d')) == 0:
                raise GithubEmptyLinkLabel
            if len(final_line
                   ) > md_parser['github']['link']['max_chars_label']:
                raise GithubOverflowCharsLinkLabel

            # Escape square brackets if not already escaped.
            i = 0
            while i < len(final_line):
                if (final_line[i] == '[' or final_line[i] == ']'):
                    j = i - 1
                    # Count the backslashes immediately preceding the
                    # bracket: an even count means the bracket itself is
                    # not escaped.
                    consecutive_escape_characters = 0
                    while j >= 0 and final_line[j] == '\u005c':
                        consecutive_escape_characters += 1
                        j -= 1
                    if ((consecutive_escape_characters > 0
                         and consecutive_escape_characters % 2 == 0)
                            or consecutive_escape_characters == 0):
                        tmp = '\u005c'
                    else:
                        tmp = str()
                    final_line = final_line[0:i] + tmp + final_line[i:len(
                        final_line)]
                    i += 1 + len(tmp)
                else:
                    i += 1
    elif parser == 'redcarpet':
        # NOTE(review): keep_header_levels is not enforced in this branch —
        # confirm whether that is intended.
        if line[0] != '#':
            return None

        i = 0
        while (i < len(line)
               and i < md_parser['redcarpet']['header']['max_levels']
               and line[i] == '#'):
            i += 1
        current_headers = i

        if i < len(line) and line[i] != ' ':
            return None

        while i < len(line) and line[i] == ' ':
            i += 1

        # Find the end of the significant text: drop the trailing newline,
        # then trailing '#' characters, then trailing spaces.
        end = i
        while end < len(line) and line[end] != '\n':
            end += 1

        while end > 0 and line[end - 1] == '#':
            end -= 1

        while end > 0 and line[end - 1] == ' ':
            end -= 1

        if end > i:
            final_line = line
            # A trailing backslash would escape the closing link bracket:
            # neutralise it by including an extra space.
            if not no_links and len(final_line) > 0 and final_line[-1] == '\\':
                final_line += ' '
                end += 1
            final_line = final_line[i:end]
        else:
            return None

    # TODO: escape or remove '[', ']', '(', ')' in inline links for redcarpet,
    # TODO: check link label rules for redcarpet.

    return current_headers, final_line
def get_md_header(header_text_line: str,
                  header_duplicate_counter: dict,
                  keep_header_levels: int = 3,
                  parser: str = 'github',
                  no_links: bool = False) -> dict:
    r"""Build a data structure with the elements needed to create a TOC line.

    :parameter header_text_line: a single markdown line that needs to be
        transformed into a TOC line.
    :parameter header_duplicate_counter: a data structure that contains the
        number of occurrencies of each header anchor link. This is used to
        avoid duplicate anchor links and it is meaningful only for certain
        values of parser.
    :parameter keep_header_levels: the maximum level of headers to be
        considered as such when building the table of contents.
        Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
        Defaults to ``github``.
    :type header_text_line: str
    :type header_duplicate_counter: dict
    :type keep_header_levels: int
    :type parser: str
    :returns: None if the input line does not correspond to one of the
        designated cases or a data structure containing the necessary
        components to create a table of contents line, otherwise.
    :rtype: dict

    .. note::
        This works like a wrapper to other functions.
    """
    result = get_atx_heading(header_text_line, keep_header_levels, parser,
                             no_links)
    if result is None:
        return None
    header_type, header_text_trimmed = result
    return {
        'type': header_type,
        'text_original': header_text_trimmed,
        'text_anchor_link': build_anchor_link(header_text_trimmed,
                                              header_duplicate_counter,
                                              parser),
    }
def is_valid_code_fence_indent(line: str, parser: str = 'github') -> bool:
    r"""Determine if the given line has valid indentation for a code block fence.

    :parameter line: a single markdown line to evaluate.
    :parameter parser: decides rules on how to generate the anchor text.
        Defaults to ``github``.
    :type line: str
    :type parser: str
    :returns: True if the given line has valid indentation or False
        otherwise.
    :rtype: bool
    :raises: a built-in exception.
    """
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        # Number of leading spaces before the first non-space character.
        leading_spaces = len(line) - len(line.lstrip(' '))
        return (leading_spaces <=
                md_parser['github']['code fence']['min_marker_characters'])
    elif parser == 'redcarpet':
        # TODO.
        return False
def is_opening_code_fence(line: str, parser: str = 'github'):
    r"""Determine if the given line is possibly the opening of a fenced code block.

    :parameter line: a single markdown line to evaluate.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :type line: str
    :type parser: str
    :returns: None if the input line is not an opening code fence. Otherwise,
         returns the string which will identify the closing code fence
         according to the input parsers' rules.
    :rtype: typing.Optional[str]
    :raises: a built-in exception.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        markers = md_parser['github']['code fence']['marker']
        marker_min_length = md_parser['github']['code fence'][
            'min_marker_characters']

        if not is_valid_code_fence_indent(line):
            return None

        line = line.lstrip(' ').rstrip('\n')
        # A fence must start with at least marker_min_length identical
        # marker characters (backticks or tildes).
        if not line.startswith(
                (markers[0] * marker_min_length,
                 markers[1] * marker_min_length)):
            return None

        # A line made exclusively of marker characters carries no info
        # string; otherwise the info string is whatever follows the
        # leading marker run.
        if line == len(line) * line[0]:
            info_string = str()
        else:
            info_string = line.lstrip(line[0])

        # Backticks or tildes in info string are explicitly forbidden.
        if markers[0] in info_string or markers[1] in info_string:
            return None

        # Solves example 107. See:
        # https://github.github.com/gfm/#example-107
        if line.rstrip(markers[0]) != line and line.rstrip(markers[1]) != line:
            return None

        # str.rstrip treats info_string as a set of characters. Since the
        # guard above rules out marker characters inside the info string,
        # this removes the info string and leaves only the leading marker
        # run, which identifies the closing fence.
        return line.rstrip(info_string)
    elif parser == 'redcarpet':
        # TODO.
        return None
def is_closing_code_fence(line: str,
                          fence: str,
                          is_document_end: bool = False,
                          parser: str = 'github') -> bool:
    r"""Determine if the given line is the end of a fenced code block.

    :parameter line: a single markdown line to evaluate.
    :parameter fence: a sequence of backticks or tildes marking the start of
         the current code block. This is usually the return value of the
         is_opening_code_fence function.
    :parameter is_document_end: This variable tells the function that the
         end of the file is reached.
         Defaults to ``False``.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :type line: str
    :type fence: str
    :type is_document_end: bool
    :type parser: str
    :returns: True if the line ends the current code block. False otherwise.
    :rtype: bool
    :raises: a built-in exception.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        markers = md_parser['github']['code fence']['marker']
        marker_min_length = md_parser['github']['code fence'][
            'min_marker_characters']

        if not is_valid_code_fence_indent(line):
            return False

        # Remove opening fence indentation after it is known to be valid.
        fence = fence.lstrip(' ')
        # Check if fence uses valid characters.
        if not fence.startswith((markers[0], markers[1])):
            return False
        if len(fence) < marker_min_length:
            return False

        # Additional security.
        fence = fence.rstrip('\n').rstrip(' ')
        # Check that all fence characters are equal.
        if fence != len(fence) * fence[0]:
            return False

        # We might be inside a code block if this is not closed
        # by the end of the document, according to example 95 and 96.
        # This means that the end of the document corresponds to
        # a closing code fence.
        # Of course we first have to check that fence is a valid opening
        # code fence marker.
        # See:
        # https://github.github.com/gfm/#example-95
        # https://github.github.com/gfm/#example-96
        if is_document_end:
            return True

        # Check if line uses the same character as fence.
        line = line.lstrip(' ')
        if not line.startswith(fence):
            return False

        line = line.rstrip('\n').rstrip(' ')
        # Solves example 93 and 94. See:
        # https://github.github.com/gfm/#example-93
        # https://github.github.com/gfm/#example-94
        # The closing fence must be at least as long as the opening one.
        if len(line) < len(fence):
            return False

        # Closing fence must not have alien characters.
        if line != len(line) * line[0]:
            return False

        return True
    elif parser == 'redcarpet':
        # TODO.
        return False
def build_indentation_list(parser: str = 'github'):
    r"""Create a data structure that holds the state of indentations.

    :parameter parser: decides the length of the list.
        Defaults to ``github``.
    :type parser: str
    :returns: indentation_list, a list that contains the state of
        indentations given a header type.
    :rtype: list
    :raises: a built-in exception.
    """
    indentation_list = list()
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker', 'redcarpet'):
        # One boolean flag per possible header level, all initially unset.
        indentation_list = [False] * md_parser[parser]['header']['max_levels']
    return indentation_list
def toc_renders_as_coherent_list(
        header_type_curr: int = 1,
        indentation_list: list = None,
        parser: str = 'github') -> bool:
    r"""Check if the TOC will render as a working list.

    :parameter header_type_curr: the current type of header (h[1-Inf]).
        Defaults to ``1``.
    :parameter indentation_list: the state of indentation for each header
        level. Defaults to ``None``, in which case a fresh
        ``build_indentation_list('github')`` structure is created.
    :parameter parser: decides rules on how to generate ordered list markers.
        Defaults to ``github``.
    :type header_type_curr: int
    :type indentation_list: list
    :type parser: str
    :returns: renders_as_list
    :rtype: bool
    :raises: a built-in exception.
    """
    # Fix for a mutable default argument: the original default
    # (``build_indentation_list('github')``) was evaluated once at
    # definition time and mutated below, so state leaked across calls
    # that relied on the default. Build a fresh structure per call.
    if indentation_list is None:
        indentation_list = build_indentation_list('github')
    assert header_type_curr >= 1
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        assert len(
            indentation_list) == md_parser[parser]['header']['max_levels']
        for e in indentation_list:
            assert isinstance(e, bool)

    renders_as_list = True
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        # Update with current information.
        indentation_list[header_type_curr - 1] = True
        # Reset next cells to False, as a detection mechanism.
        for i in range(header_type_curr,
                       md_parser['github']['header']['max_levels']):
            indentation_list[i] = False
        # Check for previous False cells. If there is a "hole" in the list
        # it means that the TOC will have "wrong" indentation spaces, thus
        # either not rendering as an HTML list or not as the user intended.
        i = header_type_curr - 1
        while i >= 0 and indentation_list[i]:
            i -= 1
        if i >= 0:
            renders_as_list = False
    return renders_as_list
# Importing this module must not run anything; the CLI entry point lives
# elsewhere in the package.
if __name__ == '__main__':
    pass
|
def compute_toc_line_indentation_spaces(
        header_type_curr: int = 1,
        header_type_prev: int = 0,
        no_of_indentation_spaces_prev: int = 0,
        parser: str = 'github',
        ordered: bool = False,
        list_marker: str = '-',
        list_marker_log: list = None,
        index: int = 1) -> int:
    r"""Compute the number of indentation spaces for the TOC list element.

    :parameter header_type_curr: the current type of header (h[1-Inf]).
        Defaults to ``1``.
    :parameter header_type_prev: the previous type of header (h[1-Inf]).
        Defaults to ``0``.
    :parameter no_of_indentation_spaces_prev: the number of previous
        indentation spaces. Defaults to ``0``.
    :parameter parser: decides rules on how compute indentations.
        Defaults to ``github``.
    :parameter ordered: if set to ``True``, numbers will be used
        as list ids; otherwise a dash character is used.
        Defaults to ``False``.
    :parameter list_marker: a string that contains some of the first
        characters of the list element.
        Defaults to ``-``.
    :parameter list_marker_log: a data structure that holds list marker
        information for ordered lists.
        Defaults to ``None``, in which case a fresh
        ``build_list_marker_log('github', '.')`` structure is created.
    :parameter index: a number that will be used as list id in case of an
        ordered table of contents. Defaults to ``1``.
    :type header_type_curr: int
    :type header_type_prev: int
    :type no_of_indentation_spaces_prev: int
    :type parser: str
    :type ordered: bool
    :type list_marker: str
    :type list_marker_log: list
    :type index: int
    :returns: no_of_indentation_spaces_curr, the number of indentation spaces
        for the list element.
    :rtype: int
    :raises: a built-in exception.

    .. note::
        Please note that this function
        assumes that no_of_indentation_spaces_prev contains the correct
        number of spaces.
    """
    # Fix for a mutable default argument: the original default
    # (``build_list_marker_log('github', '.')``) was evaluated once at
    # definition time and is mutated below when ``ordered`` is set, so
    # marker state leaked across unrelated calls using the default.
    if list_marker_log is None:
        list_marker_log = build_list_marker_log('github', '.')
    assert header_type_curr >= 1
    assert header_type_prev >= 0
    assert no_of_indentation_spaces_prev >= 0
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        if ordered:
            assert list_marker in md_parser[parser]['list']['ordered'][
                'closing_markers']
        else:
            assert list_marker in md_parser[parser]['list']['unordered'][
                'bullet_markers']
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        if ordered:
            assert len(
                list_marker_log) == md_parser['github']['header']['max_levels']
            for e in list_marker_log:
                assert isinstance(e, str)
    assert index >= 1

    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        if header_type_prev == 0:
            # Base case for the first toc line.
            no_of_indentation_spaces_curr = 0
        elif header_type_curr == header_type_prev:
            # Base case for same indentation.
            no_of_indentation_spaces_curr = no_of_indentation_spaces_prev
        else:
            if ordered:
                list_marker_prev = str(list_marker_log[header_type_curr - 1])
            else:
                # list_marker for unordered lists will always be 1 character.
                list_marker_prev = list_marker
            # Generic cases.
            if header_type_curr > header_type_prev:
                # More indentation.
                no_of_indentation_spaces_curr = (
                    no_of_indentation_spaces_prev + len(list_marker_prev) +
                    len(' '))
            elif header_type_curr < header_type_prev:
                # Less indentation.
                no_of_indentation_spaces_curr = (
                    no_of_indentation_spaces_prev -
                    (len(list_marker_prev) + len(' ')))
            # Reset older nested list indices. If this is not performed then
            # future nested ordered lists will rely on incorrect data to
            # compute indentations.
            if ordered:
                for i in range((header_type_curr - 1) + 1,
                               md_parser['github']['header']['max_levels']):
                    list_marker_log[i] = str(
                        md_parser['github']['list']['ordered']
                        ['min_marker_number']) + list_marker
        # Update the data structure.
        if ordered:
            list_marker_log[header_type_curr - 1] = str(index) + list_marker
    elif parser == 'redcarpet':
        no_of_indentation_spaces_curr = 4 * (header_type_curr - 1)
    return no_of_indentation_spaces_curr
:parameter header_type_curr: the current type of header (h[1-Inf]).
Defaults to ``1``.
:parameter header_type_prev: the previous type of header (h[1-Inf]).
Defaults to ``0``.
:parameter no_of_indentation_spaces_prev: the number of previous indentation spaces.
Defaults to ``0``.
:parameter parser: decides rules on how compute indentations.
Defaults to ``github``.
:parameter ordered: if set to ``True``, numbers will be used
    as list ids; otherwise a dash character is used.
    Defaults to ``False``.
:parameter list_marker: a string that contains some of the first
characters of the list element.
Defaults to ``-``.
:parameter list_marker_log: a data structure that holds list marker
information for ordered lists.
Defaults to ``build_list_marker_log('github', '.')``.
:parameter index: a number that will be used as list id in case of an
ordered table of contents. Defaults to ``1``.
:type header_type_curr: int
:type header_type_prev: int
:type no_of_indentation_spaces_prev: int
:type parser: str
:type ordered: bool
:type list_marker: str
:type list_marker_log: list
:type index: int
:returns: no_of_indentation_spaces_curr, the number of indentation spaces
for the list element.
:rtype: int
:raises: a built-in exception.
.. note::
Please note that this function
assumes that no_of_indentation_spaces_prev contains the correct
number of spaces. | train | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/api.py#L371-L483 | null | #
# api.py
#
# Copyright (C) 2017-2019 frnmst (Franco Masotti) <franco.masotti@live.com>
#
# This file is part of md-toc.
#
# md-toc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# md-toc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with md-toc. If not, see <http://www.gnu.org/licenses/>.
#
"""The main file."""
import fpyutils
import re
import curses.ascii
import sys
from .exceptions import (GithubOverflowCharsLinkLabel, GithubEmptyLinkLabel,
GithubOverflowOrderedListMarker,
StdinIsNotAFileToBeWritten,
TocDoesNotRenderAsCoherentList)
from .constants import common_defaults
from .constants import parser as md_parser
def write_string_on_file_between_markers(filename: str, string: str,
                                         marker: str):
    r"""Write the table of contents on a single file.
    :parameter filename: the file that needs to be read or modified.
    :parameter string: the string that will be written on the file.
    :parameter marker: a marker that will identify the start
        and the end of the string.
    :type filename: str
    :type string: str
    :type marker: str
    :returns: None
    :rtype: None
    :raises: StdinIsNotAFileToBeWritten or an fpyutils exception
        or a built-in exception.
    """
    # Standard input cannot be rewritten in place.
    if filename == '-':
        raise StdinIsNotAFileToBeWritten
    # Surround the payload with the marker on its own line above and below.
    final_string = marker + '\n\n' + string.rstrip() + '\n\n' + marker + '\n'
    # Find up to the first two marker occurrences in the file.
    marker_line_positions = fpyutils.get_line_matches(
        filename, marker, 2, loose_matching=True)
    # If the file contains no marker at all, nothing is written.
    if 1 in marker_line_positions:
        if 2 in marker_line_positions:
            # Two markers: drop everything between (and including) them.
            fpyutils.remove_line_interval(filename, marker_line_positions[1],
                                          marker_line_positions[2], filename)
        else:
            # A single marker: drop only that line.
            fpyutils.remove_line_interval(filename, marker_line_positions[1],
                                          marker_line_positions[1], filename)
        # Insert the freshly delimited string where the first marker was.
        fpyutils.insert_string_at_line(
            filename,
            final_string,
            marker_line_positions[1],
            filename,
            append=False)
def write_strings_on_files_between_markers(filenames: list, strings: list,
                                           marker: str):
    r"""Write the table of contents on multiple files.

    :parameter filenames: the files that needs to be read or modified.
    :parameter strings: the strings that will be written on the file. Each
        string is associated with one file.
    :parameter marker: a marker that will identify the start
        and the end of the string.
    :type filenames: list
    :type string: list
    :type marker: str
    :returns: None
    :rtype: None
    :raises: an fpyutils exception or a built-in exception.
    """
    assert len(filenames) == len(strings)
    for fname in filenames:
        assert isinstance(fname, str)
    for content in strings:
        assert isinstance(content, str)

    # Pair each file with its corresponding string and write them in order.
    for fname, content in zip(filenames, strings):
        write_string_on_file_between_markers(fname, content, marker)
def build_toc(filename: str,
              ordered: bool = False,
              no_links: bool = False,
              no_indentation: bool = False,
              no_list_coherence: bool = False,
              keep_header_levels: int = 3,
              parser: str = 'github',
              list_marker: str = '-') -> str:
    r"""Build the table of contents of a single file.

    :parameter filename: the file that needs to be read. Use ``-`` to read
        from standard input.
    :parameter ordered: decides whether to build an ordered list or not.
        Defaults to ``False``.
    :parameter no_links: disables the use of links.
        Defaults to ``False``.
    :parameter no_indentation: disables indentation in the list.
        Defaults to ``False``.
    :parameter no_list_coherence: disables the TOC list coherence check.
        Defaults to ``False``.
    :parameter keep_header_levels: the maximum level of headers to be
        considered as such when building the table of contents.
        Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
        Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
        characters of the list element. Defaults to ``-``.
    :type filename: str
    :type ordered: bool
    :type no_links: bool
    :type no_indentation: bool
    :type no_list_coherence: bool
    :type keep_header_levels: int
    :type parser: str
    :type list_marker: str
    :returns: toc, the corresponding table of contents of the file.
    :rtype: str
    :raises: a built-in exception.
    """
    toc = str()
    header_type_counter = dict()
    header_type_curr = 0
    header_type_prev = 0
    header_duplicate_counter = dict()
    no_of_indentation_spaces_prev = 0
    if filename == '-':
        f = sys.stdin
    else:
        f = open(filename, 'r')
    line = f.readline()
    # Fix: the original code built list_marker_log twice when ordered was
    # True (once before opening the file and once here); a single if-else
    # is sufficient.
    if ordered:
        list_marker_log = build_list_marker_log(parser, list_marker)
    else:
        list_marker_log = list()
    is_within_code_fence = False
    code_fence = None
    is_document_end = False
    if not no_indentation and not no_list_coherence:
        # if indentation and list coherence.
        indentation_list = build_indentation_list(parser)
    while line:
        # Document ending detection.
        #
        # This changes the state of is_within_code_fence if the
        # file has no closing fence markers. This serves no practial
        # purpose since the code would run correctly anyway. It is
        # however more sematically correct.
        #
        # See the unit tests (examples 95 and 96 of the github parser)
        # and the is_closing_code_fence function.
        if filename != '-':
            # stdin is not seekable.
            file_pointer_pos = f.tell()
            if f.readline() == str():
                is_document_end = True
            f.seek(file_pointer_pos)

        # Code fence detection.
        if is_within_code_fence:
            is_within_code_fence = not is_closing_code_fence(
                line, code_fence, is_document_end, parser)
            line = f.readline()
        else:
            code_fence = is_opening_code_fence(line, parser)
            if code_fence is not None:
                # Update the status of the next line.
                is_within_code_fence = True
                line = f.readline()
        if not is_within_code_fence or code_fence is None:
            # Header detection and gathering.
            header = get_md_header(line, header_duplicate_counter,
                                   keep_header_levels, parser, no_links)
            if header is not None:
                header_type_curr = header['type']
                # Take care of the ordered TOC.
                if ordered:
                    increase_index_ordered_list(header_type_counter,
                                                header_type_prev,
                                                header_type_curr, parser)
                    index = header_type_counter[header_type_curr]
                else:
                    index = 1
                # Take care of list indentations.
                if no_indentation:
                    no_of_indentation_spaces_curr = 0
                    # TOC list coherence checks are not necessary
                    # without indentation.
                else:
                    if not no_list_coherence:
                        if not toc_renders_as_coherent_list(
                                header_type_curr, indentation_list, parser):
                            raise TocDoesNotRenderAsCoherentList
                    no_of_indentation_spaces_curr = compute_toc_line_indentation_spaces(
                        header_type_curr, header_type_prev,
                        no_of_indentation_spaces_prev, parser, ordered,
                        list_marker, list_marker_log, index)
                # Build a single TOC line.
                toc_line_no_indent = build_toc_line_without_indentation(
                    header, ordered, no_links, index, parser, list_marker)
                # Save the TOC line with the indentation.
                toc += build_toc_line(toc_line_no_indent,
                                      no_of_indentation_spaces_curr) + '\n'
                header_type_prev = header_type_curr
                no_of_indentation_spaces_prev = no_of_indentation_spaces_curr
            # endif
        # endif
        line = f.readline()
    # endwhile
    f.close()
    return toc
def build_multiple_tocs(filenames: list,
                        ordered: bool = False,
                        no_links: bool = False,
                        no_indentation: bool = False,
                        no_list_coherence: bool = False,
                        keep_header_levels: int = 3,
                        parser: str = 'github',
                        list_marker: str = '-') -> list:
    r"""Parse files by line and build the table of contents of each file.

    :parameter filenames: the files that needs to be read.
    :parameter ordered: decides whether to build an ordered list or not.
        Defaults to ``False``.
    :parameter no_links: disables the use of links.
        Defaults to ``False``.
    :parameter no_indentation: disables indentation in the list.
        Defaults to ``False``.
    :parameter keep_header_levels: the maximum level of headers to be
        considered as such when building the table of contents.
        Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
        Defaults to ``github``.
    :type filenames: list
    :type ordered: bool
    :type no_links: bool
    :type no_indentation: bool
    :type keep_header_levels: int
    :type parser: str
    :returns: toc_struct, the corresponding table of contents for each input
        file.
    :rtype: list
    :raises: a built-in exception.
    """
    for fname in filenames:
        assert isinstance(fname, str)

    # An empty argument list means "read from standard input".
    if len(filenames) == 0:
        filenames.append('-')
    toc_struct = [
        build_toc(fname, ordered, no_links, no_indentation,
                  no_list_coherence, keep_header_levels, parser, list_marker)
        for fname in filenames
    ]
    return toc_struct
def increase_index_ordered_list(header_type_count: dict,
                                header_type_prev: int,
                                header_type_curr: int,
                                parser: str = 'github'):
    r"""Compute the current index for ordered list table of contents.

    :parameter header_type_count: the count of each header type.
    :parameter header_type_prev: the previous type of header (h[1-Inf]).
    :parameter header_type_curr: the current type of header (h[1-Inf]).
    :parameter parser: decides rules on how to generate ordered list markers.
        Defaults to ``github``.
    :type header_type_count: dict
    :type header_type_prev: int
    :type header_type_curr: int
    :type parser: str
    :returns: None
    :rtype: None
    :raises: GithubOverflowOrderedListMarker or a built-in exception.
    """
    # header_type_prev might be 0 while header_type_curr can't.
    assert header_type_prev >= 0
    assert header_type_curr >= 1

    # A previous type of 0 marks the start of a new table of contents.
    if header_type_prev == 0:
        header_type_prev = header_type_curr
    # Restart the counter for a brand new header type or when descending
    # into a more nested level.
    starts_new_index = (header_type_curr not in header_type_count
                        or header_type_prev < header_type_curr)
    if starts_new_index:
        header_type_count[header_type_curr] = 0
    header_type_count[header_type_curr] += 1
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        if header_type_count[header_type_curr] > md_parser['github']['list'][
                'ordered']['max_marker_number']:
            raise GithubOverflowOrderedListMarker
def build_list_marker_log(parser: str = 'github',
                          list_marker: str = '.') -> list:
    r"""Create a data structure that holds list marker information.

    :parameter parser: decides rules on how compute indentations.
        Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
        characters of the list element. Defaults to ``.``.
    :type parser: str
    :type list_marker: str
    :returns: list_marker_log, the data structure.
    :rtype: list
    :raises: a built-in exception.

    .. note::
        This function makes sense for ordered lists only.
    """
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker', 'redcarpet'):
        assert list_marker in md_parser[parser]['list']['ordered'][
            'closing_markers']

    list_marker_log = list()
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        # Every header level starts at the minimal ordered marker number.
        initial_marker = str(
            md_parser['github']['list']['ordered']['min_marker_number']
        ) + list_marker
        list_marker_log = [
            initial_marker
            for _ in range(md_parser['github']['header']['max_levels'])
        ]
    elif parser == 'redcarpet':
        # Redcarpet indentation rules do not need marker state.
        pass
    return list_marker_log
def build_toc_line_without_indentation(header: dict,
                                       ordered: bool = False,
                                       no_links: bool = False,
                                       index: int = 1,
                                       parser: str = 'github',
                                       list_marker: str = '-') -> str:
    r"""Return a list element of the table of contents.
    :parameter header: a data structure that contains the original
        text, the trimmed text and the type of header.
    :parameter ordered: if set to ``True``, numbers will be used
        as list ids, otherwise a dash character. Defaults
        to ``False``.
    :parameter no_links: disables the use of links. Defaults to ``False``.
    :parameter index: a number that will be used as list id in case of an
        ordered table of contents. Defaults to ``1``.
    :parameter parser: decides rules on how compute indentations.
        Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
        characters of the list element. Defaults to ``-``.
    :type header: dict
    :type ordered: bool
    :type no_links: bool
    :type index: int
    :type parser: str
    :type list_marker: str
    :returns: toc_line_no_indent, a single line of the table of contents
        without indentation.
    :rtype: str
    :raises: a built-in exception.
    """
    # Validate the header structure produced by get_md_header.
    assert 'type' in header
    assert 'text_original' in header
    assert 'text_anchor_link' in header
    assert isinstance(header['type'], int)
    assert isinstance(header['text_original'], str)
    assert isinstance(header['text_anchor_link'], str)
    assert header['type'] >= 1
    assert index >= 1
    # The list marker must be legal for the selected parser.
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        if ordered:
            assert list_marker in md_parser[parser]['list']['ordered'][
                'closing_markers']
        else:
            assert list_marker in md_parser[parser]['list']['unordered'][
                'bullet_markers']

    toc_line_no_indent = str()
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        if ordered:
            # Ordered lists prepend the numeric index to the closing marker,
            # e.g. '1.'.
            list_marker = str(index) + list_marker
        # FIXME: is this always correct?
        if no_links:
            line = header['text_original']
        else:
            # Build an inline markdown link: [text](#anchor).
            line = '[' + header['text_original'] + ']' + '(#' + header[
                'text_anchor_link'] + ')'
        toc_line_no_indent = list_marker + ' ' + line
    return toc_line_no_indent
def build_toc_line(toc_line_no_indent: str,
                   no_of_indentation_spaces: int = 0) -> str:
    r"""Prepend the requested indentation to a TOC line.

    :parameter toc_line_no_indent: the TOC line without indentation.
    :parameter no_of_indentation_spaces: the number of indentation spaces.
        Defaults to ``0``.
    :type toc_line_no_indent: str
    :type no_of_indentation_spaces: int
    :returns: toc_line, a single line of the table of contents.
    :rtype: str
    :raises: a built-in exception.
    """
    assert no_of_indentation_spaces >= 0
    return ' ' * no_of_indentation_spaces + toc_line_no_indent
def build_anchor_link(header_text_trimmed: str,
                      header_duplicate_counter: str,
                      parser: str = 'github') -> str:
    r"""Apply the specified slug rule to build the anchor link.

    :parameter header_text_trimmed: the text that needs to be transformed
        in a link.
    :parameter header_duplicate_counter: a data structure that keeps track of
        possible duplicate header links in order to avoid them. This is
        meaningful only for certain values of parser.
    :parameter parser: decides rules on how to generate anchor links.
        Defaults to ``github``.
    :type header_text_trimmed: str
    :type header_duplicate_counter: dict
    :type parser: str
    :returns: None if the specified parser is not recognized, or the anchor
        link, otherwise.
    :rtype: str
    :raises: a built-in exception.

    .. note::
        The licenses of each markdown parser algorithm are reported on
        the 'Markdown spec' documentation page.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        header_text_trimmed = header_text_trimmed.lower()
        # Remove punctuation: Keep spaces, hypens and "word characters"
        # only.
        header_text_trimmed = re.sub(r'[^\w\s\- ]', '', header_text_trimmed)
        header_text_trimmed = header_text_trimmed.replace(' ', '-')

        # Check for duplicates.
        ht = header_text_trimmed
        # Set the initial value if we are examining the first occurrency.
        # The state of header_duplicate_counter is available to the caller
        # functions.
        if header_text_trimmed not in header_duplicate_counter:
            header_duplicate_counter[header_text_trimmed] = 0
        if header_duplicate_counter[header_text_trimmed] > 0:
            header_text_trimmed = header_text_trimmed + '-' + str(
                header_duplicate_counter[header_text_trimmed])
        header_duplicate_counter[ht] += 1
        return header_text_trimmed
    elif parser == 'redcarpet':
        # To ensure full compatibility what follows is a direct translation
        # of the rndr_header_anchor C function used in redcarpet.
        STRIPPED = " -&+$,/:;=?@\"#{}|^~[]`\\*()%.!'"
        header_text_trimmed_len = len(header_text_trimmed)
        inserted = 0
        stripped = 0
        header_text_trimmed_middle_stage = ''
        # Fix: a while loop with a manually advanced index is required.
        # The original C code mutates ``i`` inside the loop body to skip
        # over ``<...>`` tags and ``&...;`` entities; a Python
        # ``for i in range(...)`` resets ``i`` on every iteration and
        # silently disabled that skipping.
        i = 0
        while i < header_text_trimmed_len:
            if header_text_trimmed[i] == '<':
                # Skip until the closing '>' (or the end of the string).
                while i < header_text_trimmed_len and header_text_trimmed[
                        i] != '>':
                    i += 1
            elif header_text_trimmed[i] == '&':
                # Skip until the closing ';' (or the end of the string).
                while i < header_text_trimmed_len and header_text_trimmed[
                        i] != ';':
                    i += 1
            # str.find() == -1 if character is not found in str.
            # https://docs.python.org/3.6/library/stdtypes.html?highlight=find#str.find
            elif not curses.ascii.isascii(
                    header_text_trimmed[i]) or STRIPPED.find(
                        header_text_trimmed[i]) != -1:
                if inserted and not stripped:
                    header_text_trimmed_middle_stage += '-'
                stripped = 1
            else:
                header_text_trimmed_middle_stage += header_text_trimmed[
                    i].lower()
                stripped = 0
                inserted += 1
            i += 1

        if stripped > 0 and inserted > 0:
            # Drop the trailing '-' produced by stripped characters at the
            # end of the text.
            header_text_trimmed_middle_stage = header_text_trimmed_middle_stage[
                0:-1]

        if inserted == 0 and header_text_trimmed_len > 0:
            hash = 5381
            for i in range(0, header_text_trimmed_len):
                # Get the unicode representation with ord.
                # Unicode should be equal to ASCII in ASCII's range of
                # characters.
                hash = ((hash << 5) + hash) + ord(header_text_trimmed[i])
            # This is equivalent to %x in C. In Python we don't have
            # the length problem so %x is equal to %lx in this case.
            # Apparently there is no %l in Python...
            header_text_trimmed_middle_stage = 'part-' + '{0:x}'.format(hash)

        return header_text_trimmed_middle_stage
def get_atx_heading(line: str,
                    keep_header_levels: int = 3,
                    parser: str = 'github',
                    no_links: bool = False):
    r"""Given a line extract the link label and its type.
    :parameter line: the line to be examined.
    :parameter keep_header_levels: the maximum level of headers to be
        considered as such when building the table of contents.
        Defaults to ``3``.
    :parameter parser: decides rules on how to generate the anchor text.
        Defaults to ``github``.
    :parameter no_links: disables the use of links.
    :type line: str
    :type keep_header_levels: int
    :type parser: str
    :type no_links: bool
    :returns: None if the line does not contain header elements according to
        the rules of the selected markdown parser, or a tuple containing the
        header type and the trimmed header text, according to the selected
        parser rules, otherwise.
    :rtype: typing.Optional[tuple]
    :raises: GithubEmptyLinkLabel or GithubOverflowCharsLinkLabel or a
        built-in exception.
    """
    assert keep_header_levels >= 1
    if len(line) == 0:
        return None
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        # A backslash-escaped '#' is not a header.
        if line[0] == '\u005c':
            return None
        # Count the leading spaces; too many means an indented code block,
        # not a header.
        i = 0
        while i < len(line) and line[i] == ' ' and i <= md_parser['github'][
                'header']['max_space_indentation']:
            i += 1
        if i > md_parser['github']['header']['max_space_indentation']:
            return None
        offset = i
        # Count the '#' characters that determine the header level.
        while i < len(line) and line[i] == '#' and i <= md_parser['github'][
                'header']['max_levels'] + offset:
            i += 1
        if i - offset > md_parser['github']['header'][
                'max_levels'] or i - offset > keep_header_levels or i - offset == 0:
            return None
        current_headers = i - offset
        # Include special cases for line endings which should not be
        # discarded as non-ATX headers.
        if i < len(line) and (line[i] != ' ' and line[i] != '\u000a'
                              and line[i] != '\u000d'):
            return None
        i += 1
        # Exclude leading whitespaces after the ATX header identifier.
        while i < len(line) and line[i] == ' ':
            i += 1
        # An algorithm to find the start and the end of the closing sequence.
        # The closing sequence includes all the significant part of the
        # string. This algorithm has a complexity of O(n) with n being the
        # length of the line.
        cs_start = i
        cs_end = cs_start
        # Scan the reversed line to find trailing spaces and '#' runs.
        line_prime = line[::-1]
        hash_char_rounds = 0
        go_on = True
        i = 0
        i_prev = i
        while i < len(line) - cs_start - 1 and go_on:
            if ((line_prime[i] != ' ' and line_prime[i] != '#')
                    or hash_char_rounds > 1):
                if i > i_prev:
                    cs_end = len(line_prime) - i_prev
                else:
                    cs_end = len(line_prime) - i
                go_on = False
            while go_on and line_prime[i] == ' ':
                i += 1
            i_prev = i
            while go_on and line_prime[i] == '#':
                i += 1
            if i > i_prev:
                hash_char_rounds += 1
        # Instead of changing the whole algorithm to check for line
        # endings, this seems cleaner.
        find_newline = line.find('\u000a')
        find_carriage_return = line.find('\u000d')
        if find_newline != -1:
            cs_end = min(cs_end, find_newline)
        if find_carriage_return != -1:
            cs_end = min(cs_end, find_carriage_return)
        final_line = line[cs_start:cs_end]
        if not no_links:
            # A trailing backslash would escape the closing ')' of the
            # link: neutralize it with a space.
            if len(final_line) > 0 and final_line[-1] == '\u005c':
                final_line += ' '
            # A link label made only of whitespace is not allowed.
            if len(
                    final_line.strip('\u0020').strip('\u0009').strip('\u000a').
                    strip('\u000b').strip('\u000c').strip('\u000d')) == 0:
                raise GithubEmptyLinkLabel
            if len(final_line
                   ) > md_parser['github']['link']['max_chars_label']:
                raise GithubOverflowCharsLinkLabel
            # Escape square brackets if not already escaped.
            i = 0
            while i < len(final_line):
                if (final_line[i] == '[' or final_line[i] == ']'):
                    j = i - 1
                    # Count the backslashes immediately preceding the
                    # bracket: an even number means the bracket itself is
                    # unescaped.
                    consecutive_escape_characters = 0
                    while j >= 0 and final_line[j] == '\u005c':
                        consecutive_escape_characters += 1
                        j -= 1
                    if ((consecutive_escape_characters > 0
                         and consecutive_escape_characters % 2 == 0)
                            or consecutive_escape_characters == 0):
                        tmp = '\u005c'
                    else:
                        tmp = str()
                    final_line = final_line[0:i] + tmp + final_line[i:len(
                        final_line)]
                    i += 1 + len(tmp)
                else:
                    i += 1
    elif parser == 'redcarpet':
        if line[0] != '#':
            return None
        i = 0
        while (i < len(line)
               and i < md_parser['redcarpet']['header']['max_levels']
               and line[i] == '#'):
            i += 1
        current_headers = i
        if i < len(line) and line[i] != ' ':
            return None
        while i < len(line) and line[i] == ' ':
            i += 1
        # Trim trailing newline, '#' run and spaces from the right.
        end = i
        while end < len(line) and line[end] != '\n':
            end += 1
        while end > 0 and line[end - 1] == '#':
            end -= 1
        while end > 0 and line[end - 1] == ' ':
            end -= 1
        if end > i:
            final_line = line
            if not no_links and len(final_line) > 0 and final_line[-1] == '\\':
                final_line += ' '
                end += 1
            final_line = final_line[i:end]
        else:
            return None
    # TODO: escape or remove '[', ']', '(', ')' in inline links for redcarpet,
    # TODO: check link label rules for redcarpet.
    return current_headers, final_line
def get_md_header(header_text_line: str,
                  header_duplicate_counter: dict,
                  keep_header_levels: int = 3,
                  parser: str = 'github',
                  no_links: bool = False) -> dict:
    r"""Build a data structure with the elements needed to create a TOC line.

    :parameter header_text_line: a single markdown line that needs to be
        transformed into a TOC line.
    :parameter header_duplicate_counter: a data structure that contains the
        number of occurrencies of each header anchor link. This is used to
        avoid duplicate anchor links and it is meaningful only for certain
        values of parser.
    :parameter keep_header_levels: the maximum level of headers to be
        considered as such when building the table of contents.
        Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
        Defaults to ``github``.
    :type header_text_line: str
    :type header_duplicate_counter: dict
    :type keep_header_levels: int
    :type parser: str
    :returns: None if the input line does not correspond to one of the
        designated cases or a data structure containing the necessary
        components to create a table of contents line, otherwise.
    :rtype: dict
    :raises: a built-in exception.

    .. note::
        This works like a wrapper to other functions.
    """
    parsed = get_atx_heading(header_text_line, keep_header_levels, parser,
                             no_links)
    if parsed is None:
        # Not a header line for the selected parser.
        return None
    header_type, header_text_trimmed = parsed
    return {
        'type': header_type,
        'text_original': header_text_trimmed,
        'text_anchor_link': build_anchor_link(header_text_trimmed,
                                              header_duplicate_counter,
                                              parser),
    }
def is_valid_code_fence_indent(line: str, parser: str = 'github') -> bool:
    r"""Determine if the given line has valid indentation for a code block fence.

    :parameter line: a single markdown line to evaluate.
    :parameter parser: decides rules on how to generate the anchor text.
        Defaults to ``github``.
    :type line: str
    :type parser: str
    :returns: True if the given line has valid indentation or False
        otherwise.
    :rtype: bool
    :raises: a built-in exception.
    """
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        no_of_leading_spaces = len(line) - len(line.lstrip(' '))
        return (no_of_leading_spaces <=
                md_parser['github']['code fence']['min_marker_characters'])
    elif parser == 'redcarpet':
        # TODO.
        return False
def is_opening_code_fence(line: str, parser: str = 'github'):
    r"""Determine if the given line is possibly the opening of a fenced code block.
    :parameter line: a single markdown line to evaluate.
    :parameter parser: decides rules on how to generate the anchor text.
        Defaults to ``github``.
    :type line: str
    :type parser: str
    :returns: None if the input line is not an opening code fence. Otherwise,
        returns the string which will identify the closing code fence
        according to the input parsers' rules.
    :rtype: typing.Optional[str]
    :raises: a built-in exception.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        markers = md_parser['github']['code fence']['marker']
        marker_min_length = md_parser['github']['code fence'][
            'min_marker_characters']

        # Over-indented fences are not fences at all.
        if not is_valid_code_fence_indent(line):
            return None

        line = line.lstrip(' ').rstrip('\n')
        # The line must start with at least the minimum run of one of the
        # fence characters (backtick or tilde runs).
        if not line.startswith(
                (markers[0] * marker_min_length, markers[1] * marker_min_length)):
            return None

        # Everything after the fence characters is the info string.
        if line == len(line) * line[0]:
            info_string = str()
        else:
            info_string = line.lstrip(line[0])

        # Backticks or tildes in info string are explicitly forbidden.
        if markers[0] in info_string or markers[1] in info_string:
            return None
        # Solves example 107. See:
        # https://github.github.com/gfm/#example-107
        if line.rstrip(markers[0]) != line and line.rstrip(markers[1]) != line:
            return None

        # Return only the leading fence marker run: this is what the
        # closing fence will be matched against.
        return line.rstrip(info_string)
    elif parser == 'redcarpet':
        # TODO.
        return None
def is_closing_code_fence(line: str,
                          fence: str,
                          is_document_end: bool = False,
                          parser: str = 'github') -> bool:
    r"""Determine if the given line is the end of a fenced code block.
    :parameter line: a single markdown line to evaluate.
    :parameter fence: a sequence of backticks or tildes marking the start of
        the current code block. This is usually the return value of the
        is_opening_code_fence function.
    :parameter is_document_end: This variable tells the function that the
        end of the file is reached.
        Defaults to ``False``.
    :parameter parser: decides rules on how to generate the anchor text.
        Defaults to ``github``.
    :type line: str
    :type fence: str
    :type is_document_end: bool
    :type parser: str
    :returns: True if the line ends the current code block. False otherwise.
    :rtype: bool
    :raises: a built-in exception.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        markers = md_parser['github']['code fence']['marker']
        marker_min_length = md_parser['github']['code fence'][
            'min_marker_characters']
        if not is_valid_code_fence_indent(line):
            return False
        # Remove opening fence indentation after it is known to be valid.
        fence = fence.lstrip(' ')
        # Check if fence uses valid characters.
        if not fence.startswith((markers[0], markers[1])):
            return False
        if len(fence) < marker_min_length:
            return False
        # Additional security.
        fence = fence.rstrip('\n').rstrip(' ')
        # Check that all fence characters are equal.
        if fence != len(fence) * fence[0]:
            return False
        # We might be inside a code block if this is not closed
        # by the end of the document, according to example 95 and 96.
        # This means that the end of the document corresponds to
        # a closing code fence.
        # Of course we first have to check that fence is a valid opening
        # code fence marker.
        # See:
        # https://github.github.com/gfm/#example-95
        # https://github.github.com/gfm/#example-96
        if is_document_end:
            return True
        # Check if line uses the same character as fence.
        line = line.lstrip(' ')
        if not line.startswith(fence):
            return False
        line = line.rstrip('\n').rstrip(' ')
        # Solves example 93 and 94. See:
        # https://github.github.com/gfm/#example-93
        # https://github.github.com/gfm/#example-94
        if len(line) < len(fence):
            return False
        # Closing fence must not have alien characters.
        if line != len(line) * line[0]:
            return False
        return True
    elif parser == 'redcarpet':
        # TODO.
        return False
def build_indentation_list(parser: str = 'github'):
    r"""Create a data structure that holds the state of indentations.
    :parameter parser: decides the length of the list.
        Defaults to ``github``.
    :type parser: str
    :returns: indentation_list, a list that contains the state of
        indentations given a header type. Every cell starts as ``False``
        (header level not yet encountered).
    :rtype: list
    :raises: a built-in exception.
    """
    indentation_list = list()
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        # Idiomatic list initialization instead of a manual append loop.
        indentation_list = [False] * md_parser[parser]['header']['max_levels']
    return indentation_list
def toc_renders_as_coherent_list(
        header_type_curr: int = 1,
        indentation_list: list = None,
        parser: str = 'github') -> bool:
    r"""Check if the TOC will render as a working list.
    :parameter header_type_curr: the current type of header (h[1-Inf]).
        Defaults to ``1``.
    :parameter indentation_list: the state of indentations as returned by
        build_indentation_list. This list is updated in place so that it
        can be carried across calls. If ``None``, a fresh list is built.
        Defaults to ``None``.
    :parameter parser: decides rules on how to generate ordered list markers.
        Defaults to ``github``.
    :type header_type_curr: int
    :type indentation_list: list
    :type parser: str
    :returns: renders_as_list
    :rtype: bool
    :raises: a built-in exception.
    """
    assert header_type_curr >= 1
    # The original signature used a mutable default argument
    # (build_indentation_list('github')): it was evaluated once at
    # definition time and then mutated below, so the same list was shared
    # across every call relying on the default. Use a None sentinel.
    if indentation_list is None:
        indentation_list = build_indentation_list(parser)
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        assert len(
            indentation_list) == md_parser[parser]['header']['max_levels']
        for e in indentation_list:
            assert isinstance(e, bool)
    renders_as_list = True
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        # Update with current information.
        indentation_list[header_type_curr - 1] = True
        # Reset next cells to False, as a detection mechanism.
        # Use the selected parser's max_levels consistently with the
        # assertion above (the original hard-coded 'github' here).
        for i in range(header_type_curr,
                       md_parser[parser]['header']['max_levels']):
            indentation_list[i] = False
        # Check for previous False cells. If there is a "hole" in the list
        # it means that the TOC will have "wrong" indentation spaces, thus
        # either not rendering as an HTML list or not as the user intended.
        i = header_type_curr - 1
        while i >= 0 and indentation_list[i]:
            i -= 1
        if i >= 0:
            renders_as_list = False
    return renders_as_list
# This module is a library: running it directly is a deliberate no-op.
if __name__ == '__main__':
    pass
|
frnmst/md-toc | md_toc/api.py | build_toc_line_without_indentation | python | def build_toc_line_without_indentation(header: dict,
ordered: bool = False,
no_links: bool = False,
index: int = 1,
parser: str = 'github',
list_marker: str = '-') -> str:
r"""Return a list element of the table of contents.
:parameter header: a data structure that contains the original
text, the trimmed text and the type of header.
:parameter ordered: if set to ``True``, numbers will be used
as list ids, otherwise a dash character. Defaults
to ``False``.
:parameter no_links: disables the use of links. Defaults to ``False``.
:parameter index: a number that will be used as list id in case of an
ordered table of contents. Defaults to ``1``.
:parameter parser: decides rules on how compute indentations.
Defaults to ``github``.
:parameter list_marker: a string that contains some of the first
characters of the list element. Defaults to ``-``.
:type header: dict
:type ordered: bool
:type no_links: bool
:type index: int
:type parser: str
:type list_marker: str
:returns: toc_line_no_indent, a single line of the table of contents
without indentation.
:rtype: str
:raises: a built-in exception.
"""
assert 'type' in header
assert 'text_original' in header
assert 'text_anchor_link' in header
assert isinstance(header['type'], int)
assert isinstance(header['text_original'], str)
assert isinstance(header['text_anchor_link'], str)
assert header['type'] >= 1
assert index >= 1
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker' or parser == 'redcarpet'):
if ordered:
assert list_marker in md_parser[parser]['list']['ordered'][
'closing_markers']
else:
assert list_marker in md_parser[parser]['list']['unordered'][
'bullet_markers']
toc_line_no_indent = str()
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker' or parser == 'redcarpet'):
if ordered:
list_marker = str(index) + list_marker
# FIXME: is this always correct?
if no_links:
line = header['text_original']
else:
line = '[' + header['text_original'] + ']' + '(#' + header[
'text_anchor_link'] + ')'
toc_line_no_indent = list_marker + ' ' + line
return toc_line_no_indent | r"""Return a list element of the table of contents.
:parameter header: a data structure that contains the original
text, the trimmed text and the type of header.
:parameter ordered: if set to ``True``, numbers will be used
as list ids, otherwise a dash character. Defaults
to ``False``.
:parameter no_links: disables the use of links. Defaults to ``False``.
:parameter index: a number that will be used as list id in case of an
ordered table of contents. Defaults to ``1``.
:parameter parser: decides rules on how compute indentations.
Defaults to ``github``.
:parameter list_marker: a string that contains some of the first
characters of the list element. Defaults to ``-``.
:type header: dict
:type ordered: bool
:type no_links: bool
:type index: int
:type parser: str
:type list_marker: str
:returns: toc_line_no_indent, a single line of the table of contents
without indentation.
:rtype: str
:raises: a built-in exception. | train | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/api.py#L486-L549 | null | #
# api.py
#
# Copyright (C) 2017-2019 frnmst (Franco Masotti) <franco.masotti@live.com>
#
# This file is part of md-toc.
#
# md-toc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# md-toc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with md-toc. If not, see <http://www.gnu.org/licenses/>.
#
"""The main file."""
import fpyutils
import re
import curses.ascii
import sys
from .exceptions import (GithubOverflowCharsLinkLabel, GithubEmptyLinkLabel,
GithubOverflowOrderedListMarker,
StdinIsNotAFileToBeWritten,
TocDoesNotRenderAsCoherentList)
from .constants import common_defaults
from .constants import parser as md_parser
def write_string_on_file_between_markers(filename: str, string: str,
                                         marker: str):
    r"""Write the table of contents on a single file.
    :parameter filename: the file that needs to be read or modified.
    :parameter string: the string that will be written on the file.
    :parameter marker: a marker that will identify the start
        and the end of the string.
    :type filename: str
    :type string: str
    :type marker: str
    :returns: None
    :rtype: None
    :raises: StdinIsNotAFileToBeWritten or an fpyutils exception
        or a built-in exception.
    """
    if filename == '-':
        raise StdinIsNotAFileToBeWritten
    # The string is surrounded by two marker lines so a later run can find
    # and replace it again.
    final_string = marker + '\n\n' + string.rstrip() + '\n\n' + marker + '\n'
    # Only the first two occurrences of marker are relevant: they delimit
    # the previously written block, if any.
    marker_line_positions = fpyutils.get_line_matches(
        filename, marker, 2, loose_matching=True)
    if 1 in marker_line_positions:
        # Remove the old block: either both markers (and everything in
        # between) or just the single marker line found.
        if 2 in marker_line_positions:
            fpyutils.remove_line_interval(filename, marker_line_positions[1],
                                          marker_line_positions[2], filename)
        else:
            fpyutils.remove_line_interval(filename, marker_line_positions[1],
                                          marker_line_positions[1], filename)
        # Insert the new block where the first marker used to be.
        # NOTE(review): if no marker exists in the file, nothing is
        # written at all — confirm this is the intended behavior.
        fpyutils.insert_string_at_line(
            filename,
            final_string,
            marker_line_positions[1],
            filename,
            append=False)
def write_strings_on_files_between_markers(filenames: list, strings: list,
                                           marker: str):
    r"""Write the table of contents on multiple files.
    :parameter filenames: the files that need to be read or modified.
    :parameter strings: the strings that will be written on the files. Each
        string is associated with one file.
    :parameter marker: a marker that will identify the start
        and the end of the string.
    :type filenames: list
    :type strings: list
    :type marker: str
    :returns: None
    :rtype: None
    :raises: an fpyutils exception or a built-in exception.
    """
    assert len(filenames) == len(strings)
    for f in filenames:
        assert isinstance(f, str)
    for s in strings:
        assert isinstance(s, str)
    # Iterate pairwise instead of keeping a manual index counter.
    for f, s in zip(filenames, strings):
        write_string_on_file_between_markers(f, s, marker)
def build_toc(filename: str,
              ordered: bool = False,
              no_links: bool = False,
              no_indentation: bool = False,
              no_list_coherence: bool = False,
              keep_header_levels: int = 3,
              parser: str = 'github',
              list_marker: str = '-') -> str:
    r"""Build the table of contents of a single file.
    :parameter filename: the file that needs to be read. If it is ``-``
        the table of contents is built from standard input.
    :parameter ordered: decides whether to build an ordered list or not.
        Defaults to ``False``.
    :parameter no_links: disables the use of links.
        Defaults to ``False``.
    :parameter no_indentation: disables indentation in the list.
        Defaults to ``False``.
    :parameter no_list_coherence: disables checks that the TOC renders
        as a coherent list. Defaults to ``False``.
    :parameter keep_header_levels: the maximum level of headers to be
        considered as such when building the table of contents.
        Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
        Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
        characters of the list element. Defaults to ``-``.
    :type filename: str
    :type ordered: bool
    :type no_links: bool
    :type no_indentation: bool
    :type no_list_coherence: bool
    :type keep_header_levels: int
    :type parser: str
    :type list_marker: str
    :returns: toc, the corresponding table of contents of the file.
    :rtype: str
    :raises: TocDoesNotRenderAsCoherentList or a built-in exception.
    """
    toc = str()
    header_type_counter = dict()
    header_type_curr = 0
    header_type_prev = 0
    header_duplicate_counter = dict()
    no_of_indentation_spaces_prev = 0
    # Build the list marker log exactly once. The original code computed
    # build_list_marker_log twice when ordered was True.
    if ordered:
        list_marker_log = build_list_marker_log(parser, list_marker)
    else:
        list_marker_log = list()
    if filename == '-':
        f = sys.stdin
    else:
        f = open(filename, 'r')
    line = f.readline()
    is_within_code_fence = False
    code_fence = None
    is_document_end = False
    if not no_indentation and not no_list_coherence:
        # if indentation and list coherence.
        indentation_list = build_indentation_list(parser)
    while line:
        # Document ending detection.
        #
        # This changes the state of is_within_code_fence if the
        # file has no closing fence markers. This serves no practical
        # purpose since the code would run correctly anyway. It is
        # however more semantically correct.
        #
        # See the unit tests (examples 95 and 96 of the github parser)
        # and the is_closing_code_fence function.
        if filename != '-':
            # stdin is not seekable.
            file_pointer_pos = f.tell()
            if f.readline() == str():
                is_document_end = True
            f.seek(file_pointer_pos)
        # Code fence detection.
        if is_within_code_fence:
            is_within_code_fence = not is_closing_code_fence(
                line, code_fence, is_document_end, parser)
            line = f.readline()
        else:
            code_fence = is_opening_code_fence(line, parser)
            if code_fence is not None:
                # Update the status of the next line.
                is_within_code_fence = True
                line = f.readline()
        if not is_within_code_fence or code_fence is None:
            # Header detection and gathering.
            header = get_md_header(line, header_duplicate_counter,
                                   keep_header_levels, parser, no_links)
            if header is not None:
                header_type_curr = header['type']
                # Take care of the ordered TOC.
                if ordered:
                    increase_index_ordered_list(header_type_counter,
                                                header_type_prev,
                                                header_type_curr, parser)
                    index = header_type_counter[header_type_curr]
                else:
                    index = 1
                # Take care of list indentations.
                if no_indentation:
                    no_of_indentation_spaces_curr = 0
                    # TOC list coherence checks are not necessary
                    # without indentation.
                else:
                    if not no_list_coherence:
                        if not toc_renders_as_coherent_list(
                                header_type_curr, indentation_list, parser):
                            raise TocDoesNotRenderAsCoherentList
                    no_of_indentation_spaces_curr = compute_toc_line_indentation_spaces(
                        header_type_curr, header_type_prev,
                        no_of_indentation_spaces_prev, parser, ordered,
                        list_marker, list_marker_log, index)
                # Build a single TOC line.
                toc_line_no_indent = build_toc_line_without_indentation(
                    header, ordered, no_links, index, parser, list_marker)
                # Save the TOC line with the indentation.
                toc += build_toc_line(toc_line_no_indent,
                                      no_of_indentation_spaces_curr) + '\n'
                header_type_prev = header_type_curr
                no_of_indentation_spaces_prev = no_of_indentation_spaces_curr
            # endif
        # endif
        line = f.readline()
    # endwhile
    # Do not close sys.stdin: it does not belong to this function and the
    # caller may still need it. The original code closed it unconditionally.
    if filename != '-':
        f.close()
    return toc
def build_multiple_tocs(filenames: list,
                        ordered: bool = False,
                        no_links: bool = False,
                        no_indentation: bool = False,
                        no_list_coherence: bool = False,
                        keep_header_levels: int = 3,
                        parser: str = 'github',
                        list_marker: str = '-') -> list:
    r"""Parse files by line and build the table of contents of each file.
    :parameter filenames: the files that need to be read. If the list is
        empty, standard input (``-``) is used instead.
    :parameter ordered: decides whether to build an ordered list or not.
        Defaults to ``False``.
    :parameter no_links: disables the use of links.
        Defaults to ``False``.
    :parameter no_indentation: disables indentation in the list.
        Defaults to ``False``.
    :parameter no_list_coherence: disables checks that the TOC renders
        as a coherent list. Defaults to ``False``.
    :parameter keep_header_levels: the maximum level of headers to be
        considered as such when building the table of contents.
        Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
        Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
        characters of the list element. Defaults to ``-``.
    :type filenames: list
    :type ordered: bool
    :type no_links: bool
    :type no_indentation: bool
    :type no_list_coherence: bool
    :type keep_header_levels: int
    :type parser: str
    :type list_marker: str
    :returns: toc_struct, the corresponding table of contents for each input
        file.
    :rtype: list
    :raises: a built-in exception.
    """
    for f in filenames:
        assert isinstance(f, str)
    # Rebind locally instead of appending to the caller's list: the
    # original code mutated the input argument as a side effect.
    if len(filenames) == 0:
        filenames = ['-']
    # Replace the manual while/index loop with a comprehension.
    toc_struct = [
        build_toc(filename, ordered, no_links, no_indentation,
                  no_list_coherence, keep_header_levels, parser, list_marker)
        for filename in filenames
    ]
    return toc_struct
def increase_index_ordered_list(header_type_count: dict,
                                header_type_prev: int,
                                header_type_curr: int,
                                parser: str = 'github'):
    r"""Compute the current index for ordered list table of contents.
    :parameter header_type_count: the count of each header type.
        This dict is updated in place.
    :parameter header_type_prev: the previous type of header (h[1-Inf]).
    :parameter header_type_curr: the current type of header (h[1-Inf]).
    :parameter parser: decides rules on how to generate ordered list markers.
        Defaults to ``github``.
    :type header_type_count: dict
    :type header_type_prev: int
    :type header_type_curr: int
    :type parser: str
    :returns: None
    :rtype: None
    :raises: GithubOverflowOrderedListMarker or a built-in exception.
    """
    # header_type_prev might be 0 while header_type_curr can't.
    assert header_type_prev >= 0
    assert header_type_curr >= 1
    # Base cases for a new table of contents or a new index type.
    if header_type_prev == 0:
        # Local rebinding only: it makes the following condition compare
        # header_type_curr against itself on the very first header, so the
        # counter below starts from 0.
        header_type_prev = header_type_curr
    # A counter is (re)initialized when this header type was never seen or
    # when descending into a deeper nesting level.
    if (header_type_curr not in header_type_count
            or header_type_prev < header_type_curr):
        header_type_count[header_type_curr] = 0
    header_type_count[header_type_curr] += 1
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        # Refuse to produce an ordered list marker larger than the maximum
        # number these parsers accept.
        if header_type_count[header_type_curr] > md_parser['github']['list'][
                'ordered']['max_marker_number']:
            raise GithubOverflowOrderedListMarker
def build_list_marker_log(parser: str = 'github',
                          list_marker: str = '.') -> list:
    r"""Create a data structure that holds list marker information.
    :parameter parser: decides rules on how compute indentations.
        Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
        characters of the list element. Defaults to ``-``.
    :type parser: str
    :type list_marker: str
    :returns: list_marker_log, the data structure: one marker string per
        possible header level, each initialized to the minimum marker
        number followed by list_marker.
    :rtype: list
    :raises: a built-in exception.
    .. note::
        This function makes sense for ordered lists only.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        assert list_marker in md_parser[parser]['list']['ordered'][
            'closing_markers']
    list_marker_log = list()
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        # Every level starts from the same initial marker string.
        initial_marker = str(
            md_parser['github']['list']['ordered']['min_marker_number']
        ) + list_marker
        list_marker_log = [
            initial_marker
            for _ in range(md_parser['github']['header']['max_levels'])
        ]
    elif parser == 'redcarpet':
        pass
    return list_marker_log
def compute_toc_line_indentation_spaces(
        header_type_curr: int = 1,
        header_type_prev: int = 0,
        no_of_indentation_spaces_prev: int = 0,
        parser: str = 'github',
        ordered: bool = False,
        list_marker: str = '-',
        list_marker_log: list = None,
        index: int = 1) -> int:
    r"""Compute the number of indentation spaces for the TOC list element.
    :parameter header_type_curr: the current type of header (h[1-Inf]).
        Defaults to ``1``.
    :parameter header_type_prev: the previous type of header (h[1-Inf]).
        Defaults to ``0``.
    :parameter no_of_indentation_spaces_prev: the number of previous
        indentation spaces.
        Defaults to ``0``.
    :parameter parser: decides rules on how compute indentations.
        Defaults to ``github``.
    :parameter ordered: if set to ``True``, numbers will be used
        as list ids, otherwise a dash character.
        Defaults to ``False``.
    :parameter list_marker: a string that contains some of the first
        characters of the list element.
        Defaults to ``-``.
    :parameter list_marker_log: a data structure that holds list marker
        information for ordered lists. This list is updated in place so it
        can be carried across calls. If ``None``, a fresh structure
        equivalent to ``build_list_marker_log('github', '.')`` is built.
        Defaults to ``None``.
    :parameter index: a number that will be used as list id in case of an
        ordered table of contents. Defaults to ``1``.
    :type header_type_curr: int
    :type header_type_prev: int
    :type no_of_indentation_spaces_prev: int
    :type parser: str
    :type ordered: bool
    :type list_marker: str
    :type list_marker_log: list
    :type index: int
    :returns: no_of_indentation_spaces_curr, the number of indentation spaces
        for the list element.
    :rtype: int
    :raises: a built-in exception.
    .. note::
        Please note that this function
        assumes that no_of_indentation_spaces_prev contains the correct
        number of spaces.
    """
    # The original signature used a mutable default argument
    # (build_list_marker_log('github', '.')): it was evaluated once at
    # definition time and then mutated below, so the same list would be
    # shared across every call relying on the default. Use a None sentinel
    # and build a fresh log per call instead.
    if list_marker_log is None:
        list_marker_log = build_list_marker_log('github', '.')
    assert header_type_curr >= 1
    assert header_type_prev >= 0
    assert no_of_indentation_spaces_prev >= 0
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        if ordered:
            assert list_marker in md_parser[parser]['list']['ordered'][
                'closing_markers']
        else:
            assert list_marker in md_parser[parser]['list']['unordered'][
                'bullet_markers']
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        if ordered:
            assert len(
                list_marker_log) == md_parser['github']['header']['max_levels']
            for e in list_marker_log:
                assert isinstance(e, str)
    assert index >= 1
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        if header_type_prev == 0:
            # Base case for the first toc line.
            no_of_indentation_spaces_curr = 0
        elif header_type_curr == header_type_prev:
            # Base case for same indentation.
            no_of_indentation_spaces_curr = no_of_indentation_spaces_prev
        else:
            if ordered:
                list_marker_prev = str(list_marker_log[header_type_curr - 1])
            else:
                # list_marker for unordered lists will always be 1 character.
                list_marker_prev = list_marker
            # Generic cases.
            if header_type_curr > header_type_prev:
                # More indentation.
                no_of_indentation_spaces_curr = (
                    no_of_indentation_spaces_prev + len(list_marker_prev) +
                    len(' '))
            elif header_type_curr < header_type_prev:
                # Less indentation.
                no_of_indentation_spaces_curr = (
                    no_of_indentation_spaces_prev -
                    (len(list_marker_prev) + len(' ')))
            # Reset older nested list indices. If this is not performed then
            # future nested ordered lists will rely on incorrect data to
            # compute indentations.
            if ordered:
                for i in range((header_type_curr - 1) + 1,
                               md_parser['github']['header']['max_levels']):
                    list_marker_log[i] = str(
                        md_parser['github']['list']['ordered']
                        ['min_marker_number']) + list_marker
        # Update the data structure.
        if ordered:
            list_marker_log[header_type_curr - 1] = str(index) + list_marker
    elif parser == 'redcarpet':
        no_of_indentation_spaces_curr = 4 * (header_type_curr - 1)
    return no_of_indentation_spaces_curr
def build_toc_line(toc_line_no_indent: str,
                   no_of_indentation_spaces: int = 0) -> str:
    r"""Prepend the requested indentation to a TOC line.
    :parameter toc_line_no_indent: the TOC line without indentation.
    :parameter no_of_indentation_spaces: the number of indentation spaces.
        Defaults to ``0``.
    :type toc_line_no_indent: str
    :type no_of_indentation_spaces: int
    :returns: toc_line, a single line of the table of contents.
    :rtype: str
    :raises: a built-in exception.
    """
    assert no_of_indentation_spaces >= 0
    return ' ' * no_of_indentation_spaces + toc_line_no_indent
def build_anchor_link(header_text_trimmed: str,
                      header_duplicate_counter: str,
                      parser: str = 'github') -> str:
    r"""Apply the specified slug rule to build the anchor link.
    :parameter header_text_trimmed: the text that needs to be transformed
        in a link.
    :parameter header_duplicate_counter: a data structure that keeps track of
        possible duplicate header links in order to avoid them. This is
        meaningful only for certain values of parser.
    :parameter parser: decides rules on how to generate anchor links.
        Defaults to ``github``.
    :type header_text_trimmed: str
    :type header_duplicate_counter: dict
    :type parser: str
    :returns: None if the specified parser is not recognized, or the anchor
        link, otherwise.
    :rtype: str
    :raises: a built-in exception.
    .. note::
        The licenses of each markdown parser algorithm are reported on
        the 'Markdown spec' documentation page.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        header_text_trimmed = header_text_trimmed.lower()
        # Remove punctuation: Keep spaces, hypens and "word characters"
        # only.
        header_text_trimmed = re.sub(r'[^\w\s\- ]', '', header_text_trimmed)
        header_text_trimmed = header_text_trimmed.replace(' ', '-')
        # Check for duplicates.
        ht = header_text_trimmed
        # Set the initial value if we are examining the first occurrency.
        # The state of header_duplicate_counter is available to the caller
        # functions.
        if header_text_trimmed not in header_duplicate_counter:
            header_duplicate_counter[header_text_trimmed] = 0
        if header_duplicate_counter[header_text_trimmed] > 0:
            header_text_trimmed = header_text_trimmed + '-' + str(
                header_duplicate_counter[header_text_trimmed])
        header_duplicate_counter[ht] += 1
        return header_text_trimmed
    elif parser == 'redcarpet':
        # To ensure full compatibility what follows is a direct translation
        # of the rndr_header_anchor C function used in redcarpet.
        # A while loop is used instead of the original for loop: in Python,
        # reassigning the loop variable of a for loop has no effect on the
        # next iteration, so the '<' and '&' skip loops below silently did
        # nothing and the translation diverged from the C code.
        STRIPPED = " -&+$,/:;=?@\"#{}|^~[]`\\*()%.!'"
        header_text_trimmed_len = len(header_text_trimmed)
        inserted = 0
        stripped = 0
        header_text_trimmed_middle_stage = ''
        i = 0
        while i < header_text_trimmed_len:
            if header_text_trimmed[i] == '<':
                # Skip an HTML-like tag up to its closing '>'.
                while i < header_text_trimmed_len and header_text_trimmed[
                        i] != '>':
                    i += 1
            elif header_text_trimmed[i] == '&':
                # Skip an HTML entity up to its closing ';'.
                while i < header_text_trimmed_len and header_text_trimmed[
                        i] != ';':
                    i += 1
            # str.find() == -1 if character is not found in str.
            # https://docs.python.org/3.6/library/stdtypes.html?highlight=find#str.find
            elif not curses.ascii.isascii(
                    header_text_trimmed[i]) or STRIPPED.find(
                        header_text_trimmed[i]) != -1:
                if inserted and not stripped:
                    header_text_trimmed_middle_stage += '-'
                stripped = 1
            else:
                header_text_trimmed_middle_stage += header_text_trimmed[
                    i].lower()
                stripped = 0
                inserted += 1
            i += 1
        if stripped > 0 and inserted > 0:
            # Drop the trailing separator dash.
            header_text_trimmed_middle_stage = header_text_trimmed_middle_stage[
                0:-1]
        if inserted == 0 and header_text_trimmed_len > 0:
            hash = 5381
            for j in range(0, header_text_trimmed_len):
                # Get the unicode representation with ord.
                # Unicode should be equal to ASCII in ASCII's range of
                # characters.
                hash = ((hash << 5) + hash) + ord(header_text_trimmed[j])
            # This is equivalent to %x in C. In Python we don't have
            # the length problem so %x is equal to %lx in this case.
            # Apparently there is no %l in Python...
            header_text_trimmed_middle_stage = 'part-' + '{0:x}'.format(hash)
        return header_text_trimmed_middle_stage
def get_atx_heading(line: str,
                    keep_header_levels: int = 3,
                    parser: str = 'github',
                    no_links: bool = False):
    r"""Given a line extract the link label and its type.
    :parameter line: the line to be examined.
    :parameter keep_header_levels: the maximum level of headers to be
        considered as such when building the table of contents.
        Defaults to ``3``.
    :parameter parser: decides rules on how to generate the anchor text.
        Defaults to ``github``.
    :parameter no_links: disables the use of links.
    :type line: str
    :type keep_header_levels: int
    :type parser: str
    :type no_links: bool
    :returns: None if the line does not contain header elements according to
        the rules of the selected markdown parser, or a tuple containing the
        header type and the trimmed header text, according to the selected
        parser rules, otherwise.
    :rtype: typing.Optional[tuple]
    :raises: GithubEmptyLinkLabel or GithubOverflowCharsLinkLabel or a
        built-in exception.
    """
    assert keep_header_levels >= 1
    if len(line) == 0:
        return None
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        # A line starting with a backslash (escape character) is not
        # treated as an ATX heading here.
        if line[0] == '\u005c':
            return None
        # Skip the allowed leading space indentation.
        i = 0
        while i < len(line) and line[i] == ' ' and i <= md_parser['github'][
                'header']['max_space_indentation']:
            i += 1
        if i > md_parser['github']['header']['max_space_indentation']:
            return None
        # offset: number of leading spaces before the first '#'.
        offset = i
        while i < len(line) and line[i] == '#' and i <= md_parser['github'][
                'header']['max_levels'] + offset:
            i += 1
        # i - offset is the number of '#' characters, i.e. the header level.
        if i - offset > md_parser['github']['header'][
                'max_levels'] or i - offset > keep_header_levels or i - offset == 0:
            return None
        current_headers = i - offset
        # Include special cases for line endings which should not be
        # discarded as non-ATX headers.
        if i < len(line) and (line[i] != ' ' and line[i] != '\u000a'
                              and line[i] != '\u000d'):
            return None
        i += 1
        # Exclude leading whitespaces after the ATX header identifier.
        while i < len(line) and line[i] == ' ':
            i += 1
        # An algorithm to find the start and the end of the closing sequence.
        # The closing sequence includes all the significant part of the
        # string. This algorithm has a complexity of O(n) with n being the
        # length of the line.
        cs_start = i
        cs_end = cs_start
        # The line is scanned right-to-left via its reversed copy.
        line_prime = line[::-1]
        hash_char_rounds = 0
        go_on = True
        i = 0
        i_prev = i
        while i < len(line) - cs_start - 1 and go_on:
            if ((line_prime[i] != ' ' and line_prime[i] != '#')
                    or hash_char_rounds > 1):
                if i > i_prev:
                    cs_end = len(line_prime) - i_prev
                else:
                    cs_end = len(line_prime) - i
                go_on = False
            while go_on and line_prime[i] == ' ':
                i += 1
            i_prev = i
            while go_on and line_prime[i] == '#':
                i += 1
            if i > i_prev:
                hash_char_rounds += 1
        # Instead of changing the whole algorithm to check for line
        # endings, this seems cleaner.
        find_newline = line.find('\u000a')
        find_carriage_return = line.find('\u000d')
        if find_newline != -1:
            cs_end = min(cs_end, find_newline)
        if find_carriage_return != -1:
            cs_end = min(cs_end, find_carriage_return)
        final_line = line[cs_start:cs_end]
        if not no_links:
            # A trailing backslash would escape the closing bracket of the
            # link label: neutralize it with a space.
            if len(final_line) > 0 and final_line[-1] == '\u005c':
                final_line += ' '
            if len(
                    final_line.strip('\u0020').strip('\u0009').strip('\u000a').
                    strip('\u000b').strip('\u000c').strip('\u000d')) == 0:
                raise GithubEmptyLinkLabel
            if len(final_line
                   ) > md_parser['github']['link']['max_chars_label']:
                raise GithubOverflowCharsLinkLabel
            # Escape square brackets if not already escaped.
            i = 0
            while i < len(final_line):
                if (final_line[i] == '[' or final_line[i] == ']'):
                    j = i - 1
                    # Count the backslashes immediately preceding the
                    # bracket: an even number means the bracket itself is
                    # not escaped.
                    consecutive_escape_characters = 0
                    while j >= 0 and final_line[j] == '\u005c':
                        consecutive_escape_characters += 1
                        j -= 1
                    if ((consecutive_escape_characters > 0
                         and consecutive_escape_characters % 2 == 0)
                            or consecutive_escape_characters == 0):
                        tmp = '\u005c'
                    else:
                        tmp = str()
                    final_line = final_line[0:i] + tmp + final_line[i:len(
                        final_line)]
                    i += 1 + len(tmp)
                else:
                    i += 1
    elif parser == 'redcarpet':
        if line[0] != '#':
            return None
        i = 0
        while (i < len(line)
               and i < md_parser['redcarpet']['header']['max_levels']
               and line[i] == '#'):
            i += 1
        current_headers = i
        if i < len(line) and line[i] != ' ':
            return None
        while i < len(line) and line[i] == ' ':
            i += 1
        # Trim trailing newline, '#' characters and spaces from the header
        # text by moving the end index backwards.
        end = i
        while end < len(line) and line[end] != '\n':
            end += 1
        while end > 0 and line[end - 1] == '#':
            end -= 1
        while end > 0 and line[end - 1] == ' ':
            end -= 1
        if end > i:
            final_line = line
            if not no_links and len(final_line) > 0 and final_line[-1] == '\\':
                final_line += ' '
            end += 1
            final_line = final_line[i:end]
        else:
            return None
    # TODO: escape or remove '[', ']', '(', ')' in inline links for redcarpet,
    # TODO: check link label rules for redcarpet.
    return current_headers, final_line
def get_md_header(header_text_line: str,
                  header_duplicate_counter: dict,
                  keep_header_levels: int = 3,
                  parser: str = 'github',
                  no_links: bool = False) -> dict:
    r"""Build a data structure with the elements needed to create a TOC line.
    :parameter header_text_line: a single markdown line that needs to be
        transformed into a TOC line.
    :parameter header_duplicate_counter: a data structure that contains the
        number of occurrencies of each header anchor link. This is used to
        avoid duplicate anchor links and it is meaningful only for certain
        values of parser.
    :parameter keep_header_levels: the maximum level of headers to be
        considered as such when building the table of contents.
        Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
        Defaults to ``github``.
    :type header_text_line: str
    :type header_duplicate_counter: dict
    :type keep_header_levels: int
    :type parser: str
    :returns: None if the input line does not correspond to one of the
        designated cases or a data structure containing the necessary
        components to create a table of contents line, otherwise.
    :rtype: dict
    :raises: a built-in exception.
    .. note::
        This works like a wrapper to other functions.
    """
    heading = get_atx_heading(header_text_line, keep_header_levels, parser,
                              no_links)
    # Not a heading line for the selected parser: nothing to build.
    if heading is None:
        return None
    header_type, header_text_trimmed = heading
    anchor_link = build_anchor_link(header_text_trimmed,
                                    header_duplicate_counter, parser)
    return {
        'type': header_type,
        'text_original': header_text_trimmed,
        'text_anchor_link': anchor_link,
    }
def is_valid_code_fence_indent(line: str, parser: str = 'github') -> bool:
    r"""Determine if the given line has valid indentation for a code block fence.

    :parameter line: a single markdown line to evaluate.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :type line: str
    :type parser: str
    :returns: True if the given line has valid indentation or False
         otherwise.
    :rtype: bool
    :raises: a built-in exception.
    """
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        # At most 3 leading spaces are allowed before a fence marker.
        leading_spaces = len(line) - len(line.lstrip(' '))
        return leading_spaces <= md_parser['github']['code fence'][
            'min_marker_characters']
    elif parser == 'redcarpet':
        # TODO.
        return False
def is_opening_code_fence(line: str, parser: str = 'github'):
    r"""Determine if the given line is possibly the opening of a fenced code block.

    :parameter line: a single markdown line to evaluate.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :type line: str
    :type parser: str
    :returns: None if the input line is not an opening code fence. Otherwise,
         returns the string which will identify the closing code fence
         according to the input parsers' rules.
    :rtype: typing.Optional[str]
    :raises: a built-in exception.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        markers = md_parser['github']['code fence']['marker']
        marker_min_length = md_parser['github']['code fence'][
            'min_marker_characters']

        # A fence indented by more than the allowed spaces is not a fence.
        if not is_valid_code_fence_indent(line):
            return None

        line = line.lstrip(' ').rstrip('\n')
        # The line must start with at least 3 backticks or 3 tildes.
        if not line.startswith(
            (markers[0] * marker_min_length, markers[1] * marker_min_length)):
            return None

        # Everything after the run of fence characters is the info string.
        if line == len(line) * line[0]:
            info_string = str()
        else:
            info_string = line.lstrip(line[0])

        # Backticks or tildes in info string are explicitly forbidden.
        if markers[0] in info_string or markers[1] in info_string:
            return None

        # Solves example 107. See:
        # https://github.github.com/gfm/#example-107
        # NOTE(review): rejects lines ending in fence characters after the
        # info string started; rstrip removes a *set* of characters here.
        if line.rstrip(markers[0]) != line and line.rstrip(markers[1]) != line:
            return None

        # The returned fence is the line minus the trailing info string.
        return line.rstrip(info_string)
    elif parser == 'redcarpet':
        # TODO.
        return None
def is_closing_code_fence(line: str,
                          fence: str,
                          is_document_end: bool = False,
                          parser: str = 'github') -> bool:
    r"""Determine if the given line is the end of a fenced code block.

    :parameter line: a single markdown line to evaluate.
    :parameter fence: a sequence of backticks or tildes marking the start of
         the current code block. This is usually the return value of the
         is_opening_code_fence function.
    :parameter is_document_end: This variable tells the function that the
         end of the file is reached.
         Defaults to ``False``.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :type line: str
    :type fence: str
    :type is_document_end: bool
    :type parser: str
    :returns: True if the line ends the current code block. False otherwise.
    :rtype: bool
    :raises: a built-in exception.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        markers = md_parser['github']['code fence']['marker']
        marker_min_length = md_parser['github']['code fence'][
            'min_marker_characters']

        if not is_valid_code_fence_indent(line):
            return False

        # Remove opening fence indentation after it is known to be valid.
        fence = fence.lstrip(' ')
        # Check if fence uses valid characters.
        if not fence.startswith((markers[0], markers[1])):
            return False

        if len(fence) < marker_min_length:
            return False

        # Additional security.
        fence = fence.rstrip('\n').rstrip(' ')

        # Check that all fence characters are equal.
        if fence != len(fence) * fence[0]:
            return False

        # We might be inside a code block if this is not closed
        # by the end of the document, according to example 95 and 96.
        # This means that the end of the document corresponds to
        # a closing code fence.
        # Of course we first have to check that fence is a valid opening
        # code fence marker.
        # See:
        # https://github.github.com/gfm/#example-95
        # https://github.github.com/gfm/#example-96
        if is_document_end:
            return True

        # Check if line uses the same character as fence.
        line = line.lstrip(' ')
        if not line.startswith(fence):
            return False

        line = line.rstrip('\n').rstrip(' ')

        # Solves example 93 and 94. See:
        # https://github.github.com/gfm/#example-93
        # https://github.github.com/gfm/#example-94
        if len(line) < len(fence):
            return False

        # Closing fence must not have alien characters.
        if line != len(line) * line[0]:
            return False

        return True
    elif parser == 'redcarpet':
        # TODO.
        return False
def build_indentation_list(parser: str = 'github'):
    r"""Create a data structure that holds the state of indentations.

    :parameter parser: decides the length of the list.
         Defaults to ``github``.
    :type parser: str
    :returns: indentation_list, a list that contains the state of
         indentations given a header type.
    :rtype: list
    :raises: a built-in exception.
    """
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker', 'redcarpet'):
        # One boolean cell per possible header level, all initially unset.
        return [False] * md_parser[parser]['header']['max_levels']
    # Unknown parsers get an empty state.
    return list()
def toc_renders_as_coherent_list(
        header_type_curr: int = 1,
        indentation_list: list = None,
        parser: str = 'github') -> bool:
    r"""Check if the TOC will render as a working list.

    :parameter header_type_curr: the current type of header (h[1-Inf]).
         Defaults to ``1``.
    :parameter indentation_list: the indentation state per header level, as
         produced by build_indentation_list. Defaults to ``None``, which
         means a fresh state is built internally.
    :parameter parser: decides rules on how to generate ordered list markers.
         Defaults to ``github``.
    :type header_type_curr: int
    :type indentation_list: list
    :type parser: str
    :returns: renders_as_list
    :rtype: bool
    :raises: a built-in exception.
    """
    assert header_type_curr >= 1
    # A mutable default argument would be evaluated once at definition time
    # and, since this function mutates the list in place, its state would
    # leak across unrelated calls. Build a fresh list instead.
    if indentation_list is None:
        indentation_list = build_indentation_list('github')
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        assert len(
            indentation_list) == md_parser[parser]['header']['max_levels']
        for e in indentation_list:
            assert isinstance(e, bool)

    renders_as_list = True
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        # Update with current information.
        indentation_list[header_type_curr - 1] = True

        # Reset next cells to False, as a detection mechanism.
        for i in range(header_type_curr,
                       md_parser['github']['header']['max_levels']):
            indentation_list[i] = False

        # Check for previous False cells. If there is a "hole" in the list
        # it means that the TOC will have "wrong" indentation spaces, thus
        # either not rendering as an HTML list or not as the user intended.
        i = header_type_curr - 1
        while i >= 0 and indentation_list[i]:
            i -= 1
        if i >= 0:
            renders_as_list = False

    return renders_as_list
# This module is meant to be imported as a library; there is no standalone
# entry point here.
if __name__ == '__main__':
    pass
|
frnmst/md-toc | md_toc/api.py | build_toc_line | python | def build_toc_line(toc_line_no_indent: str,
no_of_indentation_spaces: int = 0) -> str:
r"""Build the TOC line.
:parameter toc_line_no_indent: the TOC line without indentation.
:parameter no_of_indentation_spaces: the number of indentation spaces.
Defaults to ``0``.
:type toc_line_no_indent: str
:type no_of_indentation_spaces: int
:returns: toc_line, a single line of the table of contents.
:rtype: str
:raises: a built-in exception.
"""
assert no_of_indentation_spaces >= 0
indentation = no_of_indentation_spaces * ' '
toc_line = indentation + toc_line_no_indent
return toc_line | r"""Build the TOC line.
:parameter toc_line_no_indent: the TOC line without indentation.
:parameter no_of_indentation_spaces: the number of indentation spaces.
Defaults to ``0``.
:type toc_line_no_indent: str
:type no_of_indentation_spaces: int
:returns: toc_line, a single line of the table of contents.
:rtype: str
:raises: a built-in exception. | train | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/api.py#L552-L570 | null | #
# api.py
#
# Copyright (C) 2017-2019 frnmst (Franco Masotti) <franco.masotti@live.com>
#
# This file is part of md-toc.
#
# md-toc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# md-toc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with md-toc. If not, see <http://www.gnu.org/licenses/>.
#
"""The main file."""
import fpyutils
import re
import curses.ascii
import sys
from .exceptions import (GithubOverflowCharsLinkLabel, GithubEmptyLinkLabel,
GithubOverflowOrderedListMarker,
StdinIsNotAFileToBeWritten,
TocDoesNotRenderAsCoherentList)
from .constants import common_defaults
from .constants import parser as md_parser
def write_string_on_file_between_markers(filename: str, string: str,
                                         marker: str):
    r"""Write the table of contents on a single file.

    :parameter filename: the file that needs to be read or modified.
    :parameter string: the string that will be written on the file.
    :parameter marker: a marker that will identify the start
         and the end of the string.
    :type filename: str
    :type string: str
    :type marker: str
    :returns: None
    :rtype: None
    :raises: StdinIsNotAFileToBeWritten or an fpyutils exception
         or a built-in exception.
    """
    if filename == '-':
        raise StdinIsNotAFileToBeWritten

    # Surround the payload with the marker so the same region can be
    # located and replaced by future runs.
    final_string = marker + '\n\n' + string.rstrip() + '\n\n' + marker + '\n'
    # Only the first two marker occurrences matter: they delimit the
    # previously written region, if any.
    marker_line_positions = fpyutils.get_line_matches(
        filename, marker, 2, loose_matching=True)

    if 1 in marker_line_positions:
        if 2 in marker_line_positions:
            # Both markers present: drop the whole old region.
            fpyutils.remove_line_interval(filename, marker_line_positions[1],
                                          marker_line_positions[2], filename)
        else:
            # Only the opening marker present: drop just that line.
            fpyutils.remove_line_interval(filename, marker_line_positions[1],
                                          marker_line_positions[1], filename)
        # Insert the fresh region where the old one started.
        fpyutils.insert_string_at_line(
            filename,
            final_string,
            marker_line_positions[1],
            filename,
            append=False)
def write_strings_on_files_between_markers(filenames: list, strings: list,
                                           marker: str):
    r"""Write the table of contents on multiple files.

    :parameter filenames: the files that need to be read or modified.
    :parameter strings: the strings that will be written on the files. Each
         string is associated with one file.
    :parameter marker: a marker that will identify the start
         and the end of the string.
    :type filenames: list
    :type strings: list
    :type marker: str
    :returns: None
    :rtype: None
    :raises: an fpyutils exception or a built-in exception.
    """
    assert len(filenames) == len(strings)
    for f in filenames:
        assert isinstance(f, str)
    for s in strings:
        assert isinstance(s, str)

    # Pair each file with its string instead of tracking a manual index.
    for f, s in zip(filenames, strings):
        write_string_on_file_between_markers(f, s, marker)
def build_toc(filename: str,
              ordered: bool = False,
              no_links: bool = False,
              no_indentation: bool = False,
              no_list_coherence: bool = False,
              keep_header_levels: int = 3,
              parser: str = 'github',
              list_marker: str = '-') -> str:
    r"""Build the table of contents of a single file.

    :parameter filename: the file that needs to be read. ``-`` means stdin.
    :parameter ordered: decides whether to build an ordered list or not.
         Defaults to ``False``.
    :parameter no_links: disables the use of links.
         Defaults to ``False``.
    :parameter no_indentation: disables indentation in the list.
         Defaults to ``False``.
    :parameter no_list_coherence: disables TOC list coherence checks.
         Defaults to ``False``.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``-``.
    :type filename: str
    :type ordered: bool
    :type no_links: bool
    :type no_indentation: bool
    :type no_list_coherence: bool
    :type keep_header_levels: int
    :type parser: str
    :type list_marker: str
    :returns: toc, the corresponding table of contents of the file.
    :rtype: str
    :raises: TocDoesNotRenderAsCoherentList or a built-in exception.
    """
    toc = str()
    header_type_counter = dict()
    header_type_curr = 0
    header_type_prev = 0
    header_duplicate_counter = dict()
    no_of_indentation_spaces_prev = 0
    if filename == '-':
        f = sys.stdin
    else:
        f = open(filename, 'r')
    line = f.readline()
    # Build the marker log exactly once. A previous revision computed it a
    # second time before opening the file, which was redundant.
    if ordered:
        list_marker_log = build_list_marker_log(parser, list_marker)
    else:
        list_marker_log = list()
    is_within_code_fence = False
    code_fence = None
    is_document_end = False
    if not no_indentation and not no_list_coherence:
        # if indentation and list coherence.
        indentation_list = build_indentation_list(parser)
    while line:
        # Document ending detection.
        #
        # This changes the state of is_within_code_fence if the
        # file has no closing fence markers. This serves no practial
        # purpose since the code would run correctly anyway. It is
        # however more sematically correct.
        #
        # See the unit tests (examples 95 and 96 of the github parser)
        # and the is_closing_code_fence function.
        if filename != '-':
            # stdin is not seekable.
            file_pointer_pos = f.tell()
            if f.readline() == str():
                is_document_end = True
            f.seek(file_pointer_pos)

        # Code fence detection.
        if is_within_code_fence:
            is_within_code_fence = not is_closing_code_fence(
                line, code_fence, is_document_end, parser)
            line = f.readline()
        else:
            code_fence = is_opening_code_fence(line, parser)
            if code_fence is not None:
                # Update the status of the next line.
                is_within_code_fence = True
                line = f.readline()
        if not is_within_code_fence or code_fence is None:
            # Header detection and gathering.
            header = get_md_header(line, header_duplicate_counter,
                                   keep_header_levels, parser, no_links)
            if header is not None:
                header_type_curr = header['type']

                # Take care of the ordered TOC.
                if ordered:
                    increase_index_ordered_list(header_type_counter,
                                                header_type_prev,
                                                header_type_curr, parser)
                    index = header_type_counter[header_type_curr]
                else:
                    index = 1

                # Take care of list indentations.
                if no_indentation:
                    no_of_indentation_spaces_curr = 0
                    # TOC list coherence checks are not necessary
                    # without indentation.
                else:
                    if not no_list_coherence:
                        if not toc_renders_as_coherent_list(
                                header_type_curr, indentation_list, parser):
                            raise TocDoesNotRenderAsCoherentList

                    no_of_indentation_spaces_curr = compute_toc_line_indentation_spaces(
                        header_type_curr, header_type_prev,
                        no_of_indentation_spaces_prev, parser, ordered,
                        list_marker, list_marker_log, index)

                # Build a single TOC line.
                toc_line_no_indent = build_toc_line_without_indentation(
                    header, ordered, no_links, index, parser, list_marker)

                # Save the TOC line with the indentation.
                toc += build_toc_line(toc_line_no_indent,
                                      no_of_indentation_spaces_curr) + '\n'

                header_type_prev = header_type_curr
                no_of_indentation_spaces_prev = no_of_indentation_spaces_curr

            # endif
            line = f.readline()
        # endif
    # endwhile
    f.close()

    return toc
def build_multiple_tocs(filenames: list,
                        ordered: bool = False,
                        no_links: bool = False,
                        no_indentation: bool = False,
                        no_list_coherence: bool = False,
                        keep_header_levels: int = 3,
                        parser: str = 'github',
                        list_marker: str = '-') -> list:
    r"""Parse files by line and build the table of contents of each file.

    :parameter filenames: the files that need to be read. An empty list
         means stdin is read instead.
    :parameter ordered: decides whether to build an ordered list or not.
         Defaults to ``False``.
    :parameter no_links: disables the use of links.
         Defaults to ``False``.
    :parameter no_indentation: disables indentation in the list.
         Defaults to ``False``.
    :parameter no_list_coherence: disables TOC list coherence checks.
         Defaults to ``False``.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``-``.
    :type filenames: list
    :type ordered: bool
    :type no_links: bool
    :type no_indentation: bool
    :type no_list_coherence: bool
    :type keep_header_levels: int
    :type parser: str
    :type list_marker: str
    :returns: toc_struct, the corresponding table of contents for each input
         file.
    :rtype: list
    :raises: a built-in exception.
    """
    for f in filenames:
        assert isinstance(f, str)

    # An empty input list means: read from stdin.
    if len(filenames) == 0:
        filenames.append('-')
    # One TOC per file, in input order.
    toc_struct = [
        build_toc(filename, ordered, no_links, no_indentation,
                  no_list_coherence, keep_header_levels, parser, list_marker)
        for filename in filenames
    ]

    return toc_struct
def increase_index_ordered_list(header_type_count: dict,
                                header_type_prev: int,
                                header_type_curr: int,
                                parser: str = 'github'):
    r"""Compute the current index for ordered list table of contents.

    :parameter header_type_count: the count of each header type. This dict
         is updated in place.
    :parameter header_type_prev: the previous type of header (h[1-Inf]).
    :parameter header_type_curr: the current type of header (h[1-Inf]).
    :parameter parser: decides rules on how to generate ordered list markers.
         Defaults to ``github``.
    :type header_type_count: dict
    :type header_type_prev: int
    :type header_type_curr: int
    :type parser: str
    :returns: None
    :rtype: None
    :raises: GithubOverflowOrderedListMarker or a built-in exception.
    """
    # header_type_prev might be 0 while header_type_curr cannot.
    assert header_type_prev >= 0
    assert header_type_curr >= 1

    # A brand new TOC starts counting at the current header level.
    if header_type_prev == 0:
        header_type_prev = header_type_curr

    # Restart the counter when this level was never seen before or when
    # we descended into a deeper nesting level.
    restart_counter = (header_type_curr not in header_type_count
                       or header_type_prev < header_type_curr)
    if restart_counter:
        header_type_count[header_type_curr] = 0
    header_type_count[header_type_curr] += 1

    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        if (header_type_count[header_type_curr] >
                md_parser['github']['list']['ordered']['max_marker_number']):
            raise GithubOverflowOrderedListMarker
def build_list_marker_log(parser: str = 'github',
                          list_marker: str = '.') -> list:
    r"""Create a data structure that holds list marker information.

    :parameter parser: decides rules on how compute indentations.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``.``.
    :type parser: str
    :type list_marker: str
    :returns: list_marker_log, the data structure.
    :rtype: list
    :raises: a built-in exception.

    .. note::
         This function makes sense for ordered lists only.
    """
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker', 'redcarpet'):
        assert list_marker in md_parser[parser]['list']['ordered'][
            'closing_markers']

    list_marker_log = list()
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        # Every header level starts from the minimum marker number,
        # e.g. '1.' for the default github settings.
        initial_marker = str(md_parser['github']['list']['ordered']
                             ['min_marker_number']) + list_marker
        list_marker_log = [initial_marker] * md_parser['github']['header'][
            'max_levels']
    elif parser == 'redcarpet':
        pass

    return list_marker_log
def compute_toc_line_indentation_spaces(
        header_type_curr: int = 1,
        header_type_prev: int = 0,
        no_of_indentation_spaces_prev: int = 0,
        parser: str = 'github',
        ordered: bool = False,
        list_marker: str = '-',
        list_marker_log: list = None,
        index: int = 1) -> int:
    r"""Compute the number of indentation spaces for the TOC list element.

    :parameter header_type_curr: the current type of header (h[1-Inf]).
         Defaults to ``1``.
    :parameter header_type_prev: the previous type of header (h[1-Inf]).
         Defaults to ``0``.
    :parameter no_of_indentation_spaces_prev: the number of previous indentation spaces.
         Defaults to ``0``.
    :parameter parser: decides rules on how compute indentations.
         Defaults to ``github``.
    :parameter ordered: if set to ``True``, numbers will be used
         as list ids, otherwise a dash character. Defaults to ``False``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element.
         Defaults to ``-``.
    :parameter list_marker_log: a data structure that holds list marker
         information for ordered lists.
         Defaults to ``None``, which means a fresh
         ``build_list_marker_log('github', '.')`` is built internally.
    :parameter index: a number that will be used as list id in case of an
         ordered table of contents. Defaults to ``1``.
    :type header_type_curr: int
    :type header_type_prev: int
    :type no_of_indentation_spaces_prev: int
    :type parser: str
    :type ordered: bool
    :type list_marker: str
    :type list_marker_log: list
    :type index: int
    :returns: no_of_indentation_spaces_curr, the number of indentation spaces
         for the list element.
    :rtype: int
    :raises: a built-in exception.

    .. note::
         Please note that this function
         assumes that no_of_indentation_spaces_prev contains the correct
         number of spaces.
    """
    assert header_type_curr >= 1
    assert header_type_prev >= 0
    assert no_of_indentation_spaces_prev >= 0
    # A mutable default argument would be evaluated once at definition
    # time and, since this function mutates the log in place, its state
    # would leak across unrelated calls. Build a fresh log instead.
    if list_marker_log is None:
        list_marker_log = build_list_marker_log('github', '.')
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        if ordered:
            assert list_marker in md_parser[parser]['list']['ordered'][
                'closing_markers']
        else:
            assert list_marker in md_parser[parser]['list']['unordered'][
                'bullet_markers']
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        if ordered:
            assert len(
                list_marker_log) == md_parser['github']['header']['max_levels']
            for e in list_marker_log:
                assert isinstance(e, str)
    assert index >= 1

    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        if header_type_prev == 0:
            # Base case for the first toc line.
            no_of_indentation_spaces_curr = 0
        elif header_type_curr == header_type_prev:
            # Base case for same indentation.
            no_of_indentation_spaces_curr = no_of_indentation_spaces_prev
        else:
            if ordered:
                list_marker_prev = str(list_marker_log[header_type_curr - 1])
            else:
                # list_marker for unordered lists will always be 1 character.
                list_marker_prev = list_marker

            # Generic cases.
            if header_type_curr > header_type_prev:
                # More indentation.
                no_of_indentation_spaces_curr = (
                    no_of_indentation_spaces_prev + len(list_marker_prev) +
                    len(' '))
            elif header_type_curr < header_type_prev:
                # Less indentation.
                no_of_indentation_spaces_curr = (
                    no_of_indentation_spaces_prev -
                    (len(list_marker_prev) + len(' ')))

            # Reset older nested list indices. If this is not performed then
            # future nested ordered lists will rely on incorrect data to
            # compute indentations.
            if ordered:
                for i in range((header_type_curr - 1) + 1,
                               md_parser['github']['header']['max_levels']):
                    list_marker_log[i] = str(
                        md_parser['github']['list']['ordered']
                        ['min_marker_number']) + list_marker

        # Update the data structure.
        if ordered:
            list_marker_log[header_type_curr - 1] = str(index) + list_marker

    elif parser == 'redcarpet':
        no_of_indentation_spaces_curr = 4 * (header_type_curr - 1)

    return no_of_indentation_spaces_curr
def build_toc_line_without_indentation(header: dict,
                                       ordered: bool = False,
                                       no_links: bool = False,
                                       index: int = 1,
                                       parser: str = 'github',
                                       list_marker: str = '-') -> str:
    r"""Return a list element of the table of contents.

    :parameter header: a data structure that contains the original
         text, the trimmed text and the type of header.
    :parameter ordered: if set to ``True``, numbers will be used
         as list ids, otherwise a dash character. Defaults
         to ``False``.
    :parameter no_links: disables the use of links. Defaults to ``False``.
    :parameter index: a number that will be used as list id in case of an
         ordered table of contents. Defaults to ``1``.
    :parameter parser: decides rules on how compute indentations.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``-``.
    :type header: dict
    :type ordered: bool
    :type no_links: bool
    :type index: int
    :type parser: str
    :type list_marker: str
    :returns: toc_line_no_indent, a single line of the table of contents
         without indentation.
    :rtype: str
    :raises: a built-in exception.
    """
    for key in ('type', 'text_original', 'text_anchor_link'):
        assert key in header
    assert isinstance(header['type'], int)
    assert isinstance(header['text_original'], str)
    assert isinstance(header['text_anchor_link'], str)
    assert header['type'] >= 1
    assert index >= 1

    known_parsers = ('github', 'cmark', 'gitlab', 'commonmarker', 'redcarpet')
    if parser in known_parsers:
        if ordered:
            assert list_marker in md_parser[parser]['list']['ordered'][
                'closing_markers']
        else:
            assert list_marker in md_parser[parser]['list']['unordered'][
                'bullet_markers']

    toc_line_no_indent = str()
    if parser in known_parsers:
        if ordered:
            list_marker = str(index) + list_marker

        # FIXME: is this always correct?
        if no_links:
            line = header['text_original']
        else:
            line = ('[' + header['text_original'] + ']' + '(#' +
                    header['text_anchor_link'] + ')')
        toc_line_no_indent = list_marker + ' ' + line

    return toc_line_no_indent
def build_anchor_link(header_text_trimmed: str,
                      header_duplicate_counter: str,
                      parser: str = 'github') -> str:
    r"""Apply the specified slug rule to build the anchor link.

    :parameter header_text_trimmed: the text that needs to be transformed
         in a link.
    :parameter header_duplicate_counter: a data structure that keeps track of
         possible duplicate header links in order to avoid them. This is
         meaningful only for certain values of parser.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :type header_text_trimmed: str
    :type header_duplicate_counter: dict
    :type parser: str
    :returns: None if the specified parser is not recognized, or the anchor
         link, otherwise.
    :rtype: str
    :raises: a built-in exception.

    .. note::
         The licenses of each markdown parser algorithm are reported on
         the 'Markdown spec' documentation page.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        header_text_trimmed = header_text_trimmed.lower()

        # Remove punctuation: Keep spaces, hypens and "word characters"
        # only.
        header_text_trimmed = re.sub(r'[^\w\s\- ]', '', header_text_trimmed)
        header_text_trimmed = header_text_trimmed.replace(' ', '-')

        # Check for duplicates.
        ht = header_text_trimmed
        # Set the initial value if we are examining the first occurrency.
        # The state of header_duplicate_counter is available to the caller
        # functions.
        if header_text_trimmed not in header_duplicate_counter:
            header_duplicate_counter[header_text_trimmed] = 0
        if header_duplicate_counter[header_text_trimmed] > 0:
            header_text_trimmed = header_text_trimmed + '-' + str(
                header_duplicate_counter[header_text_trimmed])
        header_duplicate_counter[ht] += 1
        return header_text_trimmed
    elif parser == 'redcarpet':
        # To ensure full compatibility what follows is a direct translation
        # of the rndr_header_anchor C function used in redcarpet.
        # A while loop replaces the original C for loop because, in Python,
        # reassigning the loop variable inside a ``for`` does not skip
        # iterations: the previous translation never actually skipped the
        # contents of '<...>' tags and '&...;' entities.
        STRIPPED = " -&+$,/:;=?@\"#{}|^~[]`\\*()%.!'"
        header_text_trimmed_len = len(header_text_trimmed)
        inserted = 0
        stripped = 0
        header_text_trimmed_middle_stage = ''
        i = 0
        while i < header_text_trimmed_len:
            if header_text_trimmed[i] == '<':
                # Skip everything up to the closing '>' of an HTML tag.
                while (i < header_text_trimmed_len
                       and header_text_trimmed[i] != '>'):
                    i += 1
            elif header_text_trimmed[i] == '&':
                # Skip everything up to the ';' of an HTML entity.
                while (i < header_text_trimmed_len
                       and header_text_trimmed[i] != ';'):
                    i += 1
            # str.find() == -1 if character is not found in str.
            # https://docs.python.org/3.6/library/stdtypes.html?highlight=find#str.find
            elif not curses.ascii.isascii(
                    header_text_trimmed[i]) or STRIPPED.find(
                        header_text_trimmed[i]) != -1:
                # Collapse a run of stripped characters into a single '-'.
                if inserted and not stripped:
                    header_text_trimmed_middle_stage += '-'
                stripped = 1
            else:
                header_text_trimmed_middle_stage += header_text_trimmed[
                    i].lower()
                stripped = 0
                inserted += 1
            i += 1

        # Remove the trailing separator, if any.
        if stripped > 0 and inserted > 0:
            header_text_trimmed_middle_stage = header_text_trimmed_middle_stage[
                0:-1]

        if inserted == 0 and header_text_trimmed_len > 0:
            hash = 5381
            for j in range(0, header_text_trimmed_len):
                # Get the unicode representation with ord.
                # Unicode should be equal to ASCII in ASCII's range of
                # characters.
                hash = ((hash << 5) + hash) + ord(header_text_trimmed[j])

            # This is equivalent to %x in C. In Python we don't have
            # the length problem so %x is equal to %lx in this case.
            # Apparently there is no %l in Python...
            header_text_trimmed_middle_stage = 'part-' + '{0:x}'.format(hash)

        return header_text_trimmed_middle_stage
def get_atx_heading(line: str,
                    keep_header_levels: int = 3,
                    parser: str = 'github',
                    no_links: bool = False):
    r"""Given a line extract the link label and its type.

    :parameter line: the line to be examined.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :parameter no_links: disables the use of links.
    :type line: str
    :type keep_header_levels: int
    :type parser: str
    :type no_links: bool
    :returns: None if the line does not contain header elements according to
         the rules of the selected markdown parser, or a tuple containing the
         header type and the trimmed header text, according to the selected
         parser rules, otherwise.
    :rtype: typing.Optional[tuple]
    :raises: GithubEmptyLinkLabel or GithubOverflowCharsLinkLabel or a
         built-in exception.
    """
    assert keep_header_levels >= 1

    if len(line) == 0:
        return None

    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):

        # A backslash-escaped '#' is not a heading.
        if line[0] == '\u005c':
            return None

        # Consume the allowed leading spaces (up to 3 for github).
        i = 0
        while i < len(line) and line[i] == ' ' and i <= md_parser['github'][
                'header']['max_space_indentation']:
            i += 1
        if i > md_parser['github']['header']['max_space_indentation']:
            return None

        # Count the '#' characters: their number is the header level.
        offset = i
        while i < len(line) and line[i] == '#' and i <= md_parser['github'][
                'header']['max_levels'] + offset:
            i += 1
        if i - offset > md_parser['github']['header'][
                'max_levels'] or i - offset > keep_header_levels or i - offset == 0:
            return None
        current_headers = i - offset

        # Include special cases for line endings which should not be
        # discarded as non-ATX headers.
        if i < len(line) and (line[i] != ' ' and line[i] != '\u000a'
                              and line[i] != '\u000d'):
            return None

        i += 1
        # Exclude leading whitespaces after the ATX header identifier.
        while i < len(line) and line[i] == ' ':
            i += 1

        # An algorithm to find the start and the end of the closing sequence.
        # The closing sequence includes all the significant part of the
        # string. This algorithm has a complexity of O(n) with n being the
        # length of the line.
        # The line is scanned in reverse (line_prime) to locate trailing
        # spaces and '#' runs which form the optional closing sequence.
        cs_start = i
        cs_end = cs_start
        line_prime = line[::-1]
        hash_char_rounds = 0
        go_on = True
        i = 0
        i_prev = i
        while i < len(line) - cs_start - 1 and go_on:
            if ((line_prime[i] != ' ' and line_prime[i] != '#')
                    or hash_char_rounds > 1):
                if i > i_prev:
                    cs_end = len(line_prime) - i_prev
                else:
                    cs_end = len(line_prime) - i
                go_on = False
            while go_on and line_prime[i] == ' ':
                i += 1
            i_prev = i
            while go_on and line_prime[i] == '#':
                i += 1
            if i > i_prev:
                hash_char_rounds += 1

        # Instead of changing the whole algorithm to check for line
        # endings, this seems cleaner.
        find_newline = line.find('\u000a')
        find_carriage_return = line.find('\u000d')
        if find_newline != -1:
            cs_end = min(cs_end, find_newline)
        if find_carriage_return != -1:
            cs_end = min(cs_end, find_carriage_return)

        final_line = line[cs_start:cs_end]

        if not no_links:
            # A trailing backslash would escape the closing ']' of the
            # link label, so neutralize it with a space.
            if len(final_line) > 0 and final_line[-1] == '\u005c':
                final_line += ' '
            # Reject labels made entirely of whitespace characters.
            if len(
                    final_line.strip('\u0020').strip('\u0009').strip('\u000a').
                    strip('\u000b').strip('\u000c').strip('\u000d')) == 0:
                raise GithubEmptyLinkLabel
            if len(final_line
                   ) > md_parser['github']['link']['max_chars_label']:
                raise GithubOverflowCharsLinkLabel

            # Escape square brackets if not already escaped.
            i = 0
            while i < len(final_line):
                if (final_line[i] == '[' or final_line[i] == ']'):
                    j = i - 1
                    consecutive_escape_characters = 0
                    while j >= 0 and final_line[j] == '\u005c':
                        consecutive_escape_characters += 1
                        j -= 1
                    # An even number of preceding backslashes means the
                    # bracket itself is unescaped: add one backslash.
                    if ((consecutive_escape_characters > 0
                         and consecutive_escape_characters % 2 == 0)
                            or consecutive_escape_characters == 0):
                        tmp = '\u005c'
                    else:
                        tmp = str()
                    final_line = final_line[0:i] + tmp + final_line[i:len(
                        final_line)]
                    i += 1 + len(tmp)
                else:
                    i += 1

    elif parser == 'redcarpet':

        if line[0] != '#':
            return None

        # Count the '#' characters: their number is the header level.
        i = 0
        while (i < len(line)
               and i < md_parser['redcarpet']['header']['max_levels']
               and line[i] == '#'):
            i += 1
        current_headers = i

        if i < len(line) and line[i] != ' ':
            return None

        while i < len(line) and line[i] == ' ':
            i += 1

        # Trim the trailing newline, closing '#' run and spaces.
        end = i
        while end < len(line) and line[end] != '\n':
            end += 1
        while end > 0 and line[end - 1] == '#':
            end -= 1
        while end > 0 and line[end - 1] == ' ':
            end -= 1

        if end > i:
            final_line = line
            if not no_links and len(final_line) > 0 and final_line[-1] == '\\':
                final_line += ' '
                end += 1
            final_line = final_line[i:end]
        else:
            return None

    # TODO: escape or remove '[', ']', '(', ')' in inline links for redcarpet,
    # TODO: check link label rules for redcarpet.

    return current_headers, final_line
def get_md_header(header_text_line: str,
                  header_duplicate_counter: dict,
                  keep_header_levels: int = 3,
                  parser: str = 'github',
                  no_links: bool = False) -> dict:
    r"""Build a data structure with the elements needed to create a TOC line.

    :parameter header_text_line: a single markdown line that needs to be
         transformed into a TOC line.
    :parameter header_duplicate_counter: a data structure that contains the
         number of occurrences of each header anchor link. This is used to
         avoid duplicate anchor links and it is meaningful only for certain
         values of parser.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :parameter no_links: disables the use of links.
         Defaults to ``False``.
    :type header_text_line: str
    :type header_duplicate_counter: dict
    :type keep_header_levels: int
    :type parser: str
    :type no_links: bool
    :returns: None if the input line does not correspond to one of the
         designated cases or a data structure containing the necessary
         components to create a table of contents line, otherwise.
    :rtype: dict
    :raises: a built-in exception.

    .. note::
         This works like a wrapper to other functions.
    """
    parsed = get_atx_heading(header_text_line, keep_header_levels, parser,
                             no_links)
    if parsed is None:
        return None
    header_type, header_text_trimmed = parsed
    return {
        'type': header_type,
        'text_original': header_text_trimmed,
        'text_anchor_link': build_anchor_link(header_text_trimmed,
                                              header_duplicate_counter,
                                              parser),
    }
def is_valid_code_fence_indent(line: str, parser: str = 'github') -> bool:
    r"""Determine if the given line has valid indentation for a code block fence.

    :parameter line: a single markdown line to evaluate.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :type line: str
    :type parser: str
    :returns: True if the given line has valid indentation or False
         otherwise.
    :rtype: bool
    :raises: a built-in exception.
    """
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        # The number of leading spaces must not exceed the parser's
        # configured bound for a line to count as a fence candidate.
        leading_spaces = len(line) - len(line.lstrip(' '))
        return leading_spaces <= md_parser['github']['code fence'][
            'min_marker_characters']
    elif parser == 'redcarpet':
        # TODO.
        return False
def is_opening_code_fence(line: str, parser: str = 'github'):
    r"""Determine if the given line is possibly the opening of a fenced code block.

    :parameter line: a single markdown line to evaluate.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :type line: str
    :type parser: str
    :returns: None if the input line is not an opening code fence. Otherwise,
         returns the string which will identify the closing code fence
         according to the input parsers' rules.
    :rtype: typing.Optional[str]
    :raises: a built-in exception.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        markers = md_parser['github']['code fence']['marker']
        marker_min_length = md_parser['github']['code fence'][
            'min_marker_characters']

        if not is_valid_code_fence_indent(line):
            return None

        line = line.lstrip(' ').rstrip('\n')
        # An opening fence needs a run of at least marker_min_length
        # identical fence characters at the start of the line.
        if not line.startswith(
                (markers[0] * marker_min_length,
                 markers[1] * marker_min_length)):
            return None

        # The info string is whatever follows the run of fence characters;
        # a line made entirely of the fence character has none.
        if line == len(line) * line[0]:
            info_string = str()
        else:
            info_string = line.lstrip(line[0])

        # Backticks or tildes in info string are explicitly forbidden.
        if markers[0] in info_string or markers[1] in info_string:
            return None

        # Solves example 107. See:
        # https://github.github.com/gfm/#example-107
        if line.rstrip(markers[0]) != line and line.rstrip(markers[1]) != line:
            return None

        # str.rstrip treats its argument as a *set* of characters, so this
        # strips the info-string characters from the right and leaves the
        # fence marker run (info_string cannot contain fence characters at
        # this point, they were rejected above).
        return line.rstrip(info_string)
    elif parser == 'redcarpet':
        # TODO.
        return None
def is_closing_code_fence(line: str,
                          fence: str,
                          is_document_end: bool = False,
                          parser: str = 'github') -> bool:
    r"""Determine if the given line is the end of a fenced code block.

    :parameter line: a single markdown line to evaluate.
    :parameter fence: a sequence of backticks or tildes marking the start of
         the current code block. This is usually the return value of the
         is_opening_code_fence function.
    :parameter is_document_end: This variable tells the function that the
         end of the file is reached.
         Defaults to ``False``.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :type line: str
    :type fence: str
    :type is_document_end: bool
    :type parser: str
    :returns: True if the line ends the current code block. False otherwise.
    :rtype: bool
    :raises: a built-in exception.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        markers = md_parser['github']['code fence']['marker']
        marker_min_length = md_parser['github']['code fence'][
            'min_marker_characters']

        if not is_valid_code_fence_indent(line):
            return False

        # Remove opening fence indentation after it is known to be valid.
        fence = fence.lstrip(' ')
        # Check if fence uses valid characters.
        if not fence.startswith((markers[0], markers[1])):
            return False

        if len(fence) < marker_min_length:
            return False

        # Additional security.
        fence = fence.rstrip('\n').rstrip(' ')

        # Check that all fence characters are equal.
        if fence != len(fence) * fence[0]:
            return False

        # We might be inside a code block if this is not closed
        # by the end of the document, according to example 95 and 96.
        # This means that the end of the document corresponds to
        # a closing code fence.
        # Of course we first have to check that fence is a valid opening
        # code fence marker.
        # See:
        # https://github.github.com/gfm/#example-95
        # https://github.github.com/gfm/#example-96
        if is_document_end:
            return True

        # Check if line uses the same character as fence.
        line = line.lstrip(' ')
        if not line.startswith(fence):
            return False

        line = line.rstrip('\n').rstrip(' ')

        # Solves example 93 and 94: the closing run must be at least as
        # long as the opening one. See:
        # https://github.github.com/gfm/#example-93
        # https://github.github.com/gfm/#example-94
        if len(line) < len(fence):
            return False

        # Closing fence must not have alien characters.
        if line != len(line) * line[0]:
            return False

        return True
    elif parser == 'redcarpet':
        # TODO.
        return False
def build_indentation_list(parser: str = 'github'):
    r"""Create a data structure that holds the state of indentations.

    :parameter parser: decides the length of the list.
         Defaults to ``github``.
    :type parser: str
    :returns: indentation_list, a list of booleans (all ``False``), one
         element per possible header level. An empty list is returned for
         an unsupported parser.
    :rtype: list
    :raises: a built-in exception.
    """
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker', 'redcarpet'):
        # One flag per header level; a manual append loop is not needed,
        # list repetition builds the same structure directly.
        return [False] * md_parser[parser]['header']['max_levels']
    return list()
def toc_renders_as_coherent_list(
        header_type_curr: int = 1,
        indentation_list: list = None,
        parser: str = 'github') -> bool:
    r"""Check if the TOC will render as a working list.

    :parameter header_type_curr: the current type of header (h[1-Inf]).
         Defaults to ``1``.
    :parameter indentation_list: a list of booleans that tracks which header
         levels have already appeared in the TOC. It is updated in place, so
         the caller can carry its state between calls. If ``None``, a fresh
         list is built with build_indentation_list.
         Defaults to ``None``.
    :parameter parser: decides rules on how to generate ordered list markers.
         Defaults to ``github``.
    :type header_type_curr: int
    :type indentation_list: list
    :type parser: str
    :returns: renders_as_list
    :rtype: bool
    :raises: a built-in exception.
    """
    assert header_type_curr >= 1
    # A function call used as a default argument is evaluated only once, at
    # definition time, and since this function mutates the list its state
    # would silently leak between calls. Build a fresh list per call instead.
    if indentation_list is None:
        indentation_list = build_indentation_list(parser)
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        assert len(
            indentation_list) == md_parser[parser]['header']['max_levels']
        for e in indentation_list:
            assert isinstance(e, bool)

    renders_as_list = True
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        # Update with current information.
        indentation_list[header_type_curr - 1] = True

        # Reset next cells to False, as a detection mechanism.
        # Use the selected parser's level count so the bound is consistent
        # with the length assertion above.
        for i in range(header_type_curr,
                       md_parser[parser]['header']['max_levels']):
            indentation_list[i] = False

        # Check for previous False cells. If there is a "hole" in the list
        # it means that the TOC will have "wrong" indentation spaces, thus
        # either not rendering as an HTML list or not as the user intended.
        i = header_type_curr - 1
        while i >= 0 and indentation_list[i]:
            i -= 1
        if i >= 0:
            renders_as_list = False

    return renders_as_list
# This module is a library: importing it must have no side effects, so
# there is deliberately no command line entry point here.
if __name__ == '__main__':
    pass
|
def build_anchor_link(header_text_trimmed: str,
                      header_duplicate_counter: dict,
                      parser: str = 'github') -> str:
    r"""Apply the specified slug rule to build the anchor link.

    :parameter header_text_trimmed: the text that needs to be transformed
         in a link.
    :parameter header_duplicate_counter: a data structure that keeps track of
         possible duplicate header links in order to avoid them. This is
         meaningful only for certain values of parser.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :type header_text_trimmed: str
    :type header_duplicate_counter: dict
    :type parser: str
    :returns: None if the specified parser is not recognized, or the anchor
         link, otherwise.
    :rtype: str
    :raises: a built-in exception.

    .. note::
         The licenses of each markdown parser algorithm are reported on
         the 'Markdown spec' documentation page.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        header_text_trimmed = header_text_trimmed.lower()
        # Remove punctuation: keep spaces, hyphens and "word characters"
        # only.
        header_text_trimmed = re.sub(r'[^\w\s\- ]', '', header_text_trimmed)
        header_text_trimmed = header_text_trimmed.replace(' ', '-')

        # Check for duplicates.
        ht = header_text_trimmed
        # Set the initial value if we are examining the first occurrence.
        # The state of header_duplicate_counter is available to the caller
        # functions.
        if header_text_trimmed not in header_duplicate_counter:
            header_duplicate_counter[header_text_trimmed] = 0
        if header_duplicate_counter[header_text_trimmed] > 0:
            # Suffix repeated anchors with a progressive number.
            header_text_trimmed = header_text_trimmed + '-' + str(
                header_duplicate_counter[header_text_trimmed])
        header_duplicate_counter[ht] += 1
        return header_text_trimmed
    elif parser == 'redcarpet':
        # To ensure full compatibility what follows is a direct translation
        # of the rndr_header_anchor C function used in redcarpet.
        STRIPPED = " -&+$,/:;=?@\"#{}|^~[]`\\*()%.!'"
        header_text_trimmed_len = len(header_text_trimmed)
        inserted = 0
        stripped = 0
        header_text_trimmed_middle_stage = ''
        for i in range(0, header_text_trimmed_len):
            if header_text_trimmed[i] == '<':
                # NOTE(review): reassigning i inside the for loop does not
                # affect the next iteration; kept as in the C translation.
                while (i < header_text_trimmed_len
                       and header_text_trimmed[i] != '>'):
                    i += 1
            elif header_text_trimmed[i] == '&':
                while (i < header_text_trimmed_len
                       and header_text_trimmed[i] != ';'):
                    i += 1
            # str.find() == -1 if character is not found in str.
            # https://docs.python.org/3.6/library/stdtypes.html?highlight=find#str.find
            elif not curses.ascii.isascii(
                    header_text_trimmed[i]) or STRIPPED.find(
                        header_text_trimmed[i]) != -1:
                if inserted and not stripped:
                    header_text_trimmed_middle_stage += '-'
                stripped = 1
            else:
                header_text_trimmed_middle_stage += header_text_trimmed[
                    i].lower()
                stripped = 0
                inserted += 1

        if stripped > 0 and inserted > 0:
            # Drop the trailing separator.
            header_text_trimmed_middle_stage = \
                header_text_trimmed_middle_stage[0:-1]

        if inserted == 0 and header_text_trimmed_len > 0:
            # Nothing survived the stripping: fall back to a djb2 hash of
            # the original text, as redcarpet does.
            hash = 5381
            for i in range(0, header_text_trimmed_len):
                # Get the unicode representation with ord.
                # Unicode should be equal to ASCII in ASCII's range of
                # characters.
                hash = ((hash << 5) + hash) + ord(header_text_trimmed[i])
            # This is equivalent to %x in C. In Python we don't have
            # the length problem so %x is equal to %lx in this case.
            header_text_trimmed_middle_stage = 'part-' + '{0:x}'.format(hash)
        return header_text_trimmed_middle_stage
:parameter header_text_trimmed: the text that needs to be transformed
in a link.
:parameter header_duplicate_counter: a data structure that keeps track of
possible duplicate header links in order to avoid them. This is
meaningful only for certain values of parser.
:parameter parser: decides rules on how to generate anchor links.
Defaults to ``github``.
:type header_text_trimmed: str
:type header_duplicate_counter: dict
:type parser: str
:returns: None if the specified parser is not recognized, or the anchor
link, otherwise.
:rtype: str
:raises: a built-in exception.
.. note::
The licenses of each markdown parser algorithm are reported on
the 'Markdown spec' documentation page. | train | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/api.py#L573-L665 | null | #
# api.py
#
# Copyright (C) 2017-2019 frnmst (Franco Masotti) <franco.masotti@live.com>
#
# This file is part of md-toc.
#
# md-toc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# md-toc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with md-toc. If not, see <http://www.gnu.org/licenses/>.
#
"""The main file."""
import fpyutils
import re
import curses.ascii
import sys
from .exceptions import (GithubOverflowCharsLinkLabel, GithubEmptyLinkLabel,
GithubOverflowOrderedListMarker,
StdinIsNotAFileToBeWritten,
TocDoesNotRenderAsCoherentList)
from .constants import common_defaults
from .constants import parser as md_parser
def write_string_on_file_between_markers(filename: str, string: str,
                                         marker: str):
    r"""Write the table of contents on a single file.

    :parameter filename: the file that needs to be read or modified.
    :parameter string: the string that will be written on the file.
    :parameter marker: a marker that will identify the start
         and the end of the string.
    :type filename: str
    :type string: str
    :type marker: str
    :returns: None
    :rtype: None
    :raises: StdinIsNotAFileToBeWritten or an fpyutils exception
         or a built-in exception.
    """
    if filename == '-':
        raise StdinIsNotAFileToBeWritten

    # The payload is wrapped between two marker lines.
    final_string = marker + '\n\n' + string.rstrip() + '\n\n' + marker + '\n'
    marker_line_positions = fpyutils.get_line_matches(
        filename, marker, 2, loose_matching=True)

    # Without at least one marker in the file there is nothing to do.
    if 1 not in marker_line_positions:
        return

    first = marker_line_positions[1]
    # Remove the old interval: up to the second marker when present,
    # otherwise only the first marker line itself.
    last = marker_line_positions[2] if 2 in marker_line_positions else first
    fpyutils.remove_line_interval(filename, first, last, filename)
    fpyutils.insert_string_at_line(
        filename, final_string, first, filename, append=False)
def write_strings_on_files_between_markers(filenames: list, strings: list,
                                           marker: str):
    r"""Write the table of contents on multiple files.

    :parameter filenames: the files that needs to be read or modified.
    :parameter strings: the strings that will be written on the file. Each
         string is associated with one file.
    :parameter marker: a marker that will identify the start
         and the end of the string.
    :type filenames: list
    :type strings: list
    :type marker: str
    :returns: None
    :rtype: None
    :raises: an fpyutils exception or a built-in exception.
    """
    assert len(filenames) == len(strings)
    # The loops handle empty lists naturally; no length guards needed.
    for f in filenames:
        assert isinstance(f, str)
    for s in strings:
        assert isinstance(s, str)

    # Pair each file with its string instead of tracking a manual index.
    for f, s in zip(filenames, strings):
        write_string_on_file_between_markers(f, s, marker)
def build_toc(filename: str,
              ordered: bool = False,
              no_links: bool = False,
              no_indentation: bool = False,
              no_list_coherence: bool = False,
              keep_header_levels: int = 3,
              parser: str = 'github',
              list_marker: str = '-') -> str:
    r"""Build the table of contents of a single file.

    :parameter filename: the file that needs to be read. Use ``-`` to read
         from standard input.
    :parameter ordered: decides whether to build an ordered list or not.
         Defaults to ``False``.
    :parameter no_links: disables the use of links.
         Defaults to ``False``.
    :parameter no_indentation: disables indentation in the list.
         Defaults to ``False``.
    :parameter no_list_coherence: disables the TOC list coherence check.
         Defaults to ``False``.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element.
         Defaults to ``-``.
    :type filename: str
    :type ordered: bool
    :type no_links: bool
    :type no_indentation: bool
    :type no_list_coherence: bool
    :type keep_header_levels: int
    :type parser: str
    :type list_marker: str
    :returns: toc, the corresponding table of contents of the file.
    :rtype: str
    :raises: a built-in exception.
    """
    toc = str()
    header_type_counter = dict()
    header_type_curr = 0
    header_type_prev = 0
    header_duplicate_counter = dict()
    no_of_indentation_spaces_prev = 0
    if ordered:
        list_marker_log = build_list_marker_log(parser, list_marker)
    if filename == '-':
        f = sys.stdin
    else:
        f = open(filename, 'r')
    line = f.readline()
    # NOTE(review): for ordered TOCs list_marker_log was already built
    # above; this second construction is redundant but harmless.
    if ordered:
        list_marker_log = build_list_marker_log(parser, list_marker)
    else:
        list_marker_log = list()
    is_within_code_fence = False
    code_fence = None
    is_document_end = False
    if not no_indentation and not no_list_coherence:
        # if indentation and list coherence.
        indentation_list = build_indentation_list(parser)
    while line:
        # Document ending detection.
        #
        # This changes the state of is_within_code_fence if the
        # file has no closing fence markers. This serves no practical
        # purpose since the code would run correctly anyway. It is
        # however more semantically correct.
        #
        # See the unit tests (examples 95 and 96 of the github parser)
        # and the is_closing_code_fence function.
        if filename != '-':
            # stdin is not seekable.
            file_pointer_pos = f.tell()
            if f.readline() == str():
                is_document_end = True
            f.seek(file_pointer_pos)

        # Code fence detection.
        if is_within_code_fence:
            is_within_code_fence = not is_closing_code_fence(
                line, code_fence, is_document_end, parser)
            line = f.readline()
        else:
            code_fence = is_opening_code_fence(line, parser)
            if code_fence is not None:
                # Update the status of the next line.
                is_within_code_fence = True
                line = f.readline()
            if not is_within_code_fence or code_fence is None:
                # Header detection and gathering.
                header = get_md_header(line, header_duplicate_counter,
                                       keep_header_levels, parser, no_links)
                if header is not None:
                    header_type_curr = header['type']

                    # Take care of the ordered TOC.
                    if ordered:
                        increase_index_ordered_list(header_type_counter,
                                                    header_type_prev,
                                                    header_type_curr, parser)
                        index = header_type_counter[header_type_curr]
                    else:
                        index = 1

                    # Take care of list indentations.
                    if no_indentation:
                        no_of_indentation_spaces_curr = 0
                        # TOC list coherence checks are not necessary
                        # without indentation.
                    else:
                        if not no_list_coherence:
                            if not toc_renders_as_coherent_list(
                                    header_type_curr, indentation_list,
                                    parser):
                                raise TocDoesNotRenderAsCoherentList
                        no_of_indentation_spaces_curr = compute_toc_line_indentation_spaces(
                            header_type_curr, header_type_prev,
                            no_of_indentation_spaces_prev, parser, ordered,
                            list_marker, list_marker_log, index)

                    # Build a single TOC line.
                    toc_line_no_indent = build_toc_line_without_indentation(
                        header, ordered, no_links, index, parser, list_marker)

                    # Save the TOC line with the indentation.
                    toc += build_toc_line(toc_line_no_indent,
                                          no_of_indentation_spaces_curr) + '\n'

                    header_type_prev = header_type_curr
                    no_of_indentation_spaces_prev = no_of_indentation_spaces_curr
                # endif
                line = f.readline()
            # endif
    # endwhile

    f.close()
    return toc
def build_multiple_tocs(filenames: list,
                        ordered: bool = False,
                        no_links: bool = False,
                        no_indentation: bool = False,
                        no_list_coherence: bool = False,
                        keep_header_levels: int = 3,
                        parser: str = 'github',
                        list_marker: str = '-') -> list:
    r"""Parse files by line and build the table of contents of each file.

    :parameter filenames: the files that needs to be read. If empty,
         standard input (``-``) is used instead.
    :parameter ordered: decides whether to build an ordered list or not.
         Defaults to ``False``.
    :parameter no_links: disables the use of links.
         Defaults to ``False``.
    :parameter no_indentation: disables indentation in the list.
         Defaults to ``False``.
    :parameter no_list_coherence: disables the TOC list coherence check.
         Defaults to ``False``.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element.
         Defaults to ``-``.
    :type filenames: list
    :type ordered: bool
    :type no_links: bool
    :type no_indentation: bool
    :type no_list_coherence: bool
    :type keep_header_levels: int
    :type parser: str
    :type list_marker: str
    :returns: toc_struct, the corresponding table of contents for each input
         file.
    :rtype: list
    :raises: a built-in exception.
    """
    for f in filenames:
        assert isinstance(f, str)

    # Fall back to standard input without mutating the caller's list.
    if len(filenames) == 0:
        filenames = ['-']

    return [
        build_toc(filename, ordered, no_links, no_indentation,
                  no_list_coherence, keep_header_levels, parser, list_marker)
        for filename in filenames
    ]
def increase_index_ordered_list(header_type_count: dict,
                                header_type_prev: int,
                                header_type_curr: int,
                                parser: str = 'github'):
    r"""Compute the current index for ordered list table of contents.

    :parameter header_type_count: the count of each header type.
    :parameter header_type_prev: the previous type of header (h[1-Inf]).
    :parameter header_type_curr: the current type of header (h[1-Inf]).
    :parameter parser: decides rules on how to generate ordered list markers.
         Defaults to ``github``.
    :type header_type_count: dict
    :type header_type_prev: int
    :type header_type_curr: int
    :type parser: str
    :returns: None
    :rtype: None
    :raises: GithubOverflowOrderedListMarker or a built-in exception.
    """
    # header_type_prev might be 0 while header_type_curr can't.
    assert header_type_prev >= 0
    assert header_type_curr >= 1

    # A brand new table of contents starts counting from the current type.
    if header_type_prev == 0:
        header_type_prev = header_type_curr

    # First occurrence of this header type, or entering a deeper nesting
    # level: restart its counter.
    if (header_type_curr not in header_type_count
            or header_type_prev < header_type_curr):
        header_type_count[header_type_curr] = 0

    header_type_count[header_type_curr] += 1

    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        if (header_type_count[header_type_curr] >
                md_parser['github']['list']['ordered']['max_marker_number']):
            raise GithubOverflowOrderedListMarker
def build_list_marker_log(parser: str = 'github',
                          list_marker: str = '.') -> list:
    r"""Create a data structure that holds list marker information.

    :parameter parser: decides rules on how compute indentations.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``.``.
    :type parser: str
    :type list_marker: str
    :returns: list_marker_log, the data structure.
    :rtype: list
    :raises: a built-in exception.

    .. note::
         This function makes sense for ordered lists only.
    """
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker', 'redcarpet'):
        assert list_marker in md_parser[parser]['list']['ordered'][
            'closing_markers']

    log = list()
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        # Every header level starts from the minimum ordered-list marker
        # number; strings are immutable so list repetition is safe here.
        initial_marker = str(
            md_parser['github']['list']['ordered']['min_marker_number']
        ) + list_marker
        log = [initial_marker] * md_parser['github']['header']['max_levels']
    elif parser == 'redcarpet':
        pass
    return log
def compute_toc_line_indentation_spaces(
        header_type_curr: int = 1,
        header_type_prev: int = 0,
        no_of_indentation_spaces_prev: int = 0,
        parser: str = 'github',
        ordered: bool = False,
        list_marker: str = '-',
        list_marker_log: list = None,
        index: int = 1) -> int:
    r"""Compute the number of indentation spaces for the TOC list element.

    :parameter header_type_curr: the current type of header (h[1-Inf]).
         Defaults to ``1``.
    :parameter header_type_prev: the previous type of header (h[1-Inf]).
         Defaults to ``0``.
    :parameter no_of_indentation_spaces_prev: the number of previous
         indentation spaces.
         Defaults to ``0``.
    :parameter parser: decides rules on how compute indentations.
         Defaults to ``github``.
    :parameter ordered: if set to ``True``, numbers will be used
         as list ids or otherwise a dash character, otherwise.
         Defaults to ``False``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element.
         Defaults to ``-``.
    :parameter list_marker_log: a data structure that holds list marker
         information for ordered lists. It is updated in place so the
         caller can carry its state between calls. If ``None``, a fresh
         structure is built with ``build_list_marker_log('github', '.')``.
         Defaults to ``None``.
    :parameter index: a number that will be used as list id in case of an
         ordered table of contents. Defaults to ``1``.
    :type header_type_curr: int
    :type header_type_prev: int
    :type no_of_indentation_spaces_prev: int
    :type parser: str
    :type ordered: bool
    :type list_marker: str
    :type list_marker_log: list
    :type index: int
    :returns: no_of_indentation_spaces_curr, the number of indentation spaces
         for the list element.
    :rtype: int
    :raises: a built-in exception.

    .. note::
         Please note that this function
         assumes that no_of_indentation_spaces_prev contains the correct
         number of spaces.
    """
    # A function call used as a default argument would be evaluated only
    # once, at definition time; since this function mutates the list, the
    # state would leak across calls. Build a fresh structure per call.
    if list_marker_log is None:
        list_marker_log = build_list_marker_log('github', '.')
    assert header_type_curr >= 1
    assert header_type_prev >= 0
    assert no_of_indentation_spaces_prev >= 0
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        if ordered:
            assert list_marker in md_parser[parser]['list']['ordered'][
                'closing_markers']
        else:
            assert list_marker in md_parser[parser]['list']['unordered'][
                'bullet_markers']
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        if ordered:
            assert len(
                list_marker_log) == md_parser['github']['header']['max_levels']
            for e in list_marker_log:
                assert isinstance(e, str)
    assert index >= 1

    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        if header_type_prev == 0:
            # Base case for the first toc line.
            no_of_indentation_spaces_curr = 0
        elif header_type_curr == header_type_prev:
            # Base case for same indentation.
            no_of_indentation_spaces_curr = no_of_indentation_spaces_prev
        else:
            if ordered:
                list_marker_prev = str(list_marker_log[header_type_curr - 1])
            else:
                # list_marker for unordered lists will always be 1 character.
                list_marker_prev = list_marker

            # Generic cases.
            if header_type_curr > header_type_prev:
                # More indentation.
                no_of_indentation_spaces_curr = (
                    no_of_indentation_spaces_prev + len(list_marker_prev) +
                    len(' '))
            elif header_type_curr < header_type_prev:
                # Less indentation.
                no_of_indentation_spaces_curr = (
                    no_of_indentation_spaces_prev -
                    (len(list_marker_prev) + len(' ')))

            # Reset older nested list indices. If this is not performed then
            # future nested ordered lists will rely on incorrect data to
            # compute indentations.
            if ordered:
                for i in range((header_type_curr - 1) + 1,
                               md_parser['github']['header']['max_levels']):
                    list_marker_log[i] = str(
                        md_parser['github']['list']['ordered']
                        ['min_marker_number']) + list_marker

        # Update the data structure.
        if ordered:
            list_marker_log[header_type_curr - 1] = str(index) + list_marker
    elif parser == 'redcarpet':
        # Redcarpet uses a fixed 4-space indentation step per header level.
        no_of_indentation_spaces_curr = 4 * (header_type_curr - 1)

    return no_of_indentation_spaces_curr
def build_toc_line_without_indentation(header: dict,
                                       ordered: bool = False,
                                       no_links: bool = False,
                                       index: int = 1,
                                       parser: str = 'github',
                                       list_marker: str = '-') -> str:
    r"""Return a list element of the table of contents.

    :parameter header: a data structure that contains the original
         text, the trimmed text and the type of header.
    :parameter ordered: if set to ``True``, numbers will be used
         as list ids, otherwise a dash character. Defaults
         to ``False``.
    :parameter no_links: disables the use of links. Defaults to ``False``.
    :parameter index: a number that will be used as list id in case of an
         ordered table of contents. Defaults to ``1``.
    :parameter parser: decides rules on how compute indentations.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``-``.
    :type header: dict
    :type ordered: bool
    :type no_links: bool
    :type index: int
    :type parser: str
    :type list_marker: str
    :returns: toc_line_no_indent, a single line of the table of contents
         without indentation.
    :rtype: str
    :raises: a built-in exception.
    """
    for key in ('type', 'text_original', 'text_anchor_link'):
        assert key in header
    assert isinstance(header['type'], int)
    assert isinstance(header['text_original'], str)
    assert isinstance(header['text_anchor_link'], str)
    assert header['type'] >= 1
    assert index >= 1

    known_parsers = ('github', 'cmark', 'gitlab', 'commonmarker', 'redcarpet')
    if parser in known_parsers:
        if ordered:
            assert list_marker in md_parser[parser]['list']['ordered'][
                'closing_markers']
        else:
            assert list_marker in md_parser[parser]['list']['unordered'][
                'bullet_markers']

    toc_line_no_indent = str()
    if parser in known_parsers:
        # Ordered lists prefix the marker with the list index.
        bullet = str(index) + list_marker if ordered else list_marker
        # FIXME: is this always correct?
        if no_links:
            text = header['text_original']
        else:
            text = '[' + header['text_original'] + '](#' + header[
                'text_anchor_link'] + ')'
        toc_line_no_indent = bullet + ' ' + text
    return toc_line_no_indent
def build_toc_line(toc_line_no_indent: str,
                   no_of_indentation_spaces: int = 0) -> str:
    r"""Build the TOC line.

    :parameter toc_line_no_indent: the TOC line without indentation.
    :parameter no_of_indentation_spaces: the number of indentation spaces.
         Defaults to ``0``.
    :type toc_line_no_indent: str
    :type no_of_indentation_spaces: int
    :returns: toc_line, a single line of the table of contents.
    :rtype: str
    :raises: a built-in exception.
    """
    assert no_of_indentation_spaces >= 0
    # Left-pad the unindented line with the requested number of spaces.
    return ' ' * no_of_indentation_spaces + toc_line_no_indent
def get_atx_heading(line: str,
                    keep_header_levels: int = 3,
                    parser: str = 'github',
                    no_links: bool = False):
    r"""Given a line extract the link label and its type.

    :parameter line: the line to be examined.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :parameter no_links: disables the use of links.
         Defaults to ``False``.
    :type line: str
    :type keep_header_levels: int
    :type parser: str
    :type no_links: bool
    :returns: None if the line does not contain header elements according to
         the rules of the selected markdown parser, or a tuple containing the
         header type and the trimmed header text, according to the selected
         parser rules, otherwise.
    :rtype: typing.Optional[tuple]
    :raises: GithubEmptyLinkLabel or GithubOverflowCharsLinkLabel or a
         built-in exception.
    """
    assert keep_header_levels >= 1

    # An empty string can never be a header.
    if len(line) == 0:
        return None
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        # A line starting with a backslash escapes the '#' characters,
        # so it cannot be an ATX heading.
        if line[0] == '\u005c':
            return None
        # Consume the allowed leading spaces (at most
        # max_space_indentation per the GFM spec).
        i = 0
        while i < len(line) and line[i] == ' ' and i <= md_parser['github'][
                'header']['max_space_indentation']:
            i += 1
        if i > md_parser['github']['header']['max_space_indentation']:
            return None
        # Count consecutive '#' characters: their number is the header level.
        offset = i
        while i < len(line) and line[i] == '#' and i <= md_parser['github'][
                'header']['max_levels'] + offset:
            i += 1
        if i - offset > md_parser['github']['header'][
                'max_levels'] or i - offset > keep_header_levels or i - offset == 0:
            return None
        current_headers = i - offset
        # Include special cases for line endings which should not be
        # discarded as non-ATX headers.
        if i < len(line) and (line[i] != ' ' and line[i] != '\u000a'
                              and line[i] != '\u000d'):
            return None
        i += 1
        # Exclude leading whitespaces after the ATX header identifier.
        while i < len(line) and line[i] == ' ':
            i += 1
        # An algorithm to find the start and the end of the closing sequence.
        # The closing sequence includes all the significant part of the
        # string. This algorithm has a complexity of O(n) with n being the
        # length of the line.
        # The line is scanned in reverse (line_prime) to strip an optional
        # trailing run of '#' and spaces (the ATX closing sequence).
        cs_start = i
        cs_end = cs_start
        line_prime = line[::-1]
        hash_char_rounds = 0
        go_on = True
        i = 0
        i_prev = i
        while i < len(line) - cs_start - 1 and go_on:
            # A second run of '#' (hash_char_rounds > 1) or any other
            # character ends the closing-sequence scan.
            if ((line_prime[i] != ' ' and line_prime[i] != '#')
                    or hash_char_rounds > 1):
                if i > i_prev:
                    cs_end = len(line_prime) - i_prev
                else:
                    cs_end = len(line_prime) - i
                go_on = False
            while go_on and line_prime[i] == ' ':
                i += 1
            i_prev = i
            while go_on and line_prime[i] == '#':
                i += 1
            if i > i_prev:
                hash_char_rounds += 1
        # Instead of changing the whole algorithm to check for line
        # endings, this seems cleaner.
        find_newline = line.find('\u000a')
        find_carriage_return = line.find('\u000d')
        if find_newline != -1:
            cs_end = min(cs_end, find_newline)
        if find_carriage_return != -1:
            cs_end = min(cs_end, find_carriage_return)
        final_line = line[cs_start:cs_end]
        if not no_links:
            # A trailing backslash would escape the closing ')' of the
            # link, so pad it with a space.
            if len(final_line) > 0 and final_line[-1] == '\u005c':
                final_line += ' '
            # A link label consisting only of whitespace is invalid.
            if len(
                    final_line.strip('\u0020').strip('\u0009').strip('\u000a').
                    strip('\u000b').strip('\u000c').strip('\u000d')) == 0:
                raise GithubEmptyLinkLabel
            if len(final_line
                   ) > md_parser['github']['link']['max_chars_label']:
                raise GithubOverflowCharsLinkLabel
            # Escape square brackets if not already escaped.
            i = 0
            while i < len(final_line):
                if (final_line[i] == '[' or final_line[i] == ']'):
                    # Count the backslashes immediately preceding the
                    # bracket: an even number means the bracket itself is
                    # unescaped and needs a new backslash.
                    j = i - 1
                    consecutive_escape_characters = 0
                    while j >= 0 and final_line[j] == '\u005c':
                        consecutive_escape_characters += 1
                        j -= 1
                    if ((consecutive_escape_characters > 0
                         and consecutive_escape_characters % 2 == 0)
                            or consecutive_escape_characters == 0):
                        tmp = '\u005c'
                    else:
                        tmp = str()
                    final_line = final_line[0:i] + tmp + final_line[i:len(
                        final_line)]
                    # Skip past the (possibly inserted) escape and the
                    # bracket itself.
                    i += 1 + len(tmp)
                else:
                    i += 1
    elif parser == 'redcarpet':
        # Redcarpet does not allow leading spaces before the '#'.
        if line[0] != '#':
            return None
        i = 0
        while (i < len(line)
               and i < md_parser['redcarpet']['header']['max_levels']
               and line[i] == '#'):
            i += 1
        current_headers = i
        if i < len(line) and line[i] != ' ':
            return None
        while i < len(line) and line[i] == ' ':
            i += 1
        # Trim the trailing newline, closing '#' run and trailing spaces.
        end = i
        while end < len(line) and line[end] != '\n':
            end += 1
        while end > 0 and line[end - 1] == '#':
            end -= 1
        while end > 0 and line[end - 1] == ' ':
            end -= 1
        if end > i:
            final_line = line
            if not no_links and len(final_line) > 0 and final_line[-1] == '\\':
                final_line += ' '
            end += 1
            final_line = final_line[i:end]
        else:
            # The heading text is empty.
            return None
    # TODO: escape or remove '[', ']', '(', ')' in inline links for redcarpet,
    # TODO: check link label rules for redcarpet.
    return current_headers, final_line
def get_md_header(header_text_line: str,
                  header_duplicate_counter: dict,
                  keep_header_levels: int = 3,
                  parser: str = 'github',
                  no_links: bool = False) -> dict:
    r"""Build a data structure with the elements needed to create a TOC line.

    :parameter header_text_line: a single markdown line that needs to be
         transformed into a TOC line.
    :parameter header_duplicate_counter: a data structure that contains the
         number of occurrencies of each header anchor link. This is used to
         avoid duplicate anchor links and it is meaningful only for certain
         values of parser.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :parameter no_links: disables the use of links.
         Defaults to ``False``.
    :type header_text_line: str
    :type header_duplicate_counter: dict
    :type keep_header_levels: int
    :type parser: str
    :type no_links: bool
    :returns: None if the input line does not correspond to one of the
         designated cases or a data structure containing the necessary
         components to create a table of contents line, otherwise.
    :rtype: dict

    :raises: a built-in exception.

    .. note::
         This works like a wrapper to other functions.
    """
    # Delegate the parsing to get_atx_heading; a None result means the
    # line is not a header.
    result = get_atx_heading(header_text_line, keep_header_levels, parser,
                             no_links)
    if result is None:
        return result
    else:
        header_type, header_text_trimmed = result
        header = {
            'type':
            header_type,
            'text_original':
            header_text_trimmed,
            'text_anchor_link':
            build_anchor_link(header_text_trimmed, header_duplicate_counter,
                              parser)
        }
        return header
def is_valid_code_fence_indent(line: str, parser: str = 'github') -> bool:
    r"""Determine if the given line has valid indentation for a code block fence.

    :parameter line: a single markdown line to evaluate.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :type line: str
    :type parser: str
    :returns: True if the given line has valid indentation or False
         otherwise.
    :rtype: bool
    :raises: a built-in exception.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        # Up to min_marker_characters leading spaces are allowed before a
        # fence (GFM spec).
        return len(line) - len(line.lstrip(
            ' ')) <= md_parser['github']['code fence']['min_marker_characters']
    elif parser == 'redcarpet':
        # TODO.
        return False
    # Fix: unknown parsers previously fell through and returned None
    # implicitly, violating the declared bool return type.
    return False
def is_opening_code_fence(line: str, parser: str = 'github'):
    r"""Determine if the given line is possibly the opening of a fenced code block.

    :parameter line: a single markdown line to evaluate.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :type line: str
    :type parser: str
    :returns: None if the input line is not an opening code fence. Otherwise,
         returns the string which will identify the closing code fence
         according to the input parsers' rules.
    :rtype: typing.Optional[str]
    :raises: a built-in exception.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        markers = md_parser['github']['code fence']['marker']
        marker_min_length = md_parser['github']['code fence'][
            'min_marker_characters']

        if not is_valid_code_fence_indent(line):
            return None

        line = line.lstrip(' ').rstrip('\n')
        # The fence must start with at least marker_min_length backticks
        # or tildes.
        if not line.startswith(
            (markers[0] * marker_min_length, markers[1] * marker_min_length)):
            return None

        # Everything after the marker run is the info string.
        if line == len(line) * line[0]:
            info_string = str()
        else:
            info_string = line.lstrip(line[0])

        # Backticks or tildes in info string are explicitly forbidden.
        if markers[0] in info_string or markers[1] in info_string:
            return None

        # Solves example 107. See:
        # https://github.github.com/gfm/#example-107
        if line.rstrip(markers[0]) != line and line.rstrip(markers[1]) != line:
            return None

        # NOTE(review): rstrip strips by character set, not by suffix;
        # this works here because the info string cannot contain fence
        # characters (checked above) — confirm against the GFM examples.
        return line.rstrip(info_string)
    elif parser == 'redcarpet':
        # TODO.
        return None
def is_closing_code_fence(line: str,
                          fence: str,
                          is_document_end: bool = False,
                          parser: str = 'github') -> bool:
    r"""Determine if the given line is the end of a fenced code block.

    :parameter line: a single markdown line to evaluate.
    :parameter fence: a sequence of backticks or tildes marking the start of
         the current code block. This is usually the return value of the
         is_opening_code_fence function.
    :parameter is_document_end: This variable tells the function that the
         end of the file is reached.
         Defaults to ``False``.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :type line: str
    :type fence: str
    :type is_document_end: bool
    :type parser: str
    :returns: True if the line ends the current code block. False otherwise.
    :rtype: bool
    :raises: a built-in exception.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        markers = md_parser['github']['code fence']['marker']
        marker_min_length = md_parser['github']['code fence'][
            'min_marker_characters']

        if not is_valid_code_fence_indent(line):
            return False

        # Remove opening fence indentation after it is known to be valid.
        fence = fence.lstrip(' ')
        # Check if fence uses valid characters.
        if not fence.startswith((markers[0], markers[1])):
            return False

        if len(fence) < marker_min_length:
            return False

        # Additional security.
        fence = fence.rstrip('\n').rstrip(' ')

        # Check that all fence characters are equal.
        if fence != len(fence) * fence[0]:
            return False

        # We might be inside a code block if this is not closed
        # by the end of the document, according to example 95 and 96.
        # This means that the end of the document corresponds to
        # a closing code fence.
        # Of course we first have to check that fence is a valid opening
        # code fence marker.
        # See:
        # https://github.github.com/gfm/#example-95
        # https://github.github.com/gfm/#example-96
        if is_document_end:
            return True

        # Check if line uses the same character as fence.
        line = line.lstrip(' ')
        if not line.startswith(fence):
            return False

        line = line.rstrip('\n').rstrip(' ')

        # Solves example 93 and 94. See:
        # https://github.github.com/gfm/#example-93
        # https://github.github.com/gfm/#example-94
        # The closing fence must be at least as long as the opening one.
        if len(line) < len(fence):
            return False

        # Closing fence must not have alien characters.
        if line != len(line) * line[0]:
            return False

        return True
    elif parser == 'redcarpet':
        # TODO.
        return False
def build_indentation_list(parser: str = 'github'):
    r"""Create a data structure that holds the state of indentations.

    :parameter parser: decides the length of the list.
         Defaults to ``github``.
    :type parser: str
    :returns: indentation_list, a list that contains the state of
         indentations given a header type.
    :rtype: list
    :raises: a built-in exception.
    """
    # One boolean flag per possible header level, all initially unseen.
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker', 'redcarpet'):
        return [False] * md_parser[parser]['header']['max_levels']
    return list()
def toc_renders_as_coherent_list(
        header_type_curr: int = 1,
        indentation_list: list = None,
        parser: str = 'github') -> bool:
    r"""Check if the TOC will render as a working list.

    :parameter header_type_curr: the current type of header (h[1-Inf]).
         Defaults to ``1``.
    :parameter indentation_list: a list that holds the state of indentations.
         Defaults to ``None``, which builds a fresh
         ``build_indentation_list('github')``. Note: this list is mutated
         in place so the caller can carry the state across calls.
    :parameter parser: decides rules on how to generate ordered list markers.
         Defaults to ``github``.
    :type header_type_curr: int
    :type indentation_list: list
    :type parser: str
    :returns: renders_as_list
    :rtype: bool
    :raises: a built-in exception.
    """
    assert header_type_curr >= 1
    # Fix: the previous default argument was a list built once at function
    # definition time; since this function mutates indentation_list, the
    # shared default leaked state between calls.
    if indentation_list is None:
        indentation_list = build_indentation_list('github')
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        assert len(
            indentation_list) == md_parser[parser]['header']['max_levels']
        for e in indentation_list:
            assert isinstance(e, bool)

    renders_as_list = True
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        # Update with current information.
        indentation_list[header_type_curr - 1] = True

        # Reset next cells to False, as a detection mechanism.
        for i in range(header_type_curr,
                       md_parser['github']['header']['max_levels']):
            indentation_list[i] = False

        # Check for previous False cells. If there is a "hole" in the list
        # it means that the TOC will have "wrong" indentation spaces, thus
        # either not rendering as an HTML list or not as the user intended.
        i = header_type_curr - 1
        while i >= 0 and indentation_list[i]:
            i -= 1
        if i >= 0:
            renders_as_list = False

    return renders_as_list
if __name__ == '__main__':
    # This module is a library: it exposes no behavior when run directly.
    pass
|
frnmst/md-toc | md_toc/api.py | get_atx_heading | python | def get_atx_heading(line: str,
keep_header_levels: int = 3,
parser: str = 'github',
no_links: bool = False):
r"""Given a line extract the link label and its type.
:parameter line: the line to be examined.
:parameter keep_header_levels: the maximum level of headers to be
considered as such when building the table of contents.
Defaults to ``3``.
:parameter parser: decides rules on how to generate the anchor text.
Defaults to ``github``.
:parameter no_links: disables the use of links.
:type line: str
:type keep_header_levels: int
:type parser: str
:type np_links: bool
:returns: None if the line does not contain header elements according to
the rules of the selected markdown parser, or a tuple containing the
header type and the trimmed header text, according to the selected
parser rules, otherwise.
:rtype: typing.Optional[tuple]
:raises: GithubEmptyLinkLabel or GithubOverflowCharsLinkLabel or a
built-in exception.
"""
assert keep_header_levels >= 1
if len(line) == 0:
return None
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker'):
if line[0] == '\u005c':
return None
i = 0
while i < len(line) and line[i] == ' ' and i <= md_parser['github'][
'header']['max_space_indentation']:
i += 1
if i > md_parser['github']['header']['max_space_indentation']:
return None
offset = i
while i < len(line) and line[i] == '#' and i <= md_parser['github'][
'header']['max_levels'] + offset:
i += 1
if i - offset > md_parser['github']['header'][
'max_levels'] or i - offset > keep_header_levels or i - offset == 0:
return None
current_headers = i - offset
# Include special cases for line endings which should not be
# discarded as non-ATX headers.
if i < len(line) and (line[i] != ' ' and line[i] != '\u000a'
and line[i] != '\u000d'):
return None
i += 1
# Exclude leading whitespaces after the ATX header identifier.
while i < len(line) and line[i] == ' ':
i += 1
# An algorithm to find the start and the end of the closing sequence.
# The closing sequence includes all the significant part of the
# string. This algorithm has a complexity of O(n) with n being the
# length of the line.
cs_start = i
cs_end = cs_start
line_prime = line[::-1]
hash_char_rounds = 0
go_on = True
i = 0
i_prev = i
while i < len(line) - cs_start - 1 and go_on:
if ((line_prime[i] != ' ' and line_prime[i] != '#')
or hash_char_rounds > 1):
if i > i_prev:
cs_end = len(line_prime) - i_prev
else:
cs_end = len(line_prime) - i
go_on = False
while go_on and line_prime[i] == ' ':
i += 1
i_prev = i
while go_on and line_prime[i] == '#':
i += 1
if i > i_prev:
hash_char_rounds += 1
# Instead of changing the whole algorithm to check for line
# endings, this seems cleaner.
find_newline = line.find('\u000a')
find_carriage_return = line.find('\u000d')
if find_newline != -1:
cs_end = min(cs_end, find_newline)
if find_carriage_return != -1:
cs_end = min(cs_end, find_carriage_return)
final_line = line[cs_start:cs_end]
if not no_links:
if len(final_line) > 0 and final_line[-1] == '\u005c':
final_line += ' '
if len(
final_line.strip('\u0020').strip('\u0009').strip('\u000a').
strip('\u000b').strip('\u000c').strip('\u000d')) == 0:
raise GithubEmptyLinkLabel
if len(final_line
) > md_parser['github']['link']['max_chars_label']:
raise GithubOverflowCharsLinkLabel
# Escape square brackets if not already escaped.
i = 0
while i < len(final_line):
if (final_line[i] == '[' or final_line[i] == ']'):
j = i - 1
consecutive_escape_characters = 0
while j >= 0 and final_line[j] == '\u005c':
consecutive_escape_characters += 1
j -= 1
if ((consecutive_escape_characters > 0
and consecutive_escape_characters % 2 == 0)
or consecutive_escape_characters == 0):
tmp = '\u005c'
else:
tmp = str()
final_line = final_line[0:i] + tmp + final_line[i:len(
final_line)]
i += 1 + len(tmp)
else:
i += 1
elif parser == 'redcarpet':
if line[0] != '#':
return None
i = 0
while (i < len(line)
and i < md_parser['redcarpet']['header']['max_levels']
and line[i] == '#'):
i += 1
current_headers = i
if i < len(line) and line[i] != ' ':
return None
while i < len(line) and line[i] == ' ':
i += 1
end = i
while end < len(line) and line[end] != '\n':
end += 1
while end > 0 and line[end - 1] == '#':
end -= 1
while end > 0 and line[end - 1] == ' ':
end -= 1
if end > i:
final_line = line
if not no_links and len(final_line) > 0 and final_line[-1] == '\\':
final_line += ' '
end += 1
final_line = final_line[i:end]
else:
return None
# TODO: escape or remove '[', ']', '(', ')' in inline links for redcarpet,
# TODO: check link label rules for redcarpet.
return current_headers, final_line | r"""Given a line extract the link label and its type.
:parameter line: the line to be examined.
:parameter keep_header_levels: the maximum level of headers to be
considered as such when building the table of contents.
Defaults to ``3``.
:parameter parser: decides rules on how to generate the anchor text.
Defaults to ``github``.
:parameter no_links: disables the use of links.
:type line: str
:type keep_header_levels: int
:type parser: str
:type np_links: bool
:returns: None if the line does not contain header elements according to
the rules of the selected markdown parser, or a tuple containing the
header type and the trimmed header text, according to the selected
parser rules, otherwise.
:rtype: typing.Optional[tuple]
:raises: GithubEmptyLinkLabel or GithubOverflowCharsLinkLabel or a
built-in exception. | train | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/api.py#L668-L840 | null | #
# api.py
#
# Copyright (C) 2017-2019 frnmst (Franco Masotti) <franco.masotti@live.com>
#
# This file is part of md-toc.
#
# md-toc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# md-toc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with md-toc. If not, see <http://www.gnu.org/licenses/>.
#
"""The main file."""
import fpyutils
import re
import curses.ascii
import sys
from .exceptions import (GithubOverflowCharsLinkLabel, GithubEmptyLinkLabel,
GithubOverflowOrderedListMarker,
StdinIsNotAFileToBeWritten,
TocDoesNotRenderAsCoherentList)
from .constants import common_defaults
from .constants import parser as md_parser
def write_string_on_file_between_markers(filename: str, string: str,
                                         marker: str):
    r"""Write the table of contents on a single file.

    :parameter filename: the file that needs to be read or modified.
    :parameter string: the string that will be written on the file.
    :parameter marker: a marker that will identify the start
         and the end of the string.
    :type filename: str
    :type string: str
    :type marker: str
    :returns: None
    :rtype: None
    :raises: StdinIsNotAFileToBeWritten or an fpyutils exception
         or a built-in exception.
    """
    # '-' denotes standard input, which cannot be written back to.
    if filename == '-':
        raise StdinIsNotAFileToBeWritten

    # Surround the payload with the marker on both sides.
    final_string = marker + '\n\n' + string.rstrip() + '\n\n' + marker + '\n'
    # Find up to two occurrences of the marker: they delimit the span to
    # replace.
    marker_line_positions = fpyutils.get_line_matches(
        filename, marker, 2, loose_matching=True)

    if 1 in marker_line_positions:
        if 2 in marker_line_positions:
            # Two markers found: remove everything between them (inclusive).
            fpyutils.remove_line_interval(filename, marker_line_positions[1],
                                          marker_line_positions[2], filename)
        else:
            # Only one marker found: remove just that line.
            fpyutils.remove_line_interval(filename, marker_line_positions[1],
                                          marker_line_positions[1], filename)
        # Insert the new marker-delimited string where the old one was.
        # NOTE(review): nothing is written when no marker is present in the
        # file — confirm this is the intended no-op behavior.
        fpyutils.insert_string_at_line(
            filename,
            final_string,
            marker_line_positions[1],
            filename,
            append=False)
def write_strings_on_files_between_markers(filenames: list, strings: list,
                                           marker: str):
    r"""Write the table of contents on multiple files.

    :parameter filenames: the files that needs to be read or modified.
    :parameter strings: the strings that will be written on the file. Each
         string is associated with one file.
    :parameter marker: a marker that will identify the start
         and the end of the string.
    :type filenames: list
    :type strings: list
    :type marker: str
    :returns: None
    :rtype: None
    :raises: an fpyutils exception or a built-in exception.
    """
    assert len(filenames) == len(strings)
    for f in filenames:
        assert isinstance(f, str)
    for s in strings:
        assert isinstance(s, str)

    # Pair each file with its string instead of tracking a manual index.
    for filename, string in zip(filenames, strings):
        write_string_on_file_between_markers(filename, string, marker)
def build_toc(filename: str,
              ordered: bool = False,
              no_links: bool = False,
              no_indentation: bool = False,
              no_list_coherence: bool = False,
              keep_header_levels: int = 3,
              parser: str = 'github',
              list_marker: str = '-') -> str:
    r"""Build the table of contents of a single file.

    :parameter filename: the file that needs to be read. Use ``-`` for
         standard input.
    :parameter ordered: decides whether to build an ordered list or not.
         Defaults to ``False``.
    :parameter no_links: disables the use of links.
         Defaults to ``False``.
    :parameter no_indentation: disables indentation in the list.
         Defaults to ``False``.
    :parameter no_list_coherence: disables TOC list coherence checks.
         Defaults to ``False``.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``-``.
    :type filename: str
    :type ordered: bool
    :type no_links: bool
    :type no_indentation: bool
    :type no_list_coherence: bool
    :type keep_header_levels: int
    :type parser: str
    :type list_marker: str
    :returns: toc, the corresponding table of contents of the file.
    :rtype: str
    :raises: TocDoesNotRenderAsCoherentList or a built-in exception.
    """
    toc = str()
    header_type_counter = dict()
    header_type_curr = 0
    header_type_prev = 0
    header_duplicate_counter = dict()
    no_of_indentation_spaces_prev = 0
    # Fix: list_marker_log was previously initialized twice (once before
    # opening the file and once after); the first, redundant computation
    # has been removed.
    if filename == '-':
        f = sys.stdin
    else:
        f = open(filename, 'r')
    line = f.readline()
    if ordered:
        list_marker_log = build_list_marker_log(parser, list_marker)
    else:
        list_marker_log = list()
    is_within_code_fence = False
    code_fence = None
    is_document_end = False
    if not no_indentation and not no_list_coherence:
        # if indentation and list coherence.
        indentation_list = build_indentation_list(parser)
    while line:
        # Document ending detection.
        #
        # This changes the state of is_within_code_fence if the
        # file has no closing fence markers. This serves no practial
        # purpose since the code would run correctly anyway. It is
        # however more sematically correct.
        #
        # See the unit tests (examples 95 and 96 of the github parser)
        # and the is_closing_code_fence function.
        if filename != '-':
            # stdin is not seekable.
            file_pointer_pos = f.tell()
            if f.readline() == str():
                is_document_end = True
            f.seek(file_pointer_pos)

        # Code fence detection.
        if is_within_code_fence:
            is_within_code_fence = not is_closing_code_fence(
                line, code_fence, is_document_end, parser)
            line = f.readline()
        else:
            code_fence = is_opening_code_fence(line, parser)
            if code_fence is not None:
                # Update the status of the next line.
                is_within_code_fence = True
                line = f.readline()

        if not is_within_code_fence or code_fence is None:
            # Header detection and gathering.
            header = get_md_header(line, header_duplicate_counter,
                                   keep_header_levels, parser, no_links)
            if header is not None:
                header_type_curr = header['type']

                # Take care of the ordered TOC.
                if ordered:
                    increase_index_ordered_list(header_type_counter,
                                                header_type_prev,
                                                header_type_curr, parser)
                    index = header_type_counter[header_type_curr]
                else:
                    index = 1

                # Take care of list indentations.
                if no_indentation:
                    no_of_indentation_spaces_curr = 0
                    # TOC list coherence checks are not necessary
                    # without indentation.
                else:
                    if not no_list_coherence:
                        if not toc_renders_as_coherent_list(
                                header_type_curr, indentation_list, parser):
                            raise TocDoesNotRenderAsCoherentList
                    no_of_indentation_spaces_curr = compute_toc_line_indentation_spaces(
                        header_type_curr, header_type_prev,
                        no_of_indentation_spaces_prev, parser, ordered,
                        list_marker, list_marker_log, index)

                # Build a single TOC line.
                toc_line_no_indent = build_toc_line_without_indentation(
                    header, ordered, no_links, index, parser, list_marker)

                # Save the TOC line with the indentation.
                toc += build_toc_line(toc_line_no_indent,
                                      no_of_indentation_spaces_curr) + '\n'

                header_type_prev = header_type_curr
                no_of_indentation_spaces_prev = no_of_indentation_spaces_curr

            # endif
        # endif

            line = f.readline()
    # endwhile
    f.close()

    return toc
def build_multiple_tocs(filenames: list,
                        ordered: bool = False,
                        no_links: bool = False,
                        no_indentation: bool = False,
                        no_list_coherence: bool = False,
                        keep_header_levels: int = 3,
                        parser: str = 'github',
                        list_marker: str = '-') -> list:
    r"""Parse files by line and build the table of contents of each file.

    :parameter filenames: the files that needs to be read. An empty list
         falls back to standard input (``-``).
    :parameter ordered: decides whether to build an ordered list or not.
         Defaults to ``False``.
    :parameter no_links: disables the use of links.
         Defaults to ``False``.
    :parameter no_indentation: disables indentation in the list.
         Defaults to ``False``.
    :parameter no_list_coherence: disables TOC list coherence checks.
         Defaults to ``False``.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``-``.
    :type filenames: list
    :type ordered: bool
    :type no_links: bool
    :type no_indentation: bool
    :type no_list_coherence: bool
    :type keep_header_levels: int
    :type parser: str
    :type list_marker: str
    :returns: toc_struct, the corresponding table of contents for each input
         file.
    :rtype: list
    :raises: a built-in exception.
    """
    for f in filenames:
        assert isinstance(f, str)

    # No input files means reading from standard input.
    if len(filenames) == 0:
        filenames.append('-')

    # Build one TOC per input file, preserving input order; replaces the
    # previous manual while-loop index bookkeeping.
    toc_struct = [
        build_toc(filename, ordered, no_links, no_indentation,
                  no_list_coherence, keep_header_levels, parser, list_marker)
        for filename in filenames
    ]
    return toc_struct
def increase_index_ordered_list(header_type_count: dict,
                                header_type_prev: int,
                                header_type_curr: int,
                                parser: str = 'github'):
    r"""Compute the current index for ordered list table of contents.

    :parameter header_type_count: the count of each header type. This dict
         is updated in place.
    :parameter header_type_prev: the previous type of header (h[1-Inf]).
    :parameter header_type_curr: the current type of header (h[1-Inf]).
    :parameter parser: decides rules on how to generate ordered list markers.
         Defaults to ``github``.
    :type header_type_count: dict
    :type header_type_prev: int
    :type header_type_curr: int
    :type parser: str
    :returns: None
    :rtype: None
    :raises: GithubOverflowOrderedListMarker or a built-in exception.
    """
    # header_type_prev might be 0 while header_type_curr can't.
    assert header_type_prev >= 0
    assert header_type_curr >= 1

    # Base cases for a new table of contents or a new index type.
    # Note: this reassignment is local only; it just makes the following
    # condition treat the very first header as a "same level" case.
    if header_type_prev == 0:
        header_type_prev = header_type_curr
    # Start a fresh counter when entering a deeper nesting level or when
    # the level has never been seen before.
    if (header_type_curr not in header_type_count
            or header_type_prev < header_type_curr):
        header_type_count[header_type_curr] = 0

    header_type_count[header_type_curr] += 1

    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        # GitHub caps the numeric value of an ordered list marker.
        if header_type_count[header_type_curr] > md_parser['github']['list'][
                'ordered']['max_marker_number']:
            raise GithubOverflowOrderedListMarker
def build_list_marker_log(parser: str = 'github',
                          list_marker: str = '.') -> list:
    r"""Create a data structure that holds list marker information.

    :parameter parser: decides rules on how compute indentations.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``.``.
    :type parser: str
    :type list_marker: str
    :returns: list_marker_log, the data structure.
    :rtype: list
    :raises: a built-in exception.

    .. note::
         This function makes sense for ordered lists only.
    """
    github_like = ('github', 'cmark', 'gitlab', 'commonmarker')
    if parser in github_like or parser == 'redcarpet':
        assert list_marker in md_parser[parser]['list']['ordered'][
            'closing_markers']

    list_marker_log = list()
    if parser in github_like:
        # One initial marker (e.g. '1.') per possible header level.
        initial_marker = str(
            md_parser['github']['list']['ordered']['min_marker_number']
        ) + list_marker
        for _ in range(md_parser['github']['header']['max_levels']):
            list_marker_log.append(initial_marker)
    elif parser == 'redcarpet':
        pass

    return list_marker_log
def compute_toc_line_indentation_spaces(
        header_type_curr: int = 1,
        header_type_prev: int = 0,
        no_of_indentation_spaces_prev: int = 0,
        parser: str = 'github',
        ordered: bool = False,
        list_marker: str = '-',
        list_marker_log: list = None,
        index: int = 1) -> int:
    r"""Compute the number of indentation spaces for the TOC list element.

    :parameter header_type_curr: the current type of header (h[1-Inf]).
         Defaults to ``1``.
    :parameter header_type_prev: the previous type of header (h[1-Inf]).
         Defaults to ``0``.
    :parameter no_of_indentation_spaces_prev: the number of previous indentation spaces.
         Defaults to ``0``.
    :parameter parser: decides rules on how compute indentations.
         Defaults to ``github``.
    :parameter ordered: if set to ``True``, numbers will be used
         as list ids or otherwise a dash character, otherwise.
         Defaults to ``False``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element.
         Defaults to ``-``.
    :parameter list_marker_log: a data structure that holds list marker
         information for ordered lists. It is mutated in place so the
         caller can carry the state across calls.
         Defaults to ``None``, which builds a fresh
         ``build_list_marker_log('github', '.')``.
    :parameter index: a number that will be used as list id in case of an
         ordered table of contents. Defaults to ``1``.
    :type header_type_curr: int
    :type header_type_prev: int
    :type no_of_indentation_spaces_prev: int
    :type parser: str
    :type ordered: bool
    :type list_marker: str
    :type list_marker_log: list
    :type index: int
    :returns: no_of_indentation_spaces_curr, the number of indentation spaces
         for the list element.
    :rtype: int
    :raises: a built-in exception.

    .. note::
         Please note that this function
         assumes that no_of_indentation_spaces_prev contains the correct
         number of spaces.
    """
    assert header_type_curr >= 1
    assert header_type_prev >= 0
    assert no_of_indentation_spaces_prev >= 0
    # Fix: the previous default argument was a list built once at function
    # definition time; since this function mutates list_marker_log, the
    # shared default leaked state between calls.
    if list_marker_log is None:
        list_marker_log = build_list_marker_log('github', '.')
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        if ordered:
            assert list_marker in md_parser[parser]['list']['ordered'][
                'closing_markers']
        else:
            assert list_marker in md_parser[parser]['list']['unordered'][
                'bullet_markers']
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        if ordered:
            assert len(
                list_marker_log) == md_parser['github']['header']['max_levels']
            for e in list_marker_log:
                assert isinstance(e, str)
    assert index >= 1

    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        if header_type_prev == 0:
            # Base case for the first toc line.
            no_of_indentation_spaces_curr = 0
        elif header_type_curr == header_type_prev:
            # Base case for same indentation.
            no_of_indentation_spaces_curr = no_of_indentation_spaces_prev
        else:
            if ordered:
                list_marker_prev = str(list_marker_log[header_type_curr - 1])
            else:
                # list_marker for unordered lists will always be 1 character.
                list_marker_prev = list_marker

            # Generic cases.
            if header_type_curr > header_type_prev:
                # More indentation.
                no_of_indentation_spaces_curr = (
                    no_of_indentation_spaces_prev + len(list_marker_prev) +
                    len(' '))
            elif header_type_curr < header_type_prev:
                # Less indentation.
                no_of_indentation_spaces_curr = (
                    no_of_indentation_spaces_prev -
                    (len(list_marker_prev) + len(' ')))

            # Reset older nested list indices. If this is not performed then
            # future nested ordered lists will rely on incorrect data to
            # compute indentations.
            if ordered:
                for i in range((header_type_curr - 1) + 1,
                               md_parser['github']['header']['max_levels']):
                    list_marker_log[i] = str(
                        md_parser['github']['list']['ordered']
                        ['min_marker_number']) + list_marker

        # Update the data structure.
        if ordered:
            list_marker_log[header_type_curr - 1] = str(index) + list_marker
    elif parser == 'redcarpet':
        no_of_indentation_spaces_curr = 4 * (header_type_curr - 1)

    return no_of_indentation_spaces_curr
def build_toc_line_without_indentation(header: dict,
                                       ordered: bool = False,
                                       no_links: bool = False,
                                       index: int = 1,
                                       parser: str = 'github',
                                       list_marker: str = '-') -> str:
    r"""Return a list element of the table of contents.

    :parameter header: a data structure that contains the original
         text, the trimmed text and the type of header.
    :parameter ordered: if set to ``True``, numbers will be used
         as list ids, otherwise a dash character. Defaults
         to ``False``.
    :parameter no_links: disables the use of links. Defaults to ``False``.
    :parameter index: a number that will be used as list id in case of an
         ordered table of contents. Defaults to ``1``.
    :parameter parser: decides rules on how compute indentations.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``-``.
    :type header: dict
    :type ordered: bool
    :type no_links: bool
    :type index: int
    :type parser: str
    :type list_marker: str
    :returns: toc_line_no_indent, a single line of the table of contents
         without indentation.
    :rtype: str
    :raises: a built-in exception.
    """
    # Validate the header data structure before using it.
    for key in ['type', 'text_original', 'text_anchor_link']:
        assert key in header
    assert isinstance(header['type'], int)
    assert isinstance(header['text_original'], str)
    assert isinstance(header['text_anchor_link'], str)
    assert header['type'] >= 1
    assert index >= 1
    known_parsers = ('github', 'cmark', 'gitlab', 'commonmarker', 'redcarpet')
    if parser in known_parsers:
        if ordered:
            assert list_marker in md_parser[parser]['list']['ordered'][
                'closing_markers']
        else:
            assert list_marker in md_parser[parser]['list']['unordered'][
                'bullet_markers']

    toc_line_no_indent = str()
    if parser in known_parsers:
        # For ordered lists prepend the index to the closing marker,
        # e.g. '1.'.
        # FIXME: is this always correct?
        marker = str(index) + list_marker if ordered else list_marker
        if no_links:
            content = header['text_original']
        else:
            # Build an inline markdown link pointing to the anchor.
            content = ('[' + header['text_original'] + ']' + '(#' +
                       header['text_anchor_link'] + ')')
        toc_line_no_indent = marker + ' ' + content
    return toc_line_no_indent
def build_toc_line(toc_line_no_indent: str,
                   no_of_indentation_spaces: int = 0) -> str:
    r"""Build the TOC line.

    :parameter toc_line_no_indent: the TOC line without indentation.
    :parameter no_of_indentation_spaces: the number of indentation spaces.
         Defaults to ``0``.
    :type toc_line_no_indent: str
    :type no_of_indentation_spaces: int
    :returns: toc_line, a single line of the table of contents.
    :rtype: str
    :raises: a built-in exception.
    """
    assert no_of_indentation_spaces >= 0

    # Prepend the requested amount of spaces to the bare TOC line.
    return ' ' * no_of_indentation_spaces + toc_line_no_indent
def build_anchor_link(header_text_trimmed: str,
                      header_duplicate_counter: str,
                      parser: str = 'github') -> str:
    r"""Apply the specified slug rule to build the anchor link.

    :parameter header_text_trimmed: the text that needs to be transformed
        in a link.
    :parameter header_duplicate_counter: a data structure that keeps track of
        possible duplicate header links in order to avoid them. This is
        meaningful only for certain values of parser.
    :parameter parser: decides rules on how to generate anchor links.
        Defaults to ``github``.
    :type header_text_trimmed: str
    :type header_duplicate_counter: dict
    :type parser: str
    :returns: None if the specified parser is not recognized, or the anchor
        link, otherwise.
    :rtype: str
    :raises: a built-in exception.

    .. note::
        The licenses of each markdown parser algorithm are reported on
        the 'Markdown spec' documentation page.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        header_text_trimmed = header_text_trimmed.lower()
        # Remove punctuation: keep spaces, hyphens and "word characters"
        # only.
        header_text_trimmed = re.sub(r'[^\w\s\- ]', '', header_text_trimmed)
        header_text_trimmed = header_text_trimmed.replace(' ', '-')

        # Check for duplicates.
        ht = header_text_trimmed
        # Set the initial value if we are examining the first occurrence.
        # The state of header_duplicate_counter is available to the caller
        # functions.
        if header_text_trimmed not in header_duplicate_counter:
            header_duplicate_counter[header_text_trimmed] = 0
        if header_duplicate_counter[header_text_trimmed] > 0:
            # Append '-N' to de-duplicate repeated header slugs.
            header_text_trimmed = header_text_trimmed + '-' + str(
                header_duplicate_counter[header_text_trimmed])
        header_duplicate_counter[ht] += 1
        return header_text_trimmed
    elif parser == 'redcarpet':
        # To ensure full compatibility what follows is a direct translation
        # of the rndr_header_anchor C function used in redcarpet.
        STRIPPED = " -&+$,/:;=?@\"#{}|^~[]`\\*()%.!'"
        header_text_trimmed_len = len(header_text_trimmed)
        inserted = 0
        stripped = 0
        header_text_trimmed_middle_stage = ''
        # Use an explicit while loop so the index advances performed by the
        # inner "skip" loops are preserved. The previous
        # ``for i in range(...)`` version reset ``i`` on every iteration,
        # so HTML tags (``<...>``) and entities (``&...;``) were NOT
        # actually skipped, unlike in the original C code
        # ``for (i = 0; i < size; ++i)``.
        i = 0
        while i < header_text_trimmed_len:
            if header_text_trimmed[i] == '<':
                # Skip the HTML tag; the final ``i += 1`` below also steps
                # past the closing '>'.
                while i < header_text_trimmed_len and header_text_trimmed[
                        i] != '>':
                    i += 1
            elif header_text_trimmed[i] == '&':
                # Skip the HTML entity up to (and, via the final increment,
                # including) the closing ';'.
                while i < header_text_trimmed_len and header_text_trimmed[
                        i] != ';':
                    i += 1
            # str.find() == -1 if character is not found in str.
            # https://docs.python.org/3.6/library/stdtypes.html?highlight=find#str.find
            elif not curses.ascii.isascii(
                    header_text_trimmed[i]) or STRIPPED.find(
                        header_text_trimmed[i]) != -1:
                # Collapse runs of stripped characters into a single '-'.
                if inserted and not stripped:
                    header_text_trimmed_middle_stage += '-'
                stripped = 1
            else:
                header_text_trimmed_middle_stage += header_text_trimmed[
                    i].lower()
                stripped = 0
                inserted += 1
            # Mirror the ``++i`` of the C for loop.
            i += 1

        if stripped > 0 and inserted > 0:
            # Drop the trailing '-' left by stripped characters at the end.
            header_text_trimmed_middle_stage = header_text_trimmed_middle_stage[
                0:-1]

        if inserted == 0 and header_text_trimmed_len > 0:
            # No printable characters survived: fall back to a djb2 hash of
            # the input, as redcarpet does.
            hash = 5381
            for i in range(0, header_text_trimmed_len):
                # Get the unicode representation with ord.
                # Unicode should be equal to ASCII in ASCII's range of
                # characters.
                hash = ((hash << 5) + hash) + ord(header_text_trimmed[i])

            # This is equivalent to %x in C. In Python we don't have
            # the length problem so %x is equal to %lx in this case.
            # Apparently there is no %l in Python...
            # NOTE(review): Python integers do not wrap like C's unsigned
            # long, so very long inputs may diverge from redcarpet here —
            # confirm against the C implementation.
            header_text_trimmed_middle_stage = 'part-' + '{0:x}'.format(hash)

        return header_text_trimmed_middle_stage
def get_md_header(header_text_line: str,
                  header_duplicate_counter: dict,
                  keep_header_levels: int = 3,
                  parser: str = 'github',
                  no_links: bool = False) -> dict:
    r"""Build a data structure with the elements needed to create a TOC line.

    :parameter header_text_line: a single markdown line that needs to be
         transformed into a TOC line.
    :parameter header_duplicate_counter: a data structure that contains the
         number of occurrences of each header anchor link. This is used to
         avoid duplicate anchor links and it is meaningful only for certain
         values of parser.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :parameter no_links: disables the use of links. Defaults to ``False``.
    :type header_text_line: str
    :type header_duplicate_counter: dict
    :type keep_header_levels: int
    :type parser: str
    :type no_links: bool
    :returns: None if the input line does not correspond to one of the
         designated cases or a data structure containing the necessary
         components to create a table of contents line, otherwise.
    :rtype: dict
    :raises: a built-in exception.

    .. note::
         This works like a wrapper to other functions.
    """
    result = get_atx_heading(header_text_line, keep_header_levels, parser,
                             no_links)
    # Guard clause: the line is not an ATX heading that should appear in
    # the TOC.
    if result is None:
        return None
    header_type, header_text_trimmed = result
    return {
        'type':
        header_type,
        'text_original':
        header_text_trimmed,
        'text_anchor_link':
        build_anchor_link(header_text_trimmed, header_duplicate_counter,
                          parser),
    }
def is_valid_code_fence_indent(line: str, parser: str = 'github') -> bool:
    r"""Determine if the given line has valid indentation for a code block fence.

    :parameter line: a single markdown line to evaluate.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :type line: str
    :type parser: str
    :returns: True if the given line has valid indentation or False
         otherwise.
    :rtype: bool
    :raises: a built-in exception.
    """
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        # The indentation is the number of leading space characters.
        indentation = len(line) - len(line.lstrip(' '))
        return indentation <= md_parser['github']['code fence'][
            'min_marker_characters']
    elif parser == 'redcarpet':
        # TODO.
        return False
def is_opening_code_fence(line: str, parser: str = 'github'):
    r"""Determine if the given line is possibly the opening of a fenced code block.

    :parameter line: a single markdown line to evaluate.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :type line: str
    :type parser: str
    :returns: None if the input line is not an opening code fence. Otherwise,
         returns the string which will identify the closing code fence
         according to the input parsers' rules.
    :rtype: typing.Optional[str]
    :raises: a built-in exception.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        markers = md_parser['github']['code fence']['marker']
        marker_min_length = md_parser['github']['code fence'][
            'min_marker_characters']

        # A fence opening may only be indented by a few spaces.
        if not is_valid_code_fence_indent(line):
            return None
        line = line.lstrip(' ').rstrip('\n')
        # The line must start with at least the minimum run of a single
        # fence character (all backticks or all tildes).
        if not line.startswith(
            (markers[0] * marker_min_length, markers[1] * marker_min_length)):
            return None
        # Everything after the run of fence characters is the info string.
        if line == len(line) * line[0]:
            info_string = str()
        else:
            info_string = line.lstrip(line[0])
        # Backticks or tildes in info string are explicitly forbidden.
        if markers[0] in info_string or markers[1] in info_string:
            return None
        # Solves example 107. See:
        # https://github.github.com/gfm/#example-107
        if line.rstrip(markers[0]) != line and line.rstrip(markers[1]) != line:
            return None
        # NOTE(review): str.rstrip treats info_string as a *set* of
        # characters to strip, not as a literal suffix — presumably
        # intentional here since the goal is to keep only the leading run
        # of fence characters; confirm against the GFM fence examples.
        return line.rstrip(info_string)
    elif parser == 'redcarpet':
        # TODO.
        return None
def is_closing_code_fence(line: str,
                          fence: str,
                          is_document_end: bool = False,
                          parser: str = 'github') -> bool:
    r"""Determine if the given line is the end of a fenced code block.

    :parameter line: a single markdown line to evaluate.
    :parameter fence: a sequence of backticks or tildes marking the start of
         the current code block. This is usually the return value of the
         is_opening_code_fence function.
    :parameter is_document_end: this variable tells the function that the
         end of the file is reached.
         Defaults to ``False``.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :type line: str
    :type fence: str
    :type is_document_end: bool
    :type parser: str
    :returns: True if the line ends the current code block. False otherwise.
    :rtype: bool
    :raises: a built-in exception.
    """
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        markers = md_parser['github']['code fence']['marker']
        marker_min_length = md_parser['github']['code fence'][
            'min_marker_characters']

        if not is_valid_code_fence_indent(line):
            return False

        # Validate the opening fence marker first: correct characters,
        # minimum length, and homogeneity.
        fence = fence.lstrip(' ')
        if not fence.startswith((markers[0], markers[1])):
            return False
        if len(fence) < marker_min_length:
            return False
        fence = fence.rstrip('\n').rstrip(' ')
        if fence != len(fence) * fence[0]:
            return False

        # An unterminated code block is implicitly closed by the end of
        # the document, provided the fence itself was valid. See:
        # https://github.github.com/gfm/#example-95
        # https://github.github.com/gfm/#example-96
        if is_document_end:
            return True

        # The closing line must start with the same fence, be at least as
        # long, and contain only that fence character. See:
        # https://github.github.com/gfm/#example-93
        # https://github.github.com/gfm/#example-94
        line = line.lstrip(' ')
        if not line.startswith(fence):
            return False
        line = line.rstrip('\n').rstrip(' ')
        if len(line) < len(fence):
            return False
        if line != len(line) * line[0]:
            return False

        return True
    elif parser == 'redcarpet':
        # TODO.
        return False
def build_indentation_list(parser: str = 'github'):
    r"""Create a data structure that holds the state of indentations.

    :parameter parser: decides the length of the list.
         Defaults to ``github``.
    :type parser: str
    :returns: indentation_list, a list that contains the state of
         indentations given a header type.
    :rtype: list
    :raises: a built-in exception.
    """
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker', 'redcarpet'):
        # One boolean flag per possible header level, all initially unset.
        return [False] * md_parser[parser]['header']['max_levels']
    # Unrecognized parsers get an empty state.
    return []
def toc_renders_as_coherent_list(header_type_curr: int = 1,
                                 indentation_list: list = None,
                                 parser: str = 'github') -> bool:
    r"""Check if the TOC will render as a working list.

    :parameter header_type_curr: the current type of header (h[1-Inf]).
         Defaults to ``1``.
    :parameter indentation_list: the state of indentation for each header
         type, as produced by build_indentation_list. It is updated in
         place. If ``None``, a fresh list is built for this call.
         Defaults to ``None``.
    :parameter parser: decides rules on how to generate ordered list markers.
         Defaults to ``github``.
    :type header_type_curr: int
    :type indentation_list: list
    :type parser: str
    :returns: renders_as_list
    :rtype: bool
    :raises: a built-in exception.
    """
    assert header_type_curr >= 1
    # Build the default lazily. The previous default argument
    # ``build_indentation_list('github')`` was evaluated once, at function
    # definition time, and then mutated in place by the body below, so
    # state leaked between unrelated calls that relied on the default.
    if indentation_list is None:
        indentation_list = build_indentation_list(parser)
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        assert len(
            indentation_list) == md_parser[parser]['header']['max_levels']
        for e in indentation_list:
            assert isinstance(e, bool)

    renders_as_list = True
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        # Update with current information.
        indentation_list[header_type_curr - 1] = True
        # Reset next cells to False, as a detection mechanism. Use the
        # selected parser's max_levels for consistency with the length
        # assert above.
        for i in range(header_type_curr,
                       md_parser[parser]['header']['max_levels']):
            indentation_list[i] = False
        # Check for previous False cells. If there is a "hole" in the list
        # it means that the TOC will have "wrong" indentation spaces, thus
        # either not rendering as an HTML list or not as the user intended.
        i = header_type_curr - 1
        while i >= 0 and indentation_list[i]:
            i -= 1
        if i >= 0:
            renders_as_list = False
    return renders_as_list
# Module entry point: all functionality is exposed through the API
# functions above, so direct execution is intentionally a no-op.
if __name__ == '__main__':
    pass
|
frnmst/md-toc | md_toc/api.py | get_md_header | python | def get_md_header(header_text_line: str,
header_duplicate_counter: dict,
keep_header_levels: int = 3,
parser: str = 'github',
no_links: bool = False) -> dict:
r"""Build a data structure with the elements needed to create a TOC line.
:parameter header_text_line: a single markdown line that needs to be
transformed into a TOC line.
:parameter header_duplicate_counter: a data structure that contains the
number of occurrencies of each header anchor link. This is used to
avoid duplicate anchor links and it is meaningful only for certain
values of parser.
:parameter keep_header_levels: the maximum level of headers to be
considered as such when building the table of contents.
Defaults to ``3``.
:parameter parser: decides rules on how to generate anchor links.
Defaults to ``github``.
:type header_text_line: str
:type header_duplicate_counter: dict
:type keep_header_levels: int
:type parser: str
:returns: None if the input line does not correspond to one of the
designated cases or a data structure containing the necessary
components to create a table of contents line, otherwise.
:rtype: dict
:raises: a built-in exception.
.. note::
This works like a wrapper to other functions.
"""
result = get_atx_heading(header_text_line, keep_header_levels, parser,
no_links)
if result is None:
return result
else:
header_type, header_text_trimmed = result
header = {
'type':
header_type,
'text_original':
header_text_trimmed,
'text_anchor_link':
build_anchor_link(header_text_trimmed, header_duplicate_counter,
parser)
}
return header | r"""Build a data structure with the elements needed to create a TOC line.
:parameter header_text_line: a single markdown line that needs to be
transformed into a TOC line.
:parameter header_duplicate_counter: a data structure that contains the
number of occurrences of each header anchor link. This is used to
avoid duplicate anchor links and it is meaningful only for certain
values of parser.
:parameter keep_header_levels: the maximum level of headers to be
considered as such when building the table of contents.
Defaults to ``3``.
:parameter parser: decides rules on how to generate anchor links.
Defaults to ``github``.
:type header_text_line: str
:type header_duplicate_counter: dict
:type keep_header_levels: int
:type parser: str
:returns: None if the input line does not correspond to one of the
designated cases or a data structure containing the necessary
components to create a table of contents line, otherwise.
:rtype: dict
:raises: a built-in exception.
.. note::
This works like a wrapper to other functions. | train | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/api.py#L843-L889 | [
"def get_atx_heading(line: str,\n keep_header_levels: int = 3,\n parser: str = 'github',\n no_links: bool = False):\n r\"\"\"Given a line extract the link label and its type.\n\n :parameter line: the line to be examined.\n :parameter keep_header_levels: the maximum level of headers to be\n considered as such when building the table of contents.\n Defaults to ``3``.\n :parameter parser: decides rules on how to generate the anchor text.\n Defaults to ``github``.\n :parameter no_links: disables the use of links.\n :type line: str\n :type keep_header_levels: int\n :type parser: str\n :type np_links: bool\n :returns: None if the line does not contain header elements according to\n the rules of the selected markdown parser, or a tuple containing the\n header type and the trimmed header text, according to the selected\n parser rules, otherwise.\n :rtype: typing.Optional[tuple]\n :raises: GithubEmptyLinkLabel or GithubOverflowCharsLinkLabel or a\n built-in exception.\n \"\"\"\n assert keep_header_levels >= 1\n\n if len(line) == 0:\n return None\n\n if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'\n or parser == 'commonmarker'):\n\n if line[0] == '\\u005c':\n return None\n\n i = 0\n while i < len(line) and line[i] == ' ' and i <= md_parser['github'][\n 'header']['max_space_indentation']:\n i += 1\n if i > md_parser['github']['header']['max_space_indentation']:\n return None\n\n offset = i\n while i < len(line) and line[i] == '#' and i <= md_parser['github'][\n 'header']['max_levels'] + offset:\n i += 1\n if i - offset > md_parser['github']['header'][\n 'max_levels'] or i - offset > keep_header_levels or i - offset == 0:\n return None\n current_headers = i - offset\n\n # Include special cases for line endings which should not be\n # discarded as non-ATX headers.\n if i < len(line) and (line[i] != ' ' and line[i] != '\\u000a'\n and line[i] != '\\u000d'):\n return None\n\n i += 1\n # Exclude leading whitespaces after the ATX header identifier.\n while i < 
len(line) and line[i] == ' ':\n i += 1\n\n # An algorithm to find the start and the end of the closing sequence.\n # The closing sequence includes all the significant part of the\n # string. This algorithm has a complexity of O(n) with n being the\n # length of the line.\n cs_start = i\n cs_end = cs_start\n line_prime = line[::-1]\n hash_char_rounds = 0\n go_on = True\n i = 0\n i_prev = i\n while i < len(line) - cs_start - 1 and go_on:\n if ((line_prime[i] != ' ' and line_prime[i] != '#')\n or hash_char_rounds > 1):\n if i > i_prev:\n cs_end = len(line_prime) - i_prev\n else:\n cs_end = len(line_prime) - i\n go_on = False\n while go_on and line_prime[i] == ' ':\n i += 1\n i_prev = i\n while go_on and line_prime[i] == '#':\n i += 1\n if i > i_prev:\n hash_char_rounds += 1\n\n # Instead of changing the whole algorithm to check for line\n # endings, this seems cleaner.\n find_newline = line.find('\\u000a')\n find_carriage_return = line.find('\\u000d')\n if find_newline != -1:\n cs_end = min(cs_end, find_newline)\n if find_carriage_return != -1:\n cs_end = min(cs_end, find_carriage_return)\n\n final_line = line[cs_start:cs_end]\n\n if not no_links:\n if len(final_line) > 0 and final_line[-1] == '\\u005c':\n final_line += ' '\n if len(\n final_line.strip('\\u0020').strip('\\u0009').strip('\\u000a').\n strip('\\u000b').strip('\\u000c').strip('\\u000d')) == 0:\n raise GithubEmptyLinkLabel\n if len(final_line\n ) > md_parser['github']['link']['max_chars_label']:\n raise GithubOverflowCharsLinkLabel\n # Escape square brackets if not already escaped.\n i = 0\n while i < len(final_line):\n if (final_line[i] == '[' or final_line[i] == ']'):\n j = i - 1\n consecutive_escape_characters = 0\n while j >= 0 and final_line[j] == '\\u005c':\n consecutive_escape_characters += 1\n j -= 1\n if ((consecutive_escape_characters > 0\n and consecutive_escape_characters % 2 == 0)\n or consecutive_escape_characters == 0):\n tmp = '\\u005c'\n else:\n tmp = str()\n final_line = final_line[0:i] + 
tmp + final_line[i:len(\n final_line)]\n i += 1 + len(tmp)\n else:\n i += 1\n\n elif parser == 'redcarpet':\n\n if line[0] != '#':\n return None\n\n i = 0\n while (i < len(line)\n and i < md_parser['redcarpet']['header']['max_levels']\n and line[i] == '#'):\n i += 1\n current_headers = i\n\n if i < len(line) and line[i] != ' ':\n return None\n\n while i < len(line) and line[i] == ' ':\n i += 1\n\n end = i\n while end < len(line) and line[end] != '\\n':\n end += 1\n\n while end > 0 and line[end - 1] == '#':\n end -= 1\n\n while end > 0 and line[end - 1] == ' ':\n end -= 1\n\n if end > i:\n final_line = line\n if not no_links and len(final_line) > 0 and final_line[-1] == '\\\\':\n final_line += ' '\n end += 1\n final_line = final_line[i:end]\n else:\n return None\n\n # TODO: escape or remove '[', ']', '(', ')' in inline links for redcarpet,\n # TODO: check link label rules for redcarpet.\n\n return current_headers, final_line\n",
"def build_anchor_link(header_text_trimmed: str,\n header_duplicate_counter: str,\n parser: str = 'github') -> str:\n r\"\"\"Apply the specified slug rule to build the anchor link.\n\n :parameter header_text_trimmed: the text that needs to be transformed\n in a link.\n :parameter header_duplicate_counter: a data structure that keeps track of\n possible duplicate header links in order to avoid them. This is\n meaningful only for certain values of parser.\n :parameter parser: decides rules on how to generate anchor links.\n Defaults to ``github``.\n :type header_text_trimmed: str\n :type header_duplicate_counter: dict\n :type parser: str\n :returns: None if the specified parser is not recognized, or the anchor\n link, otherwise.\n :rtype: str\n :raises: a built-in exception.\n\n .. note::\n The licenses of each markdown parser algorithm are reported on\n the 'Markdown spec' documentation page.\n \"\"\"\n if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'\n or parser == 'commonmarker'):\n header_text_trimmed = header_text_trimmed.lower()\n # Remove punctuation: Keep spaces, hypens and \"word characters\"\n # only.\n header_text_trimmed = re.sub(r'[^\\w\\s\\- ]', '', header_text_trimmed)\n header_text_trimmed = header_text_trimmed.replace(' ', '-')\n\n # Check for duplicates.\n ht = header_text_trimmed\n # Set the initial value if we are examining the first occurrency.\n # The state of header_duplicate_counter is available to the caller\n # functions.\n if header_text_trimmed not in header_duplicate_counter:\n header_duplicate_counter[header_text_trimmed] = 0\n if header_duplicate_counter[header_text_trimmed] > 0:\n header_text_trimmed = header_text_trimmed + '-' + str(\n header_duplicate_counter[header_text_trimmed])\n header_duplicate_counter[ht] += 1\n return header_text_trimmed\n elif parser == 'redcarpet':\n # To ensure full compatibility what follows is a direct translation\n # of the rndr_header_anchor C function used in redcarpet.\n STRIPPED = \" 
-&+$,/:;=?@\\\"#{}|^~[]`\\\\*()%.!'\"\n header_text_trimmed_len = len(header_text_trimmed)\n inserted = 0\n stripped = 0\n header_text_trimmed_middle_stage = ''\n for i in range(0, header_text_trimmed_len):\n if header_text_trimmed[i] == '<':\n while i < header_text_trimmed_len and header_text_trimmed[\n i] != '>':\n i += 1\n elif header_text_trimmed[i] == '&':\n while i < header_text_trimmed_len and header_text_trimmed[\n i] != ';':\n i += 1\n # str.find() == -1 if character is not found in str.\n # https://docs.python.org/3.6/library/stdtypes.html?highlight=find#str.find\n elif not curses.ascii.isascii(\n header_text_trimmed[i]) or STRIPPED.find(\n header_text_trimmed[i]) != -1:\n if inserted and not stripped:\n header_text_trimmed_middle_stage += '-'\n stripped = 1\n else:\n header_text_trimmed_middle_stage += header_text_trimmed[\n i].lower()\n stripped = 0\n inserted += 1\n\n if stripped > 0 and inserted > 0:\n header_text_trimmed_middle_stage = header_text_trimmed_middle_stage[\n 0:-1]\n\n if inserted == 0 and header_text_trimmed_len > 0:\n hash = 5381\n for i in range(0, header_text_trimmed_len):\n # Get the unicode representation with ord.\n # Unicode should be equal to ASCII in ASCII's range of\n # characters.\n hash = ((hash << 5) + hash) + ord(header_text_trimmed[i])\n\n # This is equivalent to %x in C. In Python we don't have\n # the length problem so %x is equal to %lx in this case.\n # Apparently there is no %l in Python...\n header_text_trimmed_middle_stage = 'part-' + '{0:x}'.format(hash)\n\n return header_text_trimmed_middle_stage\n"
] | #
# api.py
#
# Copyright (C) 2017-2019 frnmst (Franco Masotti) <franco.masotti@live.com>
#
# This file is part of md-toc.
#
# md-toc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# md-toc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with md-toc. If not, see <http://www.gnu.org/licenses/>.
#
"""The main file."""
import fpyutils
import re
import curses.ascii
import sys
from .exceptions import (GithubOverflowCharsLinkLabel, GithubEmptyLinkLabel,
GithubOverflowOrderedListMarker,
StdinIsNotAFileToBeWritten,
TocDoesNotRenderAsCoherentList)
from .constants import common_defaults
from .constants import parser as md_parser
def write_string_on_file_between_markers(filename: str, string: str,
                                         marker: str):
    r"""Write the table of contents on a single file.

    :parameter filename: the file that needs to be read or modified.
    :parameter string: the string that will be written on the file.
    :parameter marker: a marker that will identify the start
         and the end of the string.
    :type filename: str
    :type string: str
    :type marker: str
    :returns: None
    :rtype: None
    :raises: StdinIsNotAFileToBeWritten or an fpyutils exception
         or a built-in exception.
    """
    if filename == '-':
        raise StdinIsNotAFileToBeWritten

    # Surround the payload with one marker line above and one below.
    final_string = marker + '\n\n' + string.rstrip() + '\n\n' + marker + '\n'
    # Find (at most) the first two lines already matching the marker.
    marker_line_positions = fpyutils.get_line_matches(
        filename, marker, 2, loose_matching=True)

    if 1 in marker_line_positions:
        # At least one marker exists: remove the old content (the interval
        # between the two markers, or the single marker line) ...
        if 2 in marker_line_positions:
            fpyutils.remove_line_interval(filename, marker_line_positions[1],
                                          marker_line_positions[2], filename)
        else:
            fpyutils.remove_line_interval(filename, marker_line_positions[1],
                                          marker_line_positions[1], filename)
        # ... and insert the new marked-up string where the first marker
        # used to be.
        fpyutils.insert_string_at_line(
            filename,
            final_string,
            marker_line_positions[1],
            filename,
            append=False)
def write_strings_on_files_between_markers(filenames: list, strings: list,
                                           marker: str):
    r"""Write the table of contents on multiple files.

    :parameter filenames: the files that need to be read or modified.
    :parameter strings: the strings that will be written on the files. Each
         string is associated with one file.
    :parameter marker: a marker that will identify the start
         and the end of the string.
    :type filenames: list
    :type strings: list
    :type marker: str
    :returns: None
    :rtype: None
    :raises: an fpyutils exception or a built-in exception.
    """
    assert len(filenames) == len(strings)
    for f in filenames:
        assert isinstance(f, str)
    for s in strings:
        assert isinstance(s, str)

    # Pair each file with its corresponding TOC string.
    for f, s in zip(filenames, strings):
        write_string_on_file_between_markers(f, s, marker)
def build_toc(filename: str,
              ordered: bool = False,
              no_links: bool = False,
              no_indentation: bool = False,
              no_list_coherence: bool = False,
              keep_header_levels: int = 3,
              parser: str = 'github',
              list_marker: str = '-') -> str:
    r"""Build the table of contents of a single file.

    :parameter filename: the file that needs to be read. If ``-``,
         standard input is read instead.
    :parameter ordered: decides whether to build an ordered list or not.
         Defaults to ``False``.
    :parameter no_links: disables the use of links.
         Defaults to ``False``.
    :parameter no_indentation: disables indentation in the list.
         Defaults to ``False``.
    :parameter no_list_coherence: disables the check that the TOC renders
         as a coherent list. Defaults to ``False``.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of each list element. Defaults to ``-``.
    :type filename: str
    :type ordered: bool
    :type no_links: bool
    :type no_indentation: bool
    :type no_list_coherence: bool
    :type keep_header_levels: int
    :type parser: str
    :type list_marker: str
    :returns: toc, the corresponding table of contents of the file.
    :rtype: str
    :raises: a built-in exception.
    """
    toc = str()
    header_type_counter = dict()
    header_type_curr = 0
    header_type_prev = 0
    header_duplicate_counter = dict()
    no_of_indentation_spaces_prev = 0
    # NOTE(review): list_marker_log is assigned again a few lines below;
    # this first assignment appears redundant — confirm and remove.
    if ordered:
        list_marker_log = build_list_marker_log(parser, list_marker)

    if filename == '-':
        f = sys.stdin
    else:
        f = open(filename, 'r')
    line = f.readline()
    if ordered:
        list_marker_log = build_list_marker_log(parser, list_marker)
    else:
        list_marker_log = list()
    is_within_code_fence = False
    code_fence = None
    is_document_end = False
    if not no_indentation and not no_list_coherence:
        # if indentation and list coherence.
        indentation_list = build_indentation_list(parser)
    while line:
        # Document ending detection.
        #
        # This changes the state of is_within_code_fence if the
        # file has no closing fence markers. This serves no practical
        # purpose since the code would run correctly anyway. It is
        # however more semantically correct.
        #
        # See the unit tests (examples 95 and 96 of the github parser)
        # and the is_closing_code_fence function.
        if filename != '-':
            # stdin is not seekable.
            file_pointer_pos = f.tell()
            if f.readline() == str():
                is_document_end = True
            f.seek(file_pointer_pos)

        # Code fence detection: skip over lines that are inside a fenced
        # code block so that '#' lines in code are not treated as headers.
        if is_within_code_fence:
            is_within_code_fence = not is_closing_code_fence(
                line, code_fence, is_document_end, parser)
            line = f.readline()
        else:
            code_fence = is_opening_code_fence(line, parser)
            if code_fence is not None:
                # Update the status of the next line.
                is_within_code_fence = True
                line = f.readline()

        if not is_within_code_fence or code_fence is None:
            # Header detection and gathering.
            header = get_md_header(line, header_duplicate_counter,
                                   keep_header_levels, parser, no_links)

            if header is not None:
                header_type_curr = header['type']

                # Take care of the ordered TOC.
                if ordered:
                    increase_index_ordered_list(header_type_counter,
                                                header_type_prev,
                                                header_type_curr, parser)
                    index = header_type_counter[header_type_curr]
                else:
                    index = 1

                # Take care of list indentations.
                if no_indentation:
                    no_of_indentation_spaces_curr = 0
                    # TOC list coherence checks are not necessary
                    # without indentation.
                else:
                    if not no_list_coherence:
                        if not toc_renders_as_coherent_list(
                                header_type_curr, indentation_list, parser):
                            raise TocDoesNotRenderAsCoherentList

                    no_of_indentation_spaces_curr = compute_toc_line_indentation_spaces(
                        header_type_curr, header_type_prev,
                        no_of_indentation_spaces_prev, parser, ordered,
                        list_marker, list_marker_log, index)

                # Build a single TOC line.
                toc_line_no_indent = build_toc_line_without_indentation(
                    header, ordered, no_links, index, parser, list_marker)

                # Save the TOC line with the indentation.
                toc += build_toc_line(toc_line_no_indent,
                                      no_of_indentation_spaces_curr) + '\n'

                header_type_prev = header_type_curr
                no_of_indentation_spaces_prev = no_of_indentation_spaces_curr

            # endif
        # endif

        line = f.readline()
    # endwhile

    # NOTE(review): when filename is '-' this closes sys.stdin —
    # presumably acceptable for a one-shot CLI invocation; confirm.
    f.close()
    return toc
def build_multiple_tocs(filenames: list,
                        ordered: bool = False,
                        no_links: bool = False,
                        no_indentation: bool = False,
                        no_list_coherence: bool = False,
                        keep_header_levels: int = 3,
                        parser: str = 'github',
                        list_marker: str = '-') -> list:
    r"""Parse files by line and build the table of contents of each file.

    :parameter filenames: the files that need to be read. An empty list
         means that standard input (``-``) is used instead.
    :parameter ordered: decides whether to build an ordered list or not.
         Defaults to ``False``.
    :parameter no_links: disables the use of links.
         Defaults to ``False``.
    :parameter no_indentation: disables indentation in the list.
         Defaults to ``False``.
    :parameter no_list_coherence: disables list coherence checks.
         Defaults to ``False``.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``-``.
    :type filenames: list
    :type ordered: bool
    :type no_links: bool
    :type no_indentation: bool
    :type no_list_coherence: bool
    :type keep_header_levels: int
    :type parser: str
    :type list_marker: str
    :returns: toc_struct, the corresponding table of contents for each input
         file.
    :rtype: list
    :raises: a built-in exception.
    """
    for f in filenames:
        assert isinstance(f, str)

    # Bug fix: fall back to standard input using a *local* list instead of
    # appending '-' to the caller's list, which was an unexpected side
    # effect of the previous implementation.
    if len(filenames) == 0:
        filenames = ['-']

    # Idiomatic replacement of the manual while/index loop.
    return [
        build_toc(filename, ordered, no_links, no_indentation,
                  no_list_coherence, keep_header_levels, parser, list_marker)
        for filename in filenames
    ]
def increase_index_ordered_list(header_type_count: dict,
                                header_type_prev: int,
                                header_type_curr: int,
                                parser: str = 'github'):
    r"""Compute the current index for ordered list table of contents.

    :parameter header_type_count: the count of each header type.
    :parameter header_type_prev: the previous type of header (h[1-Inf]).
    :parameter header_type_curr: the current type of header (h[1-Inf]).
    :parameter parser: decides rules on how to generate ordered list markers.
         Defaults to ``github``.
    :type header_type_count: dict
    :type header_type_prev: int
    :type header_type_curr: int
    :type parser: str
    :returns: None
    :rtype: None
    :raises: GithubOverflowOrderedListMarker or a built-in exception.
    """
    # The previous header type may legitimately be 0 (no header seen yet);
    # the current one is always at least 1.
    assert header_type_prev >= 0
    assert header_type_curr >= 1

    # A brand new TOC starts counting as if the previous header had the
    # same type as the current one.
    if header_type_prev == 0:
        header_type_prev = header_type_curr

    # Start a fresh counter for a header type never seen before or when
    # descending into a deeper nesting level.
    needs_new_counter = (header_type_curr not in header_type_count
                         or header_type_prev < header_type_curr)
    if needs_new_counter:
        header_type_count[header_type_curr] = 0
    header_type_count[header_type_curr] += 1

    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        max_marker = md_parser['github']['list']['ordered'][
            'max_marker_number']
        if header_type_count[header_type_curr] > max_marker:
            raise GithubOverflowOrderedListMarker
def build_list_marker_log(parser: str = 'github',
                          list_marker: str = '.') -> list:
    r"""Create a data structure that holds list marker information.

    :parameter parser: decides rules on how compute indentations.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``.``.
    :type parser: str
    :type list_marker: str
    :returns: list_marker_log, the data structure.
    :rtype: list
    :raises: a built-in exception.

    .. note::
         This function makes sense for ordered lists only.
    """
    github_like = parser in ('github', 'cmark', 'gitlab', 'commonmarker')
    if github_like or parser == 'redcarpet':
        assert list_marker in md_parser[parser]['list']['ordered'][
            'closing_markers']

    list_marker_log = list()
    if github_like:
        # Every level starts at the minimum marker number.
        initial_marker = str(md_parser['github']['list']['ordered'][
            'min_marker_number']) + list_marker
        list_marker_log = [
            initial_marker
            for _ in range(md_parser['github']['header']['max_levels'])
        ]
    elif parser == 'redcarpet':
        # Redcarpet needs no marker tracking.
        pass
    return list_marker_log
def compute_toc_line_indentation_spaces(
        header_type_curr: int = 1,
        header_type_prev: int = 0,
        no_of_indentation_spaces_prev: int = 0,
        parser: str = 'github',
        ordered: bool = False,
        list_marker: str = '-',
        list_marker_log: list = None,
        index: int = 1) -> int:
    r"""Compute the number of indentation spaces for the TOC list element.

    :parameter header_type_curr: the current type of header (h[1-Inf]).
         Defaults to ``1``.
    :parameter header_type_prev: the previous type of header (h[1-Inf]).
         Defaults to ``0``.
    :parameter no_of_indentation_spaces_prev: the number of previous
         indentation spaces. Defaults to ``0``.
    :parameter parser: decides rules on how compute indentations.
         Defaults to ``github``.
    :parameter ordered: if set to ``True``, numbers will be used
         as list ids, otherwise a dash character. Defaults to ``False``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``-``.
    :parameter list_marker_log: a data structure that holds list marker
         information for ordered lists. Defaults to ``None``, which is
         equivalent to passing ``build_list_marker_log('github', '.')``.
    :parameter index: a number that will be used as list id in case of an
         ordered table of contents. Defaults to ``1``.
    :type header_type_curr: int
    :type header_type_prev: int
    :type no_of_indentation_spaces_prev: int
    :type parser: str
    :type ordered: bool
    :type list_marker: str
    :type list_marker_log: list
    :type index: int
    :returns: no_of_indentation_spaces_curr, the number of indentation spaces
         for the list element.
    :rtype: int
    :raises: a built-in exception.

    .. note::
         Please note that this function assumes that
         no_of_indentation_spaces_prev contains the correct number of spaces.
    """
    # Bug fix: the previous default (``build_list_marker_log('github', '.')``)
    # was evaluated once at function definition time and shared between all
    # calls; since this function mutates list_marker_log in place, state
    # leaked across unrelated calls. A None sentinel yields a fresh
    # structure per call instead.
    if list_marker_log is None:
        list_marker_log = build_list_marker_log('github', '.')

    assert header_type_curr >= 1
    assert header_type_prev >= 0
    assert no_of_indentation_spaces_prev >= 0
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        if ordered:
            assert list_marker in md_parser[parser]['list']['ordered'][
                'closing_markers']
        else:
            assert list_marker in md_parser[parser]['list']['unordered'][
                'bullet_markers']
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        if ordered:
            assert len(
                list_marker_log) == md_parser['github']['header']['max_levels']
            for e in list_marker_log:
                assert isinstance(e, str)
    assert index >= 1

    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        if header_type_prev == 0:
            # Base case for the first toc line.
            no_of_indentation_spaces_curr = 0
        elif header_type_curr == header_type_prev:
            # Base case for same indentation.
            no_of_indentation_spaces_curr = no_of_indentation_spaces_prev
        else:
            if ordered:
                list_marker_prev = str(list_marker_log[header_type_curr - 1])
            else:
                # list_marker for unordered lists will always be 1 character.
                list_marker_prev = list_marker

            # Generic cases.
            if header_type_curr > header_type_prev:
                # More indentation: marker width plus one separating space.
                no_of_indentation_spaces_curr = (
                    no_of_indentation_spaces_prev + len(list_marker_prev) +
                    len(' '))
            elif header_type_curr < header_type_prev:
                # Less indentation.
                no_of_indentation_spaces_curr = (
                    no_of_indentation_spaces_prev -
                    (len(list_marker_prev) + len(' ')))

                # Reset older nested list indices. If this is not performed
                # then future nested ordered lists will rely on incorrect
                # data to compute indentations.
                if ordered:
                    for i in range((header_type_curr - 1) + 1,
                                   md_parser['github']['header']
                                   ['max_levels']):
                        list_marker_log[i] = str(
                            md_parser['github']['list']['ordered']
                            ['min_marker_number']) + list_marker

        # Update the data structure with the marker used at this level.
        if ordered:
            list_marker_log[header_type_curr - 1] = str(index) + list_marker
    elif parser == 'redcarpet':
        # Redcarpet uses a fixed 4-space indent per nesting level.
        no_of_indentation_spaces_curr = 4 * (header_type_curr - 1)

    # NOTE(review): an unrecognized parser leaves the result unbound and
    # raises NameError here; this mirrors the original behavior.
    return no_of_indentation_spaces_curr
def build_toc_line_without_indentation(header: dict,
                                       ordered: bool = False,
                                       no_links: bool = False,
                                       index: int = 1,
                                       parser: str = 'github',
                                       list_marker: str = '-') -> str:
    r"""Return a list element of the table of contents.

    :parameter header: a data structure that contains the original
         text, the trimmed text and the type of header.
    :parameter ordered: if set to ``True``, numbers will be used
         as list ids, otherwise a dash character. Defaults to ``False``.
    :parameter no_links: disables the use of links. Defaults to ``False``.
    :parameter index: a number that will be used as list id in case of an
         ordered table of contents. Defaults to ``1``.
    :parameter parser: decides rules on how compute indentations.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``-``.
    :type header: dict
    :type ordered: bool
    :type no_links: bool
    :type index: int
    :type parser: str
    :type list_marker: str
    :returns: toc_line_no_indent, a single line of the table of contents
         without indentation.
    :rtype: str
    :raises: a built-in exception.
    """
    assert 'type' in header
    assert 'text_original' in header
    assert 'text_anchor_link' in header
    assert isinstance(header['type'], int)
    assert isinstance(header['text_original'], str)
    assert isinstance(header['text_anchor_link'], str)
    assert header['type'] >= 1
    assert index >= 1

    known_parser = parser in ('github', 'cmark', 'gitlab', 'commonmarker',
                              'redcarpet')
    if known_parser:
        if ordered:
            assert list_marker in md_parser[parser]['list']['ordered'][
                'closing_markers']
        else:
            assert list_marker in md_parser[parser]['list']['unordered'][
                'bullet_markers']

    toc_line_no_indent = str()
    if known_parser:
        if ordered:
            # Ordered lists carry the numeric index before the marker.
            list_marker = str(index) + list_marker
        if no_links:
            list_element = header['text_original']
        else:
            # Markdown inline link: [label](#anchor).
            list_element = ('[' + header['text_original'] + '](#' +
                            header['text_anchor_link'] + ')')
        toc_line_no_indent = list_marker + ' ' + list_element
    return toc_line_no_indent
def build_toc_line(toc_line_no_indent: str,
                   no_of_indentation_spaces: int = 0) -> str:
    r"""Prepend indentation spaces to a TOC line.

    :parameter toc_line_no_indent: the TOC line without indentation.
    :parameter no_of_indentation_spaces: the number of indentation spaces.
         Defaults to ``0``.
    :type toc_line_no_indent: str
    :type no_of_indentation_spaces: int
    :returns: toc_line, a single line of the table of contents.
    :rtype: str
    :raises: a built-in exception.
    """
    assert no_of_indentation_spaces >= 0
    return ' ' * no_of_indentation_spaces + toc_line_no_indent
def build_anchor_link(header_text_trimmed: str,
                      header_duplicate_counter: dict,
                      parser: str = 'github') -> str:
    r"""Apply the specified slug rule to build the anchor link.

    :parameter header_text_trimmed: the text that needs to be transformed
         in a link.
    :parameter header_duplicate_counter: a data structure that keeps track of
         possible duplicate header links in order to avoid them. This is
         meaningful only for certain values of parser.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :type header_text_trimmed: str
    :type header_duplicate_counter: dict
    :type parser: str
    :returns: None if the specified parser is not recognized, or the anchor
         link, otherwise.
    :rtype: str
    :raises: a built-in exception.

    .. note::
         The licenses of each markdown parser algorithm are reported on
         the 'Markdown spec' documentation page.
    """
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        header_text_trimmed = header_text_trimmed.lower()
        # Remove punctuation: keep spaces, hyphens and "word characters"
        # only, then replace spaces with hyphens (GitHub slug rule).
        header_text_trimmed = re.sub(r'[^\w\s\- ]', '', header_text_trimmed)
        header_text_trimmed = header_text_trimmed.replace(' ', '-')

        # Check for duplicates.
        ht = header_text_trimmed
        # Set the initial value if we are examining the first occurrency.
        # The state of header_duplicate_counter is available to the caller
        # functions.
        if header_text_trimmed not in header_duplicate_counter:
            header_duplicate_counter[header_text_trimmed] = 0
        if header_duplicate_counter[header_text_trimmed] > 0:
            # A duplicate anchor gets a numeric suffix, as GitHub does.
            header_text_trimmed = header_text_trimmed + '-' + str(
                header_duplicate_counter[header_text_trimmed])
        header_duplicate_counter[ht] += 1
        return header_text_trimmed
    elif parser == 'redcarpet':
        # To ensure full compatibility what follows is a direct translation
        # of the rndr_header_anchor C function used in redcarpet.
        STRIPPED = " -&+$,/:;=?@\"#{}|^~[]`\\*()%.!'"
        header_text_trimmed_len = len(header_text_trimmed)
        inserted = 0
        stripped = 0
        header_text_trimmed_middle_stage = ''
        # Bug fix: the previous translation used ``for i in range(...)``;
        # rebinding ``i`` inside the body of a Python for loop does not
        # skip iterations (unlike ``i++`` inside C's for loop), so HTML
        # tags (``<...>``) and entities (``&...;``) were never actually
        # skipped. A while loop restores the C semantics.
        i = 0
        while i < header_text_trimmed_len:
            if header_text_trimmed[i] == '<':
                # Skip an HTML tag up to its closing '>'.
                while (i < header_text_trimmed_len
                       and header_text_trimmed[i] != '>'):
                    i += 1
            elif header_text_trimmed[i] == '&':
                # Skip an HTML entity up to its closing ';'.
                while (i < header_text_trimmed_len
                       and header_text_trimmed[i] != ';'):
                    i += 1
            # str.find() == -1 if character is not found in str.
            # https://docs.python.org/3.6/library/stdtypes.html?highlight=find#str.find
            elif not curses.ascii.isascii(
                    header_text_trimmed[i]) or STRIPPED.find(
                        header_text_trimmed[i]) != -1:
                # Collapse any run of stripped characters into one '-'.
                if inserted and not stripped:
                    header_text_trimmed_middle_stage += '-'
                stripped = 1
            else:
                header_text_trimmed_middle_stage += header_text_trimmed[
                    i].lower()
                stripped = 0
                inserted += 1
            i += 1

        # Drop a trailing separator left by stripped characters at the end.
        if stripped > 0 and inserted > 0:
            header_text_trimmed_middle_stage = header_text_trimmed_middle_stage[
                0:-1]

        if inserted == 0 and header_text_trimmed_len > 0:
            # djb2 hash fallback when nothing could be inserted.
            # NOTE(review): C's unsigned long wraps on overflow while
            # Python integers do not, so very long labels may differ from
            # upstream redcarpet — confirm if exact parity is required.
            djb2_hash = 5381
            for i in range(0, header_text_trimmed_len):
                # Get the unicode representation with ord.
                # Unicode should be equal to ASCII in ASCII's range of
                # characters.
                djb2_hash = ((djb2_hash << 5) + djb2_hash) + ord(
                    header_text_trimmed[i])
            # '{0:x}' is the equivalent of C's %x format specifier.
            header_text_trimmed_middle_stage = 'part-' + '{0:x}'.format(
                djb2_hash)
        return header_text_trimmed_middle_stage
def get_atx_heading(line: str,
                    keep_header_levels: int = 3,
                    parser: str = 'github',
                    no_links: bool = False):
    r"""Given a line extract the link label and its type.

    :parameter line: the line to be examined.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :parameter no_links: disables the use of links.
    :type line: str
    :type keep_header_levels: int
    :type parser: str
    :type no_links: bool
    :returns: None if the line does not contain header elements according to
         the rules of the selected markdown parser, or a tuple containing the
         header type and the trimmed header text, according to the selected
         parser rules, otherwise.
    :rtype: typing.Optional[tuple]
    :raises: GithubEmptyLinkLabel or GithubOverflowCharsLinkLabel or a
         built-in exception.
    """
    assert keep_header_levels >= 1

    if len(line) == 0:
        return None

    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        # A backslash-escaped hash sequence is not an ATX heading.
        if line[0] == '\u005c':
            return None

        i = 0
        # Consume the allowed leading indentation spaces.
        while i < len(line) and line[i] == ' ' and i <= md_parser['github'][
                'header']['max_space_indentation']:
            i += 1
        if i > md_parser['github']['header']['max_space_indentation']:
            return None

        offset = i
        # Count the '#' characters of the opening sequence.
        while i < len(line) and line[i] == '#' and i <= md_parser['github'][
                'header']['max_levels'] + offset:
            i += 1
        if i - offset > md_parser['github']['header'][
                'max_levels'] or i - offset > keep_header_levels or i - offset == 0:
            return None
        current_headers = i - offset

        # Include special cases for line endings which should not be
        # discarded as non-ATX headers.
        if i < len(line) and (line[i] != ' ' and line[i] != '\u000a'
                              and line[i] != '\u000d'):
            return None

        i += 1
        # Exclude leading whitespaces after the ATX header identifier.
        while i < len(line) and line[i] == ' ':
            i += 1

        # An algorithm to find the start and the end of the closing sequence.
        # The closing sequence includes all the significant part of the
        # string. This algorithm has a complexity of O(n) with n being the
        # length of the line. It scans the reversed line to locate trailing
        # spaces and closing '#' runs.
        cs_start = i
        cs_end = cs_start
        line_prime = line[::-1]
        hash_char_rounds = 0
        go_on = True
        i = 0
        i_prev = i
        while i < len(line) - cs_start - 1 and go_on:
            # Stop once a significant character is found or more than one
            # run of '#' characters has been crossed.
            if ((line_prime[i] != ' ' and line_prime[i] != '#')
                    or hash_char_rounds > 1):
                if i > i_prev:
                    cs_end = len(line_prime) - i_prev
                else:
                    cs_end = len(line_prime) - i
                go_on = False
            while go_on and line_prime[i] == ' ':
                i += 1
            i_prev = i
            while go_on and line_prime[i] == '#':
                i += 1
            if i > i_prev:
                hash_char_rounds += 1

        # Instead of changing the whole algorithm to check for line
        # endings, this seems cleaner: cap the end at the first newline or
        # carriage return, if any.
        find_newline = line.find('\u000a')
        find_carriage_return = line.find('\u000d')
        if find_newline != -1:
            cs_end = min(cs_end, find_newline)
        if find_carriage_return != -1:
            cs_end = min(cs_end, find_carriage_return)

        final_line = line[cs_start:cs_end]

        if not no_links:
            # A trailing backslash would escape the closing ')' of the
            # link: neutralize it with a space.
            if len(final_line) > 0 and final_line[-1] == '\u005c':
                final_line += ' '
            # A label made only of whitespace characters is not allowed.
            if len(
                    final_line.strip('\u0020').strip('\u0009').strip('\u000a').
                    strip('\u000b').strip('\u000c').strip('\u000d')) == 0:
                raise GithubEmptyLinkLabel
            if len(final_line
                   ) > md_parser['github']['link']['max_chars_label']:
                raise GithubOverflowCharsLinkLabel

            # Escape square brackets if not already escaped.
            i = 0
            while i < len(final_line):
                if (final_line[i] == '[' or final_line[i] == ']'):
                    j = i - 1
                    # Count the backslashes immediately preceding the
                    # bracket: an even count means the bracket is unescaped.
                    consecutive_escape_characters = 0
                    while j >= 0 and final_line[j] == '\u005c':
                        consecutive_escape_characters += 1
                        j -= 1
                    if ((consecutive_escape_characters > 0
                         and consecutive_escape_characters % 2 == 0)
                            or consecutive_escape_characters == 0):
                        tmp = '\u005c'
                    else:
                        tmp = str()
                    final_line = final_line[0:i] + tmp + final_line[i:len(
                        final_line)]
                    i += 1 + len(tmp)
                else:
                    i += 1
    elif parser == 'redcarpet':
        # Redcarpet requires the '#' to be the very first character.
        if line[0] != '#':
            return None

        i = 0
        while (i < len(line)
               and i < md_parser['redcarpet']['header']['max_levels']
               and line[i] == '#'):
            i += 1
        current_headers = i

        if i < len(line) and line[i] != ' ':
            return None
        while i < len(line) and line[i] == ' ':
            i += 1

        # Trim the trailing newline, closing '#' run and spaces.
        end = i
        while end < len(line) and line[end] != '\n':
            end += 1
        while end > 0 and line[end - 1] == '#':
            end -= 1
        while end > 0 and line[end - 1] == ' ':
            end -= 1

        if end > i:
            final_line = line
            # Neutralize a trailing backslash that would escape the
            # closing ')' of the link.
            if not no_links and len(final_line) > 0 and final_line[-1] == '\\':
                final_line += ' '
                end += 1
            final_line = final_line[i:end]
        else:
            return None

    # TODO: escape or remove '[', ']', '(', ')' in inline links for redcarpet,
    # TODO: check link label rules for redcarpet.
    # NOTE(review): an unrecognized parser reaches this return with both
    # names unbound and raises NameError — confirm whether that is intended.
    return current_headers, final_line
def is_valid_code_fence_indent(line: str, parser: str = 'github') -> bool:
    r"""Determine if the given line has valid indentation for a code block fence.

    :parameter line: a single markdown line to evaluate.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :type line: str
    :type parser: str
    :returns: True if the given line has valid indentation or False
         otherwise.
    :rtype: bool
    :raises: a built-in exception.
    """
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        # Leading-space count must not exceed the allowed maximum.
        leading_spaces = len(line) - len(line.lstrip(' '))
        return (leading_spaces <=
                md_parser['github']['code fence']['min_marker_characters'])
    elif parser == 'redcarpet':
        # TODO.
        return False
def is_opening_code_fence(line: str, parser: str = 'github'):
    r"""Determine if the given line is possibly the opening of a fenced code block.

    :parameter line: a single markdown line to evaluate.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :type line: str
    :type parser: str
    :returns: None if the input line is not an opening code fence. Otherwise,
         returns the string which will identify the closing code fence
         according to the input parsers' rules.
    :rtype: typing.Optional[str]
    :raises: a built-in exception.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        markers = md_parser['github']['code fence']['marker']
        marker_min_length = md_parser['github']['code fence'][
            'min_marker_characters']

        if not is_valid_code_fence_indent(line):
            return None

        line = line.lstrip(' ').rstrip('\n')
        # The line must start with at least marker_min_length identical
        # fence characters (backticks or tildes).
        if not line.startswith(
            (markers[0] * marker_min_length, markers[1] * marker_min_length)):
            return None

        # The info string is whatever follows the fence marker run.
        if line == len(line) * line[0]:
            info_string = str()
        else:
            info_string = line.lstrip(line[0])

        # Backticks or tildes in info string are explicitly forbidden.
        if markers[0] in info_string or markers[1] in info_string:
            return None

        # Solves example 107. See:
        # https://github.github.com/gfm/#example-107
        if line.rstrip(markers[0]) != line and line.rstrip(markers[1]) != line:
            return None

        # NOTE(review): str.rstrip strips a *set* of characters, not a
        # suffix, so this removes every trailing character that occurs
        # anywhere in info_string — confirm this matches the intended
        # marker extraction for unusual info strings.
        return line.rstrip(info_string)
    elif parser == 'redcarpet':
        # TODO.
        return None
def is_closing_code_fence(line: str,
                          fence: str,
                          is_document_end: bool = False,
                          parser: str = 'github') -> bool:
    r"""Determine if the given line is the end of a fenced code block.

    :parameter line: a single markdown line to evaluate.
    :parameter fence: a sequence of backticks or tildes marking the start of
         the current code block. This is usually the return value of the
         is_opening_code_fence function.
    :parameter is_document_end: this variable tells the function that the
         end of the file is reached.
         Defaults to ``False``.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :type line: str
    :type fence: str
    :type is_document_end: bool
    :type parser: str
    :returns: True if the line ends the current code block. False otherwise.
    :rtype: bool
    :raises: a built-in exception.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        markers = md_parser['github']['code fence']['marker']
        marker_min_length = md_parser['github']['code fence'][
            'min_marker_characters']

        if not is_valid_code_fence_indent(line):
            return False

        # Remove opening fence indentation after it is known to be valid.
        fence = fence.lstrip(' ')
        # Check if fence uses valid characters.
        if not fence.startswith((markers[0], markers[1])):
            return False

        if len(fence) < marker_min_length:
            return False

        # Additional security: drop a trailing newline and spaces.
        fence = fence.rstrip('\n').rstrip(' ')

        # Check that all fence characters are equal.
        if fence != len(fence) * fence[0]:
            return False

        # We might be inside a code block if this is not closed
        # by the end of the document, according to example 95 and 96.
        # This means that the end of the document corresponds to
        # a closing code fence.
        # Of course we first have to check that fence is a valid opening
        # code fence marker.
        # See:
        # https://github.github.com/gfm/#example-95
        # https://github.github.com/gfm/#example-96
        if is_document_end:
            return True

        # Check if line uses the same character as fence.
        line = line.lstrip(' ')
        if not line.startswith(fence):
            return False

        line = line.rstrip('\n').rstrip(' ')

        # Solves example 93 and 94. See:
        # https://github.github.com/gfm/#example-93
        # https://github.github.com/gfm/#example-94
        if len(line) < len(fence):
            return False

        # Closing fence must not have alien characters.
        if line != len(line) * line[0]:
            return False

        return True
    elif parser == 'redcarpet':
        # TODO.
        return False
def build_indentation_list(parser: str = 'github'):
    r"""Create a data structure that holds the state of indentations.

    :parameter parser: decides the length of the list.
         Defaults to ``github``.
    :type parser: str
    :returns: indentation_list, a list that contains the state of
         indentations given a header type.
    :rtype: list
    :raises: a built-in exception.
    """
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker', 'redcarpet'):
        # One boolean flag per possible header level, all initially unset.
        return [False] * md_parser[parser]['header']['max_levels']
    # Unknown parsers get an empty structure.
    return list()
def toc_renders_as_coherent_list(
        header_type_curr: int = 1,
        indentation_list: list = None,
        parser: str = 'github') -> bool:
    r"""Check if the TOC will render as a working list.

    :parameter header_type_curr: the current type of header (h[1-Inf]).
         Defaults to ``1``.
    :parameter indentation_list: a list that contains the state of
         indentations given a header type. Defaults to ``None``, which is
         equivalent to passing ``build_indentation_list('github')``.
    :parameter parser: decides rules on how to generate ordered list markers.
         Defaults to ``github``.
    :type header_type_curr: int
    :type indentation_list: list
    :type parser: str
    :returns: renders_as_list
    :rtype: bool
    :raises: a built-in exception.
    """
    assert header_type_curr >= 1
    # Bug fix: the previous default (``build_indentation_list('github')``)
    # was evaluated once at function definition time and shared between all
    # calls; since this function mutates indentation_list in place, state
    # leaked across unrelated calls. A None sentinel yields a fresh list
    # per call instead.
    if indentation_list is None:
        indentation_list = build_indentation_list('github')
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        assert len(
            indentation_list) == md_parser[parser]['header']['max_levels']
        for e in indentation_list:
            assert isinstance(e, bool)

    renders_as_list = True
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        # Update with current information.
        indentation_list[header_type_curr - 1] = True

        # Reset next cells to False, as a detection mechanism.
        for i in range(header_type_curr,
                       md_parser['github']['header']['max_levels']):
            indentation_list[i] = False

        # Check for previous False cells. If there is a "hole" in the list
        # it means that the TOC will have "wrong" indentation spaces, thus
        # either not rendering as an HTML list or not as the user intended.
        i = header_type_curr - 1
        while i >= 0 and indentation_list[i]:
            i -= 1
        if i >= 0:
            renders_as_list = False

    return renders_as_list
# This module is meant to be imported; running it directly does nothing.
if __name__ == '__main__':
    pass
|
frnmst/md-toc | md_toc/api.py | is_valid_code_fence_indent | python | def is_valid_code_fence_indent(line: str, parser: str = 'github') -> bool:
r"""Determine if the given line has valid indentation for a code block fence.
:parameter line: a single markdown line to evaluate.
:parameter parser: decides rules on how to generate the anchor text.
Defaults to ``github``.
:type line: str
:type parser: str
:returns: True if the given line has valid indentation or False
otherwise.
:rtype: bool
:raises: a built-in exception.
"""
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker'):
return len(line) - len(line.lstrip(
' ')) <= md_parser['github']['code fence']['min_marker_characters']
elif parser == 'redcarpet':
# TODO.
return False | r"""Determine if the given line has valid indentation for a code block fence.
:parameter line: a single markdown line to evaluate.
:parameter parser: decides rules on how to generate the anchor text.
Defaults to ``github``.
:type line: str
:type parser: str
:returns: True if the given line has valid indentation or False
otherwise.
:rtype: bool
:raises: a built-in exception. | train | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/api.py#L892-L911 | null | #
# api.py
#
# Copyright (C) 2017-2019 frnmst (Franco Masotti) <franco.masotti@live.com>
#
# This file is part of md-toc.
#
# md-toc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# md-toc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with md-toc. If not, see <http://www.gnu.org/licenses/>.
#
"""The main file."""
import fpyutils
import re
import curses.ascii
import sys
from .exceptions import (GithubOverflowCharsLinkLabel, GithubEmptyLinkLabel,
GithubOverflowOrderedListMarker,
StdinIsNotAFileToBeWritten,
TocDoesNotRenderAsCoherentList)
from .constants import common_defaults
from .constants import parser as md_parser
def write_string_on_file_between_markers(filename: str, string: str,
                                         marker: str):
    r"""Write the table of contents on a single file.

    :parameter filename: the file that needs to be read or modified.
    :parameter string: the string that will be written on the file.
    :parameter marker: a marker that will identify the start
         and the end of the string.
    :type filename: str
    :type string: str
    :type marker: str
    :returns: None
    :rtype: None
    :raises: StdinIsNotAFileToBeWritten or an fpyutils exception
         or a built-in exception.
    """
    # Standard input cannot be written back in place.
    if filename == '-':
        raise StdinIsNotAFileToBeWritten

    # The marker brackets the payload on both sides.
    final_string = marker + '\n\n' + string.rstrip() + '\n\n' + marker + '\n'
    # Locate up to two occurrences of the marker in the target file.
    marker_line_positions = fpyutils.get_line_matches(
        filename, marker, 2, loose_matching=True)

    # If no marker exists in the file, nothing is written.
    if 1 in marker_line_positions:
        if 2 in marker_line_positions:
            # Two markers: remove the old bracketed interval entirely.
            fpyutils.remove_line_interval(filename, marker_line_positions[1],
                                          marker_line_positions[2], filename)
        else:
            # A single marker: remove only the marker line itself.
            fpyutils.remove_line_interval(filename, marker_line_positions[1],
                                          marker_line_positions[1], filename)
        # Insert the new bracketed string where the first marker was.
        fpyutils.insert_string_at_line(
            filename,
            final_string,
            marker_line_positions[1],
            filename,
            append=False)
def write_strings_on_files_between_markers(filenames: list, strings: list,
                                           marker: str):
    r"""Write the table of contents on multiple files.

    :parameter filenames: the files that need to be read or modified.
    :parameter strings: the strings that will be written on the files. Each
         string is associated with one file.
    :parameter marker: a marker that will identify the start
         and the end of the string.
    :type filenames: list
    :type strings: list
    :type marker: str
    :returns: None
    :rtype: None
    :raises: an fpyutils exception or a built-in exception.
    """
    assert len(filenames) == len(strings)
    for f in filenames:
        assert isinstance(f, str)
    for s in strings:
        assert isinstance(s, str)

    # Pair each file with its string instead of tracking a manual index;
    # the redundant len() > 0 guards are unnecessary with for loops.
    for filename, string in zip(filenames, strings):
        write_string_on_file_between_markers(filename, string, marker)
def build_toc(filename: str,
              ordered: bool = False,
              no_links: bool = False,
              no_indentation: bool = False,
              no_list_coherence: bool = False,
              keep_header_levels: int = 3,
              parser: str = 'github',
              list_marker: str = '-') -> str:
    r"""Build the table of contents of a single file.

    :parameter filename: the file that needs to be read. ``-`` means
         standard input.
    :parameter ordered: decides whether to build an ordered list or not.
         Defaults to ``False``.
    :parameter no_links: disables the use of links.
         Defaults to ``False``.
    :parameter no_indentation: disables indentation in the list.
         Defaults to ``False``.
    :parameter no_list_coherence: disables list coherence checks.
         Defaults to ``False``.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``-``.
    :type filename: str
    :type ordered: bool
    :type no_links: bool
    :type no_indentation: bool
    :type no_list_coherence: bool
    :type keep_header_levels: int
    :type parser: str
    :type list_marker: str
    :returns: toc, the corresponding table of contents of the file.
    :rtype: str
    :raises: a built-in exception.
    """
    toc = str()
    header_type_counter = dict()
    header_type_curr = 0
    header_type_prev = 0
    header_duplicate_counter = dict()
    no_of_indentation_spaces_prev = 0
    # NOTE(review): list_marker_log is recomputed a few lines below; this
    # first assignment appears to be redundant.
    if ordered:
        list_marker_log = build_list_marker_log(parser, list_marker)

    if filename == '-':
        f = sys.stdin
    else:
        f = open(filename, 'r')

    line = f.readline()
    if ordered:
        list_marker_log = build_list_marker_log(parser, list_marker)
    else:
        list_marker_log = list()

    is_within_code_fence = False
    code_fence = None
    is_document_end = False
    if not no_indentation and not no_list_coherence:
        # if indentation and list coherence.
        indentation_list = build_indentation_list(parser)

    while line:
        # Document ending detection.
        #
        # This changes the state of is_within_code_fence if the
        # file has no closing fence markers. This serves no practial
        # purpose since the code would run correctly anyway. It is
        # however more sematically correct.
        #
        # See the unit tests (examples 95 and 96 of the github parser)
        # and the is_closing_code_fence function.
        if filename != '-':
            # stdin is not seekable.
            file_pointer_pos = f.tell()
            if f.readline() == str():
                is_document_end = True
            f.seek(file_pointer_pos)

        # Code fence detection.
        if is_within_code_fence:
            is_within_code_fence = not is_closing_code_fence(
                line, code_fence, is_document_end, parser)
            line = f.readline()
        else:
            code_fence = is_opening_code_fence(line, parser)
            if code_fence is not None:
                # Update the status of the next line.
                is_within_code_fence = True
                line = f.readline()

        if not is_within_code_fence or code_fence is None:
            # Header detection and gathering.
            header = get_md_header(line, header_duplicate_counter,
                                   keep_header_levels, parser, no_links)
            if header is not None:
                header_type_curr = header['type']

                # Take care of the ordered TOC.
                if ordered:
                    increase_index_ordered_list(header_type_counter,
                                                header_type_prev,
                                                header_type_curr, parser)
                    index = header_type_counter[header_type_curr]
                else:
                    index = 1

                # Take care of list indentations.
                if no_indentation:
                    no_of_indentation_spaces_curr = 0
                    # TOC list coherence checks are not necessary
                    # without indentation.
                else:
                    if not no_list_coherence:
                        if not toc_renders_as_coherent_list(
                                header_type_curr, indentation_list, parser):
                            raise TocDoesNotRenderAsCoherentList
                    no_of_indentation_spaces_curr = compute_toc_line_indentation_spaces(
                        header_type_curr, header_type_prev,
                        no_of_indentation_spaces_prev, parser, ordered,
                        list_marker, list_marker_log, index)

                # Build a single TOC line.
                toc_line_no_indent = build_toc_line_without_indentation(
                    header, ordered, no_links, index, parser, list_marker)
                # Save the TOC line with the indentation.
                toc += build_toc_line(toc_line_no_indent,
                                      no_of_indentation_spaces_curr) + '\n'

                header_type_prev = header_type_curr
                no_of_indentation_spaces_prev = no_of_indentation_spaces_curr
            # endif
            line = f.readline()
        # endif
    # endwhile

    f.close()

    return toc
def build_multiple_tocs(filenames: list,
                        ordered: bool = False,
                        no_links: bool = False,
                        no_indentation: bool = False,
                        no_list_coherence: bool = False,
                        keep_header_levels: int = 3,
                        parser: str = 'github',
                        list_marker: str = '-') -> list:
    r"""Parse files by line and build the table of contents of each file.

    :parameter filenames: the files that need to be read. An empty list
        means that standard input is read instead.
    :parameter ordered: decides whether to build an ordered list or not.
        Defaults to ``False``.
    :parameter no_links: disables the use of links.
        Defaults to ``False``.
    :parameter no_indentation: disables indentation in the list.
        Defaults to ``False``.
    :parameter keep_header_levels: the maximum level of headers to be
        considered as such when building the table of contents.
        Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
        Defaults to ``github``.
    :type filenames: list
    :type ordered: bool
    :type no_links: bool
    :type no_indentation: bool
    :type keep_header_levels: int
    :type parser: str
    :returns: toc_struct, the corresponding table of contents for each input
        file.
    :rtype: list
    :raises: a built-in exception.
    """
    # Iterating an empty list is a no-op, so no length guard is needed.
    for filename in filenames:
        assert isinstance(filename, str)
    if len(filenames) == 0:
        # '-' is the conventional marker for standard input.
        filenames.append('-')
    return [
        build_toc(filename, ordered, no_links, no_indentation,
                  no_list_coherence, keep_header_levels, parser, list_marker)
        for filename in filenames
    ]
def increase_index_ordered_list(header_type_count: dict,
                                header_type_prev: int,
                                header_type_curr: int,
                                parser: str = 'github'):
    r"""Compute the current index for ordered list table of contents.

    :parameter header_type_count: the count of each header type.
    :parameter header_type_prev: the previous type of header (h[1-Inf]).
    :parameter header_type_curr: the current type of header (h[1-Inf]).
    :parameter parser: decides rules on how to generate ordered list markers.
         Defaults to ``github``.
    :type header_type_count: dict
    :type header_type_prev: int
    :type header_type_curr: int
    :type parser: str
    :returns: None
    :rtype: None
    :raises: GithubOverflowOrderedListMarker or a built-in exception.
    """
    # header_type_prev might be 0 while header_type_curr can't.
    assert header_type_prev >= 0
    assert header_type_curr >= 1

    # A brand new TOC: pretend the previous header had the current type so
    # that the nesting check below initialises the counter.
    if header_type_prev == 0:
        header_type_prev = header_type_curr

    # (Re)start counting when this header type appears for the first time
    # or when descending into a deeper nesting level.
    first_occurrence = header_type_curr not in header_type_count
    going_deeper = header_type_prev < header_type_curr
    if first_occurrence or going_deeper:
        header_type_count[header_type_curr] = 0

    header_type_count[header_type_curr] += 1

    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        overflow_limit = md_parser['github']['list']['ordered'][
            'max_marker_number']
        if header_type_count[header_type_curr] > overflow_limit:
            raise GithubOverflowOrderedListMarker
def build_list_marker_log(parser: str = 'github',
                          list_marker: str = '.') -> list:
    r"""Create a data structure that holds list marker information.

    :parameter parser: decides rules on how compute indentations.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``-``.
    :type parser: str
    :type list_marker: str
    :returns: list_marker_log, the data structure.
    :rtype: list
    :raises: a built-in exception.

    .. note::
         This function makes sense for ordered lists only.
    """
    github_like = ('github', 'cmark', 'gitlab', 'commonmarker')
    if parser in github_like + ('redcarpet', ):
        assert list_marker in md_parser[parser]['list']['ordered'][
            'closing_markers']

    list_marker_log = list()
    if parser in github_like:
        # Every nesting level starts from the minimum marker number.
        initial_marker = str(md_parser['github']['list']['ordered']
                             ['min_marker_number']) + list_marker
        list_marker_log = [
            initial_marker
            for _ in range(md_parser['github']['header']['max_levels'])
        ]
    elif parser == 'redcarpet':
        # Redcarpet does not need this information.
        pass
    return list_marker_log
def compute_toc_line_indentation_spaces(
        header_type_curr: int = 1,
        header_type_prev: int = 0,
        no_of_indentation_spaces_prev: int = 0,
        parser: str = 'github',
        ordered: bool = False,
        list_marker: str = '-',
        list_marker_log: list = None,
        index: int = 1) -> int:
    r"""Compute the number of indentation spaces for the TOC list element.

    :parameter header_type_curr: the current type of header (h[1-Inf]).
         Defaults to ``1``.
    :parameter header_type_prev: the previous type of header (h[1-Inf]).
         Defaults to ``0``.
    :parameter no_of_indentation_spaces_prev: the number of previous
         indentation spaces. Defaults to ``0``.
    :parameter parser: decides rules on how compute indentations.
         Defaults to ``github``.
    :parameter ordered: if set to ``True``, numbers will be used
         as list ids, otherwise a dash character. Defaults to ``False``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``-``.
    :parameter list_marker_log: a data structure that holds list marker
         information for ordered lists. Defaults to ``None``, which means
         that a fresh ``build_list_marker_log('github', '.')`` structure
         is built.
    :parameter index: a number that will be used as list id in case of an
         ordered table of contents. Defaults to ``1``.
    :type header_type_curr: int
    :type header_type_prev: int
    :type no_of_indentation_spaces_prev: int
    :type parser: str
    :type ordered: bool
    :type list_marker: str
    :type list_marker_log: list
    :type index: int
    :returns: no_of_indentation_spaces_curr, the number of indentation spaces
         for the list element.
    :rtype: int
    :raises: a built-in exception.

    .. note::
         Please note that this function
         assumes that no_of_indentation_spaces_prev contains the correct
         number of spaces.
    """
    # Bug fix: the previous signature used
    # ``list_marker_log: list = build_list_marker_log('github', '.')``.
    # Default values are evaluated once at function definition time, and
    # this function mutates list_marker_log in place, so every call that
    # relied on the default silently shared (and corrupted) the same list.
    # Build a fresh structure per call instead.
    if list_marker_log is None:
        list_marker_log = build_list_marker_log('github', '.')

    assert header_type_curr >= 1
    assert header_type_prev >= 0
    assert no_of_indentation_spaces_prev >= 0
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        if ordered:
            assert list_marker in md_parser[parser]['list']['ordered'][
                'closing_markers']
        else:
            assert list_marker in md_parser[parser]['list']['unordered'][
                'bullet_markers']
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        if ordered:
            assert len(
                list_marker_log) == md_parser['github']['header']['max_levels']
            for e in list_marker_log:
                assert isinstance(e, str)
    assert index >= 1

    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        if header_type_prev == 0:
            # Base case for the first toc line.
            no_of_indentation_spaces_curr = 0
        elif header_type_curr == header_type_prev:
            # Base case for same indentation.
            no_of_indentation_spaces_curr = no_of_indentation_spaces_prev
        else:
            if ordered:
                list_marker_prev = str(list_marker_log[header_type_curr - 1])
            else:
                # list_marker for unordered lists will always be 1 character.
                list_marker_prev = list_marker
            # Generic cases: the indentation step is the length of the
            # previous list marker plus one space.
            if header_type_curr > header_type_prev:
                # More indentation.
                no_of_indentation_spaces_curr = (
                    no_of_indentation_spaces_prev + len(list_marker_prev) +
                    len(' '))
            elif header_type_curr < header_type_prev:
                # Less indentation.
                no_of_indentation_spaces_curr = (
                    no_of_indentation_spaces_prev -
                    (len(list_marker_prev) + len(' ')))
            # Reset older nested list indices. If this is not performed then
            # future nested ordered lists will rely on incorrect data to
            # compute indentations.
            if ordered:
                for i in range((header_type_curr - 1) + 1,
                               md_parser['github']['header']['max_levels']):
                    list_marker_log[i] = str(
                        md_parser['github']['list']['ordered']
                        ['min_marker_number']) + list_marker

        # Update the data structure with the marker just used at this level.
        if ordered:
            list_marker_log[header_type_curr - 1] = str(index) + list_marker
    elif parser == 'redcarpet':
        # Redcarpet uses a fixed 4-space indentation step per header level.
        no_of_indentation_spaces_curr = 4 * (header_type_curr - 1)

    return no_of_indentation_spaces_curr
def build_toc_line_without_indentation(header: dict,
                                       ordered: bool = False,
                                       no_links: bool = False,
                                       index: int = 1,
                                       parser: str = 'github',
                                       list_marker: str = '-') -> str:
    r"""Return a list element of the table of contents.

    :parameter header: a data structure that contains the original
         text, the trimmed text and the type of header.
    :parameter ordered: if set to ``True``, numbers will be used
         as list ids, otherwise a dash character. Defaults
         to ``False``.
    :parameter no_links: disables the use of links. Defaults to ``False``.
    :parameter index: a number that will be used as list id in case of an
         ordered table of contents. Defaults to ``1``.
    :parameter parser: decides rules on how compute indentations.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``-``.
    :type header: dict
    :type ordered: bool
    :type no_links: bool
    :type index: int
    :type parser: str
    :type list_marker: str
    :returns: toc_line_no_indent, a single line of the table of contents
         without indentation.
    :rtype: str
    :raises: a built-in exception.
    """
    assert 'type' in header
    assert 'text_original' in header
    assert 'text_anchor_link' in header
    assert isinstance(header['type'], int)
    assert isinstance(header['text_original'], str)
    assert isinstance(header['text_anchor_link'], str)
    assert header['type'] >= 1
    assert index >= 1

    supported_parsers = ('github', 'cmark', 'gitlab', 'commonmarker',
                         'redcarpet')
    if parser in supported_parsers:
        if ordered:
            assert list_marker in md_parser[parser]['list']['ordered'][
                'closing_markers']
        else:
            assert list_marker in md_parser[parser]['list']['unordered'][
                'bullet_markers']

    toc_line_no_indent = str()
    if parser in supported_parsers:
        if ordered:
            # e.g. '1.' instead of '-'.
            list_marker = str(index) + list_marker
        # FIXME: is this always correct?
        if no_links:
            line = header['text_original']
        else:
            line = ('[' + header['text_original'] + ']' + '(#' +
                    header['text_anchor_link'] + ')')
        toc_line_no_indent = list_marker + ' ' + line
    return toc_line_no_indent
def build_toc_line(toc_line_no_indent: str,
                   no_of_indentation_spaces: int = 0) -> str:
    r"""Prepend indentation spaces to a TOC line.

    :parameter toc_line_no_indent: the TOC line without indentation.
    :parameter no_of_indentation_spaces: the number of indentation spaces.
         Defaults to ``0``.
    :type toc_line_no_indent: str
    :type no_of_indentation_spaces: int
    :returns: toc_line, a single line of the table of contents.
    :rtype: str
    :raises: a built-in exception.
    """
    assert no_of_indentation_spaces >= 0

    return ' ' * no_of_indentation_spaces + toc_line_no_indent
def build_anchor_link(header_text_trimmed: str,
                      header_duplicate_counter: str,
                      parser: str = 'github') -> str:
    r"""Apply the specified slug rule to build the anchor link.

    :parameter header_text_trimmed: the text that needs to be transformed
         in a link.
    :parameter header_duplicate_counter: a data structure that keeps track of
         possible duplicate header links in order to avoid them. This is
         meaningful only for certain values of parser.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :type header_text_trimmed: str
    :type header_duplicate_counter: dict
    :type parser: str
    :returns: None if the specified parser is not recognized, or the anchor
         link, otherwise.
    :rtype: str
    :raises: a built-in exception.

    .. note::
         The licenses of each markdown parser algorithm are reported on
         the 'Markdown spec' documentation page.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        header_text_trimmed = header_text_trimmed.lower()
        # Remove punctuation: keep spaces, hyphens and "word characters"
        # only.
        header_text_trimmed = re.sub(r'[^\w\s\- ]', '', header_text_trimmed)
        header_text_trimmed = header_text_trimmed.replace(' ', '-')

        # Check for duplicates.
        ht = header_text_trimmed
        # Set the initial value if we are examining the first occurrency.
        # The state of header_duplicate_counter is available to the caller
        # functions.
        if header_text_trimmed not in header_duplicate_counter:
            header_duplicate_counter[header_text_trimmed] = 0
        if header_duplicate_counter[header_text_trimmed] > 0:
            # Append '-<n>' for the n-th duplicate of the same slug.
            header_text_trimmed = header_text_trimmed + '-' + str(
                header_duplicate_counter[header_text_trimmed])
        header_duplicate_counter[ht] += 1
        return header_text_trimmed
    elif parser == 'redcarpet':
        # To ensure full compatibility what follows is a direct translation
        # of the rndr_header_anchor C function used in redcarpet.
        STRIPPED = " -&+$,/:;=?@\"#{}|^~[]`\\*()%.!'"
        header_text_trimmed_len = len(header_text_trimmed)
        inserted = 0
        stripped = 0
        header_text_trimmed_middle_stage = ''
        # Bug fix: the previous implementation used
        # ``for i in range(0, header_text_trimmed_len)`` while advancing
        # ``i`` inside the body to skip HTML tags ('<...>') and entities
        # ('&...;'). Python's for statement rebinds ``i`` on every
        # iteration, so those skips were silently discarded and the
        # tag/entity content leaked into the anchor. A while loop keeps the
        # increments, matching the C original being translated.
        i = 0
        while i < header_text_trimmed_len:
            if header_text_trimmed[i] == '<':
                # Skip everything up to the closing '>' of an inline tag.
                while (i < header_text_trimmed_len
                       and header_text_trimmed[i] != '>'):
                    i += 1
            elif header_text_trimmed[i] == '&':
                # Skip an HTML entity up to the terminating ';'.
                while (i < header_text_trimmed_len
                       and header_text_trimmed[i] != ';'):
                    i += 1
            # str.find() == -1 if character is not found in str.
            # https://docs.python.org/3.6/library/stdtypes.html?highlight=find#str.find
            elif not curses.ascii.isascii(
                    header_text_trimmed[i]) or STRIPPED.find(
                        header_text_trimmed[i]) != -1:
                # Collapse runs of stripped characters into one '-'.
                if inserted and not stripped:
                    header_text_trimmed_middle_stage += '-'
                stripped = 1
            else:
                header_text_trimmed_middle_stage += header_text_trimmed[
                    i].lower()
                stripped = 0
                inserted += 1
            i += 1
        if stripped > 0 and inserted > 0:
            # Drop a trailing '-' left over by stripped characters.
            header_text_trimmed_middle_stage = header_text_trimmed_middle_stage[
                0:-1]
        if inserted == 0 and header_text_trimmed_len > 0:
            hash = 5381
            for i in range(0, header_text_trimmed_len):
                # Get the unicode representation with ord.
                # Unicode should be equal to ASCII in ASCII's range of
                # characters.
                hash = ((hash << 5) + hash) + ord(header_text_trimmed[i])
            # This is equivalent to %x in C. In Python we don't have
            # the length problem so %x is equal to %lx in this case.
            # NOTE(review): Python integers do not overflow, while the C
            # original wraps an unsigned integer — for long inputs the
            # value may differ from redcarpet's; confirm if exact parity
            # is required.
            header_text_trimmed_middle_stage = 'part-' + '{0:x}'.format(hash)
        return header_text_trimmed_middle_stage
def get_atx_heading(line: str,
                    keep_header_levels: int = 3,
                    parser: str = 'github',
                    no_links: bool = False):
    r"""Given a line extract the link label and its type.

    :parameter line: the line to be examined.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :parameter no_links: disables the use of links. When links are enabled
         the label is validated and square brackets are escaped.
         Defaults to ``False``.
    :type line: str
    :type keep_header_levels: int
    :type parser: str
    :type no_links: bool
    :returns: None if the line does not contain header elements according to
         the rules of the selected markdown parser, or a tuple containing the
         header type and the trimmed header text, according to the selected
         parser rules, otherwise.
    :rtype: typing.Optional[tuple]
    :raises: GithubEmptyLinkLabel or GithubOverflowCharsLinkLabel or a
         built-in exception.
    """
    assert keep_header_levels >= 1

    # An empty line can never be a header.
    if len(line) == 0:
        return None

    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):

        # A backslash-escaped '#' sequence is not a header.
        if line[0] == '\u005c':
            return None

        # Skip leading spaces, up to the maximum allowed indentation.
        i = 0
        while i < len(line) and line[i] == ' ' and i <= md_parser['github'][
                'header']['max_space_indentation']:
            i += 1
        if i > md_parser['github']['header']['max_space_indentation']:
            return None

        # Count the '#' characters: their number is the header level.
        offset = i
        while i < len(line) and line[i] == '#' and i <= md_parser['github'][
                'header']['max_levels'] + offset:
            i += 1
        if i - offset > md_parser['github']['header'][
                'max_levels'] or i - offset > keep_header_levels or i - offset == 0:
            return None

        current_headers = i - offset

        # The '#' sequence must be followed by a space or a line ending.
        # Include special cases for line endings which should not be
        # discarded as non-ATX headers.
        if i < len(line) and (line[i] != ' ' and line[i] != '\u000a'
                              and line[i] != '\u000d'):
            return None

        i += 1
        # Exclude leading whitespaces after the ATX header identifier.
        while i < len(line) and line[i] == ' ':
            i += 1

        # An algorithm to find the start and the end of the closing sequence.
        # The closing sequence includes all the significant part of the
        # string. This algorithm has a complexity of O(n) with n being the
        # length of the line.
        # The line is scanned backwards (line_prime) to drop an optional
        # trailing run of '#' and the spaces around it.
        cs_start = i
        cs_end = cs_start
        line_prime = line[::-1]
        hash_char_rounds = 0
        go_on = True
        i = 0
        i_prev = i
        while i < len(line) - cs_start - 1 and go_on:
            if ((line_prime[i] != ' ' and line_prime[i] != '#')
                    or hash_char_rounds > 1):
                if i > i_prev:
                    cs_end = len(line_prime) - i_prev
                else:
                    cs_end = len(line_prime) - i
                go_on = False
            while go_on and line_prime[i] == ' ':
                i += 1
            i_prev = i
            while go_on and line_prime[i] == '#':
                i += 1
            if i > i_prev:
                hash_char_rounds += 1

        # Instead of changing the whole algorithm to check for line
        # endings, this seems cleaner.
        find_newline = line.find('\u000a')
        find_carriage_return = line.find('\u000d')
        if find_newline != -1:
            cs_end = min(cs_end, find_newline)
        if find_carriage_return != -1:
            cs_end = min(cs_end, find_carriage_return)

        final_line = line[cs_start:cs_end]

        if not no_links:
            # A trailing backslash would escape the closing parenthesis of
            # the link: neutralise it with a trailing space.
            if len(final_line) > 0 and final_line[-1] == '\u005c':
                final_line += ' '
            # A label made of whitespace characters only is forbidden.
            if len(
                    final_line.strip('\u0020').strip('\u0009').strip('\u000a').
                    strip('\u000b').strip('\u000c').strip('\u000d')) == 0:
                raise GithubEmptyLinkLabel
            if len(final_line
                   ) > md_parser['github']['link']['max_chars_label']:
                raise GithubOverflowCharsLinkLabel

            # Escape square brackets if not already escaped.
            i = 0
            while i < len(final_line):
                if (final_line[i] == '[' or final_line[i] == ']'):
                    # Count the backslashes immediately before the bracket:
                    # an even number (including zero) means the bracket is
                    # not escaped yet.
                    j = i - 1
                    consecutive_escape_characters = 0
                    while j >= 0 and final_line[j] == '\u005c':
                        consecutive_escape_characters += 1
                        j -= 1
                    if ((consecutive_escape_characters > 0
                         and consecutive_escape_characters % 2 == 0)
                            or consecutive_escape_characters == 0):
                        tmp = '\u005c'
                    else:
                        tmp = str()
                    final_line = final_line[0:i] + tmp + final_line[i:len(
                        final_line)]
                    i += 1 + len(tmp)
                else:
                    i += 1

    elif parser == 'redcarpet':

        if line[0] != '#':
            return None

        # Count the leading '#' characters, capped at the maximum level.
        i = 0
        while (i < len(line)
               and i < md_parser['redcarpet']['header']['max_levels']
               and line[i] == '#'):
            i += 1
        current_headers = i

        if i < len(line) and line[i] != ' ':
            return None

        # Skip whitespaces between the '#' sequence and the header text.
        while i < len(line) and line[i] == ' ':
            i += 1

        # Find the end of the header text: discard the line ending, an
        # optional trailing '#' run and the spaces that precede it.
        end = i
        while end < len(line) and line[end] != '\n':
            end += 1
        while end > 0 and line[end - 1] == '#':
            end -= 1
        while end > 0 and line[end - 1] == ' ':
            end -= 1
        if end > i:
            final_line = line
            if not no_links and len(final_line) > 0 and final_line[-1] == '\\':
                final_line += ' '
                end += 1
            final_line = final_line[i:end]
        else:
            return None

    # TODO: escape or remove '[', ']', '(', ')' in inline links for redcarpet,
    # TODO: check link label rules for redcarpet.

    # NOTE(review): an unrecognized parser falls through to this return with
    # current_headers/final_line unbound, raising UnboundLocalError —
    # presumably unreachable from the public entry points; confirm.
    return current_headers, final_line
def get_md_header(header_text_line: str,
                  header_duplicate_counter: dict,
                  keep_header_levels: int = 3,
                  parser: str = 'github',
                  no_links: bool = False) -> dict:
    r"""Build a data structure with the elements needed to create a TOC line.

    :parameter header_text_line: a single markdown line that needs to be
         transformed into a TOC line.
    :parameter header_duplicate_counter: a data structure that contains the
         number of occurrencies of each header anchor link. This is used to
         avoid duplicate anchor links and it is meaningful only for certain
         values of parser.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :parameter no_links: disables the use of links. Defaults to ``False``.
    :type header_text_line: str
    :type header_duplicate_counter: dict
    :type keep_header_levels: int
    :type parser: str
    :type no_links: bool
    :returns: None if the input line does not correspond to one of the
         designated cases or a data structure containing the necessary
         components to create a table of contents line, otherwise.
    :rtype: dict
    :raises: a built-in exception.

    .. note::
         This works like a wrapper to other functions.
    """
    result = get_atx_heading(header_text_line, keep_header_levels, parser,
                             no_links)
    if result is None:
        return None

    header_type, header_text_trimmed = result
    return {
        'type':
        header_type,
        'text_original':
        header_text_trimmed,
        'text_anchor_link':
        build_anchor_link(header_text_trimmed, header_duplicate_counter,
                          parser)
    }
def is_opening_code_fence(line: str, parser: str = 'github'):
    r"""Determine if the given line is possibly the opening of a fenced code block.

    :parameter line: a single markdown line to evaluate.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :type line: str
    :type parser: str
    :returns: None if the input line is not an opening code fence. Otherwise,
         returns the string which will identify the closing code fence
         according to the input parsers' rules.
    :rtype: typing.Optional[str]
    :raises: a built-in exception.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        markers = md_parser['github']['code fence']['marker']
        marker_min_length = md_parser['github']['code fence'][
            'min_marker_characters']

        if not is_valid_code_fence_indent(line):
            return None

        line = line.lstrip(' ').rstrip('\n')
        # The line must begin with a long-enough run of one fence character.
        if not line.startswith((markers[0] * marker_min_length,
                                markers[1] * marker_min_length)):
            return None

        # The info string is whatever follows the run of fence characters.
        fence_char = line[0]
        if line == fence_char * len(line):
            info_string = str()
        else:
            info_string = line.lstrip(fence_char)

        # Backticks or tildes in the info string are explicitly forbidden.
        if any(marker in info_string for marker in markers):
            return None
        # Solves example 107. See:
        # https://github.github.com/gfm/#example-107
        if line.rstrip(markers[0]) != line and line.rstrip(markers[1]) != line:
            return None

        return line.rstrip(info_string)
    elif parser == 'redcarpet':
        # TODO.
        return None
def is_closing_code_fence(line: str,
                          fence: str,
                          is_document_end: bool = False,
                          parser: str = 'github') -> bool:
    r"""Determine if the given line is the end of a fenced code block.

    :parameter line: a single markdown line to evaluate.
    :parameter fence: a sequence of backticks or tildes marking the start of
         the current code block. This is usually the return value of the
         is_opening_code_fence function.
    :parameter is_document_end: This variable tells the function that the
         end of the file is reached.
         Defaults to ``False``.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :type line: str
    :type fence: str
    :type is_document_end: bool
    :type parser: str
    :returns: True if the line ends the current code block. False otherwise.
    :rtype: bool
    :raises: a built-in exception.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        markers = md_parser['github']['code fence']['marker']
        marker_min_length = md_parser['github']['code fence'][
            'min_marker_characters']

        if not is_valid_code_fence_indent(line):
            return False

        # First validate the opening fence itself: strip its indentation,
        # check the fence character, the minimum length and that all of its
        # characters are identical.
        fence = fence.lstrip(' ')
        if not fence.startswith((markers[0], markers[1])):
            return False
        if len(fence) < marker_min_length:
            return False
        # Additional security.
        fence = fence.rstrip('\n').rstrip(' ')
        if fence != fence[0] * len(fence):
            return False

        # We might be inside a code block if this is not closed
        # by the end of the document, according to example 95 and 96.
        # This means that the end of the document corresponds to
        # a closing code fence.
        # Of course we first have to check that fence is a valid opening
        # code fence marker.
        # See:
        # https://github.github.com/gfm/#example-95
        # https://github.github.com/gfm/#example-96
        if is_document_end:
            return True

        # The closing line must start with at least the same run of fence
        # characters as the opening fence.
        line = line.lstrip(' ')
        if not line.startswith(fence):
            return False
        line = line.rstrip('\n').rstrip(' ')
        # Solves example 93 and 94. See:
        # https://github.github.com/gfm/#example-93
        # https://github.github.com/gfm/#example-94
        if len(line) < len(fence):
            return False
        # Closing fence must not have alien characters.
        if line != line[0] * len(line):
            return False

        return True
    elif parser == 'redcarpet':
        # TODO.
        return False
def build_indentation_list(parser: str = 'github'):
    r"""Create a data structure that holds the state of indentations.

    :parameter parser: decides the length of the list.
         Defaults to ``github``.
    :type parser: str
    :returns: indentation_list, a list that contains the state of
         indentations given a header type.
    :rtype: list
    :raises: a built-in exception.
    """
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker', 'redcarpet'):
        # One boolean cell per possible header level, all initially unset.
        return [
            False for _ in range(md_parser[parser]['header']['max_levels'])
        ]
    return list()
def toc_renders_as_coherent_list(
        header_type_curr: int = 1,
        indentation_list: list = None,
        parser: str = 'github') -> bool:
    r"""Check if the TOC will render as a working list.

    :parameter header_type_curr: the current type of header (h[1-Inf]).
         Defaults to ``1``.
    :parameter indentation_list: a list that contains the state of
         indentations given a header type, usually the return value of
         build_indentation_list. Defaults to ``None``, which means that a
         fresh ``build_indentation_list('github')`` structure is built.
    :parameter parser: decides rules on how to generate ordered list markers.
         Defaults to ``github``.
    :type header_type_curr: int
    :type indentation_list: list
    :type parser: str
    :returns: renders_as_list
    :rtype: bool
    :raises: a built-in exception.
    """
    assert header_type_curr >= 1
    # Bug fix: the previous signature used the mutable default
    # ``build_indentation_list('github')`` which is evaluated only once at
    # function definition time. Since this function mutates
    # indentation_list in place, calls relying on the default silently
    # shared state across invocations. Build a fresh structure per call.
    if indentation_list is None:
        indentation_list = build_indentation_list('github')
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        assert len(
            indentation_list) == md_parser[parser]['header']['max_levels']
        for e in indentation_list:
            assert isinstance(e, bool)

    renders_as_list = True
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        # Update with current information.
        indentation_list[header_type_curr - 1] = True
        # Reset next cells to False, as a detection mechanism.
        for i in range(header_type_curr,
                       md_parser['github']['header']['max_levels']):
            indentation_list[i] = False
        # Check for previous False cells. If there is a "hole" in the list
        # it means that the TOC will have "wrong" indentation spaces, thus
        # either not rendering as an HTML list or not as the user intended.
        i = header_type_curr - 1
        while i >= 0 and indentation_list[i]:
            i -= 1
        if i >= 0:
            renders_as_list = False

    return renders_as_list
if __name__ == '__main__':
    # This module is a library: there is nothing to run when it is
    # executed directly.
    pass
|
frnmst/md-toc | md_toc/api.py | is_opening_code_fence | python | def is_opening_code_fence(line: str, parser: str = 'github'):
r"""Determine if the given line is possibly the opening of a fenced code block.
:parameter line: a single markdown line to evaluate.
:parameter parser: decides rules on how to generate the anchor text.
Defaults to ``github``.
:type line: str
:type parser: str
:returns: None if the input line is not an opening code fence. Otherwise,
returns the string which will identify the closing code fence
according to the input parsers' rules.
:rtype: typing.Optional[str]
:raises: a built-in exception.
"""
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker'):
markers = md_parser['github']['code fence']['marker']
marker_min_length = md_parser['github']['code fence'][
'min_marker_characters']
if not is_valid_code_fence_indent(line):
return None
line = line.lstrip(' ').rstrip('\n')
if not line.startswith(
(markers[0] * marker_min_length, markers[1] * marker_min_length)):
return None
if line == len(line) * line[0]:
info_string = str()
else:
info_string = line.lstrip(line[0])
# Backticks or tildes in info string are explicitly forbidden.
if markers[0] in info_string or markers[1] in info_string:
return None
# Solves example 107. See:
# https://github.github.com/gfm/#example-107
if line.rstrip(markers[0]) != line and line.rstrip(markers[1]) != line:
return None
return line.rstrip(info_string)
elif parser == 'redcarpet':
# TODO.
return None | r"""Determine if the given line is possibly the opening of a fenced code block.
:parameter line: a single markdown line to evaluate.
:parameter parser: decides rules on how to generate the anchor text.
Defaults to ``github``.
:type line: str
:type parser: str
:returns: None if the input line is not an opening code fence. Otherwise,
returns the string which will identify the closing code fence
according to the input parsers' rules.
:rtype: typing.Optional[str]
:raises: a built-in exception. | train | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/api.py#L914-L957 | [
"def is_valid_code_fence_indent(line: str, parser: str = 'github') -> bool:\n r\"\"\"Determine if the given line has valid indentation for a code block fence.\n\n :parameter line: a single markdown line to evaluate.\n :parameter parser: decides rules on how to generate the anchor text.\n Defaults to ``github``.\n :type line: str\n :type parser: str\n :returns: True if the given line has valid indentation or False\n otherwise.\n :rtype: bool\n :raises: a built-in exception.\n \"\"\"\n if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'\n or parser == 'commonmarker'):\n return len(line) - len(line.lstrip(\n ' ')) <= md_parser['github']['code fence']['min_marker_characters']\n elif parser == 'redcarpet':\n # TODO.\n return False\n"
] | #
# api.py
#
# Copyright (C) 2017-2019 frnmst (Franco Masotti) <franco.masotti@live.com>
#
# This file is part of md-toc.
#
# md-toc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# md-toc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with md-toc. If not, see <http://www.gnu.org/licenses/>.
#
"""The main file."""
import fpyutils
import re
import curses.ascii
import sys
from .exceptions import (GithubOverflowCharsLinkLabel, GithubEmptyLinkLabel,
GithubOverflowOrderedListMarker,
StdinIsNotAFileToBeWritten,
TocDoesNotRenderAsCoherentList)
from .constants import common_defaults
from .constants import parser as md_parser
def write_string_on_file_between_markers(filename: str, string: str,
marker: str):
r"""Write the table of contents on a single file.
:parameter filename: the file that needs to be read or modified.
:parameter string: the string that will be written on the file.
:parameter marker: a marker that will identify the start
and the end of the string.
:type filenames: str
:type string: str
:type marker: str
:returns: None
:rtype: None
:raises: StdinIsNotAFileToBeWritten or an fpyutils exception
or a built-in exception.
"""
if filename == '-':
raise StdinIsNotAFileToBeWritten
final_string = marker + '\n\n' + string.rstrip() + '\n\n' + marker + '\n'
marker_line_positions = fpyutils.get_line_matches(
filename, marker, 2, loose_matching=True)
if 1 in marker_line_positions:
if 2 in marker_line_positions:
fpyutils.remove_line_interval(filename, marker_line_positions[1],
marker_line_positions[2], filename)
else:
fpyutils.remove_line_interval(filename, marker_line_positions[1],
marker_line_positions[1], filename)
fpyutils.insert_string_at_line(
filename,
final_string,
marker_line_positions[1],
filename,
append=False)
def write_strings_on_files_between_markers(filenames: list, strings: list,
marker: str):
r"""Write the table of contents on multiple files.
:parameter filenames: the files that needs to be read or modified.
:parameter strings: the strings that will be written on the file. Each
string is associated with one file.
:parameter marker: a marker that will identify the start
and the end of the string.
:type filenames: list
:type string: list
:type marker: str
:returns: None
:rtype: None
:raises: an fpyutils exception or a built-in exception.
"""
assert len(filenames) == len(strings)
if len(filenames) > 0:
for f in filenames:
assert isinstance(f, str)
if len(strings) > 0:
for s in strings:
assert isinstance(s, str)
file_id = 0
for f in filenames:
write_string_on_file_between_markers(f, strings[file_id], marker)
file_id += 1
def build_toc(filename: str,
              ordered: bool = False,
              no_links: bool = False,
              no_indentation: bool = False,
              no_list_coherence: bool = False,
              keep_header_levels: int = 3,
              parser: str = 'github',
              list_marker: str = '-') -> str:
    r"""Build the table of contents of a single file.

    :parameter filename: the file that needs to be read. If its value is
         ``-`` the standard input is read instead.
    :parameter ordered: decides whether to build an ordered list or not.
         Defaults to ``False``.
    :parameter no_links: disables the use of links.
         Defaults to ``False``.
    :parameter no_indentation: disables indentation in the list.
         Defaults to ``False``.
    :parameter no_list_coherence: disables the TOC list coherence check.
         Defaults to ``False``.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``-``.
    :type filename: str
    :type ordered: bool
    :type no_links: bool
    :type no_indentation: bool
    :type no_list_coherence: bool
    :type keep_header_levels: int
    :type parser: str
    :type list_marker: str
    :returns: toc, the corresponding table of contents of the file.
    :rtype: str
    :raises: TocDoesNotRenderAsCoherentList or a built-in exception.
    """
    toc = str()
    header_type_counter = dict()
    header_type_curr = 0
    header_type_prev = 0
    header_duplicate_counter = dict()
    no_of_indentation_spaces_prev = 0
    if filename == '-':
        f = sys.stdin
    else:
        f = open(filename, 'r')
    line = f.readline()
    # The list marker log is meaningful for ordered lists only.
    # Note: this used to be computed a second, redundant time before the
    # file was opened; only this single computation is needed.
    if ordered:
        list_marker_log = build_list_marker_log(parser, list_marker)
    else:
        list_marker_log = list()
    is_within_code_fence = False
    code_fence = None
    is_document_end = False
    if not no_indentation and not no_list_coherence:
        # if indentation and list coherence.
        indentation_list = build_indentation_list(parser)
    while line:
        # Document ending detection.
        #
        # This changes the state of is_within_code_fence if the
        # file has no closing fence markers. This serves no practical
        # purpose since the code would run correctly anyway. It is
        # however more semantically correct.
        #
        # See the unit tests (examples 95 and 96 of the github parser)
        # and the is_closing_code_fence function.
        if filename != '-':
            # stdin is not seekable.
            file_pointer_pos = f.tell()
            if f.readline() == str():
                is_document_end = True
            f.seek(file_pointer_pos)
        # Code fence detection.
        if is_within_code_fence:
            is_within_code_fence = not is_closing_code_fence(
                line, code_fence, is_document_end, parser)
            line = f.readline()
        else:
            code_fence = is_opening_code_fence(line, parser)
            if code_fence is not None:
                # Update the status of the next line.
                is_within_code_fence = True
                line = f.readline()
        if not is_within_code_fence or code_fence is None:
            # Header detection and gathering.
            header = get_md_header(line, header_duplicate_counter,
                                   keep_header_levels, parser, no_links)
            if header is not None:
                header_type_curr = header['type']
                # Take care of the ordered TOC.
                if ordered:
                    increase_index_ordered_list(header_type_counter,
                                                header_type_prev,
                                                header_type_curr, parser)
                    index = header_type_counter[header_type_curr]
                else:
                    index = 1
                # Take care of list indentations.
                if no_indentation:
                    no_of_indentation_spaces_curr = 0
                    # TOC list coherence checks are not necessary
                    # without indentation.
                else:
                    if not no_list_coherence:
                        if not toc_renders_as_coherent_list(
                                header_type_curr, indentation_list, parser):
                            raise TocDoesNotRenderAsCoherentList
                    no_of_indentation_spaces_curr = compute_toc_line_indentation_spaces(
                        header_type_curr, header_type_prev,
                        no_of_indentation_spaces_prev, parser, ordered,
                        list_marker, list_marker_log, index)
                # Build a single TOC line.
                toc_line_no_indent = build_toc_line_without_indentation(
                    header, ordered, no_links, index, parser, list_marker)
                # Save the TOC line with the indentation.
                toc += build_toc_line(toc_line_no_indent,
                                      no_of_indentation_spaces_curr) + '\n'
                header_type_prev = header_type_curr
                no_of_indentation_spaces_prev = no_of_indentation_spaces_curr
            # endif
            # endif
            line = f.readline()
    # endwhile
    # Never close the standard input stream: it is owned by the
    # interpreter, not by this function.
    if filename != '-':
        f.close()
    return toc
def build_multiple_tocs(filenames: list,
                        ordered: bool = False,
                        no_links: bool = False,
                        no_indentation: bool = False,
                        no_list_coherence: bool = False,
                        keep_header_levels: int = 3,
                        parser: str = 'github',
                        list_marker: str = '-') -> list:
    r"""Parse files by line and build the table of contents of each file.

    :parameter filenames: the files that need to be read. If the list is
         empty the standard input is read instead.
    :parameter ordered: decides whether to build an ordered list or not.
         Defaults to ``False``.
    :parameter no_links: disables the use of links.
         Defaults to ``False``.
    :parameter no_indentation: disables indentation in the list.
         Defaults to ``False``.
    :parameter no_list_coherence: disables the TOC list coherence check.
         Defaults to ``False``.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``-``.
    :type filenames: list
    :type ordered: bool
    :type no_links: bool
    :type no_indentation: bool
    :type no_list_coherence: bool
    :type keep_header_levels: int
    :type parser: str
    :type list_marker: str
    :returns: toc_struct, the corresponding table of contents for each input
         file.
    :rtype: list
    :raises: a built-in exception.
    """
    for f in filenames:
        assert isinstance(f, str)
    # Rebind to a local list so the caller's list is never mutated when
    # the standard input placeholder has to be added.
    if len(filenames) == 0:
        filenames = ['-']
    toc_struct = [
        build_toc(filename, ordered, no_links, no_indentation,
                  no_list_coherence, keep_header_levels, parser, list_marker)
        for filename in filenames
    ]
    return toc_struct
def increase_index_ordered_list(header_type_count: dict,
                                header_type_prev: int,
                                header_type_curr: int,
                                parser: str = 'github'):
    r"""Compute the current index for ordered list table of contents.

    :parameter header_type_count: the count of each header type.
    :parameter header_type_prev: the previous type of header (h[1-Inf]).
    :parameter header_type_curr: the current type of header (h[1-Inf]).
    :parameter parser: decides rules on how to generate ordered list markers.
         Defaults to ``github``.
    :type header_type_count: dict
    :type header_type_prev: int
    :type header_type_curr: int
    :type parser: str
    :returns: None
    :rtype: None
    :raises: GithubOverflowOrderedListMarker or a built-in exception.
    """
    # header_type_prev might be 0 while header_type_curr cannot.
    assert header_type_prev >= 0
    assert header_type_curr >= 1
    if header_type_prev == 0:
        # A brand new table of contents is being built.
        header_type_prev = header_type_curr
    # Restart the counter for a header type that was never seen before or
    # when descending into a deeper nesting level.
    if (header_type_curr not in header_type_count
            or header_type_prev < header_type_curr):
        header_type_count[header_type_curr] = 0
    header_type_count[header_type_curr] += 1
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        limit = md_parser['github']['list']['ordered']['max_marker_number']
        if header_type_count[header_type_curr] > limit:
            raise GithubOverflowOrderedListMarker
def build_list_marker_log(parser: str = 'github',
                          list_marker: str = '.') -> list:
    r"""Create a data structure that holds list marker information.

    :parameter parser: decides rules on how to compute indentations.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``.``.
    :type parser: str
    :type list_marker: str
    :returns: list_marker_log, the data structure.
    :rtype: list
    :raises: a built-in exception.

    .. note::
         This function makes sense for ordered lists only.
    """
    github_like = parser in ('github', 'cmark', 'gitlab', 'commonmarker')
    if github_like or parser == 'redcarpet':
        assert list_marker in md_parser[parser]['list']['ordered'][
            'closing_markers']
    list_marker_log = list()
    if github_like:
        # Every header level starts from the minimum marker number.
        first_marker = str(
            md_parser['github']['list']['ordered']['min_marker_number'])
        list_marker_log = [
            first_marker + list_marker
            for _ in range(md_parser['github']['header']['max_levels'])
        ]
    return list_marker_log
def compute_toc_line_indentation_spaces(
        header_type_curr: int = 1,
        header_type_prev: int = 0,
        no_of_indentation_spaces_prev: int = 0,
        parser: str = 'github',
        ordered: bool = False,
        list_marker: str = '-',
        list_marker_log: list = None,
        index: int = 1) -> int:
    r"""Compute the number of indentation spaces for the TOC list element.

    :parameter header_type_curr: the current type of header (h[1-Inf]).
         Defaults to ``1``.
    :parameter header_type_prev: the previous type of header (h[1-Inf]).
         Defaults to ``0``.
    :parameter no_of_indentation_spaces_prev: the number of previous
         indentation spaces. Defaults to ``0``.
    :parameter parser: decides rules on how to compute indentations.
         Defaults to ``github``.
    :parameter ordered: if set to ``True``, numbers will be used
         as list ids, otherwise a dash character.
         Defaults to ``False``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element.
         Defaults to ``-``.
    :parameter list_marker_log: a data structure that holds list marker
         information for ordered lists. If ``None``, a fresh
         ``build_list_marker_log('github', '.')`` structure is built.
         Defaults to ``None``.
    :parameter index: a number that will be used as list id in case of an
         ordered table of contents. Defaults to ``1``.
    :type header_type_curr: int
    :type header_type_prev: int
    :type no_of_indentation_spaces_prev: int
    :type parser: str
    :type ordered: bool
    :type list_marker: str
    :type list_marker_log: list
    :type index: int
    :returns: no_of_indentation_spaces_curr, the number of indentation spaces
         for the list element.
    :rtype: int
    :raises: a built-in exception.

    .. note::
         Please note that this function assumes that
         no_of_indentation_spaces_prev contains the correct number of spaces.
    """
    # Avoid the mutable default argument pitfall: the previous default of
    # ``build_list_marker_log('github', '.')`` was evaluated only once at
    # function definition time and was then mutated below, leaking list
    # marker state between unrelated calls. Build a fresh structure per call.
    if list_marker_log is None:
        list_marker_log = build_list_marker_log('github', '.')
    assert header_type_curr >= 1
    assert header_type_prev >= 0
    assert no_of_indentation_spaces_prev >= 0
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        if ordered:
            assert list_marker in md_parser[parser]['list']['ordered'][
                'closing_markers']
        else:
            assert list_marker in md_parser[parser]['list']['unordered'][
                'bullet_markers']
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        if ordered:
            assert len(
                list_marker_log) == md_parser['github']['header']['max_levels']
            for e in list_marker_log:
                assert isinstance(e, str)
    assert index >= 1
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        if header_type_prev == 0:
            # Base case for the first toc line.
            no_of_indentation_spaces_curr = 0
        elif header_type_curr == header_type_prev:
            # Base case for same indentation.
            no_of_indentation_spaces_curr = no_of_indentation_spaces_prev
        else:
            if ordered:
                list_marker_prev = str(list_marker_log[header_type_curr - 1])
            else:
                # list_marker for unordered lists will always be 1 character.
                list_marker_prev = list_marker
            # Generic cases.
            if header_type_curr > header_type_prev:
                # More indentation.
                no_of_indentation_spaces_curr = (
                    no_of_indentation_spaces_prev + len(list_marker_prev) +
                    len(' '))
            elif header_type_curr < header_type_prev:
                # Less indentation.
                no_of_indentation_spaces_curr = (
                    no_of_indentation_spaces_prev -
                    (len(list_marker_prev) + len(' ')))
            # Reset older nested list indices. If this is not performed then
            # future nested ordered lists will rely on incorrect data to
            # compute indentations.
            if ordered:
                for i in range((header_type_curr - 1) + 1,
                               md_parser['github']['header']['max_levels']):
                    list_marker_log[i] = str(
                        md_parser['github']['list']['ordered']
                        ['min_marker_number']) + list_marker
        # Update the data structure.
        if ordered:
            list_marker_log[header_type_curr - 1] = str(index) + list_marker
    elif parser == 'redcarpet':
        no_of_indentation_spaces_curr = 4 * (header_type_curr - 1)
    return no_of_indentation_spaces_curr
def build_toc_line_without_indentation(header: dict,
                                       ordered: bool = False,
                                       no_links: bool = False,
                                       index: int = 1,
                                       parser: str = 'github',
                                       list_marker: str = '-') -> str:
    r"""Return a list element of the table of contents.

    :parameter header: a data structure that contains the original
         text, the trimmed text and the type of header.
    :parameter ordered: if set to ``True``, numbers will be used
         as list ids, otherwise a dash character. Defaults to ``False``.
    :parameter no_links: disables the use of links. Defaults to ``False``.
    :parameter index: a number that will be used as list id in case of an
         ordered table of contents. Defaults to ``1``.
    :parameter parser: decides rules on how to compute indentations.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``-``.
    :type header: dict
    :type ordered: bool
    :type no_links: bool
    :type index: int
    :type parser: str
    :type list_marker: str
    :returns: toc_line_no_indent, a single line of the table of contents
         without indentation.
    :rtype: str
    :raises: a built-in exception.
    """
    assert 'type' in header
    assert 'text_original' in header
    assert 'text_anchor_link' in header
    assert isinstance(header['type'], int)
    assert isinstance(header['text_original'], str)
    assert isinstance(header['text_anchor_link'], str)
    assert header['type'] >= 1
    assert index >= 1
    supported = parser in ('github', 'cmark', 'gitlab', 'commonmarker',
                           'redcarpet')
    if supported:
        if ordered:
            assert list_marker in md_parser[parser]['list']['ordered'][
                'closing_markers']
        else:
            assert list_marker in md_parser[parser]['list']['unordered'][
                'bullet_markers']
    entry = str()
    if supported:
        if ordered:
            # Prepend the list index to the closing marker, e.g. '1.'.
            list_marker = str(index) + list_marker
        # FIXME: is this always correct?
        if no_links:
            label = header['text_original']
        else:
            label = ('[' + header['text_original'] + ']' + '(#'
                     + header['text_anchor_link'] + ')')
        entry = list_marker + ' ' + label
    return entry
def build_toc_line(toc_line_no_indent: str,
                   no_of_indentation_spaces: int = 0) -> str:
    r"""Build the TOC line by prepending the indentation spaces.

    :parameter toc_line_no_indent: the TOC line without indentation.
    :parameter no_of_indentation_spaces: the number of indentation spaces.
         Defaults to ``0``.
    :type toc_line_no_indent: str
    :type no_of_indentation_spaces: int
    :returns: toc_line, a single line of the table of contents.
    :rtype: str
    :raises: a built-in exception.
    """
    assert no_of_indentation_spaces >= 0
    return ' ' * no_of_indentation_spaces + toc_line_no_indent
def build_anchor_link(header_text_trimmed: str,
                      header_duplicate_counter: dict,
                      parser: str = 'github') -> str:
    r"""Apply the specified slug rule to build the anchor link.

    :parameter header_text_trimmed: the text that needs to be transformed
         in a link.
    :parameter header_duplicate_counter: a data structure that keeps track of
         possible duplicate header links in order to avoid them. This is
         meaningful only for certain values of parser.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :type header_text_trimmed: str
    :type header_duplicate_counter: dict
    :type parser: str
    :returns: None if the specified parser is not recognized, or the anchor
         link, otherwise.
    :rtype: str
    :raises: a built-in exception.

    .. note::
         The licenses of each markdown parser algorithm are reported on
         the 'Markdown spec' documentation page.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        header_text_trimmed = header_text_trimmed.lower()
        # Remove punctuation: Keep spaces, hypens and "word characters"
        # only.
        header_text_trimmed = re.sub(r'[^\w\s\- ]', '', header_text_trimmed)
        header_text_trimmed = header_text_trimmed.replace(' ', '-')
        # Check for duplicates.
        ht = header_text_trimmed
        # Set the initial value if we are examining the first occurrency.
        # The state of header_duplicate_counter is available to the caller
        # functions.
        if header_text_trimmed not in header_duplicate_counter:
            header_duplicate_counter[header_text_trimmed] = 0
        if header_duplicate_counter[header_text_trimmed] > 0:
            header_text_trimmed = header_text_trimmed + '-' + str(
                header_duplicate_counter[header_text_trimmed])
        header_duplicate_counter[ht] += 1
        return header_text_trimmed
    elif parser == 'redcarpet':
        # To ensure full compatibility what follows is a direct translation
        # of the rndr_header_anchor C function used in redcarpet.
        STRIPPED = " -&+$,/:;=?@\"#{}|^~[]`\\*()%.!'"
        header_text_trimmed_len = len(header_text_trimmed)
        inserted = 0
        stripped = 0
        header_text_trimmed_middle_stage = ''
        # The original C code is a for loop whose index is also advanced
        # inside the body to skip '<...>' and '&...;' sequences. A Python
        # for-range loop resets the index on every iteration, discarding
        # those skips, so a while loop is required to stay faithful to the
        # C semantics.
        i = 0
        while i < header_text_trimmed_len:
            if header_text_trimmed[i] == '<':
                # Skip everything up to the closing '>' (an HTML tag).
                while (i < header_text_trimmed_len
                       and header_text_trimmed[i] != '>'):
                    i += 1
            elif header_text_trimmed[i] == '&':
                # Skip everything up to the ';' (an HTML entity).
                while (i < header_text_trimmed_len
                       and header_text_trimmed[i] != ';'):
                    i += 1
            elif (not curses.ascii.isascii(header_text_trimmed[i])
                  or header_text_trimmed[i] in STRIPPED):
                # Collapse runs of stripped characters into a single dash,
                # but only between inserted characters.
                if inserted and not stripped:
                    header_text_trimmed_middle_stage += '-'
                stripped = 1
            else:
                header_text_trimmed_middle_stage += header_text_trimmed[
                    i].lower()
                stripped = 0
                inserted += 1
            # This mirrors the `++i` of the C for loop.
            i += 1
        if stripped > 0 and inserted > 0:
            # Drop the trailing dash.
            header_text_trimmed_middle_stage = header_text_trimmed_middle_stage[
                0:-1]
        if inserted == 0 and header_text_trimmed_len > 0:
            hash = 5381
            for i in range(0, header_text_trimmed_len):
                # Get the unicode representation with ord.
                # Unicode should be equal to ASCII in ASCII's range of
                # characters.
                hash = ((hash << 5) + hash) + ord(header_text_trimmed[i])
            # This is equivalent to %x in C. In Python we don't have
            # the length problem so %x is equal to %lx in this case.
            # Apparently there is no %l in Python...
            # NOTE(review): C computes the hash with unsigned long overflow
            # while Python integers are unbounded; long inputs may therefore
            # produce a different hex digest than redcarpet — confirm.
            header_text_trimmed_middle_stage = 'part-' + '{0:x}'.format(hash)
        return header_text_trimmed_middle_stage
def get_atx_heading(line: str,
                    keep_header_levels: int = 3,
                    parser: str = 'github',
                    no_links: bool = False):
    r"""Given a line extract the link label and its type.

    :parameter line: the line to be examined.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :parameter no_links: disables the use of links.
         Defaults to ``False``.
    :type line: str
    :type keep_header_levels: int
    :type parser: str
    :type no_links: bool
    :returns: None if the line does not contain header elements according to
         the rules of the selected markdown parser, or a tuple containing the
         header type and the trimmed header text, according to the selected
         parser rules, otherwise.
    :rtype: typing.Optional[tuple]
    :raises: GithubEmptyLinkLabel or GithubOverflowCharsLinkLabel or a
         built-in exception.
    """
    assert keep_header_levels >= 1
    if len(line) == 0:
        return None
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        # A backslash-escaped '#' is not a heading.
        if line[0] == '\u005c':
            return None
        # Consume the allowed leading indentation (at most
        # max_space_indentation spaces per the GFM ATX heading rules).
        i = 0
        while i < len(line) and line[i] == ' ' and i <= md_parser['github'][
                'header']['max_space_indentation']:
            i += 1
        if i > md_parser['github']['header']['max_space_indentation']:
            return None
        # Count the opening '#' sequence; its length is the header level.
        offset = i
        while i < len(line) and line[i] == '#' and i <= md_parser['github'][
                'header']['max_levels'] + offset:
            i += 1
        if i - offset > md_parser['github']['header'][
                'max_levels'] or i - offset > keep_header_levels or i - offset == 0:
            return None
        current_headers = i - offset
        # Include special cases for line endings which should not be
        # discarded as non-ATX headers: the '#' run must be followed by a
        # space or the end of the line.
        if i < len(line) and (line[i] != ' ' and line[i] != '\u000a'
                              and line[i] != '\u000d'):
            return None
        i += 1
        # Exclude leading whitespaces after the ATX header identifier.
        while i < len(line) and line[i] == ' ':
            i += 1
        # An algorithm to find the start and the end of the closing sequence.
        # The closing sequence includes all the significant part of the
        # string. This algorithm has a complexity of O(n) with n being the
        # length of the line.
        #
        # The line is scanned in reverse (line_prime): trailing spaces and
        # at most one round of '#' characters belong to the optional
        # closing sequence and are excluded from the heading text.
        cs_start = i
        cs_end = cs_start
        line_prime = line[::-1]
        hash_char_rounds = 0
        go_on = True
        i = 0
        i_prev = i
        while i < len(line) - cs_start - 1 and go_on:
            if ((line_prime[i] != ' ' and line_prime[i] != '#')
                    or hash_char_rounds > 1):
                if i > i_prev:
                    cs_end = len(line_prime) - i_prev
                else:
                    cs_end = len(line_prime) - i
                go_on = False
            while go_on and line_prime[i] == ' ':
                i += 1
            i_prev = i
            while go_on and line_prime[i] == '#':
                i += 1
            if i > i_prev:
                hash_char_rounds += 1
        # Instead of changing the whole algorithm to check for line
        # endings, this seems cleaner: clamp the end of the heading text to
        # the first newline or carriage return, whichever comes first.
        find_newline = line.find('\u000a')
        find_carriage_return = line.find('\u000d')
        if find_newline != -1:
            cs_end = min(cs_end, find_newline)
        if find_carriage_return != -1:
            cs_end = min(cs_end, find_carriage_return)
        final_line = line[cs_start:cs_end]
        if not no_links:
            # A trailing backslash would escape the closing ']' of the link
            # label, so pad it with a space.
            if len(final_line) > 0 and final_line[-1] == '\u005c':
                final_line += ' '
            # A label made only of whitespace characters is not a valid
            # GitHub link label.
            if len(
                    final_line.strip('\u0020').strip('\u0009').strip('\u000a').
                    strip('\u000b').strip('\u000c').strip('\u000d')) == 0:
                raise GithubEmptyLinkLabel
            if len(final_line
                   ) > md_parser['github']['link']['max_chars_label']:
                raise GithubOverflowCharsLinkLabel
            # Escape square brackets if not already escaped: an even number
            # of preceding backslashes (including zero) means the bracket
            # is unescaped and needs a new backslash.
            i = 0
            while i < len(final_line):
                if (final_line[i] == '[' or final_line[i] == ']'):
                    j = i - 1
                    consecutive_escape_characters = 0
                    while j >= 0 and final_line[j] == '\u005c':
                        consecutive_escape_characters += 1
                        j -= 1
                    if ((consecutive_escape_characters > 0
                         and consecutive_escape_characters % 2 == 0)
                            or consecutive_escape_characters == 0):
                        tmp = '\u005c'
                    else:
                        tmp = str()
                    final_line = final_line[0:i] + tmp + final_line[i:len(
                        final_line)]
                    i += 1 + len(tmp)
                else:
                    i += 1
    elif parser == 'redcarpet':
        # Redcarpet does not allow leading spaces before the '#' run.
        if line[0] != '#':
            return None
        i = 0
        while (i < len(line)
               and i < md_parser['redcarpet']['header']['max_levels']
               and line[i] == '#'):
            i += 1
        current_headers = i
        if i < len(line) and line[i] != ' ':
            return None
        while i < len(line) and line[i] == ' ':
            i += 1
        # Trim the optional trailing '#' run and the spaces before it.
        end = i
        while end < len(line) and line[end] != '\n':
            end += 1
        while end > 0 and line[end - 1] == '#':
            end -= 1
        while end > 0 and line[end - 1] == ' ':
            end -= 1
        if end > i:
            final_line = line
            if not no_links and len(final_line) > 0 and final_line[-1] == '\\':
                final_line += ' '
                end += 1
            final_line = final_line[i:end]
        else:
            return None
    # TODO: escape or remove '[', ']', '(', ')' in inline links for redcarpet,
    # TODO: check link label rules for redcarpet.
    return current_headers, final_line
def get_md_header(header_text_line: str,
                  header_duplicate_counter: dict,
                  keep_header_levels: int = 3,
                  parser: str = 'github',
                  no_links: bool = False) -> dict:
    r"""Build a data structure with the elements needed to create a TOC line.

    :parameter header_text_line: a single markdown line that needs to be
         transformed into a TOC line.
    :parameter header_duplicate_counter: a data structure that contains the
         number of occurrencies of each header anchor link. This is used to
         avoid duplicate anchor links and it is meaningful only for certain
         values of parser.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :parameter no_links: disables the use of links. Defaults to ``False``.
    :type header_text_line: str
    :type header_duplicate_counter: dict
    :type keep_header_levels: int
    :type parser: str
    :type no_links: bool
    :returns: None if the input line does not correspond to one of the
         designated cases or a data structure containing the necessary
         components to create a table of contents line, otherwise.
    :rtype: dict
    :raises: a built-in exception.

    .. note::
         This works like a wrapper to other functions.
    """
    result = get_atx_heading(header_text_line, keep_header_levels, parser,
                             no_links)
    if result is None:
        return None
    header_type, header_text_trimmed = result
    return {
        'type': header_type,
        'text_original': header_text_trimmed,
        'text_anchor_link': build_anchor_link(header_text_trimmed,
                                              header_duplicate_counter,
                                              parser),
    }
def is_valid_code_fence_indent(line: str, parser: str = 'github') -> bool:
    r"""Determine if the given line has valid indentation for a code block fence.

    :parameter line: a single markdown line to evaluate.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :type line: str
    :type parser: str
    :returns: True if the given line has valid indentation or False
         otherwise.
    :rtype: bool
    :raises: a built-in exception.
    """
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        # The number of leading spaces must not exceed the limit imposed
        # by the GitHub code fence rules.
        leading_spaces = len(line) - len(line.lstrip(' '))
        return leading_spaces <= md_parser['github']['code fence'][
            'min_marker_characters']
    elif parser == 'redcarpet':
        # TODO.
        return False
def is_closing_code_fence(line: str,
                          fence: str,
                          is_document_end: bool = False,
                          parser: str = 'github') -> bool:
    r"""Determine if the given line is the end of a fenced code block.

    :parameter line: a single markdown line to evaluate.
    :parameter fence: a sequence of backticks or tildes marking the start of
         the current code block. This is usually the return value of the
         is_opening_code_fence function.
    :parameter is_document_end: This variable tells the function that the
         end of the file is reached.
         Defaults to ``False``.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :type line: str
    :type fence: str
    :type is_document_end: bool
    :type parser: str
    :returns: True if the line ends the current code block. False otherwise.
    :rtype: bool
    :raises: a built-in exception.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        markers = md_parser['github']['code fence']['marker']
        marker_min_length = md_parser['github']['code fence'][
            'min_marker_characters']
        # The candidate closing line must not be indented more than the
        # maximum allowed for a fence.
        if not is_valid_code_fence_indent(line):
            return False
        # Remove opening fence indentation after it is known to be valid.
        fence = fence.lstrip(' ')
        # Check if fence uses valid characters (backtick or tilde).
        if not fence.startswith((markers[0], markers[1])):
            return False
        if len(fence) < marker_min_length:
            return False
        # Additional security: drop the trailing newline and spaces.
        fence = fence.rstrip('\n').rstrip(' ')
        # Check that all fence characters are equal.
        if fence != len(fence) * fence[0]:
            return False
        # We might be inside a code block if this is not closed
        # by the end of the document, according to example 95 and 96.
        # This means that the end of the document corresponds to
        # a closing code fence.
        # Of course we first have to check that fence is a valid opening
        # code fence marker.
        # See:
        # https://github.github.com/gfm/#example-95
        # https://github.github.com/gfm/#example-96
        if is_document_end:
            return True
        # Check if line uses the same character as fence.
        line = line.lstrip(' ')
        if not line.startswith(fence):
            return False
        line = line.rstrip('\n').rstrip(' ')
        # Solves example 93 and 94: a closing fence must be at least as
        # long as the opening one. See:
        # https://github.github.com/gfm/#example-93
        # https://github.github.com/gfm/#example-94
        if len(line) < len(fence):
            return False
        # Closing fence must not have alien characters.
        if line != len(line) * line[0]:
            return False
        return True
    elif parser == 'redcarpet':
        # TODO.
        return False
def build_indentation_list(parser: str = 'github'):
    r"""Create a data structure that holds the state of indentations.

    :parameter parser: decides the length of the list.
         Defaults to ``github``.
    :type parser: str
    :returns: indentation_list, a list that contains the state of
         indentations given a header type.
    :rtype: list
    :raises: a built-in exception.
    """
    indentation_list = list()
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker', 'redcarpet'):
        # One boolean flag per possible header level, all initially unset.
        indentation_list = [
            False for _ in range(md_parser[parser]['header']['max_levels'])
        ]
    return indentation_list
def toc_renders_as_coherent_list(
        header_type_curr: int = 1,
        indentation_list: list = None,
        parser: str = 'github') -> bool:
    r"""Check if the TOC will render as a working list.

    :parameter header_type_curr: the current type of header (h[1-Inf]).
         Defaults to ``1``.
    :parameter indentation_list: a list that contains the state of
         indentations given a header type. It is updated in place so that
         the caller can carry the state across successive calls. If
         ``None``, a fresh ``build_indentation_list('github')`` structure
         is used. Defaults to ``None``.
    :parameter parser: decides rules on how to generate ordered list markers.
         Defaults to ``github``.
    :type header_type_curr: int
    :type indentation_list: list
    :type parser: str
    :returns: renders_as_list
    :rtype: bool
    :raises: a built-in exception.
    """
    # Avoid the mutable default argument pitfall: the previous default of
    # ``build_indentation_list('github')`` was evaluated only once at
    # function definition time and was then mutated below, leaking
    # indentation state between unrelated calls.
    if indentation_list is None:
        indentation_list = build_indentation_list('github')
    assert header_type_curr >= 1
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        assert len(
            indentation_list) == md_parser[parser]['header']['max_levels']
        for e in indentation_list:
            assert isinstance(e, bool)
    renders_as_list = True
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        # Update with current information.
        indentation_list[header_type_curr - 1] = True
        # Reset next cells to False, as a detection mechanism.
        for i in range(header_type_curr,
                       md_parser['github']['header']['max_levels']):
            indentation_list[i] = False
        # Check for previous False cells. If there is a "hole" in the list
        # it means that the TOC will have "wrong" indentation spaces, thus
        # either not rendering as an HTML list or not as the user intended.
        i = header_type_curr - 1
        while i >= 0 and indentation_list[i]:
            i -= 1
        if i >= 0:
            renders_as_list = False
    return renders_as_list
if __name__ == '__main__':
    # This module is a library: nothing runs when it is executed directly.
    pass
|
frnmst/md-toc | md_toc/api.py | is_closing_code_fence | python | def is_closing_code_fence(line: str,
fence: str,
is_document_end: bool = False,
parser: str = 'github') -> bool:
r"""Determine if the given line is the end of a fenced code block.
:parameter line: a single markdown line to evaluate.
:paramter fence: a sequence of backticks or tildes marking the start of
the current code block. This is usually the return value of the
is_opening_code_fence function.
:parameter is_document_end: This variable tells the function that the
end of the file is reached.
Defaults to ``False``.
:parameter parser: decides rules on how to generate the anchor text.
Defaults to ``github``.
:type line: str
:type fence: str
:type is_document_end: bool
:type parser: str
:returns: True if the line ends the current code block. False otherwise.
:rtype: bool
:raises: a built-in exception.
"""
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker'):
markers = md_parser['github']['code fence']['marker']
marker_min_length = md_parser['github']['code fence'][
'min_marker_characters']
if not is_valid_code_fence_indent(line):
return False
# Remove opening fence indentation after it is known to be valid.
fence = fence.lstrip(' ')
# Check if fence uses valid characters.
if not fence.startswith((markers[0], markers[1])):
return False
if len(fence) < marker_min_length:
return False
# Additional security.
fence = fence.rstrip('\n').rstrip(' ')
# Check that all fence characters are equal.
if fence != len(fence) * fence[0]:
return False
# We might be inside a code block if this is not closed
# by the end of the document, according to example 95 and 96.
# This means that the end of the document corresponds to
# a closing code fence.
# Of course we first have to check that fence is a valid opening
# code fence marker.
# See:
# https://github.github.com/gfm/#example-95
# https://github.github.com/gfm/#example-96
if is_document_end:
return True
# Check if line uses the same character as fence.
line = line.lstrip(' ')
if not line.startswith(fence):
return False
line = line.rstrip('\n').rstrip(' ')
# Solves example 93 and 94. See:
# https://github.github.com/gfm/#example-93
# https://github.github.com/gfm/#example-94
if len(line) < len(fence):
return False
# Closing fence must not have alien characters.
if line != len(line) * line[0]:
return False
return True
elif parser == 'redcarpet':
# TODO.
return False | r"""Determine if the given line is the end of a fenced code block.
:parameter line: a single markdown line to evaluate.
:paramter fence: a sequence of backticks or tildes marking the start of
the current code block. This is usually the return value of the
is_opening_code_fence function.
:parameter is_document_end: This variable tells the function that the
end of the file is reached.
Defaults to ``False``.
:parameter parser: decides rules on how to generate the anchor text.
Defaults to ``github``.
:type line: str
:type fence: str
:type is_document_end: bool
:type parser: str
:returns: True if the line ends the current code block. False otherwise.
:rtype: bool
:raises: a built-in exception. | train | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/api.py#L960-L1038 | [
"def is_valid_code_fence_indent(line: str, parser: str = 'github') -> bool:\n r\"\"\"Determine if the given line has valid indentation for a code block fence.\n\n :parameter line: a single markdown line to evaluate.\n :parameter parser: decides rules on how to generate the anchor text.\n Defaults to ``github``.\n :type line: str\n :type parser: str\n :returns: True if the given line has valid indentation or False\n otherwise.\n :rtype: bool\n :raises: a built-in exception.\n \"\"\"\n if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'\n or parser == 'commonmarker'):\n return len(line) - len(line.lstrip(\n ' ')) <= md_parser['github']['code fence']['min_marker_characters']\n elif parser == 'redcarpet':\n # TODO.\n return False\n"
] | #
# api.py
#
# Copyright (C) 2017-2019 frnmst (Franco Masotti) <franco.masotti@live.com>
#
# This file is part of md-toc.
#
# md-toc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# md-toc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with md-toc. If not, see <http://www.gnu.org/licenses/>.
#
"""The main file."""
import fpyutils
import re
import curses.ascii
import sys
from .exceptions import (GithubOverflowCharsLinkLabel, GithubEmptyLinkLabel,
GithubOverflowOrderedListMarker,
StdinIsNotAFileToBeWritten,
TocDoesNotRenderAsCoherentList)
from .constants import common_defaults
from .constants import parser as md_parser
def write_string_on_file_between_markers(filename: str, string: str,
                                         marker: str):
    r"""Replace the marked region of a file with the given string.

    :parameter filename: the file that needs to be read or modified.
    :parameter string: the string that will be written on the file.
    :parameter marker: a marker that will identify the start
        and the end of the string.
    :type filename: str
    :type string: str
    :type marker: str
    :returns: None
    :rtype: None
    :raises: StdinIsNotAFileToBeWritten or an fpyutils exception
        or a built-in exception.
    """
    if filename == '-':
        raise StdinIsNotAFileToBeWritten
    # Surround the payload with a marker line on each side.
    final_string = marker + '\n\n' + string.rstrip() + '\n\n' + marker + '\n'
    # Line numbers of (at most) the first two marker occurrences.
    marker_line_positions = fpyutils.get_line_matches(
        filename, marker, 2, loose_matching=True)
    if 1 not in marker_line_positions:
        # No marker present: leave the file untouched.
        return
    first_marker_line = marker_line_positions[1]
    # Remove either the whole span between the two markers (inclusive)
    # or, with a single marker, just that one line.
    if 2 in marker_line_positions:
        last_removed_line = marker_line_positions[2]
    else:
        last_removed_line = first_marker_line
    fpyutils.remove_line_interval(filename, first_marker_line,
                                  last_removed_line, filename)
    # Write the freshly built block where the first marker used to be.
    fpyutils.insert_string_at_line(
        filename, final_string, first_marker_line, filename, append=False)
def write_strings_on_files_between_markers(filenames: list, strings: list,
                                           marker: str):
    r"""Write each table of contents on its corresponding file.

    :parameter filenames: the files that needs to be read or modified.
    :parameter strings: the strings that will be written on the file. Each
        string is associated with one file.
    :parameter marker: a marker that will identify the start
        and the end of the string.
    :type filenames: list
    :type strings: list
    :type marker: str
    :returns: None
    :rtype: None
    :raises: an fpyutils exception or a built-in exception.
    """
    assert len(filenames) == len(strings)
    for filename in filenames:
        assert isinstance(filename, str)
    for string in strings:
        assert isinstance(string, str)
    # Pair each file with its string and delegate the real work.
    for filename, string in zip(filenames, strings):
        write_string_on_file_between_markers(filename, string, marker)
def build_toc(filename: str,
              ordered: bool = False,
              no_links: bool = False,
              no_indentation: bool = False,
              no_list_coherence: bool = False,
              keep_header_levels: int = 3,
              parser: str = 'github',
              list_marker: str = '-') -> str:
    r"""Build the table of contents of a single file.

    :parameter filename: the file that needs to be read. ``-`` reads
        from standard input instead.
    :parameter ordered: decides whether to build an ordered list or not.
        Defaults to ``False``.
    :parameter no_links: disables the use of links.
        Defaults to ``False``.
    :parameter no_indentation: disables indentation in the list.
        Defaults to ``False``.
    :parameter no_list_coherence: disables the list coherence check.
        Defaults to ``False``.
    :parameter keep_header_levels: the maximum level of headers to be
        considered as such when building the table of contents.
        Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
        Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
        characters of the list element. Defaults to ``-``.
    :type filename: str
    :type ordered: bool
    :type no_links: bool
    :type no_indentation: bool
    :type no_list_coherence: bool
    :type keep_header_levels: int
    :type parser: str
    :type list_marker: str
    :returns: toc, the corresponding table of contents of the file.
    :rtype: str
    :raises: TocDoesNotRenderAsCoherentList or a built-in exception.
    """
    toc = str()
    # Per-level header counts, used for ordered-list indices.
    header_type_counter = dict()
    header_type_curr = 0
    header_type_prev = 0
    # Tracks repeated anchor links so duplicates get numeric suffixes.
    header_duplicate_counter = dict()
    no_of_indentation_spaces_prev = 0
    # NOTE(review): this assignment is redundant — list_marker_log is
    # recomputed unconditionally a few lines below.
    if ordered:
        list_marker_log = build_list_marker_log(parser, list_marker)
    if filename == '-':
        f = sys.stdin
    else:
        f = open(filename, 'r')
    line = f.readline()
    if ordered:
        list_marker_log = build_list_marker_log(parser, list_marker)
    else:
        list_marker_log = list()
    is_within_code_fence = False
    code_fence = None
    is_document_end = False
    if not no_indentation and not no_list_coherence:
        # if indentation and list coherence.
        indentation_list = build_indentation_list(parser)
    while line:
        # Document ending detection.
        #
        # This changes the state of is_within_code_fence if the
        # file has no closing fence markers. This serves no practial
        # purpose since the code would run correctly anyway. It is
        # however more sematically correct.
        #
        # See the unit tests (examples 95 and 96 of the github parser)
        # and the is_closing_code_fence function.
        if filename != '-':
            # stdin is not seekable.
            file_pointer_pos = f.tell()
            if f.readline() == str():
                is_document_end = True
            f.seek(file_pointer_pos)
        # Code fence detection.
        if is_within_code_fence:
            # A closing fence (or document end) terminates the block.
            is_within_code_fence = not is_closing_code_fence(
                line, code_fence, is_document_end, parser)
            line = f.readline()
        else:
            code_fence = is_opening_code_fence(line, parser)
            if code_fence is not None:
                # Update the status of the next line.
                is_within_code_fence = True
                line = f.readline()
        if not is_within_code_fence or code_fence is None:
            # Header detection and gathering.
            header = get_md_header(line, header_duplicate_counter,
                                   keep_header_levels, parser, no_links)
            if header is not None:
                header_type_curr = header['type']
                # Take care of the ordered TOC.
                if ordered:
                    increase_index_ordered_list(header_type_counter,
                                                header_type_prev,
                                                header_type_curr, parser)
                    index = header_type_counter[header_type_curr]
                else:
                    index = 1
                # Take care of list indentations.
                if no_indentation:
                    no_of_indentation_spaces_curr = 0
                    # TOC list coherence checks are not necessary
                    # without indentation.
                else:
                    if not no_list_coherence:
                        if not toc_renders_as_coherent_list(
                                header_type_curr, indentation_list, parser):
                            raise TocDoesNotRenderAsCoherentList
                    no_of_indentation_spaces_curr = compute_toc_line_indentation_spaces(
                        header_type_curr, header_type_prev,
                        no_of_indentation_spaces_prev, parser, ordered,
                        list_marker, list_marker_log, index)
                # Build a single TOC line.
                toc_line_no_indent = build_toc_line_without_indentation(
                    header, ordered, no_links, index, parser, list_marker)
                # Save the TOC line with the indentation.
                toc += build_toc_line(toc_line_no_indent,
                                      no_of_indentation_spaces_curr) + '\n'
                # Carry the state over to the next header.
                header_type_prev = header_type_curr
                no_of_indentation_spaces_prev = no_of_indentation_spaces_curr
            # endif
            line = f.readline()
        # endif
    # endwhile
    # NOTE(review): when filename is '-', this also closes sys.stdin;
    # presumably harmless at the end of a CLI run — confirm.
    f.close()
    return toc
def build_multiple_tocs(filenames: list,
                        ordered: bool = False,
                        no_links: bool = False,
                        no_indentation: bool = False,
                        no_list_coherence: bool = False,
                        keep_header_levels: int = 3,
                        parser: str = 'github',
                        list_marker: str = '-') -> list:
    r"""Parse files by line and build the table of contents of each file.

    :parameter filenames: the files that needs to be read.
    :parameter ordered: decides whether to build an ordered list or not.
        Defaults to ``False``.
    :parameter no_links: disables the use of links.
        Defaults to ``False``.
    :parameter no_indentation: disables indentation in the list.
        Defaults to ``False``.
    :parameter no_list_coherence: disables the list coherence check.
        Defaults to ``False``.
    :parameter keep_header_levels: the maximum level of headers to be
        considered as such when building the table of contents.
        Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
        Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
        characters of the list element. Defaults to ``-``.
    :type filenames: list
    :type ordered: bool
    :type no_links: bool
    :type no_indentation: bool
    :type no_list_coherence: bool
    :type keep_header_levels: int
    :type parser: str
    :type list_marker: str
    :returns: toc_struct, the corresponding table of contents for each input
        file.
    :rtype: list
    :raises: a built-in exception.
    """
    for filename in filenames:
        assert isinstance(filename, str)
    if len(filenames) == 0:
        # No file given: fall back to standard input.
        filenames.append('-')
    toc_struct = [
        build_toc(filename, ordered, no_links, no_indentation,
                  no_list_coherence, keep_header_levels, parser, list_marker)
        for filename in filenames
    ]
    return toc_struct
def increase_index_ordered_list(header_type_count: dict,
                                header_type_prev: int,
                                header_type_curr: int,
                                parser: str = 'github'):
    r"""Advance the per-level counter used by ordered TOC lists.

    :parameter header_type_count: the count of each header type.
    :parameter header_type_prev: the previous type of header (h[1-Inf]).
    :parameter header_type_curr: the current type of header (h[1-Inf]).
    :parameter parser: decides rules on how to generate ordered list markers.
        Defaults to ``github``.
    :type header_type_count: dict
    :type header_type_prev: int
    :type header_type_curr: int
    :type parser: str
    :returns: None
    :rtype: None
    :raises: GithubOverflowOrderedListMarker or a built-in exception.
    """
    # header_type_prev might be 0 while header_type_curr can't.
    assert header_type_prev >= 0
    assert header_type_curr >= 1
    if header_type_prev == 0:
        # A brand new TOC: treat previous and current levels as equal so
        # that only the missing-key case below resets the counter.
        header_type_prev = header_type_curr
    went_deeper = header_type_prev < header_type_curr
    if header_type_curr not in header_type_count or went_deeper:
        # First occurrence of this level, or a deeper nesting: restart.
        header_type_count[header_type_curr] = 0
    header_type_count[header_type_curr] += 1
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        if (header_type_count[header_type_curr] >
                md_parser['github']['list']['ordered']['max_marker_number']):
            raise GithubOverflowOrderedListMarker
def build_list_marker_log(parser: str = 'github',
                          list_marker: str = '.') -> list:
    r"""Create a data structure that holds list marker information.

    :parameter parser: decides rules on how compute indentations.
        Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
        characters of the list element. Defaults to ``.``.
    :type parser: str
    :type list_marker: str
    :returns: list_marker_log, the data structure.
    :rtype: list
    :raises: a built-in exception.

    .. note::
        This function makes sense for ordered lists only.
    """
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker', 'redcarpet'):
        assert list_marker in md_parser[parser]['list']['ordered'][
            'closing_markers']
    list_marker_log = list()
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        # One entry per possible header level, each initialized with the
        # minimum ordered-list marker number followed by the marker.
        initial_marker = str(md_parser['github']['list']['ordered']
                             ['min_marker_number']) + list_marker
        list_marker_log = [
            initial_marker
            for _ in range(md_parser['github']['header']['max_levels'])
        ]
    elif parser == 'redcarpet':
        pass
    return list_marker_log
def compute_toc_line_indentation_spaces(
        header_type_curr: int = 1,
        header_type_prev: int = 0,
        no_of_indentation_spaces_prev: int = 0,
        parser: str = 'github',
        ordered: bool = False,
        list_marker: str = '-',
        list_marker_log: list = None,
        index: int = 1) -> int:
    r"""Compute the number of indentation spaces for the TOC list element.

    :parameter header_type_curr: the current type of header (h[1-Inf]).
        Defaults to ``1``.
    :parameter header_type_prev: the previous type of header (h[1-Inf]).
        Defaults to ``0``.
    :parameter no_of_indentation_spaces_prev: the number of previous
        indentation spaces. Defaults to ``0``.
    :parameter parser: decides rules on how compute indentations.
        Defaults to ``github``.
    :parameter ordered: if set to ``True``, numbers will be used
        as list ids, otherwise a dash character. Defaults to ``False``.
    :parameter list_marker: a string that contains some of the first
        characters of the list element. Defaults to ``-``.
    :parameter list_marker_log: a data structure that holds list marker
        information for ordered lists. It is updated in place so that
        callers can carry the state across successive invocations. If
        ``None``, a fresh structure equivalent to
        ``build_list_marker_log('github', '.')`` is created.
        Defaults to ``None``.
    :parameter index: a number that will be used as list id in case of an
        ordered table of contents. Defaults to ``1``.
    :type header_type_curr: int
    :type header_type_prev: int
    :type no_of_indentation_spaces_prev: int
    :type parser: str
    :type ordered: bool
    :type list_marker: str
    :type list_marker_log: list
    :type index: int
    :returns: no_of_indentation_spaces_curr, the number of indentation spaces
        for the list element.
    :rtype: int
    :raises: a built-in exception.

    .. note::
        Please note that this function assumes that
        no_of_indentation_spaces_prev contains the correct number of spaces.
    """
    assert header_type_curr >= 1
    assert header_type_prev >= 0
    assert no_of_indentation_spaces_prev >= 0
    # Fix for a mutable-default-argument bug: the previous default,
    # ``build_list_marker_log('github', '.')``, was evaluated once at
    # function-definition time and mutated in place below, so every call
    # relying on the default shared (and corrupted) the same list.
    # Build a fresh structure per call instead.
    if list_marker_log is None:
        list_marker_log = build_list_marker_log('github', '.')
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        if ordered:
            assert list_marker in md_parser[parser]['list']['ordered'][
                'closing_markers']
        else:
            assert list_marker in md_parser[parser]['list']['unordered'][
                'bullet_markers']
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        if ordered:
            assert len(
                list_marker_log) == md_parser['github']['header']['max_levels']
            for e in list_marker_log:
                assert isinstance(e, str)
    assert index >= 1
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        if header_type_prev == 0:
            # Base case for the first toc line.
            no_of_indentation_spaces_curr = 0
        elif header_type_curr == header_type_prev:
            # Base case for same indentation.
            no_of_indentation_spaces_curr = no_of_indentation_spaces_prev
        else:
            if ordered:
                list_marker_prev = str(list_marker_log[header_type_curr - 1])
            else:
                # list_marker for unordered lists will always be 1 character.
                list_marker_prev = list_marker
            # Generic cases.
            if header_type_curr > header_type_prev:
                # More indentation.
                no_of_indentation_spaces_curr = (
                    no_of_indentation_spaces_prev + len(list_marker_prev) +
                    len(' '))
            elif header_type_curr < header_type_prev:
                # Less indentation.
                no_of_indentation_spaces_curr = (
                    no_of_indentation_spaces_prev -
                    (len(list_marker_prev) + len(' ')))
            # Reset older nested list indices. If this is not performed then
            # future nested ordered lists will rely on incorrect data to
            # compute indentations.
            if ordered:
                for i in range((header_type_curr - 1) + 1,
                               md_parser['github']['header']['max_levels']):
                    list_marker_log[i] = str(
                        md_parser['github']['list']['ordered']
                        ['min_marker_number']) + list_marker
        # Update the data structure with the marker used at this level.
        if ordered:
            list_marker_log[header_type_curr - 1] = str(index) + list_marker
    elif parser == 'redcarpet':
        # Redcarpet uses a fixed indent of 4 spaces per header level.
        no_of_indentation_spaces_curr = 4 * (header_type_curr - 1)
    return no_of_indentation_spaces_curr
def build_toc_line_without_indentation(header: dict,
                                       ordered: bool = False,
                                       no_links: bool = False,
                                       index: int = 1,
                                       parser: str = 'github',
                                       list_marker: str = '-') -> str:
    r"""Return a single, unindented list element of the table of contents.

    :parameter header: a data structure that contains the original
        text, the trimmed text and the type of header.
    :parameter ordered: if set to ``True``, numbers will be used
        as list ids, otherwise a dash character. Defaults to ``False``.
    :parameter no_links: disables the use of links. Defaults to ``False``.
    :parameter index: a number that will be used as list id in case of an
        ordered table of contents. Defaults to ``1``.
    :parameter parser: decides rules on how compute indentations.
        Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
        characters of the list element. Defaults to ``-``.
    :type header: dict
    :type ordered: bool
    :type no_links: bool
    :type index: int
    :type parser: str
    :type list_marker: str
    :returns: toc_line_no_indent, a single line of the table of contents
        without indentation.
    :rtype: str
    :raises: a built-in exception.
    """
    assert 'type' in header
    assert 'text_original' in header
    assert 'text_anchor_link' in header
    assert isinstance(header['type'], int)
    assert isinstance(header['text_original'], str)
    assert isinstance(header['text_anchor_link'], str)
    assert header['type'] >= 1
    assert index >= 1
    known_parsers = ('github', 'cmark', 'gitlab', 'commonmarker', 'redcarpet')
    if parser in known_parsers:
        marker_category = 'ordered' if ordered else 'unordered'
        marker_key = 'closing_markers' if ordered else 'bullet_markers'
        assert list_marker in md_parser[parser]['list'][marker_category][
            marker_key]
    toc_line_no_indent = str()
    if parser in known_parsers:
        if ordered:
            # Ordered lists prepend the index to the closing marker.
            list_marker = str(index) + list_marker
        # FIXME: is this always correct?
        if no_links:
            line = header['text_original']
        else:
            line = ('[' + header['text_original'] + '](' + '#'
                    + header['text_anchor_link'] + ')')
        toc_line_no_indent = list_marker + ' ' + line
    return toc_line_no_indent
def build_toc_line(toc_line_no_indent: str,
                   no_of_indentation_spaces: int = 0) -> str:
    r"""Prepend the indentation spaces to an unindented TOC line.

    :parameter toc_line_no_indent: the TOC line without indentation.
    :parameter no_of_indentation_spaces: the number of indentation spaces.
        Defaults to ``0``.
    :type toc_line_no_indent: str
    :type no_of_indentation_spaces: int
    :returns: toc_line, a single line of the table of contents.
    :rtype: str
    :raises: a built-in exception.
    """
    assert no_of_indentation_spaces >= 0
    return ' ' * no_of_indentation_spaces + toc_line_no_indent
def build_anchor_link(header_text_trimmed: str,
                      header_duplicate_counter: dict,
                      parser: str = 'github') -> str:
    r"""Apply the specified slug rule to build the anchor link.

    :parameter header_text_trimmed: the text that needs to be transformed
        in a link.
    :parameter header_duplicate_counter: a data structure that keeps track of
        possible duplicate header links in order to avoid them. This is
        meaningful only for certain values of parser.
    :parameter parser: decides rules on how to generate anchor links.
        Defaults to ``github``.
    :type header_text_trimmed: str
    :type header_duplicate_counter: dict
    :type parser: str
    :returns: None if the specified parser is not recognized, or the anchor
        link, otherwise.
    :rtype: str
    :raises: a built-in exception.

    .. note::
        The licenses of each markdown parser algorithm are reported on
        the 'Markdown spec' documentation page.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        header_text_trimmed = header_text_trimmed.lower()
        # Remove punctuation: Keep spaces, hypens and "word characters"
        # only.
        header_text_trimmed = re.sub(r'[^\w\s\- ]', '', header_text_trimmed)
        header_text_trimmed = header_text_trimmed.replace(' ', '-')
        # Check for duplicates.
        ht = header_text_trimmed
        # Set the initial value if we are examining the first occurrency.
        # The state of header_duplicate_counter is available to the caller
        # functions.
        if header_text_trimmed not in header_duplicate_counter:
            header_duplicate_counter[header_text_trimmed] = 0
        if header_duplicate_counter[header_text_trimmed] > 0:
            # A repeated slug gets a numeric suffix.
            header_text_trimmed = header_text_trimmed + '-' + str(
                header_duplicate_counter[header_text_trimmed])
        header_duplicate_counter[ht] += 1
        return header_text_trimmed
    elif parser == 'redcarpet':
        # To ensure full compatibility what follows is a direct translation
        # of the rndr_header_anchor C function used in redcarpet.
        STRIPPED = " -&+$,/:;=?@\"#{}|^~[]`\\*()%.!'"
        header_text_trimmed_len = len(header_text_trimmed)
        inserted = 0
        stripped = 0
        header_text_trimmed_middle_stage = ''
        # Bug fix: the C original advances the loop index inside the
        # '<...>' and '&...;' branches. The previous Python version used
        # ``for i in range(...)``, where rebinding ``i`` in the body does
        # not affect the next iteration, so tag and entity contents
        # leaked into the slug. A while loop reproduces the C semantics.
        i = 0
        while i < header_text_trimmed_len:
            if header_text_trimmed[i] == '<':
                # Skip an HTML tag.
                while (i < header_text_trimmed_len
                       and header_text_trimmed[i] != '>'):
                    i += 1
            elif header_text_trimmed[i] == '&':
                # Skip an HTML entity.
                while (i < header_text_trimmed_len
                       and header_text_trimmed[i] != ';'):
                    i += 1
            # str.find() == -1 if character is not found in str.
            # https://docs.python.org/3.6/library/stdtypes.html?highlight=find#str.find
            elif not curses.ascii.isascii(
                    header_text_trimmed[i]) or STRIPPED.find(
                        header_text_trimmed[i]) != -1:
                # Collapse a run of stripped characters into one dash.
                if inserted and not stripped:
                    header_text_trimmed_middle_stage += '-'
                stripped = 1
            else:
                header_text_trimmed_middle_stage += header_text_trimmed[
                    i].lower()
                stripped = 0
                inserted += 1
            i += 1
        if stripped > 0 and inserted > 0:
            # Drop the trailing dash.
            header_text_trimmed_middle_stage = header_text_trimmed_middle_stage[
                0:-1]
        if inserted == 0 and header_text_trimmed_len > 0:
            # Everything was stripped: fall back to a djb2 hash of the
            # original text, exactly like redcarpet does.
            hash = 5381
            for i in range(0, header_text_trimmed_len):
                # Get the unicode representation with ord.
                # Unicode should be equal to ASCII in ASCII's range of
                # characters.
                hash = ((hash << 5) + hash) + ord(header_text_trimmed[i])
            # This is equivalent to %x in C. In Python we don't have
            # the length problem so %x is equal to %lx in this case.
            # Apparently there is no %l in Python...
            header_text_trimmed_middle_stage = 'part-' + '{0:x}'.format(hash)
        return header_text_trimmed_middle_stage
def get_atx_heading(line: str,
                    keep_header_levels: int = 3,
                    parser: str = 'github',
                    no_links: bool = False):
    r"""Given a line extract the link label and its type.

    :parameter line: the line to be examined.
    :parameter keep_header_levels: the maximum level of headers to be
        considered as such when building the table of contents.
        Defaults to ``3``.
    :parameter parser: decides rules on how to generate the anchor text.
        Defaults to ``github``.
    :parameter no_links: disables the use of links.
        Defaults to ``False``.
    :type line: str
    :type keep_header_levels: int
    :type parser: str
    :type no_links: bool
    :returns: None if the line does not contain header elements according to
        the rules of the selected markdown parser, or a tuple containing the
        header type and the trimmed header text, according to the selected
        parser rules, otherwise.
    :rtype: typing.Optional[tuple]
    :raises: GithubEmptyLinkLabel or GithubOverflowCharsLinkLabel or a
        built-in exception.
    """
    assert keep_header_levels >= 1
    # An empty line can never be a header.
    if len(line) == 0:
        return None
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        # A line starting with a backslash is not treated as a header.
        if line[0] == '\u005c':
            return None
        # Consume the allowed leading spaces.
        i = 0
        while i < len(line) and line[i] == ' ' and i <= md_parser['github'][
                'header']['max_space_indentation']:
            i += 1
        if i > md_parser['github']['header']['max_space_indentation']:
            return None
        offset = i
        # Consume the '#' run: its length is the header level.
        while i < len(line) and line[i] == '#' and i <= md_parser['github'][
                'header']['max_levels'] + offset:
            i += 1
        # Too many '#', a level deeper than requested, or no '#' at all.
        if i - offset > md_parser['github']['header'][
                'max_levels'] or i - offset > keep_header_levels or i - offset == 0:
            return None
        current_headers = i - offset
        # The '#' run must be followed by a space or a line ending.
        # Include special cases for line endings which should not be
        # discarded as non-ATX headers.
        if i < len(line) and (line[i] != ' ' and line[i] != '\u000a'
                              and line[i] != '\u000d'):
            return None
        i += 1
        # Exclude leading whitespaces after the ATX header identifier.
        while i < len(line) and line[i] == ' ':
            i += 1
        # An algorithm to find the start and the end of the closing sequence.
        # The closing sequence includes all the significant part of the
        # string. This algorithm has a complexity of O(n) with n being the
        # length of the line.
        cs_start = i
        cs_end = cs_start
        # Work on the reversed line so the optional trailing '#' run
        # (and the spaces around it) can be dropped.
        line_prime = line[::-1]
        hash_char_rounds = 0
        go_on = True
        i = 0
        i_prev = i
        while i < len(line) - cs_start - 1 and go_on:
            # Stop at the first character that is neither a space nor '#',
            # or once more than one '#' run has been crossed.
            if ((line_prime[i] != ' ' and line_prime[i] != '#')
                    or hash_char_rounds > 1):
                if i > i_prev:
                    cs_end = len(line_prime) - i_prev
                else:
                    cs_end = len(line_prime) - i
                go_on = False
            while go_on and line_prime[i] == ' ':
                i += 1
            i_prev = i
            while go_on and line_prime[i] == '#':
                i += 1
            if i > i_prev:
                hash_char_rounds += 1
        # Instead of changing the whole algorithm to check for line
        # endings, this seems cleaner.
        find_newline = line.find('\u000a')
        find_carriage_return = line.find('\u000d')
        if find_newline != -1:
            cs_end = min(cs_end, find_newline)
        if find_carriage_return != -1:
            cs_end = min(cs_end, find_carriage_return)
        final_line = line[cs_start:cs_end]
        if not no_links:
            # A trailing backslash would escape the closing link bracket:
            # neutralize it by appending a space.
            if len(final_line) > 0 and final_line[-1] == '\u005c':
                final_line += ' '
            # A label made exclusively of whitespace is not allowed.
            if len(
                    final_line.strip('\u0020').strip('\u0009').strip('\u000a').
                    strip('\u000b').strip('\u000c').strip('\u000d')) == 0:
                raise GithubEmptyLinkLabel
            if len(final_line
                   ) > md_parser['github']['link']['max_chars_label']:
                raise GithubOverflowCharsLinkLabel
            # Escape square brackets if not already escaped.
            i = 0
            while i < len(final_line):
                if (final_line[i] == '[' or final_line[i] == ']'):
                    j = i - 1
                    # Count the backslashes immediately before the bracket:
                    # an even count means the bracket itself is unescaped.
                    consecutive_escape_characters = 0
                    while j >= 0 and final_line[j] == '\u005c':
                        consecutive_escape_characters += 1
                        j -= 1
                    if ((consecutive_escape_characters > 0
                         and consecutive_escape_characters % 2 == 0)
                            or consecutive_escape_characters == 0):
                        tmp = '\u005c'
                    else:
                        tmp = str()
                    final_line = final_line[0:i] + tmp + final_line[i:len(
                        final_line)]
                    # Skip past the (possibly inserted) escape character.
                    i += 1 + len(tmp)
                else:
                    i += 1
    elif parser == 'redcarpet':
        if line[0] != '#':
            return None
        # Count the leading '#' run, capped at the parser's maximum level.
        i = 0
        while (i < len(line)
               and i < md_parser['redcarpet']['header']['max_levels']
               and line[i] == '#'):
            i += 1
        current_headers = i
        if i < len(line) and line[i] != ' ':
            return None
        while i < len(line) and line[i] == ' ':
            i += 1
        # Trim the trailing newline, any closing '#' run and spaces.
        end = i
        while end < len(line) and line[end] != '\n':
            end += 1
        while end > 0 and line[end - 1] == '#':
            end -= 1
        while end > 0 and line[end - 1] == ' ':
            end -= 1
        if end > i:
            final_line = line
            # Neutralize a trailing backslash, as in the github branch.
            if not no_links and len(final_line) > 0 and final_line[-1] == '\\':
                final_line += ' '
                end += 1
            final_line = final_line[i:end]
        else:
            return None
    # TODO: escape or remove '[', ']', '(', ')' in inline links for redcarpet,
    # TODO: check link label rules for redcarpet.
    return current_headers, final_line
def get_md_header(header_text_line: str,
                  header_duplicate_counter: dict,
                  keep_header_levels: int = 3,
                  parser: str = 'github',
                  no_links: bool = False) -> dict:
    r"""Build a data structure with the elements needed to create a TOC line.

    :parameter header_text_line: a single markdown line that needs to be
        transformed into a TOC line.
    :parameter header_duplicate_counter: a data structure that contains the
        number of occurrencies of each header anchor link. This is used to
        avoid duplicate anchor links and it is meaningful only for certain
        values of parser.
    :parameter keep_header_levels: the maximum level of headers to be
        considered as such when building the table of contents.
        Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
        Defaults to ``github``.
    :parameter no_links: disables the use of links. Defaults to ``False``.
    :type header_text_line: str
    :type header_duplicate_counter: dict
    :type keep_header_levels: int
    :type parser: str
    :type no_links: bool
    :returns: None if the input line does not correspond to one of the
        designated cases or a data structure containing the necessary
        components to create a table of contents line, otherwise.
    :rtype: dict
    :raises: a built-in exception.

    .. note::
        This works like a wrapper to other functions.
    """
    result = get_atx_heading(header_text_line, keep_header_levels, parser,
                             no_links)
    if result is None:
        return None
    header_type, header_text_trimmed = result
    return {
        'type': header_type,
        'text_original': header_text_trimmed,
        'text_anchor_link': build_anchor_link(header_text_trimmed,
                                              header_duplicate_counter,
                                              parser),
    }
def is_valid_code_fence_indent(line: str, parser: str = 'github') -> bool:
    r"""Determine if the given line has valid indentation for a code block fence.

    :parameter line: a single markdown line to evaluate.
    :parameter parser: decides rules on how to generate the anchor text.
        Defaults to ``github``.
    :type line: str
    :type parser: str
    :returns: True if the given line has valid indentation or False
        otherwise.
    :rtype: bool
    :raises: a built-in exception.
    """
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        leading_spaces = len(line) - len(line.lstrip(' '))
        return leading_spaces <= md_parser['github']['code fence'][
            'min_marker_characters']
    elif parser == 'redcarpet':
        # TODO.
        return False
def is_opening_code_fence(line: str, parser: str = 'github'):
    r"""Determine if the given line is possibly the opening of a fenced code block.

    :parameter line: a single markdown line to evaluate.
    :parameter parser: decides rules on how to generate the anchor text.
        Defaults to ``github``.
    :type line: str
    :type parser: str
    :returns: None if the input line is not an opening code fence. Otherwise,
        returns the string which will identify the closing code fence
        according to the input parsers' rules.
    :rtype: typing.Optional[str]
    :raises: a built-in exception.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        # The two admissible fence characters and the minimum run length.
        markers = md_parser['github']['code fence']['marker']
        marker_min_length = md_parser['github']['code fence'][
            'min_marker_characters']
        if not is_valid_code_fence_indent(line):
            return None
        # Leading indentation and the trailing newline are irrelevant.
        line = line.lstrip(' ').rstrip('\n')
        # The line must start with at least the minimum run of a single
        # marker character.
        if not line.startswith(
                (markers[0] * marker_min_length, markers[1] * marker_min_length)):
            return None
        # Whatever follows the leading marker run is the info string.
        if line == len(line) * line[0]:
            info_string = str()
        else:
            info_string = line.lstrip(line[0])
        # Backticks or tildes in info string are explicitly forbidden.
        if markers[0] in info_string or markers[1] in info_string:
            return None
        # Solves example 107. See:
        # https://github.github.com/gfm/#example-107
        if line.rstrip(markers[0]) != line and line.rstrip(markers[1]) != line:
            return None
        # Keep only the leading marker run: it identifies the closing
        # fence. Note that rstrip treats info_string as a character set.
        return line.rstrip(info_string)
    elif parser == 'redcarpet':
        # TODO.
        return None
def build_indentation_list(parser: str = 'github'):
    r"""Create a data structure that holds the state of indentations.

    :parameter parser: decides the length of the list.
        Defaults to ``github``.
    :type parser: str
    :returns: indentation_list, a list that contains the state of
        indentations given a header type.
    :rtype: list
    :raises: a built-in exception.
    """
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker', 'redcarpet'):
        # One boolean flag per possible header level, all clear initially.
        return [False] * md_parser[parser]['header']['max_levels']
    return list()
def toc_renders_as_coherent_list(
        header_type_curr: int = 1,
        indentation_list: list = None,
        parser: str = 'github') -> bool:
    r"""Check if the TOC will render as a working list.

    :parameter header_type_curr: the current type of header (h[1-Inf]).
        Defaults to ``1``.
    :parameter indentation_list: a list that records which header levels
        have been seen so far. It is updated in place so that callers can
        carry the state across successive invocations. If ``None``, a
        fresh list equivalent to ``build_indentation_list('github')`` is
        created. Defaults to ``None``.
    :parameter parser: decides rules on how to generate ordered list markers.
        Defaults to ``github``.
    :type header_type_curr: int
    :type indentation_list: list
    :type parser: str
    :returns: renders_as_list
    :rtype: bool
    :raises: a built-in exception.
    """
    assert header_type_curr >= 1
    # Fix for a mutable-default-argument bug: the previous default,
    # ``build_indentation_list('github')``, was evaluated once at
    # function-definition time and mutated in place below, so every call
    # relying on the default shared the same list. Build it per call.
    if indentation_list is None:
        indentation_list = build_indentation_list('github')
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        assert len(
            indentation_list) == md_parser[parser]['header']['max_levels']
        for e in indentation_list:
            assert isinstance(e, bool)
    renders_as_list = True
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        # Update with current information.
        indentation_list[header_type_curr - 1] = True
        # Reset next cells to False, as a detection mechanism.
        for i in range(header_type_curr,
                       md_parser['github']['header']['max_levels']):
            indentation_list[i] = False
        # Check for previous False cells. If there is a "hole" in the list
        # it means that the TOC will have "wrong" indentation spaces, thus
        # either not rendering as an HTML list or not as the user intended.
        i = header_type_curr - 1
        while i >= 0 and indentation_list[i]:
            i -= 1
        if i >= 0:
            renders_as_list = False
    return renders_as_list
if __name__ == '__main__':
pass
|
frnmst/md-toc | md_toc/api.py | build_indentation_list | python | def build_indentation_list(parser: str = 'github'):
r"""Create a data structure that holds the state of indentations.
:parameter parser: decides the length of the list.
Defaults to ``github``.
:type parser: str
:returns: indentation_list, a list that contains the state of
indentations given a header type.
:rtype: list
:raises: a built-in exception.
"""
indentation_list = list()
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker' or parser == 'redcarpet'):
for i in range(0, md_parser[parser]['header']['max_levels']):
indentation_list.append(False)
return indentation_list | r"""Create a data structure that holds the state of indentations.
:parameter parser: decides the length of the list.
Defaults to ``github``.
:type parser: str
:returns: indentation_list, a list that contains the state of
indentations given a header type.
:rtype: list
:raises: a built-in exception. | train | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/api.py#L1041-L1059 | null | #
# api.py
#
# Copyright (C) 2017-2019 frnmst (Franco Masotti) <franco.masotti@live.com>
#
# This file is part of md-toc.
#
# md-toc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# md-toc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with md-toc. If not, see <http://www.gnu.org/licenses/>.
#
"""The main file."""
import fpyutils
import re
import curses.ascii
import sys
from .exceptions import (GithubOverflowCharsLinkLabel, GithubEmptyLinkLabel,
GithubOverflowOrderedListMarker,
StdinIsNotAFileToBeWritten,
TocDoesNotRenderAsCoherentList)
from .constants import common_defaults
from .constants import parser as md_parser
def write_string_on_file_between_markers(filename: str, string: str,
                                         marker: str):
    r"""Write the table of contents on a single file.

    :parameter filename: the file that needs to be read or modified.
    :parameter string: the string that will be written on the file.
    :parameter marker: a marker that will identify the start
         and the end of the string.
    :type filename: str
    :type string: str
    :type marker: str
    :returns: None
    :rtype: None
    :raises: StdinIsNotAFileToBeWritten or an fpyutils exception
         or a built-in exception.
    """
    if filename == '-':
        raise StdinIsNotAFileToBeWritten

    # Surround the payload with the marker so it can be located again later.
    content = marker + '\n\n' + string.rstrip() + '\n\n' + marker + '\n'
    matches = fpyutils.get_line_matches(
        filename, marker, 2, loose_matching=True)
    if 1 in matches:
        # Remove everything up to the second marker when present,
        # otherwise only the line carrying the first marker.
        last_line = matches[2] if 2 in matches else matches[1]
        fpyutils.remove_line_interval(filename, matches[1], last_line,
                                      filename)
        # Write the new content where the old block used to be.
        fpyutils.insert_string_at_line(
            filename, content, matches[1], filename, append=False)
def write_strings_on_files_between_markers(filenames: list, strings: list,
                                           marker: str):
    r"""Write the table of contents on multiple files.

    :parameter filenames: the files that need to be read or modified.
    :parameter strings: the strings that will be written on the files. Each
         string is associated with one file.
    :parameter marker: a marker that will identify the start
         and the end of the string.
    :type filenames: list
    :type strings: list
    :type marker: str
    :returns: None
    :rtype: None
    :raises: an fpyutils exception or a built-in exception.
    """
    assert len(filenames) == len(strings)
    # Validate element types up front so no file is touched on bad input.
    for f in filenames:
        assert isinstance(f, str)
    for s in strings:
        assert isinstance(s, str)
    # zip pairs each file with its string: no manual index bookkeeping.
    for f, s in zip(filenames, strings):
        write_string_on_file_between_markers(f, s, marker)
def build_toc(filename: str,
              ordered: bool = False,
              no_links: bool = False,
              no_indentation: bool = False,
              no_list_coherence: bool = False,
              keep_header_levels: int = 3,
              parser: str = 'github',
              list_marker: str = '-') -> str:
    r"""Build the table of contents of a single file.

    :parameter filename: the file that needs to be read. Use ``-`` to read
         from standard input.
    :parameter ordered: decides whether to build an ordered list or not.
         Defaults to ``False``.
    :parameter no_links: disables the use of links.
         Defaults to ``False``.
    :parameter no_indentation: disables indentation in the list.
         Defaults to ``False``.
    :parameter no_list_coherence: disables the TOC list coherence checks.
         Defaults to ``False``.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``-``.
    :type filename: str
    :type ordered: bool
    :type no_links: bool
    :type no_indentation: bool
    :type no_list_coherence: bool
    :type keep_header_levels: int
    :type parser: str
    :type list_marker: str
    :returns: toc, the corresponding table of contents of the file.
    :rtype: str
    :raises: TocDoesNotRenderAsCoherentList or a built-in exception.
    """
    toc = str()
    header_type_counter = dict()
    header_type_curr = 0
    header_type_prev = 0
    header_duplicate_counter = dict()
    no_of_indentation_spaces_prev = 0
    if filename == '-':
        f = sys.stdin
    else:
        f = open(filename, 'r')
    line = f.readline()
    # Fix: list_marker_log used to be built twice when ordered=True;
    # building it once here is sufficient.
    if ordered:
        list_marker_log = build_list_marker_log(parser, list_marker)
    else:
        list_marker_log = list()
    is_within_code_fence = False
    code_fence = None
    is_document_end = False
    if not no_indentation and not no_list_coherence:
        # If indentation and list coherence are requested, keep track of
        # the indentation state for each header level.
        indentation_list = build_indentation_list(parser)
    while line:
        # Document ending detection.
        #
        # This changes the state of is_within_code_fence if the
        # file has no closing fence markers. This serves no practical
        # purpose since the code would run correctly anyway. It is
        # however more semantically correct.
        #
        # See the unit tests (examples 95 and 96 of the github parser)
        # and the is_closing_code_fence function.
        if filename != '-':
            # stdin is not seekable.
            file_pointer_pos = f.tell()
            if f.readline() == str():
                is_document_end = True
            f.seek(file_pointer_pos)
        # Code fence detection. Every branch below advances the file by
        # exactly one line per loop iteration.
        if is_within_code_fence:
            is_within_code_fence = not is_closing_code_fence(
                line, code_fence, is_document_end, parser)
            line = f.readline()
        else:
            code_fence = is_opening_code_fence(line, parser)
            if code_fence is not None:
                # Update the status of the next line.
                is_within_code_fence = True
                line = f.readline()
            if not is_within_code_fence or code_fence is None:
                # Header detection and gathering.
                header = get_md_header(line, header_duplicate_counter,
                                       keep_header_levels, parser, no_links)
                if header is not None:
                    header_type_curr = header['type']
                    # Take care of the ordered TOC.
                    if ordered:
                        increase_index_ordered_list(header_type_counter,
                                                    header_type_prev,
                                                    header_type_curr, parser)
                        index = header_type_counter[header_type_curr]
                    else:
                        index = 1
                    # Take care of list indentations.
                    if no_indentation:
                        no_of_indentation_spaces_curr = 0
                        # TOC list coherence checks are not necessary
                        # without indentation.
                    else:
                        if not no_list_coherence:
                            if not toc_renders_as_coherent_list(
                                    header_type_curr, indentation_list,
                                    parser):
                                raise TocDoesNotRenderAsCoherentList
                        no_of_indentation_spaces_curr = compute_toc_line_indentation_spaces(
                            header_type_curr, header_type_prev,
                            no_of_indentation_spaces_prev, parser, ordered,
                            list_marker, list_marker_log, index)
                    # Build a single TOC line.
                    toc_line_no_indent = build_toc_line_without_indentation(
                        header, ordered, no_links, index, parser, list_marker)
                    # Save the TOC line with the indentation.
                    toc += build_toc_line(
                        toc_line_no_indent,
                        no_of_indentation_spaces_curr) + '\n'
                    header_type_prev = header_type_curr
                    no_of_indentation_spaces_prev = no_of_indentation_spaces_curr
                line = f.readline()
    f.close()
    return toc
def build_multiple_tocs(filenames: list,
                        ordered: bool = False,
                        no_links: bool = False,
                        no_indentation: bool = False,
                        no_list_coherence: bool = False,
                        keep_header_levels: int = 3,
                        parser: str = 'github',
                        list_marker: str = '-') -> list:
    r"""Parse files by line and build the table of contents of each file.

    :parameter filenames: the files that need to be read. If empty,
         standard input (``-``) is used instead.
    :parameter ordered: decides whether to build an ordered list or not.
         Defaults to ``False``.
    :parameter no_links: disables the use of links.
         Defaults to ``False``.
    :parameter no_indentation: disables indentation in the list.
         Defaults to ``False``.
    :parameter no_list_coherence: disables the TOC list coherence checks.
         Defaults to ``False``.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``-``.
    :type filenames: list
    :type ordered: bool
    :type no_links: bool
    :type no_indentation: bool
    :type no_list_coherence: bool
    :type keep_header_levels: int
    :type parser: str
    :type list_marker: str
    :returns: toc_struct, the corresponding table of contents for each input
         file.
    :rtype: list
    :raises: a built-in exception.
    """
    if len(filenames) == 0:
        # Fall back to reading from standard input.
        filenames.append('-')
    for f in filenames:
        assert isinstance(f, str)
    # One TOC per input file, in order.
    return [
        build_toc(filename, ordered, no_links, no_indentation,
                  no_list_coherence, keep_header_levels, parser, list_marker)
        for filename in filenames
    ]
def increase_index_ordered_list(header_type_count: dict,
                                header_type_prev: int,
                                header_type_curr: int,
                                parser: str = 'github'):
    r"""Compute the current index for ordered list table of contents.

    :parameter header_type_count: the count of each header type.
    :parameter header_type_prev: the previous type of header (h[1-Inf]).
    :parameter header_type_curr: the current type of header (h[1-Inf]).
    :parameter parser: decides rules on how to generate ordered list markers.
         Defaults to ``github``.
    :type header_type_count: dict
    :type header_type_prev: int
    :type header_type_curr: int
    :type parser: str
    :returns: None
    :rtype: None
    :raises: GithubOverflowOrderedListMarker or a built-in exception.
    """
    # header_type_prev might be 0 while header_type_curr can't.
    assert header_type_prev >= 0
    assert header_type_curr >= 1

    # A brand new table of contents starts the count at the current type.
    if header_type_prev == 0:
        header_type_prev = header_type_curr
    # Unseen header types and deeper nesting restart the counter from zero.
    new_type = header_type_curr not in header_type_count
    deeper = header_type_prev < header_type_curr
    if new_type or deeper:
        header_type_count[header_type_curr] = 0
    header_type_count[header_type_curr] += 1

    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        limit = md_parser['github']['list']['ordered']['max_marker_number']
        if header_type_count[header_type_curr] > limit:
            raise GithubOverflowOrderedListMarker
def build_list_marker_log(parser: str = 'github',
                          list_marker: str = '.') -> list:
    r"""Create a data structure that holds list marker information.

    :parameter parser: decides rules on how to compute indentations.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``.``.
    :type parser: str
    :type list_marker: str
    :returns: list_marker_log, the data structure.
    :rtype: list
    :raises: a built-in exception.

    .. note::
         This function makes sense for ordered lists only.
    """
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker', 'redcarpet'):
        assert list_marker in md_parser[parser]['list']['ordered'][
            'closing_markers']

    list_marker_log = list()
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        # Every header level starts at the minimum marker number.
        first_marker = str(
            md_parser['github']['list']['ordered']['min_marker_number'])
        list_marker_log = [
            first_marker + list_marker
            for _ in range(md_parser['github']['header']['max_levels'])
        ]
    elif parser == 'redcarpet':
        pass
    return list_marker_log
def compute_toc_line_indentation_spaces(
        header_type_curr: int = 1,
        header_type_prev: int = 0,
        no_of_indentation_spaces_prev: int = 0,
        parser: str = 'github',
        ordered: bool = False,
        list_marker: str = '-',
        list_marker_log: list = None,
        index: int = 1) -> int:
    r"""Compute the number of indentation spaces for the TOC list element.

    :parameter header_type_curr: the current type of header (h[1-Inf]).
         Defaults to ``1``.
    :parameter header_type_prev: the previous type of header (h[1-Inf]).
         Defaults to ``0``.
    :parameter no_of_indentation_spaces_prev: the number of previous
         indentation spaces. Defaults to ``0``.
    :parameter parser: decides rules on how to compute indentations.
         Defaults to ``github``.
    :parameter ordered: if set to ``True``, numbers will be used
         as list ids, otherwise a dash character. Defaults to ``False``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``-``.
    :parameter list_marker_log: a data structure that holds list marker
         information for ordered lists. If ``None`` (the default), a fresh
         structure equivalent to ``build_list_marker_log('github', '.')``
         is created for this call.
    :parameter index: a number that will be used as list id in case of an
         ordered table of contents. Defaults to ``1``.
    :type header_type_curr: int
    :type header_type_prev: int
    :type no_of_indentation_spaces_prev: int
    :type parser: str
    :type ordered: bool
    :type list_marker: str
    :type list_marker_log: list
    :type index: int
    :returns: no_of_indentation_spaces_curr, the number of indentation spaces
         for the list element.
    :rtype: int
    :raises: a built-in exception.

    .. note::
         Please note that this function
         assumes that no_of_indentation_spaces_prev contains the correct
         number of spaces.
    """
    assert header_type_curr >= 1
    assert header_type_prev >= 0
    assert no_of_indentation_spaces_prev >= 0
    if list_marker_log is None:
        # Fix: the previous default argument was evaluated once at import
        # time and mutated below, leaking marker state between unrelated
        # calls (the classic mutable-default-argument pitfall).
        list_marker_log = build_list_marker_log('github', '.')
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        if ordered:
            assert list_marker in md_parser[parser]['list']['ordered'][
                'closing_markers']
        else:
            assert list_marker in md_parser[parser]['list']['unordered'][
                'bullet_markers']
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        if ordered:
            assert len(
                list_marker_log) == md_parser['github']['header']['max_levels']
            for e in list_marker_log:
                assert isinstance(e, str)
    assert index >= 1

    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        if header_type_prev == 0:
            # Base case for the first toc line.
            no_of_indentation_spaces_curr = 0
        elif header_type_curr == header_type_prev:
            # Base case for same indentation.
            no_of_indentation_spaces_curr = no_of_indentation_spaces_prev
        else:
            if ordered:
                list_marker_prev = str(list_marker_log[header_type_curr - 1])
            else:
                # list_marker for unordered lists will always be 1 character.
                list_marker_prev = list_marker
            # Generic cases.
            if header_type_curr > header_type_prev:
                # More indentation.
                no_of_indentation_spaces_curr = (
                    no_of_indentation_spaces_prev + len(list_marker_prev) +
                    len(' '))
            elif header_type_curr < header_type_prev:
                # Less indentation.
                no_of_indentation_spaces_curr = (
                    no_of_indentation_spaces_prev -
                    (len(list_marker_prev) + len(' ')))
            # Reset older nested list indices. If this is not performed then
            # future nested ordered lists will rely on incorrect data to
            # compute indentations.
            if ordered:
                for i in range((header_type_curr - 1) + 1,
                               md_parser['github']['header']['max_levels']):
                    list_marker_log[i] = str(
                        md_parser['github']['list']['ordered']
                        ['min_marker_number']) + list_marker
        # Update the data structure.
        if ordered:
            list_marker_log[header_type_curr - 1] = str(index) + list_marker
    elif parser == 'redcarpet':
        no_of_indentation_spaces_curr = 4 * (header_type_curr - 1)

    return no_of_indentation_spaces_curr
def build_toc_line_without_indentation(header: dict,
                                       ordered: bool = False,
                                       no_links: bool = False,
                                       index: int = 1,
                                       parser: str = 'github',
                                       list_marker: str = '-') -> str:
    r"""Return a list element of the table of contents.

    :parameter header: a data structure that contains the original
         text, the trimmed text and the type of header.
    :parameter ordered: if set to ``True``, numbers will be used
         as list ids, otherwise a dash character. Defaults to ``False``.
    :parameter no_links: disables the use of links. Defaults to ``False``.
    :parameter index: a number that will be used as list id in case of an
         ordered table of contents. Defaults to ``1``.
    :parameter parser: decides rules on how to compute indentations.
         Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
         characters of the list element. Defaults to ``-``.
    :type header: dict
    :type ordered: bool
    :type no_links: bool
    :type index: int
    :type parser: str
    :type list_marker: str
    :returns: toc_line_no_indent, a single line of the table of contents
         without indentation.
    :rtype: str
    :raises: a built-in exception.
    """
    assert 'type' in header
    assert 'text_original' in header
    assert 'text_anchor_link' in header
    assert isinstance(header['type'], int)
    assert isinstance(header['text_original'], str)
    assert isinstance(header['text_anchor_link'], str)
    assert header['type'] >= 1
    assert index >= 1

    known_parsers = ('github', 'cmark', 'gitlab', 'commonmarker', 'redcarpet')
    if parser in known_parsers:
        if ordered:
            assert list_marker in md_parser[parser]['list']['ordered'][
                'closing_markers']
        else:
            assert list_marker in md_parser[parser]['list']['unordered'][
                'bullet_markers']

    toc_line_no_indent = str()
    if parser in known_parsers:
        # Ordered lists carry the numeric index in front of the marker.
        bullet = str(index) + list_marker if ordered else list_marker
        # FIXME: is this always correct?
        if no_links:
            label = header['text_original']
        else:
            label = ('[' + header['text_original'] + ']' + '(#' +
                     header['text_anchor_link'] + ')')
        toc_line_no_indent = bullet + ' ' + label
    return toc_line_no_indent
def build_toc_line(toc_line_no_indent: str,
                   no_of_indentation_spaces: int = 0) -> str:
    r"""Build the TOC line.

    :parameter toc_line_no_indent: the TOC line without indentation.
    :parameter no_of_indentation_spaces: the number of indentation spaces.
         Defaults to ``0``.
    :type toc_line_no_indent: str
    :type no_of_indentation_spaces: int
    :returns: toc_line, a single line of the table of contents.
    :rtype: str
    :raises: a built-in exception.
    """
    assert no_of_indentation_spaces >= 0
    # Prefix the line with the requested amount of spaces.
    return (' ' * no_of_indentation_spaces) + toc_line_no_indent
def build_anchor_link(header_text_trimmed: str,
                      header_duplicate_counter: dict,
                      parser: str = 'github') -> str:
    r"""Apply the specified slug rule to build the anchor link.

    :parameter header_text_trimmed: the text that needs to be transformed
         in a link.
    :parameter header_duplicate_counter: a data structure that keeps track of
         possible duplicate header links in order to avoid them. This is
         meaningful only for certain values of parser.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :type header_text_trimmed: str
    :type header_duplicate_counter: dict
    :type parser: str
    :returns: None if the specified parser is not recognized, or the anchor
         link, otherwise.
    :rtype: str
    :raises: a built-in exception.

    .. note::
         The licenses of each markdown parser algorithm are reported on
         the 'Markdown spec' documentation page.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        header_text_trimmed = header_text_trimmed.lower()
        # Remove punctuation: Keep spaces, hypens and "word characters"
        # only.
        header_text_trimmed = re.sub(r'[^\w\s\- ]', '', header_text_trimmed)
        header_text_trimmed = header_text_trimmed.replace(' ', '-')
        # Check for duplicates.
        ht = header_text_trimmed
        # Set the initial value if we are examining the first occurrency.
        # The state of header_duplicate_counter is available to the caller
        # functions.
        if header_text_trimmed not in header_duplicate_counter:
            header_duplicate_counter[header_text_trimmed] = 0
        if header_duplicate_counter[header_text_trimmed] > 0:
            header_text_trimmed = header_text_trimmed + '-' + str(
                header_duplicate_counter[header_text_trimmed])
        header_duplicate_counter[ht] += 1
        return header_text_trimmed
    elif parser == 'redcarpet':
        # To ensure full compatibility what follows is a direct translation
        # of the rndr_header_anchor C function used in redcarpet.
        STRIPPED = " -&+$,/:;=?@\"#{}|^~[]`\\*()%.!'"
        header_text_trimmed_len = len(header_text_trimmed)
        inserted = 0
        stripped = 0
        header_text_trimmed_middle_stage = ''
        # Fix: the original translation used "for i in range(...)" and
        # incremented i inside the body; in Python the for statement resets
        # the loop variable on every iteration, so characters inside
        # '<...>' tags and '&...;' entities were never actually skipped.
        # A while loop reproduces the C pointer arithmetic faithfully.
        i = 0
        while i < header_text_trimmed_len:
            if header_text_trimmed[i] == '<':
                # Skip an HTML tag.
                while (i < header_text_trimmed_len
                       and header_text_trimmed[i] != '>'):
                    i += 1
            elif header_text_trimmed[i] == '&':
                # Skip an HTML entity.
                while (i < header_text_trimmed_len
                       and header_text_trimmed[i] != ';'):
                    i += 1
            elif (not curses.ascii.isascii(header_text_trimmed[i])
                  or STRIPPED.find(header_text_trimmed[i]) != -1):
                # Collapse a run of stripped characters into one hyphen.
                if inserted and not stripped:
                    header_text_trimmed_middle_stage += '-'
                stripped = 1
            else:
                header_text_trimmed_middle_stage += header_text_trimmed[
                    i].lower()
                stripped = 0
                inserted += 1
            i += 1
        if stripped > 0 and inserted > 0:
            # Drop a trailing hyphen left by the last stripped run.
            header_text_trimmed_middle_stage = header_text_trimmed_middle_stage[
                0:-1]
        if inserted == 0 and header_text_trimmed_len > 0:
            # djb2 hash fallback when nothing could be inserted.
            # NOTE(review): Python integers do not overflow, unlike the C
            # original's fixed-width type; this matches the previous Python
            # behavior but may diverge from redcarpet for long inputs —
            # TODO confirm.
            hash = 5381
            for j in range(0, header_text_trimmed_len):
                # Get the unicode representation with ord.
                # Unicode should be equal to ASCII in ASCII's range of
                # characters.
                hash = ((hash << 5) + hash) + ord(header_text_trimmed[j])
            # This is equivalent to %x in C.
            header_text_trimmed_middle_stage = 'part-' + '{0:x}'.format(hash)
        return header_text_trimmed_middle_stage
def get_atx_heading(line: str,
                    keep_header_levels: int = 3,
                    parser: str = 'github',
                    no_links: bool = False):
    r"""Given a line extract the link label and its type.

    :parameter line: the line to be examined.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :parameter no_links: disables the use of links.
    :type line: str
    :type keep_header_levels: int
    :type parser: str
    :type no_links: bool
    :returns: None if the line does not contain header elements according to
         the rules of the selected markdown parser, or a tuple containing the
         header type and the trimmed header text, according to the selected
         parser rules, otherwise.
    :rtype: typing.Optional[tuple]
    :raises: GithubEmptyLinkLabel or GithubOverflowCharsLinkLabel or a
         built-in exception.
    """
    assert keep_header_levels >= 1

    if len(line) == 0:
        return None

    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        # A leading backslash escapes the ATX marker: not a heading.
        if line[0] == '\u005c':
            return None

        # Count the leading spaces; at most max_space_indentation are
        # allowed before the '#' characters.
        i = 0
        while i < len(line) and line[i] == ' ' and i <= md_parser['github'][
                'header']['max_space_indentation']:
            i += 1
        if i > md_parser['github']['header']['max_space_indentation']:
            return None

        # Count the '#' characters after the indentation.
        offset = i
        while i < len(line) and line[i] == '#' and i <= md_parser['github'][
                'header']['max_levels'] + offset:
            i += 1
        if i - offset > md_parser['github']['header'][
                'max_levels'] or i - offset > keep_header_levels or i - offset == 0:
            return None
        current_headers = i - offset

        # Include special cases for line endings which should not be
        # discarded as non-ATX headers.
        if i < len(line) and (line[i] != ' ' and line[i] != '\u000a'
                              and line[i] != '\u000d'):
            return None

        i += 1
        # Exclude leading whitespaces after the ATX header identifier.
        while i < len(line) and line[i] == ' ':
            i += 1

        # An algorithm to find the start and the end of the closing sequence.
        # The closing sequence includes all the significant part of the
        # string. This algorithm has a complexity of O(n) with n being the
        # length of the line.
        cs_start = i
        cs_end = cs_start
        # Scan the reversed line so the trailing '#' run is at the front.
        line_prime = line[::-1]
        hash_char_rounds = 0
        go_on = True
        i = 0
        i_prev = i
        while i < len(line) - cs_start - 1 and go_on:
            if ((line_prime[i] != ' ' and line_prime[i] != '#')
                    or hash_char_rounds > 1):
                if i > i_prev:
                    cs_end = len(line_prime) - i_prev
                else:
                    cs_end = len(line_prime) - i
                go_on = False
            while go_on and line_prime[i] == ' ':
                i += 1
            i_prev = i
            while go_on and line_prime[i] == '#':
                i += 1
            if i > i_prev:
                hash_char_rounds += 1

        # Instead of changing the whole algorithm to check for line
        # endings, this seems cleaner.
        find_newline = line.find('\u000a')
        find_carriage_return = line.find('\u000d')
        if find_newline != -1:
            cs_end = min(cs_end, find_newline)
        if find_carriage_return != -1:
            cs_end = min(cs_end, find_carriage_return)

        final_line = line[cs_start:cs_end]

        if not no_links:
            # A trailing backslash would escape the closing ']' of the
            # link label, so pad it with a space.
            if len(final_line) > 0 and final_line[-1] == '\u005c':
                final_line += ' '
            if len(
                    final_line.strip('\u0020').strip('\u0009').strip('\u000a').
                    strip('\u000b').strip('\u000c').strip('\u000d')) == 0:
                raise GithubEmptyLinkLabel
            if len(final_line
                   ) > md_parser['github']['link']['max_chars_label']:
                raise GithubOverflowCharsLinkLabel

            # Escape square brackets if not already escaped.
            i = 0
            while i < len(final_line):
                if (final_line[i] == '[' or final_line[i] == ']'):
                    j = i - 1
                    consecutive_escape_characters = 0
                    while j >= 0 and final_line[j] == '\u005c':
                        consecutive_escape_characters += 1
                        j -= 1
                    # An even (or zero) number of preceding backslashes
                    # means the bracket itself is unescaped.
                    if ((consecutive_escape_characters > 0
                         and consecutive_escape_characters % 2 == 0)
                            or consecutive_escape_characters == 0):
                        tmp = '\u005c'
                    else:
                        tmp = str()
                    final_line = final_line[0:i] + tmp + final_line[i:len(
                        final_line)]
                    i += 1 + len(tmp)
                else:
                    i += 1
    elif parser == 'redcarpet':
        if line[0] != '#':
            return None

        # Count the leading '#' run up to the parser's maximum level.
        i = 0
        while (i < len(line)
               and i < md_parser['redcarpet']['header']['max_levels']
               and line[i] == '#'):
            i += 1
        current_headers = i

        if i < len(line) and line[i] != ' ':
            return None

        while i < len(line) and line[i] == ' ':
            i += 1

        # Trim the line end: newline, then trailing '#'s, then spaces.
        end = i
        while end < len(line) and line[end] != '\n':
            end += 1
        while end > 0 and line[end - 1] == '#':
            end -= 1
        while end > 0 and line[end - 1] == ' ':
            end -= 1

        if end > i:
            final_line = line
            if not no_links and len(final_line) > 0 and final_line[-1] == '\\':
                final_line += ' '
                end += 1
            final_line = final_line[i:end]
        else:
            return None

    # TODO: escape or remove '[', ']', '(', ')' in inline links for redcarpet,
    # TODO: check link label rules for redcarpet.

    return current_headers, final_line
def get_md_header(header_text_line: str,
                  header_duplicate_counter: dict,
                  keep_header_levels: int = 3,
                  parser: str = 'github',
                  no_links: bool = False) -> dict:
    r"""Build a data structure with the elements needed to create a TOC line.

    :parameter header_text_line: a single markdown line that needs to be
         transformed into a TOC line.
    :parameter header_duplicate_counter: a data structure that contains the
         number of occurrencies of each header anchor link. This is used to
         avoid duplicate anchor links and it is meaningful only for certain
         values of parser.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :parameter no_links: disables the use of links.
    :type header_text_line: str
    :type header_duplicate_counter: dict
    :type keep_header_levels: int
    :type parser: str
    :type no_links: bool
    :returns: None if the input line does not correspond to one of the
         designated cases or a data structure containing the necessary
         components to create a table of contents line, otherwise.
    :rtype: dict
    :raises: a built-in exception.

    .. note::
         This works like a wrapper to other functions.
    """
    parsed = get_atx_heading(header_text_line, keep_header_levels, parser,
                             no_links)
    # Not an ATX heading at all: propagate None to the caller.
    if parsed is None:
        return None
    header_type, header_text_trimmed = parsed
    return {
        'type':
        header_type,
        'text_original':
        header_text_trimmed,
        'text_anchor_link':
        build_anchor_link(header_text_trimmed, header_duplicate_counter,
                          parser),
    }
def is_valid_code_fence_indent(line: str, parser: str = 'github') -> bool:
    r"""Determine if the given line has valid indentation for a code block fence.

    :parameter line: a single markdown line to evaluate.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :type line: str
    :type parser: str
    :returns: True if the given line has valid indentation or False
         otherwise.
    :rtype: bool
    :raises: a built-in exception.
    """
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        # Number of leading spaces before the first non-space character.
        indent = len(line) - len(line.lstrip(' '))
        return indent <= md_parser['github']['code fence'][
            'min_marker_characters']
    elif parser == 'redcarpet':
        # TODO.
        return False
def is_opening_code_fence(line: str, parser: str = 'github'):
    r"""Determine if the given line is possibly the opening of a fenced code block.

    :parameter line: a single markdown line to evaluate.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :type line: str
    :type parser: str
    :returns: None if the input line is not an opening code fence. Otherwise,
         returns the string which will identify the closing code fence
         according to the input parsers' rules.
    :rtype: typing.Optional[str]
    :raises: a built-in exception.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        markers = md_parser['github']['code fence']['marker']
        marker_min_length = md_parser['github']['code fence'][
            'min_marker_characters']

        if not is_valid_code_fence_indent(line):
            return None

        line = line.lstrip(' ').rstrip('\n')
        # The fence must start with at least the minimum run of backtick
        # or tilde characters.
        if not line.startswith(
            (markers[0] * marker_min_length, markers[1] * marker_min_length)):
            return None

        if line == len(line) * line[0]:
            # The line is made up entirely of fence characters: there is
            # no info string.
            info_string = str()
        else:
            info_string = line.lstrip(line[0])

        # Backticks or tildes in info string are explicitly forbidden.
        if markers[0] in info_string or markers[1] in info_string:
            return None

        # Solves example 107. See:
        # https://github.github.com/gfm/#example-107
        if line.rstrip(markers[0]) != line and line.rstrip(markers[1]) != line:
            return None

        # NOTE(review): str.rstrip treats its argument as a character set,
        # not a suffix; this returns the leading fence-character run only
        # when no info-string character also appears in the fence run —
        # TODO confirm against the GFM code-fence examples.
        return line.rstrip(info_string)
    elif parser == 'redcarpet':
        # TODO.
        return None
def is_closing_code_fence(line: str,
                          fence: str,
                          is_document_end: bool = False,
                          parser: str = 'github') -> bool:
    r"""Determine if the given line is the end of a fenced code block.

    :parameter line: a single markdown line to evaluate.
    :parameter fence: a sequence of backticks or tildes marking the start of
         the current code block. This is usually the return value of the
         is_opening_code_fence function.
    :parameter is_document_end: This variable tells the function that the
         end of the file is reached.
         Defaults to ``False``.
    :parameter parser: decides rules on how to generate the anchor text.
         Defaults to ``github``.
    :type line: str
    :type fence: str
    :type is_document_end: bool
    :type parser: str
    :returns: True if the line ends the current code block. False otherwise.
    :rtype: bool
    :raises: a built-in exception.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        markers = md_parser['github']['code fence']['marker']
        marker_min_length = md_parser['github']['code fence'][
            'min_marker_characters']

        if not is_valid_code_fence_indent(line):
            return False

        # Remove opening fence indentation after it is known to be valid.
        fence = fence.lstrip(' ')
        # Check if fence uses valid characters.
        if not fence.startswith((markers[0], markers[1])):
            return False

        if len(fence) < marker_min_length:
            return False

        # Additional security.
        fence = fence.rstrip('\n').rstrip(' ')

        # Check that all fence characters are equal.
        if fence != len(fence) * fence[0]:
            return False

        # We might be inside a code block if this is not closed
        # by the end of the document, according to example 95 and 96.
        # This means that the end of the document corresponds to
        # a closing code fence.
        # Of course we first have to check that fence is a valid opening
        # code fence marker.
        # See:
        # https://github.github.com/gfm/#example-95
        # https://github.github.com/gfm/#example-96
        if is_document_end:
            return True

        # Check if line uses the same character as fence.
        line = line.lstrip(' ')
        if not line.startswith(fence):
            return False

        line = line.rstrip('\n').rstrip(' ')

        # Solves example 93 and 94. See:
        # https://github.github.com/gfm/#example-93
        # https://github.github.com/gfm/#example-94
        if len(line) < len(fence):
            return False

        # Closing fence must not have alien characters.
        if line != len(line) * line[0]:
            return False

        return True
    elif parser == 'redcarpet':
        # TODO.
        return False
def toc_renders_as_coherent_list(
        header_type_curr: int = 1,
        indentation_list: list = None,
        parser: str = 'github') -> bool:
    r"""Check if the TOC will render as a working list.

    :parameter header_type_curr: the current type of header (h[1-Inf]).
        Defaults to ``1``.
    :parameter indentation_list: one boolean per header level, ``True`` when
        that level has already appeared in the TOC being built. The list is
        updated in place so the caller can carry its state across successive
        header lines. Defaults to ``None`` which builds a fresh list through
        ``build_indentation_list('github')``.
    :parameter parser: decides rules on how to generate ordered list markers.
        Defaults to ``github``.
    :type header_type_curr: int
    :type indentation_list: list
    :type parser: str
    :returns: renders_as_list
    :rtype: bool
    :raises: a built-in exception.
    """
    # The previous signature used ``build_indentation_list('github')`` as the
    # default value: being evaluated once at function-definition time and
    # mutated below, that single list leaked state between unrelated calls
    # relying on the default. A None sentinel gives each call a fresh list.
    if indentation_list is None:
        indentation_list = build_indentation_list('github')
    assert header_type_curr >= 1
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        assert len(
            indentation_list) == md_parser[parser]['header']['max_levels']
        for e in indentation_list:
            assert isinstance(e, bool)
    renders_as_list = True
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        # Update with current information.
        indentation_list[header_type_curr - 1] = True
        # Reset next cells to False, as a detection mechanism.
        for i in range(header_type_curr,
                       md_parser['github']['header']['max_levels']):
            indentation_list[i] = False
        # Check for previous False cells. If there is a "hole" in the list
        # it means that the TOC will have "wrong" indentation spaces, thus
        # either not rendering as an HTML list or not as the user intended.
        i = header_type_curr - 1
        while i >= 0 and indentation_list[i]:
            i -= 1
        if i >= 0:
            renders_as_list = False
    return renders_as_list
# This module is meant to be imported as a library; there is no standalone
# command line behavior defined here.
if __name__ == '__main__':
    pass
|
frnmst/md-toc | md_toc/api.py | toc_renders_as_coherent_list | python | def toc_renders_as_coherent_list(
header_type_curr: int = 1,
indentation_list: list = build_indentation_list('github'),
parser: str = 'github') -> bool:
r"""Check if the TOC will render as a working list.
:parameter header_type_curr: the current type of header (h[1-Inf]).
:parameter parser: decides rules on how to generate ordered list markers
:type header_type_curr: int
:type indentation_list: list
:type parser: str
:returns: renders_as_list
:rtype: bool
:raises: a built-in exception.
"""
assert header_type_curr >= 1
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker' or parser == 'redcarpet'):
assert len(
indentation_list) == md_parser[parser]['header']['max_levels']
for e in indentation_list:
assert isinstance(e, bool)
renders_as_list = True
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker' or parser == 'redcarpet'):
# Update with current information.
indentation_list[header_type_curr - 1] = True
# Reset next cells to False, as a detection mechanism.
for i in range(header_type_curr,
md_parser['github']['header']['max_levels']):
indentation_list[i] = False
# Check for previous False cells. If there is a "hole" in the list
# it means that the TOC will have "wrong" indentation spaces, thus
# either not rendering as an HTML list or not as the user intended.
i = header_type_curr - 1
while i >= 0 and indentation_list[i]:
i -= 1
if i >= 0:
renders_as_list = False
return renders_as_list | r"""Check if the TOC will render as a working list.
:parameter header_type_curr: the current type of header (h[1-Inf]).
:parameter parser: decides rules on how to generate ordered list markers
:type header_type_curr: int
:type indentation_list: list
:type parser: str
:returns: renders_as_list
:rtype: bool
:raises: a built-in exception. | train | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/api.py#L1062-L1105 | null | #
# api.py
#
# Copyright (C) 2017-2019 frnmst (Franco Masotti) <franco.masotti@live.com>
#
# This file is part of md-toc.
#
# md-toc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# md-toc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with md-toc. If not, see <http://www.gnu.org/licenses/>.
#
"""The main file."""
import fpyutils
import re
import curses.ascii
import sys
from .exceptions import (GithubOverflowCharsLinkLabel, GithubEmptyLinkLabel,
GithubOverflowOrderedListMarker,
StdinIsNotAFileToBeWritten,
TocDoesNotRenderAsCoherentList)
from .constants import common_defaults
from .constants import parser as md_parser
def write_string_on_file_between_markers(filename: str, string: str,
                                         marker: str):
    r"""Write the table of contents on a single file.

    :parameter filename: the file that needs to be read or modified.
    :parameter string: the string that will be written on the file.
    :parameter marker: a marker that will identify the start
         and the end of the string.
    :type filename: str
    :type string: str
    :type marker: str
    :returns: None
    :rtype: None
    :raises: StdinIsNotAFileToBeWritten or an fpyutils exception
         or a built-in exception.
    """
    if filename == '-':
        raise StdinIsNotAFileToBeWritten
    # Surround the payload with one marker line above and one below.
    final_string = marker + '\n\n' + string.rstrip() + '\n\n' + marker + '\n'
    marker_line_positions = fpyutils.get_line_matches(
        filename, marker, 2, loose_matching=True)
    if 1 in marker_line_positions:
        first_marker_line = marker_line_positions[1]
        # When a second marker exists, remove everything between the two
        # markers (inclusive); otherwise remove the single marker line only.
        if 2 in marker_line_positions:
            last_marker_line = marker_line_positions[2]
        else:
            last_marker_line = first_marker_line
        fpyutils.remove_line_interval(filename, first_marker_line,
                                      last_marker_line, filename)
        fpyutils.insert_string_at_line(
            filename,
            final_string,
            first_marker_line,
            filename,
            append=False)
def write_strings_on_files_between_markers(filenames: list, strings: list,
                                           marker: str):
    r"""Write the table of contents on multiple files.

    :parameter filenames: the files that needs to be read or modified.
    :parameter strings: the strings that will be written on the file. Each
         string is associated with one file.
    :parameter marker: a marker that will identify the start
         and the end of the string.
    :type filenames: list
    :type strings: list
    :type marker: str
    :returns: None
    :rtype: None
    :raises: an fpyutils exception or a built-in exception.
    """
    assert len(filenames) == len(strings)
    # The previous ``len(...) > 0`` guards were redundant: iterating an
    # empty list is already a no-op.
    for f in filenames:
        assert isinstance(f, str)
    for s in strings:
        assert isinstance(s, str)
    # Pair each file with its string instead of keeping a manual index.
    for filename, string in zip(filenames, strings):
        write_string_on_file_between_markers(filename, string, marker)
def build_toc(filename: str,
              ordered: bool = False,
              no_links: bool = False,
              no_indentation: bool = False,
              no_list_coherence: bool = False,
              keep_header_levels: int = 3,
              parser: str = 'github',
              list_marker: str = '-') -> str:
    r"""Build the table of contents of a single file.

    :parameter filename: the file that needs to be read. The special value
        ``-`` reads from standard input.
    :parameter ordered: decides whether to build an ordered list or not.
        Defaults to ``False``.
    :parameter no_links: disables the use of links.
        Defaults to ``False``.
    :parameter no_indentation: disables indentation in the list.
        Defaults to ``False``.
    :parameter no_list_coherence: disables the TOC list coherence checks.
        Defaults to ``False``.
    :parameter keep_header_levels: the maximum level of headers to be
        considered as such when building the table of contents.
        Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
        Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
        characters of the list element. Defaults to ``-``.
    :type filename: str
    :type ordered: bool
    :type no_links: bool
    :type no_indentation: bool
    :type no_list_coherence: bool
    :type keep_header_levels: int
    :type parser: str
    :type list_marker: str
    :returns: toc, the corresponding table of contents of the file.
    :rtype: str
    :raises: TocDoesNotRenderAsCoherentList or a built-in exception.
    """
    toc = str()
    header_type_counter = dict()
    header_type_curr = 0
    header_type_prev = 0
    header_duplicate_counter = dict()
    no_of_indentation_spaces_prev = 0
    # The previous version computed list_marker_log twice when ordered was
    # True (once before and once after opening the file): the first
    # computation was dead code and has been removed.
    if ordered:
        list_marker_log = build_list_marker_log(parser, list_marker)
    else:
        list_marker_log = list()
    if filename == '-':
        f = sys.stdin
    else:
        f = open(filename, 'r')
    try:
        line = f.readline()
        is_within_code_fence = False
        code_fence = None
        is_document_end = False
        if not no_indentation and not no_list_coherence:
            # if indentation and list coherence.
            indentation_list = build_indentation_list(parser)
        while line:
            # Document ending detection.
            #
            # This changes the state of is_within_code_fence if the
            # file has no closing fence markers. This serves no practial
            # purpose since the code would run correctly anyway. It is
            # however more sematically correct.
            #
            # See the unit tests (examples 95 and 96 of the github parser)
            # and the is_closing_code_fence function.
            if filename != '-':
                # stdin is not seekable.
                file_pointer_pos = f.tell()
                if f.readline() == str():
                    is_document_end = True
                f.seek(file_pointer_pos)
            # Code fence detection.
            if is_within_code_fence:
                is_within_code_fence = not is_closing_code_fence(
                    line, code_fence, is_document_end, parser)
                line = f.readline()
            else:
                code_fence = is_opening_code_fence(line, parser)
                if code_fence is not None:
                    # Update the status of the next line.
                    is_within_code_fence = True
                    line = f.readline()
            if not is_within_code_fence or code_fence is None:
                # Header detection and gathering.
                header = get_md_header(line, header_duplicate_counter,
                                       keep_header_levels, parser, no_links)
                if header is not None:
                    header_type_curr = header['type']
                    # Take care of the ordered TOC.
                    if ordered:
                        increase_index_ordered_list(header_type_counter,
                                                    header_type_prev,
                                                    header_type_curr, parser)
                        index = header_type_counter[header_type_curr]
                    else:
                        index = 1
                    # Take care of list indentations.
                    if no_indentation:
                        no_of_indentation_spaces_curr = 0
                        # TOC list coherence checks are not necessary
                        # without indentation.
                    else:
                        if not no_list_coherence:
                            if not toc_renders_as_coherent_list(
                                    header_type_curr, indentation_list,
                                    parser):
                                raise TocDoesNotRenderAsCoherentList
                        no_of_indentation_spaces_curr = compute_toc_line_indentation_spaces(
                            header_type_curr, header_type_prev,
                            no_of_indentation_spaces_prev, parser, ordered,
                            list_marker, list_marker_log, index)
                    # Build a single TOC line.
                    toc_line_no_indent = build_toc_line_without_indentation(
                        header, ordered, no_links, index, parser, list_marker)
                    # Save the TOC line with the indentation.
                    toc += build_toc_line(toc_line_no_indent,
                                          no_of_indentation_spaces_curr) + '\n'
                    header_type_prev = header_type_curr
                    no_of_indentation_spaces_prev = no_of_indentation_spaces_curr
                line = f.readline()
    finally:
        # Guarantee the file object is released even when an exception
        # (e.g. TocDoesNotRenderAsCoherentList) is raised mid-parse. As in
        # the original code, stdin is closed as well.
        f.close()
    return toc
def build_multiple_tocs(filenames: list,
                        ordered: bool = False,
                        no_links: bool = False,
                        no_indentation: bool = False,
                        no_list_coherence: bool = False,
                        keep_header_levels: int = 3,
                        parser: str = 'github',
                        list_marker: str = '-') -> list:
    r"""Parse files by line and build the table of contents of each file.

    :parameter filenames: the files that needs to be read.
    :parameter ordered: decides whether to build an ordered list or not.
        Defaults to ``False``.
    :parameter no_links: disables the use of links.
        Defaults to ``False``.
    :parameter no_indentation: disables indentation in the list.
        Defaults to ``False``.
    :parameter no_list_coherence: disables the TOC list coherence checks.
        Defaults to ``False``.
    :parameter keep_header_levels: the maximum level of headers to be
        considered as such when building the table of contents.
        Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
        Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
        characters of the list element. Defaults to ``-``.
    :type filenames: list
    :type ordered: bool
    :type no_links: bool
    :type no_indentation: bool
    :type no_list_coherence: bool
    :type keep_header_levels: int
    :type parser: str
    :type list_marker: str
    :returns: toc_struct, the corresponding table of contents for each input
        file.
    :rtype: list
    :raises: a built-in exception.
    """
    # The previous ``len(filenames) > 0`` guard was redundant: iterating an
    # empty list is already a no-op.
    for f in filenames:
        assert isinstance(f, str)
    if len(filenames) == 0:
        # No input file means: read from standard input.
        filenames.append('-')
    # One TOC per input file, in input order. This replaces the previous
    # manual while loop with an index counter.
    toc_struct = [
        build_toc(filename, ordered, no_links, no_indentation,
                  no_list_coherence, keep_header_levels, parser, list_marker)
        for filename in filenames
    ]
    return toc_struct
def increase_index_ordered_list(header_type_count: dict,
                                header_type_prev: int,
                                header_type_curr: int,
                                parser: str = 'github'):
    r"""Compute the current index for ordered list table of contents.

    :parameter header_type_count: the count of each header type.
    :parameter header_type_prev: the previous type of header (h[1-Inf]).
    :parameter header_type_curr: the current type of header (h[1-Inf]).
    :parameter parser: decides rules on how to generate ordered list markers.
        Defaults to ``github``.
    :type header_type_count: dict
    :type header_type_prev: int
    :type header_type_curr: int
    :type parser: str
    :returns: None
    :rtype: None
    :raises: GithubOverflowOrderedListMarker or a built-in exception.
    """
    # header_type_prev might be 0 while header_type_curr can't.
    assert header_type_prev >= 0
    assert header_type_curr >= 1
    # A brand new table of contents: treat the previous header type as the
    # current one.
    if header_type_prev == 0:
        header_type_prev = header_type_curr
    # First occurrence of this header type, or a deeper nesting level:
    # (re)start its counter from zero.
    start_new_counter = (header_type_curr not in header_type_count
                         or header_type_prev < header_type_curr)
    if start_new_counter:
        header_type_count[header_type_curr] = 0
    header_type_count[header_type_curr] += 1
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        if (header_type_count[header_type_curr] >
                md_parser['github']['list']['ordered']['max_marker_number']):
            raise GithubOverflowOrderedListMarker
def build_list_marker_log(parser: str = 'github',
                          list_marker: str = '.') -> list:
    r"""Create a data structure that holds list marker information.

    :parameter parser: decides rules on how compute indentations.
        Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
        characters of the list element. Defaults to ``.``.
    :type parser: str
    :type list_marker: str
    :returns: list_marker_log, the data structure.
    :rtype: list
    :raises: a built-in exception.

    .. note::
        This function makes sense for ordered lists only.
    """
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker', 'redcarpet'):
        assert list_marker in md_parser[parser]['list']['ordered'][
            'closing_markers']
    list_marker_log = list()
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        # Every header level starts from the minimum marker number,
        # e.g. '1.' when list_marker is '.'.
        initial_marker = str(md_parser['github']['list']['ordered']
                             ['min_marker_number']) + list_marker
        list_marker_log = [
            initial_marker
            for _ in range(md_parser['github']['header']['max_levels'])
        ]
    elif parser == 'redcarpet':
        # Ordered list markers are not implemented for redcarpet.
        pass
    return list_marker_log
def compute_toc_line_indentation_spaces(
        header_type_curr: int = 1,
        header_type_prev: int = 0,
        no_of_indentation_spaces_prev: int = 0,
        parser: str = 'github',
        ordered: bool = False,
        list_marker: str = '-',
        list_marker_log: list = None,
        index: int = 1) -> int:
    r"""Compute the number of indentation spaces for the TOC list element.

    :parameter header_type_curr: the current type of header (h[1-Inf]).
        Defaults to ``1``.
    :parameter header_type_prev: the previous type of header (h[1-Inf]).
        Defaults to ``0``.
    :parameter no_of_indentation_spaces_prev: the number of previous
        indentation spaces. Defaults to ``0``.
    :parameter parser: decides rules on how compute indentations.
        Defaults to ``github``.
    :parameter ordered: if set to ``True``, numbers will be used
        as list ids, otherwise a dash character. Defaults to ``False``.
    :parameter list_marker: a string that contains some of the first
        characters of the list element. Defaults to ``-``.
    :parameter list_marker_log: a data structure that holds list marker
        information for ordered lists. It is updated in place. Defaults to
        ``None``, in which case a fresh
        ``build_list_marker_log('github', '.')`` structure is built.
    :parameter index: a number that will be used as list id in case of an
        ordered table of contents. Defaults to ``1``.
    :type header_type_curr: int
    :type header_type_prev: int
    :type no_of_indentation_spaces_prev: int
    :type parser: str
    :type ordered: bool
    :type list_marker: str
    :type list_marker_log: list
    :type index: int
    :returns: no_of_indentation_spaces_curr, the number of indentation spaces
        for the list element.
    :rtype: int
    :raises: a built-in exception.

    .. note::
        Please note that this function
        assumes that no_of_indentation_spaces_prev contains the correct
        number of spaces.
    """
    # The previous signature used ``build_list_marker_log('github', '.')``
    # as the default value: a mutable object created once at definition time
    # and mutated below, leaking state across all calls relying on the
    # default. A None sentinel gives each such call a fresh structure.
    if list_marker_log is None:
        list_marker_log = build_list_marker_log('github', '.')
    assert header_type_curr >= 1
    assert header_type_prev >= 0
    assert no_of_indentation_spaces_prev >= 0
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        if ordered:
            assert list_marker in md_parser[parser]['list']['ordered'][
                'closing_markers']
        else:
            assert list_marker in md_parser[parser]['list']['unordered'][
                'bullet_markers']
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        if ordered:
            assert len(
                list_marker_log) == md_parser['github']['header']['max_levels']
            for e in list_marker_log:
                assert isinstance(e, str)
    assert index >= 1
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        if header_type_prev == 0:
            # Base case for the first toc line.
            no_of_indentation_spaces_curr = 0
        elif header_type_curr == header_type_prev:
            # Base case for same indentation.
            no_of_indentation_spaces_curr = no_of_indentation_spaces_prev
        else:
            if ordered:
                list_marker_prev = str(list_marker_log[header_type_curr - 1])
            else:
                # list_marker for unordered lists will always be 1 character.
                list_marker_prev = list_marker
            # Generic cases.
            if header_type_curr > header_type_prev:
                # More indentation.
                no_of_indentation_spaces_curr = (
                    no_of_indentation_spaces_prev + len(list_marker_prev) +
                    len(' '))
            elif header_type_curr < header_type_prev:
                # Less indentation.
                no_of_indentation_spaces_curr = (
                    no_of_indentation_spaces_prev -
                    (len(list_marker_prev) + len(' ')))
            # Reset older nested list indices. If this is not performed then
            # future nested ordered lists will rely on incorrect data to
            # compute indentations.
            if ordered:
                for i in range((header_type_curr - 1) + 1,
                               md_parser['github']['header']['max_levels']):
                    list_marker_log[i] = str(
                        md_parser['github']['list']['ordered']
                        ['min_marker_number']) + list_marker
        # Update the data structure.
        if ordered:
            list_marker_log[header_type_curr - 1] = str(index) + list_marker
    elif parser == 'redcarpet':
        no_of_indentation_spaces_curr = 4 * (header_type_curr - 1)
    return no_of_indentation_spaces_curr
def build_toc_line_without_indentation(header: dict,
                                       ordered: bool = False,
                                       no_links: bool = False,
                                       index: int = 1,
                                       parser: str = 'github',
                                       list_marker: str = '-') -> str:
    r"""Return a list element of the table of contents.

    :parameter header: a data structure that contains the original
        text, the trimmed text and the type of header.
    :parameter ordered: if set to ``True``, numbers will be used
        as list ids, otherwise a dash character. Defaults
        to ``False``.
    :parameter no_links: disables the use of links. Defaults to ``False``.
    :parameter index: a number that will be used as list id in case of an
        ordered table of contents. Defaults to ``1``.
    :parameter parser: decides rules on how compute indentations.
        Defaults to ``github``.
    :parameter list_marker: a string that contains some of the first
        characters of the list element. Defaults to ``-``.
    :type header: dict
    :type ordered: bool
    :type no_links: bool
    :type index: int
    :type parser: str
    :type list_marker: str
    :returns: toc_line_no_indent, a single line of the table of contents
        without indentation.
    :rtype: str
    :raises: a built-in exception.
    """
    # Sanity checks on the header data structure.
    for key, expected_type in (('type', int), ('text_original', str),
                               ('text_anchor_link', str)):
        assert key in header
        assert isinstance(header[key], expected_type)
    assert header['type'] >= 1
    assert index >= 1
    known_parsers = ('github', 'cmark', 'gitlab', 'commonmarker', 'redcarpet')
    if parser in known_parsers:
        if ordered:
            assert list_marker in md_parser[parser]['list']['ordered'][
                'closing_markers']
        else:
            assert list_marker in md_parser[parser]['list']['unordered'][
                'bullet_markers']
    toc_line_no_indent = str()
    if parser in known_parsers:
        if ordered:
            list_marker = str(index) + list_marker
        # FIXME: is this always correct?
        if no_links:
            line = header['text_original']
        else:
            line = '[{0}](#{1})'.format(header['text_original'],
                                        header['text_anchor_link'])
        toc_line_no_indent = list_marker + ' ' + line
    return toc_line_no_indent
def build_toc_line(toc_line_no_indent: str,
                   no_of_indentation_spaces: int = 0) -> str:
    r"""Build the TOC line.

    :parameter toc_line_no_indent: the TOC line without indentation.
    :parameter no_of_indentation_spaces: the number of indentation spaces.
        Defaults to ``0``.
    :type toc_line_no_indent: str
    :type no_of_indentation_spaces: int
    :returns: toc_line, a single line of the table of contents.
    :rtype: str
    :raises: a built-in exception.
    """
    assert no_of_indentation_spaces >= 0
    # Prepend the requested amount of spaces to the bare TOC line.
    return ' ' * no_of_indentation_spaces + toc_line_no_indent
def build_anchor_link(header_text_trimmed: str,
                      header_duplicate_counter: str,
                      parser: str = 'github') -> str:
    r"""Apply the specified slug rule to build the anchor link.

    :parameter header_text_trimmed: the text that needs to be transformed
        in a link.
    :parameter header_duplicate_counter: a data structure that keeps track of
        possible duplicate header links in order to avoid them. This is
        meaningful only for certain values of parser.
    :parameter parser: decides rules on how to generate anchor links.
        Defaults to ``github``.
    :type header_text_trimmed: str
    :type header_duplicate_counter: dict
    :type parser: str
    :returns: None if the specified parser is not recognized, or the anchor
        link, otherwise.
    :rtype: str
    :raises: a built-in exception.

    .. note::
        The licenses of each markdown parser algorithm are reported on
        the 'Markdown spec' documentation page.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        header_text_trimmed = header_text_trimmed.lower()
        # Remove punctuation: Keep spaces, hypens and "word characters"
        # only.
        header_text_trimmed = re.sub(r'[^\w\s\- ]', '', header_text_trimmed)
        header_text_trimmed = header_text_trimmed.replace(' ', '-')
        # Check for duplicates.
        ht = header_text_trimmed
        # Set the initial value if we are examining the first occurrency.
        # The state of header_duplicate_counter is available to the caller
        # functions.
        if header_text_trimmed not in header_duplicate_counter:
            header_duplicate_counter[header_text_trimmed] = 0
        if header_duplicate_counter[header_text_trimmed] > 0:
            header_text_trimmed = header_text_trimmed + '-' + str(
                header_duplicate_counter[header_text_trimmed])
        header_duplicate_counter[ht] += 1
        return header_text_trimmed
    elif parser == 'redcarpet':
        # To ensure full compatibility what follows is a direct translation
        # of the rndr_header_anchor C function used in redcarpet.
        STRIPPED = " -&+$,/:;=?@\"#{}|^~[]`\\*()%.!'"
        header_text_trimmed_len = len(header_text_trimmed)
        inserted = 0
        stripped = 0
        header_text_trimmed_middle_stage = ''
        # BUGFIX: the previous version used ``for i in range(...)`` and
        # incremented ``i`` inside the body to skip HTML tags ('<...>') and
        # entities ('&...;'). In Python the loop variable is reset at the
        # top of each iteration, so the skipping had no effect. A while
        # loop reproduces the C ``for (i = 0; i < size; ++i)`` semantics.
        i = 0
        while i < header_text_trimmed_len:
            if header_text_trimmed[i] == '<':
                # Skip everything up to the closing '>' of an HTML tag.
                while (i < header_text_trimmed_len
                       and header_text_trimmed[i] != '>'):
                    i += 1
            elif header_text_trimmed[i] == '&':
                # Skip everything up to the ';' ending an HTML entity.
                while (i < header_text_trimmed_len
                       and header_text_trimmed[i] != ';'):
                    i += 1
            # str.find() == -1 if character is not found in str.
            # https://docs.python.org/3.6/library/stdtypes.html?highlight=find#str.find
            elif not curses.ascii.isascii(
                    header_text_trimmed[i]) or STRIPPED.find(
                        header_text_trimmed[i]) != -1:
                # Collapse runs of stripped characters into one dash,
                # but never lead with a dash.
                if inserted and not stripped:
                    header_text_trimmed_middle_stage += '-'
                stripped = 1
            else:
                header_text_trimmed_middle_stage += header_text_trimmed[
                    i].lower()
                stripped = 0
                inserted += 1
            i += 1
        # Drop a trailing dash left behind by stripped characters.
        if stripped > 0 and inserted > 0:
            header_text_trimmed_middle_stage = header_text_trimmed_middle_stage[
                0:-1]
        if inserted == 0 and header_text_trimmed_len > 0:
            hash = 5381
            for i in range(0, header_text_trimmed_len):
                # Get the unicode representation with ord.
                # Unicode should be equal to ASCII in ASCII's range of
                # characters.
                hash = ((hash << 5) + hash) + ord(header_text_trimmed[i])
            # This is equivalent to %x in C. In Python we don't have
            # the length problem so %x is equal to %lx in this case.
            # Apparently there is no %l in Python...
            header_text_trimmed_middle_stage = 'part-' + '{0:x}'.format(hash)
        return header_text_trimmed_middle_stage
def get_atx_heading(line: str,
                    keep_header_levels: int = 3,
                    parser: str = 'github',
                    no_links: bool = False):
    r"""Given a line extract the link label and its type.

    :parameter line: the line to be examined.
    :parameter keep_header_levels: the maximum level of headers to be
        considered as such when building the table of contents.
        Defaults to ``3``.
    :parameter parser: decides rules on how to generate the anchor text.
        Defaults to ``github``.
    :parameter no_links: disables the use of links. When links are enabled
        the label is validated and square brackets are escaped.
        Defaults to ``False``.
    :type line: str
    :type keep_header_levels: int
    :type parser: str
    :type no_links: bool
    :returns: None if the line does not contain header elements according to
        the rules of the selected markdown parser, or a tuple containing the
        header type and the trimmed header text, according to the selected
        parser rules, otherwise.
    :rtype: typing.Optional[tuple]
    :raises: GithubEmptyLinkLabel or GithubOverflowCharsLinkLabel or a
        built-in exception.
    """
    assert keep_header_levels >= 1
    if len(line) == 0:
        return None
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        # '\u005c' is a backslash: a backslash-escaped line is not a header.
        if line[0] == '\u005c':
            return None
        # Consume the allowed leading spaces (at most
        # max_space_indentation of them).
        i = 0
        while i < len(line) and line[i] == ' ' and i <= md_parser['github'][
                'header']['max_space_indentation']:
            i += 1
        if i > md_parser['github']['header']['max_space_indentation']:
            return None
        # Count the '#' characters: their number is the header level.
        offset = i
        while i < len(line) and line[i] == '#' and i <= md_parser['github'][
                'header']['max_levels'] + offset:
            i += 1
        if i - offset > md_parser['github']['header'][
                'max_levels'] or i - offset > keep_header_levels or i - offset == 0:
            return None
        current_headers = i - offset
        # Include special cases for line endings which should not be
        # discarded as non-ATX headers.
        # '\u000a' is a line feed, '\u000d' a carriage return.
        if i < len(line) and (line[i] != ' ' and line[i] != '\u000a'
                              and line[i] != '\u000d'):
            return None
        i += 1
        # Exclude leading whitespaces after the ATX header identifier.
        while i < len(line) and line[i] == ' ':
            i += 1
        # An algorithm to find the start and the end of the closing sequence.
        # The closing sequence includes all the significant part of the
        # string. This algorithm has a complexity of O(n) with n being the
        # length of the line.
        # The scan happens right-to-left over the reversed line
        # (line_prime), alternating runs of spaces and '#' characters.
        cs_start = i
        cs_end = cs_start
        line_prime = line[::-1]
        hash_char_rounds = 0
        go_on = True
        i = 0
        i_prev = i
        while i < len(line) - cs_start - 1 and go_on:
            if ((line_prime[i] != ' ' and line_prime[i] != '#')
                    or hash_char_rounds > 1):
                if i > i_prev:
                    cs_end = len(line_prime) - i_prev
                else:
                    cs_end = len(line_prime) - i
                go_on = False
            while go_on and line_prime[i] == ' ':
                i += 1
            i_prev = i
            while go_on and line_prime[i] == '#':
                i += 1
            if i > i_prev:
                hash_char_rounds += 1
        # Instead of changing the whole algorithm to check for line
        # endings, this seems cleaner.
        find_newline = line.find('\u000a')
        find_carriage_return = line.find('\u000d')
        if find_newline != -1:
            cs_end = min(cs_end, find_newline)
        if find_carriage_return != -1:
            cs_end = min(cs_end, find_carriage_return)
        final_line = line[cs_start:cs_end]
        if not no_links:
            # A label ending with a lone backslash gets a space appended so
            # the backslash does not escape the closing bracket.
            if len(final_line) > 0 and final_line[-1] == '\u005c':
                final_line += ' '
            # A label made only of whitespace characters is invalid.
            if len(
                    final_line.strip('\u0020').strip('\u0009').strip('\u000a').
                    strip('\u000b').strip('\u000c').strip('\u000d')) == 0:
                raise GithubEmptyLinkLabel
            if len(final_line
                   ) > md_parser['github']['link']['max_chars_label']:
                raise GithubOverflowCharsLinkLabel
            # Escape square brackets if not already escaped.
            # A bracket counts as escaped only when preceded by an odd
            # number of consecutive backslashes.
            i = 0
            while i < len(final_line):
                if (final_line[i] == '[' or final_line[i] == ']'):
                    j = i - 1
                    consecutive_escape_characters = 0
                    while j >= 0 and final_line[j] == '\u005c':
                        consecutive_escape_characters += 1
                        j -= 1
                    if ((consecutive_escape_characters > 0
                         and consecutive_escape_characters % 2 == 0)
                            or consecutive_escape_characters == 0):
                        tmp = '\u005c'
                    else:
                        tmp = str()
                    final_line = final_line[0:i] + tmp + final_line[i:len(
                        final_line)]
                    i += 1 + len(tmp)
                else:
                    i += 1
    elif parser == 'redcarpet':
        # Redcarpet headers allow no leading spaces before the '#'.
        if line[0] != '#':
            return None
        i = 0
        while (i < len(line)
               and i < md_parser['redcarpet']['header']['max_levels']
               and line[i] == '#'):
            i += 1
        current_headers = i
        if i < len(line) and line[i] != ' ':
            return None
        while i < len(line) and line[i] == ' ':
            i += 1
        # Trim the closing sequence: trailing '#' characters and spaces.
        end = i
        while end < len(line) and line[end] != '\n':
            end += 1
        while end > 0 and line[end - 1] == '#':
            end -= 1
        while end > 0 and line[end - 1] == ' ':
            end -= 1
        if end > i:
            final_line = line
            if not no_links and len(final_line) > 0 and final_line[-1] == '\\':
                final_line += ' '
                end += 1
            final_line = final_line[i:end]
        else:
            return None
    # TODO: escape or remove '[', ']', '(', ')' in inline links for redcarpet,
    # TODO: check link label rules for redcarpet.
    return current_headers, final_line
def get_md_header(header_text_line: str,
                  header_duplicate_counter: dict,
                  keep_header_levels: int = 3,
                  parser: str = 'github',
                  no_links: bool = False) -> dict:
    r"""Build a data structure with the elements needed to create a TOC line.

    :parameter header_text_line: a single markdown line that needs to be
        transformed into a TOC line.
    :parameter header_duplicate_counter: a data structure that contains the
        number of occurrencies of each header anchor link. This is used to
        avoid duplicate anchor links and it is meaningful only for certain
        values of parser.
    :parameter keep_header_levels: the maximum level of headers to be
        considered as such when building the table of contents.
        Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
        Defaults to ``github``.
    :parameter no_links: disables the use of links. Defaults to ``False``.
    :type header_text_line: str
    :type header_duplicate_counter: dict
    :type keep_header_levels: int
    :type parser: str
    :type no_links: bool
    :returns: None if the input line does not correspond to one of the
        designated cases or a data structure containing the necessary
        components to create a table of contents line, otherwise.
    :rtype: dict
    :raises: a built-in exception.

    .. note::
        This works like a wrapper to other functions.
    """
    parsed = get_atx_heading(header_text_line, keep_header_levels, parser,
                             no_links)
    if parsed is None:
        return None
    header_type, header_text_trimmed = parsed
    anchor = build_anchor_link(header_text_trimmed, header_duplicate_counter,
                               parser)
    return {
        'type': header_type,
        'text_original': header_text_trimmed,
        'text_anchor_link': anchor,
    }
def is_valid_code_fence_indent(line: str, parser: str = 'github') -> bool:
    r"""Determine if the given line has valid indentation for a code block fence.

    :parameter line: a single markdown line to evaluate.
    :parameter parser: decides rules on how to generate the anchor text.
        Defaults to ``github``.
    :type line: str
    :type parser: str
    :returns: True if the given line has valid indentation or False
        otherwise.
    :rtype: bool
    :raises: a built-in exception.
    """
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        # Number of leading space characters before any fence marker.
        leading_spaces = len(line) - len(line.lstrip(' '))
        return (leading_spaces <=
                md_parser['github']['code fence']['min_marker_characters'])
    elif parser == 'redcarpet':
        # TODO.
        return False
def is_opening_code_fence(line: str, parser: str = 'github'):
    r"""Determine if the given line is possibly the opening of a fenced code block.

    :parameter line: a single markdown line to evaluate.
    :parameter parser: decides rules on how to generate the anchor text.
        Defaults to ``github``.
    :type line: str
    :type parser: str
    :returns: None if the input line is not an opening code fence. Otherwise,
        returns the string which will identify the closing code fence
        according to the input parsers' rules.
    :rtype: typing.Optional[str]
    :raises: a built-in exception.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        markers = md_parser['github']['code fence']['marker']
        marker_min_length = md_parser['github']['code fence'][
            'min_marker_characters']
        if not is_valid_code_fence_indent(line):
            return None
        line = line.lstrip(' ').rstrip('\n')
        # The line must start with at least marker_min_length identical
        # fence characters (one of the two marker kinds).
        if not line.startswith(
            (markers[0] * marker_min_length, markers[1] * marker_min_length)):
            return None
        # NOTE(review): lstrip/rstrip below treat their argument as a set of
        # characters, not as a prefix/suffix string — presumably intended
        # here since the fence run is homogeneous; confirm against the GFM
        # fenced-code-block examples.
        if line == len(line) * line[0]:
            info_string = str()
        else:
            # Everything after the fence run is the info string.
            info_string = line.lstrip(line[0])
        # Backticks or tildes in info string are explicitly forbidden.
        if markers[0] in info_string or markers[1] in info_string:
            return None
        # Solves example 107. See:
        # https://github.github.com/gfm/#example-107
        if line.rstrip(markers[0]) != line and line.rstrip(markers[1]) != line:
            return None
        # Return only the fence run; it identifies the closing fence.
        return line.rstrip(info_string)
    elif parser == 'redcarpet':
        # TODO.
        return None
def is_closing_code_fence(line: str,
                          fence: str,
                          is_document_end: bool = False,
                          parser: str = 'github') -> bool:
    r"""Determine if the given line is the end of a fenced code block.

    :parameter line: a single markdown line to evaluate.
    :parameter fence: a sequence of backticks or tildes marking the start of
        the current code block. This is usually the return value of the
        is_opening_code_fence function.
    :parameter is_document_end: This variable tells the function that the
        end of the file is reached.
        Defaults to ``False``.
    :parameter parser: decides rules on how to generate the anchor text.
        Defaults to ``github``.
    :type line: str
    :type fence: str
    :type is_document_end: bool
    :type parser: str
    :returns: True if the line ends the current code block. False otherwise.
    :rtype: bool
    :raises: a built-in exception.
    """
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker'):
        markers = md_parser['github']['code fence']['marker']
        marker_min_length = md_parser['github']['code fence'][
            'min_marker_characters']
        # Over-indented closing markers do not close the block.
        if not is_valid_code_fence_indent(line):
            return False
        # Remove opening fence indentation after it is known to be valid.
        fence = fence.lstrip(' ')
        # Check if fence uses valid characters.
        if not fence.startswith((markers[0], markers[1])):
            return False
        if len(fence) < marker_min_length:
            return False
        # Additional security.
        fence = fence.rstrip('\n').rstrip(' ')
        # Check that all fence characters are equal.
        if fence != len(fence) * fence[0]:
            return False
        # We might be inside a code block if this is not closed
        # by the end of the document, according to example 95 and 96.
        # This means that the end of the document corresponds to
        # a closing code fence.
        # Of course we first have to check that fence is a valid opening
        # code fence marker.
        # See:
        # https://github.github.com/gfm/#example-95
        # https://github.github.com/gfm/#example-96
        if is_document_end:
            return True
        # Check if line uses the same character as fence.
        line = line.lstrip(' ')
        if not line.startswith(fence):
            return False
        line = line.rstrip('\n').rstrip(' ')
        # Solves example 93 and 94. See:
        # https://github.github.com/gfm/#example-93
        # https://github.github.com/gfm/#example-94
        if len(line) < len(fence):
            return False
        # Closing fence must not have alien characters.
        if line != len(line) * line[0]:
            return False
        return True
    elif parser == 'redcarpet':
        # TODO.
        return False
def build_indentation_list(parser: str = 'github'):
    r"""Create a data structure that holds the state of indentations.

    :parameter parser: decides the length of the list.
        Defaults to ``github``.
    :type parser: str
    :returns: indentation_list, a list that contains the state of
        indentations given a header type. The list is empty for an
        unknown parser.
    :rtype: list
    :raises: a built-in exception.
    """
    indentation_list = list()
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker', 'redcarpet'):
        # One flag per possible header level, all initially unset.
        indentation_list = [False] * md_parser[parser]['header']['max_levels']
    return indentation_list
if __name__ == '__main__':
    # This module is meant to be imported; running it directly is a no-op.
    pass
|
gabrielelanaro/chemview | chemview/gg.py | pairs | python | def pairs(a):
a = np.asarray(a)
return as_strided(a, shape=(a.size - 1, 2), strides=a.strides * 2) | Return array of pairs of adjacent elements in a.
>>> pairs([1, 2, 3, 4])
array([[1, 2],
[2, 3],
[3, 4]]) | train | https://github.com/gabrielelanaro/chemview/blob/2c9768dd23db99e59e27adff2a953bb8ee795fa3/chemview/gg.py#L240-L250 | null | """GGplot like interface"""
import uuid
import matplotlib as mpl
import matplotlib.cm as cm
import numpy as np
from IPython.display import Image, display
from .utils import get_atom_color
from .widget import RepresentationViewer, TrajectoryControls
class AttrDict(dict):
    """Dictionary whose entries are also reachable as attributes."""

    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)
        # Alias the attribute namespace to the mapping itself so that
        # d['key'] and d.key always stay in sync.
        self.__dict__ = self

    def copy(self):
        """Return a shallow copy that preserves the concrete subclass."""
        cls = type(self)
        return cls(self)
class Aes(AttrDict):
    """Aesthetic mapping: plot attributes stored as an attribute-dict."""

    def __init__(self, *args, **kwargs):
        super(Aes, self).__init__(*args, **kwargs)

    def __repr__(self):
        # Render via copy() so the repr shows a plain mapping.
        return str(self.copy())

    def updated(self, other):
        """Return a new Aes with *other* merged on top (self unchanged)."""
        merged = self.copy()
        merged.update(other)
        return merged
class ggview(object):
    """Static ggplot-style scene: a base Aes plus geometries and scales.

    Geometries and scales are accumulated with ``+`` and rendered into a
    RepresentationViewer widget by display().
    """

    def __init__(self, aes=Aes()):
        # NOTE(review): Aes() here is a shared mutable default argument --
        # harmless as long as nothing mutates it, but worth confirming.
        self.aes = aes
        self.geometries = []
        self.scales = []

    def display(self):
        """Build and return the RepresentationViewer for this scene."""
        # Generate primitives
        aes = self.aes
        # Apply scale that map data to aes
        for scale in self.scales:
            aes = scale.apply(aes)
        primitives = []
        for geometry in self.geometries:
            primitives.extend(geometry.produce(aes))
        # We generate a json description
        rv = RepresentationViewer.from_scene({"representations" : primitives})
        # Scales may draw legends (e.g. color bars) onto the widget.
        for scale in self.scales:
            scale.render(rv)
        if 'xyz' in self.aes:
            rv.autozoom(self.aes.xyz)
        return rv

    def _ipython_display_(self):
        # IPython display hook: build the widget and delegate to it.
        rv = self.display()
        return rv._ipython_display_()

    def __add__(self, other):
        """Accumulate a Geom or a Scale into the scene (ggplot-style +)."""
        if isinstance(other, Geom):
            self.geometries.append(other)
        elif isinstance(other, Scale):
            self.scales.append(other)
        else:
            raise ValueError("Data type not understood {}".format(type(other)))
        return self
class ggtraj(ggview):
    """Animated (trajectory) variant of ggview.

    Aes keys ending in ``_traj`` hold per-frame data; frame 0 builds the
    initial scene and a TrajectoryControls widget drives frame updates.
    """

    def __init__(self, frames, aes=Aes()):
        # Seed the static scene with the aesthetics of the first frame.
        frame_aes = ggtraj._make_frame_aes(aes, 0)
        super(ggtraj, self).__init__(frame_aes)
        self.frames = frames
        self.traj_aes = aes
        self.update_funcs = []

    def display(self):
        """Render frame 0 and wire the frame slider to live updates."""
        # Generate primitives
        aes = self.aes
        # Apply scale that map data to aes
        # NOTE(review): render() is called here without a widget argument,
        # unlike ggview.display -- confirm Scale.render signatures.
        for scale in self.scales:
            scale.render()
            aes = scale.apply(aes)
        primitives = []
        for geometry in self.geometries:
            prims = geometry.produce(aes)
            primitives.extend(prims)
            # Remember how to refresh this geometry when the frame changes.
            self.update_funcs.append((prims[0]["rep_id"], geometry.update))
        rv = RepresentationViewer.from_scene({"representations" : primitives})
        tc = TrajectoryControls(self.frames)
        tc.on_frame_change(lambda frame, self=self, widget=rv: self.update(widget, frame))
        # Add trajectory viewer too
        display(rv)
        display(tc)
        return tc, rv

    @staticmethod
    def _make_frame_aes(aes, frame):
        """Return an Aes where every ``*_traj`` key is sliced at *frame*."""
        frame_aes = Aes()
        # Make a copy
        for k in aes.keys():
            frame_aes[k] = aes[k]
        # Override the traj ones
        for k in aes.keys():
            if k.endswith("_traj"):
                frame_aes[k[:-5]] = aes[k][frame]
        return frame_aes

    def update(self, widget, frame):
        """Push the aesthetics of *frame* into every live representation."""
        for rep_id, func in self.update_funcs:
            aes = ggtraj._make_frame_aes(self.traj_aes, frame)
            for scale in self.scales:
                aes = scale.apply(aes)
            options = func(aes)
            widget.update_representation(rep_id, options)
class Geom(object):
    """Base class for all geometric objects."""

    def __init__(self, aes=None):
        # BUG FIX: the default used to be a single shared Aes() instance
        # (mutable default argument); build a fresh one per instance.
        self.aes = Aes() if aes is None else aes

    def produce(self, aes=None):
        """Return the list of primitive dicts for this geometry."""
        raise NotImplementedError()

    def update(self, aes):
        """Return updated options for an existing representation."""
        raise NotImplementedError()
class GeomPoints(Geom):
    """Render each aes.xyz coordinate as a screen-space point."""

    def produce(self, aes=None):
        """Build the 'points' primitive list from the merged aesthetics.

        :param aes: optional Aes overriding/complementing ``self.aes``.
        :returns: one-element list with the primitive description.
        """
        # BUG FIX: avoid the shared mutable default Aes(); a fresh
        # instance is created per call instead.
        if aes is None:
            aes = Aes()
        # If an aes was passed, we override...
        aes = aes.updated(self.aes)
        # Return a dict of primitives produced from aes data
        return [{
            "rep_id" : uuid.uuid1().hex,
            'rep_type': "points",
            "options": { "coordinates": aes.xyz,
                         "colors": process_colors(len(aes.xyz), aes.get("colors", None)),
                         "sizes": process_sizes(len(aes.xyz), aes.get("sizes", 1)),
                         "visible": aes.get("visible", None) }
        }]

    def update(self, aes):
        """Return the refreshed options dict for an existing representation."""
        # we return options
        return { "coordinates": aes.xyz,
                 "colors": process_colors(len(aes.xyz), aes.get("colors", None)),
                 "sizes": process_sizes(len(aes.xyz), aes.get("sizes", None)),
                 "visible": aes.get("visible", None) }
class GeomSpheres(Geom):
    """Render each aes.xyz coordinate as a solid sphere."""

    def produce(self, aes=None):
        """Build the 'spheres' primitive list from the merged aesthetics."""
        # BUG FIX: avoid the shared mutable default Aes().
        if aes is None:
            aes = Aes()
        # If an aes was passed, we override...
        aes = aes.updated(self.aes)
        # Return a dict of primitives produced from aes data
        return [{
            "rep_id" : uuid.uuid1().hex,
            'rep_type': "spheres",
            "options": { "coordinates": np.array(aes.xyz, dtype='float32'),
                         "colors": process_colors(len(aes.xyz), aes.get("colors", None)),
                         "radii": process_sizes(len(aes.xyz), aes.get("sizes", 1)),
                         "visible": aes.get("visible", None) }
        }]
class GeomLines(Geom):
    """Render aes.edges as thin lines connecting pairs of aes.xyz points."""

    def produce(self, aes=None):
        """Build the 'lines' primitive list from the merged aesthetics."""
        # BUG FIX: avoid the shared mutable default Aes().
        if aes is None:
            aes = Aes()
        # Return a dict of primitives produced from aes data
        aes = aes.updated(self.aes)
        xyz = np.array(aes.xyz)
        edges = np.array(aes.edges, 'uint32')
        # One color per edge, used for both endpoints.
        colors = process_colors(len(aes.edges), aes.get("colors", None))
        return [{ "rep_id" : uuid.uuid1().hex,
                  'rep_type': "lines",
                  "options" : {
                      "startCoords": np.take(xyz, edges[:, 0], axis=0),
                      "endCoords": np.take(xyz, edges[:, 1], axis=0),
                      "startColors": colors,
                      "endColors": colors}
                }]
class GeomCylinders(Geom):
    """Render aes.edges as solid cylinders between pairs of aes.xyz points."""

    def produce(self, aes=None):
        """Build the 'cylinders' primitive list from the merged aesthetics."""
        # BUG FIX: avoid the shared mutable default Aes().
        if aes is None:
            aes = Aes()
        # Return a dict of primitives produced from aes data
        aes = aes.updated(self.aes)
        xyz = np.array(aes.xyz)
        edges = np.array(aes.edges, 'uint32')
        colors = process_colors(len(edges), aes.get("colors", None))
        return [{ "rep_id" : uuid.uuid1().hex,
                  'rep_type': "cylinders",
                  "options" : {
                      "startCoords": np.take(xyz, edges[:, 0], axis=0),
                      "endCoords": np.take(xyz, edges[:, 1], axis=0),
                      "colors": colors,
                      "radii": process_sizes(len(aes.edges), aes.get("sizes", None))}
                }]
class GeomSurface(Geom):
    """Placeholder for a surface geometry.

    NOTE(review): produce() is an unimplemented stub returning None, which
    would break ggview.display (it extends a list with the result) --
    confirm this class is not yet meant to be used.
    """

    def produce(self, aes=Aes()):
        pass
from numpy.lib.stride_tricks import as_strided
def groupby_ix(a):
    """Return (start, stop) index pairs for each run of equal values in *a*."""
    adjacent = pairs(a)
    # Positions where the value changes between consecutive elements.
    change_ix = np.nonzero(adjacent[:, 0] != adjacent[:, 1])[0]
    # Run boundaries: 0, every change point shifted by one, then the length.
    boundaries = np.insert(change_ix + 1, 0, 0)
    boundaries = np.append(boundaries, a.shape[0])
    return pairs(boundaries)
class GeomProteinCartoon(Geom):
    """Cartoon-style protein rendering.

    Helices (H) become ribbons, sheets (E) arrowed ribbons and coils (C)
    smooth tubes. Requires aes.xyz, aes.types (atom names) and
    aes.secondary_type (one of H/E/C per atom).
    """

    def __init__(self, aes=Aes(), cmap=None):
        super(GeomProteinCartoon, self).__init__(aes)
        # Color per secondary-structure class (H=helix, E=sheet, C=coil).
        self.cmap = cmap or {'H': 0xff0000, 'E':0x00ffff, 'C':0xffffff}

    # It is necessary to have
    # aes.xyz (Coordinates)
    # aes.types (Atom types)
    # aes.secondary (secondary structure)
    def produce(self, aes=Aes()):
        aes = aes.updated(self.aes)
        # Check if secondary_id is present, if not we generate a reasonable one
        if not 'secondary_id' in aes:
            # Group consecutive atoms sharing a secondary type and number
            # the groups starting from 1 (0 is reserved for heteroatoms).
            pairs_ = groupby_ix(aes.secondary_type)
            secondary_id = np.zeros_like(aes.secondary_type, dtype='int')
            for k, (i,j) in enumerate(pairs_):
                secondary_id[i:j] = k + 1
            aes['secondary_id'] = secondary_id
        aes['types'] = np.array(aes.types)
        primitives = []
        for xyz, normals in zip(*self._extract_helix_coords_normals(aes)):
            g_helices = GeomRibbon(Aes(xyz=xyz, normals=normals, resolution=32),
                                   color=self.cmap.get('H', 0xffffff))
            primitives.extend(g_helices.produce(Aes()))
        for xyz, normals in zip(*self._extract_sheet_coords_normals(aes)):
            g_sheets = GeomRibbon(Aes(xyz=xyz, normals=normals, resolution=32),
                                  arrow=True, color=self.cmap.get('E', 0xffffff))
            primitives.extend(g_sheets.produce(Aes()))
        for xyz in self._extract_coil_coords(aes):
            g_coils = GeomTube(Aes(xyz=xyz), color=self.cmap.get('C', 0xffffff))
            primitives.extend(g_coils.produce(Aes()))
        return primitives

    def _extract_helix_coords_normals(self, aes):
        # First, extract the helices from the secondary
        groups_ix = groupby_ix(aes.secondary_id)
        helices_ix = groups_ix[aes.secondary_type[groups_ix[:, 0]] == 'H']
        # Alpha-carbon trace for each helix (skip empty ranges).
        backbone_list = [aes.xyz[aes.types == 'CA'][i:j] for i, j in helices_ix if j - i]
        normals_list = [alpha_helix_normals(backbone) for backbone in backbone_list]
        return backbone_list, normals_list

    def _extract_sheet_coords_normals(self, aes):
        groups_ix = groupby_ix(aes.secondary_id)
        sheets_ix = groups_ix[aes.secondary_type[groups_ix[:, 0]] == 'E']
        # Sheets need CA, C and O atoms to orient the ribbon plane.
        ca_list = [aes.xyz[aes.types == 'CA'][i:j] for i, j in sheets_ix if j - i]
        c_list = [aes.xyz[aes.types == 'C'][i:j] for i, j in sheets_ix if j - i]
        o_list = [aes.xyz[aes.types == 'O'][i:j] for i, j in sheets_ix if j - i]
        normals_list = [beta_sheet_normals(ca, c, o) for ca, c, o in zip(ca_list, c_list, o_list)]
        return ca_list, normals_list

    def _extract_coil_coords(self, aes):
        groups_ix = groupby_ix(aes.secondary_id)
        coils_ix = groups_ix[aes.secondary_type[groups_ix[:, 0]] == 'C']
        # We remove id = 0 because they are heteroatoms
        coils_id = aes.secondary_id[coils_ix[:, 0]]
        coils_ix = coils_ix[coils_id != 0, :]
        # Extend each coil by one index on both sides so it visually joins
        # the neighbouring helix/sheet, clamping to the valid range.
        coils_ix[:, 1] += 1
        coils_ix[:, 0] -= 1
        coils_ix[coils_ix > len(aes.secondary_type)] = len(aes.secondary_type)
        coils_ix[coils_ix < 0] = 0
        backbone_list = [aes.xyz[aes.types == 'CA'][i:j] for i, j in coils_ix]
        return backbone_list
from chemview.utils import normalized, beta_sheet_normals, alpha_helix_normals
class GeomRibbon(Geom):
    """Render a flat ribbon following aes.xyz, oriented by aes.normals."""

    def __init__(self, aes=None, color=0xffffff, width=0.2, arrow=False):
        # BUG FIX: avoid the shared mutable default Aes().
        super(GeomRibbon, self).__init__(Aes() if aes is None else aes)
        self.color = color
        self.width = width
        self.arrow = arrow

    def produce(self, aes=None):
        """Build the 'ribbon' primitive list from the merged aesthetics."""
        if aes is None:
            aes = Aes()
        aes = aes.updated(self.aes)
        xyz = np.array(aes.xyz)
        normals = np.array(aes.normals)
        return [{'rep_id': uuid.uuid1().hex,
                 'rep_type': 'ribbon',
                 'options': {
                     'coordinates': xyz,
                     'normals': normals,
                     'resolution': aes.get("resolution", 4),
                     'color': self.color,
                     'width': self.width,
                     'arrow': self.arrow
                 }}]
class GeomTube(Geom):
    """Render a smooth tube that follows the aes.xyz control points."""

    def __init__(self, aes=None, color=0xffffff, radius=0.05, resolution=4):
        # Avoid the shared mutable default Aes().
        super(GeomTube, self).__init__(Aes() if aes is None else aes)
        self.color = color
        self.radius = radius
        # BUG FIX: the resolution parameter was previously ignored
        # (hard-coded to 4).
        self.resolution = resolution

    def produce(self, aes=None):
        """Build the 'smoothtube' primitive list from the merged aesthetics."""
        if aes is None:
            aes = Aes()
        aes = aes.updated(self.aes)
        xyz = np.array(aes.xyz)
        return [{'rep_id': uuid.uuid1().hex,
                 'rep_type': 'smoothtube',
                 'options': {
                     'coordinates': xyz,
                     'resolution': self.resolution,
                     'color': self.color,
                     'radius': self.radius
                 }}]
class Scale(object):
    """Base class for scales, which map raw data values onto aesthetics."""
    pass
class ScaleColorsGradient(Scale):
    """Scale mapping scalar values in aes.colors onto a color gradient."""

    # Aes key this scale rewrites.
    property = "colors"

    def __init__(self, limits=None, palette="YlGnBu"):
        # NOTE(review): render() dereferences self.limits[0]/[1], so with
        # the default limits=None it would raise TypeError -- confirm
        # callers always pass explicit limits before rendering.
        self.limits = limits
        self.palette = palette

    def apply(self, aes):
        """Return a copy of *aes* with colors mapped through the palette."""
        aes = aes.copy()
        colors = process_colors(len(aes.xyz), aes.get("colors", None), self.limits, self.palette)
        aes.colors = colors
        return aes

    def render(self, widget):
        """Draw a 5-step color-bar legend on *widget* (requires limits set)."""
        import matplotlib as mpl

        # Set the colormap and norm to correspond to the data for which
        # the colorbar will be used.
        cmap = mpl.cm.get_cmap(self.palette)
        norm = mpl.colors.Normalize(vmin=self.limits[0], vmax=self.limits[1])

        # Let's say we give 5 typical values
        values = np.linspace(self.limits[0], self.limits[1], 5)
        colors = [rgbfloat_to_hex(cmap(norm(v))) for v in values]
        values = ["%.2f" % v for v in values]
        widget._remote_call('addColorScale', colors=colors, values=values)
def rgbint_to_hex(rgb):
    """Pack an (r, g, b) triple of 0-255 integers into one 0xRRGGBB int."""
    r, g, b = rgb[0], rgb[1], rgb[2]
    return (r << 16) | (g << 8) | b
def rgbfloat_to_hex(rgb):
    """Pack an (r, g, b) triple of floats in [0, 1] into one 0xRRGGBB int."""
    channels = [int(c * 255) for c in rgb[:3]]
    return (channels[0] << 16) | (channels[1] << 8) | channels[2]
def process_colors(size, colors, limits=None, palette="YlGnBu", cmap=None):
    """Normalize a user-supplied *colors* spec into a list of 0xRRGGBB ints.

    :param int size: number of items the color list must cover.
    :param colors: None (white), a single int broadcast to all items, a
        list of ints, element-name strings, floats (mapped through a
        colormap) or a numpy array of any of those.
    :param limits: (vmin, vmax) used by the float branch; when None the
        data min/max are used.
    :param str palette: matplotlib colormap name used when *cmap* is None.
    :param cmap: optional matplotlib colormap instance; takes precedence
        over *palette*. (BUG FIX: this parameter was previously accepted
        but silently ignored.)
    :raises ValueError: when the colors format is not recognized.
    """
    if colors is None:
        return [0xffffff] * size
    elif isinstance(colors, int):
        return [colors] * size
    elif isinstance(colors, list) and len(colors) == 0:
        return [0xffffff] * size
    elif isinstance(colors, list) and isinstance(colors[0], (str, bytes)):
        # Element symbols: map each one to its conventional atom color.
        return [get_atom_color(c) for c in colors]
    elif isinstance(colors, list) and isinstance(colors[0], (int, np.int32, np.int64, np.int16)):
        # We cast to 32 bit
        return [int(c) for c in colors]
    elif isinstance(colors, np.ndarray):
        # Recurse on the plain-list equivalent, forwarding every option.
        return process_colors(size, colors.tolist(), limits, palette, cmap)
    elif isinstance(colors, list) and isinstance(colors[0], (float, np.float32, np.float64)):
        # Scalar data: map through the colormap between vmin and vmax.
        if limits is None:
            vmin = min(colors)
            vmax = max(colors)
        else:
            vmin, vmax = limits

        norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
        if cmap is None:
            cmap = cm.get_cmap(palette)
        m = cm.ScalarMappable(norm=norm, cmap=cmap)
        return [rgbint_to_hex(c) for c in m.to_rgba(colors, bytes=True)[:, :3]]
    else:
        raise ValueError("Wrong color format : {}".format(type(colors)))
def process_sizes(size, sizes):
    """Normalize a user-supplied *sizes* spec into a per-item list.

    :param int size: number of items the size list must cover.
    :param sizes: None (defaults to 1.0), a single number broadcast to all
        items, or a per-item sequence of numbers. Numpy arrays are
        accepted for consistency with process_colors.
    :raises ValueError: when the sizes format is not recognized.
    """
    # Generalization: accept numpy input like process_colors does.
    if isinstance(sizes, np.ndarray):
        sizes = sizes.tolist()
    if sizes is None:
        return [1.0] * size
    if isinstance(sizes, (float, int)):
        return [sizes] * size
    elif isinstance(sizes, list) and len(sizes) == 0:
        return [1.0] * size
    elif isinstance(sizes, list) and isinstance(sizes[0], (int, float)):
        return sizes
    else:
        raise ValueError("Wrong sizes format")
|
gabrielelanaro/chemview | chemview/viewer.py | MolecularViewer.points | python | def points(self, size=1.0, highlight=None, colorlist=None, opacity=1.0):
if colorlist is None:
colorlist = [get_atom_color(t) for t in self.topology['atom_types']]
if highlight is not None:
if isinstance(highlight, int):
colorlist[highlight] = 0xff0000
if isinstance(highlight, (list, np.ndarray)):
for i in highlight:
colorlist[i] = 0xff0000
sizes = [size] * len(self.topology['atom_types'])
points = self.add_representation('points', {'coordinates': self.coordinates.astype('float32'),
'colors': colorlist,
'sizes': sizes,
'opacity': opacity})
# Update closure
def update(self=self, points=points):
self.update_representation(points, {'coordinates': self.coordinates.astype('float32')})
self.update_callbacks.append(update)
self.autozoom(self.coordinates) | Display the system as points.
:param float size: the size of the points. | train | https://github.com/gabrielelanaro/chemview/blob/2c9768dd23db99e59e27adff2a953bb8ee795fa3/chemview/viewer.py#L29-L56 | null | class MolecularViewer(RepresentationViewer):
coordinates = Any()
    def __init__(self, coordinates, topology, width=500, height=500):
        '''Create a Molecular Viewer widget to be displayed in IPython notebook.

        :param np.ndarray coordinates: A numpy array containing the 3D coordinates of the atoms to be displayed
        :param dict topology: A dict specifying the topology as described in the User Guide.
        :param int width: widget width in pixels.
        :param int height: widget height in pixels.
        '''
        super(MolecularViewer, self).__init__(width, height)
        # Callbacks re-run whenever self.coordinates is reassigned
        # (see _coordinates_changed), keeping representations in sync.
        self.update_callbacks = []
        self.coordinates = coordinates.astype('float32')
        self.topology = topology
        # Representation ids owned by toggle_axes, so they can be removed.
        self._axes_reps = []
def labels(self, text=None, coordinates=None, colorlist=None, sizes=None, fonts=None, opacity=1.0):
'''Display atomic labels for the system'''
if coordinates is None:
coordinates=self.coordinates
l=len(coordinates)
if text is None:
if len(self.topology.get('atom_types'))==l:
text=[self.topology['atom_types'][i]+str(i+1) for i in range(l)]
else:
text=[str(i+1) for i in range(l)]
text_representation = self.add_representation('text', {'coordinates': coordinates,
'text': text,
'colors': colorlist,
'sizes': sizes,
'fonts': fonts,
'opacity': opacity})
def update(self=self, text_representation=text_representation):
self.update_representation(text_representation, {'coordinates': coordinates})
self.update_callbacks.append(update)
def remove_labels(self):
'''Remove all atomic labels from the system'''
for rep_id in self.representations.keys():
if self.representations[rep_id]['rep_type']=='text' and rep_id not in self._axes_reps:
self.remove_representation(rep_id)
def toggle_axes(self, parameters = None):
'''Toggle axes [x,y,z] on and off for the current representation
Parameters: dictionary of parameters to control axes:
position/p: origin of axes
length/l: length of axis
offset/o: offset to place axis labels
axis_colors/ac: axis colors
text_colors/tc: label colors
radii/r: axis radii
text/t: label text
sizes/s: label sizes
fonts/f: label fonts'''
if len(self._axes_reps)>0:
for rep_id in self._axes_reps:
self.remove_representation(rep_id)
self._axes_reps = []
else:
if not isinstance(parameters,dict):
parameters={}
def defaults(pdict,keys,default,length=3,instance=(int,float)):
'''Helper function to generate default values and handle errors'''
for k in keys:
val=pdict.get(k)
if val!=None:
break
if val==None:
val=default
elif isinstance(val,instance) and length>1:
val = [val]*length
elif isinstance(val,(list,np.generic,np.ndarray)) and length>1:
if not all([isinstance(v,instance) for v in val]):
raise RuntimeError("Invalid type {t} for parameter {p}. Use {i}.".format(t=type(val),p=val,i=instance))
elif not isinstance(val,instance):
raise RuntimeError("Invalid type {t} for parameter {p}. Use {i}.".format(t=type(val),p=val,i=instance))
return val
p = defaults(parameters,['positions','position','p'],np.average(self.coordinates,0))
l = defaults(parameters,['lengths','length','l'],max([np.linalg.norm(x-p) for x in self.coordinates]),1)
o = defaults(parameters,['offsets','offset','o'],l*1.05,1)
ac = defaults(parameters,[a+c for a in ['axis_','a',''] for c in ['colors','colours','color','colour','c']],[0xff0000,0x00ff00,0x0000ff],3,(int,hex))
tc = defaults(parameters,[a+c for a in ['text_','t',''] for c in ['colors','colours','color','colour','c']],[0xff0000,0x00ff00,0x0000ff],3,(int,hex))
r = defaults(parameters,['radii','radius','r'],[0.005]*3,3)
t = defaults(parameters,['text','labels','t'],['X','Y','Z'],3,str)
s = defaults(parameters,['sizes','size','s'],[32]*3,3)
f = defaults(parameters,['fonts','font','f'],['Arial']*3,3,str)
starts=np.array([p,p,p],float)
ends=np.array([p+[l,0,0],p+[0,l,0],p+[0,0,l]],float)
axis_labels_coords=np.array([p+[o,0,0],p+[0,o,0],p+[0,0,o]],float)
a_rep=self.add_representation('cylinders',{"startCoords":starts,
"endCoords":ends,
"colors":ac,
"radii":r})
t_rep=self.add_representation('text',{"coordinates":axis_labels_coords,
"text":t,
"colors":tc,
"sizes":s,
"fonts":f})
self._axes_reps = [a_rep, t_rep]
def lines(self):
'''Display the system bonds as lines.
'''
if "bonds" not in self.topology:
return
bond_start, bond_end = zip(*self.topology['bonds'])
bond_start = np.array(bond_start)
bond_end = np.array(bond_end)
color_array = np.array([get_atom_color(t) for t in self.topology['atom_types']])
lines = self.add_representation('lines', {'startCoords': self.coordinates[bond_start],
'endCoords': self.coordinates[bond_end],
'startColors': color_array[bond_start].tolist(),
'endColors': color_array[bond_end].tolist()})
def update(self=self, lines=lines):
bond_start, bond_end = zip(*self.topology['bonds'])
bond_start = np.array(bond_start)
bond_end = np.array(bond_end)
self.update_representation(lines, {'startCoords': self.coordinates[bond_start],
'endCoords': self.coordinates[bond_end]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
    def wireframe(self, pointsize=0.2, opacity=1.0):
        '''Display atoms as points of size *pointsize* and bonds as lines.

        :param float pointsize: size of each atom point.
        :param float opacity: opacity of the atom points in [0, 1].
        '''
        self.points(pointsize, opacity=opacity)
        self.lines()
    def ball_and_sticks(self, ball_radius=0.05, stick_radius=0.02, colorlist=None, opacity=1.0):
        """Display the system using a ball and stick representation.

        :param float ball_radius: radius of each atom sphere.
        :param float stick_radius: radius of each bond cylinder.
        :param colorlist: per-atom colors; defaults to standard atom colors.
        :param float opacity: opacity of spheres and cylinders in [0, 1].
        """
        # Add the spheres
        if colorlist is None:
            colorlist = [get_atom_color(t) for t in self.topology['atom_types']]
        sizes = [ball_radius] * len(self.topology['atom_types'])
        spheres = self.add_representation('spheres', {'coordinates': self.coordinates.astype('float32'),
                                                      'colors': colorlist,
                                                      'radii': sizes,
                                                      'opacity': opacity})
        def update(self=self, spheres=spheres):
            self.update_representation(spheres, {'coordinates': self.coordinates.astype('float32')})
        self.update_callbacks.append(update)
        # Add the cylinders
        if 'bonds' in self.topology and self.topology['bonds'] is not None:
            start_idx, end_idx = zip(*self.topology['bonds'])
            # Added this so bonds don't go through atoms when opacity<1.0:
            # each bond is shortened at both ends by the chord offset where
            # the cylinder surface meets the sphere surface.
            new_start_coords = []
            new_end_coords = []
            for bond_ind, bond in enumerate(self.topology['bonds']):
                trim_amt = (ball_radius**2 - stick_radius**2)**0.5 if ball_radius>stick_radius else 0
                start_coord = self.coordinates[bond[0]]
                end_coord = self.coordinates[bond[1]]
                vec = (end_coord-start_coord)/np.linalg.norm(end_coord-start_coord)
                new_start_coords.append(start_coord+vec*trim_amt)
                new_end_coords.append(end_coord-vec*trim_amt)
            cylinders = self.add_representation('cylinders', {'startCoords': np.array(new_start_coords,dtype='float32'),
                                                              'endCoords': np.array(new_end_coords,dtype='float32'),
                                                              'colors': [0xcccccc] * len(new_start_coords),
                                                              'radii': [stick_radius] * len(new_start_coords),
                                                              'opacity': opacity})
            # Update closure
            # NOTE(review): unlike the initial draw, this update does not
            # re-apply the trim offsets -- confirm that is intentional.
            def update(self=self, rep=cylinders, start_idx=start_idx, end_idx=end_idx):
                self.update_representation(rep, {'startCoords': self.coordinates[list(start_idx)],
                                                 'endCoords': self.coordinates[list(end_idx)]})
            self.update_callbacks.append(update)
        self.autozoom(self.coordinates)
    def line_ribbon(self):
        '''Display the protein secondary structure as a white line that
        passes through the backbone chain.
        '''
        # Control points are the CA (C alphas)
        backbone = np.array(self.topology['atom_names']) == 'CA'
        smoothline = self.add_representation('smoothline', {'coordinates': self.coordinates[backbone],
                                                            'color': 0xffffff})
        # Re-thread the line through the CA atoms on coordinate updates.
        def update(self=self, smoothline=smoothline):
            self.update_representation(smoothline, {'coordinates': self.coordinates[backbone]})
        self.update_callbacks.append(update)
        self.autozoom(self.coordinates)
    def cylinder_and_strand(self):
        '''Display the protein secondary structure as a white,
        solid tube and the alpha-helices as yellow cylinders.
        '''
        top = self.topology
        # We build a mini-state machine to find the
        # start end of helices and such
        in_helix = False
        helices_starts = []
        helices_ends = []
        coils = []
        coil = []
        for i, typ in enumerate(top['secondary_structure']):
            if typ == 'H':
                if in_helix == False:
                    # We become helices
                    helices_starts.append(top['residue_indices'][i][0])
                    in_helix = True
                    # We end the previous coil
                    coil.append(top['residue_indices'][i][0])
            else:
                if in_helix == True:
                    # We stop being helices
                    helices_ends.append(top['residue_indices'][i][0])
                    # We start a new coil
                    coil = []
                    coils.append(coil)
                    in_helix = False
                # We add control points
                coil.append(top['residue_indices'][i][0])
                [coil.append(j) for j in top['residue_indices'][i] if top['atom_names'][j] == 'CA']
        # We add the coils
        coil_representations = []
        for control_points in coils:
            rid = self.add_representation('smoothtube', {'coordinates': self.coordinates[control_points],
                                                         'radius': 0.05,
                                                         'resolution': 4,
                                                         'color': 0xffffff})
            coil_representations.append(rid)
        start_idx, end_idx = helices_starts, helices_ends
        cylinders = self.add_representation('cylinders', {'startCoords': self.coordinates[list(start_idx)],
                                                          'endCoords': self.coordinates[list(end_idx)],
                                                          'colors': [0xffff00] * len(self.coordinates),
                                                          'radii': [0.15] * len(self.coordinates)})
        # NOTE(review): the control_points default below captures only the
        # last loop value; it is shadowed by the for-loop inside, so it is
        # dead -- confirm before removing.
        def update(self=self, cylinders=cylinders, coils=coils,
                   coil_representations=coil_representations,
                   start_idx=start_idx, end_idx=end_idx, control_points=control_points):
            for i, control_points in enumerate(coils):
                rid = self.update_representation(coil_representations[i],
                                                 {'coordinates': self.coordinates[control_points]})
            self.update_representation(cylinders, {'startCoords': self.coordinates[list(start_idx)],
                                                   'endCoords': self.coordinates[list(end_idx)]})
        self.update_callbacks.append(update)
        self.autozoom(self.coordinates)
    def cartoon(self, cmap=None):
        '''Display a protein secondary structure as a pymol-like cartoon representation.

        :param cmap: is a dictionary that maps the secondary type
            (H=helix, E=sheet, C=coil) to a hexadecimal color (0xffffff for white)
        '''
        # Parse secondary structure
        top = self.topology
        geom = gg.GeomProteinCartoon(gg.Aes(xyz=self.coordinates,
                                            types=top['atom_names'],
                                            secondary_type=top['secondary_structure']),
                                     cmap=cmap)
        primitives = geom.produce(gg.Aes())
        ids = [self.add_representation(r['rep_type'], r['options']) for r in primitives]

        # NOTE(review): this passes whole primitive dicts (rep_id/rep_type/
        # options) as the update options rather than just the 'options'
        # payload -- confirm update_representation tolerates that.
        def update(self=self, geom=geom, ids=ids):
            primitives = geom.produce(gg.Aes(xyz=self.coordinates))
            [self.update_representation(id_, rep_options)
             for id_, rep_options in zip(ids, primitives)]
        self.update_callbacks.append(update)
        self.autozoom(self.coordinates)
def _coordinates_changed(self, name, old, new):
[c() for c in self.update_callbacks]
    def add_isosurface(self, function, isolevel=0.3, resolution=32, style="wireframe", color=0xffffff):
        '''Add an isosurface to the current scene.

        :param callable function: A function that takes x, y, z coordinates as input and is broadcastable using numpy. Typically simple
            functions that involve standard arithmetic operations and functions
            such as ``x**2 + y**2 + z**2`` or ``np.exp(x**2 + y**2 + z**2)`` will work. If not sure, you can first
            pass the function through ``numpy.vectorize``.
            Example: ``mv.add_isosurface(np.vectorize(f))``
        :param float isolevel: The value for which the function should be constant.
        :param int resolution: The number of grid point to use for the surface. An high value will give better quality but lower performance.
        :param str style: The surface style, choose between ``solid``, ``wireframe`` and ``transparent``.
        :param int color: The color given as an hexadecimal integer. Example: ``0xffffff`` is white.
        '''
        avail_styles = ['wireframe', 'solid', 'transparent']
        if style not in avail_styles:
            raise ValueError('style must be in ' + str(avail_styles))
        # We want to make a container that contains the whole molecule
        # and surface
        area_min = self.coordinates.min(axis=0) - 0.2
        area_max = self.coordinates.max(axis=0) + 0.2
        x = np.linspace(area_min[0], area_max[0], resolution)
        y = np.linspace(area_min[1], area_max[1], resolution)
        z = np.linspace(area_min[2], area_max[2], resolution)
        xv, yv, zv = np.meshgrid(x, y, z)
        spacing = np.array((area_max - area_min)/resolution)
        if isolevel >= 0:
            triangles = marching_cubes(function(xv, yv, zv), isolevel)
        else: # Negative isolevels flip the triangle winding order; negate
              # both the field and the level to keep the surface oriented.
            triangles = marching_cubes(-function(xv, yv, zv), -isolevel)
        if len(triangles) == 0:
            ## NO surface
            return
        # Flatten triangle soup into vertex/face arrays (3 verts per face).
        faces = []
        verts = []
        for i, t in enumerate(triangles):
            faces.append([i * 3, i * 3 +1, i * 3 + 2])
            verts.extend(t)
        faces = np.array(faces)
        # Map grid indices back into world coordinates.
        verts = area_min + spacing/2 + np.array(verts)*spacing
        rep_id = self.add_representation('surface', {'verts': verts.astype('float32'),
                                                     'faces': faces.astype('int32'),
                                                     'style': style,
                                                     'color': color})
        self.autozoom(verts)
    def add_isosurface_grid_data(self, data, origin, extent, resolution,
                                 isolevel=0.3, scale=10,
                                 style="wireframe", color=0xffffff):
        """
        Add an isosurface to the current scene using pre-computed data on a grid.

        :param data: 3D grid of scalar values to contour.
        :param origin: world-space position of the grid origin.
        :param extent: world-space size of the grid.
        :param resolution: number of grid points along each axis.
        :param float isolevel: contour value; negative values flip the field.
        :param scale: extra divisor applied to the grid spacing.
        :param str style: 'solid', 'wireframe' or 'transparent'.
        :param int color: surface color as a 0xRRGGBB integer.
        """
        spacing = np.array(extent/resolution)/scale
        if isolevel >= 0:
            triangles = marching_cubes(data, isolevel)
        else:
            # Negate field and level to preserve triangle orientation.
            triangles = marching_cubes(-data, -isolevel)
        # Flatten triangle soup into vertex/face arrays (3 verts per face).
        faces = []
        verts = []
        for i, t in enumerate(triangles):
            faces.append([i * 3, i * 3 +1, i * 3 + 2])
            verts.extend(t)
        faces = np.array(faces)
        # Map grid indices back into world coordinates.
        verts = origin + spacing/2 + np.array(verts)*spacing
        rep_id = self.add_representation('surface', {'verts': verts.astype('float32'),
                                                     'faces': faces.astype('int32'),
                                                     'style': style,
                                                     'color': color})
        self.autozoom(verts)
|
gabrielelanaro/chemview | chemview/viewer.py | MolecularViewer.labels | python | def labels(self, text=None, coordinates=None, colorlist=None, sizes=None, fonts=None, opacity=1.0):
'''Display atomic labels for the system'''
if coordinates is None:
coordinates=self.coordinates
l=len(coordinates)
if text is None:
if len(self.topology.get('atom_types'))==l:
text=[self.topology['atom_types'][i]+str(i+1) for i in range(l)]
else:
text=[str(i+1) for i in range(l)]
text_representation = self.add_representation('text', {'coordinates': coordinates,
'text': text,
'colors': colorlist,
'sizes': sizes,
'fonts': fonts,
'opacity': opacity})
def update(self=self, text_representation=text_representation):
self.update_representation(text_representation, {'coordinates': coordinates})
self.update_callbacks.append(update) | Display atomic labels for the system | train | https://github.com/gabrielelanaro/chemview/blob/2c9768dd23db99e59e27adff2a953bb8ee795fa3/chemview/viewer.py#L58-L78 | null | class MolecularViewer(RepresentationViewer):
coordinates = Any()
    def __init__(self, coordinates, topology, width=500, height=500):
        '''Create a Molecular Viewer widget to be displayed in IPython notebook.

        :param np.ndarray coordinates: A numpy array containing the 3D coordinates of the atoms to be displayed
        :param dict topology: A dict specifying the topology as described in the User Guide.
        :param int width: widget width in pixels.
        :param int height: widget height in pixels.
        '''
        super(MolecularViewer, self).__init__(width, height)
        # Callbacks re-run whenever self.coordinates is reassigned
        # (see _coordinates_changed), keeping representations in sync.
        self.update_callbacks = []
        self.coordinates = coordinates.astype('float32')
        self.topology = topology
        # Representation ids owned by toggle_axes, so they can be removed.
        self._axes_reps = []
def points(self, size=1.0, highlight=None, colorlist=None, opacity=1.0):
"""Display the system as points.
:param float size: the size of the points.
"""
if colorlist is None:
colorlist = [get_atom_color(t) for t in self.topology['atom_types']]
if highlight is not None:
if isinstance(highlight, int):
colorlist[highlight] = 0xff0000
if isinstance(highlight, (list, np.ndarray)):
for i in highlight:
colorlist[i] = 0xff0000
sizes = [size] * len(self.topology['atom_types'])
points = self.add_representation('points', {'coordinates': self.coordinates.astype('float32'),
'colors': colorlist,
'sizes': sizes,
'opacity': opacity})
# Update closure
def update(self=self, points=points):
self.update_representation(points, {'coordinates': self.coordinates.astype('float32')})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def remove_labels(self):
'''Remove all atomic labels from the system'''
for rep_id in self.representations.keys():
if self.representations[rep_id]['rep_type']=='text' and rep_id not in self._axes_reps:
self.remove_representation(rep_id)
def toggle_axes(self, parameters = None):
'''Toggle axes [x,y,z] on and off for the current representation
Parameters: dictionary of parameters to control axes:
position/p: origin of axes
length/l: length of axis
offset/o: offset to place axis labels
axis_colors/ac: axis colors
text_colors/tc: label colors
radii/r: axis radii
text/t: label text
sizes/s: label sizes
fonts/f: label fonts'''
if len(self._axes_reps)>0:
for rep_id in self._axes_reps:
self.remove_representation(rep_id)
self._axes_reps = []
else:
if not isinstance(parameters,dict):
parameters={}
def defaults(pdict,keys,default,length=3,instance=(int,float)):
'''Helper function to generate default values and handle errors'''
for k in keys:
val=pdict.get(k)
if val!=None:
break
if val==None:
val=default
elif isinstance(val,instance) and length>1:
val = [val]*length
elif isinstance(val,(list,np.generic,np.ndarray)) and length>1:
if not all([isinstance(v,instance) for v in val]):
raise RuntimeError("Invalid type {t} for parameter {p}. Use {i}.".format(t=type(val),p=val,i=instance))
elif not isinstance(val,instance):
raise RuntimeError("Invalid type {t} for parameter {p}. Use {i}.".format(t=type(val),p=val,i=instance))
return val
p = defaults(parameters,['positions','position','p'],np.average(self.coordinates,0))
l = defaults(parameters,['lengths','length','l'],max([np.linalg.norm(x-p) for x in self.coordinates]),1)
o = defaults(parameters,['offsets','offset','o'],l*1.05,1)
ac = defaults(parameters,[a+c for a in ['axis_','a',''] for c in ['colors','colours','color','colour','c']],[0xff0000,0x00ff00,0x0000ff],3,(int,hex))
tc = defaults(parameters,[a+c for a in ['text_','t',''] for c in ['colors','colours','color','colour','c']],[0xff0000,0x00ff00,0x0000ff],3,(int,hex))
r = defaults(parameters,['radii','radius','r'],[0.005]*3,3)
t = defaults(parameters,['text','labels','t'],['X','Y','Z'],3,str)
s = defaults(parameters,['sizes','size','s'],[32]*3,3)
f = defaults(parameters,['fonts','font','f'],['Arial']*3,3,str)
starts=np.array([p,p,p],float)
ends=np.array([p+[l,0,0],p+[0,l,0],p+[0,0,l]],float)
axis_labels_coords=np.array([p+[o,0,0],p+[0,o,0],p+[0,0,o]],float)
a_rep=self.add_representation('cylinders',{"startCoords":starts,
"endCoords":ends,
"colors":ac,
"radii":r})
t_rep=self.add_representation('text',{"coordinates":axis_labels_coords,
"text":t,
"colors":tc,
"sizes":s,
"fonts":f})
self._axes_reps = [a_rep, t_rep]
def lines(self):
'''Display the system bonds as lines.
'''
if "bonds" not in self.topology:
return
bond_start, bond_end = zip(*self.topology['bonds'])
bond_start = np.array(bond_start)
bond_end = np.array(bond_end)
color_array = np.array([get_atom_color(t) for t in self.topology['atom_types']])
lines = self.add_representation('lines', {'startCoords': self.coordinates[bond_start],
'endCoords': self.coordinates[bond_end],
'startColors': color_array[bond_start].tolist(),
'endColors': color_array[bond_end].tolist()})
def update(self=self, lines=lines):
bond_start, bond_end = zip(*self.topology['bonds'])
bond_start = np.array(bond_start)
bond_end = np.array(bond_end)
self.update_representation(lines, {'startCoords': self.coordinates[bond_start],
'endCoords': self.coordinates[bond_end]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def wireframe(self, pointsize=0.2, opacity=1.0):
'''Display atoms as points of size *pointsize* and bonds as lines.'''
self.points(pointsize, opacity=opacity)
self.lines()
def ball_and_sticks(self, ball_radius=0.05, stick_radius=0.02, colorlist=None, opacity=1.0):
"""Display the system using a ball and stick representation.
"""
# Add the spheres
if colorlist is None:
colorlist = [get_atom_color(t) for t in self.topology['atom_types']]
sizes = [ball_radius] * len(self.topology['atom_types'])
spheres = self.add_representation('spheres', {'coordinates': self.coordinates.astype('float32'),
'colors': colorlist,
'radii': sizes,
'opacity': opacity})
def update(self=self, spheres=spheres):
self.update_representation(spheres, {'coordinates': self.coordinates.astype('float32')})
self.update_callbacks.append(update)
# Add the cylinders
if 'bonds' in self.topology and self.topology['bonds'] is not None:
start_idx, end_idx = zip(*self.topology['bonds'])
# Added this so bonds don't go through atoms when opacity<1.0
new_start_coords = []
new_end_coords = []
for bond_ind, bond in enumerate(self.topology['bonds']):
trim_amt = (ball_radius**2 - stick_radius**2)**0.5 if ball_radius>stick_radius else 0
start_coord = self.coordinates[bond[0]]
end_coord = self.coordinates[bond[1]]
vec = (end_coord-start_coord)/np.linalg.norm(end_coord-start_coord)
new_start_coords.append(start_coord+vec*trim_amt)
new_end_coords.append(end_coord-vec*trim_amt)
cylinders = self.add_representation('cylinders', {'startCoords': np.array(new_start_coords,dtype='float32'),
'endCoords': np.array(new_end_coords,dtype='float32'),
'colors': [0xcccccc] * len(new_start_coords),
'radii': [stick_radius] * len(new_start_coords),
'opacity': opacity})
# Update closure
def update(self=self, rep=cylinders, start_idx=start_idx, end_idx=end_idx):
self.update_representation(rep, {'startCoords': self.coordinates[list(start_idx)],
'endCoords': self.coordinates[list(end_idx)]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def line_ribbon(self):
'''Display the protein secondary structure as a white lines that passes through the
backbone chain.
'''
# Control points are the CA (C alphas)
backbone = np.array(self.topology['atom_names']) == 'CA'
smoothline = self.add_representation('smoothline', {'coordinates': self.coordinates[backbone],
'color': 0xffffff})
def update(self=self, smoothline=smoothline):
self.update_representation(smoothline, {'coordinates': self.coordinates[backbone]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def cylinder_and_strand(self):
'''Display the protein secondary structure as a white,
solid tube and the alpha-helices as yellow cylinders.
'''
top = self.topology
# We build a mini-state machine to find the
# start end of helices and such
in_helix = False
helices_starts = []
helices_ends = []
coils = []
coil = []
for i, typ in enumerate(top['secondary_structure']):
if typ == 'H':
if in_helix == False:
# We become helices
helices_starts.append(top['residue_indices'][i][0])
in_helix = True
# We end the previous coil
coil.append(top['residue_indices'][i][0])
else:
if in_helix == True:
# We stop being helices
helices_ends.append(top['residue_indices'][i][0])
# We start a new coil
coil = []
coils.append(coil)
in_helix = False
# We add control points
coil.append(top['residue_indices'][i][0])
[coil.append(j) for j in top['residue_indices'][i] if top['atom_names'][j] == 'CA']
# We add the coils
coil_representations = []
for control_points in coils:
rid = self.add_representation('smoothtube', {'coordinates': self.coordinates[control_points],
'radius': 0.05,
'resolution': 4,
'color': 0xffffff})
coil_representations.append(rid)
start_idx, end_idx = helices_starts, helices_ends
cylinders = self.add_representation('cylinders', {'startCoords': self.coordinates[list(start_idx)],
'endCoords': self.coordinates[list(end_idx)],
'colors': [0xffff00] * len(self.coordinates),
'radii': [0.15] * len(self.coordinates)})
def update(self=self, cylinders=cylinders, coils=coils,
coil_representations=coil_representations,
start_idx=start_idx, end_idx=end_idx, control_points=control_points):
for i, control_points in enumerate(coils):
rid = self.update_representation(coil_representations[i],
{'coordinates': self.coordinates[control_points]})
self.update_representation(cylinders, {'startCoords': self.coordinates[list(start_idx)],
'endCoords': self.coordinates[list(end_idx)]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def cartoon(self, cmap=None):
'''Display a protein secondary structure as a pymol-like cartoon representation.
:param cmap: is a dictionary that maps the secondary type
(H=helix, E=sheet, C=coil) to a hexadecimal color (0xffffff for white)
'''
# Parse secondary structure
top = self.topology
geom = gg.GeomProteinCartoon(gg.Aes(xyz=self.coordinates,
types=top['atom_names'],
secondary_type=top['secondary_structure']),
cmap=cmap)
primitives = geom.produce(gg.Aes())
ids = [self.add_representation(r['rep_type'], r['options']) for r in primitives]
def update(self=self, geom=geom, ids=ids):
primitives = geom.produce(gg.Aes(xyz=self.coordinates))
[self.update_representation(id_, rep_options)
for id_, rep_options in zip(ids, primitives)]
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def _coordinates_changed(self, name, old, new):
[c() for c in self.update_callbacks]
def add_isosurface(self, function, isolevel=0.3, resolution=32, style="wireframe", color=0xffffff):
'''Add an isosurface to the current scene.
:param callable function: A function that takes x, y, z coordinates as input and is broadcastable using numpy. Typically simple
functions that involve standard arithmetic operations and functions
such as ``x**2 + y**2 + z**2`` or ``np.exp(x**2 + y**2 + z**2)`` will work. If not sure, you can first
pass the function through ``numpy.vectorize``.\
Example: ``mv.add_isosurface(np.vectorize(f))``
:param float isolevel: The value for which the function should be constant.
:param int resolution: The number of grid point to use for the surface. An high value will give better quality but lower performance.
:param str style: The surface style, choose between ``solid``, ``wireframe`` and ``transparent``.
:param int color: The color given as an hexadecimal integer. Example: ``0xffffff`` is white.
'''
avail_styles = ['wireframe', 'solid', 'transparent']
if style not in avail_styles:
raise ValueError('style must be in ' + str(avail_styles))
# We want to make a container that contains the whole molecule
# and surface
area_min = self.coordinates.min(axis=0) - 0.2
area_max = self.coordinates.max(axis=0) + 0.2
x = np.linspace(area_min[0], area_max[0], resolution)
y = np.linspace(area_min[1], area_max[1], resolution)
z = np.linspace(area_min[2], area_max[2], resolution)
xv, yv, zv = np.meshgrid(x, y, z)
spacing = np.array((area_max - area_min)/resolution)
if isolevel >= 0:
triangles = marching_cubes(function(xv, yv, zv), isolevel)
else: # Wrong traingle unwinding roder -- god only knows why
triangles = marching_cubes(-function(xv, yv, zv), -isolevel)
if len(triangles) == 0:
## NO surface
return
faces = []
verts = []
for i, t in enumerate(triangles):
faces.append([i * 3, i * 3 +1, i * 3 + 2])
verts.extend(t)
faces = np.array(faces)
verts = area_min + spacing/2 + np.array(verts)*spacing
rep_id = self.add_representation('surface', {'verts': verts.astype('float32'),
'faces': faces.astype('int32'),
'style': style,
'color': color})
self.autozoom(verts)
def add_isosurface_grid_data(self, data, origin, extent, resolution,
isolevel=0.3, scale=10,
style="wireframe", color=0xffffff):
"""
Add an isosurface to current scence using pre-computed data on a grid
"""
spacing = np.array(extent/resolution)/scale
if isolevel >= 0:
triangles = marching_cubes(data, isolevel)
else:
triangles = marching_cubes(-data, -isolevel)
faces = []
verts = []
for i, t in enumerate(triangles):
faces.append([i * 3, i * 3 +1, i * 3 + 2])
verts.extend(t)
faces = np.array(faces)
verts = origin + spacing/2 + np.array(verts)*spacing
rep_id = self.add_representation('surface', {'verts': verts.astype('float32'),
'faces': faces.astype('int32'),
'style': style,
'color': color})
self.autozoom(verts)
|
gabrielelanaro/chemview | chemview/viewer.py | MolecularViewer.remove_labels | python | def remove_labels(self):
'''Remove all atomic labels from the system'''
for rep_id in self.representations.keys():
if self.representations[rep_id]['rep_type']=='text' and rep_id not in self._axes_reps:
self.remove_representation(rep_id) | Remove all atomic labels from the system | train | https://github.com/gabrielelanaro/chemview/blob/2c9768dd23db99e59e27adff2a953bb8ee795fa3/chemview/viewer.py#L80-L84 | null | class MolecularViewer(RepresentationViewer):
coordinates = Any()
def __init__(self, coordinates, topology, width=500, height=500):
'''Create a Molecular Viewer widget to be displayed in IPython notebook.
:param np.ndarray coordinates: A numpy array containing the 3D coordinates of the atoms to be displayed
:param dict topology: A dict specifying the topology as described in the User Guide.
'''
super(MolecularViewer, self).__init__(width, height)
self.update_callbacks = []
self.coordinates = coordinates.astype('float32')
self.topology = topology
self._axes_reps = []
def points(self, size=1.0, highlight=None, colorlist=None, opacity=1.0):
"""Display the system as points.
:param float size: the size of the points.
"""
if colorlist is None:
colorlist = [get_atom_color(t) for t in self.topology['atom_types']]
if highlight is not None:
if isinstance(highlight, int):
colorlist[highlight] = 0xff0000
if isinstance(highlight, (list, np.ndarray)):
for i in highlight:
colorlist[i] = 0xff0000
sizes = [size] * len(self.topology['atom_types'])
points = self.add_representation('points', {'coordinates': self.coordinates.astype('float32'),
'colors': colorlist,
'sizes': sizes,
'opacity': opacity})
# Update closure
def update(self=self, points=points):
self.update_representation(points, {'coordinates': self.coordinates.astype('float32')})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def labels(self, text=None, coordinates=None, colorlist=None, sizes=None, fonts=None, opacity=1.0):
'''Display atomic labels for the system'''
if coordinates is None:
coordinates=self.coordinates
l=len(coordinates)
if text is None:
if len(self.topology.get('atom_types'))==l:
text=[self.topology['atom_types'][i]+str(i+1) for i in range(l)]
else:
text=[str(i+1) for i in range(l)]
text_representation = self.add_representation('text', {'coordinates': coordinates,
'text': text,
'colors': colorlist,
'sizes': sizes,
'fonts': fonts,
'opacity': opacity})
def update(self=self, text_representation=text_representation):
self.update_representation(text_representation, {'coordinates': coordinates})
self.update_callbacks.append(update)
def toggle_axes(self, parameters = None):
'''Toggle axes [x,y,z] on and off for the current representation
Parameters: dictionary of parameters to control axes:
position/p: origin of axes
length/l: length of axis
offset/o: offset to place axis labels
axis_colors/ac: axis colors
text_colors/tc: label colors
radii/r: axis radii
text/t: label text
sizes/s: label sizes
fonts/f: label fonts'''
if len(self._axes_reps)>0:
for rep_id in self._axes_reps:
self.remove_representation(rep_id)
self._axes_reps = []
else:
if not isinstance(parameters,dict):
parameters={}
def defaults(pdict,keys,default,length=3,instance=(int,float)):
'''Helper function to generate default values and handle errors'''
for k in keys:
val=pdict.get(k)
if val!=None:
break
if val==None:
val=default
elif isinstance(val,instance) and length>1:
val = [val]*length
elif isinstance(val,(list,np.generic,np.ndarray)) and length>1:
if not all([isinstance(v,instance) for v in val]):
raise RuntimeError("Invalid type {t} for parameter {p}. Use {i}.".format(t=type(val),p=val,i=instance))
elif not isinstance(val,instance):
raise RuntimeError("Invalid type {t} for parameter {p}. Use {i}.".format(t=type(val),p=val,i=instance))
return val
p = defaults(parameters,['positions','position','p'],np.average(self.coordinates,0))
l = defaults(parameters,['lengths','length','l'],max([np.linalg.norm(x-p) for x in self.coordinates]),1)
o = defaults(parameters,['offsets','offset','o'],l*1.05,1)
ac = defaults(parameters,[a+c for a in ['axis_','a',''] for c in ['colors','colours','color','colour','c']],[0xff0000,0x00ff00,0x0000ff],3,(int,hex))
tc = defaults(parameters,[a+c for a in ['text_','t',''] for c in ['colors','colours','color','colour','c']],[0xff0000,0x00ff00,0x0000ff],3,(int,hex))
r = defaults(parameters,['radii','radius','r'],[0.005]*3,3)
t = defaults(parameters,['text','labels','t'],['X','Y','Z'],3,str)
s = defaults(parameters,['sizes','size','s'],[32]*3,3)
f = defaults(parameters,['fonts','font','f'],['Arial']*3,3,str)
starts=np.array([p,p,p],float)
ends=np.array([p+[l,0,0],p+[0,l,0],p+[0,0,l]],float)
axis_labels_coords=np.array([p+[o,0,0],p+[0,o,0],p+[0,0,o]],float)
a_rep=self.add_representation('cylinders',{"startCoords":starts,
"endCoords":ends,
"colors":ac,
"radii":r})
t_rep=self.add_representation('text',{"coordinates":axis_labels_coords,
"text":t,
"colors":tc,
"sizes":s,
"fonts":f})
self._axes_reps = [a_rep, t_rep]
def lines(self):
'''Display the system bonds as lines.
'''
if "bonds" not in self.topology:
return
bond_start, bond_end = zip(*self.topology['bonds'])
bond_start = np.array(bond_start)
bond_end = np.array(bond_end)
color_array = np.array([get_atom_color(t) for t in self.topology['atom_types']])
lines = self.add_representation('lines', {'startCoords': self.coordinates[bond_start],
'endCoords': self.coordinates[bond_end],
'startColors': color_array[bond_start].tolist(),
'endColors': color_array[bond_end].tolist()})
def update(self=self, lines=lines):
bond_start, bond_end = zip(*self.topology['bonds'])
bond_start = np.array(bond_start)
bond_end = np.array(bond_end)
self.update_representation(lines, {'startCoords': self.coordinates[bond_start],
'endCoords': self.coordinates[bond_end]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def wireframe(self, pointsize=0.2, opacity=1.0):
'''Display atoms as points of size *pointsize* and bonds as lines.'''
self.points(pointsize, opacity=opacity)
self.lines()
def ball_and_sticks(self, ball_radius=0.05, stick_radius=0.02, colorlist=None, opacity=1.0):
"""Display the system using a ball and stick representation.
"""
# Add the spheres
if colorlist is None:
colorlist = [get_atom_color(t) for t in self.topology['atom_types']]
sizes = [ball_radius] * len(self.topology['atom_types'])
spheres = self.add_representation('spheres', {'coordinates': self.coordinates.astype('float32'),
'colors': colorlist,
'radii': sizes,
'opacity': opacity})
def update(self=self, spheres=spheres):
self.update_representation(spheres, {'coordinates': self.coordinates.astype('float32')})
self.update_callbacks.append(update)
# Add the cylinders
if 'bonds' in self.topology and self.topology['bonds'] is not None:
start_idx, end_idx = zip(*self.topology['bonds'])
# Added this so bonds don't go through atoms when opacity<1.0
new_start_coords = []
new_end_coords = []
for bond_ind, bond in enumerate(self.topology['bonds']):
trim_amt = (ball_radius**2 - stick_radius**2)**0.5 if ball_radius>stick_radius else 0
start_coord = self.coordinates[bond[0]]
end_coord = self.coordinates[bond[1]]
vec = (end_coord-start_coord)/np.linalg.norm(end_coord-start_coord)
new_start_coords.append(start_coord+vec*trim_amt)
new_end_coords.append(end_coord-vec*trim_amt)
cylinders = self.add_representation('cylinders', {'startCoords': np.array(new_start_coords,dtype='float32'),
'endCoords': np.array(new_end_coords,dtype='float32'),
'colors': [0xcccccc] * len(new_start_coords),
'radii': [stick_radius] * len(new_start_coords),
'opacity': opacity})
# Update closure
def update(self=self, rep=cylinders, start_idx=start_idx, end_idx=end_idx):
self.update_representation(rep, {'startCoords': self.coordinates[list(start_idx)],
'endCoords': self.coordinates[list(end_idx)]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def line_ribbon(self):
'''Display the protein secondary structure as a white lines that passes through the
backbone chain.
'''
# Control points are the CA (C alphas)
backbone = np.array(self.topology['atom_names']) == 'CA'
smoothline = self.add_representation('smoothline', {'coordinates': self.coordinates[backbone],
'color': 0xffffff})
def update(self=self, smoothline=smoothline):
self.update_representation(smoothline, {'coordinates': self.coordinates[backbone]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def cylinder_and_strand(self):
'''Display the protein secondary structure as a white,
solid tube and the alpha-helices as yellow cylinders.
'''
top = self.topology
# We build a mini-state machine to find the
# start end of helices and such
in_helix = False
helices_starts = []
helices_ends = []
coils = []
coil = []
for i, typ in enumerate(top['secondary_structure']):
if typ == 'H':
if in_helix == False:
# We become helices
helices_starts.append(top['residue_indices'][i][0])
in_helix = True
# We end the previous coil
coil.append(top['residue_indices'][i][0])
else:
if in_helix == True:
# We stop being helices
helices_ends.append(top['residue_indices'][i][0])
# We start a new coil
coil = []
coils.append(coil)
in_helix = False
# We add control points
coil.append(top['residue_indices'][i][0])
[coil.append(j) for j in top['residue_indices'][i] if top['atom_names'][j] == 'CA']
# We add the coils
coil_representations = []
for control_points in coils:
rid = self.add_representation('smoothtube', {'coordinates': self.coordinates[control_points],
'radius': 0.05,
'resolution': 4,
'color': 0xffffff})
coil_representations.append(rid)
start_idx, end_idx = helices_starts, helices_ends
cylinders = self.add_representation('cylinders', {'startCoords': self.coordinates[list(start_idx)],
'endCoords': self.coordinates[list(end_idx)],
'colors': [0xffff00] * len(self.coordinates),
'radii': [0.15] * len(self.coordinates)})
def update(self=self, cylinders=cylinders, coils=coils,
coil_representations=coil_representations,
start_idx=start_idx, end_idx=end_idx, control_points=control_points):
for i, control_points in enumerate(coils):
rid = self.update_representation(coil_representations[i],
{'coordinates': self.coordinates[control_points]})
self.update_representation(cylinders, {'startCoords': self.coordinates[list(start_idx)],
'endCoords': self.coordinates[list(end_idx)]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def cartoon(self, cmap=None):
'''Display a protein secondary structure as a pymol-like cartoon representation.
:param cmap: is a dictionary that maps the secondary type
(H=helix, E=sheet, C=coil) to a hexadecimal color (0xffffff for white)
'''
# Parse secondary structure
top = self.topology
geom = gg.GeomProteinCartoon(gg.Aes(xyz=self.coordinates,
types=top['atom_names'],
secondary_type=top['secondary_structure']),
cmap=cmap)
primitives = geom.produce(gg.Aes())
ids = [self.add_representation(r['rep_type'], r['options']) for r in primitives]
def update(self=self, geom=geom, ids=ids):
primitives = geom.produce(gg.Aes(xyz=self.coordinates))
[self.update_representation(id_, rep_options)
for id_, rep_options in zip(ids, primitives)]
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def _coordinates_changed(self, name, old, new):
[c() for c in self.update_callbacks]
def add_isosurface(self, function, isolevel=0.3, resolution=32, style="wireframe", color=0xffffff):
'''Add an isosurface to the current scene.
:param callable function: A function that takes x, y, z coordinates as input and is broadcastable using numpy. Typically simple
functions that involve standard arithmetic operations and functions
such as ``x**2 + y**2 + z**2`` or ``np.exp(x**2 + y**2 + z**2)`` will work. If not sure, you can first
pass the function through ``numpy.vectorize``.\
Example: ``mv.add_isosurface(np.vectorize(f))``
:param float isolevel: The value for which the function should be constant.
:param int resolution: The number of grid point to use for the surface. An high value will give better quality but lower performance.
:param str style: The surface style, choose between ``solid``, ``wireframe`` and ``transparent``.
:param int color: The color given as an hexadecimal integer. Example: ``0xffffff`` is white.
'''
avail_styles = ['wireframe', 'solid', 'transparent']
if style not in avail_styles:
raise ValueError('style must be in ' + str(avail_styles))
# We want to make a container that contains the whole molecule
# and surface
area_min = self.coordinates.min(axis=0) - 0.2
area_max = self.coordinates.max(axis=0) + 0.2
x = np.linspace(area_min[0], area_max[0], resolution)
y = np.linspace(area_min[1], area_max[1], resolution)
z = np.linspace(area_min[2], area_max[2], resolution)
xv, yv, zv = np.meshgrid(x, y, z)
spacing = np.array((area_max - area_min)/resolution)
if isolevel >= 0:
triangles = marching_cubes(function(xv, yv, zv), isolevel)
else: # Wrong traingle unwinding roder -- god only knows why
triangles = marching_cubes(-function(xv, yv, zv), -isolevel)
if len(triangles) == 0:
## NO surface
return
faces = []
verts = []
for i, t in enumerate(triangles):
faces.append([i * 3, i * 3 +1, i * 3 + 2])
verts.extend(t)
faces = np.array(faces)
verts = area_min + spacing/2 + np.array(verts)*spacing
rep_id = self.add_representation('surface', {'verts': verts.astype('float32'),
'faces': faces.astype('int32'),
'style': style,
'color': color})
self.autozoom(verts)
def add_isosurface_grid_data(self, data, origin, extent, resolution,
isolevel=0.3, scale=10,
style="wireframe", color=0xffffff):
"""
Add an isosurface to current scence using pre-computed data on a grid
"""
spacing = np.array(extent/resolution)/scale
if isolevel >= 0:
triangles = marching_cubes(data, isolevel)
else:
triangles = marching_cubes(-data, -isolevel)
faces = []
verts = []
for i, t in enumerate(triangles):
faces.append([i * 3, i * 3 +1, i * 3 + 2])
verts.extend(t)
faces = np.array(faces)
verts = origin + spacing/2 + np.array(verts)*spacing
rep_id = self.add_representation('surface', {'verts': verts.astype('float32'),
'faces': faces.astype('int32'),
'style': style,
'color': color})
self.autozoom(verts)
|
gabrielelanaro/chemview | chemview/viewer.py | MolecularViewer.toggle_axes | python | def toggle_axes(self, parameters = None):
'''Toggle axes [x,y,z] on and off for the current representation
Parameters: dictionary of parameters to control axes:
position/p: origin of axes
length/l: length of axis
offset/o: offset to place axis labels
axis_colors/ac: axis colors
text_colors/tc: label colors
radii/r: axis radii
text/t: label text
sizes/s: label sizes
fonts/f: label fonts'''
if len(self._axes_reps)>0:
for rep_id in self._axes_reps:
self.remove_representation(rep_id)
self._axes_reps = []
else:
if not isinstance(parameters,dict):
parameters={}
def defaults(pdict,keys,default,length=3,instance=(int,float)):
'''Helper function to generate default values and handle errors'''
for k in keys:
val=pdict.get(k)
if val!=None:
break
if val==None:
val=default
elif isinstance(val,instance) and length>1:
val = [val]*length
elif isinstance(val,(list,np.generic,np.ndarray)) and length>1:
if not all([isinstance(v,instance) for v in val]):
raise RuntimeError("Invalid type {t} for parameter {p}. Use {i}.".format(t=type(val),p=val,i=instance))
elif not isinstance(val,instance):
raise RuntimeError("Invalid type {t} for parameter {p}. Use {i}.".format(t=type(val),p=val,i=instance))
return val
p = defaults(parameters,['positions','position','p'],np.average(self.coordinates,0))
l = defaults(parameters,['lengths','length','l'],max([np.linalg.norm(x-p) for x in self.coordinates]),1)
o = defaults(parameters,['offsets','offset','o'],l*1.05,1)
ac = defaults(parameters,[a+c for a in ['axis_','a',''] for c in ['colors','colours','color','colour','c']],[0xff0000,0x00ff00,0x0000ff],3,(int,hex))
tc = defaults(parameters,[a+c for a in ['text_','t',''] for c in ['colors','colours','color','colour','c']],[0xff0000,0x00ff00,0x0000ff],3,(int,hex))
r = defaults(parameters,['radii','radius','r'],[0.005]*3,3)
t = defaults(parameters,['text','labels','t'],['X','Y','Z'],3,str)
s = defaults(parameters,['sizes','size','s'],[32]*3,3)
f = defaults(parameters,['fonts','font','f'],['Arial']*3,3,str)
starts=np.array([p,p,p],float)
ends=np.array([p+[l,0,0],p+[0,l,0],p+[0,0,l]],float)
axis_labels_coords=np.array([p+[o,0,0],p+[0,o,0],p+[0,0,o]],float)
a_rep=self.add_representation('cylinders',{"startCoords":starts,
"endCoords":ends,
"colors":ac,
"radii":r})
t_rep=self.add_representation('text',{"coordinates":axis_labels_coords,
"text":t,
"colors":tc,
"sizes":s,
"fonts":f})
self._axes_reps = [a_rep, t_rep] | Toggle axes [x,y,z] on and off for the current representation
Parameters: dictionary of parameters to control axes:
position/p: origin of axes
length/l: length of axis
offset/o: offset to place axis labels
axis_colors/ac: axis colors
text_colors/tc: label colors
radii/r: axis radii
text/t: label text
sizes/s: label sizes
fonts/f: label fonts | train | https://github.com/gabrielelanaro/chemview/blob/2c9768dd23db99e59e27adff2a953bb8ee795fa3/chemview/viewer.py#L86-L148 | [
"def defaults(pdict,keys,default,length=3,instance=(int,float)):\n '''Helper function to generate default values and handle errors'''\n for k in keys:\n val=pdict.get(k)\n if val!=None:\n break\n if val==None:\n val=default\n elif isinstance(val,instance) and length>1:\n val = [val]*length\n elif isinstance(val,(list,np.generic,np.ndarray)) and length>1:\n if not all([isinstance(v,instance) for v in val]):\n raise RuntimeError(\"Invalid type {t} for parameter {p}. Use {i}.\".format(t=type(val),p=val,i=instance))\n elif not isinstance(val,instance):\n raise RuntimeError(\"Invalid type {t} for parameter {p}. Use {i}.\".format(t=type(val),p=val,i=instance))\n return val\n"
] | class MolecularViewer(RepresentationViewer):
coordinates = Any()
def __init__(self, coordinates, topology, width=500, height=500):
'''Create a Molecular Viewer widget to be displayed in IPython notebook.
:param np.ndarray coordinates: A numpy array containing the 3D coordinates of the atoms to be displayed
:param dict topology: A dict specifying the topology as described in the User Guide.
'''
super(MolecularViewer, self).__init__(width, height)
self.update_callbacks = []
self.coordinates = coordinates.astype('float32')
self.topology = topology
self._axes_reps = []
def points(self, size=1.0, highlight=None, colorlist=None, opacity=1.0):
"""Display the system as points.
:param float size: the size of the points.
"""
if colorlist is None:
colorlist = [get_atom_color(t) for t in self.topology['atom_types']]
if highlight is not None:
if isinstance(highlight, int):
colorlist[highlight] = 0xff0000
if isinstance(highlight, (list, np.ndarray)):
for i in highlight:
colorlist[i] = 0xff0000
sizes = [size] * len(self.topology['atom_types'])
points = self.add_representation('points', {'coordinates': self.coordinates.astype('float32'),
'colors': colorlist,
'sizes': sizes,
'opacity': opacity})
# Update closure
def update(self=self, points=points):
self.update_representation(points, {'coordinates': self.coordinates.astype('float32')})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def labels(self, text=None, coordinates=None, colorlist=None, sizes=None, fonts=None, opacity=1.0):
'''Display atomic labels for the system'''
if coordinates is None:
coordinates=self.coordinates
l=len(coordinates)
if text is None:
if len(self.topology.get('atom_types'))==l:
text=[self.topology['atom_types'][i]+str(i+1) for i in range(l)]
else:
text=[str(i+1) for i in range(l)]
text_representation = self.add_representation('text', {'coordinates': coordinates,
'text': text,
'colors': colorlist,
'sizes': sizes,
'fonts': fonts,
'opacity': opacity})
def update(self=self, text_representation=text_representation):
self.update_representation(text_representation, {'coordinates': coordinates})
self.update_callbacks.append(update)
def remove_labels(self):
'''Remove all atomic labels from the system'''
for rep_id in self.representations.keys():
if self.representations[rep_id]['rep_type']=='text' and rep_id not in self._axes_reps:
self.remove_representation(rep_id)
def lines(self):
'''Display the system bonds as lines.
'''
if "bonds" not in self.topology:
return
bond_start, bond_end = zip(*self.topology['bonds'])
bond_start = np.array(bond_start)
bond_end = np.array(bond_end)
color_array = np.array([get_atom_color(t) for t in self.topology['atom_types']])
lines = self.add_representation('lines', {'startCoords': self.coordinates[bond_start],
'endCoords': self.coordinates[bond_end],
'startColors': color_array[bond_start].tolist(),
'endColors': color_array[bond_end].tolist()})
def update(self=self, lines=lines):
bond_start, bond_end = zip(*self.topology['bonds'])
bond_start = np.array(bond_start)
bond_end = np.array(bond_end)
self.update_representation(lines, {'startCoords': self.coordinates[bond_start],
'endCoords': self.coordinates[bond_end]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def wireframe(self, pointsize=0.2, opacity=1.0):
'''Display atoms as points of size *pointsize* and bonds as lines.'''
self.points(pointsize, opacity=opacity)
self.lines()
def ball_and_sticks(self, ball_radius=0.05, stick_radius=0.02, colorlist=None, opacity=1.0):
"""Display the system using a ball and stick representation.
"""
# Add the spheres
if colorlist is None:
colorlist = [get_atom_color(t) for t in self.topology['atom_types']]
sizes = [ball_radius] * len(self.topology['atom_types'])
spheres = self.add_representation('spheres', {'coordinates': self.coordinates.astype('float32'),
'colors': colorlist,
'radii': sizes,
'opacity': opacity})
def update(self=self, spheres=spheres):
self.update_representation(spheres, {'coordinates': self.coordinates.astype('float32')})
self.update_callbacks.append(update)
# Add the cylinders
if 'bonds' in self.topology and self.topology['bonds'] is not None:
start_idx, end_idx = zip(*self.topology['bonds'])
# Added this so bonds don't go through atoms when opacity<1.0
new_start_coords = []
new_end_coords = []
for bond_ind, bond in enumerate(self.topology['bonds']):
trim_amt = (ball_radius**2 - stick_radius**2)**0.5 if ball_radius>stick_radius else 0
start_coord = self.coordinates[bond[0]]
end_coord = self.coordinates[bond[1]]
vec = (end_coord-start_coord)/np.linalg.norm(end_coord-start_coord)
new_start_coords.append(start_coord+vec*trim_amt)
new_end_coords.append(end_coord-vec*trim_amt)
cylinders = self.add_representation('cylinders', {'startCoords': np.array(new_start_coords,dtype='float32'),
'endCoords': np.array(new_end_coords,dtype='float32'),
'colors': [0xcccccc] * len(new_start_coords),
'radii': [stick_radius] * len(new_start_coords),
'opacity': opacity})
# Update closure
def update(self=self, rep=cylinders, start_idx=start_idx, end_idx=end_idx):
self.update_representation(rep, {'startCoords': self.coordinates[list(start_idx)],
'endCoords': self.coordinates[list(end_idx)]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def line_ribbon(self):
'''Display the protein secondary structure as a white lines that passes through the
backbone chain.
'''
# Control points are the CA (C alphas)
backbone = np.array(self.topology['atom_names']) == 'CA'
smoothline = self.add_representation('smoothline', {'coordinates': self.coordinates[backbone],
'color': 0xffffff})
def update(self=self, smoothline=smoothline):
self.update_representation(smoothline, {'coordinates': self.coordinates[backbone]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
    def cylinder_and_strand(self):
        '''Display the protein secondary structure as a white,
        solid tube (coils) and the alpha-helices as yellow cylinders.

        Reads per-residue codes from ``topology['secondary_structure']``
        ('H' marks helix residues) and atom indices from
        ``topology['residue_indices']``.
        '''
        top = self.topology
        # Mini state machine over the residues: record the first atom index of
        # each helix start/end, and collect the CA atom indices of every
        # non-helix stretch as control points for a smooth tube (a "coil").
        # NOTE(review): a coil that precedes the first helix is accumulated in
        # `coil` but never appended to `coils`, so it is not drawn -- confirm
        # whether that is intentional.
        in_helix = False
        helices_starts = []
        helices_ends = []
        coils = []
        coil = []
        for i, typ in enumerate(top['secondary_structure']):
            if typ == 'H':
                if in_helix == False:
                    # Coil -> helix transition: helix starts at this residue's
                    # first atom index.
                    helices_starts.append(top['residue_indices'][i][0])
                    in_helix = True
                # Extend the current coil up to the helix boundary so the tube
                # visually connects to the cylinder.
                coil.append(top['residue_indices'][i][0])
            else:
                if in_helix == True:
                    # Helix -> coil transition: close the helix and open a new
                    # coil (the list object is appended now and filled below).
                    helices_ends.append(top['residue_indices'][i][0])
                    coil = []
                    coils.append(coil)
                    in_helix = False
                # Control points: the residue's first atom plus its CA atoms.
                coil.append(top['residue_indices'][i][0])
                [coil.append(j) for j in top['residue_indices'][i] if top['atom_names'][j] == 'CA']
        # One smooth tube representation per coil.
        coil_representations = []
        for control_points in coils:
            rid = self.add_representation('smoothtube', {'coordinates': self.coordinates[control_points],
                                                         'radius': 0.05,
                                                         'resolution': 4,
                                                         'color': 0xffffff})
            coil_representations.append(rid)
        start_idx, end_idx = helices_starts, helices_ends
        # One yellow cylinder per helix (color/radius lists are oversized to
        # len(coordinates); presumably extra entries are ignored downstream --
        # TODO confirm).
        cylinders = self.add_representation('cylinders', {'startCoords': self.coordinates[list(start_idx)],
                                                          'endCoords': self.coordinates[list(end_idx)],
                                                          'colors': [0xffff00] * len(self.coordinates),
                                                          'radii': [0.15] * len(self.coordinates)})
        # NOTE(review): `control_points=control_points` binds the leftover loop
        # variable from the coil loop above; if `coils` is empty this default
        # raises NameError at closure creation, and the value is immediately
        # shadowed inside the loop anyway.
        def update(self=self, cylinders=cylinders, coils=coils,
                   coil_representations=coil_representations,
                   start_idx=start_idx, end_idx=end_idx, control_points=control_points):
            for i, control_points in enumerate(coils):
                rid = self.update_representation(coil_representations[i],
                                                 {'coordinates': self.coordinates[control_points]})
            self.update_representation(cylinders, {'startCoords': self.coordinates[list(start_idx)],
                                                   'endCoords': self.coordinates[list(end_idx)]})
        self.update_callbacks.append(update)
        self.autozoom(self.coordinates)
    def cartoon(self, cmap=None):
        '''Display a protein secondary structure as a pymol-like cartoon representation.

        :param cmap: is a dictionary that maps the secondary type
            (H=helix, E=sheet, C=coil) to a hexadecimal color (0xffffff for white)
        '''
        # Build the cartoon geometry from coordinates, atom names and
        # per-residue secondary-structure codes via the gg geometry module.
        top = self.topology
        geom = gg.GeomProteinCartoon(gg.Aes(xyz=self.coordinates,
                                            types=top['atom_names'],
                                            secondary_type=top['secondary_structure']),
                                     cmap=cmap)
        # Each produced primitive is a dict with 'rep_type' and 'options'.
        primitives = geom.produce(gg.Aes())
        ids = [self.add_representation(r['rep_type'], r['options']) for r in primitives]
        def update(self=self, geom=geom, ids=ids):
            # Re-generate the primitives for the current coordinates.
            # NOTE(review): here the whole primitive dict (including
            # 'rep_type') is passed as the options payload, whereas creation
            # above used r['options'] -- confirm update_representation accepts
            # this shape.
            primitives = geom.produce(gg.Aes(xyz=self.coordinates))
            [self.update_representation(id_, rep_options)
             for id_, rep_options in zip(ids, primitives)]
        self.update_callbacks.append(update)
        self.autozoom(self.coordinates)
def _coordinates_changed(self, name, old, new):
[c() for c in self.update_callbacks]
def add_isosurface(self, function, isolevel=0.3, resolution=32, style="wireframe", color=0xffffff):
'''Add an isosurface to the current scene.
:param callable function: A function that takes x, y, z coordinates as input and is broadcastable using numpy. Typically simple
functions that involve standard arithmetic operations and functions
such as ``x**2 + y**2 + z**2`` or ``np.exp(x**2 + y**2 + z**2)`` will work. If not sure, you can first
pass the function through ``numpy.vectorize``.\
Example: ``mv.add_isosurface(np.vectorize(f))``
:param float isolevel: The value for which the function should be constant.
:param int resolution: The number of grid point to use for the surface. An high value will give better quality but lower performance.
:param str style: The surface style, choose between ``solid``, ``wireframe`` and ``transparent``.
:param int color: The color given as an hexadecimal integer. Example: ``0xffffff`` is white.
'''
avail_styles = ['wireframe', 'solid', 'transparent']
if style not in avail_styles:
raise ValueError('style must be in ' + str(avail_styles))
# We want to make a container that contains the whole molecule
# and surface
area_min = self.coordinates.min(axis=0) - 0.2
area_max = self.coordinates.max(axis=0) + 0.2
x = np.linspace(area_min[0], area_max[0], resolution)
y = np.linspace(area_min[1], area_max[1], resolution)
z = np.linspace(area_min[2], area_max[2], resolution)
xv, yv, zv = np.meshgrid(x, y, z)
spacing = np.array((area_max - area_min)/resolution)
if isolevel >= 0:
triangles = marching_cubes(function(xv, yv, zv), isolevel)
else: # Wrong traingle unwinding roder -- god only knows why
triangles = marching_cubes(-function(xv, yv, zv), -isolevel)
if len(triangles) == 0:
## NO surface
return
faces = []
verts = []
for i, t in enumerate(triangles):
faces.append([i * 3, i * 3 +1, i * 3 + 2])
verts.extend(t)
faces = np.array(faces)
verts = area_min + spacing/2 + np.array(verts)*spacing
rep_id = self.add_representation('surface', {'verts': verts.astype('float32'),
'faces': faces.astype('int32'),
'style': style,
'color': color})
self.autozoom(verts)
def add_isosurface_grid_data(self, data, origin, extent, resolution,
isolevel=0.3, scale=10,
style="wireframe", color=0xffffff):
"""
Add an isosurface to current scence using pre-computed data on a grid
"""
spacing = np.array(extent/resolution)/scale
if isolevel >= 0:
triangles = marching_cubes(data, isolevel)
else:
triangles = marching_cubes(-data, -isolevel)
faces = []
verts = []
for i, t in enumerate(triangles):
faces.append([i * 3, i * 3 +1, i * 3 + 2])
verts.extend(t)
faces = np.array(faces)
verts = origin + spacing/2 + np.array(verts)*spacing
rep_id = self.add_representation('surface', {'verts': verts.astype('float32'),
'faces': faces.astype('int32'),
'style': style,
'color': color})
self.autozoom(verts)
|
gabrielelanaro/chemview | chemview/viewer.py | MolecularViewer.lines | python | def lines(self):
'''Display the system bonds as lines.
'''
if "bonds" not in self.topology:
return
bond_start, bond_end = zip(*self.topology['bonds'])
bond_start = np.array(bond_start)
bond_end = np.array(bond_end)
color_array = np.array([get_atom_color(t) for t in self.topology['atom_types']])
lines = self.add_representation('lines', {'startCoords': self.coordinates[bond_start],
'endCoords': self.coordinates[bond_end],
'startColors': color_array[bond_start].tolist(),
'endColors': color_array[bond_end].tolist()})
def update(self=self, lines=lines):
bond_start, bond_end = zip(*self.topology['bonds'])
bond_start = np.array(bond_start)
bond_end = np.array(bond_end)
self.update_representation(lines, {'startCoords': self.coordinates[bond_start],
'endCoords': self.coordinates[bond_end]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates) | Display the system bonds as lines. | train | https://github.com/gabrielelanaro/chemview/blob/2c9768dd23db99e59e27adff2a953bb8ee795fa3/chemview/viewer.py#L151-L176 | null | class MolecularViewer(RepresentationViewer):
coordinates = Any()
def __init__(self, coordinates, topology, width=500, height=500):
'''Create a Molecular Viewer widget to be displayed in IPython notebook.
:param np.ndarray coordinates: A numpy array containing the 3D coordinates of the atoms to be displayed
:param dict topology: A dict specifying the topology as described in the User Guide.
'''
super(MolecularViewer, self).__init__(width, height)
self.update_callbacks = []
self.coordinates = coordinates.astype('float32')
self.topology = topology
self._axes_reps = []
def points(self, size=1.0, highlight=None, colorlist=None, opacity=1.0):
"""Display the system as points.
:param float size: the size of the points.
"""
if colorlist is None:
colorlist = [get_atom_color(t) for t in self.topology['atom_types']]
if highlight is not None:
if isinstance(highlight, int):
colorlist[highlight] = 0xff0000
if isinstance(highlight, (list, np.ndarray)):
for i in highlight:
colorlist[i] = 0xff0000
sizes = [size] * len(self.topology['atom_types'])
points = self.add_representation('points', {'coordinates': self.coordinates.astype('float32'),
'colors': colorlist,
'sizes': sizes,
'opacity': opacity})
# Update closure
def update(self=self, points=points):
self.update_representation(points, {'coordinates': self.coordinates.astype('float32')})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def labels(self, text=None, coordinates=None, colorlist=None, sizes=None, fonts=None, opacity=1.0):
'''Display atomic labels for the system'''
if coordinates is None:
coordinates=self.coordinates
l=len(coordinates)
if text is None:
if len(self.topology.get('atom_types'))==l:
text=[self.topology['atom_types'][i]+str(i+1) for i in range(l)]
else:
text=[str(i+1) for i in range(l)]
text_representation = self.add_representation('text', {'coordinates': coordinates,
'text': text,
'colors': colorlist,
'sizes': sizes,
'fonts': fonts,
'opacity': opacity})
def update(self=self, text_representation=text_representation):
self.update_representation(text_representation, {'coordinates': coordinates})
self.update_callbacks.append(update)
def remove_labels(self):
'''Remove all atomic labels from the system'''
for rep_id in self.representations.keys():
if self.representations[rep_id]['rep_type']=='text' and rep_id not in self._axes_reps:
self.remove_representation(rep_id)
def toggle_axes(self, parameters = None):
'''Toggle axes [x,y,z] on and off for the current representation
Parameters: dictionary of parameters to control axes:
position/p: origin of axes
length/l: length of axis
offset/o: offset to place axis labels
axis_colors/ac: axis colors
text_colors/tc: label colors
radii/r: axis radii
text/t: label text
sizes/s: label sizes
fonts/f: label fonts'''
if len(self._axes_reps)>0:
for rep_id in self._axes_reps:
self.remove_representation(rep_id)
self._axes_reps = []
else:
if not isinstance(parameters,dict):
parameters={}
def defaults(pdict,keys,default,length=3,instance=(int,float)):
'''Helper function to generate default values and handle errors'''
for k in keys:
val=pdict.get(k)
if val!=None:
break
if val==None:
val=default
elif isinstance(val,instance) and length>1:
val = [val]*length
elif isinstance(val,(list,np.generic,np.ndarray)) and length>1:
if not all([isinstance(v,instance) for v in val]):
raise RuntimeError("Invalid type {t} for parameter {p}. Use {i}.".format(t=type(val),p=val,i=instance))
elif not isinstance(val,instance):
raise RuntimeError("Invalid type {t} for parameter {p}. Use {i}.".format(t=type(val),p=val,i=instance))
return val
p = defaults(parameters,['positions','position','p'],np.average(self.coordinates,0))
l = defaults(parameters,['lengths','length','l'],max([np.linalg.norm(x-p) for x in self.coordinates]),1)
o = defaults(parameters,['offsets','offset','o'],l*1.05,1)
ac = defaults(parameters,[a+c for a in ['axis_','a',''] for c in ['colors','colours','color','colour','c']],[0xff0000,0x00ff00,0x0000ff],3,(int,hex))
tc = defaults(parameters,[a+c for a in ['text_','t',''] for c in ['colors','colours','color','colour','c']],[0xff0000,0x00ff00,0x0000ff],3,(int,hex))
r = defaults(parameters,['radii','radius','r'],[0.005]*3,3)
t = defaults(parameters,['text','labels','t'],['X','Y','Z'],3,str)
s = defaults(parameters,['sizes','size','s'],[32]*3,3)
f = defaults(parameters,['fonts','font','f'],['Arial']*3,3,str)
starts=np.array([p,p,p],float)
ends=np.array([p+[l,0,0],p+[0,l,0],p+[0,0,l]],float)
axis_labels_coords=np.array([p+[o,0,0],p+[0,o,0],p+[0,0,o]],float)
a_rep=self.add_representation('cylinders',{"startCoords":starts,
"endCoords":ends,
"colors":ac,
"radii":r})
t_rep=self.add_representation('text',{"coordinates":axis_labels_coords,
"text":t,
"colors":tc,
"sizes":s,
"fonts":f})
self._axes_reps = [a_rep, t_rep]
def wireframe(self, pointsize=0.2, opacity=1.0):
'''Display atoms as points of size *pointsize* and bonds as lines.'''
self.points(pointsize, opacity=opacity)
self.lines()
def ball_and_sticks(self, ball_radius=0.05, stick_radius=0.02, colorlist=None, opacity=1.0):
"""Display the system using a ball and stick representation.
"""
# Add the spheres
if colorlist is None:
colorlist = [get_atom_color(t) for t in self.topology['atom_types']]
sizes = [ball_radius] * len(self.topology['atom_types'])
spheres = self.add_representation('spheres', {'coordinates': self.coordinates.astype('float32'),
'colors': colorlist,
'radii': sizes,
'opacity': opacity})
def update(self=self, spheres=spheres):
self.update_representation(spheres, {'coordinates': self.coordinates.astype('float32')})
self.update_callbacks.append(update)
# Add the cylinders
if 'bonds' in self.topology and self.topology['bonds'] is not None:
start_idx, end_idx = zip(*self.topology['bonds'])
# Added this so bonds don't go through atoms when opacity<1.0
new_start_coords = []
new_end_coords = []
for bond_ind, bond in enumerate(self.topology['bonds']):
trim_amt = (ball_radius**2 - stick_radius**2)**0.5 if ball_radius>stick_radius else 0
start_coord = self.coordinates[bond[0]]
end_coord = self.coordinates[bond[1]]
vec = (end_coord-start_coord)/np.linalg.norm(end_coord-start_coord)
new_start_coords.append(start_coord+vec*trim_amt)
new_end_coords.append(end_coord-vec*trim_amt)
cylinders = self.add_representation('cylinders', {'startCoords': np.array(new_start_coords,dtype='float32'),
'endCoords': np.array(new_end_coords,dtype='float32'),
'colors': [0xcccccc] * len(new_start_coords),
'radii': [stick_radius] * len(new_start_coords),
'opacity': opacity})
# Update closure
def update(self=self, rep=cylinders, start_idx=start_idx, end_idx=end_idx):
self.update_representation(rep, {'startCoords': self.coordinates[list(start_idx)],
'endCoords': self.coordinates[list(end_idx)]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def line_ribbon(self):
'''Display the protein secondary structure as a white lines that passes through the
backbone chain.
'''
# Control points are the CA (C alphas)
backbone = np.array(self.topology['atom_names']) == 'CA'
smoothline = self.add_representation('smoothline', {'coordinates': self.coordinates[backbone],
'color': 0xffffff})
def update(self=self, smoothline=smoothline):
self.update_representation(smoothline, {'coordinates': self.coordinates[backbone]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def cylinder_and_strand(self):
'''Display the protein secondary structure as a white,
solid tube and the alpha-helices as yellow cylinders.
'''
top = self.topology
# We build a mini-state machine to find the
# start end of helices and such
in_helix = False
helices_starts = []
helices_ends = []
coils = []
coil = []
for i, typ in enumerate(top['secondary_structure']):
if typ == 'H':
if in_helix == False:
# We become helices
helices_starts.append(top['residue_indices'][i][0])
in_helix = True
# We end the previous coil
coil.append(top['residue_indices'][i][0])
else:
if in_helix == True:
# We stop being helices
helices_ends.append(top['residue_indices'][i][0])
# We start a new coil
coil = []
coils.append(coil)
in_helix = False
# We add control points
coil.append(top['residue_indices'][i][0])
[coil.append(j) for j in top['residue_indices'][i] if top['atom_names'][j] == 'CA']
# We add the coils
coil_representations = []
for control_points in coils:
rid = self.add_representation('smoothtube', {'coordinates': self.coordinates[control_points],
'radius': 0.05,
'resolution': 4,
'color': 0xffffff})
coil_representations.append(rid)
start_idx, end_idx = helices_starts, helices_ends
cylinders = self.add_representation('cylinders', {'startCoords': self.coordinates[list(start_idx)],
'endCoords': self.coordinates[list(end_idx)],
'colors': [0xffff00] * len(self.coordinates),
'radii': [0.15] * len(self.coordinates)})
def update(self=self, cylinders=cylinders, coils=coils,
coil_representations=coil_representations,
start_idx=start_idx, end_idx=end_idx, control_points=control_points):
for i, control_points in enumerate(coils):
rid = self.update_representation(coil_representations[i],
{'coordinates': self.coordinates[control_points]})
self.update_representation(cylinders, {'startCoords': self.coordinates[list(start_idx)],
'endCoords': self.coordinates[list(end_idx)]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def cartoon(self, cmap=None):
'''Display a protein secondary structure as a pymol-like cartoon representation.
:param cmap: is a dictionary that maps the secondary type
(H=helix, E=sheet, C=coil) to a hexadecimal color (0xffffff for white)
'''
# Parse secondary structure
top = self.topology
geom = gg.GeomProteinCartoon(gg.Aes(xyz=self.coordinates,
types=top['atom_names'],
secondary_type=top['secondary_structure']),
cmap=cmap)
primitives = geom.produce(gg.Aes())
ids = [self.add_representation(r['rep_type'], r['options']) for r in primitives]
def update(self=self, geom=geom, ids=ids):
primitives = geom.produce(gg.Aes(xyz=self.coordinates))
[self.update_representation(id_, rep_options)
for id_, rep_options in zip(ids, primitives)]
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def _coordinates_changed(self, name, old, new):
[c() for c in self.update_callbacks]
def add_isosurface(self, function, isolevel=0.3, resolution=32, style="wireframe", color=0xffffff):
'''Add an isosurface to the current scene.
:param callable function: A function that takes x, y, z coordinates as input and is broadcastable using numpy. Typically simple
functions that involve standard arithmetic operations and functions
such as ``x**2 + y**2 + z**2`` or ``np.exp(x**2 + y**2 + z**2)`` will work. If not sure, you can first
pass the function through ``numpy.vectorize``.\
Example: ``mv.add_isosurface(np.vectorize(f))``
:param float isolevel: The value for which the function should be constant.
:param int resolution: The number of grid point to use for the surface. An high value will give better quality but lower performance.
:param str style: The surface style, choose between ``solid``, ``wireframe`` and ``transparent``.
:param int color: The color given as an hexadecimal integer. Example: ``0xffffff`` is white.
'''
avail_styles = ['wireframe', 'solid', 'transparent']
if style not in avail_styles:
raise ValueError('style must be in ' + str(avail_styles))
# We want to make a container that contains the whole molecule
# and surface
area_min = self.coordinates.min(axis=0) - 0.2
area_max = self.coordinates.max(axis=0) + 0.2
x = np.linspace(area_min[0], area_max[0], resolution)
y = np.linspace(area_min[1], area_max[1], resolution)
z = np.linspace(area_min[2], area_max[2], resolution)
xv, yv, zv = np.meshgrid(x, y, z)
spacing = np.array((area_max - area_min)/resolution)
if isolevel >= 0:
triangles = marching_cubes(function(xv, yv, zv), isolevel)
else: # Wrong traingle unwinding roder -- god only knows why
triangles = marching_cubes(-function(xv, yv, zv), -isolevel)
if len(triangles) == 0:
## NO surface
return
faces = []
verts = []
for i, t in enumerate(triangles):
faces.append([i * 3, i * 3 +1, i * 3 + 2])
verts.extend(t)
faces = np.array(faces)
verts = area_min + spacing/2 + np.array(verts)*spacing
rep_id = self.add_representation('surface', {'verts': verts.astype('float32'),
'faces': faces.astype('int32'),
'style': style,
'color': color})
self.autozoom(verts)
def add_isosurface_grid_data(self, data, origin, extent, resolution,
isolevel=0.3, scale=10,
style="wireframe", color=0xffffff):
"""
Add an isosurface to current scence using pre-computed data on a grid
"""
spacing = np.array(extent/resolution)/scale
if isolevel >= 0:
triangles = marching_cubes(data, isolevel)
else:
triangles = marching_cubes(-data, -isolevel)
faces = []
verts = []
for i, t in enumerate(triangles):
faces.append([i * 3, i * 3 +1, i * 3 + 2])
verts.extend(t)
faces = np.array(faces)
verts = origin + spacing/2 + np.array(verts)*spacing
rep_id = self.add_representation('surface', {'verts': verts.astype('float32'),
'faces': faces.astype('int32'),
'style': style,
'color': color})
self.autozoom(verts)
|
gabrielelanaro/chemview | chemview/viewer.py | MolecularViewer.wireframe | python | def wireframe(self, pointsize=0.2, opacity=1.0):
'''Display atoms as points of size *pointsize* and bonds as lines.'''
self.points(pointsize, opacity=opacity)
self.lines() | Display atoms as points of size *pointsize* and bonds as lines. | train | https://github.com/gabrielelanaro/chemview/blob/2c9768dd23db99e59e27adff2a953bb8ee795fa3/chemview/viewer.py#L178-L181 | null | class MolecularViewer(RepresentationViewer):
coordinates = Any()
def __init__(self, coordinates, topology, width=500, height=500):
'''Create a Molecular Viewer widget to be displayed in IPython notebook.
:param np.ndarray coordinates: A numpy array containing the 3D coordinates of the atoms to be displayed
:param dict topology: A dict specifying the topology as described in the User Guide.
'''
super(MolecularViewer, self).__init__(width, height)
self.update_callbacks = []
self.coordinates = coordinates.astype('float32')
self.topology = topology
self._axes_reps = []
def points(self, size=1.0, highlight=None, colorlist=None, opacity=1.0):
"""Display the system as points.
:param float size: the size of the points.
"""
if colorlist is None:
colorlist = [get_atom_color(t) for t in self.topology['atom_types']]
if highlight is not None:
if isinstance(highlight, int):
colorlist[highlight] = 0xff0000
if isinstance(highlight, (list, np.ndarray)):
for i in highlight:
colorlist[i] = 0xff0000
sizes = [size] * len(self.topology['atom_types'])
points = self.add_representation('points', {'coordinates': self.coordinates.astype('float32'),
'colors': colorlist,
'sizes': sizes,
'opacity': opacity})
# Update closure
def update(self=self, points=points):
self.update_representation(points, {'coordinates': self.coordinates.astype('float32')})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def labels(self, text=None, coordinates=None, colorlist=None, sizes=None, fonts=None, opacity=1.0):
'''Display atomic labels for the system'''
if coordinates is None:
coordinates=self.coordinates
l=len(coordinates)
if text is None:
if len(self.topology.get('atom_types'))==l:
text=[self.topology['atom_types'][i]+str(i+1) for i in range(l)]
else:
text=[str(i+1) for i in range(l)]
text_representation = self.add_representation('text', {'coordinates': coordinates,
'text': text,
'colors': colorlist,
'sizes': sizes,
'fonts': fonts,
'opacity': opacity})
def update(self=self, text_representation=text_representation):
self.update_representation(text_representation, {'coordinates': coordinates})
self.update_callbacks.append(update)
def remove_labels(self):
'''Remove all atomic labels from the system'''
for rep_id in self.representations.keys():
if self.representations[rep_id]['rep_type']=='text' and rep_id not in self._axes_reps:
self.remove_representation(rep_id)
def toggle_axes(self, parameters = None):
'''Toggle axes [x,y,z] on and off for the current representation
Parameters: dictionary of parameters to control axes:
position/p: origin of axes
length/l: length of axis
offset/o: offset to place axis labels
axis_colors/ac: axis colors
text_colors/tc: label colors
radii/r: axis radii
text/t: label text
sizes/s: label sizes
fonts/f: label fonts'''
if len(self._axes_reps)>0:
for rep_id in self._axes_reps:
self.remove_representation(rep_id)
self._axes_reps = []
else:
if not isinstance(parameters,dict):
parameters={}
def defaults(pdict,keys,default,length=3,instance=(int,float)):
'''Helper function to generate default values and handle errors'''
for k in keys:
val=pdict.get(k)
if val!=None:
break
if val==None:
val=default
elif isinstance(val,instance) and length>1:
val = [val]*length
elif isinstance(val,(list,np.generic,np.ndarray)) and length>1:
if not all([isinstance(v,instance) for v in val]):
raise RuntimeError("Invalid type {t} for parameter {p}. Use {i}.".format(t=type(val),p=val,i=instance))
elif not isinstance(val,instance):
raise RuntimeError("Invalid type {t} for parameter {p}. Use {i}.".format(t=type(val),p=val,i=instance))
return val
p = defaults(parameters,['positions','position','p'],np.average(self.coordinates,0))
l = defaults(parameters,['lengths','length','l'],max([np.linalg.norm(x-p) for x in self.coordinates]),1)
o = defaults(parameters,['offsets','offset','o'],l*1.05,1)
ac = defaults(parameters,[a+c for a in ['axis_','a',''] for c in ['colors','colours','color','colour','c']],[0xff0000,0x00ff00,0x0000ff],3,(int,hex))
tc = defaults(parameters,[a+c for a in ['text_','t',''] for c in ['colors','colours','color','colour','c']],[0xff0000,0x00ff00,0x0000ff],3,(int,hex))
r = defaults(parameters,['radii','radius','r'],[0.005]*3,3)
t = defaults(parameters,['text','labels','t'],['X','Y','Z'],3,str)
s = defaults(parameters,['sizes','size','s'],[32]*3,3)
f = defaults(parameters,['fonts','font','f'],['Arial']*3,3,str)
starts=np.array([p,p,p],float)
ends=np.array([p+[l,0,0],p+[0,l,0],p+[0,0,l]],float)
axis_labels_coords=np.array([p+[o,0,0],p+[0,o,0],p+[0,0,o]],float)
a_rep=self.add_representation('cylinders',{"startCoords":starts,
"endCoords":ends,
"colors":ac,
"radii":r})
t_rep=self.add_representation('text',{"coordinates":axis_labels_coords,
"text":t,
"colors":tc,
"sizes":s,
"fonts":f})
self._axes_reps = [a_rep, t_rep]
def lines(self):
'''Display the system bonds as lines.
'''
if "bonds" not in self.topology:
return
bond_start, bond_end = zip(*self.topology['bonds'])
bond_start = np.array(bond_start)
bond_end = np.array(bond_end)
color_array = np.array([get_atom_color(t) for t in self.topology['atom_types']])
lines = self.add_representation('lines', {'startCoords': self.coordinates[bond_start],
'endCoords': self.coordinates[bond_end],
'startColors': color_array[bond_start].tolist(),
'endColors': color_array[bond_end].tolist()})
def update(self=self, lines=lines):
bond_start, bond_end = zip(*self.topology['bonds'])
bond_start = np.array(bond_start)
bond_end = np.array(bond_end)
self.update_representation(lines, {'startCoords': self.coordinates[bond_start],
'endCoords': self.coordinates[bond_end]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def ball_and_sticks(self, ball_radius=0.05, stick_radius=0.02, colorlist=None, opacity=1.0):
"""Display the system using a ball and stick representation.
"""
# Add the spheres
if colorlist is None:
colorlist = [get_atom_color(t) for t in self.topology['atom_types']]
sizes = [ball_radius] * len(self.topology['atom_types'])
spheres = self.add_representation('spheres', {'coordinates': self.coordinates.astype('float32'),
'colors': colorlist,
'radii': sizes,
'opacity': opacity})
def update(self=self, spheres=spheres):
self.update_representation(spheres, {'coordinates': self.coordinates.astype('float32')})
self.update_callbacks.append(update)
# Add the cylinders
if 'bonds' in self.topology and self.topology['bonds'] is not None:
start_idx, end_idx = zip(*self.topology['bonds'])
# Added this so bonds don't go through atoms when opacity<1.0
new_start_coords = []
new_end_coords = []
for bond_ind, bond in enumerate(self.topology['bonds']):
trim_amt = (ball_radius**2 - stick_radius**2)**0.5 if ball_radius>stick_radius else 0
start_coord = self.coordinates[bond[0]]
end_coord = self.coordinates[bond[1]]
vec = (end_coord-start_coord)/np.linalg.norm(end_coord-start_coord)
new_start_coords.append(start_coord+vec*trim_amt)
new_end_coords.append(end_coord-vec*trim_amt)
cylinders = self.add_representation('cylinders', {'startCoords': np.array(new_start_coords,dtype='float32'),
'endCoords': np.array(new_end_coords,dtype='float32'),
'colors': [0xcccccc] * len(new_start_coords),
'radii': [stick_radius] * len(new_start_coords),
'opacity': opacity})
# Update closure
def update(self=self, rep=cylinders, start_idx=start_idx, end_idx=end_idx):
self.update_representation(rep, {'startCoords': self.coordinates[list(start_idx)],
'endCoords': self.coordinates[list(end_idx)]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def line_ribbon(self):
'''Display the protein secondary structure as a white lines that passes through the
backbone chain.
'''
# Control points are the CA (C alphas)
backbone = np.array(self.topology['atom_names']) == 'CA'
smoothline = self.add_representation('smoothline', {'coordinates': self.coordinates[backbone],
'color': 0xffffff})
def update(self=self, smoothline=smoothline):
self.update_representation(smoothline, {'coordinates': self.coordinates[backbone]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def cylinder_and_strand(self):
'''Display the protein secondary structure as a white,
solid tube and the alpha-helices as yellow cylinders.
'''
top = self.topology
# We build a mini-state machine to find the
# start end of helices and such
in_helix = False
helices_starts = []
helices_ends = []
coils = []
coil = []
for i, typ in enumerate(top['secondary_structure']):
if typ == 'H':
if in_helix == False:
# We become helices
helices_starts.append(top['residue_indices'][i][0])
in_helix = True
# We end the previous coil
coil.append(top['residue_indices'][i][0])
else:
if in_helix == True:
# We stop being helices
helices_ends.append(top['residue_indices'][i][0])
# We start a new coil
coil = []
coils.append(coil)
in_helix = False
# We add control points
coil.append(top['residue_indices'][i][0])
[coil.append(j) for j in top['residue_indices'][i] if top['atom_names'][j] == 'CA']
# We add the coils
coil_representations = []
for control_points in coils:
rid = self.add_representation('smoothtube', {'coordinates': self.coordinates[control_points],
'radius': 0.05,
'resolution': 4,
'color': 0xffffff})
coil_representations.append(rid)
start_idx, end_idx = helices_starts, helices_ends
cylinders = self.add_representation('cylinders', {'startCoords': self.coordinates[list(start_idx)],
'endCoords': self.coordinates[list(end_idx)],
'colors': [0xffff00] * len(self.coordinates),
'radii': [0.15] * len(self.coordinates)})
def update(self=self, cylinders=cylinders, coils=coils,
coil_representations=coil_representations,
start_idx=start_idx, end_idx=end_idx, control_points=control_points):
for i, control_points in enumerate(coils):
rid = self.update_representation(coil_representations[i],
{'coordinates': self.coordinates[control_points]})
self.update_representation(cylinders, {'startCoords': self.coordinates[list(start_idx)],
'endCoords': self.coordinates[list(end_idx)]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def cartoon(self, cmap=None):
'''Display a protein secondary structure as a pymol-like cartoon representation.
:param cmap: is a dictionary that maps the secondary type
(H=helix, E=sheet, C=coil) to a hexadecimal color (0xffffff for white)
'''
# Parse secondary structure
top = self.topology
geom = gg.GeomProteinCartoon(gg.Aes(xyz=self.coordinates,
types=top['atom_names'],
secondary_type=top['secondary_structure']),
cmap=cmap)
primitives = geom.produce(gg.Aes())
ids = [self.add_representation(r['rep_type'], r['options']) for r in primitives]
def update(self=self, geom=geom, ids=ids):
primitives = geom.produce(gg.Aes(xyz=self.coordinates))
[self.update_representation(id_, rep_options)
for id_, rep_options in zip(ids, primitives)]
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def _coordinates_changed(self, name, old, new):
[c() for c in self.update_callbacks]
def add_isosurface(self, function, isolevel=0.3, resolution=32, style="wireframe", color=0xffffff):
'''Add an isosurface to the current scene.
:param callable function: A function that takes x, y, z coordinates as input and is broadcastable using numpy. Typically simple
functions that involve standard arithmetic operations and functions
such as ``x**2 + y**2 + z**2`` or ``np.exp(x**2 + y**2 + z**2)`` will work. If not sure, you can first
pass the function through ``numpy.vectorize``.\
Example: ``mv.add_isosurface(np.vectorize(f))``
:param float isolevel: The value for which the function should be constant.
:param int resolution: The number of grid point to use for the surface. An high value will give better quality but lower performance.
:param str style: The surface style, choose between ``solid``, ``wireframe`` and ``transparent``.
:param int color: The color given as an hexadecimal integer. Example: ``0xffffff`` is white.
'''
avail_styles = ['wireframe', 'solid', 'transparent']
if style not in avail_styles:
raise ValueError('style must be in ' + str(avail_styles))
# We want to make a container that contains the whole molecule
# and surface
area_min = self.coordinates.min(axis=0) - 0.2
area_max = self.coordinates.max(axis=0) + 0.2
x = np.linspace(area_min[0], area_max[0], resolution)
y = np.linspace(area_min[1], area_max[1], resolution)
z = np.linspace(area_min[2], area_max[2], resolution)
xv, yv, zv = np.meshgrid(x, y, z)
spacing = np.array((area_max - area_min)/resolution)
if isolevel >= 0:
triangles = marching_cubes(function(xv, yv, zv), isolevel)
else: # Wrong traingle unwinding roder -- god only knows why
triangles = marching_cubes(-function(xv, yv, zv), -isolevel)
if len(triangles) == 0:
## NO surface
return
faces = []
verts = []
for i, t in enumerate(triangles):
faces.append([i * 3, i * 3 +1, i * 3 + 2])
verts.extend(t)
faces = np.array(faces)
verts = area_min + spacing/2 + np.array(verts)*spacing
rep_id = self.add_representation('surface', {'verts': verts.astype('float32'),
'faces': faces.astype('int32'),
'style': style,
'color': color})
self.autozoom(verts)
def add_isosurface_grid_data(self, data, origin, extent, resolution,
isolevel=0.3, scale=10,
style="wireframe", color=0xffffff):
"""
Add an isosurface to current scence using pre-computed data on a grid
"""
spacing = np.array(extent/resolution)/scale
if isolevel >= 0:
triangles = marching_cubes(data, isolevel)
else:
triangles = marching_cubes(-data, -isolevel)
faces = []
verts = []
for i, t in enumerate(triangles):
faces.append([i * 3, i * 3 +1, i * 3 + 2])
verts.extend(t)
faces = np.array(faces)
verts = origin + spacing/2 + np.array(verts)*spacing
rep_id = self.add_representation('surface', {'verts': verts.astype('float32'),
'faces': faces.astype('int32'),
'style': style,
'color': color})
self.autozoom(verts)
|
gabrielelanaro/chemview | chemview/viewer.py | MolecularViewer.ball_and_sticks | python | def ball_and_sticks(self, ball_radius=0.05, stick_radius=0.02, colorlist=None, opacity=1.0):
# Add the spheres
if colorlist is None:
colorlist = [get_atom_color(t) for t in self.topology['atom_types']]
sizes = [ball_radius] * len(self.topology['atom_types'])
spheres = self.add_representation('spheres', {'coordinates': self.coordinates.astype('float32'),
'colors': colorlist,
'radii': sizes,
'opacity': opacity})
def update(self=self, spheres=spheres):
self.update_representation(spheres, {'coordinates': self.coordinates.astype('float32')})
self.update_callbacks.append(update)
# Add the cylinders
if 'bonds' in self.topology and self.topology['bonds'] is not None:
start_idx, end_idx = zip(*self.topology['bonds'])
# Added this so bonds don't go through atoms when opacity<1.0
new_start_coords = []
new_end_coords = []
for bond_ind, bond in enumerate(self.topology['bonds']):
trim_amt = (ball_radius**2 - stick_radius**2)**0.5 if ball_radius>stick_radius else 0
start_coord = self.coordinates[bond[0]]
end_coord = self.coordinates[bond[1]]
vec = (end_coord-start_coord)/np.linalg.norm(end_coord-start_coord)
new_start_coords.append(start_coord+vec*trim_amt)
new_end_coords.append(end_coord-vec*trim_amt)
cylinders = self.add_representation('cylinders', {'startCoords': np.array(new_start_coords,dtype='float32'),
'endCoords': np.array(new_end_coords,dtype='float32'),
'colors': [0xcccccc] * len(new_start_coords),
'radii': [stick_radius] * len(new_start_coords),
'opacity': opacity})
# Update closure
def update(self=self, rep=cylinders, start_idx=start_idx, end_idx=end_idx):
self.update_representation(rep, {'startCoords': self.coordinates[list(start_idx)],
'endCoords': self.coordinates[list(end_idx)]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates) | Display the system using a ball and stick representation. | train | https://github.com/gabrielelanaro/chemview/blob/2c9768dd23db99e59e27adff2a953bb8ee795fa3/chemview/viewer.py#L183-L229 | null | class MolecularViewer(RepresentationViewer):
coordinates = Any()
def __init__(self, coordinates, topology, width=500, height=500):
'''Create a Molecular Viewer widget to be displayed in IPython notebook.
:param np.ndarray coordinates: A numpy array containing the 3D coordinates of the atoms to be displayed
:param dict topology: A dict specifying the topology as described in the User Guide.
'''
super(MolecularViewer, self).__init__(width, height)
self.update_callbacks = []
self.coordinates = coordinates.astype('float32')
self.topology = topology
self._axes_reps = []
def points(self, size=1.0, highlight=None, colorlist=None, opacity=1.0):
"""Display the system as points.
:param float size: the size of the points.
"""
if colorlist is None:
colorlist = [get_atom_color(t) for t in self.topology['atom_types']]
if highlight is not None:
if isinstance(highlight, int):
colorlist[highlight] = 0xff0000
if isinstance(highlight, (list, np.ndarray)):
for i in highlight:
colorlist[i] = 0xff0000
sizes = [size] * len(self.topology['atom_types'])
points = self.add_representation('points', {'coordinates': self.coordinates.astype('float32'),
'colors': colorlist,
'sizes': sizes,
'opacity': opacity})
# Update closure
def update(self=self, points=points):
self.update_representation(points, {'coordinates': self.coordinates.astype('float32')})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def labels(self, text=None, coordinates=None, colorlist=None, sizes=None, fonts=None, opacity=1.0):
'''Display atomic labels for the system'''
if coordinates is None:
coordinates=self.coordinates
l=len(coordinates)
if text is None:
if len(self.topology.get('atom_types'))==l:
text=[self.topology['atom_types'][i]+str(i+1) for i in range(l)]
else:
text=[str(i+1) for i in range(l)]
text_representation = self.add_representation('text', {'coordinates': coordinates,
'text': text,
'colors': colorlist,
'sizes': sizes,
'fonts': fonts,
'opacity': opacity})
def update(self=self, text_representation=text_representation):
self.update_representation(text_representation, {'coordinates': coordinates})
self.update_callbacks.append(update)
def remove_labels(self):
'''Remove all atomic labels from the system'''
for rep_id in self.representations.keys():
if self.representations[rep_id]['rep_type']=='text' and rep_id not in self._axes_reps:
self.remove_representation(rep_id)
def toggle_axes(self, parameters = None):
'''Toggle axes [x,y,z] on and off for the current representation
Parameters: dictionary of parameters to control axes:
position/p: origin of axes
length/l: length of axis
offset/o: offset to place axis labels
axis_colors/ac: axis colors
text_colors/tc: label colors
radii/r: axis radii
text/t: label text
sizes/s: label sizes
fonts/f: label fonts'''
if len(self._axes_reps)>0:
for rep_id in self._axes_reps:
self.remove_representation(rep_id)
self._axes_reps = []
else:
if not isinstance(parameters,dict):
parameters={}
def defaults(pdict,keys,default,length=3,instance=(int,float)):
'''Helper function to generate default values and handle errors'''
for k in keys:
val=pdict.get(k)
if val!=None:
break
if val==None:
val=default
elif isinstance(val,instance) and length>1:
val = [val]*length
elif isinstance(val,(list,np.generic,np.ndarray)) and length>1:
if not all([isinstance(v,instance) for v in val]):
raise RuntimeError("Invalid type {t} for parameter {p}. Use {i}.".format(t=type(val),p=val,i=instance))
elif not isinstance(val,instance):
raise RuntimeError("Invalid type {t} for parameter {p}. Use {i}.".format(t=type(val),p=val,i=instance))
return val
p = defaults(parameters,['positions','position','p'],np.average(self.coordinates,0))
l = defaults(parameters,['lengths','length','l'],max([np.linalg.norm(x-p) for x in self.coordinates]),1)
o = defaults(parameters,['offsets','offset','o'],l*1.05,1)
ac = defaults(parameters,[a+c for a in ['axis_','a',''] for c in ['colors','colours','color','colour','c']],[0xff0000,0x00ff00,0x0000ff],3,(int,hex))
tc = defaults(parameters,[a+c for a in ['text_','t',''] for c in ['colors','colours','color','colour','c']],[0xff0000,0x00ff00,0x0000ff],3,(int,hex))
r = defaults(parameters,['radii','radius','r'],[0.005]*3,3)
t = defaults(parameters,['text','labels','t'],['X','Y','Z'],3,str)
s = defaults(parameters,['sizes','size','s'],[32]*3,3)
f = defaults(parameters,['fonts','font','f'],['Arial']*3,3,str)
starts=np.array([p,p,p],float)
ends=np.array([p+[l,0,0],p+[0,l,0],p+[0,0,l]],float)
axis_labels_coords=np.array([p+[o,0,0],p+[0,o,0],p+[0,0,o]],float)
a_rep=self.add_representation('cylinders',{"startCoords":starts,
"endCoords":ends,
"colors":ac,
"radii":r})
t_rep=self.add_representation('text',{"coordinates":axis_labels_coords,
"text":t,
"colors":tc,
"sizes":s,
"fonts":f})
self._axes_reps = [a_rep, t_rep]
def lines(self):
'''Display the system bonds as lines.
'''
if "bonds" not in self.topology:
return
bond_start, bond_end = zip(*self.topology['bonds'])
bond_start = np.array(bond_start)
bond_end = np.array(bond_end)
color_array = np.array([get_atom_color(t) for t in self.topology['atom_types']])
lines = self.add_representation('lines', {'startCoords': self.coordinates[bond_start],
'endCoords': self.coordinates[bond_end],
'startColors': color_array[bond_start].tolist(),
'endColors': color_array[bond_end].tolist()})
def update(self=self, lines=lines):
bond_start, bond_end = zip(*self.topology['bonds'])
bond_start = np.array(bond_start)
bond_end = np.array(bond_end)
self.update_representation(lines, {'startCoords': self.coordinates[bond_start],
'endCoords': self.coordinates[bond_end]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def wireframe(self, pointsize=0.2, opacity=1.0):
'''Display atoms as points of size *pointsize* and bonds as lines.'''
self.points(pointsize, opacity=opacity)
self.lines()
def line_ribbon(self):
'''Display the protein secondary structure as a white lines that passes through the
backbone chain.
'''
# Control points are the CA (C alphas)
backbone = np.array(self.topology['atom_names']) == 'CA'
smoothline = self.add_representation('smoothline', {'coordinates': self.coordinates[backbone],
'color': 0xffffff})
def update(self=self, smoothline=smoothline):
self.update_representation(smoothline, {'coordinates': self.coordinates[backbone]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def cylinder_and_strand(self):
'''Display the protein secondary structure as a white,
solid tube and the alpha-helices as yellow cylinders.
'''
top = self.topology
# We build a mini-state machine to find the
# start end of helices and such
in_helix = False
helices_starts = []
helices_ends = []
coils = []
coil = []
for i, typ in enumerate(top['secondary_structure']):
if typ == 'H':
if in_helix == False:
# We become helices
helices_starts.append(top['residue_indices'][i][0])
in_helix = True
# We end the previous coil
coil.append(top['residue_indices'][i][0])
else:
if in_helix == True:
# We stop being helices
helices_ends.append(top['residue_indices'][i][0])
# We start a new coil
coil = []
coils.append(coil)
in_helix = False
# We add control points
coil.append(top['residue_indices'][i][0])
[coil.append(j) for j in top['residue_indices'][i] if top['atom_names'][j] == 'CA']
# We add the coils
coil_representations = []
for control_points in coils:
rid = self.add_representation('smoothtube', {'coordinates': self.coordinates[control_points],
'radius': 0.05,
'resolution': 4,
'color': 0xffffff})
coil_representations.append(rid)
start_idx, end_idx = helices_starts, helices_ends
cylinders = self.add_representation('cylinders', {'startCoords': self.coordinates[list(start_idx)],
'endCoords': self.coordinates[list(end_idx)],
'colors': [0xffff00] * len(self.coordinates),
'radii': [0.15] * len(self.coordinates)})
def update(self=self, cylinders=cylinders, coils=coils,
coil_representations=coil_representations,
start_idx=start_idx, end_idx=end_idx, control_points=control_points):
for i, control_points in enumerate(coils):
rid = self.update_representation(coil_representations[i],
{'coordinates': self.coordinates[control_points]})
self.update_representation(cylinders, {'startCoords': self.coordinates[list(start_idx)],
'endCoords': self.coordinates[list(end_idx)]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def cartoon(self, cmap=None):
'''Display a protein secondary structure as a pymol-like cartoon representation.
:param cmap: is a dictionary that maps the secondary type
(H=helix, E=sheet, C=coil) to a hexadecimal color (0xffffff for white)
'''
# Parse secondary structure
top = self.topology
geom = gg.GeomProteinCartoon(gg.Aes(xyz=self.coordinates,
types=top['atom_names'],
secondary_type=top['secondary_structure']),
cmap=cmap)
primitives = geom.produce(gg.Aes())
ids = [self.add_representation(r['rep_type'], r['options']) for r in primitives]
def update(self=self, geom=geom, ids=ids):
primitives = geom.produce(gg.Aes(xyz=self.coordinates))
[self.update_representation(id_, rep_options)
for id_, rep_options in zip(ids, primitives)]
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def _coordinates_changed(self, name, old, new):
[c() for c in self.update_callbacks]
def add_isosurface(self, function, isolevel=0.3, resolution=32, style="wireframe", color=0xffffff):
'''Add an isosurface to the current scene.
:param callable function: A function that takes x, y, z coordinates as input and is broadcastable using numpy. Typically simple
functions that involve standard arithmetic operations and functions
such as ``x**2 + y**2 + z**2`` or ``np.exp(x**2 + y**2 + z**2)`` will work. If not sure, you can first
pass the function through ``numpy.vectorize``.\
Example: ``mv.add_isosurface(np.vectorize(f))``
:param float isolevel: The value for which the function should be constant.
:param int resolution: The number of grid point to use for the surface. An high value will give better quality but lower performance.
:param str style: The surface style, choose between ``solid``, ``wireframe`` and ``transparent``.
:param int color: The color given as an hexadecimal integer. Example: ``0xffffff`` is white.
'''
avail_styles = ['wireframe', 'solid', 'transparent']
if style not in avail_styles:
raise ValueError('style must be in ' + str(avail_styles))
# We want to make a container that contains the whole molecule
# and surface
area_min = self.coordinates.min(axis=0) - 0.2
area_max = self.coordinates.max(axis=0) + 0.2
x = np.linspace(area_min[0], area_max[0], resolution)
y = np.linspace(area_min[1], area_max[1], resolution)
z = np.linspace(area_min[2], area_max[2], resolution)
xv, yv, zv = np.meshgrid(x, y, z)
spacing = np.array((area_max - area_min)/resolution)
if isolevel >= 0:
triangles = marching_cubes(function(xv, yv, zv), isolevel)
else: # Wrong traingle unwinding roder -- god only knows why
triangles = marching_cubes(-function(xv, yv, zv), -isolevel)
if len(triangles) == 0:
## NO surface
return
faces = []
verts = []
for i, t in enumerate(triangles):
faces.append([i * 3, i * 3 +1, i * 3 + 2])
verts.extend(t)
faces = np.array(faces)
verts = area_min + spacing/2 + np.array(verts)*spacing
rep_id = self.add_representation('surface', {'verts': verts.astype('float32'),
'faces': faces.astype('int32'),
'style': style,
'color': color})
self.autozoom(verts)
def add_isosurface_grid_data(self, data, origin, extent, resolution,
isolevel=0.3, scale=10,
style="wireframe", color=0xffffff):
"""
Add an isosurface to current scence using pre-computed data on a grid
"""
spacing = np.array(extent/resolution)/scale
if isolevel >= 0:
triangles = marching_cubes(data, isolevel)
else:
triangles = marching_cubes(-data, -isolevel)
faces = []
verts = []
for i, t in enumerate(triangles):
faces.append([i * 3, i * 3 +1, i * 3 + 2])
verts.extend(t)
faces = np.array(faces)
verts = origin + spacing/2 + np.array(verts)*spacing
rep_id = self.add_representation('surface', {'verts': verts.astype('float32'),
'faces': faces.astype('int32'),
'style': style,
'color': color})
self.autozoom(verts)
|
gabrielelanaro/chemview | chemview/viewer.py | MolecularViewer.line_ribbon | python | def line_ribbon(self):
'''Display the protein secondary structure as a white lines that passes through the
backbone chain.
'''
# Control points are the CA (C alphas)
backbone = np.array(self.topology['atom_names']) == 'CA'
smoothline = self.add_representation('smoothline', {'coordinates': self.coordinates[backbone],
'color': 0xffffff})
def update(self=self, smoothline=smoothline):
self.update_representation(smoothline, {'coordinates': self.coordinates[backbone]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates) | Display the protein secondary structure as a white lines that passes through the
backbone chain. | train | https://github.com/gabrielelanaro/chemview/blob/2c9768dd23db99e59e27adff2a953bb8ee795fa3/chemview/viewer.py#L231-L245 | null | class MolecularViewer(RepresentationViewer):
coordinates = Any()
def __init__(self, coordinates, topology, width=500, height=500):
'''Create a Molecular Viewer widget to be displayed in IPython notebook.
:param np.ndarray coordinates: A numpy array containing the 3D coordinates of the atoms to be displayed
:param dict topology: A dict specifying the topology as described in the User Guide.
'''
super(MolecularViewer, self).__init__(width, height)
self.update_callbacks = []
self.coordinates = coordinates.astype('float32')
self.topology = topology
self._axes_reps = []
def points(self, size=1.0, highlight=None, colorlist=None, opacity=1.0):
"""Display the system as points.
:param float size: the size of the points.
"""
if colorlist is None:
colorlist = [get_atom_color(t) for t in self.topology['atom_types']]
if highlight is not None:
if isinstance(highlight, int):
colorlist[highlight] = 0xff0000
if isinstance(highlight, (list, np.ndarray)):
for i in highlight:
colorlist[i] = 0xff0000
sizes = [size] * len(self.topology['atom_types'])
points = self.add_representation('points', {'coordinates': self.coordinates.astype('float32'),
'colors': colorlist,
'sizes': sizes,
'opacity': opacity})
# Update closure
def update(self=self, points=points):
self.update_representation(points, {'coordinates': self.coordinates.astype('float32')})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def labels(self, text=None, coordinates=None, colorlist=None, sizes=None, fonts=None, opacity=1.0):
'''Display atomic labels for the system'''
if coordinates is None:
coordinates=self.coordinates
l=len(coordinates)
if text is None:
if len(self.topology.get('atom_types'))==l:
text=[self.topology['atom_types'][i]+str(i+1) for i in range(l)]
else:
text=[str(i+1) for i in range(l)]
text_representation = self.add_representation('text', {'coordinates': coordinates,
'text': text,
'colors': colorlist,
'sizes': sizes,
'fonts': fonts,
'opacity': opacity})
def update(self=self, text_representation=text_representation):
self.update_representation(text_representation, {'coordinates': coordinates})
self.update_callbacks.append(update)
def remove_labels(self):
'''Remove all atomic labels from the system'''
for rep_id in self.representations.keys():
if self.representations[rep_id]['rep_type']=='text' and rep_id not in self._axes_reps:
self.remove_representation(rep_id)
def toggle_axes(self, parameters = None):
'''Toggle axes [x,y,z] on and off for the current representation
Parameters: dictionary of parameters to control axes:
position/p: origin of axes
length/l: length of axis
offset/o: offset to place axis labels
axis_colors/ac: axis colors
text_colors/tc: label colors
radii/r: axis radii
text/t: label text
sizes/s: label sizes
fonts/f: label fonts'''
if len(self._axes_reps)>0:
for rep_id in self._axes_reps:
self.remove_representation(rep_id)
self._axes_reps = []
else:
if not isinstance(parameters,dict):
parameters={}
def defaults(pdict,keys,default,length=3,instance=(int,float)):
'''Helper function to generate default values and handle errors'''
for k in keys:
val=pdict.get(k)
if val!=None:
break
if val==None:
val=default
elif isinstance(val,instance) and length>1:
val = [val]*length
elif isinstance(val,(list,np.generic,np.ndarray)) and length>1:
if not all([isinstance(v,instance) for v in val]):
raise RuntimeError("Invalid type {t} for parameter {p}. Use {i}.".format(t=type(val),p=val,i=instance))
elif not isinstance(val,instance):
raise RuntimeError("Invalid type {t} for parameter {p}. Use {i}.".format(t=type(val),p=val,i=instance))
return val
p = defaults(parameters,['positions','position','p'],np.average(self.coordinates,0))
l = defaults(parameters,['lengths','length','l'],max([np.linalg.norm(x-p) for x in self.coordinates]),1)
o = defaults(parameters,['offsets','offset','o'],l*1.05,1)
ac = defaults(parameters,[a+c for a in ['axis_','a',''] for c in ['colors','colours','color','colour','c']],[0xff0000,0x00ff00,0x0000ff],3,(int,hex))
tc = defaults(parameters,[a+c for a in ['text_','t',''] for c in ['colors','colours','color','colour','c']],[0xff0000,0x00ff00,0x0000ff],3,(int,hex))
r = defaults(parameters,['radii','radius','r'],[0.005]*3,3)
t = defaults(parameters,['text','labels','t'],['X','Y','Z'],3,str)
s = defaults(parameters,['sizes','size','s'],[32]*3,3)
f = defaults(parameters,['fonts','font','f'],['Arial']*3,3,str)
starts=np.array([p,p,p],float)
ends=np.array([p+[l,0,0],p+[0,l,0],p+[0,0,l]],float)
axis_labels_coords=np.array([p+[o,0,0],p+[0,o,0],p+[0,0,o]],float)
a_rep=self.add_representation('cylinders',{"startCoords":starts,
"endCoords":ends,
"colors":ac,
"radii":r})
t_rep=self.add_representation('text',{"coordinates":axis_labels_coords,
"text":t,
"colors":tc,
"sizes":s,
"fonts":f})
self._axes_reps = [a_rep, t_rep]
def lines(self):
'''Display the system bonds as lines.
'''
if "bonds" not in self.topology:
return
bond_start, bond_end = zip(*self.topology['bonds'])
bond_start = np.array(bond_start)
bond_end = np.array(bond_end)
color_array = np.array([get_atom_color(t) for t in self.topology['atom_types']])
lines = self.add_representation('lines', {'startCoords': self.coordinates[bond_start],
'endCoords': self.coordinates[bond_end],
'startColors': color_array[bond_start].tolist(),
'endColors': color_array[bond_end].tolist()})
def update(self=self, lines=lines):
bond_start, bond_end = zip(*self.topology['bonds'])
bond_start = np.array(bond_start)
bond_end = np.array(bond_end)
self.update_representation(lines, {'startCoords': self.coordinates[bond_start],
'endCoords': self.coordinates[bond_end]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def wireframe(self, pointsize=0.2, opacity=1.0):
'''Display atoms as points of size *pointsize* and bonds as lines.'''
self.points(pointsize, opacity=opacity)
self.lines()
def ball_and_sticks(self, ball_radius=0.05, stick_radius=0.02, colorlist=None, opacity=1.0):
"""Display the system using a ball and stick representation.
"""
# Add the spheres
if colorlist is None:
colorlist = [get_atom_color(t) for t in self.topology['atom_types']]
sizes = [ball_radius] * len(self.topology['atom_types'])
spheres = self.add_representation('spheres', {'coordinates': self.coordinates.astype('float32'),
'colors': colorlist,
'radii': sizes,
'opacity': opacity})
def update(self=self, spheres=spheres):
self.update_representation(spheres, {'coordinates': self.coordinates.astype('float32')})
self.update_callbacks.append(update)
# Add the cylinders
if 'bonds' in self.topology and self.topology['bonds'] is not None:
start_idx, end_idx = zip(*self.topology['bonds'])
# Added this so bonds don't go through atoms when opacity<1.0
new_start_coords = []
new_end_coords = []
for bond_ind, bond in enumerate(self.topology['bonds']):
trim_amt = (ball_radius**2 - stick_radius**2)**0.5 if ball_radius>stick_radius else 0
start_coord = self.coordinates[bond[0]]
end_coord = self.coordinates[bond[1]]
vec = (end_coord-start_coord)/np.linalg.norm(end_coord-start_coord)
new_start_coords.append(start_coord+vec*trim_amt)
new_end_coords.append(end_coord-vec*trim_amt)
cylinders = self.add_representation('cylinders', {'startCoords': np.array(new_start_coords,dtype='float32'),
'endCoords': np.array(new_end_coords,dtype='float32'),
'colors': [0xcccccc] * len(new_start_coords),
'radii': [stick_radius] * len(new_start_coords),
'opacity': opacity})
# Update closure
def update(self=self, rep=cylinders, start_idx=start_idx, end_idx=end_idx):
self.update_representation(rep, {'startCoords': self.coordinates[list(start_idx)],
'endCoords': self.coordinates[list(end_idx)]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def cylinder_and_strand(self):
'''Display the protein secondary structure as a white,
solid tube and the alpha-helices as yellow cylinders.
'''
top = self.topology
# We build a mini-state machine to find the
# start end of helices and such
in_helix = False
helices_starts = []
helices_ends = []
coils = []
coil = []
for i, typ in enumerate(top['secondary_structure']):
if typ == 'H':
if in_helix == False:
# We become helices
helices_starts.append(top['residue_indices'][i][0])
in_helix = True
# We end the previous coil
coil.append(top['residue_indices'][i][0])
else:
if in_helix == True:
# We stop being helices
helices_ends.append(top['residue_indices'][i][0])
# We start a new coil
coil = []
coils.append(coil)
in_helix = False
# We add control points
coil.append(top['residue_indices'][i][0])
[coil.append(j) for j in top['residue_indices'][i] if top['atom_names'][j] == 'CA']
# We add the coils
coil_representations = []
for control_points in coils:
rid = self.add_representation('smoothtube', {'coordinates': self.coordinates[control_points],
'radius': 0.05,
'resolution': 4,
'color': 0xffffff})
coil_representations.append(rid)
start_idx, end_idx = helices_starts, helices_ends
cylinders = self.add_representation('cylinders', {'startCoords': self.coordinates[list(start_idx)],
'endCoords': self.coordinates[list(end_idx)],
'colors': [0xffff00] * len(self.coordinates),
'radii': [0.15] * len(self.coordinates)})
def update(self=self, cylinders=cylinders, coils=coils,
coil_representations=coil_representations,
start_idx=start_idx, end_idx=end_idx, control_points=control_points):
for i, control_points in enumerate(coils):
rid = self.update_representation(coil_representations[i],
{'coordinates': self.coordinates[control_points]})
self.update_representation(cylinders, {'startCoords': self.coordinates[list(start_idx)],
'endCoords': self.coordinates[list(end_idx)]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def cartoon(self, cmap=None):
'''Display a protein secondary structure as a pymol-like cartoon representation.
:param cmap: is a dictionary that maps the secondary type
(H=helix, E=sheet, C=coil) to a hexadecimal color (0xffffff for white)
'''
# Parse secondary structure
top = self.topology
geom = gg.GeomProteinCartoon(gg.Aes(xyz=self.coordinates,
types=top['atom_names'],
secondary_type=top['secondary_structure']),
cmap=cmap)
primitives = geom.produce(gg.Aes())
ids = [self.add_representation(r['rep_type'], r['options']) for r in primitives]
def update(self=self, geom=geom, ids=ids):
primitives = geom.produce(gg.Aes(xyz=self.coordinates))
[self.update_representation(id_, rep_options)
for id_, rep_options in zip(ids, primitives)]
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def _coordinates_changed(self, name, old, new):
[c() for c in self.update_callbacks]
def add_isosurface(self, function, isolevel=0.3, resolution=32, style="wireframe", color=0xffffff):
'''Add an isosurface to the current scene.
:param callable function: A function that takes x, y, z coordinates as input and is broadcastable using numpy. Typically simple
functions that involve standard arithmetic operations and functions
such as ``x**2 + y**2 + z**2`` or ``np.exp(x**2 + y**2 + z**2)`` will work. If not sure, you can first
pass the function through ``numpy.vectorize``.\
Example: ``mv.add_isosurface(np.vectorize(f))``
:param float isolevel: The value for which the function should be constant.
:param int resolution: The number of grid point to use for the surface. An high value will give better quality but lower performance.
:param str style: The surface style, choose between ``solid``, ``wireframe`` and ``transparent``.
:param int color: The color given as an hexadecimal integer. Example: ``0xffffff`` is white.
'''
avail_styles = ['wireframe', 'solid', 'transparent']
if style not in avail_styles:
raise ValueError('style must be in ' + str(avail_styles))
# We want to make a container that contains the whole molecule
# and surface
area_min = self.coordinates.min(axis=0) - 0.2
area_max = self.coordinates.max(axis=0) + 0.2
x = np.linspace(area_min[0], area_max[0], resolution)
y = np.linspace(area_min[1], area_max[1], resolution)
z = np.linspace(area_min[2], area_max[2], resolution)
xv, yv, zv = np.meshgrid(x, y, z)
spacing = np.array((area_max - area_min)/resolution)
if isolevel >= 0:
triangles = marching_cubes(function(xv, yv, zv), isolevel)
else: # Wrong traingle unwinding roder -- god only knows why
triangles = marching_cubes(-function(xv, yv, zv), -isolevel)
if len(triangles) == 0:
## NO surface
return
faces = []
verts = []
for i, t in enumerate(triangles):
faces.append([i * 3, i * 3 +1, i * 3 + 2])
verts.extend(t)
faces = np.array(faces)
verts = area_min + spacing/2 + np.array(verts)*spacing
rep_id = self.add_representation('surface', {'verts': verts.astype('float32'),
'faces': faces.astype('int32'),
'style': style,
'color': color})
self.autozoom(verts)
def add_isosurface_grid_data(self, data, origin, extent, resolution,
isolevel=0.3, scale=10,
style="wireframe", color=0xffffff):
"""
Add an isosurface to current scence using pre-computed data on a grid
"""
spacing = np.array(extent/resolution)/scale
if isolevel >= 0:
triangles = marching_cubes(data, isolevel)
else:
triangles = marching_cubes(-data, -isolevel)
faces = []
verts = []
for i, t in enumerate(triangles):
faces.append([i * 3, i * 3 +1, i * 3 + 2])
verts.extend(t)
faces = np.array(faces)
verts = origin + spacing/2 + np.array(verts)*spacing
rep_id = self.add_representation('surface', {'verts': verts.astype('float32'),
'faces': faces.astype('int32'),
'style': style,
'color': color})
self.autozoom(verts)
|
gabrielelanaro/chemview | chemview/viewer.py | MolecularViewer.cylinder_and_strand | python | def cylinder_and_strand(self):
'''Display the protein secondary structure as a white,
solid tube and the alpha-helices as yellow cylinders.
'''
top = self.topology
# We build a mini-state machine to find the
# start end of helices and such
in_helix = False
helices_starts = []
helices_ends = []
coils = []
coil = []
for i, typ in enumerate(top['secondary_structure']):
if typ == 'H':
if in_helix == False:
# We become helices
helices_starts.append(top['residue_indices'][i][0])
in_helix = True
# We end the previous coil
coil.append(top['residue_indices'][i][0])
else:
if in_helix == True:
# We stop being helices
helices_ends.append(top['residue_indices'][i][0])
# We start a new coil
coil = []
coils.append(coil)
in_helix = False
# We add control points
coil.append(top['residue_indices'][i][0])
[coil.append(j) for j in top['residue_indices'][i] if top['atom_names'][j] == 'CA']
# We add the coils
coil_representations = []
for control_points in coils:
rid = self.add_representation('smoothtube', {'coordinates': self.coordinates[control_points],
'radius': 0.05,
'resolution': 4,
'color': 0xffffff})
coil_representations.append(rid)
start_idx, end_idx = helices_starts, helices_ends
cylinders = self.add_representation('cylinders', {'startCoords': self.coordinates[list(start_idx)],
'endCoords': self.coordinates[list(end_idx)],
'colors': [0xffff00] * len(self.coordinates),
'radii': [0.15] * len(self.coordinates)})
def update(self=self, cylinders=cylinders, coils=coils,
coil_representations=coil_representations,
start_idx=start_idx, end_idx=end_idx, control_points=control_points):
for i, control_points in enumerate(coils):
rid = self.update_representation(coil_representations[i],
{'coordinates': self.coordinates[control_points]})
self.update_representation(cylinders, {'startCoords': self.coordinates[list(start_idx)],
'endCoords': self.coordinates[list(end_idx)]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates) | Display the protein secondary structure as a white,
solid tube and the alpha-helices as yellow cylinders. | train | https://github.com/gabrielelanaro/chemview/blob/2c9768dd23db99e59e27adff2a953bb8ee795fa3/chemview/viewer.py#L247-L310 | null | class MolecularViewer(RepresentationViewer):
coordinates = Any()
def __init__(self, coordinates, topology, width=500, height=500):
'''Create a Molecular Viewer widget to be displayed in IPython notebook.
:param np.ndarray coordinates: A numpy array containing the 3D coordinates of the atoms to be displayed
:param dict topology: A dict specifying the topology as described in the User Guide.
'''
super(MolecularViewer, self).__init__(width, height)
self.update_callbacks = []
self.coordinates = coordinates.astype('float32')
self.topology = topology
self._axes_reps = []
def points(self, size=1.0, highlight=None, colorlist=None, opacity=1.0):
"""Display the system as points.
:param float size: the size of the points.
"""
if colorlist is None:
colorlist = [get_atom_color(t) for t in self.topology['atom_types']]
if highlight is not None:
if isinstance(highlight, int):
colorlist[highlight] = 0xff0000
if isinstance(highlight, (list, np.ndarray)):
for i in highlight:
colorlist[i] = 0xff0000
sizes = [size] * len(self.topology['atom_types'])
points = self.add_representation('points', {'coordinates': self.coordinates.astype('float32'),
'colors': colorlist,
'sizes': sizes,
'opacity': opacity})
# Update closure
def update(self=self, points=points):
self.update_representation(points, {'coordinates': self.coordinates.astype('float32')})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def labels(self, text=None, coordinates=None, colorlist=None, sizes=None, fonts=None, opacity=1.0):
'''Display atomic labels for the system'''
if coordinates is None:
coordinates=self.coordinates
l=len(coordinates)
if text is None:
if len(self.topology.get('atom_types'))==l:
text=[self.topology['atom_types'][i]+str(i+1) for i in range(l)]
else:
text=[str(i+1) for i in range(l)]
text_representation = self.add_representation('text', {'coordinates': coordinates,
'text': text,
'colors': colorlist,
'sizes': sizes,
'fonts': fonts,
'opacity': opacity})
def update(self=self, text_representation=text_representation):
self.update_representation(text_representation, {'coordinates': coordinates})
self.update_callbacks.append(update)
def remove_labels(self):
'''Remove all atomic labels from the system'''
for rep_id in self.representations.keys():
if self.representations[rep_id]['rep_type']=='text' and rep_id not in self._axes_reps:
self.remove_representation(rep_id)
def toggle_axes(self, parameters = None):
'''Toggle axes [x,y,z] on and off for the current representation
Parameters: dictionary of parameters to control axes:
position/p: origin of axes
length/l: length of axis
offset/o: offset to place axis labels
axis_colors/ac: axis colors
text_colors/tc: label colors
radii/r: axis radii
text/t: label text
sizes/s: label sizes
fonts/f: label fonts'''
if len(self._axes_reps)>0:
for rep_id in self._axes_reps:
self.remove_representation(rep_id)
self._axes_reps = []
else:
if not isinstance(parameters,dict):
parameters={}
def defaults(pdict,keys,default,length=3,instance=(int,float)):
'''Helper function to generate default values and handle errors'''
for k in keys:
val=pdict.get(k)
if val!=None:
break
if val==None:
val=default
elif isinstance(val,instance) and length>1:
val = [val]*length
elif isinstance(val,(list,np.generic,np.ndarray)) and length>1:
if not all([isinstance(v,instance) for v in val]):
raise RuntimeError("Invalid type {t} for parameter {p}. Use {i}.".format(t=type(val),p=val,i=instance))
elif not isinstance(val,instance):
raise RuntimeError("Invalid type {t} for parameter {p}. Use {i}.".format(t=type(val),p=val,i=instance))
return val
p = defaults(parameters,['positions','position','p'],np.average(self.coordinates,0))
l = defaults(parameters,['lengths','length','l'],max([np.linalg.norm(x-p) for x in self.coordinates]),1)
o = defaults(parameters,['offsets','offset','o'],l*1.05,1)
ac = defaults(parameters,[a+c for a in ['axis_','a',''] for c in ['colors','colours','color','colour','c']],[0xff0000,0x00ff00,0x0000ff],3,(int,hex))
tc = defaults(parameters,[a+c for a in ['text_','t',''] for c in ['colors','colours','color','colour','c']],[0xff0000,0x00ff00,0x0000ff],3,(int,hex))
r = defaults(parameters,['radii','radius','r'],[0.005]*3,3)
t = defaults(parameters,['text','labels','t'],['X','Y','Z'],3,str)
s = defaults(parameters,['sizes','size','s'],[32]*3,3)
f = defaults(parameters,['fonts','font','f'],['Arial']*3,3,str)
starts=np.array([p,p,p],float)
ends=np.array([p+[l,0,0],p+[0,l,0],p+[0,0,l]],float)
axis_labels_coords=np.array([p+[o,0,0],p+[0,o,0],p+[0,0,o]],float)
a_rep=self.add_representation('cylinders',{"startCoords":starts,
"endCoords":ends,
"colors":ac,
"radii":r})
t_rep=self.add_representation('text',{"coordinates":axis_labels_coords,
"text":t,
"colors":tc,
"sizes":s,
"fonts":f})
self._axes_reps = [a_rep, t_rep]
def lines(self):
'''Display the system bonds as lines.
'''
if "bonds" not in self.topology:
return
bond_start, bond_end = zip(*self.topology['bonds'])
bond_start = np.array(bond_start)
bond_end = np.array(bond_end)
color_array = np.array([get_atom_color(t) for t in self.topology['atom_types']])
lines = self.add_representation('lines', {'startCoords': self.coordinates[bond_start],
'endCoords': self.coordinates[bond_end],
'startColors': color_array[bond_start].tolist(),
'endColors': color_array[bond_end].tolist()})
def update(self=self, lines=lines):
bond_start, bond_end = zip(*self.topology['bonds'])
bond_start = np.array(bond_start)
bond_end = np.array(bond_end)
self.update_representation(lines, {'startCoords': self.coordinates[bond_start],
'endCoords': self.coordinates[bond_end]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def wireframe(self, pointsize=0.2, opacity=1.0):
'''Display atoms as points of size *pointsize* and bonds as lines.'''
self.points(pointsize, opacity=opacity)
self.lines()
def ball_and_sticks(self, ball_radius=0.05, stick_radius=0.02, colorlist=None, opacity=1.0):
"""Display the system using a ball and stick representation.
"""
# Add the spheres
if colorlist is None:
colorlist = [get_atom_color(t) for t in self.topology['atom_types']]
sizes = [ball_radius] * len(self.topology['atom_types'])
spheres = self.add_representation('spheres', {'coordinates': self.coordinates.astype('float32'),
'colors': colorlist,
'radii': sizes,
'opacity': opacity})
def update(self=self, spheres=spheres):
self.update_representation(spheres, {'coordinates': self.coordinates.astype('float32')})
self.update_callbacks.append(update)
# Add the cylinders
if 'bonds' in self.topology and self.topology['bonds'] is not None:
start_idx, end_idx = zip(*self.topology['bonds'])
# Added this so bonds don't go through atoms when opacity<1.0
new_start_coords = []
new_end_coords = []
for bond_ind, bond in enumerate(self.topology['bonds']):
trim_amt = (ball_radius**2 - stick_radius**2)**0.5 if ball_radius>stick_radius else 0
start_coord = self.coordinates[bond[0]]
end_coord = self.coordinates[bond[1]]
vec = (end_coord-start_coord)/np.linalg.norm(end_coord-start_coord)
new_start_coords.append(start_coord+vec*trim_amt)
new_end_coords.append(end_coord-vec*trim_amt)
cylinders = self.add_representation('cylinders', {'startCoords': np.array(new_start_coords,dtype='float32'),
'endCoords': np.array(new_end_coords,dtype='float32'),
'colors': [0xcccccc] * len(new_start_coords),
'radii': [stick_radius] * len(new_start_coords),
'opacity': opacity})
# Update closure
def update(self=self, rep=cylinders, start_idx=start_idx, end_idx=end_idx):
self.update_representation(rep, {'startCoords': self.coordinates[list(start_idx)],
'endCoords': self.coordinates[list(end_idx)]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def line_ribbon(self):
'''Display the protein secondary structure as a white lines that passes through the
backbone chain.
'''
# Control points are the CA (C alphas)
backbone = np.array(self.topology['atom_names']) == 'CA'
smoothline = self.add_representation('smoothline', {'coordinates': self.coordinates[backbone],
'color': 0xffffff})
def update(self=self, smoothline=smoothline):
self.update_representation(smoothline, {'coordinates': self.coordinates[backbone]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def cartoon(self, cmap=None):
'''Display a protein secondary structure as a pymol-like cartoon representation.
:param cmap: is a dictionary that maps the secondary type
(H=helix, E=sheet, C=coil) to a hexadecimal color (0xffffff for white)
'''
# Parse secondary structure
top = self.topology
geom = gg.GeomProteinCartoon(gg.Aes(xyz=self.coordinates,
types=top['atom_names'],
secondary_type=top['secondary_structure']),
cmap=cmap)
primitives = geom.produce(gg.Aes())
ids = [self.add_representation(r['rep_type'], r['options']) for r in primitives]
def update(self=self, geom=geom, ids=ids):
primitives = geom.produce(gg.Aes(xyz=self.coordinates))
[self.update_representation(id_, rep_options)
for id_, rep_options in zip(ids, primitives)]
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def _coordinates_changed(self, name, old, new):
[c() for c in self.update_callbacks]
def add_isosurface(self, function, isolevel=0.3, resolution=32, style="wireframe", color=0xffffff):
'''Add an isosurface to the current scene.
:param callable function: A function that takes x, y, z coordinates as input and is broadcastable using numpy. Typically simple
functions that involve standard arithmetic operations and functions
such as ``x**2 + y**2 + z**2`` or ``np.exp(x**2 + y**2 + z**2)`` will work. If not sure, you can first
pass the function through ``numpy.vectorize``.\
Example: ``mv.add_isosurface(np.vectorize(f))``
:param float isolevel: The value for which the function should be constant.
:param int resolution: The number of grid point to use for the surface. An high value will give better quality but lower performance.
:param str style: The surface style, choose between ``solid``, ``wireframe`` and ``transparent``.
:param int color: The color given as an hexadecimal integer. Example: ``0xffffff`` is white.
'''
avail_styles = ['wireframe', 'solid', 'transparent']
if style not in avail_styles:
raise ValueError('style must be in ' + str(avail_styles))
# We want to make a container that contains the whole molecule
# and surface
area_min = self.coordinates.min(axis=0) - 0.2
area_max = self.coordinates.max(axis=0) + 0.2
x = np.linspace(area_min[0], area_max[0], resolution)
y = np.linspace(area_min[1], area_max[1], resolution)
z = np.linspace(area_min[2], area_max[2], resolution)
xv, yv, zv = np.meshgrid(x, y, z)
spacing = np.array((area_max - area_min)/resolution)
if isolevel >= 0:
triangles = marching_cubes(function(xv, yv, zv), isolevel)
else: # Wrong traingle unwinding roder -- god only knows why
triangles = marching_cubes(-function(xv, yv, zv), -isolevel)
if len(triangles) == 0:
## NO surface
return
faces = []
verts = []
for i, t in enumerate(triangles):
faces.append([i * 3, i * 3 +1, i * 3 + 2])
verts.extend(t)
faces = np.array(faces)
verts = area_min + spacing/2 + np.array(verts)*spacing
rep_id = self.add_representation('surface', {'verts': verts.astype('float32'),
'faces': faces.astype('int32'),
'style': style,
'color': color})
self.autozoom(verts)
def add_isosurface_grid_data(self, data, origin, extent, resolution,
isolevel=0.3, scale=10,
style="wireframe", color=0xffffff):
"""
Add an isosurface to current scence using pre-computed data on a grid
"""
spacing = np.array(extent/resolution)/scale
if isolevel >= 0:
triangles = marching_cubes(data, isolevel)
else:
triangles = marching_cubes(-data, -isolevel)
faces = []
verts = []
for i, t in enumerate(triangles):
faces.append([i * 3, i * 3 +1, i * 3 + 2])
verts.extend(t)
faces = np.array(faces)
verts = origin + spacing/2 + np.array(verts)*spacing
rep_id = self.add_representation('surface', {'verts': verts.astype('float32'),
'faces': faces.astype('int32'),
'style': style,
'color': color})
self.autozoom(verts)
|
gabrielelanaro/chemview | chemview/viewer.py | MolecularViewer.cartoon | python | def cartoon(self, cmap=None):
'''Display a protein secondary structure as a pymol-like cartoon representation.
:param cmap: is a dictionary that maps the secondary type
(H=helix, E=sheet, C=coil) to a hexadecimal color (0xffffff for white)
'''
# Parse secondary structure
top = self.topology
geom = gg.GeomProteinCartoon(gg.Aes(xyz=self.coordinates,
types=top['atom_names'],
secondary_type=top['secondary_structure']),
cmap=cmap)
primitives = geom.produce(gg.Aes())
ids = [self.add_representation(r['rep_type'], r['options']) for r in primitives]
def update(self=self, geom=geom, ids=ids):
primitives = geom.produce(gg.Aes(xyz=self.coordinates))
[self.update_representation(id_, rep_options)
for id_, rep_options in zip(ids, primitives)]
self.update_callbacks.append(update)
self.autozoom(self.coordinates) | Display a protein secondary structure as a pymol-like cartoon representation.
:param cmap: is a dictionary that maps the secondary type
(H=helix, E=sheet, C=coil) to a hexadecimal color (0xffffff for white) | train | https://github.com/gabrielelanaro/chemview/blob/2c9768dd23db99e59e27adff2a953bb8ee795fa3/chemview/viewer.py#L312-L335 | [
"def produce(self, aes=Aes()):\n aes = aes.updated(self.aes)\n\n # Check if secondary_id is present, if not we generate a reasonable one\n if not 'secondary_id' in aes:\n pairs_ = groupby_ix(aes.secondary_type)\n secondary_id = np.zeros_like(aes.secondary_type, dtype='int')\n for k, (i,j) in enumerate(pairs_):\n secondary_id[i:j] = k + 1\n\n aes['secondary_id'] = secondary_id\n aes['types'] = np.array(aes.types)\n primitives = []\n\n for xyz, normals in zip(*self._extract_helix_coords_normals(aes)):\n g_helices = GeomRibbon(Aes(xyz=xyz, normals=normals, resolution=32), \n color=self.cmap.get('H', 0xffffff))\n primitives.extend(g_helices.produce(Aes()))\n\n for xyz, normals in zip(*self._extract_sheet_coords_normals(aes)):\n g_sheets = GeomRibbon(Aes(xyz=xyz, normals=normals, resolution=32), \n arrow=True, color=self.cmap.get('E', 0xffffff))\n primitives.extend(g_sheets.produce(Aes()))\n\n for xyz in self._extract_coil_coords(aes):\n g_coils = GeomTube(Aes(xyz=xyz), color=self.cmap.get('C', 0xffffff))\n primitives.extend(g_coils.produce(Aes()))\n\n return primitives\n"
] | class MolecularViewer(RepresentationViewer):
coordinates = Any()
def __init__(self, coordinates, topology, width=500, height=500):
'''Create a Molecular Viewer widget to be displayed in IPython notebook.
:param np.ndarray coordinates: A numpy array containing the 3D coordinates of the atoms to be displayed
:param dict topology: A dict specifying the topology as described in the User Guide.
'''
super(MolecularViewer, self).__init__(width, height)
self.update_callbacks = []
self.coordinates = coordinates.astype('float32')
self.topology = topology
self._axes_reps = []
def points(self, size=1.0, highlight=None, colorlist=None, opacity=1.0):
"""Display the system as points.
:param float size: the size of the points.
"""
if colorlist is None:
colorlist = [get_atom_color(t) for t in self.topology['atom_types']]
if highlight is not None:
if isinstance(highlight, int):
colorlist[highlight] = 0xff0000
if isinstance(highlight, (list, np.ndarray)):
for i in highlight:
colorlist[i] = 0xff0000
sizes = [size] * len(self.topology['atom_types'])
points = self.add_representation('points', {'coordinates': self.coordinates.astype('float32'),
'colors': colorlist,
'sizes': sizes,
'opacity': opacity})
# Update closure
def update(self=self, points=points):
self.update_representation(points, {'coordinates': self.coordinates.astype('float32')})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def labels(self, text=None, coordinates=None, colorlist=None, sizes=None, fonts=None, opacity=1.0):
'''Display atomic labels for the system'''
if coordinates is None:
coordinates=self.coordinates
l=len(coordinates)
if text is None:
if len(self.topology.get('atom_types'))==l:
text=[self.topology['atom_types'][i]+str(i+1) for i in range(l)]
else:
text=[str(i+1) for i in range(l)]
text_representation = self.add_representation('text', {'coordinates': coordinates,
'text': text,
'colors': colorlist,
'sizes': sizes,
'fonts': fonts,
'opacity': opacity})
def update(self=self, text_representation=text_representation):
self.update_representation(text_representation, {'coordinates': coordinates})
self.update_callbacks.append(update)
def remove_labels(self):
'''Remove all atomic labels from the system'''
for rep_id in self.representations.keys():
if self.representations[rep_id]['rep_type']=='text' and rep_id not in self._axes_reps:
self.remove_representation(rep_id)
def toggle_axes(self, parameters = None):
'''Toggle axes [x,y,z] on and off for the current representation
Parameters: dictionary of parameters to control axes:
position/p: origin of axes
length/l: length of axis
offset/o: offset to place axis labels
axis_colors/ac: axis colors
text_colors/tc: label colors
radii/r: axis radii
text/t: label text
sizes/s: label sizes
fonts/f: label fonts'''
if len(self._axes_reps)>0:
for rep_id in self._axes_reps:
self.remove_representation(rep_id)
self._axes_reps = []
else:
if not isinstance(parameters,dict):
parameters={}
def defaults(pdict,keys,default,length=3,instance=(int,float)):
'''Helper function to generate default values and handle errors'''
for k in keys:
val=pdict.get(k)
if val!=None:
break
if val==None:
val=default
elif isinstance(val,instance) and length>1:
val = [val]*length
elif isinstance(val,(list,np.generic,np.ndarray)) and length>1:
if not all([isinstance(v,instance) for v in val]):
raise RuntimeError("Invalid type {t} for parameter {p}. Use {i}.".format(t=type(val),p=val,i=instance))
elif not isinstance(val,instance):
raise RuntimeError("Invalid type {t} for parameter {p}. Use {i}.".format(t=type(val),p=val,i=instance))
return val
p = defaults(parameters,['positions','position','p'],np.average(self.coordinates,0))
l = defaults(parameters,['lengths','length','l'],max([np.linalg.norm(x-p) for x in self.coordinates]),1)
o = defaults(parameters,['offsets','offset','o'],l*1.05,1)
ac = defaults(parameters,[a+c for a in ['axis_','a',''] for c in ['colors','colours','color','colour','c']],[0xff0000,0x00ff00,0x0000ff],3,(int,hex))
tc = defaults(parameters,[a+c for a in ['text_','t',''] for c in ['colors','colours','color','colour','c']],[0xff0000,0x00ff00,0x0000ff],3,(int,hex))
r = defaults(parameters,['radii','radius','r'],[0.005]*3,3)
t = defaults(parameters,['text','labels','t'],['X','Y','Z'],3,str)
s = defaults(parameters,['sizes','size','s'],[32]*3,3)
f = defaults(parameters,['fonts','font','f'],['Arial']*3,3,str)
starts=np.array([p,p,p],float)
ends=np.array([p+[l,0,0],p+[0,l,0],p+[0,0,l]],float)
axis_labels_coords=np.array([p+[o,0,0],p+[0,o,0],p+[0,0,o]],float)
a_rep=self.add_representation('cylinders',{"startCoords":starts,
"endCoords":ends,
"colors":ac,
"radii":r})
t_rep=self.add_representation('text',{"coordinates":axis_labels_coords,
"text":t,
"colors":tc,
"sizes":s,
"fonts":f})
self._axes_reps = [a_rep, t_rep]
def lines(self):
'''Display the system bonds as lines.
'''
if "bonds" not in self.topology:
return
bond_start, bond_end = zip(*self.topology['bonds'])
bond_start = np.array(bond_start)
bond_end = np.array(bond_end)
color_array = np.array([get_atom_color(t) for t in self.topology['atom_types']])
lines = self.add_representation('lines', {'startCoords': self.coordinates[bond_start],
'endCoords': self.coordinates[bond_end],
'startColors': color_array[bond_start].tolist(),
'endColors': color_array[bond_end].tolist()})
def update(self=self, lines=lines):
bond_start, bond_end = zip(*self.topology['bonds'])
bond_start = np.array(bond_start)
bond_end = np.array(bond_end)
self.update_representation(lines, {'startCoords': self.coordinates[bond_start],
'endCoords': self.coordinates[bond_end]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def wireframe(self, pointsize=0.2, opacity=1.0):
'''Display atoms as points of size *pointsize* and bonds as lines.'''
self.points(pointsize, opacity=opacity)
self.lines()
def ball_and_sticks(self, ball_radius=0.05, stick_radius=0.02, colorlist=None, opacity=1.0):
"""Display the system using a ball and stick representation.
"""
# Add the spheres
if colorlist is None:
colorlist = [get_atom_color(t) for t in self.topology['atom_types']]
sizes = [ball_radius] * len(self.topology['atom_types'])
spheres = self.add_representation('spheres', {'coordinates': self.coordinates.astype('float32'),
'colors': colorlist,
'radii': sizes,
'opacity': opacity})
def update(self=self, spheres=spheres):
self.update_representation(spheres, {'coordinates': self.coordinates.astype('float32')})
self.update_callbacks.append(update)
# Add the cylinders
if 'bonds' in self.topology and self.topology['bonds'] is not None:
start_idx, end_idx = zip(*self.topology['bonds'])
# Added this so bonds don't go through atoms when opacity<1.0
new_start_coords = []
new_end_coords = []
for bond_ind, bond in enumerate(self.topology['bonds']):
trim_amt = (ball_radius**2 - stick_radius**2)**0.5 if ball_radius>stick_radius else 0
start_coord = self.coordinates[bond[0]]
end_coord = self.coordinates[bond[1]]
vec = (end_coord-start_coord)/np.linalg.norm(end_coord-start_coord)
new_start_coords.append(start_coord+vec*trim_amt)
new_end_coords.append(end_coord-vec*trim_amt)
cylinders = self.add_representation('cylinders', {'startCoords': np.array(new_start_coords,dtype='float32'),
'endCoords': np.array(new_end_coords,dtype='float32'),
'colors': [0xcccccc] * len(new_start_coords),
'radii': [stick_radius] * len(new_start_coords),
'opacity': opacity})
# Update closure
def update(self=self, rep=cylinders, start_idx=start_idx, end_idx=end_idx):
self.update_representation(rep, {'startCoords': self.coordinates[list(start_idx)],
'endCoords': self.coordinates[list(end_idx)]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def line_ribbon(self):
'''Display the protein secondary structure as a white lines that passes through the
backbone chain.
'''
# Control points are the CA (C alphas)
backbone = np.array(self.topology['atom_names']) == 'CA'
smoothline = self.add_representation('smoothline', {'coordinates': self.coordinates[backbone],
'color': 0xffffff})
def update(self=self, smoothline=smoothline):
self.update_representation(smoothline, {'coordinates': self.coordinates[backbone]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def cylinder_and_strand(self):
'''Display the protein secondary structure as a white,
solid tube and the alpha-helices as yellow cylinders.
'''
top = self.topology
# We build a mini-state machine to find the
# start end of helices and such
in_helix = False
helices_starts = []
helices_ends = []
coils = []
coil = []
for i, typ in enumerate(top['secondary_structure']):
if typ == 'H':
if in_helix == False:
# We become helices
helices_starts.append(top['residue_indices'][i][0])
in_helix = True
# We end the previous coil
coil.append(top['residue_indices'][i][0])
else:
if in_helix == True:
# We stop being helices
helices_ends.append(top['residue_indices'][i][0])
# We start a new coil
coil = []
coils.append(coil)
in_helix = False
# We add control points
coil.append(top['residue_indices'][i][0])
[coil.append(j) for j in top['residue_indices'][i] if top['atom_names'][j] == 'CA']
# We add the coils
coil_representations = []
for control_points in coils:
rid = self.add_representation('smoothtube', {'coordinates': self.coordinates[control_points],
'radius': 0.05,
'resolution': 4,
'color': 0xffffff})
coil_representations.append(rid)
start_idx, end_idx = helices_starts, helices_ends
cylinders = self.add_representation('cylinders', {'startCoords': self.coordinates[list(start_idx)],
'endCoords': self.coordinates[list(end_idx)],
'colors': [0xffff00] * len(self.coordinates),
'radii': [0.15] * len(self.coordinates)})
def update(self=self, cylinders=cylinders, coils=coils,
coil_representations=coil_representations,
start_idx=start_idx, end_idx=end_idx, control_points=control_points):
for i, control_points in enumerate(coils):
rid = self.update_representation(coil_representations[i],
{'coordinates': self.coordinates[control_points]})
self.update_representation(cylinders, {'startCoords': self.coordinates[list(start_idx)],
'endCoords': self.coordinates[list(end_idx)]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def _coordinates_changed(self, name, old, new):
[c() for c in self.update_callbacks]
def add_isosurface(self, function, isolevel=0.3, resolution=32, style="wireframe", color=0xffffff):
'''Add an isosurface to the current scene.
:param callable function: A function that takes x, y, z coordinates as input and is broadcastable using numpy. Typically simple
functions that involve standard arithmetic operations and functions
such as ``x**2 + y**2 + z**2`` or ``np.exp(x**2 + y**2 + z**2)`` will work. If not sure, you can first
pass the function through ``numpy.vectorize``.\
Example: ``mv.add_isosurface(np.vectorize(f))``
:param float isolevel: The value for which the function should be constant.
:param int resolution: The number of grid point to use for the surface. An high value will give better quality but lower performance.
:param str style: The surface style, choose between ``solid``, ``wireframe`` and ``transparent``.
:param int color: The color given as an hexadecimal integer. Example: ``0xffffff`` is white.
'''
avail_styles = ['wireframe', 'solid', 'transparent']
if style not in avail_styles:
raise ValueError('style must be in ' + str(avail_styles))
# We want to make a container that contains the whole molecule
# and surface
area_min = self.coordinates.min(axis=0) - 0.2
area_max = self.coordinates.max(axis=0) + 0.2
x = np.linspace(area_min[0], area_max[0], resolution)
y = np.linspace(area_min[1], area_max[1], resolution)
z = np.linspace(area_min[2], area_max[2], resolution)
xv, yv, zv = np.meshgrid(x, y, z)
spacing = np.array((area_max - area_min)/resolution)
if isolevel >= 0:
triangles = marching_cubes(function(xv, yv, zv), isolevel)
else: # Wrong traingle unwinding roder -- god only knows why
triangles = marching_cubes(-function(xv, yv, zv), -isolevel)
if len(triangles) == 0:
## NO surface
return
faces = []
verts = []
for i, t in enumerate(triangles):
faces.append([i * 3, i * 3 +1, i * 3 + 2])
verts.extend(t)
faces = np.array(faces)
verts = area_min + spacing/2 + np.array(verts)*spacing
rep_id = self.add_representation('surface', {'verts': verts.astype('float32'),
'faces': faces.astype('int32'),
'style': style,
'color': color})
self.autozoom(verts)
def add_isosurface_grid_data(self, data, origin, extent, resolution,
isolevel=0.3, scale=10,
style="wireframe", color=0xffffff):
"""
Add an isosurface to current scence using pre-computed data on a grid
"""
spacing = np.array(extent/resolution)/scale
if isolevel >= 0:
triangles = marching_cubes(data, isolevel)
else:
triangles = marching_cubes(-data, -isolevel)
faces = []
verts = []
for i, t in enumerate(triangles):
faces.append([i * 3, i * 3 +1, i * 3 + 2])
verts.extend(t)
faces = np.array(faces)
verts = origin + spacing/2 + np.array(verts)*spacing
rep_id = self.add_representation('surface', {'verts': verts.astype('float32'),
'faces': faces.astype('int32'),
'style': style,
'color': color})
self.autozoom(verts)
|
gabrielelanaro/chemview | chemview/viewer.py | MolecularViewer.add_isosurface | python | def add_isosurface(self, function, isolevel=0.3, resolution=32, style="wireframe", color=0xffffff):
'''Add an isosurface to the current scene.
:param callable function: A function that takes x, y, z coordinates as input and is broadcastable using numpy. Typically simple
functions that involve standard arithmetic operations and functions
such as ``x**2 + y**2 + z**2`` or ``np.exp(x**2 + y**2 + z**2)`` will work. If not sure, you can first
pass the function through ``numpy.vectorize``.\
Example: ``mv.add_isosurface(np.vectorize(f))``
:param float isolevel: The value for which the function should be constant.
:param int resolution: The number of grid point to use for the surface. An high value will give better quality but lower performance.
:param str style: The surface style, choose between ``solid``, ``wireframe`` and ``transparent``.
:param int color: The color given as an hexadecimal integer. Example: ``0xffffff`` is white.
'''
avail_styles = ['wireframe', 'solid', 'transparent']
if style not in avail_styles:
raise ValueError('style must be in ' + str(avail_styles))
# We want to make a container that contains the whole molecule
# and surface
area_min = self.coordinates.min(axis=0) - 0.2
area_max = self.coordinates.max(axis=0) + 0.2
x = np.linspace(area_min[0], area_max[0], resolution)
y = np.linspace(area_min[1], area_max[1], resolution)
z = np.linspace(area_min[2], area_max[2], resolution)
xv, yv, zv = np.meshgrid(x, y, z)
spacing = np.array((area_max - area_min)/resolution)
if isolevel >= 0:
triangles = marching_cubes(function(xv, yv, zv), isolevel)
else: # Wrong traingle unwinding roder -- god only knows why
triangles = marching_cubes(-function(xv, yv, zv), -isolevel)
if len(triangles) == 0:
## NO surface
return
faces = []
verts = []
for i, t in enumerate(triangles):
faces.append([i * 3, i * 3 +1, i * 3 + 2])
verts.extend(t)
faces = np.array(faces)
verts = area_min + spacing/2 + np.array(verts)*spacing
rep_id = self.add_representation('surface', {'verts': verts.astype('float32'),
'faces': faces.astype('int32'),
'style': style,
'color': color})
self.autozoom(verts) | Add an isosurface to the current scene.
:param callable function: A function that takes x, y, z coordinates as input and is broadcastable using numpy. Typically simple
functions that involve standard arithmetic operations and functions
such as ``x**2 + y**2 + z**2`` or ``np.exp(x**2 + y**2 + z**2)`` will work. If not sure, you can first
pass the function through ``numpy.vectorize``.\
Example: ``mv.add_isosurface(np.vectorize(f))``
:param float isolevel: The value for which the function should be constant.
:param int resolution: The number of grid point to use for the surface. An high value will give better quality but lower performance.
:param str style: The surface style, choose between ``solid``, ``wireframe`` and ``transparent``.
:param int color: The color given as an hexadecimal integer. Example: ``0xffffff`` is white. | train | https://github.com/gabrielelanaro/chemview/blob/2c9768dd23db99e59e27adff2a953bb8ee795fa3/chemview/viewer.py#L340-L393 | [
"def marching_cubes(field, isolevel):\n # The field is like gridpoints, and gridpoints define cubes.\n triangles = []\n # Here we determine the value for each field\n for i in range(field.shape[0] - 1):\n for j in range(field.shape[1] - 1):\n for k in range(field.shape[2] - 1):\n\n points = {\n 0: (i, j, k),\n 1: (i, j+1, k),\n 2: (i+1, j+1, k),\n 3: (i+1, j, k),\n 4: (i, j, k+1),\n 5: (i, j+1, k+1),\n 6: (i+1, j+1, k+1),\n 7: (i+1, j, k+1)\n }\n # Which kind of cube is this guy?\n cube_index = 0\n if field[points[0]] < isolevel: cube_index |= 1\n if field[points[1]] < isolevel: cube_index |= 2\n if field[points[2]] < isolevel: cube_index |= 4\n if field[points[3]] < isolevel: cube_index |= 8\n if field[points[4]] < isolevel: cube_index |= 16\n if field[points[5]] < isolevel: cube_index |= 32\n if field[points[6]] < isolevel: cube_index |= 64\n if field[points[7]] < isolevel: cube_index |= 128\n\n # Get the faces from the cube\n\n for e1,e2,e3 in tris_as_edges[cube_index]:\n # Get the three interpolated points on the edges\n triangle = []\n for edge in (e1, e2, e3):\n s, e = edge2pts[edge]\n p1, p2 = np.array(points[e], 'f'), np.array(points[s], 'f')\n v = interpolate_edge_coordinates(p1, field[points[e]],\n p2, field[points[s]],\n isolevel)\n triangle.append(v)\n triangles.append(triangle)\n triangles_ = np.array(triangles) # triangles_ NUMBA BUG\n\n if len(triangles) == 0:\n return np.array([])\n\n # TODO Let's just invert for now, but no one knows what the problem is\n triangles_[:, :, [0, 1]] = triangles_[:, :, [1, 0]]\n return triangles_\n"
] | class MolecularViewer(RepresentationViewer):
coordinates = Any()
def __init__(self, coordinates, topology, width=500, height=500):
'''Create a Molecular Viewer widget to be displayed in IPython notebook.
:param np.ndarray coordinates: A numpy array containing the 3D coordinates of the atoms to be displayed
:param dict topology: A dict specifying the topology as described in the User Guide.
'''
super(MolecularViewer, self).__init__(width, height)
self.update_callbacks = []
self.coordinates = coordinates.astype('float32')
self.topology = topology
self._axes_reps = []
def points(self, size=1.0, highlight=None, colorlist=None, opacity=1.0):
"""Display the system as points.
:param float size: the size of the points.
"""
if colorlist is None:
colorlist = [get_atom_color(t) for t in self.topology['atom_types']]
if highlight is not None:
if isinstance(highlight, int):
colorlist[highlight] = 0xff0000
if isinstance(highlight, (list, np.ndarray)):
for i in highlight:
colorlist[i] = 0xff0000
sizes = [size] * len(self.topology['atom_types'])
points = self.add_representation('points', {'coordinates': self.coordinates.astype('float32'),
'colors': colorlist,
'sizes': sizes,
'opacity': opacity})
# Update closure
def update(self=self, points=points):
self.update_representation(points, {'coordinates': self.coordinates.astype('float32')})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def labels(self, text=None, coordinates=None, colorlist=None, sizes=None, fonts=None, opacity=1.0):
'''Display atomic labels for the system'''
if coordinates is None:
coordinates=self.coordinates
l=len(coordinates)
if text is None:
if len(self.topology.get('atom_types'))==l:
text=[self.topology['atom_types'][i]+str(i+1) for i in range(l)]
else:
text=[str(i+1) for i in range(l)]
text_representation = self.add_representation('text', {'coordinates': coordinates,
'text': text,
'colors': colorlist,
'sizes': sizes,
'fonts': fonts,
'opacity': opacity})
def update(self=self, text_representation=text_representation):
self.update_representation(text_representation, {'coordinates': coordinates})
self.update_callbacks.append(update)
def remove_labels(self):
'''Remove all atomic labels from the system'''
for rep_id in self.representations.keys():
if self.representations[rep_id]['rep_type']=='text' and rep_id not in self._axes_reps:
self.remove_representation(rep_id)
def toggle_axes(self, parameters = None):
'''Toggle axes [x,y,z] on and off for the current representation
Parameters: dictionary of parameters to control axes:
position/p: origin of axes
length/l: length of axis
offset/o: offset to place axis labels
axis_colors/ac: axis colors
text_colors/tc: label colors
radii/r: axis radii
text/t: label text
sizes/s: label sizes
fonts/f: label fonts'''
if len(self._axes_reps)>0:
for rep_id in self._axes_reps:
self.remove_representation(rep_id)
self._axes_reps = []
else:
if not isinstance(parameters,dict):
parameters={}
def defaults(pdict,keys,default,length=3,instance=(int,float)):
'''Helper function to generate default values and handle errors'''
for k in keys:
val=pdict.get(k)
if val!=None:
break
if val==None:
val=default
elif isinstance(val,instance) and length>1:
val = [val]*length
elif isinstance(val,(list,np.generic,np.ndarray)) and length>1:
if not all([isinstance(v,instance) for v in val]):
raise RuntimeError("Invalid type {t} for parameter {p}. Use {i}.".format(t=type(val),p=val,i=instance))
elif not isinstance(val,instance):
raise RuntimeError("Invalid type {t} for parameter {p}. Use {i}.".format(t=type(val),p=val,i=instance))
return val
p = defaults(parameters,['positions','position','p'],np.average(self.coordinates,0))
l = defaults(parameters,['lengths','length','l'],max([np.linalg.norm(x-p) for x in self.coordinates]),1)
o = defaults(parameters,['offsets','offset','o'],l*1.05,1)
ac = defaults(parameters,[a+c for a in ['axis_','a',''] for c in ['colors','colours','color','colour','c']],[0xff0000,0x00ff00,0x0000ff],3,(int,hex))
tc = defaults(parameters,[a+c for a in ['text_','t',''] for c in ['colors','colours','color','colour','c']],[0xff0000,0x00ff00,0x0000ff],3,(int,hex))
r = defaults(parameters,['radii','radius','r'],[0.005]*3,3)
t = defaults(parameters,['text','labels','t'],['X','Y','Z'],3,str)
s = defaults(parameters,['sizes','size','s'],[32]*3,3)
f = defaults(parameters,['fonts','font','f'],['Arial']*3,3,str)
starts=np.array([p,p,p],float)
ends=np.array([p+[l,0,0],p+[0,l,0],p+[0,0,l]],float)
axis_labels_coords=np.array([p+[o,0,0],p+[0,o,0],p+[0,0,o]],float)
a_rep=self.add_representation('cylinders',{"startCoords":starts,
"endCoords":ends,
"colors":ac,
"radii":r})
t_rep=self.add_representation('text',{"coordinates":axis_labels_coords,
"text":t,
"colors":tc,
"sizes":s,
"fonts":f})
self._axes_reps = [a_rep, t_rep]
def lines(self):
'''Display the system bonds as lines.
'''
if "bonds" not in self.topology:
return
bond_start, bond_end = zip(*self.topology['bonds'])
bond_start = np.array(bond_start)
bond_end = np.array(bond_end)
color_array = np.array([get_atom_color(t) for t in self.topology['atom_types']])
lines = self.add_representation('lines', {'startCoords': self.coordinates[bond_start],
'endCoords': self.coordinates[bond_end],
'startColors': color_array[bond_start].tolist(),
'endColors': color_array[bond_end].tolist()})
def update(self=self, lines=lines):
bond_start, bond_end = zip(*self.topology['bonds'])
bond_start = np.array(bond_start)
bond_end = np.array(bond_end)
self.update_representation(lines, {'startCoords': self.coordinates[bond_start],
'endCoords': self.coordinates[bond_end]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def wireframe(self, pointsize=0.2, opacity=1.0):
'''Display atoms as points of size *pointsize* and bonds as lines.'''
self.points(pointsize, opacity=opacity)
self.lines()
def ball_and_sticks(self, ball_radius=0.05, stick_radius=0.02, colorlist=None, opacity=1.0):
"""Display the system using a ball and stick representation.
"""
# Add the spheres
if colorlist is None:
colorlist = [get_atom_color(t) for t in self.topology['atom_types']]
sizes = [ball_radius] * len(self.topology['atom_types'])
spheres = self.add_representation('spheres', {'coordinates': self.coordinates.astype('float32'),
'colors': colorlist,
'radii': sizes,
'opacity': opacity})
def update(self=self, spheres=spheres):
self.update_representation(spheres, {'coordinates': self.coordinates.astype('float32')})
self.update_callbacks.append(update)
# Add the cylinders
if 'bonds' in self.topology and self.topology['bonds'] is not None:
start_idx, end_idx = zip(*self.topology['bonds'])
# Added this so bonds don't go through atoms when opacity<1.0
new_start_coords = []
new_end_coords = []
for bond_ind, bond in enumerate(self.topology['bonds']):
trim_amt = (ball_radius**2 - stick_radius**2)**0.5 if ball_radius>stick_radius else 0
start_coord = self.coordinates[bond[0]]
end_coord = self.coordinates[bond[1]]
vec = (end_coord-start_coord)/np.linalg.norm(end_coord-start_coord)
new_start_coords.append(start_coord+vec*trim_amt)
new_end_coords.append(end_coord-vec*trim_amt)
cylinders = self.add_representation('cylinders', {'startCoords': np.array(new_start_coords,dtype='float32'),
'endCoords': np.array(new_end_coords,dtype='float32'),
'colors': [0xcccccc] * len(new_start_coords),
'radii': [stick_radius] * len(new_start_coords),
'opacity': opacity})
# Update closure
def update(self=self, rep=cylinders, start_idx=start_idx, end_idx=end_idx):
self.update_representation(rep, {'startCoords': self.coordinates[list(start_idx)],
'endCoords': self.coordinates[list(end_idx)]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def line_ribbon(self):
'''Display the protein secondary structure as a white lines that passes through the
backbone chain.
'''
# Control points are the CA (C alphas)
backbone = np.array(self.topology['atom_names']) == 'CA'
smoothline = self.add_representation('smoothline', {'coordinates': self.coordinates[backbone],
'color': 0xffffff})
def update(self=self, smoothline=smoothline):
self.update_representation(smoothline, {'coordinates': self.coordinates[backbone]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def cylinder_and_strand(self):
'''Display the protein secondary structure as a white,
solid tube and the alpha-helices as yellow cylinders.
'''
top = self.topology
# We build a mini-state machine to find the
# start end of helices and such
in_helix = False
helices_starts = []
helices_ends = []
coils = []
coil = []
for i, typ in enumerate(top['secondary_structure']):
if typ == 'H':
if in_helix == False:
# We become helices
helices_starts.append(top['residue_indices'][i][0])
in_helix = True
# We end the previous coil
coil.append(top['residue_indices'][i][0])
else:
if in_helix == True:
# We stop being helices
helices_ends.append(top['residue_indices'][i][0])
# We start a new coil
coil = []
coils.append(coil)
in_helix = False
# We add control points
coil.append(top['residue_indices'][i][0])
[coil.append(j) for j in top['residue_indices'][i] if top['atom_names'][j] == 'CA']
# We add the coils
coil_representations = []
for control_points in coils:
rid = self.add_representation('smoothtube', {'coordinates': self.coordinates[control_points],
'radius': 0.05,
'resolution': 4,
'color': 0xffffff})
coil_representations.append(rid)
start_idx, end_idx = helices_starts, helices_ends
cylinders = self.add_representation('cylinders', {'startCoords': self.coordinates[list(start_idx)],
'endCoords': self.coordinates[list(end_idx)],
'colors': [0xffff00] * len(self.coordinates),
'radii': [0.15] * len(self.coordinates)})
def update(self=self, cylinders=cylinders, coils=coils,
coil_representations=coil_representations,
start_idx=start_idx, end_idx=end_idx, control_points=control_points):
for i, control_points in enumerate(coils):
rid = self.update_representation(coil_representations[i],
{'coordinates': self.coordinates[control_points]})
self.update_representation(cylinders, {'startCoords': self.coordinates[list(start_idx)],
'endCoords': self.coordinates[list(end_idx)]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def cartoon(self, cmap=None):
'''Display a protein secondary structure as a pymol-like cartoon representation.
:param cmap: is a dictionary that maps the secondary type
(H=helix, E=sheet, C=coil) to a hexadecimal color (0xffffff for white)
'''
# Parse secondary structure
top = self.topology
geom = gg.GeomProteinCartoon(gg.Aes(xyz=self.coordinates,
types=top['atom_names'],
secondary_type=top['secondary_structure']),
cmap=cmap)
primitives = geom.produce(gg.Aes())
ids = [self.add_representation(r['rep_type'], r['options']) for r in primitives]
def update(self=self, geom=geom, ids=ids):
primitives = geom.produce(gg.Aes(xyz=self.coordinates))
[self.update_representation(id_, rep_options)
for id_, rep_options in zip(ids, primitives)]
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def _coordinates_changed(self, name, old, new):
[c() for c in self.update_callbacks]
def add_isosurface_grid_data(self, data, origin, extent, resolution,
isolevel=0.3, scale=10,
style="wireframe", color=0xffffff):
"""
Add an isosurface to current scence using pre-computed data on a grid
"""
spacing = np.array(extent/resolution)/scale
if isolevel >= 0:
triangles = marching_cubes(data, isolevel)
else:
triangles = marching_cubes(-data, -isolevel)
faces = []
verts = []
for i, t in enumerate(triangles):
faces.append([i * 3, i * 3 +1, i * 3 + 2])
verts.extend(t)
faces = np.array(faces)
verts = origin + spacing/2 + np.array(verts)*spacing
rep_id = self.add_representation('surface', {'verts': verts.astype('float32'),
'faces': faces.astype('int32'),
'style': style,
'color': color})
self.autozoom(verts)
|
gabrielelanaro/chemview | chemview/viewer.py | MolecularViewer.add_isosurface_grid_data | python | def add_isosurface_grid_data(self, data, origin, extent, resolution,
isolevel=0.3, scale=10,
style="wireframe", color=0xffffff):
spacing = np.array(extent/resolution)/scale
if isolevel >= 0:
triangles = marching_cubes(data, isolevel)
else:
triangles = marching_cubes(-data, -isolevel)
faces = []
verts = []
for i, t in enumerate(triangles):
faces.append([i * 3, i * 3 +1, i * 3 + 2])
verts.extend(t)
faces = np.array(faces)
verts = origin + spacing/2 + np.array(verts)*spacing
rep_id = self.add_representation('surface', {'verts': verts.astype('float32'),
'faces': faces.astype('int32'),
'style': style,
'color': color})
self.autozoom(verts) | Add an isosurface to current scence using pre-computed data on a grid | train | https://github.com/gabrielelanaro/chemview/blob/2c9768dd23db99e59e27adff2a953bb8ee795fa3/chemview/viewer.py#L395-L418 | [
"def marching_cubes(field, isolevel):\n # The field is like gridpoints, and gridpoints define cubes.\n triangles = []\n # Here we determine the value for each field\n for i in range(field.shape[0] - 1):\n for j in range(field.shape[1] - 1):\n for k in range(field.shape[2] - 1):\n\n points = {\n 0: (i, j, k),\n 1: (i, j+1, k),\n 2: (i+1, j+1, k),\n 3: (i+1, j, k),\n 4: (i, j, k+1),\n 5: (i, j+1, k+1),\n 6: (i+1, j+1, k+1),\n 7: (i+1, j, k+1)\n }\n # Which kind of cube is this guy?\n cube_index = 0\n if field[points[0]] < isolevel: cube_index |= 1\n if field[points[1]] < isolevel: cube_index |= 2\n if field[points[2]] < isolevel: cube_index |= 4\n if field[points[3]] < isolevel: cube_index |= 8\n if field[points[4]] < isolevel: cube_index |= 16\n if field[points[5]] < isolevel: cube_index |= 32\n if field[points[6]] < isolevel: cube_index |= 64\n if field[points[7]] < isolevel: cube_index |= 128\n\n # Get the faces from the cube\n\n for e1,e2,e3 in tris_as_edges[cube_index]:\n # Get the three interpolated points on the edges\n triangle = []\n for edge in (e1, e2, e3):\n s, e = edge2pts[edge]\n p1, p2 = np.array(points[e], 'f'), np.array(points[s], 'f')\n v = interpolate_edge_coordinates(p1, field[points[e]],\n p2, field[points[s]],\n isolevel)\n triangle.append(v)\n triangles.append(triangle)\n triangles_ = np.array(triangles) # triangles_ NUMBA BUG\n\n if len(triangles) == 0:\n return np.array([])\n\n # TODO Let's just invert for now, but no one knows what the problem is\n triangles_[:, :, [0, 1]] = triangles_[:, :, [1, 0]]\n return triangles_\n"
] | class MolecularViewer(RepresentationViewer):
coordinates = Any()
def __init__(self, coordinates, topology, width=500, height=500):
'''Create a Molecular Viewer widget to be displayed in IPython notebook.
:param np.ndarray coordinates: A numpy array containing the 3D coordinates of the atoms to be displayed
:param dict topology: A dict specifying the topology as described in the User Guide.
'''
super(MolecularViewer, self).__init__(width, height)
self.update_callbacks = []
self.coordinates = coordinates.astype('float32')
self.topology = topology
self._axes_reps = []
def points(self, size=1.0, highlight=None, colorlist=None, opacity=1.0):
"""Display the system as points.
:param float size: the size of the points.
"""
if colorlist is None:
colorlist = [get_atom_color(t) for t in self.topology['atom_types']]
if highlight is not None:
if isinstance(highlight, int):
colorlist[highlight] = 0xff0000
if isinstance(highlight, (list, np.ndarray)):
for i in highlight:
colorlist[i] = 0xff0000
sizes = [size] * len(self.topology['atom_types'])
points = self.add_representation('points', {'coordinates': self.coordinates.astype('float32'),
'colors': colorlist,
'sizes': sizes,
'opacity': opacity})
# Update closure
def update(self=self, points=points):
self.update_representation(points, {'coordinates': self.coordinates.astype('float32')})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def labels(self, text=None, coordinates=None, colorlist=None, sizes=None, fonts=None, opacity=1.0):
'''Display atomic labels for the system'''
if coordinates is None:
coordinates=self.coordinates
l=len(coordinates)
if text is None:
if len(self.topology.get('atom_types'))==l:
text=[self.topology['atom_types'][i]+str(i+1) for i in range(l)]
else:
text=[str(i+1) for i in range(l)]
text_representation = self.add_representation('text', {'coordinates': coordinates,
'text': text,
'colors': colorlist,
'sizes': sizes,
'fonts': fonts,
'opacity': opacity})
def update(self=self, text_representation=text_representation):
self.update_representation(text_representation, {'coordinates': coordinates})
self.update_callbacks.append(update)
def remove_labels(self):
'''Remove all atomic labels from the system'''
for rep_id in self.representations.keys():
if self.representations[rep_id]['rep_type']=='text' and rep_id not in self._axes_reps:
self.remove_representation(rep_id)
def toggle_axes(self, parameters = None):
'''Toggle axes [x,y,z] on and off for the current representation
Parameters: dictionary of parameters to control axes:
position/p: origin of axes
length/l: length of axis
offset/o: offset to place axis labels
axis_colors/ac: axis colors
text_colors/tc: label colors
radii/r: axis radii
text/t: label text
sizes/s: label sizes
fonts/f: label fonts'''
if len(self._axes_reps)>0:
for rep_id in self._axes_reps:
self.remove_representation(rep_id)
self._axes_reps = []
else:
if not isinstance(parameters,dict):
parameters={}
def defaults(pdict,keys,default,length=3,instance=(int,float)):
'''Helper function to generate default values and handle errors'''
for k in keys:
val=pdict.get(k)
if val!=None:
break
if val==None:
val=default
elif isinstance(val,instance) and length>1:
val = [val]*length
elif isinstance(val,(list,np.generic,np.ndarray)) and length>1:
if not all([isinstance(v,instance) for v in val]):
raise RuntimeError("Invalid type {t} for parameter {p}. Use {i}.".format(t=type(val),p=val,i=instance))
elif not isinstance(val,instance):
raise RuntimeError("Invalid type {t} for parameter {p}. Use {i}.".format(t=type(val),p=val,i=instance))
return val
p = defaults(parameters,['positions','position','p'],np.average(self.coordinates,0))
l = defaults(parameters,['lengths','length','l'],max([np.linalg.norm(x-p) for x in self.coordinates]),1)
o = defaults(parameters,['offsets','offset','o'],l*1.05,1)
ac = defaults(parameters,[a+c for a in ['axis_','a',''] for c in ['colors','colours','color','colour','c']],[0xff0000,0x00ff00,0x0000ff],3,(int,hex))
tc = defaults(parameters,[a+c for a in ['text_','t',''] for c in ['colors','colours','color','colour','c']],[0xff0000,0x00ff00,0x0000ff],3,(int,hex))
r = defaults(parameters,['radii','radius','r'],[0.005]*3,3)
t = defaults(parameters,['text','labels','t'],['X','Y','Z'],3,str)
s = defaults(parameters,['sizes','size','s'],[32]*3,3)
f = defaults(parameters,['fonts','font','f'],['Arial']*3,3,str)
starts=np.array([p,p,p],float)
ends=np.array([p+[l,0,0],p+[0,l,0],p+[0,0,l]],float)
axis_labels_coords=np.array([p+[o,0,0],p+[0,o,0],p+[0,0,o]],float)
a_rep=self.add_representation('cylinders',{"startCoords":starts,
"endCoords":ends,
"colors":ac,
"radii":r})
t_rep=self.add_representation('text',{"coordinates":axis_labels_coords,
"text":t,
"colors":tc,
"sizes":s,
"fonts":f})
self._axes_reps = [a_rep, t_rep]
def lines(self):
'''Display the system bonds as lines.
'''
if "bonds" not in self.topology:
return
bond_start, bond_end = zip(*self.topology['bonds'])
bond_start = np.array(bond_start)
bond_end = np.array(bond_end)
color_array = np.array([get_atom_color(t) for t in self.topology['atom_types']])
lines = self.add_representation('lines', {'startCoords': self.coordinates[bond_start],
'endCoords': self.coordinates[bond_end],
'startColors': color_array[bond_start].tolist(),
'endColors': color_array[bond_end].tolist()})
def update(self=self, lines=lines):
bond_start, bond_end = zip(*self.topology['bonds'])
bond_start = np.array(bond_start)
bond_end = np.array(bond_end)
self.update_representation(lines, {'startCoords': self.coordinates[bond_start],
'endCoords': self.coordinates[bond_end]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def wireframe(self, pointsize=0.2, opacity=1.0):
'''Display atoms as points of size *pointsize* and bonds as lines.'''
self.points(pointsize, opacity=opacity)
self.lines()
def ball_and_sticks(self, ball_radius=0.05, stick_radius=0.02, colorlist=None, opacity=1.0):
"""Display the system using a ball and stick representation.
"""
# Add the spheres
if colorlist is None:
colorlist = [get_atom_color(t) for t in self.topology['atom_types']]
sizes = [ball_radius] * len(self.topology['atom_types'])
spheres = self.add_representation('spheres', {'coordinates': self.coordinates.astype('float32'),
'colors': colorlist,
'radii': sizes,
'opacity': opacity})
def update(self=self, spheres=spheres):
self.update_representation(spheres, {'coordinates': self.coordinates.astype('float32')})
self.update_callbacks.append(update)
# Add the cylinders
if 'bonds' in self.topology and self.topology['bonds'] is not None:
start_idx, end_idx = zip(*self.topology['bonds'])
# Added this so bonds don't go through atoms when opacity<1.0
new_start_coords = []
new_end_coords = []
for bond_ind, bond in enumerate(self.topology['bonds']):
trim_amt = (ball_radius**2 - stick_radius**2)**0.5 if ball_radius>stick_radius else 0
start_coord = self.coordinates[bond[0]]
end_coord = self.coordinates[bond[1]]
vec = (end_coord-start_coord)/np.linalg.norm(end_coord-start_coord)
new_start_coords.append(start_coord+vec*trim_amt)
new_end_coords.append(end_coord-vec*trim_amt)
cylinders = self.add_representation('cylinders', {'startCoords': np.array(new_start_coords,dtype='float32'),
'endCoords': np.array(new_end_coords,dtype='float32'),
'colors': [0xcccccc] * len(new_start_coords),
'radii': [stick_radius] * len(new_start_coords),
'opacity': opacity})
# Update closure
def update(self=self, rep=cylinders, start_idx=start_idx, end_idx=end_idx):
self.update_representation(rep, {'startCoords': self.coordinates[list(start_idx)],
'endCoords': self.coordinates[list(end_idx)]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def line_ribbon(self):
'''Display the protein secondary structure as a white lines that passes through the
backbone chain.
'''
# Control points are the CA (C alphas)
backbone = np.array(self.topology['atom_names']) == 'CA'
smoothline = self.add_representation('smoothline', {'coordinates': self.coordinates[backbone],
'color': 0xffffff})
def update(self=self, smoothline=smoothline):
self.update_representation(smoothline, {'coordinates': self.coordinates[backbone]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def cylinder_and_strand(self):
'''Display the protein secondary structure as a white,
solid tube and the alpha-helices as yellow cylinders.
'''
top = self.topology
# We build a mini-state machine to find the
# start end of helices and such
in_helix = False
helices_starts = []
helices_ends = []
coils = []
coil = []
for i, typ in enumerate(top['secondary_structure']):
if typ == 'H':
if in_helix == False:
# We become helices
helices_starts.append(top['residue_indices'][i][0])
in_helix = True
# We end the previous coil
coil.append(top['residue_indices'][i][0])
else:
if in_helix == True:
# We stop being helices
helices_ends.append(top['residue_indices'][i][0])
# We start a new coil
coil = []
coils.append(coil)
in_helix = False
# We add control points
coil.append(top['residue_indices'][i][0])
[coil.append(j) for j in top['residue_indices'][i] if top['atom_names'][j] == 'CA']
# We add the coils
coil_representations = []
for control_points in coils:
rid = self.add_representation('smoothtube', {'coordinates': self.coordinates[control_points],
'radius': 0.05,
'resolution': 4,
'color': 0xffffff})
coil_representations.append(rid)
start_idx, end_idx = helices_starts, helices_ends
cylinders = self.add_representation('cylinders', {'startCoords': self.coordinates[list(start_idx)],
'endCoords': self.coordinates[list(end_idx)],
'colors': [0xffff00] * len(self.coordinates),
'radii': [0.15] * len(self.coordinates)})
def update(self=self, cylinders=cylinders, coils=coils,
coil_representations=coil_representations,
start_idx=start_idx, end_idx=end_idx, control_points=control_points):
for i, control_points in enumerate(coils):
rid = self.update_representation(coil_representations[i],
{'coordinates': self.coordinates[control_points]})
self.update_representation(cylinders, {'startCoords': self.coordinates[list(start_idx)],
'endCoords': self.coordinates[list(end_idx)]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def cartoon(self, cmap=None):
'''Display a protein secondary structure as a pymol-like cartoon representation.
:param cmap: is a dictionary that maps the secondary type
(H=helix, E=sheet, C=coil) to a hexadecimal color (0xffffff for white)
'''
# Parse secondary structure
top = self.topology
geom = gg.GeomProteinCartoon(gg.Aes(xyz=self.coordinates,
types=top['atom_names'],
secondary_type=top['secondary_structure']),
cmap=cmap)
primitives = geom.produce(gg.Aes())
ids = [self.add_representation(r['rep_type'], r['options']) for r in primitives]
def update(self=self, geom=geom, ids=ids):
primitives = geom.produce(gg.Aes(xyz=self.coordinates))
[self.update_representation(id_, rep_options)
for id_, rep_options in zip(ids, primitives)]
self.update_callbacks.append(update)
self.autozoom(self.coordinates)
def _coordinates_changed(self, name, old, new):
[c() for c in self.update_callbacks]
def add_isosurface(self, function, isolevel=0.3, resolution=32, style="wireframe", color=0xffffff):
'''Add an isosurface to the current scene.
:param callable function: A function that takes x, y, z coordinates as input and is broadcastable using numpy. Typically simple
functions that involve standard arithmetic operations and functions
such as ``x**2 + y**2 + z**2`` or ``np.exp(x**2 + y**2 + z**2)`` will work. If not sure, you can first
pass the function through ``numpy.vectorize``.\
Example: ``mv.add_isosurface(np.vectorize(f))``
:param float isolevel: The value for which the function should be constant.
:param int resolution: The number of grid point to use for the surface. An high value will give better quality but lower performance.
:param str style: The surface style, choose between ``solid``, ``wireframe`` and ``transparent``.
:param int color: The color given as an hexadecimal integer. Example: ``0xffffff`` is white.
'''
avail_styles = ['wireframe', 'solid', 'transparent']
if style not in avail_styles:
raise ValueError('style must be in ' + str(avail_styles))
# We want to make a container that contains the whole molecule
# and surface
area_min = self.coordinates.min(axis=0) - 0.2
area_max = self.coordinates.max(axis=0) + 0.2
x = np.linspace(area_min[0], area_max[0], resolution)
y = np.linspace(area_min[1], area_max[1], resolution)
z = np.linspace(area_min[2], area_max[2], resolution)
xv, yv, zv = np.meshgrid(x, y, z)
spacing = np.array((area_max - area_min)/resolution)
if isolevel >= 0:
triangles = marching_cubes(function(xv, yv, zv), isolevel)
else: # Wrong traingle unwinding roder -- god only knows why
triangles = marching_cubes(-function(xv, yv, zv), -isolevel)
if len(triangles) == 0:
## NO surface
return
faces = []
verts = []
for i, t in enumerate(triangles):
faces.append([i * 3, i * 3 +1, i * 3 + 2])
verts.extend(t)
faces = np.array(faces)
verts = area_min + spacing/2 + np.array(verts)*spacing
rep_id = self.add_representation('surface', {'verts': verts.astype('float32'),
'faces': faces.astype('int32'),
'style': style,
'color': color})
self.autozoom(verts)
|
gabrielelanaro/chemview | chemview/marchingcubes.py | isosurface_from_data | python | def isosurface_from_data(data, isolevel, origin, spacing):
spacing = np.array(extent/resolution)
if isolevel >= 0:
triangles = marching_cubes(data, isolevel)
else: # Wrong traingle unwinding roder -- god only knows why
triangles = marching_cubes(-data, -isolevel)
faces = []
verts = []
for i, t in enumerate(triangles):
faces.append([i * 3, i * 3 +1, i * 3 + 2])
verts.extend(t)
faces = np.array(faces)
verts = origin + spacing/2 + np.array(verts)*spacing
return verts, faces | Small wrapper to get directly vertices and faces to feed into programs | train | https://github.com/gabrielelanaro/chemview/blob/2c9768dd23db99e59e27adff2a953bb8ee795fa3/chemview/marchingcubes.py#L28-L44 | [
"def marching_cubes(field, isolevel):\n # The field is like gridpoints, and gridpoints define cubes.\n triangles = []\n # Here we determine the value for each field\n for i in range(field.shape[0] - 1):\n for j in range(field.shape[1] - 1):\n for k in range(field.shape[2] - 1):\n\n points = {\n 0: (i, j, k),\n 1: (i, j+1, k),\n 2: (i+1, j+1, k),\n 3: (i+1, j, k),\n 4: (i, j, k+1),\n 5: (i, j+1, k+1),\n 6: (i+1, j+1, k+1),\n 7: (i+1, j, k+1)\n }\n # Which kind of cube is this guy?\n cube_index = 0\n if field[points[0]] < isolevel: cube_index |= 1\n if field[points[1]] < isolevel: cube_index |= 2\n if field[points[2]] < isolevel: cube_index |= 4\n if field[points[3]] < isolevel: cube_index |= 8\n if field[points[4]] < isolevel: cube_index |= 16\n if field[points[5]] < isolevel: cube_index |= 32\n if field[points[6]] < isolevel: cube_index |= 64\n if field[points[7]] < isolevel: cube_index |= 128\n\n # Get the faces from the cube\n\n for e1,e2,e3 in tris_as_edges[cube_index]:\n # Get the three interpolated points on the edges\n triangle = []\n for edge in (e1, e2, e3):\n s, e = edge2pts[edge]\n p1, p2 = np.array(points[e], 'f'), np.array(points[s], 'f')\n v = interpolate_edge_coordinates(p1, field[points[e]],\n p2, field[points[s]],\n isolevel)\n triangle.append(v)\n triangles.append(triangle)\n triangles_ = np.array(triangles) # triangles_ NUMBA BUG\n\n if len(triangles) == 0:\n return np.array([])\n\n # TODO Let's just invert for now, but no one knows what the problem is\n triangles_[:, :, [0, 1]] = triangles_[:, :, [1, 0]]\n return triangles_\n"
] | # A marching cube test
import numpy as np
try:
import numba as nb
numba_present = True
except ImportError:
import warnings
warnings.warn(
"Numba not installed. The isosurface generation will be extremely slow",
ImportWarning)
numba_present = False
def isosurface_from_function(function, extents=[[-1, 1], [-1, 1], [-1, 1]],
isolevel=0.3, resolution=32):
area_min, area_max = np.array(extents).T
spacing = (area_max - area_min)/resolution
x = np.linspace(area_min[0], area_max[0], resolution)
y = np.linspace(area_min[1], area_max[1], resolution)
z = np.linspace(area_min[2], area_max[2], resolution)
xv, yv, zv = np.meshgrid(x, y, z)
return isosurface_from_data(function(xv, yv, zv), isolevel,
area_min, spacing)
def isosurface_from_data(data, isolevel, origin, spacing):
"""Small wrapper to get directly vertices and faces to feed into programs
"""
spacing = np.array(extent/resolution)
if isolevel >= 0:
triangles = marching_cubes(data, isolevel)
else: # Wrong traingle unwinding roder -- god only knows why
triangles = marching_cubes(-data, -isolevel)
faces = []
verts = []
for i, t in enumerate(triangles):
faces.append([i * 3, i * 3 +1, i * 3 + 2])
verts.extend(t)
faces = np.array(faces)
verts = origin + spacing/2 + np.array(verts)*spacing
return verts, faces
def marching_cubes(field, isolevel):
# The field is like gridpoints, and gridpoints define cubes.
triangles = []
# Here we determine the value for each field
for i in range(field.shape[0] - 1):
for j in range(field.shape[1] - 1):
for k in range(field.shape[2] - 1):
points = {
0: (i, j, k),
1: (i, j+1, k),
2: (i+1, j+1, k),
3: (i+1, j, k),
4: (i, j, k+1),
5: (i, j+1, k+1),
6: (i+1, j+1, k+1),
7: (i+1, j, k+1)
}
# Which kind of cube is this guy?
cube_index = 0
if field[points[0]] < isolevel: cube_index |= 1
if field[points[1]] < isolevel: cube_index |= 2
if field[points[2]] < isolevel: cube_index |= 4
if field[points[3]] < isolevel: cube_index |= 8
if field[points[4]] < isolevel: cube_index |= 16
if field[points[5]] < isolevel: cube_index |= 32
if field[points[6]] < isolevel: cube_index |= 64
if field[points[7]] < isolevel: cube_index |= 128
# Get the faces from the cube
for e1,e2,e3 in tris_as_edges[cube_index]:
# Get the three interpolated points on the edges
triangle = []
for edge in (e1, e2, e3):
s, e = edge2pts[edge]
p1, p2 = np.array(points[e], 'f'), np.array(points[s], 'f')
v = interpolate_edge_coordinates(p1, field[points[e]],
p2, field[points[s]],
isolevel)
triangle.append(v)
triangles.append(triangle)
triangles_ = np.array(triangles) # triangles_ NUMBA BUG
if len(triangles) == 0:
return np.array([])
# TODO Let's just invert for now, but no one knows what the problem is
triangles_[:, :, [0, 1]] = triangles_[:, :, [1, 0]]
return triangles_
def interpolate_edge_coordinates(point1, value1, point2, value2, isolevel):
    """Return the point on the segment point1->point2 where the scalar field
    (value1 at point1, value2 at point2) linearly crosses *isolevel*."""
    fraction = (isolevel - value1) / (value2 - value1)
    return point1 + fraction * (point2 - point1)
if numba_present:
    # marching_cubes = nb.jit('pyobject(f4[:, :, :], f4)')(marching_cubes)
    # marching_cubes = nb.jit()(marching_cubes)
    # Only the innermost interpolation kernel is JIT-compiled; the disabled
    # lines above are earlier attempts to jit ``marching_cubes`` itself.
    interpolate_edge_coordinates = nb.jit(interpolate_edge_coordinates)
# Returns the vertices that make up an edge
# Edge index (0-11) -> pair of cube-corner indices (0-7) delimiting that edge.
edge2pts = [(0,1),(1,2),(2,3),(3,0),(4,5),(5,6),(6,7),(7,4),
            (0,4),(1,5),(2,6),(3,7)]
# For a given case (0-255), this array yields the corresponding
# triangles, given as a tuple of three edges
tris_as_edges = [
[],
[(0, 8, 3)],
[(0, 1, 9)],
[(1, 8, 3), (9, 8, 1)],
[(1, 2, 10)],
[(0, 8, 3), (1, 2, 10)],
[(10, 9, 0), (0, 2, 10)],
[(2, 8, 3), (2, 10, 8), (10, 9, 8)],
[(3, 11, 2)],
[(2, 0, 8), (8, 11, 2)],
[(1, 9, 0), (2, 3, 11)],
[(1, 11, 2), (1, 9, 11), (9, 8, 11)],
[(3, 11, 1), (1, 11, 10)],
[(0, 10, 1), (0, 8, 10), (11, 10, 8)],
[(3, 9, 0), (3, 11, 9), (11, 10, 9)],
[(9, 8, 11), (10, 9, 11)],
[(4, 7, 8)],
[(4, 3, 0), (7, 3, 4)],
[(0, 1, 9), (8, 4, 7)],
[(4, 1, 9), (4, 7, 1), (7, 3, 1)],
[(1, 2, 10), (8, 4, 7)],
[(3, 4, 7), (3, 0, 4), (1, 2, 10)],
[(9, 2, 10), (9, 0, 2), (8, 4, 7)],
[(2, 10, 9), (2, 9, 7), (2, 7, 3), (7, 9, 4)],
[(8, 4, 7), (3, 11, 2)],
[(11, 4, 7), (11, 2, 4), (2, 0, 4)],
[(9, 0, 1), (8, 4, 7), (2, 3, 11)],
[(4, 7, 11), (9, 4, 11), (9, 11, 2), (2, 1, 9)],
[(3, 11, 10), (10, 1, 3), (7, 8, 4)],
[(11, 10, 1), (4, 11, 1), (7, 11, 4), (4, 1, 0)],
[(4, 7, 8), (3, 11, 9), (9, 11, 10), (9, 0, 3)],
[(4, 7, 11), (4, 11, 9), (9, 11, 10)],
[(9, 5, 4)],
[(9, 5, 4), (0, 8, 3)],
[(0, 5, 4), (1, 5, 0)],
[(8, 5, 4), (8, 3, 5), (3, 1, 5)],
[(1, 2, 10), (9, 5, 4)],
[(3, 0, 8), (1, 2, 10), (4, 9, 5)],
[(5, 2, 10), (5, 4, 2), (4, 0, 2)],
[(2, 10, 5), (3, 2, 5), (3, 5, 4), (3, 4, 8)],
[(9, 5, 4), (2, 3, 11)],
[(0, 11, 2), (0, 8, 11), (4, 9, 5)],
[(0, 5, 4), (0, 1, 5), (2, 3, 11)],
[(2, 1, 5), (2, 5, 8), (2, 8, 11), (4, 8, 5)],
[(10, 3, 11), (10, 1, 3), (9, 5, 4)],
[(4, 9, 5), (0, 8, 1), (8, 10, 1), (8, 11, 10)],
[(5, 4, 0), (5, 0, 11), (5, 11, 10), (11, 0, 3)],
[(5, 4, 8), (5, 8, 10), (10, 8, 11)],
[(9, 7, 8), (5, 7, 9)],
[(9, 3, 0), (9, 5, 3), (5, 7, 3)],
[(0, 7, 8), (0, 1, 7), (1, 5, 7)],
[(1, 5, 3), (3, 5, 7)],
[(9, 7, 8), (9, 5, 7), (10, 1, 2)],
[(10, 1, 2), (9, 5, 0), (5, 3, 0), (5, 7, 3)],
[(8, 0, 2), (8, 2, 5), (8, 5, 7), (10, 5, 2)],
[(2, 10, 5), (2, 5, 3), (3, 5, 7)],
[(7, 9, 5), (7, 8, 9), (3, 11, 2)],
[(9, 5, 7), (9, 7, 2), (9, 2, 0), (2, 7, 11)],
[(2, 3, 11), (0, 1, 8), (1, 7, 8), (1, 5, 7)],
[(11, 2, 1), (11, 1, 7), (7, 1, 5)],
[(9, 5, 8), (8, 5, 7), (10, 1, 3), (10, 3, 11)],
[(5, 7, 0), (5, 0, 9), (7, 11, 0), (1, 0, 10), (11, 10, 0)],
[(11, 10, 0), (11, 0, 3), (10, 5, 0), (8, 0, 7), (5, 7, 0)],
[(5, 11, 10), (11, 5, 7)],
[(10, 6, 5)],
[(0, 8, 3), (6, 5, 10)],
[(9, 0, 1), (5, 10, 6)],
[(1, 8, 3), (1, 9, 8), (5, 10, 6)],
[(1, 6, 5), (2, 6, 1)],
[(1, 6, 5), (1, 2, 6), (3, 0, 8)],
[(9, 6, 5), (9, 0, 6), (0, 2, 6)],
[(5, 9, 8), (5, 8, 2), (5, 2, 6), (3, 2, 8)],
[(2, 3, 11), (10, 6, 5)],
[(11, 0, 8), (11, 2, 0), (10, 6, 5)],
[(0, 1, 9), (2, 3, 11), (5, 10, 6)],
[(5, 10, 6), (1, 9, 2), (9, 11, 2), (9, 8, 11)],
[(6, 3, 11), (6, 5, 3), (5, 1, 3)],
[(0, 8, 11), (0, 11, 5), (0, 5, 1), (5, 11, 6)],
[(3, 11, 6), (0, 3, 6), (0, 6, 5), (0, 5, 9)],
[(6, 5, 9), (6, 9, 11), (11, 9, 8)],
[(5, 10, 6), (4, 7, 8)],
[(4, 3, 0), (4, 7, 3), (6, 5, 10)],
[(1, 9, 0), (5, 10, 6), (8, 4, 7)],
[(10, 6, 5), (1, 9, 7), (1, 7, 3), (7, 9, 4)],
[(6, 1, 2), (6, 5, 1), (4, 7, 8)],
[(1, 2, 5), (5, 2, 6), (3, 0, 4), (3, 4, 7)],
[(8, 4, 7), (9, 0, 5), (0, 6, 5), (0, 2, 6)],
[(7, 3, 9), (7, 9, 4), (3, 2, 9), (5, 9, 6), (2, 6, 9)],
[(3, 11, 2), (7, 8, 4), (10, 6, 5)],
[(5, 10, 6), (4, 7, 2), (4, 2, 0), (2, 7, 11)],
[(0, 1, 9), (4, 7, 8), (2, 3, 11), (5, 10, 6)],
[(9, 2, 1), (9, 11, 2), (9, 4, 11), (7, 11, 4), (5, 10, 6)],
[(8, 4, 7), (3, 11, 5), (3, 5, 1), (5, 11, 6)],
[(5, 1, 11), (5, 11, 6), (1, 0, 11), (7, 11, 4), (0, 4, 11)],
[(0, 5, 9), (0, 6, 5), (0, 3, 6), (11, 6, 3), (8, 4, 7)],
[(6, 5, 9), (6, 9, 11), (4, 7, 9), (7, 11, 9)],
[(6, 4, 9), (9, 10, 6)],
[(4, 10, 6), (4, 9, 10), (0, 8, 3)],
[(10, 0, 1), (10, 6, 0), (6, 4, 0)],
[(8, 3, 1), (8, 1, 6), (8, 6, 4), (6, 1, 10)],
[(1, 4, 9), (1, 2, 4), (2, 6, 4)],
[(3, 0, 8), (1, 2, 9), (2, 4, 9), (2, 6, 4)],
[(0, 2, 4), (4, 2, 6)],
[(8, 3, 2), (8, 2, 4), (4, 2, 6)],
[(10, 4, 9), (10, 6, 4), (11, 2, 3)],
[(0, 8, 2), (2, 8, 11), (4, 9, 10), (4, 10, 6)],
[(3, 11, 2), (0, 1, 6), (0, 6, 4), (6, 1, 10)],
[(6, 4, 1), (6, 1, 10), (4, 8, 1), (2, 1, 11), (8, 11, 1)],
[(9, 6, 4), (9, 3, 6), (9, 1, 3), (11, 6, 3)],
[(8, 11, 1), (8, 1, 0), (11, 6, 1), (9, 1, 4), (6, 4, 1)],
[(3, 11, 6), (3, 6, 0), (0, 6, 4)],
[(6, 4, 8), (8, 11, 6)],
[(7, 10, 6), (7, 8, 10), (8, 9, 10)],
[(0, 7, 3), (0, 10, 7), (0, 9, 10), (6, 7, 10)],
[(10, 6, 7), (1, 10, 7), (1, 7, 8), (1, 8, 0)],
[(10, 6, 7), (10, 7, 1), (1, 7, 3)],
[(1, 2, 6), (1, 6, 8), (1, 8, 9), (8, 6, 7)],
[(2, 6, 9), (2, 9, 1), (6, 7, 9), (0, 9, 3), (7, 3, 9)],
[(7, 8, 0), (7, 0, 6), (6, 0, 2)],
[(7, 3, 2), (6, 7, 2)],
[(2, 3, 11), (10, 6, 8), (10, 8, 9), (8, 6, 7)],
[(2, 0, 7), (2, 7, 11), (0, 9, 7), (6, 7, 10), (9, 10, 7)],
[(1, 8, 0), (1, 7, 8), (1, 10, 7), (6, 7, 10), (2, 3, 11)],
[(11, 2, 1), (11, 1, 7), (10, 6, 1), (6, 7, 1)],
[(8, 9, 6), (8, 6, 7), (9, 1, 6), (11, 6, 3), (1, 3, 6)],
[(0, 9, 1), (11, 6, 7)],
[(7, 8, 0), (7, 0, 6), (3, 11, 0), (11, 6, 0)],
[(7, 11, 6)],
[(7, 6, 11)],
[(3, 0, 8), (11, 7, 6)],
[(0, 1, 9), (11, 7, 6)],
[(8, 1, 9), (8, 3, 1), (11, 7, 6)],
[(10, 1, 2), (6, 11, 7)],
[(1, 2, 10), (3, 0, 8), (6, 11, 7)],
[(2, 9, 0), (2, 10, 9), (6, 11, 7)],
[(6, 11, 7), (2, 10, 3), (10, 8, 3), (10, 9, 8)],
[(7, 2, 3), (6, 2, 7)],
[(7, 0, 8), (7, 6, 0), (6, 2, 0)],
[(2, 7, 6), (2, 3, 7), (0, 1, 9)],
[(1, 6, 2), (1, 8, 6), (1, 9, 8), (8, 7, 6)],
[(10, 7, 6), (10, 1, 7), (1, 3, 7)],
[(10, 7, 6), (1, 7, 10), (1, 8, 7), (1, 0, 8)],
[(0, 3, 7), (0, 7, 10), (0, 10, 9), (6, 10, 7)],
[(7, 6, 10), (7, 10, 8), (8, 10, 9)],
[(8, 4, 6), (6, 11, 8)],
[(3, 6, 11), (3, 0, 6), (0, 4, 6)],
[(8, 6, 11), (8, 4, 6), (9, 0, 1)],
[(9, 4, 6), (9, 6, 3), (9, 3, 1), (11, 3, 6)],
[(6, 8, 4), (6, 11, 8), (2, 10, 1)],
[(1, 2, 10), (3, 0, 11), (0, 6, 11), (0, 4, 6)],
[(4, 11, 8), (4, 6, 11), (0, 2, 9), (2, 10, 9)],
[(10, 9, 3), (10, 3, 2), (9, 4, 3), (11, 3, 6), (4, 6, 3)],
[(8, 2, 3), (8, 4, 2), (4, 6, 2)],
[(0, 4, 2), (4, 6, 2)],
[(1, 9, 0), (2, 3, 4), (2, 4, 6), (4, 3, 8)],
[(1, 9, 4), (1, 4, 2), (2, 4, 6)],
[(8, 1, 3), (8, 6, 1), (8, 4, 6), (6, 10, 1)],
[(10, 1, 0), (10, 0, 6), (6, 0, 4)],
[(4, 6, 3), (4, 3, 8), (6, 10, 3), (0, 3, 9), (10, 9, 3)],
[(10, 9, 4), (4, 6, 10)],
[(4, 9, 5), (7, 6, 11)],
[(0, 8, 3), (4, 9, 5), (11, 7, 6)],
[(5, 0, 1), (5, 4, 0), (7, 6, 11)],
[(11, 7, 6), (8, 3, 4), (3, 5, 4), (3, 1, 5)],
[(9, 5, 4), (10, 1, 2), (7, 6, 11)],
[(6, 11, 7), (1, 2, 10), (0, 8, 3), (4, 9, 5)],
[(7, 6, 11), (5, 4, 10), (4, 2, 10), (4, 0, 2)],
[(3, 4, 8), (3, 5, 4), (3, 2, 5), (10, 5, 2), (11, 7, 6)],
[(7, 2, 3), (7, 6, 2), (5, 4, 9)],
[(9, 5, 4), (0, 8, 6), (0, 6, 2), (6, 8, 7)],
[(3, 6, 2), (3, 7, 6), (1, 5, 0), (5, 4, 0)],
[(6, 2, 8), (6, 8, 7), (2, 1, 8), (4, 8, 5), (1, 5, 8)],
[(9, 5, 4), (10, 1, 6), (1, 7, 6), (1, 3, 7)],
[(1, 6, 10), (1, 7, 6), (1, 0, 7), (8, 7, 0), (9, 5, 4)],
[(4, 0, 10), (4, 10, 5), (0, 3, 10), (6, 10, 7), (3, 7, 10)],
[(7, 6, 10), (7, 10, 8), (5, 4, 10), (4, 8, 10)],
[(6, 9, 5), (6, 11, 9), (11, 8, 9)],
[(3, 6, 11), (0, 6, 3), (0, 5, 6), (0, 9, 5)],
[(0, 11, 8), (0, 5, 11), (0, 1, 5), (5, 6, 11)],
[(6, 11, 3), (6, 3, 5), (5, 3, 1)],
[(1, 2, 10), (9, 5, 11), (9, 11, 8), (11, 5, 6)],
[(0, 11, 3), (0, 6, 11), (0, 9, 6), (5, 6, 9), (1, 2, 10)],
[(11, 8, 5), (11, 5, 6), (8, 0, 5), (10, 5, 2), (0, 2, 5)],
[(6, 11, 3), (6, 3, 5), (2, 10, 3), (10, 5, 3)],
[(5, 8, 9), (5, 2, 8), (5, 6, 2), (3, 8, 2)],
[(9, 5, 6), (9, 6, 0), (0, 6, 2)],
[(1, 5, 8), (1, 8, 0), (5, 6, 8), (3, 8, 2), (6, 2, 8)],
[(1, 5, 6), (2, 1, 6)],
[(1, 3, 6), (1, 6, 10), (3, 8, 6), (5, 6, 9), (8, 9, 6)],
[(10, 1, 0), (10, 0, 6), (9, 5, 0), (5, 6, 0)],
[(0, 3, 8), (5, 6, 10)],
[(5, 6, 10)],
[(11, 5, 10), (7, 5, 11)],
[(11, 5, 10), (11, 7, 5), (8, 3, 0)],
[(5, 11, 7), (5, 10, 11), (1, 9, 0)],
[(10, 7, 5), (10, 11, 7), (9, 8, 1), (8, 3, 1)],
[(11, 1, 2), (11, 7, 1), (7, 5, 1)],
[(0, 8, 3), (1, 2, 7), (1, 7, 5), (7, 2, 11)],
[(9, 7, 5), (9, 2, 7), (9, 0, 2), (2, 11, 7)],
[(7, 5, 2), (7, 2, 11), (5, 9, 2), (3, 2, 8), (9, 8, 2)],
[(2, 5, 10), (2, 3, 5), (3, 7, 5)],
[(8, 2, 0), (8, 5, 2), (8, 7, 5), (10, 2, 5)],
[(9, 0, 1), (5, 10, 3), (5, 3, 7), (3, 10, 2)],
[(9, 8, 2), (9, 2, 1), (8, 7, 2), (10, 2, 5), (7, 5, 2)],
[(1, 3, 5), (3, 7, 5)],
[(0, 8, 7), (0, 7, 1), (1, 7, 5)],
[(9, 0, 3), (9, 3, 5), (5, 3, 7)],
[(9, 8, 7), (5, 9, 7)],
[(5, 8, 4), (5, 10, 8), (10, 11, 8)],
[(5, 0, 4), (5, 11, 0), (5, 10, 11), (11, 3, 0)],
[(0, 1, 9), (8, 4, 10), (8, 10, 11), (10, 4, 5)],
[(10, 11, 4), (10, 4, 5), (11, 3, 4), (9, 4, 1), (3, 1, 4)],
[(2, 5, 1), (2, 8, 5), (2, 11, 8), (4, 5, 8)],
[(0, 4, 11), (0, 11, 3), (4, 5, 11), (2, 11, 1), (5, 1, 11)],
[(0, 2, 5), (0, 5, 9), (2, 11, 5), (4, 5, 8), (11, 8, 5)],
[(9, 4, 5), (2, 11, 3)],
[(2, 5, 10), (3, 5, 2), (3, 4, 5), (3, 8, 4)],
[(5, 10, 2), (5, 2, 4), (4, 2, 0)],
[(3, 10, 2), (3, 5, 10), (3, 8, 5), (4, 5, 8), (0, 1, 9)],
[(5, 10, 2), (5, 2, 4), (1, 9, 2), (9, 4, 2)],
[(8, 4, 5), (8, 5, 3), (3, 5, 1)],
[(0, 4, 5), (1, 0, 5)],
[(8, 4, 5), (8, 5, 3), (9, 0, 5), (0, 3, 5)],
[(9, 4, 5)],
[(4, 11, 7), (4, 9, 11), (9, 10, 11)],
[(0, 8, 3), (4, 9, 7), (9, 11, 7), (9, 10, 11)],
[(1, 10, 11), (1, 11, 4), (1, 4, 0), (7, 4, 11)],
[(3, 1, 4), (3, 4, 8), (1, 10, 4), (7, 4, 11), (10, 11, 4)],
[(4, 11, 7), (9, 11, 4), (9, 2, 11), (9, 1, 2)],
[(9, 7, 4), (9, 11, 7), (9, 1, 11), (2, 11, 1), (0, 8, 3)],
[(11, 7, 4), (11, 4, 2), (2, 4, 0)],
[(11, 7, 4), (11, 4, 2), (8, 3, 4), (3, 2, 4)],
[(2, 9, 10), (2, 7, 9), (2, 3, 7), (7, 4, 9)],
[(9, 10, 7), (9, 7, 4), (10, 2, 7), (8, 7, 0), (2, 0, 7)],
[(3, 7, 10), (3, 10, 2), (7, 4, 10), (1, 10, 0), (4, 0, 10)],
[(1, 10, 2), (8, 7, 4)],
[(4, 9, 1), (4, 1, 7), (7, 1, 3)],
[(4, 9, 1), (4, 1, 7), (0, 8, 1), (8, 7, 1)],
[(4, 0, 3), (7, 4, 3)],
[(4, 8, 7)],
[(9, 11, 8), (11, 9, 10)],
[(3, 0, 9), (3, 9, 11), (11, 9, 10)],
[(0, 1, 10), (0, 10, 8), (8, 10, 11)],
[(3, 1, 11), (11, 1, 10)],
[(1, 2, 11), (1, 11, 9), (9, 11, 8)],
[(3, 0, 9), (3, 9, 11), (1, 2, 9), (2, 11, 9)],
[(0, 2, 11), (11, 8, 0)],
[(3, 2, 11)],
[(2, 3, 8), (2, 8, 10), (10, 8, 9)],
[(2, 0, 9), (9, 10, 2)],
[(2, 3, 8), (2, 8, 10), (0, 1, 8), (1, 10, 8)],
[(10, 2, 1)],
[(1, 3, 8), (9, 1, 8)],
[(0, 9, 1)],
[(3, 8, 0)],
[],
]
if __name__ == '__main__':
    # Script entry point; ``main`` is presumably defined elsewhere in this
    # module -- TODO confirm (it is not visible in this chunk).
    main()
|
gabrielelanaro/chemview | chemview/render.py | render_povray | python | def render_povray(scene, filename='ipython', width=600, height=600,
antialiasing=0.01, extra_opts={}):
'''Render the scene with povray for publication.
:param dict scene: The scene to render
:param string filename: Output filename or 'ipython' to render in the notebook.
:param int width: Width in pixels.
:param int height: Height in pixels.
:param dict extra_opts: Dictionary to merge/override with the passed scene.
'''
if not vapory_available:
raise Exception("To render with povray, you need to have the vapory"
" package installed.")
# Adding extra options
scene = normalize_scene(scene)
scene.update(extra_opts)
# Camera target
aspect = scene['camera']['aspect']
up = np.dot(rmatrixquaternion(scene['camera']['quaternion']), [0, 1, 0])
v_fov = scene['camera']['vfov'] / 180.0 * np.pi
h_fov = 2.0 * np.arctan(np.tan(v_fov/2.0) * aspect) / np.pi * 180
# Setup camera position
camera = vp.Camera( 'location', scene['camera']['location'],
'direction', [0, 0, -1],
'sky', up,
'look_at', scene['camera']['target'],
'angle', h_fov )
global_settings = []
# Setup global illumination
if scene.get('radiosity', False):
# Global Illumination
radiosity = vp.Radiosity(
'brightness', 2.0,
'count', 100,
'error_bound', 0.15,
'gray_threshold', 0.0,
'low_error_factor', 0.2,
'minimum_reuse', 0.015,
'nearest_count', 10,
'recursion_limit', 1, #Docs say 1 is enough
'adc_bailout', 0.01,
'max_sample', 0.5,
'media off',
'normal off',
'always_sample', 1,
'pretrace_start', 0.08,
'pretrace_end', 0.01)
light_sources = []
global_settings.append(radiosity)
else:
# Lights
light_sources = [
vp.LightSource( np.array([2,4,-3]) * 1000, 'color', [1,1,1] ),
vp.LightSource( np.array([-2,-4,3]) * 1000, 'color', [1,1,1] ),
vp.LightSource( np.array([-1,2,3]) * 1000, 'color', [1,1,1] ),
vp.LightSource( np.array([1,-2,-3]) * 1000, 'color', [1,1,1] )
]
# Background -- white for now
background = vp.Background([1, 1, 1])
# Things to display
stuff = _generate_objects(scene['representations'])
scene = vp.Scene( camera, objects = light_sources + stuff + [background],
global_settings=global_settings)
return scene.render(filename, width=width, height=height,
antialiasing = antialiasing) | Render the scene with povray for publication.
:param dict scene: The scene to render
:param string filename: Output filename or 'ipython' to render in the notebook.
:param int width: Width in pixels.
:param int height: Height in pixels.
:param dict extra_opts: Dictionary to merge/override with the passed scene. | train | https://github.com/gabrielelanaro/chemview/blob/2c9768dd23db99e59e27adff2a953bb8ee795fa3/chemview/render.py#L21-L93 | [
"def normalize_scene(scene):\n \"\"\"Normalize incomplete scene with sane defaults\"\"\"\n retval = scene.copy()\n\n return validate_schema(scene, SCHEMA)\n",
"def rmatrixquaternion(q):\n \"\"\"Create a rotation matrix from q quaternion rotation.\n Quaternions are typed as Numeric Python numpy.arrays of length 4.\n \"\"\"\n assert np.allclose(math.sqrt(np.dot(q,q)), 1.0)\n\n x, y, z, w = q\n\n xx = x*x\n xy = x*y\n xz = x*z\n xw = x*w\n yy = y*y\n yz = y*z\n yw = y*w\n zz = z*z\n zw = z*w\n\n r00 = 1.0 - 2.0 * (yy + zz)\n r01 = 2.0 * (xy - zw)\n r02 = 2.0 * (xz + yw)\n\n r10 = 2.0 * (xy + zw)\n r11 = 1.0 - 2.0 * (xx + zz)\n r12 = 2.0 * (yz - xw)\n\n r20 = 2.0 * (xz - yw)\n r21 = 2.0 * (yz + xw)\n r22 = 1.0 - 2.0 * (xx + yy)\n\n R = np.array([[r00, r01, r02],\n [r10, r11, r12],\n [r20, r21, r22]], float)\n\n assert np.allclose(np.linalg.det(R), 1.0)\n return R\n",
"def _generate_objects(representations):\n objects = []\n\n for rep in representations:\n if rep['rep_type'] == 'spheres':\n for i, (x, y, z) in enumerate(rep['options']['coordinates']):\n r = rep['options']['radii'][i]\n c = rep['options']['colors'][i]\n # Generate the shape\n sphere = vp.Sphere( [x,y,z] , r, vp.Texture( vp.Pigment( 'color', hex2rgb(c)) ))\n objects.append(sphere)\n\n elif rep['rep_type'] == 'points':\n # Render points as small spheres\n for i, (x, y, z) in enumerate(rep['options']['coordinates']):\n c = rep['options']['colors'][i]\n s = rep['options']['sizes'][i]\n if not 'alpha' in rep['options']:\n t = 1.0\n else:\n t = rep['options']['alpha'][i]\n\n # Point = sphere with a small radius\n sphere = vp.Sphere( [x,y,z] , s * 0.15,\n vp.Texture( vp.Pigment( 'color', 'rgbf', hex2rgb(c) + (1-t,))),\n vp.Interior('ior', 1.0))\n objects.append(sphere)\n\n elif rep['rep_type'] == 'surface':\n verts = rep['options']['verts']\n faces = rep['options']['faces']\n color = rep['options']['color']\n triangles = verts.take(faces, axis=0)\n\n for v1, v2, v3 in triangles:\n povt = vp.Triangle(v1.tolist(),\n v2.tolist(),\n v3.tolist(),\n vp.Texture(vp.Pigment('color', hex2rgb(color))))\n objects.append(povt)\n\n elif rep['rep_type'] == 'cylinders':\n start = rep['options']['startCoords']\n end = rep['options']['endCoords']\n colors = rep['options']['colors']\n\n for i, (s, e) in enumerate(zip(start, end)):\n r = rep['options']['radii'][i]\n c = rep['options']['colors'][i]\n t = _get_transparency(rep['options'], i)\n\n cylinder = vp.Cylinder(s.tolist(), e.tolist(), r,\n vp.Texture(vp.Pigment('color', 'rgbf', hex2rgb(c) + (1 - t,))))\n objects.append(cylinder)\n\n elif rep['rep_type'] == 'lines':\n start = rep['options']['startCoords']\n end = rep['options']['endCoords']\n colors = rep['options']['startColors']\n\n for i, (s, e) in enumerate(zip(start, end)):\n #r = rep['options']['radii'][i]\n r = 0.02\n c = colors[i]\n t = _get_transparency(rep['options'], i)\n\n 
cylinder = vp.Cylinder(s.tolist(), e.tolist(), r,\n vp.Texture(vp.Pigment('color', 'rgbf', hex2rgb(c) + (1 - t,))))\n objects.append(cylinder)\n\n else:\n raise ValueError(\"No support for representation rep_type: %s\" % rep['rep_type'])\n\n return objects\n"
] | '''Utilities for rendering'''
from __future__ import division, print_function
from .scene import normalize_scene
import numpy as np
import math
import json
try:
import vapory as vp
vapory_available = True
except ImportError:
ImportWarning("Vapory is not available. Rendering publication quality images "
"will not work.")
vapory_available = False
__all__ = ['render_povray']
def _generate_objects(representations):
    """Translate chemview representation dicts into vapory (POV-Ray) objects.

    :param list representations: scene ``representations`` entries, each a
        dict with a ``rep_type`` key (``spheres``, ``points``, ``surface``,
        ``cylinders`` or ``lines``) and a rep-specific ``options`` dict.
    :return: flat list of vapory primitives ready to be added to a ``vp.Scene``.
    :raises ValueError: for an unknown ``rep_type``.
    """
    objects = []

    for rep in representations:
        if rep['rep_type'] == 'spheres':
            for i, (x, y, z) in enumerate(rep['options']['coordinates']):
                r = rep['options']['radii'][i]
                c = rep['options']['colors'][i]
                # Generate the shape
                sphere = vp.Sphere( [x,y,z] , r, vp.Texture( vp.Pigment( 'color', hex2rgb(c)) ))
                objects.append(sphere)

        elif rep['rep_type'] == 'points':
            # Render points as small spheres
            for i, (x, y, z) in enumerate(rep['options']['coordinates']):
                c = rep['options']['colors'][i]
                s = rep['options']['sizes'][i]
                if not 'alpha' in rep['options']:
                    t = 1.0
                else:
                    t = rep['options']['alpha'][i]

                # Point = sphere with a small radius; POV-Ray 'rgbf' takes a
                # filter component, hence 1 - alpha.
                sphere = vp.Sphere( [x,y,z] , s * 0.15,
                                    vp.Texture( vp.Pigment( 'color', 'rgbf', hex2rgb(c) + (1-t,))),
                                    vp.Interior('ior', 1.0))
                objects.append(sphere)

        elif rep['rep_type'] == 'surface':
            verts = rep['options']['verts']
            faces = rep['options']['faces']
            color = rep['options']['color']
            # Expand the indexed mesh into one explicit vertex triple per face.
            triangles = verts.take(faces, axis=0)

            for v1, v2, v3 in triangles:
                povt = vp.Triangle(v1.tolist(),
                                   v2.tolist(),
                                   v3.tolist(),
                                   vp.Texture(vp.Pigment('color', hex2rgb(color))))
                objects.append(povt)

        elif rep['rep_type'] == 'cylinders':
            start = rep['options']['startCoords']
            end = rep['options']['endCoords']
            colors = rep['options']['colors']

            for i, (s, e) in enumerate(zip(start, end)):
                r = rep['options']['radii'][i]
                c = rep['options']['colors'][i]
                t = _get_transparency(rep['options'], i)

                cylinder = vp.Cylinder(s.tolist(), e.tolist(), r,
                                       vp.Texture(vp.Pigment('color', 'rgbf', hex2rgb(c) + (1 - t,))))
                objects.append(cylinder)

        elif rep['rep_type'] == 'lines':
            # Lines are drawn as thin cylinders of fixed radius; only the
            # start color is used (end colors are ignored here).
            start = rep['options']['startCoords']
            end = rep['options']['endCoords']
            colors = rep['options']['startColors']

            for i, (s, e) in enumerate(zip(start, end)):
                #r = rep['options']['radii'][i]
                r = 0.02
                c = colors[i]
                t = _get_transparency(rep['options'], i)

                cylinder = vp.Cylinder(s.tolist(), e.tolist(), r,
                                       vp.Texture(vp.Pigment('color', 'rgbf', hex2rgb(c) + (1 - t,))))
                objects.append(cylinder)

        else:
            raise ValueError("No support for representation rep_type: %s" % rep['rep_type'])

    return objects
def _get_transparency(opts, i):
t = opts.get('transparency', 1.0)
if hasattr(t, "__len__"): # Array test
t = t[i]
return t
def hex2rgb(hex):
    """Split a 0xRRGGBB integer color into an (r, g, b) tuple of floats in [0, 1]."""
    red = (hex >> 16) & 0xff
    green = (hex >> 8) & 0xff
    blue = hex & 0xff
    return red / 255, green / 255, blue / 255
def rmatrixquaternion(q):
    """Build the 3x3 rotation matrix for the unit quaternion ``q``.

    ``q`` is given as (x, y, z, w) in a length-4 numpy array / sequence and
    must be normalized.
    """
    assert np.allclose(math.sqrt(np.dot(q, q)), 1.0)

    x, y, z, w = q

    # Standard quaternion -> rotation-matrix expansion.
    R = np.array([
        [1.0 - 2.0 * (y * y + z * z), 2.0 * (x * y - z * w),       2.0 * (x * z + y * w)],
        [2.0 * (x * y + z * w),       1.0 - 2.0 * (x * x + z * z), 2.0 * (y * z - x * w)],
        [2.0 * (x * z - y * w),       2.0 * (y * z + x * w),       1.0 - 2.0 * (x * x + y * y)],
    ], float)

    # A proper rotation has determinant +1.
    assert np.allclose(np.linalg.det(R), 1.0)
    return R
|
gabrielelanaro/chemview | chemview/render.py | rmatrixquaternion | python | def rmatrixquaternion(q):
assert np.allclose(math.sqrt(np.dot(q,q)), 1.0)
x, y, z, w = q
xx = x*x
xy = x*y
xz = x*z
xw = x*w
yy = y*y
yz = y*z
yw = y*w
zz = z*z
zw = z*w
r00 = 1.0 - 2.0 * (yy + zz)
r01 = 2.0 * (xy - zw)
r02 = 2.0 * (xz + yw)
r10 = 2.0 * (xy + zw)
r11 = 1.0 - 2.0 * (xx + zz)
r12 = 2.0 * (yz - xw)
r20 = 2.0 * (xz - yw)
r21 = 2.0 * (yz + xw)
r22 = 1.0 - 2.0 * (xx + yy)
R = np.array([[r00, r01, r02],
[r10, r11, r12],
[r20, r21, r22]], float)
assert np.allclose(np.linalg.det(R), 1.0)
return R | Create a rotation matrix from q quaternion rotation.
Quaternions are typed as Numeric Python numpy.arrays of length 4. | train | https://github.com/gabrielelanaro/chemview/blob/2c9768dd23db99e59e27adff2a953bb8ee795fa3/chemview/render.py#L181-L216 | null | '''Utilities for rendering'''
from __future__ import division, print_function
from .scene import normalize_scene
import numpy as np
import math
import json
try:
import vapory as vp
vapory_available = True
except ImportError:
ImportWarning("Vapory is not available. Rendering publication quality images "
"will not work.")
vapory_available = False
__all__ = ['render_povray']
def render_povray(scene, filename='ipython', width=600, height=600,
                  antialiasing=0.01, extra_opts=None):
    '''Render the scene with povray for publication.

    :param dict scene: The scene to render
    :param string filename: Output filename or 'ipython' to render in the notebook.
    :param int width: Width in pixels.
    :param int height: Height in pixels.
    :param float antialiasing: POV-Ray antialiasing threshold.
    :param dict extra_opts: Dictionary to merge/override with the passed scene.
    '''
    if not vapory_available:
        raise Exception("To render with povray, you need to have the vapory"
                        " package installed.")

    # Adding extra options.  ``extra_opts`` defaults to None instead of a
    # mutable ``{}`` default argument.
    scene = normalize_scene(scene)
    if extra_opts:
        scene.update(extra_opts)

    # Camera target
    aspect = scene['camera']['aspect']
    # Camera "up" vector: +y rotated by the camera quaternion.
    up = np.dot(rmatrixquaternion(scene['camera']['quaternion']), [0, 1, 0])
    # POV-Ray's 'angle' is the horizontal FOV; derive it from the vertical one.
    v_fov = scene['camera']['vfov'] / 180.0 * np.pi
    h_fov = 2.0 * np.arctan(np.tan(v_fov / 2.0) * aspect) / np.pi * 180

    # Setup camera position
    camera = vp.Camera('location', scene['camera']['location'],
                       'direction', [0, 0, -1],
                       'sky', up,
                       'look_at', scene['camera']['target'],
                       'angle', h_fov)

    global_settings = []

    # Setup global illumination
    if scene.get('radiosity', False):
        # Global Illumination: radiosity replaces explicit light sources.
        radiosity = vp.Radiosity(
            'brightness', 2.0,
            'count', 100,
            'error_bound', 0.15,
            'gray_threshold', 0.0,
            'low_error_factor', 0.2,
            'minimum_reuse', 0.015,
            'nearest_count', 10,
            'recursion_limit', 1,  # Docs say 1 is enough
            'adc_bailout', 0.01,
            'max_sample', 0.5,
            'media off',
            'normal off',
            'always_sample', 1,
            'pretrace_start', 0.08,
            'pretrace_end', 0.01)

        light_sources = []
        global_settings.append(radiosity)
    else:
        # Four distant white lights roughly surrounding the scene.
        light_sources = [
            vp.LightSource(np.array([2, 4, -3]) * 1000, 'color', [1, 1, 1]),
            vp.LightSource(np.array([-2, -4, 3]) * 1000, 'color', [1, 1, 1]),
            vp.LightSource(np.array([-1, 2, 3]) * 1000, 'color', [1, 1, 1]),
            vp.LightSource(np.array([1, -2, -3]) * 1000, 'color', [1, 1, 1]),
        ]

    # Background -- white for now
    background = vp.Background([1, 1, 1])

    # Things to display
    stuff = _generate_objects(scene['representations'])

    # Distinct local name so the input ``scene`` dict is not shadowed.
    pov_scene = vp.Scene(camera, objects=light_sources + stuff + [background],
                         global_settings=global_settings)

    return pov_scene.render(filename, width=width, height=height,
                            antialiasing=antialiasing)
def _generate_objects(representations):
    """Translate chemview representation dicts into vapory (POV-Ray) objects.

    :param list representations: scene ``representations`` entries, each a
        dict with a ``rep_type`` key (``spheres``, ``points``, ``surface``,
        ``cylinders`` or ``lines``) and a rep-specific ``options`` dict.
    :return: flat list of vapory primitives ready to be added to a ``vp.Scene``.
    :raises ValueError: for an unknown ``rep_type``.
    """
    objects = []

    for rep in representations:
        if rep['rep_type'] == 'spheres':
            for i, (x, y, z) in enumerate(rep['options']['coordinates']):
                r = rep['options']['radii'][i]
                c = rep['options']['colors'][i]
                # Generate the shape
                sphere = vp.Sphere( [x,y,z] , r, vp.Texture( vp.Pigment( 'color', hex2rgb(c)) ))
                objects.append(sphere)

        elif rep['rep_type'] == 'points':
            # Render points as small spheres
            for i, (x, y, z) in enumerate(rep['options']['coordinates']):
                c = rep['options']['colors'][i]
                s = rep['options']['sizes'][i]
                if not 'alpha' in rep['options']:
                    t = 1.0
                else:
                    t = rep['options']['alpha'][i]

                # Point = sphere with a small radius; POV-Ray 'rgbf' takes a
                # filter component, hence 1 - alpha.
                sphere = vp.Sphere( [x,y,z] , s * 0.15,
                                    vp.Texture( vp.Pigment( 'color', 'rgbf', hex2rgb(c) + (1-t,))),
                                    vp.Interior('ior', 1.0))
                objects.append(sphere)

        elif rep['rep_type'] == 'surface':
            verts = rep['options']['verts']
            faces = rep['options']['faces']
            color = rep['options']['color']
            # Expand the indexed mesh into one explicit vertex triple per face.
            triangles = verts.take(faces, axis=0)

            for v1, v2, v3 in triangles:
                povt = vp.Triangle(v1.tolist(),
                                   v2.tolist(),
                                   v3.tolist(),
                                   vp.Texture(vp.Pigment('color', hex2rgb(color))))
                objects.append(povt)

        elif rep['rep_type'] == 'cylinders':
            start = rep['options']['startCoords']
            end = rep['options']['endCoords']
            colors = rep['options']['colors']

            for i, (s, e) in enumerate(zip(start, end)):
                r = rep['options']['radii'][i]
                c = rep['options']['colors'][i]
                t = _get_transparency(rep['options'], i)

                cylinder = vp.Cylinder(s.tolist(), e.tolist(), r,
                                       vp.Texture(vp.Pigment('color', 'rgbf', hex2rgb(c) + (1 - t,))))
                objects.append(cylinder)

        elif rep['rep_type'] == 'lines':
            # Lines are drawn as thin cylinders of fixed radius; only the
            # start color is used (end colors are ignored here).
            start = rep['options']['startCoords']
            end = rep['options']['endCoords']
            colors = rep['options']['startColors']

            for i, (s, e) in enumerate(zip(start, end)):
                #r = rep['options']['radii'][i]
                r = 0.02
                c = colors[i]
                t = _get_transparency(rep['options'], i)

                cylinder = vp.Cylinder(s.tolist(), e.tolist(), r,
                                       vp.Texture(vp.Pigment('color', 'rgbf', hex2rgb(c) + (1 - t,))))
                objects.append(cylinder)

        else:
            raise ValueError("No support for representation rep_type: %s" % rep['rep_type'])

    return objects
def _get_transparency(opts, i):
t = opts.get('transparency', 1.0)
if hasattr(t, "__len__"): # Array test
t = t[i]
return t
def hex2rgb(hex):
    """Split a 0xRRGGBB integer color into an (r, g, b) tuple of floats in [0, 1]."""
    red = (hex >> 16) & 0xff
    green = (hex >> 8) & 0xff
    blue = hex & 0xff
    return red / 255, green / 255, blue / 255
|
gabrielelanaro/chemview | chemview/export.py | serialize_to_dict | python | def serialize_to_dict(dictionary):
'''Make a json-serializable dictionary from input dictionary by converting
non-serializable data types such as numpy arrays.'''
retval = {}
for k, v in dictionary.items():
if isinstance(v, dict):
retval[k] = serialize_to_dict(v)
else:
# This is when custom serialization happens
if isinstance(v, np.ndarray):
if v.dtype == 'float64':
# We don't support float64 on js side
v = v.astype('float32')
retval[k] = encode_numpy(v)
else:
retval[k] = v
return retval | Make a json-serializable dictionary from input dictionary by converting
non-serializable data types such as numpy arrays. | train | https://github.com/gabrielelanaro/chemview/blob/2c9768dd23db99e59e27adff2a953bb8ee795fa3/chemview/export.py#L27-L46 | [
"def encode_numpy(array):\n '''Encode a numpy array as a base64 encoded string, to be JSON serialized. \n\n :return: a dictionary containing the fields:\n - *data*: the base64 string\n - *type*: the array type\n - *shape*: the array shape\n\n '''\n return {'data' : base64.b64encode(array.data).decode('utf8'),\n 'type' : array.dtype.name,\n 'shape': array.shape}\n",
"def serialize_to_dict(dictionary):\n '''Make a json-serializable dictionary from input dictionary by converting\n non-serializable data types such as numpy arrays.'''\n retval = {}\n\n for k, v in dictionary.items():\n if isinstance(v, dict):\n retval[k] = serialize_to_dict(v)\n else:\n # This is when custom serialization happens\n if isinstance(v, np.ndarray):\n if v.dtype == 'float64':\n # We don't support float64 on js side\n v = v.astype('float32')\n\n retval[k] = encode_numpy(v)\n else:\n retval[k] = v\n\n return retval\n"
] | '''Static export for viewing in nbviewer (or the web)'''
import json
import os
import numpy as np
from .scene import normalize_scene
from .utils import encode_numpy
def export_html(export_dir, scene):
    # Stub: static HTML export is not implemented yet.  Everything after the
    # raise below is unreachable and sketches the intended implementation
    # (normalize, JSON-serialize, write scene.js into a fresh directory).
    raise NotImplementedError()

    # We validate the input to make it complete
    scene = normalize_scene(scene)
    # The scene gets json-serialized
    scene_js = json.dumps(serialize_to_dict(scene))
    os.mkdir(export_dir)
    with open(os.path.join(export_dir, 'scene.js'), 'w') as fd:
        fd.write(scene_js)
template = '''
'''
def serialize_to_dict(dictionary):
    '''Make a json-serializable dictionary from input dictionary by converting
    non-serializable data types such as numpy arrays.'''
    out = {}

    for key, value in dictionary.items():
        if isinstance(value, dict):
            # Recurse into nested dictionaries.
            out[key] = serialize_to_dict(value)
        elif isinstance(value, np.ndarray):
            # Custom serialization: arrays are base64-encoded; float64 is not
            # supported on the js side, so downcast first.
            arr = value.astype('float32') if value.dtype == 'float64' else value
            out[key] = encode_numpy(arr)
        else:
            out[key] = value

    return out
|
gabrielelanaro/chemview | chemview/contrib.py | topology_mdtraj | python | def topology_mdtraj(traj):
'''Generate topology spec for the MolecularViewer from mdtraj.
:param mdtraj.Trajectory traj: the trajectory
:return: A chemview-compatible dictionary corresponding to the topology defined in mdtraj.
'''
import mdtraj as md
top = {}
top['atom_types'] = [a.element.symbol for a in traj.topology.atoms]
top['atom_names'] = [a.name for a in traj.topology.atoms]
top['bonds'] = [(a.index, b.index) for a, b in traj.topology.bonds]
top['secondary_structure'] = md.compute_dssp(traj[0])[0]
top['residue_types'] = [r.name for r in traj.topology.residues ]
top['residue_indices'] = [ [a.index for a in r.atoms] for r in traj.topology.residues ]
return top | Generate topology spec for the MolecularViewer from mdtraj.
:param mdtraj.Trajectory traj: the trajectory
:return: A chemview-compatible dictionary corresponding to the topology defined in mdtraj. | train | https://github.com/gabrielelanaro/chemview/blob/2c9768dd23db99e59e27adff2a953bb8ee795fa3/chemview/contrib.py#L3-L20 | null | '''Integration with other programs'''
|
gabrielelanaro/chemview | chemview/utils.py | encode_numpy | python | def encode_numpy(array):
'''Encode a numpy array as a base64 encoded string, to be JSON serialized.
:return: a dictionary containing the fields:
- *data*: the base64 string
- *type*: the array type
- *shape*: the array shape
'''
return {'data' : base64.b64encode(array.data).decode('utf8'),
'type' : array.dtype.name,
'shape': array.shape} | Encode a numpy array as a base64 encoded string, to be JSON serialized.
:return: a dictionary containing the fields:
- *data*: the base64 string
- *type*: the array type
- *shape*: the array shape | train | https://github.com/gabrielelanaro/chemview/blob/2c9768dd23db99e59e27adff2a953bb8ee795fa3/chemview/utils.py#L7-L18 | null | '''Various utilities of the chemview package
'''
import base64
import numpy as np
def beta_sheet_normals(ca, c, o):
    """Compute per-residue sheet normals from backbone atom positions.

    :param ca: (n, 3) alpha-carbon coordinates.
    :param c: (n, 3) carbonyl-carbon coordinates.
    :param o: (n, 3) carbonyl-oxygen coordinates.
    :return: (n, 3) array of normals with consistent orientation along
        the strand.
    """
    # NOTE(review): ``normalized`` divides the whole (n, 3) matrix by a single
    # Frobenius norm rather than row-wise; the cross-product directions are
    # unaffected, but the normals are not unit length -- confirm whether
    # callers rely on magnitudes.
    c_to_ca = normalized(ca - c)
    c_to_o = normalized(c - o)
    normals = np.cross(c_to_ca, c_to_o)

    # Make sure consecutive normals point to the same side of the sheet
    # (angle < 90 degrees): flip any normal opposing its predecessor.
    # (The previous loop ran ``enumerate(normals[1:])`` from index 0, so it
    # compared normals[0] with normals[-1] and never adjusted later entries.)
    for i in range(1, len(normals)):
        if normals[i].dot(normals[i - 1]) < 0:
            normals[i] *= -1

    return normals
def alpha_helix_normals(ca):
    """Estimate, for each CA atom of a helix, the normal pointing from the
    local helix axis to that atom.

    :param ca: (n, 3) array of alpha-carbon coordinates along the helix.
    :return: (n, 3) array of normals for helices longer than K_AVG CAs.
        For short helices a ``(starts, ends, normals)`` tuple is returned
        instead -- NOTE(review): the two branches return different shapes;
        confirm callers handle this asymmetry.
    """
    # The axis is estimated locally from windows of K_AVG consecutive CAs,
    # with start/end sub-windows offset by K_OFFSET.
    K_AVG = 5
    K_OFFSET = 2

    if len(ca) <= K_AVG:
        # Short helix: a single straight axis from first to last CA.
        start = ca[0]
        end = ca[-1]
        helix_dir = normalized(end - start)

        position = ca - start
        # Component of each position along the axis...
        projected_pos = np.array([np.dot(r, helix_dir) * helix_dir for r in position])
        # ...subtracted off to leave the radial part.
        # NOTE(review): ``normalized`` here divides the whole (n, 3) matrix by
        # one Frobenius norm, not row-wise -- confirm intended.
        normals = normalized(position - projected_pos)
        return [start] * len(normals), [end] * len(normals), normals

    # Start and end point for normals
    starts = []
    ends = []
    for i in range(len(ca) - K_AVG):
        starts.append(ca[i:i + K_AVG - K_OFFSET].mean(axis=0))
        ends.append(ca[i+K_OFFSET:i + K_AVG].mean(axis=0))

    starts = np.array(starts)
    ends = np.array(ends)

    # position relative to "start point"
    normals = []
    for i,r in enumerate(ca):
        # Tail CAs (no full window remaining) reuse the last axis segment.
        k = i if i < len(ca) - K_AVG else -1
        position = r - starts[k]
        # Find direction of the helix
        helix_dir = normalized(ends[k] - starts[k])
        # Project positions on the helix
        projected_pos = np.dot(position, helix_dir) * helix_dir
        normals.append(normalized(position - projected_pos))

    return np.array(normals)
def normalized(vec):
    """Scale *vec* by the inverse of its Euclidean norm.

    For 2D input this uses the Frobenius norm of the whole array,
    not a row-wise norm.
    """
    length = np.linalg.norm(vec)
    return vec / length
# Element colour lookup table (CPK/JMol-style hex RGB values), keyed by
# upper-case element symbol. Hoisted to module level so the dictionary is
# built once at import time instead of on every get_atom_color() call.
_ATOM_COLORS = {
    "H": 0xFFFFFF, "HE": 0xD9FFFF, "LI": 0xCC80FF, "BE": 0xC2FF00,
    "B": 0xFFB5B5, "C": 0x909090, "N": 0x3050F8, "O": 0xFF0D0D,
    "F": 0x90E050, "NE": 0xB3E3F5, "NA": 0xAB5CF2, "MG": 0x8AFF00,
    "AL": 0xBFA6A6, "SI": 0xF0C8A0, "P": 0xFF8000, "S": 0xFFFF30,
    "CL": 0x1FF01F, "AR": 0x80D1E3, "K": 0x8F40D4, "CA": 0x3DFF00,
    "SC": 0xE6E6E6, "TI": 0xBFC2C7, "V": 0xA6A6AB, "CR": 0x8A99C7,
    "MN": 0x9C7AC7, "FE": 0xE06633, "CO": 0xF090A0, "NI": 0x50D050,
    "CU": 0xC88033, "ZN": 0x7D80B0, "GA": 0xC28F8F, "GE": 0x668F8F,
    "AS": 0xBD80E3, "SE": 0xFFA100, "BR": 0xA62929, "KR": 0x5CB8D1,
    "RB": 0x702EB0, "SR": 0x00FF00, "Y": 0x94FFFF, "ZR": 0x94E0E0,
    "NB": 0x73C2C9, "MO": 0x54B5B5, "TC": 0x3B9E9E, "RU": 0x248F8F,
    "RH": 0x0A7D8C, "PD": 0x006985, "AG": 0xC0C0C0, "CD": 0xFFD98F,
    "IN": 0xA67573, "SN": 0x668080, "SB": 0x9E63B5, "TE": 0xD47A00,
    "I": 0x940094, "XE": 0x429EB0, "CS": 0x57178F, "BA": 0x00C900,
    "LA": 0x70D4FF, "CE": 0xFFFFC7, "PR": 0xD9FFC7, "ND": 0xC7FFC7,
    "PM": 0xA3FFC7, "SM": 0x8FFFC7, "EU": 0x61FFC7, "GD": 0x45FFC7,
    "TB": 0x30FFC7, "DY": 0x1FFFC7, "HO": 0x00FF9C, "ER": 0x00E675,
    "TM": 0x00D452, "YB": 0x00BF38, "LU": 0x00AB24, "HF": 0x4DC2FF,
    "TA": 0x4DA6FF, "W": 0x2194D6, "RE": 0x267DAB, "OS": 0x266696,
    "IR": 0x175487, "PT": 0xD0D0E0, "AU": 0xFFD123, "HG": 0xB8B8D0,
    "TL": 0xA6544D, "PB": 0x575961, "BI": 0x9E4FB5, "PO": 0xAB5C00,
    "AT": 0x754F45, "RN": 0x428296, "FR": 0x420066, "RA": 0x007D00,
    "AC": 0x70ABFA, "TH": 0x00BAFF, "PA": 0x00A1FF, "U": 0x008FFF,
    "NP": 0x0080FF, "PU": 0x006BFF, "AM": 0x545CF2, "CM": 0x785CE3,
    "BK": 0x8A4FE3, "CF": 0xA136D4, "ES": 0xB31FD4, "FM": 0xB31FBA,
}


def get_atom_color(atom_name):
    """Return the hex RGB colour for element *atom_name* (case-insensitive).

    :param atom_name: element symbol, e.g. ``"C"`` or ``"fe"``
    :return: colour as an integer (0xRRGGBB); unknown symbols fall back
        to white (0xFFFFFF)
    """
    return _ATOM_COLORS.get(atom_name.upper(), 0xFFFFFF)
|
gabrielelanaro/chemview | chemview/install.py | enable_notebook | python | def enable_notebook(verbose=0):
libs = ['objexporter.js',
'ArcballControls.js', 'filesaver.js',
'base64-arraybuffer.js', 'context.js',
'chemview.js', 'three.min.js', 'jquery-ui.min.js',
'context.standalone.css', 'chemview_widget.js',
'trajectory_controls_widget.js', "layout_widget.js",
"components/jquery-fullscreen/jquery.fullscreen.js",
'scales.js']
fns = [resource_filename('chemview', os.path.join('static', f)) for f in libs]
[install_nbextension(fn, verbose=verbose, overwrite=True, user=True) for fn in fns] | Enable IPython notebook widgets to be displayed.
This function should be called before using the chemview widgets. | train | https://github.com/gabrielelanaro/chemview/blob/2c9768dd23db99e59e27adff2a953bb8ee795fa3/chemview/install.py#L14-L29 | null | # Adapted from mdtraj: github.com/mdtraj/mdtraj
import os
import warnings
from IPython.display import display, Javascript
from notebook.nbextensions import install_nbextension
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from pkg_resources import resource_filename
__all__ = ['enable_notebook']
|
psss/fmf | fmf/base.py | Tree._initialize | python | def _initialize(self, path):
# Find the tree root
root = os.path.abspath(path)
try:
while ".fmf" not in next(os.walk(root))[1]:
if root == "/":
raise utils.RootError(
"Unable to find tree root for '{0}'.".format(
os.path.abspath(path)))
root = os.path.abspath(os.path.join(root, os.pardir))
except StopIteration:
raise utils.FileError("Invalid directory path: {0}".format(root))
log.info("Root directory found: {0}".format(root))
self.root = root
# Detect format version
try:
with open(os.path.join(self.root, ".fmf", "version")) as version:
self.version = int(version.read())
log.info("Format version detected: {0}".format(self.version))
except IOError as error:
raise utils.FormatError(
"Unable to detect format version: {0}".format(error))
except ValueError:
raise utils.FormatError("Invalid version format") | Find metadata tree root, detect format version | train | https://github.com/psss/fmf/blob/419f2f195d92339b8f9e5d11c0bea0f303e5fd75/fmf/base.py#L85-L109 | null | class Tree(object):
""" Metadata Tree """
def __init__(self, data, name=None, parent=None):
    """
    Initialize metadata tree from directory path or data dictionary

    Data parameter can be either a string with directory path to be
    explored or a dictionary with the values already prepared.

    :param data: directory path (str) or ready-made data dictionary
    :param name: node name, joined to the parent's name (child nodes only)
    :param parent: parent Tree node; None marks the tree root
    """
    # Bail out if no data and no parent given
    if not data and not parent:
        raise utils.GeneralError(
            "No data or parent provided to initialize the tree.")
    # Initialize family relations, object data and source files
    self.parent = parent
    self.children = dict()  # name -> child Tree node
    self.data = dict()  # attribute data of this node
    self.sources = list()  # metadata files this node was read from
    self.root = None  # tree root directory on disk
    self.version = utils.VERSION
    self.original_data = dict()  # pre-inheritance data, set by inherit()
    # Special handling for top parent
    if self.parent is None:
        self.name = "/"
        if not isinstance(data, dict):
            # Locate tree root / format version on disk, then grow the
            # whole tree from the detected root directory
            self._initialize(path=data)
            data = self.root
    # Handle child node creation
    else:
        self.root = self.parent.root
        self.name = os.path.join(self.parent.name, name)
    # Initialize data
    if isinstance(data, dict):
        self.update(data)
    else:
        self.grow(data)
    log.debug("New tree '{0}' created.".format(self))
def __unicode__(self):
""" Use tree name as identifier """
return self.name # pragma: no cover
def merge(self, parent=None):
    """ Merge parent data into this node (parent data act as the base)

    :param parent: node to merge from; defaults to self.parent
    """
    # Check parent, append source files
    if parent is None:
        parent = self.parent
    if parent is None:
        return
    self.sources = parent.sources + self.sources
    # Merge child data with parent data (deep copy so the parent's own
    # nested dictionaries are never mutated through shared references)
    data = copy.deepcopy(parent.data)
    for key, value in sorted(self.data.items()):
        # Handle attribute adding: a trailing '+' in the key means
        # "append to the inherited value" instead of overriding it
        if key.endswith('+'):
            # NOTE: rstrip removes every trailing '+', not just one
            key = key.rstrip('+')
            if key in data:
                # Use dict.update() for merging dictionaries
                if type(data[key]) == type(value) == dict:
                    data[key].update(value)
                    continue
                try:
                    value = data[key] + value
                except TypeError as error:
                    raise utils.MergeError(
                        "MergeError: Key '{0}' in {1} ({2}).".format(
                            key, self.name, str(error)))
        # And finally update the value
        data[key] = value
    self.data = data
def inherit(self):
    """ Apply inheritance: merge parent data into every node, depth-first """
    # Preserve original data and merge parent
    # (original data needed for custom inheritance extensions)
    self.original_data = self.data
    self.merge()
    log.debug("Data for '{0}' inherited.".format(self))
    log.data(pretty(self.data))
    # Apply inheritance to all children
    for child in self.children.values():
        child.inherit()
def update(self, data):
    """ Update metadata, handle virtual hierarchy

    Keys starting with '/' create or update child nodes; all other
    keys are stored as regular attributes of this node.

    :param data: dictionary with attribute/child data, or None (no-op)
    """
    # Nothing to do if no data
    if data is None:
        return
    for key, value in sorted(data.items()):
        # Handle child attributes
        if key.startswith('/'):
            name = key.lstrip('/')
            # Handle deeper nesting (e.g. keys like /one/two/three) by
            # extracting only the first level of the hierarchy as name
            match = re.search("([^/]+)(/.*)", name)
            if match:
                name = match.groups()[0]
                value = {match.groups()[1]: value}
            # Update existing child or create a new one
            self.child(name, value)
        # Update regular attributes
        else:
            self.data[key] = value
    log.debug("Data for '{0}' updated.".format(self))
    log.data(pretty(self.data))
def get(self, name=None, default=None):
"""
Get attribute value or return default
Whole data dictionary is returned when no attribute provided.
Supports direct values retrieval from deep dictionaries as well.
Dictionary path should be provided as list. The following two
examples are equal:
tree.data['hardware']['memory']['size']
tree.get(['hardware', 'memory', 'size'])
However the latter approach will also correctly handle providing
default value when any of the dictionary keys does not exist.
"""
# Return the whole dictionary if no attribute specified
if name is None:
return self.data
if not isinstance(name, list):
name = [name]
data = self.data
try:
for key in name:
data = data[key]
except KeyError:
return default
return data
def child(self, name, data, source=None):
    """ Create or update child with given data """
    try:
        # Existing child: merge a dictionary in, or grow from a path
        node = self.children[name]
        if isinstance(data, dict):
            node.update(data)
        else:
            node.grow(data)
    except KeyError:
        # No such child yet -- create a fresh subtree
        self.children[name] = Tree(data, name, parent=self)
    # Remember which metadata file this data came from
    if source is not None:
        self.children[name].sources.append(source)
def grow(self, path):
    """
    Grow the metadata tree for the given directory path

    Note: For each path, grow() should be run only once. Growing the tree
    from the same path multiple times with attribute adding using the "+"
    sign leads to adding the value more than once!

    :param path: directory to scan for *.fmf files, or None (no-op)
    """
    if path is None:
        return
    path = path.rstrip("/")
    log.info("Walking through directory {0}".format(
        os.path.abspath(path)))
    # Only the top level of 'path' is inspected here; subdirectories are
    # handled by the recursive self.child() calls below
    dirpath, dirnames, filenames = next(os.walk(path))
    # Investigate main.fmf as the first file (for correct inheritance)
    filenames = sorted(
        [filename for filename in filenames if filename.endswith(SUFFIX)])
    try:
        filenames.insert(0, filenames.pop(filenames.index(MAIN)))
    except ValueError:
        pass
    # Check every metadata file and load data (ignore hidden)
    for filename in filenames:
        if filename.startswith("."):
            continue
        fullpath = os.path.abspath(os.path.join(dirpath, filename))
        log.info("Checking file {0}".format(fullpath))
        try:
            # FullLoader: no arbitrary-object construction from YAML
            with open(fullpath) as datafile:
                data = yaml.load(datafile, Loader=FullLoader)
        except yaml.scanner.ScannerError as error:
            raise(utils.FileError("Failed to parse '{0}'\n{1}".format(
                fullpath, error)))
        log.data(pretty(data))
        # Handle main.fmf as data for self
        if filename == MAIN:
            self.sources.append(fullpath)
            self.update(data)
        # Handle other *.fmf files as children
        else:
            self.child(os.path.splitext(filename)[0], data, fullpath)
    # Explore every child directory (ignore hidden dirs and subtrees)
    for dirname in sorted(dirnames):
        if dirname.startswith("."):
            continue
        # Ignore metadata subtrees
        if os.path.isdir(os.path.join(path, dirname, SUFFIX)):
            log.debug("Ignoring metadata tree '{0}'.".format(dirname))
            continue
        self.child(dirname, os.path.join(path, dirname))
    # Remove empty children (ignore directories without metadata)
    for name in list(self.children.keys()):
        child = self.children[name]
        if not child.data and not child.children:
            del(self.children[name])
            log.debug("Empty tree '{0}' removed.".format(child.name))
    # Apply inheritance when all scattered data are gathered.
    # This is done only once, from the top parent object.
    if self.parent is None:
        self.inherit()
def climb(self, whole=False):
""" Climb through the tree (iterate leaf/all nodes) """
if whole or not self.children:
yield self
for name, child in self.children.items():
for node in child.climb(whole):
yield node
def find(self, name):
""" Find node with given name """
for node in self.climb(whole=True):
if node.name == name:
return node
return None
def prune(self, whole=False, keys=[], names=[], filters=[]):
    """ Filter tree nodes based on given criteria (generator)

    :param whole: iterate all nodes, not just leaves
    :param keys: attribute names that must be present in node data
    :param names: regular expressions matched against node names
    :param filters: advanced filter expressions (see utils.filter)
    """
    # (The mutable default arguments are safe: they are only read here.)
    for node in self.climb(whole):
        # Select only nodes with key content
        if not all([key in node.data for key in keys]):
            continue
        # Select nodes with name matching regular expression
        if names and not any(
                [re.search(name, node.name) for name in names]):
            continue
        # Apply advanced filters if given
        try:
            if not all([utils.filter(filter, node.data, regexp=True)
                    for filter in filters]):
                continue
        # Handle missing attribute as if filter failed
        except utils.FilterError:
            continue
        # All criteria met, thus yield the node
        yield node
def show(self, brief=False, formatting=None, values=[]):
    """ Show metadata as a formatted string (None when node has no data)

    :param brief: show the node name only
    :param formatting: custom format string (literal '\\n' unescaped)
    :param values: expressions evaluated to fill the format string
    """
    # Show nothing if there's nothing
    if not self.data:
        return None
    # Custom formatting
    if formatting is not None:
        formatting = re.sub("\\\\n", "\n", formatting)
        # These locals are intentionally "unused": they form the
        # namespace available to the eval()ed value expressions below
        name = self.name
        data = self.data
        root = self.root
        sources = self.sources
        evaluated = []
        for value in values:
            # NOTE(review): eval() of caller-supplied expressions -- safe
            # only for trusted command-line input, never untrusted data
            evaluated.append(eval(value))
        return formatting.format(*evaluated)
    # Show the name
    output = utils.color(self.name, 'red')
    if brief:
        return output + "\n"
    # List available attributes
    for key, value in sorted(self.data.items()):
        output += "\n{0}: ".format(utils.color(key, 'green'))
        if isinstance(value, type("")):
            output += value
        elif isinstance(value, list) and all(
                [isinstance(item, type("")) for item in value]):
            output += utils.listed(value)
        else:
            output += pretty(value)
    # (Removed a stray bare 'output' expression statement that had no
    # effect before the return.)
    return output + "\n"
|
psss/fmf | fmf/base.py | Tree.merge | python | def merge(self, parent=None):
# Check parent, append source files
if parent is None:
parent = self.parent
if parent is None:
return
self.sources = parent.sources + self.sources
# Merge child data with parent data
data = copy.deepcopy(parent.data)
for key, value in sorted(self.data.items()):
# Handle attribute adding
if key.endswith('+'):
key = key.rstrip('+')
if key in data:
# Use dict.update() for merging dictionaries
if type(data[key]) == type(value) == dict:
data[key].update(value)
continue
try:
value = data[key] + value
except TypeError as error:
raise utils.MergeError(
"MergeError: Key '{0}' in {1} ({2}).".format(
key, self.name, str(error)))
# And finally update the value
data[key] = value
self.data = data | Merge parent data | train | https://github.com/psss/fmf/blob/419f2f195d92339b8f9e5d11c0bea0f303e5fd75/fmf/base.py#L111-L138 | null | class Tree(object):
""" Metadata Tree """
def __init__(self, data, name=None, parent=None):
"""
Initialize metadata tree from directory path or data dictionary
Data parameter can be either a string with directory path to be
explored or a dictionary with the values already prepared.
"""
# Bail out if no data and no parent given
if not data and not parent:
raise utils.GeneralError(
"No data or parent provided to initialize the tree.")
# Initialize family relations, object data and source files
self.parent = parent
self.children = dict()
self.data = dict()
self.sources = list()
self.root = None
self.version = utils.VERSION
self.original_data = dict()
# Special handling for top parent
if self.parent is None:
self.name = "/"
if not isinstance(data, dict):
self._initialize(path=data)
data = self.root
# Handle child node creation
else:
self.root = self.parent.root
self.name = os.path.join(self.parent.name, name)
# Initialize data
if isinstance(data, dict):
self.update(data)
else:
self.grow(data)
log.debug("New tree '{0}' created.".format(self))
def __unicode__(self):
""" Use tree name as identifier """
return self.name # pragma: no cover
def _initialize(self, path):
""" Find metadata tree root, detect format version """
# Find the tree root
root = os.path.abspath(path)
try:
while ".fmf" not in next(os.walk(root))[1]:
if root == "/":
raise utils.RootError(
"Unable to find tree root for '{0}'.".format(
os.path.abspath(path)))
root = os.path.abspath(os.path.join(root, os.pardir))
except StopIteration:
raise utils.FileError("Invalid directory path: {0}".format(root))
log.info("Root directory found: {0}".format(root))
self.root = root
# Detect format version
try:
with open(os.path.join(self.root, ".fmf", "version")) as version:
self.version = int(version.read())
log.info("Format version detected: {0}".format(self.version))
except IOError as error:
raise utils.FormatError(
"Unable to detect format version: {0}".format(error))
except ValueError:
raise utils.FormatError("Invalid version format")
def inherit(self):
""" Apply inheritance """
# Preserve original data and merge parent
# (original data needed for custom inheritance extensions)
self.original_data = self.data
self.merge()
log.debug("Data for '{0}' inherited.".format(self))
log.data(pretty(self.data))
# Apply inheritance to all children
for child in self.children.values():
child.inherit()
def update(self, data):
""" Update metadata, handle virtual hierarchy """
# Nothing to do if no data
if data is None:
return
for key, value in sorted(data.items()):
# Handle child attributes
if key.startswith('/'):
name = key.lstrip('/')
# Handle deeper nesting (e.g. keys like /one/two/three) by
# extracting only the first level of the hierarchy as name
match = re.search("([^/]+)(/.*)", name)
if match:
name = match.groups()[0]
value = {match.groups()[1]: value}
# Update existing child or create a new one
self.child(name, value)
# Update regular attributes
else:
self.data[key] = value
log.debug("Data for '{0}' updated.".format(self))
log.data(pretty(self.data))
def get(self, name=None, default=None):
"""
Get attribute value or return default
Whole data dictionary is returned when no attribute provided.
Supports direct values retrieval from deep dictionaries as well.
Dictionary path should be provided as list. The following two
examples are equal:
tree.data['hardware']['memory']['size']
tree.get(['hardware', 'memory', 'size'])
However the latter approach will also correctly handle providing
default value when any of the dictionary keys does not exist.
"""
# Return the whole dictionary if no attribute specified
if name is None:
return self.data
if not isinstance(name, list):
name = [name]
data = self.data
try:
for key in name:
data = data[key]
except KeyError:
return default
return data
def child(self, name, data, source=None):
""" Create or update child with given data """
try:
if isinstance(data, dict):
self.children[name].update(data)
else:
self.children[name].grow(data)
except KeyError:
self.children[name] = Tree(data, name, parent=self)
# Save source file
if source is not None:
self.children[name].sources.append(source)
def grow(self, path):
"""
Grow the metadata tree for the given directory path
Note: For each path, grow() should be run only once. Growing the tree
from the same path multiple times with attribute adding using the "+"
sign leads to adding the value more than once!
"""
if path is None:
return
path = path.rstrip("/")
log.info("Walking through directory {0}".format(
os.path.abspath(path)))
dirpath, dirnames, filenames = next(os.walk(path))
# Investigate main.fmf as the first file (for correct inheritance)
filenames = sorted(
[filename for filename in filenames if filename.endswith(SUFFIX)])
try:
filenames.insert(0, filenames.pop(filenames.index(MAIN)))
except ValueError:
pass
# Check every metadata file and load data (ignore hidden)
for filename in filenames:
if filename.startswith("."):
continue
fullpath = os.path.abspath(os.path.join(dirpath, filename))
log.info("Checking file {0}".format(fullpath))
try:
with open(fullpath) as datafile:
data = yaml.load(datafile, Loader=FullLoader)
except yaml.scanner.ScannerError as error:
raise(utils.FileError("Failed to parse '{0}'\n{1}".format(
fullpath, error)))
log.data(pretty(data))
# Handle main.fmf as data for self
if filename == MAIN:
self.sources.append(fullpath)
self.update(data)
# Handle other *.fmf files as children
else:
self.child(os.path.splitext(filename)[0], data, fullpath)
# Explore every child directory (ignore hidden dirs and subtrees)
for dirname in sorted(dirnames):
if dirname.startswith("."):
continue
# Ignore metadata subtrees
if os.path.isdir(os.path.join(path, dirname, SUFFIX)):
log.debug("Ignoring metadata tree '{0}'.".format(dirname))
continue
self.child(dirname, os.path.join(path, dirname))
# Remove empty children (ignore directories without metadata)
for name in list(self.children.keys()):
child = self.children[name]
if not child.data and not child.children:
del(self.children[name])
log.debug("Empty tree '{0}' removed.".format(child.name))
# Apply inheritance when all scattered data are gathered.
# This is done only once, from the top parent object.
if self.parent is None:
self.inherit()
def climb(self, whole=False):
""" Climb through the tree (iterate leaf/all nodes) """
if whole or not self.children:
yield self
for name, child in self.children.items():
for node in child.climb(whole):
yield node
def find(self, name):
""" Find node with given name """
for node in self.climb(whole=True):
if node.name == name:
return node
return None
def prune(self, whole=False, keys=[], names=[], filters=[]):
""" Filter tree nodes based on given criteria """
for node in self.climb(whole):
# Select only nodes with key content
if not all([key in node.data for key in keys]):
continue
# Select nodes with name matching regular expression
if names and not any(
[re.search(name, node.name) for name in names]):
continue
# Apply advanced filters if given
try:
if not all([utils.filter(filter, node.data, regexp=True)
for filter in filters]):
continue
# Handle missing attribute as if filter failed
except utils.FilterError:
continue
# All criteria met, thus yield the node
yield node
def show(self, brief=False, formatting=None, values=[]):
""" Show metadata """
# Show nothing if there's nothing
if not self.data:
return None
# Custom formatting
if formatting is not None:
formatting = re.sub("\\\\n", "\n", formatting)
name = self.name
data = self.data
root = self.root
sources = self.sources
evaluated = []
for value in values:
evaluated.append(eval(value))
return formatting.format(*evaluated)
# Show the name
output = utils.color(self.name, 'red')
if brief:
return output + "\n"
# List available attributes
for key, value in sorted(self.data.items()):
output += "\n{0}: ".format(utils.color(key, 'green'))
if isinstance(value, type("")):
output += value
elif isinstance(value, list) and all(
[isinstance(item, type("")) for item in value]):
output += utils.listed(value)
else:
output += pretty(value)
output
return output + "\n"
|
psss/fmf | fmf/base.py | Tree.inherit | python | def inherit(self):
# Preserve original data and merge parent
# (original data needed for custom inheritance extensions)
self.original_data = self.data
self.merge()
log.debug("Data for '{0}' inherited.".format(self))
log.data(pretty(self.data))
# Apply inheritance to all children
for child in self.children.values():
child.inherit() | Apply inheritance | train | https://github.com/psss/fmf/blob/419f2f195d92339b8f9e5d11c0bea0f303e5fd75/fmf/base.py#L140-L150 | [
"def merge(self, parent=None):\n \"\"\" Merge parent data \"\"\"\n # Check parent, append source files\n if parent is None:\n parent = self.parent\n if parent is None:\n return\n self.sources = parent.sources + self.sources\n # Merge child data with parent data\n data = copy.deepcopy(parent.data)\n for key, value in sorted(self.data.items()):\n # Handle attribute adding\n if key.endswith('+'):\n key = key.rstrip('+')\n if key in data:\n # Use dict.update() for merging dictionaries\n if type(data[key]) == type(value) == dict:\n data[key].update(value)\n continue\n try:\n value = data[key] + value\n except TypeError as error:\n raise utils.MergeError(\n \"MergeError: Key '{0}' in {1} ({2}).\".format(\n key, self.name, str(error)))\n # And finally update the value\n data[key] = value\n self.data = data\n"
] | class Tree(object):
""" Metadata Tree """
def __init__(self, data, name=None, parent=None):
"""
Initialize metadata tree from directory path or data dictionary
Data parameter can be either a string with directory path to be
explored or a dictionary with the values already prepared.
"""
# Bail out if no data and no parent given
if not data and not parent:
raise utils.GeneralError(
"No data or parent provided to initialize the tree.")
# Initialize family relations, object data and source files
self.parent = parent
self.children = dict()
self.data = dict()
self.sources = list()
self.root = None
self.version = utils.VERSION
self.original_data = dict()
# Special handling for top parent
if self.parent is None:
self.name = "/"
if not isinstance(data, dict):
self._initialize(path=data)
data = self.root
# Handle child node creation
else:
self.root = self.parent.root
self.name = os.path.join(self.parent.name, name)
# Initialize data
if isinstance(data, dict):
self.update(data)
else:
self.grow(data)
log.debug("New tree '{0}' created.".format(self))
def __unicode__(self):
""" Use tree name as identifier """
return self.name # pragma: no cover
def _initialize(self, path):
""" Find metadata tree root, detect format version """
# Find the tree root
root = os.path.abspath(path)
try:
while ".fmf" not in next(os.walk(root))[1]:
if root == "/":
raise utils.RootError(
"Unable to find tree root for '{0}'.".format(
os.path.abspath(path)))
root = os.path.abspath(os.path.join(root, os.pardir))
except StopIteration:
raise utils.FileError("Invalid directory path: {0}".format(root))
log.info("Root directory found: {0}".format(root))
self.root = root
# Detect format version
try:
with open(os.path.join(self.root, ".fmf", "version")) as version:
self.version = int(version.read())
log.info("Format version detected: {0}".format(self.version))
except IOError as error:
raise utils.FormatError(
"Unable to detect format version: {0}".format(error))
except ValueError:
raise utils.FormatError("Invalid version format")
def merge(self, parent=None):
""" Merge parent data """
# Check parent, append source files
if parent is None:
parent = self.parent
if parent is None:
return
self.sources = parent.sources + self.sources
# Merge child data with parent data
data = copy.deepcopy(parent.data)
for key, value in sorted(self.data.items()):
# Handle attribute adding
if key.endswith('+'):
key = key.rstrip('+')
if key in data:
# Use dict.update() for merging dictionaries
if type(data[key]) == type(value) == dict:
data[key].update(value)
continue
try:
value = data[key] + value
except TypeError as error:
raise utils.MergeError(
"MergeError: Key '{0}' in {1} ({2}).".format(
key, self.name, str(error)))
# And finally update the value
data[key] = value
self.data = data
def update(self, data):
""" Update metadata, handle virtual hierarchy """
# Nothing to do if no data
if data is None:
return
for key, value in sorted(data.items()):
# Handle child attributes
if key.startswith('/'):
name = key.lstrip('/')
# Handle deeper nesting (e.g. keys like /one/two/three) by
# extracting only the first level of the hierarchy as name
match = re.search("([^/]+)(/.*)", name)
if match:
name = match.groups()[0]
value = {match.groups()[1]: value}
# Update existing child or create a new one
self.child(name, value)
# Update regular attributes
else:
self.data[key] = value
log.debug("Data for '{0}' updated.".format(self))
log.data(pretty(self.data))
def get(self, name=None, default=None):
"""
Get attribute value or return default
Whole data dictionary is returned when no attribute provided.
Supports direct values retrieval from deep dictionaries as well.
Dictionary path should be provided as list. The following two
examples are equal:
tree.data['hardware']['memory']['size']
tree.get(['hardware', 'memory', 'size'])
However the latter approach will also correctly handle providing
default value when any of the dictionary keys does not exist.
"""
# Return the whole dictionary if no attribute specified
if name is None:
return self.data
if not isinstance(name, list):
name = [name]
data = self.data
try:
for key in name:
data = data[key]
except KeyError:
return default
return data
def child(self, name, data, source=None):
""" Create or update child with given data """
try:
if isinstance(data, dict):
self.children[name].update(data)
else:
self.children[name].grow(data)
except KeyError:
self.children[name] = Tree(data, name, parent=self)
# Save source file
if source is not None:
self.children[name].sources.append(source)
def grow(self, path):
"""
Grow the metadata tree for the given directory path
Note: For each path, grow() should be run only once. Growing the tree
from the same path multiple times with attribute adding using the "+"
sign leads to adding the value more than once!
"""
if path is None:
return
path = path.rstrip("/")
log.info("Walking through directory {0}".format(
os.path.abspath(path)))
dirpath, dirnames, filenames = next(os.walk(path))
# Investigate main.fmf as the first file (for correct inheritance)
filenames = sorted(
[filename for filename in filenames if filename.endswith(SUFFIX)])
try:
filenames.insert(0, filenames.pop(filenames.index(MAIN)))
except ValueError:
pass
# Check every metadata file and load data (ignore hidden)
for filename in filenames:
if filename.startswith("."):
continue
fullpath = os.path.abspath(os.path.join(dirpath, filename))
log.info("Checking file {0}".format(fullpath))
try:
with open(fullpath) as datafile:
data = yaml.load(datafile, Loader=FullLoader)
except yaml.scanner.ScannerError as error:
raise(utils.FileError("Failed to parse '{0}'\n{1}".format(
fullpath, error)))
log.data(pretty(data))
# Handle main.fmf as data for self
if filename == MAIN:
self.sources.append(fullpath)
self.update(data)
# Handle other *.fmf files as children
else:
self.child(os.path.splitext(filename)[0], data, fullpath)
# Explore every child directory (ignore hidden dirs and subtrees)
for dirname in sorted(dirnames):
if dirname.startswith("."):
continue
# Ignore metadata subtrees
if os.path.isdir(os.path.join(path, dirname, SUFFIX)):
log.debug("Ignoring metadata tree '{0}'.".format(dirname))
continue
self.child(dirname, os.path.join(path, dirname))
# Remove empty children (ignore directories without metadata)
for name in list(self.children.keys()):
child = self.children[name]
if not child.data and not child.children:
del(self.children[name])
log.debug("Empty tree '{0}' removed.".format(child.name))
# Apply inheritance when all scattered data are gathered.
# This is done only once, from the top parent object.
if self.parent is None:
self.inherit()
def climb(self, whole=False):
""" Climb through the tree (iterate leaf/all nodes) """
if whole or not self.children:
yield self
for name, child in self.children.items():
for node in child.climb(whole):
yield node
def find(self, name):
""" Find node with given name """
for node in self.climb(whole=True):
if node.name == name:
return node
return None
def prune(self, whole=False, keys=[], names=[], filters=[]):
""" Filter tree nodes based on given criteria """
for node in self.climb(whole):
# Select only nodes with key content
if not all([key in node.data for key in keys]):
continue
# Select nodes with name matching regular expression
if names and not any(
[re.search(name, node.name) for name in names]):
continue
# Apply advanced filters if given
try:
if not all([utils.filter(filter, node.data, regexp=True)
for filter in filters]):
continue
# Handle missing attribute as if filter failed
except utils.FilterError:
continue
# All criteria met, thus yield the node
yield node
def show(self, brief=False, formatting=None, values=[]):
""" Show metadata """
# Show nothing if there's nothing
if not self.data:
return None
# Custom formatting
if formatting is not None:
formatting = re.sub("\\\\n", "\n", formatting)
name = self.name
data = self.data
root = self.root
sources = self.sources
evaluated = []
for value in values:
evaluated.append(eval(value))
return formatting.format(*evaluated)
# Show the name
output = utils.color(self.name, 'red')
if brief:
return output + "\n"
# List available attributes
for key, value in sorted(self.data.items()):
output += "\n{0}: ".format(utils.color(key, 'green'))
if isinstance(value, type("")):
output += value
elif isinstance(value, list) and all(
[isinstance(item, type("")) for item in value]):
output += utils.listed(value)
else:
output += pretty(value)
output
return output + "\n"
|
psss/fmf | fmf/base.py | Tree.update | python | def update(self, data):
# Nothing to do if no data
if data is None:
return
for key, value in sorted(data.items()):
# Handle child attributes
if key.startswith('/'):
name = key.lstrip('/')
# Handle deeper nesting (e.g. keys like /one/two/three) by
# extracting only the first level of the hierarchy as name
match = re.search("([^/]+)(/.*)", name)
if match:
name = match.groups()[0]
value = {match.groups()[1]: value}
# Update existing child or create a new one
self.child(name, value)
# Update regular attributes
else:
self.data[key] = value
log.debug("Data for '{0}' updated.".format(self))
log.data(pretty(self.data)) | Update metadata, handle virtual hierarchy | train | https://github.com/psss/fmf/blob/419f2f195d92339b8f9e5d11c0bea0f303e5fd75/fmf/base.py#L152-L173 | [
"def child(self, name, data, source=None):\n \"\"\" Create or update child with given data \"\"\"\n try:\n if isinstance(data, dict):\n self.children[name].update(data)\n else:\n self.children[name].grow(data)\n except KeyError:\n self.children[name] = Tree(data, name, parent=self)\n # Save source file\n if source is not None:\n self.children[name].sources.append(source)\n"
] | class Tree(object):
""" Metadata Tree """
def __init__(self, data, name=None, parent=None):
"""
Initialize metadata tree from directory path or data dictionary
Data parameter can be either a string with directory path to be
explored or a dictionary with the values already prepared.
"""
# Bail out if no data and no parent given
if not data and not parent:
raise utils.GeneralError(
"No data or parent provided to initialize the tree.")
# Initialize family relations, object data and source files
self.parent = parent
self.children = dict()
self.data = dict()
self.sources = list()
self.root = None
self.version = utils.VERSION
self.original_data = dict()
# Special handling for top parent
if self.parent is None:
self.name = "/"
if not isinstance(data, dict):
self._initialize(path=data)
data = self.root
# Handle child node creation
else:
self.root = self.parent.root
self.name = os.path.join(self.parent.name, name)
# Initialize data
if isinstance(data, dict):
self.update(data)
else:
self.grow(data)
log.debug("New tree '{0}' created.".format(self))
def __unicode__(self):
""" Use tree name as identifier """
return self.name # pragma: no cover
def _initialize(self, path):
""" Find metadata tree root, detect format version """
# Find the tree root
root = os.path.abspath(path)
try:
while ".fmf" not in next(os.walk(root))[1]:
if root == "/":
raise utils.RootError(
"Unable to find tree root for '{0}'.".format(
os.path.abspath(path)))
root = os.path.abspath(os.path.join(root, os.pardir))
except StopIteration:
raise utils.FileError("Invalid directory path: {0}".format(root))
log.info("Root directory found: {0}".format(root))
self.root = root
# Detect format version
try:
with open(os.path.join(self.root, ".fmf", "version")) as version:
self.version = int(version.read())
log.info("Format version detected: {0}".format(self.version))
except IOError as error:
raise utils.FormatError(
"Unable to detect format version: {0}".format(error))
except ValueError:
raise utils.FormatError("Invalid version format")
def merge(self, parent=None):
""" Merge parent data """
# Check parent, append source files
if parent is None:
parent = self.parent
if parent is None:
return
self.sources = parent.sources + self.sources
# Merge child data with parent data
data = copy.deepcopy(parent.data)
for key, value in sorted(self.data.items()):
# Handle attribute adding
if key.endswith('+'):
key = key.rstrip('+')
if key in data:
# Use dict.update() for merging dictionaries
if type(data[key]) == type(value) == dict:
data[key].update(value)
continue
try:
value = data[key] + value
except TypeError as error:
raise utils.MergeError(
"MergeError: Key '{0}' in {1} ({2}).".format(
key, self.name, str(error)))
# And finally update the value
data[key] = value
self.data = data
def inherit(self):
""" Apply inheritance """
# Preserve original data and merge parent
# (original data needed for custom inheritance extensions)
self.original_data = self.data
self.merge()
log.debug("Data for '{0}' inherited.".format(self))
log.data(pretty(self.data))
# Apply inheritance to all children
for child in self.children.values():
child.inherit()
def get(self, name=None, default=None):
"""
Get attribute value or return default
Whole data dictionary is returned when no attribute provided.
Supports direct values retrieval from deep dictionaries as well.
Dictionary path should be provided as list. The following two
examples are equal:
tree.data['hardware']['memory']['size']
tree.get(['hardware', 'memory', 'size'])
However the latter approach will also correctly handle providing
default value when any of the dictionary keys does not exist.
"""
# Return the whole dictionary if no attribute specified
if name is None:
return self.data
if not isinstance(name, list):
name = [name]
data = self.data
try:
for key in name:
data = data[key]
except KeyError:
return default
return data
def child(self, name, data, source=None):
""" Create or update child with given data """
try:
if isinstance(data, dict):
self.children[name].update(data)
else:
self.children[name].grow(data)
except KeyError:
self.children[name] = Tree(data, name, parent=self)
# Save source file
if source is not None:
self.children[name].sources.append(source)
def grow(self, path):
"""
Grow the metadata tree for the given directory path
Note: For each path, grow() should be run only once. Growing the tree
from the same path multiple times with attribute adding using the "+"
sign leads to adding the value more than once!
"""
if path is None:
return
path = path.rstrip("/")
log.info("Walking through directory {0}".format(
os.path.abspath(path)))
dirpath, dirnames, filenames = next(os.walk(path))
# Investigate main.fmf as the first file (for correct inheritance)
filenames = sorted(
[filename for filename in filenames if filename.endswith(SUFFIX)])
try:
filenames.insert(0, filenames.pop(filenames.index(MAIN)))
except ValueError:
pass
# Check every metadata file and load data (ignore hidden)
for filename in filenames:
if filename.startswith("."):
continue
fullpath = os.path.abspath(os.path.join(dirpath, filename))
log.info("Checking file {0}".format(fullpath))
try:
with open(fullpath) as datafile:
data = yaml.load(datafile, Loader=FullLoader)
except yaml.scanner.ScannerError as error:
raise(utils.FileError("Failed to parse '{0}'\n{1}".format(
fullpath, error)))
log.data(pretty(data))
# Handle main.fmf as data for self
if filename == MAIN:
self.sources.append(fullpath)
self.update(data)
# Handle other *.fmf files as children
else:
self.child(os.path.splitext(filename)[0], data, fullpath)
# Explore every child directory (ignore hidden dirs and subtrees)
for dirname in sorted(dirnames):
if dirname.startswith("."):
continue
# Ignore metadata subtrees
if os.path.isdir(os.path.join(path, dirname, SUFFIX)):
log.debug("Ignoring metadata tree '{0}'.".format(dirname))
continue
self.child(dirname, os.path.join(path, dirname))
# Remove empty children (ignore directories without metadata)
for name in list(self.children.keys()):
child = self.children[name]
if not child.data and not child.children:
del(self.children[name])
log.debug("Empty tree '{0}' removed.".format(child.name))
# Apply inheritance when all scattered data are gathered.
# This is done only once, from the top parent object.
if self.parent is None:
self.inherit()
def climb(self, whole=False):
""" Climb through the tree (iterate leaf/all nodes) """
if whole or not self.children:
yield self
for name, child in self.children.items():
for node in child.climb(whole):
yield node
def find(self, name):
""" Find node with given name """
for node in self.climb(whole=True):
if node.name == name:
return node
return None
def prune(self, whole=False, keys=[], names=[], filters=[]):
""" Filter tree nodes based on given criteria """
for node in self.climb(whole):
# Select only nodes with key content
if not all([key in node.data for key in keys]):
continue
# Select nodes with name matching regular expression
if names and not any(
[re.search(name, node.name) for name in names]):
continue
# Apply advanced filters if given
try:
if not all([utils.filter(filter, node.data, regexp=True)
for filter in filters]):
continue
# Handle missing attribute as if filter failed
except utils.FilterError:
continue
# All criteria met, thus yield the node
yield node
def show(self, brief=False, formatting=None, values=[]):
""" Show metadata """
# Show nothing if there's nothing
if not self.data:
return None
# Custom formatting
if formatting is not None:
formatting = re.sub("\\\\n", "\n", formatting)
name = self.name
data = self.data
root = self.root
sources = self.sources
evaluated = []
for value in values:
evaluated.append(eval(value))
return formatting.format(*evaluated)
# Show the name
output = utils.color(self.name, 'red')
if brief:
return output + "\n"
# List available attributes
for key, value in sorted(self.data.items()):
output += "\n{0}: ".format(utils.color(key, 'green'))
if isinstance(value, type("")):
output += value
elif isinstance(value, list) and all(
[isinstance(item, type("")) for item in value]):
output += utils.listed(value)
else:
output += pretty(value)
output
return output + "\n"
|
psss/fmf | fmf/base.py | Tree.get | python | def get(self, name=None, default=None):
# Return the whole dictionary if no attribute specified
if name is None:
return self.data
if not isinstance(name, list):
name = [name]
data = self.data
try:
for key in name:
data = data[key]
except KeyError:
return default
return data | Get attribute value or return default
Whole data dictionary is returned when no attribute provided.
Supports direct values retrieval from deep dictionaries as well.
Dictionary path should be provided as list. The following two
examples are equal:
tree.data['hardware']['memory']['size']
tree.get(['hardware', 'memory', 'size'])
However the latter approach will also correctly handle providing
default value when any of the dictionary keys does not exist. | train | https://github.com/psss/fmf/blob/419f2f195d92339b8f9e5d11c0bea0f303e5fd75/fmf/base.py#L175-L202 | null | class Tree(object):
""" Metadata Tree """
def __init__(self, data, name=None, parent=None):
"""
Initialize metadata tree from directory path or data dictionary
Data parameter can be either a string with directory path to be
explored or a dictionary with the values already prepared.
"""
# Bail out if no data and no parent given
if not data and not parent:
raise utils.GeneralError(
"No data or parent provided to initialize the tree.")
# Initialize family relations, object data and source files
self.parent = parent
self.children = dict()
self.data = dict()
self.sources = list()
self.root = None
self.version = utils.VERSION
self.original_data = dict()
# Special handling for top parent
if self.parent is None:
self.name = "/"
if not isinstance(data, dict):
self._initialize(path=data)
data = self.root
# Handle child node creation
else:
self.root = self.parent.root
self.name = os.path.join(self.parent.name, name)
# Initialize data
if isinstance(data, dict):
self.update(data)
else:
self.grow(data)
log.debug("New tree '{0}' created.".format(self))
def __unicode__(self):
""" Use tree name as identifier """
return self.name # pragma: no cover
def _initialize(self, path):
""" Find metadata tree root, detect format version """
# Find the tree root
root = os.path.abspath(path)
try:
while ".fmf" not in next(os.walk(root))[1]:
if root == "/":
raise utils.RootError(
"Unable to find tree root for '{0}'.".format(
os.path.abspath(path)))
root = os.path.abspath(os.path.join(root, os.pardir))
except StopIteration:
raise utils.FileError("Invalid directory path: {0}".format(root))
log.info("Root directory found: {0}".format(root))
self.root = root
# Detect format version
try:
with open(os.path.join(self.root, ".fmf", "version")) as version:
self.version = int(version.read())
log.info("Format version detected: {0}".format(self.version))
except IOError as error:
raise utils.FormatError(
"Unable to detect format version: {0}".format(error))
except ValueError:
raise utils.FormatError("Invalid version format")
def merge(self, parent=None):
""" Merge parent data """
# Check parent, append source files
if parent is None:
parent = self.parent
if parent is None:
return
self.sources = parent.sources + self.sources
# Merge child data with parent data
data = copy.deepcopy(parent.data)
for key, value in sorted(self.data.items()):
# Handle attribute adding
if key.endswith('+'):
key = key.rstrip('+')
if key in data:
# Use dict.update() for merging dictionaries
if type(data[key]) == type(value) == dict:
data[key].update(value)
continue
try:
value = data[key] + value
except TypeError as error:
raise utils.MergeError(
"MergeError: Key '{0}' in {1} ({2}).".format(
key, self.name, str(error)))
# And finally update the value
data[key] = value
self.data = data
def inherit(self):
""" Apply inheritance """
# Preserve original data and merge parent
# (original data needed for custom inheritance extensions)
self.original_data = self.data
self.merge()
log.debug("Data for '{0}' inherited.".format(self))
log.data(pretty(self.data))
# Apply inheritance to all children
for child in self.children.values():
child.inherit()
def update(self, data):
""" Update metadata, handle virtual hierarchy """
# Nothing to do if no data
if data is None:
return
for key, value in sorted(data.items()):
# Handle child attributes
if key.startswith('/'):
name = key.lstrip('/')
# Handle deeper nesting (e.g. keys like /one/two/three) by
# extracting only the first level of the hierarchy as name
match = re.search("([^/]+)(/.*)", name)
if match:
name = match.groups()[0]
value = {match.groups()[1]: value}
# Update existing child or create a new one
self.child(name, value)
# Update regular attributes
else:
self.data[key] = value
log.debug("Data for '{0}' updated.".format(self))
log.data(pretty(self.data))
def child(self, name, data, source=None):
""" Create or update child with given data """
try:
if isinstance(data, dict):
self.children[name].update(data)
else:
self.children[name].grow(data)
except KeyError:
self.children[name] = Tree(data, name, parent=self)
# Save source file
if source is not None:
self.children[name].sources.append(source)
def grow(self, path):
"""
Grow the metadata tree for the given directory path
Note: For each path, grow() should be run only once. Growing the tree
from the same path multiple times with attribute adding using the "+"
sign leads to adding the value more than once!
"""
if path is None:
return
path = path.rstrip("/")
log.info("Walking through directory {0}".format(
os.path.abspath(path)))
dirpath, dirnames, filenames = next(os.walk(path))
# Investigate main.fmf as the first file (for correct inheritance)
filenames = sorted(
[filename for filename in filenames if filename.endswith(SUFFIX)])
try:
filenames.insert(0, filenames.pop(filenames.index(MAIN)))
except ValueError:
pass
# Check every metadata file and load data (ignore hidden)
for filename in filenames:
if filename.startswith("."):
continue
fullpath = os.path.abspath(os.path.join(dirpath, filename))
log.info("Checking file {0}".format(fullpath))
try:
with open(fullpath) as datafile:
data = yaml.load(datafile, Loader=FullLoader)
except yaml.scanner.ScannerError as error:
raise(utils.FileError("Failed to parse '{0}'\n{1}".format(
fullpath, error)))
log.data(pretty(data))
# Handle main.fmf as data for self
if filename == MAIN:
self.sources.append(fullpath)
self.update(data)
# Handle other *.fmf files as children
else:
self.child(os.path.splitext(filename)[0], data, fullpath)
# Explore every child directory (ignore hidden dirs and subtrees)
for dirname in sorted(dirnames):
if dirname.startswith("."):
continue
# Ignore metadata subtrees
if os.path.isdir(os.path.join(path, dirname, SUFFIX)):
log.debug("Ignoring metadata tree '{0}'.".format(dirname))
continue
self.child(dirname, os.path.join(path, dirname))
# Remove empty children (ignore directories without metadata)
for name in list(self.children.keys()):
child = self.children[name]
if not child.data and not child.children:
del(self.children[name])
log.debug("Empty tree '{0}' removed.".format(child.name))
# Apply inheritance when all scattered data are gathered.
# This is done only once, from the top parent object.
if self.parent is None:
self.inherit()
def climb(self, whole=False):
""" Climb through the tree (iterate leaf/all nodes) """
if whole or not self.children:
yield self
for name, child in self.children.items():
for node in child.climb(whole):
yield node
def find(self, name):
""" Find node with given name """
for node in self.climb(whole=True):
if node.name == name:
return node
return None
def prune(self, whole=False, keys=[], names=[], filters=[]):
""" Filter tree nodes based on given criteria """
for node in self.climb(whole):
# Select only nodes with key content
if not all([key in node.data for key in keys]):
continue
# Select nodes with name matching regular expression
if names and not any(
[re.search(name, node.name) for name in names]):
continue
# Apply advanced filters if given
try:
if not all([utils.filter(filter, node.data, regexp=True)
for filter in filters]):
continue
# Handle missing attribute as if filter failed
except utils.FilterError:
continue
# All criteria met, thus yield the node
yield node
def show(self, brief=False, formatting=None, values=[]):
""" Show metadata """
# Show nothing if there's nothing
if not self.data:
return None
# Custom formatting
if formatting is not None:
formatting = re.sub("\\\\n", "\n", formatting)
name = self.name
data = self.data
root = self.root
sources = self.sources
evaluated = []
for value in values:
evaluated.append(eval(value))
return formatting.format(*evaluated)
# Show the name
output = utils.color(self.name, 'red')
if brief:
return output + "\n"
# List available attributes
for key, value in sorted(self.data.items()):
output += "\n{0}: ".format(utils.color(key, 'green'))
if isinstance(value, type("")):
output += value
elif isinstance(value, list) and all(
[isinstance(item, type("")) for item in value]):
output += utils.listed(value)
else:
output += pretty(value)
output
return output + "\n"
|
psss/fmf | fmf/base.py | Tree.child | python | def child(self, name, data, source=None):
try:
if isinstance(data, dict):
self.children[name].update(data)
else:
self.children[name].grow(data)
except KeyError:
self.children[name] = Tree(data, name, parent=self)
# Save source file
if source is not None:
self.children[name].sources.append(source) | Create or update child with given data | train | https://github.com/psss/fmf/blob/419f2f195d92339b8f9e5d11c0bea0f303e5fd75/fmf/base.py#L204-L215 | null | class Tree(object):
""" Metadata Tree """
def __init__(self, data, name=None, parent=None):
"""
Initialize metadata tree from directory path or data dictionary
Data parameter can be either a string with directory path to be
explored or a dictionary with the values already prepared.
"""
# Bail out if no data and no parent given
if not data and not parent:
raise utils.GeneralError(
"No data or parent provided to initialize the tree.")
# Initialize family relations, object data and source files
self.parent = parent
self.children = dict()
self.data = dict()
self.sources = list()
self.root = None
self.version = utils.VERSION
self.original_data = dict()
# Special handling for top parent
if self.parent is None:
self.name = "/"
if not isinstance(data, dict):
self._initialize(path=data)
data = self.root
# Handle child node creation
else:
self.root = self.parent.root
self.name = os.path.join(self.parent.name, name)
# Initialize data
if isinstance(data, dict):
self.update(data)
else:
self.grow(data)
log.debug("New tree '{0}' created.".format(self))
def __unicode__(self):
""" Use tree name as identifier """
return self.name # pragma: no cover
def _initialize(self, path):
""" Find metadata tree root, detect format version """
# Find the tree root
root = os.path.abspath(path)
try:
while ".fmf" not in next(os.walk(root))[1]:
if root == "/":
raise utils.RootError(
"Unable to find tree root for '{0}'.".format(
os.path.abspath(path)))
root = os.path.abspath(os.path.join(root, os.pardir))
except StopIteration:
raise utils.FileError("Invalid directory path: {0}".format(root))
log.info("Root directory found: {0}".format(root))
self.root = root
# Detect format version
try:
with open(os.path.join(self.root, ".fmf", "version")) as version:
self.version = int(version.read())
log.info("Format version detected: {0}".format(self.version))
except IOError as error:
raise utils.FormatError(
"Unable to detect format version: {0}".format(error))
except ValueError:
raise utils.FormatError("Invalid version format")
def merge(self, parent=None):
""" Merge parent data """
# Check parent, append source files
if parent is None:
parent = self.parent
if parent is None:
return
self.sources = parent.sources + self.sources
# Merge child data with parent data
data = copy.deepcopy(parent.data)
for key, value in sorted(self.data.items()):
# Handle attribute adding
if key.endswith('+'):
key = key.rstrip('+')
if key in data:
# Use dict.update() for merging dictionaries
if type(data[key]) == type(value) == dict:
data[key].update(value)
continue
try:
value = data[key] + value
except TypeError as error:
raise utils.MergeError(
"MergeError: Key '{0}' in {1} ({2}).".format(
key, self.name, str(error)))
# And finally update the value
data[key] = value
self.data = data
def inherit(self):
""" Apply inheritance """
# Preserve original data and merge parent
# (original data needed for custom inheritance extensions)
self.original_data = self.data
self.merge()
log.debug("Data for '{0}' inherited.".format(self))
log.data(pretty(self.data))
# Apply inheritance to all children
for child in self.children.values():
child.inherit()
def update(self, data):
""" Update metadata, handle virtual hierarchy """
# Nothing to do if no data
if data is None:
return
for key, value in sorted(data.items()):
# Handle child attributes
if key.startswith('/'):
name = key.lstrip('/')
# Handle deeper nesting (e.g. keys like /one/two/three) by
# extracting only the first level of the hierarchy as name
match = re.search("([^/]+)(/.*)", name)
if match:
name = match.groups()[0]
value = {match.groups()[1]: value}
# Update existing child or create a new one
self.child(name, value)
# Update regular attributes
else:
self.data[key] = value
log.debug("Data for '{0}' updated.".format(self))
log.data(pretty(self.data))
def get(self, name=None, default=None):
"""
Get attribute value or return default
Whole data dictionary is returned when no attribute provided.
Supports direct values retrieval from deep dictionaries as well.
Dictionary path should be provided as list. The following two
examples are equal:
tree.data['hardware']['memory']['size']
tree.get(['hardware', 'memory', 'size'])
However the latter approach will also correctly handle providing
default value when any of the dictionary keys does not exist.
"""
# Return the whole dictionary if no attribute specified
if name is None:
return self.data
if not isinstance(name, list):
name = [name]
data = self.data
try:
for key in name:
data = data[key]
except KeyError:
return default
return data
def grow(self, path):
"""
Grow the metadata tree for the given directory path
Note: For each path, grow() should be run only once. Growing the tree
from the same path multiple times with attribute adding using the "+"
sign leads to adding the value more than once!
"""
if path is None:
return
path = path.rstrip("/")
log.info("Walking through directory {0}".format(
os.path.abspath(path)))
dirpath, dirnames, filenames = next(os.walk(path))
# Investigate main.fmf as the first file (for correct inheritance)
filenames = sorted(
[filename for filename in filenames if filename.endswith(SUFFIX)])
try:
filenames.insert(0, filenames.pop(filenames.index(MAIN)))
except ValueError:
pass
# Check every metadata file and load data (ignore hidden)
for filename in filenames:
if filename.startswith("."):
continue
fullpath = os.path.abspath(os.path.join(dirpath, filename))
log.info("Checking file {0}".format(fullpath))
try:
with open(fullpath) as datafile:
data = yaml.load(datafile, Loader=FullLoader)
except yaml.scanner.ScannerError as error:
raise(utils.FileError("Failed to parse '{0}'\n{1}".format(
fullpath, error)))
log.data(pretty(data))
# Handle main.fmf as data for self
if filename == MAIN:
self.sources.append(fullpath)
self.update(data)
# Handle other *.fmf files as children
else:
self.child(os.path.splitext(filename)[0], data, fullpath)
# Explore every child directory (ignore hidden dirs and subtrees)
for dirname in sorted(dirnames):
if dirname.startswith("."):
continue
# Ignore metadata subtrees
if os.path.isdir(os.path.join(path, dirname, SUFFIX)):
log.debug("Ignoring metadata tree '{0}'.".format(dirname))
continue
self.child(dirname, os.path.join(path, dirname))
# Remove empty children (ignore directories without metadata)
for name in list(self.children.keys()):
child = self.children[name]
if not child.data and not child.children:
del(self.children[name])
log.debug("Empty tree '{0}' removed.".format(child.name))
# Apply inheritance when all scattered data are gathered.
# This is done only once, from the top parent object.
if self.parent is None:
self.inherit()
def climb(self, whole=False):
""" Climb through the tree (iterate leaf/all nodes) """
if whole or not self.children:
yield self
for name, child in self.children.items():
for node in child.climb(whole):
yield node
def find(self, name):
""" Find node with given name """
for node in self.climb(whole=True):
if node.name == name:
return node
return None
def prune(self, whole=False, keys=[], names=[], filters=[]):
""" Filter tree nodes based on given criteria """
for node in self.climb(whole):
# Select only nodes with key content
if not all([key in node.data for key in keys]):
continue
# Select nodes with name matching regular expression
if names and not any(
[re.search(name, node.name) for name in names]):
continue
# Apply advanced filters if given
try:
if not all([utils.filter(filter, node.data, regexp=True)
for filter in filters]):
continue
# Handle missing attribute as if filter failed
except utils.FilterError:
continue
# All criteria met, thus yield the node
yield node
def show(self, brief=False, formatting=None, values=[]):
""" Show metadata """
# Show nothing if there's nothing
if not self.data:
return None
# Custom formatting
if formatting is not None:
formatting = re.sub("\\\\n", "\n", formatting)
name = self.name
data = self.data
root = self.root
sources = self.sources
evaluated = []
for value in values:
evaluated.append(eval(value))
return formatting.format(*evaluated)
# Show the name
output = utils.color(self.name, 'red')
if brief:
return output + "\n"
# List available attributes
for key, value in sorted(self.data.items()):
output += "\n{0}: ".format(utils.color(key, 'green'))
if isinstance(value, type("")):
output += value
elif isinstance(value, list) and all(
[isinstance(item, type("")) for item in value]):
output += utils.listed(value)
else:
output += pretty(value)
output
return output + "\n"
|
psss/fmf | fmf/base.py | Tree.grow | python | def grow(self, path):
if path is None:
return
path = path.rstrip("/")
log.info("Walking through directory {0}".format(
os.path.abspath(path)))
dirpath, dirnames, filenames = next(os.walk(path))
# Investigate main.fmf as the first file (for correct inheritance)
filenames = sorted(
[filename for filename in filenames if filename.endswith(SUFFIX)])
try:
filenames.insert(0, filenames.pop(filenames.index(MAIN)))
except ValueError:
pass
# Check every metadata file and load data (ignore hidden)
for filename in filenames:
if filename.startswith("."):
continue
fullpath = os.path.abspath(os.path.join(dirpath, filename))
log.info("Checking file {0}".format(fullpath))
try:
with open(fullpath) as datafile:
data = yaml.load(datafile, Loader=FullLoader)
except yaml.scanner.ScannerError as error:
raise(utils.FileError("Failed to parse '{0}'\n{1}".format(
fullpath, error)))
log.data(pretty(data))
# Handle main.fmf as data for self
if filename == MAIN:
self.sources.append(fullpath)
self.update(data)
# Handle other *.fmf files as children
else:
self.child(os.path.splitext(filename)[0], data, fullpath)
# Explore every child directory (ignore hidden dirs and subtrees)
for dirname in sorted(dirnames):
if dirname.startswith("."):
continue
# Ignore metadata subtrees
if os.path.isdir(os.path.join(path, dirname, SUFFIX)):
log.debug("Ignoring metadata tree '{0}'.".format(dirname))
continue
self.child(dirname, os.path.join(path, dirname))
# Remove empty children (ignore directories without metadata)
for name in list(self.children.keys()):
child = self.children[name]
if not child.data and not child.children:
del(self.children[name])
log.debug("Empty tree '{0}' removed.".format(child.name))
# Apply inheritance when all scattered data are gathered.
# This is done only once, from the top parent object.
if self.parent is None:
self.inherit() | Grow the metadata tree for the given directory path
Note: For each path, grow() should be run only once. Growing the tree
from the same path multiple times with attribute adding using the "+"
sign leads to adding the value more than once! | train | https://github.com/psss/fmf/blob/419f2f195d92339b8f9e5d11c0bea0f303e5fd75/fmf/base.py#L217-L276 | [
"def inherit(self):\n \"\"\" Apply inheritance \"\"\"\n # Preserve original data and merge parent\n # (original data needed for custom inheritance extensions)\n self.original_data = self.data\n self.merge()\n log.debug(\"Data for '{0}' inherited.\".format(self))\n log.data(pretty(self.data))\n # Apply inheritance to all children\n for child in self.children.values():\n child.inherit()\n",
"def child(self, name, data, source=None):\n \"\"\" Create or update child with given data \"\"\"\n try:\n if isinstance(data, dict):\n self.children[name].update(data)\n else:\n self.children[name].grow(data)\n except KeyError:\n self.children[name] = Tree(data, name, parent=self)\n # Save source file\n if source is not None:\n self.children[name].sources.append(source)\n"
] | class Tree(object):
""" Metadata Tree """
def __init__(self, data, name=None, parent=None):
"""
Initialize metadata tree from directory path or data dictionary
Data parameter can be either a string with directory path to be
explored or a dictionary with the values already prepared.
"""
# Bail out if no data and no parent given
if not data and not parent:
raise utils.GeneralError(
"No data or parent provided to initialize the tree.")
# Initialize family relations, object data and source files
self.parent = parent
self.children = dict()
self.data = dict()
self.sources = list()
self.root = None
self.version = utils.VERSION
self.original_data = dict()
# Special handling for top parent
if self.parent is None:
self.name = "/"
if not isinstance(data, dict):
self._initialize(path=data)
data = self.root
# Handle child node creation
else:
self.root = self.parent.root
self.name = os.path.join(self.parent.name, name)
# Initialize data
if isinstance(data, dict):
self.update(data)
else:
self.grow(data)
log.debug("New tree '{0}' created.".format(self))
def __unicode__(self):
""" Use tree name as identifier """
return self.name # pragma: no cover
def _initialize(self, path):
""" Find metadata tree root, detect format version """
# Find the tree root
root = os.path.abspath(path)
try:
while ".fmf" not in next(os.walk(root))[1]:
if root == "/":
raise utils.RootError(
"Unable to find tree root for '{0}'.".format(
os.path.abspath(path)))
root = os.path.abspath(os.path.join(root, os.pardir))
except StopIteration:
raise utils.FileError("Invalid directory path: {0}".format(root))
log.info("Root directory found: {0}".format(root))
self.root = root
# Detect format version
try:
with open(os.path.join(self.root, ".fmf", "version")) as version:
self.version = int(version.read())
log.info("Format version detected: {0}".format(self.version))
except IOError as error:
raise utils.FormatError(
"Unable to detect format version: {0}".format(error))
except ValueError:
raise utils.FormatError("Invalid version format")
def merge(self, parent=None):
""" Merge parent data """
# Check parent, append source files
if parent is None:
parent = self.parent
if parent is None:
return
self.sources = parent.sources + self.sources
# Merge child data with parent data
data = copy.deepcopy(parent.data)
for key, value in sorted(self.data.items()):
# Handle attribute adding
if key.endswith('+'):
key = key.rstrip('+')
if key in data:
# Use dict.update() for merging dictionaries
if type(data[key]) == type(value) == dict:
data[key].update(value)
continue
try:
value = data[key] + value
except TypeError as error:
raise utils.MergeError(
"MergeError: Key '{0}' in {1} ({2}).".format(
key, self.name, str(error)))
# And finally update the value
data[key] = value
self.data = data
def inherit(self):
""" Apply inheritance """
# Preserve original data and merge parent
# (original data needed for custom inheritance extensions)
self.original_data = self.data
self.merge()
log.debug("Data for '{0}' inherited.".format(self))
log.data(pretty(self.data))
# Apply inheritance to all children
for child in self.children.values():
child.inherit()
def update(self, data):
""" Update metadata, handle virtual hierarchy """
# Nothing to do if no data
if data is None:
return
for key, value in sorted(data.items()):
# Handle child attributes
if key.startswith('/'):
name = key.lstrip('/')
# Handle deeper nesting (e.g. keys like /one/two/three) by
# extracting only the first level of the hierarchy as name
match = re.search("([^/]+)(/.*)", name)
if match:
name = match.groups()[0]
value = {match.groups()[1]: value}
# Update existing child or create a new one
self.child(name, value)
# Update regular attributes
else:
self.data[key] = value
log.debug("Data for '{0}' updated.".format(self))
log.data(pretty(self.data))
def get(self, name=None, default=None):
"""
Get attribute value or return default
Whole data dictionary is returned when no attribute provided.
Supports direct values retrieval from deep dictionaries as well.
Dictionary path should be provided as list. The following two
examples are equal:
tree.data['hardware']['memory']['size']
tree.get(['hardware', 'memory', 'size'])
However the latter approach will also correctly handle providing
default value when any of the dictionary keys does not exist.
"""
# Return the whole dictionary if no attribute specified
if name is None:
return self.data
if not isinstance(name, list):
name = [name]
data = self.data
try:
for key in name:
data = data[key]
except KeyError:
return default
return data
def child(self, name, data, source=None):
""" Create or update child with given data """
try:
if isinstance(data, dict):
self.children[name].update(data)
else:
self.children[name].grow(data)
except KeyError:
self.children[name] = Tree(data, name, parent=self)
# Save source file
if source is not None:
self.children[name].sources.append(source)
def climb(self, whole=False):
""" Climb through the tree (iterate leaf/all nodes) """
if whole or not self.children:
yield self
for name, child in self.children.items():
for node in child.climb(whole):
yield node
def find(self, name):
""" Find node with given name """
for node in self.climb(whole=True):
if node.name == name:
return node
return None
def prune(self, whole=False, keys=[], names=[], filters=[]):
""" Filter tree nodes based on given criteria """
for node in self.climb(whole):
# Select only nodes with key content
if not all([key in node.data for key in keys]):
continue
# Select nodes with name matching regular expression
if names and not any(
[re.search(name, node.name) for name in names]):
continue
# Apply advanced filters if given
try:
if not all([utils.filter(filter, node.data, regexp=True)
for filter in filters]):
continue
# Handle missing attribute as if filter failed
except utils.FilterError:
continue
# All criteria met, thus yield the node
yield node
def show(self, brief=False, formatting=None, values=[]):
""" Show metadata """
# Show nothing if there's nothing
if not self.data:
return None
# Custom formatting
if formatting is not None:
formatting = re.sub("\\\\n", "\n", formatting)
name = self.name
data = self.data
root = self.root
sources = self.sources
evaluated = []
for value in values:
evaluated.append(eval(value))
return formatting.format(*evaluated)
# Show the name
output = utils.color(self.name, 'red')
if brief:
return output + "\n"
# List available attributes
for key, value in sorted(self.data.items()):
output += "\n{0}: ".format(utils.color(key, 'green'))
if isinstance(value, type("")):
output += value
elif isinstance(value, list) and all(
[isinstance(item, type("")) for item in value]):
output += utils.listed(value)
else:
output += pretty(value)
output
return output + "\n"
|
psss/fmf | fmf/base.py | Tree.climb | python | def climb(self, whole=False):
if whole or not self.children:
yield self
for name, child in self.children.items():
for node in child.climb(whole):
yield node | Climb through the tree (iterate leaf/all nodes) | train | https://github.com/psss/fmf/blob/419f2f195d92339b8f9e5d11c0bea0f303e5fd75/fmf/base.py#L278-L284 | null | class Tree(object):
""" Metadata Tree """
def __init__(self, data, name=None, parent=None):
    """
    Initialize metadata tree from directory path or data dictionary

    Data parameter can be either a string with directory path to be
    explored or a dictionary with the values already prepared.

    :param data: directory path (str) or ready-made metadata (dict)
    :param name: node name, used only when creating a child node
    :param parent: parent Tree node, ``None`` for the tree root
    :raises utils.GeneralError: when neither data nor parent is given
    """
    # Bail out if no data and no parent given
    if not data and not parent:
        raise utils.GeneralError(
            "No data or parent provided to initialize the tree.")
    # Initialize family relations, object data and source files
    self.parent = parent
    self.children = dict()
    self.data = dict()
    self.sources = list()
    self.root = None
    self.version = utils.VERSION
    self.original_data = dict()
    # Special handling for top parent
    if self.parent is None:
        self.name = "/"
        # A string means a directory path: locate the tree root first
        if not isinstance(data, dict):
            self._initialize(path=data)
            data = self.root
    # Handle child node creation
    else:
        self.root = self.parent.root
        self.name = os.path.join(self.parent.name, name)
    # Initialize data (dict is applied directly, path is explored)
    if isinstance(data, dict):
        self.update(data)
    else:
        self.grow(data)
    log.debug("New tree '{0}' created.".format(self))
def __unicode__(self):
    """ Use tree name as identifier """
    # Python 2 text representation; excluded from coverage metrics
    return self.name  # pragma: no cover
def _initialize(self, path):
    """
    Find metadata tree root, detect format version

    Walks up from ``path`` until a directory containing a ``.fmf``
    subdirectory is found and stores it as ``self.root``, then reads
    the format version from ``.fmf/version`` into ``self.version``.

    :raises utils.RootError: when no tree root can be found
    :raises utils.FileError: when ``path`` is not a valid directory
    :raises utils.FormatError: when the version file is missing or
        does not contain an integer
    """
    # Find the tree root
    root = os.path.abspath(path)
    try:
        # next(os.walk(root))[1] lists the immediate subdirectories
        while ".fmf" not in next(os.walk(root))[1]:
            if root == "/":
                raise utils.RootError(
                    "Unable to find tree root for '{0}'.".format(
                        os.path.abspath(path)))
            root = os.path.abspath(os.path.join(root, os.pardir))
    except StopIteration:
        # os.walk() yields nothing for a non-existent directory
        raise utils.FileError("Invalid directory path: {0}".format(root))
    log.info("Root directory found: {0}".format(root))
    self.root = root
    # Detect format version
    try:
        with open(os.path.join(self.root, ".fmf", "version")) as version:
            self.version = int(version.read())
        log.info("Format version detected: {0}".format(self.version))
    except IOError as error:
        raise utils.FormatError(
            "Unable to detect format version: {0}".format(error))
    except ValueError:
        # Version file exists but does not contain an integer
        raise utils.FormatError("Invalid version format")
def merge(self, parent=None):
    """
    Merge parent data

    Child values override parent values, except for keys ending with
    '+' which are *added* to the parent value instead (dictionaries
    are merged with dict.update(), other types are combined with +).

    :param parent: node to merge from (defaults to ``self.parent``)
    :raises utils.MergeError: when the '+' combination is impossible
    """
    # Check parent, append source files
    if parent is None:
        parent = self.parent
    if parent is None:
        return
    self.sources = parent.sources + self.sources
    # Merge child data with parent data (deep copy so the parent's
    # nested structures are never mutated through the child)
    data = copy.deepcopy(parent.data)
    for key, value in sorted(self.data.items()):
        # Handle attribute adding
        if key.endswith('+'):
            # NOTE(review): rstrip removes *all* trailing '+'
            # characters, so a key like 'name++' collapses to 'name'
            key = key.rstrip('+')
            if key in data:
                # Use dict.update() for merging dictionaries
                if type(data[key]) == type(value) == dict:
                    data[key].update(value)
                    continue
                try:
                    value = data[key] + value
                except TypeError as error:
                    raise utils.MergeError(
                        "MergeError: Key '{0}' in {1} ({2}).".format(
                            key, self.name, str(error)))
        # And finally update the value
        data[key] = value
    self.data = data
def inherit(self):
    """ Apply inheritance """
    # Keep a snapshot of the pre-merge data around so that custom
    # inheritance extensions can still access the raw values
    self.original_data = self.data
    self.merge()
    log.debug("Data for '{0}' inherited.".format(self))
    log.data(pretty(self.data))
    # Propagate inheritance recursively through the whole subtree
    for node in self.children.values():
        node.inherit()
def update(self, data):
    """
    Update metadata, handle virtual hierarchy

    Keys starting with '/' define virtual child nodes (deeper paths
    such as '/one/two' are split one hierarchy level at a time), any
    other key updates an attribute of this node.

    :param data: dictionary with new data (``None`` is ignored)
    """
    # Nothing to do if no data
    if data is None:
        return
    for key, value in sorted(data.items()):
        # Handle child attributes
        if key.startswith('/'):
            name = key.lstrip('/')
            # Handle deeper nesting (e.g. keys like /one/two/three) by
            # extracting only the first level of the hierarchy as name
            match = re.search("([^/]+)(/.*)", name)
            if match:
                name = match.groups()[0]
                value = {match.groups()[1]: value}
            # Update existing child or create a new one
            self.child(name, value)
        # Update regular attributes
        else:
            self.data[key] = value
    log.debug("Data for '{0}' updated.".format(self))
    log.data(pretty(self.data))
def get(self, name=None, default=None):
"""
Get attribute value or return default
Whole data dictionary is returned when no attribute provided.
Supports direct values retrieval from deep dictionaries as well.
Dictionary path should be provided as list. The following two
examples are equal:
tree.data['hardware']['memory']['size']
tree.get(['hardware', 'memory', 'size'])
However the latter approach will also correctly handle providing
default value when any of the dictionary keys does not exist.
"""
# Return the whole dictionary if no attribute specified
if name is None:
return self.data
if not isinstance(name, list):
name = [name]
data = self.data
try:
for key in name:
data = data[key]
except KeyError:
return default
return data
def child(self, name, data, source=None):
    """
    Create or update child with given data

    :param name: child node name (single hierarchy level)
    :param data: dict to merge into the child, or a path to grow from
    :param source: optional source file recorded on the child
    """
    # Look the child up explicitly instead of catching KeyError:
    # the original broad except could mask a KeyError raised from
    # inside update()/grow() and silently recreate the child
    if name in self.children:
        if isinstance(data, dict):
            self.children[name].update(data)
        else:
            self.children[name].grow(data)
    else:
        self.children[name] = Tree(data, name, parent=self)
    # Save source file
    if source is not None:
        self.children[name].sources.append(source)
def grow(self, path):
    """
    Grow the metadata tree for the given directory path

    Note: For each path, grow() should be run only once. Growing the tree
    from the same path multiple times with attribute adding using the "+"
    sign leads to adding the value more than once!

    :param path: directory to explore (``None`` is silently ignored)
    :raises utils.FileError: when a metadata file fails to parse
    """
    if path is None:
        return
    path = path.rstrip("/")
    log.info("Walking through directory {0}".format(
        os.path.abspath(path)))
    # Only the top level of 'path' is inspected here; subdirectories
    # are handled via the recursive child() calls below
    dirpath, dirnames, filenames = next(os.walk(path))
    # Investigate main.fmf as the first file (for correct inheritance)
    filenames = sorted(
        [filename for filename in filenames if filename.endswith(SUFFIX)])
    try:
        filenames.insert(0, filenames.pop(filenames.index(MAIN)))
    except ValueError:
        # No main.fmf present, keep plain sorted order
        pass
    # Check every metadata file and load data (ignore hidden)
    for filename in filenames:
        if filename.startswith("."):
            continue
        fullpath = os.path.abspath(os.path.join(dirpath, filename))
        log.info("Checking file {0}".format(fullpath))
        try:
            with open(fullpath) as datafile:
                data = yaml.load(datafile, Loader=FullLoader)
        except yaml.scanner.ScannerError as error:
            raise(utils.FileError("Failed to parse '{0}'\n{1}".format(
                fullpath, error)))
        log.data(pretty(data))
        # Handle main.fmf as data for self
        if filename == MAIN:
            self.sources.append(fullpath)
            self.update(data)
        # Handle other *.fmf files as children
        else:
            self.child(os.path.splitext(filename)[0], data, fullpath)
    # Explore every child directory (ignore hidden dirs and subtrees)
    for dirname in sorted(dirnames):
        if dirname.startswith("."):
            continue
        # Ignore metadata subtrees (they form separate fmf trees)
        if os.path.isdir(os.path.join(path, dirname, SUFFIX)):
            log.debug("Ignoring metadata tree '{0}'.".format(dirname))
            continue
        self.child(dirname, os.path.join(path, dirname))
    # Remove empty children (ignore directories without metadata)
    for name in list(self.children.keys()):
        child = self.children[name]
        if not child.data and not child.children:
            del(self.children[name])
            log.debug("Empty tree '{0}' removed.".format(child.name))
    # Apply inheritance when all scattered data are gathered.
    # This is done only once, from the top parent object.
    if self.parent is None:
        self.inherit()
def find(self, name):
""" Find node with given name """
for node in self.climb(whole=True):
if node.name == name:
return node
return None
def prune(self, whole=False, keys=[], names=[], filters=[]):
""" Filter tree nodes based on given criteria """
for node in self.climb(whole):
# Select only nodes with key content
if not all([key in node.data for key in keys]):
continue
# Select nodes with name matching regular expression
if names and not any(
[re.search(name, node.name) for name in names]):
continue
# Apply advanced filters if given
try:
if not all([utils.filter(filter, node.data, regexp=True)
for filter in filters]):
continue
# Handle missing attribute as if filter failed
except utils.FilterError:
continue
# All criteria met, thus yield the node
yield node
def show(self, brief=False, formatting=None, values=[]):
""" Show metadata """
# Show nothing if there's nothing
if not self.data:
return None
# Custom formatting
if formatting is not None:
formatting = re.sub("\\\\n", "\n", formatting)
name = self.name
data = self.data
root = self.root
sources = self.sources
evaluated = []
for value in values:
evaluated.append(eval(value))
return formatting.format(*evaluated)
# Show the name
output = utils.color(self.name, 'red')
if brief:
return output + "\n"
# List available attributes
for key, value in sorted(self.data.items()):
output += "\n{0}: ".format(utils.color(key, 'green'))
if isinstance(value, type("")):
output += value
elif isinstance(value, list) and all(
[isinstance(item, type("")) for item in value]):
output += utils.listed(value)
else:
output += pretty(value)
output
return output + "\n"
|
psss/fmf | fmf/base.py | Tree.find | python | def find(self, name):
for node in self.climb(whole=True):
if node.name == name:
return node
return None | Find node with given name | train | https://github.com/psss/fmf/blob/419f2f195d92339b8f9e5d11c0bea0f303e5fd75/fmf/base.py#L286-L291 | [
"def climb(self, whole=False):\n \"\"\" Climb through the tree (iterate leaf/all nodes) \"\"\"\n if whole or not self.children:\n yield self\n for name, child in self.children.items():\n for node in child.climb(whole):\n yield node\n"
] | class Tree(object):
""" Metadata Tree """
def __init__(self, data, name=None, parent=None):
"""
Initialize metadata tree from directory path or data dictionary
Data parameter can be either a string with directory path to be
explored or a dictionary with the values already prepared.
"""
# Bail out if no data and no parent given
if not data and not parent:
raise utils.GeneralError(
"No data or parent provided to initialize the tree.")
# Initialize family relations, object data and source files
self.parent = parent
self.children = dict()
self.data = dict()
self.sources = list()
self.root = None
self.version = utils.VERSION
self.original_data = dict()
# Special handling for top parent
if self.parent is None:
self.name = "/"
if not isinstance(data, dict):
self._initialize(path=data)
data = self.root
# Handle child node creation
else:
self.root = self.parent.root
self.name = os.path.join(self.parent.name, name)
# Initialize data
if isinstance(data, dict):
self.update(data)
else:
self.grow(data)
log.debug("New tree '{0}' created.".format(self))
def __unicode__(self):
""" Use tree name as identifier """
return self.name # pragma: no cover
def _initialize(self, path):
""" Find metadata tree root, detect format version """
# Find the tree root
root = os.path.abspath(path)
try:
while ".fmf" not in next(os.walk(root))[1]:
if root == "/":
raise utils.RootError(
"Unable to find tree root for '{0}'.".format(
os.path.abspath(path)))
root = os.path.abspath(os.path.join(root, os.pardir))
except StopIteration:
raise utils.FileError("Invalid directory path: {0}".format(root))
log.info("Root directory found: {0}".format(root))
self.root = root
# Detect format version
try:
with open(os.path.join(self.root, ".fmf", "version")) as version:
self.version = int(version.read())
log.info("Format version detected: {0}".format(self.version))
except IOError as error:
raise utils.FormatError(
"Unable to detect format version: {0}".format(error))
except ValueError:
raise utils.FormatError("Invalid version format")
def merge(self, parent=None):
""" Merge parent data """
# Check parent, append source files
if parent is None:
parent = self.parent
if parent is None:
return
self.sources = parent.sources + self.sources
# Merge child data with parent data
data = copy.deepcopy(parent.data)
for key, value in sorted(self.data.items()):
# Handle attribute adding
if key.endswith('+'):
key = key.rstrip('+')
if key in data:
# Use dict.update() for merging dictionaries
if type(data[key]) == type(value) == dict:
data[key].update(value)
continue
try:
value = data[key] + value
except TypeError as error:
raise utils.MergeError(
"MergeError: Key '{0}' in {1} ({2}).".format(
key, self.name, str(error)))
# And finally update the value
data[key] = value
self.data = data
def inherit(self):
""" Apply inheritance """
# Preserve original data and merge parent
# (original data needed for custom inheritance extensions)
self.original_data = self.data
self.merge()
log.debug("Data for '{0}' inherited.".format(self))
log.data(pretty(self.data))
# Apply inheritance to all children
for child in self.children.values():
child.inherit()
def update(self, data):
""" Update metadata, handle virtual hierarchy """
# Nothing to do if no data
if data is None:
return
for key, value in sorted(data.items()):
# Handle child attributes
if key.startswith('/'):
name = key.lstrip('/')
# Handle deeper nesting (e.g. keys like /one/two/three) by
# extracting only the first level of the hierarchy as name
match = re.search("([^/]+)(/.*)", name)
if match:
name = match.groups()[0]
value = {match.groups()[1]: value}
# Update existing child or create a new one
self.child(name, value)
# Update regular attributes
else:
self.data[key] = value
log.debug("Data for '{0}' updated.".format(self))
log.data(pretty(self.data))
def get(self, name=None, default=None):
"""
Get attribute value or return default
Whole data dictionary is returned when no attribute provided.
Supports direct values retrieval from deep dictionaries as well.
Dictionary path should be provided as list. The following two
examples are equal:
tree.data['hardware']['memory']['size']
tree.get(['hardware', 'memory', 'size'])
However the latter approach will also correctly handle providing
default value when any of the dictionary keys does not exist.
"""
# Return the whole dictionary if no attribute specified
if name is None:
return self.data
if not isinstance(name, list):
name = [name]
data = self.data
try:
for key in name:
data = data[key]
except KeyError:
return default
return data
def child(self, name, data, source=None):
""" Create or update child with given data """
try:
if isinstance(data, dict):
self.children[name].update(data)
else:
self.children[name].grow(data)
except KeyError:
self.children[name] = Tree(data, name, parent=self)
# Save source file
if source is not None:
self.children[name].sources.append(source)
def grow(self, path):
"""
Grow the metadata tree for the given directory path
Note: For each path, grow() should be run only once. Growing the tree
from the same path multiple times with attribute adding using the "+"
sign leads to adding the value more than once!
"""
if path is None:
return
path = path.rstrip("/")
log.info("Walking through directory {0}".format(
os.path.abspath(path)))
dirpath, dirnames, filenames = next(os.walk(path))
# Investigate main.fmf as the first file (for correct inheritance)
filenames = sorted(
[filename for filename in filenames if filename.endswith(SUFFIX)])
try:
filenames.insert(0, filenames.pop(filenames.index(MAIN)))
except ValueError:
pass
# Check every metadata file and load data (ignore hidden)
for filename in filenames:
if filename.startswith("."):
continue
fullpath = os.path.abspath(os.path.join(dirpath, filename))
log.info("Checking file {0}".format(fullpath))
try:
with open(fullpath) as datafile:
data = yaml.load(datafile, Loader=FullLoader)
except yaml.scanner.ScannerError as error:
raise(utils.FileError("Failed to parse '{0}'\n{1}".format(
fullpath, error)))
log.data(pretty(data))
# Handle main.fmf as data for self
if filename == MAIN:
self.sources.append(fullpath)
self.update(data)
# Handle other *.fmf files as children
else:
self.child(os.path.splitext(filename)[0], data, fullpath)
# Explore every child directory (ignore hidden dirs and subtrees)
for dirname in sorted(dirnames):
if dirname.startswith("."):
continue
# Ignore metadata subtrees
if os.path.isdir(os.path.join(path, dirname, SUFFIX)):
log.debug("Ignoring metadata tree '{0}'.".format(dirname))
continue
self.child(dirname, os.path.join(path, dirname))
# Remove empty children (ignore directories without metadata)
for name in list(self.children.keys()):
child = self.children[name]
if not child.data and not child.children:
del(self.children[name])
log.debug("Empty tree '{0}' removed.".format(child.name))
# Apply inheritance when all scattered data are gathered.
# This is done only once, from the top parent object.
if self.parent is None:
self.inherit()
def climb(self, whole=False):
""" Climb through the tree (iterate leaf/all nodes) """
if whole or not self.children:
yield self
for name, child in self.children.items():
for node in child.climb(whole):
yield node
def prune(self, whole=False, keys=[], names=[], filters=[]):
""" Filter tree nodes based on given criteria """
for node in self.climb(whole):
# Select only nodes with key content
if not all([key in node.data for key in keys]):
continue
# Select nodes with name matching regular expression
if names and not any(
[re.search(name, node.name) for name in names]):
continue
# Apply advanced filters if given
try:
if not all([utils.filter(filter, node.data, regexp=True)
for filter in filters]):
continue
# Handle missing attribute as if filter failed
except utils.FilterError:
continue
# All criteria met, thus yield the node
yield node
def show(self, brief=False, formatting=None, values=[]):
""" Show metadata """
# Show nothing if there's nothing
if not self.data:
return None
# Custom formatting
if formatting is not None:
formatting = re.sub("\\\\n", "\n", formatting)
name = self.name
data = self.data
root = self.root
sources = self.sources
evaluated = []
for value in values:
evaluated.append(eval(value))
return formatting.format(*evaluated)
# Show the name
output = utils.color(self.name, 'red')
if brief:
return output + "\n"
# List available attributes
for key, value in sorted(self.data.items()):
output += "\n{0}: ".format(utils.color(key, 'green'))
if isinstance(value, type("")):
output += value
elif isinstance(value, list) and all(
[isinstance(item, type("")) for item in value]):
output += utils.listed(value)
else:
output += pretty(value)
output
return output + "\n"
|
psss/fmf | fmf/base.py | Tree.prune | python | def prune(self, whole=False, keys=[], names=[], filters=[]):
for node in self.climb(whole):
# Select only nodes with key content
if not all([key in node.data for key in keys]):
continue
# Select nodes with name matching regular expression
if names and not any(
[re.search(name, node.name) for name in names]):
continue
# Apply advanced filters if given
try:
if not all([utils.filter(filter, node.data, regexp=True)
for filter in filters]):
continue
# Handle missing attribute as if filter failed
except utils.FilterError:
continue
# All criteria met, thus yield the node
yield node | Filter tree nodes based on given criteria | train | https://github.com/psss/fmf/blob/419f2f195d92339b8f9e5d11c0bea0f303e5fd75/fmf/base.py#L293-L312 | [
"def climb(self, whole=False):\n \"\"\" Climb through the tree (iterate leaf/all nodes) \"\"\"\n if whole or not self.children:\n yield self\n for name, child in self.children.items():\n for node in child.climb(whole):\n yield node\n"
] | class Tree(object):
""" Metadata Tree """
def __init__(self, data, name=None, parent=None):
"""
Initialize metadata tree from directory path or data dictionary
Data parameter can be either a string with directory path to be
explored or a dictionary with the values already prepared.
"""
# Bail out if no data and no parent given
if not data and not parent:
raise utils.GeneralError(
"No data or parent provided to initialize the tree.")
# Initialize family relations, object data and source files
self.parent = parent
self.children = dict()
self.data = dict()
self.sources = list()
self.root = None
self.version = utils.VERSION
self.original_data = dict()
# Special handling for top parent
if self.parent is None:
self.name = "/"
if not isinstance(data, dict):
self._initialize(path=data)
data = self.root
# Handle child node creation
else:
self.root = self.parent.root
self.name = os.path.join(self.parent.name, name)
# Initialize data
if isinstance(data, dict):
self.update(data)
else:
self.grow(data)
log.debug("New tree '{0}' created.".format(self))
def __unicode__(self):
""" Use tree name as identifier """
return self.name # pragma: no cover
def _initialize(self, path):
""" Find metadata tree root, detect format version """
# Find the tree root
root = os.path.abspath(path)
try:
while ".fmf" not in next(os.walk(root))[1]:
if root == "/":
raise utils.RootError(
"Unable to find tree root for '{0}'.".format(
os.path.abspath(path)))
root = os.path.abspath(os.path.join(root, os.pardir))
except StopIteration:
raise utils.FileError("Invalid directory path: {0}".format(root))
log.info("Root directory found: {0}".format(root))
self.root = root
# Detect format version
try:
with open(os.path.join(self.root, ".fmf", "version")) as version:
self.version = int(version.read())
log.info("Format version detected: {0}".format(self.version))
except IOError as error:
raise utils.FormatError(
"Unable to detect format version: {0}".format(error))
except ValueError:
raise utils.FormatError("Invalid version format")
def merge(self, parent=None):
""" Merge parent data """
# Check parent, append source files
if parent is None:
parent = self.parent
if parent is None:
return
self.sources = parent.sources + self.sources
# Merge child data with parent data
data = copy.deepcopy(parent.data)
for key, value in sorted(self.data.items()):
# Handle attribute adding
if key.endswith('+'):
key = key.rstrip('+')
if key in data:
# Use dict.update() for merging dictionaries
if type(data[key]) == type(value) == dict:
data[key].update(value)
continue
try:
value = data[key] + value
except TypeError as error:
raise utils.MergeError(
"MergeError: Key '{0}' in {1} ({2}).".format(
key, self.name, str(error)))
# And finally update the value
data[key] = value
self.data = data
def inherit(self):
""" Apply inheritance """
# Preserve original data and merge parent
# (original data needed for custom inheritance extensions)
self.original_data = self.data
self.merge()
log.debug("Data for '{0}' inherited.".format(self))
log.data(pretty(self.data))
# Apply inheritance to all children
for child in self.children.values():
child.inherit()
def update(self, data):
""" Update metadata, handle virtual hierarchy """
# Nothing to do if no data
if data is None:
return
for key, value in sorted(data.items()):
# Handle child attributes
if key.startswith('/'):
name = key.lstrip('/')
# Handle deeper nesting (e.g. keys like /one/two/three) by
# extracting only the first level of the hierarchy as name
match = re.search("([^/]+)(/.*)", name)
if match:
name = match.groups()[0]
value = {match.groups()[1]: value}
# Update existing child or create a new one
self.child(name, value)
# Update regular attributes
else:
self.data[key] = value
log.debug("Data for '{0}' updated.".format(self))
log.data(pretty(self.data))
def get(self, name=None, default=None):
"""
Get attribute value or return default
Whole data dictionary is returned when no attribute provided.
Supports direct values retrieval from deep dictionaries as well.
Dictionary path should be provided as list. The following two
examples are equal:
tree.data['hardware']['memory']['size']
tree.get(['hardware', 'memory', 'size'])
However the latter approach will also correctly handle providing
default value when any of the dictionary keys does not exist.
"""
# Return the whole dictionary if no attribute specified
if name is None:
return self.data
if not isinstance(name, list):
name = [name]
data = self.data
try:
for key in name:
data = data[key]
except KeyError:
return default
return data
def child(self, name, data, source=None):
""" Create or update child with given data """
try:
if isinstance(data, dict):
self.children[name].update(data)
else:
self.children[name].grow(data)
except KeyError:
self.children[name] = Tree(data, name, parent=self)
# Save source file
if source is not None:
self.children[name].sources.append(source)
def grow(self, path):
"""
Grow the metadata tree for the given directory path
Note: For each path, grow() should be run only once. Growing the tree
from the same path multiple times with attribute adding using the "+"
sign leads to adding the value more than once!
"""
if path is None:
return
path = path.rstrip("/")
log.info("Walking through directory {0}".format(
os.path.abspath(path)))
dirpath, dirnames, filenames = next(os.walk(path))
# Investigate main.fmf as the first file (for correct inheritance)
filenames = sorted(
[filename for filename in filenames if filename.endswith(SUFFIX)])
try:
filenames.insert(0, filenames.pop(filenames.index(MAIN)))
except ValueError:
pass
# Check every metadata file and load data (ignore hidden)
for filename in filenames:
if filename.startswith("."):
continue
fullpath = os.path.abspath(os.path.join(dirpath, filename))
log.info("Checking file {0}".format(fullpath))
try:
with open(fullpath) as datafile:
data = yaml.load(datafile, Loader=FullLoader)
except yaml.scanner.ScannerError as error:
raise(utils.FileError("Failed to parse '{0}'\n{1}".format(
fullpath, error)))
log.data(pretty(data))
# Handle main.fmf as data for self
if filename == MAIN:
self.sources.append(fullpath)
self.update(data)
# Handle other *.fmf files as children
else:
self.child(os.path.splitext(filename)[0], data, fullpath)
# Explore every child directory (ignore hidden dirs and subtrees)
for dirname in sorted(dirnames):
if dirname.startswith("."):
continue
# Ignore metadata subtrees
if os.path.isdir(os.path.join(path, dirname, SUFFIX)):
log.debug("Ignoring metadata tree '{0}'.".format(dirname))
continue
self.child(dirname, os.path.join(path, dirname))
# Remove empty children (ignore directories without metadata)
for name in list(self.children.keys()):
child = self.children[name]
if not child.data and not child.children:
del(self.children[name])
log.debug("Empty tree '{0}' removed.".format(child.name))
# Apply inheritance when all scattered data are gathered.
# This is done only once, from the top parent object.
if self.parent is None:
self.inherit()
def climb(self, whole=False):
""" Climb through the tree (iterate leaf/all nodes) """
if whole or not self.children:
yield self
for name, child in self.children.items():
for node in child.climb(whole):
yield node
def find(self, name):
""" Find node with given name """
for node in self.climb(whole=True):
if node.name == name:
return node
return None
def show(self, brief=False, formatting=None, values=[]):
""" Show metadata """
# Show nothing if there's nothing
if not self.data:
return None
# Custom formatting
if formatting is not None:
formatting = re.sub("\\\\n", "\n", formatting)
name = self.name
data = self.data
root = self.root
sources = self.sources
evaluated = []
for value in values:
evaluated.append(eval(value))
return formatting.format(*evaluated)
# Show the name
output = utils.color(self.name, 'red')
if brief:
return output + "\n"
# List available attributes
for key, value in sorted(self.data.items()):
output += "\n{0}: ".format(utils.color(key, 'green'))
if isinstance(value, type("")):
output += value
elif isinstance(value, list) and all(
[isinstance(item, type("")) for item in value]):
output += utils.listed(value)
else:
output += pretty(value)
output
return output + "\n"
|
psss/fmf | fmf/base.py | Tree.show | python | def show(self, brief=False, formatting=None, values=[]):
# Show nothing if there's nothing
if not self.data:
return None
# Custom formatting
if formatting is not None:
formatting = re.sub("\\\\n", "\n", formatting)
name = self.name
data = self.data
root = self.root
sources = self.sources
evaluated = []
for value in values:
evaluated.append(eval(value))
return formatting.format(*evaluated)
# Show the name
output = utils.color(self.name, 'red')
if brief:
return output + "\n"
# List available attributes
for key, value in sorted(self.data.items()):
output += "\n{0}: ".format(utils.color(key, 'green'))
if isinstance(value, type("")):
output += value
elif isinstance(value, list) and all(
[isinstance(item, type("")) for item in value]):
output += utils.listed(value)
else:
output += pretty(value)
output
return output + "\n" | Show metadata | train | https://github.com/psss/fmf/blob/419f2f195d92339b8f9e5d11c0bea0f303e5fd75/fmf/base.py#L314-L347 | [
"def color(text, color=None, background=None, light=False, enabled=\"auto\"):\n \"\"\"\n Return text in desired color if coloring enabled\n\n Available colors: black red green yellow blue magenta cyan white.\n Alternatively color can be prefixed with \"light\", e.g. lightgreen.\n \"\"\"\n colors = {\"black\": 30, \"red\": 31, \"green\": 32, \"yellow\": 33,\n \"blue\": 34, \"magenta\": 35, \"cyan\": 36, \"white\": 37}\n # Nothing do do if coloring disabled\n if enabled == \"auto\":\n enabled = Coloring().enabled()\n if not enabled:\n return text\n # Prepare colors (strip 'light' if present in color)\n if color and color.startswith(\"light\"):\n light = True\n color = color[5:]\n color = color and \";{0}\".format(colors[color]) or \"\"\n background = background and \";{0}\".format(colors[background] + 10) or \"\"\n light = light and 1 or 0\n # Starting and finishing sequence\n start = \"\\033[{0}{1}{2}m\".format(light, color, background)\n finish = \"\\033[1;m\"\n return \"\".join([start, text, finish])\n",
"def listed(items, singular=None, plural=None, max=None, quote=\"\"):\n \"\"\"\n Convert an iterable into a nice, human readable list or description::\n\n listed(range(1)) .................... 0\n listed(range(2)) .................... 0 and 1\n listed(range(3), quote='\"') ......... \"0\", \"1\" and \"2\"\n listed(range(4), max=3) ............. 0, 1, 2 and 1 more\n listed(range(5), 'number', max=3) ... 0, 1, 2 and 2 more numbers\n listed(range(6), 'category') ........ 6 categories\n listed(7, \"leaf\", \"leaves\") ......... 7 leaves\n\n If singular form is provided but max not set the description-only\n mode is activated as shown in the last two examples. Also, an int\n can be used in this case to get a simple inflection functionality.\n \"\"\"\n\n # Convert items to list if necessary\n items = range(items) if isinstance(items, int) else list(items)\n more = \" more\"\n # Description mode expected when singular provided but no maximum set\n if singular is not None and max is None:\n max = 0\n more = \"\"\n # Set the default plural form\n if singular is not None and plural is None:\n plural = pluralize(singular)\n # Convert to strings and optionally quote each item\n items = [\"{0}{1}{0}\".format(quote, item) for item in items]\n\n # Select the maximum of items and describe the rest if max provided\n if max is not None:\n # Special case when the list is empty (0 items)\n if max == 0 and len(items) == 0:\n return \"0 {0}\".format(plural)\n # Cut the list if maximum exceeded\n if len(items) > max:\n rest = len(items[max:])\n items = items[:max]\n if singular is not None:\n more += \" {0}\".format(singular if rest == 1 else plural)\n items.append(\"{0}{1}\".format(rest, more))\n\n # For two and more items use 'and' instead of the last comma\n if len(items) < 2:\n return \"\".join(items)\n else:\n return \", \".join(items[0:-2] + [\" and \".join(items[-2:])])\n"
] | class Tree(object):
    """ Metadata Tree """
    # NOTE(review): dataset-export copy of fmf's Tree class; the leading
    # "] | " residue on the class line is record-separator junk from the
    # export and is intentionally left untouched.
    def __init__(self, data, name=None, parent=None):
        """
        Initialize metadata tree from directory path or data dictionary
        Data parameter can be either a string with directory path to be
        explored or a dictionary with the values already prepared.
        """
        # Bail out if no data and no parent given
        if not data and not parent:
            raise utils.GeneralError(
                "No data or parent provided to initialize the tree.")
        # Initialize family relations, object data and source files
        self.parent = parent
        self.children = dict()
        self.data = dict()
        self.sources = list()
        self.root = None
        self.version = utils.VERSION
        self.original_data = dict()
        # Special handling for top parent
        if self.parent is None:
            self.name = "/"
            # Non-dict data is treated as a filesystem path: locate the
            # tree root and the format version before growing from it
            if not isinstance(data, dict):
                self._initialize(path=data)
                data = self.root
        # Handle child node creation
        else:
            self.root = self.parent.root
            self.name = os.path.join(self.parent.name, name)
        # Initialize data
        if isinstance(data, dict):
            self.update(data)
        else:
            self.grow(data)
        log.debug("New tree '{0}' created.".format(self))
    def __unicode__(self):
        """ Use tree name as identifier """
        return self.name # pragma: no cover
    def _initialize(self, path):
        """ Find metadata tree root, detect format version """
        # Find the tree root: walk up from 'path' until a directory
        # containing an '.fmf' subdirectory is found (or "/" is reached)
        root = os.path.abspath(path)
        try:
            while ".fmf" not in next(os.walk(root))[1]:
                if root == "/":
                    raise utils.RootError(
                        "Unable to find tree root for '{0}'.".format(
                            os.path.abspath(path)))
                root = os.path.abspath(os.path.join(root, os.pardir))
        except StopIteration:
            # os.walk() yields nothing for a non-existent directory
            raise utils.FileError("Invalid directory path: {0}".format(root))
        log.info("Root directory found: {0}".format(root))
        self.root = root
        # Detect format version (an integer stored in <root>/.fmf/version)
        try:
            with open(os.path.join(self.root, ".fmf", "version")) as version:
                self.version = int(version.read())
                log.info("Format version detected: {0}".format(self.version))
        except IOError as error:
            raise utils.FormatError(
                "Unable to detect format version: {0}".format(error))
        except ValueError:
            raise utils.FormatError("Invalid version format")
    def merge(self, parent=None):
        """ Merge parent data """
        # Check parent, append source files
        if parent is None:
            parent = self.parent
        if parent is None:
            return
        self.sources = parent.sources + self.sources
        # Merge child data with parent data (parent values are the base;
        # plain keys override them, '+'-suffixed keys extend them)
        data = copy.deepcopy(parent.data)
        for key, value in sorted(self.data.items()):
            # Handle attribute adding ('key+' appends to inherited 'key')
            if key.endswith('+'):
                key = key.rstrip('+')
                if key in data:
                    # Use dict.update() for merging dictionaries
                    if type(data[key]) == type(value) == dict:
                        data[key].update(value)
                        continue
                    try:
                        value = data[key] + value
                    except TypeError as error:
                        raise utils.MergeError(
                            "MergeError: Key '{0}' in {1} ({2}).".format(
                                key, self.name, str(error)))
            # And finally update the value
            data[key] = value
        self.data = data
    def inherit(self):
        """ Apply inheritance """
        # Preserve original data and merge parent
        # (original data needed for custom inheritance extensions)
        self.original_data = self.data
        self.merge()
        log.debug("Data for '{0}' inherited.".format(self))
        log.data(pretty(self.data))
        # Apply inheritance to all children (depth-first recursion)
        for child in self.children.values():
            child.inherit()
    def update(self, data):
        """ Update metadata, handle virtual hierarchy """
        # Nothing to do if no data
        if data is None:
            return
        for key, value in sorted(data.items()):
            # Handle child attributes (keys starting with '/' define
            # virtual child nodes rather than plain attributes)
            if key.startswith('/'):
                name = key.lstrip('/')
                # Handle deeper nesting (e.g. keys like /one/two/three) by
                # extracting only the first level of the hierarchy as name
                match = re.search("([^/]+)(/.*)", name)
                if match:
                    name = match.groups()[0]
                    value = {match.groups()[1]: value}
                # Update existing child or create a new one
                self.child(name, value)
            # Update regular attributes
            else:
                self.data[key] = value
        log.debug("Data for '{0}' updated.".format(self))
        log.data(pretty(self.data))
    def get(self, name=None, default=None):
        """
        Get attribute value or return default
        Whole data dictionary is returned when no attribute provided.
        Supports direct values retrieval from deep dictionaries as well.
        Dictionary path should be provided as list. The following two
        examples are equal:
        tree.data['hardware']['memory']['size']
        tree.get(['hardware', 'memory', 'size'])
        However the latter approach will also correctly handle providing
        default value when any of the dictionary keys does not exist.
        """
        # Return the whole dictionary if no attribute specified
        if name is None:
            return self.data
        if not isinstance(name, list):
            name = [name]
        data = self.data
        try:
            for key in name:
                data = data[key]
        # NOTE(review): only KeyError is caught — when an intermediate
        # value is not a dict, the lookup raises TypeError instead of
        # returning 'default'; confirm whether that is intended.
        except KeyError:
            return default
        return data
    def child(self, name, data, source=None):
        """ Create or update child with given data """
        # EAFP: update the existing child, create it on KeyError
        try:
            if isinstance(data, dict):
                self.children[name].update(data)
            else:
                self.children[name].grow(data)
        except KeyError:
            self.children[name] = Tree(data, name, parent=self)
        # Save source file
        if source is not None:
            self.children[name].sources.append(source)
    def grow(self, path):
        """
        Grow the metadata tree for the given directory path
        Note: For each path, grow() should be run only once. Growing the tree
        from the same path multiple times with attribute adding using the "+"
        sign leads to adding the value more than once!
        """
        if path is None:
            return
        path = path.rstrip("/")
        log.info("Walking through directory {0}".format(
            os.path.abspath(path)))
        # Only the first directory level is inspected here; recursion into
        # subdirectories happens through self.child() below
        dirpath, dirnames, filenames = next(os.walk(path))
        # Investigate main.fmf as the first file (for correct inheritance)
        filenames = sorted(
            [filename for filename in filenames if filename.endswith(SUFFIX)])
        try:
            filenames.insert(0, filenames.pop(filenames.index(MAIN)))
        except ValueError:
            pass
        # Check every metadata file and load data (ignore hidden)
        for filename in filenames:
            if filename.startswith("."):
                continue
            fullpath = os.path.abspath(os.path.join(dirpath, filename))
            log.info("Checking file {0}".format(fullpath))
            try:
                with open(fullpath) as datafile:
                    data = yaml.load(datafile, Loader=FullLoader)
            except yaml.scanner.ScannerError as error:
                raise(utils.FileError("Failed to parse '{0}'\n{1}".format(
                    fullpath, error)))
            log.data(pretty(data))
            # Handle main.fmf as data for self
            if filename == MAIN:
                self.sources.append(fullpath)
                self.update(data)
            # Handle other *.fmf files as children
            else:
                self.child(os.path.splitext(filename)[0], data, fullpath)
        # Explore every child directory (ignore hidden dirs and subtrees)
        for dirname in sorted(dirnames):
            if dirname.startswith("."):
                continue
            # Ignore metadata subtrees
            if os.path.isdir(os.path.join(path, dirname, SUFFIX)):
                log.debug("Ignoring metadata tree '{0}'.".format(dirname))
                continue
            self.child(dirname, os.path.join(path, dirname))
        # Remove empty children (ignore directories without metadata)
        for name in list(self.children.keys()):
            child = self.children[name]
            if not child.data and not child.children:
                del(self.children[name])
                log.debug("Empty tree '{0}' removed.".format(child.name))
        # Apply inheritance when all scattered data are gathered.
        # This is done only once, from the top parent object.
        if self.parent is None:
            self.inherit()
    def climb(self, whole=False):
        """ Climb through the tree (iterate leaf/all nodes) """
        # Generator: yields leaves only by default, every node when 'whole'
        if whole or not self.children:
            yield self
        for name, child in self.children.items():
            for node in child.climb(whole):
                yield node
    def find(self, name):
        """ Find node with given name """
        for node in self.climb(whole=True):
            if node.name == name:
                return node
        return None
    def prune(self, whole=False, keys=[], names=[], filters=[]):
        """ Filter tree nodes based on given criteria """
        # NOTE(review): mutable default arguments (keys=[], names=[],
        # filters=[]) are shared across calls; harmless only while they
        # are never mutated here — confirm no caller relies on that.
        for node in self.climb(whole):
            # Select only nodes with key content
            if not all([key in node.data for key in keys]):
                continue
            # Select nodes with name matching regular expression
            if names and not any(
                    [re.search(name, node.name) for name in names]):
                continue
            # Apply advanced filters if given
            try:
                if not all([utils.filter(filter, node.data, regexp=True)
                        for filter in filters]):
                    continue
            # Handle missing attribute as if filter failed
            except utils.FilterError:
                continue
            # All criteria met, thus yield the node
            yield node
|
psss/fmf | fmf/cli.py | Parser.options_formatting | python | def options_formatting(self):
        """ Formatting options. """
        # Add the "Format" argument group to the current subcommand parser
        group = self.parser.add_argument_group("Format")
        group.add_argument(
            "--format", dest="formatting", default=None,
            help="Custom output format using the {} expansion")
        group.add_argument(
            "--value", dest="values", action="append", default=[],
            help="Values for the custom formatting string") | Formating options | train | https://github.com/psss/fmf/blob/419f2f195d92339b8f9e5d11c0bea0f303e5fd75/fmf/cli.py#L91-L99 | null | class Parser(object):
""" Command line options parser """
def __init__(self, arguments=None, path=None):
""" Prepare the parser. """
# Change current working directory (used for testing)
if path is not None:
os.chdir(path)
# Split command line if given as a string (used for testing)
if isinstance(arguments, type("")): # pragma: no cover
try:
# This is necessary for Python 2.6
self.arguments = [arg.decode('utf8')
for arg in shlex.split(arguments.encode('utf8'))]
except AttributeError:
self.arguments = shlex.split(arguments)
# Otherwise use sys.argv (plus decode unicode for Python 2)
if arguments is None: # pragma: no cover
try:
self.arguments = [arg.decode("utf-8") for arg in sys.argv]
except AttributeError:
self.arguments = sys.argv
# Enable debugging output if requested
if "--debug" in self.arguments:
utils.log.setLevel(utils.LOG_DEBUG)
# Handle subcommands (mapped to format_* methods)
self.parser = argparse.ArgumentParser(
usage="fmf command [options]\n" + __doc__)
self.parser.add_argument('command', help='Command to run')
self.command = self.parser.parse_args(self.arguments[1:2]).command
if not hasattr(self, "command_" + self.command):
self.parser.print_help()
raise utils.GeneralError(
"Unrecognized command: '{0}'".format(self.command))
# Initialize the rest and run the subcommand
self.output = ""
getattr(self, "command_" + self.command)()
def options_select(self):
""" Select by name, filter """
group = self.parser.add_argument_group("Select")
group.add_argument(
"--key", dest="keys", action="append", default=[],
help="Key content definition (required attributes)")
group.add_argument(
"--name", dest="names", action="append", default=[],
help="List objects with name matching regular expression")
group.add_argument(
"--filter", dest="filters", action="append", default=[],
help="Apply advanced filter (see 'pydoc fmf.filter')")
group.add_argument(
"--whole", dest="whole", action="store_true",
help="Consider the whole tree (leaves only by default)")
def options_utils(self):
""" Utilities """
group = self.parser.add_argument_group("Utils")
group.add_argument(
"--path", action="append", dest="paths",
help="Path to the metadata tree (default: current directory)")
group.add_argument(
"--verbose", action="store_true",
help="Print information about parsed files to stderr")
group.add_argument(
"--debug", action="store_true",
help="Turn on debugging output, do not catch exceptions")
def command_ls(self):
""" List names """
self.parser = argparse.ArgumentParser(
description="List names of available objects")
self.options_select()
self.options_utils()
self.options = self.parser.parse_args(self.arguments[2:])
self.show(brief=True)
def command_show(self):
""" Show metadata """
self.parser = argparse.ArgumentParser(
description="Show metadata of available objects")
self.options_select()
self.options_formatting()
self.options_utils()
self.options = self.parser.parse_args(self.arguments[2:])
self.show(brief=False)
def command_init(self):
""" Initialize tree """
self.parser = argparse.ArgumentParser(
description="Initialize a new metadata tree")
self.options_utils()
self.options = self.parser.parse_args(self.arguments[2:])
# For each path create an .fmf directory and version file
for path in self.options.paths or ["."]:
root = os.path.abspath(os.path.join(path, ".fmf"))
if os.path.exists(root):
raise utils.FileError("{0} '{1}' already exists.".format(
"Directory" if os.path.isdir(root) else "File", root))
os.makedirs(root)
with open(os.path.join(root, "version"), "w") as version:
version.write("{0}\n".format(utils.VERSION))
print("Metadata tree '{0}' successfully initialized.".format(root))
def show(self, brief=False):
""" Show metadata for each path given """
output = []
for path in self.options.paths or ["."]:
if self.options.verbose:
utils.info("Checking {0} for metadata.".format(path))
tree = fmf.Tree(path)
for node in tree.prune(
self.options.whole, self.options.keys, self.options.names,
self.options.filters):
if brief:
show = node.show(brief=True)
else:
show = node.show(
brief=False,
formatting=self.options.formatting,
values=self.options.values)
# List source files when in debug mode
if self.options.debug:
for source in node.sources:
show += utils.color("{0}\n".format(source), "blue")
if show is not None:
output.append(show)
# Print output and summary
if brief or self.options.formatting:
joined = "".join(output)
else:
joined = "\n".join(output)
try: # pragma: no cover
print(joined, end="")
except UnicodeEncodeError: # pragma: no cover
print(joined.encode('utf-8'), end="")
if self.options.verbose:
utils.info("Found {0}.".format(
utils.listed(len(output), "object")))
self.output = joined
|
psss/fmf | fmf/cli.py | Parser.options_utils | python | def options_utils(self):
        """ Utility options shared by all subcommands. """
        group = self.parser.add_argument_group("Utils")
        group.add_argument(
            "--path", action="append", dest="paths",
            help="Path to the metadata tree (default: current directory)")
        group.add_argument(
            "--verbose", action="store_true",
            help="Print information about parsed files to stderr")
        group.add_argument(
            "--debug", action="store_true",
            help="Turn on debugging output, do not catch exceptions") | Utilities | train | https://github.com/psss/fmf/blob/419f2f195d92339b8f9e5d11c0bea0f303e5fd75/fmf/cli.py#L101-L112 | null | class Parser(object):
""" Command line options parser """
def __init__(self, arguments=None, path=None):
""" Prepare the parser. """
# Change current working directory (used for testing)
if path is not None:
os.chdir(path)
# Split command line if given as a string (used for testing)
if isinstance(arguments, type("")): # pragma: no cover
try:
# This is necessary for Python 2.6
self.arguments = [arg.decode('utf8')
for arg in shlex.split(arguments.encode('utf8'))]
except AttributeError:
self.arguments = shlex.split(arguments)
# Otherwise use sys.argv (plus decode unicode for Python 2)
if arguments is None: # pragma: no cover
try:
self.arguments = [arg.decode("utf-8") for arg in sys.argv]
except AttributeError:
self.arguments = sys.argv
# Enable debugging output if requested
if "--debug" in self.arguments:
utils.log.setLevel(utils.LOG_DEBUG)
# Handle subcommands (mapped to format_* methods)
self.parser = argparse.ArgumentParser(
usage="fmf command [options]\n" + __doc__)
self.parser.add_argument('command', help='Command to run')
self.command = self.parser.parse_args(self.arguments[1:2]).command
if not hasattr(self, "command_" + self.command):
self.parser.print_help()
raise utils.GeneralError(
"Unrecognized command: '{0}'".format(self.command))
# Initialize the rest and run the subcommand
self.output = ""
getattr(self, "command_" + self.command)()
def options_select(self):
""" Select by name, filter """
group = self.parser.add_argument_group("Select")
group.add_argument(
"--key", dest="keys", action="append", default=[],
help="Key content definition (required attributes)")
group.add_argument(
"--name", dest="names", action="append", default=[],
help="List objects with name matching regular expression")
group.add_argument(
"--filter", dest="filters", action="append", default=[],
help="Apply advanced filter (see 'pydoc fmf.filter')")
group.add_argument(
"--whole", dest="whole", action="store_true",
help="Consider the whole tree (leaves only by default)")
def options_formatting(self):
""" Formating options """
group = self.parser.add_argument_group("Format")
group.add_argument(
"--format", dest="formatting", default=None,
help="Custom output format using the {} expansion")
group.add_argument(
"--value", dest="values", action="append", default=[],
help="Values for the custom formatting string")
def command_ls(self):
""" List names """
self.parser = argparse.ArgumentParser(
description="List names of available objects")
self.options_select()
self.options_utils()
self.options = self.parser.parse_args(self.arguments[2:])
self.show(brief=True)
def command_show(self):
""" Show metadata """
self.parser = argparse.ArgumentParser(
description="Show metadata of available objects")
self.options_select()
self.options_formatting()
self.options_utils()
self.options = self.parser.parse_args(self.arguments[2:])
self.show(brief=False)
def command_init(self):
""" Initialize tree """
self.parser = argparse.ArgumentParser(
description="Initialize a new metadata tree")
self.options_utils()
self.options = self.parser.parse_args(self.arguments[2:])
# For each path create an .fmf directory and version file
for path in self.options.paths or ["."]:
root = os.path.abspath(os.path.join(path, ".fmf"))
if os.path.exists(root):
raise utils.FileError("{0} '{1}' already exists.".format(
"Directory" if os.path.isdir(root) else "File", root))
os.makedirs(root)
with open(os.path.join(root, "version"), "w") as version:
version.write("{0}\n".format(utils.VERSION))
print("Metadata tree '{0}' successfully initialized.".format(root))
def show(self, brief=False):
""" Show metadata for each path given """
output = []
for path in self.options.paths or ["."]:
if self.options.verbose:
utils.info("Checking {0} for metadata.".format(path))
tree = fmf.Tree(path)
for node in tree.prune(
self.options.whole, self.options.keys, self.options.names,
self.options.filters):
if brief:
show = node.show(brief=True)
else:
show = node.show(
brief=False,
formatting=self.options.formatting,
values=self.options.values)
# List source files when in debug mode
if self.options.debug:
for source in node.sources:
show += utils.color("{0}\n".format(source), "blue")
if show is not None:
output.append(show)
# Print output and summary
if brief or self.options.formatting:
joined = "".join(output)
else:
joined = "\n".join(output)
try: # pragma: no cover
print(joined, end="")
except UnicodeEncodeError: # pragma: no cover
print(joined.encode('utf-8'), end="")
if self.options.verbose:
utils.info("Found {0}.".format(
utils.listed(len(output), "object")))
self.output = joined
|
psss/fmf | fmf/cli.py | Parser.command_ls | python | def command_ls(self):
        """ List names. """
        # Build a fresh parser for the 'ls' subcommand, then show names only
        self.parser = argparse.ArgumentParser(
            description="List names of available objects")
        self.options_select()
        self.options_utils()
        self.options = self.parser.parse_args(self.arguments[2:])
        self.show(brief=True) | List names | train | https://github.com/psss/fmf/blob/419f2f195d92339b8f9e5d11c0bea0f303e5fd75/fmf/cli.py#L114-L121 | [
"def options_select(self):\n \"\"\" Select by name, filter \"\"\"\n group = self.parser.add_argument_group(\"Select\")\n group.add_argument(\n \"--key\", dest=\"keys\", action=\"append\", default=[],\n help=\"Key content definition (required attributes)\")\n group.add_argument(\n \"--name\", dest=\"names\", action=\"append\", default=[],\n help=\"List objects with name matching regular expression\")\n group.add_argument(\n \"--filter\", dest=\"filters\", action=\"append\", default=[],\n help=\"Apply advanced filter (see 'pydoc fmf.filter')\")\n group.add_argument(\n \"--whole\", dest=\"whole\", action=\"store_true\",\n help=\"Consider the whole tree (leaves only by default)\")\n",
"def options_utils(self):\n \"\"\" Utilities \"\"\"\n group = self.parser.add_argument_group(\"Utils\")\n group.add_argument(\n \"--path\", action=\"append\", dest=\"paths\",\n help=\"Path to the metadata tree (default: current directory)\")\n group.add_argument(\n \"--verbose\", action=\"store_true\",\n help=\"Print information about parsed files to stderr\")\n group.add_argument(\n \"--debug\", action=\"store_true\",\n help=\"Turn on debugging output, do not catch exceptions\")\n",
"def show(self, brief=False):\n \"\"\" Show metadata for each path given \"\"\"\n output = []\n for path in self.options.paths or [\".\"]:\n if self.options.verbose:\n utils.info(\"Checking {0} for metadata.\".format(path))\n tree = fmf.Tree(path)\n for node in tree.prune(\n self.options.whole, self.options.keys, self.options.names,\n self.options.filters):\n if brief:\n show = node.show(brief=True)\n else:\n show = node.show(\n brief=False,\n formatting=self.options.formatting,\n values=self.options.values)\n # List source files when in debug mode\n if self.options.debug:\n for source in node.sources:\n show += utils.color(\"{0}\\n\".format(source), \"blue\")\n if show is not None:\n output.append(show)\n\n # Print output and summary\n if brief or self.options.formatting:\n joined = \"\".join(output)\n else:\n joined = \"\\n\".join(output)\n try: # pragma: no cover\n print(joined, end=\"\")\n except UnicodeEncodeError: # pragma: no cover\n print(joined.encode('utf-8'), end=\"\")\n if self.options.verbose:\n utils.info(\"Found {0}.\".format(\n utils.listed(len(output), \"object\")))\n self.output = joined\n"
] | class Parser(object):
    """ Command line options parser """
    # NOTE(review): dataset-export copy of fmf.cli.Parser (the leading
    # "] | " residue is record-separator junk); this excerpt omits the
    # command_ls method, so the 'ls' subcommand dispatched via getattr()
    # in __init__ would not be found on this copy.
    def __init__(self, arguments=None, path=None):
        """ Prepare the parser. """
        # Change current working directory (used for testing)
        if path is not None:
            os.chdir(path)
        # Split command line if given as a string (used for testing)
        if isinstance(arguments, type("")): # pragma: no cover
            try:
                # This is necessary for Python 2.6
                self.arguments = [arg.decode('utf8')
                    for arg in shlex.split(arguments.encode('utf8'))]
            except AttributeError:
                self.arguments = shlex.split(arguments)
        # Otherwise use sys.argv (plus decode unicode for Python 2)
        # NOTE(review): a list passed as 'arguments' matches neither branch,
        # leaving self.arguments unset and raising AttributeError below.
        if arguments is None: # pragma: no cover
            try:
                self.arguments = [arg.decode("utf-8") for arg in sys.argv]
            except AttributeError:
                self.arguments = sys.argv
        # Enable debugging output if requested
        if "--debug" in self.arguments:
            utils.log.setLevel(utils.LOG_DEBUG)
        # Handle subcommands (mapped to command_* methods)
        self.parser = argparse.ArgumentParser(
            usage="fmf command [options]\n" + __doc__)
        self.parser.add_argument('command', help='Command to run')
        self.command = self.parser.parse_args(self.arguments[1:2]).command
        if not hasattr(self, "command_" + self.command):
            self.parser.print_help()
            raise utils.GeneralError(
                "Unrecognized command: '{0}'".format(self.command))
        # Initialize the rest and run the subcommand
        self.output = ""
        getattr(self, "command_" + self.command)()
    def options_select(self):
        """ Select by name, filter """
        group = self.parser.add_argument_group("Select")
        group.add_argument(
            "--key", dest="keys", action="append", default=[],
            help="Key content definition (required attributes)")
        group.add_argument(
            "--name", dest="names", action="append", default=[],
            help="List objects with name matching regular expression")
        group.add_argument(
            "--filter", dest="filters", action="append", default=[],
            help="Apply advanced filter (see 'pydoc fmf.filter')")
        group.add_argument(
            "--whole", dest="whole", action="store_true",
            help="Consider the whole tree (leaves only by default)")
    def options_formatting(self):
        """ Formatting options """
        group = self.parser.add_argument_group("Format")
        group.add_argument(
            "--format", dest="formatting", default=None,
            help="Custom output format using the {} expansion")
        group.add_argument(
            "--value", dest="values", action="append", default=[],
            help="Values for the custom formatting string")
    def options_utils(self):
        """ Utilities """
        group = self.parser.add_argument_group("Utils")
        group.add_argument(
            "--path", action="append", dest="paths",
            help="Path to the metadata tree (default: current directory)")
        group.add_argument(
            "--verbose", action="store_true",
            help="Print information about parsed files to stderr")
        group.add_argument(
            "--debug", action="store_true",
            help="Turn on debugging output, do not catch exceptions")
    def command_show(self):
        """ Show metadata """
        # Build a fresh parser for the 'show' subcommand
        self.parser = argparse.ArgumentParser(
            description="Show metadata of available objects")
        self.options_select()
        self.options_formatting()
        self.options_utils()
        self.options = self.parser.parse_args(self.arguments[2:])
        self.show(brief=False)
    def command_init(self):
        """ Initialize tree """
        self.parser = argparse.ArgumentParser(
            description="Initialize a new metadata tree")
        self.options_utils()
        self.options = self.parser.parse_args(self.arguments[2:])
        # For each path create an .fmf directory and version file
        for path in self.options.paths or ["."]:
            root = os.path.abspath(os.path.join(path, ".fmf"))
            if os.path.exists(root):
                raise utils.FileError("{0} '{1}' already exists.".format(
                    "Directory" if os.path.isdir(root) else "File", root))
            os.makedirs(root)
            with open(os.path.join(root, "version"), "w") as version:
                version.write("{0}\n".format(utils.VERSION))
            print("Metadata tree '{0}' successfully initialized.".format(root))
    def show(self, brief=False):
        """ Show metadata for each path given """
        output = []
        for path in self.options.paths or ["."]:
            if self.options.verbose:
                utils.info("Checking {0} for metadata.".format(path))
            tree = fmf.Tree(path)
            for node in tree.prune(
                    self.options.whole, self.options.keys, self.options.names,
                    self.options.filters):
                if brief:
                    show = node.show(brief=True)
                else:
                    show = node.show(
                        brief=False,
                        formatting=self.options.formatting,
                        values=self.options.values)
                # List source files when in debug mode
                if self.options.debug:
                    for source in node.sources:
                        show += utils.color("{0}\n".format(source), "blue")
                if show is not None:
                    output.append(show)
        # Print output and summary
        if brief or self.options.formatting:
            joined = "".join(output)
        else:
            joined = "\n".join(output)
        try: # pragma: no cover
            print(joined, end="")
        except UnicodeEncodeError: # pragma: no cover
            print(joined.encode('utf-8'), end="")
        if self.options.verbose:
            utils.info("Found {0}.".format(
                utils.listed(len(output), "object")))
        self.output = joined
|
psss/fmf | fmf/cli.py | Parser.command_show | python | def command_show(self):
        """ Show metadata. """
        # Build a fresh parser for the 'show' subcommand, then print data
        self.parser = argparse.ArgumentParser(
            description="Show metadata of available objects")
        self.options_select()
        self.options_formatting()
        self.options_utils()
        self.options = self.parser.parse_args(self.arguments[2:])
        self.show(brief=False) | Show metadata | train | https://github.com/psss/fmf/blob/419f2f195d92339b8f9e5d11c0bea0f303e5fd75/fmf/cli.py#L123-L131 | [
"def options_select(self):\n \"\"\" Select by name, filter \"\"\"\n group = self.parser.add_argument_group(\"Select\")\n group.add_argument(\n \"--key\", dest=\"keys\", action=\"append\", default=[],\n help=\"Key content definition (required attributes)\")\n group.add_argument(\n \"--name\", dest=\"names\", action=\"append\", default=[],\n help=\"List objects with name matching regular expression\")\n group.add_argument(\n \"--filter\", dest=\"filters\", action=\"append\", default=[],\n help=\"Apply advanced filter (see 'pydoc fmf.filter')\")\n group.add_argument(\n \"--whole\", dest=\"whole\", action=\"store_true\",\n help=\"Consider the whole tree (leaves only by default)\")\n",
"def options_formatting(self):\n \"\"\" Formating options \"\"\"\n group = self.parser.add_argument_group(\"Format\")\n group.add_argument(\n \"--format\", dest=\"formatting\", default=None,\n help=\"Custom output format using the {} expansion\")\n group.add_argument(\n \"--value\", dest=\"values\", action=\"append\", default=[],\n help=\"Values for the custom formatting string\")\n",
"def options_utils(self):\n \"\"\" Utilities \"\"\"\n group = self.parser.add_argument_group(\"Utils\")\n group.add_argument(\n \"--path\", action=\"append\", dest=\"paths\",\n help=\"Path to the metadata tree (default: current directory)\")\n group.add_argument(\n \"--verbose\", action=\"store_true\",\n help=\"Print information about parsed files to stderr\")\n group.add_argument(\n \"--debug\", action=\"store_true\",\n help=\"Turn on debugging output, do not catch exceptions\")\n",
"def show(self, brief=False):\n \"\"\" Show metadata for each path given \"\"\"\n output = []\n for path in self.options.paths or [\".\"]:\n if self.options.verbose:\n utils.info(\"Checking {0} for metadata.\".format(path))\n tree = fmf.Tree(path)\n for node in tree.prune(\n self.options.whole, self.options.keys, self.options.names,\n self.options.filters):\n if brief:\n show = node.show(brief=True)\n else:\n show = node.show(\n brief=False,\n formatting=self.options.formatting,\n values=self.options.values)\n # List source files when in debug mode\n if self.options.debug:\n for source in node.sources:\n show += utils.color(\"{0}\\n\".format(source), \"blue\")\n if show is not None:\n output.append(show)\n\n # Print output and summary\n if brief or self.options.formatting:\n joined = \"\".join(output)\n else:\n joined = \"\\n\".join(output)\n try: # pragma: no cover\n print(joined, end=\"\")\n except UnicodeEncodeError: # pragma: no cover\n print(joined.encode('utf-8'), end=\"\")\n if self.options.verbose:\n utils.info(\"Found {0}.\".format(\n utils.listed(len(output), \"object\")))\n self.output = joined\n"
] | class Parser(object):
""" Command line options parser """
def __init__(self, arguments=None, path=None):
""" Prepare the parser. """
# Change current working directory (used for testing)
if path is not None:
os.chdir(path)
# Split command line if given as a string (used for testing)
if isinstance(arguments, type("")): # pragma: no cover
try:
# This is necessary for Python 2.6
self.arguments = [arg.decode('utf8')
for arg in shlex.split(arguments.encode('utf8'))]
except AttributeError:
self.arguments = shlex.split(arguments)
# Otherwise use sys.argv (plus decode unicode for Python 2)
if arguments is None: # pragma: no cover
try:
self.arguments = [arg.decode("utf-8") for arg in sys.argv]
except AttributeError:
self.arguments = sys.argv
# Enable debugging output if requested
if "--debug" in self.arguments:
utils.log.setLevel(utils.LOG_DEBUG)
# Handle subcommands (mapped to format_* methods)
self.parser = argparse.ArgumentParser(
usage="fmf command [options]\n" + __doc__)
self.parser.add_argument('command', help='Command to run')
self.command = self.parser.parse_args(self.arguments[1:2]).command
if not hasattr(self, "command_" + self.command):
self.parser.print_help()
raise utils.GeneralError(
"Unrecognized command: '{0}'".format(self.command))
# Initialize the rest and run the subcommand
self.output = ""
getattr(self, "command_" + self.command)()
def options_select(self):
""" Select by name, filter """
group = self.parser.add_argument_group("Select")
group.add_argument(
"--key", dest="keys", action="append", default=[],
help="Key content definition (required attributes)")
group.add_argument(
"--name", dest="names", action="append", default=[],
help="List objects with name matching regular expression")
group.add_argument(
"--filter", dest="filters", action="append", default=[],
help="Apply advanced filter (see 'pydoc fmf.filter')")
group.add_argument(
"--whole", dest="whole", action="store_true",
help="Consider the whole tree (leaves only by default)")
def options_formatting(self):
""" Formating options """
group = self.parser.add_argument_group("Format")
group.add_argument(
"--format", dest="formatting", default=None,
help="Custom output format using the {} expansion")
group.add_argument(
"--value", dest="values", action="append", default=[],
help="Values for the custom formatting string")
def options_utils(self):
""" Utilities """
group = self.parser.add_argument_group("Utils")
group.add_argument(
"--path", action="append", dest="paths",
help="Path to the metadata tree (default: current directory)")
group.add_argument(
"--verbose", action="store_true",
help="Print information about parsed files to stderr")
group.add_argument(
"--debug", action="store_true",
help="Turn on debugging output, do not catch exceptions")
def command_ls(self):
""" List names """
self.parser = argparse.ArgumentParser(
description="List names of available objects")
self.options_select()
self.options_utils()
self.options = self.parser.parse_args(self.arguments[2:])
self.show(brief=True)
def command_init(self):
    """ Initialize tree """
    # Replace the top-level subcommand parser with one dedicated to 'init'
    self.parser = argparse.ArgumentParser(
        description="Initialize a new metadata tree")
    self.options_utils()
    self.options = self.parser.parse_args(self.arguments[2:])
    # For each path create an .fmf directory and version file
    for path in self.options.paths or ["."]:
        root = os.path.abspath(os.path.join(path, ".fmf"))
        # Refuse to clobber an existing tree root
        # NOTE(review): exists-then-makedirs leaves a small race window if
        # two inits run concurrently -- confirm whether that matters here
        if os.path.exists(root):
            raise utils.FileError("{0} '{1}' already exists.".format(
                "Directory" if os.path.isdir(root) else "File", root))
        os.makedirs(root)
        # Record the current metadata format version inside the tree root
        with open(os.path.join(root, "version"), "w") as version:
            version.write("{0}\n".format(utils.VERSION))
        print("Metadata tree '{0}' successfully initialized.".format(root))
def show(self, brief=False):
    """ Show metadata for each path given """
    output = []
    for path in self.options.paths or ["."]:
        if self.options.verbose:
            utils.info("Checking {0} for metadata.".format(path))
        tree = fmf.Tree(path)
        # Walk only the nodes selected by --whole/--key/--name/--filter
        for node in tree.prune(
                self.options.whole, self.options.keys, self.options.names,
                self.options.filters):
            if brief:
                # Brief mode: names only
                show = node.show(brief=True)
            else:
                # Full mode honours custom --format/--value expansion
                show = node.show(
                    brief=False,
                    formatting=self.options.formatting,
                    values=self.options.values)
            # List source files when in debug mode
            if self.options.debug:
                for source in node.sources:
                    show += utils.color("{0}\n".format(source), "blue")
            if show is not None:
                output.append(show)
    # Print output and summary; brief/custom-format items already carry
    # their own newlines, full listings are separated by a blank line
    if brief or self.options.formatting:
        joined = "".join(output)
    else:
        joined = "\n".join(output)
    try: # pragma: no cover
        print(joined, end="")
    except UnicodeEncodeError: # pragma: no cover
        # Fallback when the output encoding cannot represent the text
        # (presumably a Python 2 / non-UTF terminal case -- see the
        # similar decode handling in __init__)
        print(joined.encode('utf-8'), end="")
    if self.options.verbose:
        utils.info("Found {0}.".format(
            utils.listed(len(output), "object")))
    # Keep the joined result on the instance for callers to inspect
    self.output = joined
|
psss/fmf | fmf/cli.py | Parser.command_init | python | def command_init(self):
self.parser = argparse.ArgumentParser(
description="Initialize a new metadata tree")
self.options_utils()
self.options = self.parser.parse_args(self.arguments[2:])
# For each path create an .fmf directory and version file
for path in self.options.paths or ["."]:
root = os.path.abspath(os.path.join(path, ".fmf"))
if os.path.exists(root):
raise utils.FileError("{0} '{1}' already exists.".format(
"Directory" if os.path.isdir(root) else "File", root))
os.makedirs(root)
with open(os.path.join(root, "version"), "w") as version:
version.write("{0}\n".format(utils.VERSION))
print("Metadata tree '{0}' successfully initialized.".format(root)) | Initialize tree | train | https://github.com/psss/fmf/blob/419f2f195d92339b8f9e5d11c0bea0f303e5fd75/fmf/cli.py#L133-L148 | [
"def options_utils(self):\n \"\"\" Utilities \"\"\"\n group = self.parser.add_argument_group(\"Utils\")\n group.add_argument(\n \"--path\", action=\"append\", dest=\"paths\",\n help=\"Path to the metadata tree (default: current directory)\")\n group.add_argument(\n \"--verbose\", action=\"store_true\",\n help=\"Print information about parsed files to stderr\")\n group.add_argument(\n \"--debug\", action=\"store_true\",\n help=\"Turn on debugging output, do not catch exceptions\")\n"
] | class Parser(object):
""" Command line options parser """
def __init__(self, arguments=None, path=None):
""" Prepare the parser. """
# Change current working directory (used for testing)
if path is not None:
os.chdir(path)
# Split command line if given as a string (used for testing)
if isinstance(arguments, type("")): # pragma: no cover
try:
# This is necessary for Python 2.6
self.arguments = [arg.decode('utf8')
for arg in shlex.split(arguments.encode('utf8'))]
except AttributeError:
self.arguments = shlex.split(arguments)
# Otherwise use sys.argv (plus decode unicode for Python 2)
if arguments is None: # pragma: no cover
try:
self.arguments = [arg.decode("utf-8") for arg in sys.argv]
except AttributeError:
self.arguments = sys.argv
# Enable debugging output if requested
if "--debug" in self.arguments:
utils.log.setLevel(utils.LOG_DEBUG)
# Handle subcommands (mapped to format_* methods)
self.parser = argparse.ArgumentParser(
usage="fmf command [options]\n" + __doc__)
self.parser.add_argument('command', help='Command to run')
self.command = self.parser.parse_args(self.arguments[1:2]).command
if not hasattr(self, "command_" + self.command):
self.parser.print_help()
raise utils.GeneralError(
"Unrecognized command: '{0}'".format(self.command))
# Initialize the rest and run the subcommand
self.output = ""
getattr(self, "command_" + self.command)()
def options_select(self):
""" Select by name, filter """
group = self.parser.add_argument_group("Select")
group.add_argument(
"--key", dest="keys", action="append", default=[],
help="Key content definition (required attributes)")
group.add_argument(
"--name", dest="names", action="append", default=[],
help="List objects with name matching regular expression")
group.add_argument(
"--filter", dest="filters", action="append", default=[],
help="Apply advanced filter (see 'pydoc fmf.filter')")
group.add_argument(
"--whole", dest="whole", action="store_true",
help="Consider the whole tree (leaves only by default)")
def options_formatting(self):
""" Formating options """
group = self.parser.add_argument_group("Format")
group.add_argument(
"--format", dest="formatting", default=None,
help="Custom output format using the {} expansion")
group.add_argument(
"--value", dest="values", action="append", default=[],
help="Values for the custom formatting string")
def options_utils(self):
""" Utilities """
group = self.parser.add_argument_group("Utils")
group.add_argument(
"--path", action="append", dest="paths",
help="Path to the metadata tree (default: current directory)")
group.add_argument(
"--verbose", action="store_true",
help="Print information about parsed files to stderr")
group.add_argument(
"--debug", action="store_true",
help="Turn on debugging output, do not catch exceptions")
def command_ls(self):
""" List names """
self.parser = argparse.ArgumentParser(
description="List names of available objects")
self.options_select()
self.options_utils()
self.options = self.parser.parse_args(self.arguments[2:])
self.show(brief=True)
def command_show(self):
""" Show metadata """
self.parser = argparse.ArgumentParser(
description="Show metadata of available objects")
self.options_select()
self.options_formatting()
self.options_utils()
self.options = self.parser.parse_args(self.arguments[2:])
self.show(brief=False)
def show(self, brief=False):
""" Show metadata for each path given """
output = []
for path in self.options.paths or ["."]:
if self.options.verbose:
utils.info("Checking {0} for metadata.".format(path))
tree = fmf.Tree(path)
for node in tree.prune(
self.options.whole, self.options.keys, self.options.names,
self.options.filters):
if brief:
show = node.show(brief=True)
else:
show = node.show(
brief=False,
formatting=self.options.formatting,
values=self.options.values)
# List source files when in debug mode
if self.options.debug:
for source in node.sources:
show += utils.color("{0}\n".format(source), "blue")
if show is not None:
output.append(show)
# Print output and summary
if brief or self.options.formatting:
joined = "".join(output)
else:
joined = "\n".join(output)
try: # pragma: no cover
print(joined, end="")
except UnicodeEncodeError: # pragma: no cover
print(joined.encode('utf-8'), end="")
if self.options.verbose:
utils.info("Found {0}.".format(
utils.listed(len(output), "object")))
self.output = joined
|
psss/fmf | fmf/cli.py | Parser.show | python | def show(self, brief=False):
output = []
for path in self.options.paths or ["."]:
if self.options.verbose:
utils.info("Checking {0} for metadata.".format(path))
tree = fmf.Tree(path)
for node in tree.prune(
self.options.whole, self.options.keys, self.options.names,
self.options.filters):
if brief:
show = node.show(brief=True)
else:
show = node.show(
brief=False,
formatting=self.options.formatting,
values=self.options.values)
# List source files when in debug mode
if self.options.debug:
for source in node.sources:
show += utils.color("{0}\n".format(source), "blue")
if show is not None:
output.append(show)
# Print output and summary
if brief or self.options.formatting:
joined = "".join(output)
else:
joined = "\n".join(output)
try: # pragma: no cover
print(joined, end="")
except UnicodeEncodeError: # pragma: no cover
print(joined.encode('utf-8'), end="")
if self.options.verbose:
utils.info("Found {0}.".format(
utils.listed(len(output), "object")))
self.output = joined | Show metadata for each path given | train | https://github.com/psss/fmf/blob/419f2f195d92339b8f9e5d11c0bea0f303e5fd75/fmf/cli.py#L150-L186 | [
"def info(message, newline=True):\n \"\"\" Log provided info message to the standard error output \"\"\"\n sys.stderr.write(message + (\"\\n\" if newline else \"\"))\n",
"def color(text, color=None, background=None, light=False, enabled=\"auto\"):\n \"\"\"\n Return text in desired color if coloring enabled\n\n Available colors: black red green yellow blue magenta cyan white.\n Alternatively color can be prefixed with \"light\", e.g. lightgreen.\n \"\"\"\n colors = {\"black\": 30, \"red\": 31, \"green\": 32, \"yellow\": 33,\n \"blue\": 34, \"magenta\": 35, \"cyan\": 36, \"white\": 37}\n # Nothing do do if coloring disabled\n if enabled == \"auto\":\n enabled = Coloring().enabled()\n if not enabled:\n return text\n # Prepare colors (strip 'light' if present in color)\n if color and color.startswith(\"light\"):\n light = True\n color = color[5:]\n color = color and \";{0}\".format(colors[color]) or \"\"\n background = background and \";{0}\".format(colors[background] + 10) or \"\"\n light = light and 1 or 0\n # Starting and finishing sequence\n start = \"\\033[{0}{1}{2}m\".format(light, color, background)\n finish = \"\\033[1;m\"\n return \"\".join([start, text, finish])\n",
"def listed(items, singular=None, plural=None, max=None, quote=\"\"):\n \"\"\"\n Convert an iterable into a nice, human readable list or description::\n\n listed(range(1)) .................... 0\n listed(range(2)) .................... 0 and 1\n listed(range(3), quote='\"') ......... \"0\", \"1\" and \"2\"\n listed(range(4), max=3) ............. 0, 1, 2 and 1 more\n listed(range(5), 'number', max=3) ... 0, 1, 2 and 2 more numbers\n listed(range(6), 'category') ........ 6 categories\n listed(7, \"leaf\", \"leaves\") ......... 7 leaves\n\n If singular form is provided but max not set the description-only\n mode is activated as shown in the last two examples. Also, an int\n can be used in this case to get a simple inflection functionality.\n \"\"\"\n\n # Convert items to list if necessary\n items = range(items) if isinstance(items, int) else list(items)\n more = \" more\"\n # Description mode expected when singular provided but no maximum set\n if singular is not None and max is None:\n max = 0\n more = \"\"\n # Set the default plural form\n if singular is not None and plural is None:\n plural = pluralize(singular)\n # Convert to strings and optionally quote each item\n items = [\"{0}{1}{0}\".format(quote, item) for item in items]\n\n # Select the maximum of items and describe the rest if max provided\n if max is not None:\n # Special case when the list is empty (0 items)\n if max == 0 and len(items) == 0:\n return \"0 {0}\".format(plural)\n # Cut the list if maximum exceeded\n if len(items) > max:\n rest = len(items[max:])\n items = items[:max]\n if singular is not None:\n more += \" {0}\".format(singular if rest == 1 else plural)\n items.append(\"{0}{1}\".format(rest, more))\n\n # For two and more items use 'and' instead of the last comma\n if len(items) < 2:\n return \"\".join(items)\n else:\n return \", \".join(items[0:-2] + [\" and \".join(items[-2:])])\n",
"def prune(self, whole=False, keys=[], names=[], filters=[]):\n \"\"\" Filter tree nodes based on given criteria \"\"\"\n for node in self.climb(whole):\n # Select only nodes with key content\n if not all([key in node.data for key in keys]):\n continue\n # Select nodes with name matching regular expression\n if names and not any(\n [re.search(name, node.name) for name in names]):\n continue\n # Apply advanced filters if given\n try:\n if not all([utils.filter(filter, node.data, regexp=True)\n for filter in filters]):\n continue\n # Handle missing attribute as if filter failed\n except utils.FilterError:\n continue\n # All criteria met, thus yield the node\n yield node\n"
] | class Parser(object):
""" Command line options parser """
def __init__(self, arguments=None, path=None):
""" Prepare the parser. """
# Change current working directory (used for testing)
if path is not None:
os.chdir(path)
# Split command line if given as a string (used for testing)
if isinstance(arguments, type("")): # pragma: no cover
try:
# This is necessary for Python 2.6
self.arguments = [arg.decode('utf8')
for arg in shlex.split(arguments.encode('utf8'))]
except AttributeError:
self.arguments = shlex.split(arguments)
# Otherwise use sys.argv (plus decode unicode for Python 2)
if arguments is None: # pragma: no cover
try:
self.arguments = [arg.decode("utf-8") for arg in sys.argv]
except AttributeError:
self.arguments = sys.argv
# Enable debugging output if requested
if "--debug" in self.arguments:
utils.log.setLevel(utils.LOG_DEBUG)
# Handle subcommands (mapped to format_* methods)
self.parser = argparse.ArgumentParser(
usage="fmf command [options]\n" + __doc__)
self.parser.add_argument('command', help='Command to run')
self.command = self.parser.parse_args(self.arguments[1:2]).command
if not hasattr(self, "command_" + self.command):
self.parser.print_help()
raise utils.GeneralError(
"Unrecognized command: '{0}'".format(self.command))
# Initialize the rest and run the subcommand
self.output = ""
getattr(self, "command_" + self.command)()
def options_select(self):
""" Select by name, filter """
group = self.parser.add_argument_group("Select")
group.add_argument(
"--key", dest="keys", action="append", default=[],
help="Key content definition (required attributes)")
group.add_argument(
"--name", dest="names", action="append", default=[],
help="List objects with name matching regular expression")
group.add_argument(
"--filter", dest="filters", action="append", default=[],
help="Apply advanced filter (see 'pydoc fmf.filter')")
group.add_argument(
"--whole", dest="whole", action="store_true",
help="Consider the whole tree (leaves only by default)")
def options_formatting(self):
""" Formating options """
group = self.parser.add_argument_group("Format")
group.add_argument(
"--format", dest="formatting", default=None,
help="Custom output format using the {} expansion")
group.add_argument(
"--value", dest="values", action="append", default=[],
help="Values for the custom formatting string")
def options_utils(self):
""" Utilities """
group = self.parser.add_argument_group("Utils")
group.add_argument(
"--path", action="append", dest="paths",
help="Path to the metadata tree (default: current directory)")
group.add_argument(
"--verbose", action="store_true",
help="Print information about parsed files to stderr")
group.add_argument(
"--debug", action="store_true",
help="Turn on debugging output, do not catch exceptions")
def command_ls(self):
""" List names """
self.parser = argparse.ArgumentParser(
description="List names of available objects")
self.options_select()
self.options_utils()
self.options = self.parser.parse_args(self.arguments[2:])
self.show(brief=True)
def command_show(self):
""" Show metadata """
self.parser = argparse.ArgumentParser(
description="Show metadata of available objects")
self.options_select()
self.options_formatting()
self.options_utils()
self.options = self.parser.parse_args(self.arguments[2:])
self.show(brief=False)
def command_init(self):
""" Initialize tree """
self.parser = argparse.ArgumentParser(
description="Initialize a new metadata tree")
self.options_utils()
self.options = self.parser.parse_args(self.arguments[2:])
# For each path create an .fmf directory and version file
for path in self.options.paths or ["."]:
root = os.path.abspath(os.path.join(path, ".fmf"))
if os.path.exists(root):
raise utils.FileError("{0} '{1}' already exists.".format(
"Directory" if os.path.isdir(root) else "File", root))
os.makedirs(root)
with open(os.path.join(root, "version"), "w") as version:
version.write("{0}\n".format(utils.VERSION))
print("Metadata tree '{0}' successfully initialized.".format(root))
|
def filter(filter, data, sensitive=True, regexp=False):
    """
    Return true if provided filter matches given dictionary of values

    Filter supports disjunctive normal form with '|' used for OR, '&'
    for AND and '-' for negation; multiple comma-separated values are
    a shortcut for OR within one literal. Values are provided as a
    dictionary of lists (other value types are converted to strings).
    Set 'sensitive' to False for case-insensitive matching, 'regexp'
    to True to treat filter values as regular expressions. Raises
    FilterError for an unknown dimension, invalid literal syntax or
    non-dict data. Fix: regex patterns are now raw strings, avoiding
    the invalid-escape-sequence warning ('\\s' in a plain string) that
    newer Python versions emit.
    """
    def match_value(pattern, text):
        """ Match value against data (simple or regexp) """
        if regexp:
            # Anchor the pattern so it must match the whole value
            return re.match(r"^{0}$".format(pattern), text)
        else:
            return pattern == text

    def check_value(dimension, value):
        """ Check whether the value matches data """
        # E.g. value = 'A, B' or value = "C" or value = "-D"
        # If there are multiple values, at least one must match
        for atom in re.split(r"\s*,\s*", value):
            # Handle negative values (check the whole data for non-presence)
            if atom.startswith("-"):
                atom = atom[1:]
                # Check each value for given dimension
                for dato in data[dimension]:
                    if match_value(atom, dato):
                        break
                # Pattern not found ---> good
                else:
                    return True
            # Handle positive values (return True upon first successful match)
            else:
                # Check each value for given dimension
                for dato in data[dimension]:
                    if match_value(atom, dato):
                        # Pattern found ---> good
                        return True
        # No value matched the data
        return False

    def check_dimension(dimension, values):
        """ Check whether all values for given dimension match data """
        # E.g. dimension = 'tag', values = ['A, B', 'C', '-D']
        # Raise exception upon unknown dimension
        if dimension not in data:
            raise FilterError("Invalid filter '{0}'".format(dimension))
        # Every value must match at least one value for data
        return all([check_value(dimension, value) for value in values])

    def check_clause(clause):
        """ Split into literals and check whether all match """
        # E.g. clause = 'tag: A, B & tag: C & tag: -D'
        # Split into individual literals by dimension
        literals = dict()
        for literal in re.split(r"\s*&\s*", clause):
            # E.g. literal = 'tag: A, B'
            # Make sure the literal matches dimension:value format
            matched = re.match(r"^(.*)\s*:\s*(.*)$", literal)
            if not matched:
                raise FilterError("Invalid filter '{0}'".format(literal))
            dimension, value = matched.groups()
            values = [value]
            # Append the literal value(s) to corresponding dimension list
            literals.setdefault(dimension, []).extend(values)
        # For each dimension all literals must match given data
        return all([check_dimension(dimension, values)
                    for dimension, values in literals.items()])

    # Default to True if no filter given, bail out if weird data given
    if filter is None or filter == "":
        return True
    if not isinstance(data, dict):
        raise FilterError("Invalid data type '{0}'".format(type(data)))
    # Make sure that data dictionary contains lists of strings
    data = copy.deepcopy(data)
    try: # pragma: no cover
        for key in data:
            if isinstance(data[key], list):
                data[key] = [unicode(item) for item in data[key]]
            else:
                data[key] = [unicode(data[key])]
    except NameError: # pragma: no cover
        # Python 3 has no 'unicode' builtin --- plain str() is enough
        for key in data:
            if isinstance(data[key], list):
                data[key] = [str(item) for item in data[key]]
            else:
                data[key] = [str(data[key])]
    # Turn all data into lowercase if sensitivity is off
    if not sensitive:
        filter = filter.lower()
        lowered = dict()
        for key, values in data.items():
            lowered[key.lower()] = [value.lower() for value in values]
        data = lowered
    # At least one clause must be true
    return any([check_clause(clause)
                for clause in re.split(r"\s*\|\s*", filter)])
Filter supports disjunctive normal form with '|' used for OR, '&'
for AND and '-' for negation. Individual values are prefixed with
'value:', leading/trailing white-space is stripped. For example::
tag: Tier1 | tag: Tier2 | tag: Tier3
category: Sanity, Security & tag: -destructive
Note that multiple comma-separated values can be used as a syntactic
sugar to shorten the filter notation::
tag: A, B, C ---> tag: A | tag: B | tag: C
Values should be provided as a dictionary of lists each describing
the values against which the filter is to be matched. For example::
data = {tag: ["Tier1", "TIPpass"], category: ["Sanity"]}
Other types of dictionary values are converted into a string.
A FilterError exception is raised when a dimension parsed from the
filter is not found in the data dictionary. Set option 'sensitive'
to False to enable case-insensitive matching. If 'regexp' option is
True, regular expressions can be used in the filter values as well. | train | https://github.com/psss/fmf/blob/419f2f195d92339b8f9e5d11c0bea0f303e5fd75/fmf/utils.py#L150-L267 | null | # coding: utf-8
""" Logging, config, constants & utilities """
from __future__ import unicode_literals, absolute_import
import os
import re
import sys
import copy
import logging
from pprint import pformat as pretty
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Constants
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Coloring
COLOR_ON = 1
COLOR_OFF = 0
COLOR_AUTO = 2
# Logging
LOG_ERROR = logging.ERROR
LOG_WARN = logging.WARN
LOG_INFO = logging.INFO
LOG_DEBUG = logging.DEBUG
LOG_CACHE = 7
LOG_DATA = 4
LOG_ALL = 1
# Current metadata format version
VERSION = 1
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Exceptions
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class GeneralError(Exception):
""" General error """
class FormatError(GeneralError):
""" Metadata format error """
class FileError(GeneralError):
""" File reading error """
class RootError(FileError):
""" Metadata tree root missing """
class FilterError(GeneralError):
""" Missing data when filtering """
class MergeError(GeneralError):
""" Unable to merge data between parent and child """
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Utils
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def pluralize(singular=None):
    """ Naively pluralize words """
    # 'category' -> 'categories', but keep 'day' -> 'days'
    if singular.endswith("y") and not singular.endswith("ay"):
        return singular[:-1] + "ies"
    # 'class' -> 'classes'
    if singular.endswith("s"):
        return singular + "es"
    # Default: simply append 's'
    return singular + "s"
def listed(items, singular=None, plural=None, max=None, quote=""):
    """
    Convert an iterable into a nice, human readable list or description::

        listed(range(1)) .................... 0
        listed(range(2)) .................... 0 and 1
        listed(range(3), quote='"') ......... "0", "1" and "2"
        listed(range(4), max=3) ............. 0, 1, 2 and 1 more
        listed(range(5), 'number', max=3) ... 0, 1, 2 and 2 more numbers
        listed(range(6), 'category') ........ 6 categories
        listed(7, "leaf", "leaves") ......... 7 leaves

    If singular form is provided but max not set the description-only
    mode is activated as shown in the last two examples. Also, an int
    can be used in this case to get a simple inflection functionality.
    """
    # Convert items to list if necessary (an int means range(items))
    items = range(items) if isinstance(items, int) else list(items)
    more = " more"
    # Description mode expected when singular provided but no maximum set
    # (NB: 'max' deliberately shadows the builtin here)
    if singular is not None and max is None:
        max = 0
        more = ""
    # Set the default plural form
    if singular is not None and plural is None:
        plural = pluralize(singular)
    # Convert to strings and optionally quote each item
    items = ["{0}{1}{0}".format(quote, item) for item in items]

    # Select the maximum of items and describe the rest if max provided
    if max is not None:
        # Special case when the list is empty (0 items)
        if max == 0 and len(items) == 0:
            return "0 {0}".format(plural)
        # Cut the list if maximum exceeded
        if len(items) > max:
            rest = len(items[max:])
            items = items[:max]
            # Pick singular/plural noun to match the remaining count
            if singular is not None:
                more += " {0}".format(singular if rest == 1 else plural)
            items.append("{0}{1}".format(rest, more))

    # For two and more items use 'and' instead of the last comma
    if len(items) < 2:
        return "".join(items)
    else:
        return ", ".join(items[0:-2] + [" and ".join(items[-2:])])
def split(values, separator=re.compile("[ ,]+")):
"""
Convert space-or-comma-separated values into a single list
Common use case for this is merging content of options with multiple
values allowed into a single list of strings thus allowing any of
the formats below and converts them into ['a', 'b', 'c']::
--option a --option b --option c ... ['a', 'b', 'c']
--option a,b --option c ............ ['a,b', 'c']
--option 'a b c' ................... ['a b c']
Accepts both string and list. By default space and comma are used as
value separators. Use any regular expression for custom separator.
"""
if not isinstance(values, list):
values = [values]
return sum([separator.split(value) for value in values], [])
def info(message, newline=True):
""" Log provided info message to the standard error output """
sys.stderr.write(message + ("\n" if newline else ""))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Filtering
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Logging
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Logging(object):
""" Logging Configuration """
# Color mapping
COLORS = {
LOG_ERROR: "red",
LOG_WARN: "yellow",
LOG_INFO: "blue",
LOG_DEBUG: "green",
LOG_CACHE: "cyan",
LOG_DATA: "magenta",
}
# Environment variable mapping
MAPPING = {
0: LOG_WARN,
1: LOG_INFO,
2: LOG_DEBUG,
3: LOG_CACHE,
4: LOG_DATA,
5: LOG_ALL,
}
# All levels
LEVELS = "CRITICAL DEBUG ERROR FATAL INFO NOTSET WARN WARNING".split()
# Default log level is WARN
_level = LOG_WARN
# Already initialized loggers by their name
_loggers = dict()
def __init__(self, name='fmf'):
# Use existing logger if already initialized
try:
self.logger = Logging._loggers[name]
# Otherwise create a new one, save it and set it
except KeyError:
self.logger = self._create_logger(name=name)
Logging._loggers[name] = self.logger
self.set()
class ColoredFormatter(logging.Formatter):
""" Custom color formatter for logging """
def format(self, record):
# Handle custom log level names
if record.levelno == LOG_ALL:
levelname = "ALL"
elif record.levelno == LOG_DATA:
levelname = "DATA"
elif record.levelno == LOG_CACHE:
levelname = "CACHE"
else:
levelname = record.levelname
# Map log level to appropriate color
try:
colour = Logging.COLORS[record.levelno]
except KeyError:
colour = "black"
# Color the log level, use brackets when coloring off
if Coloring().enabled():
level = color(" " + levelname + " ", "lightwhite", colour)
else:
level = "[{0}]".format(levelname)
return u"{0} {1}".format(level, record.getMessage())
@staticmethod
def _create_logger(name='fmf', level=None):
""" Create fmf logger """
# Create logger, handler and formatter
logger = logging.getLogger(name)
handler = logging.StreamHandler()
handler.setFormatter(Logging.ColoredFormatter())
logger.addHandler(handler)
# Save log levels in the logger itself (backward compatibility)
for level in Logging.LEVELS:
setattr(logger, level, getattr(logging, level))
# Additional logging constants and methods for cache and xmlrpc
logger.DATA = LOG_DATA
logger.CACHE = LOG_CACHE
logger.ALL = LOG_ALL
logger.cache = lambda message: logger.log(LOG_CACHE, message) # NOQA
logger.data = lambda message: logger.log(LOG_DATA, message) # NOQA
logger.all = lambda message: logger.log(LOG_ALL, message) # NOQA
return logger
def set(self, level=None):
"""
Set the default log level
If the level is not specified environment variable DEBUG is used
with the following meaning::
DEBUG=0 ... LOG_WARN (default)
DEBUG=1 ... LOG_INFO
DEBUG=2 ... LOG_DEBUG
DEBUG=3 ... LOG_CACHE
DEBUG=4 ... LOG_DATA
DEBUG=5 ... LOG_ALL (log all messages)
"""
# If level specified, use given
if level is not None:
Logging._level = level
# Otherwise attempt to detect from the environment
else:
try:
Logging._level = Logging.MAPPING[int(os.environ["DEBUG"])]
except Exception:
Logging._level = logging.WARN
self.logger.setLevel(Logging._level)
def get(self):
""" Get the current log level """
return self.logger.level
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Coloring
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def color(text, color=None, background=None, light=False, enabled="auto"):
"""
Return text in desired color if coloring enabled
Available colors: black red green yellow blue magenta cyan white.
Alternatively color can be prefixed with "light", e.g. lightgreen.
"""
colors = {"black": 30, "red": 31, "green": 32, "yellow": 33,
"blue": 34, "magenta": 35, "cyan": 36, "white": 37}
# Nothing do do if coloring disabled
if enabled == "auto":
enabled = Coloring().enabled()
if not enabled:
return text
# Prepare colors (strip 'light' if present in color)
if color and color.startswith("light"):
light = True
color = color[5:]
color = color and ";{0}".format(colors[color]) or ""
background = background and ";{0}".format(colors[background] + 10) or ""
light = light and 1 or 0
# Starting and finishing sequence
start = "\033[{0}{1}{2}m".format(light, color, background)
finish = "\033[1;m"
return "".join([start, text, finish])
class Coloring(object):
""" Coloring configuration """
# Default color mode is auto-detected from the terminal presence
_mode = None
MODES = ["COLOR_OFF", "COLOR_ON", "COLOR_AUTO"]
# We need only a single config instance
_instance = None
def __new__(cls, *args, **kwargs):
""" Make sure we create a single instance only """
if not cls._instance:
cls._instance = super(Coloring, cls).__new__(cls, *args, **kwargs)
return cls._instance
def __init__(self, mode=None):
""" Initialize the coloring mode """
# Nothing to do if already initialized
if self._mode is not None:
return
# Set the mode
self.set(mode)
def set(self, mode=None):
"""
Set the coloring mode
If enabled, some objects (like case run Status) are printed in color
to easily spot failures, errors and so on. By default the feature is
enabled when script is attached to a terminal. Possible values are::
COLOR=0 ... COLOR_OFF .... coloring disabled
COLOR=1 ... COLOR_ON ..... coloring enabled
COLOR=2 ... COLOR_AUTO ... if terminal attached (default)
Environment variable COLOR can be used to set up the coloring to the
desired mode without modifying code.
"""
# Detect from the environment if no mode given (only once)
if mode is None:
# Nothing to do if already detected
if self._mode is not None:
return
# Detect from the environment variable COLOR
try:
mode = int(os.environ["COLOR"])
except Exception:
mode = COLOR_AUTO
elif mode < 0 or mode > 2:
raise RuntimeError("Invalid color mode '{0}'".format(mode))
self._mode = mode
log.debug(
"Coloring {0} ({1})".format(
"enabled" if self.enabled() else "disabled",
self.MODES[self._mode]))
def get(self):
""" Get the current color mode """
return self._mode
def enabled(self):
""" True if coloring is currently enabled """
# In auto-detection mode color enabled when terminal attached
if self._mode == COLOR_AUTO:
return sys.stdout.isatty()
return self._mode == COLOR_ON
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Default Logger
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create the default output logger
log = Logging('fmf').logger
|
def color(text, color=None, background=None, light=False, enabled="auto"):
    """
    Return text in desired color if coloring enabled

    Available colors: black red green yellow blue magenta cyan white.
    Alternatively color can be prefixed with "light", e.g. lightgreen.
    """
    codes = {"black": 30, "red": 31, "green": 32, "yellow": 33,
             "blue": 34, "magenta": 35, "cyan": 36, "white": 37}
    # Resolve auto mode from the global coloring configuration
    if enabled == "auto":
        enabled = Coloring().enabled()
    if not enabled:
        return text
    # A 'light*' color name implies the bold/light attribute
    if color and color.startswith("light"):
        light = True
        color = color[5:]
    # Build foreground/background parts of the escape sequence
    foreground = ";{0}".format(codes[color]) if color else ""
    background = ";{0}".format(codes[background] + 10) if background else ""
    attribute = 1 if light else 0
    # Wrap the text between the start and the reset sequence
    return "\033[{0}{1}{2}m{3}\033[1;m".format(
        attribute, foreground, background, text)
Available colors: black red green yellow blue magenta cyan white.
Alternatively color can be prefixed with "light", e.g. lightgreen. | train | https://github.com/psss/fmf/blob/419f2f195d92339b8f9e5d11c0bea0f303e5fd75/fmf/utils.py#L391-L415 | null | # coding: utf-8
""" Logging, config, constants & utilities """
from __future__ import unicode_literals, absolute_import
import os
import re
import sys
import copy
import logging
from pprint import pformat as pretty
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Constants
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Coloring
COLOR_ON = 1
COLOR_OFF = 0
COLOR_AUTO = 2
# Logging
LOG_ERROR = logging.ERROR
LOG_WARN = logging.WARN
LOG_INFO = logging.INFO
LOG_DEBUG = logging.DEBUG
LOG_CACHE = 7
LOG_DATA = 4
LOG_ALL = 1
# Current metadata format version
VERSION = 1
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Exceptions
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class GeneralError(Exception):
    """ General error -- base class for all fmf exceptions """

class FormatError(GeneralError):
    """ Metadata format error """

class FileError(GeneralError):
    """ File reading error """

class RootError(FileError):
    """ Metadata tree root missing """

class FilterError(GeneralError):
    """ Missing data when filtering """

class MergeError(GeneralError):
    """ Unable to merge data between parent and child """
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Utils
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def pluralize(singular=None):
    """ Return a naive English plural of the given singular word """
    # 'y' -> 'ies' (but keep '-ay' words regular, e.g. 'day' -> 'days')
    if singular.endswith("y") and not singular.endswith("ay"):
        return singular[:-1] + "ies"
    # words already ending in 's' get 'es'
    if singular.endswith("s"):
        return singular + "es"
    # everything else just gets an 's'
    return singular + "s"
def listed(items, singular=None, plural=None, max=None, quote=""):
    """
    Convert an iterable into a nice, human readable list or description::

        listed(range(1)) .................... 0
        listed(range(2)) .................... 0 and 1
        listed(range(3), quote='"') ......... "0", "1" and "2"
        listed(range(4), max=3) ............. 0, 1, 2 and 1 more
        listed(range(5), 'number', max=3) ... 0, 1, 2 and 2 more numbers
        listed(range(6), 'category') ........ 6 categories
        listed(7, "leaf", "leaves") ......... 7 leaves

    If singular form is provided but max not set the description-only
    mode is activated as shown in the last two examples. Also, an int
    can be used in this case to get a simple inflection functionality.
    """
    # An integer argument is shorthand for range(n)
    if isinstance(items, int):
        items = range(items)
    items = list(items)
    suffix = " more"
    # Singular given without a maximum activates description-only mode
    if singular is not None and max is None:
        max = 0
        suffix = ""
    # Derive the plural form when not explicitly provided
    if singular is not None and plural is None:
        plural = pluralize(singular)
    # Stringify and optionally quote every item
    quoted = ["{0}{1}{0}".format(quote, item) for item in items]
    if max is not None:
        # Special case: description mode with an empty list
        if max == 0 and not quoted:
            return "0 {0}".format(plural)
        # Truncate the list and describe the remainder
        if len(quoted) > max:
            rest = len(quoted[max:])
            quoted = quoted[:max]
            if singular is not None:
                suffix += " {0}".format(singular if rest == 1 else plural)
            quoted.append("{0}{1}".format(rest, suffix))
    # Join with commas, using 'and' before the final item
    if len(quoted) < 2:
        return "".join(quoted)
    return ", ".join(quoted[:-2] + [" and ".join(quoted[-2:])])
def split(values, separator=re.compile("[ ,]+")):
    """
    Convert space-or-comma-separated values into a single list

    Common use case for this is merging content of options with multiple
    values allowed into a single list of strings thus allowing any of
    the formats below and converts them into ['a', 'b', 'c']::

        --option a --option b --option c ... ['a', 'b', 'c']
        --option a,b --option c ............ ['a,b', 'c']
        --option 'a b c' ................... ['a b c']

    Accepts both string and list. By default space and comma are used as
    value separators. Use any regular expression for custom separator.
    """
    # Normalize a single string into a one-element list
    if not isinstance(values, list):
        values = [values]
    # Split every element and flatten the result
    result = []
    for value in values:
        result.extend(separator.split(value))
    return result
def info(message, newline=True):
    """ Write an informational message to standard error output """
    # Append a newline unless explicitly suppressed by the caller
    suffix = "\n" if newline else ""
    sys.stderr.write(message + suffix)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Filtering
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def filter(filter, data, sensitive=True, regexp=False):
    """
    Return true if provided filter matches given dictionary of values

    Filter supports disjunctive normal form with '|' used for OR, '&'
    for AND and '-' for negation. Individual values are prefixed with
    'value:', leading/trailing white-space is stripped. For example::

        tag: Tier1 | tag: Tier2 | tag: Tier3
        category: Sanity, Security & tag: -destructive

    Note that multiple comma-separated values can be used as a syntactic
    sugar to shorten the filter notation::

        tag: A, B, C ---> tag: A | tag: B | tag: C

    Values should be provided as a dictionary of lists each describing
    the values against which the filter is to be matched. For example::

        data = {tag: ["Tier1", "TIPpass"], category: ["Sanity"]}

    Other types of dictionary values are converted into a string.
    A FilterError exception is raised when a dimension parsed from the
    filter is not found in the data dictionary. Set option 'sensitive'
    to False to enable case-insensitive matching. If 'regexp' option is
    True, regular expressions can be used in the filter values as well.
    """
    # NOTE: 'filter' intentionally shadows the builtin -- kept for
    # backward compatibility of the public API.

    def match_value(pattern, text):
        """ Match value against data (simple or regexp) """
        if regexp:
            return re.match("^{0}$".format(pattern), text)
        else:
            return pattern == text

    def check_value(dimension, value):
        """ Check whether the value matches data """
        # E.g. value = 'A, B' or value = "C" or value = "-D"
        # If there are multiple values, at least one must match
        for atom in re.split(r"\s*,\s*", value):
            # Handle negative values (check the whole data for non-presence)
            if atom.startswith("-"):
                atom = atom[1:]
                # Check each value for given dimension
                for dato in data[dimension]:
                    if match_value(atom, dato):
                        break
                # Pattern not found ---> good
                else:
                    return True
            # Handle positive values (return True upon first successful match)
            else:
                # Check each value for given dimension
                for dato in data[dimension]:
                    if match_value(atom, dato):
                        # Pattern found ---> good
                        return True
        # No value matched the data
        return False

    def check_dimension(dimension, values):
        """ Check whether all values for given dimension match data """
        # E.g. dimension = 'tag', values = ['A, B', 'C', '-D']
        # Raise exception upon unknown dimension
        if dimension not in data:
            raise FilterError("Invalid filter '{0}'".format(dimension))
        # Every value must match at least one value for data
        return all([check_value(dimension, value) for value in values])

    def check_clause(clause):
        """ Split into literals and check whether all match """
        # E.g. clause = 'tag: A, B & tag: C & tag: -D'
        # Split into individual literals by dimension
        literals = dict()
        for literal in re.split(r"\s*&\s*", clause):
            # E.g. literal = 'tag: A, B'
            # Make sure the literal matches dimension:value format
            matched = re.match(r"^(.*)\s*:\s*(.*)$", literal)
            if not matched:
                raise FilterError("Invalid filter '{0}'".format(literal))
            dimension, value = matched.groups()
            values = [value]
            # Append the literal value(s) to corresponding dimension list
            literals.setdefault(dimension, []).extend(values)
        # For each dimension all literals must match given data
        return all([check_dimension(dimension, values)
                    for dimension, values in literals.items()])

    # Default to True if no filter given, bail out if weird data given
    if filter is None or filter == "":
        return True
    if not isinstance(data, dict):
        raise FilterError("Invalid data type '{0}'".format(type(data)))
    # Make sure that data dictionary contains lists of strings
    # (unicode on Python 2, str on Python 3)
    data = copy.deepcopy(data)
    try:  # pragma: no cover
        for key in data:
            if isinstance(data[key], list):
                data[key] = [unicode(item) for item in data[key]]
            else:
                data[key] = [unicode(data[key])]
    except NameError:  # pragma: no cover
        for key in data:
            if isinstance(data[key], list):
                data[key] = [str(item) for item in data[key]]
            else:
                data[key] = [str(data[key])]
    # Turn all data into lowercase if sensitivity is off
    if not sensitive:
        filter = filter.lower()
        lowered = dict()
        for key, values in data.items():
            lowered[key.lower()] = [value.lower() for value in values]
        data = lowered
    # At least one clause must be true
    return any([check_clause(clause)
                for clause in re.split(r"\s*\|\s*", filter)])
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Logging
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Logging(object):
    """ Logging Configuration

    Wraps a standard ``logging`` logger with custom levels (CACHE, DATA,
    ALL) and a colored output formatter.  Created loggers are cached by
    name in ``_loggers`` so repeated instantiation with the same name
    reuses the same underlying logger object.
    """
    # Color mapping (log level -> color name understood by color())
    COLORS = {
        LOG_ERROR: "red",
        LOG_WARN: "yellow",
        LOG_INFO: "blue",
        LOG_DEBUG: "green",
        LOG_CACHE: "cyan",
        LOG_DATA: "magenta",
    }
    # Environment variable mapping (DEBUG value -> log level)
    MAPPING = {
        0: LOG_WARN,
        1: LOG_INFO,
        2: LOG_DEBUG,
        3: LOG_CACHE,
        4: LOG_DATA,
        5: LOG_ALL,
    }
    # All levels
    LEVELS = "CRITICAL DEBUG ERROR FATAL INFO NOTSET WARN WARNING".split()

    # Default log level is WARN
    _level = LOG_WARN
    # Already initialized loggers by their name
    _loggers = dict()

    def __init__(self, name='fmf'):
        """ Create the named logger, or fetch it from the class cache """
        # Use existing logger if already initialized
        try:
            self.logger = Logging._loggers[name]
        # Otherwise create a new one, save it and set it
        except KeyError:
            self.logger = self._create_logger(name=name)
            Logging._loggers[name] = self.logger
        self.set()

    class ColoredFormatter(logging.Formatter):
        """ Custom color formatter for logging """
        def format(self, record):
            # Handle custom log level names
            if record.levelno == LOG_ALL:
                levelname = "ALL"
            elif record.levelno == LOG_DATA:
                levelname = "DATA"
            elif record.levelno == LOG_CACHE:
                levelname = "CACHE"
            else:
                levelname = record.levelname
            # Map log level to appropriate color
            try:
                colour = Logging.COLORS[record.levelno]
            except KeyError:
                colour = "black"
            # Color the log level, use brackets when coloring off
            if Coloring().enabled():
                level = color(" " + levelname + " ", "lightwhite", colour)
            else:
                level = "[{0}]".format(levelname)
            return u"{0} {1}".format(level, record.getMessage())

    @staticmethod
    def _create_logger(name='fmf', level=None):
        """ Create fmf logger

        NOTE(review): the 'level' parameter is never read -- it is
        immediately shadowed by the loop variable below.
        """
        # Create logger, handler and formatter
        logger = logging.getLogger(name)
        handler = logging.StreamHandler()
        handler.setFormatter(Logging.ColoredFormatter())
        logger.addHandler(handler)
        # Save log levels in the logger itself (backward compatibility)
        for level in Logging.LEVELS:
            setattr(logger, level, getattr(logging, level))
        # Additional logging constants and methods for cache and xmlrpc
        logger.DATA = LOG_DATA
        logger.CACHE = LOG_CACHE
        logger.ALL = LOG_ALL
        logger.cache = lambda message: logger.log(LOG_CACHE, message) # NOQA
        logger.data = lambda message: logger.log(LOG_DATA, message) # NOQA
        logger.all = lambda message: logger.log(LOG_ALL, message) # NOQA
        return logger

    def set(self, level=None):
        """
        Set the default log level

        If the level is not specified environment variable DEBUG is used
        with the following meaning::

            DEBUG=0 ... LOG_WARN (default)
            DEBUG=1 ... LOG_INFO
            DEBUG=2 ... LOG_DEBUG
            DEBUG=3 ... LOG_CACHE
            DEBUG=4 ... LOG_DATA
            DEBUG=5 ... LOG_ALL (log all messages)
        """
        # If level specified, use given
        if level is not None:
            Logging._level = level
        # Otherwise attempt to detect from the environment
        else:
            try:
                Logging._level = Logging.MAPPING[int(os.environ["DEBUG"])]
            # Fall back to WARN on a missing or non-numeric DEBUG value
            except Exception:
                Logging._level = logging.WARN
        self.logger.setLevel(Logging._level)

    def get(self):
        """ Get the current log level """
        return self.logger.level
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Coloring
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Coloring(object):
    """ Coloring configuration (process-wide singleton) """

    # Default color mode is auto-detected from the terminal presence
    _mode = None
    MODES = ["COLOR_OFF", "COLOR_ON", "COLOR_AUTO"]

    # We need only a single config instance
    _instance = None

    def __new__(cls, *args, **kwargs):
        """ Make sure we create a single instance only """
        if not cls._instance:
            # Do not forward args/kwargs to object.__new__() -- doing so
            # raises TypeError on Python 3.3+; the 'mode' argument is
            # consumed by __init__() instead.
            cls._instance = super(Coloring, cls).__new__(cls)
        return cls._instance

    def __init__(self, mode=None):
        """ Initialize the coloring mode """
        # Nothing to do if already initialized
        if self._mode is not None:
            return
        # Set the mode
        self.set(mode)

    def set(self, mode=None):
        """
        Set the coloring mode

        If enabled, some objects (like case run Status) are printed in color
        to easily spot failures, errors and so on. By default the feature is
        enabled when script is attached to a terminal. Possible values are::

            COLOR=0 ... COLOR_OFF .... coloring disabled
            COLOR=1 ... COLOR_ON ..... coloring enabled
            COLOR=2 ... COLOR_AUTO ... if terminal attached (default)

        Environment variable COLOR can be used to set up the coloring to the
        desired mode without modifying code.

        :raises RuntimeError: when mode is outside the 0-2 range
        """
        # Detect from the environment if no mode given (only once)
        if mode is None:
            # Nothing to do if already detected
            if self._mode is not None:
                return
            # Detect from the environment variable COLOR
            try:
                mode = int(os.environ["COLOR"])
            except Exception:
                mode = COLOR_AUTO
        elif mode < 0 or mode > 2:
            raise RuntimeError("Invalid color mode '{0}'".format(mode))
        self._mode = mode
        log.debug(
            "Coloring {0} ({1})".format(
                "enabled" if self.enabled() else "disabled",
                self.MODES[self._mode]))

    def get(self):
        """ Get the current color mode """
        return self._mode

    def enabled(self):
        """ True if coloring is currently enabled """
        # In auto-detection mode color enabled when terminal attached
        if self._mode == COLOR_AUTO:
            return sys.stdout.isatty()
        return self._mode == COLOR_ON
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Default Logger
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create the default output logger
log = Logging('fmf').logger
|
psss/fmf | fmf/utils.py | Logging._create_logger | python | def _create_logger(name='fmf', level=None):
# Create logger, handler and formatter
logger = logging.getLogger(name)
handler = logging.StreamHandler()
handler.setFormatter(Logging.ColoredFormatter())
logger.addHandler(handler)
# Save log levels in the logger itself (backward compatibility)
for level in Logging.LEVELS:
setattr(logger, level, getattr(logging, level))
# Additional logging constants and methods for cache and xmlrpc
logger.DATA = LOG_DATA
logger.CACHE = LOG_CACHE
logger.ALL = LOG_ALL
logger.cache = lambda message: logger.log(LOG_CACHE, message) # NOQA
logger.data = lambda message: logger.log(LOG_DATA, message) # NOQA
logger.all = lambda message: logger.log(LOG_ALL, message) # NOQA
return logger | Create fmf logger | train | https://github.com/psss/fmf/blob/419f2f195d92339b8f9e5d11c0bea0f303e5fd75/fmf/utils.py#L338-L355 | null | class Logging(object):
""" Logging Configuration """
# Color mapping
COLORS = {
LOG_ERROR: "red",
LOG_WARN: "yellow",
LOG_INFO: "blue",
LOG_DEBUG: "green",
LOG_CACHE: "cyan",
LOG_DATA: "magenta",
}
# Environment variable mapping
MAPPING = {
0: LOG_WARN,
1: LOG_INFO,
2: LOG_DEBUG,
3: LOG_CACHE,
4: LOG_DATA,
5: LOG_ALL,
}
# All levels
LEVELS = "CRITICAL DEBUG ERROR FATAL INFO NOTSET WARN WARNING".split()
# Default log level is WARN
_level = LOG_WARN
# Already initialized loggers by their name
_loggers = dict()
def __init__(self, name='fmf'):
# Use existing logger if already initialized
try:
self.logger = Logging._loggers[name]
# Otherwise create a new one, save it and set it
except KeyError:
self.logger = self._create_logger(name=name)
Logging._loggers[name] = self.logger
self.set()
class ColoredFormatter(logging.Formatter):
""" Custom color formatter for logging """
def format(self, record):
# Handle custom log level names
if record.levelno == LOG_ALL:
levelname = "ALL"
elif record.levelno == LOG_DATA:
levelname = "DATA"
elif record.levelno == LOG_CACHE:
levelname = "CACHE"
else:
levelname = record.levelname
# Map log level to appropriate color
try:
colour = Logging.COLORS[record.levelno]
except KeyError:
colour = "black"
# Color the log level, use brackets when coloring off
if Coloring().enabled():
level = color(" " + levelname + " ", "lightwhite", colour)
else:
level = "[{0}]".format(levelname)
return u"{0} {1}".format(level, record.getMessage())
@staticmethod
def set(self, level=None):
"""
Set the default log level
If the level is not specified environment variable DEBUG is used
with the following meaning::
DEBUG=0 ... LOG_WARN (default)
DEBUG=1 ... LOG_INFO
DEBUG=2 ... LOG_DEBUG
DEBUG=3 ... LOG_CACHE
DEBUG=4 ... LOG_DATA
DEBUG=5 ... LOG_ALL (log all messages)
"""
# If level specified, use given
if level is not None:
Logging._level = level
# Otherwise attempt to detect from the environment
else:
try:
Logging._level = Logging.MAPPING[int(os.environ["DEBUG"])]
except Exception:
Logging._level = logging.WARN
self.logger.setLevel(Logging._level)
def get(self):
""" Get the current log level """
return self.logger.level
|
openstax/rhaptos.cnxmlutils | rhaptos/cnxmlutils/xml2xhtml.py | makeXsl | python | def makeXsl(filename):
pkg = 'cnxml2html'
package = ''.join(['.' + x for x in __name__.split('.')[:-1]])[1:]
if package != '':
pkg = package + '.' + pkg
path = pkg_resources.resource_filename(pkg, filename)
xml = etree.parse(path)
return etree.XSLT(xml) | Helper that creates a XSLT stylesheet | train | https://github.com/openstax/rhaptos.cnxmlutils/blob/c32b1a7428dc652e8cd745f3fdf4019a20543649/rhaptos/cnxmlutils/xml2xhtml.py#L27-L35 | null | """
Copyright (C) 2013 Rice University
This software is subject to the provisions of the GNU AFFERO GENERAL PUBLIC LICENSE Version 3.0 (AGPL).
See LICENSE.txt for details.
"""
import os
import sys
import tempfile
from copy import deepcopy
import shutil
import zipfile
import urllib
import pkg_resources
from cStringIO import StringIO
from lxml import etree, html
from rhaptos.cnxmlutils.utils import (
NAMESPACES,
XHTML_INCLUDE_XPATH as INCLUDE_XPATH,
XHTML_MODULE_BODY_XPATH as MODULE_BODY_XPATH,
)
dirname = os.path.dirname(__file__)
def transform_collxml(collxml_file):
""" Given a collxml file (collection.xml) this returns an HTML version of it
(including "include" anchor links to the modules) """
xml = etree.parse(collxml_file)
xslt = makeXsl('collxml2xhtml.xsl')
xml = xslt(xml)
return xml
def transform_cnxml(cnxml_file):
""" Given a module cnxml file (index.cnxml) this returns an HTML version of it """
xml = etree.parse(cnxml_file)
xslt = makeXsl('cnxml2xhtml.xsl')
xml = xslt(xml)
return xml
def transform_collection(collection_dir):
""" Given an unzipped collection generate a giant HTML file representing
the entire collection (including loading and converting individual modules) """
collxml_file = open(os.path.join(collection_dir, 'collection.xml'))
collxml_html = transform_collxml(collxml_file)
# For each included module, parse and convert it
for node in INCLUDE_XPATH(collxml_html):
href = node.attrib['href']
module = href.split('@')[0]
# version = None # We don't care about version
module_dir = os.path.join(collection_dir, module)
# By default, use the index_auto_generated.cnxml file for the module
module_path = os.path.join(module_dir, 'index_auto_generated.cnxml')
if not os.path.exists(module_path):
module_path = os.path.join(module_dir, 'index.cnxml')
module_html = transform_cnxml(module_path)
# Replace the include link with the body of the module
module_body = MODULE_BODY_XPATH(module_html)
node.getparent().replace(node, module_body[0])
return collxml_html
def main():
try:
import argparse
except ImportError:
print "argparse is needed for commandline"
return 2
parser = argparse.ArgumentParser(description='Convert a Connexions XML markup to HTML (cnxml, collxml, and mdml)')
parser.add_argument('-d', dest='collection_dir', help='Convert an unzipped collection to a single HTML file. Provide /path/to/collection')
parser.add_argument('-c', dest='collection', help='The file being converted is a collxml document (collection definition)', type=argparse.FileType('r'))
parser.add_argument('-m', dest='module', help='The file being converted is a cnxml document (module)', type=argparse.FileType('r'))
parser.add_argument('html_file', help='/path/to/outputfile', nargs='?', type=argparse.FileType('w'), default=sys.stdout)
args = parser.parse_args()
if args.collection_dir:
html = transform_collection(args.collection_dir)
elif args.collection:
html = transform_collxml(args.collection)
elif args.module:
html = transform_cnxml(args.module)
else:
print >> sys.stderr, "Must specify either -d -c or -m"
return 1
args.html_file.write(etree.tostring(html))
if __name__ == '__main__':
sys.exit(main())
|
openstax/rhaptos.cnxmlutils | rhaptos/cnxmlutils/xml2xhtml.py | transform_collxml | python | def transform_collxml(collxml_file):
xml = etree.parse(collxml_file)
xslt = makeXsl('collxml2xhtml.xsl')
xml = xslt(xml)
return xml | Given a collxml file (collection.xml) this returns an HTML version of it
(including "include" anchor links to the modules) | train | https://github.com/openstax/rhaptos.cnxmlutils/blob/c32b1a7428dc652e8cd745f3fdf4019a20543649/rhaptos/cnxmlutils/xml2xhtml.py#L37-L44 | [
"def makeXsl(filename):\n \"\"\" Helper that creates a XSLT stylesheet \"\"\"\n pkg = 'cnxml2html'\n package = ''.join(['.' + x for x in __name__.split('.')[:-1]])[1:]\n if package != '':\n pkg = package + '.' + pkg\n path = pkg_resources.resource_filename(pkg, filename)\n xml = etree.parse(path)\n return etree.XSLT(xml)\n"
] | """
Copyright (C) 2013 Rice University
This software is subject to the provisions of the GNU AFFERO GENERAL PUBLIC LICENSE Version 3.0 (AGPL).
See LICENSE.txt for details.
"""
import os
import sys
import tempfile
from copy import deepcopy
import shutil
import zipfile
import urllib
import pkg_resources
from cStringIO import StringIO
from lxml import etree, html
from rhaptos.cnxmlutils.utils import (
NAMESPACES,
XHTML_INCLUDE_XPATH as INCLUDE_XPATH,
XHTML_MODULE_BODY_XPATH as MODULE_BODY_XPATH,
)
dirname = os.path.dirname(__file__)
def makeXsl(filename):
    """ Load the named XSLT stylesheet from the cnxml2html resources """
    pkg = 'cnxml2html'
    # Qualify the resource package with this module's parent package
    parent = '.'.join(__name__.split('.')[:-1])
    if parent != '':
        pkg = parent + '.' + pkg
    path = pkg_resources.resource_filename(pkg, filename)
    stylesheet = etree.parse(path)
    return etree.XSLT(stylesheet)
def transform_cnxml(cnxml_file):
    """ Convert a module cnxml document (index.cnxml) into its HTML form """
    source = etree.parse(cnxml_file)
    stylesheet = makeXsl('cnxml2xhtml.xsl')
    return stylesheet(source)
def transform_collection(collection_dir):
    """ Given an unzipped collection generate a giant HTML file representing
    the entire collection (including loading and converting individual
    modules).

    :param collection_dir: path to the unzipped collection, expected to
        contain a 'collection.xml' file and one subdirectory per module
    :returns: the collection HTML tree with module bodies inlined
    """
    # Use a context manager so the collxml file handle is always closed
    with open(os.path.join(collection_dir, 'collection.xml')) as collxml_file:
        collxml_html = transform_collxml(collxml_file)
    # For each included module, parse and convert it
    for node in INCLUDE_XPATH(collxml_html):
        href = node.attrib['href']
        # Strip the version suffix, e.g. 'm123@1.2' -> 'm123'
        module = href.split('@')[0]
        module_dir = os.path.join(collection_dir, module)
        # By default, use the index_auto_generated.cnxml file for the module
        module_path = os.path.join(module_dir, 'index_auto_generated.cnxml')
        if not os.path.exists(module_path):
            module_path = os.path.join(module_dir, 'index.cnxml')
        module_html = transform_cnxml(module_path)
        # Replace the include link with the body of the module
        module_body = MODULE_BODY_XPATH(module_html)
        node.getparent().replace(node, module_body[0])
    return collxml_html
def main():
    """ Command-line entry point: parse arguments, run the requested
    conversion and write the resulting HTML to the output file.

    Returns 1 when no input mode was given, 2 when argparse is missing.
    """
    # argparse is only needed for the command line; fail gracefully
    # when it is unavailable (Python < 2.7 without the backport)
    try:
        import argparse
    except ImportError:
        print "argparse is needed for commandline"
        return 2
    parser = argparse.ArgumentParser(description='Convert a Connexions XML markup to HTML (cnxml, collxml, and mdml)')
    parser.add_argument('-d', dest='collection_dir', help='Convert an unzipped collection to a single HTML file. Provide /path/to/collection')
    parser.add_argument('-c', dest='collection', help='The file being converted is a collxml document (collection definition)', type=argparse.FileType('r'))
    parser.add_argument('-m', dest='module', help='The file being converted is a cnxml document (module)', type=argparse.FileType('r'))
    parser.add_argument('html_file', help='/path/to/outputfile', nargs='?', type=argparse.FileType('w'), default=sys.stdout)
    args = parser.parse_args()
    # Exactly one input mode must be chosen: directory, collxml or cnxml
    if args.collection_dir:
        html = transform_collection(args.collection_dir)
    elif args.collection:
        html = transform_collxml(args.collection)
    elif args.module:
        html = transform_cnxml(args.module)
    else:
        print >> sys.stderr, "Must specify either -d -c or -m"
        return 1
    # Serialize the resulting HTML tree to the requested output file
    args.html_file.write(etree.tostring(html))
if __name__ == '__main__':
sys.exit(main())
|
openstax/rhaptos.cnxmlutils | rhaptos/cnxmlutils/xml2xhtml.py | transform_cnxml | python | def transform_cnxml(cnxml_file):
xml = etree.parse(cnxml_file)
xslt = makeXsl('cnxml2xhtml.xsl')
xml = xslt(xml)
return xml | Given a module cnxml file (index.cnxml) this returns an HTML version of it | train | https://github.com/openstax/rhaptos.cnxmlutils/blob/c32b1a7428dc652e8cd745f3fdf4019a20543649/rhaptos/cnxmlutils/xml2xhtml.py#L46-L52 | [
"def makeXsl(filename):\n \"\"\" Helper that creates a XSLT stylesheet \"\"\"\n pkg = 'cnxml2html'\n package = ''.join(['.' + x for x in __name__.split('.')[:-1]])[1:]\n if package != '':\n pkg = package + '.' + pkg\n path = pkg_resources.resource_filename(pkg, filename)\n xml = etree.parse(path)\n return etree.XSLT(xml)\n"
] | """
Copyright (C) 2013 Rice University
This software is subject to the provisions of the GNU AFFERO GENERAL PUBLIC LICENSE Version 3.0 (AGPL).
See LICENSE.txt for details.
"""
import os
import sys
import tempfile
from copy import deepcopy
import shutil
import zipfile
import urllib
import pkg_resources
from cStringIO import StringIO
from lxml import etree, html
from rhaptos.cnxmlutils.utils import (
NAMESPACES,
XHTML_INCLUDE_XPATH as INCLUDE_XPATH,
XHTML_MODULE_BODY_XPATH as MODULE_BODY_XPATH,
)
dirname = os.path.dirname(__file__)
def makeXsl(filename):
""" Helper that creates a XSLT stylesheet """
pkg = 'cnxml2html'
package = ''.join(['.' + x for x in __name__.split('.')[:-1]])[1:]
if package != '':
pkg = package + '.' + pkg
path = pkg_resources.resource_filename(pkg, filename)
xml = etree.parse(path)
return etree.XSLT(xml)
def transform_collxml(collxml_file):
    """ Convert a collxml document (collection.xml) into its HTML form,
    keeping "include" anchor links pointing at the modules """
    source = etree.parse(collxml_file)
    stylesheet = makeXsl('collxml2xhtml.xsl')
    return stylesheet(source)
def transform_collection(collection_dir):
""" Given an unzipped collection generate a giant HTML file representing
the entire collection (including loading and converting individual modules) """
collxml_file = open(os.path.join(collection_dir, 'collection.xml'))
collxml_html = transform_collxml(collxml_file)
# For each included module, parse and convert it
for node in INCLUDE_XPATH(collxml_html):
href = node.attrib['href']
module = href.split('@')[0]
# version = None # We don't care about version
module_dir = os.path.join(collection_dir, module)
# By default, use the index_auto_generated.cnxml file for the module
module_path = os.path.join(module_dir, 'index_auto_generated.cnxml')
if not os.path.exists(module_path):
module_path = os.path.join(module_dir, 'index.cnxml')
module_html = transform_cnxml(module_path)
# Replace the include link with the body of the module
module_body = MODULE_BODY_XPATH(module_html)
node.getparent().replace(node, module_body[0])
return collxml_html
def main():
try:
import argparse
except ImportError:
print "argparse is needed for commandline"
return 2
parser = argparse.ArgumentParser(description='Convert a Connexions XML markup to HTML (cnxml, collxml, and mdml)')
parser.add_argument('-d', dest='collection_dir', help='Convert an unzipped collection to a single HTML file. Provide /path/to/collection')
parser.add_argument('-c', dest='collection', help='The file being converted is a collxml document (collection definition)', type=argparse.FileType('r'))
parser.add_argument('-m', dest='module', help='The file being converted is a cnxml document (module)', type=argparse.FileType('r'))
parser.add_argument('html_file', help='/path/to/outputfile', nargs='?', type=argparse.FileType('w'), default=sys.stdout)
args = parser.parse_args()
if args.collection_dir:
html = transform_collection(args.collection_dir)
elif args.collection:
html = transform_collxml(args.collection)
elif args.module:
html = transform_cnxml(args.module)
else:
print >> sys.stderr, "Must specify either -d -c or -m"
return 1
args.html_file.write(etree.tostring(html))
if __name__ == '__main__':
sys.exit(main())
|
openstax/rhaptos.cnxmlutils | rhaptos/cnxmlutils/xml2xhtml.py | transform_collection | python | def transform_collection(collection_dir):
collxml_file = open(os.path.join(collection_dir, 'collection.xml'))
collxml_html = transform_collxml(collxml_file)
# For each included module, parse and convert it
for node in INCLUDE_XPATH(collxml_html):
href = node.attrib['href']
module = href.split('@')[0]
# version = None # We don't care about version
module_dir = os.path.join(collection_dir, module)
# By default, use the index_auto_generated.cnxml file for the module
module_path = os.path.join(module_dir, 'index_auto_generated.cnxml')
if not os.path.exists(module_path):
module_path = os.path.join(module_dir, 'index.cnxml')
module_html = transform_cnxml(module_path)
# Replace the include link with the body of the module
module_body = MODULE_BODY_XPATH(module_html)
node.getparent().replace(node, module_body[0])
return collxml_html | Given an unzipped collection generate a giant HTML file representing
the entire collection (including loading and converting individual modules) | train | https://github.com/openstax/rhaptos.cnxmlutils/blob/c32b1a7428dc652e8cd745f3fdf4019a20543649/rhaptos/cnxmlutils/xml2xhtml.py#L54-L79 | [
"def transform_collxml(collxml_file):\n \"\"\" Given a collxml file (collection.xml) this returns an HTML version of it\n (including \"include\" anchor links to the modules) \"\"\"\n\n xml = etree.parse(collxml_file)\n xslt = makeXsl('collxml2xhtml.xsl')\n xml = xslt(xml)\n return xml\n",
"def transform_cnxml(cnxml_file):\n \"\"\" Given a module cnxml file (index.cnxml) this returns an HTML version of it \"\"\"\n\n xml = etree.parse(cnxml_file)\n xslt = makeXsl('cnxml2xhtml.xsl')\n xml = xslt(xml)\n return xml\n"
] | """
Copyright (C) 2013 Rice University
This software is subject to the provisions of the GNU AFFERO GENERAL PUBLIC LICENSE Version 3.0 (AGPL).
See LICENSE.txt for details.
"""
import os
import sys
import tempfile
from copy import deepcopy
import shutil
import zipfile
import urllib
import pkg_resources
from cStringIO import StringIO
from lxml import etree, html
from rhaptos.cnxmlutils.utils import (
NAMESPACES,
XHTML_INCLUDE_XPATH as INCLUDE_XPATH,
XHTML_MODULE_BODY_XPATH as MODULE_BODY_XPATH,
)
dirname = os.path.dirname(__file__)
def makeXsl(filename):
""" Helper that creates a XSLT stylesheet """
pkg = 'cnxml2html'
package = ''.join(['.' + x for x in __name__.split('.')[:-1]])[1:]
if package != '':
pkg = package + '.' + pkg
path = pkg_resources.resource_filename(pkg, filename)
xml = etree.parse(path)
return etree.XSLT(xml)
def transform_collxml(collxml_file):
""" Given a collxml file (collection.xml) this returns an HTML version of it
(including "include" anchor links to the modules) """
xml = etree.parse(collxml_file)
xslt = makeXsl('collxml2xhtml.xsl')
xml = xslt(xml)
return xml
def transform_cnxml(cnxml_file):
""" Given a module cnxml file (index.cnxml) this returns an HTML version of it """
xml = etree.parse(cnxml_file)
xslt = makeXsl('cnxml2xhtml.xsl')
xml = xslt(xml)
return xml
def main():
try:
import argparse
except ImportError:
print "argparse is needed for commandline"
return 2
parser = argparse.ArgumentParser(description='Convert a Connexions XML markup to HTML (cnxml, collxml, and mdml)')
parser.add_argument('-d', dest='collection_dir', help='Convert an unzipped collection to a single HTML file. Provide /path/to/collection')
parser.add_argument('-c', dest='collection', help='The file being converted is a collxml document (collection definition)', type=argparse.FileType('r'))
parser.add_argument('-m', dest='module', help='The file being converted is a cnxml document (module)', type=argparse.FileType('r'))
parser.add_argument('html_file', help='/path/to/outputfile', nargs='?', type=argparse.FileType('w'), default=sys.stdout)
args = parser.parse_args()
if args.collection_dir:
html = transform_collection(args.collection_dir)
elif args.collection:
html = transform_collxml(args.collection)
elif args.module:
html = transform_cnxml(args.module)
else:
print >> sys.stderr, "Must specify either -d -c or -m"
return 1
args.html_file.write(etree.tostring(html))
if __name__ == '__main__':
sys.exit(main())
|
openstax/rhaptos.cnxmlutils | rhaptos/cnxmlutils/addsectiontags.py | docHandler.storeSectionState | python | def storeSectionState(self, level):
# self.document.append("<!-- storeSectionState(): " + str(len(self.header_stack)) + " open section tags. " + str(self.header_stack) + "-->\n")
try:
# special case. we are not processing an OOo XML start tag which we
# are going to insert <section> before. we have reached a point
# where all sections need to be closed. EG </office:body> or </text:section>,
# both of which are hierarchical => scope closure for all open <section> tags
bClosedAllSections = ( level == u'0' )
if bClosedAllSections:
# have reached a point where all sections need to be closed
iSectionsClosed = len(self.header_stack)
while len(self.header_stack) > 0:
del(self.header_stack[-1])
return iSectionsClosed
if len(self.header_stack) == 0:
# no open section tags
iSectionsClosed = 0
self.header_stack.append(level)
else:
iLastLevel = self.header_stack[-1]
if level > iLastLevel:
# open sections tags AND no sections need closing
iSectionsClosed = 0
self.header_stack.append(level)
elif level == iLastLevel:
# open sections tags AND need to closed one of the sections
iSectionsClosed = 1
# imagine deleting the last level and then re-adding it
elif level < iLastLevel:
# open sections tags AND need to closed some of the sections
del(self.header_stack[-1])
iSectionsClosed = 1
iSectionsClosed += self.storeSectionState(level)
return iSectionsClosed
except IndexError:
print level
raise | Takes a header tagname (e.g. 'h1') and adjusts the
stack that remembers the headers seen. | train | https://github.com/openstax/rhaptos.cnxmlutils/blob/c32b1a7428dc652e8cd745f3fdf4019a20543649/rhaptos/cnxmlutils/addsectiontags.py#L129-L174 | [
"def storeSectionState(self, level):\n \"\"\"\n Takes a header tagname (e.g. 'h1') and adjusts the \n stack that remembers the headers seen.\n \"\"\"\n\n # self.document.append(\"<!-- storeSectionState(): \" + str(len(self.header_stack)) + \" open section tags. \" + str(self.header_stack) + \"-->\\n\")\n\n try:\n # special case. we are not processing an OOo XML start tag which we\n # are going to insert <section> before. we have reached a point\n # where all sections need to be closed. EG </office:body> or </text:section>,\n # both of which are hierarchical => scope closure for all open <section> tags\n bClosedAllSections = ( level == u'0' )\n if bClosedAllSections:\n # have reached a point where all sections need to be closed\n iSectionsClosed = len(self.header_stack)\n while len(self.header_stack) > 0:\n del(self.header_stack[-1])\n return iSectionsClosed\n\n if len(self.header_stack) == 0:\n # no open section tags\n iSectionsClosed = 0\n self.header_stack.append(level)\n else:\n iLastLevel = self.header_stack[-1]\n if level > iLastLevel:\n # open sections tags AND no sections need closing\n iSectionsClosed = 0\n self.header_stack.append(level)\n elif level == iLastLevel:\n # open sections tags AND need to closed one of the sections\n iSectionsClosed = 1\n # imagine deleting the last level and then re-adding it\n elif level < iLastLevel:\n # open sections tags AND need to closed some of the sections\n del(self.header_stack[-1])\n iSectionsClosed = 1\n iSectionsClosed += self.storeSectionState(level)\n\n return iSectionsClosed\n\n except IndexError:\n print level\n raise\n"
] | class docHandler(ContentHandler):
def __init__(self):
# on init, create links dictionary
self.document = []
self.header_stack = []
self.tableLevel = 0
self.listLevel = 0
self.deletion = 0
self.handlers = {
'table:table':self.handleTable,
'text:ordered-list':self.handleList,
'text:deletion':self.handleDeletion,
'text:h':self.handleHeader,
'text:section':self.handleSection,
'office:body':self.handleBody
}
def handleTable(self, name, end_tag, attrs={}):
if end_tag:
self.tableLevel -= 1
self.outputEndElement(name)
else:
self.tableLevel += 1
self.outputStartElement(name, attrs)
def handleList(self, name, end_tag, attrs={}):
if end_tag:
self.listLevel -= 1
self.outputEndElement(name)
else:
self.listLevel += 1
self.outputStartElement(name, attrs)
def handleDeletion(self, name, end_tag, attrs={}):
if end_tag:
self.deletion -= 1
self.outputEndElement(name)
else:
self.deletion += 1
self.outputStartElement(name, attrs)
def handleSection(self, name, end_tag, attrs={}):
# text:section is hierarchical while text:h is not
if not end_tag:
self.document.append("<!-- close all open sections -->\n")
self.endSections()
self.outputStartElement(name, attrs)
else:
self.document.append("<!-- close all open sections -->\n")
self.endSections()
self.outputEndElement(name)
def handleHeader(self, name, end_tag, attrs={}):
if self.tableLevel or self.listLevel or self.deletion:
return
level = attrs.get(u'text:level')
if not end_tag:
self.endSections(level)
id = attrs.get('id',self.generateId())
self.document.append("<section id='%s'>\n" %id)
self.document.append("<title>")
else:
self.document.append("</title>")
def handleBody(self, name, end_tag, attrs={}):
#head-> name
if not end_tag:
self.document.append('<office:body')
if attrs:
for attr, value in attrs.items():
self.document.append(' %s=%s' % (attr,quoteattr(value)))
self.document.append('>')
else:
self.endSections()
self.document.append('</office:body>')
def startElement(self, name, attrs):
handler = self.handlers.get(name, None)
if handler:
handler(name, end_tag=False, attrs=attrs)
else:
self.outputStartElement(name, attrs)
def outputStartElement(self, name, attrs):
self.document.append('<%s' % name)
if attrs:
for attr, value in attrs.items():
self.document.append(' %s=%s' % (attr,quoteattr(value)))
self.document.append('>')
def characters(self, ch):
self.document.append(escape(ch))
def endElement(self, name):
handler = self.handlers.get(name, None)
if handler:
handler(name, end_tag=True)
else:
self.outputEndElement(name)
def outputEndElement(self, name):
self.document += '</%s>' % name
def endSections(self, level=u'0'):
"""Closes all sections of level >= sectnum. Defaults to closing all open sections"""
iSectionsClosed = self.storeSectionState(level)
self.document.append("</section>\n" * iSectionsClosed)
def generateId(self):
return 'id-' + str(random.random())[2:]
|
openstax/rhaptos.cnxmlutils | rhaptos/cnxmlutils/addsectiontags.py | docHandler.endSections | python | def endSections(self, level=u'0'):
iSectionsClosed = self.storeSectionState(level)
self.document.append("</section>\n" * iSectionsClosed) | Closes all sections of level >= sectnum. Defaults to closing all open sections | train | https://github.com/openstax/rhaptos.cnxmlutils/blob/c32b1a7428dc652e8cd745f3fdf4019a20543649/rhaptos/cnxmlutils/addsectiontags.py#L176-L180 | [
"def storeSectionState(self, level):\n \"\"\"\n Takes a header tagname (e.g. 'h1') and adjusts the \n stack that remembers the headers seen.\n \"\"\"\n\n # self.document.append(\"<!-- storeSectionState(): \" + str(len(self.header_stack)) + \" open section tags. \" + str(self.header_stack) + \"-->\\n\")\n\n try:\n # special case. we are not processing an OOo XML start tag which we\n # are going to insert <section> before. we have reached a point\n # where all sections need to be closed. EG </office:body> or </text:section>,\n # both of which are hierarchical => scope closure for all open <section> tags\n bClosedAllSections = ( level == u'0' )\n if bClosedAllSections:\n # have reached a point where all sections need to be closed\n iSectionsClosed = len(self.header_stack)\n while len(self.header_stack) > 0:\n del(self.header_stack[-1])\n return iSectionsClosed\n\n if len(self.header_stack) == 0:\n # no open section tags\n iSectionsClosed = 0\n self.header_stack.append(level)\n else:\n iLastLevel = self.header_stack[-1]\n if level > iLastLevel:\n # open sections tags AND no sections need closing\n iSectionsClosed = 0\n self.header_stack.append(level)\n elif level == iLastLevel:\n # open sections tags AND need to closed one of the sections\n iSectionsClosed = 1\n # imagine deleting the last level and then re-adding it\n elif level < iLastLevel:\n # open sections tags AND need to closed some of the sections\n del(self.header_stack[-1])\n iSectionsClosed = 1\n iSectionsClosed += self.storeSectionState(level)\n\n return iSectionsClosed\n\n except IndexError:\n print level\n raise\n"
] | class docHandler(ContentHandler):
def __init__(self):
# on init, create links dictionary
self.document = []
self.header_stack = []
self.tableLevel = 0
self.listLevel = 0
self.deletion = 0
self.handlers = {
'table:table':self.handleTable,
'text:ordered-list':self.handleList,
'text:deletion':self.handleDeletion,
'text:h':self.handleHeader,
'text:section':self.handleSection,
'office:body':self.handleBody
}
def handleTable(self, name, end_tag, attrs={}):
if end_tag:
self.tableLevel -= 1
self.outputEndElement(name)
else:
self.tableLevel += 1
self.outputStartElement(name, attrs)
def handleList(self, name, end_tag, attrs={}):
if end_tag:
self.listLevel -= 1
self.outputEndElement(name)
else:
self.listLevel += 1
self.outputStartElement(name, attrs)
def handleDeletion(self, name, end_tag, attrs={}):
if end_tag:
self.deletion -= 1
self.outputEndElement(name)
else:
self.deletion += 1
self.outputStartElement(name, attrs)
def handleSection(self, name, end_tag, attrs={}):
# text:section is hierarchical while text:h is not
if not end_tag:
self.document.append("<!-- close all open sections -->\n")
self.endSections()
self.outputStartElement(name, attrs)
else:
self.document.append("<!-- close all open sections -->\n")
self.endSections()
self.outputEndElement(name)
def handleHeader(self, name, end_tag, attrs={}):
if self.tableLevel or self.listLevel or self.deletion:
return
level = attrs.get(u'text:level')
if not end_tag:
self.endSections(level)
id = attrs.get('id',self.generateId())
self.document.append("<section id='%s'>\n" %id)
self.document.append("<title>")
else:
self.document.append("</title>")
def handleBody(self, name, end_tag, attrs={}):
#head-> name
if not end_tag:
self.document.append('<office:body')
if attrs:
for attr, value in attrs.items():
self.document.append(' %s=%s' % (attr,quoteattr(value)))
self.document.append('>')
else:
self.endSections()
self.document.append('</office:body>')
def startElement(self, name, attrs):
handler = self.handlers.get(name, None)
if handler:
handler(name, end_tag=False, attrs=attrs)
else:
self.outputStartElement(name, attrs)
def outputStartElement(self, name, attrs):
self.document.append('<%s' % name)
if attrs:
for attr, value in attrs.items():
self.document.append(' %s=%s' % (attr,quoteattr(value)))
self.document.append('>')
def characters(self, ch):
self.document.append(escape(ch))
def endElement(self, name):
handler = self.handlers.get(name, None)
if handler:
handler(name, end_tag=True)
else:
self.outputEndElement(name)
def outputEndElement(self, name):
self.document += '</%s>' % name
def storeSectionState(self, level):
"""
Takes a header tagname (e.g. 'h1') and adjusts the
stack that remembers the headers seen.
"""
# self.document.append("<!-- storeSectionState(): " + str(len(self.header_stack)) + " open section tags. " + str(self.header_stack) + "-->\n")
try:
# special case. we are not processing an OOo XML start tag which we
# are going to insert <section> before. we have reached a point
# where all sections need to be closed. EG </office:body> or </text:section>,
# both of which are hierarchical => scope closure for all open <section> tags
bClosedAllSections = ( level == u'0' )
if bClosedAllSections:
# have reached a point where all sections need to be closed
iSectionsClosed = len(self.header_stack)
while len(self.header_stack) > 0:
del(self.header_stack[-1])
return iSectionsClosed
if len(self.header_stack) == 0:
# no open section tags
iSectionsClosed = 0
self.header_stack.append(level)
else:
iLastLevel = self.header_stack[-1]
if level > iLastLevel:
# open sections tags AND no sections need closing
iSectionsClosed = 0
self.header_stack.append(level)
elif level == iLastLevel:
# open sections tags AND need to closed one of the sections
iSectionsClosed = 1
# imagine deleting the last level and then re-adding it
elif level < iLastLevel:
# open sections tags AND need to closed some of the sections
del(self.header_stack[-1])
iSectionsClosed = 1
iSectionsClosed += self.storeSectionState(level)
return iSectionsClosed
except IndexError:
print level
raise
def generateId(self):
return 'id-' + str(random.random())[2:]
|
openstax/rhaptos.cnxmlutils | rhaptos/cnxmlutils/utils.py | _pre_tidy | python | def _pre_tidy(html):
tree = etree.fromstring(html, etree.HTMLParser())
for el in tree.xpath('//u'):
el.tag = 'em'
c = el.attrib.get('class', '').split()
if 'underline' not in c:
c.append('underline')
el.attrib['class'] = ' '.join(c)
return tohtml(tree) | This method transforms a few things before tidy runs. When we get rid
of tidy, this can go away. | train | https://github.com/openstax/rhaptos.cnxmlutils/blob/c32b1a7428dc652e8cd745f3fdf4019a20543649/rhaptos/cnxmlutils/utils.py#L43-L54 | null | # -*- coding: utf-8 -*-
"""
Copyright (C) 2013 Rice University
This software is subject to the provisions of the
GNU AFFERO GENERAL PUBLIC LICENSE Version 3.0 (AGPL).
See LICENSE.txt for details.
Various utility/helper functions...
Some of thses are used to tranform from one source format to another.
"""
import pkg_resources
from lxml import etree
from lxml.html import tostring as tohtml
from functools import partial
# requires tidy-html5 from https://github.com/w3c/tidy-html5
# Installation: http://goo.gl/FG27n
from tidylib import tidy_document
# for unescaping math from Mathjax script tag
from xml.sax.saxutils import unescape
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from . import __version__ as version
__all__ = (
'NAMESPACES', 'XHTML_INCLUDE_XPATH', 'XHTML_MODULE_BODY_XPATH',
'cnxml_to_html', 'html_to_cnxml',
)
NAMESPACES = {
'xhtml': 'http://www.w3.org/1999/xhtml',
}
XHTML_INCLUDE_XPATH = etree.XPath('//xhtml:a[@class="include"]',
namespaces=NAMESPACES)
XHTML_MODULE_BODY_XPATH = etree.XPath('//xhtml:body', namespaces=NAMESPACES)
def _post_tidy(html):
""" This method transforms post tidy. Will go away when tidy goes away. """
tree = etree.fromstring(html)
ems = tree.xpath(
"//xh:em[@class='underline']|//xh:em[contains(@class, ' underline ')]",
namespaces={'xh': 'http://www.w3.org/1999/xhtml'})
for el in ems:
c = el.attrib.get('class', '').split()
c.remove('underline')
el.tag = '{http://www.w3.org/1999/xhtml}u'
if c:
el.attrib['class'] = ' '.join(c)
elif 'class' in el.attrib:
del(el.attrib['class'])
return tree
# Tidy up the Google Docs HTML Soup
def _tidy2xhtml5(html):
"""Tidy up a html4/5 soup to a parsable valid XHTML5.
Requires tidy-html5 from https://github.com/w3c/tidy-html5
Installation: http://goo.gl/FG27n
"""
html = _io2string(html)
html = _pre_tidy(html) # Pre-process
xhtml5, errors =\
tidy_document(html,
options={
# do not merge nested div elements
# - preserve semantic block structrues
'merge-divs': 0,
# create xml output
'output-xml': 1,
# Don't use indent, adds extra linespace or linefeed
# which are big problems
'indent': 0,
# No tidy meta tag in output
'tidy-mark': 0,
# No wrapping
'wrap': 0,
# Help ensure validation
'alt-text': '',
# No sense in transitional for tool-generated markup
'doctype': 'strict',
# May not get what you expect,
# but you will get something
'force-output': 1,
# remove HTML entities like e.g. nbsp
'numeric-entities': 1,
# remove
'clean': 1,
'bare': 1,
'word-2000': 1,
'drop-proprietary-attributes': 1,
# enclose text in body always with <p>...</p>
'enclose-text': 1,
# transforms <i> and <b> to <em> and <strong>
'logical-emphasis': 1,
# do not tidy all MathML elements!
# List of MathML 3.0 elements from
# http://www.w3.org/TR/MathML3/appendixi.html#index.elem
'new-inline-tags': 'abs, and, annotation, '
'annotation-xml, apply, approx, arccos, arccosh, '
'arccot, arccoth, arccsc, arccsch, arcsec, arcsech, '
'arcsin, arcsinh, arctan, arctanh, arg, bind, bvar, '
'card, cartesianproduct, cbytes, ceiling, cerror, '
'ci, cn, codomain, complexes, compose, condition, '
'conjugate, cos, cosh, cot, coth, cs, csc, csch, '
'csymbol, curl, declare, degree, determinant, diff, '
'divergence, divide, domain, domainofapplication, '
'el, emptyset, eq, equivalent, eulergamma, exists, '
'exp, exponentiale, factorial, factorof, false, '
'floor, fn, forall, gcd, geq, grad, gt, ident, '
'image, imaginary, imaginaryi, implies, in, '
'infinity, int, integers, intersect, interval, '
'inverse, lambda, laplacian, lcm, leq, limit, list, '
'ln, log, logbase, lowlimit, lt, maction, malign, '
'maligngroup, malignmark, malignscope, math, '
'matrix, matrixrow, max, mean, median, menclose, '
'merror, mfenced, mfrac, mfraction, mglyph, mi, '
'min, minus, mlabeledtr, mlongdiv, mmultiscripts, '
'mn, mo, mode, moment, momentabout, mover, mpadded, '
'mphantom, mprescripts, mroot, mrow, ms, mscarries, '
'mscarry, msgroup, msline, mspace, msqrt, msrow, '
'mstack, mstyle, msub, msubsup, msup, mtable, mtd, '
'mtext, mtr, munder, munderover, naturalnumbers, '
'neq, none, not, notanumber, note, notin, '
'notprsubset, notsubset, or, otherwise, '
'outerproduct, partialdiff, pi, piece, piecewise, '
'plus, power, primes, product, prsubset, quotient, '
'rationals, real, reals, reln, rem, root, '
'scalarproduct, sdev, sec, sech, selector, '
'semantics, sep, set, setdiff, share, sin, sinh, '
'subset, sum, tan, tanh, tendsto, times, transpose, '
'true, union, uplimit, variance, vector, '
'vectorproduct, xor',
'doctype': 'html5',
})
# return xhtml5
# return the tree itself, there is another modification below to avoid
# another parse
return _post_tidy(xhtml5)
def _io2string(s):
"""If necessary it will convert the io object to an string
"""
if hasattr(s, 'read'):
s = s.read()
return s
def _string2io(s):
"""If necessary it will convert the string to an io object
(e.g. with read and write methods).
"""
if not hasattr(s, 'read'):
s = StringIO(s)
return s
def _make_xsl(filename):
"""Helper that creates a XSLT stylesheet """
path = pkg_resources.resource_filename('rhaptos.cnxmlutils.xsl', filename)
xml = etree.parse(path)
return etree.XSLT(xml)
def _transform(xsl_filename, xml, **kwargs):
"""Transforms the xml using the specifiec xsl file."""
xslt = _make_xsl(xsl_filename)
xml = xslt(xml, **kwargs)
return xml
def _unescape_math(xml):
"""Unescapes Math from Mathjax to MathML."""
xpath_math_script = etree.XPath(
'//x:script[@type="math/mml"]',
namespaces={'x': 'http://www.w3.org/1999/xhtml'})
math_script_list = xpath_math_script(xml)
for mathscript in math_script_list:
math = mathscript.text
# some browsers double escape like e.g. Firefox
math = unescape(unescape(math))
mathscript.clear()
mathscript.set('type', 'math/mml')
new_math = etree.fromstring(math)
mathscript.append(new_math)
return xml
def cnxml_to_html(cnxml_source):
"""Transform the CNXML source to HTML"""
source = _string2io(cnxml_source)
xml = etree.parse(source)
# Run the CNXML to HTML transform
xml = _transform('cnxml-to-html5.xsl', xml,
version='"{}"'.format(version))
xml = XHTML_MODULE_BODY_XPATH(xml)
return etree.tostring(xml[0])
ALOHA2HTML_TRANSFORM_PIPELINE = [
partial(_transform, 'aloha-to-html5-pass01-leveled-headers.xsl'),
partial(_transform, 'aloha-to-html5-pass02-new-min-header-level.xsl'),
partial(_transform, 'aloha-to-html5-pass03-nested-headers.xsl'),
partial(_transform, 'aloha-to-html5-pass04-headers2sections.xsl'),
_unescape_math,
partial(_transform, 'aloha-to-html5-pass05-mathjax2mathml.xsl'),
partial(_transform, 'aloha-to-html5-pass06-postprocessing.xsl'),
]
def aloha_to_etree(html_source):
""" Converts HTML5 from Aloha editor output to a lxml etree. """
xml = _tidy2xhtml5(html_source)
for i, transform in enumerate(ALOHA2HTML_TRANSFORM_PIPELINE):
xml = transform(xml)
return xml
def aloha_to_html(html_source):
"""Converts HTML5 from Aloha to a more structured HTML5"""
xml = aloha_to_etree(html_source)
return etree.tostring(xml, pretty_print=True)
def html_to_cnxml(html_source, cnxml_source):
"""Transform the HTML to CNXML. We need the original CNXML content in
order to preserve the metadata in the CNXML document.
"""
source = _string2io(html_source)
xml = etree.parse(source)
cnxml = etree.parse(_string2io(cnxml_source))
# Run the HTML to CNXML transform on it
xml = _transform('html5-to-cnxml.xsl', xml)
# Replace the original content element with the transformed one.
namespaces = {'c': 'http://cnx.rice.edu/cnxml'}
xpath = etree.XPath('//c:content', namespaces=namespaces)
replaceable_node = xpath(cnxml)[0]
replaceable_node.getparent().replace(replaceable_node, xml.getroot())
# Set the content into the existing cnxml source
return etree.tostring(cnxml)
HTML2VALID_CNXML_TRANSFORM_PIPELINE = [
partial(_transform, 'html5-to-cnxml-pass01-cleanup.xsl'),
partial(_transform, 'html5-to-cnxml-pass02-enclose-para.xsl'),
partial(_transform, 'html5-to-cnxml-pass03-xhtml2cnxml.xsl'),
# TODO: Recognize mime type of images here!
partial(_transform, 'html5-to-cnxml-pass04-postprocessing.xsl'),
partial(_transform, 'html5-to-cnxml-pass05-cnxml-id-generation.xsl'),
partial(_transform, 'html5-to-cnxml-pass06-cnxml-postprocessing.xsl'),
]
def etree_to_valid_cnxml(tree, **kwargs):
for i, transform in enumerate(HTML2VALID_CNXML_TRANSFORM_PIPELINE):
tree = transform(tree)
return etree.tostring(tree, **kwargs)
def html_to_valid_cnxml(html_source):
"""Transform the HTML to valid CNXML (used for OERPUB).
No original CNXML is needed. If HTML is from Aloha please use
aloha_to_html before using this method
"""
source = _string2io(html_source)
xml = etree.parse(source)
return etree_to_valid_cnxml(xml, pretty_print=True)
|
openstax/rhaptos.cnxmlutils | rhaptos/cnxmlutils/utils.py | _post_tidy | python | def _post_tidy(html):
tree = etree.fromstring(html)
ems = tree.xpath(
"//xh:em[@class='underline']|//xh:em[contains(@class, ' underline ')]",
namespaces={'xh': 'http://www.w3.org/1999/xhtml'})
for el in ems:
c = el.attrib.get('class', '').split()
c.remove('underline')
el.tag = '{http://www.w3.org/1999/xhtml}u'
if c:
el.attrib['class'] = ' '.join(c)
elif 'class' in el.attrib:
del(el.attrib['class'])
return tree | This method transforms post tidy. Will go away when tidy goes away. | train | https://github.com/openstax/rhaptos.cnxmlutils/blob/c32b1a7428dc652e8cd745f3fdf4019a20543649/rhaptos/cnxmlutils/utils.py#L57-L72 | null | # -*- coding: utf-8 -*-
"""
Copyright (C) 2013 Rice University
This software is subject to the provisions of the
GNU AFFERO GENERAL PUBLIC LICENSE Version 3.0 (AGPL).
See LICENSE.txt for details.
Various utility/helper functions...
Some of thses are used to tranform from one source format to another.
"""
import pkg_resources
from lxml import etree
from lxml.html import tostring as tohtml
from functools import partial
# requires tidy-html5 from https://github.com/w3c/tidy-html5
# Installation: http://goo.gl/FG27n
from tidylib import tidy_document
# for unescaping math from Mathjax script tag
from xml.sax.saxutils import unescape
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from . import __version__ as version
__all__ = (
'NAMESPACES', 'XHTML_INCLUDE_XPATH', 'XHTML_MODULE_BODY_XPATH',
'cnxml_to_html', 'html_to_cnxml',
)
NAMESPACES = {
'xhtml': 'http://www.w3.org/1999/xhtml',
}
XHTML_INCLUDE_XPATH = etree.XPath('//xhtml:a[@class="include"]',
namespaces=NAMESPACES)
XHTML_MODULE_BODY_XPATH = etree.XPath('//xhtml:body', namespaces=NAMESPACES)
def _pre_tidy(html):
""" This method transforms a few things before tidy runs. When we get rid
of tidy, this can go away. """
tree = etree.fromstring(html, etree.HTMLParser())
for el in tree.xpath('//u'):
el.tag = 'em'
c = el.attrib.get('class', '').split()
if 'underline' not in c:
c.append('underline')
el.attrib['class'] = ' '.join(c)
return tohtml(tree)
# Tidy up the Google Docs HTML Soup
def _tidy2xhtml5(html):
"""Tidy up a html4/5 soup to a parsable valid XHTML5.
Requires tidy-html5 from https://github.com/w3c/tidy-html5
Installation: http://goo.gl/FG27n
"""
html = _io2string(html)
html = _pre_tidy(html) # Pre-process
xhtml5, errors =\
tidy_document(html,
options={
# do not merge nested div elements
# - preserve semantic block structrues
'merge-divs': 0,
# create xml output
'output-xml': 1,
# Don't use indent, adds extra linespace or linefeed
# which are big problems
'indent': 0,
# No tidy meta tag in output
'tidy-mark': 0,
# No wrapping
'wrap': 0,
# Help ensure validation
'alt-text': '',
# No sense in transitional for tool-generated markup
'doctype': 'strict',
# May not get what you expect,
# but you will get something
'force-output': 1,
# remove HTML entities like e.g. nbsp
'numeric-entities': 1,
# remove
'clean': 1,
'bare': 1,
'word-2000': 1,
'drop-proprietary-attributes': 1,
# enclose text in body always with <p>...</p>
'enclose-text': 1,
# transforms <i> and <b> to <em> and <strong>
'logical-emphasis': 1,
# do not tidy all MathML elements!
# List of MathML 3.0 elements from
# http://www.w3.org/TR/MathML3/appendixi.html#index.elem
'new-inline-tags': 'abs, and, annotation, '
'annotation-xml, apply, approx, arccos, arccosh, '
'arccot, arccoth, arccsc, arccsch, arcsec, arcsech, '
'arcsin, arcsinh, arctan, arctanh, arg, bind, bvar, '
'card, cartesianproduct, cbytes, ceiling, cerror, '
'ci, cn, codomain, complexes, compose, condition, '
'conjugate, cos, cosh, cot, coth, cs, csc, csch, '
'csymbol, curl, declare, degree, determinant, diff, '
'divergence, divide, domain, domainofapplication, '
'el, emptyset, eq, equivalent, eulergamma, exists, '
'exp, exponentiale, factorial, factorof, false, '
'floor, fn, forall, gcd, geq, grad, gt, ident, '
'image, imaginary, imaginaryi, implies, in, '
'infinity, int, integers, intersect, interval, '
'inverse, lambda, laplacian, lcm, leq, limit, list, '
'ln, log, logbase, lowlimit, lt, maction, malign, '
'maligngroup, malignmark, malignscope, math, '
'matrix, matrixrow, max, mean, median, menclose, '
'merror, mfenced, mfrac, mfraction, mglyph, mi, '
'min, minus, mlabeledtr, mlongdiv, mmultiscripts, '
'mn, mo, mode, moment, momentabout, mover, mpadded, '
'mphantom, mprescripts, mroot, mrow, ms, mscarries, '
'mscarry, msgroup, msline, mspace, msqrt, msrow, '
'mstack, mstyle, msub, msubsup, msup, mtable, mtd, '
'mtext, mtr, munder, munderover, naturalnumbers, '
'neq, none, not, notanumber, note, notin, '
'notprsubset, notsubset, or, otherwise, '
'outerproduct, partialdiff, pi, piece, piecewise, '
'plus, power, primes, product, prsubset, quotient, '
'rationals, real, reals, reln, rem, root, '
'scalarproduct, sdev, sec, sech, selector, '
'semantics, sep, set, setdiff, share, sin, sinh, '
'subset, sum, tan, tanh, tendsto, times, transpose, '
'true, union, uplimit, variance, vector, '
'vectorproduct, xor',
'doctype': 'html5',
})
# return xhtml5
# return the tree itself, there is another modification below to avoid
# another parse
return _post_tidy(xhtml5)
def _io2string(s):
"""If necessary it will convert the io object to an string
"""
if hasattr(s, 'read'):
s = s.read()
return s
def _string2io(s):
"""If necessary it will convert the string to an io object
(e.g. with read and write methods).
"""
if not hasattr(s, 'read'):
s = StringIO(s)
return s
def _make_xsl(filename):
"""Helper that creates a XSLT stylesheet """
path = pkg_resources.resource_filename('rhaptos.cnxmlutils.xsl', filename)
xml = etree.parse(path)
return etree.XSLT(xml)
def _transform(xsl_filename, xml, **kwargs):
"""Transforms the xml using the specifiec xsl file."""
xslt = _make_xsl(xsl_filename)
xml = xslt(xml, **kwargs)
return xml
def _unescape_math(xml):
"""Unescapes Math from Mathjax to MathML."""
xpath_math_script = etree.XPath(
'//x:script[@type="math/mml"]',
namespaces={'x': 'http://www.w3.org/1999/xhtml'})
math_script_list = xpath_math_script(xml)
for mathscript in math_script_list:
math = mathscript.text
# some browsers double escape like e.g. Firefox
math = unescape(unescape(math))
mathscript.clear()
mathscript.set('type', 'math/mml')
new_math = etree.fromstring(math)
mathscript.append(new_math)
return xml
def cnxml_to_html(cnxml_source):
"""Transform the CNXML source to HTML"""
source = _string2io(cnxml_source)
xml = etree.parse(source)
# Run the CNXML to HTML transform
xml = _transform('cnxml-to-html5.xsl', xml,
version='"{}"'.format(version))
xml = XHTML_MODULE_BODY_XPATH(xml)
return etree.tostring(xml[0])
ALOHA2HTML_TRANSFORM_PIPELINE = [
partial(_transform, 'aloha-to-html5-pass01-leveled-headers.xsl'),
partial(_transform, 'aloha-to-html5-pass02-new-min-header-level.xsl'),
partial(_transform, 'aloha-to-html5-pass03-nested-headers.xsl'),
partial(_transform, 'aloha-to-html5-pass04-headers2sections.xsl'),
_unescape_math,
partial(_transform, 'aloha-to-html5-pass05-mathjax2mathml.xsl'),
partial(_transform, 'aloha-to-html5-pass06-postprocessing.xsl'),
]
def aloha_to_etree(html_source):
""" Converts HTML5 from Aloha editor output to a lxml etree. """
xml = _tidy2xhtml5(html_source)
for i, transform in enumerate(ALOHA2HTML_TRANSFORM_PIPELINE):
xml = transform(xml)
return xml
def aloha_to_html(html_source):
"""Converts HTML5 from Aloha to a more structured HTML5"""
xml = aloha_to_etree(html_source)
return etree.tostring(xml, pretty_print=True)
def html_to_cnxml(html_source, cnxml_source):
"""Transform the HTML to CNXML. We need the original CNXML content in
order to preserve the metadata in the CNXML document.
"""
source = _string2io(html_source)
xml = etree.parse(source)
cnxml = etree.parse(_string2io(cnxml_source))
# Run the HTML to CNXML transform on it
xml = _transform('html5-to-cnxml.xsl', xml)
# Replace the original content element with the transformed one.
namespaces = {'c': 'http://cnx.rice.edu/cnxml'}
xpath = etree.XPath('//c:content', namespaces=namespaces)
replaceable_node = xpath(cnxml)[0]
replaceable_node.getparent().replace(replaceable_node, xml.getroot())
# Set the content into the existing cnxml source
return etree.tostring(cnxml)
HTML2VALID_CNXML_TRANSFORM_PIPELINE = [
partial(_transform, 'html5-to-cnxml-pass01-cleanup.xsl'),
partial(_transform, 'html5-to-cnxml-pass02-enclose-para.xsl'),
partial(_transform, 'html5-to-cnxml-pass03-xhtml2cnxml.xsl'),
# TODO: Recognize mime type of images here!
partial(_transform, 'html5-to-cnxml-pass04-postprocessing.xsl'),
partial(_transform, 'html5-to-cnxml-pass05-cnxml-id-generation.xsl'),
partial(_transform, 'html5-to-cnxml-pass06-cnxml-postprocessing.xsl'),
]
def etree_to_valid_cnxml(tree, **kwargs):
for i, transform in enumerate(HTML2VALID_CNXML_TRANSFORM_PIPELINE):
tree = transform(tree)
return etree.tostring(tree, **kwargs)
def html_to_valid_cnxml(html_source):
"""Transform the HTML to valid CNXML (used for OERPUB).
No original CNXML is needed. If HTML is from Aloha please use
aloha_to_html before using this method
"""
source = _string2io(html_source)
xml = etree.parse(source)
return etree_to_valid_cnxml(xml, pretty_print=True)
|
openstax/rhaptos.cnxmlutils | rhaptos/cnxmlutils/utils.py | _tidy2xhtml5 | python | def _tidy2xhtml5(html):
html = _io2string(html)
html = _pre_tidy(html) # Pre-process
xhtml5, errors =\
tidy_document(html,
options={
# do not merge nested div elements
# - preserve semantic block structrues
'merge-divs': 0,
# create xml output
'output-xml': 1,
# Don't use indent, adds extra linespace or linefeed
# which are big problems
'indent': 0,
# No tidy meta tag in output
'tidy-mark': 0,
# No wrapping
'wrap': 0,
# Help ensure validation
'alt-text': '',
# No sense in transitional for tool-generated markup
'doctype': 'strict',
# May not get what you expect,
# but you will get something
'force-output': 1,
# remove HTML entities like e.g. nbsp
'numeric-entities': 1,
# remove
'clean': 1,
'bare': 1,
'word-2000': 1,
'drop-proprietary-attributes': 1,
# enclose text in body always with <p>...</p>
'enclose-text': 1,
# transforms <i> and <b> to <em> and <strong>
'logical-emphasis': 1,
# do not tidy all MathML elements!
# List of MathML 3.0 elements from
# http://www.w3.org/TR/MathML3/appendixi.html#index.elem
'new-inline-tags': 'abs, and, annotation, '
'annotation-xml, apply, approx, arccos, arccosh, '
'arccot, arccoth, arccsc, arccsch, arcsec, arcsech, '
'arcsin, arcsinh, arctan, arctanh, arg, bind, bvar, '
'card, cartesianproduct, cbytes, ceiling, cerror, '
'ci, cn, codomain, complexes, compose, condition, '
'conjugate, cos, cosh, cot, coth, cs, csc, csch, '
'csymbol, curl, declare, degree, determinant, diff, '
'divergence, divide, domain, domainofapplication, '
'el, emptyset, eq, equivalent, eulergamma, exists, '
'exp, exponentiale, factorial, factorof, false, '
'floor, fn, forall, gcd, geq, grad, gt, ident, '
'image, imaginary, imaginaryi, implies, in, '
'infinity, int, integers, intersect, interval, '
'inverse, lambda, laplacian, lcm, leq, limit, list, '
'ln, log, logbase, lowlimit, lt, maction, malign, '
'maligngroup, malignmark, malignscope, math, '
'matrix, matrixrow, max, mean, median, menclose, '
'merror, mfenced, mfrac, mfraction, mglyph, mi, '
'min, minus, mlabeledtr, mlongdiv, mmultiscripts, '
'mn, mo, mode, moment, momentabout, mover, mpadded, '
'mphantom, mprescripts, mroot, mrow, ms, mscarries, '
'mscarry, msgroup, msline, mspace, msqrt, msrow, '
'mstack, mstyle, msub, msubsup, msup, mtable, mtd, '
'mtext, mtr, munder, munderover, naturalnumbers, '
'neq, none, not, notanumber, note, notin, '
'notprsubset, notsubset, or, otherwise, '
'outerproduct, partialdiff, pi, piece, piecewise, '
'plus, power, primes, product, prsubset, quotient, '
'rationals, real, reals, reln, rem, root, '
'scalarproduct, sdev, sec, sech, selector, '
'semantics, sep, set, setdiff, share, sin, sinh, '
'subset, sum, tan, tanh, tendsto, times, transpose, '
'true, union, uplimit, variance, vector, '
'vectorproduct, xor',
'doctype': 'html5',
})
# return xhtml5
# return the tree itself, there is another modification below to avoid
# another parse
return _post_tidy(xhtml5) | Tidy up a html4/5 soup to a parsable valid XHTML5.
Requires tidy-html5 from https://github.com/w3c/tidy-html5
Installation: http://goo.gl/FG27n | train | https://github.com/openstax/rhaptos.cnxmlutils/blob/c32b1a7428dc652e8cd745f3fdf4019a20543649/rhaptos/cnxmlutils/utils.py#L76-L160 | [
"def _pre_tidy(html):\n \"\"\" This method transforms a few things before tidy runs. When we get rid\n of tidy, this can go away. \"\"\"\n tree = etree.fromstring(html, etree.HTMLParser())\n for el in tree.xpath('//u'):\n el.tag = 'em'\n c = el.attrib.get('class', '').split()\n if 'underline' not in c:\n c.append('underline')\n el.attrib['class'] = ' '.join(c)\n\n return tohtml(tree)\n",
"def _post_tidy(html):\n \"\"\" This method transforms post tidy. Will go away when tidy goes away. \"\"\"\n tree = etree.fromstring(html)\n ems = tree.xpath(\n \"//xh:em[@class='underline']|//xh:em[contains(@class, ' underline ')]\",\n namespaces={'xh': 'http://www.w3.org/1999/xhtml'})\n for el in ems:\n c = el.attrib.get('class', '').split()\n c.remove('underline')\n el.tag = '{http://www.w3.org/1999/xhtml}u'\n if c:\n el.attrib['class'] = ' '.join(c)\n elif 'class' in el.attrib:\n del(el.attrib['class'])\n\n return tree\n",
"def _io2string(s):\n \"\"\"If necessary it will convert the io object to an string\n \"\"\"\n if hasattr(s, 'read'):\n s = s.read()\n return s\n"
] | # -*- coding: utf-8 -*-
"""
Copyright (C) 2013 Rice University
This software is subject to the provisions of the
GNU AFFERO GENERAL PUBLIC LICENSE Version 3.0 (AGPL).
See LICENSE.txt for details.
Various utility/helper functions...
Some of thses are used to tranform from one source format to another.
"""
import pkg_resources
from lxml import etree
from lxml.html import tostring as tohtml
from functools import partial
# requires tidy-html5 from https://github.com/w3c/tidy-html5
# Installation: http://goo.gl/FG27n
from tidylib import tidy_document
# for unescaping math from Mathjax script tag
from xml.sax.saxutils import unescape
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from . import __version__ as version
__all__ = (
'NAMESPACES', 'XHTML_INCLUDE_XPATH', 'XHTML_MODULE_BODY_XPATH',
'cnxml_to_html', 'html_to_cnxml',
)
NAMESPACES = {
'xhtml': 'http://www.w3.org/1999/xhtml',
}
XHTML_INCLUDE_XPATH = etree.XPath('//xhtml:a[@class="include"]',
namespaces=NAMESPACES)
XHTML_MODULE_BODY_XPATH = etree.XPath('//xhtml:body', namespaces=NAMESPACES)
def _pre_tidy(html):
""" This method transforms a few things before tidy runs. When we get rid
of tidy, this can go away. """
tree = etree.fromstring(html, etree.HTMLParser())
for el in tree.xpath('//u'):
el.tag = 'em'
c = el.attrib.get('class', '').split()
if 'underline' not in c:
c.append('underline')
el.attrib['class'] = ' '.join(c)
return tohtml(tree)
def _post_tidy(html):
""" This method transforms post tidy. Will go away when tidy goes away. """
tree = etree.fromstring(html)
ems = tree.xpath(
"//xh:em[@class='underline']|//xh:em[contains(@class, ' underline ')]",
namespaces={'xh': 'http://www.w3.org/1999/xhtml'})
for el in ems:
c = el.attrib.get('class', '').split()
c.remove('underline')
el.tag = '{http://www.w3.org/1999/xhtml}u'
if c:
el.attrib['class'] = ' '.join(c)
elif 'class' in el.attrib:
del(el.attrib['class'])
return tree
# Tidy up the Google Docs HTML Soup
def _io2string(s):
"""If necessary it will convert the io object to an string
"""
if hasattr(s, 'read'):
s = s.read()
return s
def _string2io(s):
"""If necessary it will convert the string to an io object
(e.g. with read and write methods).
"""
if not hasattr(s, 'read'):
s = StringIO(s)
return s
def _make_xsl(filename):
"""Helper that creates a XSLT stylesheet """
path = pkg_resources.resource_filename('rhaptos.cnxmlutils.xsl', filename)
xml = etree.parse(path)
return etree.XSLT(xml)
def _transform(xsl_filename, xml, **kwargs):
"""Transforms the xml using the specifiec xsl file."""
xslt = _make_xsl(xsl_filename)
xml = xslt(xml, **kwargs)
return xml
def _unescape_math(xml):
"""Unescapes Math from Mathjax to MathML."""
xpath_math_script = etree.XPath(
'//x:script[@type="math/mml"]',
namespaces={'x': 'http://www.w3.org/1999/xhtml'})
math_script_list = xpath_math_script(xml)
for mathscript in math_script_list:
math = mathscript.text
# some browsers double escape like e.g. Firefox
math = unescape(unescape(math))
mathscript.clear()
mathscript.set('type', 'math/mml')
new_math = etree.fromstring(math)
mathscript.append(new_math)
return xml
def cnxml_to_html(cnxml_source):
"""Transform the CNXML source to HTML"""
source = _string2io(cnxml_source)
xml = etree.parse(source)
# Run the CNXML to HTML transform
xml = _transform('cnxml-to-html5.xsl', xml,
version='"{}"'.format(version))
xml = XHTML_MODULE_BODY_XPATH(xml)
return etree.tostring(xml[0])
ALOHA2HTML_TRANSFORM_PIPELINE = [
partial(_transform, 'aloha-to-html5-pass01-leveled-headers.xsl'),
partial(_transform, 'aloha-to-html5-pass02-new-min-header-level.xsl'),
partial(_transform, 'aloha-to-html5-pass03-nested-headers.xsl'),
partial(_transform, 'aloha-to-html5-pass04-headers2sections.xsl'),
_unescape_math,
partial(_transform, 'aloha-to-html5-pass05-mathjax2mathml.xsl'),
partial(_transform, 'aloha-to-html5-pass06-postprocessing.xsl'),
]
def aloha_to_etree(html_source):
""" Converts HTML5 from Aloha editor output to a lxml etree. """
xml = _tidy2xhtml5(html_source)
for i, transform in enumerate(ALOHA2HTML_TRANSFORM_PIPELINE):
xml = transform(xml)
return xml
def aloha_to_html(html_source):
"""Converts HTML5 from Aloha to a more structured HTML5"""
xml = aloha_to_etree(html_source)
return etree.tostring(xml, pretty_print=True)
def html_to_cnxml(html_source, cnxml_source):
"""Transform the HTML to CNXML. We need the original CNXML content in
order to preserve the metadata in the CNXML document.
"""
source = _string2io(html_source)
xml = etree.parse(source)
cnxml = etree.parse(_string2io(cnxml_source))
# Run the HTML to CNXML transform on it
xml = _transform('html5-to-cnxml.xsl', xml)
# Replace the original content element with the transformed one.
namespaces = {'c': 'http://cnx.rice.edu/cnxml'}
xpath = etree.XPath('//c:content', namespaces=namespaces)
replaceable_node = xpath(cnxml)[0]
replaceable_node.getparent().replace(replaceable_node, xml.getroot())
# Set the content into the existing cnxml source
return etree.tostring(cnxml)
HTML2VALID_CNXML_TRANSFORM_PIPELINE = [
partial(_transform, 'html5-to-cnxml-pass01-cleanup.xsl'),
partial(_transform, 'html5-to-cnxml-pass02-enclose-para.xsl'),
partial(_transform, 'html5-to-cnxml-pass03-xhtml2cnxml.xsl'),
# TODO: Recognize mime type of images here!
partial(_transform, 'html5-to-cnxml-pass04-postprocessing.xsl'),
partial(_transform, 'html5-to-cnxml-pass05-cnxml-id-generation.xsl'),
partial(_transform, 'html5-to-cnxml-pass06-cnxml-postprocessing.xsl'),
]
def etree_to_valid_cnxml(tree, **kwargs):
for i, transform in enumerate(HTML2VALID_CNXML_TRANSFORM_PIPELINE):
tree = transform(tree)
return etree.tostring(tree, **kwargs)
def html_to_valid_cnxml(html_source):
"""Transform the HTML to valid CNXML (used for OERPUB).
No original CNXML is needed. If HTML is from Aloha please use
aloha_to_html before using this method
"""
source = _string2io(html_source)
xml = etree.parse(source)
return etree_to_valid_cnxml(xml, pretty_print=True)
|
openstax/rhaptos.cnxmlutils | rhaptos/cnxmlutils/utils.py | _make_xsl | python | def _make_xsl(filename):
path = pkg_resources.resource_filename('rhaptos.cnxmlutils.xsl', filename)
xml = etree.parse(path)
return etree.XSLT(xml) | Helper that creates a XSLT stylesheet | train | https://github.com/openstax/rhaptos.cnxmlutils/blob/c32b1a7428dc652e8cd745f3fdf4019a20543649/rhaptos/cnxmlutils/utils.py#L180-L184 | null | # -*- coding: utf-8 -*-
"""
Copyright (C) 2013 Rice University
This software is subject to the provisions of the
GNU AFFERO GENERAL PUBLIC LICENSE Version 3.0 (AGPL).
See LICENSE.txt for details.
Various utility/helper functions...
Some of thses are used to tranform from one source format to another.
"""
import pkg_resources
from lxml import etree
from lxml.html import tostring as tohtml
from functools import partial
# requires tidy-html5 from https://github.com/w3c/tidy-html5
# Installation: http://goo.gl/FG27n
from tidylib import tidy_document
# for unescaping math from Mathjax script tag
from xml.sax.saxutils import unescape
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from . import __version__ as version
__all__ = (
'NAMESPACES', 'XHTML_INCLUDE_XPATH', 'XHTML_MODULE_BODY_XPATH',
'cnxml_to_html', 'html_to_cnxml',
)
NAMESPACES = {
'xhtml': 'http://www.w3.org/1999/xhtml',
}
XHTML_INCLUDE_XPATH = etree.XPath('//xhtml:a[@class="include"]',
namespaces=NAMESPACES)
XHTML_MODULE_BODY_XPATH = etree.XPath('//xhtml:body', namespaces=NAMESPACES)
def _pre_tidy(html):
""" This method transforms a few things before tidy runs. When we get rid
of tidy, this can go away. """
tree = etree.fromstring(html, etree.HTMLParser())
for el in tree.xpath('//u'):
el.tag = 'em'
c = el.attrib.get('class', '').split()
if 'underline' not in c:
c.append('underline')
el.attrib['class'] = ' '.join(c)
return tohtml(tree)
def _post_tidy(html):
""" This method transforms post tidy. Will go away when tidy goes away. """
tree = etree.fromstring(html)
ems = tree.xpath(
"//xh:em[@class='underline']|//xh:em[contains(@class, ' underline ')]",
namespaces={'xh': 'http://www.w3.org/1999/xhtml'})
for el in ems:
c = el.attrib.get('class', '').split()
c.remove('underline')
el.tag = '{http://www.w3.org/1999/xhtml}u'
if c:
el.attrib['class'] = ' '.join(c)
elif 'class' in el.attrib:
del(el.attrib['class'])
return tree
# Tidy up the Google Docs HTML Soup
def _tidy2xhtml5(html):
"""Tidy up a html4/5 soup to a parsable valid XHTML5.
Requires tidy-html5 from https://github.com/w3c/tidy-html5
Installation: http://goo.gl/FG27n
"""
html = _io2string(html)
html = _pre_tidy(html) # Pre-process
xhtml5, errors =\
tidy_document(html,
options={
# do not merge nested div elements
# - preserve semantic block structrues
'merge-divs': 0,
# create xml output
'output-xml': 1,
# Don't use indent, adds extra linespace or linefeed
# which are big problems
'indent': 0,
# No tidy meta tag in output
'tidy-mark': 0,
# No wrapping
'wrap': 0,
# Help ensure validation
'alt-text': '',
# No sense in transitional for tool-generated markup
'doctype': 'strict',
# May not get what you expect,
# but you will get something
'force-output': 1,
# remove HTML entities like e.g. nbsp
'numeric-entities': 1,
# remove
'clean': 1,
'bare': 1,
'word-2000': 1,
'drop-proprietary-attributes': 1,
# enclose text in body always with <p>...</p>
'enclose-text': 1,
# transforms <i> and <b> to <em> and <strong>
'logical-emphasis': 1,
# do not tidy all MathML elements!
# List of MathML 3.0 elements from
# http://www.w3.org/TR/MathML3/appendixi.html#index.elem
'new-inline-tags': 'abs, and, annotation, '
'annotation-xml, apply, approx, arccos, arccosh, '
'arccot, arccoth, arccsc, arccsch, arcsec, arcsech, '
'arcsin, arcsinh, arctan, arctanh, arg, bind, bvar, '
'card, cartesianproduct, cbytes, ceiling, cerror, '
'ci, cn, codomain, complexes, compose, condition, '
'conjugate, cos, cosh, cot, coth, cs, csc, csch, '
'csymbol, curl, declare, degree, determinant, diff, '
'divergence, divide, domain, domainofapplication, '
'el, emptyset, eq, equivalent, eulergamma, exists, '
'exp, exponentiale, factorial, factorof, false, '
'floor, fn, forall, gcd, geq, grad, gt, ident, '
'image, imaginary, imaginaryi, implies, in, '
'infinity, int, integers, intersect, interval, '
'inverse, lambda, laplacian, lcm, leq, limit, list, '
'ln, log, logbase, lowlimit, lt, maction, malign, '
'maligngroup, malignmark, malignscope, math, '
'matrix, matrixrow, max, mean, median, menclose, '
'merror, mfenced, mfrac, mfraction, mglyph, mi, '
'min, minus, mlabeledtr, mlongdiv, mmultiscripts, '
'mn, mo, mode, moment, momentabout, mover, mpadded, '
'mphantom, mprescripts, mroot, mrow, ms, mscarries, '
'mscarry, msgroup, msline, mspace, msqrt, msrow, '
'mstack, mstyle, msub, msubsup, msup, mtable, mtd, '
'mtext, mtr, munder, munderover, naturalnumbers, '
'neq, none, not, notanumber, note, notin, '
'notprsubset, notsubset, or, otherwise, '
'outerproduct, partialdiff, pi, piece, piecewise, '
'plus, power, primes, product, prsubset, quotient, '
'rationals, real, reals, reln, rem, root, '
'scalarproduct, sdev, sec, sech, selector, '
'semantics, sep, set, setdiff, share, sin, sinh, '
'subset, sum, tan, tanh, tendsto, times, transpose, '
'true, union, uplimit, variance, vector, '
'vectorproduct, xor',
'doctype': 'html5',
})
# return xhtml5
# return the tree itself, there is another modification below to avoid
# another parse
return _post_tidy(xhtml5)
def _io2string(s):
"""If necessary it will convert the io object to an string
"""
if hasattr(s, 'read'):
s = s.read()
return s
def _string2io(s):
"""If necessary it will convert the string to an io object
(e.g. with read and write methods).
"""
if not hasattr(s, 'read'):
s = StringIO(s)
return s
def _transform(xsl_filename, xml, **kwargs):
"""Transforms the xml using the specifiec xsl file."""
xslt = _make_xsl(xsl_filename)
xml = xslt(xml, **kwargs)
return xml
def _unescape_math(xml):
"""Unescapes Math from Mathjax to MathML."""
xpath_math_script = etree.XPath(
'//x:script[@type="math/mml"]',
namespaces={'x': 'http://www.w3.org/1999/xhtml'})
math_script_list = xpath_math_script(xml)
for mathscript in math_script_list:
math = mathscript.text
# some browsers double escape like e.g. Firefox
math = unescape(unescape(math))
mathscript.clear()
mathscript.set('type', 'math/mml')
new_math = etree.fromstring(math)
mathscript.append(new_math)
return xml
def cnxml_to_html(cnxml_source):
"""Transform the CNXML source to HTML"""
source = _string2io(cnxml_source)
xml = etree.parse(source)
# Run the CNXML to HTML transform
xml = _transform('cnxml-to-html5.xsl', xml,
version='"{}"'.format(version))
xml = XHTML_MODULE_BODY_XPATH(xml)
return etree.tostring(xml[0])
ALOHA2HTML_TRANSFORM_PIPELINE = [
partial(_transform, 'aloha-to-html5-pass01-leveled-headers.xsl'),
partial(_transform, 'aloha-to-html5-pass02-new-min-header-level.xsl'),
partial(_transform, 'aloha-to-html5-pass03-nested-headers.xsl'),
partial(_transform, 'aloha-to-html5-pass04-headers2sections.xsl'),
_unescape_math,
partial(_transform, 'aloha-to-html5-pass05-mathjax2mathml.xsl'),
partial(_transform, 'aloha-to-html5-pass06-postprocessing.xsl'),
]
def aloha_to_etree(html_source):
""" Converts HTML5 from Aloha editor output to a lxml etree. """
xml = _tidy2xhtml5(html_source)
for i, transform in enumerate(ALOHA2HTML_TRANSFORM_PIPELINE):
xml = transform(xml)
return xml
def aloha_to_html(html_source):
"""Converts HTML5 from Aloha to a more structured HTML5"""
xml = aloha_to_etree(html_source)
return etree.tostring(xml, pretty_print=True)
def html_to_cnxml(html_source, cnxml_source):
"""Transform the HTML to CNXML. We need the original CNXML content in
order to preserve the metadata in the CNXML document.
"""
source = _string2io(html_source)
xml = etree.parse(source)
cnxml = etree.parse(_string2io(cnxml_source))
# Run the HTML to CNXML transform on it
xml = _transform('html5-to-cnxml.xsl', xml)
# Replace the original content element with the transformed one.
namespaces = {'c': 'http://cnx.rice.edu/cnxml'}
xpath = etree.XPath('//c:content', namespaces=namespaces)
replaceable_node = xpath(cnxml)[0]
replaceable_node.getparent().replace(replaceable_node, xml.getroot())
# Set the content into the existing cnxml source
return etree.tostring(cnxml)
HTML2VALID_CNXML_TRANSFORM_PIPELINE = [
partial(_transform, 'html5-to-cnxml-pass01-cleanup.xsl'),
partial(_transform, 'html5-to-cnxml-pass02-enclose-para.xsl'),
partial(_transform, 'html5-to-cnxml-pass03-xhtml2cnxml.xsl'),
# TODO: Recognize mime type of images here!
partial(_transform, 'html5-to-cnxml-pass04-postprocessing.xsl'),
partial(_transform, 'html5-to-cnxml-pass05-cnxml-id-generation.xsl'),
partial(_transform, 'html5-to-cnxml-pass06-cnxml-postprocessing.xsl'),
]
def etree_to_valid_cnxml(tree, **kwargs):
for i, transform in enumerate(HTML2VALID_CNXML_TRANSFORM_PIPELINE):
tree = transform(tree)
return etree.tostring(tree, **kwargs)
def html_to_valid_cnxml(html_source):
"""Transform the HTML to valid CNXML (used for OERPUB).
No original CNXML is needed. If HTML is from Aloha please use
aloha_to_html before using this method
"""
source = _string2io(html_source)
xml = etree.parse(source)
return etree_to_valid_cnxml(xml, pretty_print=True)
|
openstax/rhaptos.cnxmlutils | rhaptos/cnxmlutils/utils.py | _transform | python | def _transform(xsl_filename, xml, **kwargs):
xslt = _make_xsl(xsl_filename)
xml = xslt(xml, **kwargs)
return xml | Transforms the xml using the specifiec xsl file. | train | https://github.com/openstax/rhaptos.cnxmlutils/blob/c32b1a7428dc652e8cd745f3fdf4019a20543649/rhaptos/cnxmlutils/utils.py#L187-L191 | [
"def _make_xsl(filename):\n \"\"\"Helper that creates a XSLT stylesheet \"\"\"\n path = pkg_resources.resource_filename('rhaptos.cnxmlutils.xsl', filename)\n xml = etree.parse(path)\n return etree.XSLT(xml)\n"
] | # -*- coding: utf-8 -*-
"""
Copyright (C) 2013 Rice University
This software is subject to the provisions of the
GNU AFFERO GENERAL PUBLIC LICENSE Version 3.0 (AGPL).
See LICENSE.txt for details.
Various utility/helper functions...
Some of thses are used to tranform from one source format to another.
"""
import pkg_resources
from lxml import etree
from lxml.html import tostring as tohtml
from functools import partial
# requires tidy-html5 from https://github.com/w3c/tidy-html5
# Installation: http://goo.gl/FG27n
from tidylib import tidy_document
# for unescaping math from Mathjax script tag
from xml.sax.saxutils import unescape
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from . import __version__ as version
__all__ = (
'NAMESPACES', 'XHTML_INCLUDE_XPATH', 'XHTML_MODULE_BODY_XPATH',
'cnxml_to_html', 'html_to_cnxml',
)
NAMESPACES = {
'xhtml': 'http://www.w3.org/1999/xhtml',
}
XHTML_INCLUDE_XPATH = etree.XPath('//xhtml:a[@class="include"]',
namespaces=NAMESPACES)
XHTML_MODULE_BODY_XPATH = etree.XPath('//xhtml:body', namespaces=NAMESPACES)
def _pre_tidy(html):
""" This method transforms a few things before tidy runs. When we get rid
of tidy, this can go away. """
tree = etree.fromstring(html, etree.HTMLParser())
for el in tree.xpath('//u'):
el.tag = 'em'
c = el.attrib.get('class', '').split()
if 'underline' not in c:
c.append('underline')
el.attrib['class'] = ' '.join(c)
return tohtml(tree)
def _post_tidy(html):
""" This method transforms post tidy. Will go away when tidy goes away. """
tree = etree.fromstring(html)
ems = tree.xpath(
"//xh:em[@class='underline']|//xh:em[contains(@class, ' underline ')]",
namespaces={'xh': 'http://www.w3.org/1999/xhtml'})
for el in ems:
c = el.attrib.get('class', '').split()
c.remove('underline')
el.tag = '{http://www.w3.org/1999/xhtml}u'
if c:
el.attrib['class'] = ' '.join(c)
elif 'class' in el.attrib:
del(el.attrib['class'])
return tree
# Tidy up the Google Docs HTML Soup
def _tidy2xhtml5(html):
"""Tidy up a html4/5 soup to a parsable valid XHTML5.
Requires tidy-html5 from https://github.com/w3c/tidy-html5
Installation: http://goo.gl/FG27n
"""
html = _io2string(html)
html = _pre_tidy(html) # Pre-process
xhtml5, errors =\
tidy_document(html,
options={
# do not merge nested div elements
# - preserve semantic block structrues
'merge-divs': 0,
# create xml output
'output-xml': 1,
# Don't use indent, adds extra linespace or linefeed
# which are big problems
'indent': 0,
# No tidy meta tag in output
'tidy-mark': 0,
# No wrapping
'wrap': 0,
# Help ensure validation
'alt-text': '',
# No sense in transitional for tool-generated markup
'doctype': 'strict',
# May not get what you expect,
# but you will get something
'force-output': 1,
# remove HTML entities like e.g. nbsp
'numeric-entities': 1,
# remove
'clean': 1,
'bare': 1,
'word-2000': 1,
'drop-proprietary-attributes': 1,
# enclose text in body always with <p>...</p>
'enclose-text': 1,
# transforms <i> and <b> to <em> and <strong>
'logical-emphasis': 1,
# do not tidy all MathML elements!
# List of MathML 3.0 elements from
# http://www.w3.org/TR/MathML3/appendixi.html#index.elem
'new-inline-tags': 'abs, and, annotation, '
'annotation-xml, apply, approx, arccos, arccosh, '
'arccot, arccoth, arccsc, arccsch, arcsec, arcsech, '
'arcsin, arcsinh, arctan, arctanh, arg, bind, bvar, '
'card, cartesianproduct, cbytes, ceiling, cerror, '
'ci, cn, codomain, complexes, compose, condition, '
'conjugate, cos, cosh, cot, coth, cs, csc, csch, '
'csymbol, curl, declare, degree, determinant, diff, '
'divergence, divide, domain, domainofapplication, '
'el, emptyset, eq, equivalent, eulergamma, exists, '
'exp, exponentiale, factorial, factorof, false, '
'floor, fn, forall, gcd, geq, grad, gt, ident, '
'image, imaginary, imaginaryi, implies, in, '
'infinity, int, integers, intersect, interval, '
'inverse, lambda, laplacian, lcm, leq, limit, list, '
'ln, log, logbase, lowlimit, lt, maction, malign, '
'maligngroup, malignmark, malignscope, math, '
'matrix, matrixrow, max, mean, median, menclose, '
'merror, mfenced, mfrac, mfraction, mglyph, mi, '
'min, minus, mlabeledtr, mlongdiv, mmultiscripts, '
'mn, mo, mode, moment, momentabout, mover, mpadded, '
'mphantom, mprescripts, mroot, mrow, ms, mscarries, '
'mscarry, msgroup, msline, mspace, msqrt, msrow, '
'mstack, mstyle, msub, msubsup, msup, mtable, mtd, '
'mtext, mtr, munder, munderover, naturalnumbers, '
'neq, none, not, notanumber, note, notin, '
'notprsubset, notsubset, or, otherwise, '
'outerproduct, partialdiff, pi, piece, piecewise, '
'plus, power, primes, product, prsubset, quotient, '
'rationals, real, reals, reln, rem, root, '
'scalarproduct, sdev, sec, sech, selector, '
'semantics, sep, set, setdiff, share, sin, sinh, '
'subset, sum, tan, tanh, tendsto, times, transpose, '
'true, union, uplimit, variance, vector, '
'vectorproduct, xor',
'doctype': 'html5',
})
# return xhtml5
# return the tree itself, there is another modification below to avoid
# another parse
return _post_tidy(xhtml5)
def _io2string(s):
"""If necessary it will convert the io object to an string
"""
if hasattr(s, 'read'):
s = s.read()
return s
def _string2io(s):
"""If necessary it will convert the string to an io object
(e.g. with read and write methods).
"""
if not hasattr(s, 'read'):
s = StringIO(s)
return s
def _make_xsl(filename):
"""Helper that creates a XSLT stylesheet """
path = pkg_resources.resource_filename('rhaptos.cnxmlutils.xsl', filename)
xml = etree.parse(path)
return etree.XSLT(xml)
def _unescape_math(xml):
"""Unescapes Math from Mathjax to MathML."""
xpath_math_script = etree.XPath(
'//x:script[@type="math/mml"]',
namespaces={'x': 'http://www.w3.org/1999/xhtml'})
math_script_list = xpath_math_script(xml)
for mathscript in math_script_list:
math = mathscript.text
# some browsers double escape like e.g. Firefox
math = unescape(unescape(math))
mathscript.clear()
mathscript.set('type', 'math/mml')
new_math = etree.fromstring(math)
mathscript.append(new_math)
return xml
def cnxml_to_html(cnxml_source):
"""Transform the CNXML source to HTML"""
source = _string2io(cnxml_source)
xml = etree.parse(source)
# Run the CNXML to HTML transform
xml = _transform('cnxml-to-html5.xsl', xml,
version='"{}"'.format(version))
xml = XHTML_MODULE_BODY_XPATH(xml)
return etree.tostring(xml[0])
ALOHA2HTML_TRANSFORM_PIPELINE = [
partial(_transform, 'aloha-to-html5-pass01-leveled-headers.xsl'),
partial(_transform, 'aloha-to-html5-pass02-new-min-header-level.xsl'),
partial(_transform, 'aloha-to-html5-pass03-nested-headers.xsl'),
partial(_transform, 'aloha-to-html5-pass04-headers2sections.xsl'),
_unescape_math,
partial(_transform, 'aloha-to-html5-pass05-mathjax2mathml.xsl'),
partial(_transform, 'aloha-to-html5-pass06-postprocessing.xsl'),
]
def aloha_to_etree(html_source):
""" Converts HTML5 from Aloha editor output to a lxml etree. """
xml = _tidy2xhtml5(html_source)
for i, transform in enumerate(ALOHA2HTML_TRANSFORM_PIPELINE):
xml = transform(xml)
return xml
def aloha_to_html(html_source):
"""Converts HTML5 from Aloha to a more structured HTML5"""
xml = aloha_to_etree(html_source)
return etree.tostring(xml, pretty_print=True)
def html_to_cnxml(html_source, cnxml_source):
"""Transform the HTML to CNXML. We need the original CNXML content in
order to preserve the metadata in the CNXML document.
"""
source = _string2io(html_source)
xml = etree.parse(source)
cnxml = etree.parse(_string2io(cnxml_source))
# Run the HTML to CNXML transform on it
xml = _transform('html5-to-cnxml.xsl', xml)
# Replace the original content element with the transformed one.
namespaces = {'c': 'http://cnx.rice.edu/cnxml'}
xpath = etree.XPath('//c:content', namespaces=namespaces)
replaceable_node = xpath(cnxml)[0]
replaceable_node.getparent().replace(replaceable_node, xml.getroot())
# Set the content into the existing cnxml source
return etree.tostring(cnxml)
HTML2VALID_CNXML_TRANSFORM_PIPELINE = [
partial(_transform, 'html5-to-cnxml-pass01-cleanup.xsl'),
partial(_transform, 'html5-to-cnxml-pass02-enclose-para.xsl'),
partial(_transform, 'html5-to-cnxml-pass03-xhtml2cnxml.xsl'),
# TODO: Recognize mime type of images here!
partial(_transform, 'html5-to-cnxml-pass04-postprocessing.xsl'),
partial(_transform, 'html5-to-cnxml-pass05-cnxml-id-generation.xsl'),
partial(_transform, 'html5-to-cnxml-pass06-cnxml-postprocessing.xsl'),
]
def etree_to_valid_cnxml(tree, **kwargs):
for i, transform in enumerate(HTML2VALID_CNXML_TRANSFORM_PIPELINE):
tree = transform(tree)
return etree.tostring(tree, **kwargs)
def html_to_valid_cnxml(html_source):
"""Transform the HTML to valid CNXML (used for OERPUB).
No original CNXML is needed. If HTML is from Aloha please use
aloha_to_html before using this method
"""
source = _string2io(html_source)
xml = etree.parse(source)
return etree_to_valid_cnxml(xml, pretty_print=True)
|
openstax/rhaptos.cnxmlutils | rhaptos/cnxmlutils/utils.py | _unescape_math | python | def _unescape_math(xml):
xpath_math_script = etree.XPath(
'//x:script[@type="math/mml"]',
namespaces={'x': 'http://www.w3.org/1999/xhtml'})
math_script_list = xpath_math_script(xml)
for mathscript in math_script_list:
math = mathscript.text
# some browsers double escape like e.g. Firefox
math = unescape(unescape(math))
mathscript.clear()
mathscript.set('type', 'math/mml')
new_math = etree.fromstring(math)
mathscript.append(new_math)
return xml | Unescapes Math from Mathjax to MathML. | train | https://github.com/openstax/rhaptos.cnxmlutils/blob/c32b1a7428dc652e8cd745f3fdf4019a20543649/rhaptos/cnxmlutils/utils.py#L194-L208 | null | # -*- coding: utf-8 -*-
"""
Copyright (C) 2013 Rice University
This software is subject to the provisions of the
GNU AFFERO GENERAL PUBLIC LICENSE Version 3.0 (AGPL).
See LICENSE.txt for details.
Various utility/helper functions...
Some of thses are used to tranform from one source format to another.
"""
import pkg_resources
from lxml import etree
from lxml.html import tostring as tohtml
from functools import partial
# requires tidy-html5 from https://github.com/w3c/tidy-html5
# Installation: http://goo.gl/FG27n
from tidylib import tidy_document
# for unescaping math from Mathjax script tag
from xml.sax.saxutils import unescape
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from . import __version__ as version
__all__ = (
'NAMESPACES', 'XHTML_INCLUDE_XPATH', 'XHTML_MODULE_BODY_XPATH',
'cnxml_to_html', 'html_to_cnxml',
)
NAMESPACES = {
'xhtml': 'http://www.w3.org/1999/xhtml',
}
XHTML_INCLUDE_XPATH = etree.XPath('//xhtml:a[@class="include"]',
namespaces=NAMESPACES)
XHTML_MODULE_BODY_XPATH = etree.XPath('//xhtml:body', namespaces=NAMESPACES)
def _pre_tidy(html):
""" This method transforms a few things before tidy runs. When we get rid
of tidy, this can go away. """
tree = etree.fromstring(html, etree.HTMLParser())
for el in tree.xpath('//u'):
el.tag = 'em'
c = el.attrib.get('class', '').split()
if 'underline' not in c:
c.append('underline')
el.attrib['class'] = ' '.join(c)
return tohtml(tree)
def _post_tidy(html):
""" This method transforms post tidy. Will go away when tidy goes away. """
tree = etree.fromstring(html)
ems = tree.xpath(
"//xh:em[@class='underline']|//xh:em[contains(@class, ' underline ')]",
namespaces={'xh': 'http://www.w3.org/1999/xhtml'})
for el in ems:
c = el.attrib.get('class', '').split()
c.remove('underline')
el.tag = '{http://www.w3.org/1999/xhtml}u'
if c:
el.attrib['class'] = ' '.join(c)
elif 'class' in el.attrib:
del(el.attrib['class'])
return tree
# Tidy up the Google Docs HTML Soup
def _tidy2xhtml5(html):
"""Tidy up a html4/5 soup to a parsable valid XHTML5.
Requires tidy-html5 from https://github.com/w3c/tidy-html5
Installation: http://goo.gl/FG27n
"""
html = _io2string(html)
html = _pre_tidy(html) # Pre-process
xhtml5, errors =\
tidy_document(html,
options={
# do not merge nested div elements
# - preserve semantic block structrues
'merge-divs': 0,
# create xml output
'output-xml': 1,
# Don't use indent, adds extra linespace or linefeed
# which are big problems
'indent': 0,
# No tidy meta tag in output
'tidy-mark': 0,
# No wrapping
'wrap': 0,
# Help ensure validation
'alt-text': '',
# No sense in transitional for tool-generated markup
'doctype': 'strict',
# May not get what you expect,
# but you will get something
'force-output': 1,
# remove HTML entities like e.g. nbsp
'numeric-entities': 1,
# remove
'clean': 1,
'bare': 1,
'word-2000': 1,
'drop-proprietary-attributes': 1,
# enclose text in body always with <p>...</p>
'enclose-text': 1,
# transforms <i> and <b> to <em> and <strong>
'logical-emphasis': 1,
# do not tidy all MathML elements!
# List of MathML 3.0 elements from
# http://www.w3.org/TR/MathML3/appendixi.html#index.elem
'new-inline-tags': 'abs, and, annotation, '
'annotation-xml, apply, approx, arccos, arccosh, '
'arccot, arccoth, arccsc, arccsch, arcsec, arcsech, '
'arcsin, arcsinh, arctan, arctanh, arg, bind, bvar, '
'card, cartesianproduct, cbytes, ceiling, cerror, '
'ci, cn, codomain, complexes, compose, condition, '
'conjugate, cos, cosh, cot, coth, cs, csc, csch, '
'csymbol, curl, declare, degree, determinant, diff, '
'divergence, divide, domain, domainofapplication, '
'el, emptyset, eq, equivalent, eulergamma, exists, '
'exp, exponentiale, factorial, factorof, false, '
'floor, fn, forall, gcd, geq, grad, gt, ident, '
'image, imaginary, imaginaryi, implies, in, '
'infinity, int, integers, intersect, interval, '
'inverse, lambda, laplacian, lcm, leq, limit, list, '
'ln, log, logbase, lowlimit, lt, maction, malign, '
'maligngroup, malignmark, malignscope, math, '
'matrix, matrixrow, max, mean, median, menclose, '
'merror, mfenced, mfrac, mfraction, mglyph, mi, '
'min, minus, mlabeledtr, mlongdiv, mmultiscripts, '
'mn, mo, mode, moment, momentabout, mover, mpadded, '
'mphantom, mprescripts, mroot, mrow, ms, mscarries, '
'mscarry, msgroup, msline, mspace, msqrt, msrow, '
'mstack, mstyle, msub, msubsup, msup, mtable, mtd, '
'mtext, mtr, munder, munderover, naturalnumbers, '
'neq, none, not, notanumber, note, notin, '
'notprsubset, notsubset, or, otherwise, '
'outerproduct, partialdiff, pi, piece, piecewise, '
'plus, power, primes, product, prsubset, quotient, '
'rationals, real, reals, reln, rem, root, '
'scalarproduct, sdev, sec, sech, selector, '
'semantics, sep, set, setdiff, share, sin, sinh, '
'subset, sum, tan, tanh, tendsto, times, transpose, '
'true, union, uplimit, variance, vector, '
'vectorproduct, xor',
'doctype': 'html5',
})
# return xhtml5
# return the tree itself, there is another modification below to avoid
# another parse
return _post_tidy(xhtml5)
def _io2string(s):
"""If necessary it will convert the io object to an string
"""
if hasattr(s, 'read'):
s = s.read()
return s
def _string2io(s):
"""If necessary it will convert the string to an io object
(e.g. with read and write methods).
"""
if not hasattr(s, 'read'):
s = StringIO(s)
return s
def _make_xsl(filename):
"""Helper that creates a XSLT stylesheet """
path = pkg_resources.resource_filename('rhaptos.cnxmlutils.xsl', filename)
xml = etree.parse(path)
return etree.XSLT(xml)
def _transform(xsl_filename, xml, **kwargs):
"""Transforms the xml using the specifiec xsl file."""
xslt = _make_xsl(xsl_filename)
xml = xslt(xml, **kwargs)
return xml
def cnxml_to_html(cnxml_source):
"""Transform the CNXML source to HTML"""
source = _string2io(cnxml_source)
xml = etree.parse(source)
# Run the CNXML to HTML transform
xml = _transform('cnxml-to-html5.xsl', xml,
version='"{}"'.format(version))
xml = XHTML_MODULE_BODY_XPATH(xml)
return etree.tostring(xml[0])
ALOHA2HTML_TRANSFORM_PIPELINE = [
partial(_transform, 'aloha-to-html5-pass01-leveled-headers.xsl'),
partial(_transform, 'aloha-to-html5-pass02-new-min-header-level.xsl'),
partial(_transform, 'aloha-to-html5-pass03-nested-headers.xsl'),
partial(_transform, 'aloha-to-html5-pass04-headers2sections.xsl'),
_unescape_math,
partial(_transform, 'aloha-to-html5-pass05-mathjax2mathml.xsl'),
partial(_transform, 'aloha-to-html5-pass06-postprocessing.xsl'),
]
def aloha_to_etree(html_source):
""" Converts HTML5 from Aloha editor output to a lxml etree. """
xml = _tidy2xhtml5(html_source)
for i, transform in enumerate(ALOHA2HTML_TRANSFORM_PIPELINE):
xml = transform(xml)
return xml
def aloha_to_html(html_source):
"""Converts HTML5 from Aloha to a more structured HTML5"""
xml = aloha_to_etree(html_source)
return etree.tostring(xml, pretty_print=True)
def html_to_cnxml(html_source, cnxml_source):
"""Transform the HTML to CNXML. We need the original CNXML content in
order to preserve the metadata in the CNXML document.
"""
source = _string2io(html_source)
xml = etree.parse(source)
cnxml = etree.parse(_string2io(cnxml_source))
# Run the HTML to CNXML transform on it
xml = _transform('html5-to-cnxml.xsl', xml)
# Replace the original content element with the transformed one.
namespaces = {'c': 'http://cnx.rice.edu/cnxml'}
xpath = etree.XPath('//c:content', namespaces=namespaces)
replaceable_node = xpath(cnxml)[0]
replaceable_node.getparent().replace(replaceable_node, xml.getroot())
# Set the content into the existing cnxml source
return etree.tostring(cnxml)
HTML2VALID_CNXML_TRANSFORM_PIPELINE = [
partial(_transform, 'html5-to-cnxml-pass01-cleanup.xsl'),
partial(_transform, 'html5-to-cnxml-pass02-enclose-para.xsl'),
partial(_transform, 'html5-to-cnxml-pass03-xhtml2cnxml.xsl'),
# TODO: Recognize mime type of images here!
partial(_transform, 'html5-to-cnxml-pass04-postprocessing.xsl'),
partial(_transform, 'html5-to-cnxml-pass05-cnxml-id-generation.xsl'),
partial(_transform, 'html5-to-cnxml-pass06-cnxml-postprocessing.xsl'),
]
def etree_to_valid_cnxml(tree, **kwargs):
for i, transform in enumerate(HTML2VALID_CNXML_TRANSFORM_PIPELINE):
tree = transform(tree)
return etree.tostring(tree, **kwargs)
def html_to_valid_cnxml(html_source):
"""Transform the HTML to valid CNXML (used for OERPUB).
No original CNXML is needed. If HTML is from Aloha please use
aloha_to_html before using this method
"""
source = _string2io(html_source)
xml = etree.parse(source)
return etree_to_valid_cnxml(xml, pretty_print=True)
|
openstax/rhaptos.cnxmlutils | rhaptos/cnxmlutils/utils.py | cnxml_to_html | python | def cnxml_to_html(cnxml_source):
source = _string2io(cnxml_source)
xml = etree.parse(source)
# Run the CNXML to HTML transform
xml = _transform('cnxml-to-html5.xsl', xml,
version='"{}"'.format(version))
xml = XHTML_MODULE_BODY_XPATH(xml)
return etree.tostring(xml[0]) | Transform the CNXML source to HTML | train | https://github.com/openstax/rhaptos.cnxmlutils/blob/c32b1a7428dc652e8cd745f3fdf4019a20543649/rhaptos/cnxmlutils/utils.py#L211-L219 | [
"def _string2io(s):\n \"\"\"If necessary it will convert the string to an io object\n (e.g. with read and write methods).\n \"\"\"\n if not hasattr(s, 'read'):\n s = StringIO(s)\n return s\n",
"def _transform(xsl_filename, xml, **kwargs):\n \"\"\"Transforms the xml using the specifiec xsl file.\"\"\"\n xslt = _make_xsl(xsl_filename)\n xml = xslt(xml, **kwargs)\n return xml\n"
] | # -*- coding: utf-8 -*-
"""
Copyright (C) 2013 Rice University
This software is subject to the provisions of the
GNU AFFERO GENERAL PUBLIC LICENSE Version 3.0 (AGPL).
See LICENSE.txt for details.
Various utility/helper functions...
Some of thses are used to tranform from one source format to another.
"""
import pkg_resources
from lxml import etree
from lxml.html import tostring as tohtml
from functools import partial
# requires tidy-html5 from https://github.com/w3c/tidy-html5
# Installation: http://goo.gl/FG27n
from tidylib import tidy_document
# for unescaping math from Mathjax script tag
from xml.sax.saxutils import unescape
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from . import __version__ as version
__all__ = (
'NAMESPACES', 'XHTML_INCLUDE_XPATH', 'XHTML_MODULE_BODY_XPATH',
'cnxml_to_html', 'html_to_cnxml',
)
NAMESPACES = {
'xhtml': 'http://www.w3.org/1999/xhtml',
}
XHTML_INCLUDE_XPATH = etree.XPath('//xhtml:a[@class="include"]',
namespaces=NAMESPACES)
XHTML_MODULE_BODY_XPATH = etree.XPath('//xhtml:body', namespaces=NAMESPACES)
def _pre_tidy(html):
""" This method transforms a few things before tidy runs. When we get rid
of tidy, this can go away. """
tree = etree.fromstring(html, etree.HTMLParser())
for el in tree.xpath('//u'):
el.tag = 'em'
c = el.attrib.get('class', '').split()
if 'underline' not in c:
c.append('underline')
el.attrib['class'] = ' '.join(c)
return tohtml(tree)
def _post_tidy(html):
""" This method transforms post tidy. Will go away when tidy goes away. """
tree = etree.fromstring(html)
ems = tree.xpath(
"//xh:em[@class='underline']|//xh:em[contains(@class, ' underline ')]",
namespaces={'xh': 'http://www.w3.org/1999/xhtml'})
for el in ems:
c = el.attrib.get('class', '').split()
c.remove('underline')
el.tag = '{http://www.w3.org/1999/xhtml}u'
if c:
el.attrib['class'] = ' '.join(c)
elif 'class' in el.attrib:
del(el.attrib['class'])
return tree
# Tidy up the Google Docs HTML Soup
def _tidy2xhtml5(html):
"""Tidy up a html4/5 soup to a parsable valid XHTML5.
Requires tidy-html5 from https://github.com/w3c/tidy-html5
Installation: http://goo.gl/FG27n
"""
html = _io2string(html)
html = _pre_tidy(html) # Pre-process
xhtml5, errors =\
tidy_document(html,
options={
# do not merge nested div elements
# - preserve semantic block structrues
'merge-divs': 0,
# create xml output
'output-xml': 1,
# Don't use indent, adds extra linespace or linefeed
# which are big problems
'indent': 0,
# No tidy meta tag in output
'tidy-mark': 0,
# No wrapping
'wrap': 0,
# Help ensure validation
'alt-text': '',
# No sense in transitional for tool-generated markup
'doctype': 'strict',
# May not get what you expect,
# but you will get something
'force-output': 1,
# remove HTML entities like e.g. nbsp
'numeric-entities': 1,
# remove
'clean': 1,
'bare': 1,
'word-2000': 1,
'drop-proprietary-attributes': 1,
# enclose text in body always with <p>...</p>
'enclose-text': 1,
# transforms <i> and <b> to <em> and <strong>
'logical-emphasis': 1,
# do not tidy all MathML elements!
# List of MathML 3.0 elements from
# http://www.w3.org/TR/MathML3/appendixi.html#index.elem
'new-inline-tags': 'abs, and, annotation, '
'annotation-xml, apply, approx, arccos, arccosh, '
'arccot, arccoth, arccsc, arccsch, arcsec, arcsech, '
'arcsin, arcsinh, arctan, arctanh, arg, bind, bvar, '
'card, cartesianproduct, cbytes, ceiling, cerror, '
'ci, cn, codomain, complexes, compose, condition, '
'conjugate, cos, cosh, cot, coth, cs, csc, csch, '
'csymbol, curl, declare, degree, determinant, diff, '
'divergence, divide, domain, domainofapplication, '
'el, emptyset, eq, equivalent, eulergamma, exists, '
'exp, exponentiale, factorial, factorof, false, '
'floor, fn, forall, gcd, geq, grad, gt, ident, '
'image, imaginary, imaginaryi, implies, in, '
'infinity, int, integers, intersect, interval, '
'inverse, lambda, laplacian, lcm, leq, limit, list, '
'ln, log, logbase, lowlimit, lt, maction, malign, '
'maligngroup, malignmark, malignscope, math, '
'matrix, matrixrow, max, mean, median, menclose, '
'merror, mfenced, mfrac, mfraction, mglyph, mi, '
'min, minus, mlabeledtr, mlongdiv, mmultiscripts, '
'mn, mo, mode, moment, momentabout, mover, mpadded, '
'mphantom, mprescripts, mroot, mrow, ms, mscarries, '
'mscarry, msgroup, msline, mspace, msqrt, msrow, '
'mstack, mstyle, msub, msubsup, msup, mtable, mtd, '
'mtext, mtr, munder, munderover, naturalnumbers, '
'neq, none, not, notanumber, note, notin, '
'notprsubset, notsubset, or, otherwise, '
'outerproduct, partialdiff, pi, piece, piecewise, '
'plus, power, primes, product, prsubset, quotient, '
'rationals, real, reals, reln, rem, root, '
'scalarproduct, sdev, sec, sech, selector, '
'semantics, sep, set, setdiff, share, sin, sinh, '
'subset, sum, tan, tanh, tendsto, times, transpose, '
'true, union, uplimit, variance, vector, '
'vectorproduct, xor',
'doctype': 'html5',
})
# return xhtml5
# return the tree itself, there is another modification below to avoid
# another parse
return _post_tidy(xhtml5)
def _io2string(s):
"""If necessary it will convert the io object to an string
"""
if hasattr(s, 'read'):
s = s.read()
return s
def _string2io(s):
"""If necessary it will convert the string to an io object
(e.g. with read and write methods).
"""
if not hasattr(s, 'read'):
s = StringIO(s)
return s
def _make_xsl(filename):
"""Helper that creates a XSLT stylesheet """
path = pkg_resources.resource_filename('rhaptos.cnxmlutils.xsl', filename)
xml = etree.parse(path)
return etree.XSLT(xml)
def _transform(xsl_filename, xml, **kwargs):
"""Transforms the xml using the specifiec xsl file."""
xslt = _make_xsl(xsl_filename)
xml = xslt(xml, **kwargs)
return xml
def _unescape_math(xml):
"""Unescapes Math from Mathjax to MathML."""
xpath_math_script = etree.XPath(
'//x:script[@type="math/mml"]',
namespaces={'x': 'http://www.w3.org/1999/xhtml'})
math_script_list = xpath_math_script(xml)
for mathscript in math_script_list:
math = mathscript.text
# some browsers double escape like e.g. Firefox
math = unescape(unescape(math))
mathscript.clear()
mathscript.set('type', 'math/mml')
new_math = etree.fromstring(math)
mathscript.append(new_math)
return xml
ALOHA2HTML_TRANSFORM_PIPELINE = [
partial(_transform, 'aloha-to-html5-pass01-leveled-headers.xsl'),
partial(_transform, 'aloha-to-html5-pass02-new-min-header-level.xsl'),
partial(_transform, 'aloha-to-html5-pass03-nested-headers.xsl'),
partial(_transform, 'aloha-to-html5-pass04-headers2sections.xsl'),
_unescape_math,
partial(_transform, 'aloha-to-html5-pass05-mathjax2mathml.xsl'),
partial(_transform, 'aloha-to-html5-pass06-postprocessing.xsl'),
]
def aloha_to_etree(html_source):
""" Converts HTML5 from Aloha editor output to a lxml etree. """
xml = _tidy2xhtml5(html_source)
for i, transform in enumerate(ALOHA2HTML_TRANSFORM_PIPELINE):
xml = transform(xml)
return xml
def aloha_to_html(html_source):
"""Converts HTML5 from Aloha to a more structured HTML5"""
xml = aloha_to_etree(html_source)
return etree.tostring(xml, pretty_print=True)
def html_to_cnxml(html_source, cnxml_source):
"""Transform the HTML to CNXML. We need the original CNXML content in
order to preserve the metadata in the CNXML document.
"""
source = _string2io(html_source)
xml = etree.parse(source)
cnxml = etree.parse(_string2io(cnxml_source))
# Run the HTML to CNXML transform on it
xml = _transform('html5-to-cnxml.xsl', xml)
# Replace the original content element with the transformed one.
namespaces = {'c': 'http://cnx.rice.edu/cnxml'}
xpath = etree.XPath('//c:content', namespaces=namespaces)
replaceable_node = xpath(cnxml)[0]
replaceable_node.getparent().replace(replaceable_node, xml.getroot())
# Set the content into the existing cnxml source
return etree.tostring(cnxml)
HTML2VALID_CNXML_TRANSFORM_PIPELINE = [
partial(_transform, 'html5-to-cnxml-pass01-cleanup.xsl'),
partial(_transform, 'html5-to-cnxml-pass02-enclose-para.xsl'),
partial(_transform, 'html5-to-cnxml-pass03-xhtml2cnxml.xsl'),
# TODO: Recognize mime type of images here!
partial(_transform, 'html5-to-cnxml-pass04-postprocessing.xsl'),
partial(_transform, 'html5-to-cnxml-pass05-cnxml-id-generation.xsl'),
partial(_transform, 'html5-to-cnxml-pass06-cnxml-postprocessing.xsl'),
]
def etree_to_valid_cnxml(tree, **kwargs):
for i, transform in enumerate(HTML2VALID_CNXML_TRANSFORM_PIPELINE):
tree = transform(tree)
return etree.tostring(tree, **kwargs)
def html_to_valid_cnxml(html_source):
"""Transform the HTML to valid CNXML (used for OERPUB).
No original CNXML is needed. If HTML is from Aloha please use
aloha_to_html before using this method
"""
source = _string2io(html_source)
xml = etree.parse(source)
return etree_to_valid_cnxml(xml, pretty_print=True)
|
openstax/rhaptos.cnxmlutils | rhaptos/cnxmlutils/utils.py | aloha_to_etree | python | def aloha_to_etree(html_source):
xml = _tidy2xhtml5(html_source)
for i, transform in enumerate(ALOHA2HTML_TRANSFORM_PIPELINE):
xml = transform(xml)
return xml | Converts HTML5 from Aloha editor output to a lxml etree. | train | https://github.com/openstax/rhaptos.cnxmlutils/blob/c32b1a7428dc652e8cd745f3fdf4019a20543649/rhaptos/cnxmlutils/utils.py#L233-L238 | [
"def _tidy2xhtml5(html):\n \"\"\"Tidy up a html4/5 soup to a parsable valid XHTML5.\n Requires tidy-html5 from https://github.com/w3c/tidy-html5\n Installation: http://goo.gl/FG27n\n \"\"\"\n html = _io2string(html)\n html = _pre_tidy(html) # Pre-process\n xhtml5, errors =\\\n tidy_document(html,\n options={\n # do not merge nested div elements\n # - preserve semantic block structrues\n 'merge-divs': 0,\n # create xml output\n 'output-xml': 1,\n # Don't use indent, adds extra linespace or linefeed\n # which are big problems\n 'indent': 0,\n # No tidy meta tag in output\n 'tidy-mark': 0,\n # No wrapping\n 'wrap': 0,\n # Help ensure validation\n 'alt-text': '',\n # No sense in transitional for tool-generated markup\n 'doctype': 'strict',\n # May not get what you expect,\n # but you will get something\n 'force-output': 1,\n # remove HTML entities like e.g. nbsp\n 'numeric-entities': 1,\n # remove\n 'clean': 1,\n 'bare': 1,\n 'word-2000': 1,\n 'drop-proprietary-attributes': 1,\n # enclose text in body always with <p>...</p>\n 'enclose-text': 1,\n # transforms <i> and <b> to <em> and <strong>\n 'logical-emphasis': 1,\n # do not tidy all MathML elements!\n # List of MathML 3.0 elements from\n # http://www.w3.org/TR/MathML3/appendixi.html#index.elem\n 'new-inline-tags': 'abs, and, annotation, '\n 'annotation-xml, apply, approx, arccos, arccosh, '\n 'arccot, arccoth, arccsc, arccsch, arcsec, arcsech, '\n 'arcsin, arcsinh, arctan, arctanh, arg, bind, bvar, '\n 'card, cartesianproduct, cbytes, ceiling, cerror, '\n 'ci, cn, codomain, complexes, compose, condition, '\n 'conjugate, cos, cosh, cot, coth, cs, csc, csch, '\n 'csymbol, curl, declare, degree, determinant, diff, '\n 'divergence, divide, domain, domainofapplication, '\n 'el, emptyset, eq, equivalent, eulergamma, exists, '\n 'exp, exponentiale, factorial, factorof, false, '\n 'floor, fn, forall, gcd, geq, grad, gt, ident, '\n 'image, imaginary, imaginaryi, implies, in, '\n 'infinity, int, integers, intersect, interval, 
'\n 'inverse, lambda, laplacian, lcm, leq, limit, list, '\n 'ln, log, logbase, lowlimit, lt, maction, malign, '\n 'maligngroup, malignmark, malignscope, math, '\n 'matrix, matrixrow, max, mean, median, menclose, '\n 'merror, mfenced, mfrac, mfraction, mglyph, mi, '\n 'min, minus, mlabeledtr, mlongdiv, mmultiscripts, '\n 'mn, mo, mode, moment, momentabout, mover, mpadded, '\n 'mphantom, mprescripts, mroot, mrow, ms, mscarries, '\n 'mscarry, msgroup, msline, mspace, msqrt, msrow, '\n 'mstack, mstyle, msub, msubsup, msup, mtable, mtd, '\n 'mtext, mtr, munder, munderover, naturalnumbers, '\n 'neq, none, not, notanumber, note, notin, '\n 'notprsubset, notsubset, or, otherwise, '\n 'outerproduct, partialdiff, pi, piece, piecewise, '\n 'plus, power, primes, product, prsubset, quotient, '\n 'rationals, real, reals, reln, rem, root, '\n 'scalarproduct, sdev, sec, sech, selector, '\n 'semantics, sep, set, setdiff, share, sin, sinh, '\n 'subset, sum, tan, tanh, tendsto, times, transpose, '\n 'true, union, uplimit, variance, vector, '\n 'vectorproduct, xor',\n 'doctype': 'html5',\n })\n\n # return xhtml5\n # return the tree itself, there is another modification below to avoid\n # another parse\n return _post_tidy(xhtml5)\n"
] | # -*- coding: utf-8 -*-
"""
Copyright (C) 2013 Rice University
This software is subject to the provisions of the
GNU AFFERO GENERAL PUBLIC LICENSE Version 3.0 (AGPL).
See LICENSE.txt for details.
Various utility/helper functions...
Some of thses are used to tranform from one source format to another.
"""
import pkg_resources
from lxml import etree
from lxml.html import tostring as tohtml
from functools import partial
# requires tidy-html5 from https://github.com/w3c/tidy-html5
# Installation: http://goo.gl/FG27n
from tidylib import tidy_document
# for unescaping math from Mathjax script tag
from xml.sax.saxutils import unescape
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from . import __version__ as version
__all__ = (
'NAMESPACES', 'XHTML_INCLUDE_XPATH', 'XHTML_MODULE_BODY_XPATH',
'cnxml_to_html', 'html_to_cnxml',
)
NAMESPACES = {
'xhtml': 'http://www.w3.org/1999/xhtml',
}
XHTML_INCLUDE_XPATH = etree.XPath('//xhtml:a[@class="include"]',
namespaces=NAMESPACES)
XHTML_MODULE_BODY_XPATH = etree.XPath('//xhtml:body', namespaces=NAMESPACES)
def _pre_tidy(html):
""" This method transforms a few things before tidy runs. When we get rid
of tidy, this can go away. """
tree = etree.fromstring(html, etree.HTMLParser())
for el in tree.xpath('//u'):
el.tag = 'em'
c = el.attrib.get('class', '').split()
if 'underline' not in c:
c.append('underline')
el.attrib['class'] = ' '.join(c)
return tohtml(tree)
def _post_tidy(html):
""" This method transforms post tidy. Will go away when tidy goes away. """
tree = etree.fromstring(html)
ems = tree.xpath(
"//xh:em[@class='underline']|//xh:em[contains(@class, ' underline ')]",
namespaces={'xh': 'http://www.w3.org/1999/xhtml'})
for el in ems:
c = el.attrib.get('class', '').split()
c.remove('underline')
el.tag = '{http://www.w3.org/1999/xhtml}u'
if c:
el.attrib['class'] = ' '.join(c)
elif 'class' in el.attrib:
del(el.attrib['class'])
return tree
# Tidy up the Google Docs HTML Soup
def _tidy2xhtml5(html):
"""Tidy up a html4/5 soup to a parsable valid XHTML5.
Requires tidy-html5 from https://github.com/w3c/tidy-html5
Installation: http://goo.gl/FG27n
"""
html = _io2string(html)
html = _pre_tidy(html) # Pre-process
xhtml5, errors =\
tidy_document(html,
options={
# do not merge nested div elements
# - preserve semantic block structrues
'merge-divs': 0,
# create xml output
'output-xml': 1,
# Don't use indent, adds extra linespace or linefeed
# which are big problems
'indent': 0,
# No tidy meta tag in output
'tidy-mark': 0,
# No wrapping
'wrap': 0,
# Help ensure validation
'alt-text': '',
# No sense in transitional for tool-generated markup
'doctype': 'strict',
# May not get what you expect,
# but you will get something
'force-output': 1,
# remove HTML entities like e.g. nbsp
'numeric-entities': 1,
# remove
'clean': 1,
'bare': 1,
'word-2000': 1,
'drop-proprietary-attributes': 1,
# enclose text in body always with <p>...</p>
'enclose-text': 1,
# transforms <i> and <b> to <em> and <strong>
'logical-emphasis': 1,
# do not tidy all MathML elements!
# List of MathML 3.0 elements from
# http://www.w3.org/TR/MathML3/appendixi.html#index.elem
'new-inline-tags': 'abs, and, annotation, '
'annotation-xml, apply, approx, arccos, arccosh, '
'arccot, arccoth, arccsc, arccsch, arcsec, arcsech, '
'arcsin, arcsinh, arctan, arctanh, arg, bind, bvar, '
'card, cartesianproduct, cbytes, ceiling, cerror, '
'ci, cn, codomain, complexes, compose, condition, '
'conjugate, cos, cosh, cot, coth, cs, csc, csch, '
'csymbol, curl, declare, degree, determinant, diff, '
'divergence, divide, domain, domainofapplication, '
'el, emptyset, eq, equivalent, eulergamma, exists, '
'exp, exponentiale, factorial, factorof, false, '
'floor, fn, forall, gcd, geq, grad, gt, ident, '
'image, imaginary, imaginaryi, implies, in, '
'infinity, int, integers, intersect, interval, '
'inverse, lambda, laplacian, lcm, leq, limit, list, '
'ln, log, logbase, lowlimit, lt, maction, malign, '
'maligngroup, malignmark, malignscope, math, '
'matrix, matrixrow, max, mean, median, menclose, '
'merror, mfenced, mfrac, mfraction, mglyph, mi, '
'min, minus, mlabeledtr, mlongdiv, mmultiscripts, '
'mn, mo, mode, moment, momentabout, mover, mpadded, '
'mphantom, mprescripts, mroot, mrow, ms, mscarries, '
'mscarry, msgroup, msline, mspace, msqrt, msrow, '
'mstack, mstyle, msub, msubsup, msup, mtable, mtd, '
'mtext, mtr, munder, munderover, naturalnumbers, '
'neq, none, not, notanumber, note, notin, '
'notprsubset, notsubset, or, otherwise, '
'outerproduct, partialdiff, pi, piece, piecewise, '
'plus, power, primes, product, prsubset, quotient, '
'rationals, real, reals, reln, rem, root, '
'scalarproduct, sdev, sec, sech, selector, '
'semantics, sep, set, setdiff, share, sin, sinh, '
'subset, sum, tan, tanh, tendsto, times, transpose, '
'true, union, uplimit, variance, vector, '
'vectorproduct, xor',
'doctype': 'html5',
})
# return xhtml5
# return the tree itself, there is another modification below to avoid
# another parse
return _post_tidy(xhtml5)
def _io2string(s):
"""If necessary it will convert the io object to an string
"""
if hasattr(s, 'read'):
s = s.read()
return s
def _string2io(s):
"""If necessary it will convert the string to an io object
(e.g. with read and write methods).
"""
if not hasattr(s, 'read'):
s = StringIO(s)
return s
def _make_xsl(filename):
"""Helper that creates a XSLT stylesheet """
path = pkg_resources.resource_filename('rhaptos.cnxmlutils.xsl', filename)
xml = etree.parse(path)
return etree.XSLT(xml)
def _transform(xsl_filename, xml, **kwargs):
"""Transforms the xml using the specifiec xsl file."""
xslt = _make_xsl(xsl_filename)
xml = xslt(xml, **kwargs)
return xml
def _unescape_math(xml):
"""Unescapes Math from Mathjax to MathML."""
xpath_math_script = etree.XPath(
'//x:script[@type="math/mml"]',
namespaces={'x': 'http://www.w3.org/1999/xhtml'})
math_script_list = xpath_math_script(xml)
for mathscript in math_script_list:
math = mathscript.text
# some browsers double escape like e.g. Firefox
math = unescape(unescape(math))
mathscript.clear()
mathscript.set('type', 'math/mml')
new_math = etree.fromstring(math)
mathscript.append(new_math)
return xml
def cnxml_to_html(cnxml_source):
"""Transform the CNXML source to HTML"""
source = _string2io(cnxml_source)
xml = etree.parse(source)
# Run the CNXML to HTML transform
xml = _transform('cnxml-to-html5.xsl', xml,
version='"{}"'.format(version))
xml = XHTML_MODULE_BODY_XPATH(xml)
return etree.tostring(xml[0])
ALOHA2HTML_TRANSFORM_PIPELINE = [
partial(_transform, 'aloha-to-html5-pass01-leveled-headers.xsl'),
partial(_transform, 'aloha-to-html5-pass02-new-min-header-level.xsl'),
partial(_transform, 'aloha-to-html5-pass03-nested-headers.xsl'),
partial(_transform, 'aloha-to-html5-pass04-headers2sections.xsl'),
_unescape_math,
partial(_transform, 'aloha-to-html5-pass05-mathjax2mathml.xsl'),
partial(_transform, 'aloha-to-html5-pass06-postprocessing.xsl'),
]
def aloha_to_html(html_source):
"""Converts HTML5 from Aloha to a more structured HTML5"""
xml = aloha_to_etree(html_source)
return etree.tostring(xml, pretty_print=True)
def html_to_cnxml(html_source, cnxml_source):
"""Transform the HTML to CNXML. We need the original CNXML content in
order to preserve the metadata in the CNXML document.
"""
source = _string2io(html_source)
xml = etree.parse(source)
cnxml = etree.parse(_string2io(cnxml_source))
# Run the HTML to CNXML transform on it
xml = _transform('html5-to-cnxml.xsl', xml)
# Replace the original content element with the transformed one.
namespaces = {'c': 'http://cnx.rice.edu/cnxml'}
xpath = etree.XPath('//c:content', namespaces=namespaces)
replaceable_node = xpath(cnxml)[0]
replaceable_node.getparent().replace(replaceable_node, xml.getroot())
# Set the content into the existing cnxml source
return etree.tostring(cnxml)
HTML2VALID_CNXML_TRANSFORM_PIPELINE = [
partial(_transform, 'html5-to-cnxml-pass01-cleanup.xsl'),
partial(_transform, 'html5-to-cnxml-pass02-enclose-para.xsl'),
partial(_transform, 'html5-to-cnxml-pass03-xhtml2cnxml.xsl'),
# TODO: Recognize mime type of images here!
partial(_transform, 'html5-to-cnxml-pass04-postprocessing.xsl'),
partial(_transform, 'html5-to-cnxml-pass05-cnxml-id-generation.xsl'),
partial(_transform, 'html5-to-cnxml-pass06-cnxml-postprocessing.xsl'),
]
def etree_to_valid_cnxml(tree, **kwargs):
for i, transform in enumerate(HTML2VALID_CNXML_TRANSFORM_PIPELINE):
tree = transform(tree)
return etree.tostring(tree, **kwargs)
def html_to_valid_cnxml(html_source):
"""Transform the HTML to valid CNXML (used for OERPUB).
No original CNXML is needed. If HTML is from Aloha please use
aloha_to_html before using this method
"""
source = _string2io(html_source)
xml = etree.parse(source)
return etree_to_valid_cnxml(xml, pretty_print=True)
|
openstax/rhaptos.cnxmlutils | rhaptos/cnxmlutils/utils.py | aloha_to_html | python | def aloha_to_html(html_source):
xml = aloha_to_etree(html_source)
return etree.tostring(xml, pretty_print=True) | Converts HTML5 from Aloha to a more structured HTML5 | train | https://github.com/openstax/rhaptos.cnxmlutils/blob/c32b1a7428dc652e8cd745f3fdf4019a20543649/rhaptos/cnxmlutils/utils.py#L241-L244 | [
"def aloha_to_etree(html_source):\n \"\"\" Converts HTML5 from Aloha editor output to a lxml etree. \"\"\"\n xml = _tidy2xhtml5(html_source)\n for i, transform in enumerate(ALOHA2HTML_TRANSFORM_PIPELINE):\n xml = transform(xml)\n return xml\n"
] | # -*- coding: utf-8 -*-
"""
Copyright (C) 2013 Rice University
This software is subject to the provisions of the
GNU AFFERO GENERAL PUBLIC LICENSE Version 3.0 (AGPL).
See LICENSE.txt for details.
Various utility/helper functions...
Some of thses are used to tranform from one source format to another.
"""
import pkg_resources
from lxml import etree
from lxml.html import tostring as tohtml
from functools import partial
# requires tidy-html5 from https://github.com/w3c/tidy-html5
# Installation: http://goo.gl/FG27n
from tidylib import tidy_document
# for unescaping math from Mathjax script tag
from xml.sax.saxutils import unescape
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from . import __version__ as version
__all__ = (
'NAMESPACES', 'XHTML_INCLUDE_XPATH', 'XHTML_MODULE_BODY_XPATH',
'cnxml_to_html', 'html_to_cnxml',
)
NAMESPACES = {
'xhtml': 'http://www.w3.org/1999/xhtml',
}
XHTML_INCLUDE_XPATH = etree.XPath('//xhtml:a[@class="include"]',
namespaces=NAMESPACES)
XHTML_MODULE_BODY_XPATH = etree.XPath('//xhtml:body', namespaces=NAMESPACES)
def _pre_tidy(html):
""" This method transforms a few things before tidy runs. When we get rid
of tidy, this can go away. """
tree = etree.fromstring(html, etree.HTMLParser())
for el in tree.xpath('//u'):
el.tag = 'em'
c = el.attrib.get('class', '').split()
if 'underline' not in c:
c.append('underline')
el.attrib['class'] = ' '.join(c)
return tohtml(tree)
def _post_tidy(html):
""" This method transforms post tidy. Will go away when tidy goes away. """
tree = etree.fromstring(html)
ems = tree.xpath(
"//xh:em[@class='underline']|//xh:em[contains(@class, ' underline ')]",
namespaces={'xh': 'http://www.w3.org/1999/xhtml'})
for el in ems:
c = el.attrib.get('class', '').split()
c.remove('underline')
el.tag = '{http://www.w3.org/1999/xhtml}u'
if c:
el.attrib['class'] = ' '.join(c)
elif 'class' in el.attrib:
del(el.attrib['class'])
return tree
# Tidy up the Google Docs HTML Soup
def _tidy2xhtml5(html):
"""Tidy up a html4/5 soup to a parsable valid XHTML5.
Requires tidy-html5 from https://github.com/w3c/tidy-html5
Installation: http://goo.gl/FG27n
"""
html = _io2string(html)
html = _pre_tidy(html) # Pre-process
xhtml5, errors =\
tidy_document(html,
options={
# do not merge nested div elements
# - preserve semantic block structrues
'merge-divs': 0,
# create xml output
'output-xml': 1,
# Don't use indent, adds extra linespace or linefeed
# which are big problems
'indent': 0,
# No tidy meta tag in output
'tidy-mark': 0,
# No wrapping
'wrap': 0,
# Help ensure validation
'alt-text': '',
# No sense in transitional for tool-generated markup
'doctype': 'strict',
# May not get what you expect,
# but you will get something
'force-output': 1,
# remove HTML entities like e.g. nbsp
'numeric-entities': 1,
# remove
'clean': 1,
'bare': 1,
'word-2000': 1,
'drop-proprietary-attributes': 1,
# enclose text in body always with <p>...</p>
'enclose-text': 1,
# transforms <i> and <b> to <em> and <strong>
'logical-emphasis': 1,
# do not tidy all MathML elements!
# List of MathML 3.0 elements from
# http://www.w3.org/TR/MathML3/appendixi.html#index.elem
'new-inline-tags': 'abs, and, annotation, '
'annotation-xml, apply, approx, arccos, arccosh, '
'arccot, arccoth, arccsc, arccsch, arcsec, arcsech, '
'arcsin, arcsinh, arctan, arctanh, arg, bind, bvar, '
'card, cartesianproduct, cbytes, ceiling, cerror, '
'ci, cn, codomain, complexes, compose, condition, '
'conjugate, cos, cosh, cot, coth, cs, csc, csch, '
'csymbol, curl, declare, degree, determinant, diff, '
'divergence, divide, domain, domainofapplication, '
'el, emptyset, eq, equivalent, eulergamma, exists, '
'exp, exponentiale, factorial, factorof, false, '
'floor, fn, forall, gcd, geq, grad, gt, ident, '
'image, imaginary, imaginaryi, implies, in, '
'infinity, int, integers, intersect, interval, '
'inverse, lambda, laplacian, lcm, leq, limit, list, '
'ln, log, logbase, lowlimit, lt, maction, malign, '
'maligngroup, malignmark, malignscope, math, '
'matrix, matrixrow, max, mean, median, menclose, '
'merror, mfenced, mfrac, mfraction, mglyph, mi, '
'min, minus, mlabeledtr, mlongdiv, mmultiscripts, '
'mn, mo, mode, moment, momentabout, mover, mpadded, '
'mphantom, mprescripts, mroot, mrow, ms, mscarries, '
'mscarry, msgroup, msline, mspace, msqrt, msrow, '
'mstack, mstyle, msub, msubsup, msup, mtable, mtd, '
'mtext, mtr, munder, munderover, naturalnumbers, '
'neq, none, not, notanumber, note, notin, '
'notprsubset, notsubset, or, otherwise, '
'outerproduct, partialdiff, pi, piece, piecewise, '
'plus, power, primes, product, prsubset, quotient, '
'rationals, real, reals, reln, rem, root, '
'scalarproduct, sdev, sec, sech, selector, '
'semantics, sep, set, setdiff, share, sin, sinh, '
'subset, sum, tan, tanh, tendsto, times, transpose, '
'true, union, uplimit, variance, vector, '
'vectorproduct, xor',
'doctype': 'html5',
})
# return xhtml5
# return the tree itself, there is another modification below to avoid
# another parse
return _post_tidy(xhtml5)
def _io2string(s):
"""If necessary it will convert the io object to an string
"""
if hasattr(s, 'read'):
s = s.read()
return s
def _string2io(s):
"""If necessary it will convert the string to an io object
(e.g. with read and write methods).
"""
if not hasattr(s, 'read'):
s = StringIO(s)
return s
def _make_xsl(filename):
"""Helper that creates a XSLT stylesheet """
path = pkg_resources.resource_filename('rhaptos.cnxmlutils.xsl', filename)
xml = etree.parse(path)
return etree.XSLT(xml)
def _transform(xsl_filename, xml, **kwargs):
"""Transforms the xml using the specifiec xsl file."""
xslt = _make_xsl(xsl_filename)
xml = xslt(xml, **kwargs)
return xml
def _unescape_math(xml):
"""Unescapes Math from Mathjax to MathML."""
xpath_math_script = etree.XPath(
'//x:script[@type="math/mml"]',
namespaces={'x': 'http://www.w3.org/1999/xhtml'})
math_script_list = xpath_math_script(xml)
for mathscript in math_script_list:
math = mathscript.text
# some browsers double escape like e.g. Firefox
math = unescape(unescape(math))
mathscript.clear()
mathscript.set('type', 'math/mml')
new_math = etree.fromstring(math)
mathscript.append(new_math)
return xml
def cnxml_to_html(cnxml_source):
"""Transform the CNXML source to HTML"""
source = _string2io(cnxml_source)
xml = etree.parse(source)
# Run the CNXML to HTML transform
xml = _transform('cnxml-to-html5.xsl', xml,
version='"{}"'.format(version))
xml = XHTML_MODULE_BODY_XPATH(xml)
return etree.tostring(xml[0])
ALOHA2HTML_TRANSFORM_PIPELINE = [
partial(_transform, 'aloha-to-html5-pass01-leveled-headers.xsl'),
partial(_transform, 'aloha-to-html5-pass02-new-min-header-level.xsl'),
partial(_transform, 'aloha-to-html5-pass03-nested-headers.xsl'),
partial(_transform, 'aloha-to-html5-pass04-headers2sections.xsl'),
_unescape_math,
partial(_transform, 'aloha-to-html5-pass05-mathjax2mathml.xsl'),
partial(_transform, 'aloha-to-html5-pass06-postprocessing.xsl'),
]
def aloha_to_etree(html_source):
""" Converts HTML5 from Aloha editor output to a lxml etree. """
xml = _tidy2xhtml5(html_source)
for i, transform in enumerate(ALOHA2HTML_TRANSFORM_PIPELINE):
xml = transform(xml)
return xml
def html_to_cnxml(html_source, cnxml_source):
"""Transform the HTML to CNXML. We need the original CNXML content in
order to preserve the metadata in the CNXML document.
"""
source = _string2io(html_source)
xml = etree.parse(source)
cnxml = etree.parse(_string2io(cnxml_source))
# Run the HTML to CNXML transform on it
xml = _transform('html5-to-cnxml.xsl', xml)
# Replace the original content element with the transformed one.
namespaces = {'c': 'http://cnx.rice.edu/cnxml'}
xpath = etree.XPath('//c:content', namespaces=namespaces)
replaceable_node = xpath(cnxml)[0]
replaceable_node.getparent().replace(replaceable_node, xml.getroot())
# Set the content into the existing cnxml source
return etree.tostring(cnxml)
HTML2VALID_CNXML_TRANSFORM_PIPELINE = [
partial(_transform, 'html5-to-cnxml-pass01-cleanup.xsl'),
partial(_transform, 'html5-to-cnxml-pass02-enclose-para.xsl'),
partial(_transform, 'html5-to-cnxml-pass03-xhtml2cnxml.xsl'),
# TODO: Recognize mime type of images here!
partial(_transform, 'html5-to-cnxml-pass04-postprocessing.xsl'),
partial(_transform, 'html5-to-cnxml-pass05-cnxml-id-generation.xsl'),
partial(_transform, 'html5-to-cnxml-pass06-cnxml-postprocessing.xsl'),
]
def etree_to_valid_cnxml(tree, **kwargs):
for i, transform in enumerate(HTML2VALID_CNXML_TRANSFORM_PIPELINE):
tree = transform(tree)
return etree.tostring(tree, **kwargs)
def html_to_valid_cnxml(html_source):
"""Transform the HTML to valid CNXML (used for OERPUB).
No original CNXML is needed. If HTML is from Aloha please use
aloha_to_html before using this method
"""
source = _string2io(html_source)
xml = etree.parse(source)
return etree_to_valid_cnxml(xml, pretty_print=True)
|
openstax/rhaptos.cnxmlutils | rhaptos/cnxmlutils/utils.py | html_to_cnxml | python | def html_to_cnxml(html_source, cnxml_source):
source = _string2io(html_source)
xml = etree.parse(source)
cnxml = etree.parse(_string2io(cnxml_source))
# Run the HTML to CNXML transform on it
xml = _transform('html5-to-cnxml.xsl', xml)
# Replace the original content element with the transformed one.
namespaces = {'c': 'http://cnx.rice.edu/cnxml'}
xpath = etree.XPath('//c:content', namespaces=namespaces)
replaceable_node = xpath(cnxml)[0]
replaceable_node.getparent().replace(replaceable_node, xml.getroot())
# Set the content into the existing cnxml source
return etree.tostring(cnxml) | Transform the HTML to CNXML. We need the original CNXML content in
order to preserve the metadata in the CNXML document. | train | https://github.com/openstax/rhaptos.cnxmlutils/blob/c32b1a7428dc652e8cd745f3fdf4019a20543649/rhaptos/cnxmlutils/utils.py#L247-L262 | [
"def _string2io(s):\n \"\"\"If necessary it will convert the string to an io object\n (e.g. with read and write methods).\n \"\"\"\n if not hasattr(s, 'read'):\n s = StringIO(s)\n return s\n",
"def _transform(xsl_filename, xml, **kwargs):\n \"\"\"Transforms the xml using the specifiec xsl file.\"\"\"\n xslt = _make_xsl(xsl_filename)\n xml = xslt(xml, **kwargs)\n return xml\n"
] | # -*- coding: utf-8 -*-
"""
Copyright (C) 2013 Rice University
This software is subject to the provisions of the
GNU AFFERO GENERAL PUBLIC LICENSE Version 3.0 (AGPL).
See LICENSE.txt for details.
Various utility/helper functions...
Some of thses are used to tranform from one source format to another.
"""
import pkg_resources
from lxml import etree
from lxml.html import tostring as tohtml
from functools import partial
# requires tidy-html5 from https://github.com/w3c/tidy-html5
# Installation: http://goo.gl/FG27n
from tidylib import tidy_document
# for unescaping math from Mathjax script tag
from xml.sax.saxutils import unescape
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from . import __version__ as version
__all__ = (
'NAMESPACES', 'XHTML_INCLUDE_XPATH', 'XHTML_MODULE_BODY_XPATH',
'cnxml_to_html', 'html_to_cnxml',
)
NAMESPACES = {
'xhtml': 'http://www.w3.org/1999/xhtml',
}
XHTML_INCLUDE_XPATH = etree.XPath('//xhtml:a[@class="include"]',
namespaces=NAMESPACES)
XHTML_MODULE_BODY_XPATH = etree.XPath('//xhtml:body', namespaces=NAMESPACES)
def _pre_tidy(html):
""" This method transforms a few things before tidy runs. When we get rid
of tidy, this can go away. """
tree = etree.fromstring(html, etree.HTMLParser())
for el in tree.xpath('//u'):
el.tag = 'em'
c = el.attrib.get('class', '').split()
if 'underline' not in c:
c.append('underline')
el.attrib['class'] = ' '.join(c)
return tohtml(tree)
def _post_tidy(html):
""" This method transforms post tidy. Will go away when tidy goes away. """
tree = etree.fromstring(html)
ems = tree.xpath(
"//xh:em[@class='underline']|//xh:em[contains(@class, ' underline ')]",
namespaces={'xh': 'http://www.w3.org/1999/xhtml'})
for el in ems:
c = el.attrib.get('class', '').split()
c.remove('underline')
el.tag = '{http://www.w3.org/1999/xhtml}u'
if c:
el.attrib['class'] = ' '.join(c)
elif 'class' in el.attrib:
del(el.attrib['class'])
return tree
# Tidy up the Google Docs HTML Soup
def _tidy2xhtml5(html):
"""Tidy up a html4/5 soup to a parsable valid XHTML5.
Requires tidy-html5 from https://github.com/w3c/tidy-html5
Installation: http://goo.gl/FG27n
"""
html = _io2string(html)
html = _pre_tidy(html) # Pre-process
xhtml5, errors =\
tidy_document(html,
options={
# do not merge nested div elements
# - preserve semantic block structrues
'merge-divs': 0,
# create xml output
'output-xml': 1,
# Don't use indent, adds extra linespace or linefeed
# which are big problems
'indent': 0,
# No tidy meta tag in output
'tidy-mark': 0,
# No wrapping
'wrap': 0,
# Help ensure validation
'alt-text': '',
# No sense in transitional for tool-generated markup
'doctype': 'strict',
# May not get what you expect,
# but you will get something
'force-output': 1,
# remove HTML entities like e.g. nbsp
'numeric-entities': 1,
# remove
'clean': 1,
'bare': 1,
'word-2000': 1,
'drop-proprietary-attributes': 1,
# enclose text in body always with <p>...</p>
'enclose-text': 1,
# transforms <i> and <b> to <em> and <strong>
'logical-emphasis': 1,
# do not tidy all MathML elements!
# List of MathML 3.0 elements from
# http://www.w3.org/TR/MathML3/appendixi.html#index.elem
'new-inline-tags': 'abs, and, annotation, '
'annotation-xml, apply, approx, arccos, arccosh, '
'arccot, arccoth, arccsc, arccsch, arcsec, arcsech, '
'arcsin, arcsinh, arctan, arctanh, arg, bind, bvar, '
'card, cartesianproduct, cbytes, ceiling, cerror, '
'ci, cn, codomain, complexes, compose, condition, '
'conjugate, cos, cosh, cot, coth, cs, csc, csch, '
'csymbol, curl, declare, degree, determinant, diff, '
'divergence, divide, domain, domainofapplication, '
'el, emptyset, eq, equivalent, eulergamma, exists, '
'exp, exponentiale, factorial, factorof, false, '
'floor, fn, forall, gcd, geq, grad, gt, ident, '
'image, imaginary, imaginaryi, implies, in, '
'infinity, int, integers, intersect, interval, '
'inverse, lambda, laplacian, lcm, leq, limit, list, '
'ln, log, logbase, lowlimit, lt, maction, malign, '
'maligngroup, malignmark, malignscope, math, '
'matrix, matrixrow, max, mean, median, menclose, '
'merror, mfenced, mfrac, mfraction, mglyph, mi, '
'min, minus, mlabeledtr, mlongdiv, mmultiscripts, '
'mn, mo, mode, moment, momentabout, mover, mpadded, '
'mphantom, mprescripts, mroot, mrow, ms, mscarries, '
'mscarry, msgroup, msline, mspace, msqrt, msrow, '
'mstack, mstyle, msub, msubsup, msup, mtable, mtd, '
'mtext, mtr, munder, munderover, naturalnumbers, '
'neq, none, not, notanumber, note, notin, '
'notprsubset, notsubset, or, otherwise, '
'outerproduct, partialdiff, pi, piece, piecewise, '
'plus, power, primes, product, prsubset, quotient, '
'rationals, real, reals, reln, rem, root, '
'scalarproduct, sdev, sec, sech, selector, '
'semantics, sep, set, setdiff, share, sin, sinh, '
'subset, sum, tan, tanh, tendsto, times, transpose, '
'true, union, uplimit, variance, vector, '
'vectorproduct, xor',
'doctype': 'html5',
})
# return xhtml5
# return the tree itself, there is another modification below to avoid
# another parse
return _post_tidy(xhtml5)
def _io2string(s):
"""If necessary it will convert the io object to an string
"""
if hasattr(s, 'read'):
s = s.read()
return s
def _string2io(s):
"""If necessary it will convert the string to an io object
(e.g. with read and write methods).
"""
if not hasattr(s, 'read'):
s = StringIO(s)
return s
def _make_xsl(filename):
"""Helper that creates a XSLT stylesheet """
path = pkg_resources.resource_filename('rhaptos.cnxmlutils.xsl', filename)
xml = etree.parse(path)
return etree.XSLT(xml)
def _transform(xsl_filename, xml, **kwargs):
"""Transforms the xml using the specifiec xsl file."""
xslt = _make_xsl(xsl_filename)
xml = xslt(xml, **kwargs)
return xml
def _unescape_math(xml):
"""Unescapes Math from Mathjax to MathML."""
xpath_math_script = etree.XPath(
'//x:script[@type="math/mml"]',
namespaces={'x': 'http://www.w3.org/1999/xhtml'})
math_script_list = xpath_math_script(xml)
for mathscript in math_script_list:
math = mathscript.text
# some browsers double escape like e.g. Firefox
math = unescape(unescape(math))
mathscript.clear()
mathscript.set('type', 'math/mml')
new_math = etree.fromstring(math)
mathscript.append(new_math)
return xml
def cnxml_to_html(cnxml_source):
"""Transform the CNXML source to HTML"""
source = _string2io(cnxml_source)
xml = etree.parse(source)
# Run the CNXML to HTML transform
xml = _transform('cnxml-to-html5.xsl', xml,
version='"{}"'.format(version))
xml = XHTML_MODULE_BODY_XPATH(xml)
return etree.tostring(xml[0])
ALOHA2HTML_TRANSFORM_PIPELINE = [
partial(_transform, 'aloha-to-html5-pass01-leveled-headers.xsl'),
partial(_transform, 'aloha-to-html5-pass02-new-min-header-level.xsl'),
partial(_transform, 'aloha-to-html5-pass03-nested-headers.xsl'),
partial(_transform, 'aloha-to-html5-pass04-headers2sections.xsl'),
_unescape_math,
partial(_transform, 'aloha-to-html5-pass05-mathjax2mathml.xsl'),
partial(_transform, 'aloha-to-html5-pass06-postprocessing.xsl'),
]
def aloha_to_etree(html_source):
""" Converts HTML5 from Aloha editor output to a lxml etree. """
xml = _tidy2xhtml5(html_source)
for i, transform in enumerate(ALOHA2HTML_TRANSFORM_PIPELINE):
xml = transform(xml)
return xml
def aloha_to_html(html_source):
"""Converts HTML5 from Aloha to a more structured HTML5"""
xml = aloha_to_etree(html_source)
return etree.tostring(xml, pretty_print=True)
HTML2VALID_CNXML_TRANSFORM_PIPELINE = [
partial(_transform, 'html5-to-cnxml-pass01-cleanup.xsl'),
partial(_transform, 'html5-to-cnxml-pass02-enclose-para.xsl'),
partial(_transform, 'html5-to-cnxml-pass03-xhtml2cnxml.xsl'),
# TODO: Recognize mime type of images here!
partial(_transform, 'html5-to-cnxml-pass04-postprocessing.xsl'),
partial(_transform, 'html5-to-cnxml-pass05-cnxml-id-generation.xsl'),
partial(_transform, 'html5-to-cnxml-pass06-cnxml-postprocessing.xsl'),
]
def etree_to_valid_cnxml(tree, **kwargs):
for i, transform in enumerate(HTML2VALID_CNXML_TRANSFORM_PIPELINE):
tree = transform(tree)
return etree.tostring(tree, **kwargs)
def html_to_valid_cnxml(html_source):
"""Transform the HTML to valid CNXML (used for OERPUB).
No original CNXML is needed. If HTML is from Aloha please use
aloha_to_html before using this method
"""
source = _string2io(html_source)
xml = etree.parse(source)
return etree_to_valid_cnxml(xml, pretty_print=True)
|
openstax/rhaptos.cnxmlutils | rhaptos/cnxmlutils/utils.py | html_to_valid_cnxml | python | def html_to_valid_cnxml(html_source):
source = _string2io(html_source)
xml = etree.parse(source)
return etree_to_valid_cnxml(xml, pretty_print=True) | Transform the HTML to valid CNXML (used for OERPUB).
No original CNXML is needed. If HTML is from Aloha please use
aloha_to_html before using this method | train | https://github.com/openstax/rhaptos.cnxmlutils/blob/c32b1a7428dc652e8cd745f3fdf4019a20543649/rhaptos/cnxmlutils/utils.py#L281-L288 | [
"def _string2io(s):\n \"\"\"If necessary it will convert the string to an io object\n (e.g. with read and write methods).\n \"\"\"\n if not hasattr(s, 'read'):\n s = StringIO(s)\n return s\n",
"def etree_to_valid_cnxml(tree, **kwargs):\n for i, transform in enumerate(HTML2VALID_CNXML_TRANSFORM_PIPELINE):\n tree = transform(tree)\n return etree.tostring(tree, **kwargs)\n"
] | # -*- coding: utf-8 -*-
"""
Copyright (C) 2013 Rice University
This software is subject to the provisions of the
GNU AFFERO GENERAL PUBLIC LICENSE Version 3.0 (AGPL).
See LICENSE.txt for details.
Various utility/helper functions...
Some of thses are used to tranform from one source format to another.
"""
import pkg_resources
from lxml import etree
from lxml.html import tostring as tohtml
from functools import partial
# requires tidy-html5 from https://github.com/w3c/tidy-html5
# Installation: http://goo.gl/FG27n
from tidylib import tidy_document
# for unescaping math from Mathjax script tag
from xml.sax.saxutils import unescape
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from . import __version__ as version
__all__ = (
'NAMESPACES', 'XHTML_INCLUDE_XPATH', 'XHTML_MODULE_BODY_XPATH',
'cnxml_to_html', 'html_to_cnxml',
)
NAMESPACES = {
'xhtml': 'http://www.w3.org/1999/xhtml',
}
XHTML_INCLUDE_XPATH = etree.XPath('//xhtml:a[@class="include"]',
namespaces=NAMESPACES)
XHTML_MODULE_BODY_XPATH = etree.XPath('//xhtml:body', namespaces=NAMESPACES)
def _pre_tidy(html):
""" This method transforms a few things before tidy runs. When we get rid
of tidy, this can go away. """
tree = etree.fromstring(html, etree.HTMLParser())
for el in tree.xpath('//u'):
el.tag = 'em'
c = el.attrib.get('class', '').split()
if 'underline' not in c:
c.append('underline')
el.attrib['class'] = ' '.join(c)
return tohtml(tree)
def _post_tidy(html):
""" This method transforms post tidy. Will go away when tidy goes away. """
tree = etree.fromstring(html)
ems = tree.xpath(
"//xh:em[@class='underline']|//xh:em[contains(@class, ' underline ')]",
namespaces={'xh': 'http://www.w3.org/1999/xhtml'})
for el in ems:
c = el.attrib.get('class', '').split()
c.remove('underline')
el.tag = '{http://www.w3.org/1999/xhtml}u'
if c:
el.attrib['class'] = ' '.join(c)
elif 'class' in el.attrib:
del(el.attrib['class'])
return tree
# Tidy up the Google Docs HTML Soup
def _tidy2xhtml5(html):
"""Tidy up a html4/5 soup to a parsable valid XHTML5.
Requires tidy-html5 from https://github.com/w3c/tidy-html5
Installation: http://goo.gl/FG27n
"""
html = _io2string(html)
html = _pre_tidy(html) # Pre-process
xhtml5, errors =\
tidy_document(html,
options={
# do not merge nested div elements
# - preserve semantic block structrues
'merge-divs': 0,
# create xml output
'output-xml': 1,
# Don't use indent, adds extra linespace or linefeed
# which are big problems
'indent': 0,
# No tidy meta tag in output
'tidy-mark': 0,
# No wrapping
'wrap': 0,
# Help ensure validation
'alt-text': '',
# No sense in transitional for tool-generated markup
'doctype': 'strict',
# May not get what you expect,
# but you will get something
'force-output': 1,
# remove HTML entities like e.g. nbsp
'numeric-entities': 1,
# remove
'clean': 1,
'bare': 1,
'word-2000': 1,
'drop-proprietary-attributes': 1,
# enclose text in body always with <p>...</p>
'enclose-text': 1,
# transforms <i> and <b> to <em> and <strong>
'logical-emphasis': 1,
# do not tidy all MathML elements!
# List of MathML 3.0 elements from
# http://www.w3.org/TR/MathML3/appendixi.html#index.elem
'new-inline-tags': 'abs, and, annotation, '
'annotation-xml, apply, approx, arccos, arccosh, '
'arccot, arccoth, arccsc, arccsch, arcsec, arcsech, '
'arcsin, arcsinh, arctan, arctanh, arg, bind, bvar, '
'card, cartesianproduct, cbytes, ceiling, cerror, '
'ci, cn, codomain, complexes, compose, condition, '
'conjugate, cos, cosh, cot, coth, cs, csc, csch, '
'csymbol, curl, declare, degree, determinant, diff, '
'divergence, divide, domain, domainofapplication, '
'el, emptyset, eq, equivalent, eulergamma, exists, '
'exp, exponentiale, factorial, factorof, false, '
'floor, fn, forall, gcd, geq, grad, gt, ident, '
'image, imaginary, imaginaryi, implies, in, '
'infinity, int, integers, intersect, interval, '
'inverse, lambda, laplacian, lcm, leq, limit, list, '
'ln, log, logbase, lowlimit, lt, maction, malign, '
'maligngroup, malignmark, malignscope, math, '
'matrix, matrixrow, max, mean, median, menclose, '
'merror, mfenced, mfrac, mfraction, mglyph, mi, '
'min, minus, mlabeledtr, mlongdiv, mmultiscripts, '
'mn, mo, mode, moment, momentabout, mover, mpadded, '
'mphantom, mprescripts, mroot, mrow, ms, mscarries, '
'mscarry, msgroup, msline, mspace, msqrt, msrow, '
'mstack, mstyle, msub, msubsup, msup, mtable, mtd, '
'mtext, mtr, munder, munderover, naturalnumbers, '
'neq, none, not, notanumber, note, notin, '
'notprsubset, notsubset, or, otherwise, '
'outerproduct, partialdiff, pi, piece, piecewise, '
'plus, power, primes, product, prsubset, quotient, '
'rationals, real, reals, reln, rem, root, '
'scalarproduct, sdev, sec, sech, selector, '
'semantics, sep, set, setdiff, share, sin, sinh, '
'subset, sum, tan, tanh, tendsto, times, transpose, '
'true, union, uplimit, variance, vector, '
'vectorproduct, xor',
'doctype': 'html5',
})
# return xhtml5
# return the tree itself, there is another modification below to avoid
# another parse
return _post_tidy(xhtml5)
def _io2string(s):
"""If necessary it will convert the io object to an string
"""
if hasattr(s, 'read'):
s = s.read()
return s
def _string2io(s):
"""If necessary it will convert the string to an io object
(e.g. with read and write methods).
"""
if not hasattr(s, 'read'):
s = StringIO(s)
return s
def _make_xsl(filename):
"""Helper that creates a XSLT stylesheet """
path = pkg_resources.resource_filename('rhaptos.cnxmlutils.xsl', filename)
xml = etree.parse(path)
return etree.XSLT(xml)
def _transform(xsl_filename, xml, **kwargs):
"""Transforms the xml using the specifiec xsl file."""
xslt = _make_xsl(xsl_filename)
xml = xslt(xml, **kwargs)
return xml
def _unescape_math(xml):
"""Unescapes Math from Mathjax to MathML."""
xpath_math_script = etree.XPath(
'//x:script[@type="math/mml"]',
namespaces={'x': 'http://www.w3.org/1999/xhtml'})
math_script_list = xpath_math_script(xml)
for mathscript in math_script_list:
math = mathscript.text
# some browsers double escape like e.g. Firefox
math = unescape(unescape(math))
mathscript.clear()
mathscript.set('type', 'math/mml')
new_math = etree.fromstring(math)
mathscript.append(new_math)
return xml
def cnxml_to_html(cnxml_source):
"""Transform the CNXML source to HTML"""
source = _string2io(cnxml_source)
xml = etree.parse(source)
# Run the CNXML to HTML transform
xml = _transform('cnxml-to-html5.xsl', xml,
version='"{}"'.format(version))
xml = XHTML_MODULE_BODY_XPATH(xml)
return etree.tostring(xml[0])
ALOHA2HTML_TRANSFORM_PIPELINE = [
partial(_transform, 'aloha-to-html5-pass01-leveled-headers.xsl'),
partial(_transform, 'aloha-to-html5-pass02-new-min-header-level.xsl'),
partial(_transform, 'aloha-to-html5-pass03-nested-headers.xsl'),
partial(_transform, 'aloha-to-html5-pass04-headers2sections.xsl'),
_unescape_math,
partial(_transform, 'aloha-to-html5-pass05-mathjax2mathml.xsl'),
partial(_transform, 'aloha-to-html5-pass06-postprocessing.xsl'),
]
def aloha_to_etree(html_source):
""" Converts HTML5 from Aloha editor output to a lxml etree. """
xml = _tidy2xhtml5(html_source)
for i, transform in enumerate(ALOHA2HTML_TRANSFORM_PIPELINE):
xml = transform(xml)
return xml
def aloha_to_html(html_source):
"""Converts HTML5 from Aloha to a more structured HTML5"""
xml = aloha_to_etree(html_source)
return etree.tostring(xml, pretty_print=True)
def html_to_cnxml(html_source, cnxml_source):
"""Transform the HTML to CNXML. We need the original CNXML content in
order to preserve the metadata in the CNXML document.
"""
source = _string2io(html_source)
xml = etree.parse(source)
cnxml = etree.parse(_string2io(cnxml_source))
# Run the HTML to CNXML transform on it
xml = _transform('html5-to-cnxml.xsl', xml)
# Replace the original content element with the transformed one.
namespaces = {'c': 'http://cnx.rice.edu/cnxml'}
xpath = etree.XPath('//c:content', namespaces=namespaces)
replaceable_node = xpath(cnxml)[0]
replaceable_node.getparent().replace(replaceable_node, xml.getroot())
# Set the content into the existing cnxml source
return etree.tostring(cnxml)
HTML2VALID_CNXML_TRANSFORM_PIPELINE = [
partial(_transform, 'html5-to-cnxml-pass01-cleanup.xsl'),
partial(_transform, 'html5-to-cnxml-pass02-enclose-para.xsl'),
partial(_transform, 'html5-to-cnxml-pass03-xhtml2cnxml.xsl'),
# TODO: Recognize mime type of images here!
partial(_transform, 'html5-to-cnxml-pass04-postprocessing.xsl'),
partial(_transform, 'html5-to-cnxml-pass05-cnxml-id-generation.xsl'),
partial(_transform, 'html5-to-cnxml-pass06-cnxml-postprocessing.xsl'),
]
def etree_to_valid_cnxml(tree, **kwargs):
for i, transform in enumerate(HTML2VALID_CNXML_TRANSFORM_PIPELINE):
tree = transform(tree)
return etree.tostring(tree, **kwargs)
|
openstax/rhaptos.cnxmlutils | rhaptos/cnxmlutils/symbols.py | replace | python | def replace(text):
for hex, value in UNICODE_DICTIONARY.items():
num = int(hex[3:-1], 16)
#uni = unichr(num)
decimal = '&#' + str(num) + ';'
for key in [ hex, decimal ]: #uni
text = text.replace(key, value)
return text | Replace both the hex and decimal versions of symbols in an XML string | train | https://github.com/openstax/rhaptos.cnxmlutils/blob/c32b1a7428dc652e8cd745f3fdf4019a20543649/rhaptos/cnxmlutils/symbols.py#L219-L227 | null | """
Copyright (C) 2013 Rice University
This software is subject to the provisions of the GNU AFFERO GENERAL PUBLIC LICENSE Version 3.0 (AGPL).
See LICENSE.txt for details.
"""
""" Replace high unicode symbols found in some documents
with characters with lower code points """
#
# Microsoft Symbol fonts - U+F020..U+F0FF (unicode private use area)
#
UNICODE_DICTIONARY = {
# wierd MS symbol font back flips
"":"(" # (
, "":")" # )
# known MS private use area unicode UTF-8 characters
, "�":" " # U+0020 SPACE, UTF-8: 0x20
, "�":"∀" # U+2200 FOR ALL, UTF-8: 0xE2 0x88 0x80
, "�":"∃" # U+2203 THERE EXISTS, UTF-8: 0xE2 0x88 0x83
, "�":"(" # U+0028 LEFT PARENTHESIS, UTF-8: 0x28
, "�":")" # U+0029 RIGHT PARENTHESIS, UTF-8: 0x29
, "�":"﹐" # U+FE50 SMALL COMMA, UTF-8: 0xEF 0xB9 0x90
# or U+002C COMMA
, "�":"-" # U+FF0D FULLWIDTH HYPHEN-MINUS, UTF-8: 0xEF 0xBC 0x8D
, "�":"." # U+002E FULL STOP, UTF-8: 0x2E
, "�":"²" # U+00B2 SUPERSCRIPT TWO, UTF-8: 0xC2 0xB2
, "�":"<" # U+003C LESS-THAN SIGN, UTF-8: 0x3C
, "�":"=" # U+003D EQUALS SIGN, UTF-8: 0x3D
, "�":"≅" # U+2245 APPROXIMATELY EQUAL TO, UTF-8: 0xE2 0x89 0x85
, "�":"Χ" # U+03A7 GREEK CAPITAL LETTER CHI, UTF-8: 0xCE 0xA7
, "�":"Δ" # U+0394 GREEK CAPITAL LETTER DELTA, UTF-8: 0xCE 0x94
, "�":"Γ" # U+0393 GREEK CAPITAL LETTER GAMMA, UTF-8: 0xCE 0x93
, "�":"Μ" # U+039C GREEK CAPITAL LETTER MU, UTF-8: 0xCE 0x9C
, "�":"Π" # U+03A0 GREEK CAPITAL LETTER PI, UTF-8: 0xCE 0xA0
, "�":"Ρ" # U+03A1 GREEK CAPITAL LETTER RHO, UTF-8: 0xCE 0xA1
, "�":"Σ" # U+03A3 GREEK CAPITAL LETTER SIGMA, UTF-8: 0xCE 0xA3
, "�":"Τ" # U+03A4 GREEK CAPITAL LETTER TAU, UTF-8: 0xCE 0xA4
, "�":"⊥" # U+22A5 UP TACK, UTF-8: 0xE2 0x8A 0xA5
, "�":"α" # U+03B1 GREEK SMALL LETTER ALPHA, UTF-8: 0xCE 0xB1
, "�":"β" # U+03B2 GREEK SMALL LETTER BETA, UTF-8: 0xCE 0xB2
, "�":"γ" # U+03B3 GREEK SMALL LETTER GAMMA, UTF-8: 0xCE 0xB3
, "�":"δ" # U+03B4 GREEK SMALL LETTER DELTA, UTF-8: 0xCE 0xB4
, "�":"ε" # U+03B5 GREEK SMALL LETTER EPSILON, UTF-8: 0xCE 0xB5
, "�":"ϕ" # U+03D5 GREEK PHI SYMBOL, UTF-8: 0xCF 0x95
# or U+03C6 GREEK SMALL LETTER PHI
, "�":"φ" # U+03C6 GREEK SMALL LETTER PHI, UTF-8: 0xCF 0x86
, "�":"κ" # U+03BA GREEK SMALL LETTER KAPPA, UTF-8: 0xCE 0xBA
, "�":"λ" # U+03BB GREEK SMALL LETTER LAMDA, UTF-8: 0xCE 0xBB
, "�":"μ" # U+03BC GREEK SMALL LETTER MU, UTF-8: 0xCE 0xBC
# or U+00B5 MICRO SIGN
, "�":"ν" # U+03BD GREEK SMALL LETTER NU, UTF-8: 0xCE 0xBD
, "�":"ο" # U+03BF GREEK SMALL LETTER OMICRON, UTF-8: 0xCE 0xBF
, "�":"π" # U+03C0 GREEK SMALL LETTER PI, UTF-8: 0xCF 0x80
# or U+03D6 GREEK PI SYMBOL
, "�":"θ" # U+03B8 GREEK SMALL LETTER THETA, UTF-8: 0xCE 0xB8
, "�":"ρ" # U+03C1 GREEK SMALL LETTER RHO, UTF-8: 0xCF 0x81
, "�":"σ" # U+03C3 GREEK SMALL LETTER SIGMA, UTF-8: 0xCF 0x83
, "�":"τ" # U+03C4 GREEK SMALL LETTER TAU, UTF-8: 0xCF 0x84
, "�":"υ" # U+03C5 GREEK SMALL LETTER UPSILON, UTF-8: 0xCF 0x85
, "�":"ω" # U+03C9 GREEK SMALL LETTER OMEGA, UTF-8: 0xCF 0x89
, "�":"ζ" # U+03B6 GREEK SMALL LETTER ZETA, UTF-8: 0xCE 0xB6
, "�":"|" # U+007C VERTICAL LINE, UTF-8: 0x7C
, "�":"∼" # U+223C TILDE OPERATOR, UTF-8: 0xE2 0x88 0xBC
, "�":"≤" # U+2264 LESS-THAN OR EQUAL TO, UTF-8: 0xE2 0x89 0xA4
, "�":"∞" # U+221E INFINITY, UTF-8: 0xE2 0x88 0x9E
, "�":"→" # U+2192 RIGHTWARDS ARROW, UTF-8: 0xE2 0x86 0x92
, "�":"°" # U+00B0 DEGREE SIGN, UTF-8: 0xC2 0xB0
, "�":"±" # U+00B1 PLUS-MINUS SIGN, UTF-8: 0xC2 0xB1
, "�":"≥" # U+2265 GREATER-THAN OR EQUAL TO, UTF-8: 0xE2 0x89 0xA5
, "�":"×" # U+00D7 MULTIPLICATION SIGN, UTF-8: 0xC3 0x97
, "�":"≠" # U+2260 NOT EQUAL TO, UTF-8: 0xE2 0x89 0xA0
, "�":"≈" # U+2248 ALMOST EQUAL TO, UTF-8: 0xE2 0x89 0x88
, "�":"…" # U+2026 HORIZONTAL ELLIPSIS, UTF-8: 0xE2 0x80 0xA6
, "�":"∅" # U+2205 EMPTY SET, UTF-8: 0xE2 0x88 0x85
, "�":"⊇" # U+2287 SUPERSET OF OR EQUAL TO, UTF-8: 0xE2 0x8A 0x87
, "�":"⊂" # U+2282 SUBSET OF, UTF-8: 0xE2 0x8A 0x82
, "�":"⊆" # U+2286 SUBSET OF OR EQUAL TO, UTF-8: 0xE2 0x8A 0x86
, "�":"∈" # U+2208 ELEMENT OF, UTF-8: 0xE2 0x88 0x88
, "�":"∉" # U+2209 NOT AN ELEMENT OF, UTF-8: 0xE2 0x88 0x89
, "�":"∧" # U+2227 LOGICAL AND, UTF-8: 0xE2 0x88 0xA7
, "�":"∨" # U+2228 LOGICAL OR, UTF-8: 0xE2 0x88 0xA8
, "�":"⇔" # U+21D4 LEFT RIGHT DOUBLE ARROW, UTF-8: 0xE2 0x87 0x94
, "�":"⇐" # U+21D0 LEFTWARDS DOUBLE ARROW, UTF-8: 0xE2 0x87 0x90
, "�":"⇒" # U+21D2 RIGHTWARDS DOUBLE ARROW, UTF-8: 0xE2 0x87 0x92
, "�":"→" # U+2192 RIGHTWARDS ARROW, UTF-8: 0xE2 0x86 0x92
# known MS private use area unicode entity references
, "":" "
, "":"!"
, "":"∀"
, "":"#"
, "":"Δ" # Capital Delta
, "":"%"
, "":"&"
, "":"∍"
, "":"(" # U+0028 LEFT PARENTHESIS, UTF-8: 0x28
, "":")" # U+0029 RIGHT PARENTHESIS, UTF-8: 0x29
, "":"*" # U+002A ASTERISK, UTF-8: 0x2A
, "":"+" # U+002B PLUS SIGN, UTF-8: 0x2B
, "":"," # U+002C COMMA, UTF-8: 0x2C
, "":"-" # U+002D HYPHEN-MINUS, UTF-8: 0x2D
, "":"." # U+002E FULL STOP, UTF-8: 0x2E
, "":"/" # U+002F SOLIDUS, UTF-8: 0x2F
, "":"0" # U+0030 DIGIT ZERO, UTF-8: 0x30
, "":"1" # U+0031 DIGIT ONE, UTF-8: 0x31
, "":"2" # U+0032 DIGIT TWO, UTF-8: 0x32
, "":"3" # U+0033 DIGIT THREE, UTF-8: 0x33
, "":"4" # U+0034 DIGIT FOUR, UTF-8: 0x34
, "":"5" # U+0035 DIGIT FIVE, UTF-8: 0x35
, "":"6" # U+0036 DIGIT SIX, UTF-8: 0x36
, "":"7" # U+0037 DIGIT SEVEN, UTF-8: 0x37
, "":"8" # U+0038 DIGIT EIGHT, UTF-8: 0x38
, "":"9" # U+0039 DIGIT NINE, UTF-8: 0x39
, "":":" # U+003A COLON, UTF-8: 0x3A
, "":";" # U+003B SEMICOLONUTF-8: 0x3B
, "":"<" # U+003C LESS-THAN SIGN, UTF-8: 0x3C
, "":"=" # U+003D EQUALS SIGN, UTF-8: 0x3D
, "":">" # U+003E GREATER-THAN SIGN, UTF-8: 0x3E
, "":"?" # U+003F QUESTION MARK, UTF-8: 0x3F
, "":"≅"
, "":"Α"
, "":"Β"
, "":"Χ"
, "":"Δ"
, "":"Ε"
, "":"Φ"
, "":"Γ"
, "":"Η"
, "":"Ι"
, "":"Θ"
, "":"Κ"
, "":"Λ"
, "":"Μ"
, "":"Ν"
, "":"Ο"
, "":"Π"
, "":"Θ"
, "":"Ρ"
, "":"Σ"
, "":"Τ"
, "":"Υ"
, "":"ζ"
, "":"Ω"
, "":"Ξ"
, "":"Ψ"
, "":"Ζ"
, "":"["
, "":"∴"
, "":"]"
, "":"⊥"
, "":"€"
, "":"¥"
, "":"´"
, "":"≤"
, "":"/"
, "":"∞"
, "":"♣"
, "":"♦"
, "":"♥"
, "":"♠"
, "":"↔"
, "":"←"
, "":"↑"
, "":"→"
, "":"↓"
, "":"°"
, "":"±"
, "":"""
, "":"≥"
, "":"≂"
, "":"∝"
, "":"∂"
, "":"∙"
, "":"≃"
, "":"≠"
, "":"≡"
, "":"≈"
, "":"⋯"
, "":"∣"
, "":"─"
, "":"↵"
, "":"ℵ"
, "":"ℷ"
, "":"ℜ"
, "":"℘"
, "":"⊗"
, "":"⊕"
, "":"⊘"
, "":"∩"
, "":"∪"
, "":"⊃"
, "":"⊇"
, "":"⊄"
, "":"⊂"
, "":"⊆"
, "":"∊"
, "":"∉"
, "":"∇"
, "":"®"
, "":"©"
, "":"™"
, "":"∏"
, "":"√"
, "":"∙"
, "":"¬"
, "":"⋀"
, "":"⋁"
, "":"⇔"
, "":"⇐"
, "":"⇑"
, "":"⇒"
}
|
openstax/rhaptos.cnxmlutils | rhaptos/cnxmlutils/odt2cnxml.py | writeXMLFile | python | def writeXMLFile(filename, content):
xmlfile = open(filename, 'w')
# pretty print
content = etree.tostring(content, pretty_print=True)
xmlfile.write(content)
xmlfile.close() | Used only for debugging to write out intermediate files | train | https://github.com/openstax/rhaptos.cnxmlutils/blob/c32b1a7428dc652e8cd745f3fdf4019a20543649/rhaptos/cnxmlutils/odt2cnxml.py#L56-L62 | null | """
Copyright (C) 2013 Rice University
This software is subject to the provisions of the GNU AFFERO GENERAL PUBLIC LICENSE Version 3.0 (AGPL).
See LICENSE.txt for details.
"""
import os
import sys
import tempfile
from copy import deepcopy
import shutil
import zipfile
import urllib
import pkg_resources
from cStringIO import StringIO
from lxml import etree, html
try:
import json
except ImportError:
import simplejson as json
import symbols
dirname = os.path.dirname(__file__)
NAMESPACES = {
'office':'urn:oasis:names:tc:opendocument:xmlns:office:1.0',
'draw':'urn:oasis:names:tc:opendocument:xmlns:drawing:1.0',
'xlink':'http://www.w3.org/1999/xlink',
}
MATH_XPATH = etree.XPath('//draw:object[@xlink:href]', namespaces=NAMESPACES)
MATH_HREF_XPATH = etree.XPath('@xlink:href', namespaces=NAMESPACES)
IMAGE_XPATH = etree.XPath('//draw:frame[not(draw:object or draw:object-ole) and @draw:name and draw:image[@xlink:href and @xlink:type="simple"]]', namespaces=NAMESPACES)
IMAGE_HREF_XPATH = etree.XPath('draw:image/@xlink:href', namespaces=NAMESPACES)
IMAGE_NAME_XPATH = etree.XPath('@draw:name', namespaces=NAMESPACES)
STYLES_XPATH = etree.XPath('//office:styles', namespaces=NAMESPACES)
DRAW_XPATH = etree.XPath('//draw:g[not(parent::draw:*)]', namespaces=NAMESPACES)
DRAW_STYLES_XPATH = etree.XPath('/office:document-content/office:automatic-styles/*', namespaces=NAMESPACES)
DRAW_FILENAME_PREFIX = "draw_odg"
def makeXsl(filename):
""" Helper that creates a XSLT stylesheet """
pkg = 'xsl'
package = ''.join(['.' + x for x in __name__.split('.')[:-1]])[1:]
if package != '':
pkg = package + '.' + pkg
path = pkg_resources.resource_filename(pkg, filename)
xml = etree.parse(path)
return etree.XSLT(xml)
def transform(odtfile, debug=False, parsable=False, outputdir=None):
""" Given an ODT file this returns a tuple containing
the cnxml, a dictionary of filename -> data, and a list of errors """
# Store mapping of images extracted from the ODT file (and their bits)
images = {}
# Log of Errors and Warnings generated
# For example, the text produced by XSLT should be:
# {'level':'WARNING',
# 'msg' :'Headings without text between them are not allowed',
# 'id' :'import-auto-id2376'}
# That way we can put a little * near all the cnxml where issues arose
errors = []
zip = zipfile.ZipFile(odtfile, 'r')
content = zip.read('content.xml')
xml = etree.fromstring(content)
def appendLog(xslDoc):
if hasattr(xslDoc, 'error_log'):
for entry in xslDoc.error_log:
# Entries are of the form:
# {'level':'ERROR','id':'id1234','msg':'Descriptive message'}
text = entry.message
try:
dict = json.loads(text)
errors.append(dict)
except ValueError:
errors.append({
u'level':u'CRITICAL',
u'id' :u'(none)',
u'msg' :unicode(text) })
def injectStyles(xml):
# HACK - need to find the object location from the manifest ...
strStyles = zip.read('styles.xml')
parser = etree.XMLParser()
parser.feed(strStyles)
stylesXml = parser.close()
for i, obj in enumerate(STYLES_XPATH(stylesXml)):
xml.append(obj)
return xml
# All MathML is stored in separate files "Object #/content.xml"
# This converter includes the MathML by looking up the file in the zip
def mathIncluder(xml):
for i, obj in enumerate(MATH_XPATH(xml)):
strMathPath = MATH_HREF_XPATH(obj)[0] # Or obj.get('{%s}href' % XLINK_NS)
if strMathPath[0] == '#':
strMathPath = strMathPath[1:]
# Remove leading './' Zip doesn't like it
if strMathPath[0] == '.':
strMathPath = strMathPath[2:]
# HACK - need to find the object location from the manifest ...
strMathPath = os.path.join(strMathPath, 'content.xml')
strMath = zip.read(strMathPath)
#parser = etree.XMLParser(encoding='utf-8')
#parser.feed(strMath)
#math = parser.close()
math = etree.parse(StringIO(strMath)).getroot()
# Replace the reference to the Math with the actual MathML
obj.getparent().replace(obj, math)
return xml
def imagePuller(xml):
for i, obj in enumerate(IMAGE_XPATH(xml)):
strPath = IMAGE_HREF_XPATH(obj)[0]
strName = IMAGE_NAME_XPATH(obj)[0]
fileNeedEnding = ( strName.find('.') == -1 )
if fileNeedEnding:
strName = strName + strPath[strPath.index('.'):]
if strPath[0] == '#':
strPath = strPath[1:]
# Remove leading './' Zip doesn't like it
if strPath[0] == '.':
strPath = strPath[2:]
image = zip.read(strPath)
images[strName] = image
# Later on, an XSL pass will convert the draw:frame to a c:image and
# set the @src correctly
return xml
def drawPuller(xml):
styles = DRAW_STYLES_XPATH(xml)
empty_odg_dirname = os.path.join(dirname, 'empty_odg_template')
temp_dirname = tempfile.mkdtemp()
for i, obj in enumerate(DRAW_XPATH(xml)):
# Copy everything except content.xml from the empty ODG (OOo Draw) template into a new zipfile
odg_filename = DRAW_FILENAME_PREFIX + str(i) + '.odg'
png_filename = DRAW_FILENAME_PREFIX + str(i) + '.png'
# add PNG filename as attribute to parent node. The good thing is: The child (obj) will get lost! :-)
parent = obj.getparent()
parent.attrib['ooo_drawing'] = png_filename
odg_zip = zipfile.ZipFile(os.path.join(temp_dirname, odg_filename), 'w', zipfile.ZIP_DEFLATED)
for root, dirs, files in os.walk(empty_odg_dirname):
for name in files:
if name not in ('content.xml', 'styles.xml'): # copy everything inside ZIP except content.xml or styles.xml
sourcename = os.path.join(root, name)
# http://stackoverflow.com/a/1193171/756056
arcname = os.path.join(root[len(empty_odg_dirname):], name) # Path name inside the ZIP file, empty_odg_template is the root folder
odg_zip.write(sourcename, arcname)
content = etree.parse(os.path.join(empty_odg_dirname, 'content.xml'))
# Inject content styles in empty OOo Draw content.xml
content_style_xpath = etree.XPath('/office:document-content/office:automatic-styles', namespaces=NAMESPACES)
content_styles = content_style_xpath(content)
for style in styles:
content_styles[0].append(deepcopy(style))
# Inject drawing in empty OOo Draw content.xml
content_page_xpath = etree.XPath('/office:document-content/office:body/office:drawing/draw:page', namespaces=NAMESPACES)
content_page = content_page_xpath(content)
content_page[0].append(obj)
# write modified content.xml
odg_zip.writestr('content.xml', etree.tostring(content, xml_declaration=True, encoding='UTF-8'))
# copy styles.xml from odt to odg without modification
styles_xml = zip.read('styles.xml')
odg_zip.writestr('styles.xml', styles_xml)
odg_zip.close()
# TODO: Better error handling in the future.
try:
# convert every odg to png
command = '/usr/bin/soffice -headless -nologo -nofirststartwizard "macro:///Standard.Module1.SaveAsPNG(%s,%s)"' % (os.path.join(temp_dirname, odg_filename),os.path.join(temp_dirname, png_filename))
os.system(command)
# save every image to memory
image = open(os.path.join(temp_dirname, png_filename), 'r').read()
images[png_filename] = image
if outputdir is not None:
shutil.copy (os.path.join(temp_dirname, odg_filename), os.path.join(outputdir, odg_filename))
shutil.copy (os.path.join(temp_dirname, png_filename), os.path.join(outputdir, png_filename))
except:
pass
# delete temporary directory
shutil.rmtree(temp_dirname)
return xml
# Reparse after XSL because the RED-escape pass injects arbitrary XML
def redParser(xml):
xsl = makeXsl('pass1_odt2red-escape.xsl')
result = xsl(xml)
appendLog(xsl)
try:
xml = etree.fromstring(etree.tostring(result))
except etree.XMLSyntaxError, e:
msg = str(e)
xml = makeXsl('pass1_odt2red-failed.xsl')(xml, message="'%s'" % msg.replace("'", '"'))
xml = xml.getroot()
return xml
def replaceSymbols(xml):
xmlstr = etree.tostring(xml)
xmlstr = symbols.replace(xmlstr)
return etree.fromstring(xmlstr)
PIPELINE = [
drawPuller, # gets OOo Draw objects out of odt and generate odg (OOo Draw) files
replaceSymbols,
injectStyles, # include the styles.xml file because it contains list numbering info
makeXsl('pass2_odt-normalize.xsl'), # This needs to be done 2x to fix headings
makeXsl('pass2_odt-normalize.xsl'), # In the worst case all headings are 9
# and need to be 1. See (testbed) southwood__Lesson_2.doc
makeXsl('pass2_odt-collapse-spans.xsl'), # Collapse adjacent spans (for RED)
redParser, # makeXsl('pass1_odt2red-escape.xsl'),
makeXsl('pass4_odt-headers.xsl'),
imagePuller, # Need to run before math because both have a <draw:image> (see xpath)
mathIncluder,
makeXsl('pass7_odt2cnxml.xsl'),
makeXsl('pass8_cnxml-cleanup.xsl'),
makeXsl('pass8.5_cnxml-cleanup.xsl'),
makeXsl('pass9_id-generation.xsl'),
makeXsl('pass10_processing-instruction-logger.xsl'),
]
# "xml" variable gets replaced during each iteration
passNum = 0
for xslDoc in PIPELINE:
if debug: errors.append("DEBUG: Starting pass %d" % passNum)
xml = xslDoc(xml)
appendLog(xslDoc)
if outputdir is not None: writeXMLFile(os.path.join(outputdir, 'pass%d.xml' % passNum), xml)
passNum += 1
# In most cases (EIP) Invalid XML is preferable over valid but Escaped XML
if not parsable:
xml = (makeXsl('pass11_red-unescape.xsl'))(xml)
return (xml, images, errors)
def validate(xml):
# Validate against schema
schemafile = open(os.path.join(dirname,
'schema/cnxml/rng/0.7/cnxml.rng'))
relaxng_doc = etree.parse(schemafile)
relaxng = etree.RelaxNG(relaxng_doc)
if relaxng.validate(xml):
return None
else:
return relaxng.error_log
def main():
try:
import argparse
parser = argparse.ArgumentParser(description='Convert odt file to CNXML')
parser.add_argument('-v', dest='verbose', help='Verbose printing to stderr', action='store_true')
parser.add_argument('-p', dest='parsable', help='Ensure the output is Valid XML (ignore red text)', action='store_true')
parser.add_argument('odtfile', help='/path/to/odtfile', type=file)
parser.add_argument('outputdir', help='/path/to/outputdir', nargs='?')
args = parser.parse_args()
if args.verbose: print >> sys.stderr, "Transforming..."
xml, files, errors = transform(args.odtfile, debug=args.verbose, parsable=args.parsable, outputdir=args.outputdir)
if args.verbose:
for name, bytes in files.items():
print >> sys.stderr, "Extracted %s (%d)" % (name, len(bytes))
for err in errors:
print >> sys.stderr, err
if xml is not None:
if args.verbose: print >> sys.stderr, "Validating..."
invalids = validate(xml)
if invalids: print >> sys.stderr, invalids
print etree.tostring(xml, pretty_print=True)
if invalids:
return 1
except ImportError:
print "argparse is needed for commandline"
if __name__ == '__main__':
sys.exit(main())
|
openstax/rhaptos.cnxmlutils | rhaptos/cnxmlutils/odt2cnxml.py | transform | python | def transform(odtfile, debug=False, parsable=False, outputdir=None):
# Store mapping of images extracted from the ODT file (and their bits)
images = {}
# Log of Errors and Warnings generated
# For example, the text produced by XSLT should be:
# {'level':'WARNING',
# 'msg' :'Headings without text between them are not allowed',
# 'id' :'import-auto-id2376'}
# That way we can put a little * near all the cnxml where issues arose
errors = []
zip = zipfile.ZipFile(odtfile, 'r')
content = zip.read('content.xml')
xml = etree.fromstring(content)
def appendLog(xslDoc):
if hasattr(xslDoc, 'error_log'):
for entry in xslDoc.error_log:
# Entries are of the form:
# {'level':'ERROR','id':'id1234','msg':'Descriptive message'}
text = entry.message
try:
dict = json.loads(text)
errors.append(dict)
except ValueError:
errors.append({
u'level':u'CRITICAL',
u'id' :u'(none)',
u'msg' :unicode(text) })
def injectStyles(xml):
# HACK - need to find the object location from the manifest ...
strStyles = zip.read('styles.xml')
parser = etree.XMLParser()
parser.feed(strStyles)
stylesXml = parser.close()
for i, obj in enumerate(STYLES_XPATH(stylesXml)):
xml.append(obj)
return xml
# All MathML is stored in separate files "Object #/content.xml"
# This converter includes the MathML by looking up the file in the zip
def mathIncluder(xml):
for i, obj in enumerate(MATH_XPATH(xml)):
strMathPath = MATH_HREF_XPATH(obj)[0] # Or obj.get('{%s}href' % XLINK_NS)
if strMathPath[0] == '#':
strMathPath = strMathPath[1:]
# Remove leading './' Zip doesn't like it
if strMathPath[0] == '.':
strMathPath = strMathPath[2:]
# HACK - need to find the object location from the manifest ...
strMathPath = os.path.join(strMathPath, 'content.xml')
strMath = zip.read(strMathPath)
#parser = etree.XMLParser(encoding='utf-8')
#parser.feed(strMath)
#math = parser.close()
math = etree.parse(StringIO(strMath)).getroot()
# Replace the reference to the Math with the actual MathML
obj.getparent().replace(obj, math)
return xml
def imagePuller(xml):
for i, obj in enumerate(IMAGE_XPATH(xml)):
strPath = IMAGE_HREF_XPATH(obj)[0]
strName = IMAGE_NAME_XPATH(obj)[0]
fileNeedEnding = ( strName.find('.') == -1 )
if fileNeedEnding:
strName = strName + strPath[strPath.index('.'):]
if strPath[0] == '#':
strPath = strPath[1:]
# Remove leading './' Zip doesn't like it
if strPath[0] == '.':
strPath = strPath[2:]
image = zip.read(strPath)
images[strName] = image
# Later on, an XSL pass will convert the draw:frame to a c:image and
# set the @src correctly
return xml
def drawPuller(xml):
styles = DRAW_STYLES_XPATH(xml)
empty_odg_dirname = os.path.join(dirname, 'empty_odg_template')
temp_dirname = tempfile.mkdtemp()
for i, obj in enumerate(DRAW_XPATH(xml)):
# Copy everything except content.xml from the empty ODG (OOo Draw) template into a new zipfile
odg_filename = DRAW_FILENAME_PREFIX + str(i) + '.odg'
png_filename = DRAW_FILENAME_PREFIX + str(i) + '.png'
# add PNG filename as attribute to parent node. The good thing is: The child (obj) will get lost! :-)
parent = obj.getparent()
parent.attrib['ooo_drawing'] = png_filename
odg_zip = zipfile.ZipFile(os.path.join(temp_dirname, odg_filename), 'w', zipfile.ZIP_DEFLATED)
for root, dirs, files in os.walk(empty_odg_dirname):
for name in files:
if name not in ('content.xml', 'styles.xml'): # copy everything inside ZIP except content.xml or styles.xml
sourcename = os.path.join(root, name)
# http://stackoverflow.com/a/1193171/756056
arcname = os.path.join(root[len(empty_odg_dirname):], name) # Path name inside the ZIP file, empty_odg_template is the root folder
odg_zip.write(sourcename, arcname)
content = etree.parse(os.path.join(empty_odg_dirname, 'content.xml'))
# Inject content styles in empty OOo Draw content.xml
content_style_xpath = etree.XPath('/office:document-content/office:automatic-styles', namespaces=NAMESPACES)
content_styles = content_style_xpath(content)
for style in styles:
content_styles[0].append(deepcopy(style))
# Inject drawing in empty OOo Draw content.xml
content_page_xpath = etree.XPath('/office:document-content/office:body/office:drawing/draw:page', namespaces=NAMESPACES)
content_page = content_page_xpath(content)
content_page[0].append(obj)
# write modified content.xml
odg_zip.writestr('content.xml', etree.tostring(content, xml_declaration=True, encoding='UTF-8'))
# copy styles.xml from odt to odg without modification
styles_xml = zip.read('styles.xml')
odg_zip.writestr('styles.xml', styles_xml)
odg_zip.close()
# TODO: Better error handling in the future.
try:
# convert every odg to png
command = '/usr/bin/soffice -headless -nologo -nofirststartwizard "macro:///Standard.Module1.SaveAsPNG(%s,%s)"' % (os.path.join(temp_dirname, odg_filename),os.path.join(temp_dirname, png_filename))
os.system(command)
# save every image to memory
image = open(os.path.join(temp_dirname, png_filename), 'r').read()
images[png_filename] = image
if outputdir is not None:
shutil.copy (os.path.join(temp_dirname, odg_filename), os.path.join(outputdir, odg_filename))
shutil.copy (os.path.join(temp_dirname, png_filename), os.path.join(outputdir, png_filename))
except:
pass
# delete temporary directory
shutil.rmtree(temp_dirname)
return xml
# Reparse after XSL because the RED-escape pass injects arbitrary XML
def redParser(xml):
xsl = makeXsl('pass1_odt2red-escape.xsl')
result = xsl(xml)
appendLog(xsl)
try:
xml = etree.fromstring(etree.tostring(result))
except etree.XMLSyntaxError, e:
msg = str(e)
xml = makeXsl('pass1_odt2red-failed.xsl')(xml, message="'%s'" % msg.replace("'", '"'))
xml = xml.getroot()
return xml
def replaceSymbols(xml):
xmlstr = etree.tostring(xml)
xmlstr = symbols.replace(xmlstr)
return etree.fromstring(xmlstr)
PIPELINE = [
drawPuller, # gets OOo Draw objects out of odt and generate odg (OOo Draw) files
replaceSymbols,
injectStyles, # include the styles.xml file because it contains list numbering info
makeXsl('pass2_odt-normalize.xsl'), # This needs to be done 2x to fix headings
makeXsl('pass2_odt-normalize.xsl'), # In the worst case all headings are 9
# and need to be 1. See (testbed) southwood__Lesson_2.doc
makeXsl('pass2_odt-collapse-spans.xsl'), # Collapse adjacent spans (for RED)
redParser, # makeXsl('pass1_odt2red-escape.xsl'),
makeXsl('pass4_odt-headers.xsl'),
imagePuller, # Need to run before math because both have a <draw:image> (see xpath)
mathIncluder,
makeXsl('pass7_odt2cnxml.xsl'),
makeXsl('pass8_cnxml-cleanup.xsl'),
makeXsl('pass8.5_cnxml-cleanup.xsl'),
makeXsl('pass9_id-generation.xsl'),
makeXsl('pass10_processing-instruction-logger.xsl'),
]
# "xml" variable gets replaced during each iteration
passNum = 0
for xslDoc in PIPELINE:
if debug: errors.append("DEBUG: Starting pass %d" % passNum)
xml = xslDoc(xml)
appendLog(xslDoc)
if outputdir is not None: writeXMLFile(os.path.join(outputdir, 'pass%d.xml' % passNum), xml)
passNum += 1
# In most cases (EIP) Invalid XML is preferable over valid but Escaped XML
if not parsable:
xml = (makeXsl('pass11_red-unescape.xsl'))(xml)
return (xml, images, errors) | Given an ODT file this returns a tuple containing
the cnxml, a dictionary of filename -> data, and a list of errors | train | https://github.com/openstax/rhaptos.cnxmlutils/blob/c32b1a7428dc652e8cd745f3fdf4019a20543649/rhaptos/cnxmlutils/odt2cnxml.py#L64-L277 | [
"def makeXsl(filename):\n \"\"\" Helper that creates a XSLT stylesheet \"\"\"\n pkg = 'xsl'\n package = ''.join(['.' + x for x in __name__.split('.')[:-1]])[1:]\n if package != '':\n pkg = package + '.' + pkg\n path = pkg_resources.resource_filename(pkg, filename)\n xml = etree.parse(path)\n return etree.XSLT(xml)\n",
"def writeXMLFile(filename, content):\n \"\"\" Used only for debugging to write out intermediate files\"\"\"\n xmlfile = open(filename, 'w')\n # pretty print\n content = etree.tostring(content, pretty_print=True)\n xmlfile.write(content)\n xmlfile.close()\n",
"def appendLog(xslDoc):\n if hasattr(xslDoc, 'error_log'):\n for entry in xslDoc.error_log:\n # Entries are of the form:\n # {'level':'ERROR','id':'id1234','msg':'Descriptive message'}\n text = entry.message\n try:\n dict = json.loads(text)\n errors.append(dict)\n except ValueError:\n errors.append({\n u'level':u'CRITICAL',\n u'id' :u'(none)',\n u'msg' :unicode(text) })\n"
] | """
Copyright (C) 2013 Rice University
This software is subject to the provisions of the GNU AFFERO GENERAL PUBLIC LICENSE Version 3.0 (AGPL).
See LICENSE.txt for details.
"""
import os
import sys
import tempfile
from copy import deepcopy
import shutil
import zipfile
import urllib
import pkg_resources
from cStringIO import StringIO
from lxml import etree, html
try:
import json
except ImportError:
import simplejson as json
import symbols
dirname = os.path.dirname(__file__)
NAMESPACES = {
'office':'urn:oasis:names:tc:opendocument:xmlns:office:1.0',
'draw':'urn:oasis:names:tc:opendocument:xmlns:drawing:1.0',
'xlink':'http://www.w3.org/1999/xlink',
}
MATH_XPATH = etree.XPath('//draw:object[@xlink:href]', namespaces=NAMESPACES)
MATH_HREF_XPATH = etree.XPath('@xlink:href', namespaces=NAMESPACES)
IMAGE_XPATH = etree.XPath('//draw:frame[not(draw:object or draw:object-ole) and @draw:name and draw:image[@xlink:href and @xlink:type="simple"]]', namespaces=NAMESPACES)
IMAGE_HREF_XPATH = etree.XPath('draw:image/@xlink:href', namespaces=NAMESPACES)
IMAGE_NAME_XPATH = etree.XPath('@draw:name', namespaces=NAMESPACES)
STYLES_XPATH = etree.XPath('//office:styles', namespaces=NAMESPACES)
DRAW_XPATH = etree.XPath('//draw:g[not(parent::draw:*)]', namespaces=NAMESPACES)
DRAW_STYLES_XPATH = etree.XPath('/office:document-content/office:automatic-styles/*', namespaces=NAMESPACES)
DRAW_FILENAME_PREFIX = "draw_odg"
def makeXsl(filename):
""" Helper that creates a XSLT stylesheet """
pkg = 'xsl'
package = ''.join(['.' + x for x in __name__.split('.')[:-1]])[1:]
if package != '':
pkg = package + '.' + pkg
path = pkg_resources.resource_filename(pkg, filename)
xml = etree.parse(path)
return etree.XSLT(xml)
def writeXMLFile(filename, content):
""" Used only for debugging to write out intermediate files"""
xmlfile = open(filename, 'w')
# pretty print
content = etree.tostring(content, pretty_print=True)
xmlfile.write(content)
xmlfile.close()
def transform(odtfile, debug=False, parsable=False, outputdir=None):
""" Given an ODT file this returns a tuple containing
the cnxml, a dictionary of filename -> data, and a list of errors """
# Store mapping of images extracted from the ODT file (and their bits)
images = {}
# Log of Errors and Warnings generated
# For example, the text produced by XSLT should be:
# {'level':'WARNING',
# 'msg' :'Headings without text between them are not allowed',
# 'id' :'import-auto-id2376'}
# That way we can put a little * near all the cnxml where issues arose
errors = []
zip = zipfile.ZipFile(odtfile, 'r')
content = zip.read('content.xml')
xml = etree.fromstring(content)
def appendLog(xslDoc):
if hasattr(xslDoc, 'error_log'):
for entry in xslDoc.error_log:
# Entries are of the form:
# {'level':'ERROR','id':'id1234','msg':'Descriptive message'}
text = entry.message
try:
dict = json.loads(text)
errors.append(dict)
except ValueError:
errors.append({
u'level':u'CRITICAL',
u'id' :u'(none)',
u'msg' :unicode(text) })
def injectStyles(xml):
# HACK - need to find the object location from the manifest ...
strStyles = zip.read('styles.xml')
parser = etree.XMLParser()
parser.feed(strStyles)
stylesXml = parser.close()
for i, obj in enumerate(STYLES_XPATH(stylesXml)):
xml.append(obj)
return xml
# All MathML is stored in separate files "Object #/content.xml"
# This converter includes the MathML by looking up the file in the zip
def mathIncluder(xml):
for i, obj in enumerate(MATH_XPATH(xml)):
strMathPath = MATH_HREF_XPATH(obj)[0] # Or obj.get('{%s}href' % XLINK_NS)
if strMathPath[0] == '#':
strMathPath = strMathPath[1:]
# Remove leading './' Zip doesn't like it
if strMathPath[0] == '.':
strMathPath = strMathPath[2:]
# HACK - need to find the object location from the manifest ...
strMathPath = os.path.join(strMathPath, 'content.xml')
strMath = zip.read(strMathPath)
#parser = etree.XMLParser(encoding='utf-8')
#parser.feed(strMath)
#math = parser.close()
math = etree.parse(StringIO(strMath)).getroot()
# Replace the reference to the Math with the actual MathML
obj.getparent().replace(obj, math)
return xml
def imagePuller(xml):
for i, obj in enumerate(IMAGE_XPATH(xml)):
strPath = IMAGE_HREF_XPATH(obj)[0]
strName = IMAGE_NAME_XPATH(obj)[0]
fileNeedEnding = ( strName.find('.') == -1 )
if fileNeedEnding:
strName = strName + strPath[strPath.index('.'):]
if strPath[0] == '#':
strPath = strPath[1:]
# Remove leading './' Zip doesn't like it
if strPath[0] == '.':
strPath = strPath[2:]
image = zip.read(strPath)
images[strName] = image
# Later on, an XSL pass will convert the draw:frame to a c:image and
# set the @src correctly
return xml
def drawPuller(xml):
styles = DRAW_STYLES_XPATH(xml)
empty_odg_dirname = os.path.join(dirname, 'empty_odg_template')
temp_dirname = tempfile.mkdtemp()
for i, obj in enumerate(DRAW_XPATH(xml)):
# Copy everything except content.xml from the empty ODG (OOo Draw) template into a new zipfile
odg_filename = DRAW_FILENAME_PREFIX + str(i) + '.odg'
png_filename = DRAW_FILENAME_PREFIX + str(i) + '.png'
# add PNG filename as attribute to parent node. The good thing is: The child (obj) will get lost! :-)
parent = obj.getparent()
parent.attrib['ooo_drawing'] = png_filename
odg_zip = zipfile.ZipFile(os.path.join(temp_dirname, odg_filename), 'w', zipfile.ZIP_DEFLATED)
for root, dirs, files in os.walk(empty_odg_dirname):
for name in files:
if name not in ('content.xml', 'styles.xml'): # copy everything inside ZIP except content.xml or styles.xml
sourcename = os.path.join(root, name)
# http://stackoverflow.com/a/1193171/756056
arcname = os.path.join(root[len(empty_odg_dirname):], name) # Path name inside the ZIP file, empty_odg_template is the root folder
odg_zip.write(sourcename, arcname)
content = etree.parse(os.path.join(empty_odg_dirname, 'content.xml'))
# Inject content styles in empty OOo Draw content.xml
content_style_xpath = etree.XPath('/office:document-content/office:automatic-styles', namespaces=NAMESPACES)
content_styles = content_style_xpath(content)
for style in styles:
content_styles[0].append(deepcopy(style))
# Inject drawing in empty OOo Draw content.xml
content_page_xpath = etree.XPath('/office:document-content/office:body/office:drawing/draw:page', namespaces=NAMESPACES)
content_page = content_page_xpath(content)
content_page[0].append(obj)
# write modified content.xml
odg_zip.writestr('content.xml', etree.tostring(content, xml_declaration=True, encoding='UTF-8'))
# copy styles.xml from odt to odg without modification
styles_xml = zip.read('styles.xml')
odg_zip.writestr('styles.xml', styles_xml)
odg_zip.close()
# TODO: Better error handling in the future.
try:
# convert every odg to png
command = '/usr/bin/soffice -headless -nologo -nofirststartwizard "macro:///Standard.Module1.SaveAsPNG(%s,%s)"' % (os.path.join(temp_dirname, odg_filename),os.path.join(temp_dirname, png_filename))
os.system(command)
# save every image to memory
image = open(os.path.join(temp_dirname, png_filename), 'r').read()
images[png_filename] = image
if outputdir is not None:
shutil.copy (os.path.join(temp_dirname, odg_filename), os.path.join(outputdir, odg_filename))
shutil.copy (os.path.join(temp_dirname, png_filename), os.path.join(outputdir, png_filename))
except:
pass
# delete temporary directory
shutil.rmtree(temp_dirname)
return xml
# Reparse after XSL because the RED-escape pass injects arbitrary XML
def redParser(xml):
xsl = makeXsl('pass1_odt2red-escape.xsl')
result = xsl(xml)
appendLog(xsl)
try:
xml = etree.fromstring(etree.tostring(result))
except etree.XMLSyntaxError, e:
msg = str(e)
xml = makeXsl('pass1_odt2red-failed.xsl')(xml, message="'%s'" % msg.replace("'", '"'))
xml = xml.getroot()
return xml
def replaceSymbols(xml):
xmlstr = etree.tostring(xml)
xmlstr = symbols.replace(xmlstr)
return etree.fromstring(xmlstr)
PIPELINE = [
drawPuller, # gets OOo Draw objects out of odt and generate odg (OOo Draw) files
replaceSymbols,
injectStyles, # include the styles.xml file because it contains list numbering info
makeXsl('pass2_odt-normalize.xsl'), # This needs to be done 2x to fix headings
makeXsl('pass2_odt-normalize.xsl'), # In the worst case all headings are 9
# and need to be 1. See (testbed) southwood__Lesson_2.doc
makeXsl('pass2_odt-collapse-spans.xsl'), # Collapse adjacent spans (for RED)
redParser, # makeXsl('pass1_odt2red-escape.xsl'),
makeXsl('pass4_odt-headers.xsl'),
imagePuller, # Need to run before math because both have a <draw:image> (see xpath)
mathIncluder,
makeXsl('pass7_odt2cnxml.xsl'),
makeXsl('pass8_cnxml-cleanup.xsl'),
makeXsl('pass8.5_cnxml-cleanup.xsl'),
makeXsl('pass9_id-generation.xsl'),
makeXsl('pass10_processing-instruction-logger.xsl'),
]
# "xml" variable gets replaced during each iteration
passNum = 0
for xslDoc in PIPELINE:
if debug: errors.append("DEBUG: Starting pass %d" % passNum)
xml = xslDoc(xml)
appendLog(xslDoc)
if outputdir is not None: writeXMLFile(os.path.join(outputdir, 'pass%d.xml' % passNum), xml)
passNum += 1
# In most cases (EIP) Invalid XML is preferable over valid but Escaped XML
if not parsable:
xml = (makeXsl('pass11_red-unescape.xsl'))(xml)
return (xml, images, errors)
def validate(xml):
# Validate against schema
schemafile = open(os.path.join(dirname,
'schema/cnxml/rng/0.7/cnxml.rng'))
relaxng_doc = etree.parse(schemafile)
relaxng = etree.RelaxNG(relaxng_doc)
if relaxng.validate(xml):
return None
else:
return relaxng.error_log
def main():
try:
import argparse
parser = argparse.ArgumentParser(description='Convert odt file to CNXML')
parser.add_argument('-v', dest='verbose', help='Verbose printing to stderr', action='store_true')
parser.add_argument('-p', dest='parsable', help='Ensure the output is Valid XML (ignore red text)', action='store_true')
parser.add_argument('odtfile', help='/path/to/odtfile', type=file)
parser.add_argument('outputdir', help='/path/to/outputdir', nargs='?')
args = parser.parse_args()
if args.verbose: print >> sys.stderr, "Transforming..."
xml, files, errors = transform(args.odtfile, debug=args.verbose, parsable=args.parsable, outputdir=args.outputdir)
if args.verbose:
for name, bytes in files.items():
print >> sys.stderr, "Extracted %s (%d)" % (name, len(bytes))
for err in errors:
print >> sys.stderr, err
if xml is not None:
if args.verbose: print >> sys.stderr, "Validating..."
invalids = validate(xml)
if invalids: print >> sys.stderr, invalids
print etree.tostring(xml, pretty_print=True)
if invalids:
return 1
except ImportError:
print "argparse is needed for commandline"
if __name__ == '__main__':
sys.exit(main())
|
Riffstation/flask-philo | flask_philo/db/postgresql/connection.py | init_db_conn | python | def init_db_conn(connection_name, connection_string, scopefunc=None):
engine = create_engine(connection_string)
session = scoped_session(sessionmaker(), scopefunc=scopefunc)
session.configure(bind=engine)
pool.connections[connection_name] = Connection(engine, session) | Initialize a postgresql connection by each connection string
defined in the configuration file | train | https://github.com/Riffstation/flask-philo/blob/76c9d562edb4a77010c8da6dfdb6489fa29cbc9e/flask_philo/db/postgresql/connection.py#L47-L55 | null | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session
import sys
class Connection(object):
def __init__(self, engine, session):
self.engine = engine
self.session = session
class ConnectionPool:
"""
Flask-philo supports multiple postgresql database connections,
this class stores one connection by every db
"""
_shared_state = {}
def __init__(self):
self.__dict__ = self._shared_state
def commit(self, connection_name='DEFAULT'):
if connection_name is None:
for conn_name, conn in self.connections.items():
conn.session.commit()
else:
self.connections[connection_name].session.commit()
def rollback(self, connection_name='DEFAULT'):
if connection_name is None:
for conn_name, conn in self.connections.items():
conn.session.rollback()
else:
self.connections[connection_name].session.rollback()
pool = ConnectionPool()
pool.connections = {}
def get_pool():
return pool
def initialize(g, app):
"""
If postgresql url is defined in configuration params a
scoped session will be created
"""
if 'DATABASES' in app.config and 'POSTGRESQL' in app.config['DATABASES']:
# Database connection established for console commands
for k, v in app.config['DATABASES']['POSTGRESQL'].items():
init_db_conn(k, v)
if 'test' not in sys.argv:
# Establish a new connection every request
@app.before_request
def before_request():
"""
Assign postgresql connection pool to the global
flask object at the beginning of every request
"""
# inject stack context if not testing
from flask import _app_ctx_stack
for k, v in app.config['DATABASES']['POSTGRESQL'].items():
init_db_conn(k, v, scopefunc=_app_ctx_stack)
g.postgresql_pool = pool
# avoid to close connections if testing
@app.teardown_request
def teardown_request(exception):
"""
Releasing connection after finish request, not required in unit
testing
"""
pool = getattr(g, 'postgresql_pool', None)
if pool is not None:
for k, v in pool.connections.items():
v.session.remove()
else:
@app.before_request
def before_request():
"""
Assign postgresql connection pool to the global
flask object at the beginning of every request
"""
for k, v in app.config['DATABASES']['POSTGRESQL'].items():
init_db_conn(k, v)
g.postgresql_pool = pool
|
Riffstation/flask-philo | flask_philo/db/postgresql/connection.py | initialize | python | def initialize(g, app):
if 'DATABASES' in app.config and 'POSTGRESQL' in app.config['DATABASES']:
# Database connection established for console commands
for k, v in app.config['DATABASES']['POSTGRESQL'].items():
init_db_conn(k, v)
if 'test' not in sys.argv:
# Establish a new connection every request
@app.before_request
def before_request():
"""
Assign postgresql connection pool to the global
flask object at the beginning of every request
"""
# inject stack context if not testing
from flask import _app_ctx_stack
for k, v in app.config['DATABASES']['POSTGRESQL'].items():
init_db_conn(k, v, scopefunc=_app_ctx_stack)
g.postgresql_pool = pool
# avoid to close connections if testing
@app.teardown_request
def teardown_request(exception):
"""
Releasing connection after finish request, not required in unit
testing
"""
pool = getattr(g, 'postgresql_pool', None)
if pool is not None:
for k, v in pool.connections.items():
v.session.remove()
else:
@app.before_request
def before_request():
"""
Assign postgresql connection pool to the global
flask object at the beginning of every request
"""
for k, v in app.config['DATABASES']['POSTGRESQL'].items():
init_db_conn(k, v)
g.postgresql_pool = pool | If postgresql url is defined in configuration params a
scoped session will be created | train | https://github.com/Riffstation/flask-philo/blob/76c9d562edb4a77010c8da6dfdb6489fa29cbc9e/flask_philo/db/postgresql/connection.py#L58-L102 | [
"def init_db_conn(connection_name, connection_string, scopefunc=None):\n \"\"\"\n Initialize a postgresql connection by each connection string\n defined in the configuration file\n \"\"\"\n engine = create_engine(connection_string)\n session = scoped_session(sessionmaker(), scopefunc=scopefunc)\n session.configure(bind=engine)\n pool.connections[connection_name] = Connection(engine, session)\n"
] | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session
import sys
class Connection(object):
def __init__(self, engine, session):
self.engine = engine
self.session = session
class ConnectionPool:
"""
Flask-philo supports multiple postgresql database connections,
this class stores one connection by every db
"""
_shared_state = {}
def __init__(self):
self.__dict__ = self._shared_state
def commit(self, connection_name='DEFAULT'):
if connection_name is None:
for conn_name, conn in self.connections.items():
conn.session.commit()
else:
self.connections[connection_name].session.commit()
def rollback(self, connection_name='DEFAULT'):
if connection_name is None:
for conn_name, conn in self.connections.items():
conn.session.rollback()
else:
self.connections[connection_name].session.rollback()
pool = ConnectionPool()
pool.connections = {}
def get_pool():
return pool
def init_db_conn(connection_name, connection_string, scopefunc=None):
"""
Initialize a postgresql connection by each connection string
defined in the configuration file
"""
engine = create_engine(connection_string)
session = scoped_session(sessionmaker(), scopefunc=scopefunc)
session.configure(bind=engine)
pool.connections[connection_name] = Connection(engine, session)
|
Riffstation/flask-philo | flask_philo/jinja2/__init__.py | load_extensions_from_config | python | def load_extensions_from_config(**config):
extensions = []
if 'EXTENSIONS' in config:
for ext in config['EXTENSIONS']:
try:
extensions.append(locate(ext))
except Exception as e:
print(e)
return extensions | Loads extensions | train | https://github.com/Riffstation/flask-philo/blob/76c9d562edb4a77010c8da6dfdb6489fa29cbc9e/flask_philo/jinja2/__init__.py#L53-L64 | null | from jinja2 import (
FileSystemLoader, Environment, select_autoescape)
from pydoc import locate
class TemplatesManager:
"""
Flask-philo may support multiple template loaders
"""
_shared_state = {}
environments = {}
def __init__(self):
self.__dict__ = self._shared_state
def set_request(self, r):
"""
Appends request object to the globals dict
"""
for k in self.environments.keys():
self.environments[k].globals['REQUEST'] = r
def render(self, template_name, engine_name='DEFAULT', **data):
t = self.environments[engine_name].get_template(template_name)
return t.render(**data)
templates_manager = TemplatesManager()
def get_manager():
return templates_manager
DEFAULT_AUTOESCAPING = {
'enabled_extensions': ('html', 'htm', 'xml'),
'disabled_extensions': [],
'default_for_string': True,
'default': False
}
def get_autoescaping_params(**config):
if 'AUTOESCAPING' in config:
autoescaping_params = config['AUTOESCAPING']
else:
autoescaping_params = DEFAULT_AUTOESCAPING
return autoescaping_params
def init_filesystem_loader(**config):
params = {}
template_location = config['PARAMETERS']['path']
params['encoding'] = config['PARAMETERS'].get('encoding', 'utf-8')
params['followlinks'] = config['PARAMETERS'].get('followlinks', False)
env = Environment(
loader=FileSystemLoader(template_location, **params),
autoescape=select_autoescape(**get_autoescaping_params(**config)),
extensions=load_extensions_from_config(**config)
)
return env
def init_loader(app, **config):
loaders_dict = {
'FileSystemLoader': init_filesystem_loader
}
env = loaders_dict[config['LOADER']](**config)
env.globals['FLASK_PHILO_CONFIG'] = app.config
return env
def init_jinja2(g, app):
if 'JINJA2_TEMPLATES' in app.config:
for cname, template_config in app.config['JINJA2_TEMPLATES'].items():
templates_manager.environments[cname] =\
init_loader(app, **template_config)
@app.before_request
def before_request():
"""
Assign template manager to the global
flask object at the beginning of every request
"""
for cname, template_config in\
app.config['JINJA2_TEMPLATES'].items():
env = init_loader(app, **template_config)
templates_manager.environments[cname] = env
g.jinja2_template_manager = templates_manager
|
Riffstation/flask-philo | flask_philo/jinja2/__init__.py | TemplatesManager.set_request | python | def set_request(self, r):
for k in self.environments.keys():
self.environments[k].globals['REQUEST'] = r | Appends request object to the globals dict | train | https://github.com/Riffstation/flask-philo/blob/76c9d562edb4a77010c8da6dfdb6489fa29cbc9e/flask_philo/jinja2/__init__.py#L18-L23 | null | class TemplatesManager:
"""
Flask-philo may support multiple template loaders
"""
_shared_state = {}
environments = {}
def __init__(self):
self.__dict__ = self._shared_state
def render(self, template_name, engine_name='DEFAULT', **data):
t = self.environments[engine_name].get_template(template_name)
return t.render(**data)
|
Riffstation/flask-philo | flask_philo/db/elasticsearch/connection.py | init_db_conn | python | def init_db_conn(connection_name, HOSTS=None):
el = elasticsearch.Elasticsearch(hosts=HOSTS)
el_pool.connections[connection_name] = ElasticSearchClient(el) | Initialize a redis connection by each connection string
defined in the configuration file | train | https://github.com/Riffstation/flask-philo/blob/76c9d562edb4a77010c8da6dfdb6489fa29cbc9e/flask_philo/db/elasticsearch/connection.py#L71-L77 | null | from .client import ElasticSearchClient
import elasticsearch
import sys
class ElasticSearchPool:
"""
"""
_shared_state = {}
def __init__(self):
self.__dict__ = self._shared_state
def ping(self, db='DEFAULT'):
return self.connections[db].ping()
def get_alias(self, db='DEFAULT', name=None):
return self.connections[db].get_alias(name=name)
def create_index(self, name, db='DEFAULT'):
return self.connections[db].create_index(name)
def get(self, db='DEFAULT', index=None, doc_type=None, id=None):
return self.connections[db].get(index=index, doc_type=doc_type, id=id)
def delete_index(self, index, name, db='DEFAULT'):
return self.connections[db].delete_index(index, name)
def delete(self, index, doc_type, id, db='DEFAULT'):
return self.connections[db].delete(index, doc_type, id)
def index(
self, db='DEFAULT', index=None, doc_type=None, id=None, body=None):
return self.connections[db].index(
index=index, doc_type=doc_type, id=id, body=body)
def count(self, index, db='DEFAULT'):
return self.connections[db].count(index)
def search(self, db='DEFAULT', **kwargs):
return self.connections[db].search(**kwargs)
def bulk_index(self, db='DEFAULT', data=None, index=None, doc_type=None):
self.connections[db].bulk_index(
data=data, index=index, doc_type=doc_type)
def close(self, db=None):
if db is None:
for k, v in self.connections.items():
v.close()
self.connections = {}
else:
v = self.connections[db]
v.close()
del self.connections[db]
def flushall(self):
for k, v in self.connections.items():
v.flush()
el_pool = ElasticSearchPool()
el_pool.connections = {}
def get_pool():
return el_pool
def initialize(g, app):
"""
If elastic search connection parameters are defined in configuration
params a session will be created
"""
if 'DATABASES' in app.config and\
'ELASTICSEARCH' in app.config['DATABASES']:
# Initialize connections for console commands
for k, v in app.config['DATABASES']['ELASTICSEARCH'].items():
init_db_conn(k, **v)
@app.before_request
def before_request():
"""
Assign elastic search connection pool to the global
flask object at the beginning of every request
"""
for k, v in app.config['DATABASES']['ELASTICSEARCH'].items():
init_db_conn(k, **v)
g.elasticsearch_pool = el_pool
if 'test' not in sys.argv:
@app.teardown_request
def teardown_request(exception):
pool = getattr(g, 'el_pool', None)
if pool is not None:
pool.close()
|
Riffstation/flask-philo | flask_philo/cloud/aws/utils.py | parse_tags | python | def parse_tags(targs):
tags = {}
for t in targs:
split_tag = t.split(':')
if len(split_tag) > 1:
tags['tag:' + split_tag[0]] = split_tag[1]
else:
tags['tag:' + split_tag[0]] = ''
return tags | Tags can be in the forma key:value or simply value | train | https://github.com/Riffstation/flask-philo/blob/76c9d562edb4a77010c8da6dfdb6489fa29cbc9e/flask_philo/cloud/aws/utils.py#L1-L12 | null |
def run_action(actions, cmd):
if cmd not in actions:
print('{} is an invalid action\n'.format(cmd))
print('Valid actions are:')
for k in actions.keys():
print('* {} \n'.format(k))
exit(1)
else:
actions[cmd]()
|
Riffstation/flask-philo | flask_philo/views.py | BaseView.json_response | python | def json_response(self, status=200, data={}, headers={}):
'''
To set flask to inject specific headers on response request,
such as CORS_ORIGIN headers
'''
mimetype = 'application/json'
header_dict = {}
for k, v in headers.items():
header_dict[k] = v
return Response(
json.dumps(data),
status=status,
mimetype=mimetype,
headers=header_dict) | To set flask to inject specific headers on response request,
such as CORS_ORIGIN headers | train | https://github.com/Riffstation/flask-philo/blob/76c9d562edb4a77010c8da6dfdb6489fa29cbc9e/flask_philo/views.py#L25-L40 | null | class BaseView(MethodView):
def __init__(self, *args, **kwargs):
# assign postgresql pool connections
if 'DATABASES' in app.config and\
'POSTGRESQL' in app.config['DATABASES']:
if hasattr(g, 'postgresql_pool'):
self.postgresql_pool = g.postgresql_pool
# assign redis pool connections
if 'DATABASES' in app.config and 'REDIS' in app.config['DATABASES']:
if hasattr(g, 'redis_pool'):
self.redis_pool = g.redis_pool
if 'JINJA2_TEMPLATES' in app.config:
if hasattr(g, 'jinja2_template_manager'):
self.jinja2_template_manager = g.jinja2_template_manager
super(BaseView, self).__init__(*args, **kwargs)
def render_template(self, template_name, engine_name='DEFAULT', **values):
if not hasattr(self, 'jinja2_template_manager'):
return render_template(template_name, **values)
else:
return self.jinja2_template_manager.render(
template_name, **values)
def template_response(self, template_name, headers={}, **values):
"""
Constructs a response, allowing custom template name and content_type
"""
response = make_response(
self.render_template(template_name, **values))
for field, value in headers.items():
response.headers.set(field, value)
return response
def get(self, *args, **kwargs):
abort(400)
def post(self, *args, **kwargs):
abort(400)
def put(self, *args, **kwargs):
abort(400)
def patch(self, *args, **kwargs):
abort(400)
def delete(self, *args, **kwargs):
abort(400)
|
Riffstation/flask-philo | flask_philo/views.py | BaseView.template_response | python | def template_response(self, template_name, headers={}, **values):
response = make_response(
self.render_template(template_name, **values))
for field, value in headers.items():
response.headers.set(field, value)
return response | Constructs a response, allowing custom template name and content_type | train | https://github.com/Riffstation/flask-philo/blob/76c9d562edb4a77010c8da6dfdb6489fa29cbc9e/flask_philo/views.py#L49-L59 | null | class BaseView(MethodView):
def __init__(self, *args, **kwargs):
# assign postgresql pool connections
if 'DATABASES' in app.config and\
'POSTGRESQL' in app.config['DATABASES']:
if hasattr(g, 'postgresql_pool'):
self.postgresql_pool = g.postgresql_pool
# assign redis pool connections
if 'DATABASES' in app.config and 'REDIS' in app.config['DATABASES']:
if hasattr(g, 'redis_pool'):
self.redis_pool = g.redis_pool
if 'JINJA2_TEMPLATES' in app.config:
if hasattr(g, 'jinja2_template_manager'):
self.jinja2_template_manager = g.jinja2_template_manager
super(BaseView, self).__init__(*args, **kwargs)
def json_response(self, status=200, data={}, headers={}):
'''
To set flask to inject specific headers on response request,
such as CORS_ORIGIN headers
'''
mimetype = 'application/json'
header_dict = {}
for k, v in headers.items():
header_dict[k] = v
return Response(
json.dumps(data),
status=status,
mimetype=mimetype,
headers=header_dict)
def render_template(self, template_name, engine_name='DEFAULT', **values):
if not hasattr(self, 'jinja2_template_manager'):
return render_template(template_name, **values)
else:
return self.jinja2_template_manager.render(
template_name, **values)
def get(self, *args, **kwargs):
abort(400)
def post(self, *args, **kwargs):
abort(400)
def put(self, *args, **kwargs):
abort(400)
def patch(self, *args, **kwargs):
abort(400)
def delete(self, *args, **kwargs):
abort(400)
|
Riffstation/flask-philo | flask_philo/cloud/aws/key_pair.py | describe_key_pairs | python | def describe_key_pairs():
region_keys = {}
for r in boto3.client('ec2', 'us-west-2').describe_regions()['Regions']:
region = r['RegionName']
client = boto3.client('ec2', region_name=region)
try:
pairs = client.describe_key_pairs()
if pairs:
region_keys[region] = pairs
except Exception as e:
app.logger.info(e)
return region_keys | Returns all key pairs for region | train | https://github.com/Riffstation/flask-philo/blob/76c9d562edb4a77010c8da6dfdb6489fa29cbc9e/flask_philo/cloud/aws/key_pair.py#L29-L43 | null | from .utils import run_action
from prettytable import PrettyTable
import argparse
import boto3
import boto3.ec2
import os
app = None
def create_key_pair(key_name, key_dir='~/.ssh', region=None):
client = boto3.client('ec2', region_name=region)
key = client.create_key_pair(KeyName=key_name)
if 'KeyMaterial' not in key:
print('Key could not be created\n')
raise
else:
fname = os.path.join(key_dir, '{}.pem'.format(key_name))
with open(fname, 'w') as f:
f.write(key['KeyMaterial'])
return key
def run(dapp, cmd):
global app
app = dapp
def cmd_describe_key_pairs():
t = PrettyTable([
'KeyName', 'KeyFingerprint', 'Region'
])
for region, keys in describe_key_pairs().items():
for pair in keys['KeyPairs']:
t.add_row([pair['KeyName'], pair['KeyFingerprint'], region])
print(t)
def cmd_create_key_pair():
parser = argparse.ArgumentParser()
parser.add_argument(
'--region', required=True, help='AWS Region')
parser.add_argument('--key_name', required=True)
parser.add_argument(
'--key_dir', required=False, default='~/.ssh',
help='Directory where key will be stored')
args, extra_params = parser.parse_known_args()
pair = create_key_pair(
args.key_name, key_dir=args.key_dir, region=args.region)
t = PrettyTable([
'KeyName', 'KeyFingerprint', 'Region'
])
t.add_row([pair['KeyName'], pair['KeyFingerprint'], args.region])
print(t)
actions = {
'create_key_pair': cmd_create_key_pair,
'describe_key_pairs': cmd_describe_key_pairs,
}
run_action(actions, cmd)
|
Riffstation/flask-philo | flask_philo/__init__.py | init_app | python | def init_app(module, BASE_DIR, **kwargs):
global app
def init_config():
"""
Load settings module and attach values to the application
config dictionary
"""
if 'FLASK_PHILO_SETTINGS_MODULE' not in os.environ:
raise ConfigurationError('No settings has been defined')
app.config['BASE_DIR'] = BASE_DIR
# default settings
for v in dir(default_settings):
if not v.startswith('_'):
app.config[v] = getattr(default_settings, v)
app.debug = app.config['DEBUG']
# app settings
settings = importlib.import_module(
os.environ['FLASK_PHILO_SETTINGS_MODULE'])
for v in dir(settings):
if not v.startswith('_'):
app.config[v] = getattr(settings, v)
def init_urls():
# Reads urls definition from URLs file and bind routes and views
urls_module = importlib.import_module(app.config['URLS'])
for route in urls_module.URLS:
app.add_url_rule(
route[0], view_func=route[1].as_view(route[2]))
def init_logging():
"""
initialize logger for the app
"""
hndlr = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
hndlr.setFormatter(formatter)
app.logger.addHandler(hndlr)
log_level = app.config['LOG_LEVEL']
app.logger.setLevel(getattr(logging, log_level))
def init_flask_oauthlib():
"""
http://flask-oauthlib.readthedocs.io/en/latest/oauth2.html
"""
oauth.init_app(app)
def init_cors(app):
"""
Initializes cors protection if config
"""
if 'CORS' in app.config:
CORS(
app,
resources=app.config['CORS'],
supports_credentials=app.config.get(
"CORS_SUPPORT_CREDENTIALS",
False
),
allow_headers=app.config.get(
"CORS_ALLOW_HEADERS",
"Content-Type,Authorization,accept-language,accept"
)
)
init_db(g, app)
init_logging()
init_urls()
init_flask_oauthlib()
init_jinja2(g, app)
init_cors(app)
app = Flask(module)
init_config()
return app | Initalize an app, call this method once from start_app | train | https://github.com/Riffstation/flask-philo/blob/76c9d562edb4a77010c8da6dfdb6489fa29cbc9e/flask_philo/__init__.py#L27-L110 | [
"def init_config():\n \"\"\"\n Load settings module and attach values to the application\n config dictionary\n \"\"\"\n if 'FLASK_PHILO_SETTINGS_MODULE' not in os.environ:\n raise ConfigurationError('No settings has been defined')\n\n app.config['BASE_DIR'] = BASE_DIR\n\n # default settings\n for v in dir(default_settings):\n if not v.startswith('_'):\n app.config[v] = getattr(default_settings, v)\n\n app.debug = app.config['DEBUG']\n\n # app settings\n settings = importlib.import_module(\n os.environ['FLASK_PHILO_SETTINGS_MODULE'])\n for v in dir(settings):\n if not v.startswith('_'):\n app.config[v] = getattr(settings, v)\n\n def init_urls():\n # Reads urls definition from URLs file and bind routes and views\n urls_module = importlib.import_module(app.config['URLS'])\n for route in urls_module.URLS:\n app.add_url_rule(\n route[0], view_func=route[1].as_view(route[2]))\n\n def init_logging():\n \"\"\"\n initialize logger for the app\n \"\"\"\n\n hndlr = logging.StreamHandler()\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n hndlr.setFormatter(formatter)\n app.logger.addHandler(hndlr)\n log_level = app.config['LOG_LEVEL']\n app.logger.setLevel(getattr(logging, log_level))\n\n def init_flask_oauthlib():\n \"\"\"\n http://flask-oauthlib.readthedocs.io/en/latest/oauth2.html\n \"\"\"\n oauth.init_app(app)\n\n def init_cors(app):\n \"\"\"\n Initializes cors protection if config\n \"\"\"\n\n if 'CORS' in app.config:\n CORS(\n app,\n resources=app.config['CORS'],\n supports_credentials=app.config.get(\n \"CORS_SUPPORT_CREDENTIALS\",\n False\n ),\n allow_headers=app.config.get(\n \"CORS_ALLOW_HEADERS\",\n \"Content-Type,Authorization,accept-language,accept\"\n )\n )\n\n init_db(g, app)\n init_logging()\n init_urls()\n init_flask_oauthlib()\n init_jinja2(g, app)\n init_cors(app)\n"
] | from flask import Flask, g
from flask_oauthlib.provider import OAuth2Provider
from flask_cors import CORS
from . import default_settings
from .commands_flask_philo import * # noqa
from .jinja2 import init_jinja2
from .exceptions import ConfigurationError
from .db import init_db
import logging
import importlib
import os
# Alex Martelli's 'Borg'
class Borg:
_shared_state = {}
def __init__(self):
self.__dict__ = self._shared_state
app = None
oauth = OAuth2Provider()
def execute_command(cmd, **kwargs):
"""
execute a console command
"""
cmd_dict = {
c: 'flask_philo.commands_flask_philo.' + c for c
in dir(commands_flask_philo) if not c.startswith('_') and c != 'os' # noqa
}
# loading specific app commands
try:
import console_commands
for cm in console_commands.__all__:
if not cm.startswith('_'):
cmd_dict[cm] = 'console_commands.' + cm
except Exception:
pass
if cmd not in cmd_dict:
raise ConfigurationError('command {} does not exists'.format(cmd))
cmd_module = importlib.import_module(cmd_dict[cmd])
kwargs['app'] = app
cmd_module.run(**kwargs)
|
Riffstation/flask-philo | flask_philo/__init__.py | execute_command | python | def execute_command(cmd, **kwargs):
cmd_dict = {
c: 'flask_philo.commands_flask_philo.' + c for c
in dir(commands_flask_philo) if not c.startswith('_') and c != 'os' # noqa
}
# loading specific app commands
try:
import console_commands
for cm in console_commands.__all__:
if not cm.startswith('_'):
cmd_dict[cm] = 'console_commands.' + cm
except Exception:
pass
if cmd not in cmd_dict:
raise ConfigurationError('command {} does not exists'.format(cmd))
cmd_module = importlib.import_module(cmd_dict[cmd])
kwargs['app'] = app
cmd_module.run(**kwargs) | execute a console command | train | https://github.com/Riffstation/flask-philo/blob/76c9d562edb4a77010c8da6dfdb6489fa29cbc9e/flask_philo/__init__.py#L113-L135 | null | from flask import Flask, g
from flask_oauthlib.provider import OAuth2Provider
from flask_cors import CORS
from . import default_settings
from .commands_flask_philo import * # noqa
from .jinja2 import init_jinja2
from .exceptions import ConfigurationError
from .db import init_db
import logging
import importlib
import os
# Alex Martelli's 'Borg'
class Borg:
_shared_state = {}
def __init__(self):
self.__dict__ = self._shared_state
app = None
oauth = OAuth2Provider()
def init_app(module, BASE_DIR, **kwargs):
"""
Initalize an app, call this method once from start_app
"""
global app
def init_config():
"""
Load settings module and attach values to the application
config dictionary
"""
if 'FLASK_PHILO_SETTINGS_MODULE' not in os.environ:
raise ConfigurationError('No settings has been defined')
app.config['BASE_DIR'] = BASE_DIR
# default settings
for v in dir(default_settings):
if not v.startswith('_'):
app.config[v] = getattr(default_settings, v)
app.debug = app.config['DEBUG']
# app settings
settings = importlib.import_module(
os.environ['FLASK_PHILO_SETTINGS_MODULE'])
for v in dir(settings):
if not v.startswith('_'):
app.config[v] = getattr(settings, v)
def init_urls():
# Reads urls definition from URLs file and bind routes and views
urls_module = importlib.import_module(app.config['URLS'])
for route in urls_module.URLS:
app.add_url_rule(
route[0], view_func=route[1].as_view(route[2]))
def init_logging():
"""
initialize logger for the app
"""
hndlr = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
hndlr.setFormatter(formatter)
app.logger.addHandler(hndlr)
log_level = app.config['LOG_LEVEL']
app.logger.setLevel(getattr(logging, log_level))
def init_flask_oauthlib():
"""
http://flask-oauthlib.readthedocs.io/en/latest/oauth2.html
"""
oauth.init_app(app)
def init_cors(app):
"""
Initializes cors protection if config
"""
if 'CORS' in app.config:
CORS(
app,
resources=app.config['CORS'],
supports_credentials=app.config.get(
"CORS_SUPPORT_CREDENTIALS",
False
),
allow_headers=app.config.get(
"CORS_ALLOW_HEADERS",
"Content-Type,Authorization,accept-language,accept"
)
)
init_db(g, app)
init_logging()
init_urls()
init_flask_oauthlib()
init_jinja2(g, app)
init_cors(app)
app = Flask(module)
init_config()
return app
|
Riffstation/flask-philo | flask_philo/db/postgresql/__init__.py | syncdb | python | def syncdb():
from flask_philo.db.postgresql.schema import Base
from flask_philo.db.postgresql.orm import BaseModel # noqa
from flask_philo.db.postgresql.connection import get_pool
for conn_name, conn in get_pool().connections.items():
Base.metadata.create_all(conn.engine) | Create tables if they don't exist | train | https://github.com/Riffstation/flask-philo/blob/76c9d562edb4a77010c8da6dfdb6489fa29cbc9e/flask_philo/db/postgresql/__init__.py#L1-L10 | [
"def get_pool():\n return pool\n"
] | |
Riffstation/flask-philo | flask_philo/db/postgresql/types.py | PasswordHash.new | python | def new(cls, password, rounds):
if isinstance(password, str):
password = password.encode('utf8')
value = bcrypt.hashpw(password, bcrypt.gensalt(rounds))
return cls(value) | Creates a PasswordHash from the given password. | train | https://github.com/Riffstation/flask-philo/blob/76c9d562edb4a77010c8da6dfdb6489fa29cbc9e/flask_philo/db/postgresql/types.py#L47-L52 | null | class PasswordHash(object):
"""
http://variable-scope.com/posts/storing-and-verifying-passwords-with-sqlalchemy
"""
def __init__(self, hash_):
assert len(hash_) == 60, 'bcrypt hash should be 60 chars.'
if isinstance(hash_, bytes):
self.hash = hash_.decode('utf-8')
else:
self.hash = hash_
assert self.hash.count('$'), 'bcrypt hash should have 3x "$".'
self.rounds = int(self.hash.split('$')[2])
def __eq__(self, candidate):
"""Hashes the candidate string and compares it to the stored hash."""
if isinstance(self.hash, str):
_hash = self.hash.encode('utf-8')
else:
_hash = self.hash
if isinstance(candidate, PasswordHash):
candidate = candidate.hash
if isinstance(candidate, str):
candidate = candidate.encode('utf-8')
return bcrypt.hashpw(candidate, _hash) == _hash
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
"""Simple object representation."""
return '<{}>'.format(type(self).__name__)
@classmethod
|
Riffstation/flask-philo | flask_philo/db/postgresql/types.py | Password._convert | python | def _convert(self, value):
if isinstance(value, PasswordHash):
return value
elif isinstance(value, str):
value = value.encode('utf-8')
return PasswordHash.new(value, self.rounds)
elif value is not None:
raise TypeError(
'Cannot convert {} to a PasswordHash'.format(type(value))) | Returns a PasswordHash from the given string.
PasswordHash instances or None values will return unchanged.
Strings will be hashed and the resulting PasswordHash returned.
Any other input will result in a TypeError. | train | https://github.com/Riffstation/flask-philo/blob/76c9d562edb4a77010c8da6dfdb6489fa29cbc9e/flask_philo/db/postgresql/types.py#L76-L90 | null | class Password(TypeDecorator):
"""Allows storing and retrieving password hashes using PasswordHash."""
impl = Text
def __init__(self, rounds=12, **kwds):
self.rounds = rounds
super(Password, self).__init__(**kwds)
def process_bind_param(self, value, dialect):
"""Ensure the value is a PasswordHash and then return its hash."""
return self._convert(value).hash
def process_result_value(self, value, dialect):
"""Convert the hash to a PasswordHash, if it's non-NULL."""
if value is not None:
return PasswordHash(value)
def validator(self, password):
"""Provides a validator/converter for @validates usage."""
return self._convert(password)
|
Riffstation/flask-philo | flask_philo/db/redis/connection.py | init_db_conn | python | def init_db_conn(
connection_name, HOST=None, PORT=None, DB=None, PASSWORD=None):
rpool = redis.ConnectionPool(
host=HOST, port=PORT, db=DB, password=PASSWORD)
r = redis.Redis(connection_pool=rpool)
redis_pool.connections[connection_name] = RedisClient(r) | Initialize a redis connection by each connection string
defined in the configuration file | train | https://github.com/Riffstation/flask-philo/blob/76c9d562edb4a77010c8da6dfdb6489fa29cbc9e/flask_philo/db/redis/connection.py#L62-L71 | null | from .client import RedisClient
import json
import redis
import sys
class RedisPool:
"""
"""
_shared_state = {}
def __init__(self):
self.__dict__ = self._shared_state
def ping(self, db='DEFAULT'):
return self.connections[db].ping()
def set_json(self, k, v, db='DEFAULT'):
jval = json.dumps(v)
self.set(k, jval)
def set(self, k, v, db='DEFAULT'):
self.connections[db].set(k, v)
def delete(self, k, db='DEFAULT'):
self.connections[db].delete(k)
def get_json(self, k, db='DEFAULT', encode='utf-8'):
val = self.get(k, db=db)
if val is not None:
return json.loads(val.decode(encode))
else:
return {}
def get(self, k, db='DEFAULT'):
return self.connections[db].get(k)
def close(self, db=None):
if db is None:
for k, v in self.connections.items():
v.close()
self.connections = {}
else:
v = self.connections[db]
v.close()
del self.connections[db]
def flushall(self):
for k, v in self.connections.items():
v.flushdb()
redis_pool = RedisPool()
redis_pool.connections = {}
def get_pool():
return redis_pool
def initialize(g, app):
"""
If redis connection parameters are defined in configuration params a
session will be created
"""
if 'DATABASES' in app.config and 'REDIS' in app.config['DATABASES']:
# Initialize connections for console commands
for k, v in app.config['DATABASES']['REDIS'].items():
init_db_conn(k, **v)
@app.before_request
def before_request():
"""
Assign redis connection pool to the global
flask object at the beginning of every request
"""
for k, v in app.config['DATABASES']['REDIS'].items():
init_db_conn(k, **v)
g.redis_pool = redis_pool
if 'test' not in sys.argv:
@app.teardown_request
def teardown_request(exception):
pool = getattr(g, 'redis_pool', None)
if pool is not None:
pool.close()
|
Riffstation/flask-philo | flask_philo/db/redis/connection.py | initialize | python | def initialize(g, app):
if 'DATABASES' in app.config and 'REDIS' in app.config['DATABASES']:
# Initialize connections for console commands
for k, v in app.config['DATABASES']['REDIS'].items():
init_db_conn(k, **v)
@app.before_request
def before_request():
"""
Assign redis connection pool to the global
flask object at the beginning of every request
"""
for k, v in app.config['DATABASES']['REDIS'].items():
init_db_conn(k, **v)
g.redis_pool = redis_pool
if 'test' not in sys.argv:
@app.teardown_request
def teardown_request(exception):
pool = getattr(g, 'redis_pool', None)
if pool is not None:
pool.close() | If redis connection parameters are defined in configuration params a
session will be created | train | https://github.com/Riffstation/flask-philo/blob/76c9d562edb4a77010c8da6dfdb6489fa29cbc9e/flask_philo/db/redis/connection.py#L74-L101 | [
"def init_db_conn(\n connection_name, HOST=None, PORT=None, DB=None, PASSWORD=None):\n \"\"\"\n Initialize a redis connection by each connection string\n defined in the configuration file\n \"\"\"\n rpool = redis.ConnectionPool(\n host=HOST, port=PORT, db=DB, password=PASSWORD)\n r = redis.Redis(connection_pool=rpool)\n redis_pool.connections[connection_name] = RedisClient(r)\n"
] | from .client import RedisClient
import json
import redis
import sys
class RedisPool:
"""
"""
_shared_state = {}
def __init__(self):
self.__dict__ = self._shared_state
def ping(self, db='DEFAULT'):
return self.connections[db].ping()
def set_json(self, k, v, db='DEFAULT'):
jval = json.dumps(v)
self.set(k, jval)
def set(self, k, v, db='DEFAULT'):
self.connections[db].set(k, v)
def delete(self, k, db='DEFAULT'):
self.connections[db].delete(k)
def get_json(self, k, db='DEFAULT', encode='utf-8'):
val = self.get(k, db=db)
if val is not None:
return json.loads(val.decode(encode))
else:
return {}
def get(self, k, db='DEFAULT'):
return self.connections[db].get(k)
def close(self, db=None):
if db is None:
for k, v in self.connections.items():
v.close()
self.connections = {}
else:
v = self.connections[db]
v.close()
del self.connections[db]
def flushall(self):
for k, v in self.connections.items():
v.flushdb()
redis_pool = RedisPool()
redis_pool.connections = {}
def get_pool():
return redis_pool
def init_db_conn(
connection_name, HOST=None, PORT=None, DB=None, PASSWORD=None):
"""
Initialize a redis connection by each connection string
defined in the configuration file
"""
rpool = redis.ConnectionPool(
host=HOST, port=PORT, db=DB, password=PASSWORD)
r = redis.Redis(connection_pool=rpool)
redis_pool.connections[connection_name] = RedisClient(r)
|
Riffstation/flask-philo | flask_philo/serializers.py | BaseSerializer._initialize_from_dict | python | def _initialize_from_dict(self, data):
self._json = data
self._validate()
for name, value in self._json.items():
if name in self._properties:
if '$ref' in self._properties[name]:
if 'decimal' in self._properties[name]['$ref']:
value = Decimal(value)
# applying proper formatting when required
if 'format' in self._properties[name]:
format = self._properties[name]['format']
if 'date-time' == format:
value = utils.string_to_datetime(value)
elif 'date' == format:
value = utils.string_to_date(value)
setattr(self, name, value) | Loads serializer from a request object | train | https://github.com/Riffstation/flask-philo/blob/76c9d562edb4a77010c8da6dfdb6489fa29cbc9e/flask_philo/serializers.py#L60-L81 | [
"def string_to_datetime(strdate):\n \"\"\"\n Receives a string as parameter and returns a formated\n datetime.\n Format is defined in the configuration parameter\n DATETIME_FORMAT by default '%Y-%m-%d %H:%M:%S'\n this format can be overide in the app configuration settings\n \"\"\"\n return datetime.strptime(strdate[:19], app.config['DATETIME_FORMAT'])\n",
"def string_to_date(strdate):\n \"\"\"\n Receives a string as parameter and returns a formated\n date.\n Format is defined in the configuration parameter\n DATE_FORMAT by default '%Y-%m-%d'\n this format can be overide in the app configuration settings\n \"\"\"\n return datetime.strptime(strdate, app.config['DATE_FORMAT'])\n",
"def _validate(self):\n # avoid extra values not defined in the schema\n if 'additionalProperties' not in self._schema:\n self._schema['additionalProperties'] = False\n validate(\n self._json, self._schema, format_checker=FormatChecker())\n"
] | class BaseSerializer(object):
"""
Base serializer
"""
_schema = {}
_json = {}
__model__ = None
def __init__(self, request=None, model=None, data=None):
"""
A serializer object can be built from a request object or
a model object
"""
if 'properties' not in self._schema:
raise SerializerError(
'Can not build a serializer without a schema associated')
else:
self._properties = self._schema['properties']
if request:
self._initialize_from_dict(request.json)
elif model:
self._initialize_from_model(model)
elif data:
self._initialize_from_dict(data)
else:
raise SerializerError(
'Can not build a serializer without an'
'http request or model associated')
def _validate(self):
# avoid extra values not defined in the schema
if 'additionalProperties' not in self._schema:
self._schema['additionalProperties'] = False
validate(
self._json, self._schema, format_checker=FormatChecker())
def _initialize_from_model(self, model):
"""
Loads a model from
"""
for name, value in model.__dict__.items():
if name in self._properties:
setattr(self, name, value)
def update(self):
"""
Finds record and update it based in serializer values
"""
obj = self.__model__.objects.get_for_update(id=self.id)
for name, value in self.__dict__.items():
if name in self._properties:
setattr(obj, name, value)
obj.update()
return obj
def to_json(self):
"""
Returns a json representation
"""
data = {}
for k, v in self.__dict__.items():
if not k.startswith('_'):
# values not serializable, should be converted to strings
if isinstance(v, datetime):
v = utils.datetime_to_string(v)
elif isinstance(v, date):
v = utils.date_to_string(v)
elif isinstance(v, uuid.UUID):
v = str(v)
elif isinstance(v, Decimal):
v = str(v)
data[k] = v
return data
|
Riffstation/flask-philo | flask_philo/serializers.py | BaseSerializer._initialize_from_model | python | def _initialize_from_model(self, model):
for name, value in model.__dict__.items():
if name in self._properties:
setattr(self, name, value) | Loads a model from | train | https://github.com/Riffstation/flask-philo/blob/76c9d562edb4a77010c8da6dfdb6489fa29cbc9e/flask_philo/serializers.py#L83-L89 | null | class BaseSerializer(object):
"""
Base serializer
"""
_schema = {}
_json = {}
__model__ = None
def __init__(self, request=None, model=None, data=None):
"""
A serializer object can be built from a request object or
a model object
"""
if 'properties' not in self._schema:
raise SerializerError(
'Can not build a serializer without a schema associated')
else:
self._properties = self._schema['properties']
if request:
self._initialize_from_dict(request.json)
elif model:
self._initialize_from_model(model)
elif data:
self._initialize_from_dict(data)
else:
raise SerializerError(
'Can not build a serializer without an'
'http request or model associated')
def _validate(self):
# avoid extra values not defined in the schema
if 'additionalProperties' not in self._schema:
self._schema['additionalProperties'] = False
validate(
self._json, self._schema, format_checker=FormatChecker())
def _initialize_from_dict(self, data):
"""
Loads serializer from a request object
"""
self._json = data
self._validate()
for name, value in self._json.items():
if name in self._properties:
if '$ref' in self._properties[name]:
if 'decimal' in self._properties[name]['$ref']:
value = Decimal(value)
# applying proper formatting when required
if 'format' in self._properties[name]:
format = self._properties[name]['format']
if 'date-time' == format:
value = utils.string_to_datetime(value)
elif 'date' == format:
value = utils.string_to_date(value)
setattr(self, name, value)
def update(self):
"""
Finds record and update it based in serializer values
"""
obj = self.__model__.objects.get_for_update(id=self.id)
for name, value in self.__dict__.items():
if name in self._properties:
setattr(obj, name, value)
obj.update()
return obj
def to_json(self):
"""
Returns a json representation
"""
data = {}
for k, v in self.__dict__.items():
if not k.startswith('_'):
# values not serializable, should be converted to strings
if isinstance(v, datetime):
v = utils.datetime_to_string(v)
elif isinstance(v, date):
v = utils.date_to_string(v)
elif isinstance(v, uuid.UUID):
v = str(v)
elif isinstance(v, Decimal):
v = str(v)
data[k] = v
return data
|
Riffstation/flask-philo | flask_philo/serializers.py | BaseSerializer.update | python | def update(self):
obj = self.__model__.objects.get_for_update(id=self.id)
for name, value in self.__dict__.items():
if name in self._properties:
setattr(obj, name, value)
obj.update()
return obj | Finds record and update it based in serializer values | train | https://github.com/Riffstation/flask-philo/blob/76c9d562edb4a77010c8da6dfdb6489fa29cbc9e/flask_philo/serializers.py#L91-L100 | null | class BaseSerializer(object):
"""
Base serializer
"""
_schema = {}
_json = {}
__model__ = None
def __init__(self, request=None, model=None, data=None):
"""
A serializer object can be built from a request object or
a model object
"""
if 'properties' not in self._schema:
raise SerializerError(
'Can not build a serializer without a schema associated')
else:
self._properties = self._schema['properties']
if request:
self._initialize_from_dict(request.json)
elif model:
self._initialize_from_model(model)
elif data:
self._initialize_from_dict(data)
else:
raise SerializerError(
'Can not build a serializer without an'
'http request or model associated')
def _validate(self):
# avoid extra values not defined in the schema
if 'additionalProperties' not in self._schema:
self._schema['additionalProperties'] = False
validate(
self._json, self._schema, format_checker=FormatChecker())
def _initialize_from_dict(self, data):
"""
Loads serializer from a request object
"""
self._json = data
self._validate()
for name, value in self._json.items():
if name in self._properties:
if '$ref' in self._properties[name]:
if 'decimal' in self._properties[name]['$ref']:
value = Decimal(value)
# applying proper formatting when required
if 'format' in self._properties[name]:
format = self._properties[name]['format']
if 'date-time' == format:
value = utils.string_to_datetime(value)
elif 'date' == format:
value = utils.string_to_date(value)
setattr(self, name, value)
def _initialize_from_model(self, model):
"""
Loads a model from
"""
for name, value in model.__dict__.items():
if name in self._properties:
setattr(self, name, value)
def to_json(self):
"""
Returns a json representation
"""
data = {}
for k, v in self.__dict__.items():
if not k.startswith('_'):
# values not serializable, should be converted to strings
if isinstance(v, datetime):
v = utils.datetime_to_string(v)
elif isinstance(v, date):
v = utils.date_to_string(v)
elif isinstance(v, uuid.UUID):
v = str(v)
elif isinstance(v, Decimal):
v = str(v)
data[k] = v
return data
|
Riffstation/flask-philo | flask_philo/serializers.py | BaseSerializer.to_json | python | def to_json(self):
data = {}
for k, v in self.__dict__.items():
if not k.startswith('_'):
# values not serializable, should be converted to strings
if isinstance(v, datetime):
v = utils.datetime_to_string(v)
elif isinstance(v, date):
v = utils.date_to_string(v)
elif isinstance(v, uuid.UUID):
v = str(v)
elif isinstance(v, Decimal):
v = str(v)
data[k] = v
return data | Returns a json representation | train | https://github.com/Riffstation/flask-philo/blob/76c9d562edb4a77010c8da6dfdb6489fa29cbc9e/flask_philo/serializers.py#L102-L119 | null | class BaseSerializer(object):
"""
Base serializer
"""
_schema = {}
_json = {}
__model__ = None
def __init__(self, request=None, model=None, data=None):
"""
A serializer object can be built from a request object or
a model object
"""
if 'properties' not in self._schema:
raise SerializerError(
'Can not build a serializer without a schema associated')
else:
self._properties = self._schema['properties']
if request:
self._initialize_from_dict(request.json)
elif model:
self._initialize_from_model(model)
elif data:
self._initialize_from_dict(data)
else:
raise SerializerError(
'Can not build a serializer without an'
'http request or model associated')
def _validate(self):
# avoid extra values not defined in the schema
if 'additionalProperties' not in self._schema:
self._schema['additionalProperties'] = False
validate(
self._json, self._schema, format_checker=FormatChecker())
def _initialize_from_dict(self, data):
"""
Loads serializer from a request object
"""
self._json = data
self._validate()
for name, value in self._json.items():
if name in self._properties:
if '$ref' in self._properties[name]:
if 'decimal' in self._properties[name]['$ref']:
value = Decimal(value)
# applying proper formatting when required
if 'format' in self._properties[name]:
format = self._properties[name]['format']
if 'date-time' == format:
value = utils.string_to_datetime(value)
elif 'date' == format:
value = utils.string_to_date(value)
setattr(self, name, value)
def _initialize_from_model(self, model):
"""
Loads a model from
"""
for name, value in model.__dict__.items():
if name in self._properties:
setattr(self, name, value)
def update(self):
"""
Finds record and update it based in serializer values
"""
obj = self.__model__.objects.get_for_update(id=self.id)
for name, value in self.__dict__.items():
if name in self._properties:
setattr(obj, name, value)
obj.update()
return obj
|
nerdynick/PySQLPool | src/PySQLPool/__init__.py | getNewConnection | python | def getNewConnection(*args, **kargs):
kargs = dict(kargs)
if len(args) > 0:
if len(args) >= 1:
kargs['host'] = args[0]
if len(args) >= 2:
kargs['user'] = args[1]
if len(args) >= 3:
kargs['passwd'] = args[2]
if len(args) >= 4:
kargs['db'] = args[3]
if len(args) >= 5:
kargs['port'] = args[4]
if len(args) >= 6:
kargs['commitOnEnd'] = args[5]
return connection.Connection(*args, **kargs) | Quickly Create a new PySQLConnection class
@param host: Hostname for your database
@param username: Username to use to connect to database
@param password: Password to use to connect to database
@param schema: Schema to use
@param port: Port to connect on
@param commitOnEnd: Default False, When query is complete do you wish to auto commit. This is a always on for this connection
@author: Nick Verbeck
@since: 5/12/2008
@updated: 7/19/2008 - Added commitOnEnd support | train | https://github.com/nerdynick/PySQLPool/blob/a561275fea091e2667b69ce376c507f541b56e7d/src/PySQLPool/__init__.py#L15-L43 | null | __version__ = '0.4'
__author__ = 'Nick Verbeck'
__author_email__ = 'nerdynick@gmail.com'
import logging
logger = None
log_level = logging.INFO
#We rename these for legacy support. Will phase out with 1.0 most likely
import connection
import query
import pool
#Connection short cuts
def getNewConnection(*args, **kargs):
"""
Quickly Create a new PySQLConnection class
@param host: Hostname for your database
@param username: Username to use to connect to database
@param password: Password to use to connect to database
@param schema: Schema to use
@param port: Port to connect on
@param commitOnEnd: Default False, When query is complete do you wish to auto commit. This is a always on for this connection
@author: Nick Verbeck
@since: 5/12/2008
@updated: 7/19/2008 - Added commitOnEnd support
"""
kargs = dict(kargs)
if len(args) > 0:
if len(args) >= 1:
kargs['host'] = args[0]
if len(args) >= 2:
kargs['user'] = args[1]
if len(args) >= 3:
kargs['passwd'] = args[2]
if len(args) >= 4:
kargs['db'] = args[3]
if len(args) >= 5:
kargs['port'] = args[4]
if len(args) >= 6:
kargs['commitOnEnd'] = args[5]
return connection.Connection(*args, **kargs)
#Query short cuts
def getNewQuery(connection = None, commitOnEnd=False, *args, **kargs):
"""
Create a new PySQLQuery Class
@param PySQLConnectionObj: Connection Object representing your connection string
@param commitOnEnd: Default False, When query is complete do you wish to auto commit. This is a one time auto commit
@author: Nick Verbeck
@since: 5/12/2008
@updated: 7/19/2008 - Added commitOnEnd support
"""
if connection is None:
return query.PySQLQuery(getNewConnection(*args, **kargs), commitOnEnd = commitOnEnd)
else:
#Updated 7/24/08 to include commitOnEnd here
#-Chandler Prall
return query.PySQLQuery(connection, commitOnEnd = commitOnEnd)
#Pool short cuts
def getNewPool():
"""
Create a new PySQLPool
@author: Nick Verbeck
@since: 5/12/2008
"""
return pool.Pool()
def terminatePool():
"""
Terminate all Connection
@author: Nick Verbeck
@since: 5/12/2008
"""
pool.Pool().Terminate()
def commitPool():
"""
Commits All changes in pool
@author: Nick Verbeck
@since: 9/12/2008
"""
pool.Pool().Commit()
def cleanupPool():
"""
Cleanup connection pool. Closing all inactive connections.
@author: Nick Verbeck
@since: 9/12/2008
"""
pool.Pool().Cleanup() |
nerdynick/PySQLPool | src/PySQLPool/__init__.py | getNewQuery | python | def getNewQuery(connection = None, commitOnEnd=False, *args, **kargs):
if connection is None:
return query.PySQLQuery(getNewConnection(*args, **kargs), commitOnEnd = commitOnEnd)
else:
#Updated 7/24/08 to include commitOnEnd here
#-Chandler Prall
return query.PySQLQuery(connection, commitOnEnd = commitOnEnd) | Create a new PySQLQuery Class
@param PySQLConnectionObj: Connection Object representing your connection string
@param commitOnEnd: Default False, When query is complete do you wish to auto commit. This is a one time auto commit
@author: Nick Verbeck
@since: 5/12/2008
@updated: 7/19/2008 - Added commitOnEnd support | train | https://github.com/nerdynick/PySQLPool/blob/a561275fea091e2667b69ce376c507f541b56e7d/src/PySQLPool/__init__.py#L46-L61 | [
"def getNewConnection(*args, **kargs):\n\t\"\"\"\n\tQuickly Create a new PySQLConnection class\n\n\t@param host: Hostname for your database\n\t@param username: Username to use to connect to database\n\t@param password: Password to use to connect to database \n\t@param schema: Schema to use\n\t@param port: Port to connect on\n\t@param commitOnEnd: Default False, When query is complete do you wish to auto commit. This is a always on for this connection\n\t@author: Nick Verbeck\n\t@since: 5/12/2008\n\t@updated: 7/19/2008 - Added commitOnEnd support\n\t\"\"\"\n\tkargs = dict(kargs)\n\tif len(args) > 0:\n\t\tif len(args) >= 1:\n\t\t\tkargs['host'] = args[0]\n\t\tif len(args) >= 2:\n\t\t\tkargs['user'] = args[1]\n\t\tif len(args) >= 3:\n\t\t\tkargs['passwd'] = args[2]\n\t\tif len(args) >= 4:\n\t\t\tkargs['db'] = args[3]\n\t\tif len(args) >= 5:\n\t\t\tkargs['port'] = args[4]\n\t\tif len(args) >= 6:\n\t\t\tkargs['commitOnEnd'] = args[5]\n\treturn connection.Connection(*args, **kargs)\n"
] | __version__ = '0.4'
__author__ = 'Nick Verbeck'
__author_email__ = 'nerdynick@gmail.com'
import logging
logger = None
log_level = logging.INFO
#We rename these for legacy support. Will phase out with 1.0 most likely
import connection
import query
import pool
#Connection short cuts
def getNewConnection(*args, **kargs):
"""
Quickly Create a new PySQLConnection class
@param host: Hostname for your database
@param username: Username to use to connect to database
@param password: Password to use to connect to database
@param schema: Schema to use
@param port: Port to connect on
@param commitOnEnd: Default False, When query is complete do you wish to auto commit. This is a always on for this connection
@author: Nick Verbeck
@since: 5/12/2008
@updated: 7/19/2008 - Added commitOnEnd support
"""
kargs = dict(kargs)
if len(args) > 0:
if len(args) >= 1:
kargs['host'] = args[0]
if len(args) >= 2:
kargs['user'] = args[1]
if len(args) >= 3:
kargs['passwd'] = args[2]
if len(args) >= 4:
kargs['db'] = args[3]
if len(args) >= 5:
kargs['port'] = args[4]
if len(args) >= 6:
kargs['commitOnEnd'] = args[5]
return connection.Connection(*args, **kargs)
#Query short cuts
def getNewQuery(connection = None, commitOnEnd=False, *args, **kargs):
"""
Create a new PySQLQuery Class
@param PySQLConnectionObj: Connection Object representing your connection string
@param commitOnEnd: Default False, When query is complete do you wish to auto commit. This is a one time auto commit
@author: Nick Verbeck
@since: 5/12/2008
@updated: 7/19/2008 - Added commitOnEnd support
"""
if connection is None:
return query.PySQLQuery(getNewConnection(*args, **kargs), commitOnEnd = commitOnEnd)
else:
#Updated 7/24/08 to include commitOnEnd here
#-Chandler Prall
return query.PySQLQuery(connection, commitOnEnd = commitOnEnd)
#Pool short cuts
def getNewPool():
"""
Create a new PySQLPool
@author: Nick Verbeck
@since: 5/12/2008
"""
return pool.Pool()
def terminatePool():
"""
Terminate all Connection
@author: Nick Verbeck
@since: 5/12/2008
"""
pool.Pool().Terminate()
def commitPool():
"""
Commits All changes in pool
@author: Nick Verbeck
@since: 9/12/2008
"""
pool.Pool().Commit()
def cleanupPool():
"""
Cleanup connection pool. Closing all inactive connections.
@author: Nick Verbeck
@since: 9/12/2008
"""
pool.Pool().Cleanup() |
nerdynick/PySQLPool | src/PySQLPool/query.py | PySQLQuery.query | python | def query(self, query, args=None):
self.affectedRows = None
self.lastError = None
cursor = None
try:
try:
self._GetConnection()
log.logger.debug('Running query "%s" with args "%s"', query, args)
self.conn.query = query
#Execute query and store results
cursor = self.conn.getCursor()
self.affectedRows = cursor.execute(query, args)
self.lastInsertID = self.conn.connection.insert_id()
self.rowcount = cursor.rowcount
log.logger.debug('Query Resulted in %s affected rows, %s rows returned, %s last insert id', self.affectedRows, self.lastInsertID, self.rowcount)
self.record = cursor.fetchall()
self.conn.updateCheckTime()
except Exception, e:
self.lastError = e
self.affectedRows = None
finally:
if cursor is not None:
cursor.close()
self._ReturnConnection()
if self.lastError is not None:
raise self.lastError
else:
return self.affectedRows | Execute the passed in query against the database
@param query: MySQL Query to execute. %s or %(key)s will be replaced by parameter args sequence
@param args: Sequence of value to replace in your query. A mapping may also be used but your query must use %(key)s
@author: Nick Verbeck
@since: 5/12/2008 | train | https://github.com/nerdynick/PySQLPool/blob/a561275fea091e2667b69ce376c507f541b56e7d/src/PySQLPool/query.py#L74-L114 | [
"def _GetConnection(self):\n\t\"\"\"\n\tRetieves a prelocked connection from the Pool\n\n\t@author: Nick Verbeck\n\t@since: 9/7/2008\n\t\"\"\"\n\t#Attempt to get a connection. If all connections are in use and we have reached the max number of connections,\n\t#we wait 1 second and try again.\n\t#The Connection is returned locked to be thread safe\n\twhile self.conn is None:\n\t\tself.conn = Pool().GetConnection(self.connInfo)\n\t\tif self.conn is not None:\n\t\t\tbreak\n\t\telse:\n\t\t\ttime.sleep(1)\n",
"def _ReturnConnection(self):\n\t\"\"\"\n\tReturns a connection back to the pool\n\n\t@author: Nick Verbeck\n\t@since: 9/7/2008\n\t\"\"\"\n\tif self.conn is not None:\n\t\tif self.connInfo.commitOnEnd is True or self.commitOnEnd is True:\n\t\t\tself.conn.Commit()\n\n\t\tPool().returnConnection(self.conn)\n\t\tself.conn = None\n"
] | class PySQLQuery(object):
"""
Front-End class used for interaction with the PySQLPool core
This class is used to execute queries and to request a currently open connection from the pool.
If no open connections exist a new one is created by the pool.
@author: Nick Verbeck
@since: 5/12/2008
@version: 0.1
"""
def __init__(self, PySQLConnectionObj, commitOnEnd = False):
"""
Constructor for PySQLQuery Class
@param PySQLConnectionObj: PySQLConnection Object representing your connection string
@param commitOnEnd: Default False, When query is complete do you wish to auto commit. This is a one time auto commit
@author: Nick Verbeck
@since: 5/12/2008
"""
self.connInfo = PySQLConnectionObj
self.record = {}
self.rowcount = 0
self.affectedRows = None
#The Real Connection to the DB
self.conn = None
self.lastError = None
self.lastInsertID = None
def __del__(self):
"""
On destruct make sure the current connection is returned back to the pool for use later
@author: Nick Verbeck
@since: 5/12/2008
"""
if self.conn is not None:
self._ReturnConnection()
def __enter__(self):
"""
Starts transaction, used with the 'with' statement.
@author: Denis Malinovsky
@since: 5/21/2010
"""
self.Query('START TRANSACTION')
log.logger.info('Starting Transaction')
def __exit__(self, exc_type, exc_value, traceback):
"""
Commits transaction, if no exception was raised.
@author: Denis Malinovsky
@since: 5/21/2010
"""
if exc_type is None:
self.Query('COMMIT')
log.logger.info('Commiting Transaction')
else:
self.Query('ROLLBACK')
log.logger.info('Rolling Back Transaction')
#TODO: In the future lets decorate all our query calls with a connection fetching and releasing handler. Help to centralize all this logic for use in transactions in the future.
def query(self, query, args=None):
"""
Execute the passed in query against the database
@param query: MySQL Query to execute. %s or %(key)s will be replaced by parameter args sequence
@param args: Sequence of value to replace in your query. A mapping may also be used but your query must use %(key)s
@author: Nick Verbeck
@since: 5/12/2008
"""
self.affectedRows = None
self.lastError = None
cursor = None
try:
try:
self._GetConnection()
log.logger.debug('Running query "%s" with args "%s"', query, args)
self.conn.query = query
#Execute query and store results
cursor = self.conn.getCursor()
self.affectedRows = cursor.execute(query, args)
self.lastInsertID = self.conn.connection.insert_id()
self.rowcount = cursor.rowcount
log.logger.debug('Query Resulted in %s affected rows, %s rows returned, %s last insert id', self.affectedRows, self.lastInsertID, self.rowcount)
self.record = cursor.fetchall()
self.conn.updateCheckTime()
except Exception, e:
self.lastError = e
self.affectedRows = None
finally:
if cursor is not None:
cursor.close()
self._ReturnConnection()
if self.lastError is not None:
raise self.lastError
else:
return self.affectedRows
execute = Query = query
def queryOne(self, query, args=None):
"""
Execute the passed in query against the database.
Uses a Generator & fetchone to reduce your process memory size.
@param query: MySQL Query to execute. %s or %(key)s will be replaced by parameter args sequence
@param args: Sequence of value to replace in your query. A mapping may also be used but your query must use %(key)s
@author: Nick Verbeck
@since: 5/12/2008
"""
self.affectedRows = None
self.lastError = None
cursor = None
try:
try:
self._GetConnection()
self.conn.query = query
#Execute query
cursor = self.conn.getCursor()
self.affectedRows = cursor.execute(query, args)
self.conn.updateCheckTime()
while 1:
row = cursor.fetchone()
if row is None:
break
else:
self.record = row
yield row
self.rowcount = cursor.rowcount
except Exception, e:
self.lastError = e
self.affectedRows = None
finally:
if cursor is not None:
cursor.close()
self._ReturnConnection()
if self.lastError is not None:
raise self.lastError
else:
raise StopIteration
executeOne = QueryOne = queryOne
def queryMany(self, query, args):
"""
Executes a series of the same Insert Statments
Each tuple in the args list will be applied to the query and executed.
This is the equivilant of MySQLDB.cursor.executemany()
@author: Nick Verbeck
@since: 9/7/2008
"""
self.lastError = None
self.affectedRows = None
self.rowcount = None
self.record = None
cursor = None
try:
try:
self._GetConnection()
self.conn.query = query
#Execute query and store results
cursor = self.conn.getCursor()
self.affectedRows = cursor.executemany(query, args)
self.conn.updateCheckTime()
except Exception, e:
self.lastError = e
finally:
if cursor is not None:
cursor.close()
self._ReturnConnection()
if self.lastError is not None:
raise self.lastError
else:
return self.affectedRows
executeMany = queryMany
def queryMulti(self, queries):
"""
Execute a series of Deletes,Inserts, & Updates in the Queires List
@author: Nick Verbeck
@since: 9/7/2008
"""
self.lastError = None
self.affectedRows = 0
self.rowcount = None
self.record = None
cursor = None
try:
try:
self._GetConnection()
#Execute query and store results
cursor = self.conn.getCursor()
for query in queries:
self.conn.query = query
if query.__class__ == [].__class__:
self.affectedRows += cursor.execute(query[0], query[1])
else:
self.affectedRows += cursor.execute(query)
self.conn.updateCheckTime()
except Exception, e:
self.lastError = e
finally:
if cursor is not None:
cursor.close()
self._ReturnConnection()
if self.lastError is not None:
raise self.lastError
else:
return self.affectedRows
executeMulti = queryMulti
def _GetConnection(self):
"""
Retieves a prelocked connection from the Pool
@author: Nick Verbeck
@since: 9/7/2008
"""
#Attempt to get a connection. If all connections are in use and we have reached the max number of connections,
#we wait 1 second and try again.
#The Connection is returned locked to be thread safe
while self.conn is None:
self.conn = Pool().GetConnection(self.connInfo)
if self.conn is not None:
break
else:
time.sleep(1)
def _ReturnConnection(self):
"""
Returns a connection back to the pool
@author: Nick Verbeck
@since: 9/7/2008
"""
if self.conn is not None:
if self.connInfo.commitOnEnd is True or self.commitOnEnd is True:
self.conn.Commit()
Pool().returnConnection(self.conn)
self.conn = None
def escape_string(self, string):
"""
This is just an adapter function to allow previus users of MySQLdb.
To be familier with there names of functions.
@see: escapeString
"""
return MySQLdb.escape_string(string)
def escapeString(self, string):
"""
Escapes a string for use in a query
This is the equivilate and MySQLdb.escape_string()
@author: Nick Verbeck
@since: 9/7/2008
"""
return MySQLdb.escapeString(string)
def escape(self, string):
"""
Escapes a string for use in a query
This is the equivilate and MySQLdb.escape()
@author: Nick Verbeck
@since: 9/7/2008
"""
return MySQLdb.escape(string)
|
nerdynick/PySQLPool | src/PySQLPool/query.py | PySQLQuery.queryOne | python | def queryOne(self, query, args=None):
self.affectedRows = None
self.lastError = None
cursor = None
try:
try:
self._GetConnection()
self.conn.query = query
#Execute query
cursor = self.conn.getCursor()
self.affectedRows = cursor.execute(query, args)
self.conn.updateCheckTime()
while 1:
row = cursor.fetchone()
if row is None:
break
else:
self.record = row
yield row
self.rowcount = cursor.rowcount
except Exception, e:
self.lastError = e
self.affectedRows = None
finally:
if cursor is not None:
cursor.close()
self._ReturnConnection()
if self.lastError is not None:
raise self.lastError
else:
raise StopIteration | Execute the passed in query against the database.
Uses a Generator & fetchone to reduce your process memory size.
@param query: MySQL Query to execute. %s or %(key)s will be replaced by parameter args sequence
@param args: Sequence of value to replace in your query. A mapping may also be used but your query must use %(key)s
@author: Nick Verbeck
@since: 5/12/2008 | train | https://github.com/nerdynick/PySQLPool/blob/a561275fea091e2667b69ce376c507f541b56e7d/src/PySQLPool/query.py#L117-L158 | [
"def _GetConnection(self):\n\t\"\"\"\n\tRetieves a prelocked connection from the Pool\n\n\t@author: Nick Verbeck\n\t@since: 9/7/2008\n\t\"\"\"\n\t#Attempt to get a connection. If all connections are in use and we have reached the max number of connections,\n\t#we wait 1 second and try again.\n\t#The Connection is returned locked to be thread safe\n\twhile self.conn is None:\n\t\tself.conn = Pool().GetConnection(self.connInfo)\n\t\tif self.conn is not None:\n\t\t\tbreak\n\t\telse:\n\t\t\ttime.sleep(1)\n",
"def _ReturnConnection(self):\n\t\"\"\"\n\tReturns a connection back to the pool\n\n\t@author: Nick Verbeck\n\t@since: 9/7/2008\n\t\"\"\"\n\tif self.conn is not None:\n\t\tif self.connInfo.commitOnEnd is True or self.commitOnEnd is True:\n\t\t\tself.conn.Commit()\n\n\t\tPool().returnConnection(self.conn)\n\t\tself.conn = None\n"
] | class PySQLQuery(object):
"""
Front-End class used for interaction with the PySQLPool core
This class is used to execute queries and to request a currently open connection from the pool.
If no open connections exist a new one is created by the pool.
@author: Nick Verbeck
@since: 5/12/2008
@version: 0.1
"""
def __init__(self, PySQLConnectionObj, commitOnEnd = False):
"""
Constructor for PySQLQuery Class
@param PySQLConnectionObj: PySQLConnection Object representing your connection string
@param commitOnEnd: Default False, When query is complete do you wish to auto commit. This is a one time auto commit
@author: Nick Verbeck
@since: 5/12/2008
"""
self.connInfo = PySQLConnectionObj
self.record = {}
self.rowcount = 0
self.affectedRows = None
#The Real Connection to the DB
self.conn = None
self.lastError = None
self.lastInsertID = None
def __del__(self):
"""
On destruct make sure the current connection is returned back to the pool for use later
@author: Nick Verbeck
@since: 5/12/2008
"""
if self.conn is not None:
self._ReturnConnection()
def __enter__(self):
"""
Starts transaction, used with the 'with' statement.
@author: Denis Malinovsky
@since: 5/21/2010
"""
self.Query('START TRANSACTION')
log.logger.info('Starting Transaction')
def __exit__(self, exc_type, exc_value, traceback):
"""
Commits transaction, if no exception was raised.
@author: Denis Malinovsky
@since: 5/21/2010
"""
if exc_type is None:
self.Query('COMMIT')
log.logger.info('Commiting Transaction')
else:
self.Query('ROLLBACK')
log.logger.info('Rolling Back Transaction')
#TODO: In the future lets decorate all our query calls with a connection fetching and releasing handler. Help to centralize all this logic for use in transactions in the future.
def query(self, query, args=None):
	"""
	Execute the passed in query against the database.

	@param query: MySQL query to execute. %s or %(key)s placeholders are replaced by the args sequence
	@param args: Sequence of values to substitute into the query. A mapping may also be used, but the query must then use %(key)s
	@return: Number of affected rows as reported by the cursor, or None when the query failed
	@author: Nick Verbeck
	@since: 5/12/2008
	"""
	self.affectedRows = None
	self.lastError = None
	cursor = None
	try:
		try:
			self._GetConnection()
			log.logger.debug('Running query "%s" with args "%s"', query, args)
			self.conn.query = query
			#Execute query and store results
			cursor = self.conn.getCursor()
			self.affectedRows = cursor.execute(query, args)
			self.lastInsertID = self.conn.connection.insert_id()
			self.rowcount = cursor.rowcount
			log.logger.debug('Query Resulted in %s affected rows, %s rows returned, %s last insert id', self.affectedRows, self.lastInsertID, self.rowcount)
			self.record = cursor.fetchall()
			self.conn.updateCheckTime()
		# 'except Exception as e' works on Python 2.6+ and is required on
		# Python 3; the old 'except Exception, e' form is a syntax error there.
		except Exception as e:
			# Remember the error and re-raise it only after the connection
			# has been safely returned to the pool below.
			self.lastError = e
			self.affectedRows = None
	finally:
		if cursor is not None:
			cursor.close()
		self._ReturnConnection()
	if self.lastError is not None:
		raise self.lastError
	else:
		return self.affectedRows
execute = Query = query
def queryOne(self, query, args=None):
	"""
	Execute the passed in query against the database.
	Uses a generator and fetchone to reduce your process memory size.

	@param query: MySQL query to execute. %s or %(key)s placeholders are replaced by the args sequence
	@param args: Sequence of values to substitute into the query. A mapping may also be used, but the query must then use %(key)s
	@return: Generator yielding one result row at a time
	@author: Nick Verbeck
	@since: 5/12/2008
	"""
	self.affectedRows = None
	self.lastError = None
	cursor = None
	try:
		try:
			self._GetConnection()
			self.conn.query = query
			#Execute query
			cursor = self.conn.getCursor()
			self.affectedRows = cursor.execute(query, args)
			self.conn.updateCheckTime()
			while 1:
				row = cursor.fetchone()
				if row is None:
					break
				else:
					self.record = row
					yield row
			self.rowcount = cursor.rowcount
		# Python 2.6+/3 compatible exception syntax.
		except Exception as e:
			self.lastError = e
			self.affectedRows = None
	finally:
		if cursor is not None:
			cursor.close()
		self._ReturnConnection()
	if self.lastError is not None:
		raise self.lastError
	else:
		# Bug fix: 'raise StopIteration' inside a generator is turned into
		# a RuntimeError on Python 3.7+ (PEP 479); a plain return ends the
		# generator cleanly with identical iteration semantics.
		return
executeOne = QueryOne = queryOne
def queryMany(self, query, args):
	"""
	Executes a series of the same insert statements.
	Each tuple in the args list is applied to the query and executed.
	This is the equivalent of MySQLdb.cursor.executemany().

	@param query: MySQL query to execute, with %s or %(key)s placeholders
	@param args: Sequence of parameter tuples (or mappings), one per execution
	@return: Number of affected rows as reported by the cursor, or None on error
	@author: Nick Verbeck
	@since: 9/7/2008
	"""
	self.lastError = None
	self.affectedRows = None
	self.rowcount = None
	self.record = None
	cursor = None
	try:
		try:
			self._GetConnection()
			self.conn.query = query
			#Execute query and store results
			cursor = self.conn.getCursor()
			self.affectedRows = cursor.executemany(query, args)
			self.conn.updateCheckTime()
		# Python 2.6+/3 compatible exception syntax.
		except Exception as e:
			self.lastError = e
	finally:
		if cursor is not None:
			cursor.close()
		self._ReturnConnection()
	if self.lastError is not None:
		raise self.lastError
	else:
		return self.affectedRows
executeMany = queryMany
def queryMulti(self, queries):
	"""
	Execute a series of deletes, inserts, and updates in the queries list.

	@param queries: List of queries to run. Each entry is either a plain
		query string, or a [query, args] list whose args are substituted
		into the query's %s / %(key)s placeholders.
	@return: Total number of affected rows across all queries
	@author: Nick Verbeck
	@since: 9/7/2008
	"""
	self.lastError = None
	self.affectedRows = 0
	self.rowcount = None
	self.record = None
	cursor = None
	try:
		try:
			self._GetConnection()
			#Execute query and store results
			cursor = self.conn.getCursor()
			for query in queries:
				self.conn.query = query
				# isinstance replaces the exact-class comparison
				# 'query.__class__ == [].__class__' and also accepts
				# list subclasses.
				if isinstance(query, list):
					self.affectedRows += cursor.execute(query[0], query[1])
				else:
					self.affectedRows += cursor.execute(query)
			self.conn.updateCheckTime()
		# Python 2.6+/3 compatible exception syntax.
		except Exception as e:
			self.lastError = e
	finally:
		if cursor is not None:
			cursor.close()
		self._ReturnConnection()
	if self.lastError is not None:
		raise self.lastError
	else:
		return self.affectedRows
executeMulti = queryMulti
def _GetConnection(self):
	"""
	Retrieves a pre-locked connection from the pool.

	Polls the pool until a connection becomes available: if every
	connection is in use and the pool is at its maximum size, waits one
	second between attempts. The connection comes back locked so it is
	thread safe.

	@author: Nick Verbeck
	@since: 9/7/2008
	"""
	while self.conn is None:
		candidate = Pool().GetConnection(self.connInfo)
		if candidate is None:
			# Pool exhausted - back off briefly before retrying.
			time.sleep(1)
		else:
			self.conn = candidate
def _ReturnConnection(self):
	"""
	Returns a connection back to the pool.

	If either the connection-info object or this query object has the
	commit-on-end flag set, the pending work is committed before the
	connection is handed back. After this call self.conn is None.

	@author: Nick Verbeck
	@since: 9/7/2008
	"""
	if self.conn is not None:
		# NOTE(review): self.commitOnEnd is read here, but __init__ never
		# assigns it - only the constructor parameter exists. Verify this
		# attribute is set elsewhere, otherwise this line raises
		# AttributeError.
		if self.connInfo.commitOnEnd is True or self.commitOnEnd is True:
			self.conn.Commit()
		Pool().returnConnection(self.conn)
		self.conn = None
def escape_string(self, string):
	"""
	Adapter kept for users familiar with the MySQLdb function names;
	simply delegates to MySQLdb's string escaping.

	@param string: String to escape for safe use in a query
	@return: The escaped string
	@see: escapeString
	"""
	return MySQLdb.escape_string(string)
def escapeString(self, string):
	"""
	Escapes a string for use in a query.
	This is the equivalent of MySQLdb.escape_string().

	@param string: String to escape
	@return: The escaped string
	@author: Nick Verbeck
	@since: 9/7/2008
	"""
	# Bug fix: MySQLdb exposes escape_string(), not escapeString();
	# the old call raised AttributeError on every use.
	return MySQLdb.escape_string(string)
def escape(self, string):
	"""
	Escapes a value for use in a query.
	This is the equivalent of MySQLdb.escape().

	@param string: Value to escape
	@return: The escaped value
	@author: Nick Verbeck
	@since: 9/7/2008
	"""
	# Bug fix: MySQLdb.escape() requires a conversions mapping as its
	# second argument; the previous one-argument call raised TypeError.
	return MySQLdb.escape(string, MySQLdb.converters.conversions)
|
nerdynick/PySQLPool | src/PySQLPool/query.py | PySQLQuery.queryMany | python | def queryMany(self, query, args):
self.lastError = None
self.affectedRows = None
self.rowcount = None
self.record = None
cursor = None
try:
try:
self._GetConnection()
self.conn.query = query
#Execute query and store results
cursor = self.conn.getCursor()
self.affectedRows = cursor.executemany(query, args)
self.conn.updateCheckTime()
except Exception, e:
self.lastError = e
finally:
if cursor is not None:
cursor.close()
self._ReturnConnection()
if self.lastError is not None:
raise self.lastError
else:
return self.affectedRows | Executes a series of the same Insert Statments
Each tuple in the args list will be applied to the query and executed.
This is the equivilant of MySQLDB.cursor.executemany()
@author: Nick Verbeck
@since: 9/7/2008 | train | https://github.com/nerdynick/PySQLPool/blob/a561275fea091e2667b69ce376c507f541b56e7d/src/PySQLPool/query.py#L161-L194 | [
"def _GetConnection(self):\n\t\"\"\"\n\tRetieves a prelocked connection from the Pool\n\n\t@author: Nick Verbeck\n\t@since: 9/7/2008\n\t\"\"\"\n\t#Attempt to get a connection. If all connections are in use and we have reached the max number of connections,\n\t#we wait 1 second and try again.\n\t#The Connection is returned locked to be thread safe\n\twhile self.conn is None:\n\t\tself.conn = Pool().GetConnection(self.connInfo)\n\t\tif self.conn is not None:\n\t\t\tbreak\n\t\telse:\n\t\t\ttime.sleep(1)\n",
"def _ReturnConnection(self):\n\t\"\"\"\n\tReturns a connection back to the pool\n\n\t@author: Nick Verbeck\n\t@since: 9/7/2008\n\t\"\"\"\n\tif self.conn is not None:\n\t\tif self.connInfo.commitOnEnd is True or self.commitOnEnd is True:\n\t\t\tself.conn.Commit()\n\n\t\tPool().returnConnection(self.conn)\n\t\tself.conn = None\n"
] | class PySQLQuery(object):
"""
Front-End class used for interaction with the PySQLPool core
This class is used to execute queries and to request a currently open connection from the pool.
If no open connections exist a new one is created by the pool.
@author: Nick Verbeck
@since: 5/12/2008
@version: 0.1
"""
def __init__(self, PySQLConnectionObj, commitOnEnd = False):
"""
Constructor for PySQLQuery Class
@param PySQLConnectionObj: PySQLConnection Object representing your connection string
@param commitOnEnd: Default False, When query is complete do you wish to auto commit. This is a one time auto commit
@author: Nick Verbeck
@since: 5/12/2008
"""
self.connInfo = PySQLConnectionObj
self.record = {}
self.rowcount = 0
self.affectedRows = None
#The Real Connection to the DB
self.conn = None
self.lastError = None
self.lastInsertID = None
def __del__(self):
"""
On destruct make sure the current connection is returned back to the pool for use later
@author: Nick Verbeck
@since: 5/12/2008
"""
if self.conn is not None:
self._ReturnConnection()
def __enter__(self):
"""
Starts transaction, used with the 'with' statement.
@author: Denis Malinovsky
@since: 5/21/2010
"""
self.Query('START TRANSACTION')
log.logger.info('Starting Transaction')
def __exit__(self, exc_type, exc_value, traceback):
"""
Commits transaction, if no exception was raised.
@author: Denis Malinovsky
@since: 5/21/2010
"""
if exc_type is None:
self.Query('COMMIT')
log.logger.info('Commiting Transaction')
else:
self.Query('ROLLBACK')
log.logger.info('Rolling Back Transaction')
#TODO: In the future lets decorate all our query calls with a connection fetching and releasing handler. Help to centralize all this logic for use in transactions in the future.
def query(self, query, args=None):
"""
Execute the passed in query against the database
@param query: MySQL Query to execute. %s or %(key)s will be replaced by parameter args sequence
@param args: Sequence of value to replace in your query. A mapping may also be used but your query must use %(key)s
@author: Nick Verbeck
@since: 5/12/2008
"""
self.affectedRows = None
self.lastError = None
cursor = None
try:
try:
self._GetConnection()
log.logger.debug('Running query "%s" with args "%s"', query, args)
self.conn.query = query
#Execute query and store results
cursor = self.conn.getCursor()
self.affectedRows = cursor.execute(query, args)
self.lastInsertID = self.conn.connection.insert_id()
self.rowcount = cursor.rowcount
log.logger.debug('Query Resulted in %s affected rows, %s rows returned, %s last insert id', self.affectedRows, self.lastInsertID, self.rowcount)
self.record = cursor.fetchall()
self.conn.updateCheckTime()
except Exception, e:
self.lastError = e
self.affectedRows = None
finally:
if cursor is not None:
cursor.close()
self._ReturnConnection()
if self.lastError is not None:
raise self.lastError
else:
return self.affectedRows
execute = Query = query
def queryOne(self, query, args=None):
"""
Execute the passed in query against the database.
Uses a Generator & fetchone to reduce your process memory size.
@param query: MySQL Query to execute. %s or %(key)s will be replaced by parameter args sequence
@param args: Sequence of value to replace in your query. A mapping may also be used but your query must use %(key)s
@author: Nick Verbeck
@since: 5/12/2008
"""
self.affectedRows = None
self.lastError = None
cursor = None
try:
try:
self._GetConnection()
self.conn.query = query
#Execute query
cursor = self.conn.getCursor()
self.affectedRows = cursor.execute(query, args)
self.conn.updateCheckTime()
while 1:
row = cursor.fetchone()
if row is None:
break
else:
self.record = row
yield row
self.rowcount = cursor.rowcount
except Exception, e:
self.lastError = e
self.affectedRows = None
finally:
if cursor is not None:
cursor.close()
self._ReturnConnection()
if self.lastError is not None:
raise self.lastError
else:
raise StopIteration
executeOne = QueryOne = queryOne
def queryMany(self, query, args):
"""
Executes a series of the same Insert Statments
Each tuple in the args list will be applied to the query and executed.
This is the equivilant of MySQLDB.cursor.executemany()
@author: Nick Verbeck
@since: 9/7/2008
"""
self.lastError = None
self.affectedRows = None
self.rowcount = None
self.record = None
cursor = None
try:
try:
self._GetConnection()
self.conn.query = query
#Execute query and store results
cursor = self.conn.getCursor()
self.affectedRows = cursor.executemany(query, args)
self.conn.updateCheckTime()
except Exception, e:
self.lastError = e
finally:
if cursor is not None:
cursor.close()
self._ReturnConnection()
if self.lastError is not None:
raise self.lastError
else:
return self.affectedRows
executeMany = queryMany
def queryMulti(self, queries):
"""
Execute a series of Deletes,Inserts, & Updates in the Queires List
@author: Nick Verbeck
@since: 9/7/2008
"""
self.lastError = None
self.affectedRows = 0
self.rowcount = None
self.record = None
cursor = None
try:
try:
self._GetConnection()
#Execute query and store results
cursor = self.conn.getCursor()
for query in queries:
self.conn.query = query
if query.__class__ == [].__class__:
self.affectedRows += cursor.execute(query[0], query[1])
else:
self.affectedRows += cursor.execute(query)
self.conn.updateCheckTime()
except Exception, e:
self.lastError = e
finally:
if cursor is not None:
cursor.close()
self._ReturnConnection()
if self.lastError is not None:
raise self.lastError
else:
return self.affectedRows
executeMulti = queryMulti
def _GetConnection(self):
"""
Retieves a prelocked connection from the Pool
@author: Nick Verbeck
@since: 9/7/2008
"""
#Attempt to get a connection. If all connections are in use and we have reached the max number of connections,
#we wait 1 second and try again.
#The Connection is returned locked to be thread safe
while self.conn is None:
self.conn = Pool().GetConnection(self.connInfo)
if self.conn is not None:
break
else:
time.sleep(1)
def _ReturnConnection(self):
"""
Returns a connection back to the pool
@author: Nick Verbeck
@since: 9/7/2008
"""
if self.conn is not None:
if self.connInfo.commitOnEnd is True or self.commitOnEnd is True:
self.conn.Commit()
Pool().returnConnection(self.conn)
self.conn = None
def escape_string(self, string):
"""
This is just an adapter function to allow previus users of MySQLdb.
To be familier with there names of functions.
@see: escapeString
"""
return MySQLdb.escape_string(string)
def escapeString(self, string):
"""
Escapes a string for use in a query
This is the equivilate and MySQLdb.escape_string()
@author: Nick Verbeck
@since: 9/7/2008
"""
return MySQLdb.escapeString(string)
def escape(self, string):
"""
Escapes a string for use in a query
This is the equivilate and MySQLdb.escape()
@author: Nick Verbeck
@since: 9/7/2008
"""
return MySQLdb.escape(string)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.