content stringlengths 1 103k ⌀ | path stringlengths 8 216 | filename stringlengths 2 179 | language stringclasses 15
values | size_bytes int64 2 189k | quality_score float64 0.5 0.95 | complexity float64 0 1 | documentation_ratio float64 0 1 | repository stringclasses 5
values | stars int64 0 1k | created_date stringdate 2023-07-10 19:21:08 2025-07-09 19:11:45 | license stringclasses 4
values | is_test bool 2
classes | file_hash stringlengths 32 32 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
# Class base Preprocessors\n# Backwards compatibility for imported name\nfrom nbclient.exceptions import CellExecutionError\n\nfrom .base import Preprocessor\nfrom .clearmetadata import ClearMetadataPreprocessor\nfrom .clearoutput import ClearOutputPreprocessor\nfrom .coalescestreams import CoalesceStreamsPreprocessor\nfrom .convertfigures import ConvertFiguresPreprocessor\nfrom .csshtmlheader import CSSHTMLHeaderPreprocessor\nfrom .execute import ExecutePreprocessor\nfrom .extractattachments import ExtractAttachmentsPreprocessor\nfrom .extractoutput import ExtractOutputPreprocessor\nfrom .highlightmagics import HighlightMagicsPreprocessor\nfrom .latex import LatexPreprocessor\nfrom .regexremove import RegexRemovePreprocessor\nfrom .svg2pdf import SVG2PDFPreprocessor\nfrom .tagremove import TagRemovePreprocessor\n\n__all__ = [\n "CellExecutionError",\n "Preprocessor",\n "ClearMetadataPreprocessor",\n "ClearOutputPreprocessor",\n "CoalesceStreamsPreprocessor",\n "ConvertFiguresPreprocessor",\n "CSSHTMLHeaderPreprocessor",\n "ExecutePreprocessor",\n "ExtractAttachmentsPreprocessor",\n "ExtractOutputPreprocessor",\n "HighlightMagicsPreprocessor",\n "LatexPreprocessor",\n "RegexRemovePreprocessor",\n "SVG2PDFPreprocessor",\n "TagRemovePreprocessor",\n]\n | .venv\Lib\site-packages\nbconvert\preprocessors\__init__.py | __init__.py | Python | 1,279 | 0.95 | 0.027778 | 0.058824 | node-utils | 643 | 2024-05-29T09:40:15.695901 | Apache-2.0 | false | b431fbf361af9e4ec88cdcc61db413b1 |
\n\n | .venv\Lib\site-packages\nbconvert\preprocessors\__pycache__\base.cpython-313.pyc | base.cpython-313.pyc | Other | 3,483 | 0.95 | 0.132353 | 0 | python-kit | 531 | 2024-12-19T08:42:04.121652 | MIT | false | 7a7b767f0be720199635a035352182be |
\n\n | .venv\Lib\site-packages\nbconvert\preprocessors\__pycache__\clearmetadata.cpython-313.pyc | clearmetadata.cpython-313.pyc | Other | 4,945 | 0.8 | 0.105263 | 0.055556 | node-utils | 973 | 2024-07-11T08:28:25.676574 | GPL-3.0 | false | d9b85251e5fa96c488337b4ddf50d7d7 |
\n\n | .venv\Lib\site-packages\nbconvert\preprocessors\__pycache__\clearoutput.cpython-313.pyc | clearoutput.cpython-313.pyc | Other | 1,411 | 0.8 | 0.083333 | 0 | react-lib | 764 | 2025-05-16T11:09:23.801479 | MIT | false | 23cd40af8d267f360ad76445d1af8a15 |
\n\n | .venv\Lib\site-packages\nbconvert\preprocessors\__pycache__\coalescestreams.cpython-313.pyc | coalescestreams.cpython-313.pyc | Other | 1,918 | 0.8 | 0.2 | 0 | react-lib | 317 | 2023-10-29T05:31:30.283390 | MIT | false | fb9f197bd89d8bfa2244ae96d8e96139 |
\n\n | .venv\Lib\site-packages\nbconvert\preprocessors\__pycache__\convertfigures.cpython-313.pyc | convertfigures.cpython-313.pyc | Other | 2,384 | 0.8 | 0 | 0 | python-kit | 668 | 2025-05-26T00:04:52.122662 | MIT | false | db2fba8dfdf34157ee75c91a8f745be0 |
\n\n | .venv\Lib\site-packages\nbconvert\preprocessors\__pycache__\csshtmlheader.cpython-313.pyc | csshtmlheader.cpython-313.pyc | Other | 4,408 | 0.95 | 0.046154 | 0 | react-lib | 664 | 2024-02-14T20:20:44.238514 | BSD-3-Clause | false | 6717101da1381ec9dd838e47b1ba0019 |
\n\n | .venv\Lib\site-packages\nbconvert\preprocessors\__pycache__\execute.cpython-313.pyc | execute.cpython-313.pyc | Other | 5,442 | 0.95 | 0.058824 | 0 | react-lib | 267 | 2025-06-06T10:54:12.683679 | GPL-3.0 | false | 569de08b96022ffd80ae5eb5f59a911a |
\n\n | .venv\Lib\site-packages\nbconvert\preprocessors\__pycache__\extractattachments.cpython-313.pyc | extractattachments.cpython-313.pyc | Other | 4,094 | 0.8 | 0.041667 | 0 | awesome-app | 970 | 2024-11-22T02:26:37.713702 | BSD-3-Clause | false | 8e5480fe15f271c134efa91d1220e421 |
\n\n | .venv\Lib\site-packages\nbconvert\preprocessors\__pycache__\extractoutput.cpython-313.pyc | extractoutput.cpython-313.pyc | Other | 5,292 | 0.95 | 0.029412 | 0.030769 | vue-tools | 656 | 2024-06-19T18:16:58.547958 | GPL-3.0 | false | 933915de4c7dbc2cea300ea22e007283 |
\n\n | .venv\Lib\site-packages\nbconvert\preprocessors\__pycache__\highlightmagics.cpython-313.pyc | highlightmagics.cpython-313.pyc | Other | 3,518 | 0.95 | 0.042553 | 0 | python-kit | 397 | 2024-05-26T05:54:46.462068 | MIT | false | 13afec6a90c230a7bf2706401c131cb7 |
\n\n | .venv\Lib\site-packages\nbconvert\preprocessors\__pycache__\latex.cpython-313.pyc | latex.cpython-313.pyc | Other | 2,812 | 0.8 | 0.044444 | 0 | react-lib | 616 | 2024-05-28T18:58:49.837393 | Apache-2.0 | false | fd046a529a2de5848b9782b521a4c973 |
\n\n | .venv\Lib\site-packages\nbconvert\preprocessors\__pycache__\regexremove.cpython-313.pyc | regexremove.cpython-313.pyc | Other | 3,088 | 0.8 | 0.086957 | 0 | react-lib | 762 | 2024-06-04T19:34:56.426782 | Apache-2.0 | false | e030649991e76384381894a6d4882d61 |
\n\n | .venv\Lib\site-packages\nbconvert\preprocessors\__pycache__\sanitize.cpython-313.pyc | sanitize.cpython-313.pyc | Other | 5,276 | 0.8 | 0.034483 | 0.0125 | awesome-app | 89 | 2024-09-01T02:39:51.220873 | GPL-3.0 | false | 2b7650852d221a01842ada2b63e03a09 |
\n\n | .venv\Lib\site-packages\nbconvert\preprocessors\__pycache__\svg2pdf.cpython-313.pyc | svg2pdf.cpython-313.pyc | Other | 7,330 | 0.8 | 0.032609 | 0 | vue-tools | 241 | 2024-09-04T09:10:08.431506 | BSD-3-Clause | false | 234666b3088faf5bd3ce1d9876690c2e |
\n\n | .venv\Lib\site-packages\nbconvert\preprocessors\__pycache__\tagremove.cpython-313.pyc | tagremove.cpython-313.pyc | Other | 5,537 | 0.8 | 0.066667 | 0 | react-lib | 675 | 2025-07-05T00:49:22.741752 | MIT | false | a3bdbf8a4b7bc4bb92d0efb44334d157 |
\n\n | .venv\Lib\site-packages\nbconvert\preprocessors\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 1,209 | 0.7 | 0 | 0 | python-kit | 902 | 2024-04-12T09:14:35.279895 | MIT | false | 200b7bb46a7ed5fd7027437688c7c6e3 |
\n\n | .venv\Lib\site-packages\nbconvert\resources\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 194 | 0.7 | 0 | 0 | python-kit | 923 | 2024-02-08T07:05:29.970840 | Apache-2.0 | false | 23ed4d394b9e1c76f2c65ac179252bfb |
# README FIRST\n\nPlease do not add new templates for nbconvert here.\n\nIn order to speed up the distribution of nbconvert templates and make it\nsimpler to share such contributions, we encourage [sharing those links on our\nwiki\npage](https://github.com/ipython/ipython/wiki/Cookbook:%20nbconvert%20templates).\n | .venv\Lib\site-packages\nbconvert\templates\README.md | README.md | Markdown | 307 | 0.8 | 0.125 | 0.166667 | python-kit | 60 | 2024-02-06T05:51:35.061302 | GPL-3.0 | false | 4019313e838e18792ee2982e888e73c5 |
TPLS := $(patsubst %.tpl,../latex/skeleton/%.tplx,$(wildcard *.tpl))\n\nall: clean $(TPLS)\n\n# Convert standard Jinja2 syntax to LaTeX safe Jinja2\n# see http://flask.pocoo.org/snippets/55/ for more info\n../latex/skeleton/%.tplx: %.tpl\n @echo 'generating tex equivalent of $^: $@'\n @echo '((=- Auto-generated template file, DO NOT edit directly!\n' \\n ' To edit this file, please refer to ../../skeleton/README.md' \\n '-=))\n\n' > $@\n @sed \\n -e 's/{%/((*/g' \\n -e 's/%}/*))/g' \\n -e 's/{{/(((/g' \\n -e 's/}}/)))/g' \\n -e 's/{#/((=/g' \\n -e 's/#}/=))/g' \\n -e "s/tpl'/tplx'/g" \\n $^ >> $@\n\nclean:\n @echo "cleaning generated tplx files..."\n @-rm ../latex/skeleton/*.tplx\n | .venv\Lib\site-packages\nbconvert\templates\skeleton\Makefile | Makefile | Other | 682 | 0.8 | 0.041667 | 0.095238 | vue-tools | 40 | 2025-05-05T21:09:31.563283 | Apache-2.0 | false | 1e8bec728b3867c3a43f2657980ed02d |
## Template skeleton\n\nThis directory contains the template skeleton files.\n\nDo not modify the contents of the `../latex/skeleton` folder. Instead,\nif you need to, make modifications to the files in this folder and then run\n`make` to generate the corresponding latex skeleton files in the\n`../latex/skeleton` folder.\n\nIf you would like to share your resulting templates with others, we encourage\n[sharing those links on our wiki\npage](https://github.com/ipython/ipython/wiki/Cookbook:%20nbconvert%20templates).\n | .venv\Lib\site-packages\nbconvert\templates\skeleton\README.md | README.md | Markdown | 510 | 0.8 | 0.083333 | 0.111111 | react-lib | 917 | 2025-04-06T14:57:24.711079 | GPL-3.0 | false | a7bb6d4fba7eed29cefd5aaea2476a13 |
"""Global configuration class."""\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom traitlets import List, Unicode\nfrom traitlets.config.configurable import LoggingConfigurable\n\n\nclass NbConvertBase(LoggingConfigurable):\n """Global configurable class for shared config\n\n Useful for display data priority that might be used by many transformers\n """\n\n display_data_priority = List(\n [\n "text/html",\n "application/pdf",\n "text/latex",\n "image/svg+xml",\n "image/png",\n "image/jpeg",\n "text/markdown",\n "text/plain",\n ],\n help="""\n An ordered list of preferred output type, the first\n encountered will usually be used when converting discarding\n the others.\n """,\n ).tag(config=True)\n\n default_language = Unicode(\n "ipython",\n help="Deprecated default highlight language as of 5.0, please use language_info metadata instead",\n ).tag(config=True)\n | .venv\Lib\site-packages\nbconvert\utils\base.py | base.py | Python | 1,088 | 0.95 | 0.135135 | 0.066667 | vue-tools | 805 | 2024-04-03T00:31:33.277794 | MIT | false | 362b8ecfae98a0457be5d5613a483c06 |
"""NbConvert specific exceptions"""\n# -----------------------------------------------------------------------------\n# Copyright (c) 2013, the IPython Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n# -----------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------\n# Classes and functions\n# -----------------------------------------------------------------------------\n\n\nclass ConversionException(Exception):\n """An exception raised by the conversion process."""\n | .venv\Lib\site-packages\nbconvert\utils\exceptions.py | exceptions.py | Python | 672 | 0.95 | 0.0625 | 0.769231 | awesome-app | 847 | 2024-01-14T04:45:47.625326 | BSD-3-Clause | false | 1ad13469435f9c1bd921263481433c22 |
"""io-related utilities"""\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport codecs\nimport errno\nimport os\nimport random\nimport shutil\nimport sys\nfrom typing import Any, Dict\n\n\ndef unicode_std_stream(stream="stdout"):\n """Get a wrapper to write unicode to stdout/stderr as UTF-8.\n\n This ignores environment variables and default encodings, to reliably write\n unicode to stdout or stderr.\n\n ::\n\n unicode_std_stream().write(u'ł@e¶ŧ←')\n """\n assert stream in ("stdout", "stderr")\n stream = getattr(sys, stream)\n\n try:\n stream_b = stream.buffer\n except AttributeError:\n # sys.stdout has been replaced - use it directly\n return stream\n\n return codecs.getwriter("utf-8")(stream_b)\n\n\ndef unicode_stdin_stream():\n """Get a wrapper to read unicode from stdin as UTF-8.\n\n This ignores environment variables and default encodings, to reliably read unicode from stdin.\n\n ::\n\n totreat = unicode_stdin_stream().read()\n """\n stream = sys.stdin\n try:\n stream_b = stream.buffer\n except AttributeError:\n return stream\n\n return codecs.getreader("utf-8")(stream_b)\n\n\nclass FormatSafeDict(Dict[Any, Any]):\n """Format a dictionary safely."""\n\n def __missing__(self, key):\n """Handle missing value."""\n return "{" + key + "}"\n\n\ntry:\n ENOLINK = errno.ENOLINK\nexcept AttributeError:\n ENOLINK = 1998\n\n\ndef link(src, dst):\n """Hard links ``src`` to ``dst``, returning 0 or errno.\n\n Note that the special errno ``ENOLINK`` will be returned if ``os.link`` isn't\n supported by the operating system.\n """\n\n if not hasattr(os, "link"):\n return ENOLINK\n link_errno = 0\n try:\n os.link(src, dst)\n except OSError as e:\n link_errno = e.errno\n return link_errno\n\n\ndef link_or_copy(src, dst):\n """Attempts to hardlink ``src`` to ``dst``, copying if the link fails.\n\n Attempts to maintain the semantics of ``shutil.copy``.\n\n Because ``os.link`` does not overwrite files, a unique temporary 
file\n will be used if the target already exists, then that file will be moved\n into place.\n """\n\n if os.path.isdir(dst):\n dst = os.path.join(dst, os.path.basename(src))\n\n link_errno = link(src, dst)\n if link_errno == errno.EEXIST:\n if os.stat(src).st_ino == os.stat(dst).st_ino:\n # dst is already a hard link to the correct file, so we don't need\n # to do anything else. If we try to link and rename the file\n # anyway, we get duplicate files - see http://bugs.python.org/issue21876\n return\n\n new_dst = dst + f"-temp-{random.randint(1, 16**4):04X}" # noqa: S311\n try:\n link_or_copy(src, new_dst)\n except BaseException:\n try:\n os.remove(new_dst)\n except OSError:\n pass\n raise\n os.rename(new_dst, dst)\n elif link_errno != 0:\n # Either link isn't supported, or the filesystem doesn't support\n # linking, or 'src' and 'dst' are on different filesystems.\n shutil.copy(src, dst)\n | .venv\Lib\site-packages\nbconvert\utils\io.py | io.py | Python | 3,177 | 0.95 | 0.166667 | 0.089888 | vue-tools | 848 | 2025-05-01T23:43:54.313577 | BSD-3-Clause | false | b839d60b2afa7842986239ef821d5502 |
"""List of ISO639-1 language code"""\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\niso639_1 = [\n "aa",\n "ab",\n "ae",\n "af",\n "ak",\n "am",\n "an",\n "ar",\n "as",\n "av",\n "ay",\n "az",\n "ba",\n "be",\n "bg",\n "bh",\n "bi",\n "bm",\n "bn",\n "bo",\n "br",\n "bs",\n "ca",\n "ce",\n "ch",\n "co",\n "cr",\n "cs",\n "cu",\n "cv",\n "cy",\n "da",\n "de",\n "dv",\n "dz",\n "ee",\n "el",\n "en",\n "eo",\n "es",\n "et",\n "eu",\n "fa",\n "ff",\n "fi",\n "fj",\n "fo",\n "fr",\n "fy",\n "ga",\n "gd",\n "gl",\n "gn",\n "gu",\n "gv",\n "ha",\n "he",\n "hi",\n "ho",\n "hr",\n "ht",\n "hu",\n "hy",\n "hz",\n "ia",\n "id",\n "ie",\n "ig",\n "ii",\n "ik",\n "io",\n "is",\n "it",\n "iu",\n "ja",\n "jv",\n "ka",\n "kg",\n "ki",\n "kj",\n "kk",\n "kl",\n "km",\n "kn",\n "ko",\n "kr",\n "ks",\n "ku",\n "kv",\n "kw",\n "ky",\n "la",\n "lb",\n "lg",\n "li",\n "ln",\n "lo",\n "lt",\n "lu",\n "lv",\n "mg",\n "mh",\n "mi",\n "mk",\n "ml",\n "mn",\n "mr",\n "ms",\n "mt",\n "my",\n "na",\n "nb",\n "nd",\n "ne",\n "ng",\n "nl",\n "nn",\n "no",\n "nr",\n "nv",\n "ny",\n "oc",\n "oj",\n "om",\n "or",\n "os",\n "pa",\n "pi",\n "pl",\n "ps",\n "pt",\n "qu",\n "rm",\n "rn",\n "ro",\n "ru",\n "rw",\n "sa",\n "sc",\n "sd",\n "se",\n "sg",\n "si",\n "sk",\n "sl",\n "sm",\n "sn",\n "so",\n "sq",\n "sr",\n "ss",\n "st",\n "su",\n "sv",\n "sw",\n "ta",\n "te",\n "tg",\n "th",\n "ti",\n "tk",\n "tl",\n "tn",\n "to",\n "tr",\n "ts",\n "tt",\n "tw",\n "ty",\n "ug",\n "uk",\n "ur",\n "uz",\n "ve",\n "vi",\n "vo",\n "wa",\n "wo",\n "xh",\n "yi",\n "yo",\n "za",\n "zh",\n "zu",\n]\n | .venv\Lib\site-packages\nbconvert\utils\iso639_1.py | iso639_1.py | Python | 1,995 | 0.8 | 0 | 0.010582 | node-utils | 232 | 2024-08-27T20:36:05.497833 | BSD-3-Clause | false | 2e2ce5a06a836e354e751ed1ee2cc279 |
"""Deprecated as of 5.0; import from IPython.lib.lexers instead."""\n\nfrom warnings import warn\n\nwarn("nbconvert.utils.lexers is deprecated as of 5.0. Use IPython.lib.lexers", stacklevel=2)\n\nfrom IPython.lib.lexers import * # noqa: F403, E402\n | .venv\Lib\site-packages\nbconvert\utils\lexers.py | lexers.py | Python | 243 | 0.95 | 0 | 0 | react-lib | 903 | 2024-05-28T00:07:59.435476 | Apache-2.0 | false | fd50f6e831916636ec4b993d8cb95247 |
"""Utility for calling pandoc"""\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport re\nimport shutil\nimport subprocess\nimport warnings\nfrom io import BytesIO, TextIOWrapper\n\nfrom nbconvert.utils.version import check_version\n\nfrom .exceptions import ConversionException\n\n_minimal_version = "2.9.2"\n_maximal_version = "4.0.0"\n\n\ndef pandoc(source, fmt, to, extra_args=None, encoding="utf-8"):\n """Convert an input string using pandoc.\n\n Pandoc converts an input string `from` a format `to` a target format.\n\n Parameters\n ----------\n source : string\n Input string, assumed to be valid format `from`.\n fmt : string\n The name of the input format (markdown, etc.)\n to : string\n The name of the output format (html, etc.)\n\n Returns\n -------\n out : unicode\n Output as returned by pandoc.\n\n Raises\n ------\n PandocMissing\n If pandoc is not installed.\n Any error messages generated by pandoc are printed to stderr.\n\n """\n cmd = ["pandoc", "-f", fmt, "-t", to]\n if extra_args:\n cmd.extend(extra_args)\n\n # this will raise an exception that will pop us out of here\n check_pandoc_version()\n\n # we can safely continue\n p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE) # noqa: S603\n out, _ = p.communicate(source.encode())\n out_str = TextIOWrapper(BytesIO(out), encoding, "replace").read()\n return out_str.rstrip("\n")\n\n\ndef get_pandoc_version():\n """Gets the Pandoc version if Pandoc is installed.\n\n If the minimal version is not met, it will probe Pandoc for its version, cache it and return that value.\n If the minimal version is met, it will return the cached version and stop probing Pandoc\n (unless `clean_cache()` is called).\n\n Raises\n ------\n PandocMissing\n If pandoc is unavailable.\n """\n global __version # noqa: PLW0603\n\n if __version is None:\n if not shutil.which("pandoc"):\n raise PandocMissing()\n\n out = subprocess.check_output(["pandoc", "-v"]) 
# noqa: S607, S603\n out_lines = out.splitlines()\n version_pattern = re.compile(r"^\d+(\.\d+){1,}$")\n for tok in out_lines[0].decode("ascii", "replace").split():\n if version_pattern.match(tok):\n __version = tok # type:ignore[assignment]\n break\n return __version\n\n\ndef check_pandoc_version():\n """Returns True if pandoc's version meets at least minimal version.\n\n Raises\n ------\n PandocMissing\n If pandoc is unavailable.\n """\n if check_pandoc_version._cached is not None: # type:ignore[attr-defined]\n return check_pandoc_version._cached # type:ignore[attr-defined]\n\n v = get_pandoc_version()\n if v is None:\n warnings.warn(\n "Sorry, we cannot determine the version of pandoc.\n"\n "Please consider reporting this issue and include the"\n "output of pandoc --version.\nContinuing...",\n RuntimeWarning,\n stacklevel=2,\n )\n return False\n ok = check_version(v, _minimal_version, max_v=_maximal_version)\n check_pandoc_version._cached = ok # type:ignore[attr-defined]\n if not ok:\n warnings.warn(\n "You are using an unsupported version of pandoc (%s).\n" % v\n + "Your version must be at least (%s) " % _minimal_version\n + "but less than (%s).\n" % _maximal_version\n + "Refer to https://pandoc.org/installing.html.\nContinuing with doubts...",\n RuntimeWarning,\n stacklevel=2,\n )\n return ok\n\n\ncheck_pandoc_version._cached = None # type:ignore[attr-defined]\n\n# -----------------------------------------------------------------------------\n# Exception handling\n# -----------------------------------------------------------------------------\n\n\nclass PandocMissing(ConversionException):\n """Exception raised when Pandoc is missing."""\n\n def __init__(self, *args, **kwargs):\n """Initialize the exception."""\n super().__init__(\n "Pandoc wasn't found.\n"\n "Please check that pandoc is installed:\n"\n "https://pandoc.org/installing.html"\n )\n\n\n# -----------------------------------------------------------------------------\n# Internal state management\n# 
-----------------------------------------------------------------------------\ndef clean_cache():\n """Clean the internal cache."""\n global __version # noqa: PLW0603\n __version = None\n\n\n__version = None\n | .venv\Lib\site-packages\nbconvert\utils\pandoc.py | pandoc.py | Python | 4,580 | 0.95 | 0.12 | 0.08547 | react-lib | 966 | 2024-02-03T14:39:15.187124 | Apache-2.0 | false | 7f9cc67829c063ab81d4d8991d319d91 |
"""Text related utils."""\n\nimport os\nimport re\n\n\ndef indent(instr, nspaces=4, ntabs=0, flatten=False):\n """Indent a string a given number of spaces or tabstops.\n\n indent(str,nspaces=4,ntabs=0) -> indent str by ntabs+nspaces.\n\n Parameters\n ----------\n\n instr : basestring\n The string to be indented.\n nspaces : int (default: 4)\n The number of spaces to be indented.\n ntabs : int (default: 0)\n The number of tabs to be indented.\n flatten : bool (default: False)\n Whether to scrub existing indentation. If True, all lines will be\n aligned to the same indentation. If False, existing indentation will\n be strictly increased.\n\n Returns\n -------\n\n str|unicode : string indented by ntabs and nspaces.\n\n """\n if instr is None:\n return None\n ind = "\t" * ntabs + " " * nspaces\n pat = re.compile("^\\s*", re.MULTILINE) if flatten else re.compile("^", re.MULTILINE)\n outstr = re.sub(pat, ind, instr)\n if outstr.endswith(os.linesep + ind):\n return outstr[: -len(ind)]\n return outstr\n | .venv\Lib\site-packages\nbconvert\utils\text.py | text.py | Python | 1,083 | 0.85 | 0.102564 | 0 | python-kit | 466 | 2024-09-29T01:37:40.409877 | MIT | false | 9c39b3c4a78ab1ee05dc8f9b07fa5742 |
"""\nUtilities for version comparison\n\nIt is a bit ridiculous that we need these.\n"""\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom packaging.version import Version\n\n\ndef check_version(v, min_v, max_v=None):\n """check version string v >= min_v and v < max_v\n\n Parameters\n ----------\n v : str\n version of the package\n min_v : str\n minimal version supported\n max_v : str\n earliest version not supported\n Note: If dev/prerelease tags result in TypeError for string-number\n comparison, it is assumed that the check passes and the version dependency\n is satisfied. Users on dev branches are responsible for keeping their own\n packages up to date.\n """\n\n try:\n below_max = Version(v) < Version(max_v) if max_v is not None else True\n return Version(v) >= Version(min_v) and below_max\n except TypeError:\n return True\n | .venv\Lib\site-packages\nbconvert\utils\version.py | version.py | Python | 951 | 0.95 | 0.176471 | 0.074074 | vue-tools | 864 | 2024-03-01T02:50:12.389093 | Apache-2.0 | false | 326f23cdad92c1037e96c4c9ca5eb322 |
"""Backport of Python 3.11's contextlib.chdir."""\n\nimport os\nfrom contextlib import AbstractContextManager\n\n\nclass chdir(AbstractContextManager): # type:ignore[type-arg]\n """Non thread-safe context manager to change the current working directory."""\n\n def __init__(self, path):\n """Initialize the manager."""\n self.path = path\n self._old_cwd = []\n\n def __enter__(self):\n """Enter the context."""\n self._old_cwd.append(os.getcwd())\n os.chdir(self.path)\n\n def __exit__(self, *excinfo):\n """Exit the context."""\n os.chdir(self._old_cwd.pop())\n | .venv\Lib\site-packages\nbconvert\utils\_contextlib_chdir.py | _contextlib_chdir.py | Python | 609 | 0.95 | 0.181818 | 0 | awesome-app | 913 | 2025-04-19T23:56:55.781770 | Apache-2.0 | false | b9ad32a24539aec7f578173275f1fe64 |
\n\n | .venv\Lib\site-packages\nbconvert\utils\__pycache__\base.cpython-313.pyc | base.cpython-313.pyc | Other | 1,354 | 0.85 | 0.142857 | 0 | python-kit | 741 | 2025-07-01T18:22:33.569537 | GPL-3.0 | false | 7d65bb9f196d7136e78766af97599d97 |
\n\n | .venv\Lib\site-packages\nbconvert\utils\__pycache__\exceptions.cpython-313.pyc | exceptions.cpython-313.pyc | Other | 550 | 0.8 | 0 | 0 | react-lib | 611 | 2024-03-31T04:48:29.674770 | MIT | false | 55fd75752d2f4b1981c9c07a858dbff4 |
\n\n | .venv\Lib\site-packages\nbconvert\utils\__pycache__\io.cpython-313.pyc | io.cpython-313.pyc | Other | 4,472 | 0.8 | 0.044776 | 0 | vue-tools | 881 | 2024-01-20T21:08:57.920496 | BSD-3-Clause | false | 0db9f311429a47b2766fc3478bcd389d |
\n\n | .venv\Lib\site-packages\nbconvert\utils\__pycache__\iso639_1.cpython-313.pyc | iso639_1.cpython-313.pyc | Other | 999 | 0.7 | 0 | 0 | react-lib | 391 | 2023-09-12T17:51:29.081362 | GPL-3.0 | false | bf725be34fb374bd52d9cc0e52d45f49 |
\n\n | .venv\Lib\site-packages\nbconvert\utils\__pycache__\lexers.cpython-313.pyc | lexers.cpython-313.pyc | Other | 462 | 0.85 | 0 | 0 | vue-tools | 463 | 2023-12-27T04:52:56.396776 | Apache-2.0 | false | 83341be112755ae7d42fd9e6f83e7768 |
\n\n | .venv\Lib\site-packages\nbconvert\utils\__pycache__\pandoc.cpython-313.pyc | pandoc.cpython-313.pyc | Other | 5,030 | 0.8 | 0.038835 | 0 | node-utils | 70 | 2024-06-24T09:41:07.339448 | Apache-2.0 | false | 2d15a6521b2a57264c8f747281ae8c1c |
\n\n | .venv\Lib\site-packages\nbconvert\utils\__pycache__\text.cpython-313.pyc | text.cpython-313.pyc | Other | 1,577 | 0.7 | 0 | 0 | node-utils | 48 | 2025-03-09T23:40:23.613015 | Apache-2.0 | false | c59850d1af2ee63ad4de8c63c5c30044 |
\n\n | .venv\Lib\site-packages\nbconvert\utils\__pycache__\version.cpython-313.pyc | version.cpython-313.pyc | Other | 1,169 | 0.7 | 0.12 | 0 | react-lib | 498 | 2023-09-20T20:23:19.502796 | Apache-2.0 | false | d753b2b6d60f5255de23bacffd0f1c10 |
\n\n | .venv\Lib\site-packages\nbconvert\utils\__pycache__\_contextlib_chdir.cpython-313.pyc | _contextlib_chdir.cpython-313.pyc | Other | 1,497 | 0.7 | 0 | 0 | vue-tools | 345 | 2024-09-06T00:52:55.503793 | Apache-2.0 | false | 9feb169b24e405909c0225c6210be0b8 |
\n\n | .venv\Lib\site-packages\nbconvert\utils\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 190 | 0.7 | 0 | 0 | vue-tools | 245 | 2025-02-11T06:57:05.651818 | MIT | false | cae483b7687fd8c7563f27f8f30b7f54 |
"""\nContains writer base class.\n"""\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\nfrom __future__ import annotations\n\nfrom traitlets import List, Unicode\n\nfrom nbconvert.utils.base import NbConvertBase\n\n\nclass WriterBase(NbConvertBase):\n """Consumes output from nbconvert export...() methods and writes to a\n useful location."""\n\n files = List(\n Unicode(),\n help="""\n List of the files that the notebook references. Files will be\n included with written output.""",\n ).tag(config=True)\n\n def __init__(self, config=None, **kw):\n """\n Constructor\n """\n super().__init__(config=config, **kw)\n\n def write(self, output, resources, **kw):\n """\n Consume and write Jinja output.\n\n Parameters\n ----------\n output : string\n Conversion results. This string contains the file contents of the\n converted file.\n resources : dict\n Resources created and filled by the nbconvert conversion process.\n Includes output from preprocessors, such as the extract figure\n preprocessor.\n """\n\n raise NotImplementedError()\n | .venv\Lib\site-packages\nbconvert\writers\base.py | base.py | Python | 1,238 | 0.95 | 0.086957 | 0.055556 | node-utils | 708 | 2024-12-30T21:37:26.228088 | BSD-3-Clause | false | dbcffa6590d3ea6fa640ee6ebbf76fb9 |
"""\nContains debug writer.\n"""\n\nfrom pprint import pprint\n\nfrom .base import WriterBase\n\n# -----------------------------------------------------------------------------\n# Copyright (c) 2013, the IPython Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n# -----------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\n\n\n# -----------------------------------------------------------------------------\n# Classes\n# -----------------------------------------------------------------------------\n\n\nclass DebugWriter(WriterBase):\n """Consumes output from nbconvert export...() methods and writes useful\n debugging information to the stdout. The information includes a list of\n resources that were extracted from the notebook(s) during export."""\n\n def write(self, output, resources, notebook_name="notebook", **kw):\n """\n Consume and write Jinja output.\n\n See base for more...\n """\n\n if isinstance(resources["outputs"], dict):\n print("outputs extracted from %s" % notebook_name)\n print("-" * 80)\n pprint(resources["outputs"], indent=2, width=70) # noqa: T203\n else:\n print("no outputs extracted from %s" % notebook_name)\n print("=" * 80)\n | .venv\Lib\site-packages\nbconvert\writers\debug.py | debug.py | Python | 1,534 | 0.95 | 0.088889 | 0.382353 | python-kit | 141 | 2025-06-14T03:24:55.716223 | BSD-3-Clause | false | adf01c0b2a611de6666534b804c83461 |
"""Contains writer for writing nbconvert output to filesystem."""\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport errno\nimport glob\nimport os\nfrom pathlib import Path\n\nfrom traitlets import Unicode, observe\n\nfrom nbconvert.utils.io import link_or_copy\n\nfrom .base import WriterBase\n\n\nclass FilesWriter(WriterBase):\n """Consumes nbconvert output and produces files."""\n\n build_directory = Unicode(\n "",\n help="""Directory to write output(s) to. Defaults\n to output to the directory of each notebook. To recover\n previous default behaviour (outputting to the current\n working directory) use . as the flag value.""",\n ).tag(config=True)\n\n relpath = Unicode(\n help="""When copying files that the notebook depends on, copy them in\n relation to this path, such that the destination filename will be\n os.path.relpath(filename, relpath). If FilesWriter is operating on a\n notebook that already exists elsewhere on disk, then the default will be\n the directory containing that notebook."""\n ).tag(config=True)\n\n # Make sure that the output directory exists.\n @observe("build_directory")\n def _build_directory_changed(self, change):\n new = change["new"]\n if new:\n self._makedir(new)\n\n def __init__(self, **kw):\n """Initialize the writer."""\n super().__init__(**kw)\n self._build_directory_changed({"new": self.build_directory})\n\n def _makedir(self, path, mode=0o755):\n """ensure that a directory exists\n\n If it doesn't exist, try to create it and protect against a race condition\n if another process is doing the same.\n\n The default permissions are 755, which differ from os.makedirs default of 777.\n """\n if not os.path.exists(path):\n self.log.info("Making directory %s", path)\n try:\n os.makedirs(path, mode=mode)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n elif not os.path.isdir(path):\n raise OSError("%r exists but is not a directory" % path)\n\n def 
_write_items(self, items, build_dir):\n """Write a dict containing filename->binary data"""\n for filename, data in items:\n # Determine where to write the file to\n dest = os.path.join(build_dir, filename)\n path = os.path.dirname(dest)\n self._makedir(path)\n\n # Write file\n self.log.debug("Writing %i bytes to %s", len(data), dest)\n with open(dest, "wb") as f:\n f.write(data)\n\n def write(self, output, resources, notebook_name=None, **kw):\n """\n Consume and write Jinja output to the file system. Output directory\n is set via the 'build_directory' variable of this instance (a\n configurable).\n\n See base for more...\n """\n\n # Verify that a notebook name is provided.\n if notebook_name is None:\n msg = "notebook_name"\n raise TypeError(msg)\n\n # Pull the extension and subdir from the resources dict.\n output_extension = resources.get("output_extension", None)\n\n # Get the relative path for copying files\n resource_path = resources.get("metadata", {}).get("path", "")\n relpath = self.relpath or resource_path\n build_directory = self.build_directory or resource_path\n\n # Write the extracted outputs to the destination directory.\n # NOTE: WE WRITE EVERYTHING AS-IF IT'S BINARY. 
THE EXTRACT FIG\n # PREPROCESSOR SHOULD HANDLE UNIX/WINDOWS LINE ENDINGS...\n\n items = resources.get("outputs", {}).items()\n if items:\n self.log.info(\n "Support files will be in %s",\n os.path.join(resources.get("output_files_dir", ""), ""),\n )\n self._write_items(items, build_directory)\n\n # Write the extracted attachments\n # if ExtractAttachmentsOutput specified a separate directory\n attachments = resources.get("attachments", {}).items()\n if attachments:\n self.log.info(\n "Attachments will be in %s",\n os.path.join(resources.get("attachment_files_dir", ""), ""),\n )\n self._write_items(attachments, build_directory)\n\n # Copy referenced files to output directory\n if build_directory:\n for filename in self.files:\n # Copy files that match search pattern\n for matching_filename in glob.glob(filename):\n # compute the relative path for the filename\n if relpath != "":\n dest_filename = os.path.relpath(matching_filename, relpath)\n else:\n dest_filename = matching_filename\n\n # Make sure folder exists.\n dest = os.path.join(build_directory, dest_filename)\n path = os.path.dirname(dest)\n self._makedir(path)\n\n # Copy if destination is different.\n if os.path.normpath(dest) != os.path.normpath(matching_filename):\n self.log.info("Copying %s -> %s", matching_filename, dest)\n link_or_copy(matching_filename, dest)\n\n # Determine where to write conversion results.\n dest = notebook_name + output_extension if output_extension is not None else notebook_name\n dest_path = Path(build_directory) / dest\n\n # Write conversion results.\n self.log.info("Writing %i bytes to %s", len(output), dest_path)\n if isinstance(output, str):\n with open(dest_path, "w", encoding="utf-8") as f:\n f.write(output)\n else:\n with open(dest_path, "wb") as f:\n f.write(output)\n\n return dest_path\n | .venv\Lib\site-packages\nbconvert\writers\files.py | files.py | Python | 6,046 | 0.95 | 0.183544 | 0.15625 | vue-tools | 528 | 2024-03-15T16:35:20.878205 | BSD-3-Clause | false | 
8a115c43bdfb28ce4dcad6a9b3428c07 |
"""\nContains Stdout writer\n"""\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom nbconvert.utils import io\n\nfrom .base import WriterBase\n\n\nclass StdoutWriter(WriterBase):\n """Consumes output from nbconvert export...() methods and writes to the\n stdout stream."""\n\n def write(self, output, resources, **kw):\n """\n Consume and write Jinja output.\n\n See base for more...\n """\n stream = io.unicode_std_stream()\n stream.write(output)\n | .venv\Lib\site-packages\nbconvert\writers\stdout.py | stdout.py | Python | 538 | 0.95 | 0.125 | 0.117647 | vue-tools | 493 | 2024-08-16T02:50:34.914128 | BSD-3-Clause | false | ea0ad92f74481d4ecea993c2990a2ed4 |
from .base import WriterBase\nfrom .debug import DebugWriter\nfrom .files import FilesWriter\nfrom .stdout import StdoutWriter\n | .venv\Lib\site-packages\nbconvert\writers\__init__.py | __init__.py | Python | 124 | 0.85 | 0 | 0 | awesome-app | 100 | 2025-02-28T06:06:14.325125 | BSD-3-Clause | false | a95187563e119e1f456148b3c1457ffb |
\n\n | .venv\Lib\site-packages\nbconvert\writers\__pycache__\base.cpython-313.pyc | base.cpython-313.pyc | Other | 1,787 | 0.8 | 0.027027 | 0 | awesome-app | 501 | 2023-07-22T09:12:34.118032 | GPL-3.0 | false | b6a3550c009c61e0360c7833b9dc7d8e |
\n\n | .venv\Lib\site-packages\nbconvert\writers\__pycache__\debug.cpython-313.pyc | debug.cpython-313.pyc | Other | 1,542 | 0.7 | 0.052632 | 0 | node-utils | 919 | 2025-03-27T23:50:33.263359 | Apache-2.0 | false | 46414b14bb4e9af6b578476f306e76b6 |
\n\n | .venv\Lib\site-packages\nbconvert\writers\__pycache__\files.cpython-313.pyc | files.cpython-313.pyc | Other | 7,396 | 0.8 | 0.038462 | 0 | node-utils | 998 | 2024-07-07T20:52:50.042716 | BSD-3-Clause | false | bb1d11a3e5fe31d87a22135f42c688df |
\n\n | .venv\Lib\site-packages\nbconvert\writers\__pycache__\stdout.cpython-313.pyc | stdout.cpython-313.pyc | Other | 1,000 | 0.7 | 0.076923 | 0 | node-utils | 457 | 2024-10-16T01:49:36.891561 | GPL-3.0 | false | ae609e4872670f43558fecaa49735229 |
\n\n | .venv\Lib\site-packages\nbconvert\writers\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 362 | 0.7 | 0 | 0 | vue-tools | 1 | 2025-01-02T17:04:00.276823 | MIT | false | d20dc1d4c5ad62f5f214f40ac4bdf5da |
\n\n | .venv\Lib\site-packages\nbconvert\__pycache__\conftest.cpython-313.pyc | conftest.cpython-313.pyc | Other | 467 | 0.7 | 0 | 0 | python-kit | 601 | 2023-12-27T10:07:54.584613 | BSD-3-Clause | true | 1729d69ef17b28de11280f58cf32adac |
\n\n | .venv\Lib\site-packages\nbconvert\__pycache__\nbconvertapp.cpython-313.pyc | nbconvertapp.cpython-313.pyc | Other | 27,394 | 0.95 | 0.071023 | 0 | react-lib | 456 | 2024-03-22T15:40:11.311415 | GPL-3.0 | false | 89f092a915a0e5c6a94c21b0498bf7e4 |
\n\n | .venv\Lib\site-packages\nbconvert\__pycache__\_version.cpython-313.pyc | _version.cpython-313.pyc | Other | 822 | 0.7 | 0 | 0 | node-utils | 470 | 2024-10-20T20:29:54.819039 | Apache-2.0 | false | 24cc420f5f0b192688e3de37509674b5 |
\n\n | .venv\Lib\site-packages\nbconvert\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 1,225 | 0.7 | 0.066667 | 0 | node-utils | 792 | 2024-09-17T08:52:22.283245 | MIT | false | 46c602e8b7a3efc7fdcd6a0c78455531 |
\n\n | .venv\Lib\site-packages\nbconvert\__pycache__\__main__.cpython-313.pyc | __main__.cpython-313.pyc | Other | 291 | 0.7 | 0 | 0 | vue-tools | 201 | 2024-08-15T21:50:33.274263 | MIT | false | 39db0f379d15c64b5193fb7d81d41c60 |
[console_scripts]\njupyter-dejavu = nbconvert.nbconvertapp:dejavu_main\njupyter-nbconvert = nbconvert.nbconvertapp:main\n\n[nbconvert.exporters]\nasciidoc = nbconvert.exporters:ASCIIDocExporter\ncustom = nbconvert.exporters:TemplateExporter\nhtml = nbconvert.exporters:HTMLExporter\nlatex = nbconvert.exporters:LatexExporter\nmarkdown = nbconvert.exporters:MarkdownExporter\nnotebook = nbconvert.exporters:NotebookExporter\npdf = nbconvert.exporters:PDFExporter\npython = nbconvert.exporters:PythonExporter\nqtpdf = nbconvert.exporters:QtPDFExporter\nqtpng = nbconvert.exporters:QtPNGExporter\nrst = nbconvert.exporters:RSTExporter\nscript = nbconvert.exporters:ScriptExporter\nslides = nbconvert.exporters:SlidesExporter\nwebpdf = nbconvert.exporters:WebPDFExporter\n | .venv\Lib\site-packages\nbconvert-7.16.6.dist-info\entry_points.txt | entry_points.txt | Other | 749 | 0.7 | 0 | 0 | vue-tools | 700 | 2023-11-09T06:47:12.389502 | Apache-2.0 | false | f2bb975e42b980f4a8ae51412dfd09ed |
pip\n | .venv\Lib\site-packages\nbconvert-7.16.6.dist-info\INSTALLER | INSTALLER | Other | 4 | 0.5 | 0 | 0 | node-utils | 677 | 2025-05-07T04:22:15.054076 | BSD-3-Clause | false | 365c9bfeb7d89244f2ce01c1de44cb85 |
Metadata-Version: 2.4\nName: nbconvert\nVersion: 7.16.6\nSummary: Converting Jupyter Notebooks (.ipynb files) to other formats. Output formats include asciidoc, html, latex, markdown, pdf, py, rst, script. nbconvert can be used both as a Python library (`import nbconvert`) or as a command line tool (invoked as `jupyter nbconvert ...`).\nProject-URL: Homepage, https://jupyter.org\nAuthor-email: Jupyter Development Team <jupyter@googlegroups.com>\nLicense: BSD 3-Clause License\n \n - Copyright (c) 2001-2015, IPython Development Team\n - Copyright (c) 2015-, Jupyter Development Team\n \n All rights reserved.\n \n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n \n 1. Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n \n 2. Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n \n 3. Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n \n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"\n AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nLicense-File: LICENSE\nKeywords: Interactive,Interpreter,Shell,Web\nClassifier: Intended Audience :: Developers\nClassifier: Intended Audience :: Science/Research\nClassifier: Intended Audience :: System Administrators\nClassifier: License :: OSI Approved :: BSD License\nClassifier: Programming Language :: Python\nClassifier: Programming Language :: Python :: 3\nRequires-Python: >=3.8\nRequires-Dist: beautifulsoup4\nRequires-Dist: bleach[css]!=5.0.0\nRequires-Dist: defusedxml\nRequires-Dist: importlib-metadata>=3.6; python_version < '3.10'\nRequires-Dist: jinja2>=3.0\nRequires-Dist: jupyter-core>=4.7\nRequires-Dist: jupyterlab-pygments\nRequires-Dist: markupsafe>=2.0\nRequires-Dist: mistune<4,>=2.0.3\nRequires-Dist: nbclient>=0.5.0\nRequires-Dist: nbformat>=5.7\nRequires-Dist: packaging\nRequires-Dist: pandocfilters>=1.4.1\nRequires-Dist: pygments>=2.4.1\nRequires-Dist: traitlets>=5.1\nProvides-Extra: all\nRequires-Dist: flaky; extra == 'all'\nRequires-Dist: ipykernel; extra == 'all'\nRequires-Dist: ipython; extra == 'all'\nRequires-Dist: ipywidgets>=7.5; extra == 'all'\nRequires-Dist: myst-parser; extra == 'all'\nRequires-Dist: nbsphinx>=0.2.12; extra == 'all'\nRequires-Dist: playwright; extra == 'all'\nRequires-Dist: pydata-sphinx-theme; extra == 'all'\nRequires-Dist: pyqtwebengine>=5.15; extra == 'all'\nRequires-Dist: pytest>=7; extra == 'all'\nRequires-Dist: sphinx==5.0.2; extra == 'all'\nRequires-Dist: sphinxcontrib-spelling; extra == 
'all'\nRequires-Dist: tornado>=6.1; extra == 'all'\nProvides-Extra: docs\nRequires-Dist: ipykernel; extra == 'docs'\nRequires-Dist: ipython; extra == 'docs'\nRequires-Dist: myst-parser; extra == 'docs'\nRequires-Dist: nbsphinx>=0.2.12; extra == 'docs'\nRequires-Dist: pydata-sphinx-theme; extra == 'docs'\nRequires-Dist: sphinx==5.0.2; extra == 'docs'\nRequires-Dist: sphinxcontrib-spelling; extra == 'docs'\nProvides-Extra: qtpdf\nRequires-Dist: pyqtwebengine>=5.15; extra == 'qtpdf'\nProvides-Extra: qtpng\nRequires-Dist: pyqtwebengine>=5.15; extra == 'qtpng'\nProvides-Extra: serve\nRequires-Dist: tornado>=6.1; extra == 'serve'\nProvides-Extra: test\nRequires-Dist: flaky; extra == 'test'\nRequires-Dist: ipykernel; extra == 'test'\nRequires-Dist: ipywidgets>=7.5; extra == 'test'\nRequires-Dist: pytest>=7; extra == 'test'\nProvides-Extra: webpdf\nRequires-Dist: playwright; extra == 'webpdf'\nDescription-Content-Type: text/markdown\n\n# nbconvert\n\n### Jupyter Notebook Conversion\n\n[](https://github.com/jupyter/nbconvert/actions/workflows/tests.yml/badge.svg?query=branch%3Amain++)\n[](https://nbconvert.readthedocs.io/en/latest/?badge=latest)\n\nThe **nbconvert** tool, `jupyter nbconvert`, converts notebooks to various other\nformats via [Jinja] templates. The nbconvert tool allows you to convert an\n`.ipynb` notebook file into various static formats including:\n\n- HTML\n- LaTeX\n- PDF\n- Reveal JS\n- Markdown (md)\n- ReStructured Text (rst)\n- executable script\n\n## Usage\n\nFrom the command line, use nbconvert to convert a Jupyter notebook (_input_) to a\na different format (_output_). 
The basic command structure is:\n\n```\n$ jupyter nbconvert --to <output format> <input notebook>\n```\n\nwhere `<output format>` is the desired output format and `<input notebook>` is the\nfilename of the Jupyter notebook.\n\n### Example: Convert a notebook to HTML\n\nConvert Jupyter notebook file, `mynotebook.ipynb`, to HTML using:\n\n```\n$ jupyter nbconvert --to html mynotebook.ipynb\n```\n\nThis command creates an HTML output file named `mynotebook.html`.\n\n## Dev Install\n\nCheck if pandoc is installed (`pandoc --version`); if needed, install:\n\n```\nsudo apt-get install pandoc\n```\n\nOr\n\n```\nbrew install pandoc\n```\n\nInstall nbconvert for development using:\n\n```\ngit clone https://github.com/jupyter/nbconvert.git\ncd nbconvert\npip install -e .\n```\n\nRunning the tests after a dev install above:\n\n```\npip install nbconvert[test]\npy.test --pyargs nbconvert\n```\n\n## Documentation\n\n- [Documentation for Jupyter nbconvert](https://nbconvert.readthedocs.io/en/latest/)\n- [nbconvert examples on GitHub](https://github.com/jupyter/nbconvert-examples)\n- [Documentation for Project Jupyter](https://jupyter.readthedocs.io/en/latest/index.html)\n\n## Technical Support\n\n- [Issues and Bug Reports](https://github.com/jupyter/nbconvert/issues): A place to report\n bugs or regressions found for nbconvert\n- [Community Technical Support and Discussion - Discourse](https://discourse.jupyter.org/): A place for\n installation, configuration, and troubleshooting assistance by the Jupyter community.\n As a non-profit project and maintainers who are primarily volunteers, we encourage you\n to ask questions and share your knowledge on Discourse.\n\n## Jupyter Resources\n\n- [Jupyter mailing list](https://groups.google.com/forum/#!forum/jupyter)\n- [Project Jupyter website](https://jupyter.org)\n\n## About the Jupyter Development Team\n\nThe Jupyter Development Team is the set of all contributors to the Jupyter project.\nThis includes all of the Jupyter 
subprojects.\n\nThe core team that coordinates development on GitHub can be found here:\nhttps://github.com/jupyter/.\n\n## Our Copyright Policy\n\nJupyter uses a shared copyright model. Each contributor maintains copyright\nover their contributions to Jupyter. But, it is important to note that these\ncontributions are typically only changes to the repositories. Thus, the Jupyter\nsource code, in its entirety is not the copyright of any single person or\ninstitution. Instead, it is the collective copyright of the entire Jupyter\nDevelopment Team. If individual contributors want to maintain a record of what\nchanges/contributions they have specific copyright on, they should indicate\ntheir copyright in the commit message of the change, when they commit the\nchange to one of the Jupyter repositories.\n\nWith this in mind, the following banner should be used in any source code file\nto indicate the copyright and license terms:\n\n```\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n```\n\n[jinja]: http://jinja.pocoo.org/\n | .venv\Lib\site-packages\nbconvert-7.16.6.dist-info\METADATA | METADATA | Other | 8,465 | 0.95 | 0.032258 | 0.068966 | awesome-app | 951 | 2023-11-14T08:59:40.874960 | Apache-2.0 | false | 496abcae025b81f745483cba1ad5dd87 |
../../Scripts/jupyter-dejavu.exe,sha256=8DU0BhYJr0fYVIijtUP7uY1v3OOAtSlaiSpG3NIJ6So,108437\n../../Scripts/jupyter-nbconvert.exe,sha256=NZzMOSKYDJVctSGR94b7OlwHgaq9ABE1DdDtEeSPNug,108423\n../../share/jupyter/nbconvert/templates/asciidoc/conf.json,sha256=F2axcNCiJcqc6hkStkLWpp0mYmjA71CAoMVQxBH59HQ,78\n../../share/jupyter/nbconvert/templates/asciidoc/index.asciidoc.j2,sha256=ldvuvwVyz0b7bO2ZZ8NXcnI4cdyKJBfslWnd_xZLZKc,2298\n../../share/jupyter/nbconvert/templates/base/cell_id_anchor.j2,sha256=ZRdg3wkyuaQc3QQ1M6EuxIkdY7X1ouu8_j9LUW6fW4c,160\n../../share/jupyter/nbconvert/templates/base/celltags.j2,sha256=XAqjwRIJb-xaTpau4EJE9-7nMXV_O_9w5v4wDUU0rJs,231\n../../share/jupyter/nbconvert/templates/base/display_priority.j2,sha256=e8gVqbrpw4sskdse90wtJi1cN5-h4cBbbmTHaFYkXPo,1680\n../../share/jupyter/nbconvert/templates/base/jupyter_widgets.html.j2,sha256=i-hxFLlvFo6GDn4zR3d_Lj4c13KjCP_4YwTCAnR7e4c,1250\n../../share/jupyter/nbconvert/templates/base/mathjax.html.j2,sha256=Pbe7_80DrJNyaScmhwNfXyPLoScRQ8qnVkiFjdJplTg,1226\n../../share/jupyter/nbconvert/templates/base/null.j2,sha256=qKiTLBUWfpwIQjlQ00NG8PvZHXGlNycM2EGibUwVsEk,6265\n../../share/jupyter/nbconvert/templates/basic/conf.json,sha256=FkE-hYl96LL1pcM2favFl2Jhvjve7aFuRZHpnHK0p4o,77\n../../share/jupyter/nbconvert/templates/basic/index.html.j2,sha256=ovChqOqeUNBJMBSvRCDPBvF6wJVhDPPF5ly3Y-qPs6g,39\n../../share/jupyter/nbconvert/templates/classic/base.html.j2,sha256=iTvwsb6xujGDONAlVjJpdM5Zj0I9yS55WxtFmBk4UgU,8447\n../../share/jupyter/nbconvert/templates/classic/conf.json,sha256=MEz8vViXkCngNBomXNJ-qrGacil0C9NAxGBrqtx8WOg,243\n../../share/jupyter/nbconvert/templates/classic/index.html.j2,sha256=de0E1Uu8CbH4JSVZWMLuWINZ-iL0bVNB8-3kjzpL-IE,2645\n../../share/jupyter/nbconvert/templates/classic/static/style.css,sha256=WGWmCfRDewRkvBIc1We2GQdOVAoFFaO4LyIvdk61HgE,265101\n../../share/jupyter/nbconvert/templates/compatibility/display_priority.tpl,sha256=_4UtbBB260KGPPG6SilIF63CUEKQ8-KTYQIx0mO2-9I,133\n../../share/jupyter/nbconvert/templ
ates/compatibility/full.tpl,sha256=CmpspwsWO2fHwrNH_qKf-DGrKFgo1aHjwqAuuIz5QDQ,124\n../../share/jupyter/nbconvert/templates/lab/base.html.j2,sha256=szFoAwQKL2PMzw-zguXywEI2x0w5cgcnZLyrDQ1kbdE,10171\n../../share/jupyter/nbconvert/templates/lab/conf.json,sha256=Vf3x2bOE7chgS6r4jLKo_Cvzp5mXUsLSBJ339dnDRLk,217\n../../share/jupyter/nbconvert/templates/lab/index.html.j2,sha256=jvjF4_Kklw9LXtxJmpSW43beZUfVhS4cPnlhhpvVO9A,3329\n../../share/jupyter/nbconvert/templates/lab/mermaidjs.html.j2,sha256=xfr3WznGySSO8i8dYSK8z5psgLBnwSrlrjX1lcljaos,5354\n../../share/jupyter/nbconvert/templates/lab/static/index.css,sha256=kX_0eFCnzAj9BlgCb9p2cqhSIKqrJY6ISeiRs3Qm-Uc,240379\n../../share/jupyter/nbconvert/templates/lab/static/theme-dark.css,sha256=eV8tUGlzfL61y6Aea1x8rb3iJ8kJ5DAExaYPWNUWCuw,17102\n../../share/jupyter/nbconvert/templates/lab/static/theme-light.css,sha256=Eb81WP0-01OkwUAawMFzDQHfBz9kNtNXxbvwKgO9aWI,16019\n../../share/jupyter/nbconvert/templates/latex/base.tex.j2,sha256=jCpKVS7juymNbj2v_CsCKs-yfFBO5iqcu5JArn3cFbM,10469\n../../share/jupyter/nbconvert/templates/latex/conf.json,sha256=9fg90dMWipcbQpDaUZuv_bd_Ff3LbfsGb0G7jOF1kGw,126\n../../share/jupyter/nbconvert/templates/latex/display_priority.j2,sha256=YIYQ2wZen1t34_cZfpLlMDOATYwtknYN6U3OKGil18M,1643\n../../share/jupyter/nbconvert/templates/latex/document_contents.tex.j2,sha256=iVY07fA5Ca91cPb3kEYmAfZfb7ID3um9Ll5S5qoFdeo,2782\n../../share/jupyter/nbconvert/templates/latex/index.tex.j2,sha256=FInvezHz4MY5OLRzI03j-ZYJ8onIpTLbIHOaS5Mh2g4,496\n../../share/jupyter/nbconvert/templates/latex/null.j2,sha256=Jw8b5XCLncJlyziQitXG71byKt6gRLAlvmC93MD8ZBY,5544\n../../share/jupyter/nbconvert/templates/latex/report.tex.j2,sha256=tBYb6HezxBP61noI5SurxCf6bR3rD_KYwOvNyuxsv80,909\n../../share/jupyter/nbconvert/templates/latex/style_bw_ipython.tex.j2,sha256=Gn1lVNVCdGdvCj2_79L6BPMc5LfQpDy0tFOEJOLuUwY,1989\n../../share/jupyter/nbconvert/templates/latex/style_bw_python.tex.j2,sha256=DB-Uvno82W_eOKtpppLRX89Luu5D70ahfSYELwR17lY,479\n../../share/jup
yter/nbconvert/templates/latex/style_ipython.tex.j2,sha256=OoNlLCbmuxpgqujDqU8N6gDkUmzY2QnmWCFfxwci92U,2587\n../../share/jupyter/nbconvert/templates/latex/style_jupyter.tex.j2,sha256=WHUgjmAIpZZXkI6z-g10x5WtvpFCGRHCvJNX4p3ZTOQ,8233\n../../share/jupyter/nbconvert/templates/latex/style_python.tex.j2,sha256=DoY7Yu1SUnvXSGLTsXwVb6bgqlpU4Azq11ZPixBd3Vo,794\n../../share/jupyter/nbconvert/templates/markdown/conf.json,sha256=tL4FKA3l7ISHZeJXv0OJeP-ybzdt40M7e_b-ljmfS-c,78\n../../share/jupyter/nbconvert/templates/markdown/index.md.j2,sha256=C0kRgRAqG0UwXaFFON7WM83iUcbAoY8F5r4-Oj8J8Ps,2028\n../../share/jupyter/nbconvert/templates/python/conf.json,sha256=paZQoNtoCLGZAtlXkk4NwyTju0vt3WcVLyR_xtoKcHE,78\n../../share/jupyter/nbconvert/templates/python/index.py.j2,sha256=hibA2B5Z8sPySgJT9GngREcxkRp1VSk9fgUQO5N23VM,472\n../../share/jupyter/nbconvert/templates/reveal/base.html.j2,sha256=FN5A3-zvliyMgZEAkZkaT9TI3bIzo_puN0YVWOh_odU,872\n../../share/jupyter/nbconvert/templates/reveal/cellslidedata.j2,sha256=C90yZmSO2w56ax_XDXbmyx4ha48F7Yv4pPUhS-NODZ8,410\n../../share/jupyter/nbconvert/templates/reveal/conf.json,sha256=mKqvK9Hj0Wk1TqgnT-kAt7MoWf-MtawWF6Kj6ltro_c,337\n../../share/jupyter/nbconvert/templates/reveal/index.html.j2,sha256=qj_NSKhZe5yULTdCInwASUVTGUZHbxUoyZgGNxi6v0g,5810\n../../share/jupyter/nbconvert/templates/reveal/static/custom_reveal.css,sha256=WLSnsWL1o98lvoOcclJkEc0_tHsjUjLRxU4KKK9533I,2400\n../../share/jupyter/nbconvert/templates/rst/conf.json,sha256=HacJE97lE-jve6-l7Ti1HuHnkBNJMUhHHu0eBMZD0Z8,75\n../../share/jupyter/nbconvert/templates/rst/index.rst.j2,sha256=sWbjOZ_3qoWqRODbXoYRAYNnHq73QkoYBkKLizUWv64,2922\n../../share/jupyter/nbconvert/templates/script/conf.json,sha256=x9Wy_nThhDWnATyrT1OAjRsSkAG4anXNBMCrA84lzT8,75\n../../share/jupyter/nbconvert/templates/script/script.j2,sha256=y0yFbSu4hdd1rfX2yrzduG5zqeNv_mcmuE38MPK1ZYI,84\n../../share/jupyter/nbconvert/templates/webpdf/conf.json,sha256=Y3YD2wpnN1P16_Q-HyE7VZZGGMPgJgZLyFZcEijWVPI,79\n../../share/jupyter/nbconvert/t
emplates/webpdf/index.pdf.j2,sha256=mRxh8ncmiS1NgNhsCxrlO_ZokpTYwQVhm-WDdzamqOo,36\nnbconvert-7.16.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4\nnbconvert-7.16.6.dist-info/METADATA,sha256=pnpVQz5TNwyxtSsvOWV5tMujC5ra2k-JVFxOJXIMM3M,8465\nnbconvert-7.16.6.dist-info/RECORD,,\nnbconvert-7.16.6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87\nnbconvert-7.16.6.dist-info/entry_points.txt,sha256=0Rn9xnvHu7jIRt5e0bixsttz9sJsLK1tplqvzO5OlDs,749\nnbconvert-7.16.6.dist-info/licenses/LICENSE,sha256=XKdOTS7rkzCw0SnCX4dNNUShNBO8Yq6NNngZEA0JUHI,1588\nnbconvert/__init__.py,sha256=DY-lv0I8d0Vn1IB1mZBrFal_c6zPCgxY8YLLTIaYlb0,1364\nnbconvert/__main__.py,sha256=M2eMPkTt3Ly9z0-gia_J8yWE1KbABvWK-XFeaG9yxns,73\nnbconvert/__pycache__/__init__.cpython-313.pyc,,\nnbconvert/__pycache__/__main__.cpython-313.pyc,,\nnbconvert/__pycache__/_version.cpython-313.pyc,,\nnbconvert/__pycache__/conftest.cpython-313.pyc,,\nnbconvert/__pycache__/nbconvertapp.cpython-313.pyc,,\nnbconvert/_version.py,sha256=HSagf2wWpomw3YV16nffH7gcm4_QXxiGsf2c6spf56o,492\nnbconvert/conftest.py,sha256=AJorRGE9QcZ9n68zdM-Q0JOEOtk9RZZlmxTl2sahwUk,194\nnbconvert/exporters/__init__.py,sha256=F3hd1sinbRqn3HJ7lCAjwLxYvXXCmPm0C3K2zI3Q01M,1150\nnbconvert/exporters/__pycache__/__init__.cpython-313.pyc,,\nnbconvert/exporters/__pycache__/asciidoc.cpython-313.pyc,,\nnbconvert/exporters/__pycache__/base.cpython-313.pyc,,\nnbconvert/exporters/__pycache__/exporter.cpython-313.pyc,,\nnbconvert/exporters/__pycache__/html.cpython-313.pyc,,\nnbconvert/exporters/__pycache__/latex.cpython-313.pyc,,\nnbconvert/exporters/__pycache__/markdown.cpython-313.pyc,,\nnbconvert/exporters/__pycache__/notebook.cpython-313.pyc,,\nnbconvert/exporters/__pycache__/pdf.cpython-313.pyc,,\nnbconvert/exporters/__pycache__/python.cpython-313.pyc,,\nnbconvert/exporters/__pycache__/qt_exporter.cpython-313.pyc,,\nnbconvert/exporters/__pycache__/qt_screenshot.cpython-313.pyc,,\nnbconvert/exporters/__pycache__/qtpdf.cpy
thon-313.pyc,,\nnbconvert/exporters/__pycache__/qtpng.cpython-313.pyc,,\nnbconvert/exporters/__pycache__/rst.cpython-313.pyc,,\nnbconvert/exporters/__pycache__/script.cpython-313.pyc,,\nnbconvert/exporters/__pycache__/slides.cpython-313.pyc,,\nnbconvert/exporters/__pycache__/templateexporter.cpython-313.pyc,,\nnbconvert/exporters/__pycache__/webpdf.cpython-313.pyc,,\nnbconvert/exporters/asciidoc.py,sha256=IJ-Rwf2qKvNYKT3xQpHFemS0n6od5Ga4GZyOWfG-wxc,1530\nnbconvert/exporters/base.py,sha256=T990RgYbq9qWf5EpC-PCypZl2ZI-BOcCUB8I9fOC01A,4972\nnbconvert/exporters/exporter.py,sha256=DiYYzAl1A-WQCWZqFvLl1EOAKkiRzxBg3ZU2hLJyyl0,12877\nnbconvert/exporters/html.py,sha256=GOjTB5vLbTVyImfW5wP8thQjzPMEWhJqeQZZ0bvTpKM,13779\nnbconvert/exporters/latex.py,sha256=8NfRchQzGVIgA5b-dhft1eRi_0C6OunN0IYXqmusjSo,3847\nnbconvert/exporters/markdown.py,sha256=B_JYda_F4ncvMvkwUvem8ooJ2M_zoHAEZZacrwXlR8c,1569\nnbconvert/exporters/notebook.py,sha256=Sz2zluOj9qYJ8Kuo04HHYgJtEI0ZFsdxQLhKq4Q0LH0,1423\nnbconvert/exporters/pdf.py,sha256=IRjYnUzSkR35wrcwPtGMZUzqYY5JMfeUuWU4W-hxKiQ,7932\nnbconvert/exporters/python.py,sha256=pWDIb1K1Uux0Mn0uGmiVMP-cIqSt3v3ZZrGB-yGybxo,675\nnbconvert/exporters/qt_exporter.py,sha256=ZuiiUeoPPbvSAD3xygA386MHprpRZB9Jb9Vuex2KAAQ,2113\nnbconvert/exporters/qt_screenshot.py,sha256=Y8LMvEmy1IBTnI_-5IWD3pugQ2-GFJl4RveeLAiNrIM,3312\nnbconvert/exporters/qtpdf.py,sha256=GoEekiEMtEi5kwTFrLP_QvEvSi5Ys_sDPSKq26NSO0g,808\nnbconvert/exporters/qtpng.py,sha256=cnwA1UFbEynY1abMIFEgHHSTX6VbQA3KNoDYmQ-yKM8,479\nnbconvert/exporters/rst.py,sha256=jMZmG8pVmJqhfgMxy4wJ4aL_GLaRgT3DHPeiTY92MUE,1762\nnbconvert/exporters/script.py,sha256=TMAlE9Tsa0F5zu-RASn90phcK57XBOX5y8lpVCbvoVw,3177\nnbconvert/exporters/slides.py,sha256=mXj7NGDOmz8T2ghvCalfw1HgVsyEpSFW4ewgONchSBI,7099\nnbconvert/exporters/templateexporter.py,sha256=iNbONCJmR36yTC7ue5iOMVVMF__w_9LwLtKg74gQWOw,27557\nnbconvert/exporters/webpdf.py,sha256=_j7dcv9yCDGGs_kv9SqkbZzxiX1DYbxIYdikR91php0,6678\nnbconvert/filters/__init__.py,sha256=ugWPdHG3QS
EFwXYIjlFXOnxaS77gRQqyOd1d1eA3wG4,1578\nnbconvert/filters/__pycache__/__init__.cpython-313.pyc,,\nnbconvert/filters/__pycache__/ansi.cpython-313.pyc,,\nnbconvert/filters/__pycache__/citation.cpython-313.pyc,,\nnbconvert/filters/__pycache__/datatypefilter.cpython-313.pyc,,\nnbconvert/filters/__pycache__/filter_links.cpython-313.pyc,,\nnbconvert/filters/__pycache__/highlight.cpython-313.pyc,,\nnbconvert/filters/__pycache__/latex.cpython-313.pyc,,\nnbconvert/filters/__pycache__/markdown.cpython-313.pyc,,\nnbconvert/filters/__pycache__/markdown_mistune.cpython-313.pyc,,\nnbconvert/filters/__pycache__/metadata.cpython-313.pyc,,\nnbconvert/filters/__pycache__/pandoc.cpython-313.pyc,,\nnbconvert/filters/__pycache__/strings.cpython-313.pyc,,\nnbconvert/filters/__pycache__/widgetsdatatypefilter.cpython-313.pyc,,\nnbconvert/filters/ansi.py,sha256=QLfZSDSu3HA73XZwBlahaTs6sS5AK0FEAFyXSnWrg04,8042\nnbconvert/filters/citation.py,sha256=aQvABngmtbjLfF2QCH53DXbGjXnKklqwhYzpanG0pyk,3694\nnbconvert/filters/datatypefilter.py,sha256=abMhSSAFMYYTZY_LFEgHzEdeoGqWVnz3MZogTral3sM,1556\nnbconvert/filters/filter_links.py,sha256=BMhEMJQvdGFCZL7oj7jZh96swdGxcHl6VOu3VyGxwb0,1556\nnbconvert/filters/highlight.py,sha256=7vvKS4jfTuuC4penBLUookwUnlIECUp24FwOR6WKkW8,6330\nnbconvert/filters/latex.py,sha256=tY8wesCcqTY7i9KEY9ZrUNBQ3DLMCrjnv_dnvi54gfc,1819\nnbconvert/filters/markdown.py,sha256=ee8i6aTpgyQIOUIS55sF0SO3_UbONpeAOQYNSEoHtao,3399\nnbconvert/filters/markdown_mistune.py,sha256=8Jtud0afO-zVxPLfGjmb6EzhxqrcDg6BjXxzJ4glOfE,18978\nnbconvert/filters/metadata.py,sha256=1qmhTm8J3DsasT9aMd31QnBFoJpGCBQd_kwTWi-Shps,477\nnbconvert/filters/pandoc.py,sha256=Fpayj_QLokAVs0rjB6Zau7t1lbCnVF6RZlZBs0ZHGdY,2867\nnbconvert/filters/strings.py,sha256=ESqpBAkvzUORgKy9SyGoW7qhfrj8lk-Zj0RqkyUS3zY,7536\nnbconvert/filters/widgetsdatatypefilter.py,sha256=fSj9EQNmAigkceq4s_XVfxj6QmF43y-2qjC99yqAU9E,2820\nnbconvert/nbconvertapp.py,sha256=1XkWM4-BBDOeq1Aadn0b_58siKWk0NsurQTD6bwtLkk,24790\nnbconvert/postprocessors/__init__.
py,sha256=GiflccS-Lmvo_o37OuBNyI8-_lbLYOX5tBDMBeZdBQc,259\nnbconvert/postprocessors/__pycache__/__init__.cpython-313.pyc,,\nnbconvert/postprocessors/__pycache__/base.cpython-313.pyc,,\nnbconvert/postprocessors/__pycache__/serve.cpython-313.pyc,,\nnbconvert/postprocessors/base.py,sha256=-9ABKhp0rE-ObpVcdXfSIpZ3_6NnGuPu4J1DqggDW8M,1154\nnbconvert/postprocessors/serve.py,sha256=qkl0zrGFQRxm7Wd-Tmb60l1YXsBaqU7-rU8G0KXTHJo,4366\nnbconvert/preprocessors/__init__.py,sha256=iX4cTIYG_J6tn-yj3OGHM7_LSTz2WBzXykaCLnBpXI0,1279\nnbconvert/preprocessors/__pycache__/__init__.cpython-313.pyc,,\nnbconvert/preprocessors/__pycache__/base.cpython-313.pyc,,\nnbconvert/preprocessors/__pycache__/clearmetadata.cpython-313.pyc,,\nnbconvert/preprocessors/__pycache__/clearoutput.cpython-313.pyc,,\nnbconvert/preprocessors/__pycache__/coalescestreams.cpython-313.pyc,,\nnbconvert/preprocessors/__pycache__/convertfigures.cpython-313.pyc,,\nnbconvert/preprocessors/__pycache__/csshtmlheader.cpython-313.pyc,,\nnbconvert/preprocessors/__pycache__/execute.cpython-313.pyc,,\nnbconvert/preprocessors/__pycache__/extractattachments.cpython-313.pyc,,\nnbconvert/preprocessors/__pycache__/extractoutput.cpython-313.pyc,,\nnbconvert/preprocessors/__pycache__/highlightmagics.cpython-313.pyc,,\nnbconvert/preprocessors/__pycache__/latex.cpython-313.pyc,,\nnbconvert/preprocessors/__pycache__/regexremove.cpython-313.pyc,,\nnbconvert/preprocessors/__pycache__/sanitize.cpython-313.pyc,,\nnbconvert/preprocessors/__pycache__/svg2pdf.cpython-313.pyc,,\nnbconvert/preprocessors/__pycache__/tagremove.cpython-313.pyc,,\nnbconvert/preprocessors/base.py,sha256=wshQOC5MuR6cJaY0bkXn14lOt3XekuPtjFL-KOHhzPM,2793\nnbconvert/preprocessors/clearmetadata.py,sha256=wUanwENDiXI0H5VMStyjPJQjGeCHpck6uDujOheJOcg,3764\nnbconvert/preprocessors/clearoutput.py,sha256=gWV9reOMX4JEkVegfiT0P_miUYaWcgZHQj285kffBUY,930\nnbconvert/preprocessors/coalescestreams.py,sha256=7mQUkEARedg6DXXaR-zk2lXyLza_YlDiK3pkBsWFKOk,1385\nnbconvert/preprocessors/conver
tfigures.py,sha256=CtEy69J9ommtfGWpJxkZ8rMVn0lz4uqgKcLVgRD0AA8,1539\nnbconvert/preprocessors/csshtmlheader.py,sha256=nLfFzEB-EiC1bPkHHNTVv95MUbSDdIA6PLKJdFEs1C8,3307\nnbconvert/preprocessors/execute.py,sha256=Fy1cO9aH-sBUOhOkRbJrqisnbLn78WrdG2u8KizRRDY,4604\nnbconvert/preprocessors/extractattachments.py,sha256=4y0iO3CL_FeW6CLOSJ0LgOkqwobvZzGbg6S9tGgGMns,4080\nnbconvert/preprocessors/extractoutput.py,sha256=AO-VlGvjOpu8KvDcDhLpQXLZBjKNMQ3Z_s3NrWo-ZIE,6445\nnbconvert/preprocessors/highlightmagics.py,sha256=2qZ2IdW3iI90KOL4Kxq4Jf_OrqP9LG6cdTIOoPoOtAI,3197\nnbconvert/preprocessors/latex.py,sha256=DoerhpNNqE2RGX0FtuQ_dZZ-DsUHHqbpTHyhDJqRQ3o,2735\nnbconvert/preprocessors/regexremove.py,sha256=ii981TTxIQyDVr2AjoElkXfBwdJJMt_SxbC5PBuFQOY,2498\nnbconvert/preprocessors/sanitize.py,sha256=e42iUy_bqLgve3i9oap6xTau0KLCvNGrCTUZC4J1E4U,5463\nnbconvert/preprocessors/svg2pdf.py,sha256=qcb0RHoABdnE2ksmneKlBqMDG4DS2rAHuU01hqO4sg0,5589\nnbconvert/preprocessors/tagremove.py,sha256=6yOetzRsUmYEkB7doSqRFB1z5bRwIYWLFsxUV3tTDQA,4731\nnbconvert/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0\nnbconvert/resources/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0\nnbconvert/resources/__pycache__/__init__.cpython-313.pyc,,\nnbconvert/templates/README.md,sha256=8UVl0AXs1TyntM12GaaBmAWPPb9nOprz1DOcqWvHAT4,307\nnbconvert/templates/skeleton/Makefile,sha256=MJUi6n1C3pYwlG_03vBVb8Kx5hYxQJtjjRnhr9CcC9M,682\nnbconvert/templates/skeleton/README.md,sha256=LuD-m3XXkK7AHYsIghkz3-5BBYMY2nQxyGtRstf0LcI,510\nnbconvert/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0\nnbconvert/utils/__pycache__/__init__.cpython-313.pyc,,\nnbconvert/utils/__pycache__/_contextlib_chdir.cpython-313.pyc,,\nnbconvert/utils/__pycache__/base.cpython-313.pyc,,\nnbconvert/utils/__pycache__/exceptions.cpython-313.pyc,,\nnbconvert/utils/__pycache__/io.cpython-313.pyc,,\nnbconvert/utils/__pycache__/iso639_1.cpython-313.pyc,,\nnbconvert/utils/__pycache__/lexers.cpython-313.pyc,,\nnbconver
t/utils/__pycache__/pandoc.cpython-313.pyc,,\nnbconvert/utils/__pycache__/text.cpython-313.pyc,,\nnbconvert/utils/__pycache__/version.cpython-313.pyc,,\nnbconvert/utils/_contextlib_chdir.py,sha256=4dI_9Pg468w_fGjdc8iq3BxK8PfRPAn3Uvagah9dw-o,609\nnbconvert/utils/base.py,sha256=UPoDpMkMJdXCMyr6bMenja7d8XFHRjG_622cgbi1bnw,1088\nnbconvert/utils/exceptions.py,sha256=9Rp0G1za4iwPvh6nodUEZcak6TrOYUktLeZFhWD6_RI,672\nnbconvert/utils/io.py,sha256=y5OIvfyp_MSyRSmjiNYeaMmw_TiKNlMEar9W5Y0aDV0,3177\nnbconvert/utils/iso639_1.py,sha256=I6M4iFbwv3lieLuZDi53XQ7_3evwgP9V-N_ehWMN5XY,1995\nnbconvert/utils/lexers.py,sha256=HaII0tT3u_Bk0Fxov6puL4HbOBqydQmn5Y5L6f-s4Ys,243\nnbconvert/utils/pandoc.py,sha256=F41vDlCibRCEpa_K9kafyNG5h2ieA36jLrgdg37_CAA,4580\nnbconvert/utils/text.py,sha256=F7tJvoHhWgyoc8mWvSVmYqJOflT03gCFA4Yvt5U_27k,1083\nnbconvert/utils/version.py,sha256=k1b0vZTfOH5qFYBc1PGxUz7KBbdfF4R60-rGQnIAwrc,951\nnbconvert/writers/__init__.py,sha256=2lRA2ReO1nIifY2aBkPWDWcqNgvxey45Y3dqk9ia3bY,124\nnbconvert/writers/__pycache__/__init__.cpython-313.pyc,,\nnbconvert/writers/__pycache__/base.cpython-313.pyc,,\nnbconvert/writers/__pycache__/debug.cpython-313.pyc,,\nnbconvert/writers/__pycache__/files.cpython-313.pyc,,\nnbconvert/writers/__pycache__/stdout.cpython-313.pyc,,\nnbconvert/writers/base.py,sha256=DdpzXCqMnyYlZxwgKnlh5qOTj_y5rAVzJ6aHLa6EMQE,1238\nnbconvert/writers/debug.py,sha256=FfOQzIbw_XLPE32O2Hv8bMFM49PpqfR7Sy4G-gf1EYc,1534\nnbconvert/writers/files.py,sha256=kBZ_tFyg3o8BsjnVACXCrK069xwAkbn3nvypUz8KX-o,6046\nnbconvert/writers/stdout.py,sha256=lCOHcb7pE6dTElmoWl-Fabg0zYdzXZ-hvmsEyxLQIDQ,538\n | .venv\Lib\site-packages\nbconvert-7.16.6.dist-info\RECORD | RECORD | Other | 17,605 | 0.7 | 0 | 0 | react-lib | 910 | 2023-08-27T10:20:01.800233 | MIT | false | 5c1748954e92c79576413d5aaa9712bc |
Wheel-Version: 1.0\nGenerator: hatchling 1.27.0\nRoot-Is-Purelib: true\nTag: py3-none-any\n | .venv\Lib\site-packages\nbconvert-7.16.6.dist-info\WHEEL | WHEEL | Other | 87 | 0.5 | 0 | 0 | awesome-app | 582 | 2025-05-03T23:37:31.188526 | Apache-2.0 | false | e2fcb0ad9ea59332c808928b4b439e7a |
BSD 3-Clause License\n\n- Copyright (c) 2001-2015, IPython Development Team\n- Copyright (c) 2015-, Jupyter Development Team\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n | .venv\Lib\site-packages\nbconvert-7.16.6.dist-info\licenses\LICENSE | LICENSE | Other | 1,588 | 0.7 | 0 | 0 | awesome-app | 599 | 2024-12-04T13:48:41.612078 | MIT | false | 083556a9912a35360dae8281fb57e886 |
"""API for converting notebooks between versions."""\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\nfrom __future__ import annotations\n\nfrom . import versions\nfrom .reader import get_version\nfrom .validator import ValidationError\n\n\ndef convert(nb, to_version):\n """Convert a notebook node object to a specific version. Assumes that\n all the versions starting from 1 to the latest major X are implemented.\n In other words, there should never be a case where v1 v2 v3 v5 exist without\n a v4. Also assumes that all conversions can be made in one step increments\n between major versions and ignores minor revisions.\n\n Parameters\n ----------\n nb : NotebookNode\n to_version : int\n Major revision to convert the notebook to. Can either be an upgrade or\n a downgrade.\n\n Raises\n ------\n ValueError\n Notebook failed to convert.\n ValueError\n The version specified is invalid or doesn't exist.\n ValidationError\n Conversion failed due to missing expected attributes.\n """\n\n # Get input notebook version.\n (version, version_minor) = get_version(nb)\n\n # Check if destination is target version, if so return contents\n if version == to_version:\n return nb\n\n # If the version exist, try to convert to it one step at a time.\n if to_version in versions:\n # Get the the version that this recursion will convert to as a step\n # closer to the final revision. Make sure the newer of the conversion\n # functions is used to perform the conversion.\n if to_version > version:\n step_version = version + 1\n convert_function = versions[step_version].upgrade\n else:\n step_version = version - 1\n convert_function = versions[version].downgrade\n\n try:\n # Convert and make sure version changed during conversion.\n converted = convert_function(nb)\n if converted.get("nbformat", 1) == version:\n msg = "Failed to convert notebook from v%d to v%d." 
% (version, step_version)\n raise ValueError(msg)\n except AttributeError as e:\n msg = f"Notebook could not be converted from version {version} to version {step_version} because it's missing a key: {e}"\n raise ValidationError(msg) from None\n\n # Recursively convert until target version is reached.\n return convert(converted, to_version)\n raise ValueError(\n "Cannot convert notebook to v%d because that version doesn't exist" % (to_version)\n )\n | .venv\Lib\site-packages\nbformat\converter.py | converter.py | Python | 2,619 | 0.95 | 0.144928 | 0.172414 | vue-tools | 46 | 2024-01-06T11:07:50.677356 | MIT | false | 8d9814583dad41150bd659d07ad91b04 |
"""Deprecated API for working with notebooks\n\n- use nbformat for read/write/validate public API\n- use nbformat.vX directly for Python API for composing notebooks\n"""\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\nfrom __future__ import annotations\n\nimport re\nimport warnings\n\nfrom traitlets.log import get_logger\n\nfrom nbformat import v3 as _v_latest\nfrom nbformat.v3 import (\n NotebookNode,\n nbformat,\n nbformat_minor,\n nbformat_schema,\n new_author,\n new_code_cell,\n new_heading_cell,\n new_metadata,\n new_notebook,\n new_output,\n new_text_cell,\n new_worksheet,\n parse_filename,\n to_notebook_json,\n)\n\nfrom . import versions\nfrom .converter import convert\nfrom .reader import reads as reader_reads\nfrom .validator import ValidationError, validate\n\nwarnings.warn(\n """nbformat.current is deprecated since before nbformat 3.0\n\n- use nbformat for read/write/validate public API\n- use nbformat.vX directly to composing notebooks of a particular version\n""",\n DeprecationWarning,\n stacklevel=2,\n)\n\n__all__ = [\n "NotebookNode",\n "new_code_cell",\n "new_text_cell",\n "new_notebook",\n "new_output",\n "new_worksheet",\n "parse_filename",\n "new_metadata",\n "new_author",\n "new_heading_cell",\n "nbformat",\n "nbformat_minor",\n "nbformat_schema",\n "to_notebook_json",\n "convert",\n "validate",\n "NBFormatError",\n "parse_py",\n "reads_json",\n "writes_json",\n "reads_py",\n "writes_py",\n "reads",\n "writes",\n "read",\n "write",\n]\n\ncurrent_nbformat = nbformat\ncurrent_nbformat_minor = nbformat_minor\ncurrent_nbformat_module = _v_latest.__name__\n\n\nclass NBFormatError(ValueError):\n """An error raised for an nbformat error."""\n\n\ndef _warn_format():\n warnings.warn(\n """Non-JSON file support in nbformat is deprecated since nbformat 1.0.\n Use nbconvert to create files of other formats.""",\n stacklevel=2,\n )\n\n\ndef parse_py(s, **kwargs):\n """Parse a string into a (nbformat, 
string) tuple."""\n nbf = current_nbformat\n nbm = current_nbformat_minor\n\n pattern = r"# <nbformat>(?P<nbformat>\d+[\.\d+]*)</nbformat>"\n m = re.search(pattern, s)\n if m is not None:\n digits = m.group("nbformat").split(".")\n nbf = int(digits[0])\n if len(digits) > 1:\n nbm = int(digits[1])\n\n return nbf, nbm, s\n\n\ndef reads_json(nbjson, **kwargs):\n """DEPRECATED, use reads"""\n warnings.warn(\n "reads_json is deprecated since nbformat 3.0, use reads",\n DeprecationWarning,\n stacklevel=2,\n )\n return reads(nbjson)\n\n\ndef writes_json(nb, **kwargs):\n """DEPRECATED, use writes"""\n warnings.warn(\n "writes_json is deprecated since nbformat 3.0, use writes",\n DeprecationWarning,\n stacklevel=2,\n )\n return writes(nb, **kwargs)\n\n\ndef reads_py(s, **kwargs):\n """DEPRECATED: use nbconvert"""\n _warn_format()\n nbf, nbm, s = parse_py(s, **kwargs)\n if nbf in (2, 3):\n nb = versions[nbf].to_notebook_py(s, **kwargs)\n else:\n raise NBFormatError("Unsupported PY nbformat version: %i" % nbf)\n return nb\n\n\ndef writes_py(nb, **kwargs):\n """DEPRECATED: use nbconvert"""\n _warn_format()\n return versions[3].writes_py(nb, **kwargs)\n\n\n# High level API\n\n\ndef reads(s, format="DEPRECATED", version=current_nbformat, **kwargs):\n """Read a notebook from a string and return the NotebookNode object.\n\n This function properly handles notebooks of any version. 
The notebook\n returned will always be in the current version's format.\n\n Parameters\n ----------\n s : unicode\n The raw unicode string to read the notebook from.\n\n Returns\n -------\n nb : NotebookNode\n The notebook that was read.\n """\n if format not in {"DEPRECATED", "json"}:\n _warn_format()\n nb = reader_reads(s, **kwargs)\n nb = convert(nb, version)\n try:\n with warnings.catch_warnings():\n warnings.filterwarnings("ignore", category=DeprecationWarning)\n validate(nb, repair_duplicate_cell_ids=False)\n except ValidationError as e:\n get_logger().error("Notebook JSON is invalid: %s", e)\n return nb\n\n\ndef writes(nb, format="DEPRECATED", version=current_nbformat, **kwargs):\n """Write a notebook to a string in a given format in the current nbformat version.\n\n This function always writes the notebook in the current nbformat version.\n\n Parameters\n ----------\n nb : NotebookNode\n The notebook to write.\n version : int\n The nbformat version to write.\n Used for downgrading notebooks.\n\n Returns\n -------\n s : unicode\n The notebook string.\n """\n if format not in {"DEPRECATED", "json"}:\n _warn_format()\n nb = convert(nb, version)\n try:\n with warnings.catch_warnings():\n warnings.filterwarnings("ignore", category=DeprecationWarning)\n validate(nb, repair_duplicate_cell_ids=False)\n except ValidationError as e:\n get_logger().error("Notebook JSON is invalid: %s", e)\n return versions[version].writes_json(nb, **kwargs)\n\n\ndef read(fp, format="DEPRECATED", **kwargs):\n """Read a notebook from a file and return the NotebookNode object.\n\n This function properly handles notebooks of any version. 
The notebook\n returned will always be in the current version's format.\n\n Parameters\n ----------\n fp : file\n Any file-like object with a read method.\n\n Returns\n -------\n nb : NotebookNode\n The notebook that was read.\n """\n return reads(fp.read(), **kwargs)\n\n\ndef write(nb, fp, format="DEPRECATED", **kwargs):\n """Write a notebook to a file in a given format in the current nbformat version.\n\n This function always writes the notebook in the current nbformat version.\n\n Parameters\n ----------\n nb : NotebookNode\n The notebook to write.\n fp : file\n Any file-like object with a write method.\n """\n s = writes(nb, **kwargs)\n if isinstance(s, bytes):\n s = s.decode("utf8")\n return fp.write(s)\n | .venv\Lib\site-packages\nbformat\current.py | current.py | Python | 6,137 | 0.95 | 0.122951 | 0.015228 | react-lib | 988 | 2024-08-08T07:35:27.879748 | Apache-2.0 | false | 49bc0cf535b1540c2844395c0d32f82e |
"""\nCommon validator wrapper to provide a uniform usage of other schema validation\nlibraries.\n"""\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\nfrom __future__ import annotations\n\nimport os\n\nimport fastjsonschema\nimport jsonschema\nfrom fastjsonschema import JsonSchemaException as _JsonSchemaException\nfrom jsonschema import Draft4Validator as _JsonSchemaValidator\nfrom jsonschema.exceptions import ErrorTree, ValidationError\n\n__all__ = [\n "ValidationError",\n "JsonSchemaValidator",\n "FastJsonSchemaValidator",\n "get_current_validator",\n "VALIDATORS",\n]\n\n\nclass JsonSchemaValidator:\n """A json schema validator."""\n\n name = "jsonschema"\n\n def __init__(self, schema):\n """Initialize the validator."""\n self._schema = schema\n self._default_validator = _JsonSchemaValidator(schema) # Default\n self._validator = self._default_validator\n\n def validate(self, data):\n """Validate incoming data."""\n self._default_validator.validate(data)\n\n def iter_errors(self, data, schema=None):\n """Iterate over errors in incoming data."""\n if schema is None:\n return self._default_validator.iter_errors(data)\n if hasattr(self._default_validator, "evolve"):\n return self._default_validator.evolve(schema=schema).iter_errors(data)\n return self._default_validator.iter_errors(data, schema)\n\n def error_tree(self, errors):\n """Create an error tree for the errors."""\n return ErrorTree(errors=errors)\n\n\nclass FastJsonSchemaValidator(JsonSchemaValidator):\n """A schema validator using fastjsonschema."""\n\n name = "fastjsonschema"\n\n def __init__(self, schema):\n """Initialize the validator."""\n super().__init__(schema)\n self._validator = fastjsonschema.compile(schema)\n\n def validate(self, data):\n """Validate incoming data."""\n try:\n self._validator(data)\n except _JsonSchemaException as error:\n raise ValidationError(str(error), schema_path=error.path) from error\n\n def iter_errors(self, data, 
schema=None):\n """Iterate over errors in incoming data."""\n if schema is not None:\n return super().iter_errors(data, schema)\n\n errors = []\n validate_func = self._validator\n try:\n validate_func(data)\n except _JsonSchemaException as error:\n errors = [ValidationError(str(error), schema_path=error.path)]\n\n return errors\n\n def error_tree(self, errors):\n """Create an error tree for the errors."""\n # fastjsonschema's exceptions don't contain the same information that the jsonschema ValidationErrors\n # do. This method is primarily used for introspecting metadata schema failures so that we can strip\n # them if asked to do so in `nbformat.validate`.\n # Another way forward for compatibility: we could distill both validator errors into a custom collection\n # for this data. Since implementation details of ValidationError is used elsewhere, we would probably\n # just use this data for schema introspection.\n msg = "JSON schema error introspection not enabled for fastjsonschema"\n raise NotImplementedError(msg)\n\n\n_VALIDATOR_MAP = [\n ("fastjsonschema", fastjsonschema, FastJsonSchemaValidator),\n ("jsonschema", jsonschema, JsonSchemaValidator),\n]\nVALIDATORS = [item[0] for item in _VALIDATOR_MAP]\n\n\ndef _validator_for_name(validator_name):\n if validator_name not in VALIDATORS:\n msg = f"Invalid validator '{validator_name}' value!\nValid values are: {VALIDATORS}"\n raise ValueError(msg)\n\n for name, module, validator_cls in _VALIDATOR_MAP:\n if module and validator_name == name:\n return validator_cls\n # we always return something.\n msg = f"Missing validator for {validator_name!r}"\n raise ValueError(msg)\n\n\ndef get_current_validator():\n """\n Return the default validator based on the value of an environment variable.\n """\n validator_name = os.environ.get("NBFORMAT_VALIDATOR", "fastjsonschema")\n return _validator_for_name(validator_name)\n | .venv\Lib\site-packages\nbformat\json_compat.py | json_compat.py | Python | 4,200 | 0.95 | 0.243902 | 
0.09375 | vue-tools | 288 | 2025-05-15T18:23:00.583150 | Apache-2.0 | false | 07dae484aea15c947d9083fce8150e03 |
"""NotebookNode - adding attribute access to dicts"""\n\nfrom __future__ import annotations\n\nfrom collections.abc import Mapping\n\nfrom ._struct import Struct\n\n\nclass NotebookNode(Struct):\n """A dict-like node with attribute-access"""\n\n def __setitem__(self, key, value):\n """Set an item on the notebook."""\n if isinstance(value, Mapping) and not isinstance(value, NotebookNode):\n value = from_dict(value)\n super().__setitem__(key, value)\n\n def update(self, *args, **kwargs):\n """\n A dict-like update method based on CPython's MutableMapping `update`\n method.\n """\n if len(args) > 1:\n raise TypeError("update expected at most 1 arguments, got %d" % len(args))\n if args:\n other = args[0]\n if isinstance(other, Mapping): # noqa: SIM114\n for key in other:\n self[key] = other[key]\n elif hasattr(other, "keys"):\n for key in other:\n self[key] = other[key]\n else:\n for key, value in other:\n self[key] = value\n for key, value in kwargs.items():\n self[key] = value\n\n\ndef from_dict(d):\n """Convert dict to dict-like NotebookNode\n\n Recursively converts any dict in the container to a NotebookNode.\n This does not check that the contents of the dictionary make a valid\n notebook or part of a notebook.\n """\n if isinstance(d, dict):\n return NotebookNode({k: from_dict(v) for k, v in d.items()})\n if isinstance(d, (tuple, list)):\n return [from_dict(i) for i in d]\n return d\n | .venv\Lib\site-packages\nbformat\notebooknode.py | notebooknode.py | Python | 1,654 | 0.95 | 0.307692 | 0 | awesome-app | 7 | 2024-05-24T13:07:21.369527 | GPL-3.0 | false | ce5dbde059e97394e12777c639ae79a0 |
"""API for reading notebooks of different versions"""\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\nfrom __future__ import annotations\n\nimport json\n\nfrom .validator import ValidationError\n\n\nclass NotJSONError(ValueError):\n """An error raised when an object is not valid JSON."""\n\n\ndef parse_json(s, **kwargs):\n """Parse a JSON string into a dict."""\n try:\n nb_dict = json.loads(s, **kwargs)\n except ValueError as e:\n message = f"Notebook does not appear to be JSON: {s!r}"\n # Limit the error message to 80 characters. Display whatever JSON will fit.\n if len(message) > 80:\n message = message[:77] + "..."\n raise NotJSONError(message) from e\n return nb_dict\n\n\n# High level API\n\n\ndef get_version(nb):\n """Get the version of a notebook.\n\n Parameters\n ----------\n nb : dict\n NotebookNode or dict containing notebook data.\n\n Returns\n -------\n Tuple containing major (int) and minor (int) version numbers\n """\n major = nb.get("nbformat", 1)\n minor = nb.get("nbformat_minor", 0)\n return (major, minor)\n\n\ndef reads(s, **kwargs):\n """Read a notebook from a json string and return the\n NotebookNode object.\n\n This function properly reads notebooks of any version. No version\n conversion is performed.\n\n Parameters\n ----------\n s : unicode | bytes\n The raw string or bytes object to read the notebook from.\n\n Returns\n -------\n nb : NotebookNode\n The notebook that was read.\n\n Raises\n ------\n ValidationError\n Notebook JSON for a given version is missing an expected key and cannot be read.\n NBFormatError\n Specified major version is invalid or unsupported.\n """\n from . 
import NBFormatError, versions\n\n nb_dict = parse_json(s, **kwargs)\n (major, minor) = get_version(nb_dict)\n if major in versions:\n try:\n return versions[major].to_notebook_json(nb_dict, minor=minor)\n except AttributeError as e:\n msg = f"The notebook is invalid and is missing an expected key: {e}"\n raise ValidationError(msg) from None\n else:\n raise NBFormatError("Unsupported nbformat version %s" % major)\n\n\ndef read(fp, **kwargs):\n """Read a notebook from a file and return the NotebookNode object.\n\n This function properly reads notebooks of any version. No version\n conversion is performed.\n\n Parameters\n ----------\n fp : file\n Any file-like object with a read method.\n\n Returns\n -------\n nb : NotebookNode\n The notebook that was read.\n """\n return reads(fp.read(), **kwargs)\n | .venv\Lib\site-packages\nbformat\reader.py | reader.py | Python | 2,687 | 0.95 | 0.126214 | 0.051282 | awesome-app | 116 | 2025-02-06T05:35:33.770931 | Apache-2.0 | false | 5e77c3ae5878304cc6b15eaf9f951abe |
"""Sentinel class for constants with useful reprs"""\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\nfrom __future__ import annotations\n\n\nclass Sentinel:\n """Sentinel class for constants with useful reprs"""\n\n def __init__(self, name, module, docstring=None):\n """Initialize the sentinel."""\n self.name = name\n self.module = module\n if docstring:\n self.__doc__ = docstring\n\n def __repr__(self):\n """The string repr for the sentinel."""\n return str(self.module) + "." + self.name\n | .venv\Lib\site-packages\nbformat\sentinel.py | sentinel.py | Python | 595 | 0.95 | 0.45 | 0.133333 | awesome-app | 321 | 2024-01-31T19:36:41.912044 | MIT | false | 5c7966d0523b4e509984176b0376c282 |
"""Utilities for signing notebooks"""\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\nfrom __future__ import annotations\n\nimport hashlib\nimport os\nimport sys\nimport typing as t\nfrom collections import OrderedDict\nfrom contextlib import contextmanager\nfrom datetime import datetime, timezone\nfrom hmac import HMAC\nfrom pathlib import Path\n\ntry:\n import sqlite3\n\n # Use adapters recommended by Python 3.12 stdlib docs.\n # https://docs.python.org/3.12/library/sqlite3.html#default-adapters-and-converters-deprecated\n def adapt_datetime_iso(val):\n """Adapt datetime.datetime to timezone-naive ISO 8601 date."""\n return val.isoformat()\n\n def convert_datetime(val):\n """Convert ISO 8601 datetime to datetime.datetime object."""\n return datetime.fromisoformat(val.decode())\n\n sqlite3.register_adapter(datetime, adapt_datetime_iso)\n sqlite3.register_converter("datetime", convert_datetime)\nexcept ImportError:\n try:\n from pysqlite2 import dbapi2 as sqlite3 # type:ignore[no-redef]\n except ImportError:\n sqlite3 = None # type:ignore[assignment]\n\nfrom base64 import encodebytes\n\nfrom jupyter_core.application import JupyterApp, base_flags\nfrom traitlets import Any, Bool, Bytes, Callable, Enum, Instance, Integer, Unicode, default, observe\nfrom traitlets.config import LoggingConfigurable, MultipleInstanceError\n\nfrom . 
import NO_CONVERT, __version__, read, reads\n\nalgorithms_set = hashlib.algorithms_guaranteed\n# The shake algorithms in are not compatible with hmac\n# due to required length argument in digests\nalgorithms = [a for a in algorithms_set if not a.startswith("shake_")]\n\n\nclass SignatureStore:\n """Base class for a signature store."""\n\n def store_signature(self, digest, algorithm):\n """Implement in subclass to store a signature.\n\n Should not raise if the signature is already stored.\n """\n raise NotImplementedError\n\n def check_signature(self, digest, algorithm):\n """Implement in subclass to check if a signature is known.\n\n Return True for a known signature, False for unknown.\n """\n raise NotImplementedError\n\n def remove_signature(self, digest, algorithm):\n """Implement in subclass to delete a signature.\n\n Should not raise if the signature is not stored.\n """\n raise NotImplementedError\n\n def close(self):\n """Close any open connections this store may use.\n\n If the store maintains any open connections (e.g. 
to a database),\n they should be closed.\n """\n\n\nclass MemorySignatureStore(SignatureStore):\n """Non-persistent storage of signatures in memory."""\n\n cache_size = 65535\n\n def __init__(self):\n """Initialize a memory signature store."""\n # We really only want an ordered set, but the stdlib has OrderedDict,\n # and it's easy to use a dict as a set.\n self.data = OrderedDict()\n\n def store_signature(self, digest, algorithm):\n """Store a signature."""\n key = (digest, algorithm)\n # Pop it so it goes to the end when we reinsert it\n self.data.pop(key, None)\n self.data[key] = None\n\n self._maybe_cull()\n\n def _maybe_cull(self):\n """If more than cache_size signatures are stored, delete the oldest 25%"""\n if len(self.data) < self.cache_size:\n return\n\n for _ in range(len(self.data) // 4):\n self.data.popitem(last=False)\n\n def check_signature(self, digest, algorithm):\n """Check a signature."""\n key = (digest, algorithm)\n if key in self.data:\n # Move it to the end (.move_to_end() method is new in Py3)\n del self.data[key]\n self.data[key] = None\n return True\n return False\n\n def remove_signature(self, digest, algorithm):\n """Remove a signature."""\n self.data.pop((digest, algorithm), None)\n\n\nclass SQLiteSignatureStore(SignatureStore, LoggingConfigurable):\n """Store signatures in an SQLite database."""\n\n # 64k entries ~ 12MB\n cache_size = Integer(\n 65535,\n help="""The number of notebook signatures to cache.\n When the number of signatures exceeds this value,\n the oldest 25% of signatures will be culled.\n """,\n ).tag(config=True)\n\n def __init__(self, db_file, **kwargs):\n """Initialize a sql signature store."""\n super().__init__(**kwargs)\n self.db_file = db_file\n self.db = self._connect_db(db_file)\n\n def close(self):\n """Close the db."""\n if self.db is not None:\n self.db.close()\n\n def _connect_db(self, db_file):\n kwargs: dict[str, t.Any] = {\n "detect_types": sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES\n }\n db = 
None\n try:\n db = sqlite3.connect(db_file, **kwargs)\n self.init_db(db)\n except (sqlite3.DatabaseError, sqlite3.OperationalError):\n if db_file != ":memory:":\n old_db_location = db_file + ".bak"\n if db is not None:\n db.close()\n self.log.warning(\n (\n "The signatures database cannot be opened; maybe it is corrupted or encrypted. "\n "You may need to rerun your notebooks to ensure that they are trusted to run Javascript. "\n "The old signatures database has been renamed to %s and a new one has been created."\n ),\n old_db_location,\n )\n try:\n Path(db_file).rename(old_db_location)\n db = sqlite3.connect(db_file, **kwargs)\n self.init_db(db)\n except (sqlite3.DatabaseError, sqlite3.OperationalError, OSError):\n if db is not None:\n db.close()\n self.log.warning(\n "Failed committing signatures database to disk. "\n "You may need to move the database file to a non-networked file system, "\n "using config option `NotebookNotary.db_file`. "\n "Using in-memory signatures database for the remainder of this session."\n )\n self.db_file = ":memory:"\n db = sqlite3.connect(":memory:", **kwargs)\n self.init_db(db)\n else:\n raise\n return db\n\n def init_db(self, db):\n """Initialize the db."""\n db.execute(\n """\n CREATE TABLE IF NOT EXISTS nbsignatures\n (\n id integer PRIMARY KEY AUTOINCREMENT,\n algorithm text,\n signature text,\n path text,\n last_seen timestamp\n )"""\n )\n db.execute(\n """\n CREATE INDEX IF NOT EXISTS algosig ON nbsignatures(algorithm, signature)\n """\n )\n db.commit()\n\n def store_signature(self, digest, algorithm):\n """Store a signature in the db."""\n if self.db is None:\n return\n if not self.check_signature(digest, algorithm):\n self.db.execute(\n """\n INSERT INTO nbsignatures (algorithm, signature, last_seen)\n VALUES (?, ?, ?)\n """,\n (algorithm, digest, datetime.now(tz=timezone.utc)),\n )\n else:\n self.db.execute(\n """UPDATE nbsignatures SET last_seen = ? WHERE\n algorithm = ? 
AND\n signature = ?;\n """,\n (datetime.now(tz=timezone.utc), algorithm, digest),\n )\n self.db.commit()\n\n # Check size and cull old entries if necessary\n (n,) = self.db.execute("SELECT Count(*) FROM nbsignatures").fetchone()\n if n > self.cache_size:\n self.cull_db()\n\n def check_signature(self, digest, algorithm):\n """Check a signature against the db."""\n if self.db is None:\n return False\n r = self.db.execute(\n """SELECT id FROM nbsignatures WHERE\n algorithm = ? AND\n signature = ?;\n """,\n (algorithm, digest),\n ).fetchone()\n if r is None:\n return False\n self.db.execute(\n """UPDATE nbsignatures SET last_seen = ? WHERE\n algorithm = ? AND\n signature = ?;\n """,\n (datetime.now(tz=timezone.utc), algorithm, digest),\n )\n self.db.commit()\n return True\n\n def remove_signature(self, digest, algorithm):\n """Remove a signature from the db."""\n self.db.execute(\n """DELETE FROM nbsignatures WHERE\n algorithm = ? AND\n signature = ?;\n """,\n (algorithm, digest),\n )\n\n self.db.commit()\n\n def cull_db(self):\n """Cull oldest 25% of the trusted signatures when the size limit is reached"""\n self.db.execute(\n """DELETE FROM nbsignatures WHERE id IN (\n SELECT id FROM nbsignatures ORDER BY last_seen DESC LIMIT -1 OFFSET ?\n );\n """,\n (max(int(0.75 * self.cache_size), 1),),\n )\n\n\ndef yield_everything(obj):\n """Yield every item in a container as bytes\n\n Allows any JSONable object to be passed to an HMAC digester\n without having to serialize the whole thing.\n """\n if isinstance(obj, dict):\n for key in sorted(obj):\n value = obj[key]\n assert isinstance(key, str)\n yield key.encode()\n yield from yield_everything(value)\n elif isinstance(obj, (list, tuple)):\n for element in obj:\n yield from yield_everything(element)\n elif isinstance(obj, str):\n yield obj.encode("utf8")\n else:\n yield str(obj).encode("utf8")\n\n\ndef yield_code_cells(nb):\n """Iterator that yields all cells in a notebook\n\n nbformat version independent\n """\n if 
nb.nbformat >= 4:\n for cell in nb["cells"]:\n if cell["cell_type"] == "code":\n yield cell\n elif nb.nbformat == 3:\n for ws in nb["worksheets"]:\n for cell in ws["cells"]:\n if cell["cell_type"] == "code":\n yield cell\n\n\n@contextmanager\ndef signature_removed(nb):\n """Context manager for operating on a notebook with its signature removed\n\n Used for excluding the previous signature when computing a notebook's signature.\n """\n save_signature = nb["metadata"].pop("signature", None)\n try:\n yield\n finally:\n if save_signature is not None:\n nb["metadata"]["signature"] = save_signature\n\n\nclass NotebookNotary(LoggingConfigurable):\n """A class for computing and verifying notebook signatures."""\n\n data_dir = Unicode(help="""The storage directory for notary secret and database.""").tag(\n config=True\n )\n\n @default("data_dir")\n def _data_dir_default(self):\n app = None\n try:\n if JupyterApp.initialized():\n app = JupyterApp.instance()\n except MultipleInstanceError:\n pass\n if app is None:\n # create an app, without the global instance\n app = JupyterApp()\n app.initialize(argv=[])\n return app.data_dir\n\n store_factory = Callable(\n help="""A callable returning the storage backend for notebook signatures.\n The default uses an SQLite database."""\n ).tag(config=True)\n\n @default("store_factory")\n def _store_factory_default(self):\n def factory():\n if sqlite3 is None:\n self.log.warning( # type:ignore[unreachable]\n "Missing SQLite3, all notebooks will be untrusted!"\n )\n return MemorySignatureStore()\n return SQLiteSignatureStore(self.db_file)\n\n return factory\n\n db_file = Unicode(\n help="""The sqlite file in which to store notebook signatures.\n By default, this will be in your Jupyter data directory.\n You can set it to ':memory:' to disable sqlite writing to the filesystem.\n """\n ).tag(config=True)\n\n @default("db_file")\n def _db_file_default(self):\n if not self.data_dir:\n return ":memory:"\n return str(Path(self.data_dir) / 
"nbsignatures.db")\n\n algorithm = Enum(\n algorithms,\n default_value="sha256",\n help="""The hashing algorithm used to sign notebooks.""",\n ).tag(config=True)\n\n @observe("algorithm")\n def _algorithm_changed(self, change):\n self.digestmod = getattr(hashlib, change["new"])\n\n digestmod = Any()\n\n @default("digestmod")\n def _digestmod_default(self):\n return getattr(hashlib, self.algorithm)\n\n secret_file = Unicode(help="""The file where the secret key is stored.""").tag(config=True)\n\n @default("secret_file")\n def _secret_file_default(self):\n if not self.data_dir:\n return ""\n return str(Path(self.data_dir) / "notebook_secret")\n\n secret = Bytes(help="""The secret key with which notebooks are signed.""").tag(config=True)\n\n @default("secret")\n def _secret_default(self):\n # note : this assumes an Application is running\n if Path(self.secret_file).exists():\n with Path(self.secret_file).open("rb") as f:\n return f.read()\n else:\n secret = encodebytes(os.urandom(1024))\n self._write_secret_file(secret)\n return secret\n\n def __init__(self, **kwargs):\n """Initialize the notary."""\n super().__init__(**kwargs)\n self.store = self.store_factory()\n\n def _write_secret_file(self, secret):\n """write my secret to my secret_file"""\n self.log.info("Writing notebook-signing key to %s", self.secret_file)\n with Path(self.secret_file).open("wb") as f:\n f.write(secret)\n try:\n Path(self.secret_file).chmod(0o600)\n except OSError:\n self.log.warning("Could not set permissions on %s", self.secret_file)\n return secret\n\n def compute_signature(self, nb):\n """Compute a notebook's signature\n\n by hashing the entire contents of the notebook via HMAC digest.\n """\n hmac = HMAC(self.secret, digestmod=self.digestmod)\n # don't include the previous hash in the content to hash\n with signature_removed(nb):\n # sign the whole thing\n for b in yield_everything(nb):\n hmac.update(b)\n\n return hmac.hexdigest()\n\n def check_signature(self, nb):\n """Check a 
notebook's stored signature\n\n If a signature is stored in the notebook's metadata,\n a new signature is computed and compared with the stored value.\n\n Returns True if the signature is found and matches, False otherwise.\n\n The following conditions must all be met for a notebook to be trusted:\n - a signature is stored in the form 'scheme:hexdigest'\n - the stored scheme matches the requested scheme\n - the requested scheme is available from hashlib\n - the computed hash from notebook_signature matches the stored hash\n """\n if nb.nbformat < 3:\n return False\n signature = self.compute_signature(nb)\n return self.store.check_signature(signature, self.algorithm)\n\n def sign(self, nb):\n """Sign a notebook, indicating that its output is trusted on this machine\n\n Stores hash algorithm and hmac digest in a local database of trusted notebooks.\n """\n if nb.nbformat < 3:\n return\n signature = self.compute_signature(nb)\n self.store.store_signature(signature, self.algorithm)\n\n def unsign(self, nb):\n """Ensure that a notebook is untrusted\n\n by removing its signature from the trusted database, if present.\n """\n signature = self.compute_signature(nb)\n self.store.remove_signature(signature, self.algorithm)\n\n def mark_cells(self, nb, trusted):\n """Mark cells as trusted if the notebook's signature can be verified\n\n Sets ``cell.metadata.trusted = True | False`` on all code cells,\n depending on the *trusted* parameter. 
This will typically be the return\n value from ``self.check_signature(nb)``.\n\n This function is the inverse of check_cells\n """\n if nb.nbformat < 3:\n return\n\n for cell in yield_code_cells(nb):\n cell["metadata"]["trusted"] = trusted\n\n def _check_cell(self, cell, nbformat_version):\n """Do we trust an individual cell?\n\n Return True if:\n\n - cell is explicitly trusted\n - cell has no potentially unsafe rich output\n\n If a cell has no output, or only simple print statements,\n it will always be trusted.\n """\n # explicitly trusted\n if cell["metadata"].pop("trusted", False):\n return True\n\n # explicitly safe output\n if nbformat_version >= 4:\n unsafe_output_types = ["execute_result", "display_data"]\n safe_keys = {"output_type", "execution_count", "metadata"}\n else: # v3\n unsafe_output_types = ["pyout", "display_data"]\n safe_keys = {"output_type", "prompt_number", "metadata"}\n\n for output in cell["outputs"]:\n output_type = output["output_type"]\n if output_type in unsafe_output_types:\n # if there are any data keys not in the safe whitelist\n output_keys = set(output)\n if output_keys.difference(safe_keys):\n return False\n\n return True\n\n def check_cells(self, nb):\n """Return whether all code cells are trusted.\n\n A cell is trusted if the 'trusted' field in its metadata is truthy, or\n if it has no potentially unsafe outputs.\n If there are no code cells, return True.\n\n This function is the inverse of mark_cells.\n """\n if nb.nbformat < 3:\n return False\n trusted = True\n for cell in yield_code_cells(nb):\n # only distrust a cell if it actually has some output to distrust\n if not self._check_cell(cell, nb.nbformat):\n trusted = False\n\n return trusted\n\n\ntrust_flags: dict[str, t.Any] = {\n "reset": (\n {"TrustNotebookApp": {"reset": True}},\n """Delete the trusted notebook cache.\n All previously signed notebooks will become untrusted.\n """,\n ),\n}\ntrust_flags.update(base_flags)\n\n\nclass TrustNotebookApp(JupyterApp):\n """An 
application for handling notebook trust."""\n\n version = __version__\n description = """Sign one or more Jupyter notebooks with your key,\n to trust their dynamic (HTML, Javascript) output.\n\n Otherwise, you will have to re-execute the notebook to see output.\n """\n # This command line tool should use the same config file as the notebook\n\n @default("config_file_name")\n def _config_file_name_default(self):\n return "jupyter_notebook_config"\n\n examples = """\n jupyter trust mynotebook.ipynb and_this_one.ipynb\n """\n\n flags = trust_flags\n\n reset = Bool(\n False,\n help="""If True, delete the trusted signature cache.\n After reset, all previously signed notebooks will become untrusted.\n """,\n ).tag(config=True)\n\n notary = Instance(NotebookNotary)\n\n @default("notary")\n def _notary_default(self):\n return NotebookNotary(parent=self, data_dir=self.data_dir)\n\n def sign_notebook_file(self, notebook_path):\n """Sign a notebook from the filesystem"""\n if not Path(notebook_path).exists():\n self.log.error("Notebook missing: %s", notebook_path)\n self.exit(1)\n with Path(notebook_path).open(encoding="utf8") as f:\n nb = read(f, NO_CONVERT)\n self.sign_notebook(nb, notebook_path)\n\n def sign_notebook(self, nb, notebook_path="<stdin>"):\n """Sign a notebook that's been loaded"""\n if self.notary.check_signature(nb):\n print("Notebook already signed: %s" % notebook_path) # noqa: T201\n else:\n print("Signing notebook: %s" % notebook_path) # noqa: T201\n self.notary.sign(nb)\n\n def generate_new_key(self):\n """Generate a new notebook signature key"""\n print("Generating new notebook key: %s" % self.notary.secret_file) # noqa: T201\n self.notary._write_secret_file(os.urandom(1024))\n\n def start(self):\n """Start the trust notebook app."""\n if self.reset:\n if Path(self.notary.db_file).exists():\n print("Removing trusted signature cache: %s" % self.notary.db_file) # noqa: T201\n Path(self.notary.db_file).unlink()\n self.generate_new_key()\n return\n if not 
self.extra_args:\n self.log.debug("Reading notebook from stdin")\n nb_s = sys.stdin.read()\n assert isinstance(nb_s, str)\n nb = reads(nb_s, NO_CONVERT)\n self.sign_notebook(nb, "<stdin>")\n else:\n for notebook_path in self.extra_args:\n self.sign_notebook_file(notebook_path)\n\n\nmain = TrustNotebookApp.launch_instance\n\nif __name__ == "__main__":\n main()\n | .venv\Lib\site-packages\nbformat\sign.py | sign.py | Python | 21,566 | 0.95 | 0.208655 | 0.039548 | awesome-app | 900 | 2025-03-08T03:16:01.662854 | GPL-3.0 | false | 87f617a870a5db957ba455a0ddb6ad0b |
"""Notebook format validators."""\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\nfrom __future__ import annotations\n\nimport json\nimport pprint\nimport warnings\nfrom copy import deepcopy\nfrom pathlib import Path\nfrom textwrap import dedent\nfrom typing import Any, Optional\n\nfrom ._imports import import_item\nfrom .corpus.words import generate_corpus_id\nfrom .json_compat import ValidationError, _validator_for_name, get_current_validator\nfrom .reader import get_version\nfrom .warnings import DuplicateCellId, MissingIDFieldWarning\n\nvalidators = {}\n_deprecated = object()\n\n\n__all__ = [\n "ValidationError",\n "get_validator",\n "isvalid",\n "NotebookValidationError",\n "better_validation_error",\n "normalize",\n "validate",\n "iter_validate",\n]\n\n\ndef _relax_additional_properties(obj):\n """relax any `additionalProperties`"""\n if isinstance(obj, dict):\n for key, value in obj.items():\n value = ( # noqa: PLW2901\n True if key == "additionalProperties" else _relax_additional_properties(value)\n )\n obj[key] = value\n elif isinstance(obj, list):\n for i, value in enumerate(obj):\n obj[i] = _relax_additional_properties(value)\n return obj\n\n\ndef _allow_undefined(schema):\n schema["definitions"]["cell"]["oneOf"].append({"$ref": "#/definitions/unrecognized_cell"})\n schema["definitions"]["output"]["oneOf"].append({"$ref": "#/definitions/unrecognized_output"})\n return schema\n\n\ndef get_validator(version=None, version_minor=None, relax_add_props=False, name=None):\n """Load the JSON schema into a Validator"""\n if version is None:\n from . 
import current_nbformat\n\n version = current_nbformat\n\n v = import_item("nbformat.v%s" % version)\n current_minor = getattr(v, "nbformat_minor", 0)\n if version_minor is None:\n version_minor = current_minor\n\n current_validator = _validator_for_name(name) if name else get_current_validator()\n\n version_tuple = (current_validator.name, version, version_minor)\n\n if version_tuple not in validators:\n try:\n schema_json = _get_schema_json(v, version=version, version_minor=version_minor)\n except AttributeError:\n return None\n\n if current_minor < version_minor:\n # notebook from the future, relax all `additionalProperties: False` requirements\n schema_json = _relax_additional_properties(schema_json)\n # and allow undefined cell types and outputs\n schema_json = _allow_undefined(schema_json)\n\n validators[version_tuple] = current_validator(schema_json)\n\n if relax_add_props:\n try:\n schema_json = _get_schema_json(v, version=version, version_minor=version_minor)\n except AttributeError:\n return None\n\n # this allows properties to be added for intermediate\n # representations while validating for all other kinds of errors\n schema_json = _relax_additional_properties(schema_json)\n validators[version_tuple] = current_validator(schema_json)\n\n return validators[version_tuple]\n\n\ndef _get_schema_json(v, version=None, version_minor=None):\n """\n Gets the json schema from a given imported library and nbformat version.\n """\n if (version, version_minor) in v.nbformat_schema:\n schema_path = str(Path(v.__file__).parent / v.nbformat_schema[(version, version_minor)])\n elif version_minor > v.nbformat_minor:\n # load the latest schema\n schema_path = str(Path(v.__file__).parent / v.nbformat_schema[(None, None)])\n else:\n msg = "Cannot find appropriate nbformat schema file."\n raise AttributeError(msg)\n with Path(schema_path).open(encoding="utf8") as f:\n schema_json = json.load(f)\n return schema_json # noqa: RET504\n\n\ndef isvalid(nbjson, ref=None, 
version=None, version_minor=None):\n """Checks whether the given notebook JSON conforms to the current\n notebook format schema. Returns True if the JSON is valid, and\n False otherwise.\n\n To see the individual errors that were encountered, please use the\n `validate` function instead.\n """\n orig = deepcopy(nbjson)\n try:\n with warnings.catch_warnings():\n warnings.filterwarnings("ignore", category=DeprecationWarning)\n warnings.filterwarnings("ignore", category=MissingIDFieldWarning)\n validate(nbjson, ref, version, version_minor, repair_duplicate_cell_ids=False)\n except ValidationError:\n return False\n else:\n return True\n finally:\n if nbjson != orig:\n raise AssertionError\n\n\ndef _format_as_index(indices):\n """\n (from jsonschema._utils.format_as_index, copied to avoid relying on private API)\n\n Construct a single string containing indexing operations for the indices.\n\n For example, [1, 2, "foo"] -> [1][2]["foo"]\n """\n\n if not indices:\n return ""\n return "[%s]" % "][".join(repr(index) for index in indices)\n\n\n_ITEM_LIMIT = 16\n_STR_LIMIT = 64\n\n\ndef _truncate_obj(obj):\n """Truncate objects for use in validation tracebacks\n\n Cell and output lists are squashed, as are long strings, lists, and dicts.\n """\n if isinstance(obj, dict):\n truncated_dict = {k: _truncate_obj(v) for k, v in list(obj.items())[:_ITEM_LIMIT]}\n if isinstance(truncated_dict.get("cells"), list):\n truncated_dict["cells"] = ["...%i cells..." % len(obj["cells"])]\n if isinstance(truncated_dict.get("outputs"), list):\n truncated_dict["outputs"] = ["...%i outputs..." % len(obj["outputs"])]\n\n if len(obj) > _ITEM_LIMIT:\n truncated_dict["..."] = "%i keys truncated" % (len(obj) - _ITEM_LIMIT)\n return truncated_dict\n if isinstance(obj, list):\n truncated_list = [_truncate_obj(item) for item in obj[:_ITEM_LIMIT]]\n if len(obj) > _ITEM_LIMIT:\n truncated_list.append("...%i items truncated..." 
% (len(obj) - _ITEM_LIMIT))\n return truncated_list\n if isinstance(obj, str):\n truncated_str = obj[:_STR_LIMIT]\n if len(obj) > _STR_LIMIT:\n truncated_str += "..."\n return truncated_str\n return obj\n\n\nclass NotebookValidationError(ValidationError): # type:ignore[misc]\n """Schema ValidationError with truncated representation\n\n to avoid massive verbose tracebacks.\n """\n\n def __init__(self, original, ref=None):\n """Initialize the error class."""\n self.original = original\n self.ref = getattr(self.original, "ref", ref)\n self.message = self.original.message\n\n def __getattr__(self, key):\n """Get an attribute from the error."""\n return getattr(self.original, key)\n\n def __unicode__(self):\n """Custom str for validation errors\n\n avoids dumping full schema and notebook to logs\n """\n error = self.original\n instance = _truncate_obj(error.instance)\n\n return "\n".join(\n [\n error.message,\n "",\n "Failed validating {!r} in {}{}:".format(\n error.validator,\n self.ref or "notebook",\n _format_as_index(list(error.relative_schema_path)[:-1]),\n ),\n "",\n "On instance%s:" % _format_as_index(error.relative_path),\n pprint.pformat(instance, width=78),\n ]\n )\n\n __str__ = __unicode__\n\n\ndef better_validation_error(error, version, version_minor):\n """Get better ValidationError on oneOf failures\n\n oneOf errors aren't informative.\n if it's a cell type or output_type error,\n try validating directly based on the type for a better error message\n """\n if not len(error.schema_path):\n return error\n key = error.schema_path[-1]\n ref = None\n if key.endswith("Of"):\n if isinstance(error.instance, dict):\n if "cell_type" in error.instance:\n ref = error.instance["cell_type"] + "_cell"\n elif "output_type" in error.instance:\n ref = error.instance["output_type"]\n\n if ref:\n try:\n validate(\n error.instance,\n ref,\n version=version,\n version_minor=version_minor,\n )\n except ValidationError as sub_error:\n # keep extending relative path\n 
error.relative_path.extend(sub_error.relative_path)\n sub_error.relative_path = error.relative_path\n better = better_validation_error(sub_error, version, version_minor)\n if better.ref is None:\n better.ref = ref\n return better\n except Exception: # noqa: S110\n # if it fails for some reason,\n # let the original error through\n pass\n return NotebookValidationError(error, ref)\n\n\ndef normalize(\n nbdict: Any,\n version: Optional[int] = None,\n version_minor: Optional[int] = None,\n *,\n relax_add_props: bool = False,\n strip_invalid_metadata: bool = False,\n) -> tuple[int, Any]:\n """\n Normalise a notebook prior to validation.\n\n This tries to implement a couple of normalisation steps to standardise\n notebooks and make validation easier.\n\n You should in general not rely on this function and make sure the notebooks\n that reach nbformat are already in a normal form. If not you likely have a bug,\n and may have security issues.\n\n Parameters\n ----------\n nbdict : dict\n notebook document\n version : int\n version_minor : int\n relax_add_props : bool\n Whether to allow extra property in the Json schema validating the\n notebook.\n strip_invalid_metadata : bool\n Whether to strip metadata that does not exist in the Json schema when\n validating the notebook.\n\n Returns\n -------\n changes : int\n number of changes in the notebooks\n notebook : dict\n deep-copy of the original object with relevant changes.\n\n """\n nbdict = deepcopy(nbdict)\n nbdict_version, nbdict_version_minor = get_version(nbdict)\n if version is None:\n version = nbdict_version\n if version_minor is None:\n version_minor = nbdict_version_minor\n return _normalize(\n nbdict,\n version,\n version_minor,\n True,\n relax_add_props=relax_add_props,\n strip_invalid_metadata=strip_invalid_metadata,\n )\n\n\ndef _normalize(\n nbdict: Any,\n version: int,\n version_minor: int,\n repair_duplicate_cell_ids: bool,\n relax_add_props: bool,\n strip_invalid_metadata: bool,\n) -> tuple[int, Any]:\n 
"""\n Private normalisation routine.\n\n This function attempts to normalize the `nbdict` passed to it.\n\n As `_normalize()` is currently used both in `validate()` (for\n historical reasons), and in the `normalize()` public function,\n `_normalize()` does currently mutate `nbdict`.\n Ideally, once `validate()` stops calling `_normalize()`, `_normalize()`\n may stop mutating `nbdict`.\n\n """\n changes = 0\n\n if (version, version_minor) >= (4, 5):\n # if we support cell ids ensure default ids are provided\n for cell in nbdict["cells"]:\n if "id" not in cell:\n warnings.warn(\n "Cell is missing an id field, this will become"\n " a hard error in future nbformat versions. You may want"\n " to use `normalize()` on your notebooks before validations"\n " (available since nbformat 5.1.4). Previous versions of nbformat"\n " are fixing this issue transparently, and will stop doing so"\n " in the future.",\n MissingIDFieldWarning,\n stacklevel=3,\n )\n # Generate cell ids if any are missing\n if repair_duplicate_cell_ids:\n cell["id"] = generate_corpus_id()\n changes += 1\n\n # if we support cell ids check for uniqueness when validating the whole notebook\n seen_ids = set()\n for cell in nbdict["cells"]:\n if "id" not in cell:\n continue\n cell_id = cell["id"]\n if cell_id in seen_ids:\n # Best effort to repair if we find a duplicate id\n if repair_duplicate_cell_ids:\n new_id = generate_corpus_id()\n cell["id"] = new_id\n changes += 1\n warnings.warn(\n f"Non-unique cell id {cell_id!r} detected. 
Corrected to {new_id!r}.",\n DuplicateCellId,\n stacklevel=3,\n )\n else:\n msg = f"Non-unique cell id '{cell_id}' detected."\n raise ValidationError(msg)\n seen_ids.add(cell_id)\n if strip_invalid_metadata:\n changes += _strip_invalida_metadata(\n nbdict, version, version_minor, relax_add_props=relax_add_props\n )\n return changes, nbdict\n\n\ndef _dep_warn(field):\n warnings.warn(\n dedent(\n f"""`{field}` kwargs of validate has been deprecated for security\n reasons, and will be removed soon.\n\n Please explicitly use the `n_changes, new_notebook = nbformat.validator.normalize(old_notebook, ...)` if you wish to\n normalise your notebook. `normalize` is available since nbformat 5.5.0\n\n """\n ),\n DeprecationWarning,\n stacklevel=3,\n )\n\n\ndef validate(\n nbdict: Any = None,\n ref: Optional[str] = None,\n version: Optional[int] = None,\n version_minor: Optional[int] = None,\n relax_add_props: bool = False,\n nbjson: Any = None,\n repair_duplicate_cell_ids: bool = _deprecated, # type: ignore[assignment]\n strip_invalid_metadata: bool = _deprecated, # type: ignore[assignment]\n) -> None:\n """Checks whether the given notebook dict-like object\n conforms to the relevant notebook format schema.\n\n Parameters\n ----------\n nbdict : dict\n notebook document\n ref : optional, str\n reference to the subset of the schema we want to validate against.\n for example ``"markdown_cell"``, `"code_cell"` ....\n version : int\n version_minor : int\n relax_add_props : bool\n Whether to allow extra properties in the JSON schema validating the notebook.\n When True, all known fields are validated, but unknown fields are ignored.\n nbjson\n repair_duplicate_cell_ids : bool\n Deprecated since 5.5.0 - will be removed in the future.\n strip_invalid_metadata : bool\n Deprecated since 5.5.0 - will be removed in the future.\n\n Returns\n -------\n None\n\n Raises\n ------\n ValidationError if not valid.\n\n Notes\n -----\n Prior to Nbformat 5.5.0 the `validate` and `isvalid` method 
would silently\n try to fix invalid notebook and mutate arguments. This behavior is deprecated\n and will be removed in a near future.\n\n Please explicitly call `normalize` if you need to normalize notebooks.\n """\n assert isinstance(ref, str) or ref is None\n\n if strip_invalid_metadata is _deprecated:\n strip_invalid_metadata = False\n else:\n _dep_warn("strip_invalid_metadata")\n\n if repair_duplicate_cell_ids is _deprecated:\n repair_duplicate_cell_ids = True\n else:\n _dep_warn("repair_duplicate_cell_ids")\n\n # backwards compatibility for nbjson argument\n if nbdict is not None:\n pass\n elif nbjson is not None:\n nbdict = nbjson\n else:\n msg = "validate() missing 1 required argument: 'nbdict'"\n raise TypeError(msg)\n\n if ref is None:\n # if ref is not specified, we have a whole notebook, so we can get the version\n nbdict_version, nbdict_version_minor = get_version(nbdict)\n if version is None:\n version = nbdict_version\n if version_minor is None:\n version_minor = nbdict_version_minor\n # if ref is specified, and we don't have a version number, assume we're validating against 1.0\n elif version is None:\n version, version_minor = 1, 0\n\n if ref is None:\n assert isinstance(version, int)\n assert isinstance(version_minor, int)\n _normalize(\n nbdict,\n version,\n version_minor,\n repair_duplicate_cell_ids,\n relax_add_props=relax_add_props,\n strip_invalid_metadata=strip_invalid_metadata,\n )\n\n for error in iter_validate(\n nbdict,\n ref=ref,\n version=version,\n version_minor=version_minor,\n relax_add_props=relax_add_props,\n strip_invalid_metadata=strip_invalid_metadata,\n ):\n raise error\n\n\ndef _get_errors(\n nbdict: Any, version: int, version_minor: int, relax_add_props: bool, *args: Any\n) -> Any:\n validator = get_validator(version, version_minor, relax_add_props=relax_add_props)\n if not validator:\n msg = f"No schema for validating v{version}.{version_minor} notebooks"\n raise ValidationError(msg)\n iter_errors = 
validator.iter_errors(nbdict, *args)\n errors = list(iter_errors)\n # jsonschema gives the best error messages.\n if len(errors) and validator.name != "jsonschema":\n validator = get_validator(\n version=version,\n version_minor=version_minor,\n relax_add_props=relax_add_props,\n name="jsonschema",\n )\n return validator.iter_errors(nbdict, *args)\n return iter(errors)\n\n\ndef _strip_invalida_metadata(\n nbdict: Any, version: int, version_minor: int, relax_add_props: bool\n) -> int:\n """\n This function tries to extract metadata errors from the validator and fix\n them if necessary. This mostly mean stripping unknown keys from metadata\n fields, or removing metadata fields altogether.\n\n Parameters\n ----------\n nbdict : dict\n notebook document\n version : int\n version_minor : int\n relax_add_props : bool\n Whether to allow extra property in the Json schema validating the\n notebook.\n\n Returns\n -------\n int\n number of modifications\n\n """\n errors = _get_errors(nbdict, version, version_minor, relax_add_props)\n changes = 0\n if len(list(errors)) > 0:\n # jsonschema gives a better error tree.\n validator = get_validator(\n version=version,\n version_minor=version_minor,\n relax_add_props=relax_add_props,\n name="jsonschema",\n )\n if not validator:\n msg = f"No jsonschema for validating v{version}.{version_minor} notebooks"\n raise ValidationError(msg)\n errors = validator.iter_errors(nbdict)\n error_tree = validator.error_tree(errors)\n if "metadata" in error_tree:\n for key in error_tree["metadata"]:\n nbdict["metadata"].pop(key, None)\n changes += 1\n\n if "cells" in error_tree:\n number_of_cells = len(nbdict.get("cells", 0))\n for cell_idx in range(number_of_cells):\n # Cells don't report individual metadata keys as having failed validation\n # Instead it reports that it failed to validate against each cell-type definition.\n # We have to delve into why those definitions failed to uncover which metadata\n # keys are misbehaving.\n if "oneOf" in 
error_tree["cells"][cell_idx].errors:\n intended_cell_type = nbdict["cells"][cell_idx]["cell_type"]\n schemas_by_index = [\n ref["$ref"]\n for ref in error_tree["cells"][cell_idx].errors["oneOf"].schema["oneOf"]\n ]\n cell_type_definition_name = f"#/definitions/{intended_cell_type}_cell"\n if cell_type_definition_name in schemas_by_index:\n schema_index = schemas_by_index.index(cell_type_definition_name)\n for error in error_tree["cells"][cell_idx].errors["oneOf"].context:\n rel_path = error.relative_path\n error_for_intended_schema = error.schema_path[0] == schema_index\n is_top_level_metadata_key = (\n len(rel_path) == 2 and rel_path[0] == "metadata"\n )\n if error_for_intended_schema and is_top_level_metadata_key:\n nbdict["cells"][cell_idx]["metadata"].pop(rel_path[1], None)\n changes += 1\n\n return changes\n\n\ndef iter_validate(\n nbdict=None,\n ref=None,\n version=None,\n version_minor=None,\n relax_add_props=False,\n nbjson=None,\n strip_invalid_metadata=False,\n):\n """Checks whether the given notebook dict-like object conforms to the\n relevant notebook format schema.\n\n Returns a generator of all ValidationErrors if not valid.\n\n Notes\n -----\n To fix: For security reasons, this function should *never* mutate its `nbdict` argument, and\n should *never* try to validate a mutated or modified version of its notebook.\n\n """\n # backwards compatibility for nbjson argument\n if nbdict is not None:\n pass\n elif nbjson is not None:\n nbdict = nbjson\n else:\n msg = "iter_validate() missing 1 required argument: 'nbdict'"\n raise TypeError(msg)\n\n if version is None:\n version, version_minor = get_version(nbdict)\n\n if ref:\n try:\n errors = _get_errors(\n nbdict,\n version,\n version_minor,\n relax_add_props,\n {"$ref": "#/definitions/%s" % ref},\n )\n except ValidationError as e:\n yield e\n return\n\n else:\n if strip_invalid_metadata:\n _strip_invalida_metadata(nbdict, version, version_minor, relax_add_props)\n\n # Validate one more time to ensure 
that us removing metadata\n # didn't cause another complex validation issue in the schema.\n # Also to ensure that higher-level errors produced by individual metadata validation\n # failures are removed.\n try:\n errors = _get_errors(nbdict, version, version_minor, relax_add_props)\n except ValidationError as e:\n yield e\n return\n\n for error in errors:\n yield better_validation_error(error, version, version_minor)\n | .venv\Lib\site-packages\nbformat\validator.py | validator.py | Python | 22,741 | 0.95 | 0.198198 | 0.051327 | vue-tools | 259 | 2024-10-13T20:57:28.766363 | BSD-3-Clause | false | 36b21dba55b4ace1d4c7d0e65ecce259 |
"""\nWarnings that can be emitted by nbformat.\n"""\n\nfrom __future__ import annotations\n\n\nclass MissingIDFieldWarning(FutureWarning):\n """\n\n This warning is emitted in the validation step of nbformat as we used to\n mutate the structure which is cause signature issues.\n\n This will be turned into an error at later point.\n\n We subclass FutureWarning as we will change the behavior in the future.\n\n """\n\n\nclass DuplicateCellId(FutureWarning):\n """\n\n This warning is emitted in the validation step of nbformat as we used to\n mutate the structure which is cause signature issues.\n\n This will be turned into an error at later point.\n\n We subclass FutureWarning as we will change the behavior in the future.\n """\n | .venv\Lib\site-packages\nbformat\warnings.py | warnings.py | Python | 741 | 0.85 | 0.066667 | 0 | node-utils | 145 | 2024-03-01T12:09:28.350596 | GPL-3.0 | false | 416d8cf51b454929699b7a4f4af8ddba |
"""\nA simple utility to import something by its string name.\n\nVendored form ipython_genutils\n"""\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\nfrom __future__ import annotations\n\n\ndef import_item(name):\n """Import and return ``bar`` given the string ``foo.bar``.\n\n Calling ``bar = import_item("foo.bar")`` is the functional equivalent of\n executing the code ``from foo import bar``.\n\n Parameters\n ----------\n name : string\n The fully qualified name of the module/package being imported.\n\n Returns\n -------\n mod : module object\n The module that was imported.\n """\n\n parts = name.rsplit(".", 1)\n if len(parts) == 2:\n # called with 'foo.bar....'\n package, obj = parts\n module = __import__(package, fromlist=[obj])\n try:\n pak = getattr(module, obj)\n except AttributeError:\n raise ImportError("No module named %s" % obj) from None\n return pak\n # called with un-dotted string\n return __import__(parts[0])\n | .venv\Lib\site-packages\nbformat\_imports.py | _imports.py | Python | 1,075 | 0.95 | 0.075 | 0.125 | awesome-app | 778 | 2025-02-22T02:03:03.319419 | Apache-2.0 | false | 101ece0098694752e81db797cdb5b750 |
"""A dict subclass that supports attribute style access.\n\nCan probably be replaced by types.SimpleNamespace from Python 3.3\n"""\n\nfrom __future__ import annotations\n\nfrom typing import Any, Dict\n\n__all__ = ["Struct"]\n\n\nclass Struct(Dict[Any, Any]):\n """A dict subclass with attribute style access.\n\n This dict subclass has a a few extra features:\n\n * Attribute style access.\n * Protection of class members (like keys, items) when using attribute\n style access.\n * The ability to restrict assignment to only existing keys.\n * Intelligent merging.\n * Overloaded operators.\n """\n\n _allownew = True\n\n def __init__(self, *args, **kw):\n """Initialize with a dictionary, another Struct, or data.\n\n Parameters\n ----------\n *args : dict, Struct\n Initialize with one dict or Struct\n **kw : dict\n Initialize with key, value pairs.\n\n Examples\n --------\n >>> s = Struct(a=10,b=30)\n >>> s.a\n 10\n >>> s.b\n 30\n >>> s2 = Struct(s,c=30)\n >>> sorted(s2.keys())\n ['a', 'b', 'c']\n """\n object.__setattr__(self, "_allownew", True)\n dict.__init__(self, *args, **kw)\n\n def __setitem__(self, key, value):\n """Set an item with check for allownew.\n\n Examples\n --------\n >>> s = Struct()\n >>> s['a'] = 10\n >>> s.allow_new_attr(False)\n >>> s['a'] = 10\n >>> s['a']\n 10\n >>> try:\n ... s['b'] = 20\n ... except KeyError:\n ... print('this is not allowed')\n ...\n this is not allowed\n """\n if not self._allownew and key not in self:\n raise KeyError("can't create new attribute %s when allow_new_attr(False)" % key)\n dict.__setitem__(self, key, value)\n\n def __setattr__(self, key, value):\n """Set an attr with protection of class members.\n\n This calls :meth:`self.__setitem__` but convert :exc:`KeyError` to\n :exc:`AttributeError`.\n\n Examples\n --------\n >>> s = Struct()\n >>> s.a = 10\n >>> s.a\n 10\n >>> try:\n ... s.get = 10\n ... except AttributeError:\n ... 
print("you can't set a class member")\n ...\n you can't set a class member\n """\n # If key is an str it might be a class member or instance var\n if isinstance(key, str): # noqa: SIM102\n # I can't simply call hasattr here because it calls getattr, which\n # calls self.__getattr__, which returns True for keys in\n # self._data. But I only want keys in the class and in\n # self.__dict__\n if key in self.__dict__ or hasattr(Struct, key):\n raise AttributeError("attr %s is a protected member of class Struct." % key)\n try:\n self.__setitem__(key, value)\n except KeyError as e:\n raise AttributeError(e) from None\n\n def __getattr__(self, key):\n """Get an attr by calling :meth:`dict.__getitem__`.\n\n Like :meth:`__setattr__`, this method converts :exc:`KeyError` to\n :exc:`AttributeError`.\n\n Examples\n --------\n >>> s = Struct(a=10)\n >>> s.a\n 10\n >>> type(s.get)\n <... 'builtin_function_or_method'>\n >>> try:\n ... s.b\n ... except AttributeError:\n ... print("I don't have that key")\n ...\n I don't have that key\n """\n try:\n result = self[key]\n except KeyError:\n raise AttributeError(key) from None\n else:\n return result\n\n def __iadd__(self, other):\n """s += s2 is a shorthand for s.merge(s2).\n\n Examples\n --------\n >>> s = Struct(a=10,b=30)\n >>> s2 = Struct(a=20,c=40)\n >>> s += s2\n >>> sorted(s.keys())\n ['a', 'b', 'c']\n """\n self.merge(other)\n return self\n\n def __add__(self, other):\n """s + s2 -> New Struct made from s.merge(s2).\n\n Examples\n --------\n >>> s1 = Struct(a=10,b=30)\n >>> s2 = Struct(a=20,c=40)\n >>> s = s1 + s2\n >>> sorted(s.keys())\n ['a', 'b', 'c']\n """\n sout = self.copy()\n sout.merge(other)\n return sout\n\n def __sub__(self, other):\n """s1 - s2 -> remove keys in s2 from s1.\n\n Examples\n --------\n >>> s1 = Struct(a=10,b=30)\n >>> s2 = Struct(a=40)\n >>> s = s1 - s2\n >>> s\n {'b': 30}\n """\n sout = self.copy()\n sout -= other\n return sout\n\n def __isub__(self, other):\n """Inplace remove keys from self that 
are in other.\n\n Examples\n --------\n >>> s1 = Struct(a=10,b=30)\n >>> s2 = Struct(a=40)\n >>> s1 -= s2\n >>> s1\n {'b': 30}\n """\n for k in other:\n if k in self:\n del self[k]\n return self\n\n def __dict_invert(self, data):\n """Helper function for merge.\n\n Takes a dictionary whose values are lists and returns a dict with\n the elements of each list as keys and the original keys as values.\n """\n outdict = {}\n for k, lst in data.items():\n if isinstance(lst, str):\n lst = lst.split() # noqa: PLW2901\n for entry in lst:\n outdict[entry] = k\n return outdict\n\n def dict(self):\n """Get the dict representation of the struct."""\n return self\n\n def copy(self):\n """Return a copy as a Struct.\n\n Examples\n --------\n >>> s = Struct(a=10,b=30)\n >>> s2 = s.copy()\n >>> type(s2) is Struct\n True\n """\n return Struct(dict.copy(self))\n\n def hasattr(self, key):\n """hasattr function available as a method.\n\n Implemented like has_key.\n\n Examples\n --------\n >>> s = Struct(a=10)\n >>> s.hasattr('a')\n True\n >>> s.hasattr('b')\n False\n >>> s.hasattr('get')\n False\n """\n return key in self\n\n def allow_new_attr(self, allow=True):\n """Set whether new attributes can be created in this Struct.\n\n This can be used to catch typos by verifying that the attribute user\n tries to change already exists in this Struct.\n """\n object.__setattr__(self, "_allownew", allow)\n\n def merge(self, __loc_data__=None, __conflict_solve=None, **kw):\n """Merge two Structs with customizable conflict resolution.\n\n This is similar to :meth:`update`, but much more flexible. First, a\n dict is made from data+key=value pairs. 
When merging this dict with\n the Struct S, the optional dictionary 'conflict' is used to decide\n what to do.\n\n If conflict is not given, the default behavior is to preserve any keys\n with their current value (the opposite of the :meth:`update` method's\n behavior).\n\n Parameters\n ----------\n __loc_data__ : dict, Struct\n The data to merge into self\n __conflict_solve : dict\n The conflict policy dict. The keys are binary functions used to\n resolve the conflict and the values are lists of strings naming\n the keys the conflict resolution function applies to. Instead of\n a list of strings a space separated string can be used, like\n 'a b c'.\n **kw : dict\n Additional key, value pairs to merge in\n\n Notes\n -----\n The `__conflict_solve` dict is a dictionary of binary functions which will be used to\n solve key conflicts. Here is an example::\n\n __conflict_solve = dict(\n func1=['a','b','c'],\n func2=['d','e']\n )\n\n In this case, the function :func:`func1` will be used to resolve\n keys 'a', 'b' and 'c' and the function :func:`func2` will be used for\n keys 'd' and 'e'. This could also be written as::\n\n __conflict_solve = dict(func1='a b c',func2='d e')\n\n These functions will be called for each key they apply to with the\n form::\n\n func1(self['a'], other['a'])\n\n The return value is used as the final merged value.\n\n As a convenience, merge() provides five (the most commonly needed)\n pre-defined policies: preserve, update, add, add_flip and add_s. 
The\n easiest explanation is their implementation::\n\n preserve = lambda old,new: old\n update = lambda old,new: new\n add = lambda old,new: old + new\n add_flip = lambda old,new: new + old # note change of order!\n add_s = lambda old,new: old + ' ' + new # only for str!\n\n You can use those four words (as strings) as keys instead\n of defining them as functions, and the merge method will substitute\n the appropriate functions for you.\n\n For more complicated conflict resolution policies, you still need to\n construct your own functions.\n\n Examples\n --------\n This show the default policy:\n\n >>> s = Struct(a=10,b=30)\n >>> s2 = Struct(a=20,c=40)\n >>> s.merge(s2)\n >>> sorted(s.items())\n [('a', 10), ('b', 30), ('c', 40)]\n\n Now, show how to specify a conflict dict:\n\n >>> s = Struct(a=10,b=30)\n >>> s2 = Struct(a=20,b=40)\n >>> conflict = {'update':'a','add':'b'}\n >>> s.merge(s2,conflict)\n >>> sorted(s.items())\n [('a', 20), ('b', 70)]\n """\n\n data_dict = dict(__loc_data__, **kw)\n\n # policies for conflict resolution: two argument functions which return\n # the value that will go in the new struct\n preserve = lambda old, new: old\n update = lambda old, new: new\n add = lambda old, new: old + new\n add_flip = lambda old, new: new + old # note change of order!\n add_s = lambda old, new: old + " " + new\n\n # default policy is to keep current keys when there's a conflict\n conflict_solve = dict.fromkeys(self, preserve)\n\n # the confli_allownewct_solve dictionary is given by the user 'inverted': we\n # need a name-function mapping, it comes as a function -> names\n # dict. 
Make a local copy (b/c we'll make changes), replace user\n # strings for the three builtin policies and invert it.\n if __conflict_solve:\n inv_conflict_solve_user = __conflict_solve.copy()\n for name, func in [\n ("preserve", preserve),\n ("update", update),\n ("add", add),\n ("add_flip", add_flip),\n ("add_s", add_s),\n ]:\n if name in inv_conflict_solve_user:\n inv_conflict_solve_user[func] = inv_conflict_solve_user[name]\n del inv_conflict_solve_user[name]\n conflict_solve.update(self.__dict_invert(inv_conflict_solve_user))\n for key in data_dict:\n if key not in self:\n self[key] = data_dict[key]\n else:\n self[key] = conflict_solve[key](self[key], data_dict[key])\n | .venv\Lib\site-packages\nbformat\_struct.py | _struct.py | Python | 11,297 | 0.95 | 0.158038 | 0.065359 | react-lib | 578 | 2023-09-02T11:39:42.518355 | GPL-3.0 | false | 24ebb60620fa0e4221a8fdcc3e17fd84 |
"""The version information for nbformat."""\n\n# Use "hatchling version xx.yy.zz" to handle version changes\nfrom __future__ import annotations\n\nimport re\nfrom importlib.metadata import version\n\n__version__ = version("nbformat") or "0.0.0"\n\n# matches tbump regex in pyproject.toml\n_version_regex = re.compile(\n r"""\n (?P<major>\d+)\n \.\n (?P<minor>\d+)\n \.\n (?P<patch>\d+)\n (?P<pre>((a|b|rc)\d+))?\n (\.\n (?P<dev>dev\d*)\n )?\n """,\n re.VERBOSE,\n)\n\n_version_fields = _version_regex.match(__version__).groupdict() # type:ignore[union-attr]\nversion_info = tuple(\n field\n for field in (\n int(_version_fields["major"]),\n int(_version_fields["minor"]),\n int(_version_fields["patch"]),\n _version_fields["pre"],\n _version_fields["dev"],\n )\n if field is not None\n)\n | .venv\Lib\site-packages\nbformat\_version.py | _version.py | Python | 816 | 0.95 | 0.078947 | 0.060606 | react-lib | 890 | 2025-04-09T15:33:45.999958 | GPL-3.0 | false | fecc6d06899fd80754f5bf3202ff6e33 |
"""The Jupyter notebook format\n\nUse this module to read or write notebook files as particular nbformat versions.\n"""\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\nfrom __future__ import annotations\n\nfrom pathlib import Path\n\nfrom traitlets.log import get_logger\n\nfrom . import v1, v2, v3, v4\nfrom ._version import __version__, version_info\nfrom .sentinel import Sentinel\n\n__all__ = [\n "versions",\n "validate",\n "ValidationError",\n "convert",\n "from_dict",\n "NotebookNode",\n "current_nbformat",\n "current_nbformat_minor",\n "NBFormatError",\n "NO_CONVERT",\n "reads",\n "read",\n "writes",\n "write",\n "version_info",\n "__version__",\n "Sentinel",\n]\n\nversions = {\n 1: v1,\n 2: v2,\n 3: v3,\n 4: v4,\n}\n\nfrom . import reader # noqa: E402\nfrom .converter import convert # noqa: E402\nfrom .notebooknode import NotebookNode, from_dict # noqa: E402\nfrom .v4 import nbformat as current_nbformat # noqa: E402\nfrom .v4 import nbformat_minor as current_nbformat_minor # noqa: E402\nfrom .validator import ValidationError, validate # noqa: E402\n\n\nclass NBFormatError(ValueError):\n pass\n\n\n# no-conversion singleton\nNO_CONVERT = Sentinel(\n "NO_CONVERT",\n __name__,\n """Value to prevent nbformat to convert notebooks to most recent version.\n """,\n)\n\n\ndef reads(s, as_version, capture_validation_error=None, **kwargs):\n """Read a notebook from a string and return the NotebookNode object as the given version.\n\n The string can contain a notebook of any version.\n The notebook will be returned `as_version`, converting, if necessary.\n\n Notebook format errors will be logged.\n\n Parameters\n ----------\n s : unicode\n The raw unicode string to read the notebook from.\n as_version : int\n The version of the notebook format to return.\n The notebook will be converted, if necessary.\n Pass nbformat.NO_CONVERT to prevent conversion.\n capture_validation_error : dict, optional\n If provided, a key 
of "ValidationError" with a\n value of the ValidationError instance will be added\n to the dictionary.\n\n Returns\n -------\n nb : NotebookNode\n The notebook that was read.\n """\n nb = reader.reads(s, **kwargs)\n if as_version is not NO_CONVERT:\n nb = convert(nb, as_version)\n try:\n validate(nb)\n except ValidationError as e:\n get_logger().error("Notebook JSON is invalid: %s", e)\n if isinstance(capture_validation_error, dict):\n capture_validation_error["ValidationError"] = e\n return nb\n\n\ndef writes(nb, version=NO_CONVERT, capture_validation_error=None, **kwargs):\n """Write a notebook to a string in a given format in the given nbformat version.\n\n Any notebook format errors will be logged.\n\n Parameters\n ----------\n nb : NotebookNode\n The notebook to write.\n version : int, optional\n The nbformat version to write.\n If unspecified, or specified as nbformat.NO_CONVERT,\n the notebook's own version will be used and no conversion performed.\n capture_validation_error : dict, optional\n If provided, a key of "ValidationError" with a\n value of the ValidationError instance will be added\n to the dictionary.\n\n Returns\n -------\n s : unicode\n The notebook as a JSON string.\n """\n if version is not NO_CONVERT:\n nb = convert(nb, version)\n else:\n version, _ = reader.get_version(nb)\n try:\n validate(nb)\n except ValidationError as e:\n get_logger().error("Notebook JSON is invalid: %s", e)\n if isinstance(capture_validation_error, dict):\n capture_validation_error["ValidationError"] = e\n return versions[version].writes_json(nb, **kwargs)\n\n\ndef read(fp, as_version, capture_validation_error=None, **kwargs):\n """Read a notebook from a file as a NotebookNode of the given version.\n\n The string can contain a notebook of any version.\n The notebook will be returned `as_version`, converting, if necessary.\n\n Notebook format errors will be logged.\n\n Parameters\n ----------\n fp : file or str\n A file-like object with a read method that returns 
unicode (use\n ``io.open()`` in Python 2), or a path to a file.\n as_version : int\n The version of the notebook format to return.\n The notebook will be converted, if necessary.\n Pass nbformat.NO_CONVERT to prevent conversion.\n capture_validation_error : dict, optional\n If provided, a key of "ValidationError" with a\n value of the ValidationError instance will be added\n to the dictionary.\n\n Returns\n -------\n nb : NotebookNode\n The notebook that was read.\n """\n\n try:\n buf = fp.read()\n except AttributeError:\n with open(fp, encoding="utf8") as f: # noqa: PTH123\n return reads(f.read(), as_version, capture_validation_error, **kwargs)\n\n return reads(buf, as_version, capture_validation_error, **kwargs)\n\n\ndef write(nb, fp, version=NO_CONVERT, capture_validation_error=None, **kwargs):\n """Write a notebook to a file in a given nbformat version.\n\n The file-like object must accept unicode input.\n\n Parameters\n ----------\n nb : NotebookNode\n The notebook to write.\n fp : file or str\n Any file-like object with a write method that accepts unicode, or\n a path to write a file.\n version : int, optional\n The nbformat version to write.\n If nb is not this version, it will be converted.\n If unspecified, or specified as nbformat.NO_CONVERT,\n the notebook's own version will be used and no conversion performed.\n capture_validation_error : dict, optional\n If provided, a key of "ValidationError" with a\n value of the ValidationError instance will be added\n to the dictionary.\n """\n s = writes(nb, version, capture_validation_error, **kwargs)\n if isinstance(s, bytes):\n s = s.decode("utf8")\n\n try:\n fp.write(s)\n if not s.endswith("\n"):\n fp.write("\n")\n except AttributeError:\n with Path(fp).open("w", encoding="utf8") as f:\n f.write(s)\n if not s.endswith("\n"):\n f.write("\n")\n | .venv\Lib\site-packages\nbformat\__init__.py | __init__.py | Python | 6,314 | 0.95 | 0.094787 | 0.017143 | vue-tools | 618 | 2023-08-17T00:55:45.831179 | MIT | false | 
7c3b9c2aeda4dad2f4af659d6ce642f2 |
"""Generate a corpus id."""\n\nfrom __future__ import annotations\n\nimport uuid\n\n\ndef generate_corpus_id():\n """Generate a corpus id."""\n return uuid.uuid4().hex[:8]\n | .venv\Lib\site-packages\nbformat\corpus\words.py | words.py | Python | 169 | 0.85 | 0.1 | 0 | react-lib | 669 | 2025-05-05T01:25:56.564982 | Apache-2.0 | false | 557e89c7e53c9d6b471e572258bdd3a3 |
"""Tests for nbformat corpus"""\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\nfrom __future__ import annotations\n\nfrom nbformat.corpus import words\n\n\ndef test_generate_corpus_id(recwarn):\n """Test generating a corpus id."""\n assert len(words.generate_corpus_id()) > 7\n # 1 in 4294967296 (2^32) times this will fail\n assert words.generate_corpus_id() != words.generate_corpus_id()\n assert len(recwarn) == 0\n | .venv\Lib\site-packages\nbformat\corpus\tests\test_words.py | test_words.py | Python | 477 | 0.95 | 0.133333 | 0.272727 | python-kit | 491 | 2024-11-23T23:21:20.882026 | MIT | true | fe4695a6e1810f34c3e2a03da2408fcc |
\n\n | .venv\Lib\site-packages\nbformat\corpus\tests\__pycache__\test_words.cpython-313.pyc | test_words.cpython-313.pyc | Other | 803 | 0.8 | 0.2 | 0 | python-kit | 837 | 2024-07-08T05:25:55.210391 | MIT | true | b02232656066d861d963ea8e3ab55009 |
\n\n | .venv\Lib\site-packages\nbformat\corpus\tests\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 196 | 0.7 | 0 | 0 | awesome-app | 744 | 2025-06-14T23:54:09.141834 | MIT | true | 3aee08fe124a6b491ed346605b5ef4ed |
\n\n | .venv\Lib\site-packages\nbformat\corpus\__pycache__\words.cpython-313.pyc | words.cpython-313.pyc | Other | 517 | 0.7 | 0 | 0 | awesome-app | 779 | 2023-12-27T20:38:37.714997 | GPL-3.0 | false | eedb25015a9c5fe7cf63ca515bcd9ae1 |
\n\n | .venv\Lib\site-packages\nbformat\corpus\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 190 | 0.7 | 0 | 0 | vue-tools | 407 | 2024-11-17T06:40:35.601578 | MIT | false | 862a13d3b8eb58dd57fcdbb25f7be763 |
"""Convert notebook to the v1 format."""\n\n# -----------------------------------------------------------------------------\n# Copyright (C) 2008-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file LICENSE, distributed as part of this software.\n# -----------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------\n# Code\n# -----------------------------------------------------------------------------\nfrom __future__ import annotations\n\n\ndef upgrade(nb, orig_version=None):\n """Upgrade a notebook."""\n msg = "Cannot convert to v1 notebook format"\n raise ValueError(msg)\n | .venv\Lib\site-packages\nbformat\v1\convert.py | convert.py | Python | 741 | 0.95 | 0.052632 | 0.6 | vue-tools | 424 | 2025-07-06T08:28:21.436129 | MIT | false | 4b06840bbd98d318319313b6f57a28ac |
"""The basic dict based notebook format.\n\nAuthors:\n\n* Brian Granger\n"""\n\n# -----------------------------------------------------------------------------\n# Copyright (C) 2008-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file LICENSE, distributed as part of this software.\n# -----------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\nfrom __future__ import annotations\n\nfrom nbformat._struct import Struct\n\n# -----------------------------------------------------------------------------\n# Code\n# -----------------------------------------------------------------------------\n\n\nclass NotebookNode(Struct):\n """A notebook node object."""\n\n\ndef from_dict(d):\n """Create notebook node(s) from an object."""\n if isinstance(d, dict):\n newd = NotebookNode()\n for k, v in d.items():\n newd[k] = from_dict(v)\n return newd\n if isinstance(d, (tuple, list)):\n return [from_dict(i) for i in d]\n return d\n\n\ndef new_code_cell(code=None, prompt_number=None):\n """Create a new code cell with input and output"""\n cell = NotebookNode()\n cell.cell_type = "code"\n if code is not None:\n cell.code = str(code)\n if prompt_number is not None:\n cell.prompt_number = int(prompt_number)\n return cell\n\n\ndef new_text_cell(text=None):\n """Create a new text cell."""\n cell = NotebookNode()\n if text is not None:\n cell.text = str(text)\n cell.cell_type = "text"\n return cell\n\n\ndef new_notebook(cells=None):\n """Create a notebook by name, id and a list of worksheets."""\n nb = NotebookNode()\n if cells is not None:\n nb.cells = cells\n else:\n nb.cells = []\n return nb\n | .venv\Lib\site-packages\nbformat\v1\nbbase.py | nbbase.py | Python | 1,930 | 0.95 | 0.185714 | 0.240741 | react-lib | 582 | 
2023-10-19T20:14:42.580984 | MIT | false | 45e5f97433af8c5c2de3d5df0abe4b44 |
"""Read and write notebooks in JSON format.\n\nAuthors:\n\n* Brian Granger\n"""\n\n# -----------------------------------------------------------------------------\n# Copyright (C) 2008-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file LICENSE, distributed as part of this software.\n# -----------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\nfrom __future__ import annotations\n\nimport json\n\nfrom .nbbase import from_dict\nfrom .rwbase import NotebookReader, NotebookWriter\n\n# -----------------------------------------------------------------------------\n# Code\n# -----------------------------------------------------------------------------\n\n\nclass JSONReader(NotebookReader):\n """A JSON notebook reader."""\n\n def reads(self, s, **kwargs):\n """Convert a string to a notebook object."""\n nb = json.loads(s, **kwargs)\n return self.to_notebook(nb, **kwargs)\n\n def to_notebook(self, d, **kwargs):\n """Convert from a raw JSON dict to a nested NotebookNode structure."""\n return from_dict(d)\n\n\nclass JSONWriter(NotebookWriter):\n """A JSON notebook writer."""\n\n def writes(self, nb, **kwargs):\n """Convert a notebook object to a string."""\n kwargs["indent"] = 4\n return json.dumps(nb, **kwargs)\n\n\n_reader = JSONReader()\n_writer = JSONWriter()\n\nreads = _reader.reads\nread = _reader.read\nto_notebook = _reader.to_notebook\nwrite = _writer.write\nwrites = _writer.writes\n | .venv\Lib\site-packages\nbformat\v1\nbjson.py | nbjson.py | Python | 1,687 | 0.95 | 0.084746 | 0.309524 | vue-tools | 839 | 2023-12-20T20:24:14.645687 | GPL-3.0 | false | 5bc8c9874a212be3a372faa2dd96fa50 |
"""Base classes and function for readers and writers.\n\nAuthors:\n\n* Brian Granger\n"""\n\n# -----------------------------------------------------------------------------\n# Copyright (C) 2008-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file LICENSE, distributed as part of this software.\n# -----------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------\n# Code\n# -----------------------------------------------------------------------------\nfrom __future__ import annotations\n\n\nclass NotebookReader:\n """The base notebook reader."""\n\n def reads(self, s, **kwargs):\n """Read a notebook from a string."""\n msg = "loads must be implemented in a subclass"\n raise NotImplementedError(msg)\n\n def read(self, fp, **kwargs):\n """Read a notebook from a file like object"""\n return self.reads(fp.read(), **kwargs)\n\n\nclass NotebookWriter:\n """The base notebook writer."""\n\n def writes(self, nb, **kwargs):\n """Write a notebook to a string."""\n msg = "loads must be implemented in a subclass"\n raise NotImplementedError(msg)\n\n def write(self, nb, fp, **kwargs):\n """Write a notebook to a file like object"""\n return fp.write(self.writes(nb, **kwargs))\n | .venv\Lib\site-packages\nbformat\v1\rwbase.py | rwbase.py | Python | 1,564 | 0.95 | 0.166667 | 0.371429 | vue-tools | 645 | 2025-03-07T05:53:28.356817 | BSD-3-Clause | false | ffffc7301933f4b4520351c44120d73e |
"""The main module for the v1 notebook format."""\n\n# -----------------------------------------------------------------------------\n# Copyright (C) 2008-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file LICENSE, distributed as part of this software.\n# -----------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\nfrom __future__ import annotations\n\nfrom .convert import upgrade\nfrom .nbbase import NotebookNode, new_code_cell, new_notebook, new_text_cell\nfrom .nbjson import reads as read_json\nfrom .nbjson import reads as reads_json\nfrom .nbjson import to_notebook as to_notebook_json\nfrom .nbjson import writes as write_json\nfrom .nbjson import writes as writes_json\n | .venv\Lib\site-packages\nbformat\v1\__init__.py | __init__.py | Python | 931 | 0.95 | 0.047619 | 0.5 | awesome-app | 235 | 2024-01-09T15:00:12.554821 | Apache-2.0 | false | 3d29a19a485a78916726ba27d3688dcc |
\n\n | .venv\Lib\site-packages\nbformat\v1\__pycache__\convert.cpython-313.pyc | convert.cpython-313.pyc | Other | 523 | 0.8 | 0 | 0 | react-lib | 17 | 2025-03-06T16:44:35.964051 | Apache-2.0 | false | c48cf318fab6624374c4aa3436ebdc7b |
\n\n | .venv\Lib\site-packages\nbformat\v1\__pycache__\nbbase.cpython-313.pyc | nbbase.cpython-313.pyc | Other | 2,169 | 0.8 | 0 | 0.058824 | python-kit | 15 | 2023-07-24T04:08:09.535611 | GPL-3.0 | false | ca5af3c5305c83bad0cc008004319c77 |
\n\n | .venv\Lib\site-packages\nbformat\v1\__pycache__\nbjson.cpython-313.pyc | nbjson.cpython-313.pyc | Other | 2,015 | 0.8 | 0 | 0.035714 | node-utils | 3 | 2024-03-04T13:19:45.502119 | BSD-3-Clause | false | 7389de1f932949f0ab882d8e945a37a0 |
\n\n | .venv\Lib\site-packages\nbformat\v1\__pycache__\rwbase.cpython-313.pyc | rwbase.cpython-313.pyc | Other | 1,827 | 0.95 | 0.111111 | 0.066667 | python-kit | 627 | 2025-03-01T20:17:29.964458 | MIT | false | ecded8484335aad3d536d95ed5274098 |
\n\n | .venv\Lib\site-packages\nbformat\v1\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 659 | 0.8 | 0.066667 | 0 | awesome-app | 411 | 2024-07-06T01:54:00.694435 | GPL-3.0 | false | e26e803baaacf2016d7391d1fd300c87 |
"""Code for converting notebooks to and from the v2 format.\n\nAuthors:\n\n* Brian Granger\n* Jonathan Frederic\n"""\n\n# -----------------------------------------------------------------------------\n# Copyright (C) 2008-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file LICENSE, distributed as part of this software.\n# -----------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\nfrom __future__ import annotations\n\nfrom .nbbase import new_code_cell, new_notebook, new_text_cell, new_worksheet\n\n# -----------------------------------------------------------------------------\n# Code\n# -----------------------------------------------------------------------------\n\n\ndef upgrade(nb, from_version=1):\n """Convert a notebook to the v2 format.\n\n Parameters\n ----------\n nb : NotebookNode\n The Python representation of the notebook to convert.\n from_version : int\n The version of the notebook to convert from.\n """\n if from_version == 1:\n newnb = new_notebook()\n ws = new_worksheet()\n for cell in nb.cells:\n if cell.cell_type == "code":\n newcell = new_code_cell(\n input=cell.get("code"), prompt_number=cell.get("prompt_number")\n )\n elif cell.cell_type == "text":\n newcell = new_text_cell("markdown", source=cell.get("text"))\n ws.cells.append(newcell)\n newnb.worksheets.append(ws)\n return newnb\n\n raise ValueError("Cannot convert a notebook from v%s to v2" % from_version)\n\n\ndef downgrade(nb):\n """Convert a v2 notebook to v1.\n\n Parameters\n ----------\n nb : NotebookNode\n The Python representation of the notebook to convert.\n """\n msg = "Downgrade from notebook v2 to v1 is not supported."\n raise Exception(msg)\n | .venv\Lib\site-packages\nbformat\v2\convert.py | convert.py | Python | 2,059 
| 0.95 | 0.09375 | 0.27451 | vue-tools | 743 | 2025-01-22T05:38:21.162584 | BSD-3-Clause | false | ac6d4e11cabeda12ed2dc9cbf469cac1 |
"""The basic dict based notebook format.\n\nThe Python representation of a notebook is a nested structure of\ndictionary subclasses that support attribute access.\nThe functions in this module are merely\nhelpers to build the structs in the right form.\n\nAuthors:\n\n* Brian Granger\n"""\n\n# -----------------------------------------------------------------------------\n# Copyright (C) 2008-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file LICENSE, distributed as part of this software.\n# -----------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\nfrom __future__ import annotations\n\nfrom nbformat._struct import Struct\n\n# -----------------------------------------------------------------------------\n# Code\n# -----------------------------------------------------------------------------\n\n\nclass NotebookNode(Struct):\n """A notebook node object."""\n\n\ndef from_dict(d):\n """Create notebook node(s) from a value."""\n if isinstance(d, dict):\n newd = NotebookNode()\n for k, v in d.items():\n newd[k] = from_dict(v)\n return newd\n if isinstance(d, (tuple, list)):\n return [from_dict(i) for i in d]\n return d\n\n\ndef new_output(\n output_type=None,\n output_text=None,\n output_png=None,\n output_html=None,\n output_svg=None,\n output_latex=None,\n output_json=None,\n output_javascript=None,\n output_jpeg=None,\n prompt_number=None,\n etype=None,\n evalue=None,\n traceback=None,\n):\n """Create a new code cell with input and output"""\n output = NotebookNode()\n if output_type is not None:\n output.output_type = str(output_type)\n\n if output_type != "pyerr":\n if output_text is not None:\n output.text = str(output_text)\n if output_png is not None:\n output.png = bytes(output_png)\n if output_jpeg is 
not None:\n output.jpeg = bytes(output_jpeg)\n if output_html is not None:\n output.html = str(output_html)\n if output_svg is not None:\n output.svg = str(output_svg)\n if output_latex is not None:\n output.latex = str(output_latex)\n if output_json is not None:\n output.json = str(output_json)\n if output_javascript is not None:\n output.javascript = str(output_javascript)\n\n if output_type == "pyout" and prompt_number is not None:\n output.prompt_number = int(prompt_number)\n\n if output_type == "pyerr":\n if etype is not None:\n output.etype = str(etype)\n if evalue is not None:\n output.evalue = str(evalue)\n if traceback is not None:\n output.traceback = [str(frame) for frame in list(traceback)]\n\n return output\n\n\ndef new_code_cell(\n input=None,\n prompt_number=None,\n outputs=None,\n language="python",\n collapsed=False,\n):\n """Create a new code cell with input and output"""\n cell = NotebookNode()\n cell.cell_type = "code"\n if language is not None:\n cell.language = str(language)\n if input is not None:\n cell.input = str(input)\n if prompt_number is not None:\n cell.prompt_number = int(prompt_number)\n if outputs is None:\n cell.outputs = []\n else:\n cell.outputs = outputs\n if collapsed is not None:\n cell.collapsed = bool(collapsed)\n\n return cell\n\n\ndef new_text_cell(cell_type, source=None, rendered=None):\n """Create a new text cell."""\n cell = NotebookNode()\n if source is not None:\n cell.source = str(source)\n if rendered is not None:\n cell.rendered = str(rendered)\n cell.cell_type = cell_type\n return cell\n\n\ndef new_worksheet(name=None, cells=None):\n """Create a worksheet by name with with a list of cells."""\n ws = NotebookNode()\n if name is not None:\n ws.name = str(name)\n if cells is None:\n ws.cells = []\n else:\n ws.cells = list(cells)\n return ws\n\n\ndef new_notebook(metadata=None, worksheets=None):\n """Create a notebook by name, id and a list of worksheets."""\n nb = NotebookNode()\n nb.nbformat = 2\n if worksheets is 
None:\n nb.worksheets = []\n else:\n nb.worksheets = list(worksheets)\n if metadata is None:\n nb.metadata = new_metadata()\n else:\n nb.metadata = NotebookNode(metadata)\n return nb\n\n\ndef new_metadata(\n name=None,\n authors=None,\n license=None,\n created=None,\n modified=None,\n gistid=None,\n):\n """Create a new metadata node."""\n metadata = NotebookNode()\n if name is not None:\n metadata.name = str(name)\n if authors is not None:\n metadata.authors = list(authors)\n if created is not None:\n metadata.created = str(created)\n if modified is not None:\n metadata.modified = str(modified)\n if license is not None:\n metadata.license = str(license)\n if gistid is not None:\n metadata.gistid = str(gistid)\n return metadata\n\n\ndef new_author(name=None, email=None, affiliation=None, url=None):\n """Create a new author."""\n author = NotebookNode()\n if name is not None:\n author.name = str(name)\n if email is not None:\n author.email = str(email)\n if affiliation is not None:\n author.affiliation = str(affiliation)\n if url is not None:\n author.url = str(url)\n return author\n | .venv\Lib\site-packages\nbformat\v2\nbbase.py | nbbase.py | Python | 5,574 | 0.95 | 0.25 | 0.076471 | react-lib | 657 | 2024-08-19T10:00:10.712760 | BSD-3-Clause | false | 9d9251efefeac045e2088a883fff8092 |
"""Read and write notebooks in JSON format.\n\nAuthors:\n\n* Brian Granger\n"""\n\n# -----------------------------------------------------------------------------\n# Copyright (C) 2008-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file LICENSE, distributed as part of this software.\n# -----------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\nfrom __future__ import annotations\n\nimport copy\nimport json\n\nfrom .nbbase import from_dict\nfrom .rwbase import NotebookReader, NotebookWriter, rejoin_lines, restore_bytes, split_lines\n\n# -----------------------------------------------------------------------------\n# Code\n# -----------------------------------------------------------------------------\n\n\nclass BytesEncoder(json.JSONEncoder):\n """A JSON encoder that accepts b64 (and other *ascii*) bytestrings."""\n\n def default(self, obj):\n """The default value of an object."""\n if isinstance(obj, bytes):\n return obj.decode("ascii")\n return json.JSONEncoder.default(self, obj)\n\n\nclass JSONReader(NotebookReader):\n """A JSON notebook reader."""\n\n def reads(self, s, **kwargs):\n """Convert a string to a notebook."""\n nb = json.loads(s, **kwargs)\n nb = self.to_notebook(nb, **kwargs)\n return nb # noqa: RET504\n\n def to_notebook(self, d, **kwargs):\n """Convert a string to a notebook."""\n return restore_bytes(rejoin_lines(from_dict(d)))\n\n\nclass JSONWriter(NotebookWriter):\n """A JSON notebook writer."""\n\n def writes(self, nb, **kwargs):\n """Convert a notebook object to a string."""\n kwargs["cls"] = BytesEncoder\n kwargs["indent"] = 1\n kwargs["sort_keys"] = True\n if kwargs.pop("split_lines", True):\n nb = split_lines(copy.deepcopy(nb))\n return json.dumps(nb, **kwargs)\n\n\n_reader = 
JSONReader()\n_writer = JSONWriter()\n\nreads = _reader.reads\nread = _reader.read\nto_notebook = _reader.to_notebook\nwrite = _writer.write\nwrites = _writer.writes\n | .venv\Lib\site-packages\nbformat\v2\nbjson.py | nbjson.py | Python | 2,241 | 0.95 | 0.12 | 0.236364 | react-lib | 355 | 2024-04-01T10:46:09.882858 | GPL-3.0 | false | 393bff691208b5978b03f32127cabcb4 |
"""Read and write notebooks as regular .py files.\n\nAuthors:\n\n* Brian Granger\n"""\n\n# -----------------------------------------------------------------------------\n# Copyright (C) 2008-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file LICENSE, distributed as part of this software.\n# -----------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\nfrom __future__ import annotations\n\nimport re\n\nfrom .nbbase import new_code_cell, new_notebook, new_text_cell, new_worksheet\nfrom .rwbase import NotebookReader, NotebookWriter\n\n# -----------------------------------------------------------------------------\n# Code\n# -----------------------------------------------------------------------------\n\n_encoding_declaration_re = re.compile(r"^#.*coding[:=]\s*([-\w.]+)")\n\n\nclass PyReaderError(Exception):\n """An error raised by the PyReader."""\n\n\nclass PyReader(NotebookReader):\n """A Python notebook reader."""\n\n def reads(self, s, **kwargs):\n """Convert a string to a notebook."""\n return self.to_notebook(s, **kwargs)\n\n def to_notebook(self, s, **kwargs):\n """Convert a string to a notebook."""\n lines = s.splitlines()\n cells = []\n cell_lines: list[str] = []\n state = "codecell"\n for line in lines:\n if line.startswith("# <nbformat>") or _encoding_declaration_re.match(line):\n pass\n elif line.startswith("# <codecell>"):\n cell = self.new_cell(state, cell_lines)\n if cell is not None:\n cells.append(cell)\n state = "codecell"\n cell_lines = []\n elif line.startswith("# <htmlcell>"):\n cell = self.new_cell(state, cell_lines)\n if cell is not None:\n cells.append(cell)\n state = "htmlcell"\n cell_lines = []\n elif line.startswith("# <markdowncell>"):\n cell = self.new_cell(state, cell_lines)\n if 
cell is not None:\n cells.append(cell)\n state = "markdowncell"\n cell_lines = []\n else:\n cell_lines.append(line)\n if cell_lines and state == "codecell":\n cell = self.new_cell(state, cell_lines)\n if cell is not None:\n cells.append(cell)\n ws = new_worksheet(cells=cells)\n return new_notebook(worksheets=[ws])\n\n def new_cell(self, state, lines):\n """Create a new cell."""\n if state == "codecell":\n input_ = "\n".join(lines)\n input_ = input_.strip("\n")\n if input_:\n return new_code_cell(input=input_)\n elif state == "htmlcell":\n text = self._remove_comments(lines)\n if text:\n return new_text_cell("html", source=text)\n elif state == "markdowncell":\n text = self._remove_comments(lines)\n if text:\n return new_text_cell("markdown", source=text)\n\n def _remove_comments(self, lines):\n new_lines = []\n for line in lines:\n if line.startswith("#"):\n new_lines.append(line[2:])\n else:\n new_lines.append(line)\n text = "\n".join(new_lines)\n text = text.strip("\n")\n return text # noqa: RET504\n\n def split_lines_into_blocks(self, lines):\n """Split lines into code blocks."""\n if len(lines) == 1:\n yield lines[0]\n raise StopIteration()\n import ast\n\n source = "\n".join(lines)\n code = ast.parse(source)\n starts = [x.lineno - 1 for x in code.body]\n for i in range(len(starts) - 1):\n yield "\n".join(lines[starts[i] : starts[i + 1]]).strip("\n")\n yield "\n".join(lines[starts[-1] :]).strip("\n")\n\n\nclass PyWriter(NotebookWriter):\n """A Python notebook writer."""\n\n def writes(self, nb, **kwargs):\n """Convert a notebook object to a string."""\n lines = ["# -*- coding: utf-8 -*-"]\n lines.extend(["# <nbformat>2</nbformat>", ""])\n for ws in nb.worksheets:\n for cell in ws.cells:\n if cell.cell_type == "code":\n input_ = cell.get("input")\n if input_ is not None:\n lines.extend(["# <codecell>", ""])\n lines.extend(input_.splitlines())\n lines.append("")\n elif cell.cell_type == "html":\n input_ = cell.get("source")\n if input_ is not None:\n 
lines.extend(["# <htmlcell>", ""])\n lines.extend(["# " + line for line in input_.splitlines()])\n lines.append("")\n elif cell.cell_type == "markdown":\n input_ = cell.get("source")\n if input_ is not None:\n lines.extend(["# <markdowncell>", ""])\n lines.extend(["# " + line for line in input_.splitlines()])\n lines.append("")\n lines.append("")\n return str("\n".join(lines))\n\n\n_reader = PyReader()\n_writer = PyWriter()\n\nreads = _reader.reads\nread = _reader.read\nto_notebook = _reader.to_notebook\nwrite = _writer.write\nwrites = _writer.writes\n | .venv\Lib\site-packages\nbformat\v2\nbpy.py | nbpy.py | Python | 5,600 | 0.95 | 0.207547 | 0.096296 | react-lib | 481 | 2024-09-08T10:12:05.385549 | GPL-3.0 | false | 7440039fc5666a3bddd1144ee51788ce |
"""REMOVED: Read and write notebook files as XML."""\n\nfrom __future__ import annotations\n\nREMOVED_MSG = """\\nReading notebooks as XML has been removed to harden security and avoid\npossible denial-of-service attacks.\n\nThe XML notebook format was deprecated before the Jupyter (previously IPython)\nNotebook was ever released. We are not aware of anyone using it, so we have\nremoved it.\n\nIf you were using this code, and you need to continue using it, feel free to\nfork an earlier version of the nbformat package and maintain it yourself.\nThe issue which prompted this removal is:\n\nhttps://github.com/jupyter/nbformat/issues/132\n"""\n\n\ndef reads(s, **kwargs):\n """REMOVED"""\n raise Exception(REMOVED_MSG)\n\n\ndef read(fp, **kwargs):\n """REMOVED"""\n raise Exception(REMOVED_MSG)\n\n\ndef to_notebook(root, **kwargs):\n """REMOVED"""\n raise Exception(REMOVED_MSG)\n | .venv\Lib\site-packages\nbformat\v2\nbxml.py | nbxml.py | Python | 870 | 0.95 | 0.090909 | 0 | awesome-app | 99 | 2024-12-12T21:37:36.105820 | Apache-2.0 | false | 3757ac38ab7905dfdb258ecd9e91a97e |
"""Base classes and utilities for readers and writers.\n\nAuthors:\n\n* Brian Granger\n"""\n\n# -----------------------------------------------------------------------------\n# Copyright (C) 2008-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file LICENSE, distributed as part of this software.\n# -----------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\nfrom __future__ import annotations\n\nfrom base64 import decodebytes, encodebytes\n\n# -----------------------------------------------------------------------------\n# Code\n# -----------------------------------------------------------------------------\n\n\ndef restore_bytes(nb):\n """Restore bytes of image data from unicode-only formats.\n\n Base64 encoding is handled elsewhere. Bytes objects in the notebook are\n always b64-encoded. 
We DO NOT encode/decode around file formats.\n """\n for ws in nb.worksheets:\n for cell in ws.cells:\n if cell.cell_type == "code":\n for output in cell.outputs:\n if "png" in output:\n output.png = output.png.encode("ascii")\n if "jpeg" in output:\n output.jpeg = output.jpeg.encode("ascii")\n return nb\n\n\n# output keys that are likely to have multiline values\n_multiline_outputs = ["text", "html", "svg", "latex", "javascript", "json"]\n\n\ndef rejoin_lines(nb):\n """rejoin multiline text into strings\n\n For reversing effects of ``split_lines(nb)``.\n\n This only rejoins lines that have been split, so if text objects were not split\n they will pass through unchanged.\n\n Used when reading JSON files that may have been passed through split_lines.\n """\n for ws in nb.worksheets:\n for cell in ws.cells:\n if cell.cell_type == "code":\n if "input" in cell and isinstance(cell.input, list):\n cell.input = "\n".join(cell.input)\n for output in cell.outputs:\n for key in _multiline_outputs:\n item = output.get(key, None)\n if isinstance(item, list):\n output[key] = "\n".join(item)\n else: # text cell\n for key in ["source", "rendered"]:\n item = cell.get(key, None)\n if isinstance(item, list):\n cell[key] = "\n".join(item)\n return nb\n\n\ndef split_lines(nb):\n """split likely multiline text into lists of strings\n\n For file output more friendly to line-based VCS. 
``rejoin_lines(nb)`` will\n reverse the effects of ``split_lines(nb)``.\n\n Used when writing JSON files.\n """\n for ws in nb.worksheets:\n for cell in ws.cells:\n if cell.cell_type == "code":\n if "input" in cell and isinstance(cell.input, str):\n cell.input = cell.input.splitlines()\n for output in cell.outputs:\n for key in _multiline_outputs:\n item = output.get(key, None)\n if isinstance(item, str):\n output[key] = item.splitlines()\n else: # text cell\n for key in ["source", "rendered"]:\n item = cell.get(key, None)\n if isinstance(item, str):\n cell[key] = item.splitlines()\n return nb\n\n\n# b64 encode/decode are never actually used, because all bytes objects in\n# the notebook are already b64-encoded, and we don't need/want to double-encode\n\n\ndef base64_decode(nb):\n """Restore all bytes objects in the notebook from base64-encoded strings.\n\n Note: This is never used\n """\n for ws in nb.worksheets:\n for cell in ws.cells:\n if cell.cell_type == "code":\n for output in cell.outputs:\n if "png" in output:\n if isinstance(output.png, str):\n output.png = output.png.encode("ascii")\n output.png = decodebytes(output.png)\n if "jpeg" in output:\n if isinstance(output.jpeg, str):\n output.jpeg = output.jpeg.encode("ascii")\n output.jpeg = decodebytes(output.jpeg)\n return nb\n\n\ndef base64_encode(nb):\n """Base64 encode all bytes objects in the notebook.\n\n These will be b64-encoded unicode strings\n\n Note: This is never used\n """\n for ws in nb.worksheets:\n for cell in ws.cells:\n if cell.cell_type == "code":\n for output in cell.outputs:\n if "png" in output:\n output.png = encodebytes(output.png).decode("ascii")\n if "jpeg" in output:\n output.jpeg = encodebytes(output.jpeg).decode("ascii")\n return nb\n\n\nclass NotebookReader:\n """A class for reading notebooks."""\n\n def reads(self, s, **kwargs):\n """Read a notebook from a string."""\n msg = "loads must be implemented in a subclass"\n raise NotImplementedError(msg)\n\n def read(self, fp, 
**kwargs):\n """Read a notebook from a file like object"""\n return self.read(fp.read(), **kwargs)\n\n\nclass NotebookWriter:\n """A class for writing notebooks."""\n\n def writes(self, nb, **kwargs):\n """Write a notebook to a string."""\n msg = "loads must be implemented in a subclass"\n raise NotImplementedError(msg)\n\n def write(self, nb, fp, **kwargs):\n """Write a notebook to a file like object"""\n return fp.write(self.writes(nb, **kwargs))\n | .venv\Lib\site-packages\nbformat\v2\rwbase.py | rwbase.py | Python | 5,864 | 0.95 | 0.329341 | 0.123077 | node-utils | 950 | 2025-05-16T16:38:42.295087 | Apache-2.0 | false | daadd87b3c8f93b18321c0331959531a |
"""The main API for the v2 notebook format.\n\nAuthors:\n\n* Brian Granger\n"""\n\n# -----------------------------------------------------------------------------\n# Copyright (C) 2008-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file LICENSE, distributed as part of this software.\n# -----------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\nfrom __future__ import annotations\n\nimport os\n\nfrom .convert import downgrade, upgrade\nfrom .nbbase import (\n NotebookNode,\n new_author,\n new_code_cell,\n new_metadata,\n new_notebook,\n new_output,\n new_text_cell,\n new_worksheet,\n)\nfrom .nbjson import reads as read_json\nfrom .nbjson import reads as reads_json\nfrom .nbjson import to_notebook as to_notebook_json\nfrom .nbjson import writes as write_json\nfrom .nbjson import writes as writes_json\nfrom .nbpy import reads as read_py\nfrom .nbpy import reads as reads_py\nfrom .nbpy import to_notebook as to_notebook_py\nfrom .nbpy import writes as write_py\nfrom .nbpy import writes as writes_py\n\n# Implementation removed, vulnerable to DoS attacks\nfrom .nbxml import reads as read_xml\nfrom .nbxml import reads as reads_xml\nfrom .nbxml import to_notebook as to_notebook_xml\n\n# -----------------------------------------------------------------------------\n# Code\n# -----------------------------------------------------------------------------\n\nnbformat = 2\nnbformat_minor = 0\n\n\ndef parse_filename(fname):\n """Parse a notebook filename.\n\n This function takes a notebook filename and returns the notebook\n format (json/py) and the notebook name. 
This logic can be\n summarized as follows:\n\n * notebook.ipynb -> (notebook.ipynb, notebook, json)\n * notebook.json -> (notebook.json, notebook, json)\n * notebook.py -> (notebook.py, notebook, py)\n * notebook -> (notebook.ipynb, notebook, json)\n\n Parameters\n ----------\n fname : unicode\n The notebook filename. The filename can use a specific filename\n extension (.ipynb, .json, .py) or none, in which case .ipynb will\n be assumed.\n\n Returns\n -------\n (fname, name, format) : (unicode, unicode, unicode)\n The filename, notebook name and format.\n """\n basename, ext = os.path.splitext(fname) # noqa: PTH122\n if ext in [".ipynb", ".json"]:\n format_ = "json"\n elif ext == ".py":\n format_ = "py"\n else:\n basename = fname\n fname = fname + ".ipynb"\n format_ = "json"\n return fname, basename, format_\n | .venv\Lib\site-packages\nbformat\v2\__init__.py | __init__.py | Python | 2,735 | 0.95 | 0.044444 | 0.24 | python-kit | 498 | 2024-12-17T04:23:42.206656 | GPL-3.0 | false | 2e44a1170449c3b316078f5388631fc4 |
\n\n | .venv\Lib\site-packages\nbformat\v2\__pycache__\convert.cpython-313.pyc | convert.cpython-313.pyc | Other | 1,969 | 0.8 | 0.027027 | 0.0625 | awesome-app | 357 | 2024-02-17T18:28:50.243620 | Apache-2.0 | false | dd684aa69b07b303719f48c18bb082e7 |
\n\n | .venv\Lib\site-packages\nbformat\v2\__pycache__\nbbase.cpython-313.pyc | nbbase.cpython-313.pyc | Other | 5,782 | 0.95 | 0 | 0.013889 | node-utils | 815 | 2024-03-07T06:53:07.790774 | BSD-3-Clause | false | fb6e0c5c41fbe1c34984931fe027ce99 |
\n\n | .venv\Lib\site-packages\nbformat\v2\__pycache__\nbjson.cpython-313.pyc | nbjson.cpython-313.pyc | Other | 3,035 | 0.8 | 0 | 0.030303 | awesome-app | 991 | 2024-09-20T03:13:45.157432 | Apache-2.0 | false | 4d92ca770ea926c1325aaf6c62c43722 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.