diff --git a/.gitattributes b/.gitattributes index 60b99e64d1ecaa4663687e6de6dee78f5cf11b7a..4b41c383dee7e4ebfaa4cbf088cf0fe997e139a3 100644 --- a/.gitattributes +++ b/.gitattributes @@ -331,3 +331,4 @@ parrot/lib/python3.10/site-packages/decord/libdecord.so filter=lfs diff=lfs merg llava_next/lib/python3.10/site-packages/pandas/tests/indexing/__pycache__/test_loc.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text parrot/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/modeling_seamless_m4t_v2.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text parrot/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/modeling_speecht5.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +parrot/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_tf_objects.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text diff --git a/llava_next/lib/python3.10/site-packages/pyparsing/__init__.py b/llava_next/lib/python3.10/site-packages/pyparsing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..543ceb62bd531440ab3697d17076d219efe910a6 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/pyparsing/__init__.py @@ -0,0 +1,326 @@ +# module pyparsing.py +# +# Copyright (c) 2003-2022 Paul T. McGuire +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# + +__doc__ = """ +pyparsing module - Classes and methods to define and execute parsing grammars +============================================================================= + +The pyparsing module is an alternative approach to creating and +executing simple grammars, vs. the traditional lex/yacc approach, or the +use of regular expressions. With pyparsing, you don't need to learn +a new syntax for defining grammars or matching expressions - the parsing +module provides a library of classes that you use to construct the +grammar directly in Python. + +Here is a program to parse "Hello, World!" (or any greeting of the form +``", !"``), built up using :class:`Word`, +:class:`Literal`, and :class:`And` elements +(the :meth:`'+'` operators create :class:`And` expressions, +and the strings are auto-converted to :class:`Literal` expressions):: + + from pyparsing import Word, alphas + + # define grammar of a greeting + greet = Word(alphas) + "," + Word(alphas) + "!" + + hello = "Hello, World!" + print(hello, "->", greet.parse_string(hello)) + +The program outputs the following:: + + Hello, World! -> ['Hello', ',', 'World', '!'] + +The Python representation of the grammar is quite readable, owing to the +self-explanatory class names, and the use of :class:`'+'`, +:class:`'|'`, :class:`'^'` and :class:`'&'` operators. + +The :class:`ParseResults` object returned from +:class:`ParserElement.parse_string` can be +accessed as a nested list, a dictionary, or an object with named +attributes. 
+ +The pyparsing module handles some of the problems that are typically +vexing when writing text parsers: + + - extra or missing whitespace (the above program will also handle + "Hello,World!", "Hello , World !", etc.) + - quoted strings + - embedded comments + + +Getting Started - +----------------- +Visit the classes :class:`ParserElement` and :class:`ParseResults` to +see the base classes that most other pyparsing +classes inherit from. Use the docstrings for examples of how to: + + - construct literal match expressions from :class:`Literal` and + :class:`CaselessLiteral` classes + - construct character word-group expressions using the :class:`Word` + class + - see how to create repetitive expressions using :class:`ZeroOrMore` + and :class:`OneOrMore` classes + - use :class:`'+'`, :class:`'|'`, :class:`'^'`, + and :class:`'&'` operators to combine simple expressions into + more complex ones + - associate names with your parsed results using + :class:`ParserElement.set_results_name` + - access the parsed data, which is returned as a :class:`ParseResults` + object + - find some helpful expression short-cuts like :class:`DelimitedList` + and :class:`one_of` + - find more useful common expressions in the :class:`pyparsing_common` + namespace class +""" +from typing import NamedTuple + + +class version_info(NamedTuple): + major: int + minor: int + micro: int + releaselevel: str + serial: int + + @property + def __version__(self): + return ( + f"{self.major}.{self.minor}.{self.micro}" + + ( + f"{'r' if self.releaselevel[0] == 'c' else ''}{self.releaselevel[0]}{self.serial}", + "", + )[self.releaselevel == "final"] + ) + + def __str__(self): + return f"{__name__} {self.__version__} / {__version_time__}" + + def __repr__(self): + return f"{__name__}.{type(self).__name__}({', '.join('{}={!r}'.format(*nv) for nv in zip(self._fields, self))})" + + +__version_info__ = version_info(3, 2, 0, "final", 1) +__version_time__ = "13 Oct 2024 09:46 UTC" +__version__ = 
__version_info__.__version__ +__versionTime__ = __version_time__ +__author__ = "Paul McGuire " + +from .util import * +from .exceptions import * +from .actions import * +from .core import __diag__, __compat__ +from .results import * +from .core import * +from .core import _builtin_exprs as core_builtin_exprs +from .helpers import * +from .helpers import _builtin_exprs as helper_builtin_exprs + +from .unicode import unicode_set, UnicodeRangeList, pyparsing_unicode as unicode +from .testing import pyparsing_test as testing +from .common import ( + pyparsing_common as common, + _builtin_exprs as common_builtin_exprs, +) + +# Compatibility synonyms +if "pyparsing_unicode" not in globals(): + pyparsing_unicode = unicode # type: ignore[misc] +if "pyparsing_common" not in globals(): + pyparsing_common = common +if "pyparsing_test" not in globals(): + pyparsing_test = testing + +core_builtin_exprs += common_builtin_exprs + helper_builtin_exprs + + +__all__ = [ + "__version__", + "__version_time__", + "__author__", + "__compat__", + "__diag__", + "And", + "AtLineStart", + "AtStringStart", + "CaselessKeyword", + "CaselessLiteral", + "CharsNotIn", + "CloseMatch", + "Combine", + "DelimitedList", + "Dict", + "Each", + "Empty", + "FollowedBy", + "Forward", + "GoToColumn", + "Group", + "IndentedBlock", + "Keyword", + "LineEnd", + "LineStart", + "Literal", + "Located", + "PrecededBy", + "MatchFirst", + "NoMatch", + "NotAny", + "OneOrMore", + "OnlyOnce", + "OpAssoc", + "Opt", + "Optional", + "Or", + "ParseBaseException", + "ParseElementEnhance", + "ParseException", + "ParseExpression", + "ParseFatalException", + "ParseResults", + "ParseSyntaxException", + "ParserElement", + "PositionToken", + "QuotedString", + "RecursiveGrammarException", + "Regex", + "SkipTo", + "StringEnd", + "StringStart", + "Suppress", + "Tag", + "Token", + "TokenConverter", + "White", + "Word", + "WordEnd", + "WordStart", + "ZeroOrMore", + "Char", + "alphanums", + "alphas", + "alphas8bit", + "any_close_tag", + 
"any_open_tag", + "autoname_elements", + "c_style_comment", + "col", + "common_html_entity", + "condition_as_parse_action", + "counted_array", + "cpp_style_comment", + "dbl_quoted_string", + "dbl_slash_comment", + "delimited_list", + "dict_of", + "empty", + "hexnums", + "html_comment", + "identchars", + "identbodychars", + "infix_notation", + "java_style_comment", + "line", + "line_end", + "line_start", + "lineno", + "make_html_tags", + "make_xml_tags", + "match_only_at_col", + "match_previous_expr", + "match_previous_literal", + "nested_expr", + "null_debug_action", + "nums", + "one_of", + "original_text_for", + "printables", + "punc8bit", + "pyparsing_common", + "pyparsing_test", + "pyparsing_unicode", + "python_style_comment", + "quoted_string", + "remove_quotes", + "replace_with", + "replace_html_entity", + "rest_of_line", + "sgl_quoted_string", + "srange", + "string_end", + "string_start", + "token_map", + "trace_parse_action", + "ungroup", + "unicode_set", + "unicode_string", + "with_attribute", + "with_class", + # pre-PEP8 compatibility names + "__versionTime__", + "anyCloseTag", + "anyOpenTag", + "cStyleComment", + "commonHTMLEntity", + "conditionAsParseAction", + "countedArray", + "cppStyleComment", + "dblQuotedString", + "dblSlashComment", + "delimitedList", + "dictOf", + "htmlComment", + "indentedBlock", + "infixNotation", + "javaStyleComment", + "lineEnd", + "lineStart", + "locatedExpr", + "makeHTMLTags", + "makeXMLTags", + "matchOnlyAtCol", + "matchPreviousExpr", + "matchPreviousLiteral", + "nestedExpr", + "nullDebugAction", + "oneOf", + "opAssoc", + "originalTextFor", + "pythonStyleComment", + "quotedString", + "removeQuotes", + "replaceHTMLEntity", + "replaceWith", + "restOfLine", + "sglQuotedString", + "stringEnd", + "stringStart", + "tokenMap", + "traceParseAction", + "unicodeString", + "withAttribute", + "withClass", + "common", + "unicode", + "testing", +] diff --git a/llava_next/lib/python3.10/site-packages/pyparsing/actions.py 
b/llava_next/lib/python3.10/site-packages/pyparsing/actions.py new file mode 100644 index 0000000000000000000000000000000000000000..1d2dce99e199f68c8a34c31b468d9aa3e1e3c023 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/pyparsing/actions.py @@ -0,0 +1,206 @@ +# actions.py + +from .exceptions import ParseException +from .util import col, replaced_by_pep8 + + +class OnlyOnce: + """ + Wrapper for parse actions, to ensure they are only called once. + """ + + def __init__(self, method_call): + from .core import _trim_arity + + self.callable = _trim_arity(method_call) + self.called = False + + def __call__(self, s, l, t): + if not self.called: + results = self.callable(s, l, t) + self.called = True + return results + raise ParseException(s, l, "OnlyOnce obj called multiple times w/out reset") + + def reset(self): + """ + Allow the associated parse action to be called once more. + """ + + self.called = False + + +def match_only_at_col(n): + """ + Helper method for defining parse actions that require matching at + a specific column in the input text. + """ + + def verify_col(strg, locn, toks): + if col(locn, strg) != n: + raise ParseException(strg, locn, f"matched token not at column {n}") + + return verify_col + + +def replace_with(repl_str): + """ + Helper method for common parse actions that simply return + a literal value. Especially useful when used with + :class:`transform_string` (). + + Example:: + + num = Word(nums).set_parse_action(lambda toks: int(toks[0])) + na = one_of("N/A NA").set_parse_action(replace_with(math.nan)) + term = na | num + + term[1, ...].parse_string("324 234 N/A 234") # -> [324, 234, nan, 234] + """ + return lambda s, l, t: [repl_str] + + +def remove_quotes(s, l, t): + """ + Helper parse action for removing quotation marks from parsed + quoted strings. 
+ + Example:: + + # by default, quotation marks are included in parsed results + quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"] + + # use remove_quotes to strip quotation marks from parsed results + quoted_string.set_parse_action(remove_quotes) + quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"] + """ + return t[0][1:-1] + + +def with_attribute(*args, **attr_dict): + """ + Helper to create a validating parse action to be used with start + tags created with :class:`make_xml_tags` or + :class:`make_html_tags`. Use ``with_attribute`` to qualify + a starting tag with a required attribute value, to avoid false + matches on common tags such as ```` or ``
``. + + Call ``with_attribute`` with a series of attribute names and + values. Specify the list of filter attributes names and values as: + + - keyword arguments, as in ``(align="right")``, or + - as an explicit dict with ``**`` operator, when an attribute + name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}`` + - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))`` + + For attribute names with a namespace prefix, you must use the second + form. Attribute names are matched insensitive to upper/lower case. + + If just testing for ``class`` (with or without a namespace), use + :class:`with_class`. + + To verify that the attribute exists, but without specifying a value, + pass ``with_attribute.ANY_VALUE`` as the value. + + Example:: + + html = ''' +
+ Some text +
1 4 0 1 0
+
1,3 2,3 1,1
+
this has no type
+
+ ''' + div,div_end = make_html_tags("div") + + # only match div tag having a type attribute with value "grid" + div_grid = div().set_parse_action(with_attribute(type="grid")) + grid_expr = div_grid + SkipTo(div | div_end)("body") + for grid_header in grid_expr.search_string(html): + print(grid_header.body) + + # construct a match with any div tag having a type attribute, regardless of the value + div_any_type = div().set_parse_action(with_attribute(type=with_attribute.ANY_VALUE)) + div_expr = div_any_type + SkipTo(div | div_end)("body") + for div_header in div_expr.search_string(html): + print(div_header.body) + + prints:: + + 1 4 0 1 0 + + 1 4 0 1 0 + 1,3 2,3 1,1 + """ + if args: + attrs = args[:] + else: + attrs = attr_dict.items() + attrs = [(k, v) for k, v in attrs] + + def pa(s, l, tokens): + for attrName, attrValue in attrs: + if attrName not in tokens: + raise ParseException(s, l, "no matching attribute " + attrName) + if attrValue != with_attribute.ANY_VALUE and tokens[attrName] != attrValue: + raise ParseException( + s, + l, + f"attribute {attrName!r} has value {tokens[attrName]!r}, must be {attrValue!r}", + ) + + return pa + + +with_attribute.ANY_VALUE = object() # type: ignore [attr-defined] + + +def with_class(classname, namespace=""): + """ + Simplified version of :class:`with_attribute` when + matching on a div class - made difficult because ``class`` is + a reserved word in Python. + + Example:: + + html = ''' +
+ Some text +
1 4 0 1 0
+
1,3 2,3 1,1
+
this <div> has no class
+
+ + ''' + div,div_end = make_html_tags("div") + div_grid = div().set_parse_action(with_class("grid")) + + grid_expr = div_grid + SkipTo(div | div_end)("body") + for grid_header in grid_expr.search_string(html): + print(grid_header.body) + + div_any_type = div().set_parse_action(with_class(withAttribute.ANY_VALUE)) + div_expr = div_any_type + SkipTo(div | div_end)("body") + for div_header in div_expr.search_string(html): + print(div_header.body) + + prints:: + + 1 4 0 1 0 + + 1 4 0 1 0 + 1,3 2,3 1,1 + """ + classattr = f"{namespace}:class" if namespace else "class" + return with_attribute(**{classattr: classname}) + + +# Compatibility synonyms +# fmt: off +replaceWith = replaced_by_pep8("replaceWith", replace_with) +removeQuotes = replaced_by_pep8("removeQuotes", remove_quotes) +withAttribute = replaced_by_pep8("withAttribute", with_attribute) +withClass = replaced_by_pep8("withClass", with_class) +matchOnlyAtCol = replaced_by_pep8("matchOnlyAtCol", match_only_at_col) +# fmt: on diff --git a/llava_next/lib/python3.10/site-packages/pyparsing/exceptions.py b/llava_next/lib/python3.10/site-packages/pyparsing/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..57a1579d121e46e407efe73f5cc827d29eb352e8 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/pyparsing/exceptions.py @@ -0,0 +1,314 @@ +# exceptions.py +from __future__ import annotations + +import copy +import re +import sys +import typing +from functools import cached_property + +from .unicode import pyparsing_unicode as ppu +from .util import ( + _collapse_string_to_ranges, + col, + line, + lineno, + replaced_by_pep8, +) + + +class _ExceptionWordUnicodeSet( + ppu.Latin1, ppu.LatinA, ppu.LatinB, ppu.Greek, ppu.Cyrillic +): + pass + + +_extract_alphanums = _collapse_string_to_ranges(_ExceptionWordUnicodeSet.alphanums) +_exception_word_extractor = re.compile("([" + _extract_alphanums + "]{1,16})|.") + + +class ParseBaseException(Exception): + """base exception class for all 
parsing runtime exceptions""" + + loc: int + msg: str + pstr: str + parser_element: typing.Any # "ParserElement" + args: tuple[str, int, typing.Optional[str]] + + __slots__ = ( + "loc", + "msg", + "pstr", + "parser_element", + "args", + ) + + # Performance tuning: we construct a *lot* of these, so keep this + # constructor as small and fast as possible + def __init__( + self, + pstr: str, + loc: int = 0, + msg: typing.Optional[str] = None, + elem=None, + ): + if msg is None: + msg, pstr = pstr, "" + + self.loc = loc + self.msg = msg + self.pstr = pstr + self.parser_element = elem + self.args = (pstr, loc, msg) + + @staticmethod + def explain_exception(exc: Exception, depth: int = 16) -> str: + """ + Method to take an exception and translate the Python internal traceback into a list + of the pyparsing expressions that caused the exception to be raised. + + Parameters: + + - exc - exception raised during parsing (need not be a ParseException, in support + of Python exceptions that might be raised in a parse action) + - depth (default=16) - number of levels back in the stack trace to list expression + and function names; if None, the full stack trace names will be listed; if 0, only + the failing input line, marker, and exception string will be shown + + Returns a multi-line string listing the ParserElements and/or function names in the + exception's stack trace. 
+ """ + import inspect + from .core import ParserElement + + if depth is None: + depth = sys.getrecursionlimit() + ret: list[str] = [] + if isinstance(exc, ParseBaseException): + ret.append(exc.line) + ret.append(f"{' ' * (exc.column - 1)}^") + ret.append(f"{type(exc).__name__}: {exc}") + + if depth <= 0 or exc.__traceback__ is None: + return "\n".join(ret) + + callers = inspect.getinnerframes(exc.__traceback__, context=depth) + seen: set[int] = set() + for ff in callers[-depth:]: + frm = ff[0] + + f_self = frm.f_locals.get("self", None) + if isinstance(f_self, ParserElement): + if not frm.f_code.co_name.startswith(("parseImpl", "_parseNoCache")): + continue + if id(f_self) in seen: + continue + seen.add(id(f_self)) + + self_type = type(f_self) + ret.append(f"{self_type.__module__}.{self_type.__name__} - {f_self}") + + elif f_self is not None: + self_type = type(f_self) + ret.append(f"{self_type.__module__}.{self_type.__name__}") + + else: + code = frm.f_code + if code.co_name in ("wrapper", ""): + continue + + ret.append(code.co_name) + + depth -= 1 + if not depth: + break + + return "\n".join(ret) + + @classmethod + def _from_exception(cls, pe) -> ParseBaseException: + """ + internal factory method to simplify creating one type of ParseException + from another - avoids having __init__ signature conflicts among subclasses + """ + return cls(pe.pstr, pe.loc, pe.msg, pe.parser_element) + + @cached_property + def line(self) -> str: + """ + Return the line of text where the exception occurred. + """ + return line(self.loc, self.pstr) + + @cached_property + def lineno(self) -> int: + """ + Return the 1-based line number of text where the exception occurred. + """ + return lineno(self.loc, self.pstr) + + @cached_property + def col(self) -> int: + """ + Return the 1-based column on the line of text where the exception occurred. 
+ """ + return col(self.loc, self.pstr) + + @cached_property + def column(self) -> int: + """ + Return the 1-based column on the line of text where the exception occurred. + """ + return col(self.loc, self.pstr) + + @cached_property + def found(self) -> str: + if not self.pstr: + return "" + + if self.loc >= len(self.pstr): + return "end of text" + + # pull out next word at error location + found_match = _exception_word_extractor.match(self.pstr, self.loc) + if found_match is not None: + found_text = found_match.group(0) + else: + found_text = self.pstr[self.loc : self.loc + 1] + + return repr(found_text).replace(r"\\", "\\") + + # pre-PEP8 compatibility + @property + def parserElement(self): + return self.parser_element + + @parserElement.setter + def parserElement(self, elem): + self.parser_element = elem + + def copy(self): + return copy.copy(self) + + def formatted_message(self) -> str: + found_phrase = f", found {self.found}" if self.found else "" + return f"{self.msg}{found_phrase} (at char {self.loc}), (line:{self.lineno}, col:{self.column})" + + def __str__(self) -> str: + return self.formatted_message() + + def __repr__(self): + return str(self) + + def mark_input_line( + self, marker_string: typing.Optional[str] = None, *, markerString: str = ">!<" + ) -> str: + """ + Extracts the exception line from the input string, and marks + the location of the exception with a special symbol. + """ + markerString = marker_string if marker_string is not None else markerString + line_str = self.line + line_column = self.column - 1 + if markerString: + line_str = f"{line_str[:line_column]}{markerString}{line_str[line_column:]}" + return line_str.strip() + + def explain(self, depth: int = 16) -> str: + """ + Method to translate the Python internal traceback into a list + of the pyparsing expressions that caused the exception to be raised. 
+ + Parameters: + + - depth (default=16) - number of levels back in the stack trace to list expression + and function names; if None, the full stack trace names will be listed; if 0, only + the failing input line, marker, and exception string will be shown + + Returns a multi-line string listing the ParserElements and/or function names in the + exception's stack trace. + + Example:: + + # an expression to parse 3 integers + expr = pp.Word(pp.nums) * 3 + try: + # a failing parse - the third integer is prefixed with "A" + expr.parse_string("123 456 A789") + except pp.ParseException as pe: + print(pe.explain(depth=0)) + + prints:: + + 123 456 A789 + ^ + ParseException: Expected W:(0-9), found 'A' (at char 8), (line:1, col:9) + + Note: the diagnostic output will include string representations of the expressions + that failed to parse. These representations will be more helpful if you use `set_name` to + give identifiable names to your expressions. Otherwise they will use the default string + forms, which may be cryptic to read. + + Note: pyparsing's default truncation of exception tracebacks may also truncate the + stack of expressions that are displayed in the ``explain`` output. 
To get the full listing + of parser expressions, you may have to set ``ParserElement.verbose_stacktrace = True`` + """ + return self.explain_exception(self, depth) + + # Compatibility synonyms + # fmt: off + markInputline = replaced_by_pep8("markInputline", mark_input_line) + # fmt: on + + +class ParseException(ParseBaseException): + """ + Exception thrown when a parse expression doesn't match the input string + + Example:: + + integer = Word(nums).set_name("integer") + try: + integer.parse_string("ABC") + except ParseException as pe: + print(pe) + print(f"column: {pe.column}") + + prints:: + + Expected integer (at char 0), (line:1, col:1) column: 1 + + """ + + +class ParseFatalException(ParseBaseException): + """ + User-throwable exception thrown when inconsistent parse content + is found; stops all parsing immediately + """ + + +class ParseSyntaxException(ParseFatalException): + """ + Just like :class:`ParseFatalException`, but thrown internally + when an :class:`ErrorStop` ('-' operator) indicates + that parsing is to stop immediately because an unbacktrackable + syntax error has been found. + """ + + +class RecursiveGrammarException(Exception): + """ + Exception thrown by :class:`ParserElement.validate` if the + grammar could be left-recursive; parser may need to enable + left recursion using :class:`ParserElement.enable_left_recursion` + + Deprecated: only used by deprecated method ParserElement.validate. 
+ """ + + def __init__(self, parseElementList): + self.parseElementTrace = parseElementList + + def __str__(self) -> str: + return f"RecursiveGrammarException: {self.parseElementTrace}" diff --git a/llava_next/lib/python3.10/site-packages/sniffio/__pycache__/__init__.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/sniffio/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a8d62303e50b5af6f6b65ac7fa97a1f6539beaa Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/sniffio/__pycache__/__init__.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/sniffio/__pycache__/_impl.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/sniffio/__pycache__/_impl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8763cf952f8c9cad042ace8864dec8edb9474c3 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/sniffio/__pycache__/_impl.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/sniffio/__pycache__/_version.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/sniffio/__pycache__/_version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc67d29782d45433d9ad10deb3a72aca41bf7a8e Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/sniffio/__pycache__/_version.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/sniffio/_tests/__init__.py b/llava_next/lib/python3.10/site-packages/sniffio/_tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llava_next/lib/python3.10/site-packages/sniffio/_tests/__pycache__/__init__.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/sniffio/_tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f88513a3fc90893c1c9cb3b53881082e68fcb24 Binary 
files /dev/null and b/llava_next/lib/python3.10/site-packages/sniffio/_tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/sniffio/_tests/__pycache__/test_sniffio.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/sniffio/_tests/__pycache__/test_sniffio.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e76af36a272df3da8c7680756bb53b6a096720e5 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/sniffio/_tests/__pycache__/test_sniffio.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/sniffio/_tests/test_sniffio.py b/llava_next/lib/python3.10/site-packages/sniffio/_tests/test_sniffio.py new file mode 100644 index 0000000000000000000000000000000000000000..02945a947527a9bfb396d0cd35ffee22eda665bb --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/sniffio/_tests/test_sniffio.py @@ -0,0 +1,84 @@ +import os +import sys + +import pytest + +from .. import ( + current_async_library, AsyncLibraryNotFoundError, + current_async_library_cvar, thread_local +) + + +def test_basics_cvar(): + with pytest.raises(AsyncLibraryNotFoundError): + current_async_library() + + token = current_async_library_cvar.set("generic-lib") + try: + assert current_async_library() == "generic-lib" + finally: + current_async_library_cvar.reset(token) + + with pytest.raises(AsyncLibraryNotFoundError): + current_async_library() + + +def test_basics_tlocal(): + with pytest.raises(AsyncLibraryNotFoundError): + current_async_library() + + old_name, thread_local.name = thread_local.name, "generic-lib" + try: + assert current_async_library() == "generic-lib" + finally: + thread_local.name = old_name + + with pytest.raises(AsyncLibraryNotFoundError): + current_async_library() + + +def test_asyncio(): + import asyncio + + with pytest.raises(AsyncLibraryNotFoundError): + current_async_library() + + ran = [] + + async def this_is_asyncio(): + assert current_async_library() 
== "asyncio" + # Call it a second time to exercise the caching logic + assert current_async_library() == "asyncio" + ran.append(True) + + asyncio.run(this_is_asyncio()) + assert ran == [True] + + with pytest.raises(AsyncLibraryNotFoundError): + current_async_library() + + +@pytest.mark.skipif( + sys.version_info >= (3, 12), + reason= + "curio broken on 3.12 (https://github.com/python-trio/sniffio/pull/42)", +) +def test_curio(): + import curio + + with pytest.raises(AsyncLibraryNotFoundError): + current_async_library() + + ran = [] + + async def this_is_curio(): + assert current_async_library() == "curio" + # Call it a second time to exercise the caching logic + assert current_async_library() == "curio" + ran.append(True) + + curio.run(this_is_curio) + assert ran == [True] + + with pytest.raises(AsyncLibraryNotFoundError): + current_async_library() diff --git a/llava_next/lib/python3.10/site-packages/sniffio/_version.py b/llava_next/lib/python3.10/site-packages/sniffio/_version.py new file mode 100644 index 0000000000000000000000000000000000000000..0495d10545c9fd515ed51e890309d2b66e2c30bb --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/sniffio/_version.py @@ -0,0 +1,3 @@ +# This file is imported from __init__.py and exec'd from setup.py + +__version__ = "1.3.1" diff --git a/parrot/lib/python3.10/site-packages/docstring_parser/__init__.py b/parrot/lib/python3.10/site-packages/docstring_parser/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d80ca29fba2334cb3978686f55037900aeeb19a3 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/docstring_parser/__init__.py @@ -0,0 +1,34 @@ +"""Parse docstrings as per Sphinx notation.""" + +from .common import ( + Docstring, + DocstringDeprecated, + DocstringMeta, + DocstringParam, + DocstringRaises, + DocstringReturns, + DocstringStyle, + ParseError, + RenderingStyle, +) +from .parser import compose, parse, parse_from_object +from .util import combine_docstrings + +Style = 
DocstringStyle # backwards compatibility + +__all__ = [ + "parse", + "parse_from_object", + "combine_docstrings", + "compose", + "ParseError", + "Docstring", + "DocstringMeta", + "DocstringParam", + "DocstringRaises", + "DocstringReturns", + "DocstringDeprecated", + "DocstringStyle", + "RenderingStyle", + "Style", +] diff --git a/parrot/lib/python3.10/site-packages/docstring_parser/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/docstring_parser/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eac4031adee3816fee6eec003be570ae6410eb9e Binary files /dev/null and b/parrot/lib/python3.10/site-packages/docstring_parser/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/docstring_parser/__pycache__/common.cpython-310.pyc b/parrot/lib/python3.10/site-packages/docstring_parser/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f22cb5c216d2d8299f8747508b940943ce3a0eca Binary files /dev/null and b/parrot/lib/python3.10/site-packages/docstring_parser/__pycache__/common.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/docstring_parser/__pycache__/epydoc.cpython-310.pyc b/parrot/lib/python3.10/site-packages/docstring_parser/__pycache__/epydoc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa2395da96751d140366bd57432f43fc41c4041b Binary files /dev/null and b/parrot/lib/python3.10/site-packages/docstring_parser/__pycache__/epydoc.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/docstring_parser/__pycache__/google.cpython-310.pyc b/parrot/lib/python3.10/site-packages/docstring_parser/__pycache__/google.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e302f642f9feef17d9c7491b09f0db13031ad30 Binary files /dev/null and 
b/parrot/lib/python3.10/site-packages/docstring_parser/__pycache__/google.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/docstring_parser/__pycache__/numpydoc.cpython-310.pyc b/parrot/lib/python3.10/site-packages/docstring_parser/__pycache__/numpydoc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a281b3c163101ac5adf99595c886cae4fbb9ccce Binary files /dev/null and b/parrot/lib/python3.10/site-packages/docstring_parser/__pycache__/numpydoc.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/docstring_parser/__pycache__/parser.cpython-310.pyc b/parrot/lib/python3.10/site-packages/docstring_parser/__pycache__/parser.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..214005bb332bdb3472e21ff9dfe9cbb163b2f79f Binary files /dev/null and b/parrot/lib/python3.10/site-packages/docstring_parser/__pycache__/parser.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/docstring_parser/__pycache__/rest.cpython-310.pyc b/parrot/lib/python3.10/site-packages/docstring_parser/__pycache__/rest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea010cb09c2309ba63170da9ccb2797ee9154742 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/docstring_parser/__pycache__/rest.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/docstring_parser/attrdoc.py b/parrot/lib/python3.10/site-packages/docstring_parser/attrdoc.py new file mode 100644 index 0000000000000000000000000000000000000000..ac04cb1099f0aba4fd206a1de111077af5d20744 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/docstring_parser/attrdoc.py @@ -0,0 +1,138 @@ +"""Attribute docstrings parsing. + +.. 
seealso:: https://peps.python.org/pep-0257/#what-is-a-docstring +""" + +import ast +import inspect +import sys +import textwrap +import typing as T +from types import ModuleType + +from .common import Docstring, DocstringParam + +ast_constant_attr = {ast.Constant: "value"} + +if sys.version_info[:2] <= (3, 7): + ast_constant_attr.update( + { + ast.NameConstant: "value", + ast.Num: "n", + ast.Str: "s", + } + ) + + +def ast_get_constant_value(node: ast.AST) -> T.Any: + """Return the constant's value if the given node is a constant.""" + return getattr(node, ast_constant_attr[node.__class__]) + + +def ast_unparse(node: ast.AST) -> T.Optional[str]: + """Convert the AST node to source code as a string.""" + if hasattr(ast, "unparse"): + return ast.unparse(node) + # Support simple cases in Python < 3.9 + if isinstance(node, (ast.Str, ast.Num, ast.NameConstant, ast.Constant)): + return str(ast_get_constant_value(node)) + if isinstance(node, ast.Name): + return node.id + return None + + +def ast_is_literal_str(node: ast.AST) -> bool: + """Return True if the given node is a literal string.""" + return ( + isinstance(node, ast.Expr) + and isinstance(node.value, (ast.Constant, ast.Str)) + and isinstance(ast_get_constant_value(node.value), str) + ) + + +def ast_get_attribute( + node: ast.AST, +) -> T.Optional[T.Tuple[str, T.Optional[str], T.Optional[str]]]: + """Return name, type and default if the given node is an attribute.""" + if isinstance(node, (ast.Assign, ast.AnnAssign)): + target = ( + node.targets[0] if isinstance(node, ast.Assign) else node.target + ) + if isinstance(target, ast.Name): + type_str = None + if isinstance(node, ast.AnnAssign): + type_str = ast_unparse(node.annotation) + default = None + if node.value: + default = ast_unparse(node.value) + return target.id, type_str, default + return None + + +class AttributeDocstrings(ast.NodeVisitor): + """An ast.NodeVisitor that collects attribute docstrings.""" + + attr_docs = None + prev_attr = None + + def 
visit(self, node): + if self.prev_attr and ast_is_literal_str(node): + attr_name, attr_type, attr_default = self.prev_attr + self.attr_docs[attr_name] = ( + ast_get_constant_value(node.value), + attr_type, + attr_default, + ) + self.prev_attr = ast_get_attribute(node) + if isinstance(node, (ast.ClassDef, ast.Module)): + self.generic_visit(node) + + def get_attr_docs( + self, component: T.Any + ) -> T.Dict[str, T.Tuple[str, T.Optional[str], T.Optional[str]]]: + """Get attribute docstrings from the given component. + + :param component: component to process (class or module) + :returns: for each attribute docstring, a tuple with (description, + type, default) + """ + self.attr_docs = {} + self.prev_attr = None + try: + source = textwrap.dedent(inspect.getsource(component)) + except OSError: + pass + else: + tree = ast.parse(source) + if inspect.ismodule(component): + self.visit(tree) + elif isinstance(tree, ast.Module) and isinstance( + tree.body[0], ast.ClassDef + ): + self.visit(tree.body[0]) + return self.attr_docs + + +def add_attribute_docstrings( + obj: T.Union[type, ModuleType], docstring: Docstring +) -> None: + """Add attribute docstrings found in the object's source code. 
+ + :param obj: object from which to parse attribute docstrings + :param docstring: Docstring object where found attributes are added + :returns: list with names of added attributes + """ + params = set(p.arg_name for p in docstring.params) + for arg_name, (description, type_name, default) in ( + AttributeDocstrings().get_attr_docs(obj).items() + ): + if arg_name not in params: + param = DocstringParam( + args=["attribute", arg_name], + description=description, + arg_name=arg_name, + type_name=type_name, + is_optional=default is not None, + default=default, + ) + docstring.meta.append(param) diff --git a/parrot/lib/python3.10/site-packages/docstring_parser/common.py b/parrot/lib/python3.10/site-packages/docstring_parser/common.py new file mode 100644 index 0000000000000000000000000000000000000000..1738e9a1df6875983a33f74caa1cdf3368b3aeb6 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/docstring_parser/common.py @@ -0,0 +1,228 @@ +"""Common methods for parsing.""" +import enum +import typing as T + +PARAM_KEYWORDS = { + "param", + "parameter", + "arg", + "argument", + "attribute", + "key", + "keyword", +} +RAISES_KEYWORDS = {"raises", "raise", "except", "exception"} +DEPRECATION_KEYWORDS = {"deprecation", "deprecated"} +RETURNS_KEYWORDS = {"return", "returns"} +YIELDS_KEYWORDS = {"yield", "yields"} +EXAMPLES_KEYWORDS = {"example", "examples"} + + +class ParseError(RuntimeError): + """Base class for all parsing related errors.""" + + +class DocstringStyle(enum.Enum): + """Docstring style.""" + + REST = 1 + GOOGLE = 2 + NUMPYDOC = 3 + EPYDOC = 4 + AUTO = 255 + + +class RenderingStyle(enum.Enum): + """Rendering style when unparsing parsed docstrings.""" + + COMPACT = 1 + CLEAN = 2 + EXPANDED = 3 + + +class DocstringMeta: + """Docstring meta information. 
+ + Symbolizes lines in form of + + :param arg: description + :raises ValueError: if something happens + """ + + def __init__( + self, args: T.List[str], description: T.Optional[str] + ) -> None: + """Initialize self. + + :param args: list of arguments. The exact content of this variable is + dependent on the kind of docstring; it's used to distinguish + between custom docstring meta information items. + :param description: associated docstring description. + """ + self.args = args + self.description = description + + +class DocstringParam(DocstringMeta): + """DocstringMeta symbolizing :param metadata.""" + + def __init__( + self, + args: T.List[str], + description: T.Optional[str], + arg_name: str, + type_name: T.Optional[str], + is_optional: T.Optional[bool], + default: T.Optional[str], + ) -> None: + """Initialize self.""" + super().__init__(args, description) + self.arg_name = arg_name + self.type_name = type_name + self.is_optional = is_optional + self.default = default + + +class DocstringReturns(DocstringMeta): + """DocstringMeta symbolizing :returns or :yields metadata.""" + + def __init__( + self, + args: T.List[str], + description: T.Optional[str], + type_name: T.Optional[str], + is_generator: bool, + return_name: T.Optional[str] = None, + ) -> None: + """Initialize self.""" + super().__init__(args, description) + self.type_name = type_name + self.is_generator = is_generator + self.return_name = return_name + + +class DocstringRaises(DocstringMeta): + """DocstringMeta symbolizing :raises metadata.""" + + def __init__( + self, + args: T.List[str], + description: T.Optional[str], + type_name: T.Optional[str], + ) -> None: + """Initialize self.""" + super().__init__(args, description) + self.type_name = type_name + self.description = description + + +class DocstringDeprecated(DocstringMeta): + """DocstringMeta symbolizing deprecation metadata.""" + + def __init__( + self, + args: T.List[str], + description: T.Optional[str], + version: T.Optional[str], + ) -> 
None: + """Initialize self.""" + super().__init__(args, description) + self.version = version + self.description = description + + +class DocstringExample(DocstringMeta): + """DocstringMeta symbolizing example metadata.""" + + def __init__( + self, + args: T.List[str], + snippet: T.Optional[str], + description: T.Optional[str], + ) -> None: + """Initialize self.""" + super().__init__(args, description) + self.snippet = snippet + self.description = description + + +class Docstring: + """Docstring object representation.""" + + def __init__( + self, + style=None, # type: T.Optional[DocstringStyle] + ) -> None: + """Initialize self.""" + self.short_description = None # type: T.Optional[str] + self.long_description = None # type: T.Optional[str] + self.blank_after_short_description = False + self.blank_after_long_description = False + self.meta = [] # type: T.List[DocstringMeta] + self.style = style # type: T.Optional[DocstringStyle] + + @property + def description(self) -> T.Optional[str]: + """Return the full description of the function + + Returns None if the docstring did not include any description + """ + ret = [] + if self.short_description: + ret.append(self.short_description) + if self.blank_after_short_description: + ret.append("") + if self.long_description: + ret.append(self.long_description) + + if not ret: + return None + + return "\n".join(ret) + + @property + def params(self) -> T.List[DocstringParam]: + """Return a list of information on function params.""" + return [item for item in self.meta if isinstance(item, DocstringParam)] + + @property + def raises(self) -> T.List[DocstringRaises]: + """Return a list of information on the exceptions that the function + may raise. + """ + return [ + item for item in self.meta if isinstance(item, DocstringRaises) + ] + + @property + def returns(self) -> T.Optional[DocstringReturns]: + """Return a single information on function return. + + Takes the first return information. 
+ """ + for item in self.meta: + if isinstance(item, DocstringReturns): + return item + return None + + @property + def many_returns(self) -> T.List[DocstringReturns]: + """Return a list of information on function return.""" + return [ + item for item in self.meta if isinstance(item, DocstringReturns) + ] + + @property + def deprecation(self) -> T.Optional[DocstringDeprecated]: + """Return a single information on function deprecation notes.""" + for item in self.meta: + if isinstance(item, DocstringDeprecated): + return item + return None + + @property + def examples(self) -> T.List[DocstringExample]: + """Return a list of information on function examples.""" + return [ + item for item in self.meta if isinstance(item, DocstringExample) + ] diff --git a/parrot/lib/python3.10/site-packages/docstring_parser/epydoc.py b/parrot/lib/python3.10/site-packages/docstring_parser/epydoc.py new file mode 100644 index 0000000000000000000000000000000000000000..1ebc4a79737e19f405651fafdd1b139cb71a42cf --- /dev/null +++ b/parrot/lib/python3.10/site-packages/docstring_parser/epydoc.py @@ -0,0 +1,268 @@ +"""Epyoc-style docstring parsing. + +.. seealso:: http://epydoc.sourceforge.net/manual-fields.html +""" +import inspect +import re +import typing as T + +from .common import ( + Docstring, + DocstringMeta, + DocstringParam, + DocstringRaises, + DocstringReturns, + DocstringStyle, + ParseError, + RenderingStyle, +) + + +def _clean_str(string: str) -> T.Optional[str]: + string = string.strip() + if len(string) > 0: + return string + return None + + +def parse(text: str) -> Docstring: + """Parse the epydoc-style docstring into its components. 
+ + :returns: parsed docstring + """ + ret = Docstring(style=DocstringStyle.EPYDOC) + if not text: + return ret + + text = inspect.cleandoc(text) + match = re.search("^@", text, flags=re.M) + if match: + desc_chunk = text[: match.start()] + meta_chunk = text[match.start() :] + else: + desc_chunk = text + meta_chunk = "" + + parts = desc_chunk.split("\n", 1) + ret.short_description = parts[0] or None + if len(parts) > 1: + long_desc_chunk = parts[1] or "" + ret.blank_after_short_description = long_desc_chunk.startswith("\n") + ret.blank_after_long_description = long_desc_chunk.endswith("\n\n") + ret.long_description = long_desc_chunk.strip() or None + + param_pattern = re.compile( + r"(param|keyword|type)(\s+[_A-z][_A-z0-9]*\??):" + ) + raise_pattern = re.compile(r"(raise)(\s+[_A-z][_A-z0-9]*\??)?:") + return_pattern = re.compile(r"(return|rtype|yield|ytype):") + meta_pattern = re.compile( + r"([_A-z][_A-z0-9]+)((\s+[_A-z][_A-z0-9]*\??)*):" + ) + + # tokenize + stream: T.List[T.Tuple[str, str, T.List[str], str]] = [] + for match in re.finditer( + r"(^@.*?)(?=^@|\Z)", meta_chunk, flags=re.S | re.M + ): + chunk = match.group(0) + if not chunk: + continue + + param_match = re.search(param_pattern, chunk) + raise_match = re.search(raise_pattern, chunk) + return_match = re.search(return_pattern, chunk) + meta_match = re.search(meta_pattern, chunk) + + match = param_match or raise_match or return_match or meta_match + if not match: + raise ParseError(f'Error parsing meta information near "{chunk}".') + + desc_chunk = chunk[match.end() :] + if param_match: + base = "param" + key: str = match.group(1) + args = [match.group(2).strip()] + elif raise_match: + base = "raise" + key: str = match.group(1) + args = [] if match.group(2) is None else [match.group(2).strip()] + elif return_match: + base = "return" + key: str = match.group(1) + args = [] + else: + base = "meta" + key: str = match.group(1) + token = _clean_str(match.group(2).strip()) + args = [] if token is None else 
re.split(r"\s+", token) + + # Make sure we didn't match some existing keyword in an incorrect + # way here: + if key in [ + "param", + "keyword", + "type", + "return", + "rtype", + "yield", + "ytype", + ]: + raise ParseError( + f'Error parsing meta information near "{chunk}".' + ) + + desc = desc_chunk.strip() + if "\n" in desc: + first_line, rest = desc.split("\n", 1) + desc = first_line + "\n" + inspect.cleandoc(rest) + stream.append((base, key, args, desc)) + + # Combine type_name, arg_name, and description information + params: T.Dict[str, T.Dict[str, T.Any]] = {} + for (base, key, args, desc) in stream: + if base not in ["param", "return"]: + continue # nothing to do + + (arg_name,) = args or ("return",) + info = params.setdefault(arg_name, {}) + info_key = "type_name" if "type" in key else "description" + info[info_key] = desc + + if base == "return": + is_generator = key in {"ytype", "yield"} + if info.setdefault("is_generator", is_generator) != is_generator: + raise ParseError( + f'Error parsing meta information for "{arg_name}".' 
+ ) + + is_done: T.Dict[str, bool] = {} + for (base, key, args, desc) in stream: + if base == "param" and not is_done.get(args[0], False): + (arg_name,) = args + info = params[arg_name] + type_name = info.get("type_name") + + if type_name and type_name.endswith("?"): + is_optional = True + type_name = type_name[:-1] + else: + is_optional = False + + match = re.match(r".*defaults to (.+)", desc, flags=re.DOTALL) + default = match.group(1).rstrip(".") if match else None + + meta_item = DocstringParam( + args=[key, arg_name], + description=info.get("description"), + arg_name=arg_name, + type_name=type_name, + is_optional=is_optional, + default=default, + ) + is_done[arg_name] = True + elif base == "return" and not is_done.get("return", False): + info = params["return"] + meta_item = DocstringReturns( + args=[key], + description=info.get("description"), + type_name=info.get("type_name"), + is_generator=info.get("is_generator", False), + ) + is_done["return"] = True + elif base == "raise": + (type_name,) = args or (None,) + meta_item = DocstringRaises( + args=[key] + args, + description=desc, + type_name=type_name, + ) + elif base == "meta": + meta_item = DocstringMeta( + args=[key] + args, + description=desc, + ) + else: + (key, *_) = args or ("return",) + assert is_done.get(key, False) + continue # don't append + + ret.meta.append(meta_item) + + return ret + + +def compose( + docstring: Docstring, + rendering_style: RenderingStyle = RenderingStyle.COMPACT, + indent: str = " ", +) -> str: + """Render a parsed docstring into docstring text. 
+ + :param docstring: parsed docstring representation + :param rendering_style: the style to render docstrings + :param indent: the characters used as indentation in the docstring string + :returns: docstring text + """ + + def process_desc(desc: T.Optional[str], is_type: bool) -> str: + if not desc: + return "" + + if rendering_style == RenderingStyle.EXPANDED or ( + rendering_style == RenderingStyle.CLEAN and not is_type + ): + (first, *rest) = desc.splitlines() + return "\n".join( + ["\n" + indent + first] + [indent + line for line in rest] + ) + + (first, *rest) = desc.splitlines() + return "\n".join([" " + first] + [indent + line for line in rest]) + + parts: T.List[str] = [] + if docstring.short_description: + parts.append(docstring.short_description) + if docstring.blank_after_short_description: + parts.append("") + if docstring.long_description: + parts.append(docstring.long_description) + if docstring.blank_after_long_description: + parts.append("") + + for meta in docstring.meta: + if isinstance(meta, DocstringParam): + if meta.type_name: + type_name = ( + f"{meta.type_name}?" 
+ if meta.is_optional + else meta.type_name + ) + text = f"@type {meta.arg_name}:" + text += process_desc(type_name, True) + parts.append(text) + text = f"@param {meta.arg_name}:" + process_desc( + meta.description, False + ) + parts.append(text) + elif isinstance(meta, DocstringReturns): + (arg_key, type_key) = ( + ("yield", "ytype") + if meta.is_generator + else ("return", "rtype") + ) + if meta.type_name: + text = f"@{type_key}:" + process_desc(meta.type_name, True) + parts.append(text) + if meta.description: + text = f"@{arg_key}:" + process_desc(meta.description, False) + parts.append(text) + elif isinstance(meta, DocstringRaises): + text = f"@raise {meta.type_name}:" if meta.type_name else "@raise:" + text += process_desc(meta.description, False) + parts.append(text) + else: + text = f'@{" ".join(meta.args)}:' + text += process_desc(meta.description, False) + parts.append(text) + return "\n".join(parts) diff --git a/parrot/lib/python3.10/site-packages/docstring_parser/google.py b/parrot/lib/python3.10/site-packages/docstring_parser/google.py new file mode 100644 index 0000000000000000000000000000000000000000..2eca8f4aecd5d5010adf14f72b1dca19d5e433c4 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/docstring_parser/google.py @@ -0,0 +1,408 @@ +"""Google-style docstring parsing.""" + +import inspect +import re +import typing as T +from collections import OrderedDict, namedtuple +from enum import IntEnum + +from .common import ( + EXAMPLES_KEYWORDS, + PARAM_KEYWORDS, + RAISES_KEYWORDS, + RETURNS_KEYWORDS, + YIELDS_KEYWORDS, + Docstring, + DocstringExample, + DocstringMeta, + DocstringParam, + DocstringRaises, + DocstringReturns, + DocstringStyle, + ParseError, + RenderingStyle, +) + + +class SectionType(IntEnum): + """Types of sections.""" + + SINGULAR = 0 + """For sections like examples.""" + + MULTIPLE = 1 + """For sections like params.""" + + SINGULAR_OR_MULTIPLE = 2 + """For sections like returns or yields.""" + + +class 
Section(namedtuple("SectionBase", "title key type")): + """A docstring section.""" + + +GOOGLE_TYPED_ARG_REGEX = re.compile(r"\s*(.+?)\s*\(\s*(.*[^\s]+)\s*\)") +GOOGLE_ARG_DESC_REGEX = re.compile(r".*\. Defaults to (.+)\.") +MULTIPLE_PATTERN = re.compile(r"(\s*[^:\s]+:)|([^:]*\]:.*)") + +DEFAULT_SECTIONS = [ + Section("Arguments", "param", SectionType.MULTIPLE), + Section("Args", "param", SectionType.MULTIPLE), + Section("Parameters", "param", SectionType.MULTIPLE), + Section("Params", "param", SectionType.MULTIPLE), + Section("Raises", "raises", SectionType.MULTIPLE), + Section("Exceptions", "raises", SectionType.MULTIPLE), + Section("Except", "raises", SectionType.MULTIPLE), + Section("Attributes", "attribute", SectionType.MULTIPLE), + Section("Example", "examples", SectionType.SINGULAR), + Section("Examples", "examples", SectionType.SINGULAR), + Section("Returns", "returns", SectionType.SINGULAR_OR_MULTIPLE), + Section("Yields", "yields", SectionType.SINGULAR_OR_MULTIPLE), +] + + +class GoogleParser: + """Parser for Google-style docstrings.""" + + def __init__( + self, sections: T.Optional[T.List[Section]] = None, title_colon=True + ): + """Setup sections. + + :param sections: Recognized sections or None to defaults. + :param title_colon: require colon after section title. + """ + if not sections: + sections = DEFAULT_SECTIONS + self.sections = {s.title: s for s in sections} + self.title_colon = title_colon + self._setup() + + def _setup(self): + if self.title_colon: + colon = ":" + else: + colon = "" + self.titles_re = re.compile( + "^(" + + "|".join(f"({t})" for t in self.sections) + + ")" + + colon + + "[ \t\r\f\v]*$", + flags=re.M, + ) + + def _build_meta(self, text: str, title: str) -> DocstringMeta: + """Build docstring element. 
+ + :param text: docstring element text + :param title: title of section containing element + :return: + """ + + section = self.sections[title] + + if ( + section.type == SectionType.SINGULAR_OR_MULTIPLE + and not MULTIPLE_PATTERN.match(text) + ) or section.type == SectionType.SINGULAR: + return self._build_single_meta(section, text) + + if ":" not in text: + raise ParseError(f"Expected a colon in {text!r}.") + + # Split spec and description + before, desc = text.split(":", 1) + if desc: + desc = desc[1:] if desc[0] == " " else desc + if "\n" in desc: + first_line, rest = desc.split("\n", 1) + desc = first_line + "\n" + inspect.cleandoc(rest) + desc = desc.strip("\n") + + return self._build_multi_meta(section, before, desc) + + @staticmethod + def _build_single_meta(section: Section, desc: str) -> DocstringMeta: + if section.key in RETURNS_KEYWORDS | YIELDS_KEYWORDS: + return DocstringReturns( + args=[section.key], + description=desc, + type_name=None, + is_generator=section.key in YIELDS_KEYWORDS, + ) + if section.key in RAISES_KEYWORDS: + return DocstringRaises( + args=[section.key], description=desc, type_name=None + ) + if section.key in EXAMPLES_KEYWORDS: + return DocstringExample( + args=[section.key], snippet=None, description=desc + ) + if section.key in PARAM_KEYWORDS: + raise ParseError("Expected paramenter name.") + return DocstringMeta(args=[section.key], description=desc) + + @staticmethod + def _build_multi_meta( + section: Section, before: str, desc: str + ) -> DocstringMeta: + if section.key in PARAM_KEYWORDS: + match = GOOGLE_TYPED_ARG_REGEX.match(before) + if match: + arg_name, type_name = match.group(1, 2) + if type_name.endswith(", optional"): + is_optional = True + type_name = type_name[:-10] + elif type_name.endswith("?"): + is_optional = True + type_name = type_name[:-1] + else: + is_optional = False + else: + arg_name, type_name = before, None + is_optional = None + + match = GOOGLE_ARG_DESC_REGEX.match(desc) + default = match.group(1) if 
match else None + + return DocstringParam( + args=[section.key, before], + description=desc, + arg_name=arg_name, + type_name=type_name, + is_optional=is_optional, + default=default, + ) + if section.key in RETURNS_KEYWORDS | YIELDS_KEYWORDS: + return DocstringReturns( + args=[section.key, before], + description=desc, + type_name=before, + is_generator=section.key in YIELDS_KEYWORDS, + ) + if section.key in RAISES_KEYWORDS: + return DocstringRaises( + args=[section.key, before], description=desc, type_name=before + ) + return DocstringMeta(args=[section.key, before], description=desc) + + def add_section(self, section: Section): + """Add or replace a section. + + :param section: The new section. + """ + + self.sections[section.title] = section + self._setup() + + def parse(self, text: str) -> Docstring: + """Parse the Google-style docstring into its components. + + :returns: parsed docstring + """ + ret = Docstring(style=DocstringStyle.GOOGLE) + if not text: + return ret + + # Clean according to PEP-0257 + text = inspect.cleandoc(text) + + # Find first title and split on its position + match = self.titles_re.search(text) + if match: + desc_chunk = text[: match.start()] + meta_chunk = text[match.start() :] + else: + desc_chunk = text + meta_chunk = "" + + # Break description into short and long parts + parts = desc_chunk.split("\n", 1) + ret.short_description = parts[0] or None + if len(parts) > 1: + long_desc_chunk = parts[1] or "" + ret.blank_after_short_description = long_desc_chunk.startswith( + "\n" + ) + ret.blank_after_long_description = long_desc_chunk.endswith("\n\n") + ret.long_description = long_desc_chunk.strip() or None + + # Split by sections determined by titles + matches = list(self.titles_re.finditer(meta_chunk)) + if not matches: + return ret + splits = [] + for j in range(len(matches) - 1): + splits.append((matches[j].end(), matches[j + 1].start())) + splits.append((matches[-1].end(), len(meta_chunk))) + + chunks = OrderedDict() # type: 
T.Mapping[str,str] + for j, (start, end) in enumerate(splits): + title = matches[j].group(1) + if title not in self.sections: + continue + + # Clear Any Unknown Meta + # Ref: https://github.com/rr-/docstring_parser/issues/29 + meta_details = meta_chunk[start:end] + unknown_meta = re.search(r"\n\S", meta_details) + if unknown_meta is not None: + meta_details = meta_details[: unknown_meta.start()] + + chunks[title] = meta_details.strip("\n") + if not chunks: + return ret + + # Add elements from each chunk + for title, chunk in chunks.items(): + # Determine indent + indent_match = re.search(r"^\s*", chunk) + if not indent_match: + raise ParseError(f'Can\'t infer indent from "{chunk}"') + indent = indent_match.group() + + # Check for singular elements + if self.sections[title].type in [ + SectionType.SINGULAR, + SectionType.SINGULAR_OR_MULTIPLE, + ]: + part = inspect.cleandoc(chunk) + ret.meta.append(self._build_meta(part, title)) + continue + + # Split based on lines which have exactly that indent + _re = "^" + indent + r"(?=\S)" + c_matches = list(re.finditer(_re, chunk, flags=re.M)) + if not c_matches: + raise ParseError(f'No specification for "{title}": "{chunk}"') + c_splits = [] + for j in range(len(c_matches) - 1): + c_splits.append((c_matches[j].end(), c_matches[j + 1].start())) + c_splits.append((c_matches[-1].end(), len(chunk))) + for j, (start, end) in enumerate(c_splits): + part = chunk[start:end].strip("\n") + ret.meta.append(self._build_meta(part, title)) + + return ret + + +def parse(text: str) -> Docstring: + """Parse the Google-style docstring into its components. + + :returns: parsed docstring + """ + return GoogleParser().parse(text) + + +def compose( + docstring: Docstring, + rendering_style: RenderingStyle = RenderingStyle.COMPACT, + indent: str = " ", +) -> str: + """Render a parsed docstring into docstring text. 
+ + :param docstring: parsed docstring representation + :param rendering_style: the style to render docstrings + :param indent: the characters used as indentation in the docstring string + :returns: docstring text + """ + + def process_one( + one: T.Union[DocstringParam, DocstringReturns, DocstringRaises] + ): + head = "" + + if isinstance(one, DocstringParam): + head += one.arg_name or "" + elif isinstance(one, DocstringReturns): + head += one.return_name or "" + + if isinstance(one, DocstringParam) and one.is_optional: + optional = ( + "?" + if rendering_style == RenderingStyle.COMPACT + else ", optional" + ) + else: + optional = "" + + if one.type_name and head: + head += f" ({one.type_name}{optional}):" + elif one.type_name: + head += f"{one.type_name}{optional}:" + else: + head += ":" + head = indent + head + + if one.description and rendering_style == RenderingStyle.EXPANDED: + body = f"\n{indent}{indent}".join( + [head] + one.description.splitlines() + ) + parts.append(body) + elif one.description: + (first, *rest) = one.description.splitlines() + body = f"\n{indent}{indent}".join([head + " " + first] + rest) + parts.append(body) + else: + parts.append(head) + + def process_sect(name: str, args: T.List[T.Any]): + if args: + parts.append(name) + for arg in args: + process_one(arg) + parts.append("") + + parts: T.List[str] = [] + if docstring.short_description: + parts.append(docstring.short_description) + if docstring.blank_after_short_description: + parts.append("") + + if docstring.long_description: + parts.append(docstring.long_description) + if docstring.blank_after_long_description: + parts.append("") + + process_sect( + "Args:", [p for p in docstring.params or [] if p.args[0] == "param"] + ) + + process_sect( + "Attributes:", + [p for p in docstring.params or [] if p.args[0] == "attribute"], + ) + + process_sect( + "Returns:", + [p for p in docstring.many_returns or [] if not p.is_generator], + ) + + process_sect( + "Yields:", [p for p in 
docstring.many_returns or [] if p.is_generator] + ) + + process_sect("Raises:", docstring.raises or []) + + if docstring.returns and not docstring.many_returns: + ret = docstring.returns + parts.append("Yields:" if ret else "Returns:") + parts.append("-" * len(parts[-1])) + process_one(ret) + + for meta in docstring.meta: + if isinstance( + meta, (DocstringParam, DocstringReturns, DocstringRaises) + ): + continue # Already handled + parts.append(meta.args[0].replace("_", "").title() + ":") + if meta.description: + lines = [indent + l for l in meta.description.splitlines()] + parts.append("\n".join(lines)) + parts.append("") + + while parts and not parts[-1]: + parts.pop() + + return "\n".join(parts) diff --git a/parrot/lib/python3.10/site-packages/docstring_parser/numpydoc.py b/parrot/lib/python3.10/site-packages/docstring_parser/numpydoc.py new file mode 100644 index 0000000000000000000000000000000000000000..79ee4f3ca75d707efe1d8afc7bca5e1a445967d4 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/docstring_parser/numpydoc.py @@ -0,0 +1,532 @@ +"""Numpydoc-style docstring parsing. 
+ +:see: https://numpydoc.readthedocs.io/en/latest/format.html +""" + +import inspect +import itertools +import re +import typing as T +from textwrap import dedent + +from .common import ( + Docstring, + DocstringDeprecated, + DocstringExample, + DocstringMeta, + DocstringParam, + DocstringRaises, + DocstringReturns, + DocstringStyle, + RenderingStyle, +) + + +def _pairwise(iterable: T.Iterable, end=None) -> T.Iterable: + left, right = itertools.tee(iterable) + next(right, None) + return itertools.zip_longest(left, right, fillvalue=end) + + +def _clean_str(string: str) -> T.Optional[str]: + string = string.strip() + if len(string) > 0: + return string + return None + + +KV_REGEX = re.compile(r"^[^\s].*$", flags=re.M) +PARAM_KEY_REGEX = re.compile(r"^(?P.*?)(?:\s*:\s*(?P.*?))?$") +PARAM_OPTIONAL_REGEX = re.compile(r"(?P.*?)(?:, optional|\(optional\))$") + +# numpydoc format has no formal grammar for this, +# but we can make some educated guesses... +PARAM_DEFAULT_REGEX = re.compile( + r"(?[\w\-\.]*\w)" +) + +RETURN_KEY_REGEX = re.compile(r"^(?:(?P.*?)\s*:\s*)?(?P.*?)$") + + +class Section: + """Numpydoc section parser. + + :param title: section title. For most sections, this is a heading like + "Parameters" which appears on its own line, underlined by + en-dashes ('-') on the following line. + :param key: meta key string. In the parsed ``DocstringMeta`` instance this + will be the first element of the ``args`` attribute list. + """ + + def __init__(self, title: str, key: str) -> None: + self.title = title + self.key = key + + @property + def title_pattern(self) -> str: + """Regular expression pattern matching this section's header. + + This pattern will match this instance's ``title`` attribute in + an anonymous group. + """ + dashes = "-" * len(self.title) + return rf"^({self.title})\s*?\n{dashes}\s*$" + + def parse(self, text: str) -> T.Iterable[DocstringMeta]: + """Parse ``DocstringMeta`` objects from the body of this section. + + :param text: section body text. 
Should be cleaned with + ``inspect.cleandoc`` before parsing. + """ + yield DocstringMeta([self.key], description=_clean_str(text)) + + +class _KVSection(Section): + """Base parser for numpydoc sections with key-value syntax. + + E.g. sections that look like this: + key + value + key2 : type + values can also span... + ... multiple lines + """ + + def _parse_item(self, key: str, value: str) -> DocstringMeta: + pass + + def parse(self, text: str) -> T.Iterable[DocstringMeta]: + for match, next_match in _pairwise(KV_REGEX.finditer(text)): + start = match.end() + end = next_match.start() if next_match is not None else None + value = text[start:end] + yield self._parse_item( + key=match.group(), value=inspect.cleandoc(value) + ) + + +class _SphinxSection(Section): + """Base parser for numpydoc sections with sphinx-style syntax. + + E.g. sections that look like this: + .. title:: something + possibly over multiple lines + """ + + @property + def title_pattern(self) -> str: + return rf"^\.\.\s*({self.title})\s*::" + + +class ParamSection(_KVSection): + """Parser for numpydoc parameter sections. + + E.g. any section that looks like this: + arg_name + arg_description + arg_2 : type, optional + descriptions can also span... + ... 
multiple lines + """ + + def _parse_item(self, key: str, value: str) -> DocstringParam: + match = PARAM_KEY_REGEX.match(key) + arg_name = type_name = is_optional = None + if match is not None: + arg_name = match.group("name") + type_name = match.group("type") + if type_name is not None: + optional_match = PARAM_OPTIONAL_REGEX.match(type_name) + if optional_match is not None: + type_name = optional_match.group("type") + is_optional = True + else: + is_optional = False + + default = None + if len(value) > 0: + default_match = PARAM_DEFAULT_REGEX.search(value) + if default_match is not None: + default = default_match.group("value") + + return DocstringParam( + args=[self.key, arg_name], + description=_clean_str(value), + arg_name=arg_name, + type_name=type_name, + is_optional=is_optional, + default=default, + ) + + +class RaisesSection(_KVSection): + """Parser for numpydoc raises sections. + + E.g. any section that looks like this: + ValueError + A description of what might raise ValueError + """ + + def _parse_item(self, key: str, value: str) -> DocstringRaises: + return DocstringRaises( + args=[self.key, key], + description=_clean_str(value), + type_name=key if len(key) > 0 else None, + ) + + +class ReturnsSection(_KVSection): + """Parser for numpydoc returns sections. + + E.g. 
any section that looks like this: + return_name : type + A description of this returned value + another_type + Return names are optional, types are required + """ + + is_generator = False + + def _parse_item(self, key: str, value: str) -> DocstringReturns: + match = RETURN_KEY_REGEX.match(key) + if match is not None: + return_name = match.group("name") + type_name = match.group("type") + else: + return_name = None + type_name = None + + return DocstringReturns( + args=[self.key], + description=_clean_str(value), + type_name=type_name, + is_generator=self.is_generator, + return_name=return_name, + ) + + +class YieldsSection(ReturnsSection): + """Parser for numpydoc generator "yields" sections.""" + + is_generator = True + + +class DeprecationSection(_SphinxSection): + """Parser for numpydoc "deprecation warning" sections.""" + + def parse(self, text: str) -> T.Iterable[DocstringDeprecated]: + version, desc, *_ = text.split(sep="\n", maxsplit=1) + [None, None] + + if desc is not None: + desc = _clean_str(inspect.cleandoc(desc)) + + yield DocstringDeprecated( + args=[self.key], description=desc, version=_clean_str(version) + ) + + +class ExamplesSection(Section): + """Parser for numpydoc examples sections. + + E.g. any section that looks like this: + >>> import numpy.matlib + >>> np.matlib.empty((2, 2)) # filled with random data + matrix([[ 6.76425276e-320, 9.79033856e-307], # random + [ 7.39337286e-309, 3.22135945e-309]]) + >>> np.matlib.empty((2, 2), dtype=int) + matrix([[ 6600475, 0], # random + [ 6586976, 22740995]]) + """ + + def parse(self, text: str) -> T.Iterable[DocstringMeta]: + """Parse ``DocstringExample`` objects from the body of this section. + + :param text: section body text. Should be cleaned with + ``inspect.cleandoc`` before parsing. 
+ """ + lines = dedent(text).strip().splitlines() + while lines: + snippet_lines = [] + description_lines = [] + while lines: + if not lines[0].startswith(">>>"): + break + snippet_lines.append(lines.pop(0)) + while lines: + if lines[0].startswith(">>>"): + break + description_lines.append(lines.pop(0)) + yield DocstringExample( + [self.key], + snippet="\n".join(snippet_lines) if snippet_lines else None, + description="\n".join(description_lines), + ) + + +DEFAULT_SECTIONS = [ + ParamSection("Parameters", "param"), + ParamSection("Params", "param"), + ParamSection("Arguments", "param"), + ParamSection("Args", "param"), + ParamSection("Other Parameters", "other_param"), + ParamSection("Other Params", "other_param"), + ParamSection("Other Arguments", "other_param"), + ParamSection("Other Args", "other_param"), + ParamSection("Receives", "receives"), + ParamSection("Receive", "receives"), + RaisesSection("Raises", "raises"), + RaisesSection("Raise", "raises"), + RaisesSection("Warns", "warns"), + RaisesSection("Warn", "warns"), + ParamSection("Attributes", "attribute"), + ParamSection("Attribute", "attribute"), + ReturnsSection("Returns", "returns"), + ReturnsSection("Return", "returns"), + YieldsSection("Yields", "yields"), + YieldsSection("Yield", "yields"), + ExamplesSection("Examples", "examples"), + ExamplesSection("Example", "examples"), + Section("Warnings", "warnings"), + Section("Warning", "warnings"), + Section("See Also", "see_also"), + Section("Related", "see_also"), + Section("Notes", "notes"), + Section("Note", "notes"), + Section("References", "references"), + Section("Reference", "references"), + DeprecationSection("deprecated", "deprecation"), +] + + +class NumpydocParser: + """Parser for numpydoc-style docstrings.""" + + def __init__(self, sections: T.Optional[T.Dict[str, Section]] = None): + """Setup sections. + + :param sections: Recognized sections or None to defaults. 
+ """ + sections = sections or DEFAULT_SECTIONS + self.sections = {s.title: s for s in sections} + self._setup() + + def _setup(self): + self.titles_re = re.compile( + r"|".join(s.title_pattern for s in self.sections.values()), + flags=re.M, + ) + + def add_section(self, section: Section): + """Add or replace a section. + + :param section: The new section. + """ + + self.sections[section.title] = section + self._setup() + + def parse(self, text: str) -> Docstring: + """Parse the numpy-style docstring into its components. + + :returns: parsed docstring + """ + ret = Docstring(style=DocstringStyle.NUMPYDOC) + if not text: + return ret + + # Clean according to PEP-0257 + text = inspect.cleandoc(text) + + # Find first title and split on its position + match = self.titles_re.search(text) + if match: + desc_chunk = text[: match.start()] + meta_chunk = text[match.start() :] + else: + desc_chunk = text + meta_chunk = "" + + # Break description into short and long parts + parts = desc_chunk.split("\n", 1) + ret.short_description = parts[0] or None + if len(parts) > 1: + long_desc_chunk = parts[1] or "" + ret.blank_after_short_description = long_desc_chunk.startswith( + "\n" + ) + ret.blank_after_long_description = long_desc_chunk.endswith("\n\n") + ret.long_description = long_desc_chunk.strip() or None + + for match, nextmatch in _pairwise(self.titles_re.finditer(meta_chunk)): + title = next(g for g in match.groups() if g is not None) + factory = self.sections[title] + + # section chunk starts after the header, + # ends at the start of the next header + start = match.end() + end = nextmatch.start() if nextmatch is not None else None + ret.meta.extend(factory.parse(meta_chunk[start:end])) + + return ret + + +def parse(text: str) -> Docstring: + """Parse the numpy-style docstring into its components. 
+ + :returns: parsed docstring + """ + return NumpydocParser().parse(text) + + +def compose( + # pylint: disable=W0613 + docstring: Docstring, + rendering_style: RenderingStyle = RenderingStyle.COMPACT, + indent: str = " ", +) -> str: + """Render a parsed docstring into docstring text. + + :param docstring: parsed docstring representation + :param rendering_style: the style to render docstrings + :param indent: the characters used as indentation in the docstring string + :returns: docstring text + """ + + def process_one( + one: T.Union[DocstringParam, DocstringReturns, DocstringRaises] + ): + if isinstance(one, DocstringParam): + head = one.arg_name + elif isinstance(one, DocstringReturns): + head = one.return_name + else: + head = None + + if one.type_name and head: + head += f" : {one.type_name}" + elif one.type_name: + head = one.type_name + elif not head: + head = "" + + if isinstance(one, DocstringParam) and one.is_optional: + head += ", optional" + + if one.description: + body = f"\n{indent}".join([head] + one.description.splitlines()) + parts.append(body) + else: + parts.append(head) + + def process_sect(name: str, args: T.List[T.Any]): + if args: + parts.append("") + parts.append(name) + parts.append("-" * len(parts[-1])) + for arg in args: + process_one(arg) + + parts: T.List[str] = [] + if docstring.short_description: + parts.append(docstring.short_description) + if docstring.blank_after_short_description: + parts.append("") + + if docstring.deprecation: + first = ".. 
deprecated::" + if docstring.deprecation.version: + first += f" {docstring.deprecation.version}" + if docstring.deprecation.description: + rest = docstring.deprecation.description.splitlines() + else: + rest = [] + sep = f"\n{indent}" + parts.append(sep.join([first] + rest)) + + if docstring.long_description: + parts.append(docstring.long_description) + if docstring.blank_after_long_description: + parts.append("") + + process_sect( + "Parameters", + [item for item in docstring.params or [] if item.args[0] == "param"], + ) + + process_sect( + "Attributes", + [ + item + for item in docstring.params or [] + if item.args[0] == "attribute" + ], + ) + + process_sect( + "Returns", + [ + item + for item in docstring.many_returns or [] + if not item.is_generator + ], + ) + + process_sect( + "Yields", + [item for item in docstring.many_returns or [] if item.is_generator], + ) + + if docstring.returns and not docstring.many_returns: + ret = docstring.returns + parts.append("Yields" if ret else "Returns") + parts.append("-" * len(parts[-1])) + process_one(ret) + + process_sect( + "Receives", + [ + item + for item in docstring.params or [] + if item.args[0] == "receives" + ], + ) + + process_sect( + "Other Parameters", + [ + item + for item in docstring.params or [] + if item.args[0] == "other_param" + ], + ) + + process_sect( + "Raises", + [item for item in docstring.raises or [] if item.args[0] == "raises"], + ) + + process_sect( + "Warns", + [item for item in docstring.raises or [] if item.args[0] == "warns"], + ) + + for meta in docstring.meta: + if isinstance( + meta, + ( + DocstringDeprecated, + DocstringParam, + DocstringReturns, + DocstringRaises, + ), + ): + continue # Already handled + + parts.append("") + parts.append(meta.args[0].replace("_", "").title()) + parts.append("-" * len(meta.args[0])) + + if meta.description: + parts.append(meta.description) + + return "\n".join(parts) diff --git a/parrot/lib/python3.10/site-packages/docstring_parser/parser.py 
b/parrot/lib/python3.10/site-packages/docstring_parser/parser.py new file mode 100644 index 0000000000000000000000000000000000000000..2710e6330da4180d2e20890692f79812fb771024 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/docstring_parser/parser.py @@ -0,0 +1,98 @@ +"""The main parsing routine.""" + +import inspect +import typing as T + +from docstring_parser import epydoc, google, numpydoc, rest +from docstring_parser.attrdoc import add_attribute_docstrings +from docstring_parser.common import ( + Docstring, + DocstringStyle, + ParseError, + RenderingStyle, +) + +_STYLE_MAP = { + DocstringStyle.REST: rest, + DocstringStyle.GOOGLE: google, + DocstringStyle.NUMPYDOC: numpydoc, + DocstringStyle.EPYDOC: epydoc, +} + + +def parse(text: str, style: DocstringStyle = DocstringStyle.AUTO) -> Docstring: + """Parse the docstring into its components. + + :param text: docstring text to parse + :param style: docstring style + :returns: parsed docstring representation + """ + if style != DocstringStyle.AUTO: + return _STYLE_MAP[style].parse(text) + + exc: T.Optional[Exception] = None + rets = [] + for module in _STYLE_MAP.values(): + try: + ret = module.parse(text) + except ParseError as ex: + exc = ex + else: + rets.append(ret) + + if not rets: + raise exc + + return sorted(rets, key=lambda d: len(d.meta), reverse=True)[0] + + +def parse_from_object( + obj: T.Any, + style: DocstringStyle = DocstringStyle.AUTO, +) -> Docstring: + """Parse the object's docstring(s) into its components. + + The object can be anything that has a ``__doc__`` attribute. In contrast to + the ``parse`` function, ``parse_from_object`` is able to parse attribute + docstrings which are defined in the source code instead of ``__doc__``. + + Currently only attribute docstrings defined at class and module levels are + supported. Attribute docstrings defined in ``__init__`` methods are not + supported. 
+ + When given a class, only the attribute docstrings of that class are parsed, + not its inherited classes. This is a design decision. Separate calls to + this function should be performed to get attribute docstrings of parent + classes. + + :param obj: object from which to parse the docstring(s) + :param style: docstring style + :returns: parsed docstring representation + """ + docstring = parse(obj.__doc__, style=style) + + if inspect.isclass(obj) or inspect.ismodule(obj): + add_attribute_docstrings(obj, docstring) + + return docstring + + +def compose( + docstring: Docstring, + style: DocstringStyle = DocstringStyle.AUTO, + rendering_style: RenderingStyle = RenderingStyle.COMPACT, + indent: str = " ", +) -> str: + """Render a parsed docstring into docstring text. + + :param docstring: parsed docstring representation + :param style: docstring style to render + :param indent: the characters used as indentation in the docstring string + :returns: docstring text + """ + module = _STYLE_MAP[ + docstring.style if style == DocstringStyle.AUTO else style + ] + return module.compose( + docstring, rendering_style=rendering_style, indent=indent + ) diff --git a/parrot/lib/python3.10/site-packages/docstring_parser/py.typed b/parrot/lib/python3.10/site-packages/docstring_parser/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..1242d43277017748bbbf3d0a104ec4cf78245822 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/docstring_parser/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. 
diff --git a/parrot/lib/python3.10/site-packages/docstring_parser/rest.py b/parrot/lib/python3.10/site-packages/docstring_parser/rest.py new file mode 100644 index 0000000000000000000000000000000000000000..127084d54f949502d26118189735fd1317cccd98 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/docstring_parser/rest.py @@ -0,0 +1,259 @@ +"""ReST-style docstring parsing.""" + +import inspect +import re +import typing as T + +from .common import ( + DEPRECATION_KEYWORDS, + PARAM_KEYWORDS, + RAISES_KEYWORDS, + RETURNS_KEYWORDS, + YIELDS_KEYWORDS, + Docstring, + DocstringDeprecated, + DocstringMeta, + DocstringParam, + DocstringRaises, + DocstringReturns, + DocstringStyle, + ParseError, + RenderingStyle, +) + + +def _build_meta(args: T.List[str], desc: str) -> DocstringMeta: + key = args[0] + + if key in PARAM_KEYWORDS: + if len(args) == 3: + key, type_name, arg_name = args + if type_name.endswith("?"): + is_optional = True + type_name = type_name[:-1] + else: + is_optional = False + elif len(args) == 2: + key, arg_name = args + type_name = None + is_optional = None + else: + raise ParseError( + f"Expected one or two arguments for a {key} keyword." + ) + + match = re.match(r".*defaults to (.+)", desc, flags=re.DOTALL) + default = match.group(1).rstrip(".") if match else None + + return DocstringParam( + args=args, + description=desc, + arg_name=arg_name, + type_name=type_name, + is_optional=is_optional, + default=default, + ) + + if key in RETURNS_KEYWORDS | YIELDS_KEYWORDS: + if len(args) == 2: + type_name = args[1] + elif len(args) == 1: + type_name = None + else: + raise ParseError( + f"Expected one or no arguments for a {key} keyword." 
+ ) + + return DocstringReturns( + args=args, + description=desc, + type_name=type_name, + is_generator=key in YIELDS_KEYWORDS, + ) + + if key in DEPRECATION_KEYWORDS: + match = re.search( + r"^(?Pv?((?:\d+)(?:\.[0-9a-z\.]+))) (?P.+)", + desc, + flags=re.I, + ) + return DocstringDeprecated( + args=args, + version=match.group("version") if match else None, + description=match.group("desc") if match else desc, + ) + + if key in RAISES_KEYWORDS: + if len(args) == 2: + type_name = args[1] + elif len(args) == 1: + type_name = None + else: + raise ParseError( + f"Expected one or no arguments for a {key} keyword." + ) + return DocstringRaises( + args=args, description=desc, type_name=type_name + ) + + return DocstringMeta(args=args, description=desc) + + +def parse(text: str) -> Docstring: + """Parse the ReST-style docstring into its components. + + :returns: parsed docstring + """ + ret = Docstring(style=DocstringStyle.REST) + if not text: + return ret + + text = inspect.cleandoc(text) + match = re.search("^:", text, flags=re.M) + if match: + desc_chunk = text[: match.start()] + meta_chunk = text[match.start() :] + else: + desc_chunk = text + meta_chunk = "" + + parts = desc_chunk.split("\n", 1) + ret.short_description = parts[0] or None + if len(parts) > 1: + long_desc_chunk = parts[1] or "" + ret.blank_after_short_description = long_desc_chunk.startswith("\n") + ret.blank_after_long_description = long_desc_chunk.endswith("\n\n") + ret.long_description = long_desc_chunk.strip() or None + + types = {} + rtypes = {} + for match in re.finditer( + r"(^:.*?)(?=^:|\Z)", meta_chunk, flags=re.S | re.M + ): + chunk = match.group(0) + if not chunk: + continue + try: + args_chunk, desc_chunk = chunk.lstrip(":").split(":", 1) + except ValueError as ex: + raise ParseError( + f'Error parsing meta information near "{chunk}".' 
+ ) from ex + args = args_chunk.split() + desc = desc_chunk.strip() + + if "\n" in desc: + first_line, rest = desc.split("\n", 1) + desc = first_line + "\n" + inspect.cleandoc(rest) + + # Add special handling for :type a: typename + if len(args) == 2 and args[0] == "type": + types[args[1]] = desc + elif len(args) in [1, 2] and args[0] == "rtype": + rtypes[None if len(args) == 1 else args[1]] = desc + else: + ret.meta.append(_build_meta(args, desc)) + + for meta in ret.meta: + if isinstance(meta, DocstringParam): + meta.type_name = meta.type_name or types.get(meta.arg_name) + elif isinstance(meta, DocstringReturns): + meta.type_name = meta.type_name or rtypes.get(meta.return_name) + + if not any(isinstance(m, DocstringReturns) for m in ret.meta) and rtypes: + for (return_name, type_name) in rtypes.items(): + ret.meta.append( + DocstringReturns( + args=[], + type_name=type_name, + description=None, + is_generator=False, + return_name=return_name, + ) + ) + + return ret + + +def compose( + docstring: Docstring, + rendering_style: RenderingStyle = RenderingStyle.COMPACT, + indent: str = " ", +) -> str: + """Render a parsed docstring into docstring text. 
+ + :param docstring: parsed docstring representation + :param rendering_style: the style to render docstrings + :param indent: the characters used as indentation in the docstring string + :returns: docstring text + """ + + def process_desc(desc: T.Optional[str]) -> str: + if not desc: + return "" + + if rendering_style == RenderingStyle.CLEAN: + (first, *rest) = desc.splitlines() + return "\n".join([" " + first] + [indent + line for line in rest]) + + if rendering_style == RenderingStyle.EXPANDED: + (first, *rest) = desc.splitlines() + return "\n".join( + ["\n" + indent + first] + [indent + line for line in rest] + ) + + return " " + desc + + parts: T.List[str] = [] + if docstring.short_description: + parts.append(docstring.short_description) + if docstring.blank_after_short_description: + parts.append("") + if docstring.long_description: + parts.append(docstring.long_description) + if docstring.blank_after_long_description: + parts.append("") + + for meta in docstring.meta: + if isinstance(meta, DocstringParam): + if meta.type_name: + type_text = ( + f" {meta.type_name}? 
" + if meta.is_optional + else f" {meta.type_name} " + ) + else: + type_text = " " + if rendering_style == RenderingStyle.EXPANDED: + text = f":param {meta.arg_name}:" + text += process_desc(meta.description) + parts.append(text) + if type_text[:-1]: + parts.append(f":type {meta.arg_name}:{type_text[:-1]}") + else: + text = f":param{type_text}{meta.arg_name}:" + text += process_desc(meta.description) + parts.append(text) + elif isinstance(meta, DocstringReturns): + type_text = f" {meta.type_name}" if meta.type_name else "" + key = "yields" if meta.is_generator else "returns" + + if rendering_style == RenderingStyle.EXPANDED: + if meta.description: + text = f":{key}:" + text += process_desc(meta.description) + parts.append(text) + if type_text: + parts.append(f":rtype:{type_text}") + else: + text = f":{key}{type_text}:" + text += process_desc(meta.description) + parts.append(text) + elif isinstance(meta, DocstringRaises): + type_text = f" {meta.type_name} " if meta.type_name else "" + text = f":raises{type_text}:" + process_desc(meta.description) + parts.append(text) + else: + text = f':{" ".join(meta.args)}:' + process_desc(meta.description) + parts.append(text) + return "\n".join(parts) diff --git a/parrot/lib/python3.10/site-packages/docstring_parser/tests/__init__.py b/parrot/lib/python3.10/site-packages/docstring_parser/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..164665e94d8456d16cb55bbf24dd23cb0978b9c2 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/docstring_parser/tests/__init__.py @@ -0,0 +1 @@ +"""Tests for docstring parser.""" diff --git a/parrot/lib/python3.10/site-packages/docstring_parser/tests/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/docstring_parser/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef10456fbc1f6b9479ac6ce41991772506c26f22 Binary files /dev/null and 
b/parrot/lib/python3.10/site-packages/docstring_parser/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/docstring_parser/tests/__pycache__/_pydoctor.cpython-310.pyc b/parrot/lib/python3.10/site-packages/docstring_parser/tests/__pycache__/_pydoctor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a81191a4be996df82bed5859db0ef1869dee0d4e Binary files /dev/null and b/parrot/lib/python3.10/site-packages/docstring_parser/tests/__pycache__/_pydoctor.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/docstring_parser/tests/__pycache__/test_epydoc.cpython-310.pyc b/parrot/lib/python3.10/site-packages/docstring_parser/tests/__pycache__/test_epydoc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c44a6881172278e18bc27e2a3e8d9ad7d7903d39 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/docstring_parser/tests/__pycache__/test_epydoc.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/docstring_parser/tests/__pycache__/test_google.cpython-310.pyc b/parrot/lib/python3.10/site-packages/docstring_parser/tests/__pycache__/test_google.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c48c0ed748236c8679e79a288e1dcac0ec914254 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/docstring_parser/tests/__pycache__/test_google.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/docstring_parser/tests/__pycache__/test_numpydoc.cpython-310.pyc b/parrot/lib/python3.10/site-packages/docstring_parser/tests/__pycache__/test_numpydoc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8982eb711d44a5cc42407c56a185d063ba6e68a Binary files /dev/null and b/parrot/lib/python3.10/site-packages/docstring_parser/tests/__pycache__/test_numpydoc.cpython-310.pyc differ diff --git 
a/parrot/lib/python3.10/site-packages/docstring_parser/tests/__pycache__/test_parse_from_object.cpython-310.pyc b/parrot/lib/python3.10/site-packages/docstring_parser/tests/__pycache__/test_parse_from_object.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ecc074524093c90ee42b5a9aadda24e2986771b Binary files /dev/null and b/parrot/lib/python3.10/site-packages/docstring_parser/tests/__pycache__/test_parse_from_object.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/docstring_parser/tests/__pycache__/test_parser.cpython-310.pyc b/parrot/lib/python3.10/site-packages/docstring_parser/tests/__pycache__/test_parser.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cfc89dd92f082fde7817dfa9ef7a3f215bed1ebf Binary files /dev/null and b/parrot/lib/python3.10/site-packages/docstring_parser/tests/__pycache__/test_parser.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/docstring_parser/tests/__pycache__/test_rest.cpython-310.pyc b/parrot/lib/python3.10/site-packages/docstring_parser/tests/__pycache__/test_rest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..faad91c1abe2c4f0364cb003d875675a07c68b05 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/docstring_parser/tests/__pycache__/test_rest.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/docstring_parser/tests/__pycache__/test_util.cpython-310.pyc b/parrot/lib/python3.10/site-packages/docstring_parser/tests/__pycache__/test_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fcd30c93986bba65745dada1d4e8093cd2e67e13 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/docstring_parser/tests/__pycache__/test_util.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/docstring_parser/tests/_pydoctor.py 
b/parrot/lib/python3.10/site-packages/docstring_parser/tests/_pydoctor.py new file mode 100644 index 0000000000000000000000000000000000000000..5f3532a47526478e8511301871f79fadd779b3b9 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/docstring_parser/tests/_pydoctor.py @@ -0,0 +1,22 @@ +"""Private pydoctor customization code in order to exclude the package +docstring_parser.tests from the API documentation. Based on Twisted code. +""" + +# pylint: disable=invalid-name + +try: + from pydoctor.model import Documentable, PrivacyClass, System +except ImportError: + pass +else: + + class HidesTestsPydoctorSystem(System): + """A PyDoctor "system" used to generate the docs.""" + + def privacyClass(self, documentable: Documentable) -> PrivacyClass: + """Report the privacy level for an object. Hide the module + 'docstring_parser.tests'. + """ + if documentable.fullName().startswith("docstring_parser.tests"): + return PrivacyClass.HIDDEN + return super().privacyClass(documentable) diff --git a/parrot/lib/python3.10/site-packages/docstring_parser/tests/test_epydoc.py b/parrot/lib/python3.10/site-packages/docstring_parser/tests/test_epydoc.py new file mode 100644 index 0000000000000000000000000000000000000000..e23518a442458a49999617cb7b3353bf4be468fa --- /dev/null +++ b/parrot/lib/python3.10/site-packages/docstring_parser/tests/test_epydoc.py @@ -0,0 +1,723 @@ +"""Tests for epydoc-style docstring routines.""" +import typing as T + +import pytest +from docstring_parser.common import ParseError, RenderingStyle +from docstring_parser.epydoc import compose, parse + + +@pytest.mark.parametrize( + "source, expected", + [ + ("", None), + ("\n", None), + ("Short description", "Short description"), + ("\nShort description\n", "Short description"), + ("\n Short description\n", "Short description"), + ], +) +def test_short_description(source: str, expected: str) -> None: + """Test parsing short description.""" + docstring = parse(source) + assert docstring.short_description == 
expected + assert docstring.long_description is None + assert not docstring.meta + + +@pytest.mark.parametrize( + "source, expected_short_desc, expected_long_desc, expected_blank", + [ + ( + "Short description\n\nLong description", + "Short description", + "Long description", + True, + ), + ( + """ + Short description + + Long description + """, + "Short description", + "Long description", + True, + ), + ( + """ + Short description + + Long description + Second line + """, + "Short description", + "Long description\nSecond line", + True, + ), + ( + "Short description\nLong description", + "Short description", + "Long description", + False, + ), + ( + """ + Short description + Long description + """, + "Short description", + "Long description", + False, + ), + ( + "\nShort description\nLong description\n", + "Short description", + "Long description", + False, + ), + ( + """ + Short description + Long description + Second line + """, + "Short description", + "Long description\nSecond line", + False, + ), + ], +) +def test_long_description( + source: str, + expected_short_desc: str, + expected_long_desc: str, + expected_blank: bool, +) -> None: + """Test parsing long description.""" + docstring = parse(source) + assert docstring.short_description == expected_short_desc + assert docstring.long_description == expected_long_desc + assert docstring.blank_after_short_description == expected_blank + assert not docstring.meta + + +@pytest.mark.parametrize( + "source, expected_short_desc, expected_long_desc, " + "expected_blank_short_desc, expected_blank_long_desc", + [ + ( + """ + Short description + @meta: asd + """, + "Short description", + None, + False, + False, + ), + ( + """ + Short description + Long description + @meta: asd + """, + "Short description", + "Long description", + False, + False, + ), + ( + """ + Short description + First line + Second line + @meta: asd + """, + "Short description", + "First line\n Second line", + False, + False, + ), + ( + """ + Short 
description + + First line + Second line + @meta: asd + """, + "Short description", + "First line\n Second line", + True, + False, + ), + ( + """ + Short description + + First line + Second line + + @meta: asd + """, + "Short description", + "First line\n Second line", + True, + True, + ), + ( + """ + @meta: asd + """, + None, + None, + False, + False, + ), + ], +) +def test_meta_newlines( + source: str, + expected_short_desc: T.Optional[str], + expected_long_desc: T.Optional[str], + expected_blank_short_desc: bool, + expected_blank_long_desc: bool, +) -> None: + """Test parsing newlines around description sections.""" + docstring = parse(source) + assert docstring.short_description == expected_short_desc + assert docstring.long_description == expected_long_desc + assert docstring.blank_after_short_description == expected_blank_short_desc + assert docstring.blank_after_long_description == expected_blank_long_desc + assert len(docstring.meta) == 1 + + +def test_meta_with_multiline_description() -> None: + """Test parsing multiline meta documentation.""" + docstring = parse( + """ + Short description + + @meta: asd + 1 + 2 + 3 + """ + ) + assert docstring.short_description == "Short description" + assert len(docstring.meta) == 1 + assert docstring.meta[0].args == ["meta"] + assert docstring.meta[0].description == "asd\n1\n 2\n3" + + +def test_multiple_meta() -> None: + """Test parsing multiple meta.""" + docstring = parse( + """ + Short description + + @meta1: asd + 1 + 2 + 3 + @meta2: herp + @meta3: derp + """ + ) + assert docstring.short_description == "Short description" + assert len(docstring.meta) == 3 + assert docstring.meta[0].args == ["meta1"] + assert docstring.meta[0].description == "asd\n1\n 2\n3" + assert docstring.meta[1].args == ["meta2"] + assert docstring.meta[1].description == "herp" + assert docstring.meta[2].args == ["meta3"] + assert docstring.meta[2].description == "derp" + + +def test_meta_with_args() -> None: + """Test parsing meta with 
additional arguments.""" + docstring = parse( + """ + Short description + + @meta ene due rabe: asd + """ + ) + assert docstring.short_description == "Short description" + assert len(docstring.meta) == 1 + assert docstring.meta[0].args == ["meta", "ene", "due", "rabe"] + assert docstring.meta[0].description == "asd" + + +def test_params() -> None: + """Test parsing params.""" + docstring = parse("Short description") + assert len(docstring.params) == 0 + + docstring = parse( + """ + Short description + + @param name: description 1 + @param priority: description 2 + @type priority: int + @param sender: description 3 + @type sender: str? + @param message: description 4, defaults to 'hello' + @type message: str? + @param multiline: long description 5, + defaults to 'bye' + @type multiline: str? + """ + ) + assert len(docstring.params) == 5 + assert docstring.params[0].arg_name == "name" + assert docstring.params[0].type_name is None + assert docstring.params[0].description == "description 1" + assert docstring.params[0].default is None + assert not docstring.params[0].is_optional + assert docstring.params[1].arg_name == "priority" + assert docstring.params[1].type_name == "int" + assert docstring.params[1].description == "description 2" + assert not docstring.params[1].is_optional + assert docstring.params[1].default is None + assert docstring.params[2].arg_name == "sender" + assert docstring.params[2].type_name == "str" + assert docstring.params[2].description == "description 3" + assert docstring.params[2].is_optional + assert docstring.params[2].default is None + assert docstring.params[3].arg_name == "message" + assert docstring.params[3].type_name == "str" + assert ( + docstring.params[3].description == "description 4, defaults to 'hello'" + ) + assert docstring.params[3].is_optional + assert docstring.params[3].default == "'hello'" + assert docstring.params[4].arg_name == "multiline" + assert docstring.params[4].type_name == "str" + assert ( + 
docstring.params[4].description + == "long description 5,\ndefaults to 'bye'" + ) + assert docstring.params[4].is_optional + assert docstring.params[4].default == "'bye'" + + +def test_returns() -> None: + """Test parsing returns.""" + docstring = parse( + """ + Short description + """ + ) + assert docstring.returns is None + + docstring = parse( + """ + Short description + @return: description + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name is None + assert docstring.returns.description == "description" + assert not docstring.returns.is_generator + + docstring = parse( + """ + Short description + @return: description + @rtype: int + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name == "int" + assert docstring.returns.description == "description" + assert not docstring.returns.is_generator + + +def test_yields() -> None: + """Test parsing yields.""" + docstring = parse( + """ + Short description + """ + ) + assert docstring.returns is None + + docstring = parse( + """ + Short description + @yield: description + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name is None + assert docstring.returns.description == "description" + assert docstring.returns.is_generator + + docstring = parse( + """ + Short description + @yield: description + @ytype: int + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name == "int" + assert docstring.returns.description == "description" + assert docstring.returns.is_generator + + +def test_raises() -> None: + """Test parsing raises.""" + docstring = parse( + """ + Short description + """ + ) + assert len(docstring.raises) == 0 + + docstring = parse( + """ + Short description + @raise: description + """ + ) + assert len(docstring.raises) == 1 + assert docstring.raises[0].type_name is None + assert docstring.raises[0].description == "description" + + docstring = parse( + """ + Short description + @raise 
ValueError: description + """ + ) + assert len(docstring.raises) == 1 + assert docstring.raises[0].type_name == "ValueError" + assert docstring.raises[0].description == "description" + + +def test_broken_meta() -> None: + """Test parsing broken meta.""" + with pytest.raises(ParseError): + parse("@") + + with pytest.raises(ParseError): + parse("@param herp derp") + + with pytest.raises(ParseError): + parse("@param: invalid") + + with pytest.raises(ParseError): + parse("@param with too many args: desc") + + # these should not raise any errors + parse("@sthstrange: desc") + + +@pytest.mark.parametrize( + "source, expected", + [ + ("", ""), + ("\n", ""), + ("Short description", "Short description"), + ("\nShort description\n", "Short description"), + ("\n Short description\n", "Short description"), + ( + "Short description\n\nLong description", + "Short description\n\nLong description", + ), + ( + """ + Short description + + Long description + """, + "Short description\n\nLong description", + ), + ( + """ + Short description + + Long description + Second line + """, + "Short description\n\nLong description\nSecond line", + ), + ( + "Short description\nLong description", + "Short description\nLong description", + ), + ( + """ + Short description + Long description + """, + "Short description\nLong description", + ), + ( + "\nShort description\nLong description\n", + "Short description\nLong description", + ), + ( + """ + Short description + Long description + Second line + """, + "Short description\nLong description\nSecond line", + ), + ( + """ + Short description + @meta: asd + """, + "Short description\n@meta: asd", + ), + ( + """ + Short description + Long description + @meta: asd + """, + "Short description\nLong description\n@meta: asd", + ), + ( + """ + Short description + First line + Second line + @meta: asd + """, + "Short description\nFirst line\n Second line\n@meta: asd", + ), + ( + """ + Short description + + First line + Second line + @meta: asd + """, + 
"Short description\n" + "\n" + "First line\n" + " Second line\n" + "@meta: asd", + ), + ( + """ + Short description + + First line + Second line + + @meta: asd + """, + "Short description\n" + "\n" + "First line\n" + " Second line\n" + "\n" + "@meta: asd", + ), + ( + """ + @meta: asd + """, + "@meta: asd", + ), + ( + """ + Short description + + @meta: asd + 1 + 2 + 3 + """, + "Short description\n" + "\n" + "@meta: asd\n" + " 1\n" + " 2\n" + " 3", + ), + ( + """ + Short description + + @meta1: asd + 1 + 2 + 3 + @meta2: herp + @meta3: derp + """, + "Short description\n" + "\n@meta1: asd\n" + " 1\n" + " 2\n" + " 3\n@meta2: herp\n" + "@meta3: derp", + ), + ( + """ + Short description + + @meta ene due rabe: asd + """, + "Short description\n\n@meta ene due rabe: asd", + ), + ( + """ + Short description + + @param name: description 1 + @param priority: description 2 + @type priority: int + @param sender: description 3 + @type sender: str? + @type message: str? + @param message: description 4, defaults to 'hello' + @type multiline: str? 
+ @param multiline: long description 5, + defaults to 'bye' + """, + "Short description\n" + "\n" + "@param name: description 1\n" + "@type priority: int\n" + "@param priority: description 2\n" + "@type sender: str?\n" + "@param sender: description 3\n" + "@type message: str?\n" + "@param message: description 4, defaults to 'hello'\n" + "@type multiline: str?\n" + "@param multiline: long description 5,\n" + " defaults to 'bye'", + ), + ( + """ + Short description + @raise: description + """, + "Short description\n@raise: description", + ), + ( + """ + Short description + @raise ValueError: description + """, + "Short description\n@raise ValueError: description", + ), + ], +) +def test_compose(source: str, expected: str) -> None: + """Test compose in default mode.""" + assert compose(parse(source)) == expected + + +@pytest.mark.parametrize( + "source, expected", + [ + ( + """ + Short description + + @param name: description 1 + @param priority: description 2 + @type priority: int + @param sender: description 3 + @type sender: str? + @type message: str? + @param message: description 4, defaults to 'hello' + @type multiline: str? 
+ @param multiline: long description 5, + defaults to 'bye' + """, + "Short description\n" + "\n" + "@param name:\n" + " description 1\n" + "@type priority: int\n" + "@param priority:\n" + " description 2\n" + "@type sender: str?\n" + "@param sender:\n" + " description 3\n" + "@type message: str?\n" + "@param message:\n" + " description 4, defaults to 'hello'\n" + "@type multiline: str?\n" + "@param multiline:\n" + " long description 5,\n" + " defaults to 'bye'", + ), + ], +) +def test_compose_clean(source: str, expected: str) -> None: + """Test compose in clean mode.""" + assert ( + compose(parse(source), rendering_style=RenderingStyle.CLEAN) + == expected + ) + + +@pytest.mark.parametrize( + "source, expected", + [ + ( + """ + Short description + + @param name: description 1 + @param priority: description 2 + @type priority: int + @param sender: description 3 + @type sender: str? + @type message: str? + @param message: description 4, defaults to 'hello' + @type multiline: str? + @param multiline: long description 5, + defaults to 'bye' + """, + "Short description\n" + "\n" + "@param name:\n" + " description 1\n" + "@type priority:\n" + " int\n" + "@param priority:\n" + " description 2\n" + "@type sender:\n" + " str?\n" + "@param sender:\n" + " description 3\n" + "@type message:\n" + " str?\n" + "@param message:\n" + " description 4, defaults to 'hello'\n" + "@type multiline:\n" + " str?\n" + "@param multiline:\n" + " long description 5,\n" + " defaults to 'bye'", + ), + ], +) +def test_compose_expanded(source: str, expected: str) -> None: + """Test compose in expanded mode.""" + assert ( + compose(parse(source), rendering_style=RenderingStyle.EXPANDED) + == expected + ) + + +def test_short_rtype() -> None: + """Test abbreviated docstring with only return type information.""" + string = "Short description.\n\n@rtype: float" + docstring = parse(string) + assert compose(docstring) == string diff --git 
a/parrot/lib/python3.10/site-packages/docstring_parser/tests/test_google.py b/parrot/lib/python3.10/site-packages/docstring_parser/tests/test_google.py new file mode 100644 index 0000000000000000000000000000000000000000..201f14159212b8a810b28fe182e21fd6850d8b7a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/docstring_parser/tests/test_google.py @@ -0,0 +1,977 @@ +"""Tests for Google-style docstring routines.""" +import typing as T + +import pytest +from docstring_parser.common import ParseError, RenderingStyle +from docstring_parser.google import ( + GoogleParser, + Section, + SectionType, + compose, + parse, +) + + +def test_google_parser_unknown_section() -> None: + """Test parsing an unknown section with default GoogleParser + configuration. + """ + parser = GoogleParser() + docstring = parser.parse( + """ + Unknown: + spam: a + """ + ) + assert docstring.short_description == "Unknown:" + assert docstring.long_description == "spam: a" + assert len(docstring.meta) == 0 + + +def test_google_parser_custom_sections() -> None: + """Test parsing an unknown section with custom GoogleParser + configuration. + """ + parser = GoogleParser( + [ + Section("DESCRIPTION", "desc", SectionType.SINGULAR), + Section("ARGUMENTS", "param", SectionType.MULTIPLE), + Section("ATTRIBUTES", "attribute", SectionType.MULTIPLE), + Section("EXAMPLES", "examples", SectionType.SINGULAR), + ], + title_colon=False, + ) + docstring = parser.parse( + """ + DESCRIPTION + This is the description. + + ARGUMENTS + arg1: first arg + arg2: second arg + + ATTRIBUTES + attr1: first attribute + attr2: second attribute + + EXAMPLES + Many examples + More examples + """ + ) + + assert docstring.short_description is None + assert docstring.long_description is None + assert len(docstring.meta) == 6 + assert docstring.meta[0].args == ["desc"] + assert docstring.meta[0].description == "This is the description." 
+ assert docstring.meta[1].args == ["param", "arg1"] + assert docstring.meta[1].description == "first arg" + assert docstring.meta[2].args == ["param", "arg2"] + assert docstring.meta[2].description == "second arg" + assert docstring.meta[3].args == ["attribute", "attr1"] + assert docstring.meta[3].description == "first attribute" + assert docstring.meta[4].args == ["attribute", "attr2"] + assert docstring.meta[4].description == "second attribute" + assert docstring.meta[5].args == ["examples"] + assert docstring.meta[5].description == "Many examples\nMore examples" + + +def test_google_parser_custom_sections_after() -> None: + """Test parsing an unknown section with custom GoogleParser configuration + that was set at a runtime. + """ + parser = GoogleParser(title_colon=False) + parser.add_section(Section("Note", "note", SectionType.SINGULAR)) + docstring = parser.parse( + """ + short description + + Note: + a note + """ + ) + assert docstring.short_description == "short description" + assert docstring.long_description == "Note:\n a note" + + docstring = parser.parse( + """ + short description + + Note a note + """ + ) + assert docstring.short_description == "short description" + assert docstring.long_description == "Note a note" + + docstring = parser.parse( + """ + short description + + Note + a note + """ + ) + assert len(docstring.meta) == 1 + assert docstring.meta[0].args == ["note"] + assert docstring.meta[0].description == "a note" + + +@pytest.mark.parametrize( + "source, expected", + [ + ("", None), + ("\n", None), + ("Short description", "Short description"), + ("\nShort description\n", "Short description"), + ("\n Short description\n", "Short description"), + ], +) +def test_short_description(source: str, expected: str) -> None: + """Test parsing short description.""" + docstring = parse(source) + assert docstring.short_description == expected + assert docstring.long_description is None + assert not docstring.meta + + +@pytest.mark.parametrize( + 
"source, expected_short_desc, expected_long_desc, expected_blank", + [ + ( + "Short description\n\nLong description", + "Short description", + "Long description", + True, + ), + ( + """ + Short description + + Long description + """, + "Short description", + "Long description", + True, + ), + ( + """ + Short description + + Long description + Second line + """, + "Short description", + "Long description\nSecond line", + True, + ), + ( + "Short description\nLong description", + "Short description", + "Long description", + False, + ), + ( + """ + Short description + Long description + """, + "Short description", + "Long description", + False, + ), + ( + "\nShort description\nLong description\n", + "Short description", + "Long description", + False, + ), + ( + """ + Short description + Long description + Second line + """, + "Short description", + "Long description\nSecond line", + False, + ), + ], +) +def test_long_description( + source: str, + expected_short_desc: str, + expected_long_desc: str, + expected_blank: bool, +) -> None: + """Test parsing long description.""" + docstring = parse(source) + assert docstring.short_description == expected_short_desc + assert docstring.long_description == expected_long_desc + assert docstring.blank_after_short_description == expected_blank + assert not docstring.meta + + +@pytest.mark.parametrize( + "source, expected_short_desc, expected_long_desc, " + "expected_blank_short_desc, expected_blank_long_desc", + [ + ( + """ + Short description + Args: + asd: + """, + "Short description", + None, + False, + False, + ), + ( + """ + Short description + Long description + Args: + asd: + """, + "Short description", + "Long description", + False, + False, + ), + ( + """ + Short description + First line + Second line + Args: + asd: + """, + "Short description", + "First line\n Second line", + False, + False, + ), + ( + """ + Short description + + First line + Second line + Args: + asd: + """, + "Short description", + "First line\n Second 
line", + True, + False, + ), + ( + """ + Short description + + First line + Second line + + Args: + asd: + """, + "Short description", + "First line\n Second line", + True, + True, + ), + ( + """ + Args: + asd: + """, + None, + None, + False, + False, + ), + ], +) +def test_meta_newlines( + source: str, + expected_short_desc: T.Optional[str], + expected_long_desc: T.Optional[str], + expected_blank_short_desc: bool, + expected_blank_long_desc: bool, +) -> None: + """Test parsing newlines around description sections.""" + docstring = parse(source) + assert docstring.short_description == expected_short_desc + assert docstring.long_description == expected_long_desc + assert docstring.blank_after_short_description == expected_blank_short_desc + assert docstring.blank_after_long_description == expected_blank_long_desc + assert len(docstring.meta) == 1 + + +def test_meta_with_multiline_description() -> None: + """Test parsing multiline meta documentation.""" + docstring = parse( + """ + Short description + + Args: + spam: asd + 1 + 2 + 3 + """ + ) + assert docstring.short_description == "Short description" + assert len(docstring.meta) == 1 + assert docstring.meta[0].args == ["param", "spam"] + assert docstring.meta[0].arg_name == "spam" + assert docstring.meta[0].description == "asd\n1\n 2\n3" + + +def test_default_args() -> None: + """Test parsing default arguments.""" + docstring = parse( + """A sample function + +A function the demonstrates docstrings + +Args: + arg1 (int): The firsty arg + arg2 (str): The second arg + arg3 (float, optional): The third arg. Defaults to 1.0. + arg4 (Optional[Dict[str, Any]], optional): The last arg. Defaults to None. + arg5 (str, optional): The fifth arg. Defaults to DEFAULT_ARG5. 
+ +Returns: + Mapping[str, Any]: The args packed in a mapping +""" + ) + assert docstring is not None + assert len(docstring.params) == 5 + + arg4 = docstring.params[3] + assert arg4.arg_name == "arg4" + assert arg4.is_optional + assert arg4.type_name == "Optional[Dict[str, Any]]" + assert arg4.default == "None" + assert arg4.description == "The last arg. Defaults to None." + + +def test_multiple_meta() -> None: + """Test parsing multiple meta.""" + docstring = parse( + """ + Short description + + Args: + spam: asd + 1 + 2 + 3 + + Raises: + bla: herp + yay: derp + """ + ) + assert docstring.short_description == "Short description" + assert len(docstring.meta) == 3 + assert docstring.meta[0].args == ["param", "spam"] + assert docstring.meta[0].arg_name == "spam" + assert docstring.meta[0].description == "asd\n1\n 2\n3" + assert docstring.meta[1].args == ["raises", "bla"] + assert docstring.meta[1].type_name == "bla" + assert docstring.meta[1].description == "herp" + assert docstring.meta[2].args == ["raises", "yay"] + assert docstring.meta[2].type_name == "yay" + assert docstring.meta[2].description == "derp" + + +def test_params() -> None: + """Test parsing params.""" + docstring = parse("Short description") + assert len(docstring.params) == 0 + + docstring = parse( + """ + Short description + + Args: + name: description 1 + priority (int): description 2 + sender (str?): description 3 + ratio (Optional[float], optional): description 4 + """ + ) + assert len(docstring.params) == 4 + assert docstring.params[0].arg_name == "name" + assert docstring.params[0].type_name is None + assert docstring.params[0].description == "description 1" + assert not docstring.params[0].is_optional + assert docstring.params[1].arg_name == "priority" + assert docstring.params[1].type_name == "int" + assert docstring.params[1].description == "description 2" + assert not docstring.params[1].is_optional + assert docstring.params[2].arg_name == "sender" + assert docstring.params[2].type_name 
== "str" + assert docstring.params[2].description == "description 3" + assert docstring.params[2].is_optional + assert docstring.params[3].arg_name == "ratio" + assert docstring.params[3].type_name == "Optional[float]" + assert docstring.params[3].description == "description 4" + assert docstring.params[3].is_optional + + docstring = parse( + """ + Short description + + Args: + name: description 1 + with multi-line text + priority (int): description 2 + """ + ) + assert len(docstring.params) == 2 + assert docstring.params[0].arg_name == "name" + assert docstring.params[0].type_name is None + assert docstring.params[0].description == ( + "description 1\nwith multi-line text" + ) + assert docstring.params[1].arg_name == "priority" + assert docstring.params[1].type_name == "int" + assert docstring.params[1].description == "description 2" + + +def test_attributes() -> None: + """Test parsing attributes.""" + docstring = parse("Short description") + assert len(docstring.params) == 0 + + docstring = parse( + """ + Short description + + Attributes: + name: description 1 + priority (int): description 2 + sender (str?): description 3 + ratio (Optional[float], optional): description 4 + """ + ) + assert len(docstring.params) == 4 + assert docstring.params[0].arg_name == "name" + assert docstring.params[0].type_name is None + assert docstring.params[0].description == "description 1" + assert not docstring.params[0].is_optional + assert docstring.params[1].arg_name == "priority" + assert docstring.params[1].type_name == "int" + assert docstring.params[1].description == "description 2" + assert not docstring.params[1].is_optional + assert docstring.params[2].arg_name == "sender" + assert docstring.params[2].type_name == "str" + assert docstring.params[2].description == "description 3" + assert docstring.params[2].is_optional + assert docstring.params[3].arg_name == "ratio" + assert docstring.params[3].type_name == "Optional[float]" + assert docstring.params[3].description == 
"description 4" + assert docstring.params[3].is_optional + + docstring = parse( + """ + Short description + + Attributes: + name: description 1 + with multi-line text + priority (int): description 2 + """ + ) + assert len(docstring.params) == 2 + assert docstring.params[0].arg_name == "name" + assert docstring.params[0].type_name is None + assert docstring.params[0].description == ( + "description 1\nwith multi-line text" + ) + assert docstring.params[1].arg_name == "priority" + assert docstring.params[1].type_name == "int" + assert docstring.params[1].description == "description 2" + + +def test_returns() -> None: + """Test parsing returns.""" + docstring = parse( + """ + Short description + """ + ) + assert docstring.returns is None + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 0 + + docstring = parse( + """ + Short description + Returns: + description + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name is None + assert docstring.returns.description == "description" + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 1 + assert docstring.many_returns[0] == docstring.returns + + docstring = parse( + """ + Short description + Returns: + description with: a colon! + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name is None + assert docstring.returns.description == "description with: a colon!" 
+ assert docstring.many_returns is not None + assert len(docstring.many_returns) == 1 + assert docstring.many_returns[0] == docstring.returns + + docstring = parse( + """ + Short description + Returns: + int: description + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name == "int" + assert docstring.returns.description == "description" + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 1 + assert docstring.many_returns[0] == docstring.returns + + docstring = parse( + """ + Returns: + Optional[Mapping[str, List[int]]]: A description: with a colon + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name == "Optional[Mapping[str, List[int]]]" + assert docstring.returns.description == "A description: with a colon" + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 1 + assert docstring.many_returns[0] == docstring.returns + + docstring = parse( + """ + Short description + Yields: + int: description + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name == "int" + assert docstring.returns.description == "description" + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 1 + assert docstring.many_returns[0] == docstring.returns + + docstring = parse( + """ + Short description + Returns: + int: description + with much text + + even some spacing + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name == "int" + assert docstring.returns.description == ( + "description\nwith much text\n\neven some spacing" + ) + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 1 + assert docstring.many_returns[0] == docstring.returns + + +def test_raises() -> None: + """Test parsing raises.""" + docstring = parse( + """ + Short description + """ + ) + assert len(docstring.raises) == 0 + + docstring = parse( + """ + Short description + Raises: + 
ValueError: description + """ + ) + assert len(docstring.raises) == 1 + assert docstring.raises[0].type_name == "ValueError" + assert docstring.raises[0].description == "description" + + +def test_examples() -> None: + """Test parsing examples.""" + docstring = parse( + """ + Short description + Example: + example: 1 + Examples: + long example + + more here + """ + ) + assert len(docstring.examples) == 2 + assert docstring.examples[0].description == "example: 1" + assert docstring.examples[1].description == "long example\n\nmore here" + + +def test_broken_meta() -> None: + """Test parsing broken meta.""" + with pytest.raises(ParseError): + parse("Args:") + + with pytest.raises(ParseError): + parse("Args:\n herp derp") + + +def test_unknown_meta() -> None: + """Test parsing unknown meta.""" + docstring = parse( + """Short desc + + Unknown 0: + title0: content0 + + Args: + arg0: desc0 + arg1: desc1 + + Unknown1: + title1: content1 + + Unknown2: + title2: content2 + """ + ) + + assert docstring.params[0].arg_name == "arg0" + assert docstring.params[0].description == "desc0" + assert docstring.params[1].arg_name == "arg1" + assert docstring.params[1].description == "desc1" + + +def test_broken_arguments() -> None: + """Test parsing broken arguments.""" + with pytest.raises(ParseError): + parse( + """This is a test + + Args: + param - poorly formatted + """ + ) + + +def test_empty_example() -> None: + """Test parsing empty examples section.""" + docstring = parse( + """Short description + + Example: + + Raises: + IOError: some error + """ + ) + + assert len(docstring.examples) == 1 + assert docstring.examples[0].args == ["examples"] + assert docstring.examples[0].description == "" + + +@pytest.mark.parametrize( + "source, expected", + [ + ("", ""), + ("\n", ""), + ("Short description", "Short description"), + ("\nShort description\n", "Short description"), + ("\n Short description\n", "Short description"), + ( + "Short description\n\nLong description", + "Short 
description\n\nLong description", + ), + ( + """ + Short description + + Long description + """, + "Short description\n\nLong description", + ), + ( + """ + Short description + + Long description + Second line + """, + "Short description\n\nLong description\nSecond line", + ), + ( + "Short description\nLong description", + "Short description\nLong description", + ), + ( + """ + Short description + Long description + """, + "Short description\nLong description", + ), + ( + "\nShort description\nLong description\n", + "Short description\nLong description", + ), + ( + """ + Short description + Long description + Second line + """, + "Short description\nLong description\nSecond line", + ), + ( + """ + Short description + Meta: + asd + """, + "Short description\nMeta:\n asd", + ), + ( + """ + Short description + Long description + Meta: + asd + """, + "Short description\nLong description\nMeta:\n asd", + ), + ( + """ + Short description + First line + Second line + Meta: + asd + """, + "Short description\n" + "First line\n" + " Second line\n" + "Meta:\n" + " asd", + ), + ( + """ + Short description + + First line + Second line + Meta: + asd + """, + "Short description\n" + "\n" + "First line\n" + " Second line\n" + "Meta:\n" + " asd", + ), + ( + """ + Short description + + First line + Second line + + Meta: + asd + """, + "Short description\n" + "\n" + "First line\n" + " Second line\n" + "\n" + "Meta:\n" + " asd", + ), + ( + """ + Short description + + Meta: + asd + 1 + 2 + 3 + """, + "Short description\n" + "\n" + "Meta:\n" + " asd\n" + " 1\n" + " 2\n" + " 3", + ), + ( + """ + Short description + + Meta1: + asd + 1 + 2 + 3 + Meta2: + herp + Meta3: + derp + """, + "Short description\n" + "\n" + "Meta1:\n" + " asd\n" + " 1\n" + " 2\n" + " 3\n" + "Meta2:\n" + " herp\n" + "Meta3:\n" + " derp", + ), + ( + """ + Short description + + Args: + name: description 1 + priority (int): description 2 + sender (str, optional): description 3 + message (str, optional): description 4, 
defaults to 'hello' + multiline (str?): + long description 5, + defaults to 'bye' + """, + "Short description\n" + "\n" + "Args:\n" + " name: description 1\n" + " priority (int): description 2\n" + " sender (str?): description 3\n" + " message (str?): description 4, defaults to 'hello'\n" + " multiline (str?): long description 5,\n" + " defaults to 'bye'", + ), + ( + """ + Short description + Raises: + ValueError: description + """, + "Short description\nRaises:\n ValueError: description", + ), + ], +) +def test_compose(source: str, expected: str) -> None: + """Test compose in default mode.""" + assert compose(parse(source)) == expected + + +@pytest.mark.parametrize( + "source, expected", + [ + ( + """ + Short description + + Args: + name: description 1 + priority (int): description 2 + sender (str, optional): description 3 + message (str, optional): description 4, defaults to 'hello' + multiline (str?): + long description 5, + defaults to 'bye' + """, + "Short description\n" + "\n" + "Args:\n" + " name: description 1\n" + " priority (int): description 2\n" + " sender (str, optional): description 3\n" + " message (str, optional): description 4, defaults to 'hello'\n" + " multiline (str, optional): long description 5,\n" + " defaults to 'bye'", + ), + ], +) +def test_compose_clean(source: str, expected: str) -> None: + """Test compose in clean mode.""" + assert ( + compose(parse(source), rendering_style=RenderingStyle.CLEAN) + == expected + ) + + +@pytest.mark.parametrize( + "source, expected", + [ + ( + """ + Short description + + Args: + name: description 1 + priority (int): description 2 + sender (str, optional): description 3 + message (str, optional): description 4, defaults to 'hello' + multiline (str?): + long description 5, + defaults to 'bye' + """, + "Short description\n" + "\n" + "Args:\n" + " name:\n" + " description 1\n" + " priority (int):\n" + " description 2\n" + " sender (str, optional):\n" + " description 3\n" + " message (str, optional):\n" + " 
description 4, defaults to 'hello'\n" + " multiline (str, optional):\n" + " long description 5,\n" + " defaults to 'bye'", + ), + ], +) +def test_compose_expanded(source: str, expected: str) -> None: + """Test compose in expanded mode.""" + assert ( + compose(parse(source), rendering_style=RenderingStyle.EXPANDED) + == expected + ) diff --git a/parrot/lib/python3.10/site-packages/docstring_parser/tests/test_numpydoc.py b/parrot/lib/python3.10/site-packages/docstring_parser/tests/test_numpydoc.py new file mode 100644 index 0000000000000000000000000000000000000000..cd5a6dbe637083854b9714e82a85bde96c1bc6cb --- /dev/null +++ b/parrot/lib/python3.10/site-packages/docstring_parser/tests/test_numpydoc.py @@ -0,0 +1,1087 @@ +"""Tests for numpydoc-style docstring routines.""" +import typing as T + +import pytest +from docstring_parser.numpydoc import compose, parse + + +@pytest.mark.parametrize( + "source, expected", + [ + ("", None), + ("\n", None), + ("Short description", "Short description"), + ("\nShort description\n", "Short description"), + ("\n Short description\n", "Short description"), + ], +) +def test_short_description(source: str, expected: str) -> None: + """Test parsing short description.""" + docstring = parse(source) + assert docstring.short_description == expected + assert docstring.long_description is None + assert not docstring.meta + + +@pytest.mark.parametrize( + "source, expected_short_desc, expected_long_desc, expected_blank", + [ + ( + "Short description\n\nLong description", + "Short description", + "Long description", + True, + ), + ( + """ + Short description + + Long description + """, + "Short description", + "Long description", + True, + ), + ( + """ + Short description + + Long description + Second line + """, + "Short description", + "Long description\nSecond line", + True, + ), + ( + "Short description\nLong description", + "Short description", + "Long description", + False, + ), + ( + """ + Short description + Long description + """, + 
"Short description", + "Long description", + False, + ), + ( + "\nShort description\nLong description\n", + "Short description", + "Long description", + False, + ), + ( + """ + Short description + Long description + Second line + """, + "Short description", + "Long description\nSecond line", + False, + ), + ], +) +def test_long_description( + source: str, + expected_short_desc: str, + expected_long_desc: str, + expected_blank: bool, +) -> None: + """Test parsing long description.""" + docstring = parse(source) + assert docstring.short_description == expected_short_desc + assert docstring.long_description == expected_long_desc + assert docstring.blank_after_short_description == expected_blank + assert not docstring.meta + + +@pytest.mark.parametrize( + "source, expected_short_desc, expected_long_desc, " + "expected_blank_short_desc, expected_blank_long_desc", + [ + ( + """ + Short description + Parameters + ---------- + asd + """, + "Short description", + None, + False, + False, + ), + ( + """ + Short description + Long description + Parameters + ---------- + asd + """, + "Short description", + "Long description", + False, + False, + ), + ( + """ + Short description + First line + Second line + Parameters + ---------- + asd + """, + "Short description", + "First line\n Second line", + False, + False, + ), + ( + """ + Short description + + First line + Second line + Parameters + ---------- + asd + """, + "Short description", + "First line\n Second line", + True, + False, + ), + ( + """ + Short description + + First line + Second line + + Parameters + ---------- + asd + """, + "Short description", + "First line\n Second line", + True, + True, + ), + ( + """ + Parameters + ---------- + asd + """, + None, + None, + False, + False, + ), + ], +) +def test_meta_newlines( + source: str, + expected_short_desc: T.Optional[str], + expected_long_desc: T.Optional[str], + expected_blank_short_desc: bool, + expected_blank_long_desc: bool, +) -> None: + """Test parsing newlines 
around description sections.""" + docstring = parse(source) + assert docstring.short_description == expected_short_desc + assert docstring.long_description == expected_long_desc + assert docstring.blank_after_short_description == expected_blank_short_desc + assert docstring.blank_after_long_description == expected_blank_long_desc + assert len(docstring.meta) == 1 + + +def test_meta_with_multiline_description() -> None: + """Test parsing multiline meta documentation.""" + docstring = parse( + """ + Short description + + Parameters + ---------- + spam + asd + 1 + 2 + 3 + """ + ) + assert docstring.short_description == "Short description" + assert len(docstring.meta) == 1 + assert docstring.meta[0].args == ["param", "spam"] + assert docstring.meta[0].arg_name == "spam" + assert docstring.meta[0].description == "asd\n1\n 2\n3" + + +@pytest.mark.parametrize( + "source, expected_is_optional, expected_type_name, expected_default", + [ + ( + """ + Parameters + ---------- + arg1 : int + The first arg + """, + False, + "int", + None, + ), + ( + """ + Parameters + ---------- + arg2 : str + The second arg + """, + False, + "str", + None, + ), + ( + """ + Parameters + ---------- + arg3 : float, optional + The third arg. Default is 1.0. + """, + True, + "float", + "1.0", + ), + ( + """ + Parameters + ---------- + arg4 : Optional[Dict[str, Any]], optional + The fourth arg. Defaults to None + """, + True, + "Optional[Dict[str, Any]]", + "None", + ), + ( + """ + Parameters + ---------- + arg5 : str, optional + The fifth arg. Default: DEFAULT_ARGS + """, + True, + "str", + "DEFAULT_ARGS", + ), + ( + """ + Parameters + ---------- + parameter_without_default : int + The parameter_without_default is required. 
+ """, + False, + "int", + None, + ), + ], +) +def test_default_args( + source: str, + expected_is_optional: bool, + expected_type_name: T.Optional[str], + expected_default: T.Optional[str], +) -> None: + """Test parsing default arguments.""" + docstring = parse(source) + assert docstring is not None + assert len(docstring.params) == 1 + + arg1 = docstring.params[0] + assert arg1.is_optional == expected_is_optional + assert arg1.type_name == expected_type_name + assert arg1.default == expected_default + + +def test_multiple_meta() -> None: + """Test parsing multiple meta.""" + docstring = parse( + """ + Short description + + Parameters + ---------- + spam + asd + 1 + 2 + 3 + + Raises + ------ + bla + herp + yay + derp + """ + ) + assert docstring.short_description == "Short description" + assert len(docstring.meta) == 3 + assert docstring.meta[0].args == ["param", "spam"] + assert docstring.meta[0].arg_name == "spam" + assert docstring.meta[0].description == "asd\n1\n 2\n3" + assert docstring.meta[1].args == ["raises", "bla"] + assert docstring.meta[1].type_name == "bla" + assert docstring.meta[1].description == "herp" + assert docstring.meta[2].args == ["raises", "yay"] + assert docstring.meta[2].type_name == "yay" + assert docstring.meta[2].description == "derp" + + +def test_params() -> None: + """Test parsing params.""" + docstring = parse("Short description") + assert len(docstring.params) == 0 + + docstring = parse( + """ + Short description + + Parameters + ---------- + name + description 1 + priority : int + description 2 + sender : str, optional + description 3 + ratio : Optional[float], optional + description 4 + """ + ) + assert len(docstring.params) == 4 + assert docstring.params[0].arg_name == "name" + assert docstring.params[0].type_name is None + assert docstring.params[0].description == "description 1" + assert not docstring.params[0].is_optional + assert docstring.params[1].arg_name == "priority" + assert docstring.params[1].type_name == "int" + 
assert docstring.params[1].description == "description 2" + assert not docstring.params[1].is_optional + assert docstring.params[2].arg_name == "sender" + assert docstring.params[2].type_name == "str" + assert docstring.params[2].description == "description 3" + assert docstring.params[2].is_optional + assert docstring.params[3].arg_name == "ratio" + assert docstring.params[3].type_name == "Optional[float]" + assert docstring.params[3].description == "description 4" + assert docstring.params[3].is_optional + + docstring = parse( + """ + Short description + + Parameters + ---------- + name + description 1 + with multi-line text + priority : int + description 2 + """ + ) + assert len(docstring.params) == 2 + assert docstring.params[0].arg_name == "name" + assert docstring.params[0].type_name is None + assert docstring.params[0].description == ( + "description 1\nwith multi-line text" + ) + assert docstring.params[1].arg_name == "priority" + assert docstring.params[1].type_name == "int" + assert docstring.params[1].description == "description 2" + + +def test_attributes() -> None: + """Test parsing attributes.""" + docstring = parse("Short description") + assert len(docstring.params) == 0 + + docstring = parse( + """ + Short description + + Attributes + ---------- + name + description 1 + priority : int + description 2 + sender : str, optional + description 3 + ratio : Optional[float], optional + description 4 + """ + ) + assert len(docstring.params) == 4 + assert docstring.params[0].arg_name == "name" + assert docstring.params[0].type_name is None + assert docstring.params[0].description == "description 1" + assert not docstring.params[0].is_optional + assert docstring.params[1].arg_name == "priority" + assert docstring.params[1].type_name == "int" + assert docstring.params[1].description == "description 2" + assert not docstring.params[1].is_optional + assert docstring.params[2].arg_name == "sender" + assert docstring.params[2].type_name == "str" + assert 
docstring.params[2].description == "description 3" + assert docstring.params[2].is_optional + assert docstring.params[3].arg_name == "ratio" + assert docstring.params[3].type_name == "Optional[float]" + assert docstring.params[3].description == "description 4" + assert docstring.params[3].is_optional + + docstring = parse( + """ + Short description + + Attributes + ---------- + name + description 1 + with multi-line text + priority : int + description 2 + """ + ) + assert len(docstring.params) == 2 + assert docstring.params[0].arg_name == "name" + assert docstring.params[0].type_name is None + assert docstring.params[0].description == ( + "description 1\nwith multi-line text" + ) + assert docstring.params[1].arg_name == "priority" + assert docstring.params[1].type_name == "int" + assert docstring.params[1].description == "description 2" + + +def test_other_params() -> None: + """Test parsing other parameters.""" + docstring = parse( + """ + Short description + Other Parameters + ---------------- + only_seldom_used_keywords : type, optional + Explanation + common_parameters_listed_above : type, optional + Explanation + """ + ) + assert len(docstring.meta) == 2 + assert docstring.meta[0].args == [ + "other_param", + "only_seldom_used_keywords", + ] + assert docstring.meta[0].arg_name == "only_seldom_used_keywords" + assert docstring.meta[0].type_name == "type" + assert docstring.meta[0].is_optional + assert docstring.meta[0].description == "Explanation" + + assert docstring.meta[1].args == [ + "other_param", + "common_parameters_listed_above", + ] + + +def test_yields() -> None: + """Test parsing yields.""" + docstring = parse( + """ + Short description + Yields + ------ + int + description + """ + ) + assert len(docstring.meta) == 1 + assert docstring.meta[0].args == ["yields"] + assert docstring.meta[0].type_name == "int" + assert docstring.meta[0].description == "description" + assert docstring.meta[0].return_name is None + assert docstring.meta[0].is_generator + 
+ +def test_returns() -> None: + """Test parsing returns.""" + docstring = parse( + """ + Short description + """ + ) + assert docstring.returns is None + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 0 + + docstring = parse( + """ + Short description + Returns + ------- + type + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name == "type" + assert docstring.returns.description is None + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 1 + assert docstring.many_returns[0] == docstring.returns + + docstring = parse( + """ + Short description + Returns + ------- + int + description + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name == "int" + assert docstring.returns.description == "description" + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 1 + assert docstring.many_returns[0] == docstring.returns + + docstring = parse( + """ + Returns + ------- + Optional[Mapping[str, List[int]]] + A description: with a colon + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name == "Optional[Mapping[str, List[int]]]" + assert docstring.returns.description == "A description: with a colon" + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 1 + assert docstring.many_returns[0] == docstring.returns + + docstring = parse( + """ + Short description + Returns + ------- + int + description + with much text + + even some spacing + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name == "int" + assert docstring.returns.description == ( + "description\nwith much text\n\neven some spacing" + ) + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 1 + assert docstring.many_returns[0] == docstring.returns + + docstring = parse( + """ + Short description + Returns + ------- + a : int + description for 
a + b : str + description for b + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name == "int" + assert docstring.returns.description == ("description for a") + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 2 + assert docstring.many_returns[0].type_name == "int" + assert docstring.many_returns[0].description == "description for a" + assert docstring.many_returns[0].return_name == "a" + assert docstring.many_returns[1].type_name == "str" + assert docstring.many_returns[1].description == "description for b" + assert docstring.many_returns[1].return_name == "b" + + +def test_raises() -> None: + """Test parsing raises.""" + docstring = parse( + """ + Short description + """ + ) + assert len(docstring.raises) == 0 + + docstring = parse( + """ + Short description + Raises + ------ + ValueError + description + """ + ) + assert len(docstring.raises) == 1 + assert docstring.raises[0].type_name == "ValueError" + assert docstring.raises[0].description == "description" + + +def test_warns() -> None: + """Test parsing warns.""" + docstring = parse( + """ + Short description + Warns + ----- + UserWarning + description + """ + ) + assert len(docstring.meta) == 1 + assert docstring.meta[0].type_name == "UserWarning" + assert docstring.meta[0].description == "description" + + +def test_simple_sections() -> None: + """Test parsing simple sections.""" + docstring = parse( + """ + Short description + + See Also + -------- + something : some thing you can also see + actually, anything can go in this section + + Warnings + -------- + Here be dragons + + Notes + ----- + None of this is real + + References + ---------- + Cite the relevant literature, e.g. [1]_. You may also cite these + references in the notes section above. + + .. [1] O. 
McNoleg, "The integration of GIS, remote sensing, + expert systems and adaptive co-kriging for environmental habitat + modelling of the Highland Haggis using object-oriented, fuzzy-logic + and neural-network techniques," Computers & Geosciences, vol. 22, + pp. 585-588, 1996. + """ + ) + assert len(docstring.meta) == 4 + assert docstring.meta[0].args == ["see_also"] + assert docstring.meta[0].description == ( + "something : some thing you can also see\n" + "actually, anything can go in this section" + ) + + assert docstring.meta[1].args == ["warnings"] + assert docstring.meta[1].description == "Here be dragons" + + assert docstring.meta[2].args == ["notes"] + assert docstring.meta[2].description == "None of this is real" + + assert docstring.meta[3].args == ["references"] + + +@pytest.mark.parametrize( + "source, expected_results", + [ + ( + "Description\nExamples\n--------\nlong example\n\nmore here", + [ + (None, "long example\n\nmore here"), + ], + ), + ( + "Description\nExamples\n--------\n>>> test", + [ + (">>> test", ""), + ], + ), + ( + "Description\nExamples\n--------\n>>> testa\n>>> testb", + [ + (">>> testa\n>>> testb", ""), + ], + ), + ( + "Description\nExamples\n--------\n>>> test1\ndesc1", + [ + (">>> test1", "desc1"), + ], + ), + ( + "Description\nExamples\n--------\n" + ">>> test1a\n>>> test1b\ndesc1a\ndesc1b", + [ + (">>> test1a\n>>> test1b", "desc1a\ndesc1b"), + ], + ), + ( + "Description\nExamples\n--------\n" + ">>> test1\ndesc1\n>>> test2\ndesc2", + [ + (">>> test1", "desc1"), + (">>> test2", "desc2"), + ], + ), + ( + "Description\nExamples\n--------\n" + ">>> test1a\n>>> test1b\ndesc1a\ndesc1b\n" + ">>> test2a\n>>> test2b\ndesc2a\ndesc2b\n", + [ + (">>> test1a\n>>> test1b", "desc1a\ndesc1b"), + (">>> test2a\n>>> test2b", "desc2a\ndesc2b"), + ], + ), + ( + "Description\nExamples\n--------\n" + " >>> test1a\n >>> test1b\n desc1a\n desc1b\n" + " >>> test2a\n >>> test2b\n desc2a\n desc2b\n", + [ + (">>> test1a\n>>> test1b", "desc1a\ndesc1b"), + 
(">>> test2a\n>>> test2b", "desc2a\ndesc2b"), + ], + ), + ], +) +def test_examples( + source, expected_results: T.List[T.Tuple[T.Optional[str], str]] +) -> None: + """Test parsing examples.""" + docstring = parse(source) + assert len(docstring.meta) == len(expected_results) + for meta, expected_result in zip(docstring.meta, expected_results): + assert meta.description == expected_result[1] + assert len(docstring.examples) == len(expected_results) + for example, expected_result in zip(docstring.examples, expected_results): + assert example.snippet == expected_result[0] + assert example.description == expected_result[1] + + +@pytest.mark.parametrize( + "source, expected_depr_version, expected_depr_desc", + [ + ( + "Short description\n\n.. deprecated:: 1.6.0\n This is busted!", + "1.6.0", + "This is busted!", + ), + ( + ( + "Short description\n\n" + ".. deprecated:: 1.6.0\n" + " This description has\n" + " multiple lines!" + ), + "1.6.0", + "This description has\nmultiple lines!", + ), + ("Short description\n\n.. deprecated:: 1.6.0", "1.6.0", None), + ( + "Short description\n\n.. 
deprecated::\n No version!", + None, + "No version!", + ), + ], +) +def test_deprecation( + source: str, + expected_depr_version: T.Optional[str], + expected_depr_desc: T.Optional[str], +) -> None: + """Test parsing deprecation notes.""" + docstring = parse(source) + + assert docstring.deprecation is not None + assert docstring.deprecation.version == expected_depr_version + assert docstring.deprecation.description == expected_depr_desc + + +@pytest.mark.parametrize( + "source, expected", + [ + ("", ""), + ("\n", ""), + ("Short description", "Short description"), + ("\nShort description\n", "Short description"), + ("\n Short description\n", "Short description"), + ( + "Short description\n\nLong description", + "Short description\n\nLong description", + ), + ( + """ + Short description + + Long description + """, + "Short description\n\nLong description", + ), + ( + """ + Short description + + Long description + Second line + """, + "Short description\n\nLong description\nSecond line", + ), + ( + "Short description\nLong description", + "Short description\nLong description", + ), + ( + """ + Short description + Long description + """, + "Short description\nLong description", + ), + ( + "\nShort description\nLong description\n", + "Short description\nLong description", + ), + ( + """ + Short description + Long description + Second line + """, + "Short description\nLong description\nSecond line", + ), + ( + """ + Short description + Meta: + ----- + asd + """, + "Short description\nMeta:\n-----\n asd", + ), + ( + """ + Short description + Long description + Meta: + ----- + asd + """, + "Short description\n" + "Long description\n" + "Meta:\n" + "-----\n" + " asd", + ), + ( + """ + Short description + First line + Second line + Meta: + ----- + asd + """, + "Short description\n" + "First line\n" + " Second line\n" + "Meta:\n" + "-----\n" + " asd", + ), + ( + """ + Short description + + First line + Second line + Meta: + ----- + asd + """, + "Short description\n" + "\n" + 
"First line\n" + " Second line\n" + "Meta:\n" + "-----\n" + " asd", + ), + ( + """ + Short description + + First line + Second line + + Meta: + ----- + asd + """, + "Short description\n" + "\n" + "First line\n" + " Second line\n" + "\n" + "Meta:\n" + "-----\n" + " asd", + ), + ( + """ + Short description + + Meta: + ----- + asd + 1 + 2 + 3 + """, + "Short description\n" + "\n" + "Meta:\n" + "-----\n" + " asd\n" + " 1\n" + " 2\n" + " 3", + ), + ( + """ + Short description + + Meta1: + ------ + asd + 1 + 2 + 3 + Meta2: + ------ + herp + Meta3: + ------ + derp + """, + "Short description\n" + "\n" + "Meta1:\n" + "------\n" + " asd\n" + " 1\n" + " 2\n" + " 3\n" + "Meta2:\n" + "------\n" + " herp\n" + "Meta3:\n" + "------\n" + " derp", + ), + ( + """ + Short description + + Parameters: + ----------- + name + description 1 + priority: int + description 2 + sender: str, optional + description 3 + message: str, optional + description 4, defaults to 'hello' + multiline: str, optional + long description 5, + defaults to 'bye' + """, + "Short description\n" + "\n" + "Parameters:\n" + "-----------\n" + " name\n" + " description 1\n" + " priority: int\n" + " description 2\n" + " sender: str, optional\n" + " description 3\n" + " message: str, optional\n" + " description 4, defaults to 'hello'\n" + " multiline: str, optional\n" + " long description 5,\n" + " defaults to 'bye'", + ), + ( + """ + Short description + Raises: + ------- + ValueError + description + """, + "Short description\n" + "Raises:\n" + "-------\n" + " ValueError\n" + " description", + ), + ( + """ + Description + Examples: + -------- + >>> test1a + >>> test1b + desc1a + desc1b + >>> test2a + >>> test2b + desc2a + desc2b + """, + "Description\n" + "Examples:\n" + "--------\n" + ">>> test1a\n" + ">>> test1b\n" + "desc1a\n" + "desc1b\n" + ">>> test2a\n" + ">>> test2b\n" + "desc2a\n" + "desc2b", + ), + ], +) +def test_compose(source: str, expected: str) -> None: + """Test compose in default mode.""" + assert 
compose(parse(source)) == expected diff --git a/parrot/lib/python3.10/site-packages/docstring_parser/tests/test_parse_from_object.py b/parrot/lib/python3.10/site-packages/docstring_parser/tests/test_parse_from_object.py new file mode 100644 index 0000000000000000000000000000000000000000..1db37da7bf0e4e224c925f0541c53b2c6b9d6d44 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/docstring_parser/tests/test_parse_from_object.py @@ -0,0 +1,109 @@ +"""Tests for parse_from_object function and attribute docstrings.""" +from unittest.mock import patch + +from docstring_parser import parse_from_object + +module_attr: int = 1 +"""Description for module_attr""" + + +def test_from_module_attribute_docstrings() -> None: + """Test the parse of attribute docstrings from a module.""" + from . import test_parse_from_object # pylint: disable=C0415,W0406 + + docstring = parse_from_object(test_parse_from_object) + + assert "parse_from_object" in docstring.short_description + assert len(docstring.params) == 1 + assert docstring.params[0].arg_name == "module_attr" + assert docstring.params[0].type_name == "int" + assert docstring.params[0].description == "Description for module_attr" + + +def test_from_class_attribute_docstrings() -> None: + """Test the parse of attribute docstrings from a class.""" + + class StandardCase: + """Short description + Long description + """ + + attr_one: str + """Description for attr_one""" + attr_two: bool = False + """Description for attr_two""" + + docstring = parse_from_object(StandardCase) + + assert docstring.short_description == "Short description" + assert docstring.long_description == "Long description" + assert docstring.description == "Short description\nLong description" + assert len(docstring.params) == 2 + assert docstring.params[0].arg_name == "attr_one" + assert docstring.params[0].type_name == "str" + assert docstring.params[0].description == "Description for attr_one" + assert docstring.params[1].arg_name == "attr_two" + assert 
docstring.params[1].type_name == "bool" + assert docstring.params[1].description == "Description for attr_two" + + +def test_from_class_attribute_docstrings_without_type() -> None: + """Test the parse of untyped attribute docstrings.""" + + class WithoutType: # pylint: disable=missing-class-docstring + attr_one = "value" + """Description for attr_one""" + + docstring = parse_from_object(WithoutType) + + assert docstring.short_description is None + assert docstring.long_description is None + assert docstring.description is None + assert len(docstring.params) == 1 + assert docstring.params[0].arg_name == "attr_one" + assert docstring.params[0].type_name is None + assert docstring.params[0].description == "Description for attr_one" + + +def test_from_class_without_source() -> None: + """Test the parse of class when source is unavailable.""" + + class WithoutSource: + """Short description""" + + attr_one: str + """Description for attr_one""" + + with patch( + "inspect.getsource", side_effect=OSError("could not get source code") + ): + docstring = parse_from_object(WithoutSource) + + assert docstring.short_description == "Short description" + assert docstring.long_description is None + assert docstring.description == "Short description" + assert len(docstring.params) == 0 + + +def test_from_function() -> None: + """Test the parse of a function docstring.""" + + def a_function(param1: str, param2: int = 2): + """Short description + Args: + param1: Description for param1 + param2: Description for param2 + """ + return f"{param1} {param2}" + + docstring = parse_from_object(a_function) + + assert docstring.short_description == "Short description" + assert docstring.description == "Short description" + assert len(docstring.params) == 2 + assert docstring.params[0].arg_name == "param1" + assert docstring.params[0].type_name is None + assert docstring.params[0].description == "Description for param1" + assert docstring.params[1].arg_name == "param2" + assert 
docstring.params[1].type_name is None + assert docstring.params[1].description == "Description for param2" diff --git a/parrot/lib/python3.10/site-packages/docstring_parser/tests/test_parser.py b/parrot/lib/python3.10/site-packages/docstring_parser/tests/test_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..fcb78bcf0919e6a811fc40eb2a3b21f8189d4a28 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/docstring_parser/tests/test_parser.py @@ -0,0 +1,222 @@ +"""Tests for generic docstring routines.""" +import pytest +from docstring_parser.common import DocstringStyle, ParseError +from docstring_parser.parser import parse + + +def test_rest() -> None: + """Test ReST-style parser autodetection.""" + docstring = parse( + """ + Short description + + Long description + + Causing people to indent: + + A lot sometimes + + :param spam: spam desc + :param int bla: bla desc + :param str yay: + :raises ValueError: exc desc + :returns tuple: ret desc + """ + ) + + assert docstring.style == DocstringStyle.REST + assert docstring.short_description == "Short description" + assert docstring.long_description == ( + "Long description\n\n" + "Causing people to indent:\n\n" + " A lot sometimes" + ) + assert docstring.description == ( + "Short description\n\n" + "Long description\n\n" + "Causing people to indent:\n\n" + " A lot sometimes" + ) + assert len(docstring.params) == 3 + assert docstring.params[0].arg_name == "spam" + assert docstring.params[0].type_name is None + assert docstring.params[0].description == "spam desc" + assert docstring.params[1].arg_name == "bla" + assert docstring.params[1].type_name == "int" + assert docstring.params[1].description == "bla desc" + assert docstring.params[2].arg_name == "yay" + assert docstring.params[2].type_name == "str" + assert docstring.params[2].description == "" + assert len(docstring.raises) == 1 + assert docstring.raises[0].type_name == "ValueError" + assert docstring.raises[0].description == "exc desc" 
+ assert docstring.returns is not None + assert docstring.returns.type_name == "tuple" + assert docstring.returns.description == "ret desc" + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 1 + assert docstring.many_returns[0] == docstring.returns + + +def test_google() -> None: + """Test Google-style parser autodetection.""" + docstring = parse( + """Short description + + Long description + + Causing people to indent: + + A lot sometimes + + Args: + spam: spam desc + bla (int): bla desc + yay (str): + + Raises: + ValueError: exc desc + + Returns: + tuple: ret desc + """ + ) + + assert docstring.style == DocstringStyle.GOOGLE + assert docstring.short_description == "Short description" + assert docstring.long_description == ( + "Long description\n\n" + "Causing people to indent:\n\n" + " A lot sometimes" + ) + assert docstring.description == ( + "Short description\n\n" + "Long description\n\n" + "Causing people to indent:\n\n" + " A lot sometimes" + ) + assert len(docstring.params) == 3 + assert docstring.params[0].arg_name == "spam" + assert docstring.params[0].type_name is None + assert docstring.params[0].description == "spam desc" + assert docstring.params[1].arg_name == "bla" + assert docstring.params[1].type_name == "int" + assert docstring.params[1].description == "bla desc" + assert docstring.params[2].arg_name == "yay" + assert docstring.params[2].type_name == "str" + assert docstring.params[2].description == "" + assert len(docstring.raises) == 1 + assert docstring.raises[0].type_name == "ValueError" + assert docstring.raises[0].description == "exc desc" + assert docstring.returns is not None + assert docstring.returns.type_name == "tuple" + assert docstring.returns.description == "ret desc" + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 1 + assert docstring.many_returns[0] == docstring.returns + + +def test_numpydoc() -> None: + """Test numpydoc-style parser autodetection.""" + 
docstring = parse( + """Short description + + Long description + + Causing people to indent: + + A lot sometimes + + Parameters + ---------- + spam + spam desc + bla : int + bla desc + yay : str + + Raises + ------ + ValueError + exc desc + + Other Parameters + ---------------- + this_guy : int, optional + you know him + + Returns + ------- + tuple + ret desc + + See Also + -------- + multiple lines... + something else? + + Warnings + -------- + multiple lines... + none of this is real! + """ + ) + + assert docstring.style == DocstringStyle.NUMPYDOC + assert docstring.short_description == "Short description" + assert docstring.long_description == ( + "Long description\n\n" + "Causing people to indent:\n\n" + " A lot sometimes" + ) + assert docstring.description == ( + "Short description\n\n" + "Long description\n\n" + "Causing people to indent:\n\n" + " A lot sometimes" + ) + assert len(docstring.params) == 4 + assert docstring.params[0].arg_name == "spam" + assert docstring.params[0].type_name is None + assert docstring.params[0].description == "spam desc" + assert docstring.params[1].arg_name == "bla" + assert docstring.params[1].type_name == "int" + assert docstring.params[1].description == "bla desc" + assert docstring.params[2].arg_name == "yay" + assert docstring.params[2].type_name == "str" + assert docstring.params[2].description is None + assert docstring.params[3].arg_name == "this_guy" + assert docstring.params[3].type_name == "int" + assert docstring.params[3].is_optional + assert docstring.params[3].description == "you know him" + + assert len(docstring.raises) == 1 + assert docstring.raises[0].type_name == "ValueError" + assert docstring.raises[0].description == "exc desc" + assert docstring.returns is not None + assert docstring.returns.type_name == "tuple" + assert docstring.returns.description == "ret desc" + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 1 + assert docstring.many_returns[0] == docstring.returns + 
+ +def test_autodetection_error_detection() -> None: + """Test autodection for the case where one of the parsers throws an error + and another one succeeds. + """ + source = """ + Does something useless + + :param 3 + 3 a: a param + """ + + with pytest.raises(ParseError): + # assert that one of the parsers does raise + parse(source, DocstringStyle.REST) + + # assert that autodetection still works + docstring = parse(source) + + assert docstring + assert docstring.style == DocstringStyle.GOOGLE diff --git a/parrot/lib/python3.10/site-packages/docstring_parser/tests/test_rest.py b/parrot/lib/python3.10/site-packages/docstring_parser/tests/test_rest.py new file mode 100644 index 0000000000000000000000000000000000000000..7f850854b6cdad2bf564bcd9f50b35a4df9159bd --- /dev/null +++ b/parrot/lib/python3.10/site-packages/docstring_parser/tests/test_rest.py @@ -0,0 +1,541 @@ +"""Tests for ReST-style docstring routines.""" +import typing as T + +import pytest +from docstring_parser.common import ParseError, RenderingStyle +from docstring_parser.rest import compose, parse + + +@pytest.mark.parametrize( + "source, expected", + [ + ("", None), + ("\n", None), + ("Short description", "Short description"), + ("\nShort description\n", "Short description"), + ("\n Short description\n", "Short description"), + ], +) +def test_short_description(source: str, expected: str) -> None: + """Test parsing short description.""" + docstring = parse(source) + assert docstring.short_description == expected + assert docstring.description == expected + assert docstring.long_description is None + assert not docstring.meta + + +@pytest.mark.parametrize( + "source, expected_short_desc, expected_long_desc, expected_blank", + [ + ( + "Short description\n\nLong description", + "Short description", + "Long description", + True, + ), + ( + """ + Short description + + Long description + """, + "Short description", + "Long description", + True, + ), + ( + """ + Short description + + Long description + 
Second line + """, + "Short description", + "Long description\nSecond line", + True, + ), + ( + "Short description\nLong description", + "Short description", + "Long description", + False, + ), + ( + """ + Short description + Long description + """, + "Short description", + "Long description", + False, + ), + ( + "\nShort description\nLong description\n", + "Short description", + "Long description", + False, + ), + ( + """ + Short description + Long description + Second line + """, + "Short description", + "Long description\nSecond line", + False, + ), + ], +) +def test_long_description( + source: str, + expected_short_desc: str, + expected_long_desc: str, + expected_blank: bool, +) -> None: + """Test parsing long description.""" + docstring = parse(source) + assert docstring.short_description == expected_short_desc + assert docstring.long_description == expected_long_desc + assert docstring.blank_after_short_description == expected_blank + assert not docstring.meta + + +@pytest.mark.parametrize( + "source, expected_short_desc, expected_long_desc, " + "expected_blank_short_desc, expected_blank_long_desc, " + "expected_full_desc", + [ + ( + """ + Short description + :meta: asd + """, + "Short description", + None, + False, + False, + "Short description", + ), + ( + """ + Short description + Long description + :meta: asd + """, + "Short description", + "Long description", + False, + False, + "Short description\nLong description", + ), + ( + """ + Short description + First line + Second line + :meta: asd + """, + "Short description", + "First line\n Second line", + False, + False, + "Short description\nFirst line\n Second line", + ), + ( + """ + Short description + + First line + Second line + :meta: asd + """, + "Short description", + "First line\n Second line", + True, + False, + "Short description\n\nFirst line\n Second line", + ), + ( + """ + Short description + + First line + Second line + + :meta: asd + """, + "Short description", + "First line\n Second line", + 
True, + True, + "Short description\n\nFirst line\n Second line", + ), + ( + """ + :meta: asd + """, + None, + None, + False, + False, + None, + ), + ], +) +def test_meta_newlines( + source: str, + expected_short_desc: T.Optional[str], + expected_long_desc: T.Optional[str], + expected_blank_short_desc: bool, + expected_blank_long_desc: bool, + expected_full_desc: T.Optional[str], +) -> None: + """Test parsing newlines around description sections.""" + docstring = parse(source) + assert docstring.short_description == expected_short_desc + assert docstring.long_description == expected_long_desc + assert docstring.blank_after_short_description == expected_blank_short_desc + assert docstring.blank_after_long_description == expected_blank_long_desc + assert docstring.description == expected_full_desc + assert len(docstring.meta) == 1 + + +def test_meta_with_multiline_description() -> None: + """Test parsing multiline meta documentation.""" + docstring = parse( + """ + Short description + + :meta: asd + 1 + 2 + 3 + """ + ) + assert docstring.short_description == "Short description" + assert len(docstring.meta) == 1 + assert docstring.meta[0].args == ["meta"] + assert docstring.meta[0].description == "asd\n1\n 2\n3" + + +def test_multiple_meta() -> None: + """Test parsing multiple meta.""" + docstring = parse( + """ + Short description + + :meta1: asd + 1 + 2 + 3 + :meta2: herp + :meta3: derp + """ + ) + assert docstring.short_description == "Short description" + assert len(docstring.meta) == 3 + assert docstring.meta[0].args == ["meta1"] + assert docstring.meta[0].description == "asd\n1\n 2\n3" + assert docstring.meta[1].args == ["meta2"] + assert docstring.meta[1].description == "herp" + assert docstring.meta[2].args == ["meta3"] + assert docstring.meta[2].description == "derp" + + +def test_meta_with_args() -> None: + """Test parsing meta with additional arguments.""" + docstring = parse( + """ + Short description + + :meta ene due rabe: asd + """ + ) + assert 
docstring.short_description == "Short description" + assert len(docstring.meta) == 1 + assert docstring.meta[0].args == ["meta", "ene", "due", "rabe"] + assert docstring.meta[0].description == "asd" + + +def test_params() -> None: + """Test parsing params.""" + docstring = parse("Short description") + assert len(docstring.params) == 0 + + docstring = parse( + """ + Short description + + :param name: description 1 + :param int priority: description 2 + :param str? sender: description 3 + :param str? message: description 4, defaults to 'hello' + :param str? multiline: long description 5, + defaults to 'bye' + """ + ) + assert len(docstring.params) == 5 + assert docstring.params[0].arg_name == "name" + assert docstring.params[0].type_name is None + assert docstring.params[0].description == "description 1" + assert docstring.params[0].default is None + assert not docstring.params[0].is_optional + assert docstring.params[1].arg_name == "priority" + assert docstring.params[1].type_name == "int" + assert docstring.params[1].description == "description 2" + assert not docstring.params[1].is_optional + assert docstring.params[1].default is None + assert docstring.params[2].arg_name == "sender" + assert docstring.params[2].type_name == "str" + assert docstring.params[2].description == "description 3" + assert docstring.params[2].is_optional + assert docstring.params[2].default is None + assert docstring.params[3].arg_name == "message" + assert docstring.params[3].type_name == "str" + assert ( + docstring.params[3].description == "description 4, defaults to 'hello'" + ) + assert docstring.params[3].is_optional + assert docstring.params[3].default == "'hello'" + assert docstring.params[4].arg_name == "multiline" + assert docstring.params[4].type_name == "str" + assert ( + docstring.params[4].description + == "long description 5,\ndefaults to 'bye'" + ) + assert docstring.params[4].is_optional + assert docstring.params[4].default == "'bye'" + + docstring = parse( + """ + Short 
description + + :param a: description a + :type a: int + :param int b: description b + """ + ) + assert len(docstring.params) == 2 + assert docstring.params[0].arg_name == "a" + assert docstring.params[0].type_name == "int" + assert docstring.params[0].description == "description a" + assert docstring.params[0].default is None + assert not docstring.params[0].is_optional + + +def test_returns() -> None: + """Test parsing returns.""" + docstring = parse( + """ + Short description + """ + ) + assert docstring.returns is None + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 0 + + docstring = parse( + """ + Short description + :returns: description + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name is None + assert docstring.returns.description == "description" + assert not docstring.returns.is_generator + assert docstring.many_returns == [docstring.returns] + + docstring = parse( + """ + Short description + :returns int: description + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name == "int" + assert docstring.returns.description == "description" + assert not docstring.returns.is_generator + assert docstring.many_returns == [docstring.returns] + + docstring = parse( + """ + Short description + :returns: description + :rtype: int + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name == "int" + assert docstring.returns.description == "description" + assert not docstring.returns.is_generator + assert docstring.many_returns == [docstring.returns] + + +def test_yields() -> None: + """Test parsing yields.""" + docstring = parse( + """ + Short description + """ + ) + assert docstring.returns is None + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 0 + + docstring = parse( + """ + Short description + :yields: description + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name 
is None + assert docstring.returns.description == "description" + assert docstring.returns.is_generator + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 1 + assert docstring.many_returns[0] == docstring.returns + + docstring = parse( + """ + Short description + :yields int: description + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name == "int" + assert docstring.returns.description == "description" + assert docstring.returns.is_generator + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 1 + assert docstring.many_returns[0] == docstring.returns + + +def test_raises() -> None: + """Test parsing raises.""" + docstring = parse( + """ + Short description + """ + ) + assert len(docstring.raises) == 0 + + docstring = parse( + """ + Short description + :raises: description + """ + ) + assert len(docstring.raises) == 1 + assert docstring.raises[0].type_name is None + assert docstring.raises[0].description == "description" + + docstring = parse( + """ + Short description + :raises ValueError: description + """ + ) + assert len(docstring.raises) == 1 + assert docstring.raises[0].type_name == "ValueError" + assert docstring.raises[0].description == "description" + + +def test_broken_meta() -> None: + """Test parsing broken meta.""" + with pytest.raises(ParseError): + parse(":") + + with pytest.raises(ParseError): + parse(":param herp derp") + + with pytest.raises(ParseError): + parse(":param: invalid") + + with pytest.raises(ParseError): + parse(":param with too many args: desc") + + # these should not raise any errors + parse(":sthstrange: desc") + + +def test_deprecation() -> None: + """Test parsing deprecation notes.""" + docstring = parse(":deprecation: 1.1.0 this function will be removed") + assert docstring.deprecation is not None + assert docstring.deprecation.version == "1.1.0" + assert docstring.deprecation.description == "this function will be removed" + + 
docstring = parse(":deprecation: this function will be removed") + assert docstring.deprecation is not None + assert docstring.deprecation.version is None + assert docstring.deprecation.description == "this function will be removed" + + +@pytest.mark.parametrize( + "rendering_style, expected", + [ + ( + RenderingStyle.COMPACT, + "Short description.\n" + "\n" + "Long description.\n" + "\n" + ":param int foo: a description\n" + ":param int bar: another description\n" + ":returns float: a return", + ), + ( + RenderingStyle.CLEAN, + "Short description.\n" + "\n" + "Long description.\n" + "\n" + ":param int foo: a description\n" + ":param int bar: another description\n" + ":returns float: a return", + ), + ( + RenderingStyle.EXPANDED, + "Short description.\n" + "\n" + "Long description.\n" + "\n" + ":param foo:\n" + " a description\n" + ":type foo: int\n" + ":param bar:\n" + " another description\n" + ":type bar: int\n" + ":returns:\n" + " a return\n" + ":rtype: float", + ), + ], +) +def test_compose(rendering_style: RenderingStyle, expected: str) -> None: + """Test compose""" + + docstring = parse( + """ + Short description. + + Long description. 
+ + :param int foo: a description + :param int bar: another description + :return float: a return + """ + ) + assert compose(docstring, rendering_style=rendering_style) == expected + + +def test_short_rtype() -> None: + """Test abbreviated docstring with only return type information.""" + string = "Short description.\n\n:rtype: float" + docstring = parse(string) + rendering_style = RenderingStyle.EXPANDED + assert compose(docstring, rendering_style=rendering_style) == string diff --git a/parrot/lib/python3.10/site-packages/docstring_parser/tests/test_util.py b/parrot/lib/python3.10/site-packages/docstring_parser/tests/test_util.py new file mode 100644 index 0000000000000000000000000000000000000000..00bc2f985f35805c3bf10766bb4f80ff0d0a3ee5 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/docstring_parser/tests/test_util.py @@ -0,0 +1,64 @@ +"""Test for utility functions.""" + +from docstring_parser.common import DocstringReturns +from docstring_parser.util import combine_docstrings + + +def test_combine_docstrings() -> None: + """Test combine_docstrings wrapper.""" + + def fun1(arg_a, arg_b, arg_c, arg_d): + """short_description: fun1 + + :param arg_a: fun1 + :param arg_b: fun1 + :return: fun1 + """ + assert arg_a and arg_b and arg_c and arg_d + + def fun2(arg_b, arg_c, arg_d, arg_e): + """short_description: fun2 + + long_description: fun2 + + :param arg_b: fun2 + :param arg_c: fun2 + :param arg_e: fun2 + """ + assert arg_b and arg_c and arg_d and arg_e + + @combine_docstrings(fun1, fun2) + def decorated1(arg_a, arg_b, arg_c, arg_d, arg_e, arg_f): + """ + :param arg_e: decorated + :param arg_f: decorated + """ + assert arg_a and arg_b and arg_c and arg_d and arg_e and arg_f + + assert decorated1.__doc__ == ( + "short_description: fun2\n" + "\n" + "long_description: fun2\n" + "\n" + ":param arg_a: fun1\n" + ":param arg_b: fun1\n" + ":param arg_c: fun2\n" + ":param arg_e: fun2\n" + ":param arg_f: decorated\n" + ":returns: fun1" + ) + + @combine_docstrings(fun1, 
fun2, exclude=[DocstringReturns]) + def decorated2(arg_a, arg_b, arg_c, arg_d, arg_e, arg_f): + assert arg_a and arg_b and arg_c and arg_d and arg_e and arg_f + + assert decorated2.__doc__ == ( + "short_description: fun2\n" + "\n" + "long_description: fun2\n" + "\n" + ":param arg_a: fun1\n" + ":param arg_b: fun1\n" + ":param arg_c: fun2\n" + ":param arg_e: fun2" + ) diff --git a/parrot/lib/python3.10/site-packages/docstring_parser/util.py b/parrot/lib/python3.10/site-packages/docstring_parser/util.py new file mode 100644 index 0000000000000000000000000000000000000000..84629b026792a427c7e53bbfba126aa36ff21de9 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/docstring_parser/util.py @@ -0,0 +1,144 @@ +"""Utility functions for working with docstrings.""" +import typing as T +from collections import ChainMap +from inspect import Signature +from itertools import chain + +from .common import ( + DocstringMeta, + DocstringParam, + DocstringReturns, + DocstringStyle, + RenderingStyle, +) +from .parser import compose, parse + +_Func = T.Callable[..., T.Any] + +assert DocstringReturns # used in docstring + + +def combine_docstrings( + *others: _Func, + exclude: T.Iterable[T.Type[DocstringMeta]] = (), + style: DocstringStyle = DocstringStyle.AUTO, + rendering_style: RenderingStyle = RenderingStyle.COMPACT, +) -> _Func: + """A function decorator that parses the docstrings from `others`, + programmatically combines them with the parsed docstring of the decorated + function, and replaces the docstring of the decorated function with the + composed result. Only parameters that are part of the decorated functions + signature are included in the combined docstring. When multiple sources for + a parameter or docstring metadata exists then the decorator will first + default to the wrapped function's value (when available) and otherwise use + the rightmost definition from ``others``. + + The following example illustrates its usage: + + >>> def fun1(a, b, c, d): + ... 
'''short_description: fun1 + ... + ... :param a: fun1 + ... :param b: fun1 + ... :return: fun1 + ... ''' + >>> def fun2(b, c, d, e): + ... '''short_description: fun2 + ... + ... long_description: fun2 + ... + ... :param b: fun2 + ... :param c: fun2 + ... :param e: fun2 + ... ''' + >>> @combine_docstrings(fun1, fun2) + >>> def decorated(a, b, c, d, e, f): + ... ''' + ... :param e: decorated + ... :param f: decorated + ... ''' + >>> print(decorated.__doc__) + short_description: fun2 + + long_description: fun2 + + :param a: fun1 + :param b: fun1 + :param c: fun2 + :param e: fun2 + :param f: decorated + :returns: fun1 + >>> @combine_docstrings(fun1, fun2, exclude=[DocstringReturns]) + >>> def decorated(a, b, c, d, e, f): pass + >>> print(decorated.__doc__) + short_description: fun2 + + long_description: fun2 + + :param a: fun1 + :param b: fun1 + :param c: fun2 + :param e: fun2 + + :param others: callables from which to parse docstrings. + :param exclude: an iterable of ``DocstringMeta`` subclasses to exclude when + combining docstrings. + :param style: style composed docstring. The default will infer the style + from the decorated function. + :param rendering_style: The rendering style used to compose a docstring. + :return: the decorated function with a modified docstring. 
+ """ + + def wrapper(func: _Func) -> _Func: + sig = Signature.from_callable(func) + + comb_doc = parse(func.__doc__ or "") + docs = [parse(other.__doc__ or "") for other in others] + [comb_doc] + params = dict( + ChainMap( + *( + {param.arg_name: param for param in doc.params} + for doc in docs + ) + ) + ) + + for doc in reversed(docs): + if not doc.short_description: + continue + comb_doc.short_description = doc.short_description + comb_doc.blank_after_short_description = ( + doc.blank_after_short_description + ) + break + + for doc in reversed(docs): + if not doc.long_description: + continue + comb_doc.long_description = doc.long_description + comb_doc.blank_after_long_description = ( + doc.blank_after_long_description + ) + break + + combined = {} + for doc in docs: + metas = {} + for meta in doc.meta: + meta_type = type(meta) + if meta_type in exclude: + continue + metas.setdefault(meta_type, []).append(meta) + for (meta_type, meta) in metas.items(): + combined[meta_type] = meta + + combined[DocstringParam] = [ + params[name] for name in sig.parameters if name in params + ] + comb_doc.meta = list(chain(*combined.values())) + func.__doc__ = compose( + comb_doc, style=style, rendering_style=rendering_style + ) + return func + + return wrapper diff --git a/parrot/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/RECORD b/parrot/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..a631893b629ea97342259329b8d7258b4bd63e33 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/RECORD @@ -0,0 +1,23 @@ +nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/__pycache__/__init__.cpython-310.pyc,, +nvidia/cusolver/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cusolver/__pycache__/__init__.cpython-310.pyc,, 
+nvidia/cusolver/include/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cusolver/include/__pycache__/__init__.cpython-310.pyc,, +nvidia/cusolver/include/cusolverDn.h,sha256=8KUcqUxWPr8jpz3ZVpTB6I3IXMme1ok7E7vi9XXKRzk,147406 +nvidia/cusolver/include/cusolverMg.h,sha256=N8989nnS2BleeMyuftbQgBDJ4sMAkLPSnmy_S_7fxng,11549 +nvidia/cusolver/include/cusolverRf.h,sha256=7BZfWeuMJ8w1Pz4iZeGmwvDZbDNNq0ivG5MHtiATtls,14292 +nvidia/cusolver/include/cusolverSp.h,sha256=8fev0XawDBd0xrOxUlQ3WhclKlUuVAT64zKxwnP8iT0,32561 +nvidia/cusolver/include/cusolverSp_LOWLEVEL_PREVIEW.h,sha256=rTuS0rxwGV3bAz50ua59WVPQ9SvlijORj732oPejoCk,37495 +nvidia/cusolver/include/cusolver_common.h,sha256=8SMCLEPkMN9Ni_KANkvPSHCieV1jrTARuS-Mhmuq5H8,8826 +nvidia/cusolver/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cusolver/lib/__pycache__/__init__.cpython-310.pyc,, +nvidia/cusolver/lib/libcusolver.so.11,sha256=ECh6vHzpxfx-fBY3YVZrWZ6uGzYsR-EACRHRmEQ9bVI,114481816 +nvidia/cusolver/lib/libcusolverMg.so.11,sha256=0f3uK8NQhMAFtQ5r76UCApP7coB7wWG2pQOMh1RMmwY,79763496 +nvidia_cusolver_cu12-11.4.5.107.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +nvidia_cusolver_cu12-11.4.5.107.dist-info/License.txt,sha256=rW9YU_ugyg0VnQ9Y1JrkmDDC-Mk_epJki5zpCttMbM0,59262 +nvidia_cusolver_cu12-11.4.5.107.dist-info/METADATA,sha256=b8Zxnx3ZVIwttTKBnzgVXjXu8-_pRL6wBkYMTV7i6gA,1626 +nvidia_cusolver_cu12-11.4.5.107.dist-info/RECORD,, +nvidia_cusolver_cu12-11.4.5.107.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia_cusolver_cu12-11.4.5.107.dist-info/WHEEL,sha256=-kQi_VMfvRQozZJT7HUPMfY-5vLo0LVTmAylNJ3Ft98,106 +nvidia_cusolver_cu12-11.4.5.107.dist-info/top_level.txt,sha256=fTkAtiFuL16nUrB9ytDDtpytz2t0B4NvYTnRzwAhO14,7 diff --git a/parrot/lib/python3.10/site-packages/requests/__pycache__/__version__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/requests/__pycache__/__version__.cpython-310.pyc new file mode 100644 
index 0000000000000000000000000000000000000000..6bcd6a530cecf9db1fd5cd8992623e148d8cf110 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/requests/__pycache__/__version__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/requests/__pycache__/adapters.cpython-310.pyc b/parrot/lib/python3.10/site-packages/requests/__pycache__/adapters.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7476e314cd6af3607bf2c48d55f25182614a3e14 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/requests/__pycache__/adapters.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/requests/__pycache__/auth.cpython-310.pyc b/parrot/lib/python3.10/site-packages/requests/__pycache__/auth.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..636535e208e070b575037bfd986b2fa07092d818 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/requests/__pycache__/auth.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/requests/__pycache__/compat.cpython-310.pyc b/parrot/lib/python3.10/site-packages/requests/__pycache__/compat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8c91b0d2c444122945ccd2ffe7aae09a6a8affe Binary files /dev/null and b/parrot/lib/python3.10/site-packages/requests/__pycache__/compat.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/requests/__pycache__/cookies.cpython-310.pyc b/parrot/lib/python3.10/site-packages/requests/__pycache__/cookies.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc0e339b4f1ba5965b21dc9ce63d0b1f2bd1110a Binary files /dev/null and b/parrot/lib/python3.10/site-packages/requests/__pycache__/cookies.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/requests/__pycache__/exceptions.cpython-310.pyc b/parrot/lib/python3.10/site-packages/requests/__pycache__/exceptions.cpython-310.pyc 
new file mode 100644 index 0000000000000000000000000000000000000000..90a28a72a1eaed506c4284ea2db4a84d217871f3 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/requests/__pycache__/exceptions.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/requests/__pycache__/help.cpython-310.pyc b/parrot/lib/python3.10/site-packages/requests/__pycache__/help.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9df0cba2efb870077ed1ec21c67ae183db039ec1 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/requests/__pycache__/help.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/requests/__pycache__/hooks.cpython-310.pyc b/parrot/lib/python3.10/site-packages/requests/__pycache__/hooks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9eef469796632cba1a536799df0868fdec1e3e61 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/requests/__pycache__/hooks.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/requests/__pycache__/models.cpython-310.pyc b/parrot/lib/python3.10/site-packages/requests/__pycache__/models.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..27607d4d5e2c1a86b035d3856637da36692a4e13 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/requests/__pycache__/models.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/requests/__pycache__/packages.cpython-310.pyc b/parrot/lib/python3.10/site-packages/requests/__pycache__/packages.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08084784f4b64dca43cd3922d3e63b6e8e55467e Binary files /dev/null and b/parrot/lib/python3.10/site-packages/requests/__pycache__/packages.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/requests/__pycache__/sessions.cpython-310.pyc 
b/parrot/lib/python3.10/site-packages/requests/__pycache__/sessions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71b91a98af3c338f97136a06787b41446b64c6ad Binary files /dev/null and b/parrot/lib/python3.10/site-packages/requests/__pycache__/sessions.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/requests/__pycache__/status_codes.cpython-310.pyc b/parrot/lib/python3.10/site-packages/requests/__pycache__/status_codes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05250075f8e67b8fe2ec049ff8dac0741c7b0b2d Binary files /dev/null and b/parrot/lib/python3.10/site-packages/requests/__pycache__/status_codes.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/requests/__pycache__/structures.cpython-310.pyc b/parrot/lib/python3.10/site-packages/requests/__pycache__/structures.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1377ccdcd4b0ef42b77eb18f22a030503c2d4843 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/requests/__pycache__/structures.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/requests/__pycache__/utils.cpython-310.pyc b/parrot/lib/python3.10/site-packages/requests/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6efbaddc098b65c12571ebedbc844a9f9719b5d9 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/requests/__pycache__/utils.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/requests/__version__.py b/parrot/lib/python3.10/site-packages/requests/__version__.py new file mode 100644 index 0000000000000000000000000000000000000000..2c105aca7d48ce1c35a456785cc75f97f076a426 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/requests/__version__.py @@ -0,0 +1,14 @@ +# .-. .-. .-. . . .-. .-. .-. .-. +# |( |- |.| | | |- `-. | `-. 
+# ' ' `-' `-`.`-' `-' `-' ' `-' + +__title__ = "requests" +__description__ = "Python HTTP for Humans." +__url__ = "https://requests.readthedocs.io" +__version__ = "2.32.3" +__build__ = 0x023203 +__author__ = "Kenneth Reitz" +__author_email__ = "me@kennethreitz.org" +__license__ = "Apache-2.0" +__copyright__ = "Copyright Kenneth Reitz" +__cake__ = "\u2728 \U0001f370 \u2728" diff --git a/parrot/lib/python3.10/site-packages/requests/api.py b/parrot/lib/python3.10/site-packages/requests/api.py new file mode 100644 index 0000000000000000000000000000000000000000..5960744552e7f8eea815429e7bdad38b0cc2741d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/requests/api.py @@ -0,0 +1,157 @@ +""" +requests.api +~~~~~~~~~~~~ + +This module implements the Requests API. + +:copyright: (c) 2012 by Kenneth Reitz. +:license: Apache2, see LICENSE for more details. +""" + +from . import sessions + + +def request(method, url, **kwargs): + """Constructs and sends a :class:`Request `. + + :param method: method for the new :class:`Request` object: ``GET``, ``OPTIONS``, ``HEAD``, ``POST``, ``PUT``, ``PATCH``, or ``DELETE``. + :param url: URL for the new :class:`Request` object. + :param params: (optional) Dictionary, list of tuples or bytes to send + in the query string for the :class:`Request`. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`. + :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. + :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. + :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload. 
+ ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')`` + or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content_type'`` is a string + defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers + to add for the file. + :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth. + :param timeout: (optional) How many seconds to wait for the server to send data + before giving up, as a float, or a :ref:`(connect timeout, read + timeout) ` tuple. + :type timeout: float or tuple + :param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``. + :type allow_redirects: bool + :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy. + :param verify: (optional) Either a boolean, in which case it controls whether we verify + the server's TLS certificate, or a string, in which case it must be a path + to a CA bundle to use. Defaults to ``True``. + :param stream: (optional) if ``False``, the response content will be immediately downloaded. + :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. + :return: :class:`Response ` object + :rtype: requests.Response + + Usage:: + + >>> import requests + >>> req = requests.request('GET', 'https://httpbin.org/get') + >>> req + + """ + + # By using the 'with' statement we are sure the session is closed, thus we + # avoid leaving sockets open which can trigger a ResourceWarning in some + # cases, and look like a memory leak in others. + with sessions.Session() as session: + return session.request(method=method, url=url, **kwargs) + + +def get(url, params=None, **kwargs): + r"""Sends a GET request. + + :param url: URL for the new :class:`Request` object. 
+ :param params: (optional) Dictionary, list of tuples or bytes to send + in the query string for the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :return: :class:`Response ` object + :rtype: requests.Response + """ + + return request("get", url, params=params, **kwargs) + + +def options(url, **kwargs): + r"""Sends an OPTIONS request. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :return: :class:`Response ` object + :rtype: requests.Response + """ + + return request("options", url, **kwargs) + + +def head(url, **kwargs): + r"""Sends a HEAD request. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. If + `allow_redirects` is not provided, it will be set to `False` (as + opposed to the default :meth:`request` behavior). + :return: :class:`Response ` object + :rtype: requests.Response + """ + + kwargs.setdefault("allow_redirects", False) + return request("head", url, **kwargs) + + +def post(url, data=None, json=None, **kwargs): + r"""Sends a POST request. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :return: :class:`Response ` object + :rtype: requests.Response + """ + + return request("post", url, data=data, json=json, **kwargs) + + +def put(url, data=None, **kwargs): + r"""Sends a PUT request. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`. 
+ :param \*\*kwargs: Optional arguments that ``request`` takes. + :return: :class:`Response ` object + :rtype: requests.Response + """ + + return request("put", url, data=data, **kwargs) + + +def patch(url, data=None, **kwargs): + r"""Sends a PATCH request. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :return: :class:`Response ` object + :rtype: requests.Response + """ + + return request("patch", url, data=data, **kwargs) + + +def delete(url, **kwargs): + r"""Sends a DELETE request. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :return: :class:`Response ` object + :rtype: requests.Response + """ + + return request("delete", url, **kwargs) diff --git a/parrot/lib/python3.10/site-packages/requests/auth.py b/parrot/lib/python3.10/site-packages/requests/auth.py new file mode 100644 index 0000000000000000000000000000000000000000..4a7ce6dc1460e0de8aa0c38ea9123faa69bd5110 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/requests/auth.py @@ -0,0 +1,314 @@ +""" +requests.auth +~~~~~~~~~~~~~ + +This module contains the authentication handlers for Requests. 
+""" + +import hashlib +import os +import re +import threading +import time +import warnings +from base64 import b64encode + +from ._internal_utils import to_native_string +from .compat import basestring, str, urlparse +from .cookies import extract_cookies_to_jar +from .utils import parse_dict_header + +CONTENT_TYPE_FORM_URLENCODED = "application/x-www-form-urlencoded" +CONTENT_TYPE_MULTI_PART = "multipart/form-data" + + +def _basic_auth_str(username, password): + """Returns a Basic Auth string.""" + + # "I want us to put a big-ol' comment on top of it that + # says that this behaviour is dumb but we need to preserve + # it because people are relying on it." + # - Lukasa + # + # These are here solely to maintain backwards compatibility + # for things like ints. This will be removed in 3.0.0. + if not isinstance(username, basestring): + warnings.warn( + "Non-string usernames will no longer be supported in Requests " + "3.0.0. Please convert the object you've passed in ({!r}) to " + "a string or bytes object in the near future to avoid " + "problems.".format(username), + category=DeprecationWarning, + ) + username = str(username) + + if not isinstance(password, basestring): + warnings.warn( + "Non-string passwords will no longer be supported in Requests " + "3.0.0. 
Please convert the object you've passed in ({!r}) to " + "a string or bytes object in the near future to avoid " + "problems.".format(type(password)), + category=DeprecationWarning, + ) + password = str(password) + # -- End Removal -- + + if isinstance(username, str): + username = username.encode("latin1") + + if isinstance(password, str): + password = password.encode("latin1") + + authstr = "Basic " + to_native_string( + b64encode(b":".join((username, password))).strip() + ) + + return authstr + + +class AuthBase: + """Base class that all auth implementations derive from""" + + def __call__(self, r): + raise NotImplementedError("Auth hooks must be callable.") + + +class HTTPBasicAuth(AuthBase): + """Attaches HTTP Basic Authentication to the given Request object.""" + + def __init__(self, username, password): + self.username = username + self.password = password + + def __eq__(self, other): + return all( + [ + self.username == getattr(other, "username", None), + self.password == getattr(other, "password", None), + ] + ) + + def __ne__(self, other): + return not self == other + + def __call__(self, r): + r.headers["Authorization"] = _basic_auth_str(self.username, self.password) + return r + + +class HTTPProxyAuth(HTTPBasicAuth): + """Attaches HTTP Proxy Authentication to a given Request object.""" + + def __call__(self, r): + r.headers["Proxy-Authorization"] = _basic_auth_str(self.username, self.password) + return r + + +class HTTPDigestAuth(AuthBase): + """Attaches HTTP Digest Authentication to the given Request object.""" + + def __init__(self, username, password): + self.username = username + self.password = password + # Keep state in per-thread local storage + self._thread_local = threading.local() + + def init_per_thread_state(self): + # Ensure state is initialized just once per-thread + if not hasattr(self._thread_local, "init"): + self._thread_local.init = True + self._thread_local.last_nonce = "" + self._thread_local.nonce_count = 0 + self._thread_local.chal 
= {} + self._thread_local.pos = None + self._thread_local.num_401_calls = None + + def build_digest_header(self, method, url): + """ + :rtype: str + """ + + realm = self._thread_local.chal["realm"] + nonce = self._thread_local.chal["nonce"] + qop = self._thread_local.chal.get("qop") + algorithm = self._thread_local.chal.get("algorithm") + opaque = self._thread_local.chal.get("opaque") + hash_utf8 = None + + if algorithm is None: + _algorithm = "MD5" + else: + _algorithm = algorithm.upper() + # lambdas assume digest modules are imported at the top level + if _algorithm == "MD5" or _algorithm == "MD5-SESS": + + def md5_utf8(x): + if isinstance(x, str): + x = x.encode("utf-8") + return hashlib.md5(x).hexdigest() + + hash_utf8 = md5_utf8 + elif _algorithm == "SHA": + + def sha_utf8(x): + if isinstance(x, str): + x = x.encode("utf-8") + return hashlib.sha1(x).hexdigest() + + hash_utf8 = sha_utf8 + elif _algorithm == "SHA-256": + + def sha256_utf8(x): + if isinstance(x, str): + x = x.encode("utf-8") + return hashlib.sha256(x).hexdigest() + + hash_utf8 = sha256_utf8 + elif _algorithm == "SHA-512": + + def sha512_utf8(x): + if isinstance(x, str): + x = x.encode("utf-8") + return hashlib.sha512(x).hexdigest() + + hash_utf8 = sha512_utf8 + + KD = lambda s, d: hash_utf8(f"{s}:{d}") # noqa:E731 + + if hash_utf8 is None: + return None + + # XXX not implemented yet + entdig = None + p_parsed = urlparse(url) + #: path is request-uri defined in RFC 2616 which should not be empty + path = p_parsed.path or "/" + if p_parsed.query: + path += f"?{p_parsed.query}" + + A1 = f"{self.username}:{realm}:{self.password}" + A2 = f"{method}:{path}" + + HA1 = hash_utf8(A1) + HA2 = hash_utf8(A2) + + if nonce == self._thread_local.last_nonce: + self._thread_local.nonce_count += 1 + else: + self._thread_local.nonce_count = 1 + ncvalue = f"{self._thread_local.nonce_count:08x}" + s = str(self._thread_local.nonce_count).encode("utf-8") + s += nonce.encode("utf-8") + s += time.ctime().encode("utf-8") 
+ s += os.urandom(8) + + cnonce = hashlib.sha1(s).hexdigest()[:16] + if _algorithm == "MD5-SESS": + HA1 = hash_utf8(f"{HA1}:{nonce}:{cnonce}") + + if not qop: + respdig = KD(HA1, f"{nonce}:{HA2}") + elif qop == "auth" or "auth" in qop.split(","): + noncebit = f"{nonce}:{ncvalue}:{cnonce}:auth:{HA2}" + respdig = KD(HA1, noncebit) + else: + # XXX handle auth-int. + return None + + self._thread_local.last_nonce = nonce + + # XXX should the partial digests be encoded too? + base = ( + f'username="{self.username}", realm="{realm}", nonce="{nonce}", ' + f'uri="{path}", response="{respdig}"' + ) + if opaque: + base += f', opaque="{opaque}"' + if algorithm: + base += f', algorithm="{algorithm}"' + if entdig: + base += f', digest="{entdig}"' + if qop: + base += f', qop="auth", nc={ncvalue}, cnonce="{cnonce}"' + + return f"Digest {base}" + + def handle_redirect(self, r, **kwargs): + """Reset num_401_calls counter on redirects.""" + if r.is_redirect: + self._thread_local.num_401_calls = 1 + + def handle_401(self, r, **kwargs): + """ + Takes the given response and tries digest-auth, if needed. + + :rtype: requests.Response + """ + + # If response is not 4xx, do not auth + # See https://github.com/psf/requests/issues/3772 + if not 400 <= r.status_code < 500: + self._thread_local.num_401_calls = 1 + return r + + if self._thread_local.pos is not None: + # Rewind the file position indicator of the body to where + # it was to resend the request. + r.request.body.seek(self._thread_local.pos) + s_auth = r.headers.get("www-authenticate", "") + + if "digest" in s_auth.lower() and self._thread_local.num_401_calls < 2: + self._thread_local.num_401_calls += 1 + pat = re.compile(r"digest ", flags=re.IGNORECASE) + self._thread_local.chal = parse_dict_header(pat.sub("", s_auth, count=1)) + + # Consume content and release the original connection + # to allow our new request to reuse the same one. 
+ r.content + r.close() + prep = r.request.copy() + extract_cookies_to_jar(prep._cookies, r.request, r.raw) + prep.prepare_cookies(prep._cookies) + + prep.headers["Authorization"] = self.build_digest_header( + prep.method, prep.url + ) + _r = r.connection.send(prep, **kwargs) + _r.history.append(r) + _r.request = prep + + return _r + + self._thread_local.num_401_calls = 1 + return r + + def __call__(self, r): + # Initialize per-thread state, if needed + self.init_per_thread_state() + # If we have a saved nonce, skip the 401 + if self._thread_local.last_nonce: + r.headers["Authorization"] = self.build_digest_header(r.method, r.url) + try: + self._thread_local.pos = r.body.tell() + except AttributeError: + # In the case of HTTPDigestAuth being reused and the body of + # the previous request was a file-like object, pos has the + # file position of the previous body. Ensure it's set to + # None. + self._thread_local.pos = None + r.register_hook("response", self.handle_401) + r.register_hook("response", self.handle_redirect) + self._thread_local.num_401_calls = 1 + + return r + + def __eq__(self, other): + return all( + [ + self.username == getattr(other, "username", None), + self.password == getattr(other, "password", None), + ] + ) + + def __ne__(self, other): + return not self == other diff --git a/parrot/lib/python3.10/site-packages/requests/certs.py b/parrot/lib/python3.10/site-packages/requests/certs.py new file mode 100644 index 0000000000000000000000000000000000000000..be422c3e91e43bacf60ff3302688df0b28742333 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/requests/certs.py @@ -0,0 +1,17 @@ +#!/usr/bin/env python + +""" +requests.certs +~~~~~~~~~~~~~~ + +This module returns the preferred default CA certificate bundle. There is +only one — the one from the certifi package. + +If you are packaging Requests, e.g., for a Linux distribution or a managed +environment, you can change the definition of where() to return a separately +packaged CA bundle. 
+""" +from certifi import where + +if __name__ == "__main__": + print(where()) diff --git a/parrot/lib/python3.10/site-packages/requests/cookies.py b/parrot/lib/python3.10/site-packages/requests/cookies.py new file mode 100644 index 0000000000000000000000000000000000000000..f69d0cda9e1c893401015a09f2db2de5a5960fd2 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/requests/cookies.py @@ -0,0 +1,561 @@ +""" +requests.cookies +~~~~~~~~~~~~~~~~ + +Compatibility code to be able to use `http.cookiejar.CookieJar` with requests. + +requests.utils imports from here, so be careful with imports. +""" + +import calendar +import copy +import time + +from ._internal_utils import to_native_string +from .compat import Morsel, MutableMapping, cookielib, urlparse, urlunparse + +try: + import threading +except ImportError: + import dummy_threading as threading + + +class MockRequest: + """Wraps a `requests.Request` to mimic a `urllib2.Request`. + + The code in `http.cookiejar.CookieJar` expects this interface in order to correctly + manage cookie policies, i.e., determine whether a cookie can be set, given the + domains of the request and the cookie. + + The original request object is read-only. The client is responsible for collecting + the new headers via `get_new_headers()` and interpreting them appropriately. You + probably want `get_cookie_header`, defined below. 
+ """ + + def __init__(self, request): + self._r = request + self._new_headers = {} + self.type = urlparse(self._r.url).scheme + + def get_type(self): + return self.type + + def get_host(self): + return urlparse(self._r.url).netloc + + def get_origin_req_host(self): + return self.get_host() + + def get_full_url(self): + # Only return the response's URL if the user hadn't set the Host + # header + if not self._r.headers.get("Host"): + return self._r.url + # If they did set it, retrieve it and reconstruct the expected domain + host = to_native_string(self._r.headers["Host"], encoding="utf-8") + parsed = urlparse(self._r.url) + # Reconstruct the URL as we expect it + return urlunparse( + [ + parsed.scheme, + host, + parsed.path, + parsed.params, + parsed.query, + parsed.fragment, + ] + ) + + def is_unverifiable(self): + return True + + def has_header(self, name): + return name in self._r.headers or name in self._new_headers + + def get_header(self, name, default=None): + return self._r.headers.get(name, self._new_headers.get(name, default)) + + def add_header(self, key, val): + """cookiejar has no legitimate use for this method; add it back if you find one.""" + raise NotImplementedError( + "Cookie headers should be added with add_unredirected_header()" + ) + + def add_unredirected_header(self, name, value): + self._new_headers[name] = value + + def get_new_headers(self): + return self._new_headers + + @property + def unverifiable(self): + return self.is_unverifiable() + + @property + def origin_req_host(self): + return self.get_origin_req_host() + + @property + def host(self): + return self.get_host() + + +class MockResponse: + """Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`. + + ...what? Basically, expose the parsed HTTP headers from the server response + the way `http.cookiejar` expects to see them. + """ + + def __init__(self, headers): + """Make a MockResponse for `cookiejar` to read. 
+ + :param headers: a httplib.HTTPMessage or analogous carrying the headers + """ + self._headers = headers + + def info(self): + return self._headers + + def getheaders(self, name): + self._headers.getheaders(name) + + +def extract_cookies_to_jar(jar, request, response): + """Extract the cookies from the response into a CookieJar. + + :param jar: http.cookiejar.CookieJar (not necessarily a RequestsCookieJar) + :param request: our own requests.Request object + :param response: urllib3.HTTPResponse object + """ + if not (hasattr(response, "_original_response") and response._original_response): + return + # the _original_response field is the wrapped httplib.HTTPResponse object, + req = MockRequest(request) + # pull out the HTTPMessage with the headers and put it in the mock: + res = MockResponse(response._original_response.msg) + jar.extract_cookies(res, req) + + +def get_cookie_header(jar, request): + """ + Produce an appropriate Cookie header string to be sent with `request`, or None. + + :rtype: str + """ + r = MockRequest(request) + jar.add_cookie_header(r) + return r.get_new_headers().get("Cookie") + + +def remove_cookie_by_name(cookiejar, name, domain=None, path=None): + """Unsets a cookie by name, by default over all domains and paths. + + Wraps CookieJar.clear(), is O(n). + """ + clearables = [] + for cookie in cookiejar: + if cookie.name != name: + continue + if domain is not None and domain != cookie.domain: + continue + if path is not None and path != cookie.path: + continue + clearables.append((cookie.domain, cookie.path, cookie.name)) + + for domain, path, name in clearables: + cookiejar.clear(domain, path, name) + + +class CookieConflictError(RuntimeError): + """There are two cookies that meet the criteria specified in the cookie jar. + Use .get and .set and include domain and path args in order to be more specific. 
+ """ + + +class RequestsCookieJar(cookielib.CookieJar, MutableMapping): + """Compatibility class; is a http.cookiejar.CookieJar, but exposes a dict + interface. + + This is the CookieJar we create by default for requests and sessions that + don't specify one, since some clients may expect response.cookies and + session.cookies to support dict operations. + + Requests does not use the dict interface internally; it's just for + compatibility with external client code. All requests code should work + out of the box with externally provided instances of ``CookieJar``, e.g. + ``LWPCookieJar`` and ``FileCookieJar``. + + Unlike a regular CookieJar, this class is pickleable. + + .. warning:: dictionary operations that are normally O(1) may be O(n). + """ + + def get(self, name, default=None, domain=None, path=None): + """Dict-like get() that also supports optional domain and path args in + order to resolve naming collisions from using one cookie jar over + multiple domains. + + .. warning:: operation is O(n), not O(1). + """ + try: + return self._find_no_duplicates(name, domain, path) + except KeyError: + return default + + def set(self, name, value, **kwargs): + """Dict-like set() that also supports optional domain and path args in + order to resolve naming collisions from using one cookie jar over + multiple domains. + """ + # support client code that unsets cookies by assignment of a None value: + if value is None: + remove_cookie_by_name( + self, name, domain=kwargs.get("domain"), path=kwargs.get("path") + ) + return + + if isinstance(value, Morsel): + c = morsel_to_cookie(value) + else: + c = create_cookie(name, value, **kwargs) + self.set_cookie(c) + return c + + def iterkeys(self): + """Dict-like iterkeys() that returns an iterator of names of cookies + from the jar. + + .. seealso:: itervalues() and iteritems(). + """ + for cookie in iter(self): + yield cookie.name + + def keys(self): + """Dict-like keys() that returns a list of names of cookies from the + jar. 
+ + .. seealso:: values() and items(). + """ + return list(self.iterkeys()) + + def itervalues(self): + """Dict-like itervalues() that returns an iterator of values of cookies + from the jar. + + .. seealso:: iterkeys() and iteritems(). + """ + for cookie in iter(self): + yield cookie.value + + def values(self): + """Dict-like values() that returns a list of values of cookies from the + jar. + + .. seealso:: keys() and items(). + """ + return list(self.itervalues()) + + def iteritems(self): + """Dict-like iteritems() that returns an iterator of name-value tuples + from the jar. + + .. seealso:: iterkeys() and itervalues(). + """ + for cookie in iter(self): + yield cookie.name, cookie.value + + def items(self): + """Dict-like items() that returns a list of name-value tuples from the + jar. Allows client-code to call ``dict(RequestsCookieJar)`` and get a + vanilla python dict of key value pairs. + + .. seealso:: keys() and values(). + """ + return list(self.iteritems()) + + def list_domains(self): + """Utility method to list all the domains in the jar.""" + domains = [] + for cookie in iter(self): + if cookie.domain not in domains: + domains.append(cookie.domain) + return domains + + def list_paths(self): + """Utility method to list all the paths in the jar.""" + paths = [] + for cookie in iter(self): + if cookie.path not in paths: + paths.append(cookie.path) + return paths + + def multiple_domains(self): + """Returns True if there are multiple domains in the jar. + Returns False otherwise. + + :rtype: bool + """ + domains = [] + for cookie in iter(self): + if cookie.domain is not None and cookie.domain in domains: + return True + domains.append(cookie.domain) + return False # there is only one domain in jar + + def get_dict(self, domain=None, path=None): + """Takes as an argument an optional domain and path and returns a plain + old Python dict of name-value pairs of cookies that meet the + requirements. 
+ + :rtype: dict + """ + dictionary = {} + for cookie in iter(self): + if (domain is None or cookie.domain == domain) and ( + path is None or cookie.path == path + ): + dictionary[cookie.name] = cookie.value + return dictionary + + def __contains__(self, name): + try: + return super().__contains__(name) + except CookieConflictError: + return True + + def __getitem__(self, name): + """Dict-like __getitem__() for compatibility with client code. Throws + exception if there are more than one cookie with name. In that case, + use the more explicit get() method instead. + + .. warning:: operation is O(n), not O(1). + """ + return self._find_no_duplicates(name) + + def __setitem__(self, name, value): + """Dict-like __setitem__ for compatibility with client code. Throws + exception if there is already a cookie of that name in the jar. In that + case, use the more explicit set() method instead. + """ + self.set(name, value) + + def __delitem__(self, name): + """Deletes a cookie given a name. Wraps ``http.cookiejar.CookieJar``'s + ``remove_cookie_by_name()``. + """ + remove_cookie_by_name(self, name) + + def set_cookie(self, cookie, *args, **kwargs): + if ( + hasattr(cookie.value, "startswith") + and cookie.value.startswith('"') + and cookie.value.endswith('"') + ): + cookie.value = cookie.value.replace('\\"', "") + return super().set_cookie(cookie, *args, **kwargs) + + def update(self, other): + """Updates this jar with cookies from another CookieJar or dict-like""" + if isinstance(other, cookielib.CookieJar): + for cookie in other: + self.set_cookie(copy.copy(cookie)) + else: + super().update(other) + + def _find(self, name, domain=None, path=None): + """Requests uses this method internally to get cookie values. + + If there are conflicting cookies, _find arbitrarily chooses one. + See _find_no_duplicates if you want an exception thrown if there are + conflicting cookies. 
+ + :param name: a string containing name of cookie + :param domain: (optional) string containing domain of cookie + :param path: (optional) string containing path of cookie + :return: cookie.value + """ + for cookie in iter(self): + if cookie.name == name: + if domain is None or cookie.domain == domain: + if path is None or cookie.path == path: + return cookie.value + + raise KeyError(f"name={name!r}, domain={domain!r}, path={path!r}") + + def _find_no_duplicates(self, name, domain=None, path=None): + """Both ``__get_item__`` and ``get`` call this function: it's never + used elsewhere in Requests. + + :param name: a string containing name of cookie + :param domain: (optional) string containing domain of cookie + :param path: (optional) string containing path of cookie + :raises KeyError: if cookie is not found + :raises CookieConflictError: if there are multiple cookies + that match name and optionally domain and path + :return: cookie.value + """ + toReturn = None + for cookie in iter(self): + if cookie.name == name: + if domain is None or cookie.domain == domain: + if path is None or cookie.path == path: + if toReturn is not None: + # if there are multiple cookies that meet passed in criteria + raise CookieConflictError( + f"There are multiple cookies with name, {name!r}" + ) + # we will eventually return this as long as no cookie conflict + toReturn = cookie.value + + if toReturn: + return toReturn + raise KeyError(f"name={name!r}, domain={domain!r}, path={path!r}") + + def __getstate__(self): + """Unlike a normal CookieJar, this class is pickleable.""" + state = self.__dict__.copy() + # remove the unpickleable RLock object + state.pop("_cookies_lock") + return state + + def __setstate__(self, state): + """Unlike a normal CookieJar, this class is pickleable.""" + self.__dict__.update(state) + if "_cookies_lock" not in self.__dict__: + self._cookies_lock = threading.RLock() + + def copy(self): + """Return a copy of this RequestsCookieJar.""" + new_cj = 
RequestsCookieJar() + new_cj.set_policy(self.get_policy()) + new_cj.update(self) + return new_cj + + def get_policy(self): + """Return the CookiePolicy instance used.""" + return self._policy + + +def _copy_cookie_jar(jar): + if jar is None: + return None + + if hasattr(jar, "copy"): + # We're dealing with an instance of RequestsCookieJar + return jar.copy() + # We're dealing with a generic CookieJar instance + new_jar = copy.copy(jar) + new_jar.clear() + for cookie in jar: + new_jar.set_cookie(copy.copy(cookie)) + return new_jar + + +def create_cookie(name, value, **kwargs): + """Make a cookie from underspecified parameters. + + By default, the pair of `name` and `value` will be set for the domain '' + and sent on every request (this is sometimes called a "supercookie"). + """ + result = { + "version": 0, + "name": name, + "value": value, + "port": None, + "domain": "", + "path": "/", + "secure": False, + "expires": None, + "discard": True, + "comment": None, + "comment_url": None, + "rest": {"HttpOnly": None}, + "rfc2109": False, + } + + badargs = set(kwargs) - set(result) + if badargs: + raise TypeError( + f"create_cookie() got unexpected keyword arguments: {list(badargs)}" + ) + + result.update(kwargs) + result["port_specified"] = bool(result["port"]) + result["domain_specified"] = bool(result["domain"]) + result["domain_initial_dot"] = result["domain"].startswith(".") + result["path_specified"] = bool(result["path"]) + + return cookielib.Cookie(**result) + + +def morsel_to_cookie(morsel): + """Convert a Morsel object into a Cookie containing the one k/v pair.""" + + expires = None + if morsel["max-age"]: + try: + expires = int(time.time() + int(morsel["max-age"])) + except ValueError: + raise TypeError(f"max-age: {morsel['max-age']} must be integer") + elif morsel["expires"]: + time_template = "%a, %d-%b-%Y %H:%M:%S GMT" + expires = calendar.timegm(time.strptime(morsel["expires"], time_template)) + return create_cookie( + comment=morsel["comment"], + 
comment_url=bool(morsel["comment"]), + discard=False, + domain=morsel["domain"], + expires=expires, + name=morsel.key, + path=morsel["path"], + port=None, + rest={"HttpOnly": morsel["httponly"]}, + rfc2109=False, + secure=bool(morsel["secure"]), + value=morsel.value, + version=morsel["version"] or 0, + ) + + +def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True): + """Returns a CookieJar from a key/value dictionary. + + :param cookie_dict: Dict of key/values to insert into CookieJar. + :param cookiejar: (optional) A cookiejar to add the cookies to. + :param overwrite: (optional) If False, will not replace cookies + already in the jar with new ones. + :rtype: CookieJar + """ + if cookiejar is None: + cookiejar = RequestsCookieJar() + + if cookie_dict is not None: + names_from_jar = [cookie.name for cookie in cookiejar] + for name in cookie_dict: + if overwrite or (name not in names_from_jar): + cookiejar.set_cookie(create_cookie(name, cookie_dict[name])) + + return cookiejar + + +def merge_cookies(cookiejar, cookies): + """Add cookies to cookiejar and returns a merged CookieJar. + + :param cookiejar: CookieJar object to add the cookies to. + :param cookies: Dictionary or CookieJar object to be added. 
+ :rtype: CookieJar + """ + if not isinstance(cookiejar, cookielib.CookieJar): + raise ValueError("You can only merge into CookieJar") + + if isinstance(cookies, dict): + cookiejar = cookiejar_from_dict(cookies, cookiejar=cookiejar, overwrite=False) + elif isinstance(cookies, cookielib.CookieJar): + try: + cookiejar.update(cookies) + except AttributeError: + for cookie_in_jar in cookies: + cookiejar.set_cookie(cookie_in_jar) + + return cookiejar diff --git a/parrot/lib/python3.10/site-packages/requests/hooks.py b/parrot/lib/python3.10/site-packages/requests/hooks.py new file mode 100644 index 0000000000000000000000000000000000000000..d181ba2ec2e55d274897315887b78fbdca757da8 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/requests/hooks.py @@ -0,0 +1,33 @@ +""" +requests.hooks +~~~~~~~~~~~~~~ + +This module provides the capabilities for the Requests hooks system. + +Available hooks: + +``response``: + The response generated from a Request. +""" +HOOKS = ["response"] + + +def default_hooks(): + return {event: [] for event in HOOKS} + + +# TODO: response is the only one + + +def dispatch_hook(key, hooks, hook_data, **kwargs): + """Dispatches a hook dictionary on a given piece of data.""" + hooks = hooks or {} + hooks = hooks.get(key) + if hooks: + if hasattr(hooks, "__call__"): + hooks = [hooks] + for hook in hooks: + _hook_data = hook(hook_data, **kwargs) + if _hook_data is not None: + hook_data = _hook_data + return hook_data diff --git a/parrot/lib/python3.10/site-packages/requests/models.py b/parrot/lib/python3.10/site-packages/requests/models.py new file mode 100644 index 0000000000000000000000000000000000000000..8f56ca7d23a9a12084df80cb649e019572308cfe --- /dev/null +++ b/parrot/lib/python3.10/site-packages/requests/models.py @@ -0,0 +1,1037 @@ +""" +requests.models +~~~~~~~~~~~~~~~ + +This module contains the primary objects that power Requests. +""" + +import datetime + +# Import encoding now, to avoid implicit import later. 
+# Implicit import within threads may cause LookupError when standard library is in a ZIP, +# such as in Embedded Python. See https://github.com/psf/requests/issues/3578. +import encodings.idna # noqa: F401 +from io import UnsupportedOperation + +from urllib3.exceptions import ( + DecodeError, + LocationParseError, + ProtocolError, + ReadTimeoutError, + SSLError, +) +from urllib3.fields import RequestField +from urllib3.filepost import encode_multipart_formdata +from urllib3.util import parse_url + +from ._internal_utils import to_native_string, unicode_is_ascii +from .auth import HTTPBasicAuth +from .compat import ( + Callable, + JSONDecodeError, + Mapping, + basestring, + builtin_str, + chardet, + cookielib, +) +from .compat import json as complexjson +from .compat import urlencode, urlsplit, urlunparse +from .cookies import _copy_cookie_jar, cookiejar_from_dict, get_cookie_header +from .exceptions import ( + ChunkedEncodingError, + ConnectionError, + ContentDecodingError, + HTTPError, + InvalidJSONError, + InvalidURL, +) +from .exceptions import JSONDecodeError as RequestsJSONDecodeError +from .exceptions import MissingSchema +from .exceptions import SSLError as RequestsSSLError +from .exceptions import StreamConsumedError +from .hooks import default_hooks +from .status_codes import codes +from .structures import CaseInsensitiveDict +from .utils import ( + check_header_validity, + get_auth_from_url, + guess_filename, + guess_json_utf, + iter_slices, + parse_header_links, + requote_uri, + stream_decode_response_unicode, + super_len, + to_key_val_list, +) + +#: The set of HTTP status codes that indicate an automatically +#: processable redirect. 
+REDIRECT_STATI = ( + codes.moved, # 301 + codes.found, # 302 + codes.other, # 303 + codes.temporary_redirect, # 307 + codes.permanent_redirect, # 308 +) + +DEFAULT_REDIRECT_LIMIT = 30 +CONTENT_CHUNK_SIZE = 10 * 1024 +ITER_CHUNK_SIZE = 512 + + +class RequestEncodingMixin: + @property + def path_url(self): + """Build the path URL to use.""" + + url = [] + + p = urlsplit(self.url) + + path = p.path + if not path: + path = "/" + + url.append(path) + + query = p.query + if query: + url.append("?") + url.append(query) + + return "".join(url) + + @staticmethod + def _encode_params(data): + """Encode parameters in a piece of data. + + Will successfully encode parameters when passed as a dict or a list of + 2-tuples. Order is retained if data is a list of 2-tuples but arbitrary + if parameters are supplied as a dict. + """ + + if isinstance(data, (str, bytes)): + return data + elif hasattr(data, "read"): + return data + elif hasattr(data, "__iter__"): + result = [] + for k, vs in to_key_val_list(data): + if isinstance(vs, basestring) or not hasattr(vs, "__iter__"): + vs = [vs] + for v in vs: + if v is not None: + result.append( + ( + k.encode("utf-8") if isinstance(k, str) else k, + v.encode("utf-8") if isinstance(v, str) else v, + ) + ) + return urlencode(result, doseq=True) + else: + return data + + @staticmethod + def _encode_files(files, data): + """Build the body for a multipart/form-data request. + + Will successfully encode files when passed as a dict or a list of + tuples. Order is retained if data is a list of tuples but arbitrary + if parameters are supplied as a dict. + The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, contentype) + or 4-tuples (filename, fileobj, contentype, custom_headers). 
+ """ + if not files: + raise ValueError("Files must be provided.") + elif isinstance(data, basestring): + raise ValueError("Data must not be a string.") + + new_fields = [] + fields = to_key_val_list(data or {}) + files = to_key_val_list(files or {}) + + for field, val in fields: + if isinstance(val, basestring) or not hasattr(val, "__iter__"): + val = [val] + for v in val: + if v is not None: + # Don't call str() on bytestrings: in Py3 it all goes wrong. + if not isinstance(v, bytes): + v = str(v) + + new_fields.append( + ( + field.decode("utf-8") + if isinstance(field, bytes) + else field, + v.encode("utf-8") if isinstance(v, str) else v, + ) + ) + + for k, v in files: + # support for explicit filename + ft = None + fh = None + if isinstance(v, (tuple, list)): + if len(v) == 2: + fn, fp = v + elif len(v) == 3: + fn, fp, ft = v + else: + fn, fp, ft, fh = v + else: + fn = guess_filename(v) or k + fp = v + + if isinstance(fp, (str, bytes, bytearray)): + fdata = fp + elif hasattr(fp, "read"): + fdata = fp.read() + elif fp is None: + continue + else: + fdata = fp + + rf = RequestField(name=k, data=fdata, filename=fn, headers=fh) + rf.make_multipart(content_type=ft) + new_fields.append(rf) + + body, content_type = encode_multipart_formdata(new_fields) + + return body, content_type + + +class RequestHooksMixin: + def register_hook(self, event, hook): + """Properly register a hook.""" + + if event not in self.hooks: + raise ValueError(f'Unsupported event specified, with event name "{event}"') + + if isinstance(hook, Callable): + self.hooks[event].append(hook) + elif hasattr(hook, "__iter__"): + self.hooks[event].extend(h for h in hook if isinstance(h, Callable)) + + def deregister_hook(self, event, hook): + """Deregister a previously registered hook. + Returns True if the hook existed, False if not. 
+ """ + + try: + self.hooks[event].remove(hook) + return True + except ValueError: + return False + + +class Request(RequestHooksMixin): + """A user-created :class:`Request ` object. + + Used to prepare a :class:`PreparedRequest `, which is sent to the server. + + :param method: HTTP method to use. + :param url: URL to send. + :param headers: dictionary of headers to send. + :param files: dictionary of {filename: fileobject} files to multipart upload. + :param data: the body to attach to the request. If a dictionary or + list of tuples ``[(key, value)]`` is provided, form-encoding will + take place. + :param json: json for the body to attach to the request (if files or data is not specified). + :param params: URL parameters to append to the URL. If a dictionary or + list of tuples ``[(key, value)]`` is provided, form-encoding will + take place. + :param auth: Auth handler or (user, pass) tuple. + :param cookies: dictionary or CookieJar of cookies to attach to this request. + :param hooks: dictionary of callback hooks, for internal usage. + + Usage:: + + >>> import requests + >>> req = requests.Request('GET', 'https://httpbin.org/get') + >>> req.prepare() + + """ + + def __init__( + self, + method=None, + url=None, + headers=None, + files=None, + data=None, + params=None, + auth=None, + cookies=None, + hooks=None, + json=None, + ): + # Default empty dicts for dict params. 
+ data = [] if data is None else data + files = [] if files is None else files + headers = {} if headers is None else headers + params = {} if params is None else params + hooks = {} if hooks is None else hooks + + self.hooks = default_hooks() + for k, v in list(hooks.items()): + self.register_hook(event=k, hook=v) + + self.method = method + self.url = url + self.headers = headers + self.files = files + self.data = data + self.json = json + self.params = params + self.auth = auth + self.cookies = cookies + + def __repr__(self): + return f"" + + def prepare(self): + """Constructs a :class:`PreparedRequest ` for transmission and returns it.""" + p = PreparedRequest() + p.prepare( + method=self.method, + url=self.url, + headers=self.headers, + files=self.files, + data=self.data, + json=self.json, + params=self.params, + auth=self.auth, + cookies=self.cookies, + hooks=self.hooks, + ) + return p + + +class PreparedRequest(RequestEncodingMixin, RequestHooksMixin): + """The fully mutable :class:`PreparedRequest ` object, + containing the exact bytes that will be sent to the server. + + Instances are generated from a :class:`Request ` object, and + should not be instantiated manually; doing so may produce undesirable + effects. + + Usage:: + + >>> import requests + >>> req = requests.Request('GET', 'https://httpbin.org/get') + >>> r = req.prepare() + >>> r + + + >>> s = requests.Session() + >>> s.send(r) + + """ + + def __init__(self): + #: HTTP verb to send to the server. + self.method = None + #: HTTP URL to send the request to. + self.url = None + #: dictionary of HTTP headers. + self.headers = None + # The `CookieJar` used to create the Cookie header will be stored here + # after prepare_cookies is called + self._cookies = None + #: request body to send to the server. + self.body = None + #: dictionary of callback hooks, for internal usage. + self.hooks = default_hooks() + #: integer denoting starting position of a readable file-like body. 
+ self._body_position = None + + def prepare( + self, + method=None, + url=None, + headers=None, + files=None, + data=None, + params=None, + auth=None, + cookies=None, + hooks=None, + json=None, + ): + """Prepares the entire request with the given parameters.""" + + self.prepare_method(method) + self.prepare_url(url, params) + self.prepare_headers(headers) + self.prepare_cookies(cookies) + self.prepare_body(data, files, json) + self.prepare_auth(auth, url) + + # Note that prepare_auth must be last to enable authentication schemes + # such as OAuth to work on a fully prepared request. + + # This MUST go after prepare_auth. Authenticators could add a hook + self.prepare_hooks(hooks) + + def __repr__(self): + return f"" + + def copy(self): + p = PreparedRequest() + p.method = self.method + p.url = self.url + p.headers = self.headers.copy() if self.headers is not None else None + p._cookies = _copy_cookie_jar(self._cookies) + p.body = self.body + p.hooks = self.hooks + p._body_position = self._body_position + return p + + def prepare_method(self, method): + """Prepares the given HTTP method.""" + self.method = method + if self.method is not None: + self.method = to_native_string(self.method.upper()) + + @staticmethod + def _get_idna_encoded_host(host): + import idna + + try: + host = idna.encode(host, uts46=True).decode("utf-8") + except idna.IDNAError: + raise UnicodeError + return host + + def prepare_url(self, url, params): + """Prepares the given HTTP URL.""" + #: Accept objects that have string representations. + #: We're unable to blindly call unicode/str functions + #: as this will include the bytestring indicator (b'') + #: on python 3.x. 
+ #: https://github.com/psf/requests/pull/2238 + if isinstance(url, bytes): + url = url.decode("utf8") + else: + url = str(url) + + # Remove leading whitespaces from url + url = url.lstrip() + + # Don't do any URL preparation for non-HTTP schemes like `mailto`, + # `data` etc to work around exceptions from `url_parse`, which + # handles RFC 3986 only. + if ":" in url and not url.lower().startswith("http"): + self.url = url + return + + # Support for unicode domain names and paths. + try: + scheme, auth, host, port, path, query, fragment = parse_url(url) + except LocationParseError as e: + raise InvalidURL(*e.args) + + if not scheme: + raise MissingSchema( + f"Invalid URL {url!r}: No scheme supplied. " + f"Perhaps you meant https://{url}?" + ) + + if not host: + raise InvalidURL(f"Invalid URL {url!r}: No host supplied") + + # In general, we want to try IDNA encoding the hostname if the string contains + # non-ASCII characters. This allows users to automatically get the correct IDNA + # behaviour. For strings containing only ASCII characters, we need to also verify + # it doesn't start with a wildcard (*), before allowing the unencoded hostname. + if not unicode_is_ascii(host): + try: + host = self._get_idna_encoded_host(host) + except UnicodeError: + raise InvalidURL("URL has an invalid label.") + elif host.startswith(("*", ".")): + raise InvalidURL("URL has an invalid label.") + + # Carefully reconstruct the network location + netloc = auth or "" + if netloc: + netloc += "@" + netloc += host + if port: + netloc += f":{port}" + + # Bare domains aren't valid URLs. 
+ if not path: + path = "/" + + if isinstance(params, (str, bytes)): + params = to_native_string(params) + + enc_params = self._encode_params(params) + if enc_params: + if query: + query = f"{query}&{enc_params}" + else: + query = enc_params + + url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment])) + self.url = url + + def prepare_headers(self, headers): + """Prepares the given HTTP headers.""" + + self.headers = CaseInsensitiveDict() + if headers: + for header in headers.items(): + # Raise exception on invalid header value. + check_header_validity(header) + name, value = header + self.headers[to_native_string(name)] = value + + def prepare_body(self, data, files, json=None): + """Prepares the given HTTP body data.""" + + # Check if file, fo, generator, iterator. + # If not, run through normal process. + + # Nottin' on you. + body = None + content_type = None + + if not data and json is not None: + # urllib3 requires a bytes-like body. Python 2's json.dumps + # provides this natively, but Python 3 gives a Unicode string. + content_type = "application/json" + + try: + body = complexjson.dumps(json, allow_nan=False) + except ValueError as ve: + raise InvalidJSONError(ve, request=self) + + if not isinstance(body, bytes): + body = body.encode("utf-8") + + is_stream = all( + [ + hasattr(data, "__iter__"), + not isinstance(data, (basestring, list, tuple, Mapping)), + ] + ) + + if is_stream: + try: + length = super_len(data) + except (TypeError, AttributeError, UnsupportedOperation): + length = None + + body = data + + if getattr(body, "tell", None) is not None: + # Record the current file position before reading. + # This will allow us to rewind a file in the event + # of a redirect. 
+ try: + self._body_position = body.tell() + except OSError: + # This differentiates from None, allowing us to catch + # a failed `tell()` later when trying to rewind the body + self._body_position = object() + + if files: + raise NotImplementedError( + "Streamed bodies and files are mutually exclusive." + ) + + if length: + self.headers["Content-Length"] = builtin_str(length) + else: + self.headers["Transfer-Encoding"] = "chunked" + else: + # Multi-part file uploads. + if files: + (body, content_type) = self._encode_files(files, data) + else: + if data: + body = self._encode_params(data) + if isinstance(data, basestring) or hasattr(data, "read"): + content_type = None + else: + content_type = "application/x-www-form-urlencoded" + + self.prepare_content_length(body) + + # Add content-type if it wasn't explicitly provided. + if content_type and ("content-type" not in self.headers): + self.headers["Content-Type"] = content_type + + self.body = body + + def prepare_content_length(self, body): + """Prepare Content-Length header based on request method and body""" + if body is not None: + length = super_len(body) + if length: + # If length exists, set it. Otherwise, we fallback + # to Transfer-Encoding: chunked. + self.headers["Content-Length"] = builtin_str(length) + elif ( + self.method not in ("GET", "HEAD") + and self.headers.get("Content-Length") is None + ): + # Set Content-Length to 0 for methods that can have a body + # but don't provide one. (i.e. not GET or HEAD) + self.headers["Content-Length"] = "0" + + def prepare_auth(self, auth, url=""): + """Prepares the given HTTP auth data.""" + + # If no Auth is explicitly provided, extract it from the URL first. + if auth is None: + url_auth = get_auth_from_url(self.url) + auth = url_auth if any(url_auth) else None + + if auth: + if isinstance(auth, tuple) and len(auth) == 2: + # special-case basic HTTP auth + auth = HTTPBasicAuth(*auth) + + # Allow auth to make its changes. 
+ r = auth(self) + + # Update self to reflect the auth changes. + self.__dict__.update(r.__dict__) + + # Recompute Content-Length + self.prepare_content_length(self.body) + + def prepare_cookies(self, cookies): + """Prepares the given HTTP cookie data. + + This function eventually generates a ``Cookie`` header from the + given cookies using cookielib. Due to cookielib's design, the header + will not be regenerated if it already exists, meaning this function + can only be called once for the life of the + :class:`PreparedRequest ` object. Any subsequent calls + to ``prepare_cookies`` will have no actual effect, unless the "Cookie" + header is removed beforehand. + """ + if isinstance(cookies, cookielib.CookieJar): + self._cookies = cookies + else: + self._cookies = cookiejar_from_dict(cookies) + + cookie_header = get_cookie_header(self._cookies, self) + if cookie_header is not None: + self.headers["Cookie"] = cookie_header + + def prepare_hooks(self, hooks): + """Prepares the given hooks.""" + # hooks can be passed as None to the prepare method and to this + # method. To prevent iterating over None, simply use an empty list + # if hooks is False-y + hooks = hooks or [] + for event in hooks: + self.register_hook(event, hooks[event]) + + +class Response: + """The :class:`Response ` object, which contains a + server's response to an HTTP request. + """ + + __attrs__ = [ + "_content", + "status_code", + "headers", + "url", + "history", + "encoding", + "reason", + "cookies", + "elapsed", + "request", + ] + + def __init__(self): + self._content = False + self._content_consumed = False + self._next = None + + #: Integer Code of responded HTTP Status, e.g. 404 or 200. + self.status_code = None + + #: Case-insensitive Dictionary of Response Headers. + #: For example, ``headers['content-encoding']`` will return the + #: value of a ``'Content-Encoding'`` response header. 
+ self.headers = CaseInsensitiveDict() + + #: File-like object representation of response (for advanced usage). + #: Use of ``raw`` requires that ``stream=True`` be set on the request. + #: This requirement does not apply for use internally to Requests. + self.raw = None + + #: Final URL location of Response. + self.url = None + + #: Encoding to decode with when accessing r.text. + self.encoding = None + + #: A list of :class:`Response ` objects from + #: the history of the Request. Any redirect responses will end + #: up here. The list is sorted from the oldest to the most recent request. + self.history = [] + + #: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK". + self.reason = None + + #: A CookieJar of Cookies the server sent back. + self.cookies = cookiejar_from_dict({}) + + #: The amount of time elapsed between sending the request + #: and the arrival of the response (as a timedelta). + #: This property specifically measures the time taken between sending + #: the first byte of the request and finishing parsing the headers. It + #: is therefore unaffected by consuming the response content or the + #: value of the ``stream`` keyword argument. + self.elapsed = datetime.timedelta(0) + + #: The :class:`PreparedRequest ` object to which this + #: is a response. + self.request = None + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + def __getstate__(self): + # Consume everything; accessing the content attribute makes + # sure the content has been fully read. + if not self._content_consumed: + self.content + + return {attr: getattr(self, attr, None) for attr in self.__attrs__} + + def __setstate__(self, state): + for name, value in state.items(): + setattr(self, name, value) + + # pickled objects do not have .raw + setattr(self, "_content_consumed", True) + setattr(self, "raw", None) + + def __repr__(self): + return f"" + + def __bool__(self): + """Returns True if :attr:`status_code` is less than 400. 
+
+        This attribute checks if the status code of the response is between
+        400 and 600 to see if there was a client error or a server error. If
+        the status code is between 200 and 400, this will return True. This
+        is **not** a check to see if the response code is ``200 OK``.
+        """
+        return self.ok
+
+    def __nonzero__(self):
+        """Returns True if :attr:`status_code` is less than 400.
+
+        This attribute checks if the status code of the response is between
+        400 and 600 to see if there was a client error or a server error. If
+        the status code is between 200 and 400, this will return True. This
+        is **not** a check to see if the response code is ``200 OK``.
+        """
+        return self.ok
+
+    def __iter__(self):
+        """Allows you to use a response as an iterator."""
+        return self.iter_content(128)
+
+    @property
+    def ok(self):
+        """Returns True if :attr:`status_code` is less than 400, False if not.
+
+        This attribute checks if the status code of the response is between
+        400 and 600 to see if there was a client error or a server error. If
+        the status code is between 200 and 400, this will return True. This
+        is **not** a check to see if the response code is ``200 OK``.
+        """
+        try:
+            self.raise_for_status()
+        except HTTPError:
+            return False
+        return True
+
+    @property
+    def is_redirect(self):
+        """True if this Response is a well-formed HTTP redirect that could have
+        been processed automatically (by :meth:`Session.resolve_redirects`).
+ """ + return "location" in self.headers and self.status_code in REDIRECT_STATI + + @property + def is_permanent_redirect(self): + """True if this Response one of the permanent versions of redirect.""" + return "location" in self.headers and self.status_code in ( + codes.moved_permanently, + codes.permanent_redirect, + ) + + @property + def next(self): + """Returns a PreparedRequest for the next request in a redirect chain, if there is one.""" + return self._next + + @property + def apparent_encoding(self): + """The apparent encoding, provided by the charset_normalizer or chardet libraries.""" + if chardet is not None: + return chardet.detect(self.content)["encoding"] + else: + # If no character detection library is available, we'll fall back + # to a standard Python utf-8 str. + return "utf-8" + + def iter_content(self, chunk_size=1, decode_unicode=False): + """Iterates over the response data. When stream=True is set on the + request, this avoids reading the content at once into memory for + large responses. The chunk size is the number of bytes it should + read into memory. This is not necessarily the length of each item + returned as decoding can take place. + + chunk_size must be of type int or None. A value of None will + function differently depending on the value of `stream`. + stream=True will read data as it arrives in whatever size the + chunks are received. If stream=False, data is returned as + a single chunk. + + If decode_unicode is True, content will be decoded using the best + available encoding based on the response. + """ + + def generate(): + # Special case for urllib3. + if hasattr(self.raw, "stream"): + try: + yield from self.raw.stream(chunk_size, decode_content=True) + except ProtocolError as e: + raise ChunkedEncodingError(e) + except DecodeError as e: + raise ContentDecodingError(e) + except ReadTimeoutError as e: + raise ConnectionError(e) + except SSLError as e: + raise RequestsSSLError(e) + else: + # Standard file-like object. 
+ while True: + chunk = self.raw.read(chunk_size) + if not chunk: + break + yield chunk + + self._content_consumed = True + + if self._content_consumed and isinstance(self._content, bool): + raise StreamConsumedError() + elif chunk_size is not None and not isinstance(chunk_size, int): + raise TypeError( + f"chunk_size must be an int, it is instead a {type(chunk_size)}." + ) + # simulate reading small chunks of the content + reused_chunks = iter_slices(self._content, chunk_size) + + stream_chunks = generate() + + chunks = reused_chunks if self._content_consumed else stream_chunks + + if decode_unicode: + chunks = stream_decode_response_unicode(chunks, self) + + return chunks + + def iter_lines( + self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None + ): + """Iterates over the response data, one line at a time. When + stream=True is set on the request, this avoids reading the + content at once into memory for large responses. + + .. note:: This method is not reentrant safe. + """ + + pending = None + + for chunk in self.iter_content( + chunk_size=chunk_size, decode_unicode=decode_unicode + ): + if pending is not None: + chunk = pending + chunk + + if delimiter: + lines = chunk.split(delimiter) + else: + lines = chunk.splitlines() + + if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]: + pending = lines.pop() + else: + pending = None + + yield from lines + + if pending is not None: + yield pending + + @property + def content(self): + """Content of the response, in bytes.""" + + if self._content is False: + # Read the contents. + if self._content_consumed: + raise RuntimeError("The content for this response was already consumed") + + if self.status_code == 0 or self.raw is None: + self._content = None + else: + self._content = b"".join(self.iter_content(CONTENT_CHUNK_SIZE)) or b"" + + self._content_consumed = True + # don't need to release the connection; that's been handled by urllib3 + # since we exhausted the data. 
+ return self._content + + @property + def text(self): + """Content of the response, in unicode. + + If Response.encoding is None, encoding will be guessed using + ``charset_normalizer`` or ``chardet``. + + The encoding of the response content is determined based solely on HTTP + headers, following RFC 2616 to the letter. If you can take advantage of + non-HTTP knowledge to make a better guess at the encoding, you should + set ``r.encoding`` appropriately before accessing this property. + """ + + # Try charset from content-type + content = None + encoding = self.encoding + + if not self.content: + return "" + + # Fallback to auto-detected encoding. + if self.encoding is None: + encoding = self.apparent_encoding + + # Decode unicode from given encoding. + try: + content = str(self.content, encoding, errors="replace") + except (LookupError, TypeError): + # A LookupError is raised if the encoding was not found which could + # indicate a misspelling or similar mistake. + # + # A TypeError can be raised if encoding is None + # + # So we try blindly encoding. + content = str(self.content, errors="replace") + + return content + + def json(self, **kwargs): + r"""Returns the json-encoded content of a response, if any. + + :param \*\*kwargs: Optional arguments that ``json.loads`` takes. + :raises requests.exceptions.JSONDecodeError: If the response body does not + contain valid json. + """ + + if not self.encoding and self.content and len(self.content) > 3: + # No encoding set. JSON RFC 4627 section 3 states we should expect + # UTF-8, -16 or -32. Detect which one to use; If the detection or + # decoding fails, fall back to `self.text` (using charset_normalizer to make + # a best guess). + encoding = guess_json_utf(self.content) + if encoding is not None: + try: + return complexjson.loads(self.content.decode(encoding), **kwargs) + except UnicodeDecodeError: + # Wrong UTF codec detected; usually because it's not UTF-8 + # but some other 8-bit codec. 
This is an RFC violation, + # and the server didn't bother to tell us what codec *was* + # used. + pass + except JSONDecodeError as e: + raise RequestsJSONDecodeError(e.msg, e.doc, e.pos) + + try: + return complexjson.loads(self.text, **kwargs) + except JSONDecodeError as e: + # Catch JSON-related errors and raise as requests.JSONDecodeError + # This aliases json.JSONDecodeError and simplejson.JSONDecodeError + raise RequestsJSONDecodeError(e.msg, e.doc, e.pos) + + @property + def links(self): + """Returns the parsed header links of the response, if any.""" + + header = self.headers.get("link") + + resolved_links = {} + + if header: + links = parse_header_links(header) + + for link in links: + key = link.get("rel") or link.get("url") + resolved_links[key] = link + + return resolved_links + + def raise_for_status(self): + """Raises :class:`HTTPError`, if one occurred.""" + + http_error_msg = "" + if isinstance(self.reason, bytes): + # We attempt to decode utf-8 first because some servers + # choose to localize their reason strings. If the string + # isn't utf-8, we fall back to iso-8859-1 for all other + # encodings. (See PR #3538) + try: + reason = self.reason.decode("utf-8") + except UnicodeDecodeError: + reason = self.reason.decode("iso-8859-1") + else: + reason = self.reason + + if 400 <= self.status_code < 500: + http_error_msg = ( + f"{self.status_code} Client Error: {reason} for url: {self.url}" + ) + + elif 500 <= self.status_code < 600: + http_error_msg = ( + f"{self.status_code} Server Error: {reason} for url: {self.url}" + ) + + if http_error_msg: + raise HTTPError(http_error_msg, response=self) + + def close(self): + """Releases the connection back to the pool. Once this method has been + called the underlying ``raw`` object must not be accessed again. 
+ + *Note: Should not normally need to be called explicitly.* + """ + if not self._content_consumed: + self.raw.close() + + release_conn = getattr(self.raw, "release_conn", None) + if release_conn is not None: + release_conn() diff --git a/parrot/lib/python3.10/site-packages/requests/packages.py b/parrot/lib/python3.10/site-packages/requests/packages.py new file mode 100644 index 0000000000000000000000000000000000000000..5ab3d8e250de8475cb22553f564e5444e02c7460 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/requests/packages.py @@ -0,0 +1,23 @@ +import sys + +from .compat import chardet + +# This code exists for backwards compatibility reasons. +# I don't like it either. Just look the other way. :) + +for package in ("urllib3", "idna"): + locals()[package] = __import__(package) + # This traversal is apparently necessary such that the identities are + # preserved (requests.packages.urllib3.* is urllib3.*) + for mod in list(sys.modules): + if mod == package or mod.startswith(f"{package}."): + sys.modules[f"requests.packages.{mod}"] = sys.modules[mod] + +if chardet is not None: + target = chardet.__name__ + for mod in list(sys.modules): + if mod == target or mod.startswith(f"{target}."): + imported_mod = sys.modules[mod] + sys.modules[f"requests.packages.{mod}"] = imported_mod + mod = mod.replace(target, "chardet") + sys.modules[f"requests.packages.{mod}"] = imported_mod diff --git a/parrot/lib/python3.10/site-packages/requests/sessions.py b/parrot/lib/python3.10/site-packages/requests/sessions.py new file mode 100644 index 0000000000000000000000000000000000000000..b387bc36df7bc064b502adcb3c1a4527dd401fda --- /dev/null +++ b/parrot/lib/python3.10/site-packages/requests/sessions.py @@ -0,0 +1,831 @@ +""" +requests.sessions +~~~~~~~~~~~~~~~~~ + +This module provides a Session object to manage and persist settings across +requests (cookies, auth, proxies). 
+""" +import os +import sys +import time +from collections import OrderedDict +from datetime import timedelta + +from ._internal_utils import to_native_string +from .adapters import HTTPAdapter +from .auth import _basic_auth_str +from .compat import Mapping, cookielib, urljoin, urlparse +from .cookies import ( + RequestsCookieJar, + cookiejar_from_dict, + extract_cookies_to_jar, + merge_cookies, +) +from .exceptions import ( + ChunkedEncodingError, + ContentDecodingError, + InvalidSchema, + TooManyRedirects, +) +from .hooks import default_hooks, dispatch_hook + +# formerly defined here, reexposed here for backward compatibility +from .models import ( # noqa: F401 + DEFAULT_REDIRECT_LIMIT, + REDIRECT_STATI, + PreparedRequest, + Request, +) +from .status_codes import codes +from .structures import CaseInsensitiveDict +from .utils import ( # noqa: F401 + DEFAULT_PORTS, + default_headers, + get_auth_from_url, + get_environ_proxies, + get_netrc_auth, + requote_uri, + resolve_proxies, + rewind_body, + should_bypass_proxies, + to_key_val_list, +) + +# Preferred clock, based on which one is more accurate on a given system. +if sys.platform == "win32": + preferred_clock = time.perf_counter +else: + preferred_clock = time.time + + +def merge_setting(request_setting, session_setting, dict_class=OrderedDict): + """Determines appropriate setting for a given request, taking into account + the explicit setting on that request, and the setting in the session. If a + setting is a dictionary, they will be merged together using `dict_class` + """ + + if session_setting is None: + return request_setting + + if request_setting is None: + return session_setting + + # Bypass if not a dictionary (e.g. verify) + if not ( + isinstance(session_setting, Mapping) and isinstance(request_setting, Mapping) + ): + return request_setting + + merged_setting = dict_class(to_key_val_list(session_setting)) + merged_setting.update(to_key_val_list(request_setting)) + + # Remove keys that are set to None. 
Extract keys first to avoid altering + # the dictionary during iteration. + none_keys = [k for (k, v) in merged_setting.items() if v is None] + for key in none_keys: + del merged_setting[key] + + return merged_setting + + +def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict): + """Properly merges both requests and session hooks. + + This is necessary because when request_hooks == {'response': []}, the + merge breaks Session hooks entirely. + """ + if session_hooks is None or session_hooks.get("response") == []: + return request_hooks + + if request_hooks is None or request_hooks.get("response") == []: + return session_hooks + + return merge_setting(request_hooks, session_hooks, dict_class) + + +class SessionRedirectMixin: + def get_redirect_target(self, resp): + """Receives a Response. Returns a redirect URI or ``None``""" + # Due to the nature of how requests processes redirects this method will + # be called at least once upon the original response and at least twice + # on each subsequent redirect response (if any). + # If a custom mixin is used to handle this logic, it may be advantageous + # to cache the redirect location onto the response object as a private + # attribute. + if resp.is_redirect: + location = resp.headers["location"] + # Currently the underlying http module on py3 decode headers + # in latin1, but empirical evidence suggests that latin1 is very + # rarely used with non-ASCII characters in HTTP headers. + # It is more likely to get UTF8 header rather than latin1. + # This causes incorrect handling of UTF8 encoded location headers. + # To solve this, we re-encode the location in latin1. 
+ location = location.encode("latin1") + return to_native_string(location, "utf8") + return None + + def should_strip_auth(self, old_url, new_url): + """Decide whether Authorization header should be removed when redirecting""" + old_parsed = urlparse(old_url) + new_parsed = urlparse(new_url) + if old_parsed.hostname != new_parsed.hostname: + return True + # Special case: allow http -> https redirect when using the standard + # ports. This isn't specified by RFC 7235, but is kept to avoid + # breaking backwards compatibility with older versions of requests + # that allowed any redirects on the same host. + if ( + old_parsed.scheme == "http" + and old_parsed.port in (80, None) + and new_parsed.scheme == "https" + and new_parsed.port in (443, None) + ): + return False + + # Handle default port usage corresponding to scheme. + changed_port = old_parsed.port != new_parsed.port + changed_scheme = old_parsed.scheme != new_parsed.scheme + default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None) + if ( + not changed_scheme + and old_parsed.port in default_port + and new_parsed.port in default_port + ): + return False + + # Standard case: root URI must match + return changed_port or changed_scheme + + def resolve_redirects( + self, + resp, + req, + stream=False, + timeout=None, + verify=True, + cert=None, + proxies=None, + yield_requests=False, + **adapter_kwargs, + ): + """Receives a Response. Returns a generator of Responses or Requests.""" + + hist = [] # keep track of history + + url = self.get_redirect_target(resp) + previous_fragment = urlparse(req.url).fragment + while url: + prepared_request = req.copy() + + # Update history and keep track of redirects. 
+ # resp.history must ignore the original request in this loop + hist.append(resp) + resp.history = hist[1:] + + try: + resp.content # Consume socket so it can be released + except (ChunkedEncodingError, ContentDecodingError, RuntimeError): + resp.raw.read(decode_content=False) + + if len(resp.history) >= self.max_redirects: + raise TooManyRedirects( + f"Exceeded {self.max_redirects} redirects.", response=resp + ) + + # Release the connection back into the pool. + resp.close() + + # Handle redirection without scheme (see: RFC 1808 Section 4) + if url.startswith("//"): + parsed_rurl = urlparse(resp.url) + url = ":".join([to_native_string(parsed_rurl.scheme), url]) + + # Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2) + parsed = urlparse(url) + if parsed.fragment == "" and previous_fragment: + parsed = parsed._replace(fragment=previous_fragment) + elif parsed.fragment: + previous_fragment = parsed.fragment + url = parsed.geturl() + + # Facilitate relative 'location' headers, as allowed by RFC 7231. + # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource') + # Compliant with RFC3986, we percent encode the url. + if not parsed.netloc: + url = urljoin(resp.url, requote_uri(url)) + else: + url = requote_uri(url) + + prepared_request.url = to_native_string(url) + + self.rebuild_method(prepared_request, resp) + + # https://github.com/psf/requests/issues/1084 + if resp.status_code not in ( + codes.temporary_redirect, + codes.permanent_redirect, + ): + # https://github.com/psf/requests/issues/3490 + purged_headers = ("Content-Length", "Content-Type", "Transfer-Encoding") + for header in purged_headers: + prepared_request.headers.pop(header, None) + prepared_request.body = None + + headers = prepared_request.headers + headers.pop("Cookie", None) + + # Extract any cookies sent on the response to the cookiejar + # in the new request. 
Because we've mutated our copied prepared + # request, use the old one that we haven't yet touched. + extract_cookies_to_jar(prepared_request._cookies, req, resp.raw) + merge_cookies(prepared_request._cookies, self.cookies) + prepared_request.prepare_cookies(prepared_request._cookies) + + # Rebuild auth and proxy information. + proxies = self.rebuild_proxies(prepared_request, proxies) + self.rebuild_auth(prepared_request, resp) + + # A failed tell() sets `_body_position` to `object()`. This non-None + # value ensures `rewindable` will be True, allowing us to raise an + # UnrewindableBodyError, instead of hanging the connection. + rewindable = prepared_request._body_position is not None and ( + "Content-Length" in headers or "Transfer-Encoding" in headers + ) + + # Attempt to rewind consumed file-like object. + if rewindable: + rewind_body(prepared_request) + + # Override the original request. + req = prepared_request + + if yield_requests: + yield req + else: + resp = self.send( + req, + stream=stream, + timeout=timeout, + verify=verify, + cert=cert, + proxies=proxies, + allow_redirects=False, + **adapter_kwargs, + ) + + extract_cookies_to_jar(self.cookies, prepared_request, resp.raw) + + # extract redirect url, if any, for the next loop + url = self.get_redirect_target(resp) + yield resp + + def rebuild_auth(self, prepared_request, response): + """When being redirected we may want to strip authentication from the + request to avoid leaking credentials. This method intelligently removes + and reapplies authentication where possible to avoid credential loss. + """ + headers = prepared_request.headers + url = prepared_request.url + + if "Authorization" in headers and self.should_strip_auth( + response.request.url, url + ): + # If we get redirected to a new host, we should strip out any + # authentication headers. + del headers["Authorization"] + + # .netrc might have more auth for us on our new host. 
+ new_auth = get_netrc_auth(url) if self.trust_env else None + if new_auth is not None: + prepared_request.prepare_auth(new_auth) + + def rebuild_proxies(self, prepared_request, proxies): + """This method re-evaluates the proxy configuration by considering the + environment variables. If we are redirected to a URL covered by + NO_PROXY, we strip the proxy configuration. Otherwise, we set missing + proxy keys for this URL (in case they were stripped by a previous + redirect). + + This method also replaces the Proxy-Authorization header where + necessary. + + :rtype: dict + """ + headers = prepared_request.headers + scheme = urlparse(prepared_request.url).scheme + new_proxies = resolve_proxies(prepared_request, proxies, self.trust_env) + + if "Proxy-Authorization" in headers: + del headers["Proxy-Authorization"] + + try: + username, password = get_auth_from_url(new_proxies[scheme]) + except KeyError: + username, password = None, None + + # urllib3 handles proxy authorization for us in the standard adapter. + # Avoid appending this to TLS tunneled requests where it may be leaked. + if not scheme.startswith("https") and username and password: + headers["Proxy-Authorization"] = _basic_auth_str(username, password) + + return new_proxies + + def rebuild_method(self, prepared_request, response): + """When being redirected we may want to change the method of the request + based on certain specs or browser behavior. + """ + method = prepared_request.method + + # https://tools.ietf.org/html/rfc7231#section-6.4.4 + if response.status_code == codes.see_other and method != "HEAD": + method = "GET" + + # Do what the browsers do, despite standards... + # First, turn 302s into GETs. + if response.status_code == codes.found and method != "HEAD": + method = "GET" + + # Second, if a POST is responded to with a 301, turn it into a GET. + # This bizarre behaviour is explained in Issue 1704. 
+ if response.status_code == codes.moved and method == "POST": + method = "GET" + + prepared_request.method = method + + +class Session(SessionRedirectMixin): + """A Requests session. + + Provides cookie persistence, connection-pooling, and configuration. + + Basic Usage:: + + >>> import requests + >>> s = requests.Session() + >>> s.get('https://httpbin.org/get') + + + Or as a context manager:: + + >>> with requests.Session() as s: + ... s.get('https://httpbin.org/get') + + """ + + __attrs__ = [ + "headers", + "cookies", + "auth", + "proxies", + "hooks", + "params", + "verify", + "cert", + "adapters", + "stream", + "trust_env", + "max_redirects", + ] + + def __init__(self): + #: A case-insensitive dictionary of headers to be sent on each + #: :class:`Request ` sent from this + #: :class:`Session `. + self.headers = default_headers() + + #: Default Authentication tuple or object to attach to + #: :class:`Request `. + self.auth = None + + #: Dictionary mapping protocol or protocol and host to the URL of the proxy + #: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to + #: be used on each :class:`Request `. + self.proxies = {} + + #: Event-handling hooks. + self.hooks = default_hooks() + + #: Dictionary of querystring data to attach to each + #: :class:`Request `. The dictionary values may be lists for + #: representing multivalued query parameters. + self.params = {} + + #: Stream response content default. + self.stream = False + + #: SSL Verification default. + #: Defaults to `True`, requiring requests to verify the TLS certificate at the + #: remote end. + #: If verify is set to `False`, requests will accept any TLS certificate + #: presented by the server, and will ignore hostname mismatches and/or + #: expired certificates, which will make your application vulnerable to + #: man-in-the-middle (MitM) attacks. + #: Only set this to `False` for testing. 
+ self.verify = True + + #: SSL client certificate default, if String, path to ssl client + #: cert file (.pem). If Tuple, ('cert', 'key') pair. + self.cert = None + + #: Maximum number of redirects allowed. If the request exceeds this + #: limit, a :class:`TooManyRedirects` exception is raised. + #: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is + #: 30. + self.max_redirects = DEFAULT_REDIRECT_LIMIT + + #: Trust environment settings for proxy configuration, default + #: authentication and similar. + self.trust_env = True + + #: A CookieJar containing all currently outstanding cookies set on this + #: session. By default it is a + #: :class:`RequestsCookieJar `, but + #: may be any other ``cookielib.CookieJar`` compatible object. + self.cookies = cookiejar_from_dict({}) + + # Default connection adapters. + self.adapters = OrderedDict() + self.mount("https://", HTTPAdapter()) + self.mount("http://", HTTPAdapter()) + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + def prepare_request(self, request): + """Constructs a :class:`PreparedRequest ` for + transmission and returns it. The :class:`PreparedRequest` has settings + merged from the :class:`Request ` instance and those of the + :class:`Session`. + + :param request: :class:`Request` instance to prepare with this + session's settings. + :rtype: requests.PreparedRequest + """ + cookies = request.cookies or {} + + # Bootstrap CookieJar. + if not isinstance(cookies, cookielib.CookieJar): + cookies = cookiejar_from_dict(cookies) + + # Merge with session cookies + merged_cookies = merge_cookies( + merge_cookies(RequestsCookieJar(), self.cookies), cookies + ) + + # Set environment's basic authentication if not explicitly set. 
+ auth = request.auth + if self.trust_env and not auth and not self.auth: + auth = get_netrc_auth(request.url) + + p = PreparedRequest() + p.prepare( + method=request.method.upper(), + url=request.url, + files=request.files, + data=request.data, + json=request.json, + headers=merge_setting( + request.headers, self.headers, dict_class=CaseInsensitiveDict + ), + params=merge_setting(request.params, self.params), + auth=merge_setting(auth, self.auth), + cookies=merged_cookies, + hooks=merge_hooks(request.hooks, self.hooks), + ) + return p + + def request( + self, + method, + url, + params=None, + data=None, + headers=None, + cookies=None, + files=None, + auth=None, + timeout=None, + allow_redirects=True, + proxies=None, + hooks=None, + stream=None, + verify=None, + cert=None, + json=None, + ): + """Constructs a :class:`Request `, prepares it and sends it. + Returns :class:`Response ` object. + + :param method: method for the new :class:`Request` object. + :param url: URL for the new :class:`Request` object. + :param params: (optional) Dictionary or bytes to be sent in the query + string for the :class:`Request`. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param json: (optional) json to send in the body of the + :class:`Request`. + :param headers: (optional) Dictionary of HTTP Headers to send with the + :class:`Request`. + :param cookies: (optional) Dict or CookieJar object to send with the + :class:`Request`. + :param files: (optional) Dictionary of ``'filename': file-like-objects`` + for multipart encoding upload. + :param auth: (optional) Auth tuple or callable to enable + Basic/Digest/Custom HTTP Auth. + :param timeout: (optional) How long to wait for the server to send + data before giving up, as a float, or a :ref:`(connect timeout, + read timeout) ` tuple. + :type timeout: float or tuple + :param allow_redirects: (optional) Set to True by default. 
+ :type allow_redirects: bool + :param proxies: (optional) Dictionary mapping protocol or protocol and + hostname to the URL of the proxy. + :param hooks: (optional) Dictionary mapping hook name to one event or + list of events, event must be callable. + :param stream: (optional) whether to immediately download the response + content. Defaults to ``False``. + :param verify: (optional) Either a boolean, in which case it controls whether we verify + the server's TLS certificate, or a string, in which case it must be a path + to a CA bundle to use. Defaults to ``True``. When set to + ``False``, requests will accept any TLS certificate presented by + the server, and will ignore hostname mismatches and/or expired + certificates, which will make your application vulnerable to + man-in-the-middle (MitM) attacks. Setting verify to ``False`` + may be useful during local development or testing. + :param cert: (optional) if String, path to ssl client cert file (.pem). + If Tuple, ('cert', 'key') pair. + :rtype: requests.Response + """ + # Create the Request. + req = Request( + method=method.upper(), + url=url, + headers=headers, + files=files, + data=data or {}, + json=json, + params=params or {}, + auth=auth, + cookies=cookies, + hooks=hooks, + ) + prep = self.prepare_request(req) + + proxies = proxies or {} + + settings = self.merge_environment_settings( + prep.url, proxies, stream, verify, cert + ) + + # Send the request. + send_kwargs = { + "timeout": timeout, + "allow_redirects": allow_redirects, + } + send_kwargs.update(settings) + resp = self.send(prep, **send_kwargs) + + return resp + + def get(self, url, **kwargs): + r"""Sends a GET request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. 
+ :rtype: requests.Response + """ + + kwargs.setdefault("allow_redirects", True) + return self.request("GET", url, **kwargs) + + def options(self, url, **kwargs): + r"""Sends a OPTIONS request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + kwargs.setdefault("allow_redirects", True) + return self.request("OPTIONS", url, **kwargs) + + def head(self, url, **kwargs): + r"""Sends a HEAD request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + kwargs.setdefault("allow_redirects", False) + return self.request("HEAD", url, **kwargs) + + def post(self, url, data=None, json=None, **kwargs): + r"""Sends a POST request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param json: (optional) json to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + return self.request("POST", url, data=data, json=json, **kwargs) + + def put(self, url, data=None, **kwargs): + r"""Sends a PUT request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + return self.request("PUT", url, data=data, **kwargs) + + def patch(self, url, data=None, **kwargs): + r"""Sends a PATCH request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. 
+ :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + return self.request("PATCH", url, data=data, **kwargs) + + def delete(self, url, **kwargs): + r"""Sends a DELETE request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + return self.request("DELETE", url, **kwargs) + + def send(self, request, **kwargs): + """Send a given PreparedRequest. + + :rtype: requests.Response + """ + # Set defaults that the hooks can utilize to ensure they always have + # the correct parameters to reproduce the previous request. + kwargs.setdefault("stream", self.stream) + kwargs.setdefault("verify", self.verify) + kwargs.setdefault("cert", self.cert) + if "proxies" not in kwargs: + kwargs["proxies"] = resolve_proxies(request, self.proxies, self.trust_env) + + # It's possible that users might accidentally send a Request object. + # Guard against that specific failure case. 
+ if isinstance(request, Request): + raise ValueError("You can only send PreparedRequests.") + + # Set up variables needed for resolve_redirects and dispatching of hooks + allow_redirects = kwargs.pop("allow_redirects", True) + stream = kwargs.get("stream") + hooks = request.hooks + + # Get the appropriate adapter to use + adapter = self.get_adapter(url=request.url) + + # Start time (approximately) of the request + start = preferred_clock() + + # Send the request + r = adapter.send(request, **kwargs) + + # Total elapsed time of the request (approximately) + elapsed = preferred_clock() - start + r.elapsed = timedelta(seconds=elapsed) + + # Response manipulation hooks + r = dispatch_hook("response", hooks, r, **kwargs) + + # Persist cookies + if r.history: + # If the hooks create history then we want those cookies too + for resp in r.history: + extract_cookies_to_jar(self.cookies, resp.request, resp.raw) + + extract_cookies_to_jar(self.cookies, request, r.raw) + + # Resolve redirects if allowed. + if allow_redirects: + # Redirect resolving generator. + gen = self.resolve_redirects(r, request, **kwargs) + history = [resp for resp in gen] + else: + history = [] + + # Shuffle things around if there's history. + if history: + # Insert the first (original) request at the start + history.insert(0, r) + # Get the last request made + r = history.pop() + r.history = history + + # If redirects aren't being followed, store the response on the Request for Response.next(). + if not allow_redirects: + try: + r._next = next( + self.resolve_redirects(r, request, yield_requests=True, **kwargs) + ) + except StopIteration: + pass + + if not stream: + r.content + + return r + + def merge_environment_settings(self, url, proxies, stream, verify, cert): + """ + Check the environment and merge it with some settings. + + :rtype: dict + """ + # Gather clues from the surrounding environment. + if self.trust_env: + # Set environment's proxies. 
+ no_proxy = proxies.get("no_proxy") if proxies is not None else None + env_proxies = get_environ_proxies(url, no_proxy=no_proxy) + for k, v in env_proxies.items(): + proxies.setdefault(k, v) + + # Look for requests environment configuration + # and be compatible with cURL. + if verify is True or verify is None: + verify = ( + os.environ.get("REQUESTS_CA_BUNDLE") + or os.environ.get("CURL_CA_BUNDLE") + or verify + ) + + # Merge all the kwargs. + proxies = merge_setting(proxies, self.proxies) + stream = merge_setting(stream, self.stream) + verify = merge_setting(verify, self.verify) + cert = merge_setting(cert, self.cert) + + return {"proxies": proxies, "stream": stream, "verify": verify, "cert": cert} + + def get_adapter(self, url): + """ + Returns the appropriate connection adapter for the given URL. + + :rtype: requests.adapters.BaseAdapter + """ + for prefix, adapter in self.adapters.items(): + if url.lower().startswith(prefix.lower()): + return adapter + + # Nothing matches :-/ + raise InvalidSchema(f"No connection adapters were found for {url!r}") + + def close(self): + """Closes all adapters and as such the session""" + for v in self.adapters.values(): + v.close() + + def mount(self, prefix, adapter): + """Registers a connection adapter to a prefix. + + Adapters are sorted in descending order by prefix length. + """ + self.adapters[prefix] = adapter + keys_to_move = [k for k in self.adapters if len(k) < len(prefix)] + + for key in keys_to_move: + self.adapters[key] = self.adapters.pop(key) + + def __getstate__(self): + state = {attr: getattr(self, attr, None) for attr in self.__attrs__} + return state + + def __setstate__(self, state): + for attr, value in state.items(): + setattr(self, attr, value) + + +def session(): + """ + Returns a :class:`Session` for context-management. + + .. deprecated:: 1.0.0 + + This method has been deprecated since version 1.0.0 and is only kept for + backwards compatibility. 
New code should use :class:`~requests.sessions.Session` + to create a session. This may be removed at a future date. + + :rtype: Session + """ + return Session() diff --git a/parrot/lib/python3.10/site-packages/requests/status_codes.py b/parrot/lib/python3.10/site-packages/requests/status_codes.py new file mode 100644 index 0000000000000000000000000000000000000000..c7945a2f06897ed980cc575df2f48d9e6c1a9f7e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/requests/status_codes.py @@ -0,0 +1,128 @@ +r""" +The ``codes`` object defines a mapping from common names for HTTP statuses +to their numerical codes, accessible either as attributes or as dictionary +items. + +Example:: + + >>> import requests + >>> requests.codes['temporary_redirect'] + 307 + >>> requests.codes.teapot + 418 + >>> requests.codes['\o/'] + 200 + +Some codes have multiple names, and both upper- and lower-case versions of +the names are allowed. For example, ``codes.ok``, ``codes.OK``, and +``codes.okay`` all correspond to the HTTP status code 200. +""" + +from .structures import LookupDict + +_codes = { + # Informational. + 100: ("continue",), + 101: ("switching_protocols",), + 102: ("processing", "early-hints"), + 103: ("checkpoint",), + 122: ("uri_too_long", "request_uri_too_long"), + 200: ("ok", "okay", "all_ok", "all_okay", "all_good", "\\o/", "✓"), + 201: ("created",), + 202: ("accepted",), + 203: ("non_authoritative_info", "non_authoritative_information"), + 204: ("no_content",), + 205: ("reset_content", "reset"), + 206: ("partial_content", "partial"), + 207: ("multi_status", "multiple_status", "multi_stati", "multiple_stati"), + 208: ("already_reported",), + 226: ("im_used",), + # Redirection. 
+ 300: ("multiple_choices",), + 301: ("moved_permanently", "moved", "\\o-"), + 302: ("found",), + 303: ("see_other", "other"), + 304: ("not_modified",), + 305: ("use_proxy",), + 306: ("switch_proxy",), + 307: ("temporary_redirect", "temporary_moved", "temporary"), + 308: ( + "permanent_redirect", + "resume_incomplete", + "resume", + ), # "resume" and "resume_incomplete" to be removed in 3.0 + # Client Error. + 400: ("bad_request", "bad"), + 401: ("unauthorized",), + 402: ("payment_required", "payment"), + 403: ("forbidden",), + 404: ("not_found", "-o-"), + 405: ("method_not_allowed", "not_allowed"), + 406: ("not_acceptable",), + 407: ("proxy_authentication_required", "proxy_auth", "proxy_authentication"), + 408: ("request_timeout", "timeout"), + 409: ("conflict",), + 410: ("gone",), + 411: ("length_required",), + 412: ("precondition_failed", "precondition"), + 413: ("request_entity_too_large", "content_too_large"), + 414: ("request_uri_too_large", "uri_too_long"), + 415: ("unsupported_media_type", "unsupported_media", "media_type"), + 416: ( + "requested_range_not_satisfiable", + "requested_range", + "range_not_satisfiable", + ), + 417: ("expectation_failed",), + 418: ("im_a_teapot", "teapot", "i_am_a_teapot"), + 421: ("misdirected_request",), + 422: ("unprocessable_entity", "unprocessable", "unprocessable_content"), + 423: ("locked",), + 424: ("failed_dependency", "dependency"), + 425: ("unordered_collection", "unordered", "too_early"), + 426: ("upgrade_required", "upgrade"), + 428: ("precondition_required", "precondition"), + 429: ("too_many_requests", "too_many"), + 431: ("header_fields_too_large", "fields_too_large"), + 444: ("no_response", "none"), + 449: ("retry_with", "retry"), + 450: ("blocked_by_windows_parental_controls", "parental_controls"), + 451: ("unavailable_for_legal_reasons", "legal_reasons"), + 499: ("client_closed_request",), + # Server Error. 
+ 500: ("internal_server_error", "server_error", "/o\\", "✗"), + 501: ("not_implemented",), + 502: ("bad_gateway",), + 503: ("service_unavailable", "unavailable"), + 504: ("gateway_timeout",), + 505: ("http_version_not_supported", "http_version"), + 506: ("variant_also_negotiates",), + 507: ("insufficient_storage",), + 509: ("bandwidth_limit_exceeded", "bandwidth"), + 510: ("not_extended",), + 511: ("network_authentication_required", "network_auth", "network_authentication"), +} + +codes = LookupDict(name="status_codes") + + +def _init(): + for code, titles in _codes.items(): + for title in titles: + setattr(codes, title, code) + if not title.startswith(("\\", "/")): + setattr(codes, title.upper(), code) + + def doc(code): + names = ", ".join(f"``{n}``" for n in _codes[code]) + return "* %d: %s" % (code, names) + + global __doc__ + __doc__ = ( + __doc__ + "\n" + "\n".join(doc(code) for code in sorted(_codes)) + if __doc__ is not None + else None + ) + + +_init() diff --git a/parrot/lib/python3.10/site-packages/requests/structures.py b/parrot/lib/python3.10/site-packages/requests/structures.py new file mode 100644 index 0000000000000000000000000000000000000000..188e13e4829591facb23ae0e2eda84b9807cb818 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/requests/structures.py @@ -0,0 +1,99 @@ +""" +requests.structures +~~~~~~~~~~~~~~~~~~~ + +Data structures that power Requests. +""" + +from collections import OrderedDict + +from .compat import Mapping, MutableMapping + + +class CaseInsensitiveDict(MutableMapping): + """A case-insensitive ``dict``-like object. + + Implements all methods and operations of + ``MutableMapping`` as well as dict's ``copy``. Also + provides ``lower_items``. + + All keys are expected to be strings. The structure remembers the + case of the last key to be set, and ``iter(instance)``, + ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()`` + will contain case-sensitive keys. 
However, querying and contains + testing is case insensitive:: + + cid = CaseInsensitiveDict() + cid['Accept'] = 'application/json' + cid['aCCEPT'] == 'application/json' # True + list(cid) == ['Accept'] # True + + For example, ``headers['content-encoding']`` will return the + value of a ``'Content-Encoding'`` response header, regardless + of how the header name was originally stored. + + If the constructor, ``.update``, or equality comparison + operations are given keys that have equal ``.lower()``s, the + behavior is undefined. + """ + + def __init__(self, data=None, **kwargs): + self._store = OrderedDict() + if data is None: + data = {} + self.update(data, **kwargs) + + def __setitem__(self, key, value): + # Use the lowercased key for lookups, but store the actual + # key alongside the value. + self._store[key.lower()] = (key, value) + + def __getitem__(self, key): + return self._store[key.lower()][1] + + def __delitem__(self, key): + del self._store[key.lower()] + + def __iter__(self): + return (casedkey for casedkey, mappedvalue in self._store.values()) + + def __len__(self): + return len(self._store) + + def lower_items(self): + """Like iteritems(), but with all lowercase keys.""" + return ((lowerkey, keyval[1]) for (lowerkey, keyval) in self._store.items()) + + def __eq__(self, other): + if isinstance(other, Mapping): + other = CaseInsensitiveDict(other) + else: + return NotImplemented + # Compare insensitively + return dict(self.lower_items()) == dict(other.lower_items()) + + # Copy is required + def copy(self): + return CaseInsensitiveDict(self._store.values()) + + def __repr__(self): + return str(dict(self.items())) + + +class LookupDict(dict): + """Dictionary lookup object.""" + + def __init__(self, name=None): + self.name = name + super().__init__() + + def __repr__(self): + return f"" + + def __getitem__(self, key): + # We allow fall-through here, so values default to None + + return self.__dict__.get(key, None) + + def get(self, key, default=None): + 
return self.__dict__.get(key, default) diff --git a/parrot/lib/python3.10/site-packages/requests/utils.py b/parrot/lib/python3.10/site-packages/requests/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ae6c42f6cb48d2beaa3b7352bc1d130db3e4e3be --- /dev/null +++ b/parrot/lib/python3.10/site-packages/requests/utils.py @@ -0,0 +1,1096 @@ +""" +requests.utils +~~~~~~~~~~~~~~ + +This module provides utility functions that are used within Requests +that are also useful for external consumption. +""" + +import codecs +import contextlib +import io +import os +import re +import socket +import struct +import sys +import tempfile +import warnings +import zipfile +from collections import OrderedDict + +from urllib3.util import make_headers, parse_url + +from . import certs +from .__version__ import __version__ + +# to_native_string is unused here, but imported here for backwards compatibility +from ._internal_utils import ( # noqa: F401 + _HEADER_VALIDATORS_BYTE, + _HEADER_VALIDATORS_STR, + HEADER_VALIDATORS, + to_native_string, +) +from .compat import ( + Mapping, + basestring, + bytes, + getproxies, + getproxies_environment, + integer_types, +) +from .compat import parse_http_list as _parse_list_header +from .compat import ( + proxy_bypass, + proxy_bypass_environment, + quote, + str, + unquote, + urlparse, + urlunparse, +) +from .cookies import cookiejar_from_dict +from .exceptions import ( + FileModeWarning, + InvalidHeader, + InvalidURL, + UnrewindableBodyError, +) +from .structures import CaseInsensitiveDict + +NETRC_FILES = (".netrc", "_netrc") + +DEFAULT_CA_BUNDLE_PATH = certs.where() + +DEFAULT_PORTS = {"http": 80, "https": 443} + +# Ensure that ', ' is used to preserve previous delimiter behavior. 
DEFAULT_ACCEPT_ENCODING = ", ".join(
    re.split(r",\s*", make_headers(accept_encoding=True)["accept-encoding"])
)


if sys.platform == "win32":
    # provide a proxy_bypass version on Windows without DNS lookups

    def proxy_bypass_registry(host):
        """Check the Windows registry ProxyOverride list for *host*.

        :param host: hostname to test against the registry override globs.
        :rtype: bool
        """
        try:
            import winreg
        except ImportError:
            return False

        try:
            internetSettings = winreg.OpenKey(
                winreg.HKEY_CURRENT_USER,
                r"Software\Microsoft\Windows\CurrentVersion\Internet Settings",
            )
            # ProxyEnable could be REG_SZ or REG_DWORD, normalizing it
            proxyEnable = int(winreg.QueryValueEx(internetSettings, "ProxyEnable")[0])
            # ProxyOverride is almost always a string
            proxyOverride = winreg.QueryValueEx(internetSettings, "ProxyOverride")[0]
        except (OSError, ValueError):
            return False
        if not proxyEnable or not proxyOverride:
            return False

        # make a check value list from the registry entry: replace the
        # '<local>' string by the localhost entry and the corresponding
        # canonical entry.
        proxyOverride = proxyOverride.split(";")
        # filter out empty strings to avoid re.match return true in the following code.
        proxyOverride = filter(None, proxyOverride)
        # now check if we match one of the registry values.
        for test in proxyOverride:
            # NOTE(review): upstream uses the "<local>" sentinel here; the
            # vendored copy had it stripped to "" by markup mangling — restored.
            if test == "<local>":
                if "." not in host:
                    return True
            test = test.replace(".", r"\.")  # mask dots
            test = test.replace("*", r".*")  # change glob sequence
            test = test.replace("?", r".")  # change glob char
            if re.match(test, host, re.I):
                return True
        return False

    def proxy_bypass(host):  # noqa
        """Return True, if the host should be bypassed.

        Checks proxy settings gathered from the environment, if specified,
        or the registry.
        """
        if getproxies_environment():
            return proxy_bypass_environment(host)
        else:
            return proxy_bypass_registry(host)


def dict_to_sequence(d):
    """Returns an internal sequence dictionary update."""

    if hasattr(d, "items"):
        d = d.items()

    return d


def super_len(o):
    """Best-effort remaining length of a string/bytes/file-like object.

    Returns ``max(0, total_length - current_position)`` so a partially read
    stream reports only what is left to read.
    """
    total_length = None
    current_position = 0

    if isinstance(o, str):
        o = o.encode("utf-8")

    if hasattr(o, "__len__"):
        total_length = len(o)

    elif hasattr(o, "len"):
        total_length = o.len

    elif hasattr(o, "fileno"):
        try:
            fileno = o.fileno()
        except (io.UnsupportedOperation, AttributeError):
            # AttributeError is a surprising exception, seeing as how we've just checked
            # that `hasattr(o, 'fileno')`.  It happens for objects obtained via
            # `Tarfile.extractfile()`, per issue 5229.
            pass
        else:
            total_length = os.fstat(fileno).st_size

            # Having used fstat to determine the file length, we need to
            # confirm that this file was opened up in binary mode.
            if "b" not in o.mode:
                warnings.warn(
                    (
                        "Requests has determined the content-length for this "
                        "request using the binary size of the file: however, the "
                        "file has been opened in text mode (i.e. without the 'b' "
                        "flag in the mode). This may lead to an incorrect "
                        "content-length. In Requests 3.0, support will be removed "
                        "for files in text mode."
                    ),
                    FileModeWarning,
                )

    if hasattr(o, "tell"):
        try:
            current_position = o.tell()
        except OSError:
            # This can happen in some weird situations, such as when the file
            # is actually a special file descriptor like stdin. In this
            # instance, we don't know what the length is, so set it to zero and
            # let requests chunk it instead.
            if total_length is not None:
                current_position = total_length
        else:
            if hasattr(o, "seek") and total_length is None:
                # StringIO and BytesIO have seek but no usable fileno
                try:
                    # seek to end of file
                    o.seek(0, 2)
                    total_length = o.tell()

                    # seek back to current position to support
                    # partially read file-like objects
                    o.seek(current_position or 0)
                except OSError:
                    total_length = 0

    if total_length is None:
        total_length = 0

    return max(0, total_length - current_position)


def get_netrc_auth(url, raise_errors=False):
    """Returns the Requests tuple auth for a given url from netrc."""

    netrc_file = os.environ.get("NETRC")
    if netrc_file is not None:
        netrc_locations = (netrc_file,)
    else:
        netrc_locations = (f"~/{f}" for f in NETRC_FILES)

    try:
        from netrc import NetrcParseError, netrc

        netrc_path = None

        for f in netrc_locations:
            try:
                loc = os.path.expanduser(f)
            except KeyError:
                # os.path.expanduser can fail when $HOME is undefined and
                # getpwuid fails. See https://bugs.python.org/issue20164 &
                # https://github.com/psf/requests/issues/1846
                return

            if os.path.exists(loc):
                netrc_path = loc
                break

        # Abort early if there isn't one.
        if netrc_path is None:
            return

        ri = urlparse(url)

        # Strip port numbers from netloc. This weird `if...encode`` dance is
        # used for Python 3.2, which doesn't support unicode literals.
        splitstr = b":"
        if isinstance(url, str):
            splitstr = splitstr.decode("ascii")
        host = ri.netloc.split(splitstr)[0]

        try:
            _netrc = netrc(netrc_path).authenticators(host)
            if _netrc:
                # Return with login / password
                login_i = 0 if _netrc[0] else 1
                return (_netrc[login_i], _netrc[2])
        except (NetrcParseError, OSError):
            # If there was a parsing error or a permissions issue reading the file,
            # we'll just skip netrc auth unless explicitly asked to raise errors.
            if raise_errors:
                raise

    # App Engine hackiness.
    except (ImportError, AttributeError):
        pass


def guess_filename(obj):
    """Tries to guess the filename of the given object."""
    name = getattr(obj, "name", None)
    if name and isinstance(name, basestring) and name[0] != "<" and name[-1] != ">":
        return os.path.basename(name)


def extract_zipped_paths(path):
    """Replace nonexistent paths that look like they refer to a member of a zip
    archive with the location of an extracted copy of the target, or else
    just return the provided path unchanged.
    """
    if os.path.exists(path):
        # this is already a valid path, no need to do anything further
        return path

    # find the first valid part of the provided path and treat that as a zip archive
    # assume the rest of the path is the name of a member in the archive
    archive, member = os.path.split(path)
    while archive and not os.path.exists(archive):
        archive, prefix = os.path.split(archive)
        if not prefix:
            # If we don't check for an empty prefix after the split (in other words, archive remains unchanged after the split),
            # we _can_ end up in an infinite loop on a rare corner case affecting a small number of users
            break
        member = "/".join([prefix, member])

    if not zipfile.is_zipfile(archive):
        return path

    zip_file = zipfile.ZipFile(archive)
    if member not in zip_file.namelist():
        return path

    # we have a valid zip archive and a valid member of that archive
    tmp = tempfile.gettempdir()
    extracted_path = os.path.join(tmp, member.split("/")[-1])
    if not os.path.exists(extracted_path):
        # use read + write to avoid the creating nested folders, we only want the file, avoids mkdir racing condition
        with atomic_open(extracted_path) as file_handler:
            file_handler.write(zip_file.read(member))
    return extracted_path


@contextlib.contextmanager
def atomic_open(filename):
    """Write a file to the disk in an atomic fashion"""
    tmp_descriptor, tmp_name = tempfile.mkstemp(dir=os.path.dirname(filename))
    try:
        with os.fdopen(tmp_descriptor, "wb") as tmp_handler:
            yield tmp_handler
        os.replace(tmp_name, filename)
    except BaseException:
        os.remove(tmp_name)
        raise


def from_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. Unless it can not be represented as such, return an
    OrderedDict, e.g.,

    ::

        >>> from_key_val_list([('key', 'val')])
        OrderedDict([('key', 'val')])
        >>> from_key_val_list('string')
        Traceback (most recent call last):
        ...
        ValueError: cannot encode objects that are not 2-tuples
        >>> from_key_val_list({'key': 'val'})
        OrderedDict([('key', 'val')])

    :rtype: OrderedDict
    """
    if value is None:
        return None

    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError("cannot encode objects that are not 2-tuples")

    return OrderedDict(value)


def to_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. If it can be, return a list of tuples, e.g.,

    ::

        >>> to_key_val_list([('key', 'val')])
        [('key', 'val')]
        >>> to_key_val_list({'key': 'val'})
        [('key', 'val')]
        >>> to_key_val_list('string')
        Traceback (most recent call last):
        ...
        ValueError: cannot encode objects that are not 2-tuples

    :rtype: list
    """
    if value is None:
        return None

    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError("cannot encode objects that are not 2-tuples")

    if isinstance(value, Mapping):
        value = value.items()

    return list(value)


# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
    """Parse lists as described by RFC 2068 Section 2.

    In particular, parse comma-separated lists where the elements of
    the list may include quoted-strings.  A quoted-string could
    contain a comma.  A non-quoted string could have quotes in the
    middle.  Quotes are removed automatically after parsing.

    It basically works like :func:`parse_set_header` just that items
    may appear multiple times and case sensitivity is preserved.

    The return value is a standard :class:`list`:

    >>> parse_list_header('token, "quoted value"')
    ['token', 'quoted value']

    To create a header from the :class:`list` again, use the
    :func:`dump_header` function.

    :param value: a string with a list header.
    :return: :class:`list`
    :rtype: list
    """
    result = []
    for item in _parse_list_header(value):
        if item[:1] == item[-1:] == '"':
            item = unquote_header_value(item[1:-1])
        result.append(item)
    return result


# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
    """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
    convert them into a python dict:

    >>> d = parse_dict_header('foo="is a fish", bar="as well"')
    >>> type(d) is dict
    True
    >>> sorted(d.items())
    [('bar', 'as well'), ('foo', 'is a fish')]

    If there is no value for a key it will be `None`:

    >>> parse_dict_header('key_without_value')
    {'key_without_value': None}

    To create a header from the :class:`dict` again, use the
    :func:`dump_header` function.

    :param value: a string with a dict header.
    :return: :class:`dict`
    :rtype: dict
    """
    result = {}
    for item in _parse_list_header(value):
        if "=" not in item:
            result[item] = None
            continue
        name, value = item.split("=", 1)
        if value[:1] == value[-1:] == '"':
            value = unquote_header_value(value[1:-1])
        result[name] = value
    return result


# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
    r"""Unquotes a header value.  (Reversal of :func:`quote_header_value`).
    This does not use the real unquoting but what browsers are actually
    using for quoting.

    :param value: the header value to unquote.
    :rtype: str
    """
    if value and value[0] == value[-1] == '"':
        # this is not the real unquoting, but fixing this so that the
        # RFC is met will result in bugs with internet explorer and
        # probably some other browsers as well.  IE for example is
        # uploading files with "C:\foo\bar.txt" as filename
        value = value[1:-1]

        # if this is a filename and the starting characters look like
        # a UNC path, then just return the value without quotes.  Using the
        # replace sequence below on a UNC path has the effect of turning
        # the leading double slash into a single slash and then
        # _fix_ie_filename() doesn't work correctly.  See #458.
        if not is_filename or value[:2] != "\\\\":
            return value.replace("\\\\", "\\").replace('\\"', '"')
    return value


def dict_from_cookiejar(cj):
    """Returns a key/value dictionary from a CookieJar.

    :param cj: CookieJar object to extract cookies from.
    :rtype: dict
    """

    cookie_dict = {cookie.name: cookie.value for cookie in cj}
    return cookie_dict


def add_dict_to_cookiejar(cj, cookie_dict):
    """Returns a CookieJar from a key/value dictionary.

    :param cj: CookieJar to insert cookies into.
    :param cookie_dict: Dict of key/values to insert into CookieJar.
    :rtype: CookieJar
    """

    return cookiejar_from_dict(cookie_dict, cj)


def get_encodings_from_content(content):
    """Returns encodings from given content string.

    :param content: bytestring to extract encodings from.
    """
    warnings.warn(
        (
            "In requests 3.0, get_encodings_from_content will be removed. For "
            "more information, please see the discussion on issue #2266. (This"
            " warning should only appear once.)"
        ),
        DeprecationWarning,
    )

    # NOTE(review): the vendored copy had these patterns mangled to r']' —
    # the '<meta ...>' markup was stripped during extraction; restored so
    # the function can actually match HTML charset declarations.
    charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
    pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
    xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')

    return (
        charset_re.findall(content)
        + pragma_re.findall(content)
        + xml_re.findall(content)
    )


def _parse_content_type_header(header):
    """Returns content type and parameters from given header

    :param header: string
    :return: tuple containing content type and dictionary of
         parameters
    """

    tokens = header.split(";")
    content_type, params = tokens[0].strip(), tokens[1:]
    params_dict = {}
    items_to_strip = "\"' "

    for param in params:
        param = param.strip()
        if param:
            key, value = param, True
            index_of_equals = param.find("=")
            if index_of_equals != -1:
                key = param[:index_of_equals].strip(items_to_strip)
                value = param[index_of_equals + 1 :].strip(items_to_strip)
            params_dict[key.lower()] = value
    return content_type, params_dict


def get_encoding_from_headers(headers):
    """Returns encodings from given HTTP Header Dict.

    :param headers: dictionary to extract encoding from.
    :rtype: str
    """

    content_type = headers.get("content-type")

    if not content_type:
        return None

    content_type, params = _parse_content_type_header(content_type)

    if "charset" in params:
        return params["charset"].strip("'\"")

    if "text" in content_type:
        return "ISO-8859-1"

    if "application/json" in content_type:
        # Assume UTF-8 based on RFC 4627: https://www.ietf.org/rfc/rfc4627.txt since the charset was unset
        return "utf-8"


def stream_decode_response_unicode(iterator, r):
    """Stream decodes an iterator."""

    if r.encoding is None:
        yield from iterator
        return

    decoder = codecs.getincrementaldecoder(r.encoding)(errors="replace")
    for chunk in iterator:
        rv = decoder.decode(chunk)
        if rv:
            yield rv
    rv = decoder.decode(b"", final=True)
    if rv:
        yield rv


def iter_slices(string, slice_length):
    """Iterate over slices of a string."""
    pos = 0
    if slice_length is None or slice_length <= 0:
        slice_length = len(string)
    while pos < len(string):
        yield string[pos : pos + slice_length]
        pos += slice_length


def get_unicode_from_response(r):
    """Returns the requested content back in unicode.

    :param r: Response object to get unicode content from.

    Tried:

    1. charset from content-type
    2. fall back and replace all unicode characters

    :rtype: str
    """
    warnings.warn(
        (
            "In requests 3.0, get_unicode_from_response will be removed. For "
            "more information, please see the discussion on issue #2266. (This"
            " warning should only appear once.)"
        ),
        DeprecationWarning,
    )

    tried_encodings = []

    # Try charset from content-type
    encoding = get_encoding_from_headers(r.headers)

    if encoding:
        try:
            return str(r.content, encoding)
        except UnicodeError:
            tried_encodings.append(encoding)

    # Fall back:
    try:
        return str(r.content, encoding, errors="replace")
    except TypeError:
        return r.content


# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~"
)


def unquote_unreserved(uri):
    """Un-escape any percent-escape sequences in a URI that are unreserved
    characters. This leaves all reserved, illegal and non-ASCII bytes encoded.

    :rtype: str
    """
    parts = uri.split("%")
    for i in range(1, len(parts)):
        h = parts[i][0:2]
        if len(h) == 2 and h.isalnum():
            try:
                c = chr(int(h, 16))
            except ValueError:
                raise InvalidURL(f"Invalid percent-escape sequence: '{h}'")

            if c in UNRESERVED_SET:
                parts[i] = c + parts[i][2:]
            else:
                parts[i] = f"%{parts[i]}"
        else:
            parts[i] = f"%{parts[i]}"
    return "".join(parts)


def requote_uri(uri):
    """Re-quote the given URI.

    This function passes the given URI through an unquote/quote cycle to
    ensure that it is fully and consistently quoted.

    :rtype: str
    """
    safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
    safe_without_percent = "!#$&'()*+,/:;=?@[]~"
    try:
        # Unquote only the unreserved characters
        # Then quote only illegal characters (do not quote reserved,
        # unreserved, or '%')
        return quote(unquote_unreserved(uri), safe=safe_with_percent)
    except InvalidURL:
        # We couldn't unquote the given URI, so let's try quoting it, but
        # there may be unquoted '%'s in the URI. We need to make sure they're
        # properly quoted so they do not cause issues elsewhere.
        return quote(uri, safe=safe_without_percent)


def address_in_network(ip, net):
    """This function allows you to check if an IP belongs to a network subnet

    Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
             returns False if ip = 192.168.1.1 and net = 192.168.100.0/24

    :rtype: bool
    """
    ipaddr = struct.unpack("=L", socket.inet_aton(ip))[0]
    netaddr, bits = net.split("/")
    netmask = struct.unpack("=L", socket.inet_aton(dotted_netmask(int(bits))))[0]
    network = struct.unpack("=L", socket.inet_aton(netaddr))[0] & netmask
    return (ipaddr & netmask) == (network & netmask)


def dotted_netmask(mask):
    """Converts mask from /xx format to xxx.xxx.xxx.xxx

    Example: if mask is 24 function returns 255.255.255.0

    :rtype: str
    """
    bits = 0xFFFFFFFF ^ (1 << 32 - mask) - 1
    return socket.inet_ntoa(struct.pack(">I", bits))


def is_ipv4_address(string_ip):
    """
    :rtype: bool
    """
    try:
        socket.inet_aton(string_ip)
    except OSError:
        return False
    return True


def is_valid_cidr(string_network):
    """
    Very simple check of the cidr format in no_proxy variable.

    :rtype: bool
    """
    if string_network.count("/") == 1:
        try:
            mask = int(string_network.split("/")[1])
        except ValueError:
            return False

        if mask < 1 or mask > 32:
            return False

        try:
            socket.inet_aton(string_network.split("/")[0])
        except OSError:
            return False
    else:
        return False
    return True


@contextlib.contextmanager
def set_environ(env_name, value):
    """Set the environment variable 'env_name' to 'value'

    Save previous value, yield, and then restore the previous value stored in
    the environment variable 'env_name'.

    If 'value' is None, do nothing"""
    value_changed = value is not None
    if value_changed:
        old_value = os.environ.get(env_name)
        os.environ[env_name] = value
    try:
        yield
    finally:
        if value_changed:
            if old_value is None:
                del os.environ[env_name]
            else:
                os.environ[env_name] = old_value


def should_bypass_proxies(url, no_proxy):
    """
    Returns whether we should bypass proxies or not.

    :rtype: bool
    """

    # Prioritize lowercase environment variables over uppercase
    # to keep a consistent behaviour with other http projects (curl, wget).
    def get_proxy(key):
        return os.environ.get(key) or os.environ.get(key.upper())

    # First check whether no_proxy is defined. If it is, check that the URL
    # we're getting isn't in the no_proxy list.
    no_proxy_arg = no_proxy
    if no_proxy is None:
        no_proxy = get_proxy("no_proxy")
    parsed = urlparse(url)

    if parsed.hostname is None:
        # URLs don't always have hostnames, e.g. file:/// urls.
        return True

    if no_proxy:
        # We need to check whether we match here. We need to see if we match
        # the end of the hostname, both with and without the port.
        no_proxy = (host for host in no_proxy.replace(" ", "").split(",") if host)

        if is_ipv4_address(parsed.hostname):
            for proxy_ip in no_proxy:
                if is_valid_cidr(proxy_ip):
                    if address_in_network(parsed.hostname, proxy_ip):
                        return True
                elif parsed.hostname == proxy_ip:
                    # If no_proxy ip was defined in plain IP notation instead of cidr notation &
                    # matches the IP of the index
                    return True
        else:
            host_with_port = parsed.hostname
            if parsed.port:
                host_with_port += f":{parsed.port}"

            for host in no_proxy:
                if parsed.hostname.endswith(host) or host_with_port.endswith(host):
                    # The URL does match something in no_proxy, so we don't want
                    # to apply the proxies on this URL.
                    return True

    with set_environ("no_proxy", no_proxy_arg):
        # parsed.hostname can be `None` in cases such as a file URI.
        try:
            bypass = proxy_bypass(parsed.hostname)
        except (TypeError, socket.gaierror):
            bypass = False

    if bypass:
        return True

    return False


def get_environ_proxies(url, no_proxy=None):
    """
    Return a dict of environment proxies.

    :rtype: dict
    """
    if should_bypass_proxies(url, no_proxy=no_proxy):
        return {}
    else:
        return getproxies()


def select_proxy(url, proxies):
    """Select a proxy for the url, if applicable.

    :param url: The url being for the request
    :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
    """
    proxies = proxies or {}
    urlparts = urlparse(url)
    if urlparts.hostname is None:
        return proxies.get(urlparts.scheme, proxies.get("all"))

    proxy_keys = [
        urlparts.scheme + "://" + urlparts.hostname,
        urlparts.scheme,
        "all://" + urlparts.hostname,
        "all",
    ]
    proxy = None
    for proxy_key in proxy_keys:
        if proxy_key in proxies:
            proxy = proxies[proxy_key]
            break

    return proxy


def resolve_proxies(request, proxies, trust_env=True):
    """This method takes proxy information from a request and configuration
    input to resolve a mapping of target proxies. This will consider settings
    such as NO_PROXY to strip proxy configurations.

    :param request: Request or PreparedRequest
    :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
    :param trust_env: Boolean declaring whether to trust environment configs

    :rtype: dict
    """
    proxies = proxies if proxies is not None else {}
    url = request.url
    scheme = urlparse(url).scheme
    no_proxy = proxies.get("no_proxy")
    new_proxies = proxies.copy()

    if trust_env and not should_bypass_proxies(url, no_proxy=no_proxy):
        environ_proxies = get_environ_proxies(url, no_proxy=no_proxy)

        proxy = environ_proxies.get(scheme, environ_proxies.get("all"))

        if proxy:
            new_proxies.setdefault(scheme, proxy)
    return new_proxies


def default_user_agent(name="python-requests"):
    """
    Return a string representing the default user agent.

    :rtype: str
    """
    return f"{name}/{__version__}"


def default_headers():
    """
    :rtype: requests.structures.CaseInsensitiveDict
    """
    return CaseInsensitiveDict(
        {
            "User-Agent": default_user_agent(),
            "Accept-Encoding": DEFAULT_ACCEPT_ENCODING,
            "Accept": "*/*",
            "Connection": "keep-alive",
        }
    )


def parse_header_links(value):
    """Return a list of parsed link headers proxies.

    i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"

    :rtype: list
    """

    links = []

    replace_chars = " '\""

    value = value.strip(replace_chars)
    if not value:
        return links

    for val in re.split(", *<", value):
        try:
            url, params = val.split(";", 1)
        except ValueError:
            url, params = val, ""

        link = {"url": url.strip("<> '\"")}

        for param in params.split(";"):
            try:
                key, value = param.split("=")
            except ValueError:
                break

            link[key.strip(replace_chars)] = value.strip(replace_chars)

        links.append(link)

    return links


# Null bytes; no need to recreate these on each call to guess_json_utf
_null = "\x00".encode("ascii")  # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3


def guess_json_utf(data):
    """
    :rtype: str
    """
    # JSON always starts with two ASCII characters, so detection is as
    # easy as counting the nulls and from their location and count
    # determine the encoding. Also detect a BOM, if present.
    sample = data[:4]
    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
        return "utf-32"  # BOM included
    if sample[:3] == codecs.BOM_UTF8:
        return "utf-8-sig"  # BOM included, MS style (discouraged)
    if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
        return "utf-16"  # BOM included
    nullcount = sample.count(_null)
    if nullcount == 0:
        return "utf-8"
    if nullcount == 2:
        if sample[::2] == _null2:  # 1st and 3rd are null
            return "utf-16-be"
        if sample[1::2] == _null2:  # 2nd and 4th are null
            return "utf-16-le"
        # Did not detect 2 valid UTF-16 ascii-range characters
    if nullcount == 3:
        if sample[:3] == _null3:
            return "utf-32-be"
        if sample[1:] == _null3:
            return "utf-32-le"
        # Did not detect a valid UTF-32 ascii-range character
    return None


def prepend_scheme_if_needed(url, new_scheme):
    """Given a URL that may or may not have a scheme, prepend the given scheme.
    Does not replace a present scheme with the one provided as an argument.

    :rtype: str
    """
    parsed = parse_url(url)
    scheme, auth, host, port, path, query, fragment = parsed

    # A defect in urlparse determines that there isn't a netloc present in some
    # urls. We previously assumed parsing was overly cautious, and swapped the
    # netloc and path. Due to a lack of tests on the original defect, this is
    # maintained with parse_url for backwards compatibility.
    netloc = parsed.netloc
    if not netloc:
        netloc, path = path, netloc

    if auth:
        # parse_url doesn't provide the netloc with auth
        # so we'll add it ourselves.
        netloc = "@".join([auth, netloc])
    if scheme is None:
        scheme = new_scheme
    if path is None:
        path = ""

    return urlunparse((scheme, netloc, path, "", query, fragment))


def get_auth_from_url(url):
    """Given a url with authentication components, extract them into a tuple of
    username,password.

    :rtype: (str,str)
    """
    parsed = urlparse(url)

    try:
        auth = (unquote(parsed.username), unquote(parsed.password))
    except (AttributeError, TypeError):
        auth = ("", "")

    return auth


def check_header_validity(header):
    """Verifies that header parts don't contain leading whitespace
    reserved characters, or return characters.

    :param header: tuple, in the format (name, value).
    """
    name, value = header
    _validate_header_part(header, name, 0)
    _validate_header_part(header, value, 1)


def _validate_header_part(header, header_part, header_validator_index):
    if isinstance(header_part, str):
        validator = _HEADER_VALIDATORS_STR[header_validator_index]
    elif isinstance(header_part, bytes):
        validator = _HEADER_VALIDATORS_BYTE[header_validator_index]
    else:
        raise InvalidHeader(
            f"Header part ({header_part!r}) from {header} "
            f"must be of type str or bytes, not {type(header_part)}"
        )

    if not validator.match(header_part):
        header_kind = "name" if header_validator_index == 0 else "value"
        raise InvalidHeader(
            f"Invalid leading whitespace, reserved character(s), or return "
            f"character(s) in header {header_kind}: {header_part!r}"
        )


def urldefragauth(url):
    """
    Given a url remove the fragment and the authentication part.

    :rtype: str
    """
    scheme, netloc, path, params, query, fragment = urlparse(url)

    # see func:`prepend_scheme_if_needed`
    if not netloc:
        netloc, path = path, netloc

    netloc = netloc.rsplit("@", 1)[-1]

    return urlunparse((scheme, netloc, path, params, query, ""))


def rewind_body(prepared_request):
    """Move file pointer back to its recorded starting position
    so it can be read again on redirect.
    """
    body_seek = getattr(prepared_request.body, "seek", None)
    if body_seek is not None and isinstance(
        prepared_request._body_position, integer_types
    ):
        try:
            body_seek(prepared_request._body_position)
        except OSError:
            raise UnrewindableBodyError(
                "An error occurred when rewinding request body for redirect."
            )
    else:
        raise UnrewindableBodyError("Unable to rewind request body for redirect.")

# NOTE(review): the remainder of this diff chunk is metadata for binary
# artifacts (compiled .pyc files tracked via git-LFS and a pip INSTALLER
# marker), preserved here verbatim as comments:
# diff --git a/parrot/lib/python3.10/site-packages/safetensors/__pycache__/tensorflow.cpython-310.pyc b/parrot/lib/python3.10/site-packages/safetensors/__pycache__/tensorflow.cpython-310.pyc
# new file mode 100644 index 0000000000000000000000000000000000000000..fe1d6fe7b6fa93f3be2b5f2195c3cdab12dbaf55
# Binary files /dev/null and b/parrot/lib/python3.10/site-packages/safetensors/__pycache__/tensorflow.cpython-310.pyc differ
# diff --git a/parrot/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_tf_objects.cpython-310.pyc b/parrot/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_tf_objects.cpython-310.pyc
# new file mode 100644 index 0000000000000000000000000000000000000000..a210a6a41565c90510e675428d0d92daa1266d3d
# --- /dev/null
# +++ b/parrot/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_tf_objects.cpython-310.pyc
# @@ -0,0 +1,3 @@
# +version https://git-lfs.github.com/spec/v1
# +oid sha256:12a7898e6592d52a991e372d7d2b9fc335fc68d1a974206d8e565eaad660ea5e
# +size 100861
# diff --git a/parrot/lib/python3.10/site-packages/xxhash-3.5.0.dist-info/INSTALLER b/parrot/lib/python3.10/site-packages/xxhash-3.5.0.dist-info/INSTALLER
# new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
# --- /dev/null
# +++ b/parrot/lib/python3.10/site-packages/xxhash-3.5.0.dist-info/INSTALLER
# @@ -0,0 +1 @@
# +pip