diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/jinja2/__pycache__/visitor.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/jinja2/__pycache__/visitor.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3d2fab38d68e49e825e755e53cd1d8913562e300
Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/jinja2/__pycache__/visitor.cpython-311.pyc differ
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/jinja2/_identifier.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/jinja2/_identifier.py
new file mode 100644
index 0000000000000000000000000000000000000000..928c1503c7d414a8a86bbf5a82c68d42cb089bd2
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/jinja2/_identifier.py
@@ -0,0 +1,6 @@
+import re
+
+# generated by scripts/generate_identifier_pattern.py
+pattern = re.compile(
+ r"[\w·̀-ͯ·҃-֑҇-ׇֽֿׁׂׅׄؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ܑۭܰ-݊ަ-ް߫-߽߳ࠖ-࠙ࠛ-ࠣࠥ-ࠧࠩ-࡙࠭-࡛࣓-ࣣ࣡-ःऺ-़ा-ॏ॑-ॗॢॣঁ-ঃ়া-ৄেৈো-্ৗৢৣ৾ਁ-ਃ਼ਾ-ੂੇੈੋ-੍ੑੰੱੵઁ-ઃ઼ા-ૅે-ૉો-્ૢૣૺ-૿ଁ-ଃ଼ା-ୄେୈୋ-୍ୖୗୢୣஂா-ூெ-ைொ-்ௗఀ-ఄా-ౄె-ైొ-్ౕౖౢౣಁ-ಃ಼ಾ-ೄೆ-ೈೊ-್ೕೖೢೣഀ-ഃ഻഼ാ-ൄെ-ൈൊ-്ൗൢൣංඃ්ා-ුූෘ-ෟෲෳัิ-ฺ็-๎ັິ-ູົຼ່-ໍ༹༘༙༵༷༾༿ཱ-྄྆྇ྍ-ྗྙ-ྼ࿆ါ-ှၖ-ၙၞ-ၠၢ-ၤၧ-ၭၱ-ၴႂ-ႍႏႚ-ႝ፝-፟ᜒ-᜔ᜲ-᜴ᝒᝓᝲᝳ឴-៓៝᠋-᠍ᢅᢆᢩᤠ-ᤫᤰ-᤻ᨗ-ᨛᩕ-ᩞ᩠-᩿᩼᪰-᪽ᬀ-ᬄ᬴-᭄᭫-᭳ᮀ-ᮂᮡ-ᮭ᯦-᯳ᰤ-᰷᳐-᳔᳒-᳨᳭ᳲ-᳴᳷-᳹᷀-᷹᷻-᷿‿⁀⁔⃐-⃥⃜⃡-⃰℘℮⳯-⵿⳱ⷠ-〪ⷿ-゙゚〯꙯ꙴ-꙽ꚞꚟ꛰꛱ꠂ꠆ꠋꠣ-ꠧꢀꢁꢴ-ꣅ꣠-꣱ꣿꤦ-꤭ꥇ-꥓ꦀ-ꦃ꦳-꧀ꧥꨩ-ꨶꩃꩌꩍꩻ-ꩽꪰꪲ-ꪴꪷꪸꪾ꪿꫁ꫫ-ꫯꫵ꫶ꯣ-ꯪ꯬꯭ﬞ︀-️︠-︯︳︴﹍-﹏_𐇽𐋠𐍶-𐍺𐨁-𐨃𐨅𐨆𐨌-𐨏𐨸-𐨿𐨺𐫦𐫥𐴤-𐽆𐴧-𐽐𑀀-𑀂𑀸-𑁆𑁿-𑂂𑂰-𑂺𑄀-𑄂𑄧-𑄴𑅅𑅆𑅳𑆀-𑆂𑆳-𑇀𑇉-𑇌𑈬-𑈷𑈾𑋟-𑋪𑌀-𑌃𑌻𑌼𑌾-𑍄𑍇𑍈𑍋-𑍍𑍗𑍢𑍣𑍦-𑍬𑍰-𑍴𑐵-𑑆𑑞𑒰-𑓃𑖯-𑖵𑖸-𑗀𑗜𑗝𑘰-𑙀𑚫-𑚷𑜝-𑜫𑠬-𑠺𑨁-𑨊𑨳-𑨹𑨻-𑨾𑩇𑩑-𑩛𑪊-𑪙𑰯-𑰶𑰸-𑰿𑲒-𑲧𑲩-𑲶𑴱-𑴶𑴺𑴼𑴽𑴿-𑵅𑵇𑶊-𑶎𑶐𑶑𑶓-𑶗𑻳-𑻶𖫰-𖫴𖬰-𖬶𖽑-𖽾𖾏-𖾒𛲝𛲞𝅥-𝅩𝅭-𝅲𝅻-𝆂𝆅-𝆋𝆪-𝆭𝉂-𝉄𝨀-𝨶𝨻-𝩬𝩵𝪄𝪛-𝪟𝪡-𝪯𞀀-𞀆𞀈-𞀘𞀛-𞀡𞀣𞀤𞀦-𞣐𞀪-𞣖𞥄-𞥊󠄀-󠇯]+" # noqa: B950
+)
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/jinja2/async_utils.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/jinja2/async_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..715d70119bba3f02350827da184cedbd08f65185
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/jinja2/async_utils.py
@@ -0,0 +1,84 @@
+import inspect
+import typing as t
+from functools import WRAPPER_ASSIGNMENTS
+from functools import wraps
+
+from .utils import _PassArg
+from .utils import pass_eval_context
+
+V = t.TypeVar("V")
+
+
+def async_variant(normal_func): # type: ignore
+ def decorator(async_func): # type: ignore
+ pass_arg = _PassArg.from_obj(normal_func)
+ need_eval_context = pass_arg is None
+
+ if pass_arg is _PassArg.environment:
+
+ def is_async(args: t.Any) -> bool:
+ return t.cast(bool, args[0].is_async)
+
+ else:
+
+ def is_async(args: t.Any) -> bool:
+ return t.cast(bool, args[0].environment.is_async)
+
+ # Take the doc and annotations from the sync function, but the
+ # name from the async function. Pallets-Sphinx-Themes
+ # build_function_directive expects __wrapped__ to point to the
+ # sync function.
+ async_func_attrs = ("__module__", "__name__", "__qualname__")
+ normal_func_attrs = tuple(set(WRAPPER_ASSIGNMENTS).difference(async_func_attrs))
+
+ @wraps(normal_func, assigned=normal_func_attrs)
+ @wraps(async_func, assigned=async_func_attrs, updated=())
+ def wrapper(*args, **kwargs): # type: ignore
+ b = is_async(args)
+
+ if need_eval_context:
+ args = args[1:]
+
+ if b:
+ return async_func(*args, **kwargs)
+
+ return normal_func(*args, **kwargs)
+
+ if need_eval_context:
+ wrapper = pass_eval_context(wrapper)
+
+ wrapper.jinja_async_variant = True
+ return wrapper
+
+ return decorator
+
+
+_common_primitives = {int, float, bool, str, list, dict, tuple, type(None)}
+
+
+async def auto_await(value: t.Union[t.Awaitable["V"], "V"]) -> "V":
+ # Avoid a costly call to isawaitable
+ if type(value) in _common_primitives:
+ return t.cast("V", value)
+
+ if inspect.isawaitable(value):
+ return await t.cast("t.Awaitable[V]", value)
+
+ return t.cast("V", value)
+
+
+async def auto_aiter(
+ iterable: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
+) -> "t.AsyncIterator[V]":
+ if hasattr(iterable, "__aiter__"):
+ async for item in t.cast("t.AsyncIterable[V]", iterable):
+ yield item
+ else:
+ for item in iterable:
+ yield item
+
+
+async def auto_to_list(
+ value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
+) -> t.List["V"]:
+ return [x async for x in auto_aiter(value)]
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/jinja2/constants.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/jinja2/constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..41a1c23b0a7fe134b1f662545876eb65b31b071e
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/jinja2/constants.py
@@ -0,0 +1,20 @@
+#: list of lorem ipsum words used by the lipsum() helper function
+LOREM_IPSUM_WORDS = """\
+a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at
+auctor augue bibendum blandit class commodo condimentum congue consectetuer
+consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus
+diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend
+elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames
+faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac
+hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum
+justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem
+luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie
+mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non
+nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque
+penatibus per pharetra phasellus placerat platea porta porttitor posuere
+potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus
+ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit
+sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor
+tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices
+ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus
+viverra volutpat vulputate"""
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/jinja2/ext.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/jinja2/ext.py
new file mode 100644
index 0000000000000000000000000000000000000000..fade1fa3bc89b0d85d6ae3915277b835c67d7030
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/jinja2/ext.py
@@ -0,0 +1,869 @@
+"""Extension API for adding custom tags and behavior."""
+import pprint
+import re
+import typing as t
+
+from markupsafe import Markup
+
+from . import defaults
+from . import nodes
+from .environment import Environment
+from .exceptions import TemplateAssertionError
+from .exceptions import TemplateSyntaxError
+from .runtime import concat # type: ignore
+from .runtime import Context
+from .runtime import Undefined
+from .utils import import_string
+from .utils import pass_context
+
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+ from .lexer import Token
+ from .lexer import TokenStream
+ from .parser import Parser
+
+ class _TranslationsBasic(te.Protocol):
+ def gettext(self, message: str) -> str:
+ ...
+
+ def ngettext(self, singular: str, plural: str, n: int) -> str:
+ pass
+
+ class _TranslationsContext(_TranslationsBasic):
+ def pgettext(self, context: str, message: str) -> str:
+ ...
+
+ def npgettext(self, context: str, singular: str, plural: str, n: int) -> str:
+ ...
+
+ _SupportedTranslations = t.Union[_TranslationsBasic, _TranslationsContext]
+
+
+# I18N functions available in Jinja templates. If the I18N library
+# provides ugettext, it will be assigned to gettext.
+GETTEXT_FUNCTIONS: t.Tuple[str, ...] = (
+ "_",
+ "gettext",
+ "ngettext",
+ "pgettext",
+ "npgettext",
+)
+_ws_re = re.compile(r"\s*\n\s*")
+
+
+class Extension:
+ """Extensions can be used to add extra functionality to the Jinja template
+ system at the parser level. Custom extensions are bound to an environment
+ but may not store environment specific data on `self`. The reason for
+ this is that an extension can be bound to another environment (for
+ overlays) by creating a copy and reassigning the `environment` attribute.
+
+ As extensions are created by the environment they cannot accept any
+ arguments for configuration. One may want to work around that by using
+ a factory function, but that is not possible as extensions are identified
+ by their import name. The correct way to configure the extension is
+ storing the configuration values on the environment. Because this way the
+ environment ends up acting as central configuration storage the
+ attributes may clash which is why extensions have to ensure that the names
+ they choose for configuration are not too generic. ``prefix`` for example
+ is a terrible name, ``fragment_cache_prefix`` on the other hand is a good
+ name as includes the name of the extension (fragment cache).
+ """
+
+ identifier: t.ClassVar[str]
+
+ def __init_subclass__(cls) -> None:
+ cls.identifier = f"{cls.__module__}.{cls.__name__}"
+
+ #: if this extension parses this is the list of tags it's listening to.
+ tags: t.Set[str] = set()
+
+ #: the priority of that extension. This is especially useful for
+ #: extensions that preprocess values. A lower value means higher
+ #: priority.
+ #:
+ #: .. versionadded:: 2.4
+ priority = 100
+
+ def __init__(self, environment: Environment) -> None:
+ self.environment = environment
+
+ def bind(self, environment: Environment) -> "Extension":
+ """Create a copy of this extension bound to another environment."""
+ rv = object.__new__(self.__class__)
+ rv.__dict__.update(self.__dict__)
+ rv.environment = environment
+ return rv
+
+ def preprocess(
+ self, source: str, name: t.Optional[str], filename: t.Optional[str] = None
+ ) -> str:
+ """This method is called before the actual lexing and can be used to
+ preprocess the source. The `filename` is optional. The return value
+ must be the preprocessed source.
+ """
+ return source
+
+ def filter_stream(
+ self, stream: "TokenStream"
+ ) -> t.Union["TokenStream", t.Iterable["Token"]]:
+ """It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
+ to filter tokens returned. This method has to return an iterable of
+ :class:`~jinja2.lexer.Token`\\s, but it doesn't have to return a
+ :class:`~jinja2.lexer.TokenStream`.
+ """
+ return stream
+
+ def parse(self, parser: "Parser") -> t.Union[nodes.Node, t.List[nodes.Node]]:
+ """If any of the :attr:`tags` matched this method is called with the
+ parser as first argument. The token the parser stream is pointing at
+ is the name token that matched. This method has to return one or a
+ list of multiple nodes.
+ """
+ raise NotImplementedError()
+
+ def attr(
+ self, name: str, lineno: t.Optional[int] = None
+ ) -> nodes.ExtensionAttribute:
+ """Return an attribute node for the current extension. This is useful
+ to pass constants on extensions to generated template code.
+
+ ::
+
+ self.attr('_my_attribute', lineno=lineno)
+ """
+ return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
+
+ def call_method(
+ self,
+ name: str,
+ args: t.Optional[t.List[nodes.Expr]] = None,
+ kwargs: t.Optional[t.List[nodes.Keyword]] = None,
+ dyn_args: t.Optional[nodes.Expr] = None,
+ dyn_kwargs: t.Optional[nodes.Expr] = None,
+ lineno: t.Optional[int] = None,
+ ) -> nodes.Call:
+ """Call a method of the extension. This is a shortcut for
+ :meth:`attr` + :class:`jinja2.nodes.Call`.
+ """
+ if args is None:
+ args = []
+ if kwargs is None:
+ kwargs = []
+ return nodes.Call(
+ self.attr(name, lineno=lineno),
+ args,
+ kwargs,
+ dyn_args,
+ dyn_kwargs,
+ lineno=lineno,
+ )
+
+
+@pass_context
+def _gettext_alias(
+ __context: Context, *args: t.Any, **kwargs: t.Any
+) -> t.Union[t.Any, Undefined]:
+ return __context.call(__context.resolve("gettext"), *args, **kwargs)
+
+
+def _make_new_gettext(func: t.Callable[[str], str]) -> t.Callable[..., str]:
+ @pass_context
+ def gettext(__context: Context, __string: str, **variables: t.Any) -> str:
+ rv = __context.call(func, __string)
+ if __context.eval_ctx.autoescape:
+ rv = Markup(rv)
+ # Always treat as a format string, even if there are no
+ # variables. This makes translation strings more consistent
+ # and predictable. This requires escaping
+ return rv % variables # type: ignore
+
+ return gettext
+
+
+def _make_new_ngettext(func: t.Callable[[str, str, int], str]) -> t.Callable[..., str]:
+ @pass_context
+ def ngettext(
+ __context: Context,
+ __singular: str,
+ __plural: str,
+ __num: int,
+ **variables: t.Any,
+ ) -> str:
+ variables.setdefault("num", __num)
+ rv = __context.call(func, __singular, __plural, __num)
+ if __context.eval_ctx.autoescape:
+ rv = Markup(rv)
+ # Always treat as a format string, see gettext comment above.
+ return rv % variables # type: ignore
+
+ return ngettext
+
+
+def _make_new_pgettext(func: t.Callable[[str, str], str]) -> t.Callable[..., str]:
+ @pass_context
+ def pgettext(
+ __context: Context, __string_ctx: str, __string: str, **variables: t.Any
+ ) -> str:
+ variables.setdefault("context", __string_ctx)
+ rv = __context.call(func, __string_ctx, __string)
+
+ if __context.eval_ctx.autoescape:
+ rv = Markup(rv)
+
+ # Always treat as a format string, see gettext comment above.
+ return rv % variables # type: ignore
+
+ return pgettext
+
+
+def _make_new_npgettext(
+ func: t.Callable[[str, str, str, int], str]
+) -> t.Callable[..., str]:
+ @pass_context
+ def npgettext(
+ __context: Context,
+ __string_ctx: str,
+ __singular: str,
+ __plural: str,
+ __num: int,
+ **variables: t.Any,
+ ) -> str:
+ variables.setdefault("context", __string_ctx)
+ variables.setdefault("num", __num)
+ rv = __context.call(func, __string_ctx, __singular, __plural, __num)
+
+ if __context.eval_ctx.autoescape:
+ rv = Markup(rv)
+
+ # Always treat as a format string, see gettext comment above.
+ return rv % variables # type: ignore
+
+ return npgettext
+
+
+class InternationalizationExtension(Extension):
+ """This extension adds gettext support to Jinja."""
+
+ tags = {"trans"}
+
+ # TODO: the i18n extension is currently reevaluating values in a few
+ # situations. Take this example:
+ # {% trans count=something() %}{{ count }} foo{% pluralize
+ # %}{{ count }} fooss{% endtrans %}
+ # something is called twice here. One time for the gettext value and
+ # the other time for the n-parameter of the ngettext function.
+
+ def __init__(self, environment: Environment) -> None:
+ super().__init__(environment)
+ environment.globals["_"] = _gettext_alias
+ environment.extend(
+ install_gettext_translations=self._install,
+ install_null_translations=self._install_null,
+ install_gettext_callables=self._install_callables,
+ uninstall_gettext_translations=self._uninstall,
+ extract_translations=self._extract,
+ newstyle_gettext=False,
+ )
+
+ def _install(
+ self, translations: "_SupportedTranslations", newstyle: t.Optional[bool] = None
+ ) -> None:
+ # ugettext and ungettext are preferred in case the I18N library
+ # is providing compatibility with older Python versions.
+ gettext = getattr(translations, "ugettext", None)
+ if gettext is None:
+ gettext = translations.gettext
+ ngettext = getattr(translations, "ungettext", None)
+ if ngettext is None:
+ ngettext = translations.ngettext
+
+ pgettext = getattr(translations, "pgettext", None)
+ npgettext = getattr(translations, "npgettext", None)
+ self._install_callables(
+ gettext, ngettext, newstyle=newstyle, pgettext=pgettext, npgettext=npgettext
+ )
+
+ def _install_null(self, newstyle: t.Optional[bool] = None) -> None:
+ import gettext
+
+ translations = gettext.NullTranslations()
+
+ if hasattr(translations, "pgettext"):
+ # Python < 3.8
+ pgettext = translations.pgettext
+ else:
+
+ def pgettext(c: str, s: str) -> str:
+ return s
+
+ if hasattr(translations, "npgettext"):
+ npgettext = translations.npgettext
+ else:
+
+ def npgettext(c: str, s: str, p: str, n: int) -> str:
+ return s if n == 1 else p
+
+ self._install_callables(
+ gettext=translations.gettext,
+ ngettext=translations.ngettext,
+ newstyle=newstyle,
+ pgettext=pgettext,
+ npgettext=npgettext,
+ )
+
+ def _install_callables(
+ self,
+ gettext: t.Callable[[str], str],
+ ngettext: t.Callable[[str, str, int], str],
+ newstyle: t.Optional[bool] = None,
+ pgettext: t.Optional[t.Callable[[str, str], str]] = None,
+ npgettext: t.Optional[t.Callable[[str, str, str, int], str]] = None,
+ ) -> None:
+ if newstyle is not None:
+ self.environment.newstyle_gettext = newstyle # type: ignore
+ if self.environment.newstyle_gettext: # type: ignore
+ gettext = _make_new_gettext(gettext)
+ ngettext = _make_new_ngettext(ngettext)
+
+ if pgettext is not None:
+ pgettext = _make_new_pgettext(pgettext)
+
+ if npgettext is not None:
+ npgettext = _make_new_npgettext(npgettext)
+
+ self.environment.globals.update(
+ gettext=gettext, ngettext=ngettext, pgettext=pgettext, npgettext=npgettext
+ )
+
+ def _uninstall(self, translations: "_SupportedTranslations") -> None:
+ for key in ("gettext", "ngettext", "pgettext", "npgettext"):
+ self.environment.globals.pop(key, None)
+
+ def _extract(
+ self,
+ source: t.Union[str, nodes.Template],
+ gettext_functions: t.Sequence[str] = GETTEXT_FUNCTIONS,
+ ) -> t.Iterator[
+ t.Tuple[int, str, t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]]]
+ ]:
+ if isinstance(source, str):
+ source = self.environment.parse(source)
+ return extract_from_ast(source, gettext_functions)
+
+ def parse(self, parser: "Parser") -> t.Union[nodes.Node, t.List[nodes.Node]]:
+ """Parse a translatable tag."""
+ lineno = next(parser.stream).lineno
+
+ context = None
+ context_token = parser.stream.next_if("string")
+
+ if context_token is not None:
+ context = context_token.value
+
+ # find all the variables referenced. Additionally a variable can be
+ # defined in the body of the trans block too, but this is checked at
+ # a later state.
+ plural_expr: t.Optional[nodes.Expr] = None
+ plural_expr_assignment: t.Optional[nodes.Assign] = None
+ num_called_num = False
+ variables: t.Dict[str, nodes.Expr] = {}
+ trimmed = None
+ while parser.stream.current.type != "block_end":
+ if variables:
+ parser.stream.expect("comma")
+
+ # skip colon for python compatibility
+ if parser.stream.skip_if("colon"):
+ break
+
+ token = parser.stream.expect("name")
+ if token.value in variables:
+ parser.fail(
+ f"translatable variable {token.value!r} defined twice.",
+ token.lineno,
+ exc=TemplateAssertionError,
+ )
+
+ # expressions
+ if parser.stream.current.type == "assign":
+ next(parser.stream)
+ variables[token.value] = var = parser.parse_expression()
+ elif trimmed is None and token.value in ("trimmed", "notrimmed"):
+ trimmed = token.value == "trimmed"
+ continue
+ else:
+ variables[token.value] = var = nodes.Name(token.value, "load")
+
+ if plural_expr is None:
+ if isinstance(var, nodes.Call):
+ plural_expr = nodes.Name("_trans", "load")
+ variables[token.value] = plural_expr
+ plural_expr_assignment = nodes.Assign(
+ nodes.Name("_trans", "store"), var
+ )
+ else:
+ plural_expr = var
+ num_called_num = token.value == "num"
+
+ parser.stream.expect("block_end")
+
+ plural = None
+ have_plural = False
+ referenced = set()
+
+ # now parse until endtrans or pluralize
+ singular_names, singular = self._parse_block(parser, True)
+ if singular_names:
+ referenced.update(singular_names)
+ if plural_expr is None:
+ plural_expr = nodes.Name(singular_names[0], "load")
+ num_called_num = singular_names[0] == "num"
+
+ # if we have a pluralize block, we parse that too
+ if parser.stream.current.test("name:pluralize"):
+ have_plural = True
+ next(parser.stream)
+ if parser.stream.current.type != "block_end":
+ token = parser.stream.expect("name")
+ if token.value not in variables:
+ parser.fail(
+ f"unknown variable {token.value!r} for pluralization",
+ token.lineno,
+ exc=TemplateAssertionError,
+ )
+ plural_expr = variables[token.value]
+ num_called_num = token.value == "num"
+ parser.stream.expect("block_end")
+ plural_names, plural = self._parse_block(parser, False)
+ next(parser.stream)
+ referenced.update(plural_names)
+ else:
+ next(parser.stream)
+
+ # register free names as simple name expressions
+ for name in referenced:
+ if name not in variables:
+ variables[name] = nodes.Name(name, "load")
+
+ if not have_plural:
+ plural_expr = None
+ elif plural_expr is None:
+ parser.fail("pluralize without variables", lineno)
+
+ if trimmed is None:
+ trimmed = self.environment.policies["ext.i18n.trimmed"]
+ if trimmed:
+ singular = self._trim_whitespace(singular)
+ if plural:
+ plural = self._trim_whitespace(plural)
+
+ node = self._make_node(
+ singular,
+ plural,
+ context,
+ variables,
+ plural_expr,
+ bool(referenced),
+ num_called_num and have_plural,
+ )
+ node.set_lineno(lineno)
+ if plural_expr_assignment is not None:
+ return [plural_expr_assignment, node]
+ else:
+ return node
+
+ def _trim_whitespace(self, string: str, _ws_re: t.Pattern[str] = _ws_re) -> str:
+ return _ws_re.sub(" ", string.strip())
+
+ def _parse_block(
+ self, parser: "Parser", allow_pluralize: bool
+ ) -> t.Tuple[t.List[str], str]:
+ """Parse until the next block tag with a given name."""
+ referenced = []
+ buf = []
+
+ while True:
+ if parser.stream.current.type == "data":
+ buf.append(parser.stream.current.value.replace("%", "%%"))
+ next(parser.stream)
+ elif parser.stream.current.type == "variable_begin":
+ next(parser.stream)
+ name = parser.stream.expect("name").value
+ referenced.append(name)
+ buf.append(f"%({name})s")
+ parser.stream.expect("variable_end")
+ elif parser.stream.current.type == "block_begin":
+ next(parser.stream)
+ block_name = (
+ parser.stream.current.value
+ if parser.stream.current.type == "name"
+ else None
+ )
+ if block_name == "endtrans":
+ break
+ elif block_name == "pluralize":
+ if allow_pluralize:
+ break
+ parser.fail(
+ "a translatable section can have only one pluralize section"
+ )
+ elif block_name == "trans":
+ parser.fail(
+ "trans blocks can't be nested; did you mean `endtrans`?"
+ )
+ parser.fail(
+ f"control structures in translatable sections are not allowed; "
+ f"saw `{block_name}`"
+ )
+ elif parser.stream.eos:
+ parser.fail("unclosed translation block")
+ else:
+ raise RuntimeError("internal parser error")
+
+ return referenced, concat(buf)
+
+ def _make_node(
+ self,
+ singular: str,
+ plural: t.Optional[str],
+ context: t.Optional[str],
+ variables: t.Dict[str, nodes.Expr],
+ plural_expr: t.Optional[nodes.Expr],
+ vars_referenced: bool,
+ num_called_num: bool,
+ ) -> nodes.Output:
+ """Generates a useful node from the data provided."""
+ newstyle = self.environment.newstyle_gettext # type: ignore
+ node: nodes.Expr
+
+ # no variables referenced? no need to escape for old style
+ # gettext invocations only if there are vars.
+ if not vars_referenced and not newstyle:
+ singular = singular.replace("%%", "%")
+ if plural:
+ plural = plural.replace("%%", "%")
+
+ func_name = "gettext"
+ func_args: t.List[nodes.Expr] = [nodes.Const(singular)]
+
+ if context is not None:
+ func_args.insert(0, nodes.Const(context))
+ func_name = f"p{func_name}"
+
+ if plural_expr is not None:
+ func_name = f"n{func_name}"
+ func_args.extend((nodes.Const(plural), plural_expr))
+
+ node = nodes.Call(nodes.Name(func_name, "load"), func_args, [], None, None)
+
+ # in case newstyle gettext is used, the method is powerful
+ # enough to handle the variable expansion and autoescape
+ # handling itself
+ if newstyle:
+ for key, value in variables.items():
+ # the function adds that later anyways in case num was
+ # called num, so just skip it.
+ if num_called_num and key == "num":
+ continue
+ node.kwargs.append(nodes.Keyword(key, value))
+
+ # otherwise do that here
+ else:
+ # mark the return value as safe if we are in an
+ # environment with autoescaping turned on
+ node = nodes.MarkSafeIfAutoescape(node)
+ if variables:
+ node = nodes.Mod(
+ node,
+ nodes.Dict(
+ [
+ nodes.Pair(nodes.Const(key), value)
+ for key, value in variables.items()
+ ]
+ ),
+ )
+ return nodes.Output([node])
+
+
+class ExprStmtExtension(Extension):
+ """Adds a `do` tag to Jinja that works like the print statement just
+ that it doesn't print the return value.
+ """
+
+ tags = {"do"}
+
+ def parse(self, parser: "Parser") -> nodes.ExprStmt:
+ node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
+ node.node = parser.parse_tuple()
+ return node
+
+
+class LoopControlExtension(Extension):
+ """Adds break and continue to the template engine."""
+
+ tags = {"break", "continue"}
+
+ def parse(self, parser: "Parser") -> t.Union[nodes.Break, nodes.Continue]:
+ token = next(parser.stream)
+ if token.value == "break":
+ return nodes.Break(lineno=token.lineno)
+ return nodes.Continue(lineno=token.lineno)
+
+
+class DebugExtension(Extension):
+ """A ``{% debug %}`` tag that dumps the available variables,
+ filters, and tests.
+
+ .. code-block:: html+jinja
+
+        {% debug %}
+
+ .. code-block:: text
+
+ {'context': {'cycler': ,
+ ...,
+ 'namespace': },
+ 'filters': ['abs', 'attr', 'batch', 'capitalize', 'center', 'count', 'd',
+ ..., 'urlencode', 'urlize', 'wordcount', 'wordwrap', 'xmlattr'],
+ 'tests': ['!=', '<', '<=', '==', '>', '>=', 'callable', 'defined',
+ ..., 'odd', 'sameas', 'sequence', 'string', 'undefined', 'upper']}
+
+ .. versionadded:: 2.11.0
+ """
+
+ tags = {"debug"}
+
+ def parse(self, parser: "Parser") -> nodes.Output:
+ lineno = parser.stream.expect("name:debug").lineno
+ context = nodes.ContextReference()
+ result = self.call_method("_render", [context], lineno=lineno)
+ return nodes.Output([result], lineno=lineno)
+
+ def _render(self, context: Context) -> str:
+ result = {
+ "context": context.get_all(),
+ "filters": sorted(self.environment.filters.keys()),
+ "tests": sorted(self.environment.tests.keys()),
+ }
+
+ # Set the depth since the intent is to show the top few names.
+ return pprint.pformat(result, depth=3, compact=True)
+
+
+def extract_from_ast(
+ ast: nodes.Template,
+ gettext_functions: t.Sequence[str] = GETTEXT_FUNCTIONS,
+ babel_style: bool = True,
+) -> t.Iterator[
+ t.Tuple[int, str, t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]]]
+]:
+ """Extract localizable strings from the given template node. Per
+ default this function returns matches in babel style that means non string
+ parameters as well as keyword arguments are returned as `None`. This
+ allows Babel to figure out what you really meant if you are using
+ gettext functions that allow keyword arguments for placeholder expansion.
+ If you don't want that behavior set the `babel_style` parameter to `False`
+ which causes only strings to be returned and parameters are always stored
+ in tuples. As a consequence invalid gettext calls (calls without a single
+ string parameter or string parameters after non-string parameters) are
+ skipped.
+
+ This example explains the behavior:
+
+ >>> from jinja2 import Environment
+ >>> env = Environment()
+ >>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}')
+ >>> list(extract_from_ast(node))
+ [(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))]
+ >>> list(extract_from_ast(node, babel_style=False))
+ [(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))]
+
+ For every string found this function yields a ``(lineno, function,
+ message)`` tuple, where:
+
+ * ``lineno`` is the number of the line on which the string was found,
+ * ``function`` is the name of the ``gettext`` function used (if the
+ string was extracted from embedded Python code), and
+ * ``message`` is the string, or a tuple of strings for functions
+ with multiple string arguments.
+
+ This extraction function operates on the AST and is because of that unable
+ to extract any comments. For comment support you have to use the babel
+ extraction interface or extract comments yourself.
+ """
+ out: t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]]
+
+ for node in ast.find_all(nodes.Call):
+ if (
+ not isinstance(node.node, nodes.Name)
+ or node.node.name not in gettext_functions
+ ):
+ continue
+
+ strings: t.List[t.Optional[str]] = []
+
+ for arg in node.args:
+ if isinstance(arg, nodes.Const) and isinstance(arg.value, str):
+ strings.append(arg.value)
+ else:
+ strings.append(None)
+
+ for _ in node.kwargs:
+ strings.append(None)
+ if node.dyn_args is not None:
+ strings.append(None)
+ if node.dyn_kwargs is not None:
+ strings.append(None)
+
+ if not babel_style:
+ out = tuple(x for x in strings if x is not None)
+
+ if not out:
+ continue
+ else:
+ if len(strings) == 1:
+ out = strings[0]
+ else:
+ out = tuple(strings)
+
+ yield node.lineno, node.node.name, out
+
+
+class _CommentFinder:
+ """Helper class to find comments in a token stream. Can only
+ find comments for gettext calls forwards. Once the comment
+ from line 4 is found, a comment for line 1 will not return a
+ usable value.
+ """
+
+ def __init__(
+ self, tokens: t.Sequence[t.Tuple[int, str, str]], comment_tags: t.Sequence[str]
+ ) -> None:
+ self.tokens = tokens
+ self.comment_tags = comment_tags
+ self.offset = 0
+ self.last_lineno = 0
+
+ def find_backwards(self, offset: int) -> t.List[str]:
+ try:
+ for _, token_type, token_value in reversed(
+ self.tokens[self.offset : offset]
+ ):
+ if token_type in ("comment", "linecomment"):
+ try:
+ prefix, comment = token_value.split(None, 1)
+ except ValueError:
+ continue
+ if prefix in self.comment_tags:
+ return [comment.rstrip()]
+ return []
+ finally:
+ self.offset = offset
+
+ def find_comments(self, lineno: int) -> t.List[str]:
+ if not self.comment_tags or self.last_lineno > lineno:
+ return []
+ for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset :]):
+ if token_lineno > lineno:
+ return self.find_backwards(self.offset + idx)
+ return self.find_backwards(len(self.tokens))
+
+
+def babel_extract(
+ fileobj: t.BinaryIO,
+ keywords: t.Sequence[str],
+ comment_tags: t.Sequence[str],
+ options: t.Dict[str, t.Any],
+) -> t.Iterator[
+ t.Tuple[
+ int, str, t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]], t.List[str]
+ ]
+]:
+ """Babel extraction method for Jinja templates.
+
+ .. versionchanged:: 2.3
+ Basic support for translation comments was added. If `comment_tags`
+ is now set to a list of keywords for extraction, the extractor will
+ try to find the best preceding comment that begins with one of the
+ keywords. For best results, make sure to not have more than one
+ gettext call in one line of code and the matching comment in the
+ same line or the line before.
+
+ .. versionchanged:: 2.5.1
+ The `newstyle_gettext` flag can be set to `True` to enable newstyle
+ gettext calls.
+
+ .. versionchanged:: 2.7
+ A `silent` option can now be provided. If set to `False` template
+ syntax errors are propagated instead of being ignored.
+
+ :param fileobj: the file-like object the messages should be extracted from
+ :param keywords: a list of keywords (i.e. function names) that should be
+ recognized as translation functions
+ :param comment_tags: a list of translator tags to search for and include
+ in the results.
+ :param options: a dictionary of additional options (optional)
+ :return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
+ (comments will be empty currently)
+ """
+ extensions: t.Dict[t.Type[Extension], None] = {}
+
+ for extension_name in options.get("extensions", "").split(","):
+ extension_name = extension_name.strip()
+
+ if not extension_name:
+ continue
+
+ extensions[import_string(extension_name)] = None
+
+ if InternationalizationExtension not in extensions:
+ extensions[InternationalizationExtension] = None
+
+ def getbool(options: t.Mapping[str, str], key: str, default: bool = False) -> bool:
+ return options.get(key, str(default)).lower() in {"1", "on", "yes", "true"}
+
+ silent = getbool(options, "silent", True)
+ environment = Environment(
+ options.get("block_start_string", defaults.BLOCK_START_STRING),
+ options.get("block_end_string", defaults.BLOCK_END_STRING),
+ options.get("variable_start_string", defaults.VARIABLE_START_STRING),
+ options.get("variable_end_string", defaults.VARIABLE_END_STRING),
+ options.get("comment_start_string", defaults.COMMENT_START_STRING),
+ options.get("comment_end_string", defaults.COMMENT_END_STRING),
+ options.get("line_statement_prefix") or defaults.LINE_STATEMENT_PREFIX,
+ options.get("line_comment_prefix") or defaults.LINE_COMMENT_PREFIX,
+ getbool(options, "trim_blocks", defaults.TRIM_BLOCKS),
+ getbool(options, "lstrip_blocks", defaults.LSTRIP_BLOCKS),
+ defaults.NEWLINE_SEQUENCE,
+ getbool(options, "keep_trailing_newline", defaults.KEEP_TRAILING_NEWLINE),
+ tuple(extensions),
+ cache_size=0,
+ auto_reload=False,
+ )
+
+ if getbool(options, "trimmed"):
+ environment.policies["ext.i18n.trimmed"] = True
+ if getbool(options, "newstyle_gettext"):
+ environment.newstyle_gettext = True # type: ignore
+
+ source = fileobj.read().decode(options.get("encoding", "utf-8"))
+ try:
+ node = environment.parse(source)
+ tokens = list(environment.lex(environment.preprocess(source)))
+ except TemplateSyntaxError:
+ if not silent:
+ raise
+ # skip templates with syntax errors
+ return
+
+ finder = _CommentFinder(tokens, comment_tags)
+ for lineno, func, message in extract_from_ast(node, keywords):
+ yield lineno, func, message, finder.find_comments(lineno)
+
+
+#: nicer import names
+i18n = InternationalizationExtension
+do = ExprStmtExtension
+loopcontrols = LoopControlExtension
+debug = DebugExtension
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia_cuda_nvrtc_cu11-11.8.89.dist-info/METADATA b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia_cuda_nvrtc_cu11-11.8.89.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..274c82b512f01f59e92def273aaa29d6a006d596
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia_cuda_nvrtc_cu11-11.8.89.dist-info/METADATA
@@ -0,0 +1,35 @@
+Metadata-Version: 2.1
+Name: nvidia-cuda-nvrtc-cu11
+Version: 11.8.89
+Summary: NVRTC native runtime libraries
+Home-page: https://developer.nvidia.com/cuda-zone
+Author: Nvidia CUDA Installer Team
+Author-email: cuda_installer@nvidia.com
+License: NVIDIA Proprietary Software
+Keywords: cuda,nvidia,runtime,machine learning,deep learning
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Education
+Classifier: Intended Audience :: Science/Research
+Classifier: License :: Other/Proprietary License
+Classifier: Natural Language :: English
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Topic :: Scientific/Engineering
+Classifier: Topic :: Scientific/Engineering :: Mathematics
+Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+Classifier: Topic :: Software Development
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX :: Linux
+Requires-Python: >=3
+License-File: License.txt
+
+NVRTC native runtime libraries
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia_cuda_nvrtc_cu11-11.8.89.dist-info/RECORD b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia_cuda_nvrtc_cu11-11.8.89.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..2f6673968505afd6eee9172ab61c1e0733e265b2
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia_cuda_nvrtc_cu11-11.8.89.dist-info/RECORD
@@ -0,0 +1,17 @@
+nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/__pycache__/__init__.cpython-311.pyc,,
+nvidia/cuda_nvrtc/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/cuda_nvrtc/__pycache__/__init__.cpython-311.pyc,,
+nvidia/cuda_nvrtc/include/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/cuda_nvrtc/include/__pycache__/__init__.cpython-311.pyc,,
+nvidia/cuda_nvrtc/include/nvrtc.h,sha256=2zGRW1F9jfYLybqdwM5RxJ7EZP5aZzcgBGcsqlbbvKs,30224
+nvidia/cuda_nvrtc/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/cuda_nvrtc/lib/__pycache__/__init__.cpython-311.pyc,,
+nvidia/cuda_nvrtc/lib/libnvrtc-builtins.so.11.8,sha256=LcS_aHA06J4RfvgvswApHkgzMZotR3IgBdQO6vuv2q8,7718792
+nvidia/cuda_nvrtc/lib/libnvrtc.so.11.2,sha256=7kQLGF-PYX64mMgB2ZXw3hwj-IxkSUMd9Ldl8K2UUYY,54417560
+nvidia_cuda_nvrtc_cu11-11.8.89.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+nvidia_cuda_nvrtc_cu11-11.8.89.dist-info/License.txt,sha256=rW9YU_ugyg0VnQ9Y1JrkmDDC-Mk_epJki5zpCttMbM0,59262
+nvidia_cuda_nvrtc_cu11-11.8.89.dist-info/METADATA,sha256=92p07kziEfXp2nmuS7oNyZWr7U8iI5jesbRmd4nAfyc,1506
+nvidia_cuda_nvrtc_cu11-11.8.89.dist-info/RECORD,,
+nvidia_cuda_nvrtc_cu11-11.8.89.dist-info/WHEEL,sha256=-kQi_VMfvRQozZJT7HUPMfY-5vLo0LVTmAylNJ3Ft98,106
+nvidia_cuda_nvrtc_cu11-11.8.89.dist-info/top_level.txt,sha256=fTkAtiFuL16nUrB9ytDDtpytz2t0B4NvYTnRzwAhO14,7
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia_cuda_nvrtc_cu11-11.8.89.dist-info/WHEEL b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia_cuda_nvrtc_cu11-11.8.89.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..06e355fe0e3ed7077903f119ae6928a17da8eb6f
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia_cuda_nvrtc_cu11-11.8.89.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.37.1)
+Root-Is-Purelib: true
+Tag: py3-none-manylinux1_x86_64
+
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia_cuda_runtime_cu11-11.8.89.dist-info/METADATA b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia_cuda_runtime_cu11-11.8.89.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..bc3b4650d5e2d9144a0520e4485ac43810fd2f7a
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia_cuda_runtime_cu11-11.8.89.dist-info/METADATA
@@ -0,0 +1,35 @@
+Metadata-Version: 2.1
+Name: nvidia-cuda-runtime-cu11
+Version: 11.8.89
+Summary: CUDA Runtime native Libraries
+Home-page: https://developer.nvidia.com/cuda-zone
+Author: Nvidia CUDA Installer Team
+Author-email: cuda_installer@nvidia.com
+License: NVIDIA Proprietary Software
+Keywords: cuda,nvidia,runtime,machine learning,deep learning
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Education
+Classifier: Intended Audience :: Science/Research
+Classifier: License :: Other/Proprietary License
+Classifier: Natural Language :: English
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Topic :: Scientific/Engineering
+Classifier: Topic :: Scientific/Engineering :: Mathematics
+Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+Classifier: Topic :: Software Development
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX :: Linux
+Requires-Python: >=3
+License-File: License.txt
+
+CUDA Runtime native Libraries
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia_cuda_runtime_cu11-11.8.89.dist-info/RECORD b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia_cuda_runtime_cu11-11.8.89.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..87e25fc8d1dc280fe2e0596e076da51ec39b9dc2
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia_cuda_runtime_cu11-11.8.89.dist-info/RECORD
@@ -0,0 +1,113 @@
+nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/__pycache__/__init__.cpython-311.pyc,,
+nvidia/cuda_runtime/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/cuda_runtime/__pycache__/__init__.cpython-311.pyc,,
+nvidia/cuda_runtime/include/CL/cl.h,sha256=2OqunnI7w8LfRaaOIvwUJOxJ2jEpN_r4DZeQMSvH0I8,77430
+nvidia/cuda_runtime/include/CL/cl.hpp,sha256=q9H85bvcgom7daYH3L1VuQvsvKTnDfnUgyrXyuoBrkM,281251
+nvidia/cuda_runtime/include/CL/cl_egl.h,sha256=HhpFYMrMx3Eaz9NthvSQRexd1BZO7_cmiu-2_-60qMM,4516
+nvidia/cuda_runtime/include/CL/cl_ext.h,sha256=uCwdMHbAcXR3RbLL7THhLpf9Rbp7ZaUpUcKXI1nAsCo,46988
+nvidia/cuda_runtime/include/CL/cl_gl.h,sha256=Z0tbd-ONmjrt-2_RM-qzAWOsMoxmXaiGaaqcN6yfTqI,6436
+nvidia/cuda_runtime/include/CL/cl_gl_ext.h,sha256=aCqqMA6lFPwIrRRcjlVmsLVmJXzALpvBv6i_0D1Zt2s,1326
+nvidia/cuda_runtime/include/CL/cl_platform.h,sha256=xXjdIuQZf2UlLr9p4l6oHul-AYNxoTpz1dDOqNf6uws,44584
+nvidia/cuda_runtime/include/CL/opencl.h,sha256=GwpOnqK_M5StKmfug1_dEgQs1r3XWL8uxLvr6YVORYg,1132
+nvidia/cuda_runtime/include/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/cuda_runtime/include/__pycache__/__init__.cpython-311.pyc,,
+nvidia/cuda_runtime/include/builtin_types.h,sha256=JxT9Vf2q2snxTBOL9ACzNmYzTWACO2VOVUu1KdFt7_g,3150
+nvidia/cuda_runtime/include/channel_descriptor.h,sha256=eAVB1Q5coPEy0F-yDdhoDH9l7B9rk5Dcp0eyKVMNEwk,22595
+nvidia/cuda_runtime/include/common_functions.h,sha256=22LTZRVcPZzEH6MJda7nNMCvMgIjSTe0OKR7sEQj6kc,3410
+nvidia/cuda_runtime/include/cooperative_groups.h,sha256=ffMJKQI2MYQYMKidiAEOwYVTA9RaYcOsR441Vj6Gw_w,66376
+nvidia/cuda_runtime/include/cooperative_groups/details/async.h,sha256=xsEHCZP3nuEY3l2p8SU2d1226XiXumUvDP_Gyh8PdVY,19122
+nvidia/cuda_runtime/include/cooperative_groups/details/coalesced_reduce.h,sha256=vWv1tyxMjSMM2Oc0SdxXhCug_PwaBM6u8iMLjKyeqjE,4561
+nvidia/cuda_runtime/include/cooperative_groups/details/coalesced_scan.h,sha256=DfZv5d5W0XJv-tZVhgrIdjLjs6aCx_u0oy1lDIpjo1Q,7314
+nvidia/cuda_runtime/include/cooperative_groups/details/driver_abi.h,sha256=v-ZUb4UgGKJk6NR2WCWHD3x_42y-togI1urFn70Gi-g,3964
+nvidia/cuda_runtime/include/cooperative_groups/details/functional.h,sha256=k5kSXKm8oV5o0zPAb5UUBUNx6K9biylBML2U1DofOL8,8503
+nvidia/cuda_runtime/include/cooperative_groups/details/helpers.h,sha256=6dJzOEpvm_xJB9UM7jaWEZkZ2WU_40zU1qmH0UInz-g,27207
+nvidia/cuda_runtime/include/cooperative_groups/details/info.h,sha256=UzPdCvuGmFbTzUdPHFmr2LZ0dQf8AjccAqscmC4RoIc,11936
+nvidia/cuda_runtime/include/cooperative_groups/details/partitioning.h,sha256=8hCh6F8sfkyfRgMirC37Nqv-b-gIY3A_J0eMYqmD2zU,6001
+nvidia/cuda_runtime/include/cooperative_groups/details/reduce.h,sha256=TdlPyPNXTRyS1UIZSp9E98zImo-CDnmXL7F5uuzXxzk,22399
+nvidia/cuda_runtime/include/cooperative_groups/details/scan.h,sha256=hxWd_yNk2AE5UXBBJ9GJH7wg0sgT_V4sQLToi1C4Q00,17421
+nvidia/cuda_runtime/include/cooperative_groups/details/sync.h,sha256=djyXsKuJTax1vTnG5oKPYkRu8nM6o2OJO9F_WIYJNzk,10218
+nvidia/cuda_runtime/include/cooperative_groups/memcpy_async.h,sha256=erOIHuObdfxRhBWfrXE3wsZF4B2GUuqwzQrsPwKPpbg,2960
+nvidia/cuda_runtime/include/cooperative_groups/reduce.h,sha256=B0hgDkqM-6ueqTTgb3b34A0RH4vGz8mBf5e2jT1dJ1o,2949
+nvidia/cuda_runtime/include/cooperative_groups/scan.h,sha256=2EU6T5cWNwftm2B7FicV31PojoI61yo5fHXGRYkGk40,2940
+nvidia/cuda_runtime/include/cuComplex.h,sha256=WpcgpaiPhU_o9sTPMcNTEZuyXDIc8x3sz4dUWSztL2g,12186
+nvidia/cuda_runtime/include/cuda.h,sha256=swpKhNoL1tA0kz_EMJVaRkMrIZ0aYfmph1sx6LBJTBI,840786
+nvidia/cuda_runtime/include/cudaEGL.h,sha256=_CwaQ4cEP1vfNyBSSd5qFxznPCYOovF6Cpj-QWSIBq4,39544
+nvidia/cuda_runtime/include/cudaEGLTypedefs.h,sha256=xF_FAN1Kar9oyHJ3cCU7jztTpxX8WylpiuYyYpGGHek,5645
+nvidia/cuda_runtime/include/cudaGL.h,sha256=UDArZVtTx1rphzLuTNkpidx27prdTUv_l6dXCFfkBYc,22401
+nvidia/cuda_runtime/include/cudaGLTypedefs.h,sha256=dClpQI-LuXgF9rPSBsj7OkIg8g_fXDjT0hLZS8TGpOg,6576
+nvidia/cuda_runtime/include/cudaProfilerTypedefs.h,sha256=F2aWLIKv_AhNbxNOaZVcRsxIh0kuscnV8UMWWxkBAlY,3297
+nvidia/cuda_runtime/include/cudaTypedefs.h,sha256=RuI5ctirdk7qJ0MKkYD3pVCAPR4Ax7pBqKw8T4C1jt0,93945
+nvidia/cuda_runtime/include/cudaVDPAU.h,sha256=Np7Nc2Wjaz--hkpbhW6f9aapr-NbcPDAgkot0sJerco,12694
+nvidia/cuda_runtime/include/cudaVDPAUTypedefs.h,sha256=wz8nyOUdwM9mH9JO3QZW-A9dyxt-IufSX7nggSXpCNs,4144
+nvidia/cuda_runtime/include/cuda_awbarrier.h,sha256=LO5WG2CO22kiwgaae5timQ_5u3sJn1H9tyLnWPP6vUw,7600
+nvidia/cuda_runtime/include/cuda_awbarrier_helpers.h,sha256=vFyH7n9Mnt78Z8vG7DQCxa1MAelyBVmhCPdqCfkNOiI,12227
+nvidia/cuda_runtime/include/cuda_awbarrier_primitives.h,sha256=XIkzEZynW8_aDlv-VGOy4ufoTMYntu8CmGSQKq6wv0g,3993
+nvidia/cuda_runtime/include/cuda_bf16.h,sha256=CuV1113z4--_QPdDH1eVgGgBbl_TuSv3mvUAQPqgjxU,139379
+nvidia/cuda_runtime/include/cuda_bf16.hpp,sha256=YYVqbWfqs4DiodvN36GnRoqkNEAyW8MIAVF6NeUcCW0,101353
+nvidia/cuda_runtime/include/cuda_device_runtime_api.h,sha256=YGdzDamS_Fa0zAIawbnNMNwyz6XX9SKJ_rdAnmoEOzc,16574
+nvidia/cuda_runtime/include/cuda_egl_interop.h,sha256=PNWYns30MIytJQHSOh7UbZYlaTX5e0bavzK14tde_C8,37109
+nvidia/cuda_runtime/include/cuda_fp16.h,sha256=33O6OVdPNxXlObODyIibl0c5cue1AHRg0gZ8i1boilo,132563
+nvidia/cuda_runtime/include/cuda_fp16.hpp,sha256=-UjvMb2L2gFHperdhxmOoutEL5GA0Uf5wmRvfYMqyKA,93580
+nvidia/cuda_runtime/include/cuda_fp8.h,sha256=W_1NkOLgNvcxnxRfpmrwgLZIE2PmYREhWv2wohJ0JCk,13358
+nvidia/cuda_runtime/include/cuda_fp8.hpp,sha256=Ph12HdUf-Mt7t1f6RZOReWPGeqN6OeQMKJuBuE5Avsk,56491
+nvidia/cuda_runtime/include/cuda_gl_interop.h,sha256=pBQlAyGKIDmhV33GlBd58Wr4Ej96gTJYJtegddE5S00,18961
+nvidia/cuda_runtime/include/cuda_occupancy.h,sha256=Kr9HyOe-hlRjBAzbINwUYkNgbbIgIjuvKs09UZhMYQo,67179
+nvidia/cuda_runtime/include/cuda_pipeline.h,sha256=0enXG49wN4JajlQi3ahbp2ei_ufTY_Mznic7zfWmKHM,8130
+nvidia/cuda_runtime/include/cuda_pipeline_helpers.h,sha256=FplUvNDL0sk0S0C-cTNmEo-NnueU3jvV1TUdKYjN24c,13828
+nvidia/cuda_runtime/include/cuda_pipeline_primitives.h,sha256=FnJJtuV6rHr6LgL56XDwilcSbFr6W1Hj6mf1AJaMI20,8675
+nvidia/cuda_runtime/include/cuda_runtime.h,sha256=_HEZG1eaHioSl9sxNnOsbRJraBVFdq_5SGOfK1HXtOo,113013
+nvidia/cuda_runtime/include/cuda_runtime_api.h,sha256=hSshhlXKw6Vrp_wy84ydh5EOhfw24DTpgZ7iffT-e-s,569590
+nvidia/cuda_runtime/include/cuda_surface_types.h,sha256=5NdWqB9RlbdWwbIy7lQvrMROt09RrZdmVGP52mcjVkY,4276
+nvidia/cuda_runtime/include/cuda_texture_types.h,sha256=e95H37q_Y02876PJS2HFccs49_5XoAabkWxi3KqI8-0,4781
+nvidia/cuda_runtime/include/cuda_vdpau_interop.h,sha256=O2HI8-3zT9W1lIF4IajpOFbF1ABXtizQyUeVFovPQ-g,7631
+nvidia/cuda_runtime/include/cudart_platform.h,sha256=YN6sKhB0b9w5tGX1IYL7ulJVPrWAiX9A44qLv4EtW5Q,2717
+nvidia/cuda_runtime/include/device_atomic_functions.h,sha256=krPIx_BVDjRoAlLKlAgpjxrpXPcFmIjGx32X0MMEj3s,11359
+nvidia/cuda_runtime/include/device_atomic_functions.hpp,sha256=_UsoVsyP7U-9CUUCbC1QLw6IbFFkKzxk458vLbAXzOY,8149
+nvidia/cuda_runtime/include/device_double_functions.h,sha256=KUxId5Z1fx8SWfLRTxPD7RB-zN7zslzb4n7JaJLfL3I,3452
+nvidia/cuda_runtime/include/device_functions.h,sha256=bWSrhTYE9NQlss7xMSMEVusvto9j2fgUDXWVH2W_cOA,3410
+nvidia/cuda_runtime/include/device_launch_parameters.h,sha256=H1_CC-vvAaS26ys4XsTFkMgTxUTciAjdjswjizkisvQ,3846
+nvidia/cuda_runtime/include/device_types.h,sha256=2LFxoZBJPoA5V0H1EbKTEaXDi3GDJPtzOPdRHDaucIQ,3588
+nvidia/cuda_runtime/include/driver_functions.h,sha256=cN3IjRAz2Mj2Pj35SyxJIkZNDDusnJqaqzBdMzpQKbA,4625
+nvidia/cuda_runtime/include/driver_types.h,sha256=akxjeSPFYbryk_0iBfipSRafuF91-OdGva-t8EvRCyw,141075
+nvidia/cuda_runtime/include/host_config.h,sha256=BscH_GazAZbbotddVzL5RmafbQ-QjRx8f-I1O01IBW8,3380
+nvidia/cuda_runtime/include/host_defines.h,sha256=bBQwQF5C1N1c2qpLV56g1c-weu9Ysgz-gIf2Kn3uz_A,3386
+nvidia/cuda_runtime/include/library_types.h,sha256=yJvoLFw5oBdRqkQgEhIaX-stsMGlxQW9sZoJ4vbQHwI,4766
+nvidia/cuda_runtime/include/math_constants.h,sha256=cV6hAyQe8X7f7MBtaKjjIJq3BycOUDp6I5cizJX5HLw,7608
+nvidia/cuda_runtime/include/math_functions.h,sha256=5XcC6j-fJKttvhwc4hZNoLHNw808a2ZYIOtZ7ry7yd0,3398
+nvidia/cuda_runtime/include/mma.h,sha256=IY_VenxuEncwGq92MhrWUb-Xswh0ekAXLy9Rbxhxa2Y,2932
+nvidia/cuda_runtime/include/sm_20_atomic_functions.h,sha256=5MEzDxmh1n6-GW0jUpJTg9TwYGmut90x3XaKSj_btOo,4342
+nvidia/cuda_runtime/include/sm_20_atomic_functions.hpp,sha256=Cx__BPJKUPeG5qMxZs9ztfIyqWqt0wZDZi4V_5EV4LQ,3929
+nvidia/cuda_runtime/include/sm_20_intrinsics.h,sha256=tVfZqjKaVuVVip0Wz26ucrjhSTote3CnhUQBvfQdVEQ,50660
+nvidia/cuda_runtime/include/sm_20_intrinsics.hpp,sha256=BhEBuXSKBsNGJDBJDtYL0cGRI3wX_w_OIgA5D-YxIWk,7694
+nvidia/cuda_runtime/include/sm_30_intrinsics.h,sha256=QPK_qWRrRJhID4T81cK0l5V86rwkDOVVxFfwyXcyhno,15845
+nvidia/cuda_runtime/include/sm_30_intrinsics.hpp,sha256=s50XwwWIHBhOEnln-KbCW4ObuiYFkzwzdCjSUaYchfY,24480
+nvidia/cuda_runtime/include/sm_32_atomic_functions.h,sha256=5R7T1MjFDQG6IRApz06UDhhQWW3GtaK5jPiJs3gB4Eg,6540
+nvidia/cuda_runtime/include/sm_32_atomic_functions.hpp,sha256=YDveVhaTYKo2WcAhHS8Cbvj5cIi-lcM5YlneVeEjPE4,5377
+nvidia/cuda_runtime/include/sm_32_intrinsics.h,sha256=-nLnJz2dczMLqBxxs610KPQgVOlceuehqlDc7TqhbGI,33197
+nvidia/cuda_runtime/include/sm_32_intrinsics.hpp,sha256=ThPZXghlPZexJUHha8XKnVIKVHeNKVf4GRnrT4kXKm8,70577
+nvidia/cuda_runtime/include/sm_35_atomic_functions.h,sha256=a3XoEsKRCEOf0Q_5Y__rMfmC4pScv4VkUggVgVJVn44,2909
+nvidia/cuda_runtime/include/sm_35_intrinsics.h,sha256=BEiPNO03ZSv5XtMMul5jiTH4oLWlOu3CYkIAgrWslnk,2952
+nvidia/cuda_runtime/include/sm_60_atomic_functions.h,sha256=itW16w99dHIX1h8326fJvf1M0O2YDtFeVKehhtaWWHk,20606
+nvidia/cuda_runtime/include/sm_60_atomic_functions.hpp,sha256=KUhC8VemPcWLCh67xTtv1vOFUIY1cj7wFPnlApfgqd8,15057
+nvidia/cuda_runtime/include/sm_61_intrinsics.h,sha256=tKqN3jCsLBo3YiiEJ7BtAAgnLPJfJqyVjQF9VUVD5tg,5991
+nvidia/cuda_runtime/include/sm_61_intrinsics.hpp,sha256=77N6vZcAtjtq8Ewipa8ZZ7Ydth1KJ-K4ka_8VPgXuts,6748
+nvidia/cuda_runtime/include/surface_functions.h,sha256=O_WV4xsP5G0_9rPs3G7UFKNFQnz9vIJNhNpTMoVNnKI,19773
+nvidia/cuda_runtime/include/surface_indirect_functions.h,sha256=2CEpbKJ1IQACGa4_wIFhskqOceKuoNYYFU9Sv9bWvPk,11930
+nvidia/cuda_runtime/include/surface_types.h,sha256=q_PY3EiCxB-KchtfAB2JQ8DeVFO2CEBJcZQdixqA2D0,4653
+nvidia/cuda_runtime/include/texture_fetch_functions.h,sha256=MEVQSiGWB2diMjZVNBS4G-QALS146Wj_e8Tjel7UqNQ,32714
+nvidia/cuda_runtime/include/texture_indirect_functions.h,sha256=rTw_uCKT2p3dPJp9Mff4SjALuXsuC5x2w3g7YgyIlWM,23039
+nvidia/cuda_runtime/include/texture_types.h,sha256=nfNPS9qNp-PPDVwlS1uM42hvj3R7lt5NuTworBstymw,9058
+nvidia/cuda_runtime/include/vector_functions.h,sha256=R5plWOkFciltO_AS5if8NcmsgDp3cFNq6zFFDd3oofk,7847
+nvidia/cuda_runtime/include/vector_functions.hpp,sha256=afXhNSd3LFTZo96EPtesTLfvxd4nTmLVzgkj967rTRg,10060
+nvidia/cuda_runtime/include/vector_types.h,sha256=ruVFRp8RioWR9mrvLXX9S15ZSJ97wqTjA8ORCJKKzOQ,13206
+nvidia/cuda_runtime/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/cuda_runtime/lib/__pycache__/__init__.cpython-311.pyc,,
+nvidia/cuda_runtime/lib/libOpenCL.so.1,sha256=FVLsRLRp2nLlhaKax_0qqY1_mbk2K3-489yy1rYD0_s,30856
+nvidia/cuda_runtime/lib/libcudart.so.11.0,sha256=0NpBrhMjz07rYQEj1p13FBJM_l6_zE5F8CuRDlHFfuY,679264
+nvidia_cuda_runtime_cu11-11.8.89.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+nvidia_cuda_runtime_cu11-11.8.89.dist-info/License.txt,sha256=rW9YU_ugyg0VnQ9Y1JrkmDDC-Mk_epJki5zpCttMbM0,59262
+nvidia_cuda_runtime_cu11-11.8.89.dist-info/METADATA,sha256=YQ1ja7ng9blOtTnQfqjlHmSyDqk2KxUlJcxItX9tU6Y,1506
+nvidia_cuda_runtime_cu11-11.8.89.dist-info/RECORD,,
+nvidia_cuda_runtime_cu11-11.8.89.dist-info/WHEEL,sha256=-kQi_VMfvRQozZJT7HUPMfY-5vLo0LVTmAylNJ3Ft98,106
+nvidia_cuda_runtime_cu11-11.8.89.dist-info/top_level.txt,sha256=fTkAtiFuL16nUrB9ytDDtpytz2t0B4NvYTnRzwAhO14,7
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia_cuda_runtime_cu11-11.8.89.dist-info/WHEEL b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia_cuda_runtime_cu11-11.8.89.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..06e355fe0e3ed7077903f119ae6928a17da8eb6f
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia_cuda_runtime_cu11-11.8.89.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.37.1)
+Root-Is-Purelib: true
+Tag: py3-none-manylinux1_x86_64
+
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia_cuda_runtime_cu11-11.8.89.dist-info/top_level.txt b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia_cuda_runtime_cu11-11.8.89.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..862f7abf232cdfbb928609856247292e81c9decb
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia_cuda_runtime_cu11-11.8.89.dist-info/top_level.txt
@@ -0,0 +1 @@
+nvidia
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip-24.3.1.dist-info/RECORD b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip-24.3.1.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..d05c79b7410fae87b89d471725f87eae8610f28c
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip-24.3.1.dist-info/RECORD
@@ -0,0 +1,853 @@
+../../../bin/pip,sha256=4AtP-3E6RwrI_be51XSXwxRGhxhML-HR3h9VZHLjcu4,270
+../../../bin/pip3,sha256=4AtP-3E6RwrI_be51XSXwxRGhxhML-HR3h9VZHLjcu4,270
+../../../bin/pip3.11,sha256=4AtP-3E6RwrI_be51XSXwxRGhxhML-HR3h9VZHLjcu4,270
+pip-24.3.1.dist-info/AUTHORS.txt,sha256=Cbb630k8EL9FkBzX9Vpi6hpYWrLSlh08eXodL5u0eLI,10925
+pip-24.3.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+pip-24.3.1.dist-info/LICENSE.txt,sha256=Y0MApmnUmurmWxLGxIySTFGkzfPR_whtw0VtyLyqIQQ,1093
+pip-24.3.1.dist-info/METADATA,sha256=V8iCNK1GYbC82PWsLMsASDh9AO4veocRlM4Pn9q2KFI,3677
+pip-24.3.1.dist-info/RECORD,,
+pip-24.3.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip-24.3.1.dist-info/WHEEL,sha256=OVMc5UfuAQiSplgO0_WdW7vXVGAt9Hdd6qtN4HotdyA,91
+pip-24.3.1.dist-info/entry_points.txt,sha256=eeIjuzfnfR2PrhbjnbzFU6MnSS70kZLxwaHHq6M-bD0,87
+pip-24.3.1.dist-info/top_level.txt,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+pip/__init__.py,sha256=faXY_neeYrA_88plEhkyhwAaYeds7wu5U1iGwP24J0s,357
+pip/__main__.py,sha256=WzbhHXTbSE6gBY19mNN9m4s5o_365LOvTYSgqgbdBhE,854
+pip/__pip-runner__.py,sha256=cPPWuJ6NK_k-GzfvlejLFgwzmYUROmpAR6QC3Q-vkXQ,1450
+pip/__pycache__/__init__.cpython-311.pyc,,
+pip/__pycache__/__main__.cpython-311.pyc,,
+pip/__pycache__/__pip-runner__.cpython-311.pyc,,
+pip/_internal/__init__.py,sha256=MfcoOluDZ8QMCFYal04IqOJ9q6m2V7a0aOsnI-WOxUo,513
+pip/_internal/__pycache__/__init__.cpython-311.pyc,,
+pip/_internal/__pycache__/build_env.cpython-311.pyc,,
+pip/_internal/__pycache__/cache.cpython-311.pyc,,
+pip/_internal/__pycache__/configuration.cpython-311.pyc,,
+pip/_internal/__pycache__/exceptions.cpython-311.pyc,,
+pip/_internal/__pycache__/main.cpython-311.pyc,,
+pip/_internal/__pycache__/pyproject.cpython-311.pyc,,
+pip/_internal/__pycache__/self_outdated_check.cpython-311.pyc,,
+pip/_internal/__pycache__/wheel_builder.cpython-311.pyc,,
+pip/_internal/build_env.py,sha256=wsTPOWyPTKvUREUcO585OU01kbQufpdigY8fVHv3WIw,10584
+pip/_internal/cache.py,sha256=Jb698p5PNigRtpW5o26wQNkkUv4MnQ94mc471wL63A0,10369
+pip/_internal/cli/__init__.py,sha256=FkHBgpxxb-_gd6r1FjnNhfMOzAUYyXoXKJ6abijfcFU,132
+pip/_internal/cli/__pycache__/__init__.cpython-311.pyc,,
+pip/_internal/cli/__pycache__/autocompletion.cpython-311.pyc,,
+pip/_internal/cli/__pycache__/base_command.cpython-311.pyc,,
+pip/_internal/cli/__pycache__/cmdoptions.cpython-311.pyc,,
+pip/_internal/cli/__pycache__/command_context.cpython-311.pyc,,
+pip/_internal/cli/__pycache__/index_command.cpython-311.pyc,,
+pip/_internal/cli/__pycache__/main.cpython-311.pyc,,
+pip/_internal/cli/__pycache__/main_parser.cpython-311.pyc,,
+pip/_internal/cli/__pycache__/parser.cpython-311.pyc,,
+pip/_internal/cli/__pycache__/progress_bars.cpython-311.pyc,,
+pip/_internal/cli/__pycache__/req_command.cpython-311.pyc,,
+pip/_internal/cli/__pycache__/spinners.cpython-311.pyc,,
+pip/_internal/cli/__pycache__/status_codes.cpython-311.pyc,,
+pip/_internal/cli/autocompletion.py,sha256=Lli3Mr6aDNu7ZkJJFFvwD2-hFxNI6Avz8OwMyS5TVrs,6865
+pip/_internal/cli/base_command.py,sha256=F8nUcSM-Y-MQljJUe724-yxmc5viFXHyM_zH70NmIh4,8289
+pip/_internal/cli/cmdoptions.py,sha256=mDqBr0d0hoztbRJs-PWtcKpqNAc7khU6ZpoesZKocT8,30110
+pip/_internal/cli/command_context.py,sha256=RHgIPwtObh5KhMrd3YZTkl8zbVG-6Okml7YbFX4Ehg0,774
+pip/_internal/cli/index_command.py,sha256=-0oPTruZGkLSMrWDleZ6UtcKP3G-SImRRuhH0RfVE3o,5631
+pip/_internal/cli/main.py,sha256=BDZef-bWe9g9Jpr4OVs4dDf-845HJsKw835T7AqEnAc,2817
+pip/_internal/cli/main_parser.py,sha256=laDpsuBDl6kyfywp9eMMA9s84jfH2TJJn-vmL0GG90w,4338
+pip/_internal/cli/parser.py,sha256=VCMtduzECUV87KaHNu-xJ-wLNL82yT3x16V4XBxOAqI,10825
+pip/_internal/cli/progress_bars.py,sha256=VgydyqjZvfhqpuNcFDn00QNuA9GxRe9CKrRG8jhPuKU,2723
+pip/_internal/cli/req_command.py,sha256=DqeFhmUMs6o6Ev8qawAcOoYNdAZsfyKS0MZI5jsJYwQ,12250
+pip/_internal/cli/spinners.py,sha256=hIJ83GerdFgFCdobIA23Jggetegl_uC4Sp586nzFbPE,5118
+pip/_internal/cli/status_codes.py,sha256=sEFHUaUJbqv8iArL3HAtcztWZmGOFX01hTesSytDEh0,116
+pip/_internal/commands/__init__.py,sha256=5oRO9O3dM2vGuh0bFw4HOVletryrz5HHMmmPWwJrH9U,3882
+pip/_internal/commands/__pycache__/__init__.cpython-311.pyc,,
+pip/_internal/commands/__pycache__/cache.cpython-311.pyc,,
+pip/_internal/commands/__pycache__/check.cpython-311.pyc,,
+pip/_internal/commands/__pycache__/completion.cpython-311.pyc,,
+pip/_internal/commands/__pycache__/configuration.cpython-311.pyc,,
+pip/_internal/commands/__pycache__/debug.cpython-311.pyc,,
+pip/_internal/commands/__pycache__/download.cpython-311.pyc,,
+pip/_internal/commands/__pycache__/freeze.cpython-311.pyc,,
+pip/_internal/commands/__pycache__/hash.cpython-311.pyc,,
+pip/_internal/commands/__pycache__/help.cpython-311.pyc,,
+pip/_internal/commands/__pycache__/index.cpython-311.pyc,,
+pip/_internal/commands/__pycache__/inspect.cpython-311.pyc,,
+pip/_internal/commands/__pycache__/install.cpython-311.pyc,,
+pip/_internal/commands/__pycache__/list.cpython-311.pyc,,
+pip/_internal/commands/__pycache__/search.cpython-311.pyc,,
+pip/_internal/commands/__pycache__/show.cpython-311.pyc,,
+pip/_internal/commands/__pycache__/uninstall.cpython-311.pyc,,
+pip/_internal/commands/__pycache__/wheel.cpython-311.pyc,,
+pip/_internal/commands/cache.py,sha256=xg76_ZFEBC6zoQ3gXLRfMZJft4z2a0RwH4GEFZC6nnU,7944
+pip/_internal/commands/check.py,sha256=Hr_4eiMd9cgVDgEvjtIdw915NmL7ROIWW8enkr8slPQ,2268
+pip/_internal/commands/completion.py,sha256=HT4lD0bgsflHq2IDgYfiEdp7IGGtE7s6MgI3xn0VQEw,4287
+pip/_internal/commands/configuration.py,sha256=n98enwp6y0b5G6fiRQjaZo43FlJKYve_daMhN-4BRNc,9766
+pip/_internal/commands/debug.py,sha256=DNDRgE9YsKrbYzU0s3VKi8rHtKF4X13CJ_br_8PUXO0,6797
+pip/_internal/commands/download.py,sha256=0qB0nys6ZEPsog451lDsjL5Bx7Z97t-B80oFZKhpzKM,5273
+pip/_internal/commands/freeze.py,sha256=2Vt72BYTSm9rzue6d8dNzt8idxWK4Db6Hd-anq7GQ80,3203
+pip/_internal/commands/hash.py,sha256=EVVOuvGtoPEdFi8SNnmdqlCQrhCxV-kJsdwtdcCnXGQ,1703
+pip/_internal/commands/help.py,sha256=gcc6QDkcgHMOuAn5UxaZwAStsRBrnGSn_yxjS57JIoM,1132
+pip/_internal/commands/index.py,sha256=RAXxmJwFhVb5S1BYzb5ifX3sn9Na8v2CCVYwSMP8pao,4731
+pip/_internal/commands/inspect.py,sha256=PGrY9TRTRCM3y5Ml8Bdk8DEOXquWRfscr4DRo1LOTPc,3189
+pip/_internal/commands/install.py,sha256=iqesiLIZc6Op9uihMQFYRhAA2DQRZUxbM4z1BwXoFls,29428
+pip/_internal/commands/list.py,sha256=oiIzSjLP6__d7dIS3q0Xb5ywsaOThBWRqMyjjKzkPdM,12769
+pip/_internal/commands/search.py,sha256=fWkUQVx_gm8ebbFAlCgqtxKXT9rNahpJ-BI__3HNZpg,5626
+pip/_internal/commands/show.py,sha256=IG9L5uo8w6UA4tI_IlmaxLCoNKPa5JNJCljj3NWs0OE,7507
+pip/_internal/commands/uninstall.py,sha256=7pOR7enK76gimyxQbzxcG1OsyLXL3DvX939xmM8Fvtg,3892
+pip/_internal/commands/wheel.py,sha256=eJRhr_qoNNxWAkkdJCNiQM7CXd4E1_YyQhsqJnBPGGg,6414
+pip/_internal/configuration.py,sha256=XkAiBS0hpzsM-LF0Qu5hvPWO_Bs67-oQKRYFBuMbESs,14006
+pip/_internal/distributions/__init__.py,sha256=Hq6kt6gXBgjNit5hTTWLAzeCNOKoB-N0pGYSqehrli8,858
+pip/_internal/distributions/__pycache__/__init__.cpython-311.pyc,,
+pip/_internal/distributions/__pycache__/base.cpython-311.pyc,,
+pip/_internal/distributions/__pycache__/installed.cpython-311.pyc,,
+pip/_internal/distributions/__pycache__/sdist.cpython-311.pyc,,
+pip/_internal/distributions/__pycache__/wheel.cpython-311.pyc,,
+pip/_internal/distributions/base.py,sha256=QeB9qvKXDIjLdPBDE5fMgpfGqMMCr-govnuoQnGuiF8,1783
+pip/_internal/distributions/installed.py,sha256=QinHFbWAQ8oE0pbD8MFZWkwlnfU1QYTccA1vnhrlYOU,842
+pip/_internal/distributions/sdist.py,sha256=PlcP4a6-R6c98XnOM-b6Lkb3rsvh9iG4ok8shaanrzs,6751
+pip/_internal/distributions/wheel.py,sha256=THBYfnv7VVt8mYhMYUtH13S1E7FDwtDyDfmUcl8ai0E,1317
+pip/_internal/exceptions.py,sha256=2_byISIv3kSnI_9T-Esfxrt0LnTRgcUHyxu0twsHjQY,26481
+pip/_internal/index/__init__.py,sha256=vpt-JeTZefh8a-FC22ZeBSXFVbuBcXSGiILhQZJaNpQ,30
+pip/_internal/index/__pycache__/__init__.cpython-311.pyc,,
+pip/_internal/index/__pycache__/collector.cpython-311.pyc,,
+pip/_internal/index/__pycache__/package_finder.cpython-311.pyc,,
+pip/_internal/index/__pycache__/sources.cpython-311.pyc,,
+pip/_internal/index/collector.py,sha256=RdPO0JLAlmyBWPAWYHPyRoGjz3GNAeTngCNkbGey_mE,16265
+pip/_internal/index/package_finder.py,sha256=yRC4xsyudwKnNoU6IXvNoyqYo5ScT7lB6Wa-z2eh7cs,37666
+pip/_internal/index/sources.py,sha256=lPBLK5Xiy8Q6IQMio26Wl7ocfZOKkgGklIBNyUJ23fI,8632
+pip/_internal/locations/__init__.py,sha256=UaAxeZ_f93FyouuFf4p7SXYF-4WstXuEvd3LbmPCAno,14925
+pip/_internal/locations/__pycache__/__init__.cpython-311.pyc,,
+pip/_internal/locations/__pycache__/_distutils.cpython-311.pyc,,
+pip/_internal/locations/__pycache__/_sysconfig.cpython-311.pyc,,
+pip/_internal/locations/__pycache__/base.cpython-311.pyc,,
+pip/_internal/locations/_distutils.py,sha256=x6nyVLj7X11Y4khIdf-mFlxMl2FWadtVEgeb8upc_WI,6013
+pip/_internal/locations/_sysconfig.py,sha256=IGzds60qsFneRogC-oeBaY7bEh3lPt_v47kMJChQXsU,7724
+pip/_internal/locations/base.py,sha256=RQiPi1d4FVM2Bxk04dQhXZ2PqkeljEL2fZZ9SYqIQ78,2556
+pip/_internal/main.py,sha256=r-UnUe8HLo5XFJz8inTcOOTiu_sxNhgHb6VwlGUllOI,340
+pip/_internal/metadata/__init__.py,sha256=9pU3W3s-6HtjFuYhWcLTYVmSaziklPv7k2x8p7X1GmA,4339
+pip/_internal/metadata/__pycache__/__init__.cpython-311.pyc,,
+pip/_internal/metadata/__pycache__/_json.cpython-311.pyc,,
+pip/_internal/metadata/__pycache__/base.cpython-311.pyc,,
+pip/_internal/metadata/__pycache__/pkg_resources.cpython-311.pyc,,
+pip/_internal/metadata/_json.py,sha256=P0cAJrH_mtmMZvlZ16ZXm_-izA4lpr5wy08laICuiaA,2644
+pip/_internal/metadata/base.py,sha256=ft0K5XNgI4ETqZnRv2-CtvgYiMOMAeGMAzxT-f6VLJA,25298
+pip/_internal/metadata/importlib/__init__.py,sha256=jUUidoxnHcfITHHaAWG1G2i5fdBYklv_uJcjo2x7VYE,135
+pip/_internal/metadata/importlib/__pycache__/__init__.cpython-311.pyc,,
+pip/_internal/metadata/importlib/__pycache__/_compat.cpython-311.pyc,,
+pip/_internal/metadata/importlib/__pycache__/_dists.cpython-311.pyc,,
+pip/_internal/metadata/importlib/__pycache__/_envs.cpython-311.pyc,,
+pip/_internal/metadata/importlib/_compat.py,sha256=c6av8sP8BBjAZuFSJow1iWfygUXNM3xRTCn5nqw6B9M,2796
+pip/_internal/metadata/importlib/_dists.py,sha256=anh0mLI-FYRPUhAdipd0Va3YJJc6HelCKQ0bFhY10a0,8017
+pip/_internal/metadata/importlib/_envs.py,sha256=UUB980XSrDWrMpQ1_G45i0r8Hqlg_tg3IPQ63mEqbNc,7431
+pip/_internal/metadata/pkg_resources.py,sha256=U07ETAINSGeSRBfWUG93E4tZZbaW_f7PGzEqZN0hulc,10542
+pip/_internal/models/__init__.py,sha256=3DHUd_qxpPozfzouoqa9g9ts1Czr5qaHfFxbnxriepM,63
+pip/_internal/models/__pycache__/__init__.cpython-311.pyc,,
+pip/_internal/models/__pycache__/candidate.cpython-311.pyc,,
+pip/_internal/models/__pycache__/direct_url.cpython-311.pyc,,
+pip/_internal/models/__pycache__/format_control.cpython-311.pyc,,
+pip/_internal/models/__pycache__/index.cpython-311.pyc,,
+pip/_internal/models/__pycache__/installation_report.cpython-311.pyc,,
+pip/_internal/models/__pycache__/link.cpython-311.pyc,,
+pip/_internal/models/__pycache__/scheme.cpython-311.pyc,,
+pip/_internal/models/__pycache__/search_scope.cpython-311.pyc,,
+pip/_internal/models/__pycache__/selection_prefs.cpython-311.pyc,,
+pip/_internal/models/__pycache__/target_python.cpython-311.pyc,,
+pip/_internal/models/__pycache__/wheel.cpython-311.pyc,,
+pip/_internal/models/candidate.py,sha256=zzgFRuw_kWPjKpGw7LC0ZUMD2CQ2EberUIYs8izjdCA,753
+pip/_internal/models/direct_url.py,sha256=uBtY2HHd3TO9cKQJWh0ThvE5FRr-MWRYChRU4IG9HZE,6578
+pip/_internal/models/format_control.py,sha256=wtsQqSK9HaUiNxQEuB-C62eVimw6G4_VQFxV9-_KDBE,2486
+pip/_internal/models/index.py,sha256=tYnL8oxGi4aSNWur0mG8DAP7rC6yuha_MwJO8xw0crI,1030
+pip/_internal/models/installation_report.py,sha256=zRVZoaz-2vsrezj_H3hLOhMZCK9c7TbzWgC-jOalD00,2818
+pip/_internal/models/link.py,sha256=jHax9O-9zlSzEwjBCDkx0OXjKXwBDwOuPwn-PsR8dCs,21034
+pip/_internal/models/scheme.py,sha256=PakmHJM3e8OOWSZFtfz1Az7f1meONJnkGuQxFlt3wBE,575
+pip/_internal/models/search_scope.py,sha256=67NEnsYY84784S-MM7ekQuo9KXLH-7MzFntXjapvAo0,4531
+pip/_internal/models/selection_prefs.py,sha256=qaFfDs3ciqoXPg6xx45N1jPLqccLJw4N0s4P0PyHTQ8,2015
+pip/_internal/models/target_python.py,sha256=2XaH2rZ5ZF-K5wcJbEMGEl7SqrTToDDNkrtQ2v_v_-Q,4271
+pip/_internal/models/wheel.py,sha256=G7dND_s4ebPkEL7RJ1qCY0QhUUWIIK6AnjWgRATF5no,4539
+pip/_internal/network/__init__.py,sha256=jf6Tt5nV_7zkARBrKojIXItgejvoegVJVKUbhAa5Ioc,50
+pip/_internal/network/__pycache__/__init__.cpython-311.pyc,,
+pip/_internal/network/__pycache__/auth.cpython-311.pyc,,
+pip/_internal/network/__pycache__/cache.cpython-311.pyc,,
+pip/_internal/network/__pycache__/download.cpython-311.pyc,,
+pip/_internal/network/__pycache__/lazy_wheel.cpython-311.pyc,,
+pip/_internal/network/__pycache__/session.cpython-311.pyc,,
+pip/_internal/network/__pycache__/utils.cpython-311.pyc,,
+pip/_internal/network/__pycache__/xmlrpc.cpython-311.pyc,,
+pip/_internal/network/auth.py,sha256=D4gASjUrqoDFlSt6gQ767KAAjv6PUyJU0puDlhXNVRE,20809
+pip/_internal/network/cache.py,sha256=48A971qCzKNFvkb57uGEk7-0xaqPS0HWj2711QNTxkU,3935
+pip/_internal/network/download.py,sha256=FLOP29dPYECBiAi7eEjvAbNkyzaKNqbyjOT2m8HPW8U,6048
+pip/_internal/network/lazy_wheel.py,sha256=PBdoMoNQQIA84Fhgne38jWF52W4x_KtsHjxgv4dkRKA,7622
+pip/_internal/network/session.py,sha256=XmanBKjVwPFmh1iJ58q6TDh9xabH37gREuQJ_feuZGA,18741
+pip/_internal/network/utils.py,sha256=Inaxel-NxBu4PQWkjyErdnfewsFCcgHph7dzR1-FboY,4088
+pip/_internal/network/xmlrpc.py,sha256=sAxzOacJ-N1NXGPvap9jC3zuYWSnnv3GXtgR2-E2APA,1838
+pip/_internal/operations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_internal/operations/__pycache__/__init__.cpython-311.pyc,,
+pip/_internal/operations/__pycache__/check.cpython-311.pyc,,
+pip/_internal/operations/__pycache__/freeze.cpython-311.pyc,,
+pip/_internal/operations/__pycache__/prepare.cpython-311.pyc,,
+pip/_internal/operations/build/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_internal/operations/build/__pycache__/__init__.cpython-311.pyc,,
+pip/_internal/operations/build/__pycache__/build_tracker.cpython-311.pyc,,
+pip/_internal/operations/build/__pycache__/metadata.cpython-311.pyc,,
+pip/_internal/operations/build/__pycache__/metadata_editable.cpython-311.pyc,,
+pip/_internal/operations/build/__pycache__/metadata_legacy.cpython-311.pyc,,
+pip/_internal/operations/build/__pycache__/wheel.cpython-311.pyc,,
+pip/_internal/operations/build/__pycache__/wheel_editable.cpython-311.pyc,,
+pip/_internal/operations/build/__pycache__/wheel_legacy.cpython-311.pyc,,
+pip/_internal/operations/build/build_tracker.py,sha256=-ARW_TcjHCOX7D2NUOGntB4Fgc6b4aolsXkAK6BWL7w,4774
+pip/_internal/operations/build/metadata.py,sha256=9S0CUD8U3QqZeXp-Zyt8HxwU90lE4QrnYDgrqZDzBnc,1422
+pip/_internal/operations/build/metadata_editable.py,sha256=VLL7LvntKE8qxdhUdEJhcotFzUsOSI8NNS043xULKew,1474
+pip/_internal/operations/build/metadata_legacy.py,sha256=8i6i1QZX9m_lKPStEFsHKM0MT4a-CD408JOw99daLmo,2190
+pip/_internal/operations/build/wheel.py,sha256=sT12FBLAxDC6wyrDorh8kvcZ1jG5qInCRWzzP-UkJiQ,1075
+pip/_internal/operations/build/wheel_editable.py,sha256=yOtoH6zpAkoKYEUtr8FhzrYnkNHQaQBjWQ2HYae1MQg,1417
+pip/_internal/operations/build/wheel_legacy.py,sha256=K-6kNhmj-1xDF45ny1yheMerF0ui4EoQCLzEoHh6-tc,3045
+pip/_internal/operations/check.py,sha256=L24vRL8VWbyywdoeAhM89WCd8zLTnjIbULlKelUgIec,5912
+pip/_internal/operations/freeze.py,sha256=V59yEyCSz_YhZuhH09-6aV_zvYBMrS_IxFFNqn2QzlA,9864
+pip/_internal/operations/install/__init__.py,sha256=mX7hyD2GNBO2mFGokDQ30r_GXv7Y_PLdtxcUv144e-s,51
+pip/_internal/operations/install/__pycache__/__init__.cpython-311.pyc,,
+pip/_internal/operations/install/__pycache__/editable_legacy.cpython-311.pyc,,
+pip/_internal/operations/install/__pycache__/wheel.cpython-311.pyc,,
+pip/_internal/operations/install/editable_legacy.py,sha256=PoEsNEPGbIZ2yQphPsmYTKLOCMs4gv5OcCdzW124NcA,1283
+pip/_internal/operations/install/wheel.py,sha256=X5Iz9yUg5LlK5VNQ9g2ikc6dcRu8EPi_SUi5iuEDRgo,27615
+pip/_internal/operations/prepare.py,sha256=joWJwPkuqGscQgVNImLK71e9hRapwKvRCM8HclysmvU,28118
+pip/_internal/pyproject.py,sha256=rw4fwlptDp1hZgYoplwbAGwWA32sWQkp7ysf8Ju6iXc,7287
+pip/_internal/req/__init__.py,sha256=HxBFtZy_BbCclLgr26waMtpzYdO5T3vxePvpGAXSt5s,2653
+pip/_internal/req/__pycache__/__init__.cpython-311.pyc,,
+pip/_internal/req/__pycache__/constructors.cpython-311.pyc,,
+pip/_internal/req/__pycache__/req_file.cpython-311.pyc,,
+pip/_internal/req/__pycache__/req_install.cpython-311.pyc,,
+pip/_internal/req/__pycache__/req_set.cpython-311.pyc,,
+pip/_internal/req/__pycache__/req_uninstall.cpython-311.pyc,,
+pip/_internal/req/constructors.py,sha256=v1qzCN1mIldwx-nCrPc8JO4lxkm3Fv8M5RWvt8LISjc,18430
+pip/_internal/req/req_file.py,sha256=gOOJTzL-mDRPcQhjwqjDrjn4V-3rK9TnEFnU3v8RA4Q,18752
+pip/_internal/req/req_install.py,sha256=yhT98NGDoAEk03jznTJnYCznzhiMEEA2ocgsUG_dcNU,35788
+pip/_internal/req/req_set.py,sha256=j3esG0s6SzoVReX9rWn4rpYNtyET_fwxbwJPRimvRxo,2858
+pip/_internal/req/req_uninstall.py,sha256=qzDIxJo-OETWqGais7tSMCDcWbATYABT-Tid3ityF0s,23853
+pip/_internal/resolution/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_internal/resolution/__pycache__/__init__.cpython-311.pyc,,
+pip/_internal/resolution/__pycache__/base.cpython-311.pyc,,
+pip/_internal/resolution/base.py,sha256=qlmh325SBVfvG6Me9gc5Nsh5sdwHBwzHBq6aEXtKsLA,583
+pip/_internal/resolution/legacy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_internal/resolution/legacy/__pycache__/__init__.cpython-311.pyc,,
+pip/_internal/resolution/legacy/__pycache__/resolver.cpython-311.pyc,,
+pip/_internal/resolution/legacy/resolver.py,sha256=3HZiJBRd1FTN6jQpI4qRO8-TbLYeIbUTS6PFvXnXs2w,24068
+pip/_internal/resolution/resolvelib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_internal/resolution/resolvelib/__pycache__/__init__.cpython-311.pyc,,
+pip/_internal/resolution/resolvelib/__pycache__/base.cpython-311.pyc,,
+pip/_internal/resolution/resolvelib/__pycache__/candidates.cpython-311.pyc,,
+pip/_internal/resolution/resolvelib/__pycache__/factory.cpython-311.pyc,,
+pip/_internal/resolution/resolvelib/__pycache__/found_candidates.cpython-311.pyc,,
+pip/_internal/resolution/resolvelib/__pycache__/provider.cpython-311.pyc,,
+pip/_internal/resolution/resolvelib/__pycache__/reporter.cpython-311.pyc,,
+pip/_internal/resolution/resolvelib/__pycache__/requirements.cpython-311.pyc,,
+pip/_internal/resolution/resolvelib/__pycache__/resolver.cpython-311.pyc,,
+pip/_internal/resolution/resolvelib/base.py,sha256=DCf669FsqyQY5uqXeePDHQY1e4QO-pBzWH8O0s9-K94,5023
+pip/_internal/resolution/resolvelib/candidates.py,sha256=5UZ1upNnmqsP-nmEZaDYxaBgCoejw_e2WVGmmAvBxXc,20001
+pip/_internal/resolution/resolvelib/factory.py,sha256=511CaUR41LqjALuFafLVfx15WRvMhxYTdjQCoSvp4gw,32661
+pip/_internal/resolution/resolvelib/found_candidates.py,sha256=9hrTyQqFvl9I7Tji79F1AxHv39Qh1rkJ_7deSHSMfQc,6383
+pip/_internal/resolution/resolvelib/provider.py,sha256=bcsFnYvlmtB80cwVdW1fIwgol8ZNr1f1VHyRTkz47SM,9935
+pip/_internal/resolution/resolvelib/reporter.py,sha256=00JtoXEkTlw0-rl_sl54d71avwOsJHt9GGHcrj5Sza0,3168
+pip/_internal/resolution/resolvelib/requirements.py,sha256=7JG4Z72e5Yk4vU0S5ulGvbqTy4FMQGYhY5zQhX9zTtY,8065
+pip/_internal/resolution/resolvelib/resolver.py,sha256=nLJOsVMEVi2gQUVJoUFKMZAeu2f7GRMjGMvNSWyz0Bc,12592
+pip/_internal/self_outdated_check.py,sha256=pkjQixuWyQ1vrVxZAaYD6SSHgXuFUnHZybXEWTkh0S0,8145
+pip/_internal/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_internal/utils/__pycache__/__init__.cpython-311.pyc,,
+pip/_internal/utils/__pycache__/_jaraco_text.cpython-311.pyc,,
+pip/_internal/utils/__pycache__/_log.cpython-311.pyc,,
+pip/_internal/utils/__pycache__/appdirs.cpython-311.pyc,,
+pip/_internal/utils/__pycache__/compat.cpython-311.pyc,,
+pip/_internal/utils/__pycache__/compatibility_tags.cpython-311.pyc,,
+pip/_internal/utils/__pycache__/datetime.cpython-311.pyc,,
+pip/_internal/utils/__pycache__/deprecation.cpython-311.pyc,,
+pip/_internal/utils/__pycache__/direct_url_helpers.cpython-311.pyc,,
+pip/_internal/utils/__pycache__/egg_link.cpython-311.pyc,,
+pip/_internal/utils/__pycache__/encoding.cpython-311.pyc,,
+pip/_internal/utils/__pycache__/entrypoints.cpython-311.pyc,,
+pip/_internal/utils/__pycache__/filesystem.cpython-311.pyc,,
+pip/_internal/utils/__pycache__/filetypes.cpython-311.pyc,,
+pip/_internal/utils/__pycache__/glibc.cpython-311.pyc,,
+pip/_internal/utils/__pycache__/hashes.cpython-311.pyc,,
+pip/_internal/utils/__pycache__/logging.cpython-311.pyc,,
+pip/_internal/utils/__pycache__/misc.cpython-311.pyc,,
+pip/_internal/utils/__pycache__/packaging.cpython-311.pyc,,
+pip/_internal/utils/__pycache__/retry.cpython-311.pyc,,
+pip/_internal/utils/__pycache__/setuptools_build.cpython-311.pyc,,
+pip/_internal/utils/__pycache__/subprocess.cpython-311.pyc,,
+pip/_internal/utils/__pycache__/temp_dir.cpython-311.pyc,,
+pip/_internal/utils/__pycache__/unpacking.cpython-311.pyc,,
+pip/_internal/utils/__pycache__/urls.cpython-311.pyc,,
+pip/_internal/utils/__pycache__/virtualenv.cpython-311.pyc,,
+pip/_internal/utils/__pycache__/wheel.cpython-311.pyc,,
+pip/_internal/utils/_jaraco_text.py,sha256=M15uUPIh5NpP1tdUGBxRau6q1ZAEtI8-XyLEETscFfE,3350
+pip/_internal/utils/_log.py,sha256=-jHLOE_THaZz5BFcCnoSL9EYAtJ0nXem49s9of4jvKw,1015
+pip/_internal/utils/appdirs.py,sha256=swgcTKOm3daLeXTW6v5BUS2Ti2RvEnGRQYH_yDXklAo,1665
+pip/_internal/utils/compat.py,sha256=ckkFveBiYQjRWjkNsajt_oWPS57tJvE8XxoC4OIYgCY,2399
+pip/_internal/utils/compatibility_tags.py,sha256=OWq5axHpW-MEEPztGdvgADrgJPAcV9a88Rxm4Z8VBs8,6272
+pip/_internal/utils/datetime.py,sha256=m21Y3wAtQc-ji6Veb6k_M5g6A0ZyFI4egchTdnwh-pQ,242
+pip/_internal/utils/deprecation.py,sha256=k7Qg_UBAaaTdyq82YVARA6D7RmcGTXGv7fnfcgigj4Q,3707
+pip/_internal/utils/direct_url_helpers.py,sha256=r2MRtkVDACv9AGqYODBUC9CjwgtsUU1s68hmgfCJMtA,3196
+pip/_internal/utils/egg_link.py,sha256=0FePZoUYKv4RGQ2t6x7w5Z427wbA_Uo3WZnAkrgsuqo,2463
+pip/_internal/utils/encoding.py,sha256=qqsXDtiwMIjXMEiIVSaOjwH5YmirCaK-dIzb6-XJsL0,1169
+pip/_internal/utils/entrypoints.py,sha256=YlhLTRl2oHBAuqhc-zmL7USS67TPWVHImjeAQHreZTQ,3064
+pip/_internal/utils/filesystem.py,sha256=ajvA-q4ocliW9kPp8Yquh-4vssXbu-UKbo5FV9V4X64,4950
+pip/_internal/utils/filetypes.py,sha256=i8XAQ0eFCog26Fw9yV0Yb1ygAqKYB1w9Cz9n0fj8gZU,716
+pip/_internal/utils/glibc.py,sha256=vUkWq_1pJuzcYNcGKLlQmABoUiisK8noYY1yc8Wq4w4,3734
+pip/_internal/utils/hashes.py,sha256=XGGLL0AG8-RhWnyz87xF6MFZ--BKadHU35D47eApCKI,4972
+pip/_internal/utils/logging.py,sha256=7BFKB1uFjdxD5crM-GtwA5T2qjbQ2LPD-gJDuJeDNTg,11606
+pip/_internal/utils/misc.py,sha256=NRV0_2fFhzy1jhvInSBv4dqCmTwct8PV7Kp0m-BPRGM,23530
+pip/_internal/utils/packaging.py,sha256=iI3LH43lVNR4hWBOqF6lFsZq4aycb2j0UcHlmDmcqUg,2109
+pip/_internal/utils/retry.py,sha256=mhFbykXjhTnZfgzeuy-vl9c8nECnYn_CMtwNJX2tYzQ,1392
+pip/_internal/utils/setuptools_build.py,sha256=ouXpud-jeS8xPyTPsXJ-m34NPvK5os45otAzdSV_IJE,4435
+pip/_internal/utils/subprocess.py,sha256=EsvqSRiSMHF98T8Txmu6NLU3U--MpTTQjtNgKP0P--M,8988
+pip/_internal/utils/temp_dir.py,sha256=5qOXe8M4JeY6vaFQM867d5zkp1bSwMZ-KT5jymmP0Zg,9310
+pip/_internal/utils/unpacking.py,sha256=eyDkSsk4nW8ZfiSjNzJduCznpHyaGHVv3ak_LMGsiEM,11951
+pip/_internal/utils/urls.py,sha256=qceSOZb5lbNDrHNsv7_S4L4Ytszja5NwPKUMnZHbYnM,1599
+pip/_internal/utils/virtualenv.py,sha256=S6f7csYorRpiD6cvn3jISZYc3I8PJC43H5iMFpRAEDU,3456
+pip/_internal/utils/wheel.py,sha256=b442jkydFHjXzDy6cMR7MpzWBJ1Q82hR5F33cmcHV3g,4494
+pip/_internal/vcs/__init__.py,sha256=UAqvzpbi0VbZo3Ub6skEeZAw-ooIZR-zX_WpCbxyCoU,596
+pip/_internal/vcs/__pycache__/__init__.cpython-311.pyc,,
+pip/_internal/vcs/__pycache__/bazaar.cpython-311.pyc,,
+pip/_internal/vcs/__pycache__/git.cpython-311.pyc,,
+pip/_internal/vcs/__pycache__/mercurial.cpython-311.pyc,,
+pip/_internal/vcs/__pycache__/subversion.cpython-311.pyc,,
+pip/_internal/vcs/__pycache__/versioncontrol.cpython-311.pyc,,
+pip/_internal/vcs/bazaar.py,sha256=EKStcQaKpNu0NK4p5Q10Oc4xb3DUxFw024XrJy40bFQ,3528
+pip/_internal/vcs/git.py,sha256=3tpc9LQA_J4IVW5r5NvWaaSeDzcmJOrSFZN0J8vIKfU,18177
+pip/_internal/vcs/mercurial.py,sha256=oULOhzJ2Uie-06d1omkL-_Gc6meGaUkyogvqG9ZCyPs,5249
+pip/_internal/vcs/subversion.py,sha256=ddTugHBqHzV3ebKlU5QXHPN4gUqlyXbOx8q8NgXKvs8,11735
+pip/_internal/vcs/versioncontrol.py,sha256=cvf_-hnTAjQLXJ3d17FMNhQfcO1AcKWUF10tfrYyP-c,22440
+pip/_internal/wheel_builder.py,sha256=DL3A8LKeRj_ACp11WS5wSgASgPFqeyAeXJKdXfmaWXU,11799
+pip/_vendor/__init__.py,sha256=JYuAXvClhInxIrA2FTp5p-uuWVL7WV6-vEpTs46-Qh4,4873
+pip/_vendor/__pycache__/__init__.cpython-311.pyc,,
+pip/_vendor/__pycache__/typing_extensions.cpython-311.pyc,,
+pip/_vendor/cachecontrol/__init__.py,sha256=GiYoagwPEiJ_xR_lbwWGaoCiPtF_rz4isjfjdDAgHU4,676
+pip/_vendor/cachecontrol/__pycache__/__init__.cpython-311.pyc,,
+pip/_vendor/cachecontrol/__pycache__/_cmd.cpython-311.pyc,,
+pip/_vendor/cachecontrol/__pycache__/adapter.cpython-311.pyc,,
+pip/_vendor/cachecontrol/__pycache__/cache.cpython-311.pyc,,
+pip/_vendor/cachecontrol/__pycache__/controller.cpython-311.pyc,,
+pip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-311.pyc,,
+pip/_vendor/cachecontrol/__pycache__/heuristics.cpython-311.pyc,,
+pip/_vendor/cachecontrol/__pycache__/serialize.cpython-311.pyc,,
+pip/_vendor/cachecontrol/__pycache__/wrapper.cpython-311.pyc,,
+pip/_vendor/cachecontrol/_cmd.py,sha256=iist2EpzJvDVIhMAxXq8iFnTBsiZAd6iplxfmNboNyk,1737
+pip/_vendor/cachecontrol/adapter.py,sha256=fByO_Pd_EOemjWbuocvBWdN85xT0q_TBm2lxS6vD4fk,6355
+pip/_vendor/cachecontrol/cache.py,sha256=OTQj72tUf8C1uEgczdl3Gc8vkldSzsTITKtDGKMx4z8,1952
+pip/_vendor/cachecontrol/caches/__init__.py,sha256=dtrrroK5BnADR1GWjCZ19aZ0tFsMfvFBtLQQU1sp_ag,303
+pip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-311.pyc,,
+pip/_vendor/cachecontrol/caches/__pycache__/file_cache.cpython-311.pyc,,
+pip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-311.pyc,,
+pip/_vendor/cachecontrol/caches/file_cache.py,sha256=9AlmmTJc6cslb6k5z_6q0sGPHVrMj8zv-uWy-simmfE,5406
+pip/_vendor/cachecontrol/caches/redis_cache.py,sha256=9rmqwtYu_ljVkW6_oLqbC7EaX_a8YT_yLuna-eS0dgo,1386
+pip/_vendor/cachecontrol/controller.py,sha256=o-ejGJlBmpKK8QQLyTPJj0t7siU8XVHXuV8MCybCxQ8,18575
+pip/_vendor/cachecontrol/filewrapper.py,sha256=STttGmIPBvZzt2b51dUOwoWX5crcMCpKZOisM3f5BNc,4292
+pip/_vendor/cachecontrol/heuristics.py,sha256=IYe4QmHERWsMvtxNrp920WeaIsaTTyqLB14DSheSbtY,4834
+pip/_vendor/cachecontrol/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/cachecontrol/serialize.py,sha256=HQd2IllQ05HzPkVLMXTF2uX5mjEQjDBkxCqUJUODpZk,5163
+pip/_vendor/cachecontrol/wrapper.py,sha256=hsGc7g8QGQTT-4f8tgz3AM5qwScg6FO0BSdLSRdEvpU,1417
+pip/_vendor/certifi/__init__.py,sha256=p_GYZrjUwPBUhpLlCZoGb0miKBKSqDAyZC5DvIuqbHQ,94
+pip/_vendor/certifi/__main__.py,sha256=1k3Cr95vCxxGRGDljrW3wMdpZdL3Nhf0u1n-k2qdsCY,255
+pip/_vendor/certifi/__pycache__/__init__.cpython-311.pyc,,
+pip/_vendor/certifi/__pycache__/__main__.cpython-311.pyc,,
+pip/_vendor/certifi/__pycache__/core.cpython-311.pyc,,
+pip/_vendor/certifi/cacert.pem,sha256=lO3rZukXdPyuk6BWUJFOKQliWaXH6HGh9l1GGrUgG0c,299427
+pip/_vendor/certifi/core.py,sha256=2SRT5rIcQChFDbe37BQa-kULxAgJ8qN6l1jfqTp4HIs,4486
+pip/_vendor/certifi/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/distlib/__init__.py,sha256=dcwgYGYGQqAEawBXPDtIx80DO_3cOmFv8HTc8JMzknQ,625
+pip/_vendor/distlib/__pycache__/__init__.cpython-311.pyc,,
+pip/_vendor/distlib/__pycache__/compat.cpython-311.pyc,,
+pip/_vendor/distlib/__pycache__/database.cpython-311.pyc,,
+pip/_vendor/distlib/__pycache__/index.cpython-311.pyc,,
+pip/_vendor/distlib/__pycache__/locators.cpython-311.pyc,,
+pip/_vendor/distlib/__pycache__/manifest.cpython-311.pyc,,
+pip/_vendor/distlib/__pycache__/markers.cpython-311.pyc,,
+pip/_vendor/distlib/__pycache__/metadata.cpython-311.pyc,,
+pip/_vendor/distlib/__pycache__/resources.cpython-311.pyc,,
+pip/_vendor/distlib/__pycache__/scripts.cpython-311.pyc,,
+pip/_vendor/distlib/__pycache__/util.cpython-311.pyc,,
+pip/_vendor/distlib/__pycache__/version.cpython-311.pyc,,
+pip/_vendor/distlib/__pycache__/wheel.cpython-311.pyc,,
+pip/_vendor/distlib/compat.py,sha256=2jRSjRI4o-vlXeTK2BCGIUhkc6e9ZGhSsacRM5oseTw,41467
+pip/_vendor/distlib/database.py,sha256=mHy_LxiXIsIVRb-T0-idBrVLw3Ffij5teHCpbjmJ9YU,51160
+pip/_vendor/distlib/index.py,sha256=lTbw268rRhj8dw1sib3VZ_0EhSGgoJO3FKJzSFMOaeA,20797
+pip/_vendor/distlib/locators.py,sha256=oBeAZpFuPQSY09MgNnLfQGGAXXvVO96BFpZyKMuK4tM,51026
+pip/_vendor/distlib/manifest.py,sha256=3qfmAmVwxRqU1o23AlfXrQGZzh6g_GGzTAP_Hb9C5zQ,14168
+pip/_vendor/distlib/markers.py,sha256=X6sDvkFGcYS8gUW8hfsWuKEKAqhQZAJ7iXOMLxRYjYk,5164
+pip/_vendor/distlib/metadata.py,sha256=zil3sg2EUfLXVigljY2d_03IJt-JSs7nX-73fECMX2s,38724
+pip/_vendor/distlib/resources.py,sha256=LwbPksc0A1JMbi6XnuPdMBUn83X7BPuFNWqPGEKI698,10820
+pip/_vendor/distlib/scripts.py,sha256=BJliaDAZaVB7WAkwokgC3HXwLD2iWiHaVI50H7C6eG8,18608
+pip/_vendor/distlib/t32.exe,sha256=a0GV5kCoWsMutvliiCKmIgV98eRZ33wXoS-XrqvJQVs,97792
+pip/_vendor/distlib/t64-arm.exe,sha256=68TAa32V504xVBnufojh0PcenpR3U4wAqTqf-MZqbPw,182784
+pip/_vendor/distlib/t64.exe,sha256=gaYY8hy4fbkHYTTnA4i26ct8IQZzkBG2pRdy0iyuBrc,108032
+pip/_vendor/distlib/util.py,sha256=vMPGvsS4j9hF6Y9k3Tyom1aaHLb0rFmZAEyzeAdel9w,66682
+pip/_vendor/distlib/version.py,sha256=s5VIs8wBn0fxzGxWM_aA2ZZyx525HcZbMvcTlTyZ3Rg,23727
+pip/_vendor/distlib/w32.exe,sha256=R4csx3-OGM9kL4aPIzQKRo5TfmRSHZo6QWyLhDhNBks,91648
+pip/_vendor/distlib/w64-arm.exe,sha256=xdyYhKj0WDcVUOCb05blQYvzdYIKMbmJn2SZvzkcey4,168448
+pip/_vendor/distlib/w64.exe,sha256=ejGf-rojoBfXseGLpya6bFTFPWRG21X5KvU8J5iU-K0,101888
+pip/_vendor/distlib/wheel.py,sha256=DFIVguEQHCdxnSdAO0dfFsgMcvVZitg7bCOuLwZ7A_s,43979
+pip/_vendor/distro/__init__.py,sha256=2fHjF-SfgPvjyNZ1iHh_wjqWdR_Yo5ODHwZC0jLBPhc,981
+pip/_vendor/distro/__main__.py,sha256=bu9d3TifoKciZFcqRBuygV3GSuThnVD_m2IK4cz96Vs,64
+pip/_vendor/distro/__pycache__/__init__.cpython-311.pyc,,
+pip/_vendor/distro/__pycache__/__main__.cpython-311.pyc,,
+pip/_vendor/distro/__pycache__/distro.cpython-311.pyc,,
+pip/_vendor/distro/distro.py,sha256=XqbefacAhDT4zr_trnbA15eY8vdK4GTghgmvUGrEM_4,49430
+pip/_vendor/distro/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/idna/__init__.py,sha256=KJQN1eQBr8iIK5SKrJ47lXvxG0BJ7Lm38W4zT0v_8lk,849
+pip/_vendor/idna/__pycache__/__init__.cpython-311.pyc,,
+pip/_vendor/idna/__pycache__/codec.cpython-311.pyc,,
+pip/_vendor/idna/__pycache__/compat.cpython-311.pyc,,
+pip/_vendor/idna/__pycache__/core.cpython-311.pyc,,
+pip/_vendor/idna/__pycache__/idnadata.cpython-311.pyc,,
+pip/_vendor/idna/__pycache__/intranges.cpython-311.pyc,,
+pip/_vendor/idna/__pycache__/package_data.cpython-311.pyc,,
+pip/_vendor/idna/__pycache__/uts46data.cpython-311.pyc,,
+pip/_vendor/idna/codec.py,sha256=PS6m-XmdST7Wj7J7ulRMakPDt5EBJyYrT3CPtjh-7t4,3426
+pip/_vendor/idna/compat.py,sha256=0_sOEUMT4CVw9doD3vyRhX80X19PwqFoUBs7gWsFME4,321
+pip/_vendor/idna/core.py,sha256=lyhpoe2vulEaB_65xhXmoKgO-xUqFDvcwxu5hpNNO4E,12663
+pip/_vendor/idna/idnadata.py,sha256=dqRwytzkjIHMBa2R1lYvHDwACenZPt8eGVu1Y8UBE-E,78320
+pip/_vendor/idna/intranges.py,sha256=YBr4fRYuWH7kTKS2tXlFjM24ZF1Pdvcir-aywniInqg,1881
+pip/_vendor/idna/package_data.py,sha256=Tkt0KnIeyIlnHddOaz9WSkkislNgokJAuE-p5GorMqo,21
+pip/_vendor/idna/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/idna/uts46data.py,sha256=1KuksWqLuccPXm2uyRVkhfiFLNIhM_H2m4azCcnOqEU,206503
+pip/_vendor/msgpack/__init__.py,sha256=gsMP7JTECZNUSjvOyIbdhNOkpB9Z8BcGwabVGY2UcdQ,1077
+pip/_vendor/msgpack/__pycache__/__init__.cpython-311.pyc,,
+pip/_vendor/msgpack/__pycache__/exceptions.cpython-311.pyc,,
+pip/_vendor/msgpack/__pycache__/ext.cpython-311.pyc,,
+pip/_vendor/msgpack/__pycache__/fallback.cpython-311.pyc,,
+pip/_vendor/msgpack/exceptions.py,sha256=dCTWei8dpkrMsQDcjQk74ATl9HsIBH0ybt8zOPNqMYc,1081
+pip/_vendor/msgpack/ext.py,sha256=fKp00BqDLjUtZnPd70Llr138zk8JsCuSpJkkZ5S4dt8,5629
+pip/_vendor/msgpack/fallback.py,sha256=wdUWJkWX2gzfRW9BBCTOuIE1Wvrf5PtBtR8ZtY7G_EE,33175
+pip/_vendor/packaging/__init__.py,sha256=dtw2bNmWCQ9WnMoK3bk_elL1svSlikXtLpZhCFIB9SE,496
+pip/_vendor/packaging/__pycache__/__init__.cpython-311.pyc,,
+pip/_vendor/packaging/__pycache__/_elffile.cpython-311.pyc,,
+pip/_vendor/packaging/__pycache__/_manylinux.cpython-311.pyc,,
+pip/_vendor/packaging/__pycache__/_musllinux.cpython-311.pyc,,
+pip/_vendor/packaging/__pycache__/_parser.cpython-311.pyc,,
+pip/_vendor/packaging/__pycache__/_structures.cpython-311.pyc,,
+pip/_vendor/packaging/__pycache__/_tokenizer.cpython-311.pyc,,
+pip/_vendor/packaging/__pycache__/markers.cpython-311.pyc,,
+pip/_vendor/packaging/__pycache__/metadata.cpython-311.pyc,,
+pip/_vendor/packaging/__pycache__/requirements.cpython-311.pyc,,
+pip/_vendor/packaging/__pycache__/specifiers.cpython-311.pyc,,
+pip/_vendor/packaging/__pycache__/tags.cpython-311.pyc,,
+pip/_vendor/packaging/__pycache__/utils.cpython-311.pyc,,
+pip/_vendor/packaging/__pycache__/version.cpython-311.pyc,,
+pip/_vendor/packaging/_elffile.py,sha256=_LcJW4YNKywYsl4169B2ukKRqwxjxst_8H0FRVQKlz8,3282
+pip/_vendor/packaging/_manylinux.py,sha256=Xo4V0PZz8sbuVCbTni0t1CR0AHeir_7ib4lTmV8scD4,9586
+pip/_vendor/packaging/_musllinux.py,sha256=p9ZqNYiOItGee8KcZFeHF_YcdhVwGHdK6r-8lgixvGQ,2694
+pip/_vendor/packaging/_parser.py,sha256=s_TvTvDNK0NrM2QB3VKThdWFM4Nc0P6JnkObkl3MjpM,10236
+pip/_vendor/packaging/_structures.py,sha256=q3eVNmbWJGG_S0Dit_S3Ao8qQqz_5PYTXFAKBZe5yr4,1431
+pip/_vendor/packaging/_tokenizer.py,sha256=J6v5H7Jzvb-g81xp_2QACKwO7LxHQA6ikryMU7zXwN8,5273
+pip/_vendor/packaging/markers.py,sha256=dWKSqn5Sp-jDmOG-W3GfLHKjwhf1IsznbT71VlBoB5M,10671
+pip/_vendor/packaging/metadata.py,sha256=KINuSkJ12u-SyoKNTy_pHNGAfMUtxNvZ53qA1zAKcKI,32349
+pip/_vendor/packaging/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/packaging/requirements.py,sha256=gYyRSAdbrIyKDY66ugIDUQjRMvxkH2ALioTmX3tnL6o,2947
+pip/_vendor/packaging/specifiers.py,sha256=HfGgfNJRvrzC759gnnoojHyiWs_DYmcw5PEh5jHH-YE,39738
+pip/_vendor/packaging/tags.py,sha256=Fo6_cit95-7QfcMb16XtI7AUiSMgdwA_hCO_9lV2pz4,21388
+pip/_vendor/packaging/utils.py,sha256=NAdYUwnlAOpkat_RthavX8a07YuVxgGL_vwrx73GSDM,5287
+pip/_vendor/packaging/version.py,sha256=wE4sSVlF-d1H6HFC1vszEe35CwTig_fh4HHIFg95hFE,16210
+pip/_vendor/pkg_resources/__init__.py,sha256=jrhDRbOubP74QuPXxd7U7Po42PH2l-LZ2XfcO7llpZ4,124463
+pip/_vendor/pkg_resources/__pycache__/__init__.cpython-311.pyc,,
+pip/_vendor/platformdirs/__init__.py,sha256=FTA6LGNm40GwNZt3gG3uLAacWvf2E_2HTmH0rAALGR8,22285
+pip/_vendor/platformdirs/__main__.py,sha256=jBJ8zb7Mpx5ebcqF83xrpO94MaeCpNGHVf9cvDN2JLg,1505
+pip/_vendor/platformdirs/__pycache__/__init__.cpython-311.pyc,,
+pip/_vendor/platformdirs/__pycache__/__main__.cpython-311.pyc,,
+pip/_vendor/platformdirs/__pycache__/android.cpython-311.pyc,,
+pip/_vendor/platformdirs/__pycache__/api.cpython-311.pyc,,
+pip/_vendor/platformdirs/__pycache__/macos.cpython-311.pyc,,
+pip/_vendor/platformdirs/__pycache__/unix.cpython-311.pyc,,
+pip/_vendor/platformdirs/__pycache__/version.cpython-311.pyc,,
+pip/_vendor/platformdirs/__pycache__/windows.cpython-311.pyc,,
+pip/_vendor/platformdirs/android.py,sha256=xZXY9Jd46WOsxT2U6-5HsNtDZ-IQqxcEUrBLl3hYk4o,9016
+pip/_vendor/platformdirs/api.py,sha256=QBYdUac2eC521ek_y53uD1Dcq-lJX8IgSRVd4InC6uc,8996
+pip/_vendor/platformdirs/macos.py,sha256=wftsbsvq6nZ0WORXSiCrZNkRHz_WKuktl0a6mC7MFkI,5580
+pip/_vendor/platformdirs/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/platformdirs/unix.py,sha256=Cci9Wqt35dAMsg6HT9nRGHSBW5obb0pR3AE1JJnsCXg,10643
+pip/_vendor/platformdirs/version.py,sha256=r7F76tZRjgQKzrpx_I0_ZMQOMU-PS7eGnHD7zEK3KB0,411
+pip/_vendor/platformdirs/windows.py,sha256=IFpiohUBwxPtCzlyKwNtxyW4Jk8haa6W8o59mfrDXVo,10125
+pip/_vendor/pygments/__init__.py,sha256=7N1oiaWulw_nCsTY4EEixYLz15pWY5u4uPAFFi-ielU,2983
+pip/_vendor/pygments/__main__.py,sha256=isIhBxLg65nLlXukG4VkMuPfNdd7gFzTZ_R_z3Q8diY,353
+pip/_vendor/pygments/__pycache__/__init__.cpython-311.pyc,,
+pip/_vendor/pygments/__pycache__/__main__.cpython-311.pyc,,
+pip/_vendor/pygments/__pycache__/cmdline.cpython-311.pyc,,
+pip/_vendor/pygments/__pycache__/console.cpython-311.pyc,,
+pip/_vendor/pygments/__pycache__/filter.cpython-311.pyc,,
+pip/_vendor/pygments/__pycache__/formatter.cpython-311.pyc,,
+pip/_vendor/pygments/__pycache__/lexer.cpython-311.pyc,,
+pip/_vendor/pygments/__pycache__/modeline.cpython-311.pyc,,
+pip/_vendor/pygments/__pycache__/plugin.cpython-311.pyc,,
+pip/_vendor/pygments/__pycache__/regexopt.cpython-311.pyc,,
+pip/_vendor/pygments/__pycache__/scanner.cpython-311.pyc,,
+pip/_vendor/pygments/__pycache__/sphinxext.cpython-311.pyc,,
+pip/_vendor/pygments/__pycache__/style.cpython-311.pyc,,
+pip/_vendor/pygments/__pycache__/token.cpython-311.pyc,,
+pip/_vendor/pygments/__pycache__/unistring.cpython-311.pyc,,
+pip/_vendor/pygments/__pycache__/util.cpython-311.pyc,,
+pip/_vendor/pygments/cmdline.py,sha256=LIVzmAunlk9sRJJp54O4KRy9GDIN4Wu13v9p9QzfGPM,23656
+pip/_vendor/pygments/console.py,sha256=yhP9UsLAVmWKVQf2446JJewkA7AiXeeTf4Ieg3Oi2fU,1718
+pip/_vendor/pygments/filter.py,sha256=_ADNPCskD8_GmodHi6_LoVgPU3Zh336aBCT5cOeTMs0,1910
+pip/_vendor/pygments/filters/__init__.py,sha256=RdedK2KWKXlKwR7cvkfr3NUj9YiZQgMgilRMFUg2jPA,40392
+pip/_vendor/pygments/filters/__pycache__/__init__.cpython-311.pyc,,
+pip/_vendor/pygments/formatter.py,sha256=jDWBTndlBH2Z5IYZFVDnP0qn1CaTQjTWt7iAGtCnJEg,4390
+pip/_vendor/pygments/formatters/__init__.py,sha256=8No-NUs8rBTSSBJIv4hSEQt2M0cFB4hwAT0snVc2QGE,5385
+pip/_vendor/pygments/formatters/__pycache__/__init__.cpython-311.pyc,,
+pip/_vendor/pygments/formatters/__pycache__/_mapping.cpython-311.pyc,,
+pip/_vendor/pygments/formatters/__pycache__/bbcode.cpython-311.pyc,,
+pip/_vendor/pygments/formatters/__pycache__/groff.cpython-311.pyc,,
+pip/_vendor/pygments/formatters/__pycache__/html.cpython-311.pyc,,
+pip/_vendor/pygments/formatters/__pycache__/img.cpython-311.pyc,,
+pip/_vendor/pygments/formatters/__pycache__/irc.cpython-311.pyc,,
+pip/_vendor/pygments/formatters/__pycache__/latex.cpython-311.pyc,,
+pip/_vendor/pygments/formatters/__pycache__/other.cpython-311.pyc,,
+pip/_vendor/pygments/formatters/__pycache__/pangomarkup.cpython-311.pyc,,
+pip/_vendor/pygments/formatters/__pycache__/rtf.cpython-311.pyc,,
+pip/_vendor/pygments/formatters/__pycache__/svg.cpython-311.pyc,,
+pip/_vendor/pygments/formatters/__pycache__/terminal.cpython-311.pyc,,
+pip/_vendor/pygments/formatters/__pycache__/terminal256.cpython-311.pyc,,
+pip/_vendor/pygments/formatters/_mapping.py,sha256=1Cw37FuQlNacnxRKmtlPX4nyLoX9_ttko5ZwscNUZZ4,4176
+pip/_vendor/pygments/formatters/bbcode.py,sha256=3JQLI45tcrQ_kRUMjuab6C7Hb0XUsbVWqqbSn9cMjkI,3320
+pip/_vendor/pygments/formatters/groff.py,sha256=M39k0PaSSZRnxWjqBSVPkF0mu1-Vr7bm6RsFvs-CNN4,5106
+pip/_vendor/pygments/formatters/html.py,sha256=SE2jc3YCqbMS3rZW9EAmDlAUhdVxJ52gA4dileEvCGU,35669
+pip/_vendor/pygments/formatters/img.py,sha256=MwA4xWPLOwh6j7Yc6oHzjuqSPt0M1fh5r-5BTIIUfsU,23287
+pip/_vendor/pygments/formatters/irc.py,sha256=dp1Z0l_ObJ5NFh9MhqLGg5ptG5hgJqedT2Vkutt9v0M,4981
+pip/_vendor/pygments/formatters/latex.py,sha256=XMmhOCqUKDBQtG5mGJNAFYxApqaC5puo5cMmPfK3944,19306
+pip/_vendor/pygments/formatters/other.py,sha256=56PMJOliin-rAUdnRM0i1wsV1GdUPd_dvQq0_UPfF9c,5034
+pip/_vendor/pygments/formatters/pangomarkup.py,sha256=y16U00aVYYEFpeCfGXlYBSMacG425CbfoG8oKbKegIg,2218
+pip/_vendor/pygments/formatters/rtf.py,sha256=ZT90dmcKyJboIB0mArhL7IhE467GXRN0G7QAUgG03To,11957
+pip/_vendor/pygments/formatters/svg.py,sha256=KKsiophPupHuxm0So-MsbQEWOT54IAiSF7hZPmxtKXE,7174
+pip/_vendor/pygments/formatters/terminal.py,sha256=AojNG4MlKq2L6IsC_VnXHu4AbHCBn9Otog6u45XvxeI,4674
+pip/_vendor/pygments/formatters/terminal256.py,sha256=kGkNUVo3FpwjytIDS0if79EuUoroAprcWt3igrcIqT0,11753
+pip/_vendor/pygments/lexer.py,sha256=TYHDt___gNW4axTl2zvPZff-VQi8fPaIh5OKRcVSjUM,35349
+pip/_vendor/pygments/lexers/__init__.py,sha256=pIlxyQJuu_syh9lE080cq8ceVbEVcKp0osAFU5fawJU,12115
+pip/_vendor/pygments/lexers/__pycache__/__init__.cpython-311.pyc,,
+pip/_vendor/pygments/lexers/__pycache__/_mapping.cpython-311.pyc,,
+pip/_vendor/pygments/lexers/__pycache__/python.cpython-311.pyc,,
+pip/_vendor/pygments/lexers/_mapping.py,sha256=61-h3zr103m01OS5BUq_AfUiL9YI06Ves9ipQ7k4vr4,76097
+pip/_vendor/pygments/lexers/python.py,sha256=2J_YJrPTr_A6fJY_qKiKv0GpgPwHMrlMSeo59qN3fe4,53687
+pip/_vendor/pygments/modeline.py,sha256=gtRYZBS-CKOCDXHhGZqApboHBaZwGH8gznN3O6nuxj4,1005
+pip/_vendor/pygments/plugin.py,sha256=ioeJ3QeoJ-UQhZpY9JL7vbxsTVuwwM7BCu-Jb8nN0AU,1891
+pip/_vendor/pygments/regexopt.py,sha256=Hky4EB13rIXEHQUNkwmCrYqtIlnXDehNR3MztafZ43w,3072
+pip/_vendor/pygments/scanner.py,sha256=NDy3ofK_fHRFK4hIDvxpamG871aewqcsIb6sgTi7Fhk,3092
+pip/_vendor/pygments/sphinxext.py,sha256=iOptJBcqOGPwMEJ2p70PvwpZPIGdvdZ8dxvq6kzxDgA,7981
+pip/_vendor/pygments/style.py,sha256=rSCZWFpg1_DwFMXDU0nEVmAcBHpuQGf9RxvOPPQvKLQ,6420
+pip/_vendor/pygments/styles/__init__.py,sha256=qUk6_1z5KmT8EdJFZYgESmG6P_HJF_2vVrDD7HSCGYY,2042
+pip/_vendor/pygments/styles/__pycache__/__init__.cpython-311.pyc,,
+pip/_vendor/pygments/styles/__pycache__/_mapping.cpython-311.pyc,,
+pip/_vendor/pygments/styles/_mapping.py,sha256=6lovFUE29tz6EsV3XYY4hgozJ7q1JL7cfO3UOlgnS8w,3312
+pip/_vendor/pygments/token.py,sha256=qZwT7LSPy5YBY3JgDjut642CCy7JdQzAfmqD9NmT5j0,6226
+pip/_vendor/pygments/unistring.py,sha256=p5c1i-HhoIhWemy9CUsaN9o39oomYHNxXll0Xfw6tEA,63208
+pip/_vendor/pygments/util.py,sha256=2tj2nS1X9_OpcuSjf8dOET2bDVZhs8cEKd_uT6-Fgg8,10031
+pip/_vendor/pyproject_hooks/__init__.py,sha256=kCehmy0UaBa9oVMD7ZIZrnswfnP3LXZ5lvnNJAL5JBM,491
+pip/_vendor/pyproject_hooks/__pycache__/__init__.cpython-311.pyc,,
+pip/_vendor/pyproject_hooks/__pycache__/_compat.cpython-311.pyc,,
+pip/_vendor/pyproject_hooks/__pycache__/_impl.cpython-311.pyc,,
+pip/_vendor/pyproject_hooks/_compat.py,sha256=by6evrYnqkisiM-MQcvOKs5bgDMzlOSgZqRHNqf04zE,138
+pip/_vendor/pyproject_hooks/_impl.py,sha256=61GJxzQip0IInhuO69ZI5GbNQ82XEDUB_1Gg5_KtUoc,11920
+pip/_vendor/pyproject_hooks/_in_process/__init__.py,sha256=9gQATptbFkelkIy0OfWFEACzqxXJMQDWCH9rBOAZVwQ,546
+pip/_vendor/pyproject_hooks/_in_process/__pycache__/__init__.cpython-311.pyc,,
+pip/_vendor/pyproject_hooks/_in_process/__pycache__/_in_process.cpython-311.pyc,,
+pip/_vendor/pyproject_hooks/_in_process/_in_process.py,sha256=m2b34c917IW5o-Q_6TYIHlsK9lSUlNiyrITTUH_zwew,10927
+pip/_vendor/requests/__init__.py,sha256=HlB_HzhrzGtfD_aaYUwUh1zWXLZ75_YCLyit75d0Vz8,5057
+pip/_vendor/requests/__pycache__/__init__.cpython-311.pyc,,
+pip/_vendor/requests/__pycache__/__version__.cpython-311.pyc,,
+pip/_vendor/requests/__pycache__/_internal_utils.cpython-311.pyc,,
+pip/_vendor/requests/__pycache__/adapters.cpython-311.pyc,,
+pip/_vendor/requests/__pycache__/api.cpython-311.pyc,,
+pip/_vendor/requests/__pycache__/auth.cpython-311.pyc,,
+pip/_vendor/requests/__pycache__/certs.cpython-311.pyc,,
+pip/_vendor/requests/__pycache__/compat.cpython-311.pyc,,
+pip/_vendor/requests/__pycache__/cookies.cpython-311.pyc,,
+pip/_vendor/requests/__pycache__/exceptions.cpython-311.pyc,,
+pip/_vendor/requests/__pycache__/help.cpython-311.pyc,,
+pip/_vendor/requests/__pycache__/hooks.cpython-311.pyc,,
+pip/_vendor/requests/__pycache__/models.cpython-311.pyc,,
+pip/_vendor/requests/__pycache__/packages.cpython-311.pyc,,
+pip/_vendor/requests/__pycache__/sessions.cpython-311.pyc,,
+pip/_vendor/requests/__pycache__/status_codes.cpython-311.pyc,,
+pip/_vendor/requests/__pycache__/structures.cpython-311.pyc,,
+pip/_vendor/requests/__pycache__/utils.cpython-311.pyc,,
+pip/_vendor/requests/__version__.py,sha256=FVfglgZmNQnmYPXpOohDU58F5EUb_-VnSTaAesS187g,435
+pip/_vendor/requests/_internal_utils.py,sha256=nMQymr4hs32TqVo5AbCrmcJEhvPUh7xXlluyqwslLiQ,1495
+pip/_vendor/requests/adapters.py,sha256=J7VeVxKBvawbtlX2DERVo05J9BXTcWYLMHNd1Baa-bk,27607
+pip/_vendor/requests/api.py,sha256=_Zb9Oa7tzVIizTKwFrPjDEY9ejtm_OnSRERnADxGsQs,6449
+pip/_vendor/requests/auth.py,sha256=kF75tqnLctZ9Mf_hm9TZIj4cQWnN5uxRz8oWsx5wmR0,10186
+pip/_vendor/requests/certs.py,sha256=PVPooB0jP5hkZEULSCwC074532UFbR2Ptgu0I5zwmCs,575
+pip/_vendor/requests/compat.py,sha256=Mo9f9xZpefod8Zm-n9_StJcVTmwSukXR2p3IQyyVXvU,1485
+pip/_vendor/requests/cookies.py,sha256=bNi-iqEj4NPZ00-ob-rHvzkvObzN3lEpgw3g6paS3Xw,18590
+pip/_vendor/requests/exceptions.py,sha256=D1wqzYWne1mS2rU43tP9CeN1G7QAy7eqL9o1god6Ejw,4272
+pip/_vendor/requests/help.py,sha256=hRKaf9u0G7fdwrqMHtF3oG16RKktRf6KiwtSq2Fo1_0,3813
+pip/_vendor/requests/hooks.py,sha256=CiuysiHA39V5UfcCBXFIx83IrDpuwfN9RcTUgv28ftQ,733
+pip/_vendor/requests/models.py,sha256=x4K4CmH-lC0l2Kb-iPfMN4dRXxHEcbOaEWBL_i09AwI,35483
+pip/_vendor/requests/packages.py,sha256=_ZQDCJTJ8SP3kVWunSqBsRZNPzj2c1WFVqbdr08pz3U,1057
+pip/_vendor/requests/sessions.py,sha256=ykTI8UWGSltOfH07HKollH7kTBGw4WhiBVaQGmckTw4,30495
+pip/_vendor/requests/status_codes.py,sha256=iJUAeA25baTdw-6PfD0eF4qhpINDJRJI-yaMqxs4LEI,4322
+pip/_vendor/requests/structures.py,sha256=-IbmhVz06S-5aPSZuUthZ6-6D9XOjRuTXHOabY041XM,2912
+pip/_vendor/requests/utils.py,sha256=L79vnFbzJ3SFLKtJwpoWe41Tozi3RlZv94pY1TFIyow,33631
+pip/_vendor/resolvelib/__init__.py,sha256=h509TdEcpb5-44JonaU3ex2TM15GVBLjM9CNCPwnTTs,537
+pip/_vendor/resolvelib/__pycache__/__init__.cpython-311.pyc,,
+pip/_vendor/resolvelib/__pycache__/providers.cpython-311.pyc,,
+pip/_vendor/resolvelib/__pycache__/reporters.cpython-311.pyc,,
+pip/_vendor/resolvelib/__pycache__/resolvers.cpython-311.pyc,,
+pip/_vendor/resolvelib/__pycache__/structs.cpython-311.pyc,,
+pip/_vendor/resolvelib/compat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/resolvelib/compat/__pycache__/__init__.cpython-311.pyc,,
+pip/_vendor/resolvelib/compat/__pycache__/collections_abc.cpython-311.pyc,,
+pip/_vendor/resolvelib/compat/collections_abc.py,sha256=uy8xUZ-NDEw916tugUXm8HgwCGiMO0f-RcdnpkfXfOs,156
+pip/_vendor/resolvelib/providers.py,sha256=fuuvVrCetu5gsxPB43ERyjfO8aReS3rFQHpDgiItbs4,5871
+pip/_vendor/resolvelib/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/resolvelib/reporters.py,sha256=TSbRmWzTc26w0ggsV1bxVpeWDB8QNIre6twYl7GIZBE,1601
+pip/_vendor/resolvelib/resolvers.py,sha256=G8rsLZSq64g5VmIq-lB7UcIJ1gjAxIQJmTF4REZleQ0,20511
+pip/_vendor/resolvelib/structs.py,sha256=0_1_XO8z_CLhegP3Vpf9VJ3zJcfLm0NOHRM-i0Ykz3o,4963
+pip/_vendor/rich/__init__.py,sha256=dRxjIL-SbFVY0q3IjSMrfgBTHrm1LZDgLOygVBwiYZc,6090
+pip/_vendor/rich/__main__.py,sha256=eO7Cq8JnrgG8zVoeImiAs92q3hXNMIfp0w5lMsO7Q2Y,8477
+pip/_vendor/rich/__pycache__/__init__.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/__main__.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/_cell_widths.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/_emoji_codes.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/_emoji_replace.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/_export_format.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/_extension.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/_fileno.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/_inspect.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/_log_render.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/_loop.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/_null_file.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/_palettes.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/_pick.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/_ratio.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/_spinners.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/_stack.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/_timer.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/_win32_console.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/_windows.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/_windows_renderer.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/_wrap.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/abc.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/align.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/ansi.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/bar.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/box.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/cells.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/color.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/color_triplet.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/columns.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/console.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/constrain.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/containers.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/control.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/default_styles.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/diagnose.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/emoji.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/errors.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/file_proxy.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/filesize.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/highlighter.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/json.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/jupyter.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/layout.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/live.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/live_render.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/logging.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/markup.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/measure.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/padding.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/pager.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/palette.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/panel.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/pretty.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/progress.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/progress_bar.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/prompt.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/protocol.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/region.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/repr.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/rule.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/scope.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/screen.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/segment.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/spinner.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/status.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/style.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/styled.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/syntax.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/table.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/terminal_theme.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/text.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/theme.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/themes.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/traceback.cpython-311.pyc,,
+pip/_vendor/rich/__pycache__/tree.cpython-311.pyc,,
+pip/_vendor/rich/_cell_widths.py,sha256=fbmeyetEdHjzE_Vx2l1uK7tnPOhMs2X1lJfO3vsKDpA,10209
+pip/_vendor/rich/_emoji_codes.py,sha256=hu1VL9nbVdppJrVoijVshRlcRRe_v3dju3Mmd2sKZdY,140235
+pip/_vendor/rich/_emoji_replace.py,sha256=n-kcetsEUx2ZUmhQrfeMNc-teeGhpuSQ5F8VPBsyvDo,1064
+pip/_vendor/rich/_export_format.py,sha256=RI08pSrm5tBSzPMvnbTqbD9WIalaOoN5d4M1RTmLq1Y,2128
+pip/_vendor/rich/_extension.py,sha256=Xt47QacCKwYruzjDi-gOBq724JReDj9Cm9xUi5fr-34,265
+pip/_vendor/rich/_fileno.py,sha256=HWZxP5C2ajMbHryvAQZseflVfQoGzsKOHzKGsLD8ynQ,799
+pip/_vendor/rich/_inspect.py,sha256=oZJGw31e64dwXSCmrDnvZbwVb1ZKhWfU8wI3VWohjJk,9695
+pip/_vendor/rich/_log_render.py,sha256=1ByI0PA1ZpxZY3CGJOK54hjlq4X-Bz_boIjIqCd8Kns,3225
+pip/_vendor/rich/_loop.py,sha256=hV_6CLdoPm0va22Wpw4zKqM0RYsz3TZxXj0PoS-9eDQ,1236
+pip/_vendor/rich/_null_file.py,sha256=tGSXk_v-IZmbj1GAzHit8A3kYIQMiCpVsCFfsC-_KJ4,1387
+pip/_vendor/rich/_palettes.py,sha256=cdev1JQKZ0JvlguV9ipHgznTdnvlIzUFDBb0It2PzjI,7063
+pip/_vendor/rich/_pick.py,sha256=evDt8QN4lF5CiwrUIXlOJCntitBCOsI3ZLPEIAVRLJU,423
+pip/_vendor/rich/_ratio.py,sha256=Zt58apszI6hAAcXPpgdWKpu3c31UBWebOeR4mbyptvU,5471
+pip/_vendor/rich/_spinners.py,sha256=U2r1_g_1zSjsjiUdAESc2iAMc3i4ri_S8PYP6kQ5z1I,19919
+pip/_vendor/rich/_stack.py,sha256=-C8OK7rxn3sIUdVwxZBBpeHhIzX0eI-VM3MemYfaXm0,351
+pip/_vendor/rich/_timer.py,sha256=zelxbT6oPFZnNrwWPpc1ktUeAT-Vc4fuFcRZLQGLtMI,417
+pip/_vendor/rich/_win32_console.py,sha256=P0vxI2fcndym1UU1S37XAzQzQnkyY7YqAKmxm24_gug,22820
+pip/_vendor/rich/_windows.py,sha256=aBwaD_S56SbgopIvayVmpk0Y28uwY2C5Bab1wl3Bp-I,1925
+pip/_vendor/rich/_windows_renderer.py,sha256=t74ZL3xuDCP3nmTp9pH1L5LiI2cakJuQRQleHCJerlk,2783
+pip/_vendor/rich/_wrap.py,sha256=FlSsom5EX0LVkA3KWy34yHnCfLtqX-ZIepXKh-70rpc,3404
+pip/_vendor/rich/abc.py,sha256=ON-E-ZqSSheZ88VrKX2M3PXpFbGEUUZPMa_Af0l-4f0,890
+pip/_vendor/rich/align.py,sha256=sCUkisXkQfoq-IQPyBELfJ8l7LihZJX3HbH8K7Cie-M,10368
+pip/_vendor/rich/ansi.py,sha256=iD6532QYqnBm6hADulKjrV8l8kFJ-9fEVooHJHH3hMg,6906
+pip/_vendor/rich/bar.py,sha256=ldbVHOzKJOnflVNuv1xS7g6dLX2E3wMnXkdPbpzJTcs,3263
+pip/_vendor/rich/box.py,sha256=nr5fYIUghB_iUCEq6y0Z3LlCT8gFPDrzN9u2kn7tJl4,10831
+pip/_vendor/rich/cells.py,sha256=aMmGK4BjXhgE6_JF1ZEGmW3O7mKkE8g84vUnj4Et4To,4780
+pip/_vendor/rich/color.py,sha256=bCRATVdRe5IClJ6Hl62de2PKQ_U4i2MZ4ugjUEg7Tao,18223
+pip/_vendor/rich/color_triplet.py,sha256=3lhQkdJbvWPoLDO-AnYImAWmJvV5dlgYNCVZ97ORaN4,1054
+pip/_vendor/rich/columns.py,sha256=HUX0KcMm9dsKNi11fTbiM_h2iDtl8ySCaVcxlalEzq8,7131
+pip/_vendor/rich/console.py,sha256=deFZIubq2M9A2MCsKFAsFQlWDvcOMsGuUA07QkOaHIw,99173
+pip/_vendor/rich/constrain.py,sha256=1VIPuC8AgtKWrcncQrjBdYqA3JVWysu6jZo1rrh7c7Q,1288
+pip/_vendor/rich/containers.py,sha256=c_56TxcedGYqDepHBMTuZdUIijitAQgnox-Qde0Z1qo,5502
+pip/_vendor/rich/control.py,sha256=DSkHTUQLorfSERAKE_oTAEUFefZnZp4bQb4q8rHbKws,6630
+pip/_vendor/rich/default_styles.py,sha256=-Fe318kMVI_IwciK5POpThcO0-9DYJ67TZAN6DlmlmM,8082
+pip/_vendor/rich/diagnose.py,sha256=an6uouwhKPAlvQhYpNNpGq9EJysfMIOvvCbO3oSoR24,972
+pip/_vendor/rich/emoji.py,sha256=omTF9asaAnsM4yLY94eR_9dgRRSm1lHUszX20D1yYCQ,2501
+pip/_vendor/rich/errors.py,sha256=5pP3Kc5d4QJ_c0KFsxrfyhjiPVe7J1zOqSFbFAzcV-Y,642
+pip/_vendor/rich/file_proxy.py,sha256=Tl9THMDZ-Pk5Wm8sI1gGg_U5DhusmxD-FZ0fUbcU0W0,1683
+pip/_vendor/rich/filesize.py,sha256=9fTLAPCAwHmBXdRv7KZU194jSgNrRb6Wx7RIoBgqeKY,2508
+pip/_vendor/rich/highlighter.py,sha256=6ZAjUcNhBRajBCo9umFUclyi2xL0-55JL7S0vYGUJu4,9585
+pip/_vendor/rich/json.py,sha256=vVEoKdawoJRjAFayPwXkMBPLy7RSTs-f44wSQDR2nJ0,5031
+pip/_vendor/rich/jupyter.py,sha256=QyoKoE_8IdCbrtiSHp9TsTSNyTHY0FO5whE7jOTd9UE,3252
+pip/_vendor/rich/layout.py,sha256=ajkSFAtEVv9EFTcFs-w4uZfft7nEXhNzL7ZVdgrT5rI,14004
+pip/_vendor/rich/live.py,sha256=vUcnJV2LMSK3sQNaILbm0-_B8BpAeiHfcQMAMLfpRe0,14271
+pip/_vendor/rich/live_render.py,sha256=zJtB471jGziBtEwxc54x12wEQtH4BuQr1SA8v9kU82w,3666
+pip/_vendor/rich/logging.py,sha256=uB-cB-3Q4bmXDLLpbOWkmFviw-Fde39zyMV6tKJ2WHQ,11903
+pip/_vendor/rich/markup.py,sha256=3euGKP5s41NCQwaSjTnJxus5iZMHjxpIM0W6fCxra38,8451
+pip/_vendor/rich/measure.py,sha256=HmrIJX8sWRTHbgh8MxEay_83VkqNW_70s8aKP5ZcYI8,5305
+pip/_vendor/rich/padding.py,sha256=kTFGsdGe0os7tXLnHKpwTI90CXEvrceeZGCshmJy5zw,4970
+pip/_vendor/rich/pager.py,sha256=SO_ETBFKbg3n_AgOzXm41Sv36YxXAyI3_R-KOY2_uSc,828
+pip/_vendor/rich/palette.py,sha256=lInvR1ODDT2f3UZMfL1grq7dY_pDdKHw4bdUgOGaM4Y,3396
+pip/_vendor/rich/panel.py,sha256=2Fd1V7e1kHxlPFIusoHY5T7-Cs0RpkrihgVG9ZVqJ4g,10705
+pip/_vendor/rich/pretty.py,sha256=5oIHP_CGWnHEnD0zMdW5qfGC5kHqIKn7zH_eC4crULE,35848
+pip/_vendor/rich/progress.py,sha256=P02xi7T2Ua3qq17o83bkshe4c0v_45cg8VyTj6US6Vg,59715
+pip/_vendor/rich/progress_bar.py,sha256=L4jw8E6Qb_x-jhOrLVhkuMaPmiAhFIl8jHQbWFrKuR8,8164
+pip/_vendor/rich/prompt.py,sha256=wdOn2X8XTJKnLnlw6PoMY7xG4iUPp3ezt4O5gqvpV-E,11304
+pip/_vendor/rich/protocol.py,sha256=5hHHDDNHckdk8iWH5zEbi-zuIVSF5hbU2jIo47R7lTE,1391
+pip/_vendor/rich/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/rich/region.py,sha256=rNT9xZrVZTYIXZC0NYn41CJQwYNbR-KecPOxTgQvB8Y,166
+pip/_vendor/rich/repr.py,sha256=5MZJZmONgC6kud-QW-_m1okXwL2aR6u6y-pUcUCJz28,4431
+pip/_vendor/rich/rule.py,sha256=0fNaS_aERa3UMRc3T5WMpN_sumtDxfaor2y3of1ftBk,4602
+pip/_vendor/rich/scope.py,sha256=TMUU8qo17thyqQCPqjDLYpg_UU1k5qVd-WwiJvnJVas,2843
+pip/_vendor/rich/screen.py,sha256=YoeReESUhx74grqb0mSSb9lghhysWmFHYhsbMVQjXO8,1591
+pip/_vendor/rich/segment.py,sha256=hU1ueeXqI6YeFa08K9DAjlF2QLxcJY9pwZx7RsXavlk,24246
+pip/_vendor/rich/spinner.py,sha256=15koCmF0DQeD8-k28Lpt6X_zJQUlzEhgo_6A6uy47lc,4339
+pip/_vendor/rich/status.py,sha256=kkPph3YeAZBo-X-4wPp8gTqZyU466NLwZBA4PZTTewo,4424
+pip/_vendor/rich/style.py,sha256=3hiocH_4N8vwRm3-8yFWzM7tSwjjEven69XqWasSQwM,27073
+pip/_vendor/rich/styled.py,sha256=eZNnzGrI4ki_54pgY3Oj0T-x3lxdXTYh4_ryDB24wBU,1258
+pip/_vendor/rich/syntax.py,sha256=TnZDuOD4DeHFbkaVEAji1gf8qgAlMU9Boe_GksMGCkk,35475
+pip/_vendor/rich/table.py,sha256=nGEvAZHF4dy1vT9h9Gj9O5qhSQO3ODAxJv0RY1vnIB8,39680
+pip/_vendor/rich/terminal_theme.py,sha256=1j5-ufJfnvlAo5Qsi_ACZiXDmwMXzqgmFByObT9-yJY,3370
+pip/_vendor/rich/text.py,sha256=5rQ3zvNrg5UZKNLecbh7fiw9v3HeFulNVtRY_CBDjjE,47312
+pip/_vendor/rich/theme.py,sha256=belFJogzA0W0HysQabKaHOc3RWH2ko3fQAJhoN-AFdo,3777
+pip/_vendor/rich/themes.py,sha256=0xgTLozfabebYtcJtDdC5QkX5IVUEaviqDUJJh4YVFk,102
+pip/_vendor/rich/traceback.py,sha256=CUpxYLjQWIb6vQQ6O72X0hvDV6caryGqU6UweHgOyCY,29601
+pip/_vendor/rich/tree.py,sha256=meAOUU6sYnoBEOX2ILrPLY9k5bWrWNQKkaiEFvHinXM,9167
+pip/_vendor/tomli/__init__.py,sha256=JhUwV66DB1g4Hvt1UQCVMdfCu-IgAV8FXmvDU9onxd4,396
+pip/_vendor/tomli/__pycache__/__init__.cpython-311.pyc,,
+pip/_vendor/tomli/__pycache__/_parser.cpython-311.pyc,,
+pip/_vendor/tomli/__pycache__/_re.cpython-311.pyc,,
+pip/_vendor/tomli/__pycache__/_types.cpython-311.pyc,,
+pip/_vendor/tomli/_parser.py,sha256=g9-ENaALS-B8dokYpCuzUFalWlog7T-SIYMjLZSWrtM,22633
+pip/_vendor/tomli/_re.py,sha256=dbjg5ChZT23Ka9z9DHOXfdtSpPwUfdgMXnj8NOoly-w,2943
+pip/_vendor/tomli/_types.py,sha256=-GTG2VUqkpxwMqzmVO4F7ybKddIbAnuAHXfmWQcTi3Q,254
+pip/_vendor/tomli/py.typed,sha256=8PjyZ1aVoQpRVvt71muvuq5qE-jTFZkK-GLHkhdebmc,26
+pip/_vendor/truststore/__init__.py,sha256=WIDeyzWm7EVX44g354M25vpRXbeY1lsPH6EmUJUcq4o,1264
+pip/_vendor/truststore/__pycache__/__init__.cpython-311.pyc,,
+pip/_vendor/truststore/__pycache__/_api.cpython-311.pyc,,
+pip/_vendor/truststore/__pycache__/_macos.cpython-311.pyc,,
+pip/_vendor/truststore/__pycache__/_openssl.cpython-311.pyc,,
+pip/_vendor/truststore/__pycache__/_ssl_constants.cpython-311.pyc,,
+pip/_vendor/truststore/__pycache__/_windows.cpython-311.pyc,,
+pip/_vendor/truststore/_api.py,sha256=GeXRNTlxPZ3kif4kNoh6JY0oE4QRzTGcgXr6l_X_Gk0,10555
+pip/_vendor/truststore/_macos.py,sha256=nZlLkOmszUE0g6ryRwBVGY5COzPyudcsiJtDWarM5LQ,20503
+pip/_vendor/truststore/_openssl.py,sha256=LLUZ7ZGaio-i5dpKKjKCSeSufmn6T8pi9lDcFnvSyq0,2324
+pip/_vendor/truststore/_ssl_constants.py,sha256=NUD4fVKdSD02ri7-db0tnO0VqLP9aHuzmStcW7tAl08,1130
+pip/_vendor/truststore/_windows.py,sha256=rAHyKYD8M7t-bXfG8VgOVa3TpfhVhbt4rZQlO45YuP8,17993
+pip/_vendor/truststore/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/typing_extensions.py,sha256=78hFl0HpDY-ylHUVCnWdU5nTHxUP2-S-3wEZk6CQmLk,134499
+pip/_vendor/urllib3/__init__.py,sha256=iXLcYiJySn0GNbWOOZDDApgBL1JgP44EZ8i1760S8Mc,3333
+pip/_vendor/urllib3/__pycache__/__init__.cpython-311.pyc,,
+pip/_vendor/urllib3/__pycache__/_collections.cpython-311.pyc,,
+pip/_vendor/urllib3/__pycache__/_version.cpython-311.pyc,,
+pip/_vendor/urllib3/__pycache__/connection.cpython-311.pyc,,
+pip/_vendor/urllib3/__pycache__/connectionpool.cpython-311.pyc,,
+pip/_vendor/urllib3/__pycache__/exceptions.cpython-311.pyc,,
+pip/_vendor/urllib3/__pycache__/fields.cpython-311.pyc,,
+pip/_vendor/urllib3/__pycache__/filepost.cpython-311.pyc,,
+pip/_vendor/urllib3/__pycache__/poolmanager.cpython-311.pyc,,
+pip/_vendor/urllib3/__pycache__/request.cpython-311.pyc,,
+pip/_vendor/urllib3/__pycache__/response.cpython-311.pyc,,
+pip/_vendor/urllib3/_collections.py,sha256=pyASJJhW7wdOpqJj9QJA8FyGRfr8E8uUUhqUvhF0728,11372
+pip/_vendor/urllib3/_version.py,sha256=t9wGB6ooOTXXgiY66K1m6BZS1CJyXHAU8EoWDTe6Shk,64
+pip/_vendor/urllib3/connection.py,sha256=ttIA909BrbTUzwkqEe_TzZVh4JOOj7g61Ysei2mrwGg,20314
+pip/_vendor/urllib3/connectionpool.py,sha256=e2eiAwNbFNCKxj4bwDKNK-w7HIdSz3OmMxU_TIt-evQ,40408
+pip/_vendor/urllib3/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/urllib3/contrib/__pycache__/__init__.cpython-311.pyc,,
+pip/_vendor/urllib3/contrib/__pycache__/_appengine_environ.cpython-311.pyc,,
+pip/_vendor/urllib3/contrib/__pycache__/appengine.cpython-311.pyc,,
+pip/_vendor/urllib3/contrib/__pycache__/ntlmpool.cpython-311.pyc,,
+pip/_vendor/urllib3/contrib/__pycache__/pyopenssl.cpython-311.pyc,,
+pip/_vendor/urllib3/contrib/__pycache__/securetransport.cpython-311.pyc,,
+pip/_vendor/urllib3/contrib/__pycache__/socks.cpython-311.pyc,,
+pip/_vendor/urllib3/contrib/_appengine_environ.py,sha256=bDbyOEhW2CKLJcQqAKAyrEHN-aklsyHFKq6vF8ZFsmk,957
+pip/_vendor/urllib3/contrib/_securetransport/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/urllib3/contrib/_securetransport/__pycache__/__init__.cpython-311.pyc,,
+pip/_vendor/urllib3/contrib/_securetransport/__pycache__/bindings.cpython-311.pyc,,
+pip/_vendor/urllib3/contrib/_securetransport/__pycache__/low_level.cpython-311.pyc,,
+pip/_vendor/urllib3/contrib/_securetransport/bindings.py,sha256=4Xk64qIkPBt09A5q-RIFUuDhNc9mXilVapm7WnYnzRw,17632
+pip/_vendor/urllib3/contrib/_securetransport/low_level.py,sha256=B2JBB2_NRP02xK6DCa1Pa9IuxrPwxzDzZbixQkb7U9M,13922
+pip/_vendor/urllib3/contrib/appengine.py,sha256=VR68eAVE137lxTgjBDwCna5UiBZTOKa01Aj_-5BaCz4,11036
+pip/_vendor/urllib3/contrib/ntlmpool.py,sha256=NlfkW7WMdW8ziqudopjHoW299og1BTWi0IeIibquFwk,4528
+pip/_vendor/urllib3/contrib/pyopenssl.py,sha256=hDJh4MhyY_p-oKlFcYcQaVQRDv6GMmBGuW9yjxyeejM,17081
+pip/_vendor/urllib3/contrib/securetransport.py,sha256=Fef1IIUUFHqpevzXiDPbIGkDKchY2FVKeVeLGR1Qq3g,34446
+pip/_vendor/urllib3/contrib/socks.py,sha256=aRi9eWXo9ZEb95XUxef4Z21CFlnnjbEiAo9HOseoMt4,7097
+pip/_vendor/urllib3/exceptions.py,sha256=0Mnno3KHTNfXRfY7638NufOPkUb6mXOm-Lqj-4x2w8A,8217
+pip/_vendor/urllib3/fields.py,sha256=kvLDCg_JmH1lLjUUEY_FLS8UhY7hBvDPuVETbY8mdrM,8579
+pip/_vendor/urllib3/filepost.py,sha256=5b_qqgRHVlL7uLtdAYBzBh-GHmU5AfJVt_2N0XS3PeY,2440
+pip/_vendor/urllib3/packages/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/urllib3/packages/__pycache__/__init__.cpython-311.pyc,,
+pip/_vendor/urllib3/packages/__pycache__/six.cpython-311.pyc,,
+pip/_vendor/urllib3/packages/backports/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/urllib3/packages/backports/__pycache__/__init__.cpython-311.pyc,,
+pip/_vendor/urllib3/packages/backports/__pycache__/makefile.cpython-311.pyc,,
+pip/_vendor/urllib3/packages/backports/__pycache__/weakref_finalize.cpython-311.pyc,,
+pip/_vendor/urllib3/packages/backports/makefile.py,sha256=nbzt3i0agPVP07jqqgjhaYjMmuAi_W5E0EywZivVO8E,1417
+pip/_vendor/urllib3/packages/backports/weakref_finalize.py,sha256=tRCal5OAhNSRyb0DhHp-38AtIlCsRP8BxF3NX-6rqIA,5343
+pip/_vendor/urllib3/packages/six.py,sha256=b9LM0wBXv7E7SrbCjAm4wwN-hrH-iNxv18LgWNMMKPo,34665
+pip/_vendor/urllib3/poolmanager.py,sha256=aWyhXRtNO4JUnCSVVqKTKQd8EXTvUm1VN9pgs2bcONo,19990
+pip/_vendor/urllib3/request.py,sha256=YTWFNr7QIwh7E1W9dde9LM77v2VWTJ5V78XuTTw7D1A,6691
+pip/_vendor/urllib3/response.py,sha256=fmDJAFkG71uFTn-sVSTh2Iw0WmcXQYqkbRjihvwBjU8,30641
+pip/_vendor/urllib3/util/__init__.py,sha256=JEmSmmqqLyaw8P51gUImZh8Gwg9i1zSe-DoqAitn2nc,1155
+pip/_vendor/urllib3/util/__pycache__/__init__.cpython-311.pyc,,
+pip/_vendor/urllib3/util/__pycache__/connection.cpython-311.pyc,,
+pip/_vendor/urllib3/util/__pycache__/proxy.cpython-311.pyc,,
+pip/_vendor/urllib3/util/__pycache__/queue.cpython-311.pyc,,
+pip/_vendor/urllib3/util/__pycache__/request.cpython-311.pyc,,
+pip/_vendor/urllib3/util/__pycache__/response.cpython-311.pyc,,
+pip/_vendor/urllib3/util/__pycache__/retry.cpython-311.pyc,,
+pip/_vendor/urllib3/util/__pycache__/ssl_.cpython-311.pyc,,
+pip/_vendor/urllib3/util/__pycache__/ssl_match_hostname.cpython-311.pyc,,
+pip/_vendor/urllib3/util/__pycache__/ssltransport.cpython-311.pyc,,
+pip/_vendor/urllib3/util/__pycache__/timeout.cpython-311.pyc,,
+pip/_vendor/urllib3/util/__pycache__/url.cpython-311.pyc,,
+pip/_vendor/urllib3/util/__pycache__/wait.cpython-311.pyc,,
+pip/_vendor/urllib3/util/connection.py,sha256=5Lx2B1PW29KxBn2T0xkN1CBgRBa3gGVJBKoQoRogEVk,4901
+pip/_vendor/urllib3/util/proxy.py,sha256=zUvPPCJrp6dOF0N4GAVbOcl6o-4uXKSrGiTkkr5vUS4,1605
+pip/_vendor/urllib3/util/queue.py,sha256=nRgX8_eX-_VkvxoX096QWoz8Ps0QHUAExILCY_7PncM,498
+pip/_vendor/urllib3/util/request.py,sha256=C0OUt2tcU6LRiQJ7YYNP9GvPrSvl7ziIBekQ-5nlBZk,3997
+pip/_vendor/urllib3/util/response.py,sha256=GJpg3Egi9qaJXRwBh5wv-MNuRWan5BIu40oReoxWP28,3510
+pip/_vendor/urllib3/util/retry.py,sha256=6ENvOZ8PBDzh8kgixpql9lIrb2dxH-k7ZmBanJF2Ng4,22050
+pip/_vendor/urllib3/util/ssl_.py,sha256=QDuuTxPSCj1rYtZ4xpD7Ux-r20TD50aHyqKyhQ7Bq4A,17460
+pip/_vendor/urllib3/util/ssl_match_hostname.py,sha256=Ir4cZVEjmAk8gUAIHWSi7wtOO83UCYABY2xFD1Ql_WA,5758
+pip/_vendor/urllib3/util/ssltransport.py,sha256=NA-u5rMTrDFDFC8QzRKUEKMG0561hOD4qBTr3Z4pv6E,6895
+pip/_vendor/urllib3/util/timeout.py,sha256=cwq4dMk87mJHSBktK1miYJ-85G-3T3RmT20v7SFCpno,10168
+pip/_vendor/urllib3/util/url.py,sha256=lCAE7M5myA8EDdW0sJuyyZhVB9K_j38ljWhHAnFaWoE,14296
+pip/_vendor/urllib3/util/wait.py,sha256=fOX0_faozG2P7iVojQoE1mbydweNyTcm-hXEfFrTtLI,5403
+pip/_vendor/vendor.txt,sha256=43152uDtpsunEE29vmLqqKZUosdrbvzIFkzscLB55Cg,332
+pip/py.typed,sha256=EBVvvPRTn_eIpz5e5QztSCdrMX7Qwd7VP93RSoIlZ2I,286
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip-24.3.1.dist-info/WHEEL b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip-24.3.1.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..da25d7b42315fc321e466d6e69705ea21bcff507
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip-24.3.1.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: setuptools (75.2.0)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/__init__.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a5b7f87f973b36af0ee6fbfb76ce38420f5f9d7
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/__init__.py
@@ -0,0 +1,18 @@
+from typing import List, Optional
+
+from pip._internal.utils import _log
+
+# init_logging() must be called before any call to logging.getLogger()
+# which happens at import of most modules.
+_log.init_logging()
+
+
+def main(args: Optional[List[str]] = None) -> int:
+ """This is preserved for old console scripts that may still be referencing
+ it.
+
+ For additional details, see https://github.com/pypa/pip/issues/7498.
+ """
+ from pip._internal.utils.entrypoints import _wrapper
+
+ return _wrapper(args)
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/__pycache__/main.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/__pycache__/main.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8f6f2912bd2b2193d8dcb89642f99a9c1e24e616
Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/__pycache__/main.cpython-311.pyc differ
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/__pycache__/wheel_builder.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/__pycache__/wheel_builder.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9e331b708d061d7df1cd4ee76abf18009bbcc3ad
Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/__pycache__/wheel_builder.cpython-311.pyc differ
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/build_env.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/build_env.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f1e2667caf75eb9a7e995ab1060b059b90c60e8
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/build_env.py
@@ -0,0 +1,319 @@
+"""Build Environment used for isolation during sdist building
+"""
+
+import logging
+import os
+import pathlib
+import site
+import sys
+import textwrap
+from collections import OrderedDict
+from types import TracebackType
+from typing import TYPE_CHECKING, Iterable, List, Optional, Set, Tuple, Type, Union
+
+from pip._vendor.certifi import where
+from pip._vendor.packaging.version import Version
+
+from pip import __file__ as pip_location
+from pip._internal.cli.spinners import open_spinner
+from pip._internal.locations import get_platlib, get_purelib, get_scheme
+from pip._internal.metadata import get_default_environment, get_environment
+from pip._internal.utils.logging import VERBOSE
+from pip._internal.utils.packaging import get_requirement
+from pip._internal.utils.subprocess import call_subprocess
+from pip._internal.utils.temp_dir import TempDirectory, tempdir_kinds
+
+if TYPE_CHECKING:
+ from pip._internal.index.package_finder import PackageFinder
+
+logger = logging.getLogger(__name__)
+
+
+def _dedup(a: str, b: str) -> Union[Tuple[str], Tuple[str, str]]:
+ return (a, b) if a != b else (a,)
+
+
+class _Prefix:
+ def __init__(self, path: str) -> None:
+ self.path = path
+ self.setup = False
+ scheme = get_scheme("", prefix=path)
+ self.bin_dir = scheme.scripts
+ self.lib_dirs = _dedup(scheme.purelib, scheme.platlib)
+
+
+def get_runnable_pip() -> str:
+ """Get a file to pass to a Python executable, to run the currently-running pip.
+
+ This is used to run a pip subprocess, for installing requirements into the build
+ environment.
+ """
+ source = pathlib.Path(pip_location).resolve().parent
+
+ if not source.is_dir():
+ # This would happen if someone is using pip from inside a zip file. In that
+ # case, we can use that directly.
+ return str(source)
+
+ return os.fsdecode(source / "__pip-runner__.py")
+
+
+def _get_system_sitepackages() -> Set[str]:
+ """Get system site packages
+
+ Usually from site.getsitepackages,
+ but fallback on `get_purelib()/get_platlib()` if unavailable
+ (e.g. in a virtualenv created by virtualenv<20)
+
+ Returns normalized set of strings.
+ """
+ if hasattr(site, "getsitepackages"):
+ system_sites = site.getsitepackages()
+ else:
+ # virtualenv < 20 overwrites site.py without getsitepackages
+ # fallback on get_purelib/get_platlib.
+ # this is known to miss things, but shouldn't in the cases
+ # where getsitepackages() has been removed (inside a virtualenv)
+ system_sites = [get_purelib(), get_platlib()]
+ return {os.path.normcase(path) for path in system_sites}
+
+
+class BuildEnvironment:
+ """Creates and manages an isolated environment to install build deps"""
+
+ def __init__(self) -> None:
+ temp_dir = TempDirectory(kind=tempdir_kinds.BUILD_ENV, globally_managed=True)
+
+ self._prefixes = OrderedDict(
+ (name, _Prefix(os.path.join(temp_dir.path, name)))
+ for name in ("normal", "overlay")
+ )
+
+ self._bin_dirs: List[str] = []
+ self._lib_dirs: List[str] = []
+ for prefix in reversed(list(self._prefixes.values())):
+ self._bin_dirs.append(prefix.bin_dir)
+ self._lib_dirs.extend(prefix.lib_dirs)
+
+ # Customize site to:
+ # - ensure .pth files are honored
+ # - prevent access to system site packages
+ system_sites = _get_system_sitepackages()
+
+ self._site_dir = os.path.join(temp_dir.path, "site")
+ if not os.path.exists(self._site_dir):
+ os.mkdir(self._site_dir)
+ with open(
+ os.path.join(self._site_dir, "sitecustomize.py"), "w", encoding="utf-8"
+ ) as fp:
+ fp.write(
+ textwrap.dedent(
+ """
+ import os, site, sys
+
+ # First, drop system-sites related paths.
+ original_sys_path = sys.path[:]
+ known_paths = set()
+ for path in {system_sites!r}:
+ site.addsitedir(path, known_paths=known_paths)
+ system_paths = set(
+ os.path.normcase(path)
+ for path in sys.path[len(original_sys_path):]
+ )
+ original_sys_path = [
+ path for path in original_sys_path
+ if os.path.normcase(path) not in system_paths
+ ]
+ sys.path = original_sys_path
+
+ # Second, add lib directories.
+ # ensuring .pth file are processed.
+ for path in {lib_dirs!r}:
+ assert not path in sys.path
+ site.addsitedir(path)
+ """
+ ).format(system_sites=system_sites, lib_dirs=self._lib_dirs)
+ )
+
+ def __enter__(self) -> None:
+ self._save_env = {
+ name: os.environ.get(name, None)
+ for name in ("PATH", "PYTHONNOUSERSITE", "PYTHONPATH")
+ }
+
+ path = self._bin_dirs[:]
+ old_path = self._save_env["PATH"]
+ if old_path:
+ path.extend(old_path.split(os.pathsep))
+
+ pythonpath = [self._site_dir]
+
+ os.environ.update(
+ {
+ "PATH": os.pathsep.join(path),
+ "PYTHONNOUSERSITE": "1",
+ "PYTHONPATH": os.pathsep.join(pythonpath),
+ }
+ )
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
+ for varname, old_value in self._save_env.items():
+ if old_value is None:
+ os.environ.pop(varname, None)
+ else:
+ os.environ[varname] = old_value
+
+ def check_requirements(
+ self, reqs: Iterable[str]
+ ) -> Tuple[Set[Tuple[str, str]], Set[str]]:
+ """Return 2 sets:
+ - conflicting requirements: set of (installed, wanted) reqs tuples
+ - missing requirements: set of reqs
+ """
+ missing = set()
+ conflicting = set()
+ if reqs:
+ env = (
+ get_environment(self._lib_dirs)
+ if hasattr(self, "_lib_dirs")
+ else get_default_environment()
+ )
+ for req_str in reqs:
+ req = get_requirement(req_str)
+ # We're explicitly evaluating with an empty extra value, since build
+ # environments are not provided any mechanism to select specific extras.
+ if req.marker is not None and not req.marker.evaluate({"extra": ""}):
+ continue
+ dist = env.get_distribution(req.name)
+ if not dist:
+ missing.add(req_str)
+ continue
+ if isinstance(dist.version, Version):
+ installed_req_str = f"{req.name}=={dist.version}"
+ else:
+ installed_req_str = f"{req.name}==={dist.version}"
+ if not req.specifier.contains(dist.version, prereleases=True):
+ conflicting.add((installed_req_str, req_str))
+ # FIXME: Consider direct URL?
+ return conflicting, missing
+
+ def install_requirements(
+ self,
+ finder: "PackageFinder",
+ requirements: Iterable[str],
+ prefix_as_string: str,
+ *,
+ kind: str,
+ ) -> None:
+ prefix = self._prefixes[prefix_as_string]
+ assert not prefix.setup
+ prefix.setup = True
+ if not requirements:
+ return
+ self._install_requirements(
+ get_runnable_pip(),
+ finder,
+ requirements,
+ prefix,
+ kind=kind,
+ )
+
+ @staticmethod
+ def _install_requirements(
+ pip_runnable: str,
+ finder: "PackageFinder",
+ requirements: Iterable[str],
+ prefix: _Prefix,
+ *,
+ kind: str,
+ ) -> None:
+ args: List[str] = [
+ sys.executable,
+ pip_runnable,
+ "install",
+ "--ignore-installed",
+ "--no-user",
+ "--prefix",
+ prefix.path,
+ "--no-warn-script-location",
+ "--disable-pip-version-check",
+ # The prefix specified two lines above, thus
+ # target from config file or env var should be ignored
+ "--target",
+ "",
+ ]
+ if logger.getEffectiveLevel() <= logging.DEBUG:
+ args.append("-vv")
+ elif logger.getEffectiveLevel() <= VERBOSE:
+ args.append("-v")
+ for format_control in ("no_binary", "only_binary"):
+ formats = getattr(finder.format_control, format_control)
+ args.extend(
+ (
+ "--" + format_control.replace("_", "-"),
+ ",".join(sorted(formats or {":none:"})),
+ )
+ )
+
+ index_urls = finder.index_urls
+ if index_urls:
+ args.extend(["-i", index_urls[0]])
+ for extra_index in index_urls[1:]:
+ args.extend(["--extra-index-url", extra_index])
+ else:
+ args.append("--no-index")
+ for link in finder.find_links:
+ args.extend(["--find-links", link])
+
+ for host in finder.trusted_hosts:
+ args.extend(["--trusted-host", host])
+ if finder.allow_all_prereleases:
+ args.append("--pre")
+ if finder.prefer_binary:
+ args.append("--prefer-binary")
+ args.append("--")
+ args.extend(requirements)
+ extra_environ = {"_PIP_STANDALONE_CERT": where()}
+ with open_spinner(f"Installing {kind}") as spinner:
+ call_subprocess(
+ args,
+ command_desc=f"pip subprocess to install {kind}",
+ spinner=spinner,
+ extra_environ=extra_environ,
+ )
+
+
+class NoOpBuildEnvironment(BuildEnvironment):
+ """A no-op drop-in replacement for BuildEnvironment"""
+
+ def __init__(self) -> None:
+ pass
+
+ def __enter__(self) -> None:
+ pass
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
+ pass
+
+ def cleanup(self) -> None:
+ pass
+
+ def install_requirements(
+ self,
+ finder: "PackageFinder",
+ requirements: Iterable[str],
+ prefix_as_string: str,
+ *,
+ kind: str,
+ ) -> None:
+ raise NotImplementedError()
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/distributions/__init__.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/distributions/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a89a838b9a5cb264e9ae9d269fbedca6e2d6333
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/distributions/__init__.py
@@ -0,0 +1,21 @@
+from pip._internal.distributions.base import AbstractDistribution
+from pip._internal.distributions.sdist import SourceDistribution
+from pip._internal.distributions.wheel import WheelDistribution
+from pip._internal.req.req_install import InstallRequirement
+
+
+def make_distribution_for_install_requirement(
+ install_req: InstallRequirement,
+) -> AbstractDistribution:
+ """Returns a Distribution for the given InstallRequirement"""
+ # Editable requirements will always be source distributions. They use the
+ # legacy logic until we create a modern standard for them.
+ if install_req.editable:
+ return SourceDistribution(install_req)
+
+ # If it's a wheel, it's a WheelDistribution
+ if install_req.is_wheel:
+ return WheelDistribution(install_req)
+
+ # Otherwise, a SourceDistribution
+ return SourceDistribution(install_req)
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/distributions/__pycache__/installed.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/distributions/__pycache__/installed.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8997fcb1767494722f5410f9f570705fb62eafb0
Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/distributions/__pycache__/installed.cpython-311.pyc differ
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/distributions/__pycache__/sdist.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/distributions/__pycache__/sdist.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..64eaddcc4f11da2053ffeb62d690a4ec0af359ba
Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/distributions/__pycache__/sdist.cpython-311.pyc differ
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/distributions/__pycache__/wheel.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/distributions/__pycache__/wheel.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3b5b059d4664e0c3d7562add720e29d5054fb82c
Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/distributions/__pycache__/wheel.cpython-311.pyc differ
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/distributions/base.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/distributions/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..6e4d0c91a901c46ab20be813af083cd19809318a
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/distributions/base.py
@@ -0,0 +1,53 @@
+import abc
+from typing import TYPE_CHECKING, Optional
+
+from pip._internal.metadata.base import BaseDistribution
+from pip._internal.req import InstallRequirement
+
+if TYPE_CHECKING:
+ from pip._internal.index.package_finder import PackageFinder
+
+
+class AbstractDistribution(metaclass=abc.ABCMeta):
+ """A base class for handling installable artifacts.
+
+ The requirements for anything installable are as follows:
+
+ - we must be able to determine the requirement name
+ (or we can't correctly handle the non-upgrade case).
+
+ - for packages with setup requirements, we must also be able
+ to determine their requirements without installing additional
+ packages (for the same reason as run-time dependencies)
+
+ - we must be able to create a Distribution object exposing the
+ above metadata.
+
+ - if we need to do work in the build tracker, we must be able to generate a unique
+ string to identify the requirement in the build tracker.
+ """
+
+ def __init__(self, req: InstallRequirement) -> None:
+ super().__init__()
+ self.req = req
+
+ @abc.abstractproperty
+ def build_tracker_id(self) -> Optional[str]:
+ """A string that uniquely identifies this requirement to the build tracker.
+
+ If None, then this dist has no work to do in the build tracker, and
+ ``.prepare_distribution_metadata()`` will not be called."""
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def get_metadata_distribution(self) -> BaseDistribution:
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def prepare_distribution_metadata(
+ self,
+ finder: "PackageFinder",
+ build_isolation: bool,
+ check_build_deps: bool,
+ ) -> None:
+ raise NotImplementedError()
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/distributions/installed.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/distributions/installed.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab8d53be7408626719c27aa29fdc2e143b7c380a
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/distributions/installed.py
@@ -0,0 +1,29 @@
+from typing import Optional
+
+from pip._internal.distributions.base import AbstractDistribution
+from pip._internal.index.package_finder import PackageFinder
+from pip._internal.metadata import BaseDistribution
+
+
+class InstalledDistribution(AbstractDistribution):
+ """Represents an installed package.
+
+ This does not need any preparation as the required information has already
+ been computed.
+ """
+
+ @property
+ def build_tracker_id(self) -> Optional[str]:
+ return None
+
+ def get_metadata_distribution(self) -> BaseDistribution:
+ assert self.req.satisfied_by is not None, "not actually installed"
+ return self.req.satisfied_by
+
+ def prepare_distribution_metadata(
+ self,
+ finder: PackageFinder,
+ build_isolation: bool,
+ check_build_deps: bool,
+ ) -> None:
+ pass
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/distributions/sdist.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/distributions/sdist.py
new file mode 100644
index 0000000000000000000000000000000000000000..28ea5cea16cdf9b740809553cbf2d3bf8d626e1e
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/distributions/sdist.py
@@ -0,0 +1,158 @@
+import logging
+from typing import TYPE_CHECKING, Iterable, Optional, Set, Tuple
+
+from pip._internal.build_env import BuildEnvironment
+from pip._internal.distributions.base import AbstractDistribution
+from pip._internal.exceptions import InstallationError
+from pip._internal.metadata import BaseDistribution
+from pip._internal.utils.subprocess import runner_with_spinner_message
+
+if TYPE_CHECKING:
+ from pip._internal.index.package_finder import PackageFinder
+
+logger = logging.getLogger(__name__)
+
+
+class SourceDistribution(AbstractDistribution):
+ """Represents a source distribution.
+
+ The preparation step for these needs metadata for the packages to be
+ generated, either using PEP 517 or using the legacy `setup.py egg_info`.
+ """
+
+ @property
+ def build_tracker_id(self) -> Optional[str]:
+ """Identify this requirement uniquely by its link."""
+ assert self.req.link
+ return self.req.link.url_without_fragment
+
+ def get_metadata_distribution(self) -> BaseDistribution:
+ return self.req.get_dist()
+
+ def prepare_distribution_metadata(
+ self,
+ finder: "PackageFinder",
+ build_isolation: bool,
+ check_build_deps: bool,
+ ) -> None:
+ # Load pyproject.toml, to determine whether PEP 517 is to be used
+ self.req.load_pyproject_toml()
+
+ # Set up the build isolation, if this requirement should be isolated
+ should_isolate = self.req.use_pep517 and build_isolation
+ if should_isolate:
+ # Setup an isolated environment and install the build backend static
+ # requirements in it.
+ self._prepare_build_backend(finder)
+ # Check that if the requirement is editable, it either supports PEP 660 or
+ # has a setup.py or a setup.cfg. This cannot be done earlier because we need
+ # to setup the build backend to verify it supports build_editable, nor can
+ # it be done later, because we want to avoid installing build requirements
+ # needlessly. Doing it here also works around setuptools generating
+ # UNKNOWN.egg-info when running get_requires_for_build_wheel on a directory
+ # without setup.py nor setup.cfg.
+ self.req.isolated_editable_sanity_check()
+ # Install the dynamic build requirements.
+ self._install_build_reqs(finder)
+ # Check if the current environment provides build dependencies
+ should_check_deps = self.req.use_pep517 and check_build_deps
+ if should_check_deps:
+ pyproject_requires = self.req.pyproject_requires
+ assert pyproject_requires is not None
+ conflicting, missing = self.req.build_env.check_requirements(
+ pyproject_requires
+ )
+ if conflicting:
+ self._raise_conflicts("the backend dependencies", conflicting)
+ if missing:
+ self._raise_missing_reqs(missing)
+ self.req.prepare_metadata()
+
+ def _prepare_build_backend(self, finder: "PackageFinder") -> None:
+ # Isolate in a BuildEnvironment and install the build-time
+ # requirements.
+ pyproject_requires = self.req.pyproject_requires
+ assert pyproject_requires is not None
+
+ self.req.build_env = BuildEnvironment()
+ self.req.build_env.install_requirements(
+ finder, pyproject_requires, "overlay", kind="build dependencies"
+ )
+ conflicting, missing = self.req.build_env.check_requirements(
+ self.req.requirements_to_check
+ )
+ if conflicting:
+ self._raise_conflicts("PEP 517/518 supported requirements", conflicting)
+ if missing:
+ logger.warning(
+ "Missing build requirements in pyproject.toml for %s.",
+ self.req,
+ )
+ logger.warning(
+ "The project does not specify a build backend, and "
+ "pip cannot fall back to setuptools without %s.",
+ " and ".join(map(repr, sorted(missing))),
+ )
+
+ def _get_build_requires_wheel(self) -> Iterable[str]:
+ with self.req.build_env:
+ runner = runner_with_spinner_message("Getting requirements to build wheel")
+ backend = self.req.pep517_backend
+ assert backend is not None
+ with backend.subprocess_runner(runner):
+ return backend.get_requires_for_build_wheel()
+
+ def _get_build_requires_editable(self) -> Iterable[str]:
+ with self.req.build_env:
+ runner = runner_with_spinner_message(
+ "Getting requirements to build editable"
+ )
+ backend = self.req.pep517_backend
+ assert backend is not None
+ with backend.subprocess_runner(runner):
+ return backend.get_requires_for_build_editable()
+
+ def _install_build_reqs(self, finder: "PackageFinder") -> None:
+ # Install any extra build dependencies that the backend requests.
+ # This must be done in a second pass, as the pyproject.toml
+ # dependencies must be installed before we can call the backend.
+ if (
+ self.req.editable
+ and self.req.permit_editable_wheels
+ and self.req.supports_pyproject_editable
+ ):
+ build_reqs = self._get_build_requires_editable()
+ else:
+ build_reqs = self._get_build_requires_wheel()
+ conflicting, missing = self.req.build_env.check_requirements(build_reqs)
+ if conflicting:
+ self._raise_conflicts("the backend dependencies", conflicting)
+ self.req.build_env.install_requirements(
+ finder, missing, "normal", kind="backend dependencies"
+ )
+
+ def _raise_conflicts(
+ self, conflicting_with: str, conflicting_reqs: Set[Tuple[str, str]]
+ ) -> None:
+ format_string = (
+ "Some build dependencies for {requirement} "
+ "conflict with {conflicting_with}: {description}."
+ )
+ error_message = format_string.format(
+ requirement=self.req,
+ conflicting_with=conflicting_with,
+ description=", ".join(
+ f"{installed} is incompatible with {wanted}"
+ for installed, wanted in sorted(conflicting_reqs)
+ ),
+ )
+ raise InstallationError(error_message)
+
+ def _raise_missing_reqs(self, missing: Set[str]) -> None:
+ format_string = (
+ "Some build dependencies for {requirement} are missing: {missing}."
+ )
+ error_message = format_string.format(
+ requirement=self.req, missing=", ".join(map(repr, sorted(missing)))
+ )
+ raise InstallationError(error_message)
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/distributions/wheel.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/distributions/wheel.py
new file mode 100644
index 0000000000000000000000000000000000000000..bfadd39dcb77dfdaa2cca24e8a6db7e5beac181e
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/distributions/wheel.py
@@ -0,0 +1,42 @@
+from typing import TYPE_CHECKING, Optional
+
+from pip._vendor.packaging.utils import canonicalize_name
+
+from pip._internal.distributions.base import AbstractDistribution
+from pip._internal.metadata import (
+ BaseDistribution,
+ FilesystemWheel,
+ get_wheel_distribution,
+)
+
+if TYPE_CHECKING:
+ from pip._internal.index.package_finder import PackageFinder
+
+
+class WheelDistribution(AbstractDistribution):
+ """Represents a wheel distribution.
+
+ This does not need any preparation as wheels can be directly unpacked.
+ """
+
+ @property
+ def build_tracker_id(self) -> Optional[str]:
+ return None
+
+ def get_metadata_distribution(self) -> BaseDistribution:
+ """Loads the metadata from the wheel file into memory and returns a
+ Distribution that uses it, not relying on the wheel file or
+ requirement.
+ """
+ assert self.req.local_file_path, "Set as part of preparation during download"
+ assert self.req.name, "Wheels are never unnamed"
+ wheel = FilesystemWheel(self.req.local_file_path)
+ return get_wheel_distribution(wheel, canonicalize_name(self.req.name))
+
+ def prepare_distribution_metadata(
+ self,
+ finder: "PackageFinder",
+ build_isolation: bool,
+ check_build_deps: bool,
+ ) -> None:
+ pass
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/index/collector.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/index/collector.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f8fdee3d46271652d498cbfc865a25c50f2cab0
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/index/collector.py
@@ -0,0 +1,494 @@
+"""
+The main purpose of this module is to expose LinkCollector.collect_sources().
+"""
+
+import collections
+import email.message
+import functools
+import itertools
+import json
+import logging
+import os
+import urllib.parse
+import urllib.request
+from dataclasses import dataclass
+from html.parser import HTMLParser
+from optparse import Values
+from typing import (
+ Callable,
+ Dict,
+ Iterable,
+ List,
+ MutableMapping,
+ NamedTuple,
+ Optional,
+ Protocol,
+ Sequence,
+ Tuple,
+ Union,
+)
+
+from pip._vendor import requests
+from pip._vendor.requests import Response
+from pip._vendor.requests.exceptions import RetryError, SSLError
+
+from pip._internal.exceptions import NetworkConnectionError
+from pip._internal.models.link import Link
+from pip._internal.models.search_scope import SearchScope
+from pip._internal.network.session import PipSession
+from pip._internal.network.utils import raise_for_status
+from pip._internal.utils.filetypes import is_archive_file
+from pip._internal.utils.misc import redact_auth_from_url
+from pip._internal.vcs import vcs
+
+from .sources import CandidatesFromPage, LinkSource, build_source
+
+logger = logging.getLogger(__name__)
+
+ResponseHeaders = MutableMapping[str, str]
+
+
+def _match_vcs_scheme(url: str) -> Optional[str]:
+ """Look for VCS schemes in the URL.
+
+ Returns the matched VCS scheme, or None if there's no match.
+ """
+ for scheme in vcs.schemes:
+ if url.lower().startswith(scheme) and url[len(scheme)] in "+:":
+ return scheme
+ return None
+
+
+class _NotAPIContent(Exception):
+ def __init__(self, content_type: str, request_desc: str) -> None:
+ super().__init__(content_type, request_desc)
+ self.content_type = content_type
+ self.request_desc = request_desc
+
+
+def _ensure_api_header(response: Response) -> None:
+ """
+ Check the Content-Type header to ensure the response contains a Simple
+ API Response.
+
+ Raises `_NotAPIContent` if the content type is not a valid content-type.
+ """
+ content_type = response.headers.get("Content-Type", "Unknown")
+
+ content_type_l = content_type.lower()
+ if content_type_l.startswith(
+ (
+ "text/html",
+ "application/vnd.pypi.simple.v1+html",
+ "application/vnd.pypi.simple.v1+json",
+ )
+ ):
+ return
+
+ raise _NotAPIContent(content_type, response.request.method)
+
+
+class _NotHTTP(Exception):
+ pass
+
+
+def _ensure_api_response(url: str, session: PipSession) -> None:
+ """
+ Send a HEAD request to the URL, and ensure the response contains a simple
+ API Response.
+
+ Raises `_NotHTTP` if the URL is not available for a HEAD request, or
+ `_NotAPIContent` if the content type is not a valid content type.
+ """
+ scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url)
+ if scheme not in {"http", "https"}:
+ raise _NotHTTP()
+
+ resp = session.head(url, allow_redirects=True)
+ raise_for_status(resp)
+
+ _ensure_api_header(resp)
+
+
+def _get_simple_response(url: str, session: PipSession) -> Response:
+ """Access an Simple API response with GET, and return the response.
+
+ This consists of three parts:
+
+ 1. If the URL looks suspiciously like an archive, send a HEAD first to
+ check the Content-Type is HTML or Simple API, to avoid downloading a
+ large file. Raise `_NotHTTP` if the content type cannot be determined, or
+ `_NotAPIContent` if it is not HTML or a Simple API.
+ 2. Actually perform the request. Raise HTTP exceptions on network failures.
+ 3. Check the Content-Type header to make sure we got a Simple API response,
+ and raise `_NotAPIContent` otherwise.
+ """
+ if is_archive_file(Link(url).filename):
+ _ensure_api_response(url, session=session)
+
+ logger.debug("Getting page %s", redact_auth_from_url(url))
+
+ resp = session.get(
+ url,
+ headers={
+ "Accept": ", ".join(
+ [
+ "application/vnd.pypi.simple.v1+json",
+ "application/vnd.pypi.simple.v1+html; q=0.1",
+ "text/html; q=0.01",
+ ]
+ ),
+ # We don't want to blindly returned cached data for
+ # /simple/, because authors generally expecting that
+ # twine upload && pip install will function, but if
+ # they've done a pip install in the last ~10 minutes
+ # it won't. Thus by setting this to zero we will not
+ # blindly use any cached data, however the benefit of
+ # using max-age=0 instead of no-cache, is that we will
+ # still support conditional requests, so we will still
+ # minimize traffic sent in cases where the page hasn't
+ # changed at all, we will just always incur the round
+ # trip for the conditional GET now instead of only
+ # once per 10 minutes.
+ # For more information, please see pypa/pip#5670.
+ "Cache-Control": "max-age=0",
+ },
+ )
+ raise_for_status(resp)
+
+ # The check for archives above only works if the url ends with
+ # something that looks like an archive. However that is not a
+ # requirement of an url. Unless we issue a HEAD request on every
+ # url we cannot know ahead of time for sure if something is a
+ # Simple API response or not. However we can check after we've
+ # downloaded it.
+ _ensure_api_header(resp)
+
+ logger.debug(
+ "Fetched page %s as %s",
+ redact_auth_from_url(url),
+ resp.headers.get("Content-Type", "Unknown"),
+ )
+
+ return resp
+
+
+def _get_encoding_from_headers(headers: ResponseHeaders) -> Optional[str]:
+ """Determine if we have any encoding information in our headers."""
+ if headers and "Content-Type" in headers:
+ m = email.message.Message()
+ m["content-type"] = headers["Content-Type"]
+ charset = m.get_param("charset")
+ if charset:
+ return str(charset)
+ return None
+
+
+class CacheablePageContent:
+ def __init__(self, page: "IndexContent") -> None:
+ assert page.cache_link_parsing
+ self.page = page
+
+ def __eq__(self, other: object) -> bool:
+ return isinstance(other, type(self)) and self.page.url == other.page.url
+
+ def __hash__(self) -> int:
+ return hash(self.page.url)
+
+
+class ParseLinks(Protocol):
+ def __call__(self, page: "IndexContent") -> Iterable[Link]: ...
+
+
def with_cached_index_content(fn: ParseLinks) -> ParseLinks:
    """
    Given a function that parses an Iterable[Link] from an IndexContent, cache the
    function's result (keyed by CacheablePageContent), unless the IndexContent
    `page` has `page.cache_link_parsing == False`.
    """

    @functools.lru_cache(maxsize=None)
    def _cached(key: CacheablePageContent) -> List[Link]:
        return list(fn(key.page))

    @functools.wraps(fn)
    def _dispatch(page: "IndexContent") -> List[Link]:
        if not page.cache_link_parsing:
            # Caching disabled for this page: parse it fresh every time.
            return list(fn(page))
        return _cached(CacheablePageContent(page))

    return _dispatch
+
+
@with_cached_index_content
def parse_links(page: "IndexContent") -> Iterable[Link]:
    """
    Parse a Simple API's Index Content, and yield its anchor elements as Link objects.
    """

    if page.content_type.lower().startswith("application/vnd.pypi.simple.v1+json"):
        # PEP 691 JSON index: each entry of "files" describes one candidate.
        data = json.loads(page.content)
        for file in data.get("files", []):
            link = Link.from_json(file, page.url)
            if link is not None:
                yield link
        return

    # Legacy HTML index: collect <a> elements (honoring a <base href=...>).
    parser = HTMLLinkParser(page.url)
    parser.feed(page.content.decode(page.encoding or "utf-8"))

    base_url = parser.base_url or page.url
    for anchor in parser.anchors:
        link = Link.from_element(anchor, page_url=page.url, base_url=base_url)
        if link is not None:
            yield link
+
+
@dataclass(frozen=True)
class IndexContent:
    """Represents one response (or page), along with its URL.

    :param content: the raw body of the response.
    :param content_type: the Content-Type header of the response.
    :param encoding: the encoding to decode the given content.
    :param url: the URL from which the HTML was downloaded.
    :param cache_link_parsing: whether links parsed from this page's url
                               should be cached. PyPI index urls should
                               have this set to False, for example.
    """

    content: bytes
    content_type: str
    encoding: Optional[str]
    url: str
    cache_link_parsing: bool = True

    def __str__(self) -> str:
        # Never echo credentials embedded in the URL into logs.
        return redact_auth_from_url(self.url)
+
+
class HTMLLinkParser(HTMLParser):
    """
    HTMLParser that keeps the first base HREF and a list of all anchor
    elements' attributes.
    """

    def __init__(self, url: str) -> None:
        super().__init__(convert_charrefs=True)
        self.url: str = url
        self.base_url: Optional[str] = None
        self.anchors: List[Dict[str, Optional[str]]] = []

    def handle_starttag(self, tag: str, attrs: List[Tuple[str, Optional[str]]]) -> None:
        if tag == "a":
            self.anchors.append(dict(attrs))
        elif tag == "base" and self.base_url is None:
            # Only the first <base href="..."> wins, per HTML semantics.
            found = self.get_href(attrs)
            if found is not None:
                self.base_url = found

    def get_href(self, attrs: List[Tuple[str, Optional[str]]]) -> Optional[str]:
        """Return the value of the first "href" attribute, or None."""
        return next((value for name, value in attrs if name == "href"), None)
+
+
def _handle_get_simple_fail(
    link: Link,
    reason: Union[str, Exception],
    meth: Optional[Callable[..., None]] = None,
) -> None:
    """Log that `link` could not be fetched via `meth` (default: logger.debug)."""
    log = meth if meth is not None else logger.debug
    log("Could not fetch URL %s: %s - skipping", link, reason)
+
+
def _make_index_content(
    response: Response, cache_link_parsing: bool = True
) -> IndexContent:
    """Wrap a fetched response in an IndexContent, sniffing its charset."""
    return IndexContent(
        response.content,
        response.headers["Content-Type"],
        encoding=_get_encoding_from_headers(response.headers),
        url=response.url,
        cache_link_parsing=cache_link_parsing,
    )
+
+
def _get_index_content(link: Link, *, session: PipSession) -> Optional["IndexContent"]:
    """Fetch and return the index page behind ``link``, or None on failure.

    Every fetch problem (unsupported VCS scheme, wrong Content-Type, network
    error, timeout) is logged and swallowed, so one bad index location cannot
    abort the whole link-collection run.
    """
    # Strip any URL fragment; it is irrelevant for fetching.
    url = link.url.split("#", 1)[0]

    # Check for VCS schemes that do not support lookup as web pages.
    vcs_scheme = _match_vcs_scheme(url)
    if vcs_scheme:
        logger.warning(
            "Cannot look at %s URL %s because it does not support lookup as web pages.",
            vcs_scheme,
            link,
        )
        return None

    # Tack index.html onto file:// URLs that point to directories
    scheme, _, path, _, _, _ = urllib.parse.urlparse(url)
    if scheme == "file" and os.path.isdir(urllib.request.url2pathname(path)):
        # add trailing slash if not present so urljoin doesn't trim
        # final segment
        if not url.endswith("/"):
            url += "/"
        # TODO: In the future, it would be nice if pip supported PEP 691
        # style responses in the file:// URLs, however there's no
        # standard file extension for application/vnd.pypi.simple.v1+json
        # so we'll need to come up with something on our own.
        url = urllib.parse.urljoin(url, "index.html")
        logger.debug(" file: URL is directory, getting %s", url)

    try:
        resp = _get_simple_response(url, session=session)
    except _NotHTTP:
        logger.warning(
            "Skipping page %s because it looks like an archive, and cannot "
            "be checked by a HTTP HEAD request.",
            link,
        )
    except _NotAPIContent as exc:
        logger.warning(
            "Skipping page %s because the %s request got Content-Type: %s. "
            "The only supported Content-Types are application/vnd.pypi.simple.v1+json, "
            "application/vnd.pypi.simple.v1+html, and text/html",
            link,
            exc.request_desc,
            exc.content_type,
        )
    except NetworkConnectionError as exc:
        _handle_get_simple_fail(link, exc)
    except RetryError as exc:
        _handle_get_simple_fail(link, exc)
    except SSLError as exc:
        reason = "There was a problem confirming the ssl certificate: "
        reason += str(exc)
        _handle_get_simple_fail(link, reason, meth=logger.info)
    except requests.ConnectionError as exc:
        _handle_get_simple_fail(link, f"connection error: {exc}")
    except requests.Timeout:
        _handle_get_simple_fail(link, "timed out")
    else:
        # Success: wrap the response, honoring the link's caching preference.
        return _make_index_content(resp, cache_link_parsing=link.cache_link_parsing)
    return None
+
+
class CollectedSources(NamedTuple):
    # Sources built from --find-links and from index URLs, respectively.
    # Entries may be None when a location could not be turned into a source.
    find_links: Sequence[Optional[LinkSource]]
    index_urls: Sequence[Optional[LinkSource]]
+
+
class LinkCollector:
    """
    Responsible for collecting Link objects from all configured locations,
    making network requests as needed.

    The class's main method is its collect_sources() method.
    """

    def __init__(
        self,
        session: PipSession,
        search_scope: SearchScope,
    ) -> None:
        self.search_scope = search_scope
        self.session = session

    @classmethod
    def create(
        cls,
        session: PipSession,
        options: Values,
        suppress_no_index: bool = False,
    ) -> "LinkCollector":
        """
        :param session: The Session to use to make requests.
        :param suppress_no_index: Whether to ignore the --no-index option
            when constructing the SearchScope object.
        """
        index_urls = [options.index_url] + options.extra_index_urls
        if options.no_index and not suppress_no_index:
            redacted = ",".join(redact_auth_from_url(url) for url in index_urls)
            logger.debug("Ignoring indexes: %s", redacted)
            index_urls = []

        # find_links may be unset on the options object; normalize to a list.
        search_scope = SearchScope.create(
            find_links=options.find_links or [],
            index_urls=index_urls,
            no_index=options.no_index,
        )
        return LinkCollector(session=session, search_scope=search_scope)

    @property
    def find_links(self) -> List[str]:
        return self.search_scope.find_links

    def fetch_response(self, location: Link) -> Optional[IndexContent]:
        """
        Fetch an HTML page containing package links.
        """
        return _get_index_content(location, session=self.session)

    def collect_sources(
        self,
        project_name: str,
        candidates_from_page: CandidatesFromPage,
    ) -> CollectedSources:
        def _deduped_sources(locations, *, expand_dir, cache_link_parsing):
            # An OrderedDict keyed by URL deduplicates repeated locations
            # while preserving their order.
            return collections.OrderedDict(
                build_source(
                    location,
                    candidates_from_page=candidates_from_page,
                    page_validator=self.session.is_secure_origin,
                    expand_dir=expand_dir,
                    cache_link_parsing=cache_link_parsing,
                    project_name=project_name,
                )
                for location in locations
            ).values()

        index_url_sources = _deduped_sources(
            self.search_scope.get_index_urls_locations(project_name),
            expand_dir=False,
            cache_link_parsing=False,
        )
        find_links_sources = _deduped_sources(
            self.find_links,
            expand_dir=True,
            cache_link_parsing=True,
        )

        if logger.isEnabledFor(logging.DEBUG):
            lines = [
                f"* {s.link}"
                for s in itertools.chain(find_links_sources, index_url_sources)
                if s is not None and s.link is not None
            ]
            header = (
                f"{len(lines)} location(s) to search "
                f"for versions of {project_name}:"
            )
            logger.debug("\n".join([header] + lines))

        return CollectedSources(
            find_links=list(find_links_sources),
            index_urls=list(index_url_sources),
        )
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/__pycache__/__init__.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0a8927909be3769cac53b1a8fb8a507b5b0c4f65
Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/__pycache__/__init__.cpython-311.pyc differ
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/__pycache__/prepare.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/__pycache__/prepare.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d7b63e2dd06bd6ab5d0cb06d102d24b548b15937
Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/__pycache__/prepare.cpython-311.pyc differ
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/__pycache__/__init__.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..40a5160e9c7d66f194076a8bed4c4b583c55adc9
Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/__pycache__/__init__.cpython-311.pyc differ
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/__pycache__/metadata.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/__pycache__/metadata.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..48a1a874bfa23da5ff6efde58d9287a77261738c
Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/__pycache__/metadata.cpython-311.pyc differ
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/__pycache__/metadata_editable.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/__pycache__/metadata_editable.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..98627468e43519ee44fc81e01ee095fc26a496ab
Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/__pycache__/metadata_editable.cpython-311.pyc differ
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/__pycache__/wheel.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/__pycache__/wheel.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..47cfea8f886dde92b3a3b37c659b7fb1681b5ac1
Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/__pycache__/wheel.cpython-311.pyc differ
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/build_tracker.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/build_tracker.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ed8dd235960bf406e26d5c0069b12c7f2d463e9
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/build_tracker.py
@@ -0,0 +1,138 @@
+import contextlib
+import hashlib
+import logging
+import os
+from types import TracebackType
+from typing import Dict, Generator, Optional, Type, Union
+
+from pip._internal.req.req_install import InstallRequirement
+from pip._internal.utils.temp_dir import TempDirectory
+
+logger = logging.getLogger(__name__)
+
+
@contextlib.contextmanager
def update_env_context_manager(**changes: str) -> Generator[None, None, None]:
    """Temporarily apply ``changes`` to os.environ, restoring originals on exit.

    Variables that did not exist beforehand are removed again; variables that
    did exist are reset to their previous values.
    """
    target = os.environ

    # Sentinel distinguishing "variable was unset" from any real value.
    absent = object()
    previous: Dict[str, Union[object, str]] = {
        name: target.get(name, absent) for name in changes
    }
    target.update(changes)

    try:
        yield
    finally:
        # Restore original values in the target.
        for name, old in previous.items():
            if old is absent:
                del target[name]
            else:
                assert isinstance(old, str)  # for mypy
                target[name] = old
+
+
@contextlib.contextmanager
def get_build_tracker() -> Generator["BuildTracker", None, None]:
    """Yield a BuildTracker rooted at $PIP_BUILD_TRACKER.

    When the variable is unset, a temporary root is created and the variable
    is exported so that subprocesses share the same tracker root.
    """
    root = os.environ.get("PIP_BUILD_TRACKER")
    with contextlib.ExitStack() as ctx:
        if root is None:
            # Register temp dir and env override on the stack so both are
            # torn down (in reverse order) when this context exits.
            root = ctx.enter_context(TempDirectory(kind="build-tracker")).path
            ctx.enter_context(update_env_context_manager(PIP_BUILD_TRACKER=root))
            logger.debug("Initialized build tracking at %s", root)

        with BuildTracker(root) as tracker:
            yield tracker
+
+
class TrackerId(str):
    """Uniquely identifying string provided to the build tracker."""

    # A distinct subtype (rather than a bare str) so call sites make explicit
    # which strings are meant as tracker keys.
+
+
class BuildTracker:
    """Guard against an sdist recursively requesting itself as a setup requirement.

    Requirements register themselves via ``track()`` while their build runs;
    a requirement that shows up again while already registered raises
    LookupError. This stops fork bombs embedded in malicious packages.
    """

    def __init__(self, root: str) -> None:
        self._root = root
        self._entries: Dict[TrackerId, InstallRequirement] = {}
        logger.debug("Created build tracker: %s", self._root)

    def __enter__(self) -> "BuildTracker":
        logger.debug("Entered build tracker: %s", self._root)
        return self

    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> None:
        self.cleanup()

    def _entry_path(self, key: TrackerId) -> str:
        # Hash the key so arbitrary requirement strings map to valid filenames.
        return os.path.join(self._root, hashlib.sha224(key.encode()).hexdigest())

    def add(self, req: InstallRequirement, key: TrackerId) -> None:
        """Start tracking ``req``.

        :raises LookupError: if ``key`` is already being built (its entry
            file exists and is readable).
        """
        entry_path = self._entry_path(key)

        # An existing readable entry file means another build of the same
        # requirement is already in flight.
        try:
            with open(entry_path) as fp:
                contents = fp.read()
        except FileNotFoundError:
            pass
        else:
            raise LookupError(f"{req.link} is already being built: {contents}")

        # If we're here, req should really not be building already.
        assert key not in self._entries

        with open(entry_path, "w", encoding="utf-8") as fp:
            fp.write(str(req))
        self._entries[key] = req

        logger.debug("Added %s to build tracker %r", req, self._root)

    def remove(self, req: InstallRequirement, key: TrackerId) -> None:
        """Stop tracking ``req``: delete its entry file and forget it."""
        os.unlink(self._entry_path(key))
        del self._entries[key]
        logger.debug("Removed %s from build tracker %r", req, self._root)

    def cleanup(self) -> None:
        # Remove every outstanding entry (list() because remove() mutates).
        for key, req in list(self._entries.items()):
            self.remove(req, key)
        logger.debug("Removed build tracker: %r", self._root)

    @contextlib.contextmanager
    def track(self, req: InstallRequirement, key: str) -> Generator[None, None, None]:
        """Ensure that `key` cannot install itself as a setup requirement.

        :raises LookupError: If `key` was already provided in a parent invocation of
            the context introduced by this method."""
        tracker_id = TrackerId(key)
        self.add(req, tracker_id)
        # Deliberately no try/finally: if the build raises, the entry stays
        # registered and is removed by cleanup() when the tracker exits.
        yield
        self.remove(req, tracker_id)
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/wheel.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/wheel.py
new file mode 100644
index 0000000000000000000000000000000000000000..064811ad11bb07b2b7bc8e30ec6c03f21997d6b2
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/wheel.py
@@ -0,0 +1,37 @@
+import logging
+import os
+from typing import Optional
+
+from pip._vendor.pyproject_hooks import BuildBackendHookCaller
+
+from pip._internal.utils.subprocess import runner_with_spinner_message
+
+logger = logging.getLogger(__name__)
+
+
def build_wheel_pep517(
    name: str,
    backend: BuildBackendHookCaller,
    metadata_directory: str,
    tempd: str,
) -> Optional[str]:
    """Build one InstallRequirement using the PEP 517 build process.

    :param name: project name, used only for log/spinner messages.
    :param backend: hook caller for the project's build backend.
    :param metadata_directory: previously-prepared metadata dir (required).
    :param tempd: destination directory for the built wheel.
    :return: path to the wheel if successfully built, otherwise None.
    """
    assert metadata_directory is not None
    try:
        logger.debug("Destination directory: %s", tempd)

        spin_message = f"Building wheel for {name} (pyproject.toml)"
        with backend.subprocess_runner(runner_with_spinner_message(spin_message)):
            wheel_name = backend.build_wheel(
                tempd, metadata_directory=metadata_directory
            )
    except Exception:
        # Build backends may raise arbitrary exceptions; report and signal
        # failure with None instead of propagating.
        logger.error("Failed building wheel for %s", name)
        return None
    return os.path.join(tempd, wheel_name)
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/req/__pycache__/constructors.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/req/__pycache__/constructors.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d9d2c19dc5faba1fc318e541b41f907fb9472f97
Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/req/__pycache__/constructors.cpython-311.pyc differ
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/req/__pycache__/req_file.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/req/__pycache__/req_file.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..68fee0858ba6aaa3ac1d8dcaefceb78a09941af3
Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/req/__pycache__/req_file.cpython-311.pyc differ
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/req/__pycache__/req_install.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/req/__pycache__/req_install.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..362766265e705493bf03764228a24c352f77f0a4
Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/req/__pycache__/req_install.cpython-311.pyc differ
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/req/__pycache__/req_set.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/req/__pycache__/req_set.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..66c3b695ce6c734d7d23afb7b6f21794ffb0772d
Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/req/__pycache__/req_set.cpython-311.pyc differ
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/req/__pycache__/req_uninstall.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/req/__pycache__/req_uninstall.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a3de321d076803c46c14a3675cd4acad92d98e1d
Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/req/__pycache__/req_uninstall.cpython-311.pyc differ
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/req/constructors.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/req/constructors.py
new file mode 100644
index 0000000000000000000000000000000000000000..56a964f3177dc47d747ccf55361fccb8aea70ebf
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/req/constructors.py
@@ -0,0 +1,560 @@
+"""Backing implementation for InstallRequirement's various constructors
+
The idea here is that these formed a major chunk of InstallRequirement's size
so, moving them and support code dedicated to them outside of that class
helps create better understandability for the rest of the code.
+
+These are meant to be used elsewhere within pip to create instances of
+InstallRequirement.
+"""
+
+import copy
+import logging
+import os
+import re
+from dataclasses import dataclass
+from typing import Collection, Dict, List, Optional, Set, Tuple, Union
+
+from pip._vendor.packaging.markers import Marker
+from pip._vendor.packaging.requirements import InvalidRequirement, Requirement
+from pip._vendor.packaging.specifiers import Specifier
+
+from pip._internal.exceptions import InstallationError
+from pip._internal.models.index import PyPI, TestPyPI
+from pip._internal.models.link import Link
+from pip._internal.models.wheel import Wheel
+from pip._internal.req.req_file import ParsedRequirement
+from pip._internal.req.req_install import InstallRequirement
+from pip._internal.utils.filetypes import is_archive_file
+from pip._internal.utils.misc import is_installable_dir
+from pip._internal.utils.packaging import get_requirement
+from pip._internal.utils.urls import path_to_url
+from pip._internal.vcs import is_url, vcs
+
__all__ = [
    "install_req_from_editable",
    "install_req_from_line",
    "parse_editable",
]

logger = logging.getLogger(__name__)
# Keys of all PEP 440 comparison operators ("==", ">=", "~=", ...); consulted
# when hinting that a bare "=" in a requirement probably meant "==".
operators = Specifier._operators.keys()
+
+
+def _strip_extras(path: str) -> Tuple[str, Optional[str]]:
+ m = re.match(r"^(.+)(\[[^\]]+\])$", path)
+ extras = None
+ if m:
+ path_no_extras = m.group(1)
+ extras = m.group(2)
+ else:
+ path_no_extras = path
+
+ return path_no_extras, extras
+
+
def convert_extras(extras: Optional[str]) -> Set[str]:
    """Turn an extras string like "[a,b]" into a set of names ("" / None -> empty)."""
    if not extras:
        return set()
    # Parse through a dummy requirement so PEP 508 normalization applies.
    return get_requirement("placeholder" + extras.lower()).extras
+
+
def _set_requirement_extras(req: Requirement, new_extras: Set[str]) -> Requirement:
    """
    Returns a new requirement based on the given one, with the supplied extras. If the
    given requirement already has extras those are replaced (or dropped if no new extras
    are given).
    """
    # see https://peps.python.org/pep-0508/#complete-grammar
    match: Optional[re.Match[str]] = re.fullmatch(
        r"([\w\t .-]+)(\[[^\]]*\])?(.*)", str(req), flags=re.ASCII
    )
    # ireq.req is a valid requirement so the regex should always match
    assert (
        match is not None
    ), f"regex match on requirement {req} failed, this should never happen"
    pre: Optional[str] = match.group(1)
    post: Optional[str] = match.group(3)
    assert (
        pre is not None and post is not None
    ), f"regex group selection for requirement {req} failed, this should never happen"
    # Rebuild the requirement with the replacement extras spliced in between
    # the name and any specifier/marker tail.
    joined = ",".join(sorted(new_extras)) if new_extras else ""
    return get_requirement(f"{pre}[{joined}]{post}")
+
+
def parse_editable(editable_req: str) -> Tuple[Optional[str], str, Set[str]]:
    """Parses an editable requirement into:
    - a requirement name (None when it cannot be determined for a local file URL)
    - a URL
    - extras
    Accepted requirements:
        svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir
        .[some_extra]

    :raises InstallationError: if the input is neither a local directory /
        file URL nor a recognized VCS URL, or a VCS URL lacks an #egg= name.
    """

    url = editable_req

    # If a file path is specified with extras, strip off the extras.
    url_no_extras, extras = _strip_extras(url)

    if os.path.isdir(url_no_extras):
        # Treating it as code that has already been checked out
        url_no_extras = path_to_url(url_no_extras)

    if url_no_extras.lower().startswith("file:"):
        package_name = Link(url_no_extras).egg_fragment
        if extras:
            return (
                package_name,
                url_no_extras,
                get_requirement("placeholder" + extras.lower()).extras,
            )
        else:
            return package_name, url_no_extras, set()

    # Prefix a bare VCS scheme ("git:...") with its VCS name ("git+git:...").
    for version_control in vcs:
        if url.lower().startswith(f"{version_control}:"):
            url = f"{version_control}+{url}"
            break

    link = Link(url)

    if not link.is_vcs:
        backends = ", ".join(vcs.all_schemes)
        raise InstallationError(
            f"{editable_req} is not a valid editable requirement. "
            f"It should either be a path to a local project or a VCS URL "
            f"(beginning with {backends})."
        )

    package_name = link.egg_fragment
    if not package_name:
        raise InstallationError(
            f"Could not detect requirement name for '{editable_req}', "
            "please specify one with #egg=your_package_name"
        )
    return package_name, url, set()
+
+
def check_first_requirement_in_file(filename: str) -> None:
    """Check if file is parsable as a requirements file.

    This is heavily based on ``pkg_resources.parse_requirements``, but
    simplified to just check the first meaningful line.

    :raises InvalidRequirement: If the first meaningful line cannot be parsed
        as a requirement.
    """
    with open(filename, encoding="utf-8", errors="ignore") as f:
        # Create a steppable iterator, so we can handle \-continuations.
        lines = (
            line
            for line in (line.strip() for line in f)
            if line and not line.startswith("#")  # Skip blank lines/comments.
        )

        for line in lines:
            # Drop comments -- a hash without a space may be in a URL.
            if " #" in line:
                line = line[: line.find(" #")]
            # If there is a line continuation, drop it, and append the next line.
            # Lines were already stripped, so the continuation is the single
            # trailing backslash character; slicing off two characters (as the
            # pkg_resources original did) would also eat the character before
            # the backslash.
            if line.endswith("\\"):
                line = line[:-1].strip() + next(lines, "")
            get_requirement(line)
            return
+
+
def deduce_helpful_msg(req: str) -> str:
    """Return a hint for when a requirements-file path was passed as a requirement.

    :param req: Requirements file path
    """
    if not os.path.exists(req):
        return f" File '{req}' does not exist."

    msg = " The path does exist. "
    # Try to parse and check if it is a requirements file.
    try:
        check_first_requirement_in_file(req)
    except InvalidRequirement:
        logger.debug("Cannot parse '%s' as requirements file", req)
        return msg
    msg += (
        f"The argument you provided ({req}) appears to be a requirements "
        f"file. If that is the case, use the '-r' flag to install the "
        f"packages specified within it."
    )
    return msg
+
+
@dataclass(frozen=True)
class RequirementParts:
    # Intermediate parse result shared by the install_req_from_* constructors.
    requirement: Optional[Requirement]  # parsed named requirement, if any
    link: Optional[Link]  # direct URL / local-path target, if any
    markers: Optional[Marker]  # environment markers split off the line
    extras: Set[str]  # requested extras
+
+
def parse_req_from_editable(editable_req: str) -> RequirementParts:
    """Parse an editable requirement string into RequirementParts."""
    name, url, extras_override = parse_editable(editable_req)

    req: Optional[Requirement] = None
    if name is not None:
        try:
            req = get_requirement(name)
        except InvalidRequirement as exc:
            raise InstallationError(f"Invalid requirement: {name!r}: {exc}")

    # Editables never carry markers; extras come from the editable string.
    return RequirementParts(req, Link(url), None, extras_override)
+
+
+# ---- The actual constructors follow ----
+
+
def install_req_from_editable(
    editable_req: str,
    comes_from: Optional[Union[InstallRequirement, str]] = None,
    *,
    use_pep517: Optional[bool] = None,
    isolated: bool = False,
    global_options: Optional[List[str]] = None,
    hash_options: Optional[Dict[str, List[str]]] = None,
    constraint: bool = False,
    user_supplied: bool = False,
    permit_editable_wheels: bool = False,
    config_settings: Optional[Dict[str, Union[str, List[str]]]] = None,
) -> InstallRequirement:
    """Create an editable InstallRequirement from an editable requirement string."""
    parts = parse_req_from_editable(editable_req)
    return InstallRequirement(
        parts.requirement,
        link=parts.link,
        extras=parts.extras,
        comes_from=comes_from,
        editable=True,
        permit_editable_wheels=permit_editable_wheels,
        use_pep517=use_pep517,
        isolated=isolated,
        global_options=global_options,
        hash_options=hash_options,
        config_settings=config_settings,
        constraint=constraint,
        user_supplied=user_supplied,
    )
+
+
+def _looks_like_path(name: str) -> bool:
+ """Checks whether the string "looks like" a path on the filesystem.
+
+ This does not check whether the target actually exists, only judge from the
+ appearance.
+
+ Returns true if any of the following conditions is true:
+ * a path separator is found (either os.path.sep or os.path.altsep);
+ * a dot is found (which represents the current directory).
+ """
+ if os.path.sep in name:
+ return True
+ if os.path.altsep is not None and os.path.altsep in name:
+ return True
+ if name.startswith("."):
+ return True
+ return False
+
+
def _get_url_from_path(path: str, name: str) -> Optional[str]:
    """
    First, it checks whether a provided path is an installable directory. If it
    is, returns the path.

    If false, check if the path is an archive file (such as a .whl).
    The function checks if the path is a file. If false, if the path has
    an @, it will treat it as a PEP 440 URL requirement and return the path.
    """
    if _looks_like_path(name) and os.path.isdir(path):
        if not is_installable_dir(path):
            # TODO: The is_installable_dir test here might not be necessary
            # now that it is done in load_pyproject_toml too.
            raise InstallationError(
                f"Directory {name!r} is not installable. Neither 'setup.py' "
                "nor 'pyproject.toml' found."
            )
        return path_to_url(path)
    if not is_archive_file(path):
        return None
    if os.path.isfile(path):
        return path_to_url(path)
    head, sep, _ = name.partition("@")
    if sep and not _looks_like_path(head):
        # If the path contains '@' and the part before it does not look
        # like a path, try to treat it as a PEP 440 URL req instead.
        return None
    logger.warning(
        "Requirement %r looks like a filename, but the file does not exist",
        name,
    )
    return path_to_url(path)
+
+
def parse_req_from_line(name: str, line_source: Optional[str]) -> RequirementParts:
    """Parse one requirement line (specifier, local path, or URL) into parts.

    :param name: the requirement text, possibly carrying markers and extras.
    :param line_source: optional description of where the line came from,
        used to enrich error messages.
    :raises InstallationError: when the text cannot be parsed as a requirement.
    """
    # URLs may legitimately contain ";", so require "; " as the marker
    # separator there; plain specifiers split on a bare ";".
    if is_url(name):
        marker_sep = "; "
    else:
        marker_sep = ";"
    if marker_sep in name:
        name, markers_as_string = name.split(marker_sep, 1)
        markers_as_string = markers_as_string.strip()
        if not markers_as_string:
            markers = None
        else:
            markers = Marker(markers_as_string)
    else:
        markers = None
    name = name.strip()
    req_as_string = None
    path = os.path.normpath(os.path.abspath(name))
    link = None
    extras_as_string = None

    if is_url(name):
        link = Link(name)
    else:
        p, extras_as_string = _strip_extras(path)
        url = _get_url_from_path(p, name)
        if url is not None:
            link = Link(url)

    # it's a local file, dir, or url
    if link:
        # Handle relative file URLs
        if link.scheme == "file" and re.search(r"\.\./", link.url):
            link = Link(path_to_url(os.path.normpath(os.path.abspath(link.path))))
        # wheel file
        if link.is_wheel:
            wheel = Wheel(link.filename)  # can raise InvalidWheelFilename
            req_as_string = f"{wheel.name}=={wheel.version}"
        else:
            # set the req to the egg fragment. when it's not there, this
            # will become an 'unnamed' requirement
            req_as_string = link.egg_fragment

    # a requirement specifier
    else:
        req_as_string = name

    extras = convert_extras(extras_as_string)

    def with_source(text: str) -> str:
        # Append "(from <line_source>)" to error text when a source is known.
        if not line_source:
            return text
        return f"{text} (from {line_source})"

    def _parse_req_string(req_as_string: str) -> Requirement:
        try:
            return get_requirement(req_as_string)
        except InvalidRequirement as exc:
            # Build the most helpful hint we can for the common mistakes.
            if os.path.sep in req_as_string:
                add_msg = "It looks like a path."
                add_msg += deduce_helpful_msg(req_as_string)
            elif "=" in req_as_string and not any(
                op in req_as_string for op in operators
            ):
                add_msg = "= is not a valid operator. Did you mean == ?"
            else:
                add_msg = ""
            msg = with_source(f"Invalid requirement: {req_as_string!r}: {exc}")
            if add_msg:
                msg += f"\nHint: {add_msg}"
            raise InstallationError(msg)

    if req_as_string is not None:
        req: Optional[Requirement] = _parse_req_string(req_as_string)
    else:
        req = None

    return RequirementParts(req, link, markers, extras)
+
+
def install_req_from_line(
    name: str,
    comes_from: Optional[Union[str, InstallRequirement]] = None,
    *,
    use_pep517: Optional[bool] = None,
    isolated: bool = False,
    global_options: Optional[List[str]] = None,
    hash_options: Optional[Dict[str, List[str]]] = None,
    constraint: bool = False,
    line_source: Optional[str] = None,
    user_supplied: bool = False,
    config_settings: Optional[Dict[str, Union[str, List[str]]]] = None,
) -> InstallRequirement:
    """Create an InstallRequirement from a single line.

    The line may be a requirement specifier, a directory containing
    'setup.py', a filename, or a URL.

    :param line_source: An optional string describing where the line is from,
        for logging purposes in case of an error.
    """
    parsed = parse_req_from_line(name, line_source)
    return InstallRequirement(
        parsed.requirement,
        comes_from,
        link=parsed.link,
        markers=parsed.markers,
        extras=parsed.extras,
        use_pep517=use_pep517,
        isolated=isolated,
        global_options=global_options,
        hash_options=hash_options,
        config_settings=config_settings,
        constraint=constraint,
        user_supplied=user_supplied,
    )
+
+
def install_req_from_req_string(
    req_string: str,
    comes_from: Optional[InstallRequirement] = None,
    isolated: bool = False,
    use_pep517: Optional[bool] = None,
    user_supplied: bool = False,
) -> InstallRequirement:
    """Create an InstallRequirement from an already-formed specifier string.

    :raises InstallationError: if the specifier is invalid, or if a
        PyPI-hosted parent depends on a direct-URL requirement.
    """
    try:
        req = get_requirement(req_string)
    except InvalidRequirement as exc:
        raise InstallationError(f"Invalid requirement: {req_string!r}: {exc}")

    # Explicitly disallow pypi packages that depend on external urls
    forbidden_hosts = (
        PyPI.file_storage_domain,
        TestPyPI.file_storage_domain,
    )
    parent_hosted_on_pypi = bool(
        comes_from and comes_from.link and comes_from.link.netloc in forbidden_hosts
    )
    if req.url and parent_hosted_on_pypi:
        raise InstallationError(
            "Packages installed from PyPI cannot depend on packages "
            "which are not also hosted on PyPI.\n"
            f"{comes_from.name} depends on {req} "
        )

    return InstallRequirement(
        req,
        comes_from,
        isolated=isolated,
        use_pep517=use_pep517,
        user_supplied=user_supplied,
    )
+
+
def install_req_from_parsed_requirement(
    parsed_req: ParsedRequirement,
    isolated: bool = False,
    use_pep517: Optional[bool] = None,
    user_supplied: bool = False,
    config_settings: Optional[Dict[str, Union[str, List[str]]]] = None,
) -> InstallRequirement:
    """Turn a parsed requirements-file line into an InstallRequirement,
    dispatching on whether the line is an editable install."""
    if parsed_req.is_editable:
        return install_req_from_editable(
            parsed_req.requirement,
            comes_from=parsed_req.comes_from,
            use_pep517=use_pep517,
            constraint=parsed_req.constraint,
            isolated=isolated,
            user_supplied=user_supplied,
            config_settings=config_settings,
        )

    # Normalize the options mapping once so the per-key lookups below
    # don't each have to guard against ``options`` being None.
    options = parsed_req.options or {}
    return install_req_from_line(
        parsed_req.requirement,
        comes_from=parsed_req.comes_from,
        use_pep517=use_pep517,
        isolated=isolated,
        global_options=options.get("global_options", []),
        hash_options=options.get("hashes", {}),
        constraint=parsed_req.constraint,
        line_source=parsed_req.line_source,
        user_supplied=user_supplied,
        config_settings=config_settings,
    )
+
+
def install_req_from_link_and_ireq(
    link: Link, ireq: InstallRequirement
) -> InstallRequirement:
    """Return a new InstallRequirement that mirrors ``ireq`` but points at
    ``link``.

    NOTE(review): ``extras`` and ``constraint`` are not carried over from
    ``ireq`` — confirm against callers that this is intended.
    """
    return InstallRequirement(
        req=ireq.req,
        comes_from=ireq.comes_from,
        editable=ireq.editable,
        link=link,
        markers=ireq.markers,
        use_pep517=ireq.use_pep517,
        isolated=ireq.isolated,
        global_options=ireq.global_options,
        hash_options=ireq.hash_options,
        config_settings=ireq.config_settings,
        user_supplied=ireq.user_supplied,
    )
+
+
def install_req_drop_extras(ireq: InstallRequirement) -> InstallRequirement:
    """
    Creates a new InstallationRequirement using the given template but without
    any extras. Sets the original requirement as the new one's parent
    (comes_from).
    """
    return InstallRequirement(
        # Strip extras from the underlying Requirement too (when there is
        # one), not just from the InstallRequirement's own ``extras`` list.
        req=(
            _set_requirement_extras(ireq.req, set()) if ireq.req is not None else None
        ),
        # The template becomes the parent so errors can be traced back to it.
        comes_from=ireq,
        editable=ireq.editable,
        link=ireq.link,
        markers=ireq.markers,
        use_pep517=ireq.use_pep517,
        isolated=ireq.isolated,
        global_options=ireq.global_options,
        hash_options=ireq.hash_options,
        constraint=ireq.constraint,
        extras=[],
        config_settings=ireq.config_settings,
        user_supplied=ireq.user_supplied,
        permit_editable_wheels=ireq.permit_editable_wheels,
    )
+
+
def install_req_extend_extras(
    ireq: InstallRequirement,
    extras: Collection[str],
) -> InstallRequirement:
    """
    Returns a copy of an installation requirement with some additional extras.
    Makes a shallow copy of the ireq object.
    """
    clone = copy.copy(ireq)
    merged_extras = {*ireq.extras, *extras}
    clone.extras = merged_extras
    if ireq.req is None:
        clone.req = None
    else:
        clone.req = _set_requirement_extras(ireq.req, merged_extras)
    return clone
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/req/req_set.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/req/req_set.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec7a6e07a25acfa978030c65ae7c1d8609163249
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/req/req_set.py
@@ -0,0 +1,82 @@
+import logging
+from collections import OrderedDict
+from typing import Dict, List
+
+from pip._vendor.packaging.utils import canonicalize_name
+
+from pip._internal.req.req_install import InstallRequirement
+
+logger = logging.getLogger(__name__)
+
+
class RequirementSet:
    """An ordered collection of named and unnamed InstallRequirements."""

    def __init__(self, check_supported_wheels: bool = True) -> None:
        """Create a RequirementSet."""
        # Named requirements, keyed by canonicalized project name.
        self.requirements: Dict[str, InstallRequirement] = OrderedDict()
        self.check_supported_wheels = check_supported_wheels
        # Requirements whose name is not (yet) known, e.g. bare paths/URLs.
        self.unnamed_requirements: List[InstallRequirement] = []

    def __str__(self) -> str:
        # Only top-level (user-requested) requirements, in name order.
        top_level = sorted(
            (req for req in self.requirements.values() if not req.comes_from),
            key=lambda req: canonicalize_name(req.name or ""),
        )
        return " ".join(str(req.req) for req in top_level)

    def __repr__(self) -> str:
        ordered = sorted(
            self.requirements.values(),
            key=lambda req: canonicalize_name(req.name or ""),
        )
        listing = ", ".join(str(req.req) for req in ordered)
        return (
            f"<{self.__class__.__name__} object; "
            f"{len(ordered)} requirement(s): {listing}>"
        )

    def add_unnamed_requirement(self, install_req: InstallRequirement) -> None:
        assert not install_req.name
        self.unnamed_requirements.append(install_req)

    def add_named_requirement(self, install_req: InstallRequirement) -> None:
        assert install_req.name
        self.requirements[canonicalize_name(install_req.name)] = install_req

    def has_requirement(self, name: str) -> bool:
        # A constraint alone does not count as "having" the requirement.
        found = self.requirements.get(canonicalize_name(name))
        return found is not None and not found.constraint

    def get_requirement(self, name: str) -> InstallRequirement:
        try:
            return self.requirements[canonicalize_name(name)]
        except KeyError:
            raise KeyError(f"No project with the name {name!r}") from None

    @property
    def all_requirements(self) -> List[InstallRequirement]:
        return [*self.unnamed_requirements, *self.requirements.values()]

    @property
    def requirements_to_install(self) -> List[InstallRequirement]:
        """Return the list of requirements that need to be installed.

        TODO remove this property together with the legacy resolver, since the new
        resolver only returns requirements that need to be installed.
        """
        return [
            req
            for req in self.all_requirements
            if not req.constraint and not req.satisfied_by
        ]
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/req/req_uninstall.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/req/req_uninstall.py
new file mode 100644
index 0000000000000000000000000000000000000000..26df20844b379bd7c06f3db7d75cddad5755d22e
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/req/req_uninstall.py
@@ -0,0 +1,633 @@
+import functools
+import os
+import sys
+import sysconfig
+from importlib.util import cache_from_source
+from typing import Any, Callable, Dict, Generator, Iterable, List, Optional, Set, Tuple
+
+from pip._internal.exceptions import LegacyDistutilsInstall, UninstallMissingRecord
+from pip._internal.locations import get_bin_prefix, get_bin_user
+from pip._internal.metadata import BaseDistribution
+from pip._internal.utils.compat import WINDOWS
+from pip._internal.utils.egg_link import egg_link_path_from_location
+from pip._internal.utils.logging import getLogger, indent_log
+from pip._internal.utils.misc import ask, normalize_path, renames, rmtree
+from pip._internal.utils.temp_dir import AdjacentTempDirectory, TempDirectory
+from pip._internal.utils.virtualenv import running_under_virtualenv
+
+logger = getLogger(__name__)
+
+
+def _script_names(
+ bin_dir: str, script_name: str, is_gui: bool
+) -> Generator[str, None, None]:
+ """Create the fully qualified name of the files created by
+ {console,gui}_scripts for the given ``dist``.
+ Returns the list of file names
+ """
+ exe_name = os.path.join(bin_dir, script_name)
+ yield exe_name
+ if not WINDOWS:
+ return
+ yield f"{exe_name}.exe"
+ yield f"{exe_name}.exe.manifest"
+ if is_gui:
+ yield f"{exe_name}-script.pyw"
+ else:
+ yield f"{exe_name}-script.py"
+
+
+def _unique(
+ fn: Callable[..., Generator[Any, None, None]]
+) -> Callable[..., Generator[Any, None, None]]:
+ @functools.wraps(fn)
+ def unique(*args: Any, **kw: Any) -> Generator[Any, None, None]:
+ seen: Set[Any] = set()
+ for item in fn(*args, **kw):
+ if item not in seen:
+ seen.add(item)
+ yield item
+
+ return unique
+
+
@_unique
def uninstallation_paths(dist: BaseDistribution) -> Generator[str, None, None]:
    """
    Yield all the uninstallation paths for dist based on RECORD-without-.py[co]

    Every entry declared in RECORD is yielded joined onto the install
    location; for each ``.py`` entry the sibling ``.pyc`` and ``.pyo``
    files are yielded as well.

    UninstallPathSet.add() takes care of the __pycache__ .py[co].

    If RECORD is not found, raises UninstallMissingRecord.

    https://packaging.python.org/specifications/recording-installed-packages/
    """
    location = dist.location
    assert location is not None, "not installed"

    declared = dist.iter_declared_entries()
    if declared is None:
        raise UninstallMissingRecord(distribution=dist)

    for entry in declared:
        full_path = os.path.join(location, entry)
        yield full_path
        if full_path.endswith(".py"):
            directory, filename = os.path.split(full_path)
            stem = filename[:-3]
            for suffix in (".pyc", ".pyo"):
                yield os.path.join(directory, stem + suffix)
+
+
def compact(paths: Iterable[str]) -> Set[str]:
    """Reduce a path set to the minimal set of prefixes covering it.

    If both ``/a/path/`` and ``/a/path/to/a/file.txt`` are present, only
    the shorter directory path is kept."""
    sep = os.path.sep
    kept: Set[str] = set()
    # Shortest first, so directories are admitted before their contents.
    for candidate in sorted(paths, key=len):
        covered = False
        for prefix in kept:
            stripped = prefix.rstrip("*")
            boundary = len(stripped.rstrip(sep))
            if candidate.startswith(stripped) and candidate[boundary] == sep:
                covered = True
                break
        if not covered:
            kept.add(candidate)
    return kept
+
+
def compress_for_rename(paths: Iterable[str]) -> Set[str]:
    """Returns a set containing the paths that need to be renamed.

    This set may include directories when the original sequence of paths
    included every file on disk.
    """
    # Map the normalized-case spelling back to the original spelling so
    # the result can be returned in the caller's casing.
    case_map = {os.path.normcase(p): p for p in paths}
    remaining = set(case_map)
    # Parent directories of the given paths, shortest first, so outer
    # directories are inspected before directories nested inside them.
    unchecked = sorted({os.path.split(p)[0] for p in case_map.values()}, key=len)
    wildcards: Set[str] = set()

    def norm_join(*a: str) -> str:
        return os.path.normcase(os.path.join(*a))

    for root in unchecked:
        if any(os.path.normcase(root).startswith(w) for w in wildcards):
            # This directory has already been handled.
            continue

        all_files: Set[str] = set()
        # NOTE(review): all_subdirs is collected but never read below —
        # confirm whether it is dead code.
        all_subdirs: Set[str] = set()
        for dirname, subdirs, files in os.walk(root):
            all_subdirs.update(norm_join(root, dirname, d) for d in subdirs)
            all_files.update(norm_join(root, dirname, f) for f in files)
        # If all the files we found are in our remaining set of files to
        # remove, then remove them from the latter set and add a wildcard
        # for the directory.
        if not (all_files - remaining):
            remaining.difference_update(all_files)
            wildcards.add(root + os.sep)

    return set(map(case_map.__getitem__, remaining)) | wildcards
+
+
def compress_for_output_listing(paths: Iterable[str]) -> Tuple[Set[str], Set[str]]:
    """Returns a tuple of 2 sets of which paths to display to user

    The first set contains paths that would be deleted. Files of a package
    are not added and the top-level directory of the package has a '*' added
    at the end - to signify that all its contents are removed.

    The second set contains files that would have been skipped in the above
    folders.
    """

    will_remove = set(paths)
    will_skip: Set[str] = set()

    # Determine folders and files
    folders = set()
    files = set()
    for path in will_remove:
        if path.endswith(".pyc"):
            continue
        # Package markers collapse the whole directory into one "dir/*"
        # entry; everything else is listed file by file.
        if path.endswith("__init__.py") or ".dist-info" in path:
            folders.add(os.path.dirname(path))
        files.add(path)

    _normcased_files = set(map(os.path.normcase, files))

    folders = compact(folders)

    # This walks the tree using os.walk to not miss extra folders
    # that might get added.
    for folder in folders:
        for dirpath, _, dirfiles in os.walk(folder):
            for fname in dirfiles:
                if fname.endswith(".pyc"):
                    continue

                file_ = os.path.join(dirpath, fname)
                if (
                    os.path.isfile(file_)
                    and os.path.normcase(file_) not in _normcased_files
                ):
                    # We are skipping this file. Add it to the set.
                    will_skip.add(file_)

    will_remove = files | {os.path.join(folder, "*") for folder in folders}

    return will_remove, will_skip
+
+
class StashedUninstallPathSet:
    """A set of file rename operations to stash files while
    tentatively uninstalling them."""

    def __init__(self) -> None:
        # Mapping from source file root to [Adjacent]TempDirectory
        # for files under that directory.
        self._save_dirs: Dict[str, TempDirectory] = {}
        # (old path, new path) tuples for each move that may need
        # to be undone.
        self._moves: List[Tuple[str, str]] = []

    def _get_directory_stash(self, path: str) -> str:
        """Stashes a directory.

        Directories are stashed adjacent to their original location if
        possible, or else moved/copied into the user's temp dir."""

        try:
            save_dir: TempDirectory = AdjacentTempDirectory(path)
        except OSError:
            # Could not create a sibling temp dir; fall back to one in
            # the user's temp location.
            save_dir = TempDirectory(kind="uninstall")
        self._save_dirs[os.path.normcase(path)] = save_dir

        return save_dir.path

    def _get_file_stash(self, path: str) -> str:
        """Stashes a file.

        If no root has been provided, one will be created for the directory
        in the user's temp directory."""
        path = os.path.normcase(path)
        head, old_head = os.path.dirname(path), None
        save_dir = None

        # Walk up the ancestors looking for a directory that already has
        # a stash dir; reuse it when found.
        while head != old_head:
            try:
                save_dir = self._save_dirs[head]
                break
            except KeyError:
                pass
            head, old_head = os.path.dirname(head), head
        else:
            # Did not find any suitable root
            head = os.path.dirname(path)
            save_dir = TempDirectory(kind="uninstall")
            self._save_dirs[head] = save_dir

        relpath = os.path.relpath(path, head)
        if relpath and relpath != os.path.curdir:
            return os.path.join(save_dir.path, relpath)
        return save_dir.path

    def stash(self, path: str) -> str:
        """Stashes the directory or file and returns its new location.
        Handle symlinks as files to avoid modifying the symlink targets.
        """
        path_is_dir = os.path.isdir(path) and not os.path.islink(path)
        if path_is_dir:
            new_path = self._get_directory_stash(path)
        else:
            new_path = self._get_file_stash(path)

        self._moves.append((path, new_path))
        if path_is_dir and os.path.isdir(new_path):
            # If we're moving a directory, we need to
            # remove the destination first or else it will be
            # moved to inside the existing directory.
            # We just created new_path ourselves, so it will
            # be removable.
            os.rmdir(new_path)
        renames(path, new_path)
        return new_path

    def commit(self) -> None:
        """Commits the uninstall by removing stashed files."""
        for save_dir in self._save_dirs.values():
            save_dir.cleanup()
        self._moves = []
        self._save_dirs = {}

    def rollback(self) -> None:
        """Undoes the uninstall by moving stashed files back."""
        for p in self._moves:
            logger.info("Moving to %s\n from %s", *p)

        for new_path, path in self._moves:
            try:
                logger.debug("Replacing %s from %s", new_path, path)
                # Clear anything now occupying the original location
                # before moving the stashed copy back.
                if os.path.isfile(new_path) or os.path.islink(new_path):
                    os.unlink(new_path)
                elif os.path.isdir(new_path):
                    rmtree(new_path)
                renames(path, new_path)
            except OSError as ex:
                logger.error("Failed to restore %s", new_path)
                logger.debug("Exception: %s", ex)

        self.commit()

    @property
    def can_rollback(self) -> bool:
        # True while stashed moves exist that have not been committed.
        return bool(self._moves)
+
+
class UninstallPathSet:
    """A set of file paths to be removed in the uninstallation of a
    requirement."""

    def __init__(self, dist: BaseDistribution) -> None:
        self._paths: Set[str] = set()
        self._refuse: Set[str] = set()
        self._pth: Dict[str, UninstallPthEntries] = {}
        self._dist = dist
        self._moved_paths = StashedUninstallPathSet()
        # Create local cache of normalize_path results. Creating an UninstallPathSet
        # can result in hundreds/thousands of redundant calls to normalize_path with
        # the same args, which hurts performance.
        self._normalize_path_cached = functools.lru_cache(normalize_path)

    def _permitted(self, path: str) -> bool:
        """
        Return True if the given path is one we are permitted to
        remove/modify, False otherwise.

        """
        # aka is_local, but caching normalized sys.prefix
        if not running_under_virtualenv():
            return True
        return path.startswith(self._normalize_path_cached(sys.prefix))

    def add(self, path: str) -> None:
        """Record ``path`` for removal if it exists and is permitted."""
        head, tail = os.path.split(path)

        # we normalize the head to resolve parent directory symlinks, but not
        # the tail, since we only want to uninstall symlinks, not their targets
        path = os.path.join(self._normalize_path_cached(head), os.path.normcase(tail))

        if not os.path.exists(path):
            return
        if self._permitted(path):
            self._paths.add(path)
        else:
            self._refuse.add(path)

        # __pycache__ files can show up after 'installed-files.txt' is created,
        # due to imports
        if os.path.splitext(path)[1] == ".py":
            self.add(cache_from_source(path))

    def add_pth(self, pth_file: str, entry: str) -> None:
        """Record that ``entry`` should be removed from ``pth_file``."""
        pth_file = self._normalize_path_cached(pth_file)
        if self._permitted(pth_file):
            if pth_file not in self._pth:
                self._pth[pth_file] = UninstallPthEntries(pth_file)
            self._pth[pth_file].add(entry)
        else:
            self._refuse.add(pth_file)

    def remove(self, auto_confirm: bool = False, verbose: bool = False) -> None:
        """Remove paths in ``self._paths`` with confirmation (unless
        ``auto_confirm`` is True)."""

        if not self._paths:
            logger.info(
                "Can't uninstall '%s'. No files were found to uninstall.",
                self._dist.raw_name,
            )
            return

        dist_name_version = f"{self._dist.raw_name}-{self._dist.raw_version}"
        logger.info("Uninstalling %s:", dist_name_version)

        with indent_log():
            if auto_confirm or self._allowed_to_proceed(verbose):
                moved = self._moved_paths

                for_rename = compress_for_rename(self._paths)

                for path in sorted(compact(for_rename)):
                    # Files are stashed (renamed away), not deleted, so
                    # the whole operation can be rolled back on failure.
                    moved.stash(path)
                    logger.verbose("Removing file or directory %s", path)

                for pth in self._pth.values():
                    pth.remove()

                logger.info("Successfully uninstalled %s", dist_name_version)

    def _allowed_to_proceed(self, verbose: bool) -> bool:
        """Display which files would be deleted and prompt for confirmation"""

        def _display(msg: str, paths: Iterable[str]) -> None:
            if not paths:
                return

            logger.info(msg)
            with indent_log():
                for path in sorted(compact(paths)):
                    logger.info(path)

        if not verbose:
            will_remove, will_skip = compress_for_output_listing(self._paths)
        else:
            # In verbose mode, display all the files that are going to be
            # deleted.
            will_remove = set(self._paths)
            will_skip = set()

        _display("Would remove:", will_remove)
        _display("Would not remove (might be manually added):", will_skip)
        _display("Would not remove (outside of prefix):", self._refuse)
        if verbose:
            _display("Will actually move:", compress_for_rename(self._paths))

        return ask("Proceed (Y/n)? ", ("y", "n", "")) != "n"

    def rollback(self) -> None:
        """Rollback the changes previously made by remove()."""
        if not self._moved_paths.can_rollback:
            logger.error(
                "Can't roll back %s; was not uninstalled",
                self._dist.raw_name,
            )
            return
        logger.info("Rolling back uninstall of %s", self._dist.raw_name)
        self._moved_paths.rollback()
        for pth in self._pth.values():
            pth.rollback()

    def commit(self) -> None:
        """Remove temporary save dir: rollback will no longer be possible."""
        self._moved_paths.commit()

    @classmethod
    def from_dist(cls, dist: BaseDistribution) -> "UninstallPathSet":
        """Build the UninstallPathSet for ``dist``, dispatching on how the
        distribution was installed (.dist-info, egg, flat .egg-info,
        develop egg-link, ...)."""
        dist_location = dist.location
        info_location = dist.info_location
        if dist_location is None:
            logger.info(
                "Not uninstalling %s since it is not installed",
                dist.canonical_name,
            )
            return cls(dist)

        normalized_dist_location = normalize_path(dist_location)
        if not dist.local:
            logger.info(
                "Not uninstalling %s at %s, outside environment %s",
                dist.canonical_name,
                normalized_dist_location,
                sys.prefix,
            )
            return cls(dist)

        if normalized_dist_location in {
            p
            for p in {sysconfig.get_path("stdlib"), sysconfig.get_path("platstdlib")}
            if p
        }:
            logger.info(
                "Not uninstalling %s at %s, as it is in the standard library.",
                dist.canonical_name,
                normalized_dist_location,
            )
            return cls(dist)

        paths_to_remove = cls(dist)
        develop_egg_link = egg_link_path_from_location(dist.raw_name)

        # Distribution is installed with metadata in a "flat" .egg-info
        # directory. This means it is not a modern .dist-info installation, an
        # egg, or legacy editable.
        setuptools_flat_installation = (
            dist.installed_with_setuptools_egg_info
            and info_location is not None
            and os.path.exists(info_location)
            # If dist is editable and the location points to a ``.egg-info``,
            # we are in fact in the legacy editable case.
            and not info_location.endswith(f"{dist.setuptools_filename}.egg-info")
        )

        # Uninstall cases order do matter as in the case of 2 installs of the
        # same package, pip needs to uninstall the currently detected version
        if setuptools_flat_installation:
            if info_location is not None:
                paths_to_remove.add(info_location)
            installed_files = dist.iter_declared_entries()
            if installed_files is not None:
                for installed_file in installed_files:
                    paths_to_remove.add(os.path.join(dist_location, installed_file))
        # FIXME: need a test for this elif block
        # occurs with --single-version-externally-managed/--record outside
        # of pip
        elif dist.is_file("top_level.txt"):
            try:
                namespace_packages = dist.read_text("namespace_packages.txt")
            except FileNotFoundError:
                namespaces = []
            else:
                namespaces = namespace_packages.splitlines(keepends=False)
            for top_level_pkg in [
                p
                for p in dist.read_text("top_level.txt").splitlines()
                if p and p not in namespaces
            ]:
                path = os.path.join(dist_location, top_level_pkg)
                paths_to_remove.add(path)
                paths_to_remove.add(f"{path}.py")
                paths_to_remove.add(f"{path}.pyc")
                paths_to_remove.add(f"{path}.pyo")

        elif dist.installed_by_distutils:
            raise LegacyDistutilsInstall(distribution=dist)

        elif dist.installed_as_egg:
            # package installed by easy_install
            # We cannot match on dist.egg_name because it can slightly vary
            # i.e. setuptools-0.6c11-py2.6.egg vs setuptools-0.6rc11-py2.6.egg
            paths_to_remove.add(dist_location)
            easy_install_egg = os.path.split(dist_location)[1]
            easy_install_pth = os.path.join(
                os.path.dirname(dist_location),
                "easy-install.pth",
            )
            paths_to_remove.add_pth(easy_install_pth, "./" + easy_install_egg)

        elif dist.installed_with_dist_info:
            for path in uninstallation_paths(dist):
                paths_to_remove.add(path)

        elif develop_egg_link:
            # PEP 660 modern editable is handled in the ``.dist-info`` case
            # above, so this only covers the setuptools-style editable.
            with open(develop_egg_link) as fh:
                link_pointer = os.path.normcase(fh.readline().strip())
                normalized_link_pointer = paths_to_remove._normalize_path_cached(
                    link_pointer
                )
            assert os.path.samefile(
                normalized_link_pointer, normalized_dist_location
            ), (
                f"Egg-link {develop_egg_link} (to {link_pointer}) does not match "
                f"installed location of {dist.raw_name} (at {dist_location})"
            )
            paths_to_remove.add(develop_egg_link)
            easy_install_pth = os.path.join(
                os.path.dirname(develop_egg_link), "easy-install.pth"
            )
            paths_to_remove.add_pth(easy_install_pth, dist_location)

        else:
            logger.debug(
                "Not sure how to uninstall: %s - Check: %s",
                dist,
                dist_location,
            )

        if dist.in_usersite:
            bin_dir = get_bin_user()
        else:
            bin_dir = get_bin_prefix()

        # find distutils scripts= scripts
        try:
            for script in dist.iter_distutils_script_names():
                paths_to_remove.add(os.path.join(bin_dir, script))
                if WINDOWS:
                    paths_to_remove.add(os.path.join(bin_dir, f"{script}.bat"))
        except (FileNotFoundError, NotADirectoryError):
            pass

        # find console_scripts and gui_scripts
        def iter_scripts_to_remove(
            dist: BaseDistribution,
            bin_dir: str,
        ) -> Generator[str, None, None]:
            for entry_point in dist.iter_entry_points():
                if entry_point.group == "console_scripts":
                    yield from _script_names(bin_dir, entry_point.name, False)
                elif entry_point.group == "gui_scripts":
                    yield from _script_names(bin_dir, entry_point.name, True)

        for s in iter_scripts_to_remove(dist, bin_dir):
            paths_to_remove.add(s)

        return paths_to_remove
+
+
class UninstallPthEntries:
    """Tracks entries to delete from a single ``.pth`` file, and supports
    restoring the file to its pre-removal contents."""

    def __init__(self, pth_file: str) -> None:
        self.file = pth_file
        # Entries scheduled for removal from the file.
        self.entries: Set[str] = set()
        # Raw file lines captured by remove(); None until remove() runs.
        self._saved_lines: Optional[List[bytes]] = None

    def add(self, entry: str) -> None:
        """Schedule ``entry`` for removal from the .pth file."""
        entry = os.path.normcase(entry)
        # On Windows, os.path.normcase converts the entry to use
        # backslashes. This is correct for entries that describe absolute
        # paths outside of site-packages, but all the others use forward
        # slashes.
        # os.path.splitdrive is used instead of os.path.isabs because isabs
        # treats non-absolute paths with drive letter markings like c:foo\bar
        # as absolute paths. It also does not recognize UNC paths if they don't
        # have more than "\\server\share". Valid examples: "\\server\share\" or
        # "\\server\share\folder".
        if WINDOWS and not os.path.splitdrive(entry)[0]:
            entry = entry.replace("\\", "/")
        self.entries.add(entry)

    def remove(self) -> None:
        """Rewrite the .pth file with all scheduled entries removed."""
        logger.verbose("Removing pth entries from %s:", self.file)

        # If the file doesn't exist, log a warning and return
        if not os.path.isfile(self.file):
            logger.warning("Cannot remove entries from nonexistent file %s", self.file)
            return
        with open(self.file, "rb") as fh:
            # windows uses '\r\n' with py3k, but uses '\n' with py2.x
            lines = fh.readlines()
            self._saved_lines = lines
        # Match the file's existing line-ending convention.
        if any(b"\r\n" in line for line in lines):
            endline = "\r\n"
        else:
            endline = "\n"
        # handle missing trailing newline
        if lines and not lines[-1].endswith(endline.encode("utf-8")):
            lines[-1] = lines[-1] + endline.encode("utf-8")
        for entry in self.entries:
            try:
                logger.verbose("Removing entry: %s", entry)
                lines.remove((entry + endline).encode("utf-8"))
            except ValueError:
                # Entry not present in the file; nothing to remove.
                pass
        with open(self.file, "wb") as fh:
            fh.writelines(lines)

    def rollback(self) -> bool:
        """Restore the file's original contents; return True on success."""
        if self._saved_lines is None:
            logger.error("Cannot roll back changes to %s, none were made", self.file)
            return False
        logger.debug("Rolling %s back to previous state", self.file)
        with open(self.file, "wb") as fh:
            fh.writelines(self._saved_lines)
        return True
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/__init__.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/legacy/__init__.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/legacy/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/__pycache__/autograd.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/__pycache__/autograd.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8aa98c4ade67d516497ef2445554f75cfec412de
Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/__pycache__/autograd.cpython-311.pyc differ
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/__pycache__/lazy.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/__pycache__/lazy.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d1046bb3bb1e840653d7f8b218dd76576d9fd81c
Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/__pycache__/lazy.cpython-311.pyc differ
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/__pycache__/structured.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/__pycache__/structured.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3f5a7f372174f361de727c1e46912c7e680b72be
Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/__pycache__/structured.cpython-311.pyc differ
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/__pycache__/translate.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/__pycache__/translate.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9d0c87425ec884d7f772a1f36c8662600959b9ae
Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/__pycache__/translate.cpython-311.pyc differ
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/functionalization.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/functionalization.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc492588e60fdfe3edba051288c83527203252fd
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/functionalization.py
@@ -0,0 +1,199 @@
+from typing import List, Optional
+
+from torchgen.api import dispatcher
+from torchgen.api.types import (
+ BaseCppType,
+ BaseCType,
+ Binding,
+ boolT,
+ ConstRefCType,
+ CType,
+ longT,
+ NamedCType,
+ tensorT,
+)
+from torchgen.model import (
+ Argument,
+ BaseTy,
+ BaseType,
+ FunctionSchema,
+ NativeFunction,
+ NativeFunctionsViewGroup,
+)
+
+
+# This file describes the translation of JIT schema to API's used
+# when creating view lambdas that are used by the functionalization pass.
+# There are two types of lambdas: forward lambdas and reverse lambdas.
+# These API's mostly follow the dispatcher API, with a few quirks:
+# - The lambda capture has to convert reference types to value types
+# - While the forward lambda just directly calls into the at::_ops API
+# (following the dispatcher convention), the logic here for the reverse lambda
+# is responsible for generating both the call-site, and the declarations
+# (which are implemented manually in the at::functionalization::impl namespace).
+
+# The lambdas generated for each view op in the functionalization pass are of the form
+# [capture_arguments](outer_arguments) -> returns_type {
+# return name(inner_arguments);
+# }
+
+# Define some specific lambda input arguments.
+# "base": the original tensor the view was taken of; both forward and
+# reverse lambdas receive it as a const Tensor reference.
+base_binding = Binding(
+    name="base",
+    nctype=NamedCType(name="base", type=ConstRefCType(BaseCType(tensorT))),
+    argument=Argument(
+        name="base", type=BaseType(BaseTy.Tensor), default=None, annotation=None
+    ),
+    default=None,
+)
+# "mutated_view": the updated view tensor whose changes the reverse lambda
+# propagates back onto the base.
+# NOTE(review): the wrapped Argument is named "base" here (and in
+# mutated_view_idx_binding below) — only the Binding name is consumed by the
+# generated code, so the mismatch appears intentional upstream.
+mutated_view_binding = Binding(
+    name="mutated_view",
+    nctype=NamedCType(name="mutated_view", type=ConstRefCType(BaseCType(tensorT))),
+    argument=Argument(
+        name="base", type=BaseType(BaseTy.Tensor), default=None, annotation=None
+    ),
+    default=None,
+)
+# "mutated_view_idx": int64 index selecting one output of a multi-output
+# view op (e.g. split) when its lambda is replayed.
+mutated_view_idx_binding = Binding(
+    name="mutated_view_idx",
+    nctype=NamedCType(name="mutated_view_idx", type=BaseCType(longT)),
+    argument=Argument(
+        name="base", type=BaseType(BaseTy.Tensor), default=None, annotation=None
+    ),
+    default=None,
+)
+# "reapply_views": runtime flag choosing between the view op and its
+# view_copy variant; captured by forward lambdas.
+reapply_views_binding = Binding(
+    name="reapply_views",
+    nctype=NamedCType(name="reapply_views", type=BaseCType(boolT)),
+    argument=Argument(
+        name="reapply_views", type=BaseType(BaseTy.bool), default=None, annotation=None
+    ),
+    default=None,
+)
+
+# C++ enum type at::functionalization::InverseReturnMode, passed to the
+# reverse (view-inverse) functions; captured by reverse lambdas.
+InverseReturnModeT = BaseCppType("at::functionalization", "InverseReturnMode")
+inverse_return_mode_binding = Binding(
+    name="inverse_return_mode",
+    nctype=NamedCType(name="inverse_return_mode", type=BaseCType(InverseReturnModeT)),
+    argument=Argument(
+        name="inverse_return_mode",
+        # NB: not actually a bool but it doesn't matter because this isn't used
+        type=BaseType(BaseTy.bool),
+        default=None,
+        annotation=None,
+    ),
+    default=None,
+)
+
+
+# The lambda capture itself doesn't have a name.
+# The name returned here corresponds to the name of the inner function called by the lambda.
+def name(
+    g: NativeFunctionsViewGroup,
+    *,
+    is_reverse: bool,
+    include_namespace: bool,
+    reapply_views: Optional[bool] = None,
+) -> str:
+    """Return the name of the inner function the generated lambda calls.
+
+    Reverse lambdas call the view-inverse function (see reverse_name);
+    forward lambdas call the at::_ops entry of either the view op
+    (reapply_views=True) or its view_copy variant (reapply_views=False).
+    """
+    if reapply_views is None:
+        # reapply_views is only important for the fwd lambda,
+        # since we always plumb the runtime "reapply_views" argument into the reverse function.
+        assert is_reverse
+    if is_reverse:
+        return reverse_name(g.view, include_namespace)
+    # in the forward case, we just directly call into the at::_ops API (so we always need the namespace)
+    assert include_namespace
+    assert g.view_copy is not None
+    api_name = (
+        g.view.func.name.unambiguous_name()
+        if reapply_views
+        else g.view_copy.func.name.unambiguous_name()
+    )
+    return f"at::_ops::{api_name}::call"
+
+
def reverse_name(f: NativeFunction, include_namespace: bool) -> str:
    """Name of the view-inverse function for ``f``.

    A single inverse function covers both the copying and non-copying
    variants (the runtime "reapply_views" flag is plumbed into it), so we
    don't have to generate twice as many inverses.  Call sites need the
    fully qualified name; the declarations, implemented manually in
    at::functionalization::impl, use the bare name.
    """
    base = f.func.name.unambiguous_name()
    if not include_namespace:
        return f"{base}_inverse"
    return f"at::functionalization::FunctionalInverses::{base}_inverse"
+
+
def capture_arguments(func: FunctionSchema, *, is_reverse: bool) -> List[Binding]:
    """Bindings captured by the lambda: everything except `self`, as values.

    C++ reference types (e.g. IntArrayRef) would dangle inside the capture,
    so they are converted to owning value types (e.g. vector).  The first
    capture is the mode flag: inverse_return_mode for reverse lambdas,
    reapply_views for forward ones.
    """
    flat_args = func.arguments.flat_all
    assert flat_args[0].type == BaseType(BaseTy.Tensor)
    mode_binding = inverse_return_mode_binding if is_reverse else reapply_views_binding
    value_bindings = [
        dispatcher.argument(arg, remove_non_owning_ref_types=True)
        for arg in flat_args[1:]
    ]
    return [mode_binding] + value_bindings
+
+
def returns_type(func: FunctionSchema) -> CType:
    """Return type of the generated lambda: always a single Tensor.

    Every view op returns tensor-like values; multi-output view ops get one
    lambda per output, so each lambda still returns an individual tensor.
    """
    assert len(func.returns) >= 1
    assert all(ret.type.is_tensor_like() for ret in func.returns)
    return BaseCType(tensorT)
+
+
def outer_arguments(*, is_reverse: bool) -> List[Binding]:
    """Formal arguments of the generated lambda itself."""
    bindings = [base_binding, mutated_view_idx_binding]
    if is_reverse:
        # Reverse lambdas additionally receive the mutated view,
        # positioned between base and the index.
        bindings.insert(1, mutated_view_binding)
    return bindings
+
+
def inner_call_index(func: FunctionSchema) -> Optional[Binding]:
    """Index binding for multi-output view ops (e.g. `split`), else None.

    Ops that return several tensors get one lambda per output, and the
    replayed call must select the right element of the output.
    """
    rets = func.returns
    multiple_returns = len(rets) > 1
    single_list_return = len(rets) == 1 and rets[0].type.is_list_like()
    if multiple_returns or single_list_return:
        return mutated_view_idx_binding
    return None
+
+
def inner_arguments(func: FunctionSchema, is_reverse: bool) -> List[Binding]:
    """Arguments passed to the function invoked inside the lambda.

    Forward lambdas call the at::_ops API; reverse lambdas call the
    view-inverse API.  Both follow the dispatcher convention, with the
    original `self` tensor swapped for the lambda argument "base".
    """
    flat_args = func.arguments.flat_all
    assert flat_args[0].type == BaseType(BaseTy.Tensor)
    trailing = [dispatcher.argument(arg) for arg in flat_args[1:]]

    if not is_reverse:
        # Forward: just "base" in place of the original tensor argument.
        return [base_binding] + trailing

    # Reverse: also pass the mutated view and the return-mode flag.
    # Calling convention: view ops with multiple tensor outputs have an
    # inverse that takes an extra index argument.
    leading = [base_binding, mutated_view_binding, inverse_return_mode_binding]
    index_binding = inner_call_index(func)
    if index_binding is not None:
        leading.append(index_binding)
    return leading + trailing
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/python.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/python.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a3b4505d9df6324462ee5dc4632505c4d658118
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/python.py
@@ -0,0 +1,1509 @@
+from dataclasses import dataclass
+from typing import Dict, List, Optional, Sequence, Set, Tuple, Union
+
+from torchgen.api import cpp
+
+from torchgen.api.types import Binding, CppSignature, CppSignatureGroup
+from torchgen.gen import pythonify_default
+from torchgen.model import (
+ Argument,
+ BaseTy,
+ BaseType,
+ FunctionSchema,
+ ListType,
+ NativeFunction,
+ OptionalType,
+ Return,
+ Type,
+ Variant,
+)
+
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
+#
+# Data Models
+#
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
+#
+# [Notes] python binding codegen
+#
+# The Python binding codegen produces code that takes the input list of
+# PyObjects, finds the matching ATen C++ function using PythonArgParser,
+# converts the PyObjects into C++ types and calls the ATen C++ function:
+#
+# +--------+ parsing +------------------------+ binding +-----------------------+
+# | PyObjs | ---------> | PythonArgParser Output | ---------> | Cpp Function Dispatch |
+# +--------+ +------------------------+ +-----------------------+
+#
+# The following examples demonstrate the data models the Python binding
+# codegen needs to deal with and the tasks it needs to accomplish. It
+# helps understand the purpose of the new data types we introduced below.
+#
+# - Function Schema (source of truth)
+#
+# aten::empty.names(int[] size, *, Dimname[]? names,
+# ScalarType? dtype=None, Layout? layout=None,
+# Device? device=None, bool? pin_memory=None,
+# MemoryFormat? memory_format=None) -> Tensor
+#
+# - Python Signature
+#
+# It's used to generate input schema string for PythonArgParser.
+# Note: TensorOptions fields are reordered and the additional
+# 'requires_grad' field is added:
+#
+# empty(IntArrayRef size, *, DimnameList? names,
+# MemoryFormat? memory_format=None, ScalarType dtype=None,
+# Layout layout=torch.strided, Device device=None,
+# bool pin_memory=False, bool requires_grad=False)
+#
+# - C++ Signature
+#
+# It's used to generate C++ lambda formals & dispatch call.
+# Note: the scattered TensorOptions fields are packed into 'options'.
+#
+# auto dispatch_empty =
+#   [](IntArrayRef size, c10::optional<DimnameList> names,
+# const TensorOptions & options,
+#      c10::optional<MemoryFormat> memory_format) -> Tensor {
+# pybind11::gil_scoped_release no_gil;
+# return torch::empty(size, names, options, memory_format);
+# };
+#
+# - Binding between Python Arguments and C++ Arguments
+#
+# Given a set of Python Arguments in scope, we need produce the
+# binding expressions that translate the Python API into C++ API:
+#
+# Python Args Cpp Args Binding Exprs
+# -----------------------------------------------------------------
+# 0: size size '_r.intlist(0)'
+# 1: names names 'names' [special init]
+# 2: memory_format -------+
+# 3: dtype -----+-|--> options 'options' [special packing]
+# 4: layout / |
+# 5: device / +--> memory_format '_r.memoryformatOptional(2)'
+# 6: pin_memory /
+# 7: requires_grad -+
+#
+# So the full dispatch expression would look like:
+#
+# dispatch_empty(_r.intlist(0), names, options,
+# _r.memoryformatOptional(2))
+#
+# Where does 'names' come from? It involves special local init:
+#
+# auto __names = _r.toDimnameListOptional(1);
+#    c10::optional<DimnameList> names =
+# __names ? c10::make_optional(DimnameList(__names.value()))
+# : c10::nullopt;
+#
+# Where does 'options' come from? It involves special local init
+# for TensorOptions. Note that Python side has the additional
+# 'requires_grad' field:
+#
+# const auto options = TensorOptions()
+# .dtype(_r.scalartype(3))
+# .device(_r.device(5))
+# .layout(_r.layoutOptional(4))
+# .requires_grad(_r.toBool(7))
+# .pinned_memory(_r.toBool(6));
+#
+# In some other cases one Python Argument can map to multiple C++
+# Arguments. For example:
+#
+# aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False)
+# -> (Tensor values, Tensor indices)
+#
+# Python Args Cpp Args Binding Exprs
+# ---------------------------------------------------------------------
+# +----> max 'out[0]'
+#                 /-----> max_values  'out[1]'
+# 0: input / self '_r.tensor(0)'
+# 1: dim / dim '_r.dimname(1)'
+# 2: keepdim / keepdim '_r.toBool(2)'
+# 3: out -----+ [local init] out '_r.tensorlist_n<2>(3)'
+#
+# As demonstrated above, the binding can involve reordering,
+# packing, unpacking and special local inits.
+#
+#
+# Let's look at a concrete example:
+#
+# static PythonArgParser parser({
+# "abs(Tensor input, *, Tensor out=None)",
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# ^
+# +--- Python Schema, represented by PythonSignature and PythonArgument
+#
+# }, /*traceable=*/true);
+#
+# ParsedArgs<2> parsed_args;
+# auto _r = parser.parse(nullptr, args, kwargs, parsed_args);
+#
+# ...
+#
+# if (_r.isNone(1)) {
+# ~~~~~~~~~~~~ <--- Scattered PythonArgParser output (arg name = 'out')
+# represented by PythonArgParserOutputExpr
+#
+# // aten::abs(Tensor self) -> Tensor
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# ^
+# +--- NativeFunction schema, base version
+#
+# auto dispatch_abs = [](const Tensor & self) -> Tensor {
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# ^
+# +--- dispatch_lambda_args / dispatch_lambda_return_str
+# generated from NativeFunction / CppSignature
+# (deprecated PythonSignature is special)
+# arguments are represented by DispatchLambdaArgument
+#
+# pybind11::gil_scoped_release no_gil;
+# return self.abs();
+# ~~~~~~~~~~~ <--- cpp_dispatch_target / cpp_dispatch_exprs
+# generated from NativeFunction / CppSignature
+# };
+# return wrap(dispatch_abs(_r.tensor(0)));
+# ~~~~~~~~~~~~~
+# ^
+# +--- dispatch_lambda_exprs
+# binding PythonArgParserOutputExpr (python args)
+# and DispatchLambdaArgument (c++ args)
+#
+# } else {
+# // aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# ^
+# +--- NativeFunction schema, out-variant
+#
+# auto dispatch_abs_out = [](Tensor out, const Tensor & self) -> Tensor {
+# pybind11::gil_scoped_release no_gil;
+# return at::abs_out(out, self);
+# };
+# return wrap(dispatch_abs_out(_r.tensor(1), _r.tensor(0)));
+# }
+#
+#
+# [Notes] python interface codegen
+# The python dataclasses below are used to generate both python binding code
+# and pyi type hint signatures.
+# In theory these two should look very similar, but there are number of differences
+# in how pyi signatures vs. python_arg_parser signatures are generated.
+# These differences have been encapsulated in signature_str() vs. signature_str_pyi()
+# to display the full signatures, and argument_str() vs argument_str_pyi() to display arguments.
+# For examples, only pyi signatures include return types.
+
+
+@dataclass(frozen=True)
+class PythonReturns:
+    # Return declarations from the function schema; only consumed by the
+    # .pyi generation path (python_arg_parser strings omit return types).
+    returns: Tuple[Return, ...]
+
+
+@dataclass(frozen=True)
+class PythonArgument:
+    """One argument of a Python signature, for both parser-string and pyi codegen."""
+
+    # argument name as it appears in the Python signature
+    name: str
+    # schema type (torchgen.model.Type)
+    type: Type
+    # default value rendered as a string, or None when the argument has no default
+    default: Optional[str]
+
+    # Used to generate the default init expr for some PythonArgParser outputs, e.g.:
+    #
+    #   _r.layoutWithDefault(3, layout_from_backend(self.options().backend())))
+    #                           ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    #                            ^
+    #                            +--- default_init str
+    default_init: Optional[str]
+
+    # Compute argument formal for python argument parsing.
+    # Needs to be consistent with torch/csrc/utils/python_arg_parser.h.
+    def argument_str(self, *, method: bool = False, symint: bool = True) -> str:
+        # strip C++ reference qualifiers: the parser string uses bare type names
+        type_str = (
+            argument_type_str(self.type, symint=symint)
+            .replace("const ", "")
+            .replace(" &", "")
+        )
+
+        name = self.name
+        # s/self/input/ outside method bindings
+        # [old codegen] TODO: remove this? doesn't rename in codegen, it's just
+        # for the parse string
+        if name == "self" and type_str in ["Tensor", "Number"] and not method:
+            name = "input"
+
+        # add default
+        if self.default is not None:
+            # C++ "empty" spellings all map to Python None in the parser string
+            default = {
+                "nullptr": "None",
+                "c10::nullopt": "None",
+                "{}": "None",
+            }.get(self.default, self.default)
+            return f"{type_str} {name}={default}"
+        else:
+            return f"{type_str} {name}"
+
+    # Compute the argument formal for .pyi stubs (PEP 484 style, unlike
+    # argument_str above).
+    def argument_str_pyi(
+        self, *, method: bool = False, deprecated: bool = False
+    ) -> str:
+        type_str = argument_type_str_pyi(self.type)
+
+        name = self.name
+        # s/self/input/ outside method bindings
+        # [old codegen] TODO: remove this? doesn't rename in codegen, it's just
+        # for the parse string
+        if name == "self" and type_str == "Tensor" and not method and not deprecated:
+            name = "input"
+
+        if name == "from":  # from is a Python keyword...
+            name += "_"
+
+        # pyi merges the _out and functional variants into the same signature, with an optional out arg
+        if name == "out" and type_str == "Tensor" and not deprecated:
+            type_str = "Optional[" + type_str + "]"
+
+        # pyi deprecated signatures don't get defaults for their out arg
+        treat_as_no_default = (
+            deprecated
+            and isinstance(self, PythonOutArgument)
+            and self.default == "None"
+        )
+
+        # add default
+        if self.default is not None and not treat_as_no_default:
+            if (
+                isinstance(self.type, ListType)
+                and self.type.elem == BaseType(BaseTy.int)
+                and self.default.startswith("{")
+                and self.default.endswith("}")
+            ):
+                # C++ brace-list int defaults become Python tuples, e.g. {0,1} -> (0,1)
+                default = "(" + self.default[1:-1] + ")"
+            else:
+                default = {
+                    "nullptr": "None",
+                    "c10::nullopt": "None",
+                    "{}": "None",
+                    "MemoryFormat::Contiguous": "contiguous_format",
+                    "QScheme::PER_TENSOR_AFFINE": "per_tensor_affine",
+                }.get(self.default, self.default)
+            return f"{name}: {type_str} = {default}"
+        else:
+            return f"{name}: {type_str}"
+
+
+@dataclass(frozen=True)
+class PythonOutArgument(PythonArgument):
+    # In Python signature multiple output fields are packed into one 'out' argument.
+    # When binding to C++, it's first bound to a local 'out' variable:
+    #   'auto out = _r.tensorlist_n<2>(2);',
+    # then bound to scattered C++ output arguments as 'out[0]', 'out[1]', and etc.
+    # TODO: maybe don't need keep scattered out fields for python signature?
+    outputs: Tuple[PythonArgument, ...]
+
+    @staticmethod
+    def from_outputs(
+        outputs: Tuple[PythonArgument, ...]
+    ) -> Optional["PythonOutArgument"]:
+        """Pack the schema's out arguments into a single 'out' argument.
+
+        Returns None when the schema has no out arguments.
+        """
+        if not outputs:
+            return None
+
+        size = len(outputs)
+        if size == 1:
+            # single output keeps its own name and type
+            return PythonOutArgument(
+                name=outputs[0].name,
+                type=outputs[0].type,
+                default="None",
+                default_init=None,
+                outputs=outputs,
+            )
+        elif size > 1:
+            if any(not a.type.is_tensor_like() for a in outputs):
+                raise RuntimeError(f"Unsupported output type: {outputs}")
+            return PythonOutArgument(
+                name="out",
+                # TODO: shouldn't this be OptionalType[ListType[...]], since it defaults to None?
+                type=ListType(BaseType(BaseTy.Tensor), size),
+                default="None",
+                default_init=None,
+                outputs=outputs,
+            )
+        # unreachable (size is a non-negative length, and 0/1/>1 are handled),
+        # kept as a safety net
+        raise AssertionError(r"Unexpected PythonOutArgument size")
+
+
+@dataclass(frozen=True)
+class PythonSignature:
+    """A full Python-visible signature, used for both parser strings and pyi stubs."""
+
+    # Base operator name, without inplace/outplace suffix.
+    name: str
+
+    # Positional arguments.
+    # TODO: create a dedicated SelfArgument type for 'self'?
+    input_args: Tuple[PythonArgument, ...]
+
+    # Keyword arguments excluding the 'out' argument and scattered kwargs belonging
+    # to TensorOptions (dtype, layout, device, pin_memory, requires_grad, etc).
+    input_kwargs: Tuple[PythonArgument, ...]
+
+    # The packed 'out' argument, or None when the op has no out variant.
+    output_args: Optional[PythonOutArgument]
+
+    # Return types, which are only used by pyi
+    returns: PythonReturns
+
+    # These are scattered kwargs arguments belonging to TensorOptions.
+    # When binding to C++, they are packed into a TensorOptions object 'options'.
+    # It's possible that the C++ signature doesn't take TensorOptions object (e.g.
+    # for out variant), in which case they will be used as scattered fields without
+    # being packed into 'options'.
+    # TODO: maybe create a PythonTensorOptionsArgument?
+    tensor_options_args: Tuple[PythonArgument, ...]
+
+    # method or function signature?
+    method: bool
+
+    @property
+    def deprecated(self) -> bool:
+        # overridden by PythonSignatureDeprecated
+        return False
+
+    def arguments(
+        self, *, skip_outputs: bool = False, skip_tensor_options: bool = False
+    ) -> Tuple[Union[PythonArgument, PythonOutArgument], ...]:
+        """All arguments in declaration order: inputs, kwargs, out, tensor options."""
+        result: List[Union[PythonArgument, PythonOutArgument]] = []
+        result.extend(self.input_args)
+        result.extend(self.input_kwargs)
+        if self.output_args is not None and not skip_outputs:
+            result.append(self.output_args)
+        if not skip_tensor_options:
+            result.extend(self.tensor_options_args)
+        return tuple(result)
+
+    def arguments_count(self) -> int:
+        return len(self.arguments())
+
+    def output_idx(self) -> int:
+        # index of the packed 'out' argument within arguments()
+        return len(self.input_args) + len(self.input_kwargs)
+
+    # [old codegen] Compute the Python function signature for argument parsing,
+    # as specified in torch/csrc/utils/python_arg_parser.h. WARNING:
+    # this is NOT the same type signature as specified by PEP 484
+    # as understood by mypy; our format was independently developed
+    # and has some quirks to make it more suitable specifically
+    # for error parsing.
+    #
+    # For a translation to mypy-valid type signatures, see
+    # signature_str_pyi().
+    def signature_str(self, *, skip_outputs: bool = False, symint: bool = True) -> str:
+        args = self.arguments(skip_outputs=skip_outputs)
+        schema_formals: List[str] = [
+            a.argument_str(method=self.method, symint=symint) for a in args
+        ]
+        positional_argc = len(self.input_args)
+        if len(schema_formals) > positional_argc:
+            # '*' separates positional from keyword-only arguments
+            schema_formals.insert(positional_argc, "*")
+
+        return f'{self.name}({", ".join(schema_formals)})'
+
+    def signature_str_pyi(self, *, skip_outputs: bool = False) -> str:
+        """Render the signature as a PEP 484 stub line ('def ...: ...')."""
+        args = self.arguments(skip_outputs=skip_outputs)
+        schema_formals: List[str] = [
+            a.argument_str_pyi(method=self.method) for a in args
+        ]
+        positional_argc = len(self.input_args)
+        if len(schema_formals) > positional_argc:
+            schema_formals.insert(positional_argc, "*")
+
+        # only pyi signatures include returns
+        returns_str = returns_str_pyi(self)
+        # pyi also includes self (with no typing/defaults) for methods
+        if self.method:
+            schema_formals.insert(0, "self")
+        return f'def {self.name}({", ".join(schema_formals)}) -> {returns_str}: ...'
+
+    def signature_str_pyi_vararg(self, *, skip_outputs: bool = False) -> Optional[str]:
+        # only pyi uses vararg signatures
+        args = self.arguments(skip_outputs=skip_outputs)
+        schema_formals: List[str] = [
+            a.argument_str_pyi(method=self.method) for a in args
+        ]
+        # vararg only applies to pyi signatures. vararg variants are not generated for all signatures
+        num_args = self.arguments_count()
+        num_positionalargs = len(self.input_args)
+
+        have_vararg_version = False
+        if num_args > 0:
+            vararg_type = args[0].type
+            if (
+                isinstance(vararg_type, ListType)
+                and str(vararg_type.elem) in ["int", "SymInt"]
+                and num_positionalargs == 1
+            ):
+                have_vararg_version = True
+
+        if not have_vararg_version:
+            return None
+        # Below are the major changes in vararg vs. regular pyi signatures
+        # vararg signatures also omit the asterisk
+        schema_formals[0] = "*" + args[0].name + ": _int"
+
+        returns_str = returns_str_pyi(self)
+        # pyi also includes self (with no typing/defaults) for methods
+        if self.method:
+            schema_formals.insert(0, "self")
+        return f'def {self.name}({", ".join(schema_formals)}) -> {returns_str}: ...'
+
+
+# The deprecated python signature involves some special logic, so create a
+# dedicated data model to store these extra properties.
+@dataclass(frozen=True)
+class PythonSignatureDeprecated(PythonSignature):
+    # Schema for the deprecated function
+    deprecated_schema: FunctionSchema
+
+    # The deprecated signature might miss some arguments that the corresponding
+    # C++ signature expects. We need store the constant default values to pass in.
+    # For example:
+    #   [deprecated signature]: addmm(Scalar beta, Tensor self, Tensor mat1, Tensor mat2)
+    #   [func schema]: aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
+    #   [func call]: self.addmm(mat1, mat2, beta, 1)
+    # We store ['self', 'mat1', 'mat2', 'beta', '1'] in this case.
+    deprecated_args_exprs: Tuple[str, ...]
+
+    @property
+    def deprecated(self) -> bool:
+        return True
+
+    def signature_str(self, *, skip_outputs: bool = False, symint: bool = True) -> str:
+        # same parser string as the base class, tagged so the parser knows
+        # to emit a deprecation warning
+        return (
+            PythonSignature.signature_str(
+                self, skip_outputs=skip_outputs, symint=symint
+            )
+            + "|deprecated"
+        )
+
+    def signature_str_pyi(self, *, skip_outputs: bool = False) -> str:
+        args = self.arguments(skip_outputs=skip_outputs)
+        schema_formals: List[str] = [
+            a.argument_str_pyi(method=self.method, deprecated=True) for a in args
+        ]
+        positional_argc = len(self.input_args)
+        if len(schema_formals) > positional_argc:
+            schema_formals.insert(positional_argc, "*")
+
+        returns_str = returns_str_pyi(self)
+        return f'def {self.name}({", ".join(schema_formals)}) -> {returns_str}: ...'
+
+    def signature_str_pyi_vararg(self, *, skip_outputs: bool = False) -> Optional[str]:
+        # the codegen doesn't include vararg variants for deprecated signatures
+        return None
+
+
+# This struct is used to hold the PythonSignature and its corresponding
+# NativeFunction BEFORE grouping base and out-variant functions.
+# Why not store NativeFunction in PythonSignature or construct PythonSignature
+# from NativeFunction? Because they are not 1-1 mapped.
+# One native function could have both deprecated and non-deprecated python
+# signatures - NativeFunction doesn't contain information to construct the
+# deprecated python signature.
+# One python signature is used to handle both the base and the out-variant
+# function - see 'PythonSignatureGroup'.
+@dataclass(frozen=True)
+class PythonSignatureNativeFunctionPair:
+    # one Python signature paired with one NativeFunction (pre-grouping;
+    # the mapping is not 1-1 — see the comment block above)
+    signature: PythonSignature
+    function: NativeFunction
+
+
+# We merge pairs of functions with signatures that are equivalent mod
+# output arguments, and use a single entry in the python_arg_parser sig
+# list for both (output arguments become optional).
+@dataclass(frozen=True)
+class PythonSignatureGroup:
+    # The signature used for Python argument parsing. The outplace signature
+    # is preferred if it exists, because it can be used to parse inputs for both
+    # the out-place variant and the base version (with output omitted).
+    signature: PythonSignature
+
+    # The regular ATen declaration (e.g. conv2d)
+    base: NativeFunction
+
+    # The out variant (e.g. conv2d_out)
+    outplace: Optional[NativeFunction]
+
+    @classmethod
+    def from_pairs(
+        cls,
+        functional: PythonSignatureNativeFunctionPair,
+        out: Optional[PythonSignatureNativeFunctionPair],
+    ) -> "PythonSignatureGroup":
+        """Merge a functional pair and its (optional) out-variant pair into one group."""
+        if out is None:
+            return PythonSignatureGroup(
+                signature=functional.signature,
+                base=functional.function,
+                outplace=None,
+            )
+
+        # prefer the signature with optional out=... arguments because it's the
+        # superset that can be used to parse input for both base and outplace.
+        signature_kwargs = out.signature.__dict__.copy()
+
+        # Out overloads in C++ don't have TensorOptions arguments,
+        # so take these from the functional variant
+        signature_kwargs[
+            "tensor_options_args"
+        ] = functional.signature.tensor_options_args
+
+        # type(out.signature) preserves PythonSignatureDeprecated when present
+        return PythonSignatureGroup(
+            signature=type(out.signature)(**signature_kwargs),
+            base=functional.function,
+            outplace=out.function,
+        )
+
+
+# C++ function dispatch is wrapped in a lambda function. The lambda function
+# has almost the same signature as the C++ function, only with some small
+# variants - see details below.
+# This data model is used to represent arguments of the lambda function
+# signature.
+@dataclass(frozen=True)
+class DispatchLambdaArgument:
+    # formal name in the generated C++ lambda
+    name: str
+    # C++ type rendered as a string (lambda formals differ slightly from the
+    # plain C++ signature — see the comment block above)
+    type_str: str
+    # whether this formal corresponds to an output argument
+    is_out_arg: bool
+
+
+# To pass PyObjects arguments to C++ function (via the lambda wrapper),
+# we need first convert PyObjects into simple C++ objects. This work
+# is done by PythonArgParser.
+# This data model is used to represent the output of PythonArgParser.
+# It has 1-1 mapping with PythonArgument in PythonSignature.
+@dataclass(frozen=True)
+class PythonArgParserOutputExpr:
+    # argument name
+    name: str
+
+    # RHS expression to reference PythonArgParser output.
+    expr: str
+
+    # In some special cases we need to create a different expr, e.g.:
+    # '_r.isNone(1)' instead of '_r.tensor(1)' — the parser slot index is
+    # kept so such expressions can be formed (see is_none_expr below).
+    index: int
+
+    # The python argument it maps to.
+    argument: PythonArgument
+
+    @property
+    def is_none_expr(self) -> str:
+        return f"_r.isNone({self.index})"
+
+
+# To pass PythonArgParser output to the lambda wrapper, we need bind
+# PythonArgParserOutputExpr to DispatchLambdaArgument.
+# They are not always 1-1 mapped, e.g. scattered TensorOptions fields
+# need be packed into a TensorOptions object, which is the argument
+# that the lambda function wrapper takes.
+@dataclass(frozen=True)
+class DispatchLambdaArgumentExprs:
+    # The exprs that provide the binding for lambda arguments, e.g.:
+    #
+    #   'self' -> '_r.tensor(0)'
+    #   'min' -> 'out[0]' / 'min_indices' -> 'out[1]'
+    #   'options' -> 'options'
+    #
+    # It has 1-1 mapping with DispatchLambdaArgument.
+    exprs: Sequence[str]
+
+    # Special local inits, which might introduce new variables that
+    # the 'exprs' above reference, e.g.:
+    #
+    #   'auto out = _r.tensorlist_n<2>(2);'
+    #
+    inits: Sequence[str]
+
+
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
+#
+# Helper Functions
+#
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
+
+
def _cpp_signature(f: NativeFunction, *, method: bool = False) -> CppSignature:
    """Default C++ signature for *f* (as a method when method=True)."""
    group = CppSignatureGroup.from_native_function(f, method=method)
    return group.signature
+
+
def has_tensor_options(f: NativeFunction) -> bool:
    """Whether the function's schema carries a packed TensorOptions argument."""
    tensor_options = f.func.arguments.tensor_options
    return tensor_options is not None
+
+
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
+#
+# Python Signature
+#
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
+
+
+# 'simple_type' was introduced by the old codegen, which is slightly
+# different from the python schema type, e.g.: doesn't have '?' suffix
+# for optional Tensor/TensorList; doesn't have '[size]' suffix for list type.
# 'simple_type' was introduced by the old codegen, which is slightly
# different from the python schema type, e.g.: doesn't have '?' suffix
# for optional Tensor/TensorList; doesn't have '[size]' suffix for list type.
def argument_type_str(
    t: Type, *, simple_type: bool = False, symint: bool = True
) -> str:
    """Render a schema Type as a python_arg_parser type string.

    simple_type=True drops the '[size]' suffix on sized list types
    (old-codegen 'simple type'); symint selects SymIntArrayRef vs
    IntArrayRef for SymInt lists.

    Raises RuntimeError for types the Python binding does not support.
    """
    if isinstance(t, BaseType):
        if t.name == BaseTy.Tensor:
            return "Tensor"
        elif t.name == BaseTy.int:
            return "int64_t"
        elif t.name == BaseTy.float:
            return "double"
        elif t.name == BaseTy.str:
            return "c10::string_view"
        elif t.name in [
            BaseTy.bool,
            BaseTy.QScheme,
            BaseTy.Scalar,
            BaseTy.ScalarType,
            BaseTy.Generator,
            BaseTy.Storage,
            BaseTy.Layout,
            BaseTy.Device,
            BaseTy.DeviceIndex,
            BaseTy.MemoryFormat,
            BaseTy.Dimname,
            BaseTy.Stream,
            BaseTy.ConstQuantizerPtr,
            BaseTy.SymInt,
        ]:
            # These python schema type names line up with their function schema names
            return t.name.name

    elif isinstance(t, OptionalType):
        if str(t.elem) == "Tensor":
            # Is it desired to keep '?' for simple_type with new style dispatcher?
            return "Tensor?"
        elem = argument_type_str(t.elem, simple_type=simple_type, symint=symint)
        return f"{elem}?"
    elif isinstance(t, ListType):
        size = t.size if not simple_type else None
        if str(t.elem) == "bool":
            assert t.size is not None
            # BUGFIX: the template arguments were missing here (the literal was
            # a placeholder-less f-string "::std::array", evidently stripped by
            # an HTML-unescape pass); bool lists map to ::std::array<bool, N>.
            return f"::std::array<bool,{t.size}>"
        elif str(t.elem) == "int":
            return f"IntArrayRef[{size}]" if size is not None else "IntArrayRef"
        elif str(t.elem) == "SymInt":
            if symint:
                return (
                    f"SymIntArrayRef[{size}]" if size is not None else "SymIntArrayRef"
                )
            else:
                return f"IntArrayRef[{size}]" if size is not None else "IntArrayRef"
        elif str(t.elem) == "Tensor":
            return f"TensorList[{size}]" if size is not None else "TensorList"
        elif str(t.elem) == "Scalar":
            return f"ScalarList[{size}]" if size is not None else "ScalarList"
        elif str(t.elem) == "Tensor?":
            # BUGFIX: restore the stripped template arguments
            # ("c10::List>" / "const c10::List> &" were not valid C++).
            if simple_type:
                return "c10::List<c10::optional<Tensor>>"
            else:
                return "const c10::List<c10::optional<Tensor>> &"
        elif str(t.elem) == "Dimname":
            return f"DimnameList[{size}]" if size is not None else "DimnameList"
        elem = argument_type_str(t.elem, simple_type=simple_type, symint=symint)
        return f"ArrayRef<{elem}>"

    raise RuntimeError(f"unrecognized type {repr(t)}")
+
+
def argument_type_size(t: Type) -> Optional[int]:
    """Static size of a list-type argument, or None.

    Returns None for non-list types, unsized lists, and bool lists
    (bool lists are rendered as ::std::array rather than a sized list).
    """
    list_type = t.is_list_like()
    if list_type is None or str(list_type.elem) == "bool":
        return None
    return list_type.size
+
+
def argument(a: Argument) -> PythonArgument:
    """Convert a schema Argument into a PythonArgument (no default_init)."""
    if a.default is None:
        default = None
    else:
        # TODO: directly translate a.default to python default
        cpp_default = cpp.default_expr(a.default, a.type, symint=False)
        default = str(pythonify_default(cpp_default))
    return PythonArgument(
        name=a.name,
        type=a.type,
        default=default,
        default_init=None,
    )
+
+
+# Generates a PythonSignature that can be used for either .pyi or PythonArgParser codegen
+def signature(
+    f: NativeFunction, *, method: bool = False, pyi: bool = False
+) -> PythonSignature:
+    # Thin wrapper: all the work happens in signature_from_schema; the
+    # NativeFunction only contributes its schema and category_override.
+    return signature_from_schema(
+        f.func, category_override=f.category_override, method=method, pyi=pyi
+    )
+
+
def signature_from_schema(
    func: FunctionSchema,
    *,
    category_override: Optional[str],
    method: bool = False,
    pyi: bool = False,
) -> PythonSignature:
    """Build a PythonSignature directly from a FunctionSchema.

    Collects the schema arguments in python binding order, then
    reintroduces the scattered TensorOptions fields (dtype/layout/device/
    pin_memory) plus the synthetic 'requires_grad' argument for factory
    and like/new functions.
    """
    # NOTE(review): `pyi` is accepted for interface parity with signature()
    # but is unused in this body — confirm against callers before removing.
    args: List[Argument] = list(func.arguments.pre_self_positional)
    # Skip SelfArgument if this is method.
    if not method and func.arguments.self_arg is not None:
        args.append(func.arguments.self_arg.argument)
    args += func.arguments.post_self_positional
    args += func.arguments.pre_tensor_options_kwarg_only
    # Skip TensorOptionsArguments. Python side TensorOptions
    # arguments are created based on different rules - see below.
    args += func.arguments.post_tensor_options_kwarg_only
    args += func.arguments.out

    positional_names = {a.name for a in func.arguments.flat_positional}
    kwarg_only_names = {a.name for a in func.arguments.flat_kwarg_only}
    out_names = {a.name for a in func.arguments.out}

    input_args = tuple(argument(a) for a in args if a.name in positional_names)
    input_kwargs = tuple(argument(a) for a in args if a.name in kwarg_only_names)
    outputs = tuple(argument(a) for a in args if a.name in out_names)

    # Reintroduce the scattered fields of TensorOptions for Python.
    # Compared to the cpp counterpart, the python arguments have new property
    # (default_init) and a new argument 'requires_grad', which require some
    # special handlings.
    # [old codegen] TODO: because these aren't guaranteed to be 100% faithful
    # to the original versions in the yaml, this recreation is a potential
    # source of drift between eager and JIT. Pull this logic out to a shared place.
    has_tensor_input_arg = any(
        a.type.is_tensor_like() for a in func.arguments.flat_non_out
    )
    if any(a.name == "requires_grad" for a in func.schema_order_arguments()):
        raise ValueError(
            "argument named requires_grad is reserved, should not explicitly add it in the schema"
        )

    # [old codegen] this probably won't work if one of the returns is not a tensor,
    # but it will produce a compile-time error that is obvious.
    has_tensor_return = any(r.type.is_tensor_like() for r in func.returns)

    name: str = cpp.name(func)
    is_factory_function = category_override == "factory" or (
        has_tensor_return and not has_tensor_input_arg
    )
    is_like_or_new_function = (
        category_override in ("new", "like")
        or name.startswith("new_")
        or name.endswith("_like")
    )
    is_dummy_function = category_override == "dummy"

    tensor_options_args: List[PythonArgument] = []
    if (is_factory_function or is_like_or_new_function) and not is_dummy_function:

        def topt_default_init(field: str) -> Optional[str]:
            # Default initializer expression pulled from the schema's
            # TensorOptions argument, or None when absent/None-defaulted.
            topt_args = func.arguments.tensor_options
            if topt_args is None:
                return None
            a = getattr(topt_args, field)
            if a.default is None or a.default == "None":
                return None
            return cpp.default_expr(a.default, a.type, symint=False)

        tensor_options_args = [
            PythonArgument(
                name="dtype",
                type=OptionalType(BaseType(BaseTy.ScalarType)),
                default="None",
                default_init=(
                    None if is_like_or_new_function else topt_default_init("dtype")
                ),
            ),
            PythonArgument(
                name="layout",
                type=OptionalType(BaseType(BaseTy.Layout)),
                default="None",
                default_init=(
                    None if is_like_or_new_function else topt_default_init("layout")
                ),
            ),
            PythonArgument(
                name="device",
                type=OptionalType(BaseType(BaseTy.Device)),
                default="None",
                default_init=(
                    None
                    if is_like_or_new_function
                    else (
                        topt_default_init("device")
                        or "torch::tensors::get_default_device()"
                    )
                ),
            ),
            PythonArgument(
                name="pin_memory",
                type=OptionalType(BaseType(BaseTy.bool)),
                default="False",
                default_init=None,
            ),
            PythonArgument(
                name="requires_grad",
                type=OptionalType(BaseType(BaseTy.bool)),
                default="False",
                default_init=None,
            ),
        ]

    return PythonSignature(
        name=str(func.name.name),
        input_args=input_args,
        input_kwargs=input_kwargs,
        output_args=PythonOutArgument.from_outputs(outputs),
        tensor_options_args=tuple(tensor_options_args),
        returns=PythonReturns(returns=func.returns),
        method=method,
    )
+
+
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
+#
+# Python Interface
+#
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
+
+
def structseq_fieldnames(returns: Tuple[Return, ...]) -> List[str]:
    """Return the field names for a multi-return structseq, or [] when the
    returns do not form one (single return, or all returns unnamed).

    Raises ValueError when only some returns are named.
    """
    if len(returns) <= 1 or all(r.name is None for r in returns):
        return []
    if any(r.name is None for r in returns):
        # [old codegen] When building on Windows, the linker could not resolve
        # `PyStructSequence_UnnamedField`, causing a LNK2001 build error in
        # python_nn_functions.cpp.obj. So unnamed fields are unsupported in
        # structseq: either name all fields, or none of them.
        raise ValueError("Unnamed field is not supported by codegen")
    return [str(r.name) for r in returns]
+
+
def argument_type_str_pyi(t: Type) -> str:
    """Map a schema Type to the Python type string used for .pyi *argument*
    positions (argument types are permissive Unions; compare
    return_type_str_pyi)."""
    wrap_optional = False
    if isinstance(t, OptionalType):
        t = t.elem
        wrap_optional = True

    if isinstance(t, BaseType):
        # NOTE: the int/DeviceIndex assignment is a separate `if` preceding
        # the elif chain; the cases are disjoint so behavior matches an elif.
        if t.name in (BaseTy.int, BaseTy.DeviceIndex):
            py_type = "_int"
        if t.name == BaseTy.SymInt:
            py_type = "Union[_int, SymInt]"
        elif t.name == BaseTy.float:
            py_type = "_float"
        elif t.name == BaseTy.str:
            py_type = "str"
        elif t.name == BaseTy.Scalar:
            py_type = "Union[Number, _complex]"
        elif t.name == BaseTy.ScalarType:
            py_type = "_dtype"
        elif t.name == BaseTy.bool:
            py_type = "_bool"
        elif t.name == BaseTy.QScheme:
            py_type = "_qscheme"
        elif t.name == BaseTy.Layout:
            py_type = "_layout"
        elif t.name == BaseTy.Device:
            py_type = "Optional[DeviceLikeType]"
        elif t.name == BaseTy.MemoryFormat:
            py_type = "memory_format"
        elif t.name == BaseTy.Dimname:
            py_type = "Union[str, ellipsis, None]"
        elif t.name == BaseTy.Storage:
            py_type = "Union[Storage, UntypedStorage]"
        elif t.name in (BaseTy.Tensor, BaseTy.Generator, BaseTy.Stream):
            # These python schema type names line up with their function schema names
            py_type = t.name.name

    elif isinstance(t, ListType):
        if str(t.elem) == "int":
            py_type = "Union[_int, _size]" if t.size is not None else "_size"
        elif t.is_tensor_like():
            # TODO: this doesn't seem right...
            # Tensor?[] currently translates to Optional[Union[Tuple[Tensor, ...], List[Tensor]]]
            # It should probably translate to Union[Tuple[Optional[Tensor], ...], List[Optional[Tensor]]]
            if isinstance(t.elem, OptionalType):
                wrap_optional = True
            py_type = (
                "Union[Tensor, Tuple[Tensor, ...], List[Tensor]]"
                if t.size is not None
                else "Union[Tuple[Tensor, ...], List[Tensor]]"
            )
        elif str(t.elem) == "float":
            py_type = "Sequence[_float]"
        elif str(t.elem) == "SymInt" and t.size is not None:
            elem_str = argument_type_str_pyi(t.elem)
            py_type = f"Union[{elem_str}, Sequence[{elem_str}]]"
        else:
            py_type = f"Sequence[{argument_type_str_pyi(t.elem)}]"

    else:
        raise RuntimeError(f"unrecognized type {repr(t)}")

    if wrap_optional:
        py_type = f"Optional[{py_type}]"
    return py_type
+
+
def return_type_str_pyi(t: Type) -> str:
    """Map a schema Type to the Python type string used for .pyi *return*
    positions.

    Where arguments are open to accepting Union, return types should return
    concrete types.
    """
    if isinstance(t, OptionalType):
        inner = return_type_str_pyi(t.elem)
        return f"Optional[{inner}]"

    if isinstance(t, BaseType):
        if t.name == BaseTy.Device:
            return "_device"
        elif t.name == BaseTy.Dimname:
            # BUG FIX: previously this assigned `ret = "Optional[str]"` to a
            # dead local and fell through to the generic argument mapping at
            # the bottom, emitting "Union[str, ellipsis, None]" instead of
            # the intended concrete return type.
            return "Optional[str]"
        else:
            return argument_type_str_pyi(t)

    if isinstance(t, ListType):
        inner = return_type_str_pyi(t.elem)
        return f"Tuple[{inner}, ...]"

    # Fall back to the (more permissive) argument mapping.
    return argument_type_str_pyi(t)
+
+
def returns_structseq_pyi(signature: PythonSignature) -> Optional[Tuple[str, str]]:
    """Generate the .pyi class stub for a structseq return type.

    Returns (structseq name, class definition source) when the signature
    returns named fields, otherwise None.
    """
    python_returns = [return_type_str_pyi(r.type) for r in signature.returns.returns]
    structseq_name = signature.name
    field_names = structseq_fieldnames(signature.returns.returns)
    if not field_names:
        return None

    # These types are structseq objects which act like named NamedTuples, but
    # the constructor acts like the constructor of tuple. Using typing.NamedTuple
    # does not allow us to override __init__.
    field_names_str = ", ".join(repr(name) for name in field_names)
    seq_type = f"Tuple[{', '.join(python_returns)}]"
    structseq_def_lines = [
        f"class {structseq_name}({seq_type}):",
    ]
    for name, typ in zip(field_names, python_returns):
        structseq_def_lines.extend(
            [
                "    @property",
                f"    def {name}(self) -> {typ}: ...",
            ]
        )
    structseq_def_lines.extend(
        [
            f"    def __new__(cls, sequence: {seq_type}): ...",
            f"    n_fields: _int = {len(field_names)}",
            # BUG FIX: this attribute was misspelled "n_sequeunce_fields";
            # the real CPython structseq attribute is "n_sequence_fields".
            f"    n_sequence_fields: _int = {len(field_names)}",
            "    n_unnamed_fields: _int = 0",
            "    def __init_subclass__(cls) -> NoReturn: ...  # prohibit subclassing",
            "",  # add an extra newline
        ]
    )
    structseq_def = "\n".join(structseq_def_lines)
    # Example:
    # structseq_def = (
    #   "class max(Tuple[Tensor, Tensor]):\n"
    #   "    @property\n"
    #   "    def values(self) -> Tensor: ...\n"
    #   "    @property\n"
    #   "    def indices(self) -> Tensor: ...\n"
    #   "    def __new__(cls, sequence: Tuple[Tensor, Tensor]): ...\n"
    #   "    n_fields: _int = 2",
    #   "    n_sequence_fields: _int = 2",
    #   "    n_unnamed_fields: _int = 0",
    #   "    def __init_subclass__(cls) -> NoReturn: ...  # prohibit subclassing",
    # )
    return structseq_name, structseq_def
+
+
def returns_str_pyi(signature: PythonSignature) -> str:
    """Return the .pyi return-annotation string for a python signature."""
    # Named multi-returns are exposed as a torch.return_types structseq.
    if structseq_fieldnames(signature.returns.returns):
        return f"torch.return_types.{signature.name}"

    types = [return_type_str_pyi(r.type) for r in signature.returns.returns]
    if not types:
        return "None"
    if len(types) == 1:
        return types[0]
    return f"Tuple[{', '.join(types)}]"
+
+
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
+#
+# C++ Function Dispatch
+#
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
+# This section provides APIs to generate the code that does C++ function
+# dispatch. The C++ function call is wrapped by a lambda function.
+# For example:
+#
+# // aten::selu_(Tensor(a!) self) -> Tensor(a!)
+# auto dispatch_selu_ = [](Tensor self) -> Tensor {
+# pybind11::gil_scoped_release no_gil;
+# return at::selu_(self);
+# };
+#
+# The lambda function's signature follows the C++ signature in common
+# cases, e.g.:
+#
+# // aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
+# [](const Tensor & self, const Tensor & other, Scalar alpha) -> Tensor
+#
+# For out variant the 'out' argument's type is changed from 'Tensor &'
+# to 'Tensor'. It's because when calling the lambda it passes in the
+# PythonArgParser output '_r.tensor(3)', which is stack allocated object
+# and needs to pass by value. Also see comments in 'dispatch_lambda_return_str()'.
+#
+# // aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
+# [](Tensor out, const Tensor & self, const Tensor & other, Scalar alpha) -> Tensor
+#
+# For multi-output case it can keep using reference type because the
+# PythonArgParser output has been unpacked to local variables, e.g.:
+#
+# // aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *,
+# // Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
+# [](Tensor & max, Tensor & max_values, const Tensor & self, Dimname dim, bool keepdim) -> std::tuple
+#
+# For deprecated python signature, it should follow deprecated python arg order.
+# TODO: This is to keep same byte-for-byte result as the old codegen - maybe unnecessary?
+
+
def dispatch_lambda_args(
    ps: PythonSignature, f: NativeFunction, symint: bool = True
) -> Tuple[DispatchLambdaArgument, ...]:
    """Compute the argument list of the C++ dispatch lambda for `f`."""
    # Deprecated python signatures carry their own (deprecated) schema.
    if isinstance(ps, PythonSignatureDeprecated):
        schema = ps.deprecated_schema
    else:
        schema = f.func

    # Start with cpp arguments - dispatch lambda signature always include 'self'
    cpp_args = cpp.arguments(
        arguments=schema.arguments,
        faithful=False,
        symint=symint,
        method=False,
        cpp_no_default_args=f.cpp_no_default_args,
    )
    out_args: Set[str] = {a.name for a in schema.arguments.out}

    def to_lambda_arg(binding: Binding) -> DispatchLambdaArgument:
        # Convert a single cpp Binding into a lambda argument.
        type_str = binding.type
        is_out_arg = binding.name in out_args
        if ps.method and binding.name == "self":
            # For method's 'self', we can use 'const Tensor &' and simply ignore mutability!
            type_str = "const at::Tensor &"
        elif len(out_args) <= 1 or not is_out_arg:
            # For other cases we need prevent dangling refs to temps (unless it's
            # unpacked scattered output).
            # The reason is explained in the comments above and in 'dispatch_lambda_return_str()'.
            # TODO: avoid this special handling?
            if type_str == "at::Tensor &":
                type_str = "at::Tensor"
        return DispatchLambdaArgument(
            name=binding.name,
            type_str=type_str,
            is_out_arg=is_out_arg,
        )

    return tuple(to_lambda_arg(a) for a in cpp_args)
+
+
# [old codegen] XXX: if you got here because of an assertion failure, it doesn't mean
# it's enough to just extend the list here. Before you do this, make sure
# to add an appropriate wrap() overload in torch/csrc/autograd/utils/wrap_outputs.h.
#
# NOTE(review): many entries below read as bare "::std::tuple" / "::std::vector"
# with no template argument lists, making the set literal contain duplicates.
# This looks like the template parameters (e.g. "<at::Tensor,at::Tensor>") were
# stripped by some text processing — confirm against upstream torchgen before
# relying on this list.
SUPPORTED_RETURN_TYPES = {
    "at::Tensor",
    "::std::tuple",
    "::std::tuple",
    "::std::tuple",
    "::std::tuple",
    "::std::tuple",
    "::std::tuple",
    "::std::tuple",
    "::std::tuple",
    "::std::tuple",
    "::std::tuple",
    "::std::tuple>",
    "::std::vector",
    # Needed for flash attention forw/backward
    "::std::tuple",
    "at::Scalar",
    "bool",
    "int64_t",
    "void*",
    "void",
    "at::QScheme",
    "double",
    "at::IntArrayRef",
    "at::ScalarType",
    "at::Stream",
}
+
+
def dispatch_lambda_return_str(f: NativeFunction) -> str:
    """Return the C++ return-type string for f's dispatch lambda.

    [old codegen] Remove type annotation (e.g. 'Tensor' rather than 'Tensor &')
    because the dispatch lambdas take mutable arguments *by value*, not by
    reference. If you then return a reference to such an argument, you will
    now have a pointer to a dangling stack entry. Not good.

    You want:

        auto dispatch_selu_ = [](Tensor self) -> Tensor { ...; return at::selu_(self); };

    *not* `-> Tensor&`.

    (NB: We can't make dispatch_selu_ take Tensor&, because the enclosing
    codegen looks like dispatch_selu_(_r.tensor(0)), and you can't take a
    mutable reference to temporary. Maybe we could assign it to a
    variable itself.)
    """
    # Strip the mutability annotations off every return before rendering.
    stripped_returns = tuple(Return(r.name, r.type, None) for r in f.func.returns)
    return_str = cpp.returns_type(stripped_returns, symint=True).cpp_type()
    if return_str not in SUPPORTED_RETURN_TYPES:
        raise RuntimeError(f"{f.func.name} returns unsupported type {return_str}")
    return return_str
+
+
def cpp_dispatch_target(f: NativeFunction) -> str:
    """Return the C++ callable expression the dispatch lambda should invoke
    (method call on self, or a namespaced free function)."""
    symint = f.func.has_symint()
    name = cpp.name(f.func, symint_overload=symint)
    if Variant.method in f.variants:
        return f"self.{name}"
    if Variant.function not in f.variants:
        raise RuntimeError(f"could not dispatch, neither function nor method: {f.func}")
    # Factory-ish functions live in the torch:: namespace; the rest in at::.
    if has_tensor_options(f) or f.func.name.name.base.endswith("_like"):
        return f"torch::{name}"
    return f"at::{name}"
+
+
def cpp_dispatch_exprs(
    f: NativeFunction,
    *,
    python_signature: Optional[PythonSignature] = None,
) -> Tuple[str, ...]:
    """Return the argument expressions used to call the C++ dispatch target."""
    cpp_args: Sequence[Binding] = _cpp_signature(f, method=False).arguments()

    if isinstance(python_signature, PythonSignatureDeprecated):
        # For deprecated python signature we may need fill in some constants.
        exprs: Tuple[str, ...] = tuple(
            expr
            for expr in python_signature.deprecated_args_exprs
            if expr != "out" or f.func.is_out_fn()
        )
    else:
        # By default the exprs are consistent with the C++ signature.
        exprs = tuple(a.name for a in cpp_args)

    # 'self' is the receiver of a method call, not an argument.
    if Variant.method in f.variants:
        exprs = tuple(e for e in exprs if e != "self")

    return exprs
+
+
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
+#
+# Python / C++ Args Binding
+#
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
+
+
+# We explicitly enumerate the PythonArgParser unpacking methods for all
+# supported types. This might be more verbose than necessary, partially
+# because of the irregularity of unpacking method naming, partially
+# because we want to mimic the old codegen behavior - to reject
+# unexpected and/or unsupported cases which the old codegen rejects.
+# For certain cases it is intentionally more restrictive than necessary,
+# e.g.: it doesn't accepts doublelist with definite size.
def arg_parser_unpack_method(
    t: Type, default: Optional[str], default_init: Optional[str], *, symint: bool = True
) -> str:
    """Return the PythonArgParser unpack-method name for a schema type.

    `default_init` selects the *WithDefault unpacking variants, which only
    exist for the scattered TensorOptions field types.
    """
    has_default_init = default_init is not None
    if has_default_init and str(t) not in (
        "ScalarType?",
        "ScalarType",
        "Device",
        "Device?",
        "Layout",
        "Layout?",
        "bool",
        "bool?",
    ):
        raise RuntimeError(f"type '{t}' does not supported unpacking with default")

    if isinstance(t, BaseType):
        if t.name in (
            BaseTy.Tensor,
            BaseTy.Stream,
            BaseTy.Storage,
            BaseTy.Scalar,
            BaseTy.Dimname,
        ):
            # These unpack methods line up with their schema names
            return t.name.name.lower()
        if t.name == BaseTy.ScalarType:
            return "scalartypeWithDefault" if has_default_init else "scalartype"
        if t.name == BaseTy.Device:
            return "deviceWithDefault" if has_default_init else "device"
        if t.name in (BaseTy.DeviceIndex, BaseTy.int):
            return "toInt64"
        if t.name == BaseTy.SymInt:
            return "toSymInt" if symint else "toInt64"
        if t.name == BaseTy.bool:
            return "toBoolWithDefault" if has_default_init else "toBool"
        if t.name == BaseTy.float:
            return "toDouble"
        if t.name == BaseTy.str:
            return "stringView"
        if t.name == BaseTy.Layout:
            return "layoutWithDefault" if has_default_init else "layout"
        if t.name == BaseTy.MemoryFormat:
            return "memoryformat"

    elif isinstance(t, OptionalType):
        elem_str = str(t.elem)
        if elem_str == "Tensor":
            return "optionalTensor"
        if elem_str == "Generator":
            return "generator"
        if elem_str == "Dimname[]":
            return "toDimnameListOptional"
        if not has_default_init and default in (None, "None", "c10::nullopt"):
            # If default is None: append 'Optional' to elem's unpacking method
            return (
                arg_parser_unpack_method(t.elem, None, None, symint=symint) + "Optional"
            )
        # Otherwise, load as underlying type with default
        return arg_parser_unpack_method(t.elem, default, default_init, symint=symint)

    elif isinstance(t, ListType):
        elem_str = str(t.elem)
        if elem_str == "Tensor":
            # accept and use definite size
            return f"tensorlist_n<{t.size}>" if t.size is not None else "tensorlist"
        if elem_str == "Tensor?":
            return "list_of_optional_tensors"
        if elem_str == "Dimname":
            # accept definite size
            return "dimnamelist"
        if elem_str == "int":
            # accept definite size
            return "intlist"
        if elem_str == "float":
            return "doublelist"
        if elem_str == "SymInt":
            # accept definite size
            return "symintlist" if symint else "intlist"
        if elem_str == "Scalar":
            return "scalarlist"

    raise RuntimeError(f"type '{t}' is not supported by PythonArgParser")
+
+
def arg_parser_output_expr(
    arg_index: int, a: PythonArgument, *, symint: bool = True
) -> PythonArgParserOutputExpr:
    """Build the RHS expression extracting argument `a` from the
    PythonArgParser output `_r` at position `arg_index`.

    e.g. for arg name 'foo', arg type 'bool', arg_index = 2, the expr is
    '_r.toBool(2)'.
    """
    unpack = arg_parser_unpack_method(
        t=a.type, default=a.default, default_init=a.default_init, symint=symint
    )
    # *WithDefault unpack methods take the default initializer as a second arg.
    if a.default_init is not None:
        expr = f"_r.{unpack}({arg_index}, {a.default_init})"
    else:
        expr = f"_r.{unpack}({arg_index})"

    return PythonArgParserOutputExpr(
        name=a.name,
        expr=expr,
        index=arg_index,
        argument=a,
    )
+
+
def arg_parser_output_exprs(
    ps: PythonSignature, f: NativeFunction, *, symint: bool = True
) -> Dict[str, PythonArgParserOutputExpr]:
    """Map each python argument name to its PythonArgParserOutputExpr."""
    exprs: Dict[str, PythonArgParserOutputExpr] = {}
    for i, a in enumerate(ps.arguments()):
        e = arg_parser_output_expr(i, a, symint=symint)
        exprs[e.name] = e
    return exprs
+
+
# Argument name -> schema type string for the scattered TensorOptions fields
# that python bindings expose individually (instead of a packed TensorOptions).
# Used by dispatch_lambda_exprs to validate tensor_options_args.
TENSOR_OPTIONS_FIELDS = {
    "dtype": "ScalarType?",
    "device": "Device?",
    "layout": "Layout?",
    "pin_memory": "bool?",
    "requires_grad": "bool?",
}
+
+
# bind arg parser outputs (python args) with dispatch lambda arguments (c++ args).
def dispatch_lambda_exprs(
    ps: PythonSignature, f: NativeFunction, *, symint: bool = True
) -> DispatchLambdaArgumentExprs:
    """Bind PythonArgParser outputs (python args) to dispatch lambda arguments
    (c++ args).

    Produces the lambda-call expression for each lambda argument, plus any
    C++ init statements the bindings need (TensorOptions packing, scattered
    output unpacking, etc.).
    """
    arg_parser_outputs = arg_parser_output_exprs(ps, f, symint=symint)
    lambda_args = dispatch_lambda_args(ps, f, symint=symint)
    inits: List[str] = []
    lambda_args_exprs: Dict[str, str] = {}

    has_toptions = has_tensor_options(f)

    # 1. special inits/unpacking to provide binding exprs for lambda arguments.
    for a in ps.arguments(skip_tensor_options=True):
        name = a.name
        arg_parser_expr = arg_parser_outputs[a.name].expr

        if has_toptions and name == "self":
            # TODO: why this needs to be special case?
            inits.extend(
                [
                    f"auto self = {arg_parser_expr};",
                ]
            )
            lambda_args_exprs[name] = name
        elif (
            isinstance(a, PythonOutArgument)
            and len(a.outputs) > 1
            and f.func.is_out_fn()
        ):
            # Scattered multi-output: parse once, index into the result.
            inits.extend(
                [
                    f"auto out = {arg_parser_expr};",
                ]
            )
            for i, out_arg in enumerate(a.outputs):
                lambda_args_exprs[out_arg.name] = f"out[{i}]"
        elif str(a.type) == "Dimname[]?":
            # [old codegen]
            # TODO: make this part of something more general, or get rid of it.
            # optional<ArrayRef<T>> are special. The PythonArgParser returns an
            # optional<vector<T>>, which cannot be implicitly converted to
            # optional<ArrayRef<T>>. One needs to unwrap the optional and rewrap.
            #
            # BUG FIX: the generated C++ below previously read
            # "c10::optional {name} = ..." — the template argument
            # "<DimnameList>" had been stripped, producing ill-formed C++.
            inits.extend(
                [
                    f"auto __{name} = {arg_parser_expr};",
                    f"c10::optional<DimnameList> {name} = __{name} ? c10::make_optional(DimnameList(__{name}.value())) : c10::nullopt;",  # noqa: B950
                ]
            )
            lambda_args_exprs[name] = name
        else:
            # default case - directly using PythonArgParser output expr
            lambda_args_exprs[name] = arg_parser_expr

    # method's self is passed directly to python binding, rather than parsed
    if ps.method:
        lambda_args_exprs["self"] = "self"

    # 2. special packing/checking for TensorOptions.
    tensor_options_args_names = [a.name for a in ps.tensor_options_args]
    if has_toptions:
        if f.func.is_out_fn():
            raise RuntimeError(f"{f.func}: tensor options with output arg")
        for a in ps.tensor_options_args:
            if a.name not in TENSOR_OPTIONS_FIELDS:
                raise RuntimeError(
                    f"{f.func}: unrecognized tensor options field '{a.name}' in python binding arguments"
                )
            if str(a.type) != TENSOR_OPTIONS_FIELDS.get(a.name):
                raise RuntimeError(
                    f"{f.func}: unrecognized type '{str(a.type)}' for tensor options field '{a.name}'"
                )
        if not all(
            a in tensor_options_args_names for a in TENSOR_OPTIONS_FIELDS.keys()
        ):
            raise RuntimeError(
                f"{f.func}: incomplete tensor options args: {tensor_options_args_names}"
            )

        inits.append(
            f"""\
const auto options = TensorOptions()
    .dtype({arg_parser_outputs['dtype'].expr})
    .device({arg_parser_outputs['device'].expr})
    .layout({arg_parser_outputs['layout'].expr})
    .requires_grad({arg_parser_outputs['requires_grad'].expr})
    .pinned_memory({arg_parser_outputs['pin_memory'].expr});
torch::utils::maybe_initialize_device(options);
"""
        )
        lambda_args_exprs["options"] = "options"

    # 3. special case - access scattered TensorOptions fields without packing
    # TODO: maybe move to the generator side as it's not related to binding.
    if not has_toptions and tensor_options_args_names:
        if "dtype" in tensor_options_args_names:
            # we're an output-arg variant, check these args against output tensor
            if not f.func.is_out_fn():
                raise RuntimeError(
                    f"{f.func}: dtype in tensor_options_args without output arg"
                )
            if not all(a in tensor_options_args_names for a in ("layout", "device")):
                raise RuntimeError(
                    f"{f.func}: incomplete tensor options for output check"
                )

            inits.append(
                f"""\
check_out_type_matches({arg_parser_outputs['out'].expr}, {arg_parser_outputs['dtype'].expr},
                       {arg_parser_outputs['dtype'].is_none_expr}, {arg_parser_outputs['layout'].expr},
                       {arg_parser_outputs['device'].expr}, {arg_parser_outputs['device'].is_none_expr});
"""
            )
        # we'll set requires_grad on outgoing tensor
        if "requires_grad" not in tensor_options_args_names:
            raise RuntimeError(
                f'{f.func}: expected "requires_grad" in tensor_options_args absent, but found [{tensor_options_args_names}]'
            )

    return DispatchLambdaArgumentExprs(
        exprs=tuple(lambda_args_exprs[a.name] for a in lambda_args),
        inits=inits,
    )
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/types/__init__.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/types/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3e2f9a431b45c7ff1b0357dcb0e24a508a38a87
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/types/__init__.py
@@ -0,0 +1,3 @@
+from .types import *
+from .types_base import *
+from .signatures import * # isort:skip
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/types/__pycache__/signatures.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/types/__pycache__/signatures.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e4c93b283f8f4ff57e879cd49313b1439c68e725
Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/types/__pycache__/signatures.cpython-311.pyc differ
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/types/__pycache__/types_base.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/types/__pycache__/types_base.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bc3d77421b2f216648979a259c4b87a64d240543
Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/types/__pycache__/types_base.cpython-311.pyc differ
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/types/signatures.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/types/signatures.py
new file mode 100644
index 0000000000000000000000000000000000000000..f21ab29178e5ca46777b873fc5c1ef12f32f6443
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/types/signatures.py
@@ -0,0 +1,423 @@
+from dataclasses import dataclass
+
+from typing import Iterator, List, Optional, Sequence, Set, Tuple, Union
+
+from torchgen.model import (
+ BackendIndex,
+ FunctionSchema,
+ NativeFunction,
+ NativeFunctionsGroup,
+ NativeFunctionsViewGroup,
+)
+
+from .types_base import Binding, CType, Expr
+
+
+@dataclass(frozen=True)
+class CppSignature:
+ """
+ A CppSignature represents a single overload in the C++ API. For
+ any given function schema, there may be multiple CppSignatures
+ corresponding to it, based on how we desugar to C++. See also
+ CppSignatureGroup.
+ """
+
+ # The schema this signature is derived from
+ func: FunctionSchema
+
+ # Is this a C++ signature for a method, i.e. Tensor::my_op(...)?
+ method: bool
+
+ # Is this a faithful C++ signature (i.e. following the JIT schema) or a convenience API
+ # (i.e. with a potential TensorOptions argument and out arguments in the front)
+ faithful: bool
+
+ # Is this a symint C++ signature. For BC reasons, functions that take
+ # SymInts still present as int64_t in C++, and the SymInt variant is
+ # offered at a different overload name
+ #
+ # NB: If a function RETURNS a SymInt, this is ALWAYS false
+ symint: bool
+
+ # The set of C++ arguments which should not have defaults applied to them
+ cpp_no_default_args: Set[str]
+
+ # Is this a fallback C++ binding? Fallback bindings are enabled by
+ # manual_cpp_binding: True and are alternate, non-public API that
+ # lets manual C++ binding implementors access the binding that would
+ # have been automatically generated
+ fallback_binding: bool = False
+
+ # Return the unpacked argument structure of this signature,
+ # discarding information about which arguments are semantically
+ # related to each other.
+ def arguments(self) -> Sequence[Binding]:
+ return cpp.arguments(
+ self.func.arguments,
+ faithful=self.faithful,
+ symint=self.symint,
+ method=self.method,
+ cpp_no_default_args=self.cpp_no_default_args,
+ )
+
+ def name(self, *, suppress_symint_suffix: bool = False) -> str:
+ n = cpp.name(
+ self.func,
+ faithful_name_for_out_overloads=self.faithful,
+ symint_overload=False if suppress_symint_suffix else self.symint,
+ )
+ if self.fallback_binding:
+ n = f"__dispatch_{n}"
+ return n
+
+ # Render the C++ declaration for this signature
+ def decl(
+ self,
+ *,
+ name: Optional[str] = None,
+ prefix: str = "",
+ is_redispatching_fn: bool = False,
+ suppress_symint_suffix: bool = False,
+ ) -> str:
+ returns_type = cpp.returns_type(
+ self.func.returns, symint=self.symint
+ ).cpp_type()
+ cpp_args = [a.decl() for a in self.arguments()]
+ if is_redispatching_fn:
+ cpp_args = ["c10::DispatchKeySet dispatchKeySet"] + cpp_args
+ cpp_args_str = ", ".join(cpp_args)
+ if name is None:
+ name = prefix + self.name(suppress_symint_suffix=suppress_symint_suffix)
+ return f"{returns_type} {name}({cpp_args_str})"
+
+ # Render the C++ definition for this signature, not including
+ # the body (with curly braces)
+ def defn(
+ self,
+ *,
+ name: Optional[str] = None,
+ prefix: str = "",
+ is_redispatching_fn: bool = False,
+ ) -> str:
+ returns_type = cpp.returns_type(
+ self.func.returns, symint=self.symint
+ ).cpp_type()
+ cpp_args = [a.defn() for a in self.arguments()]
+ if is_redispatching_fn:
+ cpp_args = ["c10::DispatchKeySet dispatchKeySet"] + cpp_args
+ cpp_args_str = ", ".join(cpp_args)
+ if name is None:
+ name = prefix + self.name()
+ return f"{returns_type} {name}({cpp_args_str})"
+
+ def ptr_type(self) -> str:
+ args_types_str = ", ".join(a.type for a in self.arguments())
+ return f"{cpp.returns_type(self.func.returns, symint=self.symint).cpp_type()} (*)({args_types_str})"
+
+ # Return the C++ function type, e.g., something like int(bool)
+ def type(self) -> str:
+ args_types_str = ", ".join(a.type for a in self.arguments())
+ return f"{cpp.returns_type(self.func.returns, symint=self.symint).cpp_type()} ({args_types_str})"
+
+
+# Represents group of all CppSignatures associated with a
+# FunctionSchema. Right now, that's the regular, user-visible
+# signature, as well as a "faithful" signature which doesn't
+# have grouping.
+@dataclass(frozen=True)
+class CppSignatureGroup:
+ func: FunctionSchema
+ signature: CppSignature
+ faithful_signature: Optional[CppSignature]
+ symint_signature: Optional[CppSignature]
+ symint_faithful_signature: Optional[CppSignature]
+
+ def most_faithful_signature(self) -> CppSignature:
+ if self.faithful_signature:
+ return self.faithful_signature
+ else:
+ return self.signature
+
+ def signatures(self, *, symint: bool = True) -> Iterator[CppSignature]:
+ yield self.signature
+ if self.faithful_signature:
+ yield self.faithful_signature
+ if symint:
+ if self.symint_signature:
+ yield self.symint_signature
+ if self.symint_faithful_signature:
+ yield self.symint_faithful_signature
+
+ @staticmethod
+ def from_native_function(
+ f: NativeFunction, *, method: bool, fallback_binding: bool = False
+ ) -> "CppSignatureGroup":
+ func = f.func
+
+ def make_sig(*, faithful: bool, symint: bool) -> CppSignature:
+ return CppSignature(
+ func=func,
+ faithful=faithful,
+ symint=symint,
+ method=method,
+ fallback_binding=fallback_binding,
+ cpp_no_default_args=f.cpp_no_default_args,
+ )
+
+ def make_sigs(*, symint: bool) -> Tuple[CppSignature, Optional[CppSignature]]:
+ faithful_signature: Optional[CppSignature] = None
+ if func.arguments.tensor_options is not None or len(func.arguments.out) > 0:
+ faithful_signature = make_sig(faithful=True, symint=symint)
+ signature = make_sig(faithful=False, symint=symint)
+ return signature, faithful_signature
+
+ signature, faithful_signature = make_sigs(symint=False)
+ symint_signature: Optional[CppSignature] = None
+ symint_faithful_signature: Optional[CppSignature] = None
+ if func.has_symint():
+ symint_signature, symint_faithful_signature = make_sigs(symint=True)
+
+ return CppSignatureGroup(
+ func=func,
+ signature=signature,
+ faithful_signature=faithful_signature,
+ symint_signature=symint_signature,
+ symint_faithful_signature=symint_faithful_signature,
+ )
+
+
+@dataclass(frozen=True)
+class DispatcherSignature:
+ # The schema this signature is derived from
+ func: FunctionSchema
+
+ # Allows you to prepend an arbitrary prefix to the signature name.
+ # This is useful for parts of the codegen that generate wrappers around kernels,
+ # and need to avoid naming collisions.
+ prefix: str = ""
+
+ symint: bool = True
+
+ def arguments(self) -> List[Binding]:
+ return dispatcher.arguments(self.func, symint=self.symint)
+
+ def name(self) -> str:
+ return self.prefix + dispatcher.name(self.func)
+
+ def decl(self, name: Optional[str] = None) -> str:
+ args_str = ", ".join(a.decl() for a in self.arguments())
+ if name is None:
+ name = self.name()
+ return f"{self.returns_type().cpp_type()} {name}({args_str})"
+
+ def defn(
+ self, name: Optional[str] = None, *, is_redispatching_fn: bool = False
+ ) -> str:
+ args = [a.defn() for a in self.arguments()]
+ if is_redispatching_fn:
+ args = ["c10::DispatchKeySet dispatchKeySet"] + args
+ args_str = ", ".join(args)
+ if name is None:
+ name = self.name()
+ return f"{self.returns_type().cpp_type()} {name}({args_str})"
+
+ def exprs(self) -> List[Expr]:
+ return [Expr(a.name, a.nctype) for a in self.arguments()]
+
+ def returns_type(self) -> CType:
+ return dispatcher.returns_type(self.func.returns, symint=self.symint)
+
+ def ptr_type(self) -> str:
+ dispatcher_args_types_str = ", ".join(a.type for a in self.arguments())
+ return f"{self.returns_type().cpp_type()} (*)({dispatcher_args_types_str})"
+
+ # Return the C++ function type, e.g., something like int(bool)
+ def type(self) -> str:
+ dispatcher_args_types_str = ", ".join(a.type for a in self.arguments())
+ return f"{self.returns_type().cpp_type()} ({dispatcher_args_types_str})"
+
+ @staticmethod
+ def from_schema(
+ func: FunctionSchema, *, prefix: str = "", symint: bool = True
+ ) -> "DispatcherSignature":
+ return DispatcherSignature(func, prefix, symint)
+
+
+@dataclass(frozen=True)
+class NativeSignature:
+ # The schema this signature is derived from
+ func: FunctionSchema
+
+ symint: bool
+
+ prefix: str = ""
+
+ def name(self) -> str:
+ return self.prefix + native.name(self.func)
+
+ def decl(self, name: Optional[str] = None) -> str:
+ args_str = ", ".join(a.decl() for a in self.arguments())
+ if name is None:
+ name = self.name()
+ return f"{native.returns_type(self.func.returns, symint=self.symint).cpp_type()} {name}({args_str})"
+
+ def defn(self, name: Optional[str] = None) -> str:
+ args_str = ", ".join(a.defn() for a in self.arguments())
+ if name is None:
+ name = self.name()
+ return f"{native.returns_type(self.func.returns, symint=self.symint).cpp_type()} {name}({args_str})"
+
+ def ptr_type(self) -> str:
+ # don't include defaults in type signature!
+ args_str = ", ".join(a.defn() for a in self.arguments())
+ return f"{native.returns_type(self.func.returns, symint=self.symint).cpp_type()} (*)({args_str})"
+
+ def arguments(self) -> List[Binding]:
+ return native.arguments(self.func, symint=self.symint)
+
+ def returns_type(self) -> CType:
+ return native.returns_type(self.func.returns, symint=self.symint)
+
+ def dispatcher_exprs(self) -> List[Expr]:
+ return translate.translate(
+ self.arguments(), dispatcher.arguments(self.func), method=False
+ )
+
+
+@dataclass(frozen=True)
+class ViewInverseSignature:
+ g: NativeFunctionsViewGroup
+
+ def name(self) -> str:
+ return functionalization.reverse_name(self.g.view, include_namespace=False)
+
+ def decl(self) -> str:
+ return_type = functionalization.returns_type(self.g.view.func)
+ decls = [
+ a.decl()
+ for a in functionalization.inner_arguments(
+ self.g.view.func, is_reverse=True
+ )
+ ]
+ return f"static {return_type.cpp_type()} {self.name()}({', '.join(decls)});"
+
+
+@dataclass(frozen=True)
+class FunctionalizationLambda:
+ g: NativeFunctionsViewGroup
+
+ # are we generating the forward lambda or the reverse lambda?
+ is_reverse: bool
+
+ def captures(self) -> List[Expr]:
+ # The lambda lives inside of a kernel following the dispatcher API, so its outer context is the dispatcher arguments
+ # We also need to read the "reapply views" TLS at the time that the functionalization kernel was executed,
+ # and plumb it into the lambda.
+ outer_ctx = dispatcher.arguments(self.g.view.func) + [
+ functionalization.reapply_views_binding,
+ functionalization.inverse_return_mode_binding,
+ ]
+ capture_bindings = functionalization.capture_arguments(
+ self.g.view.func, is_reverse=self.is_reverse
+ )
+ # allow_expensive_conversions is set because we want to convert
+ # some reference types (IntArrayRef) to value types (vector).
+ capture_exprs = translate.translate(
+ outer_ctx, capture_bindings, method=False, allow_expensive_conversions=True
+ )
+ return capture_exprs
+
+ def decl(self) -> str:
+ return_type = functionalization.returns_type(self.g.view.func)
+ capture_str = ", ".join(
+ f"{val.type.name} = {val.expr}" for val in self.captures()
+ )
+ decls = [
+ a.decl()
+ for a in functionalization.outer_arguments(is_reverse=self.is_reverse)
+ ]
+ return f"[{capture_str}]({', '.join(decls)}) -> {return_type.cpp_type()}"
+
+ def inner_call(self, *, reapply_views: Optional[bool] = None) -> str:
+ inner_call_name = functionalization.name(
+ self.g,
+ is_reverse=self.is_reverse,
+ include_namespace=True,
+ reapply_views=reapply_views,
+ )
+
+ arg_ctx = functionalization.outer_arguments(is_reverse=self.is_reverse)
+ capture_ctx = functionalization.capture_arguments(
+ self.g.view.func, is_reverse=self.is_reverse
+ )
+ full_ctx = arg_ctx + capture_ctx
+
+ assert self.g.view_copy is not None
+ call_bindings = functionalization.inner_arguments(
+ self.g.view_copy.func, is_reverse=self.is_reverse
+ )
+ maybe_index = functionalization.inner_call_index(self.g.view_copy.func)
+ call_exprs = [
+ e.expr for e in translate.translate(full_ctx, call_bindings, method=False)
+ ]
+ if not self.is_reverse and maybe_index is not None:
+ return f'{inner_call_name}({", ".join(call_exprs)})[{maybe_index.name}];'
+ else:
+ return f'{inner_call_name}({", ".join(call_exprs)});'
+
+ @staticmethod
+ def from_func(
+ g: NativeFunctionsViewGroup, *, is_reverse: bool
+ ) -> "FunctionalizationLambda":
+ return FunctionalizationLambda(g, is_reverse)
+
+
+@dataclass(frozen=True)
+class StructuredImplSignature:
+ g: NativeFunctionsGroup
+ name: str
+
+ def defn(self, name: Optional[str] = None) -> str:
+ args_str = ", ".join(a.defn() for a in self.arguments())
+ return f"TORCH_IMPL_FUNC({self.name})({args_str})"
+
+ def arguments(self) -> List[Binding]:
+ return structured.impl_arguments(self.g)
+
+
+# Helper functions
+
+
+def kernel_signature(
+ f: NativeFunction, backend_index: BackendIndex, *, prefix: str = ""
+) -> Union["NativeSignature", "DispatcherSignature"]:
+ # Note [External Backends Follow Dispatcher API]
+ # Kernel signatures for in-tree backends follow the "native" API,
+ # while kernels for out-of-tree backends follow the dispatcher API.
+ # See the comments in `native.py` for details, but historically there have been
+ # some small differences in schema convention between them and the Dispatcher API.
+ # Any differences that require translating between the two will results in a runtime cost,
+ # so we'd like to keep the differences as small as possible.
+ # With external backends, we'd like to enforce that they write their kernels with schemas
+ # that match the Dispatcher API directly, if they can.
+ meta = backend_index.get_kernel(f)
+ symint = meta is not None and meta.supports_symint()
+ if symint:
+ assert (
+ f.func.has_symint()
+ ), f"attempted to define symint kernel for {backend_index.dispatch_key} without SymInt in schema"
+ if backend_index.external:
+ return DispatcherSignature.from_schema(f.func, prefix=prefix, symint=symint)
+ else:
+ return NativeSignature(f.func, prefix=prefix, symint=symint)
+
+
+# Functions only, no types
+from torchgen.api import (
+ cpp,
+ dispatcher,
+ functionalization,
+ native,
+ structured,
+ translate,
+)
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/types/types_base.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/types/types_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f8561e49abe6bc4818ed388c882e07243c665cb
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/types/types_base.py
@@ -0,0 +1,270 @@
+"""
+Where should I add a new type? `types_base.py` vs `types.py`
+
+This file defines data model classes for torchgen typing system, as well as some base types such as int32_t.
+
+`types.py` defines ATen Tensor type and some c10 types, along with signatures that use these types.
+
+The difference between these two files, is `types_base.py` should be implementation-agnostic, meaning it shouldn't
+contain any type definition that is tight to a specific C++ library (e.g., ATen), so that it can be easily reused
+if we want to generate code for another C++ library.
+
+Add new types to `types.py` if these types are ATen/c10 related.
+Add new types to `types_base.py` if they are basic and not attached to ATen/c10.
+"""
+from abc import ABC, abstractmethod
+from dataclasses import dataclass
+from enum import auto, Enum
+from typing import List, Optional, Union
+
+from torchgen.model import Argument, SelfArgument, TensorOptionsArguments
+
+# An ArgName is just the str name of the argument in schema;
+# but in some special circumstances, we may add a little extra
+# context. The Enum SpecialArgName covers all of these cases;
+# grep for their construction sites to see when they can occur.
+
+
+class SpecialArgName(Enum):
+ possibly_redundant_memory_format = auto()
+
+
+ArgName = Union[str, SpecialArgName]
+
+
+# This class shouldn't be created directly; instead, use/create one of the singletons below.
+@dataclass(frozen=True)
+class BaseCppType:
+ ns: Optional[str]
+ name: str
+
+ def __str__(self) -> str:
+ if self.ns is None or self.ns == "":
+ return self.name
+ return f"{self.ns}::{self.name}"
+
+
+# The set of all non-templated, valid, fully-qualified names of C++ types that are used in the codegen.
+# Templated types get their own dataclass, mainly to make namespace parsing easier.
+byteT = BaseCppType("", "uint8_t")
+charT = BaseCppType("", "int8_t")
+shortT = BaseCppType("", "int16_t")
+# It would be more symmetric for this to be called intT, but it easy to mix
+# this up with JIT int (which is int64_t in C++), so we intentionally don't
+# define intT to make it obvious when you've stuffed it up
+int32T = BaseCppType("", "int32_t")
+longT = BaseCppType("", "int64_t")
+doubleT = BaseCppType("", "double")
+floatT = BaseCppType("", "float")
+boolT = BaseCppType("", "bool")
+voidT = BaseCppType("", "void")
+
+
+class CType(ABC):
+ @abstractmethod
+ def cpp_type(self, *, strip_ref: bool = False) -> str:
+ raise NotImplementedError
+
+ @abstractmethod
+ def cpp_type_registration_declarations(self) -> str:
+ raise NotImplementedError
+
+ @abstractmethod
+ def remove_const_ref(self) -> "CType":
+ return self
+
+
+@dataclass(frozen=True)
+class BaseCType(CType):
+ type: BaseCppType
+
+ def cpp_type(self, *, strip_ref: bool = False) -> str:
+ return str(self.type)
+
+ # For BC reasons, we don't want to introduce at:: namespaces to RegistrationDeclarations.yaml
+ # TODO: Kill this when we eventually remove it!
+ def cpp_type_registration_declarations(self) -> str:
+ return str(self.type).replace("at::", "")
+
+ def remove_const_ref(self) -> "CType":
+ return self
+
+
+@dataclass(frozen=True)
+class ConstRefCType(CType):
+ elem: "CType"
+
+ def cpp_type(self, *, strip_ref: bool = False) -> str:
+ if strip_ref:
+ return self.elem.cpp_type(strip_ref=strip_ref)
+ return f"const {self.elem.cpp_type()} &"
+
+ def cpp_type_registration_declarations(self) -> str:
+ return f"const {self.elem.cpp_type_registration_declarations()} &"
+
+ def remove_const_ref(self) -> "CType":
+ return self.elem.remove_const_ref()
+
+
+@dataclass(frozen=True)
+class VectorCType(CType):
+ elem: "CType"
+
+ def cpp_type(self, *, strip_ref: bool = False) -> str:
+ # Do not pass `strip_ref` recursively.
+ return f"::std::vector<{self.elem.cpp_type()}>"
+
+ def cpp_type_registration_declarations(self) -> str:
+ return f"::std::vector<{self.elem.cpp_type_registration_declarations()}>"
+
+ def remove_const_ref(self) -> "CType":
+ return VectorCType(self.elem.remove_const_ref())
+
+
+@dataclass(frozen=True)
+class ArrayCType(CType):
+ elem: "CType"
+ size: int
+
+ def cpp_type(self, *, strip_ref: bool = False) -> str:
+ # Do not pass `strip_ref` recursively.
+ return f"::std::array<{self.elem.cpp_type()},{self.size}>"
+
+ def cpp_type_registration_declarations(self) -> str:
+ return f"::std::array<{self.elem.cpp_type_registration_declarations()},{self.size}>"
+
+ def remove_const_ref(self) -> "CType":
+ return ArrayCType(self.elem.remove_const_ref(), self.size)
+
+
+@dataclass(frozen=True)
+class TupleCType(CType):
+ elems: List["CType"]
+
+ def cpp_type(self, *, strip_ref: bool = False) -> str:
+ # Do not pass `strip_ref` recursively.
+ return f'::std::tuple<{",".join([e.cpp_type() for e in self.elems])}>'
+
+ def cpp_type_registration_declarations(self) -> str:
+ return f'::std::tuple<{",".join([e.cpp_type_registration_declarations() for e in self.elems])}>'
+
+ def remove_const_ref(self) -> "CType":
+ return TupleCType([e.remove_const_ref() for e in self.elems])
+
+
+@dataclass(frozen=True)
+class MutRefCType(CType):
+ elem: "CType"
+
+ def cpp_type(self, *, strip_ref: bool = False) -> str:
+ if strip_ref:
+ return self.elem.cpp_type(strip_ref=strip_ref)
+ return f"{self.elem.cpp_type()} &"
+
+ def cpp_type_registration_declarations(self) -> str:
+ return f"{self.elem.cpp_type_registration_declarations()} &"
+
+ def remove_const_ref(self) -> "CType":
+ return self.elem.remove_const_ref()
+
+
+# A NamedCType is short for Named C++ semantic type. A NamedCType represents a C++ type, plus
+# semantic information about what it represents. For example, consider the
+# argument "bool pin_memory"; its normal C++ type is "bool", but its C++
+# semantic type also keeps track that this represents a "pin_memory"; you can't
+# just use a random other boolean in a context where you need a "pin_memory"!
+#
+
+
+@dataclass(frozen=True)
+class NamedCType:
+ name: ArgName
+ type: CType
+
+ def cpp_type(self, *, strip_ref: bool = False) -> str:
+ return self.type.cpp_type(strip_ref=strip_ref)
+
+ # For BC reasons, we don't want to introduce at:: namespaces to RegistrationDeclarations.yaml
+ # TODO: Kill this when we eventually remove it!
+ def cpp_type_registration_declarations(self) -> str:
+ return self.type.cpp_type_registration_declarations()
+
+ def remove_const_ref(self) -> "NamedCType":
+ return NamedCType(self.name, self.type.remove_const_ref())
+
+ def with_name(self, name: str) -> "NamedCType":
+ return NamedCType(name, self.type)
+
+
+# A binding represents any C++ binding site for a formal parameter.
+# We don't distinguish between binding sites for different APIs;
+# instead, all of the important distinctions are encoded in CType,
+# which you can use to figure out if a given Binding is appropriate
+# for use in another context. (See torchgen.api.translate)
+
+
+@dataclass(frozen=True)
+class Binding:
+ name: str
+ nctype: NamedCType
+ argument: Union[Argument, TensorOptionsArguments, SelfArgument]
+ # TODO: maybe don't represent default here
+ default: Optional[str] = None
+
+ def rename(self, name: str) -> "Binding":
+ return Binding(
+ name=name,
+ nctype=self.nctype,
+ argument=self.argument,
+ default=self.default,
+ )
+
+ @property
+ def type(self) -> str:
+ return self.nctype.cpp_type()
+
+ def no_default(self) -> "Binding":
+ return Binding(
+ name=self.name,
+ nctype=self.nctype,
+ default=None,
+ argument=self.argument,
+ )
+
+ def decl(self, *, func_ptr_cast: bool = False) -> str:
+ mb_default = ""
+ if self.default is not None:
+ mb_default = f"={self.default}"
+
+ # casting only needs to know the type
+ if func_ptr_cast:
+ return f"{self.type}"
+ else:
+ return f"{self.type} {self.name}{mb_default}"
+
+ # For BC reasons, we don't want to introduce at:: namespaces to RegistrationDeclarations.yaml
+ # TODO: Kill this when we eventually remove it!
+ def decl_registration_declarations(self) -> str:
+ type_s = self.nctype.cpp_type_registration_declarations()
+ mb_default = ""
+ if self.default is not None:
+ mb_default = f"={self.default}"
+ return f"{type_s} {self.name}{mb_default}"
+
+ def defn(self) -> str:
+ return f"{self.type} {self.name}"
+
+ def with_name(self, name: str) -> "Binding":
+ return Binding(
+ name=name, nctype=self.nctype, argument=self.argument, default=self.default
+ )
+
+
+# An Expr is a C++ expression. It has a C++ string representing its syntax,
+# as well as a CType saying what it provides.
+
+
+@dataclass(frozen=True)
+class Expr:
+ expr: str
+ type: NamedCType
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/ufunc.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/ufunc.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f044706068cf9af126070d8fa39cdca7da83b8b
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/ufunc.py
@@ -0,0 +1,209 @@
+from dataclasses import dataclass
+from typing import List, Optional
+
+import torchgen.api.types as api_types
+
+from torchgen.api import cpp, structured
+from torchgen.api.types import (
+ ArgName,
+ BaseCppType,
+ BaseCType,
+ Binding,
+ ConstRefCType,
+ CType,
+ NamedCType,
+ scalarT,
+)
+from torchgen.model import (
+ Argument,
+ BaseTy,
+ BaseType,
+ DispatchKey,
+ FunctionSchema,
+ NativeFunctionsGroup,
+ Type,
+)
+
+
+def schema_kernel_name(func: FunctionSchema, dispatch_key: DispatchKey) -> str:
+ assert func.is_out_fn(), "ufunc.kernel_name should only be invoked on out schemas"
+ return f"ufunc_{func.name.name}_{dispatch_key}"
+
+
+def kernel_name(g: NativeFunctionsGroup, dispatch_key: DispatchKey) -> str:
+ return schema_kernel_name(g.out.func, dispatch_key)
+
+
+# Tensors are omitted (as they are stored in TensorIterator), everything else is
+# passed along (technically, we can pass tensors along too, it just wastes
+# argument registers)
+#
+# NB: used for CPU only
+def dispatchstub_type(t: Type, *, binds: ArgName) -> Optional[NamedCType]:
+ # Dispatch stubs are always plain ints
+ r = cpp.valuetype_type(t, binds=binds, symint=False)
+ if r is not None:
+ return r
+
+ if t == BaseType(BaseTy.Scalar):
+ return NamedCType(binds, ConstRefCType(BaseCType(scalarT)))
+ elif t == BaseType(BaseTy.Tensor):
+ return None
+ else:
+ raise AssertionError(f"unrecognized type {repr(t)}")
+
+
+def opmath_type(scalar_t: BaseCppType) -> BaseCppType:
+ if scalar_t == api_types.scalar_t:
+ return api_types.opmath_t
+ raise NotImplementedError
+
+
+# NB: Tensors in constructor are stored in opmath_t, not scalar_t
+# because Tensor in constructor = its a scalar tensor partially applied =
+# it can be higher precision and we want to compute in that higher precision
+#
+# NB: CUDA only
+def ufunctor_ctor_type(t: Type, *, binds: ArgName, scalar_t: BaseCppType) -> NamedCType:
+ r = cpp.valuetype_type(t, binds=binds, symint=False)
+ if r is not None:
+ return r
+
+ if t == BaseType(BaseTy.Scalar):
+ return NamedCType(binds, BaseCType(opmath_type(scalar_t)))
+ elif t == BaseType(BaseTy.Tensor):
+ return NamedCType(binds, BaseCType(opmath_type(scalar_t)))
+ else:
+ raise AssertionError(f"unrecognized type {repr(t)}")
+
+
+# Only Tensors ever get passed directly to operator()
+#
+# NB: CUDA only
+# (Actually, this works for CPU too)
+def ufunctor_apply_type(
+ t: Type, *, binds: ArgName, scalar_t: BaseCppType
+) -> NamedCType:
+ if t == BaseType(BaseTy.Tensor):
+ return NamedCType(binds, BaseCType(scalar_t))
+ else:
+ raise AssertionError(f"unrecognized type {repr(t)}")
+
+
+# The actual ufunc template function the user writes. Everything here
+# is done in the computation type. compute_t is opmath_t in CUDA and scalar_t
+# in CPU
+def ufunc_type(t: Type, *, binds: ArgName, compute_t: CType) -> NamedCType:
+ r = cpp.valuetype_type(t, binds=binds, symint=False)
+ if r is not None:
+ return r
+
+ if t == BaseType(BaseTy.Scalar):
+ return NamedCType(binds, compute_t)
+ elif t == BaseType(BaseTy.Tensor):
+ return NamedCType(binds, compute_t)
+ else:
+ raise AssertionError(f"unrecognized type {repr(t)}")
+
+
+def ufunctor_ctor_argument(a: Argument, scalar_t: BaseCppType) -> Binding:
+ return Binding(
+ nctype=ufunctor_ctor_type(a.type, binds=a.name, scalar_t=scalar_t),
+ name=a.name,
+ default=None,
+ argument=a,
+ )
+
+
+def ufunctor_apply_argument(a: Argument, scalar_t: BaseCppType) -> Binding:
+ return Binding(
+ nctype=ufunctor_apply_type(a.type, binds=a.name, scalar_t=scalar_t),
+ name=a.name,
+ default=None,
+ argument=a,
+ )
+
+
+def ufunc_argument(a: Argument, compute_t: CType) -> Binding:
+ return Binding(
+ nctype=ufunc_type(a.type, binds=a.name, compute_t=compute_t),
+ name=a.name,
+ default=None,
+ argument=a,
+ )
+
+
+@dataclass(frozen=True)
+class UfunctorBindings:
+ ctor: List[Binding]
+ apply: List[Binding]
+
+
+# ufunctors are a CUDA-only concept representing functors that take some of
+# their arguments on a host-side constructor, and the rest in the device-side
+# apply. E.g.,
+#
+# template
+# struct CUDAFunctorOnSelf_add {
+# using opmath_t = at::opmath_type;
+# opmath_t other_;
+# opmath_t alpha_;
+# CUDAFunctorOnSelf_add(opmath_t other, opmath_t alpha) : other_(other), alpha_(alpha) {}
+# __device__ scalar_t operator()(scalar_t self) {
+# return ufunc::add(static_cast(self), other_, alpha_);
+# }
+# };
+#
+# The ctor refers to the constructor CUDAFunctorOnSelf_add, while apply refers
+# to the operator() definition
+def ufunctor_arguments(
+ g: NativeFunctionsGroup, *, scalar_tensor_idx: Optional[int], scalar_t: BaseCppType
+) -> UfunctorBindings:
+ ctor = []
+ apply = []
+ for a in g.functional.func.arguments.flat_non_out:
+ if a.type.is_tensor_like():
+ if scalar_tensor_idx == 0:
+ # put it in the ctor anyway
+ ctor.append(ufunctor_ctor_argument(a, scalar_t=scalar_t))
+ scalar_tensor_idx = None
+ else:
+ if scalar_tensor_idx is not None:
+ scalar_tensor_idx -= 1
+ apply.append(ufunctor_apply_argument(a, scalar_t=scalar_t))
+ else:
+ ctor.append(ufunctor_ctor_argument(a, scalar_t=scalar_t))
+ assert scalar_tensor_idx is None
+ return UfunctorBindings(ctor=ctor, apply=apply)
+
+
+# ufuncs are the inner loop template functions that you wrote in ufunc/add.h
+# which do the actual computation in question. E.g.,
+#
+# template
+# C10_HOST_DEVICE T add(T self, T other, T alpha) __ubsan_ignore_undefined__ {
+# return self + alpha * other;
+# }
+#
+# In this file, we refer to T as compute_t which is bound by caller
+def ufunc_arguments(g: NativeFunctionsGroup, *, compute_t: CType) -> List[Binding]:
+ return [
+ ufunc_argument(a, compute_t=compute_t)
+ for a in g.functional.func.arguments.flat_non_out
+ ]
+
+
+# Stubs are the DispatchStub trampolines that CPU kernels use to get to their
+# vectorized versions. E.g.,
+#
+# using structured_binary_fn_alpha = void(*)(TensorIteratorBase&, const Scalar& alpha);
+# DECLARE_DISPATCH(structured_binary_fn_alpha, add_stub);
+def stub_arguments(g: NativeFunctionsGroup) -> List[Binding]:
+ # stubs drop all tensor arguments (they are implicit in the TensorIterator
+ # argument and keep everything else)
+ return [
+ r
+ for a in g.out.func.arguments.flat_non_out
+ if not a.type.is_tensor_like()
+ for r in structured.argument(a)
+ ]
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/unboxing.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/unboxing.py
new file mode 100644
index 0000000000000000000000000000000000000000..df4430c49b745753dc83b2115a7f4d8c000190d0
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/unboxing.py
@@ -0,0 +1,248 @@
+from typing import List, Tuple
+
+from torchgen.api import cpp
+from torchgen.api.types import Binding, CppSignatureGroup, CType
+from torchgen.model import (
+ Argument,
+ BaseTy,
+ BaseType,
+ ListType,
+ NativeFunction,
+ OptionalType,
+ Type,
+)
+
+# This file generates the code for unboxing wrappers, i.e., the glue logic to unbox a boxed operator and convert the
+# ivalues from stack to correct arguments to the unboxed kernel, based on corresponding JIT schema. This codegen is
+# an alternative way to generate unboxing wrappers similar to the existing C++ metaprogramming approach but gets the
+# job done statically. These generated unboxing wrappers will be useful under the scenario where we need to register
+# a fixed set of operators known at compile time and thus can save some time in runtime initialization phase.
+#
+# Here's an example on how the codegen works:
+#
+# - Function Schema (source of truth)
+#
+# aten::empty.names(int[] size, *, Dimname[]? names,
+# ScalarType? dtype=None, Layout? layout=None,
+# Device? device=None, bool? pin_memory=None,
+# MemoryFormat? memory_format=None) -> Tensor
+# - Argument Conversion
+# Generates C++ code to convert an ivalue (from stack) to its underlying C++ type.
+# - int[] size
+# ```cpp
+# const c10::List size_list_in = (std::move(peek(stack, 0, 7))).toList();
+#
+# std::vector size_vec;
+# for (c10::IValue size_elem: size_list_in) {
+# int64_t size_base = size_elem.to();
+# size_vec.push_back(size_base);
+# }
+# at::ArrayRef size_list_out(size_vec);
+# ~~~~~~~~~~~~~ <-- The converted argument from ivalues in the stack.
+# Will be passed to unboxed kernel.
+# ```
+# - Dimname[]? names
+# ```cpp
+# c10::optional names_opt = (std::move(peek(stack, 1, 7))).toOptional();
+# c10::optional> names_opt_out;
+# if (names_opt.has_value()) {
+# ~~~~~~~~~~~ <-- Unwrapping optional shell
+# const c10::IValue names_opt_in = names_opt.value();
+# const c10::List names_list_in = names_opt_in.toList();
+#
+# std::vector names_vec;
+# for (c10::IValue names_elem: names_list_in) {
+# ~~~~~~~~~~~~~~~~~~~~~~~~~ <-- Unrolling list, then convert elements one by one.
+# at::Dimname names_base = names_elem.to();
+# names_vec.push_back(names_base);
+# }
+# at::ArrayRef names_list_out(names_vec);
+#
+# names_opt_out = c10::optional>(names_list_out);
+# } else {
+# names_opt_out = c10::optional>();
+# }
+# ```
+# - ScalarType? dtype (similarly for the rest of the arguments)
+# ```cpp
+# c10::optional dtype_opt = (std::move(peek(stack, 2, 7))).toOptional();
+# c10::optional dtype_opt_out;
+# if (dtype_opt.has_value()) {
+# const c10::IValue dtype_opt_in = dtype_opt.value();
+# at::ScalarType dtype_base = dtype_opt_in.to();
+# ~~~~~~~~~~~~~~~~~~~~ <-- For base types, convert ivalue to it
+# directly using ".to()" API.
+# dtype_opt_out = c10::optional(dtype_base);
+# } else {
+# dtype_opt_out = c10::optional();
+# }
+# ```
+#
+# - Unboxed Kernel Call
+# ```cpp
+# auto result_ = torch::empty(
+# size_list_out,
+# names_opt_out,
+# options,
+# memory_format_opt_out
+# );
+# ```
+#
+# - Push Result Back to Stack
+# ```cpp
+# drop(stack, 7);
+# pack(stack, std::move(result_));
+# ```
# Separator used to join generated C++ statements: newline plus a tab keeps
# nested statements indented when they are spliced into a surrounding block.
connector = "\n\t"
+
+
# Return unboxing function name for a NativeFunction
def name(f: NativeFunction) -> str:
    """Return the unambiguous operator name used for *f*'s unboxing wrapper."""
    operator_name = f.func.name
    return operator_name.unambiguous_name()
+
+
# Convert all the arguments in a NativeFunction to C++ code
def convert_arguments(f: NativeFunction) -> Tuple[List[Binding], List[str]]:
    """Produce the unboxing code for every argument of *f*.

    Returns a pair of:
      (1) bindings renamed to the unboxed C++ variables, to be passed to the
          unboxed kernel call, and
      (2) the C++ lines that peek each IValue off the stack and convert it.
    """
    # we need the 'self' argument so method needs to be False
    args = (
        CppSignatureGroup.from_native_function(f, method=False)
        .most_faithful_signature()
        .arguments()
    )
    total = len(args)
    # First peek every IValue off the stack into a named local.
    code_list = [
        f"c10::IValue {binding.name} = std::move(peek(stack, {pos}, {total}));"
        for pos, binding in enumerate(args)
    ]
    code_list.append("")
    binding_list: List[Binding] = []
    for arg in args:
        # expecting only Argument
        if not isinstance(arg.argument, Argument):
            raise Exception(
                f"Unexpected argument type, expecting `Argument` but got {arg}"
            )
        argument: Argument = arg.argument
        unboxed_name, _, code, decl = argumenttype_ivalue_convert(
            argument.type,
            argument.name,
            mutable=argument.is_write,
        )
        # Declarations must precede the conversion code that uses them.
        code_list.extend(decl)
        code_list.extend(code)
        binding_list.append(arg.with_name(unboxed_name))
    return binding_list, code_list
+
+
# Takes in the type, name and mutability corresponding to an argument, and generates a tuple of:
# (1) the C++ code necessary to unbox the argument
# (2) A Binding corresponding to the newly created unboxed variable, including variable name and its CType
def argumenttype_ivalue_convert(
    t: Type, arg_name: str, *, mutable: bool = False
) -> Tuple[str, CType, List[str], List[str]]:
    """Dispatch on the JIT type and emit the matching IValue conversion.

    Returns (out_name, ctype, code lines, declaration lines).  Raises for any
    type that is not a base, optional, or list type.
    """
    # Unboxing is for mobile, which doesn't care about SymInts
    ctype = cpp.argumenttype_type(
        t=t, mutable=mutable, binds=arg_name, symint=False
    ).type

    if isinstance(t, BaseType):
        out_name = f"{arg_name}_base"
        code, decl = _gen_code_base_type(
            arg_name=arg_name, out_name=out_name, ctype=ctype
        )
        return out_name, ctype, code, decl

    if isinstance(t, OptionalType):
        out_name = f"{arg_name}_opt_out"
        code, decl = _gen_code_optional_type(
            arg_name=arg_name, out_name=out_name, t=t, ctype=ctype
        )
        return out_name, ctype, code, decl

    if isinstance(t, ListType):
        out_name = f"{arg_name}_list_out"
        code, decl = _gen_code_list_type(
            arg_name=arg_name, out_name=out_name, t=t, ctype=ctype
        )
        return out_name, ctype, code, decl

    raise Exception(f"Cannot handle type {t}. arg_name: {arg_name}")
+
+
def _gen_code_base_type(
    arg_name: str, out_name: str, ctype: CType
) -> Tuple[List[str], List[str]]:
    """Emit a direct `.to<T>()` conversion for a base (non-container) type.

    Base types need no declarations, so the second element is always empty.
    """
    cpp_ty = ctype.cpp_type(strip_ref=True)
    return [f"{cpp_ty} {out_name} = {arg_name}.to<{cpp_ty}>();"], []
+
+
def _gen_code_optional_type(
    arg_name: str, out_name: str, t: OptionalType, ctype: CType
) -> Tuple[List[str], List[str]]:
    """Emit C++ that unboxes an optional-typed argument.

    The generated code unwraps the IValue into a ``c10::optional<c10::IValue>``,
    and only when a value is present recursively converts the inner IValue with
    the element type's own conversion before re-wrapping it in the target
    optional C++ type; otherwise the output is default-constructed (empty).

    Returns (code lines, declaration lines); the declarations come from the
    recursive element conversion and must be emitted in the enclosing scope.
    """
    in_name = f"{arg_name}_opt_in"
    # Recursively build the conversion for the wrapped element type.
    res_name, _, res_code, decl = argumenttype_ivalue_convert(t.elem, in_name)
    # NOTE: split("\n") (not splitlines) keeps the leading/trailing blank
    # entries the template produces, matching the emitter's expectations.
    return (
        f"""
c10::optional<c10::IValue> {arg_name}_opt = {arg_name}.toOptional<c10::IValue>();
{ctype.cpp_type(strip_ref=True)} {out_name};
if ({arg_name}_opt.has_value()) {{
    const c10::IValue {in_name} = {arg_name}_opt.value();
    {connector.join(res_code)}
    {out_name} = {ctype.cpp_type(strip_ref=True)}({res_name});
}} else {{
    {out_name} = {ctype.cpp_type(strip_ref=True)}();
}}
        """.split(
            "\n"
        ),
        decl,
    )
+
+
def _gen_code_list_type(
    arg_name: str, out_name: str, t: ListType, ctype: CType
) -> Tuple[List[str], List[str]]:
    """Emit C++ that unboxes a list-typed argument.

    Three shapes of output, depending on the element type:
      * fixed-size bool lists (e.g. ``bool[4]``) convert via ``as_array``;
      * optional elements (e.g. ``Tensor?[]``) fill a ``c10::List`` directly;
      * everything else fills a ``std::vector`` and wraps it in an ArrayRef.

    Returns (code lines, declaration lines).  In the ArrayRef case the vector
    declaration is returned separately so the caller can hoist it into the
    enclosing scope, keeping the ArrayRef's backing data alive.
    """
    in_name = f"{arg_name}_list_in"
    elem_name = f"{arg_name}_elem"
    code = [f"const c10::List<c10::IValue> {in_name} = {arg_name}.toList();"]
    res_name, res_ctype, res_code, decl = argumenttype_ivalue_convert(t.elem, elem_name)
    # handle list type with size, e.g., bool[4]
    if isinstance(t.elem, BaseType) and t.elem.name == BaseTy.bool and t.size:
        code.extend(
            f"""
{ctype.cpp_type(strip_ref=True)} {out_name} = as_array<{res_ctype.cpp_type(strip_ref=True)}, {t.size}>({in_name});
            """.split(
                "\n"
            )
        )
    # we have to use c10::List for optional element. e.g., Tensor?[] -> c10::List<c10::optional<at::Tensor>>
    elif isinstance(t.elem, OptionalType):
        code.extend(
            f"""
{ctype.cpp_type(strip_ref=True)} {out_name};
for (c10::IValue {elem_name}: {in_name}) {{
    {connector.join(res_code)}
    {out_name}.push_back({res_name});
}}
            """.split(
                "\n"
            )
        )
    else:
        # use ArrayRef as default.
        vec_name = arg_name + "_vec"
        # need to bring vector instantiation out of scope so that ArrayRef has valid data
        decl.append(f"std::vector<{res_ctype.cpp_type(strip_ref=True)}> {vec_name};")
        code.extend(
            f"""
for (c10::IValue {elem_name}: {in_name}) {{
    {connector.join(res_code)}
    {vec_name}.push_back({res_name});
}}
{ctype.cpp_type(strip_ref=True)} {out_name}({vec_name});
            """.split(
                "\n"
            )
        )
    return code, decl
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/ATen/templates/DispatchKeyNativeFunctions.h b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/ATen/templates/DispatchKeyNativeFunctions.h
new file mode 100644
index 0000000000000000000000000000000000000000..b45a17b5922f8a0b76e0237616914ce9969efca5
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/ATen/templates/DispatchKeyNativeFunctions.h
@@ -0,0 +1,19 @@
+#pragma once
+
+// an external backend might generate file within its code tree
+// and check all the source files within the tree with clang-format.
+// so, disable it since the backend might have a different config.
+// clang-format off
+
+// ${generated_comment}
+
+#include
+
+${namespace_prologue}
+
+struct ${class_name} {
+
+${dispatch_declarations}
+
+};
+${namespace_epilogue}
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/ATen/templates/Function.h b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/ATen/templates/Function.h
new file mode 100644
index 0000000000000000000000000000000000000000..5bbd742aae0ad0933d22790715599b4309efca8a
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/ATen/templates/Function.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// ${generated_comment}
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+${static_dispatch_ops_headers}
+
+${operator_includes}
+
+namespace at {
+
+${function_definitions}
+
+}
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/ATen/templates/NativeFunction.h b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/ATen/templates/NativeFunction.h
new file mode 100644
index 0000000000000000000000000000000000000000..4f70db62a4c6429ee8e782fb13fb0ae6ffc5d957
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/ATen/templates/NativeFunction.h
@@ -0,0 +1,17 @@
+#pragma once
+
+// ${generated_comment}
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+${extra_includes}
+
+${native_function_declarations}
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/ATen/templates/NativeMetaFunctions.h b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/ATen/templates/NativeMetaFunctions.h
new file mode 100644
index 0000000000000000000000000000000000000000..89989e2121c9aa34a4583205c3541a04edd36700
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/ATen/templates/NativeMetaFunctions.h
@@ -0,0 +1,19 @@
+#pragma once
+
+// ${generated_comment}
+
+#include
+#include
+#include
+#include
+
+${NativeMetaFunctions_includes}
+
+namespace at {
+
+namespace meta {
+
+${NativeMetaFunctions_declarations}
+
+} // namespace meta
+} // namespace at
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/ATen/templates/Operators.h b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/ATen/templates/Operators.h
new file mode 100644
index 0000000000000000000000000000000000000000..e74b96ef3d5c6b6d50fe63eac4dca51f0655daa5
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/ATen/templates/Operators.h
@@ -0,0 +1,74 @@
+#pragma once
+
+// ${generated_comment}
+
+#ifdef TORCH_ASSERT_NO_OPERATORS
+#error This change adds a dependency on native_functions.yaml, \
+ meaning the file will need to be re-compiled every time an operator \
+ is changed or added. Consider if your change would be better placed in \
+ another file, or if a more specific header might achieve the same goal. \
+ See NOTE: [Tensor vs. TensorBase]
+#endif
+
+#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
+#error This change adds a dependency on all pytorch operators, meaning the \
+ file will need to be re-compiled every time an operator is changed or added. \
+ Consider including a specific operator from \
+ and see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
+#endif
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+${Operators_includes}
+
+// Extension writers: do you write wrapper functions? Are you frustrated with
+// resolving overloads of operators? Are you frustrated with dealing with
+// pointer-to-methods and resolving overloads of pointer-to-methods?? Look no
+// further, this is the utility for you.
+//
+// Given an operator schema: aten::op.overload(...
+//
+// Use ATEN_FN2(op, overload) to get a *function* version of the operator
+// that is guaranteed to not be overloaded. This means that you can safely
+// decltype(&ATEN_FN2(op, overload)) it. NB: the 2 means this macro takes 2 args.
+//
+// Given an operator schema without an overload name: aten::op(...
+//
+// Use ATEN_FN(op) to get an unambiguous *function* version of the operator.
+//
+// There is some interesting behavior for out= operations.
+// ATEN_FN2(sin, out) gives a function that is *faithful* to the schema;
+// that is, the order of arguments is exactly what it looks like in the schema.
+
+#define ATEN_FN2(op_name, overload) at::_ops::op_name##_##overload::call
+#define ATEN_FN(op_name) at::_ops::op_name::call
+
+// Separately, ATEN_OP(op) and ATEN_OP2(op, overload) define a class containing compile-time
+// metadata about a given aten operator.
+// Notable data on the class includes:
+// - ATEN_OP2(add, Tensor)::name // returns the string name: "add"
+// - ATEN_OP2(add, Tensor)::overload_name // returns the string overload name: "Tensor"
+// - ATEN_OP2(add, Tensor)::schema // returns the C++ schema type: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &)
+// - ATEN_OP2(add, Tensor)::schema_str // returns the string jit type: "add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"
+
+#define ATEN_OP2(op_name, overload) at::_ops::op_name##_##overload
+#define ATEN_OP(op_name) at::_ops::op_name
+
+// WARNING: Please do not call any of the ops in the _ops namespace directly.
+// Use the ATEN_FN macros. We do not guarantee stability of the naming
+// scheme for the functions in at::_ops
+
+// See Note [The ATen Operators API] for details of the at::_ops namespace
+
+namespace at {
+namespace _ops {
+${Operators_declarations}
+} // namespace _ops
+} // namespace at
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/ATen/templates/RedispatchFunctions.cpp b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/ATen/templates/RedispatchFunctions.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..58102bd97fca4eaef477818b0b0a92b7995e38b1
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/ATen/templates/RedispatchFunctions.cpp
@@ -0,0 +1,15 @@
+// ${generated_comment}
+
+#include
+#include
+
+#include
+#include
+
+namespace at {
+
+namespace redispatch {
+ ${function_redispatch_definitions}
+} // namespace redispatch
+
+} // namespace at
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/ATen/templates/RegisterDispatchDefinitions.ini b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/ATen/templates/RegisterDispatchDefinitions.ini
new file mode 100644
index 0000000000000000000000000000000000000000..3bf7f9b1bb32112a126e88a2e23e47c91e58dd9c
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/ATen/templates/RegisterDispatchDefinitions.ini
@@ -0,0 +1,24 @@
+${ns_prologue}
+
+// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
+// ambiguity with conflicting identifiers that may have been defined in
+// at namespace already.
+namespace {
+
+${dispatch_helpers}
+
+${dispatch_anonymous_definitions}
+
+${static_init_dispatch_registrations}
+
+} // anonymous namespace
+
+${deferred_dispatch_registrations}
+
+namespace ${dispatch_namespace} {
+
+${dispatch_namespaced_definitions}
+
+} // namespace ${dispatch_namespace}
+
+${ns_epilogue}
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/ATen/templates/TensorBody.h b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/ATen/templates/TensorBody.h
new file mode 100644
index 0000000000000000000000000000000000000000..010f12d4cfbce98804d42ed7028e686ce7ba1174
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/ATen/templates/TensorBody.h
@@ -0,0 +1,753 @@
+#pragma once
+
+#ifdef TORCH_ASSERT_NO_OPERATORS
+#error This change adds a dependency on native_functions.yaml, \
+ meaning the file will need to be re-compiled every time an operator \
+ is changed or added. Consider if your change would be better placed in \
+ another file, or if a more specific header might achieve the same goal. \
+ See NOTE: [Tensor vs. TensorBase]
+#endif
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+#include
+
+namespace c10{
+template class List;
+template class IListRef;
+}
+namespace at {
+struct Generator;
+struct Type;
+class DeprecatedTypeProperties;
+class Tensor;
+} // namespace at
+namespace at {
+namespace indexing {
+struct TensorIndex;
+} // namespace indexing
+} // namespace at
+
+namespace torch { namespace autograd {
+
+struct Node;
+
+}} // namespace torch::autograd
+
+namespace at {
+
+class OptionalTensorRef;
+class TensorRef;
+class Tensor;
+using TensorList = ArrayRef;
+using ITensorList = c10::IListRef;
+
+using Stream = c10::Stream;
+
+// Tensor is a "generic" object holding a pointer to the underlying TensorImpl object, which
+// has an embedded reference count. In this way, Tensor is similar to boost::intrusive_ptr.
+//
+// For example:
+//
+// void func(Tensor a) {
+// Tensor b = a;
+// ...
+// }
+//
+// In this example, when we say Tensor b = a, we are creating a new object that points to the
+// same underlying TensorImpl, and bumps its reference count. When b goes out of scope, the
+// destructor decrements the reference count by calling release() on the TensorImpl it points to.
+// The existing constructors, operator overloads, etc. take care to implement the correct semantics.
+//
+// Note that Tensor can also be NULL, i.e. it is not associated with any underlying TensorImpl, and
+// special care must be taken to handle this.
+class TORCH_API Tensor: public TensorBase {
+ protected:
+ // Create a Tensor with a +0 reference count. Special care must be
+ // taken to avoid decrementing this reference count at destruction
+ // time. Intended to support MaybeOwnedTraits.
+ explicit Tensor(unsafe_borrow_t, const TensorBase& rhs): TensorBase(unsafe_borrow_t{}, rhs) {}
+ friend MaybeOwnedTraits;
+ friend OptionalTensorRef;
+ friend TensorRef;
+
+ public:
+ Tensor() = default;
+ // This constructor should not be used by end users and is an implementation
+ // detail invoked by autogenerated code.
+ explicit Tensor(
+ c10::intrusive_ptr tensor_impl)
+ : TensorBase(std::move(tensor_impl)) {}
+ Tensor(const Tensor &tensor) = default;
+ Tensor(Tensor &&tensor) = default;
+
+ // Implicitly move-constructible from TensorBase, but must be explicit to increase refcount
+ explicit Tensor(const TensorBase &base): TensorBase(base) {}
+ /*implicit*/ Tensor(TensorBase &&base): TensorBase(std::move(base)) {}
+
+ // Creates a new wrapper from TensorImpl. Intentionally a free method because
+ // it should be used with care. Checks necessary invariants
+ static Tensor wrap_tensor_impl(
+ c10::intrusive_ptr tensor_impl) {
+ return TensorBase::wrap_tensor_impl(std::move(tensor_impl));
+ }
+
+ Tensor contiguous(MemoryFormat memory_format=MemoryFormat::Contiguous) const {
+ return TensorBase::contiguous(memory_format);
+ }
+
+ Tensor conj() const {
+ if (!this->is_complex()) {
+ return *this;
+ }
+
+ switch (this->layout()) {
+ case at::kSparse:
+ case at::kSparseCsr:
+ case at::kSparseCsc:
+ case at::kSparseBsr:
+ case at::kSparseBsc:
+ return this->conj_physical();
+ default:
+ return this->_conj();
+ }
+ }
+
+ // Aliased by Dimname overloads, so need explicit using
+ using TensorBase::size;
+ using TensorBase::sym_size;
+ using TensorBase::stride;
+
+ /// Should be used if *this can reasonably be expected to be contiguous and
+ /// performance is important.
+ /// Compared to contiguous, it saves a reference count
+ /// increment/decrement if *this is already contiguous, at the cost
+ /// in all cases of an extra pointer of stack usage, an extra branch
+ /// to access, and an extra branch at destruction time.
+ c10::MaybeOwned expect_contiguous(MemoryFormat memory_format=MemoryFormat::Contiguous) const &;
+
+ // Use .contiguous() instead. Trying to borrow from a prvalue Tensor
+ // will only lead to trouble and dangling references.
+ c10::MaybeOwned expect_contiguous(MemoryFormat memory_format=MemoryFormat::Contiguous) && = delete;
+
+ // The following overloads are very intruiging. Consider the following
+ // program:
+ //
+ // x[1] = 3;
+ //
+ // We would expect that the first entry of x is written to 3. But how can we
+ // actually achieve this? x[1] evaluates to a tensor...
+ //
+ // The answer is, using a ref-qualifier. x[1] is an rvalue, which cannot be
+ // (profitably) assigned to in the traditional sense, so we overload
+ // assignment to mean, "Actually, copy 3 into the tensor data." This is done
+ // with an rvalue-reference ref-qualified overload (the methods with && at the
+ // end of their type.)
+ //
+ // There's one more fly in the ointment: We also want
+ //
+ // Tensor x = y;
+ //
+ // to work, and we want it NOT to copy. So we need a traditional operator=
+ // overload. But we MUST specify a mutable lvalue ref-qualifier, to
+ // disambiguate the traditional overload from the rvalue-reference
+ // ref-qualified overload. Otherwise, it will be ambiguous, because
+ // a non ref-qualified method is eligible for all situations.
+
+ // Unfortunately, we have to write these constructors out manually
+ // to work around an MSVC bug:
+ // error C2580: 'at::Tensor &at::Tensor::operator =(const at::Tensor &) &':
+ // multiple versions of a defaulted special member functions are not allowed
+ // Tensor& operator=(const Tensor&) & = default;
+ // Tensor& operator=(Tensor&&) & = default;
+
+ // Also MSVC will wrongly issue the following warning with the aforementioned fix
+ // warning C4522: 'at::Tensor': multiple assignment operators specified
+ // Let's just skip the warning.
+ //
+ // TODO: temporarily disabled
+
+ Tensor& operator=(const TensorBase& x) & {
+ impl_ = x.getIntrusivePtr();
+ return *this;
+ }
+ Tensor& operator=(TensorBase&& x) & noexcept {
+ impl_ = x.unsafeReleaseIntrusivePtr();
+ return *this;
+ }
+
+ Tensor& operator=(const Tensor &x) & {
+ return operator=(static_cast(x));
+ }
+ Tensor& operator=(Tensor &&x) & noexcept {
+ return operator=(static_cast(x));
+ }
+
+ Tensor& operator=(const Scalar &v) && {
+ return fill_(v);
+ }
+ Tensor& operator=(const Tensor &rhs) && {
+ return copy_(rhs);
+ }
+ Tensor& operator=(Tensor&& rhs) && {
+ return copy_(rhs);
+ }
+
+ C10_DEPRECATED_MESSAGE("Tensor.type() is deprecated. Instead use Tensor.options(), which in many cases (e.g. in a constructor) is a drop-in replacement. If you were using data from type(), that is now available from Tensor itself, so instead of tensor.type().scalar_type(), use tensor.scalar_type() instead and instead of tensor.type().backend() use tensor.device().")
+ DeprecatedTypeProperties & type() const {
+ return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
+ dispatchKeyToBackend(legacyExtractDispatchKey(key_set())),
+ scalar_type());
+ }
+
+ Tensor toType(ScalarType t) const {
+ return to(options().dtype(t), /*non_blocking*/ false, /*copy*/ false);
+ }
+
+ // TODO: Deprecate me
+ Tensor toBackend(Backend b) const {
+ return to(options().device(backendToDeviceType(b)).layout(layout_from_backend(b)), /*non_blocking*/ false, /*copy*/ false);
+ }
+
+ C10_DEPRECATED_MESSAGE("Tensor.is_variable() is deprecated; everything is a variable now. (If you want to assert that variable has been appropriately handled already, use at::impl::variable_excluded_from_dispatch())")
+ bool is_variable() const noexcept {
+ return !at::impl::variable_excluded_from_dispatch();
+ }
+
+ template
+ C10_DEPRECATED_MESSAGE("Tensor.data() is deprecated. Please use Tensor.data_ptr() instead.")
+ T * data() const {
+ return data_ptr();
+ }
+
+ template
+ T item() const;
+
+ template class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
+ C10_DEPRECATED_MESSAGE("packed_accessor is deprecated, use packed_accessor32 or packed_accessor64 instead")
+ GenericPackedTensorAccessor packed_accessor() const & {
+ return generic_packed_accessor();
+ }
+ template class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
+ C10_DEPRECATED_MESSAGE("packed_accessor is deprecated, use packed_accessor32 or packed_accessor64 instead")
+ GenericPackedTensorAccessor packed_accessor() && = delete;
+
+ Tensor operator~() const {
+ return bitwise_not();
+ }
+ Tensor operator-() const {
+ return neg();
+ }
+ Tensor& operator+=(const Tensor & other) {
+ return add_(other);
+ }
+ Tensor& operator+=(const Scalar & other) {
+ return add_(other);
+ }
+ Tensor& operator-=(const Tensor & other) {
+ return sub_(other);
+ }
+ Tensor& operator-=(const Scalar & other) {
+ return sub_(other);
+ }
+ Tensor& operator*=(const Tensor & other) {
+ return mul_(other);
+ }
+ Tensor& operator*=(const Scalar & other) {
+ return mul_(other);
+ }
+ Tensor& operator/=(const Tensor & other) {
+ return div_(other);
+ }
+ Tensor& operator/=(const Scalar & other) {
+ return div_(other);
+ }
+ Tensor& operator&=(const Tensor & other) {
+ return bitwise_and_(other);
+ }
+ Tensor& operator|=(const Tensor & other) {
+ return bitwise_or_(other);
+ }
+ Tensor& operator^=(const Tensor & other) {
+ return bitwise_xor_(other);
+ }
+ Tensor operator[](const Scalar & index) const {
+ if (!index.isIntegral(false)) {
+ TORCH_CHECK_INDEX(false, "Can only index tensors with integral scalars");
+ }
+ return this->operator[](index.toLong());
+ }
+ Tensor operator[](const Tensor & index) const {
+ // These properties are checked in the Scalar constructor, but we already
+ // check them here to provide more useful diagnostics for the user.
+ if (!index.defined()) {
+ TORCH_CHECK_INDEX(false, "Can only index with tensors that are defined");
+ }
+ if (index.dim() != 0) {
+ TORCH_CHECK_INDEX(false,
+ "Can only index with tensors that are scalars (zero-dim)");
+ }
+ // The Scalar(Tensor) constructor is explicit, so we need to call it.
+ return this->operator[](index.item());
+ }
+ Tensor operator[](int64_t index) const {
+ return select(0, index);
+ }
+
+ Tensor index(ArrayRef indices) const;
+ Tensor index(std::initializer_list indices) const;
+
+ Tensor & index_put_(ArrayRef indices, Tensor const & rhs);
+ Tensor & index_put_(ArrayRef indices, const Scalar& v);
+ Tensor & index_put_(std::initializer_list indices, Tensor const & rhs);
+ Tensor & index_put_(std::initializer_list indices, const Scalar& v);
+
+ Tensor cpu() const {
+ return to(options().device(c10::DeviceType::CPU), /*non_blocking*/ false, /*copy*/ false);
+ }
+
+ // TODO: The Python version also accepts arguments
+ Tensor cuda() const {
+ return to(options().device(c10::DeviceType::CUDA), /*non_blocking*/ false, /*copy*/ false);
+ }
+
+ Tensor hip() const {
+ return to(options().device(c10::DeviceType::HIP), /*non_blocking*/ false, /*copy*/ false);
+ }
+
+ Tensor ve() const {
+ return to(options().device(c10::DeviceType::VE), /*non_blocking*/ false, /*copy*/ false);
+ }
+
+ Tensor vulkan() const {
+ return to(options().device(c10::DeviceType::Vulkan), /*non_blocking*/ false, /*copy*/ false);
+ }
+
+ Tensor metal() const {
+ return to(options().device(c10::DeviceType::Metal), /*non_blocking*/ false, /*copy*/ false);
+ }
+
+ Tensor meta() const {
+ return to(options().device(c10::DeviceType::Meta), /*non_blocking*/ false, /*copy*/ false);
+ }
+
+ // ~~~~~ Autograd API ~~~~~
+
+ /// \fn bool is_leaf() const;
+ ///
+ /// All Tensors that have `requires_grad()` which is ``false`` will be leaf Tensors by convention.
+ ///
+ /// For Tensors that have `requires_grad()` which is ``true``, they will be leaf Tensors if they were
+ /// created by the user. This means that they are not the result of an operation and so
+ /// `grad_fn()` is `nullptr`.
+ ///
+ /// Only leaf Tensors will have their `grad()` populated during a call to `backward()`.
+ /// To get `grad()` populated for non-leaf Tensors, you can use `retain_grad()`.
+ ///
+ /// Example:
+ /// @code
+ /// auto a = torch::rand(10, torch::requires_grad());
+ /// std::cout << a.is_leaf() << std::endl; // prints `true`
+ ///
+ /// auto b = torch::rand(10, torch::requires_grad()).to(torch::kCUDA);
+ /// std::cout << b.is_leaf() << std::endl; // prints `false`
+ /// // b was created by the operation that cast a cpu Tensor into a cuda Tensor
+ ///
+ /// auto c = torch::rand(10, torch::requires_grad()) + 2;
+ /// std::cout << c.is_leaf() << std::endl; // prints `false`
+ /// // c was created by the addition operation
+ ///
+ /// auto d = torch::rand(10).cuda();
+ /// std::cout << d.is_leaf() << std::endl; // prints `true`
+ /// // d does not require gradients and so has no operation creating it (that is tracked by the autograd engine)
+ ///
+ /// auto e = torch::rand(10).cuda().requires_grad_();
+ /// std::cout << e.is_leaf() << std::endl; // prints `true`
+ /// // e requires gradients and has no operations creating it
+ ///
+ /// auto f = torch::rand(10, torch::device(torch::kCUDA).requires_grad(true));
+ /// std::cout << f.is_leaf() << std::endl; // prints `true`
+ /// // f requires grad, has no operation creating it
+ /// @endcode
+
+ /// \fn void backward(const Tensor & gradient={}, c10::optional retain_graph=c10::nullopt, bool create_graph=false, c10::optional inputs=c10::nullopt) const;
+ ///
+ /// Computes the gradient of current tensor with respect to graph leaves.
+ ///
+ /// The graph is differentiated using the chain rule. If the tensor is
+ /// non-scalar (i.e. its data has more than one element) and requires
+ /// gradient, the function additionally requires specifying ``gradient``.
+ /// It should be a tensor of matching type and location, that contains
+ /// the gradient of the differentiated function w.r.t. this Tensor.
+ ///
+ /// This function accumulates gradients in the leaves - you might need to
+ /// zero them before calling it.
+ ///
+ /// \param gradient Gradient w.r.t. the
+ /// tensor. If it is a tensor, it will be automatically converted
+ /// to a Tensor that does not require grad unless ``create_graph`` is True.
+ /// None values can be specified for scalar Tensors or ones that
+ /// don't require grad. If a None value would be acceptable then
+ /// this argument is optional.
+ /// \param retain_graph If ``false``, the graph used to compute
+ /// the grads will be freed. Note that in nearly all cases setting
+ /// this option to True is not needed and often can be worked around
+ /// in a much more efficient way. Defaults to the value of
+ /// ``create_graph``.
+ /// \param create_graph If ``true``, graph of the derivative will
+ /// be constructed, allowing to compute higher order derivative
+ /// products. Defaults to ``false``.
+ /// \param inputs Inputs w.r.t. which the gradient will be accumulated into
+ /// ``at::Tensor::grad``. All other Tensors will be ignored. If not
+ /// provided, the gradient is accumulated into all the leaf Tensors
+ /// that were used to compute the current tensor.
+ /// When inputs are provided and a given input is not a leaf,
+ /// the current implementation will call its grad_fn (even though it is not strictly needed to get these gradients).
+ /// It is an implementation detail on which the user should not rely.
+ /// See https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for more details.
+ void backward(const Tensor & gradient={}, c10::optional retain_graph=c10::nullopt, bool create_graph=false, c10::optional inputs=c10::nullopt) const {
+ // NB: Adding this wrapper to _backward here because we'd like our
+ // 'backwards' api to accept the 'inputs' argument optionally. Since code gen
+ // currently does not support optional of TensorList our approach is to replace
+ // backward in native_functions.yaml with _backward and call it here instead.
+ if (inputs.has_value()) {
+ TORCH_CHECK(inputs.value().size() > 0, "'inputs' argument to backward cannot be empty")
+ this->_backward(inputs.value(), gradient, retain_graph, create_graph);
+ } else {
+ this->_backward({}, gradient, retain_graph, create_graph);
+ }
+ }
+
+ /// \fn Tensor detach() const;
+ ///
+ /// Returns a new Tensor, detached from the current graph.
+ /// The result will never require gradient.
+
+ /// \fn Tensor & detach_() const;
+ ///
+ /// Detaches the Tensor from the graph that created it, making it a leaf.
+ /// Views cannot be detached in-place.
+
+ /// \fn void retain_grad() const;
+ ///
+ /// Enables this Tensor to have its :attr:`grad` populated during
+ /// :func:`backward`. This is a no-op for leaf tensors.
+
+ /// \fn bool retains_grad() const;
+ ///
+ /// Is ``true`` if this Tensor is non-leaf and its :attr:`grad` is enabled to be
+ /// populated during :func:`backward`, ``false`` otherwise.
+
+ const Tensor& set_requires_grad(bool requires_grad) const {
+ TensorBase::set_requires_grad(requires_grad);
+ return *this;
+ }
+
+ /// Return a mutable reference to the gradient. This is conventionally
+ /// used as `t.grad() = x` to set a gradient to a completely new tensor.
+ /// Note that this function works with a non-const Tensor and is not
+ /// thread safe.
+ Tensor& mutable_grad() const {
+ return impl_->mutable_grad();
+ }
+
+ /// This function returns an undefined tensor by default and returns a defined tensor
+ /// the first time a call to `backward()` computes gradients for this Tensor.
+ /// The attribute will then contain the gradients computed and future calls
+ /// to `backward()` will accumulate (add) gradients into it.
+ const Tensor& grad() const {
+ const Tensor& maybe_grad = impl_->grad();
+ if (!is_leaf() && !retains_grad() && !maybe_grad.defined()) {
+ TORCH_WARN(
+ "The .grad attribute of a Tensor that is not a leaf Tensor is being accessed. Its .grad "
+ "attribute won't be populated during autograd.backward(). If you indeed want the .grad "
+ "field to be populated for a non-leaf Tensor, use .retain_grad() on the non-leaf Tensor. "
+ "If you access the non-leaf Tensor by mistake, make sure you access the leaf Tensor "
+ "instead. See github.com/pytorch/pytorch/pull/30531 for more informations.");
+ }
+ return maybe_grad;
+ }
+
+ // The Forward AD API functions below are low level and are not to be used by end
+ // users who should use the API provided in torch/csrc/autograd.h
+
+ /// This function returns the forward gradient for this Tensor at the given level.
+ const Tensor& _fw_grad(uint64_t level) const {
+ return impl_->_fw_grad(level, *this);
+ }
+
+ /// This function can be used to set the value of the forward grad.
+ /// Note that the given new_grad might not be used directly if it has different
+ /// metadata (size/stride/storage offset) compared to this Tensor. In that case,
+ /// new_grad content will be copied into a new Tensor
+ void _set_fw_grad(const TensorBase& new_grad, uint64_t level, bool is_inplace_op) const {
+ impl_->_set_fw_grad(new_grad, *this, level, is_inplace_op);
+ }
+
+
+ // STOP. Thinking of adding a method here, which only makes use
+ // of other ATen methods? Define it in native_functions.yaml.
+
+ //example
+ //Tensor * add(Tensor & b);
+ ${tensor_method_declarations}
+
+ // Special C++ only overloads for std()-like functions (See gh-40287)
+ // These are needed because int -> bool conversion takes precedence over int -> IntArrayRef
+ // So, for example std(0) would select the std(unbiased=False) overload
+
+ Tensor var(int dim) const {
+ return var(IntArrayRef{dim});
+ }
+
+ Tensor std(int dim) const {
+ return std(IntArrayRef{dim});
+ }
+
+ // We changed .dtype() to return a TypeMeta in #12766. Ideally, we want the
+ // at::kDouble and its friends to be TypeMeta's, but that hasn't happened yet.
+ // Before that change, we make this method to maintain BC for C++ usage like
+ // `x.to(y.dtype)`.
+ // TODO: remove following two after at::kDouble and its friends are TypeMeta's.
+ inline Tensor to(caffe2::TypeMeta type_meta, bool non_blocking=false, bool copy=false) const {
+ return this->to(/*scalar_type=*/typeMetaToScalarType(type_meta), non_blocking, copy);
+ }
+ inline Tensor to(Device device, caffe2::TypeMeta type_meta, bool non_blocking=false, bool copy=false) const {
+ return this->to(device, /*scalar_type=*/typeMetaToScalarType(type_meta), non_blocking, copy);
+ }
+
+ template
+ decltype(auto) m(F func, Args&&... params) const {
+ return func(*this, std::forward(params)...);
+ }
+
+ /// NOTE: This is similar to the legacy `.data()` function on `Variable`, and is intended
+ /// to be used from functions that need to access the `Variable`'s equivalent `Tensor`
+ /// (i.e. `Tensor` that shares the same storage and tensor metadata with the `Variable`).
+ ///
+ /// One notable difference with the legacy `.data()` function is that changes to the
+ /// returned `Tensor`'s tensor metadata (e.g. sizes / strides / storage / storage_offset)
+ /// will not update the original `Variable`, due to the fact that this function
+ /// shallow-copies the `Variable`'s underlying TensorImpl.
+ at::Tensor tensor_data() const {
+ return TensorBase::tensor_data();
+ }
+
+ /// NOTE: `var.variable_data()` in C++ has the same semantics as `tensor.data`
+ /// in Python, which create a new `Variable` that shares the same storage and
+ /// tensor metadata with the original `Variable`, but with a completely new
+ /// autograd history.
+ ///
+ /// NOTE: If we change the tensor metadata (e.g. sizes / strides /
+ /// storage / storage_offset) of a variable created from `var.variable_data()`, those
+ /// changes will not update the original variable `var`. In `.variable_data()`, we set
+ /// `allow_tensor_metadata_change_` to false to make such changes explicitly illegal,
+ /// in order to prevent users from changing metadata of `var.variable_data()`
+ /// and expecting the original variable `var` to also be updated.
+ at::Tensor variable_data() const {
+ return TensorBase::variable_data();
+ }
+
+ // Hooks
+ //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ template
+ using hook_return_void_t = std::enable_if_t>::value, unsigned>;
+ template
+ using hook_return_var_t = std::enable_if_t, Tensor>::value, unsigned>;
+
+ /// Registers a backward hook.
+ ///
+ /// The hook will be called every time a gradient with respect to the Tensor is computed.
+ /// The hook should have one of the following signature:
+ /// ```
+ /// hook(Tensor grad) -> Tensor
+ /// ```
+ /// ```
+ /// hook(Tensor grad) -> void
+ /// ```
+ /// The hook should not modify its argument, but it can optionally return a new gradient
+ /// which will be used in place of `grad`.
+ ///
+ /// This function returns the index of the hook in the list which can be used to remove hook.
+ ///
+ /// Example:
+ /// @code
+ /// auto v = torch::tensor({0., 0., 0.}, torch::requires_grad());
+ /// auto h = v.register_hook([](torch::Tensor grad){ return grad * 2; }); // double the gradient
+ /// v.backward(torch::tensor({1., 2., 3.}));
+ /// // This prints:
+ /// // ```
+ /// // 2
+ /// // 4
+ /// // 6
+ /// // [ CPUFloatType{3} ]
+ /// // ```
+ /// std::cout << v.grad() << std::endl;
+ /// v.remove_hook(h); // removes the hook
+ /// @endcode
+ template
+ hook_return_void_t register_hook(T&& hook) const;
+ template
+ hook_return_var_t register_hook(T&& hook) const;
+
+ // Variable methods
+ //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Tensor data() const {
+ return TensorBase::data();
+ }
+
+ void _backward(TensorList inputs, const c10::optional& gradient, c10::optional keep_graph, bool create_graph) const;
+
+ const Tensor& requires_grad_(bool _requires_grad=true) const {
+ TensorBase::requires_grad_(_requires_grad);
+ return *this;
+ }
+};
+
+namespace detail {
+// Helper creator for the Tensor class which doesn't require the user to pass
+// in an intrusive_ptr; instead it just converts the argument passed to the
+// requested intrusive_ptr type.
+template
+Tensor make_tensor(Args&&... args) {
+ return Tensor(c10::make_intrusive(std::forward(args)...));
+}
+
+} // namespace detail
+
+} // namespace at
+
+
+namespace at {
+${tensor_method_definitions}
+} // namespace at
+
+
+namespace c10 {
+template <>
+struct MaybeOwnedTraits {
+ using owned_type = at::Tensor;
+ using borrow_type = at::Tensor;
+
+ static borrow_type createBorrow(const owned_type& from) {
+ // NOTE: this can be implemented without the special
+ // unsafe_borrow_t Tensor constructor as
+ //
+ // return borrow_type(c10::intrusive_ptr::reclaim(from.unsafeGetTensorImpl()));
+ //
+ // but that hurts inlining due to the nullptr check in the
+ // Tensor(c10::intrusive_ptr<...>) constructor. We already know
+ // that from.impl_ isn't null because from is a valid Tensor, so
+ // we needn't do the check again. (using __builtin_assume can
+ // avoid this, but wouldn't be portable to MSVC.)
+ return borrow_type(borrow_type::unsafe_borrow_t{}, from);
+ }
+
+ static void assignBorrow(borrow_type& lhs, const borrow_type& rhs) {
+ lhs.unsafeReleaseTensorImpl();
+ // See above note: this can be implemented with public API
+ // similarly to createBorrow(), but that would hurt inlining.
+ lhs = borrow_type(borrow_type::unsafe_borrow_t{}, rhs);
+ }
+
+ static void destroyBorrow(borrow_type& toDestroy) {
+ toDestroy.unsafeReleaseTensorImpl(); // "leak" it, but it was already +0.
+ }
+
+ static const owned_type& referenceFromBorrow(const borrow_type& borrow) {
+ return borrow;
+ }
+
+ static const owned_type* pointerFromBorrow(const borrow_type& borrow) {
+ return &borrow;
+ }
+
+ static bool debugBorrowIsValid(const borrow_type& /*borrow*/) {
+ return true;
+ }
+};
+
+template <>
+struct ExclusivelyOwnedTraits {
+ using repr_type = at::Tensor;
+ using pointer_type = at::Tensor*;
+ using const_pointer_type = const at::Tensor*;
+
+ static repr_type nullRepr() {
+ return at::Tensor();
+ }
+
+ template
+ static repr_type createInPlace(Args&&... args) {
+ return at::Tensor(std::forward(args)...);
+ }
+
+ static repr_type moveToRepr(at::Tensor&& x) {
+ return std::move(x);
+ }
+
+ static void destroyOwned(at::Tensor& x) {
+ return ExclusivelyOwnedTraits::destroyOwned(x);
+ }
+
+ static at::Tensor take(at::Tensor& x) {
+ return std::move(x);
+ }
+
+ static pointer_type getImpl(repr_type& x) {
+ return &x;
+ }
+
+ static const_pointer_type getImpl(const repr_type& x) {
+ return &x;
+ }
+};
+} // namespace c10
+
+namespace at {
+
+inline c10::MaybeOwned borrow_from_optional_tensor(
+ const c10::optional& opt) {
+ return opt.has_value()
+ ? c10::MaybeOwned::borrowed(*opt)
+ : c10::MaybeOwned::owned(std::in_place);
+}
+
+inline c10::MaybeOwned Tensor::expect_contiguous(MemoryFormat memory_format) const & {
+ if (is_contiguous(memory_format)) {
+ return c10::MaybeOwned::borrowed(*this);
+ } else {
+ return c10::MaybeOwned::owned(__dispatch_contiguous(memory_format));
+ }
+}
+} // namespace at
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/ATen/templates/TensorMethods.cpp b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/ATen/templates/TensorMethods.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..76439040eda45ec34f627298260e7bf081fd728c
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/ATen/templates/TensorMethods.cpp
@@ -0,0 +1,61 @@
+#include
+#include
+
+#include
+
+namespace at {
+
+namespace {
+
+// Verifies the requested type is the same as the Tensor's type.
+void check_type(const TensorBase& tensor, ScalarType type, c10::string_view type_name) {
+ TORCH_CHECK(
+ tensor.scalar_type() == type
+ || (isQIntType(tensor.scalar_type())
+ && toUnderlying(tensor.scalar_type()) == type),
+ "expected scalar type ", type_name, " but found ", tensor.scalar_type());
+}
+
+} // namespace
+
+#define DEFINE_CAST(T, name) \
+ template <> \
+ TORCH_API const T* TensorBase::const_data_ptr() const { \
+ check_type(*this, ScalarType::name, #name); \
+ return this->unsafeGetTensorImpl()->data_ptr_impl(); \
+ } \
+ \
+ template <> \
+ TORCH_API const T* TensorBase::const_data_ptr() const { \
+ check_type(*this, ScalarType::name, #name); \
+ return this->unsafeGetTensorImpl()->data_ptr_impl>(); \
+ } \
+ \
+ template <> \
+ TORCH_API T* TensorBase::mutable_data_ptr() const { \
+ check_type(*this, ScalarType::name, #name); \
+ return this->unsafeGetTensorImpl()->mutable_data_ptr_impl(); \
+ } \
+ \
+ template <> \
+ TORCH_API T* TensorBase::data_ptr() const { \
+ return mutable_data_ptr(); \
+ } \
+
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_CAST)
+ AT_FORALL_QINT_TYPES(DEFINE_CAST)
+ DEFINE_CAST(uint16_t, UInt16)
+ DEFINE_CAST(uint32_t, UInt32)
+ DEFINE_CAST(uint64_t, UInt64)
+ #undef DEFINE_CAST
+
+ #define DEFINE_ITEM(T, name) \
+ template <> \
+ TORCH_API T Tensor::item() const { \
+ return item().to##name(); \
+ }
+
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_ITEM)
+ #undef DEFINE_ITEM
+
+ } //namespace at
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/autograd/BUILD.bazel b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/autograd/BUILD.bazel
new file mode 100644
index 0000000000000000000000000000000000000000..d1a0db360d230fe0f027c19869c6307f17010503
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/autograd/BUILD.bazel
@@ -0,0 +1,4 @@
+load("//:tools/bazel.bzl", "rules")
+load(":build.bzl", "define_targets")
+
+define_targets(rules = rules)
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/autograd/__pycache__/__init__.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/autograd/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..90f3669f4c393913b2786596e1b73c32a3f53e0c
Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/autograd/__pycache__/__init__.cpython-311.pyc differ
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/autograd/__pycache__/context.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/autograd/__pycache__/context.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fc2201d40720978d2ef82007c729c35ee8629504
Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/autograd/__pycache__/context.cpython-311.pyc differ
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/autograd/__pycache__/gen_autograd.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/autograd/__pycache__/gen_autograd.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6b817d65f3037bffc4a2096a0c7bad7b6ffb850d
Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/autograd/__pycache__/gen_autograd.cpython-311.pyc differ
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/autograd/__pycache__/gen_variable_factories.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/autograd/__pycache__/gen_variable_factories.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e6e51fee10ef986515cc0573131cb118a92d9913
Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/autograd/__pycache__/gen_variable_factories.cpython-311.pyc differ
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/autograd/deprecated.yaml b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/autograd/deprecated.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..52f7ec50b6ea15dae1c3308358997950d295c924
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/packaged/autograd/deprecated.yaml
@@ -0,0 +1,134 @@
+# Deprecated function signatures. These are exposed in Python, but not included
+# in the error message suggestions.
+
+- name: add(Tensor self, Scalar alpha, Tensor other) -> Tensor
+ aten: add(self, other, alpha)
+
+- name: add_(Tensor(a!) self, Scalar alpha, Tensor other) -> Tensor(a!)
+ aten: add_(self, other, alpha)
+
+- name: add(Tensor self, Scalar alpha, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+ aten: add_out(out, self, other, alpha)
+
+- name: addbmm(Scalar beta, Tensor self, Scalar alpha, Tensor batch1, Tensor batch2) -> Tensor
+ aten: addbmm(self, batch1, batch2, beta, alpha)
+
+- name: addbmm_(Scalar beta, Tensor(a!) self, Scalar alpha, Tensor batch1, Tensor batch2) -> Tensor(a!)
+ aten: addbmm_(self, batch1, batch2, beta, alpha)
+
+- name: addbmm(Scalar beta, Tensor self, Scalar alpha, Tensor batch1, Tensor batch2, *, Tensor(a!) out) -> Tensor(a!)
+ aten: addbmm_out(out, self, batch1, batch2, beta, alpha)
+
+- name: addbmm(Scalar beta, Tensor self, Tensor batch1, Tensor batch2) -> Tensor
+ aten: addbmm(self, batch1, batch2, beta, 1)
+
+- name: addbmm_(Scalar beta, Tensor(a!) self, Tensor batch1, Tensor batch2) -> Tensor(a!)
+ aten: addbmm_(self, batch1, batch2, beta, 1)
+
+- name: addbmm(Scalar beta, Tensor self, Tensor batch1, Tensor batch2, *, Tensor(a!) out) -> Tensor(a!)
+ aten: addbmm_out(out, self, batch1, batch2, beta, 1)
+
+- name: addcdiv(Tensor self, Scalar value, Tensor tensor1, Tensor tensor2) -> Tensor
+ aten: addcdiv(self, tensor1, tensor2, value)
+
+- name: addcdiv_(Tensor(a!) self, Scalar value, Tensor tensor1, Tensor tensor2) -> Tensor(a!)
+ aten: addcdiv_(self, tensor1, tensor2, value)
+
+- name: addcdiv(Tensor self, Scalar value, Tensor tensor1, Tensor tensor2, *, Tensor(a!) out) -> Tensor(a!)
+ aten: addcdiv_out(out, self, tensor1, tensor2, value)
+
+- name: addcmul(Tensor self, Scalar value, Tensor tensor1, Tensor tensor2) -> Tensor
+ aten: addcmul(self, tensor1, tensor2, value)
+
+- name: addcmul_(Tensor(a!) self, Scalar value, Tensor tensor1, Tensor tensor2) -> Tensor(a!)
+ aten: addcmul_(self, tensor1, tensor2, value)
+
+- name: addcmul(Tensor self, Scalar value, Tensor tensor1, Tensor tensor2, *, Tensor(a!) out) -> Tensor(a!)
+ aten: addcmul_out(out, self, tensor1, tensor2, value)
+
+- name: addmm(Scalar beta, Tensor self, Scalar alpha, Tensor mat1, Tensor mat2) -> Tensor
+ aten: addmm(self, mat1, mat2, beta, alpha)
+
+- name: addmm_(Scalar beta, Tensor(a!) self, Scalar alpha, Tensor mat1, Tensor mat2) -> Tensor(a!)
+ aten: addmm_(self, mat1, mat2, beta, alpha)
+
+- name: addmm(Scalar beta, Tensor self, Scalar alpha, Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
+ aten: addmm_out(out, self, mat1, mat2, beta, alpha)
+
+- name: addmm(Scalar beta, Tensor self, Tensor mat1, Tensor mat2) -> Tensor
+ aten: addmm(self, mat1, mat2, beta, 1)
+
+- name: addmm_(Scalar beta, Tensor(a!) self, Tensor mat1, Tensor mat2) -> Tensor(a!)
+ aten: addmm_(self, mat1, mat2, beta, 1)
+
+- name: addmm(Scalar beta, Tensor self, Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
+ aten: addmm_out(out, self, mat1, mat2, beta, 1)
+
+- name: sspaddmm(Scalar beta, Tensor self, Scalar alpha, Tensor mat1, Tensor mat2) -> Tensor
+ aten: sspaddmm(self, mat1, mat2, beta, alpha)
+
+- name: sspaddmm(Scalar beta, Tensor self, Tensor mat1, Tensor mat2) -> Tensor
+ aten: sspaddmm(self, mat1, mat2, beta, 1)
+
+- name: addmv(Scalar beta, Tensor self, Scalar alpha, Tensor mat, Tensor vec) -> Tensor
+ aten: addmv(self, mat, vec, beta, alpha)
+
+- name: addmv_(Scalar beta, Tensor(a!) self, Scalar alpha, Tensor mat, Tensor vec) -> Tensor(a!)
+ aten: addmv_(self, mat, vec, beta, alpha)
+
+- name: addmv(Scalar beta, Tensor self, Scalar alpha, Tensor mat, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)
+ aten: addmv_out(out, self, mat, vec, beta, alpha)
+
+- name: addmv(Scalar beta, Tensor self, Tensor mat, Tensor vec) -> Tensor
+ aten: addmv(self, mat, vec, beta, 1)
+
+- name: addmv_(Scalar beta, Tensor(a!) self, Tensor mat, Tensor vec) -> Tensor(a!)
+ aten: addmv_(self, mat, vec, beta, 1)
+
+- name: addmv(Scalar beta, Tensor self, Tensor mat, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)
+ aten: addmv_out(out, self, mat, vec, beta, 1)
+
+- name: addr(Scalar beta, Tensor self, Scalar alpha, Tensor vec1, Tensor vec2) -> Tensor
+ aten: addr(self, vec1, vec2, beta, alpha)
+
+- name: addr_(Scalar beta, Tensor(a!) self, Scalar alpha, Tensor vec1, Tensor vec2) -> Tensor(a!)
+ aten: addr_(self, vec1, vec2, beta, alpha)
+
+- name: addr(Scalar beta, Tensor self, Scalar alpha, Tensor vec1, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
+ aten: addr_out(out, self, vec1, vec2, beta, alpha)
+
+- name: addr(Scalar beta, Tensor self, Tensor vec1, Tensor vec2) -> Tensor
+ aten: addr(self, vec1, vec2, beta, 1)
+
+- name: addr_(Scalar beta, Tensor(a!) self, Tensor vec1, Tensor vec2) -> Tensor(a!)
+ aten: addr_(self, vec1, vec2, beta, 1)
+
+- name: addr(Scalar beta, Tensor self, Tensor vec1, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
+ aten: addr_out(out, self, vec1, vec2, beta, 1)
+
+- name: baddbmm(Scalar beta, Tensor self, Scalar alpha, Tensor batch1, Tensor batch2) -> Tensor
+ aten: baddbmm(self, batch1, batch2, beta, alpha)
+
+- name: baddbmm_(Scalar beta, Tensor(a!) self, Scalar alpha, Tensor batch1, Tensor batch2) -> Tensor(a!)
+ aten: baddbmm_(self, batch1, batch2, beta, alpha)
+
+- name: baddbmm(Scalar beta, Tensor self, Scalar alpha, Tensor batch1, Tensor batch2, *, Tensor(a!) out) -> Tensor(a!)
+ aten: baddbmm_out(out, self, batch1, batch2, beta, alpha)
+
+- name: baddbmm(Scalar beta, Tensor self, Tensor batch1, Tensor batch2) -> Tensor
+ aten: baddbmm(self, batch1, batch2, beta, 1)
+
+- name: baddbmm_(Scalar beta, Tensor(a!) self, Tensor batch1, Tensor batch2) -> Tensor(a!)
+ aten: baddbmm_(self, batch1, batch2, beta, 1)
+
+- name: baddbmm(Scalar beta, Tensor self, Tensor batch1, Tensor batch2, *, Tensor(a!) out) -> Tensor(a!)
+ aten: baddbmm_out(out, self, batch1, batch2, beta, 1)
+
+- name: sub(Tensor self, Scalar alpha, Tensor other) -> Tensor
+ aten: sub(self, other, alpha)
+
+- name: sub_(Tensor(a!) self, Scalar alpha, Tensor other) -> Tensor(a!)
+ aten: sub_(self, other, alpha)
+
+- name: sub(Tensor self, Scalar alpha, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+ aten: sub_out(out, self, other, alpha)
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/wheel/__main__.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/wheel/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..0be74537494dc2cf18c2e3b318ffd22b886aef6b
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/wheel/__main__.py
@@ -0,0 +1,23 @@
+"""
+Wheel command line tool (enable python -m wheel syntax)
+"""
+
+from __future__ import annotations
+
+import sys
+
+
+def main(): # needed for console script
+ if __package__ == "":
+ # To be able to run 'python wheel-0.9.whl/wheel':
+ import os.path
+
+ path = os.path.dirname(os.path.dirname(__file__))
+ sys.path[0:0] = [path]
+ import wheel.cli
+
+ sys.exit(wheel.cli.main())
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/wheel/macosx_libfile.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/wheel/macosx_libfile.py
new file mode 100644
index 0000000000000000000000000000000000000000..abdfc9eda1d4c1f33270c155e1610fe73bd54263
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/wheel/macosx_libfile.py
@@ -0,0 +1,482 @@
+"""
+This module contains functions to analyze dynamic library
+headers to extract system information
+
+Currently only for MacOSX
+
+A library file on a macOS system starts with a Mach-O or Fat field.
+These can be distinguished by the first 32 bits, which are called the magic number.
+Proper values of the magic number carry the suffix _MAGIC. The suffix _CIGAM means
+reversed byte order.
+Both fields can occur in two variants: 32- and 64-bit.
+
+A FAT field indicates that this library contains several versions of the library
+(typically for different architecture types). It contains
+information on where the Mach-O headers start.
+
+Each section starting with a Mach-O header contains one library
+(so if the file starts with this field, it contains only one version).
+
+After the Mach-O field there are section fields.
+Each of them starts with two fields:
+cmd - magic number for this command
+cmdsize - total size occupied by this section information.
+
+In this case only sections LC_VERSION_MIN_MACOSX (for macosx 10.13 and earlier)
+and LC_BUILD_VERSION (for macosx 10.14 and newer) are interesting,
+because they contain information about the minimal system version.
+
+Important remarks:
+- For fat files this implementation looks for the maximum version number.
+ It does not check whether it is 32- or 64-bit and does not compare it with the currently built package.
+ So it is possible to falsely report a higher version than needed.
+- All structure signatures are taken from macOS header files.
+- I think that the binary format will be more stable than `otool` output,
+ and if Apple introduces some changes, both implementations will need to be updated.
+- The system compile will set the deployment target no lower than
+ 11.0 for arm64 builds. For "Universal 2" builds use the x86_64 deployment
+ target when the arm64 target is 11.0.
+"""
+
+from __future__ import annotations
+
+import ctypes
+import os
+import sys
+from io import BufferedIOBase
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from typing import Union
+
+ StrPath = Union[str, os.PathLike[str]]
+
+"""here the needed const and struct from mach-o header files"""
+
+FAT_MAGIC = 0xCAFEBABE
+FAT_CIGAM = 0xBEBAFECA
+FAT_MAGIC_64 = 0xCAFEBABF
+FAT_CIGAM_64 = 0xBFBAFECA
+MH_MAGIC = 0xFEEDFACE
+MH_CIGAM = 0xCEFAEDFE
+MH_MAGIC_64 = 0xFEEDFACF
+MH_CIGAM_64 = 0xCFFAEDFE
+
+LC_VERSION_MIN_MACOSX = 0x24
+LC_BUILD_VERSION = 0x32
+
+CPU_TYPE_ARM64 = 0x0100000C
+
+mach_header_fields = [
+ ("magic", ctypes.c_uint32),
+ ("cputype", ctypes.c_int),
+ ("cpusubtype", ctypes.c_int),
+ ("filetype", ctypes.c_uint32),
+ ("ncmds", ctypes.c_uint32),
+ ("sizeofcmds", ctypes.c_uint32),
+ ("flags", ctypes.c_uint32),
+]
+"""
+struct mach_header {
+ uint32_t magic; /* mach magic number identifier */
+ cpu_type_t cputype; /* cpu specifier */
+ cpu_subtype_t cpusubtype; /* machine specifier */
+ uint32_t filetype; /* type of file */
+ uint32_t ncmds; /* number of load commands */
+ uint32_t sizeofcmds; /* the size of all the load commands */
+ uint32_t flags; /* flags */
+};
+typedef integer_t cpu_type_t;
+typedef integer_t cpu_subtype_t;
+"""
+
+mach_header_fields_64 = mach_header_fields + [("reserved", ctypes.c_uint32)]
+"""
+struct mach_header_64 {
+ uint32_t magic; /* mach magic number identifier */
+ cpu_type_t cputype; /* cpu specifier */
+ cpu_subtype_t cpusubtype; /* machine specifier */
+ uint32_t filetype; /* type of file */
+ uint32_t ncmds; /* number of load commands */
+ uint32_t sizeofcmds; /* the size of all the load commands */
+ uint32_t flags; /* flags */
+ uint32_t reserved; /* reserved */
+};
+"""
+
+fat_header_fields = [("magic", ctypes.c_uint32), ("nfat_arch", ctypes.c_uint32)]
+"""
+struct fat_header {
+ uint32_t magic; /* FAT_MAGIC or FAT_MAGIC_64 */
+ uint32_t nfat_arch; /* number of structs that follow */
+};
+"""
+
+fat_arch_fields = [
+ ("cputype", ctypes.c_int),
+ ("cpusubtype", ctypes.c_int),
+ ("offset", ctypes.c_uint32),
+ ("size", ctypes.c_uint32),
+ ("align", ctypes.c_uint32),
+]
+"""
+struct fat_arch {
+ cpu_type_t cputype; /* cpu specifier (int) */
+ cpu_subtype_t cpusubtype; /* machine specifier (int) */
+ uint32_t offset; /* file offset to this object file */
+ uint32_t size; /* size of this object file */
+ uint32_t align; /* alignment as a power of 2 */
+};
+"""
+
+fat_arch_64_fields = [
+ ("cputype", ctypes.c_int),
+ ("cpusubtype", ctypes.c_int),
+ ("offset", ctypes.c_uint64),
+ ("size", ctypes.c_uint64),
+ ("align", ctypes.c_uint32),
+ ("reserved", ctypes.c_uint32),
+]
+"""
+struct fat_arch_64 {
+ cpu_type_t cputype; /* cpu specifier (int) */
+ cpu_subtype_t cpusubtype; /* machine specifier (int) */
+ uint64_t offset; /* file offset to this object file */
+ uint64_t size; /* size of this object file */
+ uint32_t align; /* alignment as a power of 2 */
+ uint32_t reserved; /* reserved */
+};
+"""
+
+segment_base_fields = [("cmd", ctypes.c_uint32), ("cmdsize", ctypes.c_uint32)]
+"""base for reading segment info"""
+
+segment_command_fields = [
+ ("cmd", ctypes.c_uint32),
+ ("cmdsize", ctypes.c_uint32),
+ ("segname", ctypes.c_char * 16),
+ ("vmaddr", ctypes.c_uint32),
+ ("vmsize", ctypes.c_uint32),
+ ("fileoff", ctypes.c_uint32),
+ ("filesize", ctypes.c_uint32),
+ ("maxprot", ctypes.c_int),
+ ("initprot", ctypes.c_int),
+ ("nsects", ctypes.c_uint32),
+ ("flags", ctypes.c_uint32),
+]
+"""
+struct segment_command { /* for 32-bit architectures */
+ uint32_t cmd; /* LC_SEGMENT */
+ uint32_t cmdsize; /* includes sizeof section structs */
+ char segname[16]; /* segment name */
+ uint32_t vmaddr; /* memory address of this segment */
+ uint32_t vmsize; /* memory size of this segment */
+ uint32_t fileoff; /* file offset of this segment */
+ uint32_t filesize; /* amount to map from the file */
+ vm_prot_t maxprot; /* maximum VM protection */
+ vm_prot_t initprot; /* initial VM protection */
+ uint32_t nsects; /* number of sections in segment */
+ uint32_t flags; /* flags */
+};
+typedef int vm_prot_t;
+"""
+
+segment_command_fields_64 = [
+ ("cmd", ctypes.c_uint32),
+ ("cmdsize", ctypes.c_uint32),
+ ("segname", ctypes.c_char * 16),
+ ("vmaddr", ctypes.c_uint64),
+ ("vmsize", ctypes.c_uint64),
+ ("fileoff", ctypes.c_uint64),
+ ("filesize", ctypes.c_uint64),
+ ("maxprot", ctypes.c_int),
+ ("initprot", ctypes.c_int),
+ ("nsects", ctypes.c_uint32),
+ ("flags", ctypes.c_uint32),
+]
+"""
+struct segment_command_64 { /* for 64-bit architectures */
+ uint32_t cmd; /* LC_SEGMENT_64 */
+ uint32_t cmdsize; /* includes sizeof section_64 structs */
+ char segname[16]; /* segment name */
+ uint64_t vmaddr; /* memory address of this segment */
+ uint64_t vmsize; /* memory size of this segment */
+ uint64_t fileoff; /* file offset of this segment */
+ uint64_t filesize; /* amount to map from the file */
+ vm_prot_t maxprot; /* maximum VM protection */
+ vm_prot_t initprot; /* initial VM protection */
+ uint32_t nsects; /* number of sections in segment */
+ uint32_t flags; /* flags */
+};
+"""
+
# ctypes field layout for the LC_VERSION_MIN_* load commands: the shared
# cmd/cmdsize prefix (segment_base_fields) followed by two nibble-packed
# version numbers, as shown in the C declaration below.
version_min_command_fields = segment_base_fields + [
    ("version", ctypes.c_uint32),
    ("sdk", ctypes.c_uint32),
]
"""
struct version_min_command {
 uint32_t cmd; /* LC_VERSION_MIN_MACOSX or
 LC_VERSION_MIN_IPHONEOS or
 LC_VERSION_MIN_WATCHOS or
 LC_VERSION_MIN_TVOS */
 uint32_t cmdsize; /* sizeof(struct min_version_command) */
 uint32_t version; /* X.Y.Z is encoded in nibbles xxxx.yy.zz */
 uint32_t sdk; /* X.Y.Z is encoded in nibbles xxxx.yy.zz */
};
"""
+
# ctypes field layout for the LC_BUILD_VERSION load command (the modern
# replacement for LC_VERSION_MIN_*): the shared cmd/cmdsize prefix
# (segment_base_fields) plus platform, minimum OS, SDK and tool count,
# as shown in the C declaration below.
build_version_command_fields = segment_base_fields + [
    ("platform", ctypes.c_uint32),
    ("minos", ctypes.c_uint32),
    ("sdk", ctypes.c_uint32),
    ("ntools", ctypes.c_uint32),
]
"""
struct build_version_command {
 uint32_t cmd; /* LC_BUILD_VERSION */
 uint32_t cmdsize; /* sizeof(struct build_version_command) plus */
 /* ntools * sizeof(struct build_tool_version) */
 uint32_t platform; /* platform */
 uint32_t minos; /* X.Y.Z is encoded in nibbles xxxx.yy.zz */
 uint32_t sdk; /* X.Y.Z is encoded in nibbles xxxx.yy.zz */
 uint32_t ntools; /* number of tool entries following this */
};
"""
+
+
def swap32(x: int) -> int:
    """Return the low 32 bits of *x* with their byte order reversed.

    Used to normalize Mach-O magic numbers read with the wrong endianness.
    """
    # Serialize the low 32 bits one way, reinterpret them the other way.
    return int.from_bytes((x & 0xFFFFFFFF).to_bytes(4, "little"), "big")
+
+
def get_base_class_and_magic_number(
    lib_file: BufferedIOBase,
    seek: int | None = None,
) -> tuple[type[ctypes.Structure], int]:
    """Peek at a Mach-O/fat magic number and pick the matching ctypes base.

    Reads the 32-bit magic at *seek* (or the current position when *seek*
    is None), then restores the file position so the caller can parse the
    full header. When the magic is one of the byte-swapped (CIGAM) forms,
    the returned magic is normalized via swap32 and the base class forces
    the opposite of the host's byte order.
    """
    if seek is None:
        seek = lib_file.tell()
    else:
        lib_file.seek(seek)

    raw = lib_file.read(ctypes.sizeof(ctypes.c_uint32))
    magic_number = ctypes.c_uint32.from_buffer_copy(raw).value

    # CIGAM variants mean the file was written in the opposite byte order.
    if magic_number in (FAT_CIGAM, FAT_CIGAM_64, MH_CIGAM, MH_CIGAM_64):
        BaseClass = (
            ctypes.BigEndianStructure
            if sys.byteorder == "little"
            else ctypes.LittleEndianStructure
        )
        magic_number = swap32(magic_number)
    else:
        BaseClass = ctypes.Structure

    # Leave the stream positioned at the start of the header.
    lib_file.seek(seek)
    return BaseClass, magic_number
+
+
def read_data(struct_class: type[ctypes.Structure], lib_file: BufferedIOBase):
    """Read one *struct_class* record from the file's current position."""
    size = ctypes.sizeof(struct_class)
    raw = lib_file.read(size)
    return struct_class.from_buffer_copy(raw)
+
+
def extract_macosx_min_system_version(path_to_lib: str):
    """Extract the minimum macOS deployment version from a Mach-O library.

    :param path_to_lib: path to a native library (.dylib / .so)
    :return: a (major, minor, patch) tuple, or None when the file is not a
        Mach-O / fat binary or carries no usable version load command
    """
    with open(path_to_lib, "rb") as lib_file:
        BaseClass, magic_number = get_base_class_and_magic_number(lib_file, 0)
        if magic_number not in [FAT_MAGIC, FAT_MAGIC_64, MH_MAGIC, MH_MAGIC_64]:
            return None

        # BUG FIX: test against FAT_MAGIC_64, not FAT_CIGAM_64.
        # get_base_class_and_magic_number already byte-swaps CIGAM magics,
        # so FAT_CIGAM_64 can never appear here; with the old check, 64-bit
        # fat binaries fell through to the thin-binary path and were
        # mis-parsed.
        if magic_number in [FAT_MAGIC, FAT_MAGIC_64]:
            # Fat (universal) binary: parse the fat header and each
            # per-architecture entry, then take the highest minimum
            # version across the embedded thin binaries.
            class FatHeader(BaseClass):
                _fields_ = fat_header_fields

            fat_header = read_data(FatHeader, lib_file)
            if magic_number == FAT_MAGIC:

                class FatArch(BaseClass):
                    _fields_ = fat_arch_fields

            else:

                class FatArch(BaseClass):
                    _fields_ = fat_arch_64_fields

            fat_arch_list = [
                read_data(FatArch, lib_file) for _ in range(fat_header.nfat_arch)
            ]

            versions_list: list[tuple[int, int, int]] = []
            for el in fat_arch_list:
                try:
                    version = read_mach_header(lib_file, el.offset)
                    if version is not None:
                        if el.cputype == CPU_TYPE_ARM64 and len(fat_arch_list) != 1:
                            # Xcode will not set the deployment target below 11.0.0
                            # for the arm64 architecture. Ignore the arm64 deployment
                            # in fat binaries when the target is 11.0.0, that way
                            # the other architectures can select a lower deployment
                            # target.
                            # This is safe because there is no arm64 variant for
                            # macOS 10.15 or earlier.
                            if version == (11, 0, 0):
                                continue
                        versions_list.append(version)
                except ValueError:
                    # One malformed slice should not discard the others.
                    pass

            return max(versions_list) if versions_list else None

        # Thin (single-architecture) Mach-O binary.
        try:
            return read_mach_header(lib_file, 0)
        except ValueError:
            # Error while reading the library's load commands: treat the
            # file as carrying no version information.
            return None
+
+
def read_mach_header(
    lib_file: BufferedIOBase,
    seek: int | None = None,
) -> tuple[int, int, int] | None:
    """
    This function parses a Mach-O header and extracts
    information about the minimal macOS version.

    :param lib_file: reference to opened library file with pointer
    :param seek: optional absolute offset of the Mach-O header; when None,
        parsing starts at the file's current position
    :return: (major, minor, patch) tuple, or None when no
        LC_VERSION_MIN_MACOSX / LC_BUILD_VERSION load command is found
    """
    base_class, magic_number = get_base_class_and_magic_number(lib_file, seek)
    # MH_MAGIC marks a 32-bit image; every other accepted magic is 64-bit.
    arch = "32" if magic_number == MH_MAGIC else "64"

    # Common cmd/cmdsize prefix shared by all load commands.
    class SegmentBase(base_class):
        _fields_ = segment_base_fields

    if arch == "32":

        class MachHeader(base_class):
            _fields_ = mach_header_fields

    else:

        class MachHeader(base_class):
            _fields_ = mach_header_fields_64

    mach_header = read_data(MachHeader, lib_file)
    # Walk the load commands that follow the header: peek at each
    # command's cmd/cmdsize prefix, rewind, then either decode the full
    # version command and return, or skip past it using cmdsize.
    for _i in range(mach_header.ncmds):
        pos = lib_file.tell()
        segment_base = read_data(SegmentBase, lib_file)
        lib_file.seek(pos)  # rewind so the full command can be re-read
        if segment_base.cmd == LC_VERSION_MIN_MACOSX:

            class VersionMinCommand(base_class):
                _fields_ = version_min_command_fields

            version_info = read_data(VersionMinCommand, lib_file)
            return parse_version(version_info.version)
        elif segment_base.cmd == LC_BUILD_VERSION:

            class VersionBuild(base_class):
                _fields_ = build_version_command_fields

            # LC_BUILD_VERSION stores the deployment target in `minos`.
            version_info = read_data(VersionBuild, lib_file)
            return parse_version(version_info.minos)
        else:
            # Not a version command: jump over it to the next one.
            lib_file.seek(pos + segment_base.cmdsize)
            continue
    # Implicitly returns None when no version load command is present.
+
+
def parse_version(version: int) -> tuple[int, int, int]:
    """Decode a nibble-packed Mach-O version (xxxx.yy.zz) into (x, y, z)."""
    # Layout: bits 31-16 = major, 15-8 = minor, 7-0 = patch.
    return (version >> 16) & 0xFFFF, (version >> 8) & 0xFF, version & 0xFF
+
+
def calculate_macosx_platform_tag(archive_root: StrPath, platform_tag: str) -> str:
    """
    Calculate proper macosx platform tag basing on files which are included to wheel

    Example platform tag `macosx-10.14-x86_64`

    :param archive_root: root directory of the unpacked wheel contents
    :param platform_tag: the interpreter-derived tag, e.g. `macosx-10.14-x86_64`
    :return: the adjusted tag with `_` separators, e.g. `macosx_10_14_x86_64`
    """
    prefix, version_str, suffix = platform_tag.split("-")
    base_version = tuple(int(x) for x in version_str.split("."))[:2]
    # From macOS 11 on, the minor number is not ABI-relevant: normalize to X.0.
    if base_version[0] > 10:
        base_version = (base_version[0], 0)
    assert len(base_version) == 2
    if "MACOSX_DEPLOYMENT_TARGET" in os.environ:
        deploy_target = tuple(
            int(x) for x in os.environ["MACOSX_DEPLOYMENT_TARGET"].split(".")
        )[:2]
        if deploy_target[0] > 10:
            deploy_target = (deploy_target[0], 0)
        if deploy_target < base_version:
            # A target below the interpreter's own build version is unusable.
            sys.stderr.write(
                "[WARNING] MACOSX_DEPLOYMENT_TARGET is set to a lower value ({}) than "
                "the version on which the Python interpreter was compiled ({}), and "
                "will be ignored.\n".format(
                    ".".join(str(x) for x in deploy_target),
                    ".".join(str(x) for x in base_version),
                )
            )
        else:
            base_version = deploy_target

    assert len(base_version) == 2
    start_version = base_version
    # Scan every native library in the wheel for its minimum macOS version.
    versions_dict: dict[str, tuple[int, int]] = {}
    for dirpath, _dirnames, filenames in os.walk(archive_root):
        for filename in filenames:
            if filename.endswith((".dylib", ".so")):
                lib_path = os.path.join(dirpath, filename)
                min_ver = extract_macosx_min_system_version(lib_path)
                if min_ver is not None:
                    min_ver = min_ver[0:2]
                    if min_ver[0] > 10:
                        min_ver = (min_ver[0], 0)
                    versions_dict[lib_path] = min_ver

    if versions_dict:
        base_version = max(base_version, max(versions_dict.values()))

    # macosx platform tag do not support minor bugfix release
    fin_base_version = "_".join([str(x) for x in base_version])
    if start_version < base_version:
        problematic_files = [k for k, v in versions_dict.items() if v > start_version]
        # BUG FIX: choose singular/plural from the number of files, before
        # joining them into one string. The original joined first and then
        # took len() of the joined string, counting characters instead of
        # files, so "this file" was effectively unreachable.
        files_form = "this file" if len(problematic_files) == 1 else "these files"
        error_message = (
            "[WARNING] This wheel needs a higher macOS version than {} "
            "To silence this warning, set MACOSX_DEPLOYMENT_TARGET to at least "
            + fin_base_version
            + " or recreate "
            + files_form
            + " with lower "
            "MACOSX_DEPLOYMENT_TARGET: \n" + "\n".join(problematic_files)
        )

        if "MACOSX_DEPLOYMENT_TARGET" in os.environ:
            error_message = error_message.format(
                "is set in MACOSX_DEPLOYMENT_TARGET variable."
            )
        else:
            error_message = error_message.format(
                "the version your Python interpreter is compiled against."
            )

        sys.stderr.write(error_message)

    return prefix + "_" + fin_base_version + "_" + suffix