diff --git a/.gitattributes b/.gitattributes index 510c4335d154944948099fe7dcf2741dc1bc2550..fa78e456cb43d59f2a33be380fa76f5f2283f332 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1258,3 +1258,5 @@ infer_4_47_1/lib/python3.10/site-packages/triton/backends/nvidia/bin/ptxas filte falcon/bin/x86_64-conda-linux-gnu-ld filter=lfs diff=lfs merge=lfs -text falcon/lib/python3.10/tkinter/__pycache__/__init__.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text falcon/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +falcon/lib/python3.10/site-packages/pysam.libs/libsasl2-7de4d792.so.3.0.0 filter=lfs diff=lfs merge=lfs -text +evalkit_tf437/lib/python3.10/site-packages/pydantic_core/__pycache__/core_schema.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text diff --git a/evalkit_tf437/lib/python3.10/site-packages/pydantic/aliases.py b/evalkit_tf437/lib/python3.10/site-packages/pydantic/aliases.py new file mode 100644 index 0000000000000000000000000000000000000000..0325bedacbeeb86b112ea4bea786afd3826c3812 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/pydantic/aliases.py @@ -0,0 +1,132 @@ +"""Support for alias configurations.""" + +from __future__ import annotations + +import dataclasses +from typing import Any, Callable, Literal + +from pydantic_core import PydanticUndefined + +from ._internal import _internal_dataclass + +__all__ = ('AliasGenerator', 'AliasPath', 'AliasChoices') + + +@dataclasses.dataclass(**_internal_dataclass.slots_true) +class AliasPath: + """Usage docs: https://docs.pydantic.dev/2.9/concepts/alias#aliaspath-and-aliaschoices + + A data class used by `validation_alias` as a convenience to create aliases. + + Attributes: + path: A list of string or integer aliases. + """ + + path: list[int | str] + + def __init__(self, first_arg: str, *args: str | int) -> None: + self.path = [first_arg] + list(args) + + def convert_to_aliases(self) -> list[str | int]: + """Converts arguments to a list of string or integer aliases. + + Returns: + The list of aliases. + """ + return self.path + + def search_dict_for_path(self, d: dict) -> Any: + """Searches a dictionary for the path specified by the alias. + + Returns: + The value at the specified path, or `PydanticUndefined` if the path is not found. + """ + v = d + for k in self.path: + if isinstance(v, str): + # disallow indexing into a str, like for AliasPath('x', 0) and x='abc' + return PydanticUndefined + try: + v = v[k] + except (KeyError, IndexError, TypeError): + return PydanticUndefined + return v + + +@dataclasses.dataclass(**_internal_dataclass.slots_true) +class AliasChoices: + """Usage docs: https://docs.pydantic.dev/2.9/concepts/alias#aliaspath-and-aliaschoices + + A data class used by `validation_alias` as a convenience to create aliases. + + Attributes: + choices: A list containing a string or `AliasPath`. + """ + + choices: list[str | AliasPath] + + def __init__(self, first_choice: str | AliasPath, *choices: str | AliasPath) -> None: + self.choices = [first_choice] + list(choices) + + def convert_to_aliases(self) -> list[list[str | int]]: + """Converts arguments to a list of lists containing string or integer aliases. + + Returns: + The list of aliases. 
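+
+        A minimal illustrative sketch (the field names here are hypothetical, not part of the upstream docstring):
+
+        ```python
+        from pydantic import AliasChoices, AliasPath
+
+        choices = AliasChoices('first_name', AliasPath('names', 0))
+        print(choices.convert_to_aliases())
+        #> [['first_name'], ['names', 0]]
+        ```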
+ """ + aliases: list[list[str | int]] = [] + for c in self.choices: + if isinstance(c, AliasPath): + aliases.append(c.convert_to_aliases()) + else: + aliases.append([c]) + return aliases + + +@dataclasses.dataclass(**_internal_dataclass.slots_true) +class AliasGenerator: + """Usage docs: https://docs.pydantic.dev/2.9/concepts/alias#using-an-aliasgenerator + + A data class used by `alias_generator` as a convenience to create various aliases. + + Attributes: + alias: A callable that takes a field name and returns an alias for it. + validation_alias: A callable that takes a field name and returns a validation alias for it. + serialization_alias: A callable that takes a field name and returns a serialization alias for it. + """ + + alias: Callable[[str], str] | None = None + validation_alias: Callable[[str], str | AliasPath | AliasChoices] | None = None + serialization_alias: Callable[[str], str] | None = None + + def _generate_alias( + self, + alias_kind: Literal['alias', 'validation_alias', 'serialization_alias'], + allowed_types: tuple[type[str] | type[AliasPath] | type[AliasChoices], ...], + field_name: str, + ) -> str | AliasPath | AliasChoices | None: + """Generate an alias of the specified kind. Returns None if the alias generator is None. + + Raises: + TypeError: If the alias generator produces an invalid type. + """ + alias = None + if alias_generator := getattr(self, alias_kind): + alias = alias_generator(field_name) + if alias and not isinstance(alias, allowed_types): + raise TypeError( + f'Invalid `{alias_kind}` type. `{alias_kind}` generator must produce one of `{allowed_types}`' + ) + return alias + + def generate_aliases(self, field_name: str) -> tuple[str | None, str | AliasPath | AliasChoices | None, str | None]: + """Generate `alias`, `validation_alias`, and `serialization_alias` for a field. + + Returns: + A tuple of three aliases - validation, alias, and serialization. 
+ """ + alias = self._generate_alias('alias', (str,), field_name) + validation_alias = self._generate_alias('validation_alias', (str, AliasChoices, AliasPath), field_name) + serialization_alias = self._generate_alias('serialization_alias', (str,), field_name) + + return alias, validation_alias, serialization_alias # type: ignore diff --git a/evalkit_tf437/lib/python3.10/site-packages/pydantic/class_validators.py b/evalkit_tf437/lib/python3.10/site-packages/pydantic/class_validators.py new file mode 100644 index 0000000000000000000000000000000000000000..9977150c92fcb083fcdfa632c9de3b5fa92470cb --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/pydantic/class_validators.py @@ -0,0 +1,5 @@ +"""`class_validators` module is a backport module from V1.""" + +from ._migration import getattr_migration + +__getattr__ = getattr_migration(__name__) diff --git a/evalkit_tf437/lib/python3.10/site-packages/pydantic/datetime_parse.py b/evalkit_tf437/lib/python3.10/site-packages/pydantic/datetime_parse.py new file mode 100644 index 0000000000000000000000000000000000000000..53d52649e7620965ed194240a3becaa3ce3e3448 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/pydantic/datetime_parse.py @@ -0,0 +1,5 @@ +"""The `datetime_parse` module is a backport module from V1.""" + +from ._migration import getattr_migration + +__getattr__ = getattr_migration(__name__) diff --git a/evalkit_tf437/lib/python3.10/site-packages/pydantic/decorator.py b/evalkit_tf437/lib/python3.10/site-packages/pydantic/decorator.py new file mode 100644 index 0000000000000000000000000000000000000000..0d97560c1b791956726b04fd66740a947647aabe --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/pydantic/decorator.py @@ -0,0 +1,5 @@ +"""The `decorator` module is a backport module from V1.""" + +from ._migration import getattr_migration + +__getattr__ = getattr_migration(__name__) diff --git a/evalkit_tf437/lib/python3.10/site-packages/pydantic/functional_serializers.py b/evalkit_tf437/lib/python3.10/site-packages/pydantic/functional_serializers.py new file mode 100644 index 0000000000000000000000000000000000000000..d1579bbd8aee26bb535b77d3f47a4989c09b8f61 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/pydantic/functional_serializers.py @@ -0,0 +1,438 @@ +"""This module contains related classes and functions for serialization.""" + +from __future__ import annotations + +import dataclasses +from functools import partial, partialmethod +from typing import TYPE_CHECKING, Any, Callable, TypeVar, overload + +from pydantic_core import PydanticUndefined, core_schema +from pydantic_core.core_schema import SerializationInfo, SerializerFunctionWrapHandler, WhenUsed +from typing_extensions import Annotated, Literal, TypeAlias + +from . import PydanticUndefinedAnnotation +from ._internal import _decorators, _internal_dataclass +from .annotated_handlers import GetCoreSchemaHandler + + +@dataclasses.dataclass(**_internal_dataclass.slots_true, frozen=True) +class PlainSerializer: + """Plain serializers use a function to modify the output of serialization. + + This is particularly helpful when you want to customize the serialization for annotated types. + Consider an input of `list`, which will be serialized into a space-delimited string. 
+ + ```python + from typing import List + + from typing_extensions import Annotated + + from pydantic import BaseModel, PlainSerializer + + CustomStr = Annotated[ + List, PlainSerializer(lambda x: ' '.join(x), return_type=str) + ] + + class StudentModel(BaseModel): + courses: CustomStr + + student = StudentModel(courses=['Math', 'Chemistry', 'English']) + print(student.model_dump()) + #> {'courses': 'Math Chemistry English'} + ``` + + Attributes: + func: The serializer function. + return_type: The return type for the function. If omitted it will be inferred from the type annotation. + when_used: Determines when this serializer should be used. Accepts a string with values `'always'`, + `'unless-none'`, `'json'`, and `'json-unless-none'`. Defaults to 'always'. + """ + + func: core_schema.SerializerFunction + return_type: Any = PydanticUndefined + when_used: WhenUsed = 'always' + + def __get_pydantic_core_schema__(self, source_type: Any, handler: GetCoreSchemaHandler) -> core_schema.CoreSchema: + """Gets the Pydantic core schema. + + Args: + source_type: The source type. + handler: The `GetCoreSchemaHandler` instance. + + Returns: + The Pydantic core schema. + """ + schema = handler(source_type) + try: + return_type = _decorators.get_function_return_type( + self.func, self.return_type, handler._get_types_namespace() + ) + except NameError as e: + raise PydanticUndefinedAnnotation.from_name_error(e) from e + return_schema = None if return_type is PydanticUndefined else handler.generate_schema(return_type) + schema['serialization'] = core_schema.plain_serializer_function_ser_schema( + function=self.func, + info_arg=_decorators.inspect_annotated_serializer(self.func, 'plain'), + return_schema=return_schema, + when_used=self.when_used, + ) + return schema + + +@dataclasses.dataclass(**_internal_dataclass.slots_true, frozen=True) +class WrapSerializer: + """Wrap serializers receive the raw inputs along with a handler function that applies the standard serialization + logic, and can modify the resulting value before returning it as the final output of serialization. + + For example, here's a scenario in which a wrap serializer transforms timezones to UTC **and** utilizes the existing `datetime` serialization logic. + + ```python + from datetime import datetime, timezone + from typing import Any, Dict + + from typing_extensions import Annotated + + from pydantic import BaseModel, WrapSerializer + + class EventDatetime(BaseModel): + start: datetime + end: datetime + + def convert_to_utc(value: Any, handler, info) -> Dict[str, datetime]: + # Note that `handler` can actually help serialize the `value` for + # further custom serialization in case it's a subclass. 
+ partial_result = handler(value, info) + if info.mode == 'json': + return { + k: datetime.fromisoformat(v).astimezone(timezone.utc) + for k, v in partial_result.items() + } + return {k: v.astimezone(timezone.utc) for k, v in partial_result.items()} + + UTCEventDatetime = Annotated[EventDatetime, WrapSerializer(convert_to_utc)] + + class EventModel(BaseModel): + event_datetime: UTCEventDatetime + + dt = EventDatetime( + start='2024-01-01T07:00:00-08:00', end='2024-01-03T20:00:00+06:00' + ) + event = EventModel(event_datetime=dt) + print(event.model_dump()) + ''' + { + 'event_datetime': { + 'start': datetime.datetime( + 2024, 1, 1, 15, 0, tzinfo=datetime.timezone.utc + ), + 'end': datetime.datetime( + 2024, 1, 3, 14, 0, tzinfo=datetime.timezone.utc + ), + } + } + ''' + + print(event.model_dump_json()) + ''' + {"event_datetime":{"start":"2024-01-01T15:00:00Z","end":"2024-01-03T14:00:00Z"}} + ''' + ``` + + Attributes: + func: The serializer function to be wrapped. + return_type: The return type for the function. If omitted it will be inferred from the type annotation. + when_used: Determines when this serializer should be used. Accepts a string with values `'always'`, + `'unless-none'`, `'json'`, and `'json-unless-none'`. Defaults to 'always'. + """ + + func: core_schema.WrapSerializerFunction + return_type: Any = PydanticUndefined + when_used: WhenUsed = 'always' + + def __get_pydantic_core_schema__(self, source_type: Any, handler: GetCoreSchemaHandler) -> core_schema.CoreSchema: + """This method is used to get the Pydantic core schema of the class. + + Args: + source_type: Source type. + handler: Core schema handler. + + Returns: + The generated core schema of the class. + """ + schema = handler(source_type) + try: + return_type = _decorators.get_function_return_type( + self.func, self.return_type, handler._get_types_namespace() + ) + except NameError as e: + raise PydanticUndefinedAnnotation.from_name_error(e) from e + return_schema = None if return_type is PydanticUndefined else handler.generate_schema(return_type) + schema['serialization'] = core_schema.wrap_serializer_function_ser_schema( + function=self.func, + info_arg=_decorators.inspect_annotated_serializer(self.func, 'wrap'), + return_schema=return_schema, + when_used=self.when_used, + ) + return schema + + +if TYPE_CHECKING: + _Partial: TypeAlias = 'partial[Any] | partialmethod[Any]' + + FieldPlainSerializer: TypeAlias = 'core_schema.SerializerFunction | _Partial' + """A field serializer method or function in `plain` mode.""" + + FieldWrapSerializer: TypeAlias = 'core_schema.WrapSerializerFunction | _Partial' + """A field serializer method or function in `wrap` mode.""" + + FieldSerializer: TypeAlias = 'FieldPlainSerializer | FieldWrapSerializer' + """A field serializer method or function.""" + + _FieldPlainSerializerT = TypeVar('_FieldPlainSerializerT', bound=FieldPlainSerializer) + _FieldWrapSerializerT = TypeVar('_FieldWrapSerializerT', bound=FieldWrapSerializer) + + +@overload +def field_serializer( + field: str, + /, + *fields: str, + mode: Literal['wrap'], + return_type: Any = ..., + when_used: WhenUsed = ..., + check_fields: bool | None = ..., +) -> Callable[[_FieldWrapSerializerT], _FieldWrapSerializerT]: ... + + +@overload +def field_serializer( + field: str, + /, + *fields: str, + mode: Literal['plain'] = ..., + return_type: Any = ..., + when_used: WhenUsed = ..., + check_fields: bool | None = ..., +) -> Callable[[_FieldPlainSerializerT], _FieldPlainSerializerT]: ... 
+
+
+def field_serializer(
+    *fields: str,
+    mode: Literal['plain', 'wrap'] = 'plain',
+    return_type: Any = PydanticUndefined,
+    when_used: WhenUsed = 'always',
+    check_fields: bool | None = None,
+) -> (
+    Callable[[_FieldWrapSerializerT], _FieldWrapSerializerT]
+    | Callable[[_FieldPlainSerializerT], _FieldPlainSerializerT]
+):
+    """Decorator that enables custom field serialization.
+
+    In the example below, a field of type `set` is used to avoid duplicate entries. A `field_serializer` is then used to serialize the data as a sorted list.
+
+    ```python
+    from typing import Set
+
+    from pydantic import BaseModel, field_serializer
+
+    class StudentModel(BaseModel):
+        name: str = 'Jane'
+        courses: Set[str]
+
+        @field_serializer('courses', when_used='json')
+        def serialize_courses_in_order(self, courses: Set[str]):
+            return sorted(courses)
+
+    student = StudentModel(courses={'Math', 'Chemistry', 'English'})
+    print(student.model_dump_json())
+    #> {"name":"Jane","courses":["Chemistry","English","Math"]}
+    ```
+
+    See [Custom serializers](../concepts/serialization.md#custom-serializers) for more information.
+
+    Four signatures are supported:
+
+    - `(self, value: Any, info: FieldSerializationInfo)`
+    - `(self, value: Any, nxt: SerializerFunctionWrapHandler, info: FieldSerializationInfo)`
+    - `(value: Any, info: SerializationInfo)`
+    - `(value: Any, nxt: SerializerFunctionWrapHandler, info: SerializationInfo)`
+
+    Args:
+        fields: Which field(s) the method should be called on.
+        mode: The serialization mode.
+
+            - `plain` means the function will be called instead of the default serialization logic,
+            - `wrap` means the function will be called with an argument to optionally call the
+                default serialization logic.
+        return_type: Optional return type for the function, if omitted it will be inferred from the type annotation.
+        when_used: Determines when the serializer will be used for serialization.
+        check_fields: Whether to check that the fields actually exist on the model.
+
+    Returns:
+        The decorator function.
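+
+    A `mode='wrap'` sketch for comparison (the model and field names here are hypothetical):
+
+    ```python
+    from pydantic_core.core_schema import SerializerFunctionWrapHandler
+
+    from pydantic import BaseModel, field_serializer
+
+    class Point(BaseModel):
+        x: float
+
+        @field_serializer('x', mode='wrap')
+        def round_x(self, value: float, nxt: SerializerFunctionWrapHandler):
+            # apply the default serialization first, then round the result
+            return round(nxt(value), 2)
+
+    print(Point(x=3.14159).model_dump())
+    #> {'x': 3.14}
+    ```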
+ """ + + def dec(f: FieldSerializer) -> _decorators.PydanticDescriptorProxy[Any]: + dec_info = _decorators.FieldSerializerDecoratorInfo( + fields=fields, + mode=mode, + return_type=return_type, + when_used=when_used, + check_fields=check_fields, + ) + return _decorators.PydanticDescriptorProxy(f, dec_info) # pyright: ignore[reportArgumentType] + + return dec # pyright: ignore[reportReturnType] + + +if TYPE_CHECKING: + # The first argument in the following callables represent the `self` type: + + ModelPlainSerializerWithInfo: TypeAlias = Callable[[Any, SerializationInfo], Any] + """A model serializer method with the `info` argument, in `plain` mode.""" + + ModelPlainSerializerWithoutInfo: TypeAlias = Callable[[Any], Any] + """A model serializer method without the `info` argument, in `plain` mode.""" + + ModelPlainSerializer: TypeAlias = 'ModelPlainSerializerWithInfo | ModelPlainSerializerWithoutInfo' + """A model serializer method in `plain` mode.""" + + ModelWrapSerializerWithInfo: TypeAlias = Callable[[Any, SerializerFunctionWrapHandler, SerializationInfo], Any] + """A model serializer method with the `info` argument, in `wrap` mode.""" + + ModelWrapSerializerWithoutInfo: TypeAlias = Callable[[Any, SerializerFunctionWrapHandler], Any] + """A model serializer method without the `info` argument, in `wrap` mode.""" + + ModelWrapSerializer: TypeAlias = 'ModelWrapSerializerWithInfo | ModelWrapSerializerWithoutInfo' + """A model serializer method in `wrap` mode.""" + + ModelSerializer: TypeAlias = 'ModelPlainSerializer | ModelWrapSerializer' + + _ModelPlainSerializerT = TypeVar('_ModelPlainSerializerT', bound=ModelPlainSerializer) + _ModelWrapSerializerT = TypeVar('_ModelWrapSerializerT', bound=ModelWrapSerializer) + + +@overload +def model_serializer(f: _ModelPlainSerializerT, /) -> _ModelPlainSerializerT: ... + + +@overload +def model_serializer( + *, mode: Literal['wrap'], when_used: WhenUsed = 'always', return_type: Any = ... +) -> Callable[[_ModelWrapSerializerT], _ModelWrapSerializerT]: ... + + +@overload +def model_serializer( + *, + mode: Literal['plain'] = ..., + when_used: WhenUsed = 'always', + return_type: Any = ..., +) -> Callable[[_ModelPlainSerializerT], _ModelPlainSerializerT]: ... + + +def model_serializer( + f: _ModelPlainSerializerT | _ModelWrapSerializerT | None = None, + /, + *, + mode: Literal['plain', 'wrap'] = 'plain', + when_used: WhenUsed = 'always', + return_type: Any = PydanticUndefined, +) -> ( + _ModelPlainSerializerT + | Callable[[_ModelWrapSerializerT], _ModelWrapSerializerT] + | Callable[[_ModelPlainSerializerT], _ModelPlainSerializerT] +): + """Decorator that enables custom model serialization. + + This is useful when a model need to be serialized in a customized manner, allowing for flexibility beyond just specific fields. + + An example would be to serialize temperature to the same temperature scale, such as degrees Celsius. 
+ + ```python + from typing import Literal + + from pydantic import BaseModel, model_serializer + + class TemperatureModel(BaseModel): + unit: Literal['C', 'F'] + value: int + + @model_serializer() + def serialize_model(self): + if self.unit == 'F': + return {'unit': 'C', 'value': int((self.value - 32) / 1.8)} + return {'unit': self.unit, 'value': self.value} + + temperature = TemperatureModel(unit='F', value=212) + print(temperature.model_dump()) + #> {'unit': 'C', 'value': 100} + ``` + + Two signatures are supported for `mode='plain'`, which is the default: + + - `(self)` + - `(self, info: SerializationInfo)` + + And two other signatures for `mode='wrap'`: + + - `(self, nxt: SerializerFunctionWrapHandler)` + - `(self, nxt: SerializerFunctionWrapHandler, info: SerializationInfo)` + + See [Custom serializers](../concepts/serialization.md#custom-serializers) for more information. + + Args: + f: The function to be decorated. + mode: The serialization mode. + + - `'plain'` means the function will be called instead of the default serialization logic + - `'wrap'` means the function will be called with an argument to optionally call the default + serialization logic. + when_used: Determines when this serializer should be used. + return_type: The return type for the function. If omitted it will be inferred from the type annotation. + + Returns: + The decorator function. + """ + + def dec(f: ModelSerializer) -> _decorators.PydanticDescriptorProxy[Any]: + dec_info = _decorators.ModelSerializerDecoratorInfo(mode=mode, return_type=return_type, when_used=when_used) + return _decorators.PydanticDescriptorProxy(f, dec_info) + + if f is None: + return dec # pyright: ignore[reportReturnType] + else: + return dec(f) # pyright: ignore[reportReturnType] + + +AnyType = TypeVar('AnyType') + + +if TYPE_CHECKING: + SerializeAsAny = Annotated[AnyType, ...] # SerializeAsAny[list[str]] will be treated by type checkers as list[str] + """Force serialization to ignore whatever is defined in the schema and instead ask the object + itself how it should be serialized. + In particular, this means that when model subclasses are serialized, fields present in the subclass + but not in the original schema will be included. 
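+
+    A minimal sketch mirroring the serialization docs (the model names here are hypothetical):
+
+    ```python
+    from pydantic import BaseModel, SerializeAsAny
+
+    class User(BaseModel):
+        name: str
+
+    class AdminUser(User):
+        rights: int = 1
+
+    class Page(BaseModel):
+        owner: SerializeAsAny[User]
+
+    # the subclass-only field `rights` survives serialization
+    print(Page(owner=AdminUser(name='root')).model_dump())
+    #> {'owner': {'name': 'root', 'rights': 1}}
+    ```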
+ """ +else: + + @dataclasses.dataclass(**_internal_dataclass.slots_true) + class SerializeAsAny: # noqa: D101 + def __class_getitem__(cls, item: Any) -> Any: + return Annotated[item, SerializeAsAny()] + + def __get_pydantic_core_schema__( + self, source_type: Any, handler: GetCoreSchemaHandler + ) -> core_schema.CoreSchema: + schema = handler(source_type) + schema_to_update = schema + while schema_to_update['type'] == 'definitions': + schema_to_update = schema_to_update.copy() + schema_to_update = schema_to_update['schema'] + schema_to_update['serialization'] = core_schema.wrap_serializer_function_ser_schema( + lambda x, h: h(x), schema=core_schema.any_schema() + ) + return schema + + __hash__ = object.__hash__ diff --git a/evalkit_tf437/lib/python3.10/site-packages/pydantic/py.typed b/evalkit_tf437/lib/python3.10/site-packages/pydantic/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_tf437/lib/python3.10/site-packages/pydantic/tools.py b/evalkit_tf437/lib/python3.10/site-packages/pydantic/tools.py new file mode 100644 index 0000000000000000000000000000000000000000..fdc68c4f4a35e8d8200b3aafcda63d43839da375 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/pydantic/tools.py @@ -0,0 +1,5 @@ +"""The `tools` module is a backport module from V1.""" + +from ._migration import getattr_migration + +__getattr__ = getattr_migration(__name__) diff --git a/evalkit_tf437/lib/python3.10/site-packages/pydantic/validate_call_decorator.py b/evalkit_tf437/lib/python3.10/site-packages/pydantic/validate_call_decorator.py new file mode 100644 index 0000000000000000000000000000000000000000..1151694515d6f7fe70dc222f6138f81c5d2e2881 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/pydantic/validate_call_decorator.py @@ -0,0 +1,69 @@ +"""Decorator for validating function calls.""" + +from __future__ import annotations as _annotations + +import functools +from typing import TYPE_CHECKING, Any, Callable, TypeVar, overload + +from ._internal import _typing_extra, _validate_call + +__all__ = ('validate_call',) + +if TYPE_CHECKING: + from .config import ConfigDict + + AnyCallableT = TypeVar('AnyCallableT', bound=Callable[..., Any]) + + +@overload +def validate_call( + *, config: ConfigDict | None = None, validate_return: bool = False +) -> Callable[[AnyCallableT], AnyCallableT]: ... + + +@overload +def validate_call(func: AnyCallableT, /) -> AnyCallableT: ... + + +def validate_call( + func: AnyCallableT | None = None, + /, + *, + config: ConfigDict | None = None, + validate_return: bool = False, +) -> AnyCallableT | Callable[[AnyCallableT], AnyCallableT]: + """Usage docs: https://docs.pydantic.dev/2.9/concepts/validation_decorator/ + + Returns a decorated wrapper around the function that validates the arguments and, optionally, the return value. + + Usage may be either as a plain decorator `@validate_call` or with arguments `@validate_call(...)`. + + Args: + func: The function to be decorated. + config: The configuration dictionary. + validate_return: Whether to validate the return value. + + Returns: + The decorated function. 
+ """ + local_ns = _typing_extra.parent_frame_namespace() + + def validate(function: AnyCallableT) -> AnyCallableT: + if isinstance(function, (classmethod, staticmethod)): + name = type(function).__name__ + raise TypeError(f'The `@{name}` decorator should be applied after `@validate_call` (put `@{name}` on top)') + + validate_call_wrapper = _validate_call.ValidateCallWrapper(function, config, validate_return, local_ns) + + @functools.wraps(function) + def wrapper_function(*args, **kwargs): + return validate_call_wrapper(*args, **kwargs) + + wrapper_function.raw_function = function # type: ignore + + return wrapper_function # type: ignore + + if func: + return validate(func) + else: + return validate diff --git a/evalkit_tf437/lib/python3.10/site-packages/pydantic_core/__pycache__/core_schema.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/pydantic_core/__pycache__/core_schema.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..082910e8e3677153859e3c8d86f447f1edd5ad03 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/pydantic_core/__pycache__/core_schema.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:efa173282e69e63cd89631f686772e33bc74ad1f486d50700538bedf737419ea +size 123086 diff --git a/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fab411b9cd93adba3c8ceb2356eaa488aafb0b96 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/_emoji_replace.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/_emoji_replace.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03e5bcbe20fd5ab1f6d104794cd8411a03b32f8e Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/_emoji_replace.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/_null_file.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/_null_file.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73fa065e60ae20997a42c3e299c05f8bb2afceb7 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/_null_file.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/ansi.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/ansi.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..87111ff880ffcb0372f23c6e2efd14832feea8d3 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/ansi.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/bar.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/bar.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12e95b1b864b283fadc74269463c3109c9fff2f8 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/bar.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/emoji.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/emoji.cpython-310.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..69bd553b13c69d96acba2925832162aa0d2710f0 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/emoji.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/file_proxy.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/file_proxy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7e94db32efab554d8be0e0064e5079e8e6ef9a1 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/file_proxy.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/filesize.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/filesize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e71f48adb94eaf735255ee3811ef70caed434e4c Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/filesize.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/json.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/json.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..48b8ee215c8e3033b8340156a22bf1968055bcad Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/json.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/live.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/live.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3feddaecb675279b9853045a04ef96364072ae9a Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/live.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/pretty.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/pretty.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b17dbfbf4861665cfb879f59be2e4cbbb70c4562 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/pretty.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/progress.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/progress.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..815bf8a5ae09ed6c22201679685a7ba69b28c550 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/progress.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/protocol.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/protocol.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b232a3a8cb6776783f286e6d4d517e10032b816 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/protocol.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/rule.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/rule.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f6a83b5cd3de9647d93f39c75578bb930d7425f Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/rule.cpython-310.pyc differ diff --git 
a/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/scope.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/scope.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b84af1fa8012452c43492fe65552f96bb06c0928 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/scope.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/status.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/status.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..def69891864e658604c12cff19379f53f88b4d6a Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/status.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/syntax.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/syntax.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..32c25a147919045d377bca0eb6bb85ac8d37c1e9 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/syntax.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/terminal_theme.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/terminal_theme.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b214202448458c1a9d7b67e01e3a006e917ae00 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/rich/__pycache__/terminal_theme.cpython-310.pyc differ diff --git a/falcon/lib/python3.10/site-packages/anyio-4.8.0.dist-info/INSTALLER b/falcon/lib/python3.10/site-packages/anyio-4.8.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/anyio-4.8.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/falcon/lib/python3.10/site-packages/anyio-4.8.0.dist-info/METADATA b/falcon/lib/python3.10/site-packages/anyio-4.8.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..7b114cdbe89034ec67e6a5f8ed9fe80261ab8d77 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/anyio-4.8.0.dist-info/METADATA @@ -0,0 +1,104 @@ +Metadata-Version: 2.1 +Name: anyio +Version: 4.8.0 +Summary: High level compatibility layer for multiple asynchronous event loop implementations +Author-email: Alex Grönholm +License: MIT +Project-URL: Documentation, https://anyio.readthedocs.io/en/latest/ +Project-URL: Changelog, https://anyio.readthedocs.io/en/stable/versionhistory.html +Project-URL: Source code, https://github.com/agronholm/anyio +Project-URL: Issue tracker, https://github.com/agronholm/anyio/issues +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Framework :: AnyIO +Classifier: Typing :: Typed +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Requires-Python: >=3.9 +Description-Content-Type: text/x-rst +License-File: LICENSE +Requires-Dist: exceptiongroup>=1.0.2; 
python_version < "3.11" +Requires-Dist: idna>=2.8 +Requires-Dist: sniffio>=1.1 +Requires-Dist: typing_extensions>=4.5; python_version < "3.13" +Provides-Extra: trio +Requires-Dist: trio>=0.26.1; extra == "trio" +Provides-Extra: test +Requires-Dist: anyio[trio]; extra == "test" +Requires-Dist: coverage[toml]>=7; extra == "test" +Requires-Dist: exceptiongroup>=1.2.0; extra == "test" +Requires-Dist: hypothesis>=4.0; extra == "test" +Requires-Dist: psutil>=5.9; extra == "test" +Requires-Dist: pytest>=7.0; extra == "test" +Requires-Dist: trustme; extra == "test" +Requires-Dist: truststore>=0.9.1; python_version >= "3.10" and extra == "test" +Requires-Dist: uvloop>=0.21; (platform_python_implementation == "CPython" and platform_system != "Windows" and python_version < "3.14") and extra == "test" +Provides-Extra: doc +Requires-Dist: packaging; extra == "doc" +Requires-Dist: Sphinx~=7.4; extra == "doc" +Requires-Dist: sphinx_rtd_theme; extra == "doc" +Requires-Dist: sphinx-autodoc-typehints>=1.2.0; extra == "doc" + +.. image:: https://github.com/agronholm/anyio/actions/workflows/test.yml/badge.svg + :target: https://github.com/agronholm/anyio/actions/workflows/test.yml + :alt: Build Status +.. image:: https://coveralls.io/repos/github/agronholm/anyio/badge.svg?branch=master + :target: https://coveralls.io/github/agronholm/anyio?branch=master + :alt: Code Coverage +.. image:: https://readthedocs.org/projects/anyio/badge/?version=latest + :target: https://anyio.readthedocs.io/en/latest/?badge=latest + :alt: Documentation +.. image:: https://badges.gitter.im/gitterHQ/gitter.svg + :target: https://gitter.im/python-trio/AnyIO + :alt: Gitter chat + +AnyIO is an asynchronous networking and concurrency library that works on top of either asyncio_ or +trio_. It implements trio-like `structured concurrency`_ (SC) on top of asyncio and works in harmony +with the native SC of trio itself. + +Applications and libraries written against AnyIO's API will run unmodified on either asyncio_ or +trio_. AnyIO can also be adopted into a library or application incrementally – bit by bit, no full +refactoring necessary. It will blend in with the native libraries of your chosen backend. + +Documentation +------------- + +View full documentation at: https://anyio.readthedocs.io/ + +Features +-------- + +AnyIO offers the following functionality: + +* Task groups (nurseries_ in trio terminology) +* High-level networking (TCP, UDP and UNIX sockets) + + * `Happy eyeballs`_ algorithm for TCP connections (more robust than that of asyncio on Python + 3.8) + * async/await style UDP sockets (unlike asyncio where you still have to use Transports and + Protocols) + +* A versatile API for byte streams and object streams +* Inter-task synchronization and communication (locks, conditions, events, semaphores, object + streams) +* Worker threads +* Subprocesses +* Asynchronous file I/O (using worker threads) +* Signal handling + +AnyIO also comes with its own pytest_ plugin which also supports asynchronous fixtures. +It even works with the popular Hypothesis_ library. + +.. _asyncio: https://docs.python.org/3/library/asyncio.html +.. _trio: https://github.com/python-trio/trio +.. _structured concurrency: https://en.wikipedia.org/wiki/Structured_concurrency +.. _nurseries: https://trio.readthedocs.io/en/stable/reference-core.html#nurseries-and-spawning +.. _Happy eyeballs: https://en.wikipedia.org/wiki/Happy_Eyeballs +.. _pytest: https://docs.pytest.org/en/latest/ +.. 
_Hypothesis: https://hypothesis.works/
diff --git a/falcon/lib/python3.10/site-packages/functorch/__pycache__/__init__.cpython-310.pyc b/falcon/lib/python3.10/site-packages/functorch/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f9c3c935bc3dd76ddb3a9476f90cddc978bf4b1f
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/functorch/__pycache__/__init__.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/functorch/_src/aot_autograd/__init__.py b/falcon/lib/python3.10/site-packages/functorch/_src/aot_autograd/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..94f258df84ba8730208768fc44222bee4b3ebc33
--- /dev/null
+++ b/falcon/lib/python3.10/site-packages/functorch/_src/aot_autograd/__init__.py
@@ -0,0 +1,8 @@
+# This file has moved under torch/_functorch. It is not public API.
+# If you are not a PyTorch developer and you are relying on the following
+# imports, please file an issue.
+from torch._functorch.aot_autograd import (
+    aot_autograd_decompositions,
+    KNOWN_TYPES,
+    PytreeThunk,
+)
diff --git a/falcon/lib/python3.10/site-packages/functorch/_src/make_functional/__pycache__/__init__.cpython-310.pyc b/falcon/lib/python3.10/site-packages/functorch/_src/make_functional/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..93a0b1af64740108592ff12ab337d6e5cad631c8
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/functorch/_src/make_functional/__pycache__/__init__.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/functorch/compile/__pycache__/__init__.cpython-310.pyc b/falcon/lib/python3.10/site-packages/functorch/compile/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f10fdcf6e54da804b5cff67a4ece9adc97569154
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/functorch/compile/__pycache__/__init__.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/functorch/dim/__init__.py b/falcon/lib/python3.10/site-packages/functorch/dim/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..519a7cb271cdc3eef40ccd76c94c504ed1a325e8
--- /dev/null
+++ b/falcon/lib/python3.10/site-packages/functorch/dim/__init__.py
@@ -0,0 +1,179 @@
+import dis
+import inspect
+from typing import Sequence, Union
+
+import torch
+
+import functorch._C
+from functorch._C import dim as _C
+from .tree_map import tree_flatten, tree_map
+from .wrap_type import wrap_type
+
+_C._patch_tensor_class()
+dims, DimList, dimlists = _C.dims, _C.DimList, _C.dimlists
+
+
+class DimensionMismatchError(Exception):
+    pass
+
+
+class DimensionBindError(Exception):
+    pass
+
+
+from . import op_properties
+
+# use dict to avoid writing C++ bindings for set
+pointwise = dict.fromkeys(op_properties.pointwise, True)
+
+use_c = True
+if not use_c:
+    from . import reference
+
+
+class _Tensor:
+    # fast path around slow wrapping/unwrapping logic for simple queries used
+    # by the implementation...
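+    #
+    # A rough usage sketch of the first-class dims defined in this module
+    # (illustrative only; see the functorch/torchdim docs):
+    #
+    #   import torch
+    #   from functorch.dim import dims
+    #
+    #   batch, channel = dims()         # names bound via bytecode inspection
+    #   x = torch.randn(4, 3)
+    #   xd = x[batch, channel]          # bind positional dims to first-class dims
+    #   y = xd.order(batch, channel)    # back to a positional tensor
+    #   assert y.shape == (4, 3)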
+
+    @property
+    def dims(self):
+        return tuple(d for d in self._levels if isinstance(d, Dim))
+
+    def dim(self):
+        return self.ndim
+
+    if use_c:
+        __torch_function__ = classmethod(_C.__torch_function__)
+        expand = _C._instancemethod(_C.expand)
+    else:
+        __torch_function__ = reference.__torch_function__
+        expand = reference.expand
+
+    index = _C._instancemethod(_C.index)
+
+    def __repr__(self):
+        tensor, levels, ndim = self._tensor, self._levels, self.ndim
+        return f"{tensor}\nwith dims={tuple(l + ndim if isinstance(l, int) else l for l in levels)} sizes={tuple(tensor.size())}"
+
+
+TensorLike = (_Tensor, torch.Tensor)
+
+
+class Dim(_C.Dim, _Tensor):
+    # note that _C.Dim comes before tensor because we want the Dim API for things like size to take precedence.
+    # Tensor defines format, but we want to print Dims with special formatting
+    __format__ = object.__format__
+
+
+class Tensor(_Tensor, _C.Tensor):
+    if not use_c:
+        from_batched = staticmethod(_C.Tensor_from_batched)
+        from_positional = staticmethod(_C.Tensor_from_positional)
+        sum = _C._instancemethod(_C.Tensor_sum)
+
+
+def cat(tensors, dim, new_dim):
+    n = dims()
+    return stack(tensors, n, dim).index([n, dim], new_dim)
+
+
+if use_c:
+    _wrap = _C._wrap
+
+    def _def(name, *args, **kwargs):
+        orig = getattr(torch.Tensor, name)
+        setattr(_Tensor, name, _C._instancemethod(_wrap(orig, *args, **kwargs)))
+
+    t__getitem__ = _C._instancemethod(_C.__getitem__)
+    stack = _C.stack
+    split = _C._instancemethod(_C.split)
+else:
+    _wrap, _def = reference._wrap, reference._def
+    t__getitem__ = reference.t__getitem__
+    stack = reference.stack
+    split = reference.split
+
+# note: there is no python reference
+t__setitem__ = _C._instancemethod(_C.__setitem__)
+# this is patched in the C API because otherwise torch.Tensor will
+# no longer be considered a sequence and things will break
+# torch.Tensor.__getitem__ = t__getitem__
+
+_Tensor.__getitem__ = t__getitem__
+# torch.Tensor.__setitem__ = t__setitem__
+_Tensor.__setitem__ = t__setitem__
+
+torch.Tensor.split = split
+_Tensor.split = split
+torch.Tensor.expand = _C._instancemethod(_C.expand)
+torch.Tensor.index = _C._instancemethod(_C.index)
+wrap_type(use_c, _Tensor, torch.Tensor, _Tensor.__torch_function__)
+del _Tensor.ndim
+
+if use_c:
+    _Tensor.order = _C._instancemethod(_C.order)
+else:
+    _Tensor.order = reference.positional
+
+_def("mean")
+_def("sum")
+_def("all")
+_def("amax")
+_def("amin")
+_def("aminmax")
+_def("any")
+_def("count_nonzero")
+_def("logsumexp")
+_def("nanmean")
+_def("nansum")
+_def("prod")
+_def("std", keepdim_offset=2)
+_def("var", keepdim_offset=2)
+_def("max", single_dim=True)
+_def("min", single_dim=True)
+_def("argmax", single_dim=True)
+_def("argmin", single_dim=True)
+_def("kthvalue", single_dim=True)
+_def("median", single_dim=True)
+_def("nanmedian", single_dim=True)
+_def("mode", single_dim=True)
+_def("sort", reduce=False)
+_def("argsort", reduce=False)
+_def("unbind", single_dim=True)
+_def("chunk", dim_offset=1, reduce=False)
+_def("cummax", single_dim=True, reduce=False)
+_def("cummin", single_dim=True, reduce=False)
+_def("cumprod", single_dim=True, reduce=False)
+_def("cumprod_", single_dim=True, reduce=False)
+_def("cumsum", single_dim=True, reduce=False)
+_def("cumsum_", single_dim=True, reduce=False)
+_def("logcumsumexp", single_dim=True, reduce=False)
+_def("renorm", dim_offset=1, single_dim=True, reduce=False)
+_def("softmax", single_dim=True, reduce=False)
+softmax = _wrap(torch.nn.functional.softmax, single_dim=True, reduce=False)
+
+# stuff to handle in the future, because they require special
+# binding logic for dims
+# cross
+# diag_embed
+# diagonal
+# diagonal_scatter
+# diff
+# nanquantile
+# quantile
+# roll
+# rot90
+# topk (new dims on output)
+# should these all be subsumed by inplace indexing?
+# index_add_
+# index_add
+# index_copy
+# index_copy_
+# index_fill
+# index_fill_
+# index_select
+# scatter
+# scatter_
+# scatter_add
+# scatter_add_
+# scatter_reduce
diff --git a/falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/__init__.cpython-310.pyc b/falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f96e50fe29c39069ca18898cfa634872251fc91c
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/__init__.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/batch_tensor.cpython-310.pyc b/falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/batch_tensor.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..64bf537fe8011715c5d747d753063c06d8680c7f
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/batch_tensor.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/delayed_mul_tensor.cpython-310.pyc b/falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/delayed_mul_tensor.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..459a2df79e8889c3f3c7dd8232561322510fb349
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/delayed_mul_tensor.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/dim.cpython-310.pyc b/falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/dim.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ef144ef2c8dd90189ae7d7c8e158afededa55fcc
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/dim.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/magic_trace.cpython-310.pyc b/falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/magic_trace.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dd976224ca711016712442c27ce84a2c14e4222e
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/magic_trace.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/op_properties.cpython-310.pyc b/falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/op_properties.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..742a9c0832e5e7a8a25b35bbbe7aaf1992c1732f
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/op_properties.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/reference.cpython-310.pyc b/falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/reference.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6a0a55513fddbdb51bd522bf255cf89d0798a06c
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/reference.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/tree_map.cpython-310.pyc
b/falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/tree_map.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e33daf51a219b4c1281808bee8757a2e62fee9c6 Binary files /dev/null and b/falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/tree_map.cpython-310.pyc differ diff --git a/falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/wrap_type.cpython-310.pyc b/falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/wrap_type.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7936649e01764ee6a4c90baef93a274886d2cf8a Binary files /dev/null and b/falcon/lib/python3.10/site-packages/functorch/dim/__pycache__/wrap_type.cpython-310.pyc differ diff --git a/falcon/lib/python3.10/site-packages/functorch/dim/batch_tensor.py b/falcon/lib/python3.10/site-packages/functorch/dim/batch_tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..0fc17f2492d5344821071a726a0e3a0e9d5dea95 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/functorch/dim/batch_tensor.py @@ -0,0 +1,25 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. +from contextlib import contextmanager + +from torch._C._functorch import _vmap_add_layers, _vmap_remove_layers + +_enabled = False + + +@contextmanager +def _enable_layers(dims): + global _enabled + assert not _enabled + input = sorted((d._level, d.size) for d in dims if not isinstance(d, int)) + n = len(input) + try: + _vmap_add_layers(input) + _enabled = True + yield + finally: + _enabled = False + _vmap_remove_layers(n) diff --git a/falcon/lib/python3.10/site-packages/functorch/dim/delayed_mul_tensor.py b/falcon/lib/python3.10/site-packages/functorch/dim/delayed_mul_tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..3984a063885907141b56bdd2c6e8cc730c592cbb --- /dev/null +++ b/falcon/lib/python3.10/site-packages/functorch/dim/delayed_mul_tensor.py @@ -0,0 +1,77 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. +import torch + +from . 
import _Tensor, Tensor +from .reference import _dims, _enable_layers, llist, ltuple + + +class DelayedMulTensor(_Tensor): + def __init__(self, lhs, rhs): + self._lhs, self._rhs = lhs, rhs + self._data = None + self._levels_data = None + self._has_device = lhs._has_device or rhs._has_device + self._batchtensor_data = None + self._tensor_data = None + + @property + def _levels(self): + if self._levels_data is None: + levels = llist(self._lhs._levels) + for l in self._rhs._levels: + if l not in levels: + levels.append(l) + self._levels_data = ltuple(levels) + return self._levels_data + + @property + def _batchtensor(self): + if self._batchtensor_data is None: + with _enable_layers(self._levels): + print("bt multiply fallback") + self._batchtensor_data = self._lhs._batchtensor * self._rhs._batchtensor + return self._batchtensor_data + + @property + def _tensor(self): + if self._tensor_data is None: + self._tensor_data = Tensor.from_batched( + self._batchtensor, self._has_device + )._tensor + return self._tensor_data + + @property + def ndim(self): + return self._batchtensor.ndim + + @property + def dims(self): + return ltuple(super().dims) + + def sum(self, dim): + dims = _dims(dim, 0, False, False) + n = ord("a") + all_levels = self._levels + + def to_char(d): + return chr(n + all_levels.index(d)) + + plhs, levelslhs = self._lhs._tensor, self._lhs._levels + prhs, levelsrhs = self._rhs._tensor, self._rhs._levels + new_dims = tuple(d for d in self.dims if d not in dims) + new_levels = [l for l in self._levels if l not in dims] + fmt = "".join( + [ + *(to_char(d) for d in levelslhs), + ",", + *(to_char(d) for d in levelsrhs), + "->", + *(to_char(d) for d in new_levels), + ] + ) + result_data = torch.einsum(fmt, (plhs, prhs)) + return Tensor.from_positional(result_data, new_levels, True) diff --git a/falcon/lib/python3.10/site-packages/functorch/dim/dim.py b/falcon/lib/python3.10/site-packages/functorch/dim/dim.py new file mode 100644 index 0000000000000000000000000000000000000000..f8e34af96225f38ce59a4b9b400e399b30c42641 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/functorch/dim/dim.py @@ -0,0 +1,121 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. +import dis +import inspect + +from dataclasses import dataclass +from typing import Union + +from . import DimList + +_vmap_levels = [] + + +@dataclass +class LevelInfo: + level: int + alive: bool = True + + +class Dim: + def __init__(self, name: str, size: Union[None, int] = None): + self.name = name + self._size = None + self._vmap_level = None + if size is not None: + self.size = size + + def __del__(self): + if self._vmap_level is not None: + _vmap_active_levels[self._vmap_stack].alive = False # noqa: F821 + while ( + not _vmap_levels[-1].alive + and current_level() == _vmap_levels[-1].level # noqa: F821 + ): + _vmap_decrement_nesting() # noqa: F821 + _vmap_levels.pop() + + @property + def size(self): + assert self.is_bound + return self._size + + @size.setter + def size(self, size: int): + from . 
import DimensionBindError + + if self._size is None: + self._size = size + self._vmap_level = _vmap_increment_nesting(size, "same") # noqa: F821 + self._vmap_stack = len(_vmap_levels) + _vmap_levels.append(LevelInfo(self._vmap_level)) + + elif self._size != size: + raise DimensionBindError( + f"Dim '{self}' previously bound to a dimension of size {self._size} cannot bind to a dimension of size {size}" + ) + + @property + def is_bound(self): + return self._size is not None + + def __repr__(self): + return self.name + + +def extract_name(inst): + assert inst.opname == "STORE_FAST" or inst.opname == "STORE_NAME" + return inst.argval + + +_cache = {} + + +def dims(lists=0): + frame = inspect.currentframe() + assert frame is not None + calling_frame = frame.f_back + assert calling_frame is not None + code, lasti = calling_frame.f_code, calling_frame.f_lasti + key = (code, lasti) + if key not in _cache: + first = lasti // 2 + 1 + instructions = list(dis.get_instructions(calling_frame.f_code)) + unpack = instructions[first] + + if unpack.opname == "STORE_FAST" or unpack.opname == "STORE_NAME": + # just a single dim, not a list + name = unpack.argval + ctor = Dim if lists == 0 else DimList + _cache[key] = lambda: ctor(name=name) + else: + assert unpack.opname == "UNPACK_SEQUENCE" + ndims = unpack.argval + names = tuple( + extract_name(instructions[first + 1 + i]) for i in range(ndims) + ) + first_list = len(names) - lists + _cache[key] = lambda: tuple( + Dim(n) if i < first_list else DimList(name=n) + for i, n in enumerate(names) + ) + return _cache[key]() + + +def _dim_set(positional, arg): + def convert(a): + if isinstance(a, Dim): + return a + else: + assert isinstance(a, int) + return positional[a] + + if arg is None: + return positional + elif not isinstance(arg, (Dim, int)): + return tuple(convert(a) for a in arg) + else: + return (convert(arg),) diff --git a/falcon/lib/python3.10/site-packages/functorch/dim/reference.py b/falcon/lib/python3.10/site-packages/functorch/dim/reference.py new file mode 100644 index 0000000000000000000000000000000000000000..2e5f9f50901929509ce1d378826193b93c79fc32 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/functorch/dim/reference.py @@ -0,0 +1,645 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# reference python implementations for C ops +import torch + +from functorch._C import dim as _C +from . import op_properties +from .batch_tensor import _enable_layers +from .tree_map import tree_flatten, tree_map + +DimList = _C.DimList +import operator +from functools import reduce + + +# use dict to avoid writing C++ bindings for set +pointwise = set(op_properties.pointwise) + + +def prod(x): + return reduce(operator.mul, x, 1) + + +def _wrap_dim(d, N, keepdim): + from . import Dim + + if isinstance(d, Dim): + assert not keepdim, "cannot preserve first-class dimensions with keepdim=True" + return d + elif d >= 0: + return d - N + else: + return d + + +def _dims(d, N, keepdim, single_dim): + from . import Dim + + if isinstance(d, (Dim, int)): + return ltuple((_wrap_dim(d, N, keepdim),)) + assert not single_dim, f"expected a single dimension or int but found: {d}" + return ltuple(_wrap_dim(x, N, keepdim) for x in d) + + +def _bind_dims_to_size(lhs_size, rhs, lhs_debug): + from . 
import DimensionMismatchError
+
+    not_bound = tuple((i, r) for i, r in enumerate(rhs) if not r.is_bound)
+    if len(not_bound) == 1:
+        idx, d = not_bound[0]
+        rhs_so_far = prod(r.size for r in rhs if r.is_bound)
+        if lhs_size % rhs_so_far != 0:
+            rhs_s = tuple("?" if not r.is_bound else str(r.size) for r in rhs)
+            raise DimensionMismatchError(
+                f"inferred dimension does not evenly fit into larger dimension: {lhs_size} vs {rhs_s}"
+            )
+        new_size = lhs_size // rhs_so_far
+        d.size = new_size
+    elif len(not_bound) > 1:
+        rhs_s = tuple("?" if not r.is_bound else str(r.size) for r in rhs)
+        raise DimensionMismatchError(
+            f"cannot infer the size of two dimensions at once: {rhs} with sizes {rhs_s}"
+        )
+    else:
+        rhs_size = prod(r.size for r in rhs)
+        if lhs_size != rhs_size:
+            raise DimensionMismatchError(
+                f"Dimension sizes do not match ({lhs_size} != {rhs_size}) when matching {lhs_debug} to {rhs}"
+            )
+
+
+def _tensor_levels(inp):
+    from . import _Tensor
+
+    if isinstance(inp, _Tensor):
+        return inp._tensor, llist(inp._levels), inp._has_device
+    else:
+        return inp, llist(range(-inp.ndim, 0)), True
+
+
+def _match_levels(v, from_levels, to_levels):
+    view = []
+    permute = []
+    requires_view = False
+    size = v.size()
+    for t in to_levels:
+        try:
+            idx = from_levels.index(t)
+            permute.append(idx)
+            view.append(size[idx])
+        except ValueError:
+            view.append(1)
+            requires_view = True
+    if permute != list(range(len(permute))):
+        v = v.permute(*permute)
+    if requires_view:
+        v = v.view(*view)
+    return v
+
+
+# make a single dimension positional but do not permute it,
+# used to do multi-tensor operators where the dim being acted on
+# should not physically move if possible
+def _positional_no_permute(self, dim, expand_dim=False):
+    from . import Tensor
+
+    ptensor, levels = self._tensor, llist(self._levels)
+    try:
+        idx = levels.index(dim)
+    except ValueError:
+        if not expand_dim:
+            raise
+        idx = 0
+        ptensor = ptensor.expand(dim.size, *ptensor.size())
+        levels.insert(0, 0)
+    idx_batched = 0
+    for i in range(idx):
+        if isinstance(levels[i], int):
+            levels[i] -= 1
+            idx_batched += 1
+    levels[idx] = -idx_batched - 1
+    return Tensor.from_positional(ptensor, levels, self._has_device), idx_batched
+
+
+def seq(a, b):
+    from . import Dim
+
+    if isinstance(a, Dim) != isinstance(b, Dim):
+        return False
+    if isinstance(a, Dim):
+        return a is b
+    else:
+        return a == b
+
+
+class isin:
+    def __contains__(self, item):
+        for x in self:
+            if seq(item, x):
+                return True
+        return False
+
+    def index(self, item):
+        for i, x in enumerate(self):
+            if seq(item, x):
+                return i
+        raise ValueError
+
+
+class llist(isin, list):
+    pass
+
+
+class ltuple(isin, tuple):
+    pass
+
+
+empty_dict = {}
+
+
+@classmethod
+def __torch_function__(self, orig, cls, args, kwargs=empty_dict):
+    from . 
import _Tensor, Tensor, TensorLike + from .delayed_mul_tensor import DelayedMulTensor + + if orig is torch.Tensor.__mul__: + lhs, rhs = args + if ( + isinstance(lhs, _Tensor) + and isinstance(rhs, _Tensor) + and lhs.ndim == 0 + and rhs.ndim == 0 + ): + return DelayedMulTensor(lhs, rhs) + all_dims = llist() + flat_args, unflatten = tree_flatten((args, kwargs)) + device_holding_tensor = None + for f in flat_args: + if isinstance(f, _Tensor): + if f._has_device: + device_holding_tensor = f._batchtensor + for d in f.dims: + if d not in all_dims: + all_dims.append(d) + + def unwrap(t): + if isinstance(t, _Tensor): + r = t._batchtensor + if device_holding_tensor is not None and not t._has_device: + r = r.to(device=device_holding_tensor.device) + return r + return t + + if orig in pointwise: + result_levels = llist() + arg_levels = llist() + to_expand = [] + for i, f in enumerate(flat_args): + if isinstance(f, TensorLike): + ptensor, levels, _ = _tensor_levels(f) + if ( + isinstance(f, _Tensor) + and not f._has_device + and device_holding_tensor is not None + ): + ptensor = ptensor.to(device=device_holding_tensor.device) + flat_args[i] = ptensor + for l in levels: + if l not in result_levels: + result_levels.append(l) + to_expand.append((i, levels)) + + for i, levels in to_expand: + flat_args[i] = _match_levels(flat_args[i], levels, result_levels) + args, kwargs = unflatten(flat_args) + result = orig(*args, **kwargs) + + def wrap(t): + if isinstance(t, TensorLike): + return Tensor.from_positional( + t, result_levels, device_holding_tensor is not None + ) + return t + + return tree_map(wrap, result) + else: + + def wrap(t): + if isinstance(t, TensorLike): + return Tensor.from_batched(t, device_holding_tensor is not None) + return t + + with _enable_layers(all_dims): + print(f"batch_tensor for {orig}") + args, kwargs = unflatten(unwrap(f) for f in flat_args) + result = orig(*args, **kwargs) + # print("END", orig) + return tree_map(wrap, result) + + +def positional(self, *dims): + from . import Dim, DimensionBindError, Tensor + + ptensor, levels = self._tensor, llist(self._levels) + flat_dims = llist() + view = [] + needs_view = False + ndim = self.ndim + for d in dims: + if isinstance(d, DimList): + flat_dims.extend(d) + view.extend(e.size for e in d) + elif isinstance(d, Dim): + flat_dims.append(d) + view.append(d.size) + elif isinstance(d, int): + d = _wrap_dim(d, ndim, False) + flat_dims.append(d) + view.append(ptensor.size(d)) + else: + flat_dims.extend(d) + view.append(prod(e.size for e in d)) + needs_view = True + + permute = list(range(len(levels))) + nflat = len(flat_dims) + for i, d in enumerate(flat_dims): + try: + idx = levels.index(d) + except ValueError as e: + raise DimensionBindError( + f"tensor of dimensions {self.dims} does not contain dim {d}" + ) from e + p = permute[idx] + del levels[idx] + del permute[idx] + levels.insert(i, 0) + permute.insert(i, p) + ptensor = ptensor.permute(*permute) + seen = 0 + for i in range(len(levels) - 1, -1, -1): + if isinstance(levels[i], int): + seen += 1 + levels[i] = -seen + result = Tensor.from_positional(ptensor, levels, self._has_device) + if needs_view: + result = result.reshape(*view, *result.size()[len(flat_dims) :]) + return result + + +def _contains_dim(input): + from . 
import Dim
+
+    for i in input:
+        if isinstance(i, Dim):
+            return True
+
+
+def expand(self, *sizes):
+    if not _contains_dim(sizes):
+        return self.__torch_function__(torch.Tensor.expand, None, (self, *sizes))
+    dims = sizes
+    sizes = [d.size for d in dims] + [-1] * self.ndim
+    self = self.expand(*sizes)
+    return self[dims]
+
+
+_not_present = object()
+
+
+def _getarg(name, offset, args, kwargs, default):
+    if len(args) > offset:
+        return args[offset]
+    return kwargs.get(name, default)
+
+
+def _patcharg(name, offset, args, kwargs, value):
+    if len(args) > offset:
+        args[offset] = value
+    else:
+        kwargs[name] = value
+
+
+def _wrap(
+    orig, dim_offset=0, keepdim_offset=1, dim_name="dim", single_dim=False, reduce=True
+):
+    from . import Dim, Tensor, TensorLike
+
+    def fn(self, *args, **kwargs):
+        dim = _getarg(dim_name, dim_offset, args, kwargs, _not_present)
+        if dim is _not_present or (single_dim and not isinstance(dim, Dim)):
+            with _enable_layers(self.dims):
+                print(f"dim fallback batch_tensor for {orig}")
+                return Tensor.from_batched(
+                    orig(self._batchtensor, *args, **kwargs), self._has_device
+                )
+        keepdim = (
+            _getarg("keepdim", keepdim_offset, args, kwargs, False) if reduce else False
+        )
+        t, levels = self._tensor, llist(self._levels)
+        dims = _dims(dim, self._batchtensor.ndim, keepdim, single_dim)
+        dim_indices = tuple(levels.index(d) for d in dims)
+        if reduce and not keepdim:
+            new_levels = [l for i, l in enumerate(levels) if i not in dim_indices]
+        else:
+            new_levels = levels
+
+        if len(dim_indices) == 1:
+            dim_indices = dim_indices[
+                0
+            ]  # so that dims that really only take a single argument work...
+        args = list(args)
+        _patcharg(dim_name, dim_offset, args, kwargs, dim_indices)
+
+        def wrap(t):
+            if isinstance(t, TensorLike):
+                return Tensor.from_positional(t, new_levels, self._has_device)
+            return t
+
+        with _enable_layers(new_levels):
+            print(f"dim used batch_tensor for {orig}")
+            r = orig(t, *args, **kwargs)
+            return tree_map(wrap, r)
+
+    return fn
+
+
+def _def(name, *args, **kwargs):
+    from . import _Tensor
+
+    orig = getattr(torch.Tensor, name)
+    setattr(_Tensor, name, _wrap(orig, *args, **kwargs))
+
+
+no_slice = slice(None)
+
+_orig_getitem = torch.Tensor.__getitem__
+
+
+class dim_tracker:
+    def __init__(self):
+        self.dims = llist()
+        self.count = []
+
+    def record(self, d):
+        if d not in self.dims:
+            self.dims.append(d)
+            self.count.append(1)
+
+    def __getitem__(self, d):
+        return self.count[self.dims.index(d)]
+
+
+def t__getitem__(self, input):
+    from . import _Tensor, Dim, DimensionBindError, DimList, Tensor, TensorLike
+
+    # * bail to the original implementation if we have a single non-Dim tensor, or a non-tensor
+    # * locate ... or an unbound tensor list, and determine its size, bind dim list
+    #   (remember that None does not count to the total dim count)
+    # * bind simple dims and dim-packs to their sizes, count the number of uses of each dim,
+    #   produce the re-view if needed
+    # * for each single-use dim index, replace with no_slice and mark that it will be added
+    #   (keep track of whether we have to call super)
+    # * call super if needed
+    # * if we have dims to bind, bind them (it will help if we eliminated ... and None before)
+
+    # this handles bool indexing, as well as some other simple cases.
+
+    is_simple = (
+        not isinstance(input, Dim)
+        and not isinstance(input, (tuple, list))
+        and
+        # WAR for functorch bug where zero-dim tensors in getitem are not handled correctly.
+        not (isinstance(input, TensorLike) and input.ndim == 0)
+    )
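+
+    # A brief illustration of the split above (hypothetical tensor names): a boolean
+    # mask `t[t > 0]` or a plain int index `t[0]` counts as "simple" and is forwarded
+    # directly below, while `t[i]` with a first-class Dim `i` takes the full path.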
+
+    if is_simple:
+        if isinstance(self, _Tensor):
+            return _Tensor.__torch_function__(_orig_getitem, None, (self, input))
+        else:
+            return _orig_getitem(self, input)
+
+    # can further optimize this case
+    if not isinstance(input, tuple):
+        input = [input]
+    else:
+        input = list(input)
+
+    dims_indexed = 0
+    expanding_object = None
+    dimlists = []
+    for i, s in enumerate(input):
+        if s is ... or isinstance(s, DimList) and not s.is_bound:
+            if expanding_object is not None:
+                msg = (
+                    "at most one ... or unbound dimension list can exist in indexing list but"
+                    f" found 2 at offsets {i} and {expanding_object}"
+                )
+                raise DimensionBindError(msg)
+            expanding_object = i
+
+        if isinstance(s, DimList):
+            dims_indexed += len(s) if s.is_bound else 0
+            dimlists.append(i)
+        elif s is not None and s is not ...:
+            dims_indexed += 1
+
+    ndim = self.ndim
+    if dims_indexed > ndim:
+        raise IndexError(
+            f"at least {dims_indexed} indices were supplied but the tensor only has {ndim} dimensions."
+        )
+    if expanding_object is not None:
+        expanding_ndims = ndim - dims_indexed
+        obj = input[expanding_object]
+        if obj is ...:
+            input[expanding_object : expanding_object + 1] = [
+                no_slice
+            ] * expanding_ndims
+        else:
+            obj.bind_len(expanding_ndims)
+    # flatten the dimlists into the indexing
+    for i in reversed(dimlists):
+        input[i : i + 1] = input[i]
+    dims_indexed = 0
+    requires_view = False
+    size = self.size()
+    view_sizes = []
+    dims_seen = dim_tracker()
+
+    def add_dims(t):
+        if not isinstance(t, _Tensor):
+            return
+        for d in t.dims:
+            dims_seen.record(d)
+
+    add_dims(self)
+    dim_packs = []
+    for i, idx in enumerate(input):
+        if idx is None:
+            input[i] = no_slice
+            view_sizes.append(1)
+            requires_view = True
+        else:
+            sz = size[dims_indexed]
+            if isinstance(idx, Dim):
+                idx.size = sz
+                dims_seen.record(idx)
+                view_sizes.append(sz)
+            elif isinstance(idx, (tuple, list)) and idx and isinstance(idx[0], Dim):
+                for d in idx:
+                    dims_seen.record(d)
+                _bind_dims_to_size(sz, idx, f"offset {i}")
+                view_sizes.extend(d.size for d in idx)
+                requires_view = True
+                dim_packs.append(i)
+            else:
+                add_dims(idx)
+                view_sizes.append(sz)
+            dims_indexed += 1
+    if requires_view:
+        self = self.view(*view_sizes)
+    for i in reversed(dim_packs):
+        input[i : i + 1] = input[i]
+
+    # currently:
+    # input is flat, containing either Dim, or Tensor, or something valid for standard indexing
+    # self may have first-class dims as well.
+
+    # to index:
+    # drop the first class dims from self, they just become direct indices of their positions
+
+    # figure out the dimensions of the indexing tensors: union of all the dims in the tensors in the index.
+    # these dimensions will appear and need to be bound at the first place a tensor occurs
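+    # For example (an illustrative sketch): for `x` with one positional dim, `x[i]`
+    # binds Dim `i` to that dim's size, replaces the single-use index with `:` and
+    # records `i` as a result level, so no call into the original getitem is needed.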
+
+    if isinstance(self, _Tensor):
+        ptensor_self, levels = self._tensor, list(self._levels)
+        # indices to ptensor rather than self which has first-class dimensions
+        input_it = iter(input)
+        flat_inputs = [next(input_it) if isinstance(l, int) else l for l in levels]
+        has_device = self._has_device
+        to_pad = 0
+    else:
+        ptensor_self, flat_inputs = self, input
+        to_pad = ptensor_self.ndim - len(flat_inputs)
+        has_device = True
+
+    result_levels = []
+    index_levels = []
+    tensor_insert_point = None
+    to_expand = {}
+    requires_getindex = False
+    for i, inp in enumerate(flat_inputs):
+        if isinstance(inp, Dim) and dims_seen[inp] == 1:
+            flat_inputs[i] = no_slice
+            result_levels.append(inp)
+        elif isinstance(inp, TensorLike):
+            requires_getindex = True
+            if tensor_insert_point is None:
+                tensor_insert_point = len(result_levels)
+            ptensor, levels, _ = _tensor_levels(inp)
+            to_expand[i] = levels
+            flat_inputs[i] = ptensor
+            for l in levels:
+                if l not in index_levels:
+                    index_levels.append(l)
+        else:
+            requires_getindex = True
+            result_levels.append(0)
+
+    if tensor_insert_point is not None:
+        result_levels[tensor_insert_point:tensor_insert_point] = index_levels
+
+    for i, levels in to_expand.items():
+        flat_inputs[i] = _match_levels(flat_inputs[i], levels, index_levels)
+
+    if requires_getindex:
+        result = _orig_getitem(ptensor_self, flat_inputs)
+    else:
+        result = ptensor_self
+
+    next_positional = -1
+    if to_pad > 0:
+        result_levels.extend([0] * to_pad)
+    for i, r in enumerate(reversed(result_levels)):
+        if isinstance(r, int):
+            result_levels[-1 - i] = next_positional
+            next_positional -= 1
+
+    return Tensor.from_positional(result, result_levels, has_device)
+
+
+# XXX - dim is optional and can be the outer-most dimension...
+def stack(tensors, new_dim, dim=0, out=None):
+    if isinstance(dim, int):
+        return torch.stack(tensors, dim, out).index(dim, new_dim)
+    index = None
+    if out is not None:
+        out, index = _positional_no_permute(out, dim, expand_dim=True)
+    ptensors = []
+    for t in tensors:
+        pt, pi = _positional_no_permute(t, dim, expand_dim=True)
+        if index is not None and pi != index:
+            pt = pt.move_dim(pi, index)
+        else:
+            index = pi
+        ptensors.append(pt)
+    pr = torch.stack(ptensors, index, out=out)
+    return pr.index((index, index + 1), (new_dim, dim))
+
+
+_orig_split = torch.Tensor.split
+
+
+def split(self, split_size_or_sections, dim=0):
+    from . import _Tensor, Dim
+
+    if isinstance(split_size_or_sections, int) or any(
+        isinstance(t, int) for t in split_size_or_sections
+    ):
+        if isinstance(dim, Dim):
+            raise ValueError(
+                "when dim is specified as a Dim object, split sizes must also be dimensions."
+ ) + return _orig_split(self, split_size_or_sections, dim=dim) + + if isinstance(dim, Dim): + assert isinstance(self, _Tensor), f"Tensor does not have dimension {dim}" + self, dim = _positional_no_permute(self, dim) + + size = self.size(dim) + total_bound_size = 0 + unbound = [] + sizes = [] + for i, d in enumerate(split_size_or_sections): + if d.is_bound: + sizes.append(d.size) + total_bound_size += d.size + else: + sizes.append(0) + unbound.append(i) + + if unbound: + assert ( + total_bound_size <= size + ), f"result dimensions are larger than original: {total_bound_size} vs {size} ({split_size_or_sections})" + remaining_size = size - total_bound_size + chunk_size = -(-remaining_size // len(unbound)) + for u in unbound: + sz = min(chunk_size, remaining_size) + split_size_or_sections[u].size = sz + sizes[u] = sz + remaining_size -= sz + else: + assert ( + total_bound_size == size + ), f"result dimensions do not match original: {total_bound_size} vs {size} ({split_size_or_sections})" + return tuple( + t.index(dim, d) + for d, t in zip(split_size_or_sections, _orig_split(self, sizes, dim=dim)) + ) diff --git a/falcon/lib/python3.10/site-packages/functorch/dim/tree_map.py b/falcon/lib/python3.10/site-packages/functorch/dim/tree_map.py new file mode 100644 index 0000000000000000000000000000000000000000..1f02f02656f288f81b81bee61736567b2326500d --- /dev/null +++ b/falcon/lib/python3.10/site-packages/functorch/dim/tree_map.py @@ -0,0 +1,14 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from functorch._C import dim + +tree_flatten = dim.tree_flatten + + +def tree_map(fn, tree): + vs, unflatten = tree_flatten(tree) + return unflatten(fn(v) for v in vs) diff --git a/falcon/lib/python3.10/site-packages/functorch/dim/wrap_type.py b/falcon/lib/python3.10/site-packages/functorch/dim/wrap_type.py new file mode 100644 index 0000000000000000000000000000000000000000..e2146c4a21a144dc3942e304d1406ace47df0e57 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/functorch/dim/wrap_type.py @@ -0,0 +1,71 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
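+
+# Overview: wrap_type(use_c, to_patch, pattern, __torch_function__) copies every
+# function and property found on `pattern`'s MRO onto `to_patch`, rerouting each
+# call through the supplied __torch_function__ (see the implementation below).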
+ +from types import ( + BuiltinMethodType, + FunctionType, + GetSetDescriptorType, + MethodDescriptorType, + WrapperDescriptorType, +) + +from functorch._C import dim as _C + +_wrap_method = _C._wrap_method + +FUNC_TYPES = ( + FunctionType, + MethodDescriptorType, + BuiltinMethodType, + WrapperDescriptorType, +) +PROPERTY_TYPES = (GetSetDescriptorType, property) + + +def _py_wrap_method(orig, __torch_function__): + def impl(*args, **kwargs): + return __torch_function__(orig, None, args, kwargs) + + return impl + + +def wrap_type(use_c, to_patch, pattern, __torch_function__): + if use_c: + wrap_method = _wrap_method + else: + wrap_method = _py_wrap_method + + all = {} + for t in reversed(pattern.mro()[:-1]): # skip object + all.update(t.__dict__) + + def wrap_attr(orig): + return property(wrap_method(orig.__get__, __torch_function__)) + + for name, obj in all.items(): + if name in ( + "__dict__", + "__new__", + "__init__", + "__repr__", + "__weakref__", + "__doc__", + "__module__", + "__dir__", + ): + continue + + # skip things that have been overloaded + # things that come from object like `__eq__` still need to be patched, however. + if hasattr(to_patch, name) and getattr(to_patch, name) is not getattr( + object, name, None + ): + continue + + if isinstance(obj, FUNC_TYPES): + setattr(to_patch, name, wrap_method(obj, __torch_function__)) + elif isinstance(obj, PROPERTY_TYPES): + setattr(to_patch, name, wrap_attr(obj)) diff --git a/falcon/lib/python3.10/site-packages/functorch/experimental/__init__.py b/falcon/lib/python3.10/site-packages/functorch/experimental/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..23fc8381cc2358880f935064edd7eeff9766fec6 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/functorch/experimental/__init__.py @@ -0,0 +1,6 @@ +# PyTorch forward-mode is not mature yet +from torch._functorch.apis import chunk_vmap +from torch._functorch.batch_norm_replacement import replace_all_batch_norm_modules_ +from torch._functorch.eager_transforms import hessian, jacfwd, jvp + +from functorch import functionalize diff --git a/falcon/lib/python3.10/site-packages/functorch/experimental/__pycache__/ops.cpython-310.pyc b/falcon/lib/python3.10/site-packages/functorch/experimental/__pycache__/ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75ff6507bb45c871dc531ac0814124da2c44866c Binary files /dev/null and b/falcon/lib/python3.10/site-packages/functorch/experimental/__pycache__/ops.cpython-310.pyc differ diff --git a/falcon/lib/python3.10/site-packages/functorch/experimental/control_flow.py b/falcon/lib/python3.10/site-packages/functorch/experimental/control_flow.py new file mode 100644 index 0000000000000000000000000000000000000000..e24fc6142820013002f6cbc1d6f85e7e132aade8 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/functorch/experimental/control_flow.py @@ -0,0 +1,8 @@ +from torch import cond # noqa: F401 +from torch._higher_order_ops.cond import UnsupportedAliasMutationException # noqa: F401 + +from torch._higher_order_ops.map import ( # noqa: F401 + _stack_pytree, + _unstack_pytree, + map, +) diff --git a/falcon/lib/python3.10/site-packages/functorch/experimental/ops.py b/falcon/lib/python3.10/site-packages/functorch/experimental/ops.py new file mode 100644 index 0000000000000000000000000000000000000000..7a502ef2b002cd824e7b67d08fccac872b313110 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/functorch/experimental/ops.py @@ -0,0 +1 @@ +from torch._ops import HigherOrderOperator # 
noqa: F401
diff --git a/falcon/lib/python3.10/site-packages/propcache/__init__.py b/falcon/lib/python3.10/site-packages/propcache/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..abf4a57b6f33bf71109131166cf57242f5c09663
--- /dev/null
+++ b/falcon/lib/python3.10/site-packages/propcache/__init__.py
@@ -0,0 +1,32 @@
+"""propcache: An accelerated property cache for Python classes."""
+
+from typing import TYPE_CHECKING
+
+_PUBLIC_API = ("cached_property", "under_cached_property")
+
+__version__ = "0.2.1"
+__all__ = ()
+
+# Imports have moved to `propcache.api` in 0.2.0+.
+# This module is now a facade for the API.
+if TYPE_CHECKING:
+    from .api import cached_property as cached_property  # noqa: F401
+    from .api import under_cached_property as under_cached_property  # noqa: F401
+
+
+def _import_facade(attr: str) -> object:
+    """Import the public API from the `api` module."""
+    if attr in _PUBLIC_API:
+        from . import api  # pylint: disable=import-outside-toplevel
+
+        return getattr(api, attr)
+    raise AttributeError(f"module '{__package__}' has no attribute '{attr}'")
+
+
+def _dir_facade() -> list[str]:
+    """Include the public API in the module's dir() output."""
+    return [*_PUBLIC_API, *globals().keys()]
+
+
+__getattr__ = _import_facade
+__dir__ = _dir_facade
diff --git a/falcon/lib/python3.10/site-packages/propcache/__pycache__/__init__.cpython-310.pyc b/falcon/lib/python3.10/site-packages/propcache/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0e52c6d7a4dc91c94f818b23689277a0cbb95d48
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/propcache/__pycache__/__init__.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/propcache/__pycache__/_helpers.cpython-310.pyc b/falcon/lib/python3.10/site-packages/propcache/__pycache__/_helpers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e08795faee70d7ca9656f3b1b3e1bad19e177627
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/propcache/__pycache__/_helpers.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/propcache/__pycache__/_helpers_py.cpython-310.pyc b/falcon/lib/python3.10/site-packages/propcache/__pycache__/_helpers_py.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2f27426ae17d4b9005a33122b1516ba8fbfd28f5
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/propcache/__pycache__/_helpers_py.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/propcache/api.py b/falcon/lib/python3.10/site-packages/propcache/api.py
new file mode 100644
index 0000000000000000000000000000000000000000..22389e6337f8f77681b61de5e45d1ae6d474d39b
--- /dev/null
+++ b/falcon/lib/python3.10/site-packages/propcache/api.py
@@ -0,0 +1,8 @@
+"""Public API of the property caching library."""
+
+from ._helpers import cached_property, under_cached_property
+
+__all__ = (
+    "cached_property",
+    "under_cached_property",
+)
diff --git a/falcon/lib/python3.10/site-packages/pydantic/color.py b/falcon/lib/python3.10/site-packages/pydantic/color.py
new file mode 100644
index 0000000000000000000000000000000000000000..a61d3520b5d92365b38487dca3f955d42a976483
--- /dev/null
+++ b/falcon/lib/python3.10/site-packages/pydantic/color.py
@@ -0,0 +1,494 @@
+"""
+Color definitions are used as per CSS3 specification:
+http://www.w3.org/TR/css3-color/#svg-color
+
+A few colors have multiple names referring to the same colors, e.g. `grey` and `gray` or `aqua` and `cyan`.
+
+In these cases the LAST color when sorted alphabetically takes precedence,
+e.g. Color((0, 255, 255)).as_named() == 'cyan' because "cyan" comes after "aqua".
+"""
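+
+# Illustrative examples (not exhaustive):
+#   Color('ff0000').as_named() == 'red'
+#   Color((0, 255, 255)).as_named() == 'cyan'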
+import math
+import re
+from colorsys import hls_to_rgb, rgb_to_hls
+from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union, cast
+
+from pydantic.errors import ColorError
+from pydantic.utils import Representation, almost_equal_floats
+
+if TYPE_CHECKING:
+    from pydantic.typing import CallableGenerator, ReprArgs
+
+ColorTuple = Union[Tuple[int, int, int], Tuple[int, int, int, float]]
+ColorType = Union[ColorTuple, str]
+HslColorTuple = Union[Tuple[float, float, float], Tuple[float, float, float, float]]
+
+
+class RGBA:
+    """
+    Internal use only as a representation of a color.
+    """
+
+    __slots__ = 'r', 'g', 'b', 'alpha', '_tuple'
+
+    def __init__(self, r: float, g: float, b: float, alpha: Optional[float]):
+        self.r = r
+        self.g = g
+        self.b = b
+        self.alpha = alpha
+
+        self._tuple: Tuple[float, float, float, Optional[float]] = (r, g, b, alpha)
+
+    def __getitem__(self, item: Any) -> Any:
+        return self._tuple[item]
+
+
+# these are not compiled here to avoid import slowdown, they'll be compiled the first time they're used, then cached
+r_hex_short = r'\s*(?:#|0x)?([0-9a-f])([0-9a-f])([0-9a-f])([0-9a-f])?\s*'
+r_hex_long = r'\s*(?:#|0x)?([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})?\s*'
+_r_255 = r'(\d{1,3}(?:\.\d+)?)'
+_r_comma = r'\s*,\s*'
+r_rgb = fr'\s*rgb\(\s*{_r_255}{_r_comma}{_r_255}{_r_comma}{_r_255}\)\s*'
+_r_alpha = r'(\d(?:\.\d+)?|\.\d+|\d{1,2}%)'
+r_rgba = fr'\s*rgba\(\s*{_r_255}{_r_comma}{_r_255}{_r_comma}{_r_255}{_r_comma}{_r_alpha}\s*\)\s*'
+_r_h = r'(-?\d+(?:\.\d+)?|-?\.\d+)(deg|rad|turn)?'
+_r_sl = r'(\d{1,3}(?:\.\d+)?)%'
+r_hsl = fr'\s*hsl\(\s*{_r_h}{_r_comma}{_r_sl}{_r_comma}{_r_sl}\s*\)\s*'
+r_hsla = fr'\s*hsl\(\s*{_r_h}{_r_comma}{_r_sl}{_r_comma}{_r_sl}{_r_comma}{_r_alpha}\s*\)\s*'
+
+# colors where the two hex characters are the same, if all colors match this the short version of hex colors can be used
+repeat_colors = {int(c * 2, 16) for c in '0123456789abcdef'}
+rads = 2 * math.pi
+
+
+class Color(Representation):
+    __slots__ = '_original', '_rgba'
+
+    def __init__(self, value: ColorType) -> None:
+        self._rgba: RGBA
+        self._original: ColorType
+        if isinstance(value, (tuple, list)):
+            self._rgba = parse_tuple(value)
+        elif isinstance(value, str):
+            self._rgba = parse_str(value)
+        elif isinstance(value, Color):
+            self._rgba = value._rgba
+            value = value._original
+        else:
+            raise ColorError(reason='value must be a tuple, list or string')
+
+        # if we've got here value must be a valid color
+        self._original = value
+
+    @classmethod
+    def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
+        field_schema.update(type='string', format='color')
+
+    def original(self) -> ColorType:
+        """
+        Original value passed to Color
+        """
+        return self._original
+
+    def as_named(self, *, fallback: bool = False) -> str:
+        if self._rgba.alpha is None:
+            rgb = cast(Tuple[int, int, int], self.as_rgb_tuple())
+            try:
+                return COLORS_BY_VALUE[rgb]
+            except KeyError as e:
+                if fallback:
+                    return self.as_hex()
+                else:
+                    raise ValueError('no named color found, use fallback=True, as_hex() or as_rgb()') from e
+        else:
+            return self.as_hex()
+
+    def as_hex(self) -> str:
+        """
+        Hex string representing the color; it can be 3, 4, 6 or 8 characters depending on
+        whether a "short" representation of the color is possible and whether there's an alpha channel.
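+
+        For example (illustrative): white is '#fff' (all pairs repeat, no alpha), while
+        white at 50% alpha is '#ffffff80' (0x80 is not a repeated pair, so the long form is used).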
+ """ + values = [float_to_255(c) for c in self._rgba[:3]] + if self._rgba.alpha is not None: + values.append(float_to_255(self._rgba.alpha)) + + as_hex = ''.join(f'{v:02x}' for v in values) + if all(c in repeat_colors for c in values): + as_hex = ''.join(as_hex[c] for c in range(0, len(as_hex), 2)) + return '#' + as_hex + + def as_rgb(self) -> str: + """ + Color as an rgb(, , ) or rgba(, , , ) string. + """ + if self._rgba.alpha is None: + return f'rgb({float_to_255(self._rgba.r)}, {float_to_255(self._rgba.g)}, {float_to_255(self._rgba.b)})' + else: + return ( + f'rgba({float_to_255(self._rgba.r)}, {float_to_255(self._rgba.g)}, {float_to_255(self._rgba.b)}, ' + f'{round(self._alpha_float(), 2)})' + ) + + def as_rgb_tuple(self, *, alpha: Optional[bool] = None) -> ColorTuple: + """ + Color as an RGB or RGBA tuple; red, green and blue are in the range 0 to 255, alpha if included is + in the range 0 to 1. + + :param alpha: whether to include the alpha channel, options are + None - (default) include alpha only if it's set (e.g. not None) + True - always include alpha, + False - always omit alpha, + """ + r, g, b = (float_to_255(c) for c in self._rgba[:3]) + if alpha is None: + if self._rgba.alpha is None: + return r, g, b + else: + return r, g, b, self._alpha_float() + elif alpha: + return r, g, b, self._alpha_float() + else: + # alpha is False + return r, g, b + + def as_hsl(self) -> str: + """ + Color as an hsl(, , ) or hsl(, , , ) string. + """ + if self._rgba.alpha is None: + h, s, li = self.as_hsl_tuple(alpha=False) # type: ignore + return f'hsl({h * 360:0.0f}, {s:0.0%}, {li:0.0%})' + else: + h, s, li, a = self.as_hsl_tuple(alpha=True) # type: ignore + return f'hsl({h * 360:0.0f}, {s:0.0%}, {li:0.0%}, {round(a, 2)})' + + def as_hsl_tuple(self, *, alpha: Optional[bool] = None) -> HslColorTuple: + """ + Color as an HSL or HSLA tuple, e.g. hue, saturation, lightness and optionally alpha; all elements are in + the range 0 to 1. + + NOTE: this is HSL as used in HTML and most other places, not HLS as used in python's colorsys. + + :param alpha: whether to include the alpha channel, options are + None - (default) include alpha only if it's set (e.g. not None) + True - always include alpha, + False - always omit alpha, + """ + h, l, s = rgb_to_hls(self._rgba.r, self._rgba.g, self._rgba.b) + if alpha is None: + if self._rgba.alpha is None: + return h, s, l + else: + return h, s, l, self._alpha_float() + if alpha: + return h, s, l, self._alpha_float() + else: + # alpha is False + return h, s, l + + def _alpha_float(self) -> float: + return 1 if self._rgba.alpha is None else self._rgba.alpha + + @classmethod + def __get_validators__(cls) -> 'CallableGenerator': + yield cls + + def __str__(self) -> str: + return self.as_named(fallback=True) + + def __repr_args__(self) -> 'ReprArgs': + return [(None, self.as_named(fallback=True))] + [('rgb', self.as_rgb_tuple())] # type: ignore + + def __eq__(self, other: Any) -> bool: + return isinstance(other, Color) and self.as_rgb_tuple() == other.as_rgb_tuple() + + def __hash__(self) -> int: + return hash(self.as_rgb_tuple()) + + +def parse_tuple(value: Tuple[Any, ...]) -> RGBA: + """ + Parse a tuple or list as a color. 
+ """ + if len(value) == 3: + r, g, b = (parse_color_value(v) for v in value) + return RGBA(r, g, b, None) + elif len(value) == 4: + r, g, b = (parse_color_value(v) for v in value[:3]) + return RGBA(r, g, b, parse_float_alpha(value[3])) + else: + raise ColorError(reason='tuples must have length 3 or 4') + + +def parse_str(value: str) -> RGBA: + """ + Parse a string to an RGBA tuple, trying the following formats (in this order): + * named color, see COLORS_BY_NAME below + * hex short eg. `fff` (prefix can be `#`, `0x` or nothing) + * hex long eg. `ffffff` (prefix can be `#`, `0x` or nothing) + * `rgb(, , ) ` + * `rgba(, , , )` + """ + value_lower = value.lower() + try: + r, g, b = COLORS_BY_NAME[value_lower] + except KeyError: + pass + else: + return ints_to_rgba(r, g, b, None) + + m = re.fullmatch(r_hex_short, value_lower) + if m: + *rgb, a = m.groups() + r, g, b = (int(v * 2, 16) for v in rgb) + if a: + alpha: Optional[float] = int(a * 2, 16) / 255 + else: + alpha = None + return ints_to_rgba(r, g, b, alpha) + + m = re.fullmatch(r_hex_long, value_lower) + if m: + *rgb, a = m.groups() + r, g, b = (int(v, 16) for v in rgb) + if a: + alpha = int(a, 16) / 255 + else: + alpha = None + return ints_to_rgba(r, g, b, alpha) + + m = re.fullmatch(r_rgb, value_lower) + if m: + return ints_to_rgba(*m.groups(), None) # type: ignore + + m = re.fullmatch(r_rgba, value_lower) + if m: + return ints_to_rgba(*m.groups()) # type: ignore + + m = re.fullmatch(r_hsl, value_lower) + if m: + h, h_units, s, l_ = m.groups() + return parse_hsl(h, h_units, s, l_) + + m = re.fullmatch(r_hsla, value_lower) + if m: + h, h_units, s, l_, a = m.groups() + return parse_hsl(h, h_units, s, l_, parse_float_alpha(a)) + + raise ColorError(reason='string not recognised as a valid color') + + +def ints_to_rgba(r: Union[int, str], g: Union[int, str], b: Union[int, str], alpha: Optional[float]) -> RGBA: + return RGBA(parse_color_value(r), parse_color_value(g), parse_color_value(b), parse_float_alpha(alpha)) + + +def parse_color_value(value: Union[int, str], max_val: int = 255) -> float: + """ + Parse a value checking it's a valid int in the range 0 to max_val and divide by max_val to give a number + in the range 0 to 1 + """ + try: + color = float(value) + except ValueError: + raise ColorError(reason='color values must be a valid number') + if 0 <= color <= max_val: + return color / max_val + else: + raise ColorError(reason=f'color values must be in the range 0 to {max_val}') + + +def parse_float_alpha(value: Union[None, str, float, int]) -> Optional[float]: + """ + Parse a value checking it's a valid float in the range 0 to 1 + """ + if value is None: + return None + try: + if isinstance(value, str) and value.endswith('%'): + alpha = float(value[:-1]) / 100 + else: + alpha = float(value) + except ValueError: + raise ColorError(reason='alpha values must be a valid float') + + if almost_equal_floats(alpha, 1): + return None + elif 0 <= alpha <= 1: + return alpha + else: + raise ColorError(reason='alpha values must be in the range 0 to 1') + + +def parse_hsl(h: str, h_units: str, sat: str, light: str, alpha: Optional[float] = None) -> RGBA: + """ + Parse raw hue, saturation, lightness and alpha values and convert to RGBA. 
+ """ + s_value, l_value = parse_color_value(sat, 100), parse_color_value(light, 100) + + h_value = float(h) + if h_units in {None, 'deg'}: + h_value = h_value % 360 / 360 + elif h_units == 'rad': + h_value = h_value % rads / rads + else: + # turns + h_value = h_value % 1 + + r, g, b = hls_to_rgb(h_value, l_value, s_value) + return RGBA(r, g, b, alpha) + + +def float_to_255(c: float) -> int: + return int(round(c * 255)) + + +COLORS_BY_NAME = { + 'aliceblue': (240, 248, 255), + 'antiquewhite': (250, 235, 215), + 'aqua': (0, 255, 255), + 'aquamarine': (127, 255, 212), + 'azure': (240, 255, 255), + 'beige': (245, 245, 220), + 'bisque': (255, 228, 196), + 'black': (0, 0, 0), + 'blanchedalmond': (255, 235, 205), + 'blue': (0, 0, 255), + 'blueviolet': (138, 43, 226), + 'brown': (165, 42, 42), + 'burlywood': (222, 184, 135), + 'cadetblue': (95, 158, 160), + 'chartreuse': (127, 255, 0), + 'chocolate': (210, 105, 30), + 'coral': (255, 127, 80), + 'cornflowerblue': (100, 149, 237), + 'cornsilk': (255, 248, 220), + 'crimson': (220, 20, 60), + 'cyan': (0, 255, 255), + 'darkblue': (0, 0, 139), + 'darkcyan': (0, 139, 139), + 'darkgoldenrod': (184, 134, 11), + 'darkgray': (169, 169, 169), + 'darkgreen': (0, 100, 0), + 'darkgrey': (169, 169, 169), + 'darkkhaki': (189, 183, 107), + 'darkmagenta': (139, 0, 139), + 'darkolivegreen': (85, 107, 47), + 'darkorange': (255, 140, 0), + 'darkorchid': (153, 50, 204), + 'darkred': (139, 0, 0), + 'darksalmon': (233, 150, 122), + 'darkseagreen': (143, 188, 143), + 'darkslateblue': (72, 61, 139), + 'darkslategray': (47, 79, 79), + 'darkslategrey': (47, 79, 79), + 'darkturquoise': (0, 206, 209), + 'darkviolet': (148, 0, 211), + 'deeppink': (255, 20, 147), + 'deepskyblue': (0, 191, 255), + 'dimgray': (105, 105, 105), + 'dimgrey': (105, 105, 105), + 'dodgerblue': (30, 144, 255), + 'firebrick': (178, 34, 34), + 'floralwhite': (255, 250, 240), + 'forestgreen': (34, 139, 34), + 'fuchsia': (255, 0, 255), + 'gainsboro': (220, 220, 220), + 'ghostwhite': (248, 248, 255), + 'gold': (255, 215, 0), + 'goldenrod': (218, 165, 32), + 'gray': (128, 128, 128), + 'green': (0, 128, 0), + 'greenyellow': (173, 255, 47), + 'grey': (128, 128, 128), + 'honeydew': (240, 255, 240), + 'hotpink': (255, 105, 180), + 'indianred': (205, 92, 92), + 'indigo': (75, 0, 130), + 'ivory': (255, 255, 240), + 'khaki': (240, 230, 140), + 'lavender': (230, 230, 250), + 'lavenderblush': (255, 240, 245), + 'lawngreen': (124, 252, 0), + 'lemonchiffon': (255, 250, 205), + 'lightblue': (173, 216, 230), + 'lightcoral': (240, 128, 128), + 'lightcyan': (224, 255, 255), + 'lightgoldenrodyellow': (250, 250, 210), + 'lightgray': (211, 211, 211), + 'lightgreen': (144, 238, 144), + 'lightgrey': (211, 211, 211), + 'lightpink': (255, 182, 193), + 'lightsalmon': (255, 160, 122), + 'lightseagreen': (32, 178, 170), + 'lightskyblue': (135, 206, 250), + 'lightslategray': (119, 136, 153), + 'lightslategrey': (119, 136, 153), + 'lightsteelblue': (176, 196, 222), + 'lightyellow': (255, 255, 224), + 'lime': (0, 255, 0), + 'limegreen': (50, 205, 50), + 'linen': (250, 240, 230), + 'magenta': (255, 0, 255), + 'maroon': (128, 0, 0), + 'mediumaquamarine': (102, 205, 170), + 'mediumblue': (0, 0, 205), + 'mediumorchid': (186, 85, 211), + 'mediumpurple': (147, 112, 219), + 'mediumseagreen': (60, 179, 113), + 'mediumslateblue': (123, 104, 238), + 'mediumspringgreen': (0, 250, 154), + 'mediumturquoise': (72, 209, 204), + 'mediumvioletred': (199, 21, 133), + 'midnightblue': (25, 25, 112), + 'mintcream': (245, 255, 250), + 'mistyrose': (255, 228, 
225), + 'moccasin': (255, 228, 181), + 'navajowhite': (255, 222, 173), + 'navy': (0, 0, 128), + 'oldlace': (253, 245, 230), + 'olive': (128, 128, 0), + 'olivedrab': (107, 142, 35), + 'orange': (255, 165, 0), + 'orangered': (255, 69, 0), + 'orchid': (218, 112, 214), + 'palegoldenrod': (238, 232, 170), + 'palegreen': (152, 251, 152), + 'paleturquoise': (175, 238, 238), + 'palevioletred': (219, 112, 147), + 'papayawhip': (255, 239, 213), + 'peachpuff': (255, 218, 185), + 'peru': (205, 133, 63), + 'pink': (255, 192, 203), + 'plum': (221, 160, 221), + 'powderblue': (176, 224, 230), + 'purple': (128, 0, 128), + 'red': (255, 0, 0), + 'rosybrown': (188, 143, 143), + 'royalblue': (65, 105, 225), + 'saddlebrown': (139, 69, 19), + 'salmon': (250, 128, 114), + 'sandybrown': (244, 164, 96), + 'seagreen': (46, 139, 87), + 'seashell': (255, 245, 238), + 'sienna': (160, 82, 45), + 'silver': (192, 192, 192), + 'skyblue': (135, 206, 235), + 'slateblue': (106, 90, 205), + 'slategray': (112, 128, 144), + 'slategrey': (112, 128, 144), + 'snow': (255, 250, 250), + 'springgreen': (0, 255, 127), + 'steelblue': (70, 130, 180), + 'tan': (210, 180, 140), + 'teal': (0, 128, 128), + 'thistle': (216, 191, 216), + 'tomato': (255, 99, 71), + 'turquoise': (64, 224, 208), + 'violet': (238, 130, 238), + 'wheat': (245, 222, 179), + 'white': (255, 255, 255), + 'whitesmoke': (245, 245, 245), + 'yellow': (255, 255, 0), + 'yellowgreen': (154, 205, 50), +} + +COLORS_BY_VALUE = {v: k for k, v in COLORS_BY_NAME.items()} diff --git a/falcon/lib/python3.10/site-packages/pydantic/parse.cpython-310-x86_64-linux-gnu.so b/falcon/lib/python3.10/site-packages/pydantic/parse.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..205c76e7c1f4f3eaa738c91f9f83d8e561ea1313 Binary files /dev/null and b/falcon/lib/python3.10/site-packages/pydantic/parse.cpython-310-x86_64-linux-gnu.so differ diff --git a/falcon/lib/python3.10/site-packages/pydantic/types.py b/falcon/lib/python3.10/site-packages/pydantic/types.py new file mode 100644 index 0000000000000000000000000000000000000000..b2366835e8ba6ed2e3c033842cf527fe2161e2ef --- /dev/null +++ b/falcon/lib/python3.10/site-packages/pydantic/types.py @@ -0,0 +1,1205 @@ +import abc +import math +import re +import warnings +from datetime import date +from decimal import Decimal, InvalidOperation +from enum import Enum +from pathlib import Path +from types import new_class +from typing import ( + TYPE_CHECKING, + Any, + Callable, + ClassVar, + Dict, + FrozenSet, + List, + Optional, + Pattern, + Set, + Tuple, + Type, + TypeVar, + Union, + cast, + overload, +) +from uuid import UUID +from weakref import WeakSet + +from pydantic import errors +from pydantic.datetime_parse import parse_date +from pydantic.utils import import_string, update_not_none +from pydantic.validators import ( + bytes_validator, + constr_length_validator, + constr_lower, + constr_strip_whitespace, + constr_upper, + decimal_validator, + float_finite_validator, + float_validator, + frozenset_validator, + int_validator, + list_validator, + number_multiple_validator, + number_size_validator, + path_exists_validator, + path_validator, + set_validator, + str_validator, + strict_bytes_validator, + strict_float_validator, + strict_int_validator, + strict_str_validator, +) + +__all__ = [ + 'NoneStr', + 'NoneBytes', + 'StrBytes', + 'NoneStrBytes', + 'StrictStr', + 'ConstrainedBytes', + 'conbytes', + 'ConstrainedList', + 'conlist', + 'ConstrainedSet', + 'conset', + 'ConstrainedFrozenSet', + 
'confrozenset', + 'ConstrainedStr', + 'constr', + 'PyObject', + 'ConstrainedInt', + 'conint', + 'PositiveInt', + 'NegativeInt', + 'NonNegativeInt', + 'NonPositiveInt', + 'ConstrainedFloat', + 'confloat', + 'PositiveFloat', + 'NegativeFloat', + 'NonNegativeFloat', + 'NonPositiveFloat', + 'FiniteFloat', + 'ConstrainedDecimal', + 'condecimal', + 'UUID1', + 'UUID3', + 'UUID4', + 'UUID5', + 'FilePath', + 'DirectoryPath', + 'Json', + 'JsonWrapper', + 'SecretField', + 'SecretStr', + 'SecretBytes', + 'StrictBool', + 'StrictBytes', + 'StrictInt', + 'StrictFloat', + 'PaymentCardNumber', + 'ByteSize', + 'PastDate', + 'FutureDate', + 'ConstrainedDate', + 'condate', +] + +NoneStr = Optional[str] +NoneBytes = Optional[bytes] +StrBytes = Union[str, bytes] +NoneStrBytes = Optional[StrBytes] +OptionalInt = Optional[int] +OptionalIntFloat = Union[OptionalInt, float] +OptionalIntFloatDecimal = Union[OptionalIntFloat, Decimal] +OptionalDate = Optional[date] +StrIntFloat = Union[str, int, float] + +if TYPE_CHECKING: + from typing_extensions import Annotated + + from pydantic.dataclasses import Dataclass + from pydantic.main import BaseModel + from pydantic.typing import CallableGenerator + + ModelOrDc = Type[Union[BaseModel, Dataclass]] + +T = TypeVar('T') +_DEFINED_TYPES: 'WeakSet[type]' = WeakSet() + + +@overload +def _registered(typ: Type[T]) -> Type[T]: + pass + + +@overload +def _registered(typ: 'ConstrainedNumberMeta') -> 'ConstrainedNumberMeta': + pass + + +def _registered(typ: Union[Type[T], 'ConstrainedNumberMeta']) -> Union[Type[T], 'ConstrainedNumberMeta']: + # In order to generate valid examples of constrained types, Hypothesis needs + # to inspect the type object - so we keep a weakref to each contype object + # until it can be registered. When (or if) our Hypothesis plugin is loaded, + # it monkeypatches this function. + # If Hypothesis is never used, the total effect is to keep a weak reference + # which has minimal memory usage and doesn't even affect garbage collection. + _DEFINED_TYPES.add(typ) + return typ + + +class ConstrainedNumberMeta(type): + def __new__(cls, name: str, bases: Any, dct: Dict[str, Any]) -> 'ConstrainedInt': # type: ignore + new_cls = cast('ConstrainedInt', type.__new__(cls, name, bases, dct)) + + if new_cls.gt is not None and new_cls.ge is not None: + raise errors.ConfigError('bounds gt and ge cannot be specified at the same time') + if new_cls.lt is not None and new_cls.le is not None: + raise errors.ConfigError('bounds lt and le cannot be specified at the same time') + + return _registered(new_cls) # type: ignore + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BOOLEAN TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +if TYPE_CHECKING: + StrictBool = bool +else: + + class StrictBool(int): + """ + StrictBool to allow for bools which are not type-coerced. + """ + + @classmethod + def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: + field_schema.update(type='boolean') + + @classmethod + def __get_validators__(cls) -> 'CallableGenerator': + yield cls.validate + + @classmethod + def validate(cls, value: Any) -> bool: + """ + Ensure that we only allow bools. 
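+
+            Unlike a plain `bool` field, no coercion from int or str is attempted;
+            e.g. 1, 'true' and 'yes' are all rejected.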
+ """ + if isinstance(value, bool): + return value + + raise errors.StrictBoolError() + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ INTEGER TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + +class ConstrainedInt(int, metaclass=ConstrainedNumberMeta): + strict: bool = False + gt: OptionalInt = None + ge: OptionalInt = None + lt: OptionalInt = None + le: OptionalInt = None + multiple_of: OptionalInt = None + + @classmethod + def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: + update_not_none( + field_schema, + exclusiveMinimum=cls.gt, + exclusiveMaximum=cls.lt, + minimum=cls.ge, + maximum=cls.le, + multipleOf=cls.multiple_of, + ) + + @classmethod + def __get_validators__(cls) -> 'CallableGenerator': + yield strict_int_validator if cls.strict else int_validator + yield number_size_validator + yield number_multiple_validator + + +def conint( + *, + strict: bool = False, + gt: Optional[int] = None, + ge: Optional[int] = None, + lt: Optional[int] = None, + le: Optional[int] = None, + multiple_of: Optional[int] = None, +) -> Type[int]: + # use kwargs then define conf in a dict to aid with IDE type hinting + namespace = dict(strict=strict, gt=gt, ge=ge, lt=lt, le=le, multiple_of=multiple_of) + return type('ConstrainedIntValue', (ConstrainedInt,), namespace) + + +if TYPE_CHECKING: + PositiveInt = int + NegativeInt = int + NonPositiveInt = int + NonNegativeInt = int + StrictInt = int +else: + + class PositiveInt(ConstrainedInt): + gt = 0 + + class NegativeInt(ConstrainedInt): + lt = 0 + + class NonPositiveInt(ConstrainedInt): + le = 0 + + class NonNegativeInt(ConstrainedInt): + ge = 0 + + class StrictInt(ConstrainedInt): + strict = True + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FLOAT TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + +class ConstrainedFloat(float, metaclass=ConstrainedNumberMeta): + strict: bool = False + gt: OptionalIntFloat = None + ge: OptionalIntFloat = None + lt: OptionalIntFloat = None + le: OptionalIntFloat = None + multiple_of: OptionalIntFloat = None + allow_inf_nan: Optional[bool] = None + + @classmethod + def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: + update_not_none( + field_schema, + exclusiveMinimum=cls.gt, + exclusiveMaximum=cls.lt, + minimum=cls.ge, + maximum=cls.le, + multipleOf=cls.multiple_of, + ) + # Modify constraints to account for differences between IEEE floats and JSON + if field_schema.get('exclusiveMinimum') == -math.inf: + del field_schema['exclusiveMinimum'] + if field_schema.get('minimum') == -math.inf: + del field_schema['minimum'] + if field_schema.get('exclusiveMaximum') == math.inf: + del field_schema['exclusiveMaximum'] + if field_schema.get('maximum') == math.inf: + del field_schema['maximum'] + + @classmethod + def __get_validators__(cls) -> 'CallableGenerator': + yield strict_float_validator if cls.strict else float_validator + yield number_size_validator + yield number_multiple_validator + yield float_finite_validator + + +def confloat( + *, + strict: bool = False, + gt: float = None, + ge: float = None, + lt: float = None, + le: float = None, + multiple_of: float = None, + allow_inf_nan: Optional[bool] = None, +) -> Type[float]: + # use kwargs then define conf in a dict to aid with IDE type hinting + namespace = dict(strict=strict, gt=gt, ge=ge, lt=lt, le=le, multiple_of=multiple_of, allow_inf_nan=allow_inf_nan) + return type('ConstrainedFloatValue', (ConstrainedFloat,), namespace) + + +if TYPE_CHECKING: + PositiveFloat = float + NegativeFloat = float + NonPositiveFloat = float + NonNegativeFloat = float + StrictFloat = float + 
FiniteFloat = float +else: + + class PositiveFloat(ConstrainedFloat): + gt = 0 + + class NegativeFloat(ConstrainedFloat): + lt = 0 + + class NonPositiveFloat(ConstrainedFloat): + le = 0 + + class NonNegativeFloat(ConstrainedFloat): + ge = 0 + + class StrictFloat(ConstrainedFloat): + strict = True + + class FiniteFloat(ConstrainedFloat): + allow_inf_nan = False + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BYTES TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + +class ConstrainedBytes(bytes): + strip_whitespace = False + to_upper = False + to_lower = False + min_length: OptionalInt = None + max_length: OptionalInt = None + strict: bool = False + + @classmethod + def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: + update_not_none(field_schema, minLength=cls.min_length, maxLength=cls.max_length) + + @classmethod + def __get_validators__(cls) -> 'CallableGenerator': + yield strict_bytes_validator if cls.strict else bytes_validator + yield constr_strip_whitespace + yield constr_upper + yield constr_lower + yield constr_length_validator + + +def conbytes( + *, + strip_whitespace: bool = False, + to_upper: bool = False, + to_lower: bool = False, + min_length: Optional[int] = None, + max_length: Optional[int] = None, + strict: bool = False, +) -> Type[bytes]: + # use kwargs then define conf in a dict to aid with IDE type hinting + namespace = dict( + strip_whitespace=strip_whitespace, + to_upper=to_upper, + to_lower=to_lower, + min_length=min_length, + max_length=max_length, + strict=strict, + ) + return _registered(type('ConstrainedBytesValue', (ConstrainedBytes,), namespace)) + + +if TYPE_CHECKING: + StrictBytes = bytes +else: + + class StrictBytes(ConstrainedBytes): + strict = True + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ STRING TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + +class ConstrainedStr(str): + strip_whitespace = False + to_upper = False + to_lower = False + min_length: OptionalInt = None + max_length: OptionalInt = None + curtail_length: OptionalInt = None + regex: Optional[Union[str, Pattern[str]]] = None + strict = False + + @classmethod + def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: + update_not_none( + field_schema, + minLength=cls.min_length, + maxLength=cls.max_length, + pattern=cls.regex and cls._get_pattern(cls.regex), + ) + + @classmethod + def __get_validators__(cls) -> 'CallableGenerator': + yield strict_str_validator if cls.strict else str_validator + yield constr_strip_whitespace + yield constr_upper + yield constr_lower + yield constr_length_validator + yield cls.validate + + @classmethod + def validate(cls, value: Union[str]) -> Union[str]: + if cls.curtail_length and len(value) > cls.curtail_length: + value = value[: cls.curtail_length] + + if cls.regex: + if not re.match(cls.regex, value): + raise errors.StrRegexError(pattern=cls._get_pattern(cls.regex)) + + return value + + @staticmethod + def _get_pattern(regex: Union[str, Pattern[str]]) -> str: + return regex if isinstance(regex, str) else regex.pattern + + +def constr( + *, + strip_whitespace: bool = False, + to_upper: bool = False, + to_lower: bool = False, + strict: bool = False, + min_length: Optional[int] = None, + max_length: Optional[int] = None, + curtail_length: Optional[int] = None, + regex: Optional[str] = None, +) -> Type[str]: + # use kwargs then define conf in a dict to aid with IDE type hinting + namespace = dict( + strip_whitespace=strip_whitespace, + to_upper=to_upper, + to_lower=to_lower, + strict=strict, + min_length=min_length, + max_length=max_length, + 
curtail_length=curtail_length,
+        regex=regex and re.compile(regex),
+    )
+    return _registered(type('ConstrainedStrValue', (ConstrainedStr,), namespace))
+
+
+if TYPE_CHECKING:
+    StrictStr = str
+else:
+
+    class StrictStr(ConstrainedStr):
+        strict = True
+
+
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SET TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+# This type's superclass should be Set[T], but cython chokes on that...
+class ConstrainedSet(set):  # type: ignore
+    # Needed for pydantic to detect that this is a set
+    __origin__ = set
+    __args__: Set[Type[T]]  # type: ignore
+
+    min_items: Optional[int] = None
+    max_items: Optional[int] = None
+    item_type: Type[T]  # type: ignore
+
+    @classmethod
+    def __get_validators__(cls) -> 'CallableGenerator':
+        yield cls.set_length_validator
+
+    @classmethod
+    def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
+        update_not_none(field_schema, minItems=cls.min_items, maxItems=cls.max_items)
+
+    @classmethod
+    def set_length_validator(cls, v: 'Optional[Set[T]]') -> 'Optional[Set[T]]':
+        if v is None:
+            return None
+
+        v = set_validator(v)
+        v_len = len(v)
+
+        if cls.min_items is not None and v_len < cls.min_items:
+            raise errors.SetMinLengthError(limit_value=cls.min_items)
+
+        if cls.max_items is not None and v_len > cls.max_items:
+            raise errors.SetMaxLengthError(limit_value=cls.max_items)
+
+        return v
+
+
+def conset(item_type: Type[T], *, min_items: Optional[int] = None, max_items: Optional[int] = None) -> Type[Set[T]]:
+    # __args__ is needed to conform to typing generics api
+    namespace = {'min_items': min_items, 'max_items': max_items, 'item_type': item_type, '__args__': [item_type]}
+    # We use new_class to be able to deal with Generic types
+    return new_class('ConstrainedSetValue', (ConstrainedSet,), {}, lambda ns: ns.update(namespace))
+
+
+# This type's superclass should be FrozenSet[T], but cython chokes on that...
+class ConstrainedFrozenSet(frozenset):  # type: ignore
+    # Needed for pydantic to detect that this is a set
+    __origin__ = frozenset
+    __args__: FrozenSet[Type[T]]  # type: ignore
+
+    min_items: Optional[int] = None
+    max_items: Optional[int] = None
+    item_type: Type[T]  # type: ignore
+
+    @classmethod
+    def __get_validators__(cls) -> 'CallableGenerator':
+        yield cls.frozenset_length_validator
+
+    @classmethod
+    def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
+        update_not_none(field_schema, minItems=cls.min_items, maxItems=cls.max_items)
+
+    @classmethod
+    def frozenset_length_validator(cls, v: 'Optional[FrozenSet[T]]') -> 'Optional[FrozenSet[T]]':
+        if v is None:
+            return None
+
+        v = frozenset_validator(v)
+        v_len = len(v)
+
+        if cls.min_items is not None and v_len < cls.min_items:
+            raise errors.FrozenSetMinLengthError(limit_value=cls.min_items)
+
+        if cls.max_items is not None and v_len > cls.max_items:
+            raise errors.FrozenSetMaxLengthError(limit_value=cls.max_items)
+
+        return v
+
+
+def confrozenset(
+    item_type: Type[T], *, min_items: Optional[int] = None, max_items: Optional[int] = None
+) -> Type[FrozenSet[T]]:
+    # __args__ is needed to conform to typing generics api
+    namespace = {'min_items': min_items, 'max_items': max_items, 'item_type': item_type, '__args__': [item_type]}
+    # We use new_class to be able to deal with Generic types
+    return new_class('ConstrainedFrozenSetValue', (ConstrainedFrozenSet,), {}, lambda ns: ns.update(namespace))
+
+
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LIST TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+# This type's superclass should be List[T], but cython chokes on that...
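+# A usage sketch (hypothetical values): conlist(int, min_items=1, max_items=3,
+# unique_items=True) returns a 'ConstrainedListValue' subclass of this type whose
+# validators enforce the length bounds and reject duplicate items.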
+class ConstrainedList(list):  # type: ignore
+    # Needed for pydantic to detect that this is a list
+    __origin__ = list
+    __args__: Tuple[Type[T], ...]  # type: ignore
+
+    min_items: Optional[int] = None
+    max_items: Optional[int] = None
+    unique_items: Optional[bool] = None
+    item_type: Type[T]  # type: ignore
+
+    @classmethod
+    def __get_validators__(cls) -> 'CallableGenerator':
+        yield cls.list_length_validator
+        if cls.unique_items:
+            yield cls.unique_items_validator
+
+    @classmethod
+    def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
+        update_not_none(field_schema, minItems=cls.min_items, maxItems=cls.max_items, uniqueItems=cls.unique_items)
+
+    @classmethod
+    def list_length_validator(cls, v: 'Optional[List[T]]') -> 'Optional[List[T]]':
+        if v is None:
+            return None
+
+        v = list_validator(v)
+        v_len = len(v)
+
+        if cls.min_items is not None and v_len < cls.min_items:
+            raise errors.ListMinLengthError(limit_value=cls.min_items)
+
+        if cls.max_items is not None and v_len > cls.max_items:
+            raise errors.ListMaxLengthError(limit_value=cls.max_items)
+
+        return v
+
+    @classmethod
+    def unique_items_validator(cls, v: 'Optional[List[T]]') -> 'Optional[List[T]]':
+        if v is None:
+            return None
+
+        for i, value in enumerate(v, start=1):
+            if value in v[i:]:
+                raise errors.ListUniqueItemsError()
+
+        return v
+
+
+def conlist(
+    item_type: Type[T], *, min_items: Optional[int] = None, max_items: Optional[int] = None, unique_items: bool = None
+) -> Type[List[T]]:
+    # __args__ is needed to conform to typing generics api
+    namespace = dict(
+        min_items=min_items, max_items=max_items, unique_items=unique_items, item_type=item_type, __args__=(item_type,)
+    )
+    # We use new_class to be able to deal with Generic types
+    return new_class('ConstrainedListValue', (ConstrainedList,), {}, lambda ns: ns.update(namespace))
+
+
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PYOBJECT TYPE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+if TYPE_CHECKING:
+    PyObject = Callable[..., Any]
+else:
+
+    class PyObject:
+        validate_always = True
+
+        @classmethod
+        def __get_validators__(cls) -> 'CallableGenerator':
+            yield cls.validate
+
+        @classmethod
+        def validate(cls, value: Any) -> Any:
+            if isinstance(value, Callable):
+                return value
+
+            try:
+                value = str_validator(value)
+            except errors.StrError:
+                raise errors.PyObjectError(error_message='value is neither a valid import path nor a valid callable')
+
+            try:
+                return import_string(value)
+            except ImportError as e:
+                raise errors.PyObjectError(error_message=str(e))
+
+
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DECIMAL TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+class ConstrainedDecimal(Decimal, metaclass=ConstrainedNumberMeta):
+    gt: OptionalIntFloatDecimal = None
+    ge: OptionalIntFloatDecimal = None
+    lt: OptionalIntFloatDecimal = None
+    le: OptionalIntFloatDecimal = None
+    max_digits: OptionalInt = None
+    decimal_places: OptionalInt = None
+    multiple_of: OptionalIntFloatDecimal = None
+
+    @classmethod
+    def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
+        update_not_none(
+            field_schema,
+            exclusiveMinimum=cls.gt,
+            exclusiveMaximum=cls.lt,
+            minimum=cls.ge,
+            maximum=cls.le,
+            multipleOf=cls.multiple_of,
+        )
+
+    @classmethod
+    def __get_validators__(cls) -> 'CallableGenerator':
+        yield decimal_validator
+        yield number_size_validator
+        yield number_multiple_validator
+        yield cls.validate
+
+    @classmethod
+    def validate(cls, value: Decimal) -> Decimal:
+        try:
+            normalized_value = value.normalize()
+        except InvalidOperation:
+            normalized_value = value
+        digit_tuple, 
exponent = normalized_value.as_tuple()[1:] + if exponent in {'F', 'n', 'N'}: + raise errors.DecimalIsNotFiniteError() + + if exponent >= 0: + # A positive exponent adds that many trailing zeros. + digits = len(digit_tuple) + exponent + decimals = 0 + else: + # If the absolute value of the negative exponent is larger than the + # number of digits, then it's the same as the number of digits, + # because it'll consume all of the digits in digit_tuple and then + # add abs(exponent) - len(digit_tuple) leading zeros after the + # decimal point. + if abs(exponent) > len(digit_tuple): + digits = decimals = abs(exponent) + else: + digits = len(digit_tuple) + decimals = abs(exponent) + whole_digits = digits - decimals + + if cls.max_digits is not None and digits > cls.max_digits: + raise errors.DecimalMaxDigitsError(max_digits=cls.max_digits) + + if cls.decimal_places is not None and decimals > cls.decimal_places: + raise errors.DecimalMaxPlacesError(decimal_places=cls.decimal_places) + + if cls.max_digits is not None and cls.decimal_places is not None: + expected = cls.max_digits - cls.decimal_places + if whole_digits > expected: + raise errors.DecimalWholeDigitsError(whole_digits=expected) + + return value + + +def condecimal( + *, + gt: Decimal = None, + ge: Decimal = None, + lt: Decimal = None, + le: Decimal = None, + max_digits: Optional[int] = None, + decimal_places: Optional[int] = None, + multiple_of: Decimal = None, +) -> Type[Decimal]: + # use kwargs then define conf in a dict to aid with IDE type hinting + namespace = dict( + gt=gt, ge=ge, lt=lt, le=le, max_digits=max_digits, decimal_places=decimal_places, multiple_of=multiple_of + ) + return type('ConstrainedDecimalValue', (ConstrainedDecimal,), namespace) + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ UUID TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +if TYPE_CHECKING: + UUID1 = UUID + UUID3 = UUID + UUID4 = UUID + UUID5 = UUID +else: + + class UUID1(UUID): + _required_version = 1 + + @classmethod + def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: + field_schema.update(type='string', format=f'uuid{cls._required_version}') + + class UUID3(UUID1): + _required_version = 3 + + class UUID4(UUID1): + _required_version = 4 + + class UUID5(UUID1): + _required_version = 5 + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PATH TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +if TYPE_CHECKING: + FilePath = Path + DirectoryPath = Path +else: + + class FilePath(Path): + @classmethod + def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: + field_schema.update(format='file-path') + + @classmethod + def __get_validators__(cls) -> 'CallableGenerator': + yield path_validator + yield path_exists_validator + yield cls.validate + + @classmethod + def validate(cls, value: Path) -> Path: + if not value.is_file(): + raise errors.PathNotAFileError(path=value) + + return value + + class DirectoryPath(Path): + @classmethod + def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: + field_schema.update(format='directory-path') + + @classmethod + def __get_validators__(cls) -> 'CallableGenerator': + yield path_validator + yield path_exists_validator + yield cls.validate + + @classmethod + def validate(cls, value: Path) -> Path: + if not value.is_dir(): + raise errors.PathNotADirectoryError(path=value) + + return value + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ JSON TYPE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + +class JsonWrapper: + pass + + +class JsonMeta(type): + def __getitem__(self, t: Type[Any]) -> Type[JsonWrapper]: + if t is Any: + return Json # 
allow Json[Any] to replicate plain Json
+        return _registered(type('JsonWrapperValue', (JsonWrapper,), {'inner_type': t}))
+
+
+if TYPE_CHECKING:
+    Json = Annotated[T, ...]  # Json[list[str]] will be recognized by type checkers as list[str]
+
+else:
+
+    class Json(metaclass=JsonMeta):
+        @classmethod
+        def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
+            field_schema.update(type='string', format='json-string')
+
+
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SECRET TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+class SecretField(abc.ABC):
+    """
+    Note: this should be implemented as a generic like `SecretField(ABC, Generic[T])`,
+    the `__init__()` should be part of the abstract class and the
+    `get_secret_value()` method should use the generic `T` type.
+
+    However, Cython currently doesn't support generics very well and the
+    generated code fails to be imported (see
+    https://github.com/cython/cython/issues/2753).
+    """
+
+    def __eq__(self, other: Any) -> bool:
+        return isinstance(other, self.__class__) and self.get_secret_value() == other.get_secret_value()
+
+    def __str__(self) -> str:
+        return '**********' if self.get_secret_value() else ''
+
+    def __hash__(self) -> int:
+        return hash(self.get_secret_value())
+
+    @abc.abstractmethod
+    def get_secret_value(self) -> Any:  # pragma: no cover
+        ...
+
+
+class SecretStr(SecretField):
+    min_length: OptionalInt = None
+    max_length: OptionalInt = None
+
+    @classmethod
+    def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
+        update_not_none(
+            field_schema,
+            type='string',
+            writeOnly=True,
+            format='password',
+            minLength=cls.min_length,
+            maxLength=cls.max_length,
+        )
+
+    @classmethod
+    def __get_validators__(cls) -> 'CallableGenerator':
+        yield cls.validate
+        yield constr_length_validator
+
+    @classmethod
+    def validate(cls, value: Any) -> 'SecretStr':
+        if isinstance(value, cls):
+            return value
+        value = str_validator(value)
+        return cls(value)
+
+    def __init__(self, value: str):
+        self._secret_value = value
+
+    def __repr__(self) -> str:
+        return f"SecretStr('{self}')"
+
+    def __len__(self) -> int:
+        return len(self._secret_value)
+
+    def display(self) -> str:
+        warnings.warn('`secret_str.display()` is deprecated, use `str(secret_str)` instead', DeprecationWarning)
+        return str(self)
+
+    def get_secret_value(self) -> str:
+        return self._secret_value
+
+
+class SecretBytes(SecretField):
+    min_length: OptionalInt = None
+    max_length: OptionalInt = None
+
+    @classmethod
+    def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
+        update_not_none(
+            field_schema,
+            type='string',
+            writeOnly=True,
+            format='password',
+            minLength=cls.min_length,
+            maxLength=cls.max_length,
+        )
+
+    @classmethod
+    def __get_validators__(cls) -> 'CallableGenerator':
+        yield cls.validate
+        yield constr_length_validator
+
+    @classmethod
+    def validate(cls, value: Any) -> 'SecretBytes':
+        if isinstance(value, cls):
+            return value
+        value = bytes_validator(value)
+        return cls(value)
+
+    def __init__(self, value: bytes):
+        self._secret_value = value
+
+    def __repr__(self) -> str:
+        return f"SecretBytes(b'{self}')"
+
+    def __len__(self) -> int:
+        return len(self._secret_value)
+
+    def display(self) -> str:
+        warnings.warn('`secret_bytes.display()` is deprecated, use `str(secret_bytes)` instead', DeprecationWarning)
+        return str(self)
+
+    def get_secret_value(self) -> bytes:
+        return self._secret_value
+
+
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PAYMENT CARD TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+class PaymentCardBrand(str, Enum):
+    # If you add another 
card type, please also add it to the + # Hypothesis strategy in `pydantic._hypothesis_plugin`. + amex = 'American Express' + mastercard = 'Mastercard' + visa = 'Visa' + other = 'other' + + def __str__(self) -> str: + return self.value + + +class PaymentCardNumber(str): + """ + Based on: https://en.wikipedia.org/wiki/Payment_card_number + """ + + strip_whitespace: ClassVar[bool] = True + min_length: ClassVar[int] = 12 + max_length: ClassVar[int] = 19 + bin: str + last4: str + brand: PaymentCardBrand + + def __init__(self, card_number: str): + self.bin = card_number[:6] + self.last4 = card_number[-4:] + self.brand = self._get_brand(card_number) + + @classmethod + def __get_validators__(cls) -> 'CallableGenerator': + yield str_validator + yield constr_strip_whitespace + yield constr_length_validator + yield cls.validate_digits + yield cls.validate_luhn_check_digit + yield cls + yield cls.validate_length_for_brand + + @property + def masked(self) -> str: + num_masked = len(self) - 10 # len(bin) + len(last4) == 10 + return f'{self.bin}{"*" * num_masked}{self.last4}' + + @classmethod + def validate_digits(cls, card_number: str) -> str: + if not card_number.isdigit(): + raise errors.NotDigitError + return card_number + + @classmethod + def validate_luhn_check_digit(cls, card_number: str) -> str: + """ + Based on: https://en.wikipedia.org/wiki/Luhn_algorithm + """ + sum_ = int(card_number[-1]) + length = len(card_number) + parity = length % 2 + for i in range(length - 1): + digit = int(card_number[i]) + if i % 2 == parity: + digit *= 2 + if digit > 9: + digit -= 9 + sum_ += digit + valid = sum_ % 10 == 0 + if not valid: + raise errors.LuhnValidationError + return card_number + + @classmethod + def validate_length_for_brand(cls, card_number: 'PaymentCardNumber') -> 'PaymentCardNumber': + """ + Validate length based on BIN for major brands: + https://en.wikipedia.org/wiki/Payment_card_number#Issuer_identification_number_(IIN) + """ + required_length: Union[None, int, str] = None + if card_number.brand in PaymentCardBrand.mastercard: + required_length = 16 + valid = len(card_number) == required_length + elif card_number.brand == PaymentCardBrand.visa: + required_length = '13, 16 or 19' + valid = len(card_number) in {13, 16, 19} + elif card_number.brand == PaymentCardBrand.amex: + required_length = 15 + valid = len(card_number) == required_length + else: + valid = True + if not valid: + raise errors.InvalidLengthForBrand(brand=card_number.brand, required_length=required_length) + return card_number + + @staticmethod + def _get_brand(card_number: str) -> PaymentCardBrand: + if card_number[0] == '4': + brand = PaymentCardBrand.visa + elif 51 <= int(card_number[:2]) <= 55: + brand = PaymentCardBrand.mastercard + elif card_number[:2] in {'34', '37'}: + brand = PaymentCardBrand.amex + else: + brand = PaymentCardBrand.other + return brand + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BYTE SIZE TYPE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +BYTE_SIZES = { + 'b': 1, + 'kb': 10**3, + 'mb': 10**6, + 'gb': 10**9, + 'tb': 10**12, + 'pb': 10**15, + 'eb': 10**18, + 'kib': 2**10, + 'mib': 2**20, + 'gib': 2**30, + 'tib': 2**40, + 'pib': 2**50, + 'eib': 2**60, +} +BYTE_SIZES.update({k.lower()[0]: v for k, v in BYTE_SIZES.items() if 'i' not in k}) +byte_string_re = re.compile(r'^\s*(\d*\.?\d+)\s*(\w+)?', re.IGNORECASE) + + +class ByteSize(int): + @classmethod + def __get_validators__(cls) -> 'CallableGenerator': + yield cls.validate + + @classmethod + def validate(cls, v: StrIntFloat) -> 'ByteSize': + try: + return cls(int(v)) + 
except ValueError: + pass + + str_match = byte_string_re.match(str(v)) + if str_match is None: + raise errors.InvalidByteSize() + + scalar, unit = str_match.groups() + if unit is None: + unit = 'b' + + try: + unit_mult = BYTE_SIZES[unit.lower()] + except KeyError: + raise errors.InvalidByteSizeUnit(unit=unit) + + return cls(int(float(scalar) * unit_mult)) + + def human_readable(self, decimal: bool = False) -> str: + if decimal: + divisor = 1000 + units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'] + final_unit = 'EB' + else: + divisor = 1024 + units = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB'] + final_unit = 'EiB' + + num = float(self) + for unit in units: + if abs(num) < divisor: + return f'{num:0.1f}{unit}' + num /= divisor + + return f'{num:0.1f}{final_unit}' + + def to(self, unit: str) -> float: + try: + unit_div = BYTE_SIZES[unit.lower()] + except KeyError: + raise errors.InvalidByteSizeUnit(unit=unit) + + return self / unit_div + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DATE TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +if TYPE_CHECKING: + PastDate = date + FutureDate = date +else: + + class PastDate(date): + @classmethod + def __get_validators__(cls) -> 'CallableGenerator': + yield parse_date + yield cls.validate + + @classmethod + def validate(cls, value: date) -> date: + if value >= date.today(): + raise errors.DateNotInThePastError() + + return value + + class FutureDate(date): + @classmethod + def __get_validators__(cls) -> 'CallableGenerator': + yield parse_date + yield cls.validate + + @classmethod + def validate(cls, value: date) -> date: + if value <= date.today(): + raise errors.DateNotInTheFutureError() + + return value + + +class ConstrainedDate(date, metaclass=ConstrainedNumberMeta): + gt: OptionalDate = None + ge: OptionalDate = None + lt: OptionalDate = None + le: OptionalDate = None + + @classmethod + def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: + update_not_none(field_schema, exclusiveMinimum=cls.gt, exclusiveMaximum=cls.lt, minimum=cls.ge, maximum=cls.le) + + @classmethod + def __get_validators__(cls) -> 'CallableGenerator': + yield parse_date + yield number_size_validator + + +def condate( + *, + gt: date = None, + ge: date = None, + lt: date = None, + le: date = None, +) -> Type[date]: + # use kwargs then define conf in a dict to aid with IDE type hinting + namespace = dict(gt=gt, ge=ge, lt=lt, le=le) + return type('ConstrainedDateValue', (ConstrainedDate,), namespace) diff --git a/falcon/lib/python3.10/site-packages/pydantic/utils.py b/falcon/lib/python3.10/site-packages/pydantic/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ab2147ffe10c74f7bf1d8c25081a205533a3da06 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/pydantic/utils.py @@ -0,0 +1,806 @@ +import keyword +import warnings +import weakref +from collections import OrderedDict, defaultdict, deque +from copy import deepcopy +from itertools import islice, zip_longest +from types import BuiltinFunctionType, CodeType, FunctionType, GeneratorType, LambdaType, ModuleType +from typing import ( + TYPE_CHECKING, + AbstractSet, + Any, + Callable, + Collection, + Dict, + Generator, + Iterable, + Iterator, + List, + Mapping, + NoReturn, + Optional, + Set, + Tuple, + Type, + TypeVar, + Union, +) + +from typing_extensions import Annotated + +from pydantic.errors import ConfigError +from pydantic.typing import ( + NoneType, + WithArgsTypes, + all_literal_values, + display_as_type, + get_args, + get_origin, + is_literal_type, + is_union, +) +from pydantic.version import 
version_info + +if TYPE_CHECKING: + from inspect import Signature + from pathlib import Path + + from pydantic.config import BaseConfig + from pydantic.dataclasses import Dataclass + from pydantic.fields import ModelField + from pydantic.main import BaseModel + from pydantic.typing import AbstractSetIntStr, DictIntStrAny, IntStr, MappingIntStrAny, ReprArgs + + RichReprResult = Iterable[Union[Any, Tuple[Any], Tuple[str, Any], Tuple[str, Any, Any]]] + +__all__ = ( + 'import_string', + 'sequence_like', + 'validate_field_name', + 'lenient_isinstance', + 'lenient_issubclass', + 'in_ipython', + 'is_valid_identifier', + 'deep_update', + 'update_not_none', + 'almost_equal_floats', + 'get_model', + 'to_camel', + 'to_lower_camel', + 'is_valid_field', + 'smart_deepcopy', + 'PyObjectStr', + 'Representation', + 'GetterDict', + 'ValueItems', + 'version_info', # required here to match behaviour in v1.3 + 'ClassAttribute', + 'path_type', + 'ROOT_KEY', + 'get_unique_discriminator_alias', + 'get_discriminator_alias_and_values', + 'DUNDER_ATTRIBUTES', +) + +ROOT_KEY = '__root__' +# these are types that are returned unchanged by deepcopy +IMMUTABLE_NON_COLLECTIONS_TYPES: Set[Type[Any]] = { + int, + float, + complex, + str, + bool, + bytes, + type, + NoneType, + FunctionType, + BuiltinFunctionType, + LambdaType, + weakref.ref, + CodeType, + # note: including ModuleType will differ from behaviour of deepcopy by not producing error. + # It might be not a good idea in general, but considering that this function used only internally + # against default values of fields, this will allow to actually have a field with module as default value + ModuleType, + NotImplemented.__class__, + Ellipsis.__class__, +} + +# these are types that if empty, might be copied with simple copy() instead of deepcopy() +BUILTIN_COLLECTIONS: Set[Type[Any]] = { + list, + set, + tuple, + frozenset, + dict, + OrderedDict, + defaultdict, + deque, +} + + +def import_string(dotted_path: str) -> Any: + """ + Stolen approximately from django. Import a dotted module path and return the attribute/class designated by the + last name in the path. Raise ImportError if the import fails. + """ + from importlib import import_module + + try: + module_path, class_name = dotted_path.strip(' ').rsplit('.', 1) + except ValueError as e: + raise ImportError(f'"{dotted_path}" doesn\'t look like a module path') from e + + module = import_module(module_path) + try: + return getattr(module, class_name) + except AttributeError as e: + raise ImportError(f'Module "{module_path}" does not define a "{class_name}" attribute') from e + + +def truncate(v: Union[str], *, max_len: int = 80) -> str: + """ + Truncate a value and add a unicode ellipsis (three dots) to the end if it was too long + """ + warnings.warn('`truncate` is no-longer used by pydantic and is deprecated', DeprecationWarning) + if isinstance(v, str) and len(v) > (max_len - 2): + # -3 so quote + string + … + quote has correct length + return (v[: (max_len - 3)] + '…').__repr__() + try: + v = v.__repr__() + except TypeError: + v = v.__class__.__repr__(v) # in case v is a type + if len(v) > max_len: + v = v[: max_len - 1] + '…' + return v + + +def sequence_like(v: Any) -> bool: + return isinstance(v, (list, tuple, set, frozenset, GeneratorType, deque)) + + +def validate_field_name(bases: Iterable[Type[Any]], field_name: str) -> None: + """ + Ensure that the field's name does not shadow an existing attribute of the model. 
+ """ + for base in bases: + if getattr(base, field_name, None): + raise NameError( + f'Field name "{field_name}" shadows a BaseModel attribute; ' + f'use a different field name with "alias=\'{field_name}\'".' + ) + + +def lenient_isinstance(o: Any, class_or_tuple: Union[Type[Any], Tuple[Type[Any], ...], None]) -> bool: + try: + return isinstance(o, class_or_tuple) # type: ignore[arg-type] + except TypeError: + return False + + +def lenient_issubclass(cls: Any, class_or_tuple: Union[Type[Any], Tuple[Type[Any], ...], None]) -> bool: + try: + return isinstance(cls, type) and issubclass(cls, class_or_tuple) # type: ignore[arg-type] + except TypeError: + if isinstance(cls, WithArgsTypes): + return False + raise # pragma: no cover + + +def in_ipython() -> bool: + """ + Check whether we're in an ipython environment, including jupyter notebooks. + """ + try: + eval('__IPYTHON__') + except NameError: + return False + else: # pragma: no cover + return True + + +def is_valid_identifier(identifier: str) -> bool: + """ + Checks that a string is a valid identifier and not a Python keyword. + :param identifier: The identifier to test. + :return: True if the identifier is valid. + """ + return identifier.isidentifier() and not keyword.iskeyword(identifier) + + +KeyType = TypeVar('KeyType') + + +def deep_update(mapping: Dict[KeyType, Any], *updating_mappings: Dict[KeyType, Any]) -> Dict[KeyType, Any]: + updated_mapping = mapping.copy() + for updating_mapping in updating_mappings: + for k, v in updating_mapping.items(): + if k in updated_mapping and isinstance(updated_mapping[k], dict) and isinstance(v, dict): + updated_mapping[k] = deep_update(updated_mapping[k], v) + else: + updated_mapping[k] = v + return updated_mapping + + +def update_not_none(mapping: Dict[Any, Any], **update: Any) -> None: + mapping.update({k: v for k, v in update.items() if v is not None}) + + +def almost_equal_floats(value_1: float, value_2: float, *, delta: float = 1e-8) -> bool: + """ + Return True if two floats are almost equal + """ + return abs(value_1 - value_2) <= delta + + +def generate_model_signature( + init: Callable[..., None], fields: Dict[str, 'ModelField'], config: Type['BaseConfig'] +) -> 'Signature': + """ + Generate signature for model based on its fields + """ + from inspect import Parameter, Signature, signature + + from pydantic.config import Extra + + present_params = signature(init).parameters.values() + merged_params: Dict[str, Parameter] = {} + var_kw = None + use_var_kw = False + + for param in islice(present_params, 1, None): # skip self arg + if param.kind is param.VAR_KEYWORD: + var_kw = param + continue + merged_params[param.name] = param + + if var_kw: # if custom init has no var_kw, fields which are not declared in it cannot be passed through + allow_names = config.allow_population_by_field_name + for field_name, field in fields.items(): + param_name = field.alias + if field_name in merged_params or param_name in merged_params: + continue + elif not is_valid_identifier(param_name): + if allow_names and is_valid_identifier(field_name): + param_name = field_name + else: + use_var_kw = True + continue + + # TODO: replace annotation with actual expected types once #1055 solved + kwargs = {'default': field.default} if not field.required else {} + merged_params[param_name] = Parameter( + param_name, Parameter.KEYWORD_ONLY, annotation=field.annotation, **kwargs + ) + + if config.extra is Extra.allow: + use_var_kw = True + + if var_kw and use_var_kw: + # Make sure the parameter for extra kwargs + # does not 
have the same name as a field
+        default_model_signature = [
+            ('__pydantic_self__', Parameter.POSITIONAL_OR_KEYWORD),
+            ('data', Parameter.VAR_KEYWORD),
+        ]
+        if [(p.name, p.kind) for p in present_params] == default_model_signature:
+            # if this is the standard model signature, use extra_data as the extra args name
+            var_kw_name = 'extra_data'
+        else:
+            # else start from var_kw
+            var_kw_name = var_kw.name
+
+        # generate a name that's definitely unique
+        while var_kw_name in fields:
+            var_kw_name += '_'
+        merged_params[var_kw_name] = var_kw.replace(name=var_kw_name)
+
+    return Signature(parameters=list(merged_params.values()), return_annotation=None)
+
+
+def get_model(obj: Union[Type['BaseModel'], Type['Dataclass']]) -> Type['BaseModel']:
+    from pydantic.main import BaseModel
+
+    try:
+        model_cls = obj.__pydantic_model__  # type: ignore
+    except AttributeError:
+        model_cls = obj
+
+    if not issubclass(model_cls, BaseModel):
+        raise TypeError('Unsupported type, must be either BaseModel or dataclass')
+    return model_cls
+
+
+def to_camel(string: str) -> str:
+    return ''.join(word.capitalize() for word in string.split('_'))
+
+
+def to_lower_camel(string: str) -> str:
+    if len(string) >= 1:
+        pascal_string = to_camel(string)
+        return pascal_string[0].lower() + pascal_string[1:]
+    return string.lower()
+
+
+T = TypeVar('T')
+
+
+def unique_list(
+    input_list: Union[List[T], Tuple[T, ...]],
+    *,
+    name_factory: Callable[[T], str] = str,
+) -> List[T]:
+    """
+    Make a list unique while maintaining order.
+    We update the list if another one with the same name is set
+    (e.g. root validator overridden in subclass)
+    """
+    result: List[T] = []
+    result_names: List[str] = []
+    for v in input_list:
+        v_name = name_factory(v)
+        if v_name not in result_names:
+            result_names.append(v_name)
+            result.append(v)
+        else:
+            result[result_names.index(v_name)] = v
+
+    return result
+
+
+class PyObjectStr(str):
+    """
+    String class where repr doesn't include quotes. Useful with Representation when you want to return a string
+    representation of something that is valid (or pseudo-valid) python.
+    """
+
+    def __repr__(self) -> str:
+        return str(self)
+
+
+class Representation:
+    """
+    Mixin to provide __str__, __repr__, and __pretty__ methods. See #884 for more details.
+
+    __pretty__ is used by [devtools](https://python-devtools.helpmanual.io/) to provide human readable representations
+    of objects.
+    """
+
+    __slots__: Tuple[str, ...] = tuple()
+
+    def __repr_args__(self) -> 'ReprArgs':
+        """
+        Returns the attributes to show in __str__, __repr__, and __pretty__; this is generally overridden.
+
+        Can either return:
+        * name - value pairs, e.g.: `[('foo_name', 'foo'), ('bar_name', ['b', 'a', 'r'])]`
+        * or, just values, e.g.: `[(None, 'foo'), (None, ['b', 'a', 'r'])]`
+        """
+        attrs = ((s, getattr(self, s)) for s in self.__slots__)
+        return [(a, v) for a, v in attrs if v is not None]
+
+    def __repr_name__(self) -> str:
+        """
+        Name of the instance's class, used in __repr__.
+        """
+        return self.__class__.__name__
+
+    def __repr_str__(self, join_str: str) -> str:
+        return join_str.join(repr(v) if a is None else f'{a}={v!r}' for a, v in self.__repr_args__())
+
+    def __pretty__(self, fmt: Callable[[Any], Any], **kwargs: Any) -> Generator[Any, None, None]:
+        """
+        Used by devtools (https://python-devtools.helpmanual.io/) to provide a human readable representation of objects
+        """
+        yield self.__repr_name__() + '('
+        yield 1
+        for name, value in self.__repr_args__():
+            if name is not None:
+                yield name + '='
+            yield fmt(value)
+            yield ','
+            yield 0
+        yield -1
+        yield ')'
+
+    def __str__(self) -> str:
+        return self.__repr_str__(' ')
+
+    def __repr__(self) -> str:
+        return f'{self.__repr_name__()}({self.__repr_str__(", ")})'
+
+    def __rich_repr__(self) -> 'RichReprResult':
+        """Get fields for Rich library"""
+        for name, field_repr in self.__repr_args__():
+            if name is None:
+                yield field_repr
+            else:
+                yield name, field_repr
+
+
+class GetterDict(Representation):
+    """
+    Hack to make objects smell just enough like dicts for validate_model.
+
+    We can't inherit from Mapping[str, Any] because it upsets cython so we have to implement all methods ourselves.
+    """
+
+    __slots__ = ('_obj',)
+
+    def __init__(self, obj: Any):
+        self._obj = obj
+
+    def __getitem__(self, key: str) -> Any:
+        try:
+            return getattr(self._obj, key)
+        except AttributeError as e:
+            raise KeyError(key) from e
+
+    def get(self, key: Any, default: Any = None) -> Any:
+        return getattr(self._obj, key, default)
+
+    def extra_keys(self) -> Set[Any]:
+        """
+        We don't want to get any other attributes of obj if the model didn't explicitly ask for them
+        """
+        return set()
+
+    def keys(self) -> List[Any]:
+        """
+        Keys of the pseudo dictionary, uses a list not set so order information can be maintained like python
+        dictionaries.
+        """
+        return list(self)
+
+    def values(self) -> List[Any]:
+        return [self[k] for k in self]
+
+    def items(self) -> Iterator[Tuple[str, Any]]:
+        for k in self:
+            yield k, self.get(k)
+
+    def __iter__(self) -> Iterator[str]:
+        for name in dir(self._obj):
+            if not name.startswith('_'):
+                yield name
+
+    def __len__(self) -> int:
+        return sum(1 for _ in self)
+
+    def __contains__(self, item: Any) -> bool:
+        return item in self.keys()
+
+    def __eq__(self, other: Any) -> bool:
+        return dict(self) == dict(other.items())
+
+    def __repr_args__(self) -> 'ReprArgs':
+        return [(None, dict(self))]
+
+    def __repr_name__(self) -> str:
+        return f'GetterDict[{display_as_type(self._obj)}]'
+
+
+class ValueItems(Representation):
+    """
+    Class for more convenient calculation of excluded or included fields on values.
+    """
+
+    __slots__ = ('_items', '_type')
+
+    def __init__(self, value: Any, items: Union['AbstractSetIntStr', 'MappingIntStrAny']) -> None:
+        items = self._coerce_items(items)
+
+        if isinstance(value, (list, tuple)):
+            items = self._normalize_indexes(items, len(value))
+
+        self._items: 'MappingIntStrAny' = items
+
+    def is_excluded(self, item: Any) -> bool:
+        """
+        Check if item is fully excluded.
+
+        :param item: key or index of a value
+        """
+        return self.is_true(self._items.get(item))
+
+    def is_included(self, item: Any) -> bool:
+        """
+        Check if value is contained in self._items
+
+        :param item: key or index of value
+        """
+        return item in self._items
+
+    def for_element(self, e: 'IntStr') -> Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']]:
+        """
+        :param e: key or index of element on value
+        :return: raw values for the element if self._items is a dict and contains the needed element
+        """
+
+        item = self._items.get(e)
+        return item if not self.is_true(item) else None
+
+    def _normalize_indexes(self, items: 'MappingIntStrAny', v_length: int) -> 'DictIntStrAny':
+        """
+        :param items: dict or set of indexes which will be normalized
+        :param v_length: length of the sequence whose indexes are being normalized
+
+        >>> self._normalize_indexes({0: True, -2: True, -1: True}, 4)
+        {0: True, 2: True, 3: True}
+        >>> self._normalize_indexes({'__all__': True}, 4)
+        {0: True, 1: True, 2: True, 3: True}
+        """
+
+        normalized_items: 'DictIntStrAny' = {}
+        all_items = None
+        for i, v in items.items():
+            if not (isinstance(v, Mapping) or isinstance(v, AbstractSet) or self.is_true(v)):
+                raise TypeError(f'Unexpected type of exclude value for index "{i}" {v.__class__}')
+            if i == '__all__':
+                all_items = self._coerce_value(v)
+                continue
+            if not isinstance(i, int):
+                raise TypeError(
+                    'Excluding fields from a sequence of sub-models or dicts must be performed index-wise: '
+                    'expected integer keys or keyword "__all__"'
+                )
+            normalized_i = v_length + i if i < 0 else i
+            normalized_items[normalized_i] = self.merge(v, normalized_items.get(normalized_i))
+
+        if not all_items:
+            return normalized_items
+        if self.is_true(all_items):
+            for i in range(v_length):
+                normalized_items.setdefault(i, ...)
+            return normalized_items
+        for i in range(v_length):
+            normalized_item = normalized_items.setdefault(i, {})
+            if not self.is_true(normalized_item):
+                normalized_items[i] = self.merge(all_items, normalized_item)
+        return normalized_items
+
+    @classmethod
+    def merge(cls, base: Any, override: Any, intersect: bool = False) -> Any:
+        """
+        Merge a ``base`` item with an ``override`` item.
+
+        Both ``base`` and ``override`` are converted to dictionaries if possible.
+        Sets are converted to dictionaries with the sets entries as keys and
+        Ellipsis as values.
+
+        Each key-value pair existing in ``base`` is merged with ``override``,
+        while the rest of the key-value pairs are updated recursively with this function.
+
+        Merging takes place based on the "union" of keys if ``intersect`` is
+        set to ``False`` (default) and on the intersection of keys if
+        ``intersect`` is set to ``True``. 
+ """ + override = cls._coerce_value(override) + base = cls._coerce_value(base) + if override is None: + return base + if cls.is_true(base) or base is None: + return override + if cls.is_true(override): + return base if intersect else override + + # intersection or union of keys while preserving ordering: + if intersect: + merge_keys = [k for k in base if k in override] + [k for k in override if k in base] + else: + merge_keys = list(base) + [k for k in override if k not in base] + + merged: 'DictIntStrAny' = {} + for k in merge_keys: + merged_item = cls.merge(base.get(k), override.get(k), intersect=intersect) + if merged_item is not None: + merged[k] = merged_item + + return merged + + @staticmethod + def _coerce_items(items: Union['AbstractSetIntStr', 'MappingIntStrAny']) -> 'MappingIntStrAny': + if isinstance(items, Mapping): + pass + elif isinstance(items, AbstractSet): + items = dict.fromkeys(items, ...) + else: + class_name = getattr(items, '__class__', '???') + assert_never( + items, + f'Unexpected type of exclude value {class_name}', + ) + return items + + @classmethod + def _coerce_value(cls, value: Any) -> Any: + if value is None or cls.is_true(value): + return value + return cls._coerce_items(value) + + @staticmethod + def is_true(v: Any) -> bool: + return v is True or v is ... + + def __repr_args__(self) -> 'ReprArgs': + return [(None, self._items)] + + +class ClassAttribute: + """ + Hide class attribute from its instances + """ + + __slots__ = ( + 'name', + 'value', + ) + + def __init__(self, name: str, value: Any) -> None: + self.name = name + self.value = value + + def __get__(self, instance: Any, owner: Type[Any]) -> None: + if instance is None: + return self.value + raise AttributeError(f'{self.name!r} attribute of {owner.__name__!r} is class-only') + + +path_types = { + 'is_dir': 'directory', + 'is_file': 'file', + 'is_mount': 'mount point', + 'is_symlink': 'symlink', + 'is_block_device': 'block device', + 'is_char_device': 'char device', + 'is_fifo': 'FIFO', + 'is_socket': 'socket', +} + + +def path_type(p: 'Path') -> str: + """ + Find out what sort of thing a path is. + """ + assert p.exists(), 'path does not exist' + for method, name in path_types.items(): + if getattr(p, method)(): + return name + + return 'unknown' + + +Obj = TypeVar('Obj') + + +def smart_deepcopy(obj: Obj) -> Obj: + """ + Return type as is for immutable built-in types + Use obj.copy() for built-in empty collections + Use copy.deepcopy() for non-empty collections and unknown objects + """ + + obj_type = obj.__class__ + if obj_type in IMMUTABLE_NON_COLLECTIONS_TYPES: + return obj # fastest case: obj is immutable and not collection therefore will not be copied anyway + try: + if not obj and obj_type in BUILTIN_COLLECTIONS: + # faster way for empty collections, no need to copy its members + return obj if obj_type is tuple else obj.copy() # type: ignore # tuple doesn't have copy method + except (TypeError, ValueError, RuntimeError): + # do we really dare to catch ALL errors? 
Seems a bit risky + pass + + return deepcopy(obj) # slowest way when we actually might need a deepcopy + + +def is_valid_field(name: str) -> bool: + if not name.startswith('_'): + return True + return ROOT_KEY == name + + +DUNDER_ATTRIBUTES = { + '__annotations__', + '__classcell__', + '__doc__', + '__module__', + '__orig_bases__', + '__orig_class__', + '__qualname__', + '__firstlineno__', + '__static_attributes__', +} + + +def is_valid_private_name(name: str) -> bool: + return not is_valid_field(name) and name not in DUNDER_ATTRIBUTES + + +_EMPTY = object() + + +def all_identical(left: Iterable[Any], right: Iterable[Any]) -> bool: + """ + Check that the items of `left` are the same objects as those in `right`. + + >>> a, b = object(), object() + >>> all_identical([a, b, a], [a, b, a]) + True + >>> all_identical([a, b, [a]], [a, b, [a]]) # new list object, while "equal" is not "identical" + False + """ + for left_item, right_item in zip_longest(left, right, fillvalue=_EMPTY): + if left_item is not right_item: + return False + return True + + +def assert_never(obj: NoReturn, msg: str) -> NoReturn: + """ + Helper to make sure that we have covered all possible types. + + This is mostly useful for ``mypy``, docs: + https://mypy.readthedocs.io/en/latest/literal_types.html#exhaustive-checks + """ + raise TypeError(msg) + + +def get_unique_discriminator_alias(all_aliases: Collection[str], discriminator_key: str) -> str: + """Validate that all aliases are the same and if that's the case return the alias""" + unique_aliases = set(all_aliases) + if len(unique_aliases) > 1: + raise ConfigError( + f'Aliases for discriminator {discriminator_key!r} must be the same (got {", ".join(sorted(all_aliases))})' + ) + return unique_aliases.pop() + + +def get_discriminator_alias_and_values(tp: Any, discriminator_key: str) -> Tuple[str, Tuple[str, ...]]: + """ + Get alias and all valid values in the `Literal` type of the discriminator field + `tp` can be a `BaseModel` class or directly an `Annotated` `Union` of many. 
+ """ + is_root_model = getattr(tp, '__custom_root_type__', False) + + if get_origin(tp) is Annotated: + tp = get_args(tp)[0] + + if hasattr(tp, '__pydantic_model__'): + tp = tp.__pydantic_model__ + + if is_union(get_origin(tp)): + alias, all_values = _get_union_alias_and_all_values(tp, discriminator_key) + return alias, tuple(v for values in all_values for v in values) + elif is_root_model: + union_type = tp.__fields__[ROOT_KEY].type_ + alias, all_values = _get_union_alias_and_all_values(union_type, discriminator_key) + + if len(set(all_values)) > 1: + raise ConfigError( + f'Field {discriminator_key!r} is not the same for all submodels of {display_as_type(tp)!r}' + ) + + return alias, all_values[0] + + else: + try: + t_discriminator_type = tp.__fields__[discriminator_key].type_ + except AttributeError as e: + raise TypeError(f'Type {tp.__name__!r} is not a valid `BaseModel` or `dataclass`') from e + except KeyError as e: + raise ConfigError(f'Model {tp.__name__!r} needs a discriminator field for key {discriminator_key!r}') from e + + if not is_literal_type(t_discriminator_type): + raise ConfigError(f'Field {discriminator_key!r} of model {tp.__name__!r} needs to be a `Literal`') + + return tp.__fields__[discriminator_key].alias, all_literal_values(t_discriminator_type) + + +def _get_union_alias_and_all_values( + union_type: Type[Any], discriminator_key: str +) -> Tuple[str, Tuple[Tuple[str, ...], ...]]: + zipped_aliases_values = [get_discriminator_alias_and_values(t, discriminator_key) for t in get_args(union_type)] + # unzip: [('alias_a',('v1', 'v2)), ('alias_b', ('v3',))] => [('alias_a', 'alias_b'), (('v1', 'v2'), ('v3',))] + all_aliases, all_values = zip(*zipped_aliases_values) + return get_unique_discriminator_alias(all_aliases, discriminator_key), all_values diff --git a/falcon/lib/python3.10/site-packages/pydantic/v1/annotated_types.py b/falcon/lib/python3.10/site-packages/pydantic/v1/annotated_types.py new file mode 100644 index 0000000000000000000000000000000000000000..ce4c81ecbb4402fc4c028a371f718ddbc64c29e9 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/pydantic/v1/annotated_types.py @@ -0,0 +1 @@ +from pydantic.annotated_types import * # noqa: F403,F401 diff --git a/falcon/lib/python3.10/site-packages/pydantic/v1/decorator.py b/falcon/lib/python3.10/site-packages/pydantic/v1/decorator.py new file mode 100644 index 0000000000000000000000000000000000000000..807a8b78553f67cb752b45b40a19cedcb95f65e5 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/pydantic/v1/decorator.py @@ -0,0 +1 @@ +from pydantic.decorator import * # noqa: F403,F401 diff --git a/falcon/lib/python3.10/site-packages/pydantic/v1/errors.py b/falcon/lib/python3.10/site-packages/pydantic/v1/errors.py new file mode 100644 index 0000000000000000000000000000000000000000..f98f02925ab2f4709adf19d666aeba9665405cd8 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/pydantic/v1/errors.py @@ -0,0 +1 @@ +from pydantic.errors import * # noqa: F403,F401 diff --git a/falcon/lib/python3.10/site-packages/pydantic/v1/fields.py b/falcon/lib/python3.10/site-packages/pydantic/v1/fields.py new file mode 100644 index 0000000000000000000000000000000000000000..e3b4dfd7894cc799bd43733d8aaeb7885a7ba221 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/pydantic/v1/fields.py @@ -0,0 +1 @@ +from pydantic.fields import * # noqa: F403,F401 diff --git a/falcon/lib/python3.10/site-packages/pydantic/v1/generics.py b/falcon/lib/python3.10/site-packages/pydantic/v1/generics.py new file mode 100644 index 
0000000000000000000000000000000000000000..7b7fb67eebc9e5b857af8156dd9c001a401d13b8 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/pydantic/v1/generics.py @@ -0,0 +1 @@ +from pydantic.generics import * # noqa: F403,F401 diff --git a/falcon/lib/python3.10/site-packages/pydantic/v1/main.py b/falcon/lib/python3.10/site-packages/pydantic/v1/main.py new file mode 100644 index 0000000000000000000000000000000000000000..6498ea8ee6bc8ae13fe62ad8926535c5d3a2bc72 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/pydantic/v1/main.py @@ -0,0 +1 @@ +from pydantic.main import * # noqa: F403,F401 diff --git a/falcon/lib/python3.10/site-packages/pydantic/v1/mypy.py b/falcon/lib/python3.10/site-packages/pydantic/v1/mypy.py new file mode 100644 index 0000000000000000000000000000000000000000..27d90268a9dfe76287478c282bb15a947236a8e7 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/pydantic/v1/mypy.py @@ -0,0 +1 @@ +from pydantic.mypy import * # noqa: F403,F401 diff --git a/falcon/lib/python3.10/site-packages/pydantic/v1/schema.py b/falcon/lib/python3.10/site-packages/pydantic/v1/schema.py new file mode 100644 index 0000000000000000000000000000000000000000..dfadc4f114ede935311c41c6a54e9e94be491579 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/pydantic/v1/schema.py @@ -0,0 +1 @@ +from pydantic.schema import * # noqa: F403,F401 diff --git a/falcon/lib/python3.10/site-packages/pydantic/v1/tools.py b/falcon/lib/python3.10/site-packages/pydantic/v1/tools.py new file mode 100644 index 0000000000000000000000000000000000000000..bf59428d4340526dfab0644df59f1247a19b055b --- /dev/null +++ b/falcon/lib/python3.10/site-packages/pydantic/v1/tools.py @@ -0,0 +1 @@ +from pydantic.tools import * # noqa: F403,F401 diff --git a/falcon/lib/python3.10/site-packages/pydantic/v1/types.py b/falcon/lib/python3.10/site-packages/pydantic/v1/types.py new file mode 100644 index 0000000000000000000000000000000000000000..ea0eda7d7d2e4c9e8b9b36192cddb70e9e393fc1 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/pydantic/v1/types.py @@ -0,0 +1 @@ +from pydantic.types import * # noqa: F403,F401 diff --git a/falcon/lib/python3.10/site-packages/pydantic/version.cpython-310-x86_64-linux-gnu.so b/falcon/lib/python3.10/site-packages/pydantic/version.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..0b152251bee568caea1813d40235f3d15a2e9a8b Binary files /dev/null and b/falcon/lib/python3.10/site-packages/pydantic/version.cpython-310-x86_64-linux-gnu.so differ diff --git a/falcon/lib/python3.10/site-packages/pydantic/version.py b/falcon/lib/python3.10/site-packages/pydantic/version.py new file mode 100644 index 0000000000000000000000000000000000000000..090ef5649c161eff33df344562727d7be1d53b41 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/pydantic/version.py @@ -0,0 +1,38 @@ +__all__ = 'compiled', 'VERSION', 'version_info' + +VERSION = '1.10.20' + +try: + import cython # type: ignore +except ImportError: + compiled: bool = False +else: # pragma: no cover + try: + compiled = cython.compiled + except AttributeError: + compiled = False + + +def version_info() -> str: + import platform + import sys + from importlib import import_module + from pathlib import Path + + optional_deps = [] + for p in ('devtools', 'dotenv', 'email-validator', 'typing-extensions'): + try: + import_module(p.replace('-', '_')) + except ImportError: + continue + optional_deps.append(p) + + info = { + 'pydantic version': VERSION, + 'pydantic compiled': compiled, + 'install path': 
Path(__file__).resolve().parent, + 'python version': sys.version, + 'platform': platform.platform(), + 'optional deps. installed': optional_deps, + } + return '\n'.join('{:>30} {}'.format(k + ':', str(v).replace('\n', ' ')) for k, v in info.items()) diff --git a/falcon/lib/python3.10/site-packages/pysam.libs/libsasl2-7de4d792.so.3.0.0 b/falcon/lib/python3.10/site-packages/pysam.libs/libsasl2-7de4d792.so.3.0.0 new file mode 100644 index 0000000000000000000000000000000000000000..b5f4af58fdb2e7c30534459e0b6b255cbd98d862 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/pysam.libs/libsasl2-7de4d792.so.3.0.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c9ef46743a5fef99ea89dc310d828a7a1280f4bc5376f22e2d6fcd6a3ed4086 +size 138761 diff --git a/falcon/lib/python3.10/site-packages/wheel-0.45.1.dist-info/LICENSE.txt b/falcon/lib/python3.10/site-packages/wheel-0.45.1.dist-info/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..a31470f14c5978d5fcc3bc173b8399b6c9a6443f --- /dev/null +++ b/falcon/lib/python3.10/site-packages/wheel-0.45.1.dist-info/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2012 Daniel Holth and contributors + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. diff --git a/falcon/lib/python3.10/site-packages/wheel-0.45.1.dist-info/WHEEL b/falcon/lib/python3.10/site-packages/wheel-0.45.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..3b5e64b5e6c4a210201d1676a891fd57b15cda99 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/wheel-0.45.1.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: flit 3.9.0 +Root-Is-Purelib: true +Tag: py3-none-any
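# Endnote (editor's illustration; not part of any file in this diff): a short
# sketch of the ByteSize type added above in pydantic/types.py, showing string
# parsing and unit conversion; `Upload` is a hypothetical model name.
#
#     from pydantic import BaseModel, ByteSize
#
#     class Upload(BaseModel):
#         size: ByteSize
#
#     u = Upload(size='1.5 GiB')
#     int(u.size)                # 1610612736
#     u.size.human_readable()    # '1.5GiB'
#     u.size.to('MB')            # 1610.612736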