diff --git a/parrot/lib/python3.10/asyncio/__init__.py b/parrot/lib/python3.10/asyncio/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..200b14c2a3f21eb04bdb164896112038fba247f8 --- /dev/null +++ b/parrot/lib/python3.10/asyncio/__init__.py @@ -0,0 +1,43 @@ +"""The asyncio package, tracking PEP 3156.""" + +# flake8: noqa + +import sys + +# This relies on each of the submodules having an __all__ variable. +from .base_events import * +from .coroutines import * +from .events import * +from .exceptions import * +from .futures import * +from .locks import * +from .protocols import * +from .runners import * +from .queues import * +from .streams import * +from .subprocess import * +from .tasks import * +from .threads import * +from .transports import * + +__all__ = (base_events.__all__ + + coroutines.__all__ + + events.__all__ + + exceptions.__all__ + + futures.__all__ + + locks.__all__ + + protocols.__all__ + + runners.__all__ + + queues.__all__ + + streams.__all__ + + subprocess.__all__ + + tasks.__all__ + + threads.__all__ + + transports.__all__) + +if sys.platform == 'win32': # pragma: no cover + from .windows_events import * + __all__ += windows_events.__all__ +else: + from .unix_events import * # pragma: no cover + __all__ += unix_events.__all__ diff --git a/parrot/lib/python3.10/asyncio/__main__.py b/parrot/lib/python3.10/asyncio/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..73330f4ac3f6fded7f811c7d5143ede44a73e3df --- /dev/null +++ b/parrot/lib/python3.10/asyncio/__main__.py @@ -0,0 +1,127 @@ +import ast +import asyncio +import code +import concurrent.futures +import inspect +import sys +import threading +import types +import warnings + +from . import futures + + +class AsyncIOInteractiveConsole(code.InteractiveConsole): + + def __init__(self, locals, loop): + super().__init__(locals) + self.compile.compiler.flags |= ast.PyCF_ALLOW_TOP_LEVEL_AWAIT + + self.loop = loop + + def runcode(self, code): + future = concurrent.futures.Future() + + def callback(): + global repl_future + global repl_future_interrupted + + repl_future = None + repl_future_interrupted = False + + func = types.FunctionType(code, self.locals) + try: + coro = func() + except SystemExit: + raise + except KeyboardInterrupt as ex: + repl_future_interrupted = True + future.set_exception(ex) + return + except BaseException as ex: + future.set_exception(ex) + return + + if not inspect.iscoroutine(coro): + future.set_result(coro) + return + + try: + repl_future = self.loop.create_task(coro) + futures._chain_future(repl_future, future) + except BaseException as exc: + future.set_exception(exc) + + loop.call_soon_threadsafe(callback) + + try: + return future.result() + except SystemExit: + raise + except BaseException: + if repl_future_interrupted: + self.write("\nKeyboardInterrupt\n") + else: + self.showtraceback() + + +class REPLThread(threading.Thread): + + def run(self): + try: + banner = ( + f'asyncio REPL {sys.version} on {sys.platform}\n' + f'Use "await" directly instead of "asyncio.run()".\n' + f'Type "help", "copyright", "credits" or "license" ' + f'for more information.\n' + f'{getattr(sys, "ps1", ">>> ")}import asyncio' + ) + + console.interact( + banner=banner, + exitmsg='exiting asyncio REPL...') + finally: + warnings.filterwarnings( + 'ignore', + message=r'^coroutine .* was never awaited$', + category=RuntimeWarning) + + loop.call_soon_threadsafe(loop.stop) + + +if __name__ == '__main__': + sys.audit("cpython.run_stdin") + + loop = 
asyncio.new_event_loop() + asyncio.set_event_loop(loop) + + repl_locals = {'asyncio': asyncio} + for key in {'__name__', '__package__', + '__loader__', '__spec__', + '__builtins__', '__file__'}: + repl_locals[key] = locals()[key] + + console = AsyncIOInteractiveConsole(repl_locals, loop) + + repl_future = None + repl_future_interrupted = False + + try: + import readline # NoQA + except ImportError: + pass + + repl_thread = REPLThread() + repl_thread.daemon = True + repl_thread.start() + + while True: + try: + loop.run_forever() + except KeyboardInterrupt: + if repl_future and not repl_future.done(): + repl_future.cancel() + repl_future_interrupted = True + continue + else: + break diff --git a/parrot/lib/python3.10/asyncio/base_futures.py b/parrot/lib/python3.10/asyncio/base_futures.py new file mode 100644 index 0000000000000000000000000000000000000000..2c01ac98e10fcaa4ac0b88a2f196dc6450769bf7 --- /dev/null +++ b/parrot/lib/python3.10/asyncio/base_futures.py @@ -0,0 +1,80 @@ +__all__ = () + +import reprlib +from _thread import get_ident + +from . import format_helpers + +# States for Future. +_PENDING = 'PENDING' +_CANCELLED = 'CANCELLED' +_FINISHED = 'FINISHED' + + +def isfuture(obj): + """Check for a Future. + + This returns True when obj is a Future instance or is advertising + itself as duck-type compatible by setting _asyncio_future_blocking. + See comment in Future for more details. + """ + return (hasattr(obj.__class__, '_asyncio_future_blocking') and + obj._asyncio_future_blocking is not None) + + +def _format_callbacks(cb): + """helper function for Future.__repr__""" + size = len(cb) + if not size: + cb = '' + + def format_cb(callback): + return format_helpers._format_callback_source(callback, ()) + + if size == 1: + cb = format_cb(cb[0][0]) + elif size == 2: + cb = '{}, {}'.format(format_cb(cb[0][0]), format_cb(cb[1][0])) + elif size > 2: + cb = '{}, <{} more>, {}'.format(format_cb(cb[0][0]), + size - 2, + format_cb(cb[-1][0])) + return f'cb=[{cb}]' + + +# bpo-42183: _repr_running is needed for repr protection +# when a Future or Task result contains itself directly or indirectly. +# The logic is borrowed from @reprlib.recursive_repr decorator. +# Unfortunately, the direct decorator usage is impossible because of +# AttributeError: '_asyncio.Task' object has no attribute '__module__' error. +# +# After fixing this thing we can return to the decorator based approach. +_repr_running = set() + + +def _future_repr_info(future): + # (Future) -> str + """helper function for Future.__repr__""" + info = [future._state.lower()] + if future._state == _FINISHED: + if future._exception is not None: + info.append(f'exception={future._exception!r}') + else: + key = id(future), get_ident() + if key in _repr_running: + result = '...' 
+ else: + _repr_running.add(key) + try: + # use reprlib to limit the length of the output, especially + # for very long strings + result = reprlib.repr(future._result) + finally: + _repr_running.discard(key) + info.append(f'result={result}') + if future._callbacks: + info.append(_format_callbacks(future._callbacks)) + if future._source_traceback: + frame = future._source_traceback[-1] + info.append(f'created at {frame[0]}:{frame[1]}') + return info diff --git a/parrot/lib/python3.10/asyncio/base_tasks.py b/parrot/lib/python3.10/asyncio/base_tasks.py new file mode 100644 index 0000000000000000000000000000000000000000..09bb171a2ce750a06a39e978bea0d3a7accf25f4 --- /dev/null +++ b/parrot/lib/python3.10/asyncio/base_tasks.py @@ -0,0 +1,85 @@ +import linecache +import traceback + +from . import base_futures +from . import coroutines + + +def _task_repr_info(task): + info = base_futures._future_repr_info(task) + + if task._must_cancel: + # replace status + info[0] = 'cancelling' + + info.insert(1, 'name=%r' % task.get_name()) + + coro = coroutines._format_coroutine(task._coro) + info.insert(2, f'coro=<{coro}>') + + if task._fut_waiter is not None: + info.insert(3, f'wait_for={task._fut_waiter!r}') + return info + + +def _task_get_stack(task, limit): + frames = [] + if hasattr(task._coro, 'cr_frame'): + # case 1: 'async def' coroutines + f = task._coro.cr_frame + elif hasattr(task._coro, 'gi_frame'): + # case 2: legacy coroutines + f = task._coro.gi_frame + elif hasattr(task._coro, 'ag_frame'): + # case 3: async generators + f = task._coro.ag_frame + else: + # case 4: unknown objects + f = None + if f is not None: + while f is not None: + if limit is not None: + if limit <= 0: + break + limit -= 1 + frames.append(f) + f = f.f_back + frames.reverse() + elif task._exception is not None: + tb = task._exception.__traceback__ + while tb is not None: + if limit is not None: + if limit <= 0: + break + limit -= 1 + frames.append(tb.tb_frame) + tb = tb.tb_next + return frames + + +def _task_print_stack(task, limit, file): + extracted_list = [] + checked = set() + for f in task.get_stack(limit=limit): + lineno = f.f_lineno + co = f.f_code + filename = co.co_filename + name = co.co_name + if filename not in checked: + checked.add(filename) + linecache.checkcache(filename) + line = linecache.getline(filename, lineno, f.f_globals) + extracted_list.append((filename, lineno, name, line)) + + exc = task._exception + if not extracted_list: + print(f'No stack for {task!r}', file=file) + elif exc is not None: + print(f'Traceback for {task!r} (most recent call last):', file=file) + else: + print(f'Stack for {task!r} (most recent call last):', file=file) + + traceback.print_list(extracted_list, file=file) + if exc is not None: + for line in traceback.format_exception_only(exc.__class__, exc): + print(line, file=file, end='') diff --git a/parrot/lib/python3.10/asyncio/coroutines.py b/parrot/lib/python3.10/asyncio/coroutines.py new file mode 100644 index 0000000000000000000000000000000000000000..9664ea74d7514775fac6622d25d1d9dc175b92a9 --- /dev/null +++ b/parrot/lib/python3.10/asyncio/coroutines.py @@ -0,0 +1,269 @@ +__all__ = 'coroutine', 'iscoroutinefunction', 'iscoroutine' + +import collections.abc +import functools +import inspect +import os +import sys +import traceback +import types +import warnings + +from . import base_futures +from . import constants +from . 
import format_helpers +from .log import logger + + +def _is_debug_mode(): + # If you set _DEBUG to true, @coroutine will wrap the resulting + # generator objects in a CoroWrapper instance (defined below). That + # instance will log a message when the generator is never iterated + # over, which may happen when you forget to use "await" or "yield from" + # with a coroutine call. + # Note that the value of the _DEBUG flag is taken + # when the decorator is used, so to be of any use it must be set + # before you define your coroutines. A downside of using this feature + # is that tracebacks show entries for the CoroWrapper.__next__ method + # when _DEBUG is true. + return sys.flags.dev_mode or (not sys.flags.ignore_environment and + bool(os.environ.get('PYTHONASYNCIODEBUG'))) + + +_DEBUG = _is_debug_mode() + + +class CoroWrapper: + # Wrapper for coroutine object in _DEBUG mode. + + def __init__(self, gen, func=None): + assert inspect.isgenerator(gen) or inspect.iscoroutine(gen), gen + self.gen = gen + self.func = func # Used to unwrap @coroutine decorator + self._source_traceback = format_helpers.extract_stack(sys._getframe(1)) + self.__name__ = getattr(gen, '__name__', None) + self.__qualname__ = getattr(gen, '__qualname__', None) + + def __repr__(self): + coro_repr = _format_coroutine(self) + if self._source_traceback: + frame = self._source_traceback[-1] + coro_repr += f', created at {frame[0]}:{frame[1]}' + + return f'<{self.__class__.__name__} {coro_repr}>' + + def __iter__(self): + return self + + def __next__(self): + return self.gen.send(None) + + def send(self, value): + return self.gen.send(value) + + def throw(self, type, value=None, traceback=None): + return self.gen.throw(type, value, traceback) + + def close(self): + return self.gen.close() + + @property + def gi_frame(self): + return self.gen.gi_frame + + @property + def gi_running(self): + return self.gen.gi_running + + @property + def gi_code(self): + return self.gen.gi_code + + def __await__(self): + return self + + @property + def gi_yieldfrom(self): + return self.gen.gi_yieldfrom + + def __del__(self): + # Be careful accessing self.gen.frame -- self.gen might not exist. + gen = getattr(self, 'gen', None) + frame = getattr(gen, 'gi_frame', None) + if frame is not None and frame.f_lasti == -1: + msg = f'{self!r} was never yielded from' + tb = getattr(self, '_source_traceback', ()) + if tb: + tb = ''.join(traceback.format_list(tb)) + msg += (f'\nCoroutine object created at ' + f'(most recent call last, truncated to ' + f'{constants.DEBUG_STACK_DEPTH} last lines):\n') + msg += tb.rstrip() + logger.error(msg) + + +def coroutine(func): + """Decorator to mark coroutines. + + If the coroutine is not yielded from before it is destroyed, + an error message is logged. + """ + warnings.warn('"@coroutine" decorator is deprecated since Python 3.8, use "async def" instead', + DeprecationWarning, + stacklevel=2) + if inspect.iscoroutinefunction(func): + # In Python 3.5 that's all we need to do for coroutines + # defined with "async def". + return func + + if inspect.isgeneratorfunction(func): + coro = func + else: + @functools.wraps(func) + def coro(*args, **kw): + res = func(*args, **kw) + if (base_futures.isfuture(res) or inspect.isgenerator(res) or + isinstance(res, CoroWrapper)): + res = yield from res + else: + # If 'res' is an awaitable, run it. 
+ try: + await_meth = res.__await__ + except AttributeError: + pass + else: + if isinstance(res, collections.abc.Awaitable): + res = yield from await_meth() + return res + + coro = types.coroutine(coro) + if not _DEBUG: + wrapper = coro + else: + @functools.wraps(func) + def wrapper(*args, **kwds): + w = CoroWrapper(coro(*args, **kwds), func=func) + if w._source_traceback: + del w._source_traceback[-1] + # Python < 3.5 does not implement __qualname__ + # on generator objects, so we set it manually. + # We use getattr as some callables (such as + # functools.partial may lack __qualname__). + w.__name__ = getattr(func, '__name__', None) + w.__qualname__ = getattr(func, '__qualname__', None) + return w + + wrapper._is_coroutine = _is_coroutine # For iscoroutinefunction(). + return wrapper + + +# A marker for iscoroutinefunction. +_is_coroutine = object() + + +def iscoroutinefunction(func): + """Return True if func is a decorated coroutine function.""" + return (inspect.iscoroutinefunction(func) or + getattr(func, '_is_coroutine', None) is _is_coroutine) + + +# Prioritize native coroutine check to speed-up +# asyncio.iscoroutine. +_COROUTINE_TYPES = (types.CoroutineType, types.GeneratorType, + collections.abc.Coroutine, CoroWrapper) +_iscoroutine_typecache = set() + + +def iscoroutine(obj): + """Return True if obj is a coroutine object.""" + if type(obj) in _iscoroutine_typecache: + return True + + if isinstance(obj, _COROUTINE_TYPES): + # Just in case we don't want to cache more than 100 + # positive types. That shouldn't ever happen, unless + # someone stressing the system on purpose. + if len(_iscoroutine_typecache) < 100: + _iscoroutine_typecache.add(type(obj)) + return True + else: + return False + + +def _format_coroutine(coro): + assert iscoroutine(coro) + + is_corowrapper = isinstance(coro, CoroWrapper) + + def get_name(coro): + # Coroutines compiled with Cython sometimes don't have + # proper __qualname__ or __name__. While that is a bug + # in Cython, asyncio shouldn't crash with an AttributeError + # in its __repr__ functions. + if is_corowrapper: + return format_helpers._format_callback(coro.func, (), {}) + + if hasattr(coro, '__qualname__') and coro.__qualname__: + coro_name = coro.__qualname__ + elif hasattr(coro, '__name__') and coro.__name__: + coro_name = coro.__name__ + else: + # Stop masking Cython bugs, expose them in a friendly way. + coro_name = f'<{type(coro).__name__} without __name__>' + return f'{coro_name}()' + + def is_running(coro): + try: + return coro.cr_running + except AttributeError: + try: + return coro.gi_running + except AttributeError: + return False + + coro_code = None + if hasattr(coro, 'cr_code') and coro.cr_code: + coro_code = coro.cr_code + elif hasattr(coro, 'gi_code') and coro.gi_code: + coro_code = coro.gi_code + + coro_name = get_name(coro) + + if not coro_code: + # Built-in types might not have __qualname__ or __name__. + if is_running(coro): + return f'{coro_name} running' + else: + return coro_name + + coro_frame = None + if hasattr(coro, 'gi_frame') and coro.gi_frame: + coro_frame = coro.gi_frame + elif hasattr(coro, 'cr_frame') and coro.cr_frame: + coro_frame = coro.cr_frame + + # If Cython's coroutine has a fake code object without proper + # co_filename -- expose that. 
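The `coroutine()` decorator and `iscoroutinefunction()` defined above accept both native `async def` functions and legacy generator-based coroutines (the `_is_coroutine` marker covers the latter). A minimal sketch of the two styles on Python 3.10; the decorated form still runs but emits a DeprecationWarning at definition time:

```python
import asyncio

@asyncio.coroutine  # deprecated since 3.8; warns when the function is defined
def legacy():
    yield from asyncio.sleep(0)

async def modern():
    await asyncio.sleep(0)

print(asyncio.iscoroutinefunction(legacy))  # True, via the _is_coroutine marker
print(asyncio.iscoroutinefunction(modern))  # True, native coroutine function
asyncio.run(modern())
```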
+ filename = coro_code.co_filename or '<empty co_filename>' + + lineno = 0 + if (is_corowrapper and + coro.func is not None and + not inspect.isgeneratorfunction(coro.func)): + source = format_helpers._get_function_source(coro.func) + if source is not None: + filename, lineno = source + if coro_frame is None: + coro_repr = f'{coro_name} done, defined at {filename}:{lineno}' + else: + coro_repr = f'{coro_name} running, defined at {filename}:{lineno}' + + elif coro_frame is not None: + lineno = coro_frame.f_lineno + coro_repr = f'{coro_name} running at {filename}:{lineno}' + + else: + lineno = coro_code.co_firstlineno + coro_repr = f'{coro_name} done, defined at {filename}:{lineno}' + + return coro_repr diff --git a/parrot/lib/python3.10/asyncio/exceptions.py b/parrot/lib/python3.10/asyncio/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..f07e4486577381616e7e2f7f05747ed7374316a1 --- /dev/null +++ b/parrot/lib/python3.10/asyncio/exceptions.py @@ -0,0 +1,58 @@ +"""asyncio exceptions.""" + + +__all__ = ('CancelledError', 'InvalidStateError', 'TimeoutError', + 'IncompleteReadError', 'LimitOverrunError', + 'SendfileNotAvailableError') + + +class CancelledError(BaseException): + """The Future or Task was cancelled.""" + + +class TimeoutError(Exception): + """The operation exceeded the given deadline.""" + + +class InvalidStateError(Exception): + """The operation is not allowed in this state.""" + + +class SendfileNotAvailableError(RuntimeError): + """Sendfile syscall is not available. + + Raised if OS does not support sendfile syscall for given socket or + file type. + """ + + +class IncompleteReadError(EOFError): + """ + Incomplete read error. Attributes: + + - partial: read bytes string before the end of stream was reached + - expected: total number of expected bytes (or None if unknown) + """ + def __init__(self, partial, expected): + r_expected = 'undefined' if expected is None else repr(expected) + super().__init__(f'{len(partial)} bytes read on a total of ' + f'{r_expected} expected bytes') + self.partial = partial + self.expected = expected + + def __reduce__(self): + return type(self), (self.partial, self.expected) + + +class LimitOverrunError(Exception): + """Reached the buffer limit while looking for a separator. + + Attributes: + - consumed: total number of bytes to be consumed. + """ + def __init__(self, message, consumed): + super().__init__(message) + self.consumed = consumed + + def __reduce__(self): + return type(self), (self.args[0], self.consumed) diff --git a/parrot/lib/python3.10/asyncio/format_helpers.py b/parrot/lib/python3.10/asyncio/format_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..27d11fd4fa9553e21077bd4a93c523dbcb1d32cb --- /dev/null +++ b/parrot/lib/python3.10/asyncio/format_helpers.py @@ -0,0 +1,76 @@ +import functools +import inspect +import reprlib +import sys +import traceback + +from .
import constants + + +def _get_function_source(func): + func = inspect.unwrap(func) + if inspect.isfunction(func): + code = func.__code__ + return (code.co_filename, code.co_firstlineno) + if isinstance(func, functools.partial): + return _get_function_source(func.func) + if isinstance(func, functools.partialmethod): + return _get_function_source(func.func) + return None + + +def _format_callback_source(func, args): + func_repr = _format_callback(func, args, None) + source = _get_function_source(func) + if source: + func_repr += f' at {source[0]}:{source[1]}' + return func_repr + + +def _format_args_and_kwargs(args, kwargs): + """Format function arguments and keyword arguments. + + Special case for a single parameter: ('hello',) is formatted as ('hello'). + """ + # use reprlib to limit the length of the output + items = [] + if args: + items.extend(reprlib.repr(arg) for arg in args) + if kwargs: + items.extend(f'{k}={reprlib.repr(v)}' for k, v in kwargs.items()) + return '({})'.format(', '.join(items)) + + +def _format_callback(func, args, kwargs, suffix=''): + if isinstance(func, functools.partial): + suffix = _format_args_and_kwargs(args, kwargs) + suffix + return _format_callback(func.func, func.args, func.keywords, suffix) + + if hasattr(func, '__qualname__') and func.__qualname__: + func_repr = func.__qualname__ + elif hasattr(func, '__name__') and func.__name__: + func_repr = func.__name__ + else: + func_repr = repr(func) + + func_repr += _format_args_and_kwargs(args, kwargs) + if suffix: + func_repr += suffix + return func_repr + + +def extract_stack(f=None, limit=None): + """Replacement for traceback.extract_stack() that only does the + necessary work for asyncio debug mode. + """ + if f is None: + f = sys._getframe().f_back + if limit is None: + # Limit the amount of work to a reasonable amount, as extract_stack() + # can be called for each coroutine and future in debug mode. + limit = constants.DEBUG_STACK_DEPTH + stack = traceback.StackSummary.extract(traceback.walk_stack(f), + limit=limit, + lookup_lines=False) + stack.reverse() + return stack diff --git a/parrot/lib/python3.10/asyncio/futures.py b/parrot/lib/python3.10/asyncio/futures.py new file mode 100644 index 0000000000000000000000000000000000000000..5d00ab4c8d3119bd4c2da81a1e90996f5da2af0c --- /dev/null +++ b/parrot/lib/python3.10/asyncio/futures.py @@ -0,0 +1,426 @@ +"""A Future class similar to the one in PEP 3148.""" + +__all__ = ( + 'Future', 'wrap_future', 'isfuture', +) + +import concurrent.futures +import contextvars +import logging +import sys +from types import GenericAlias + +from . import base_futures +from . import events +from . import exceptions +from . import format_helpers + + +isfuture = base_futures.isfuture + + +_PENDING = base_futures._PENDING +_CANCELLED = base_futures._CANCELLED +_FINISHED = base_futures._FINISHED + + +STACK_DEBUG = logging.DEBUG - 1 # heavy-duty debugging + + +class Future: + """This class is *almost* compatible with concurrent.futures.Future. + + Differences: + + - This class is not thread-safe. + + - result() and exception() do not take a timeout argument and + raise an exception when the future isn't done yet. + + - Callbacks registered with add_done_callback() are always called + via the event loop's call_soon(). + + - This class is not compatible with the wait() and as_completed() + methods in the concurrent.futures package. + + (In Python 3.4 or later we may be able to unify the implementations.) + """ + + # Class variables serving as defaults for instance variables. 
+ _state = _PENDING + _result = None + _exception = None + _loop = None + _source_traceback = None + _cancel_message = None + # A saved CancelledError for later chaining as an exception context. + _cancelled_exc = None + + # This field is used for a dual purpose: + # - Its presence is a marker to declare that a class implements + # the Future protocol (i.e. is intended to be duck-type compatible). + # The value must also be not-None, to enable a subclass to declare + # that it is not compatible by setting this to None. + # - It is set by __iter__() below so that Task._step() can tell + # the difference between + # `await Future()` or`yield from Future()` (correct) vs. + # `yield Future()` (incorrect). + _asyncio_future_blocking = False + + __log_traceback = False + + def __init__(self, *, loop=None): + """Initialize the future. + + The optional event_loop argument allows explicitly setting the event + loop object used by the future. If it's not provided, the future uses + the default event loop. + """ + if loop is None: + self._loop = events._get_event_loop() + else: + self._loop = loop + self._callbacks = [] + if self._loop.get_debug(): + self._source_traceback = format_helpers.extract_stack( + sys._getframe(1)) + + _repr_info = base_futures._future_repr_info + + def __repr__(self): + return '<{} {}>'.format(self.__class__.__name__, + ' '.join(self._repr_info())) + + def __del__(self): + if not self.__log_traceback: + # set_exception() was not called, or result() or exception() + # has consumed the exception + return + exc = self._exception + context = { + 'message': + f'{self.__class__.__name__} exception was never retrieved', + 'exception': exc, + 'future': self, + } + if self._source_traceback: + context['source_traceback'] = self._source_traceback + self._loop.call_exception_handler(context) + + __class_getitem__ = classmethod(GenericAlias) + + @property + def _log_traceback(self): + return self.__log_traceback + + @_log_traceback.setter + def _log_traceback(self, val): + if val: + raise ValueError('_log_traceback can only be set to False') + self.__log_traceback = False + + def get_loop(self): + """Return the event loop the Future is bound to.""" + loop = self._loop + if loop is None: + raise RuntimeError("Future object is not initialized.") + return loop + + def _make_cancelled_error(self): + """Create the CancelledError to raise if the Future is cancelled. + + This should only be called once when handling a cancellation since + it erases the saved context exception value. + """ + if self._cancel_message is None: + exc = exceptions.CancelledError() + else: + exc = exceptions.CancelledError(self._cancel_message) + exc.__context__ = self._cancelled_exc + # Remove the reference since we don't need this anymore. + self._cancelled_exc = None + return exc + + def cancel(self, msg=None): + """Cancel the future and schedule callbacks. + + If the future is already done or cancelled, return False. Otherwise, + change the future's state to cancelled, schedule the callbacks and + return True. + """ + self.__log_traceback = False + if self._state != _PENDING: + return False + self._state = _CANCELLED + self._cancel_message = msg + self.__schedule_callbacks() + return True + + def __schedule_callbacks(self): + """Internal: Ask the event loop to call all callbacks. + + The callbacks are scheduled to be called as soon as possible. Also + clears the callback list. 
+ """ + callbacks = self._callbacks[:] + if not callbacks: + return + + self._callbacks[:] = [] + for callback, ctx in callbacks: + self._loop.call_soon(callback, self, context=ctx) + + def cancelled(self): + """Return True if the future was cancelled.""" + return self._state == _CANCELLED + + # Don't implement running(); see http://bugs.python.org/issue18699 + + def done(self): + """Return True if the future is done. + + Done means either that a result / exception are available, or that the + future was cancelled. + """ + return self._state != _PENDING + + def result(self): + """Return the result this future represents. + + If the future has been cancelled, raises CancelledError. If the + future's result isn't yet available, raises InvalidStateError. If + the future is done and has an exception set, this exception is raised. + """ + if self._state == _CANCELLED: + exc = self._make_cancelled_error() + raise exc + if self._state != _FINISHED: + raise exceptions.InvalidStateError('Result is not ready.') + self.__log_traceback = False + if self._exception is not None: + raise self._exception.with_traceback(self._exception_tb) + return self._result + + def exception(self): + """Return the exception that was set on this future. + + The exception (or None if no exception was set) is returned only if + the future is done. If the future has been cancelled, raises + CancelledError. If the future isn't done yet, raises + InvalidStateError. + """ + if self._state == _CANCELLED: + exc = self._make_cancelled_error() + raise exc + if self._state != _FINISHED: + raise exceptions.InvalidStateError('Exception is not set.') + self.__log_traceback = False + return self._exception + + def add_done_callback(self, fn, *, context=None): + """Add a callback to be run when the future becomes done. + + The callback is called with a single argument - the future object. If + the future is already done when this is called, the callback is + scheduled with call_soon. + """ + if self._state != _PENDING: + self._loop.call_soon(fn, self, context=context) + else: + if context is None: + context = contextvars.copy_context() + self._callbacks.append((fn, context)) + + # New method not in PEP 3148. + + def remove_done_callback(self, fn): + """Remove all instances of a callback from the "call when done" list. + + Returns the number of callbacks removed. + """ + filtered_callbacks = [(f, ctx) + for (f, ctx) in self._callbacks + if f != fn] + removed_count = len(self._callbacks) - len(filtered_callbacks) + if removed_count: + self._callbacks[:] = filtered_callbacks + return removed_count + + # So-called internal methods (note: no set_running_or_notify_cancel()). + + def set_result(self, result): + """Mark the future done and set its result. + + If the future is already done when this method is called, raises + InvalidStateError. + """ + if self._state != _PENDING: + raise exceptions.InvalidStateError(f'{self._state}: {self!r}') + self._result = result + self._state = _FINISHED + self.__schedule_callbacks() + + def set_exception(self, exception): + """Mark the future done and set an exception. + + If the future is already done when this method is called, raises + InvalidStateError. 
+ """ + if self._state != _PENDING: + raise exceptions.InvalidStateError(f'{self._state}: {self!r}') + if isinstance(exception, type): + exception = exception() + if type(exception) is StopIteration: + raise TypeError("StopIteration interacts badly with generators " + "and cannot be raised into a Future") + self._exception = exception + self._exception_tb = exception.__traceback__ + self._state = _FINISHED + self.__schedule_callbacks() + self.__log_traceback = True + + def __await__(self): + if not self.done(): + self._asyncio_future_blocking = True + yield self # This tells Task to wait for completion. + if not self.done(): + raise RuntimeError("await wasn't used with future") + return self.result() # May raise too. + + __iter__ = __await__ # make compatible with 'yield from'. + + +# Needed for testing purposes. +_PyFuture = Future + + +def _get_loop(fut): + # Tries to call Future.get_loop() if it's available. + # Otherwise fallbacks to using the old '_loop' property. + try: + get_loop = fut.get_loop + except AttributeError: + pass + else: + return get_loop() + return fut._loop + + +def _set_result_unless_cancelled(fut, result): + """Helper setting the result only if the future was not cancelled.""" + if fut.cancelled(): + return + fut.set_result(result) + + +def _convert_future_exc(exc): + exc_class = type(exc) + if exc_class is concurrent.futures.CancelledError: + return exceptions.CancelledError(*exc.args) + elif exc_class is concurrent.futures.TimeoutError: + return exceptions.TimeoutError(*exc.args) + elif exc_class is concurrent.futures.InvalidStateError: + return exceptions.InvalidStateError(*exc.args) + else: + return exc + + +def _set_concurrent_future_state(concurrent, source): + """Copy state from a future to a concurrent.futures.Future.""" + assert source.done() + if source.cancelled(): + concurrent.cancel() + if not concurrent.set_running_or_notify_cancel(): + return + exception = source.exception() + if exception is not None: + concurrent.set_exception(_convert_future_exc(exception)) + else: + result = source.result() + concurrent.set_result(result) + + +def _copy_future_state(source, dest): + """Internal helper to copy state from another Future. + + The other Future may be a concurrent.futures.Future. + """ + assert source.done() + if dest.cancelled(): + return + assert not dest.done() + if source.cancelled(): + dest.cancel() + else: + exception = source.exception() + if exception is not None: + dest.set_exception(_convert_future_exc(exception)) + else: + result = source.result() + dest.set_result(result) + + +def _chain_future(source, destination): + """Chain two futures so that when one completes, so does the other. + + The result (or exception) of source will be copied to destination. + If destination is cancelled, source gets cancelled too. + Compatible with both asyncio.Future and concurrent.futures.Future. 
+ """ + if not isfuture(source) and not isinstance(source, + concurrent.futures.Future): + raise TypeError('A future is required for source argument') + if not isfuture(destination) and not isinstance(destination, + concurrent.futures.Future): + raise TypeError('A future is required for destination argument') + source_loop = _get_loop(source) if isfuture(source) else None + dest_loop = _get_loop(destination) if isfuture(destination) else None + + def _set_state(future, other): + if isfuture(future): + _copy_future_state(other, future) + else: + _set_concurrent_future_state(future, other) + + def _call_check_cancel(destination): + if destination.cancelled(): + if source_loop is None or source_loop is dest_loop: + source.cancel() + else: + source_loop.call_soon_threadsafe(source.cancel) + + def _call_set_state(source): + if (destination.cancelled() and + dest_loop is not None and dest_loop.is_closed()): + return + if dest_loop is None or dest_loop is source_loop: + _set_state(destination, source) + else: + if dest_loop.is_closed(): + return + dest_loop.call_soon_threadsafe(_set_state, destination, source) + + destination.add_done_callback(_call_check_cancel) + source.add_done_callback(_call_set_state) + + +def wrap_future(future, *, loop=None): + """Wrap concurrent.futures.Future object.""" + if isfuture(future): + return future + assert isinstance(future, concurrent.futures.Future), \ + f'concurrent.futures.Future is expected, got {future!r}' + if loop is None: + loop = events._get_event_loop() + new_future = loop.create_future() + _chain_future(future, new_future) + return new_future + + +try: + import _asyncio +except ImportError: + pass +else: + # _CFuture is needed for tests. + Future = _CFuture = _asyncio.Future diff --git a/parrot/lib/python3.10/asyncio/locks.py b/parrot/lib/python3.10/asyncio/locks.py new file mode 100644 index 0000000000000000000000000000000000000000..e192159127a3f602b5649e9a4a48e841e572ec00 --- /dev/null +++ b/parrot/lib/python3.10/asyncio/locks.py @@ -0,0 +1,438 @@ +"""Synchronization primitives.""" + +__all__ = ('Lock', 'Event', 'Condition', 'Semaphore', 'BoundedSemaphore') + +import collections + +from . import exceptions +from . import mixins +from . import tasks + + +class _ContextManagerMixin: + async def __aenter__(self): + await self.acquire() + # We have no use for the "as ..." clause in the with + # statement for locks. + return None + + async def __aexit__(self, exc_type, exc, tb): + self.release() + + +class Lock(_ContextManagerMixin, mixins._LoopBoundMixin): + """Primitive lock objects. + + A primitive lock is a synchronization primitive that is not owned + by a particular coroutine when locked. A primitive lock is in one + of two states, 'locked' or 'unlocked'. + + It is created in the unlocked state. It has two basic methods, + acquire() and release(). When the state is unlocked, acquire() + changes the state to locked and returns immediately. When the + state is locked, acquire() blocks until a call to release() in + another coroutine changes it to unlocked, then the acquire() call + resets it to locked and returns. The release() method should only + be called in the locked state; it changes the state to unlocked + and returns immediately. If an attempt is made to release an + unlocked lock, a RuntimeError will be raised. 
+ + When more than one coroutine is blocked in acquire() waiting for + the state to turn to unlocked, only one coroutine proceeds when a + release() call resets the state to unlocked; the first coroutine that + blocked in acquire() is the one that proceeds. + + acquire() is a coroutine and should be called with 'await'. + + Locks also support the asynchronous context management protocol. + 'async with lock' statement should be used. + + Usage: + + lock = Lock() + ... + await lock.acquire() + try: + ... + finally: + lock.release() + + Context manager usage: + + lock = Lock() + ... + async with lock: + ... + + Lock objects can be tested for locking state: + + if not lock.locked(): + await lock.acquire() + else: + # lock is acquired + ... + + """ + + def __init__(self, *, loop=mixins._marker): + super().__init__(loop=loop) + self._waiters = None + self._locked = False + + def __repr__(self): + res = super().__repr__() + extra = 'locked' if self._locked else 'unlocked' + if self._waiters: + extra = f'{extra}, waiters:{len(self._waiters)}' + return f'<{res[1:-1]} [{extra}]>' + + def locked(self): + """Return True if lock is acquired.""" + return self._locked + + async def acquire(self): + """Acquire a lock. + + This method blocks until the lock is unlocked, then sets it to + locked and returns True. + """ + if (not self._locked and (self._waiters is None or + all(w.cancelled() for w in self._waiters))): + self._locked = True + return True + + if self._waiters is None: + self._waiters = collections.deque() + fut = self._get_loop().create_future() + self._waiters.append(fut) + + # Finally block should be called before the CancelledError + # handling as we don't want CancelledError to call + # _wake_up_first() and attempt to wake up itself. + try: + try: + await fut + finally: + self._waiters.remove(fut) + except exceptions.CancelledError: + if not self._locked: + self._wake_up_first() + raise + + self._locked = True + return True + + def release(self): + """Release a lock. + + When the lock is locked, reset it to unlocked, and return. + If any other coroutines are blocked waiting for the lock to become + unlocked, allow exactly one of them to proceed. + + When invoked on an unlocked lock, a RuntimeError is raised. + + There is no return value. + """ + if self._locked: + self._locked = False + self._wake_up_first() + else: + raise RuntimeError('Lock is not acquired.') + + def _wake_up_first(self): + """Wake up the first waiter if it isn't done.""" + if not self._waiters: + return + try: + fut = next(iter(self._waiters)) + except StopIteration: + return + + # .done() necessarily means that a waiter will wake up later on and + # either take the lock, or, if it was cancelled and lock wasn't + # taken already, will hit this again and wake up a new waiter. + if not fut.done(): + fut.set_result(True) + + +class Event(mixins._LoopBoundMixin): + """Asynchronous equivalent to threading.Event. + + Class implementing event objects. An event manages a flag that can be set + to true with the set() method and reset to false with the clear() method. + The wait() method blocks until the flag is true. The flag is initially + false.
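A short sketch of the set()/wait() handshake this docstring describes, with one coroutine parked in `wait()` until another flips the flag:

```python
import asyncio

async def waiter(event):
    print('waiting...')
    await event.wait()          # suspends until event.set() runs
    print('event is set')

async def main():
    event = asyncio.Event()
    task = asyncio.create_task(waiter(event))
    await asyncio.sleep(0.1)    # give the waiter a chance to block
    event.set()                 # wakes every waiter at once
    await task

asyncio.run(main())
```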
+ """ + + def __init__(self, *, loop=mixins._marker): + super().__init__(loop=loop) + self._waiters = collections.deque() + self._value = False + + def __repr__(self): + res = super().__repr__() + extra = 'set' if self._value else 'unset' + if self._waiters: + extra = f'{extra}, waiters:{len(self._waiters)}' + return f'<{res[1:-1]} [{extra}]>' + + def is_set(self): + """Return True if and only if the internal flag is true.""" + return self._value + + def set(self): + """Set the internal flag to true. All coroutines waiting for it to + become true are awakened. Coroutine that call wait() once the flag is + true will not block at all. + """ + if not self._value: + self._value = True + + for fut in self._waiters: + if not fut.done(): + fut.set_result(True) + + def clear(self): + """Reset the internal flag to false. Subsequently, coroutines calling + wait() will block until set() is called to set the internal flag + to true again.""" + self._value = False + + async def wait(self): + """Block until the internal flag is true. + + If the internal flag is true on entry, return True + immediately. Otherwise, block until another coroutine calls + set() to set the flag to true, then return True. + """ + if self._value: + return True + + fut = self._get_loop().create_future() + self._waiters.append(fut) + try: + await fut + return True + finally: + self._waiters.remove(fut) + + +class Condition(_ContextManagerMixin, mixins._LoopBoundMixin): + """Asynchronous equivalent to threading.Condition. + + This class implements condition variable objects. A condition variable + allows one or more coroutines to wait until they are notified by another + coroutine. + + A new Lock object is created and used as the underlying lock. + """ + + def __init__(self, lock=None, *, loop=mixins._marker): + super().__init__(loop=loop) + if lock is None: + lock = Lock() + + self._lock = lock + # Export the lock's locked(), acquire() and release() methods. + self.locked = lock.locked + self.acquire = lock.acquire + self.release = lock.release + + self._waiters = collections.deque() + + def __repr__(self): + res = super().__repr__() + extra = 'locked' if self.locked() else 'unlocked' + if self._waiters: + extra = f'{extra}, waiters:{len(self._waiters)}' + return f'<{res[1:-1]} [{extra}]>' + + async def wait(self): + """Wait until notified. + + If the calling coroutine has not acquired the lock when this + method is called, a RuntimeError is raised. + + This method releases the underlying lock, and then blocks + until it is awakened by a notify() or notify_all() call for + the same condition variable in another coroutine. Once + awakened, it re-acquires the lock and returns True. + """ + if not self.locked(): + raise RuntimeError('cannot wait on un-acquired lock') + + self.release() + try: + fut = self._get_loop().create_future() + self._waiters.append(fut) + try: + await fut + return True + finally: + self._waiters.remove(fut) + + finally: + # Must reacquire lock even if wait is cancelled + cancelled = False + while True: + try: + await self.acquire() + break + except exceptions.CancelledError: + cancelled = True + + if cancelled: + raise exceptions.CancelledError + + async def wait_for(self, predicate): + """Wait until a predicate becomes true. + + The predicate should be a callable which result will be + interpreted as a boolean value. The final predicate value is + the return value. 
+ """ + result = predicate() + while not result: + await self.wait() + result = predicate() + return result + + def notify(self, n=1): + """By default, wake up one coroutine waiting on this condition, if any. + If the calling coroutine has not acquired the lock when this method + is called, a RuntimeError is raised. + + This method wakes up at most n of the coroutines waiting for the + condition variable; it is a no-op if no coroutines are waiting. + + Note: an awakened coroutine does not actually return from its + wait() call until it can reacquire the lock. Since notify() does + not release the lock, its caller should. + """ + if not self.locked(): + raise RuntimeError('cannot notify on un-acquired lock') + + idx = 0 + for fut in self._waiters: + if idx >= n: + break + + if not fut.done(): + idx += 1 + fut.set_result(False) + + def notify_all(self): + """Wake up all threads waiting on this condition. This method acts + like notify(), but wakes up all waiting threads instead of one. If the + calling thread has not acquired the lock when this method is called, + a RuntimeError is raised. + """ + self.notify(len(self._waiters)) + + +class Semaphore(_ContextManagerMixin, mixins._LoopBoundMixin): + """A Semaphore implementation. + + A semaphore manages an internal counter which is decremented by each + acquire() call and incremented by each release() call. The counter + can never go below zero; when acquire() finds that it is zero, it blocks, + waiting until some other thread calls release(). + + Semaphores also support the context management protocol. + + The optional argument gives the initial value for the internal + counter; it defaults to 1. If the value given is less than 0, + ValueError is raised. + """ + + def __init__(self, value=1, *, loop=mixins._marker): + super().__init__(loop=loop) + if value < 0: + raise ValueError("Semaphore initial value must be >= 0") + self._waiters = None + self._value = value + + def __repr__(self): + res = super().__repr__() + extra = 'locked' if self.locked() else f'unlocked, value:{self._value}' + if self._waiters: + extra = f'{extra}, waiters:{len(self._waiters)}' + return f'<{res[1:-1]} [{extra}]>' + + def locked(self): + """Returns True if semaphore cannot be acquired immediately.""" + return self._value == 0 or ( + any(not w.cancelled() for w in (self._waiters or ()))) + + async def acquire(self): + """Acquire a semaphore. + + If the internal counter is larger than zero on entry, + decrement it by one and return True immediately. If it is + zero on entry, block, waiting until some other coroutine has + called release() to make it larger than 0, and then return + True. + """ + if not self.locked(): + self._value -= 1 + return True + + if self._waiters is None: + self._waiters = collections.deque() + fut = self._get_loop().create_future() + self._waiters.append(fut) + + # Finally block should be called before the CancelledError + # handling as we don't want CancelledError to call + # _wake_up_first() and attempt to wake up itself. + try: + try: + await fut + finally: + self._waiters.remove(fut) + except exceptions.CancelledError: + if not fut.cancelled(): + self._value += 1 + self._wake_up_next() + raise + + if self._value > 0: + self._wake_up_next() + return True + + def release(self): + """Release a semaphore, incrementing the internal counter by one. + + When it was zero on entry and another coroutine is waiting for it to + become larger than zero again, wake up that coroutine. 
+ """ + self._value += 1 + self._wake_up_next() + + def _wake_up_next(self): + """Wake up the first waiter that isn't done.""" + if not self._waiters: + return + + for fut in self._waiters: + if not fut.done(): + self._value -= 1 + fut.set_result(True) + return + + +class BoundedSemaphore(Semaphore): + """A bounded semaphore implementation. + + This raises ValueError in release() if it would increase the value + above the initial value. + """ + + def __init__(self, value=1, *, loop=mixins._marker): + self._bound_value = value + super().__init__(value, loop=loop) + + def release(self): + if self._value >= self._bound_value: + raise ValueError('BoundedSemaphore released too many times') + super().release() diff --git a/parrot/lib/python3.10/asyncio/mixins.py b/parrot/lib/python3.10/asyncio/mixins.py new file mode 100644 index 0000000000000000000000000000000000000000..650df05ccc93ea652d22637b740b56d8a4bb60a3 --- /dev/null +++ b/parrot/lib/python3.10/asyncio/mixins.py @@ -0,0 +1,31 @@ +"""Event loop mixins.""" + +import threading +from . import events + +_global_lock = threading.Lock() + +# Used as a sentinel for loop parameter +_marker = object() + + +class _LoopBoundMixin: + _loop = None + + def __init__(self, *, loop=_marker): + if loop is not _marker: + raise TypeError( + f'As of 3.10, the *loop* parameter was removed from ' + f'{type(self).__name__}() since it is no longer necessary' + ) + + def _get_loop(self): + loop = events._get_running_loop() + + if self._loop is None: + with _global_lock: + if self._loop is None: + self._loop = loop + if loop is not self._loop: + raise RuntimeError(f'{self!r} is bound to a different event loop') + return loop diff --git a/parrot/lib/python3.10/asyncio/protocols.py b/parrot/lib/python3.10/asyncio/protocols.py new file mode 100644 index 0000000000000000000000000000000000000000..09987b164c66e57f8bd12190899a86947cd065b6 --- /dev/null +++ b/parrot/lib/python3.10/asyncio/protocols.py @@ -0,0 +1,216 @@ +"""Abstract Protocol base classes.""" + +__all__ = ( + 'BaseProtocol', 'Protocol', 'DatagramProtocol', + 'SubprocessProtocol', 'BufferedProtocol', +) + + +class BaseProtocol: + """Common base class for protocol interfaces. + + Usually user implements protocols that derived from BaseProtocol + like Protocol or ProcessProtocol. + + The only case when BaseProtocol should be implemented directly is + write-only transport like write pipe + """ + + __slots__ = () + + def connection_made(self, transport): + """Called when a connection is made. + + The argument is the transport representing the pipe connection. + To receive data, wait for data_received() calls. + When the connection is closed, connection_lost() is called. + """ + + def connection_lost(self, exc): + """Called when the connection is lost or closed. + + The argument is an exception object or None (the latter + meaning a regular EOF is received or the connection was + aborted or closed). + """ + + def pause_writing(self): + """Called when the transport's buffer goes over the high-water mark. + + Pause and resume calls are paired -- pause_writing() is called + once when the buffer goes strictly over the high-water mark + (even if subsequent writes increases the buffer size even + more), and eventually resume_writing() is called once when the + buffer size reaches the low-water mark. + + Note that if the buffer size equals the high-water mark, + pause_writing() is not called -- it must go strictly over. 
Conversely, resume_writing() is called when the buffer size is + equal to or lower than the low-water mark. These end conditions + are important to ensure that things go as expected when either + mark is zero. + + NOTE: This is the only Protocol callback that is not called + through EventLoop.call_soon() -- if it were, it would have no + effect when it's most needed (when the app keeps writing + without yielding until pause_writing() is called). + """ + + def resume_writing(self): + """Called when the transport's buffer drains below the low-water mark. + + See pause_writing() for details. + """ + + +class Protocol(BaseProtocol): + """Interface for stream protocol. + + The user should implement this interface. They can inherit from + this class but don't need to. The implementations here do + nothing (they don't raise exceptions). + + When the user wants to request a transport, they pass a protocol + factory to a utility function (e.g., EventLoop.create_connection()). + + When the connection is made successfully, connection_made() is + called with a suitable transport object. Then data_received() + will be called 0 or more times with data (bytes) received from the + transport; finally, connection_lost() will be called exactly once + with either an exception object or None as an argument. + + State machine of calls: + + start -> CM [-> DR*] [-> ER?] -> CL -> end + + * CM: connection_made() + * DR: data_received() + * ER: eof_received() + * CL: connection_lost() + """ + + __slots__ = () + + def data_received(self, data): + """Called when some data is received. + + The argument is a bytes object. + """ + + def eof_received(self): + """Called when the other end calls write_eof() or equivalent. + + If this returns a false value (including None), the transport + will close itself. If it returns a true value, closing the + transport is up to the protocol. + """ + + +class BufferedProtocol(BaseProtocol): + """Interface for stream protocol with manual buffer control. + + Event methods, such as `create_server` and `create_connection`, + accept factories that return protocols that implement this interface. + + The idea of BufferedProtocol is that it allows the protocol to manually + allocate and control the receive buffer. Event loops can then use the buffer + provided by the protocol to avoid unnecessary data copies. This + can result in noticeable performance improvement for protocols that + receive large amounts of data. Sophisticated protocols can allocate + the buffer only once at creation time. + + State machine of calls: + + start -> CM [-> GB [-> BU?]]* [-> ER?] -> CL -> end + + * CM: connection_made() + * GB: get_buffer() + * BU: buffer_updated() + * ER: eof_received() + * CL: connection_lost() + """ + + __slots__ = () + + def get_buffer(self, sizehint): + """Called to allocate a new receive buffer. + + *sizehint* is a recommended minimal size for the returned + buffer. When set to -1, the buffer size can be arbitrary. + + Must return an object that implements the + :ref:`buffer protocol <bufferobjects>`. + It is an error to return a zero-sized buffer. + """ + + def buffer_updated(self, nbytes): + """Called when the buffer was updated with the received data. + + *nbytes* is the total number of bytes that were written to + the buffer. + """ + + def eof_received(self): + """Called when the other end calls write_eof() or equivalent. + + If this returns a false value (including None), the transport + will close itself. If it returns a true value, closing the + transport is up to the protocol.
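A minimal sketch of the CM/GB/BU cycle this interface defines: one reusable buffer handed to the loop, with the protocol consuming whatever was written into it. Illustrative only; a real protocol would track partial frames rather than print. `_feed_data_to_buffered_proto()` at the end of this file shows the same calling convention from the loop's side.

```python
import asyncio

class PrintingProtocol(asyncio.BufferedProtocol):
    def __init__(self):
        self._buffer = bytearray(4096)  # allocated once, reused for every read

    def connection_made(self, transport):
        self.transport = transport

    def get_buffer(self, sizehint):
        return self._buffer             # the loop writes received bytes here

    def buffer_updated(self, nbytes):
        print('received:', bytes(self._buffer[:nbytes]))

    def eof_received(self):
        return False                    # falsy: let the transport close itself

    def connection_lost(self, exc):
        pass
```

Passed as a factory to `loop.create_connection(PrintingProtocol, host, port)` (with `host`/`port` as placeholders), the loop calls `get_buffer()`/`buffer_updated()` in place of `data_received()`.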
+ """ + + +class DatagramProtocol(BaseProtocol): + """Interface for datagram protocol.""" + + __slots__ = () + + def datagram_received(self, data, addr): + """Called when some datagram is received.""" + + def error_received(self, exc): + """Called when a send or receive operation raises an OSError. + + (Other than BlockingIOError or InterruptedError.) + """ + + +class SubprocessProtocol(BaseProtocol): + """Interface for protocol for subprocess calls.""" + + __slots__ = () + + def pipe_data_received(self, fd, data): + """Called when the subprocess writes data into stdout/stderr pipe. + + fd is int file descriptor. + data is bytes object. + """ + + def pipe_connection_lost(self, fd, exc): + """Called when a file descriptor associated with the child process is + closed. + + fd is the int file descriptor that was closed. + """ + + def process_exited(self): + """Called when subprocess has exited.""" + + +def _feed_data_to_buffered_proto(proto, data): + data_len = len(data) + while data_len: + buf = proto.get_buffer(data_len) + buf_len = len(buf) + if not buf_len: + raise RuntimeError('get_buffer() returned an empty buffer') + + if buf_len >= data_len: + buf[:data_len] = data + proto.buffer_updated(data_len) + return + else: + buf[:buf_len] = data[:buf_len] + proto.buffer_updated(buf_len) + data = data[buf_len:] + data_len = len(data) diff --git a/parrot/lib/python3.10/asyncio/runners.py b/parrot/lib/python3.10/asyncio/runners.py new file mode 100644 index 0000000000000000000000000000000000000000..9a5e9a48479ef78e7d8c7a8706d56603f234f2ba --- /dev/null +++ b/parrot/lib/python3.10/asyncio/runners.py @@ -0,0 +1,73 @@ +__all__ = 'run', + +from . import coroutines +from . import events +from . import tasks + + +def run(main, *, debug=None): + """Execute the coroutine and return the result. + + This function runs the passed coroutine, taking care of + managing the asyncio event loop and finalizing asynchronous + generators. + + This function cannot be called when another asyncio event loop is + running in the same thread. + + If debug is True, the event loop will be run in debug mode. + + This function always creates a new event loop and closes it at the end. + It should be used as a main entry point for asyncio programs, and should + ideally only be called once. 
+ + Example: + + async def main(): + await asyncio.sleep(1) + print('hello') + + asyncio.run(main()) + """ + if events._get_running_loop() is not None: + raise RuntimeError( + "asyncio.run() cannot be called from a running event loop") + + if not coroutines.iscoroutine(main): + raise ValueError("a coroutine was expected, got {!r}".format(main)) + + loop = events.new_event_loop() + try: + events.set_event_loop(loop) + if debug is not None: + loop.set_debug(debug) + return loop.run_until_complete(main) + finally: + try: + _cancel_all_tasks(loop) + loop.run_until_complete(loop.shutdown_asyncgens()) + loop.run_until_complete(loop.shutdown_default_executor()) + finally: + events.set_event_loop(None) + loop.close() + + +def _cancel_all_tasks(loop): + to_cancel = tasks.all_tasks(loop) + if not to_cancel: + return + + for task in to_cancel: + task.cancel() + + loop.run_until_complete(tasks.gather(*to_cancel, return_exceptions=True)) + + for task in to_cancel: + if task.cancelled(): + continue + if task.exception() is not None: + loop.call_exception_handler({ + 'message': 'unhandled exception during asyncio.run() shutdown', + 'exception': task.exception(), + 'task': task, + }) diff --git a/parrot/lib/python3.10/asyncio/selector_events.py b/parrot/lib/python3.10/asyncio/selector_events.py new file mode 100644 index 0000000000000000000000000000000000000000..8282f28037a62a25b79343d8b7b955f700ed83c9 --- /dev/null +++ b/parrot/lib/python3.10/asyncio/selector_events.py @@ -0,0 +1,1105 @@ +"""Event loop using a selector and related classes. + +A selector is a "notify-when-ready" multiplexer. For a subclass which +also includes support for signal handling, see the unix_events sub-module. +""" + +__all__ = 'BaseSelectorEventLoop', + +import collections +import errno +import functools +import selectors +import socket +import warnings +import weakref +try: + import ssl +except ImportError: # pragma: no cover + ssl = None + +from . import base_events +from . import constants +from . import events +from . import futures +from . import protocols +from . import sslproto +from . import transports +from . import trsock +from .log import logger + + +def _test_selector_event(selector, fd, event): + # Test if the selector is monitoring 'event' events + # for the file descriptor 'fd'. + try: + key = selector.get_key(fd) + except KeyError: + return False + else: + return bool(key.events & event) + + +class BaseSelectorEventLoop(base_events.BaseEventLoop): + """Selector event loop. + + See events.EventLoop for API specification. 
+ """ + + def __init__(self, selector=None): + super().__init__() + + if selector is None: + selector = selectors.DefaultSelector() + logger.debug('Using selector: %s', selector.__class__.__name__) + self._selector = selector + self._make_self_pipe() + self._transports = weakref.WeakValueDictionary() + + def _make_socket_transport(self, sock, protocol, waiter=None, *, + extra=None, server=None): + return _SelectorSocketTransport(self, sock, protocol, waiter, + extra, server) + + def _make_ssl_transport( + self, rawsock, protocol, sslcontext, waiter=None, + *, server_side=False, server_hostname=None, + extra=None, server=None, + ssl_handshake_timeout=constants.SSL_HANDSHAKE_TIMEOUT): + ssl_protocol = sslproto.SSLProtocol( + self, protocol, sslcontext, waiter, + server_side, server_hostname, + ssl_handshake_timeout=ssl_handshake_timeout) + _SelectorSocketTransport(self, rawsock, ssl_protocol, + extra=extra, server=server) + return ssl_protocol._app_transport + + def _make_datagram_transport(self, sock, protocol, + address=None, waiter=None, extra=None): + return _SelectorDatagramTransport(self, sock, protocol, + address, waiter, extra) + + def close(self): + if self.is_running(): + raise RuntimeError("Cannot close a running event loop") + if self.is_closed(): + return + self._close_self_pipe() + super().close() + if self._selector is not None: + self._selector.close() + self._selector = None + + def _close_self_pipe(self): + self._remove_reader(self._ssock.fileno()) + self._ssock.close() + self._ssock = None + self._csock.close() + self._csock = None + self._internal_fds -= 1 + + def _make_self_pipe(self): + # A self-socket, really. :-) + self._ssock, self._csock = socket.socketpair() + self._ssock.setblocking(False) + self._csock.setblocking(False) + self._internal_fds += 1 + self._add_reader(self._ssock.fileno(), self._read_from_self) + + def _process_self_data(self, data): + pass + + def _read_from_self(self): + while True: + try: + data = self._ssock.recv(4096) + if not data: + break + self._process_self_data(data) + except InterruptedError: + continue + except BlockingIOError: + break + + def _write_to_self(self): + # This may be called from a different thread, possibly after + # _close_self_pipe() has been called or even while it is + # running. Guard for self._csock being None or closed. When + # a socket is closed, send() raises OSError (with errno set to + # EBADF, but let's not rely on the exact error code). + csock = self._csock + if csock is None: + return + + try: + csock.send(b'\0') + except OSError: + if self._debug: + logger.debug("Fail to write a null byte into the " + "self-pipe socket", + exc_info=True) + + def _start_serving(self, protocol_factory, sock, + sslcontext=None, server=None, backlog=100, + ssl_handshake_timeout=constants.SSL_HANDSHAKE_TIMEOUT): + self._add_reader(sock.fileno(), self._accept_connection, + protocol_factory, sock, sslcontext, server, backlog, + ssl_handshake_timeout) + + def _accept_connection( + self, protocol_factory, sock, + sslcontext=None, server=None, backlog=100, + ssl_handshake_timeout=constants.SSL_HANDSHAKE_TIMEOUT): + # This method is only called once for each event loop tick where the + # listening socket has triggered an EVENT_READ. There may be multiple + # connections waiting for an .accept() so it is called in a loop. + # See https://bugs.python.org/issue27906 for more details. 
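# [Editor's note] Standalone sketch of the "self-pipe" wakeup pattern that
# _make_self_pipe()/_write_to_self() above implement: a thread can wake a
# selector blocked in select() by sending one byte into a socketpair whose
# read end is registered for EVENT_READ. Names here are hypothetical.
import selectors
import socket
import threading
import time

sel = selectors.DefaultSelector()
ssock, csock = socket.socketpair()
ssock.setblocking(False)
sel.register(ssock, selectors.EVENT_READ)

def wake_later():
    time.sleep(0.5)
    csock.send(b'\0')               # same trick as _write_to_self()

threading.Thread(target=wake_later).start()
ready = sel.select(timeout=10)      # returns after ~0.5 s, not 10 s
ssock.recv(4096)                    # drain, as _read_from_self() does
print('woken by self-pipe byte:', bool(ready))
ssock.close(); csock.close()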
+ for _ in range(backlog): + try: + conn, addr = sock.accept() + if self._debug: + logger.debug("%r got a new connection from %r: %r", + server, addr, conn) + conn.setblocking(False) + except (BlockingIOError, InterruptedError, ConnectionAbortedError): + # Early exit because the socket accept buffer is empty. + return None + except OSError as exc: + # There's nowhere to send the error, so just log it. + if exc.errno in (errno.EMFILE, errno.ENFILE, + errno.ENOBUFS, errno.ENOMEM): + # Some platforms (e.g. Linux keep reporting the FD as + # ready, so we remove the read handler temporarily. + # We'll try again in a while. + self.call_exception_handler({ + 'message': 'socket.accept() out of system resource', + 'exception': exc, + 'socket': trsock.TransportSocket(sock), + }) + self._remove_reader(sock.fileno()) + self.call_later(constants.ACCEPT_RETRY_DELAY, + self._start_serving, + protocol_factory, sock, sslcontext, server, + backlog, ssl_handshake_timeout) + else: + raise # The event loop will catch, log and ignore it. + else: + extra = {'peername': addr} + accept = self._accept_connection2( + protocol_factory, conn, extra, sslcontext, server, + ssl_handshake_timeout) + self.create_task(accept) + + async def _accept_connection2( + self, protocol_factory, conn, extra, + sslcontext=None, server=None, + ssl_handshake_timeout=constants.SSL_HANDSHAKE_TIMEOUT): + protocol = None + transport = None + try: + protocol = protocol_factory() + waiter = self.create_future() + if sslcontext: + transport = self._make_ssl_transport( + conn, protocol, sslcontext, waiter=waiter, + server_side=True, extra=extra, server=server, + ssl_handshake_timeout=ssl_handshake_timeout) + else: + transport = self._make_socket_transport( + conn, protocol, waiter=waiter, extra=extra, + server=server) + + try: + await waiter + except BaseException: + transport.close() + raise + # It's now up to the protocol to handle the connection. + + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + if self._debug: + context = { + 'message': + 'Error on transport creation for incoming connection', + 'exception': exc, + } + if protocol is not None: + context['protocol'] = protocol + if transport is not None: + context['transport'] = transport + self.call_exception_handler(context) + + def _ensure_fd_no_transport(self, fd): + fileno = fd + if not isinstance(fileno, int): + try: + fileno = int(fileno.fileno()) + except (AttributeError, TypeError, ValueError): + # This code matches selectors._fileobj_to_fd function. 
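# [Editor's note] Standalone sketch (hypothetical helper name) of the accept
# pattern used by _accept_connection() above: drain up to `backlog` pending
# connections per readiness event, return quietly when the accept queue is
# empty, and back off when the process runs out of file descriptors. Assumes
# `listener` is a non-blocking, listening socket.
import errno

def drain_accepts(listener, backlog=100):
    for _ in range(backlog):
        try:
            conn, addr = listener.accept()
            conn.setblocking(False)
        except (BlockingIOError, InterruptedError):
            return                  # queue empty; wait for the next event
        except OSError as exc:
            if exc.errno in (errno.EMFILE, errno.ENFILE,
                             errno.ENOBUFS, errno.ENOMEM):
                return              # out of resources: unregister, retry later
            raise
        else:
            print('accepted connection from', addr)
            conn.close()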
+ raise ValueError(f"Invalid file object: {fd!r}") from None + try: + transport = self._transports[fileno] + except KeyError: + pass + else: + if not transport.is_closing(): + raise RuntimeError( + f'File descriptor {fd!r} is used by transport ' + f'{transport!r}') + + def _add_reader(self, fd, callback, *args): + self._check_closed() + handle = events.Handle(callback, args, self, None) + try: + key = self._selector.get_key(fd) + except KeyError: + self._selector.register(fd, selectors.EVENT_READ, + (handle, None)) + else: + mask, (reader, writer) = key.events, key.data + self._selector.modify(fd, mask | selectors.EVENT_READ, + (handle, writer)) + if reader is not None: + reader.cancel() + return handle + + def _remove_reader(self, fd): + if self.is_closed(): + return False + try: + key = self._selector.get_key(fd) + except KeyError: + return False + else: + mask, (reader, writer) = key.events, key.data + mask &= ~selectors.EVENT_READ + if not mask: + self._selector.unregister(fd) + else: + self._selector.modify(fd, mask, (None, writer)) + + if reader is not None: + reader.cancel() + return True + else: + return False + + def _add_writer(self, fd, callback, *args): + self._check_closed() + handle = events.Handle(callback, args, self, None) + try: + key = self._selector.get_key(fd) + except KeyError: + self._selector.register(fd, selectors.EVENT_WRITE, + (None, handle)) + else: + mask, (reader, writer) = key.events, key.data + self._selector.modify(fd, mask | selectors.EVENT_WRITE, + (reader, handle)) + if writer is not None: + writer.cancel() + return handle + + def _remove_writer(self, fd): + """Remove a writer callback.""" + if self.is_closed(): + return False + try: + key = self._selector.get_key(fd) + except KeyError: + return False + else: + mask, (reader, writer) = key.events, key.data + # Remove both writer and connector. + mask &= ~selectors.EVENT_WRITE + if not mask: + self._selector.unregister(fd) + else: + self._selector.modify(fd, mask, (reader, None)) + + if writer is not None: + writer.cancel() + return True + else: + return False + + def add_reader(self, fd, callback, *args): + """Add a reader callback.""" + self._ensure_fd_no_transport(fd) + self._add_reader(fd, callback, *args) + + def remove_reader(self, fd): + """Remove a reader callback.""" + self._ensure_fd_no_transport(fd) + return self._remove_reader(fd) + + def add_writer(self, fd, callback, *args): + """Add a writer callback..""" + self._ensure_fd_no_transport(fd) + self._add_writer(fd, callback, *args) + + def remove_writer(self, fd): + """Remove a writer callback.""" + self._ensure_fd_no_transport(fd) + return self._remove_writer(fd) + + async def sock_recv(self, sock, n): + """Receive data from the socket. + + The return value is a bytes object representing the data received. + The maximum amount of data to be received at once is specified by + nbytes. 
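# [Editor's note] Standalone sketch of the public add_reader()/remove_reader()
# API defined above, driven from a coroutine over a socketpair (hypothetical
# names; requires a selector-based event loop).
import asyncio
import socket

async def main():
    loop = asyncio.get_running_loop()
    r, w = socket.socketpair()
    r.setblocking(False)
    got = loop.create_future()
    loop.add_reader(r.fileno(), lambda: got.set_result(r.recv(16)))
    w.send(b'ping')
    print(await got)                # b'ping'
    loop.remove_reader(r.fileno())
    r.close(); w.close()

asyncio.run(main())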
+ """ + base_events._check_ssl_socket(sock) + if self._debug and sock.gettimeout() != 0: + raise ValueError("the socket must be non-blocking") + try: + return sock.recv(n) + except (BlockingIOError, InterruptedError): + pass + fut = self.create_future() + fd = sock.fileno() + self._ensure_fd_no_transport(fd) + handle = self._add_reader(fd, self._sock_recv, fut, sock, n) + fut.add_done_callback( + functools.partial(self._sock_read_done, fd, handle=handle)) + return await fut + + def _sock_read_done(self, fd, fut, handle=None): + if handle is None or not handle.cancelled(): + self.remove_reader(fd) + + def _sock_recv(self, fut, sock, n): + # _sock_recv() can add itself as an I/O callback if the operation can't + # be done immediately. Don't use it directly, call sock_recv(). + if fut.done(): + return + try: + data = sock.recv(n) + except (BlockingIOError, InterruptedError): + return # try again next time + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + fut.set_exception(exc) + else: + fut.set_result(data) + + async def sock_recv_into(self, sock, buf): + """Receive data from the socket. + + The received data is written into *buf* (a writable buffer). + The return value is the number of bytes written. + """ + base_events._check_ssl_socket(sock) + if self._debug and sock.gettimeout() != 0: + raise ValueError("the socket must be non-blocking") + try: + return sock.recv_into(buf) + except (BlockingIOError, InterruptedError): + pass + fut = self.create_future() + fd = sock.fileno() + self._ensure_fd_no_transport(fd) + handle = self._add_reader(fd, self._sock_recv_into, fut, sock, buf) + fut.add_done_callback( + functools.partial(self._sock_read_done, fd, handle=handle)) + return await fut + + def _sock_recv_into(self, fut, sock, buf): + # _sock_recv_into() can add itself as an I/O callback if the operation + # can't be done immediately. Don't use it directly, call + # sock_recv_into(). + if fut.done(): + return + try: + nbytes = sock.recv_into(buf) + except (BlockingIOError, InterruptedError): + return # try again next time + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + fut.set_exception(exc) + else: + fut.set_result(nbytes) + + async def sock_sendall(self, sock, data): + """Send data to the socket. + + The socket must be connected to a remote socket. This method continues + to send data from data until either all data has been sent or an + error occurs. None is returned on success. On error, an exception is + raised, and there is no way to determine how much data, if any, was + successfully processed by the receiving end of the connection. 
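# [Editor's note] Standalone usage sketch for the sock_recv()/sock_sendall()
# coroutines implemented in this class, over a non-blocking socketpair
# (hypothetical names).
import asyncio
import socket

async def main():
    loop = asyncio.get_running_loop()
    a, b = socket.socketpair()
    a.setblocking(False); b.setblocking(False)
    await loop.sock_sendall(a, b'hello')
    print(await loop.sock_recv(b, 1024))   # b'hello'
    a.close(); b.close()

asyncio.run(main())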
+ """ + base_events._check_ssl_socket(sock) + if self._debug and sock.gettimeout() != 0: + raise ValueError("the socket must be non-blocking") + try: + n = sock.send(data) + except (BlockingIOError, InterruptedError): + n = 0 + + if n == len(data): + # all data sent + return + + fut = self.create_future() + fd = sock.fileno() + self._ensure_fd_no_transport(fd) + # use a trick with a list in closure to store a mutable state + handle = self._add_writer(fd, self._sock_sendall, fut, sock, + memoryview(data), [n]) + fut.add_done_callback( + functools.partial(self._sock_write_done, fd, handle=handle)) + return await fut + + def _sock_sendall(self, fut, sock, view, pos): + if fut.done(): + # Future cancellation can be scheduled on previous loop iteration + return + start = pos[0] + try: + n = sock.send(view[start:]) + except (BlockingIOError, InterruptedError): + return + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + fut.set_exception(exc) + return + + start += n + + if start == len(view): + fut.set_result(None) + else: + pos[0] = start + + async def sock_connect(self, sock, address): + """Connect to a remote socket at address. + + This method is a coroutine. + """ + base_events._check_ssl_socket(sock) + if self._debug and sock.gettimeout() != 0: + raise ValueError("the socket must be non-blocking") + + if sock.family == socket.AF_INET or ( + base_events._HAS_IPv6 and sock.family == socket.AF_INET6): + resolved = await self._ensure_resolved( + address, family=sock.family, type=sock.type, proto=sock.proto, + loop=self, + ) + _, _, _, _, address = resolved[0] + + fut = self.create_future() + self._sock_connect(fut, sock, address) + try: + return await fut + finally: + # Needed to break cycles when an exception occurs. + fut = None + + def _sock_connect(self, fut, sock, address): + fd = sock.fileno() + try: + sock.connect(address) + except (BlockingIOError, InterruptedError): + # Issue #23618: When the C function connect() fails with EINTR, the + # connection runs in background. We have to wait until the socket + # becomes writable to be notified when the connection succeed or + # fails. + self._ensure_fd_no_transport(fd) + handle = self._add_writer( + fd, self._sock_connect_cb, fut, sock, address) + fut.add_done_callback( + functools.partial(self._sock_write_done, fd, handle=handle)) + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + fut.set_exception(exc) + else: + fut.set_result(None) + finally: + fut = None + + def _sock_write_done(self, fd, fut, handle=None): + if handle is None or not handle.cancelled(): + self.remove_writer(fd) + + def _sock_connect_cb(self, fut, sock, address): + if fut.done(): + return + + try: + err = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) + if err != 0: + # Jump to any except clause below. + raise OSError(err, f'Connect call failed {address}') + except (BlockingIOError, InterruptedError): + # socket is still registered, the callback will be retried later + pass + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + fut.set_exception(exc) + else: + fut.set_result(None) + finally: + fut = None + + async def sock_accept(self, sock): + """Accept a connection. + + The socket must be bound to an address and listening for connections. + The return value is a pair (conn, address) where conn is a new socket + object usable to send and receive data on the connection, and address + is the address bound to the socket on the other end of the connection. 
+ """ + base_events._check_ssl_socket(sock) + if self._debug and sock.gettimeout() != 0: + raise ValueError("the socket must be non-blocking") + fut = self.create_future() + self._sock_accept(fut, sock) + return await fut + + def _sock_accept(self, fut, sock): + fd = sock.fileno() + try: + conn, address = sock.accept() + conn.setblocking(False) + except (BlockingIOError, InterruptedError): + self._ensure_fd_no_transport(fd) + handle = self._add_reader(fd, self._sock_accept, fut, sock) + fut.add_done_callback( + functools.partial(self._sock_read_done, fd, handle=handle)) + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + fut.set_exception(exc) + else: + fut.set_result((conn, address)) + + async def _sendfile_native(self, transp, file, offset, count): + del self._transports[transp._sock_fd] + resume_reading = transp.is_reading() + transp.pause_reading() + await transp._make_empty_waiter() + try: + return await self.sock_sendfile(transp._sock, file, offset, count, + fallback=False) + finally: + transp._reset_empty_waiter() + if resume_reading: + transp.resume_reading() + self._transports[transp._sock_fd] = transp + + def _process_events(self, event_list): + for key, mask in event_list: + fileobj, (reader, writer) = key.fileobj, key.data + if mask & selectors.EVENT_READ and reader is not None: + if reader._cancelled: + self._remove_reader(fileobj) + else: + self._add_callback(reader) + if mask & selectors.EVENT_WRITE and writer is not None: + if writer._cancelled: + self._remove_writer(fileobj) + else: + self._add_callback(writer) + + def _stop_serving(self, sock): + self._remove_reader(sock.fileno()) + sock.close() + + +class _SelectorTransport(transports._FlowControlMixin, + transports.Transport): + + max_size = 256 * 1024 # Buffer size passed to recv(). + + _buffer_factory = bytearray # Constructs initial value for self._buffer. + + # Attribute used in the destructor: it must be set even if the constructor + # is not called (see _SelectorSslTransport which may start by raising an + # exception) + _sock = None + + def __init__(self, loop, sock, protocol, extra=None, server=None): + super().__init__(extra, loop) + self._extra['socket'] = trsock.TransportSocket(sock) + try: + self._extra['sockname'] = sock.getsockname() + except OSError: + self._extra['sockname'] = None + if 'peername' not in self._extra: + try: + self._extra['peername'] = sock.getpeername() + except socket.error: + self._extra['peername'] = None + self._sock = sock + self._sock_fd = sock.fileno() + + self._protocol_connected = False + self.set_protocol(protocol) + + self._server = server + self._buffer = self._buffer_factory() + self._conn_lost = 0 # Set when call to connection_lost scheduled. + self._closing = False # Set when close() called. 
+ if self._server is not None: + self._server._attach() + loop._transports[self._sock_fd] = self + + def __repr__(self): + info = [self.__class__.__name__] + if self._sock is None: + info.append('closed') + elif self._closing: + info.append('closing') + info.append(f'fd={self._sock_fd}') + # test if the transport was closed + if self._loop is not None and not self._loop.is_closed(): + polling = _test_selector_event(self._loop._selector, + self._sock_fd, selectors.EVENT_READ) + if polling: + info.append('read=polling') + else: + info.append('read=idle') + + polling = _test_selector_event(self._loop._selector, + self._sock_fd, + selectors.EVENT_WRITE) + if polling: + state = 'polling' + else: + state = 'idle' + + bufsize = self.get_write_buffer_size() + info.append(f'write=<{state}, bufsize={bufsize}>') + return '<{}>'.format(' '.join(info)) + + def abort(self): + self._force_close(None) + + def set_protocol(self, protocol): + self._protocol = protocol + self._protocol_connected = True + + def get_protocol(self): + return self._protocol + + def is_closing(self): + return self._closing + + def close(self): + if self._closing: + return + self._closing = True + self._loop._remove_reader(self._sock_fd) + if not self._buffer: + self._conn_lost += 1 + self._loop._remove_writer(self._sock_fd) + self._loop.call_soon(self._call_connection_lost, None) + + def __del__(self, _warn=warnings.warn): + if self._sock is not None: + _warn(f"unclosed transport {self!r}", ResourceWarning, source=self) + self._sock.close() + + def _fatal_error(self, exc, message='Fatal error on transport'): + # Should be called from exception handler only. + if isinstance(exc, OSError): + if self._loop.get_debug(): + logger.debug("%r: %s", self, message, exc_info=True) + else: + self._loop.call_exception_handler({ + 'message': message, + 'exception': exc, + 'transport': self, + 'protocol': self._protocol, + }) + self._force_close(exc) + + def _force_close(self, exc): + if self._conn_lost: + return + if self._buffer: + self._buffer.clear() + self._loop._remove_writer(self._sock_fd) + if not self._closing: + self._closing = True + self._loop._remove_reader(self._sock_fd) + self._conn_lost += 1 + self._loop.call_soon(self._call_connection_lost, exc) + + def _call_connection_lost(self, exc): + try: + if self._protocol_connected: + self._protocol.connection_lost(exc) + finally: + self._sock.close() + self._sock = None + self._protocol = None + self._loop = None + server = self._server + if server is not None: + server._detach() + self._server = None + + def get_write_buffer_size(self): + return len(self._buffer) + + def _add_reader(self, fd, callback, *args): + if self._closing: + return + + self._loop._add_reader(fd, callback, *args) + + +class _SelectorSocketTransport(_SelectorTransport): + + _start_tls_compatible = True + _sendfile_compatible = constants._SendfileMode.TRY_NATIVE + + def __init__(self, loop, sock, protocol, waiter=None, + extra=None, server=None): + + self._read_ready_cb = None + super().__init__(loop, sock, protocol, extra, server) + self._eof = False + self._paused = False + self._empty_waiter = None + + # Disable the Nagle algorithm -- small writes will be + # sent without waiting for the TCP ACK. This generally + # decreases the latency (in some cases significantly.) 
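# [Editor's note] Sketch, not part of the vendored file: the helper invoked on
# the next line boils down to a setsockopt() call; the standalone equivalent
# for disabling Nagle's algorithm on a TCP socket is:
import socket

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)  # flush small writes immediately
s.close()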
+ base_events._set_nodelay(self._sock) + + self._loop.call_soon(self._protocol.connection_made, self) + # only start reading when connection_made() has been called + self._loop.call_soon(self._add_reader, + self._sock_fd, self._read_ready) + if waiter is not None: + # only wake up the waiter when connection_made() has been called + self._loop.call_soon(futures._set_result_unless_cancelled, + waiter, None) + + def set_protocol(self, protocol): + if isinstance(protocol, protocols.BufferedProtocol): + self._read_ready_cb = self._read_ready__get_buffer + else: + self._read_ready_cb = self._read_ready__data_received + + super().set_protocol(protocol) + + def is_reading(self): + return not self._paused and not self._closing + + def pause_reading(self): + if self._closing or self._paused: + return + self._paused = True + self._loop._remove_reader(self._sock_fd) + if self._loop.get_debug(): + logger.debug("%r pauses reading", self) + + def resume_reading(self): + if self._closing or not self._paused: + return + self._paused = False + self._add_reader(self._sock_fd, self._read_ready) + if self._loop.get_debug(): + logger.debug("%r resumes reading", self) + + def _read_ready(self): + self._read_ready_cb() + + def _read_ready__get_buffer(self): + if self._conn_lost: + return + + try: + buf = self._protocol.get_buffer(-1) + if not len(buf): + raise RuntimeError('get_buffer() returned an empty buffer') + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._fatal_error( + exc, 'Fatal error: protocol.get_buffer() call failed.') + return + + try: + nbytes = self._sock.recv_into(buf) + except (BlockingIOError, InterruptedError): + return + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._fatal_error(exc, 'Fatal read error on socket transport') + return + + if not nbytes: + self._read_ready__on_eof() + return + + try: + self._protocol.buffer_updated(nbytes) + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._fatal_error( + exc, 'Fatal error: protocol.buffer_updated() call failed.') + + def _read_ready__data_received(self): + if self._conn_lost: + return + try: + data = self._sock.recv(self.max_size) + except (BlockingIOError, InterruptedError): + return + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._fatal_error(exc, 'Fatal read error on socket transport') + return + + if not data: + self._read_ready__on_eof() + return + + try: + self._protocol.data_received(data) + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._fatal_error( + exc, 'Fatal error: protocol.data_received() call failed.') + + def _read_ready__on_eof(self): + if self._loop.get_debug(): + logger.debug("%r received EOF", self) + + try: + keep_open = self._protocol.eof_received() + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._fatal_error( + exc, 'Fatal error: protocol.eof_received() call failed.') + return + + if keep_open: + # We're keeping the connection open so the + # protocol can write more, but we still can't + # receive more, so remove the reader callback. 
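# [Editor's note] Standalone sketch of a minimal BufferedProtocol matching the
# get_buffer()/buffer_updated()/eof_received() calls made by the read path
# above (class and attribute names are hypothetical). An instance would come
# from the protocol factory passed to e.g. loop.create_connection().
import asyncio

class Collector(asyncio.BufferedProtocol):
    def __init__(self):
        self._buf = bytearray(65536)
        self.received = bytearray()

    def get_buffer(self, sizehint):
        return self._buf            # transport recv_into()s directly here

    def buffer_updated(self, nbytes):
        self.received += self._buf[:nbytes]

    def eof_received(self):
        return False                # falsy: let the transport close itself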
+ self._loop._remove_reader(self._sock_fd) + else: + self.close() + + def write(self, data): + if not isinstance(data, (bytes, bytearray, memoryview)): + raise TypeError(f'data argument must be a bytes-like object, ' + f'not {type(data).__name__!r}') + if self._eof: + raise RuntimeError('Cannot call write() after write_eof()') + if self._empty_waiter is not None: + raise RuntimeError('unable to write; sendfile is in progress') + if not data: + return + + if self._conn_lost: + if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES: + logger.warning('socket.send() raised exception.') + self._conn_lost += 1 + return + + if not self._buffer: + # Optimization: try to send now. + try: + n = self._sock.send(data) + except (BlockingIOError, InterruptedError): + pass + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._fatal_error(exc, 'Fatal write error on socket transport') + return + else: + data = data[n:] + if not data: + return + # Not all was written; register write handler. + self._loop._add_writer(self._sock_fd, self._write_ready) + + # Add it to the buffer. + self._buffer.extend(data) + self._maybe_pause_protocol() + + def _write_ready(self): + assert self._buffer, 'Data should not be empty' + + if self._conn_lost: + return + try: + n = self._sock.send(self._buffer) + except (BlockingIOError, InterruptedError): + pass + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._loop._remove_writer(self._sock_fd) + self._buffer.clear() + self._fatal_error(exc, 'Fatal write error on socket transport') + if self._empty_waiter is not None: + self._empty_waiter.set_exception(exc) + else: + if n: + del self._buffer[:n] + self._maybe_resume_protocol() # May append to buffer. + if not self._buffer: + self._loop._remove_writer(self._sock_fd) + if self._empty_waiter is not None: + self._empty_waiter.set_result(None) + if self._closing: + self._call_connection_lost(None) + elif self._eof: + self._sock.shutdown(socket.SHUT_WR) + + def write_eof(self): + if self._closing or self._eof: + return + self._eof = True + if not self._buffer: + self._sock.shutdown(socket.SHUT_WR) + + def can_write_eof(self): + return True + + def _call_connection_lost(self, exc): + super()._call_connection_lost(exc) + if self._empty_waiter is not None: + self._empty_waiter.set_exception( + ConnectionError("Connection is closed by peer")) + + def _make_empty_waiter(self): + if self._empty_waiter is not None: + raise RuntimeError("Empty waiter is already set") + self._empty_waiter = self._loop.create_future() + if not self._buffer: + self._empty_waiter.set_result(None) + return self._empty_waiter + + def _reset_empty_waiter(self): + self._empty_waiter = None + + +class _SelectorDatagramTransport(_SelectorTransport): + + _buffer_factory = collections.deque + + def __init__(self, loop, sock, protocol, address=None, + waiter=None, extra=None): + super().__init__(loop, sock, protocol, extra) + self._address = address + self._loop.call_soon(self._protocol.connection_made, self) + # only start reading when connection_made() has been called + self._loop.call_soon(self._add_reader, + self._sock_fd, self._read_ready) + if waiter is not None: + # only wake up the waiter when connection_made() has been called + self._loop.call_soon(futures._set_result_unless_cancelled, + waiter, None) + + def get_write_buffer_size(self): + return sum(len(data) for data, _ in self._buffer) + + def _read_ready(self): + if self._conn_lost: + return + try: + data, addr = 
self._sock.recvfrom(self.max_size) + except (BlockingIOError, InterruptedError): + pass + except OSError as exc: + self._protocol.error_received(exc) + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._fatal_error(exc, 'Fatal read error on datagram transport') + else: + self._protocol.datagram_received(data, addr) + + def sendto(self, data, addr=None): + if not isinstance(data, (bytes, bytearray, memoryview)): + raise TypeError(f'data argument must be a bytes-like object, ' + f'not {type(data).__name__!r}') + if not data: + return + + if self._address: + if addr not in (None, self._address): + raise ValueError( + f'Invalid address: must be None or {self._address}') + addr = self._address + + if self._conn_lost and self._address: + if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES: + logger.warning('socket.send() raised exception.') + self._conn_lost += 1 + return + + if not self._buffer: + # Attempt to send it right away first. + try: + if self._extra['peername']: + self._sock.send(data) + else: + self._sock.sendto(data, addr) + return + except (BlockingIOError, InterruptedError): + self._loop._add_writer(self._sock_fd, self._sendto_ready) + except OSError as exc: + self._protocol.error_received(exc) + return + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._fatal_error( + exc, 'Fatal write error on datagram transport') + return + + # Ensure that what we buffer is immutable. + self._buffer.append((bytes(data), addr)) + self._maybe_pause_protocol() + + def _sendto_ready(self): + while self._buffer: + data, addr = self._buffer.popleft() + try: + if self._extra['peername']: + self._sock.send(data) + else: + self._sock.sendto(data, addr) + except (BlockingIOError, InterruptedError): + self._buffer.appendleft((data, addr)) # Try again later. + break + except OSError as exc: + self._protocol.error_received(exc) + return + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._fatal_error( + exc, 'Fatal write error on datagram transport') + return + + self._maybe_resume_protocol() # May append to buffer. + if not self._buffer: + self._loop._remove_writer(self._sock_fd) + if self._closing: + self._call_connection_lost(None) diff --git a/parrot/lib/python3.10/asyncio/subprocess.py b/parrot/lib/python3.10/asyncio/subprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..cd10231f710f1139590545b3b9e3bd88c12544f2 --- /dev/null +++ b/parrot/lib/python3.10/asyncio/subprocess.py @@ -0,0 +1,223 @@ +__all__ = 'create_subprocess_exec', 'create_subprocess_shell' + +import subprocess + +from . import events +from . import protocols +from . import streams +from . 
import tasks +from .log import logger + + +PIPE = subprocess.PIPE +STDOUT = subprocess.STDOUT +DEVNULL = subprocess.DEVNULL + + +class SubprocessStreamProtocol(streams.FlowControlMixin, + protocols.SubprocessProtocol): + """Like StreamReaderProtocol, but for a subprocess.""" + + def __init__(self, limit, loop): + super().__init__(loop=loop) + self._limit = limit + self.stdin = self.stdout = self.stderr = None + self._transport = None + self._process_exited = False + self._pipe_fds = [] + self._stdin_closed = self._loop.create_future() + + def __repr__(self): + info = [self.__class__.__name__] + if self.stdin is not None: + info.append(f'stdin={self.stdin!r}') + if self.stdout is not None: + info.append(f'stdout={self.stdout!r}') + if self.stderr is not None: + info.append(f'stderr={self.stderr!r}') + return '<{}>'.format(' '.join(info)) + + def connection_made(self, transport): + self._transport = transport + + stdout_transport = transport.get_pipe_transport(1) + if stdout_transport is not None: + self.stdout = streams.StreamReader(limit=self._limit, + loop=self._loop) + self.stdout.set_transport(stdout_transport) + self._pipe_fds.append(1) + + stderr_transport = transport.get_pipe_transport(2) + if stderr_transport is not None: + self.stderr = streams.StreamReader(limit=self._limit, + loop=self._loop) + self.stderr.set_transport(stderr_transport) + self._pipe_fds.append(2) + + stdin_transport = transport.get_pipe_transport(0) + if stdin_transport is not None: + self.stdin = streams.StreamWriter(stdin_transport, + protocol=self, + reader=None, + loop=self._loop) + + def pipe_data_received(self, fd, data): + if fd == 1: + reader = self.stdout + elif fd == 2: + reader = self.stderr + else: + reader = None + if reader is not None: + reader.feed_data(data) + + def pipe_connection_lost(self, fd, exc): + if fd == 0: + pipe = self.stdin + if pipe is not None: + pipe.close() + self.connection_lost(exc) + if exc is None: + self._stdin_closed.set_result(None) + else: + self._stdin_closed.set_exception(exc) + return + if fd == 1: + reader = self.stdout + elif fd == 2: + reader = self.stderr + else: + reader = None + if reader is not None: + if exc is None: + reader.feed_eof() + else: + reader.set_exception(exc) + + if fd in self._pipe_fds: + self._pipe_fds.remove(fd) + self._maybe_close_transport() + + def process_exited(self): + self._process_exited = True + self._maybe_close_transport() + + def _maybe_close_transport(self): + if len(self._pipe_fds) == 0 and self._process_exited: + self._transport.close() + self._transport = None + + def _get_close_waiter(self, stream): + if stream is self.stdin: + return self._stdin_closed + + +class Process: + def __init__(self, transport, protocol, loop): + self._transport = transport + self._protocol = protocol + self._loop = loop + self.stdin = protocol.stdin + self.stdout = protocol.stdout + self.stderr = protocol.stderr + self.pid = transport.get_pid() + + def __repr__(self): + return f'<{self.__class__.__name__} {self.pid}>' + + @property + def returncode(self): + return self._transport.get_returncode() + + async def wait(self): + """Wait until the process exit and return the process return code.""" + return await self._transport._wait() + + def send_signal(self, signal): + self._transport.send_signal(signal) + + def terminate(self): + self._transport.terminate() + + def kill(self): + self._transport.kill() + + async def _feed_stdin(self, input): + debug = self._loop.get_debug() + self.stdin.write(input) + if debug: + logger.debug( + '%r communicate: feed 
stdin (%s bytes)', self, len(input)) + try: + await self.stdin.drain() + except (BrokenPipeError, ConnectionResetError) as exc: + # communicate() ignores BrokenPipeError and ConnectionResetError + if debug: + logger.debug('%r communicate: stdin got %r', self, exc) + + if debug: + logger.debug('%r communicate: close stdin', self) + self.stdin.close() + + async def _noop(self): + return None + + async def _read_stream(self, fd): + transport = self._transport.get_pipe_transport(fd) + if fd == 2: + stream = self.stderr + else: + assert fd == 1 + stream = self.stdout + if self._loop.get_debug(): + name = 'stdout' if fd == 1 else 'stderr' + logger.debug('%r communicate: read %s', self, name) + output = await stream.read() + if self._loop.get_debug(): + name = 'stdout' if fd == 1 else 'stderr' + logger.debug('%r communicate: close %s', self, name) + transport.close() + return output + + async def communicate(self, input=None): + if input is not None: + stdin = self._feed_stdin(input) + else: + stdin = self._noop() + if self.stdout is not None: + stdout = self._read_stream(1) + else: + stdout = self._noop() + if self.stderr is not None: + stderr = self._read_stream(2) + else: + stderr = self._noop() + stdin, stdout, stderr = await tasks.gather(stdin, stdout, stderr) + await self.wait() + return (stdout, stderr) + + +async def create_subprocess_shell(cmd, stdin=None, stdout=None, stderr=None, + limit=streams._DEFAULT_LIMIT, **kwds): + loop = events.get_running_loop() + protocol_factory = lambda: SubprocessStreamProtocol(limit=limit, + loop=loop) + transport, protocol = await loop.subprocess_shell( + protocol_factory, + cmd, stdin=stdin, stdout=stdout, + stderr=stderr, **kwds) + return Process(transport, protocol, loop) + + +async def create_subprocess_exec(program, *args, stdin=None, stdout=None, + stderr=None, limit=streams._DEFAULT_LIMIT, + **kwds): + loop = events.get_running_loop() + protocol_factory = lambda: SubprocessStreamProtocol(limit=limit, + loop=loop) + transport, protocol = await loop.subprocess_exec( + protocol_factory, + program, *args, + stdin=stdin, stdout=stdout, + stderr=stderr, **kwds) + return Process(transport, protocol, loop) diff --git a/parrot/lib/python3.10/asyncio/tasks.py b/parrot/lib/python3.10/asyncio/tasks.py new file mode 100644 index 0000000000000000000000000000000000000000..f391586d128730328c424541420db6a02fcf39c7 --- /dev/null +++ b/parrot/lib/python3.10/asyncio/tasks.py @@ -0,0 +1,946 @@ +"""Support for tasks, coroutines and the scheduler.""" + +__all__ = ( + 'Task', 'create_task', + 'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED', + 'wait', 'wait_for', 'as_completed', 'sleep', + 'gather', 'shield', 'ensure_future', 'run_coroutine_threadsafe', + 'current_task', 'all_tasks', + '_register_task', '_unregister_task', '_enter_task', '_leave_task', +) + +import concurrent.futures +import contextvars +import functools +import inspect +import itertools +import types +import warnings +import weakref +from types import GenericAlias + +from . import base_tasks +from . import coroutines +from . import events +from . import exceptions +from . import futures +from .coroutines import _is_coroutine + +# Helper to generate new task names +# This uses itertools.count() instead of a "+= 1" operation because the latter +# is not thread safe. See bpo-11866 for a longer explanation. 
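# [Editor's note] Sketch of the thread-safety point made in the comment above:
# itertools.count(1).__next__ (the counter defined just below) hands out task
# numbers atomically, whereas a bare "n += 1" is a read-modify-write that can
# race across threads.
import itertools

next_id = itertools.count(1).__next__
print(next_id(), next_id())         # 1 2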
+_task_name_counter = itertools.count(1).__next__ + + +def current_task(loop=None): + """Return a currently executed task.""" + if loop is None: + loop = events.get_running_loop() + return _current_tasks.get(loop) + + +def all_tasks(loop=None): + """Return a set of all tasks for the loop.""" + if loop is None: + loop = events.get_running_loop() + # Looping over a WeakSet (_all_tasks) isn't safe as it can be updated from another + # thread while we do so. Therefore we cast it to list prior to filtering. The list + # cast itself requires iteration, so we repeat it several times ignoring + # RuntimeErrors (which are not very likely to occur). See issues 34970 and 36607 for + # details. + i = 0 + while True: + try: + tasks = list(_all_tasks) + except RuntimeError: + i += 1 + if i >= 1000: + raise + else: + break + return {t for t in tasks + if futures._get_loop(t) is loop and not t.done()} + + +def _set_task_name(task, name): + if name is not None: + try: + set_name = task.set_name + except AttributeError: + pass + else: + set_name(name) + + +class Task(futures._PyFuture): # Inherit Python Task implementation + # from a Python Future implementation. + + """A coroutine wrapped in a Future.""" + + # An important invariant maintained while a Task not done: + # + # - Either _fut_waiter is None, and _step() is scheduled; + # - or _fut_waiter is some Future, and _step() is *not* scheduled. + # + # The only transition from the latter to the former is through + # _wakeup(). When _fut_waiter is not None, one of its callbacks + # must be _wakeup(). + + # If False, don't log a message if the task is destroyed whereas its + # status is still pending + _log_destroy_pending = True + + def __init__(self, coro, *, loop=None, name=None): + super().__init__(loop=loop) + if self._source_traceback: + del self._source_traceback[-1] + if not coroutines.iscoroutine(coro): + # raise after Future.__init__(), attrs are required for __del__ + # prevent logging for pending task in __del__ + self._log_destroy_pending = False + raise TypeError(f"a coroutine was expected, got {coro!r}") + + if name is None: + self._name = f'Task-{_task_name_counter()}' + else: + self._name = str(name) + + self._must_cancel = False + self._fut_waiter = None + self._coro = coro + self._context = contextvars.copy_context() + + self._loop.call_soon(self.__step, context=self._context) + _register_task(self) + + def __del__(self): + if self._state == futures._PENDING and self._log_destroy_pending: + context = { + 'task': self, + 'message': 'Task was destroyed but it is pending!', + } + if self._source_traceback: + context['source_traceback'] = self._source_traceback + self._loop.call_exception_handler(context) + super().__del__() + + __class_getitem__ = classmethod(GenericAlias) + + def _repr_info(self): + return base_tasks._task_repr_info(self) + + def get_coro(self): + return self._coro + + def get_name(self): + return self._name + + def set_name(self, value): + self._name = str(value) + + def set_result(self, result): + raise RuntimeError('Task does not support set_result operation') + + def set_exception(self, exception): + raise RuntimeError('Task does not support set_exception operation') + + def get_stack(self, *, limit=None): + """Return the list of stack frames for this task's coroutine. + + If the coroutine is not done, this returns the stack where it is + suspended. If the coroutine has completed successfully or was + cancelled, this returns an empty list. 
If the coroutine was + terminated by an exception, this returns the list of traceback + frames. + + The frames are always ordered from oldest to newest. + + The optional limit gives the maximum number of frames to + return; by default all available frames are returned. Its + meaning differs depending on whether a stack or a traceback is + returned: the newest frames of a stack are returned, but the + oldest frames of a traceback are returned. (This matches the + behavior of the traceback module.) + + For reasons beyond our control, only one stack frame is + returned for a suspended coroutine. + """ + return base_tasks._task_get_stack(self, limit) + + def print_stack(self, *, limit=None, file=None): + """Print the stack or traceback for this task's coroutine. + + This produces output similar to that of the traceback module, + for the frames retrieved by get_stack(). The limit argument + is passed to get_stack(). The file argument is an I/O stream + to which the output is written; by default output is written + to sys.stderr. + """ + return base_tasks._task_print_stack(self, limit, file) + + def cancel(self, msg=None): + """Request that this task cancel itself. + + This arranges for a CancelledError to be thrown into the + wrapped coroutine on the next cycle through the event loop. + The coroutine then has a chance to clean up or even deny + the request using try/except/finally. + + Unlike Future.cancel, this does not guarantee that the + task will be cancelled: the exception might be caught and + acted upon, delaying cancellation of the task or preventing + cancellation completely. The task may also return a value or + raise a different exception. + + Immediately after this method is called, Task.cancelled() will + not return True (unless the task was already cancelled). A + task will be marked as cancelled when the wrapped coroutine + terminates with a CancelledError exception (even if cancel() + was not called). + """ + self._log_traceback = False + if self.done(): + return False + if self._fut_waiter is not None: + if self._fut_waiter.cancel(msg=msg): + # Leave self._fut_waiter; it may be a Task that + # catches and ignores the cancellation so we may have + # to cancel it again later. + return True + # It must be the case that self.__step is already scheduled. + self._must_cancel = True + self._cancel_message = msg + return True + + def __step(self, exc=None): + if self.done(): + raise exceptions.InvalidStateError( + f'_step(): already done: {self!r}, {exc!r}') + if self._must_cancel: + if not isinstance(exc, exceptions.CancelledError): + exc = self._make_cancelled_error() + self._must_cancel = False + coro = self._coro + self._fut_waiter = None + + _enter_task(self._loop, self) + # Call either coro.throw(exc) or coro.send(None). + try: + if exc is None: + # We use the `send` method directly, because coroutines + # don't have `__iter__` and `__next__` methods. + result = coro.send(None) + else: + result = coro.throw(exc) + except StopIteration as exc: + if self._must_cancel: + # Task is cancelled right before coro stops. + self._must_cancel = False + super().cancel(msg=self._cancel_message) + else: + super().set_result(exc.value) + except exceptions.CancelledError as exc: + # Save the original exception so we can chain it later. + self._cancelled_exc = exc + super().cancel() # I.e., Future.cancel(self). 
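# [Editor's note] Standalone sketch of the cancel() semantics documented
# above: the coroutine sees CancelledError at its next suspension point and
# may clean up first; the task reports cancelled() only once it has actually
# exited that way (hypothetical names).
import asyncio

async def worker():
    try:
        await asyncio.sleep(10)
    except asyncio.CancelledError:
        print('cleaning up')
        raise                       # re-raise so the task ends up cancelled

async def main():
    task = asyncio.create_task(worker())
    await asyncio.sleep(0)          # let worker() start and suspend
    task.cancel()
    print(task.cancelled())         # False: cancellation is only requested
    try:
        await task
    except asyncio.CancelledError:
        pass
    print(task.cancelled())         # True: coroutine exited via CancelledError

asyncio.run(main())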
+ except (KeyboardInterrupt, SystemExit) as exc: + super().set_exception(exc) + raise + except BaseException as exc: + super().set_exception(exc) + else: + blocking = getattr(result, '_asyncio_future_blocking', None) + if blocking is not None: + # Yielded Future must come from Future.__iter__(). + if futures._get_loop(result) is not self._loop: + new_exc = RuntimeError( + f'Task {self!r} got Future ' + f'{result!r} attached to a different loop') + self._loop.call_soon( + self.__step, new_exc, context=self._context) + elif blocking: + if result is self: + new_exc = RuntimeError( + f'Task cannot await on itself: {self!r}') + self._loop.call_soon( + self.__step, new_exc, context=self._context) + else: + result._asyncio_future_blocking = False + result.add_done_callback( + self.__wakeup, context=self._context) + self._fut_waiter = result + if self._must_cancel: + if self._fut_waiter.cancel( + msg=self._cancel_message): + self._must_cancel = False + else: + new_exc = RuntimeError( + f'yield was used instead of yield from ' + f'in task {self!r} with {result!r}') + self._loop.call_soon( + self.__step, new_exc, context=self._context) + + elif result is None: + # Bare yield relinquishes control for one event loop iteration. + self._loop.call_soon(self.__step, context=self._context) + elif inspect.isgenerator(result): + # Yielding a generator is just wrong. + new_exc = RuntimeError( + f'yield was used instead of yield from for ' + f'generator in task {self!r} with {result!r}') + self._loop.call_soon( + self.__step, new_exc, context=self._context) + else: + # Yielding something else is an error. + new_exc = RuntimeError(f'Task got bad yield: {result!r}') + self._loop.call_soon( + self.__step, new_exc, context=self._context) + finally: + _leave_task(self._loop, self) + self = None # Needed to break cycles when an exception occurs. + + def __wakeup(self, future): + try: + future.result() + except BaseException as exc: + # This may also be a cancellation. + self.__step(exc) + else: + # Don't pass the value of `future.result()` explicitly, + # as `Future.__iter__` and `Future.__await__` don't need it. + # If we call `_step(value, None)` instead of `_step()`, + # Python eval loop would use `.send(value)` method call, + # instead of `__next__()`, which is slower for futures + # that return non-generator iterators from their `__iter__`. + self.__step() + self = None # Needed to break cycles when an exception occurs. + + +_PyTask = Task + + +try: + import _asyncio +except ImportError: + pass +else: + # _CTask is needed for tests. + Task = _CTask = _asyncio.Task + + +def create_task(coro, *, name=None): + """Schedule the execution of a coroutine object in a spawn task. + + Return a Task object. + """ + loop = events.get_running_loop() + task = loop.create_task(coro) + _set_task_name(task, name) + return task + + +# wait() and as_completed() similar to those in PEP 3148. + +FIRST_COMPLETED = concurrent.futures.FIRST_COMPLETED +FIRST_EXCEPTION = concurrent.futures.FIRST_EXCEPTION +ALL_COMPLETED = concurrent.futures.ALL_COMPLETED + + +async def wait(fs, *, timeout=None, return_when=ALL_COMPLETED): + """Wait for the Futures and coroutines given by fs to complete. + + The fs iterable must not be empty. + + Coroutines will be wrapped in Tasks. + + Returns two sets of Future: (done, pending). + + Usage: + + done, pending = await asyncio.wait(fs) + + Note: This does not raise TimeoutError! Futures that aren't done + when the timeout occurs are returned in the second set. 
+ """ + if futures.isfuture(fs) or coroutines.iscoroutine(fs): + raise TypeError(f"expect a list of futures, not {type(fs).__name__}") + if not fs: + raise ValueError('Set of coroutines/Futures is empty.') + if return_when not in (FIRST_COMPLETED, FIRST_EXCEPTION, ALL_COMPLETED): + raise ValueError(f'Invalid return_when value: {return_when}') + + loop = events.get_running_loop() + + fs = set(fs) + + if any(coroutines.iscoroutine(f) for f in fs): + warnings.warn("The explicit passing of coroutine objects to " + "asyncio.wait() is deprecated since Python 3.8, and " + "scheduled for removal in Python 3.11.", + DeprecationWarning, stacklevel=2) + + fs = {ensure_future(f, loop=loop) for f in fs} + + return await _wait(fs, timeout, return_when, loop) + + +def _release_waiter(waiter, *args): + if not waiter.done(): + waiter.set_result(None) + + +async def wait_for(fut, timeout): + """Wait for the single Future or coroutine to complete, with timeout. + + Coroutine will be wrapped in Task. + + Returns result of the Future or coroutine. When a timeout occurs, + it cancels the task and raises TimeoutError. To avoid the task + cancellation, wrap it in shield(). + + If the wait is cancelled, the task is also cancelled. + + This function is a coroutine. + """ + loop = events.get_running_loop() + + if timeout is None: + return await fut + + if timeout <= 0: + fut = ensure_future(fut, loop=loop) + + if fut.done(): + return fut.result() + + await _cancel_and_wait(fut, loop=loop) + try: + return fut.result() + except exceptions.CancelledError as exc: + raise exceptions.TimeoutError() from exc + + waiter = loop.create_future() + timeout_handle = loop.call_later(timeout, _release_waiter, waiter) + cb = functools.partial(_release_waiter, waiter) + + fut = ensure_future(fut, loop=loop) + fut.add_done_callback(cb) + + try: + # wait until the future completes or the timeout + try: + await waiter + except exceptions.CancelledError: + if fut.done(): + return fut.result() + else: + fut.remove_done_callback(cb) + # We must ensure that the task is not running + # after wait_for() returns. + # See https://bugs.python.org/issue32751 + await _cancel_and_wait(fut, loop=loop) + raise + + if fut.done(): + return fut.result() + else: + fut.remove_done_callback(cb) + # We must ensure that the task is not running + # after wait_for() returns. + # See https://bugs.python.org/issue32751 + await _cancel_and_wait(fut, loop=loop) + # In case task cancellation failed with some + # exception, we should re-raise it + # See https://bugs.python.org/issue40607 + try: + return fut.result() + except exceptions.CancelledError as exc: + raise exceptions.TimeoutError() from exc + finally: + timeout_handle.cancel() + + +async def _wait(fs, timeout, return_when, loop): + """Internal helper for wait(). + + The fs argument must be a collection of Futures. + """ + assert fs, 'Set of Futures is empty.' 
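# [Editor's note] Standalone usage sketch for wait_for() above: on timeout the
# wrapped task is cancelled and TimeoutError is raised (hypothetical names).
import asyncio

async def slow():
    await asyncio.sleep(10)

async def main():
    try:
        await asyncio.wait_for(slow(), timeout=0.1)
    except asyncio.TimeoutError:
        print('timed out; slow() was cancelled, not left running')

asyncio.run(main())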
+ waiter = loop.create_future() + timeout_handle = None + if timeout is not None: + timeout_handle = loop.call_later(timeout, _release_waiter, waiter) + counter = len(fs) + + def _on_completion(f): + nonlocal counter + counter -= 1 + if (counter <= 0 or + return_when == FIRST_COMPLETED or + return_when == FIRST_EXCEPTION and (not f.cancelled() and + f.exception() is not None)): + if timeout_handle is not None: + timeout_handle.cancel() + if not waiter.done(): + waiter.set_result(None) + + for f in fs: + f.add_done_callback(_on_completion) + + try: + await waiter + finally: + if timeout_handle is not None: + timeout_handle.cancel() + for f in fs: + f.remove_done_callback(_on_completion) + + done, pending = set(), set() + for f in fs: + if f.done(): + done.add(f) + else: + pending.add(f) + return done, pending + + +async def _cancel_and_wait(fut, loop): + """Cancel the *fut* future or task and wait until it completes.""" + + waiter = loop.create_future() + cb = functools.partial(_release_waiter, waiter) + fut.add_done_callback(cb) + + try: + fut.cancel() + # We cannot wait on *fut* directly to make + # sure _cancel_and_wait itself is reliably cancellable. + await waiter + finally: + fut.remove_done_callback(cb) + + +# This is *not* a @coroutine! It is just an iterator (yielding Futures). +def as_completed(fs, *, timeout=None): + """Return an iterator whose values are coroutines. + + When waiting for the yielded coroutines you'll get the results (or + exceptions!) of the original Futures (or coroutines), in the order + in which and as soon as they complete. + + This differs from PEP 3148; the proper way to use this is: + + for f in as_completed(fs): + result = await f # The 'await' may raise. + # Use result. + + If a timeout is specified, the 'await' will raise + TimeoutError when the timeout occurs before all Futures are done. + + Note: The futures 'f' are not necessarily members of fs. + """ + if futures.isfuture(fs) or coroutines.iscoroutine(fs): + raise TypeError(f"expect an iterable of futures, not {type(fs).__name__}") + + from .queues import Queue # Import here to avoid circular import problem. + done = Queue() + + loop = events._get_event_loop() + todo = {ensure_future(f, loop=loop) for f in set(fs)} + timeout_handle = None + + def _on_timeout(): + for f in todo: + f.remove_done_callback(_on_completion) + done.put_nowait(None) # Queue a dummy value for _wait_for_one(). + todo.clear() # Can't do todo.remove(f) in the loop. + + def _on_completion(f): + if not todo: + return # _on_timeout() was here first. + todo.remove(f) + done.put_nowait(f) + if not todo and timeout_handle is not None: + timeout_handle.cancel() + + async def _wait_for_one(): + f = await done.get() + if f is None: + # Dummy value from _on_timeout(). + raise exceptions.TimeoutError + return f.result() # May raise f.exception(). + + for f in todo: + f.add_done_callback(_on_completion) + if todo and timeout is not None: + timeout_handle = loop.call_later(timeout, _on_timeout) + for _ in range(len(todo)): + yield _wait_for_one() + + +@types.coroutine +def __sleep0(): + """Skip one event loop run cycle. + + This is a private helper for 'asyncio.sleep()', used + when the 'delay' is set to 0. It uses a bare 'yield' + expression (which Task.__step knows how to handle) + instead of creating a Future object. 
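# [Editor's note] Standalone sketch: consuming results in completion order
# with as_completed(), per its docstring above (hypothetical names).
import asyncio

async def work(delay):
    await asyncio.sleep(delay)
    return delay

async def main():
    for fut in asyncio.as_completed([work(0.3), work(0.1), work(0.2)]):
        print(await fut)            # 0.1, then 0.2, then 0.3

asyncio.run(main())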
+ """ + yield + + +async def sleep(delay, result=None): + """Coroutine that completes after a given time (in seconds).""" + if delay <= 0: + await __sleep0() + return result + + loop = events.get_running_loop() + future = loop.create_future() + h = loop.call_later(delay, + futures._set_result_unless_cancelled, + future, result) + try: + return await future + finally: + h.cancel() + + +def ensure_future(coro_or_future, *, loop=None): + """Wrap a coroutine or an awaitable in a future. + + If the argument is a Future, it is returned directly. + """ + return _ensure_future(coro_or_future, loop=loop) + + +def _ensure_future(coro_or_future, *, loop=None): + if futures.isfuture(coro_or_future): + if loop is not None and loop is not futures._get_loop(coro_or_future): + raise ValueError('The future belongs to a different loop than ' + 'the one specified as the loop argument') + return coro_or_future + called_wrap_awaitable = False + if not coroutines.iscoroutine(coro_or_future): + if inspect.isawaitable(coro_or_future): + coro_or_future = _wrap_awaitable(coro_or_future) + called_wrap_awaitable = True + else: + raise TypeError('An asyncio.Future, a coroutine or an awaitable ' + 'is required') + + if loop is None: + loop = events._get_event_loop(stacklevel=4) + try: + return loop.create_task(coro_or_future) + except RuntimeError: + if not called_wrap_awaitable: + coro_or_future.close() + raise + + +@types.coroutine +def _wrap_awaitable(awaitable): + """Helper for asyncio.ensure_future(). + + Wraps awaitable (an object with __await__) into a coroutine + that will later be wrapped in a Task by ensure_future(). + """ + return (yield from awaitable.__await__()) + +_wrap_awaitable._is_coroutine = _is_coroutine + + +class _GatheringFuture(futures.Future): + """Helper for gather(). + + This overrides cancel() to cancel all the children and act more + like Task.cancel(), which doesn't immediately mark itself as + cancelled. + """ + + def __init__(self, children, *, loop): + assert loop is not None + super().__init__(loop=loop) + self._children = children + self._cancel_requested = False + + def cancel(self, msg=None): + if self.done(): + return False + ret = False + for child in self._children: + if child.cancel(msg=msg): + ret = True + if ret: + # If any child tasks were actually cancelled, we should + # propagate the cancellation request regardless of + # *return_exceptions* argument. See issue 32684. + self._cancel_requested = True + return ret + + +def gather(*coros_or_futures, return_exceptions=False): + """Return a future aggregating results from the given coroutines/futures. + + Coroutines will be wrapped in a future and scheduled in the event + loop. They will not necessarily be scheduled in the same order as + passed in. + + All futures must share the same event loop. If all the tasks are + done successfully, the returned future's result is the list of + results (in the order of the original sequence, not necessarily + the order of results arrival). If *return_exceptions* is True, + exceptions in the tasks are treated the same as successful + results, and gathered in the result list; otherwise, the first + raised exception will be immediately propagated to the returned + future. + + Cancellation: if the outer Future is cancelled, all children (that + have not completed yet) are also cancelled. If any child is + cancelled, this is treated as if it raised CancelledError -- + the outer Future is *not* cancelled in this case. 
(This is to + prevent the cancellation of one child to cause other children to + be cancelled.) + + If *return_exceptions* is False, cancelling gather() after it + has been marked done won't cancel any submitted awaitables. + For instance, gather can be marked done after propagating an + exception to the caller, therefore, calling ``gather.cancel()`` + after catching an exception (raised by one of the awaitables) from + gather won't cancel any other awaitables. + """ + if not coros_or_futures: + loop = events._get_event_loop() + outer = loop.create_future() + outer.set_result([]) + return outer + + def _done_callback(fut): + nonlocal nfinished + nfinished += 1 + + if outer is None or outer.done(): + if not fut.cancelled(): + # Mark exception retrieved. + fut.exception() + return + + if not return_exceptions: + if fut.cancelled(): + # Check if 'fut' is cancelled first, as + # 'fut.exception()' will *raise* a CancelledError + # instead of returning it. + exc = fut._make_cancelled_error() + outer.set_exception(exc) + return + else: + exc = fut.exception() + if exc is not None: + outer.set_exception(exc) + return + + if nfinished == nfuts: + # All futures are done; create a list of results + # and set it to the 'outer' future. + results = [] + + for fut in children: + if fut.cancelled(): + # Check if 'fut' is cancelled first, as 'fut.exception()' + # will *raise* a CancelledError instead of returning it. + # Also, since we're adding the exception return value + # to 'results' instead of raising it, don't bother + # setting __context__. This also lets us preserve + # calling '_make_cancelled_error()' at most once. + res = exceptions.CancelledError( + '' if fut._cancel_message is None else + fut._cancel_message) + else: + res = fut.exception() + if res is None: + res = fut.result() + results.append(res) + + if outer._cancel_requested: + # If gather is being cancelled we must propagate the + # cancellation regardless of *return_exceptions* argument. + # See issue 32684. + exc = fut._make_cancelled_error() + outer.set_exception(exc) + else: + outer.set_result(results) + + arg_to_fut = {} + children = [] + nfuts = 0 + nfinished = 0 + loop = None + outer = None # bpo-46672 + for arg in coros_or_futures: + if arg not in arg_to_fut: + fut = _ensure_future(arg, loop=loop) + if loop is None: + loop = futures._get_loop(fut) + if fut is not arg: + # 'arg' was not a Future, therefore, 'fut' is a new + # Future created specifically for 'arg'. Since the caller + # can't control it, disable the "destroy pending task" + # warning. + fut._log_destroy_pending = False + + nfuts += 1 + arg_to_fut[arg] = fut + fut.add_done_callback(_done_callback) + + else: + # There's a duplicate Future object in coros_or_futures. + fut = arg_to_fut[arg] + + children.append(fut) + + outer = _GatheringFuture(children, loop=loop) + return outer + + +def shield(arg): + """Wait for a future, shielding it from cancellation. + + The statement + + task = asyncio.create_task(something()) + res = await shield(task) + + is exactly equivalent to the statement + + res = await something() + + *except* that if the coroutine containing it is cancelled, the + task running in something() is not cancelled. From the POV of + something(), the cancellation did not happen. But its caller is + still cancelled, so the yield-from expression still raises + CancelledError. Note: If something() is cancelled by other means + this will still cancel shield(). 
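+
+    A related pattern (illustrative) combines shield() with wait_for()
+    so that a timeout cancels the wait rather than the task:
+
+        try:
+            res = await wait_for(shield(task), timeout)
+        except exceptions.TimeoutError:
+            res = None    # 'task' keeps running in the background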
+
+    If you want to completely ignore cancellation (not recommended)
+    you can combine shield() with a try/except clause, as follows:
+
+        task = asyncio.create_task(something())
+        try:
+            res = await shield(task)
+        except CancelledError:
+            res = None
+
+    Save a reference to tasks passed to this function, to avoid
+    a task disappearing mid-execution. The event loop only keeps
+    weak references to tasks. A task that isn't referenced elsewhere
+    may get garbage collected at any time, even before it's done.
+    """
+    inner = _ensure_future(arg)
+    if inner.done():
+        # Shortcut.
+        return inner
+    loop = futures._get_loop(inner)
+    outer = loop.create_future()
+
+    def _inner_done_callback(inner):
+        if outer.cancelled():
+            if not inner.cancelled():
+                # Mark inner's result as retrieved.
+                inner.exception()
+            return
+
+        if inner.cancelled():
+            outer.cancel()
+        else:
+            exc = inner.exception()
+            if exc is not None:
+                outer.set_exception(exc)
+            else:
+                outer.set_result(inner.result())
+
+
+    def _outer_done_callback(outer):
+        if not inner.done():
+            inner.remove_done_callback(_inner_done_callback)
+
+    inner.add_done_callback(_inner_done_callback)
+    outer.add_done_callback(_outer_done_callback)
+    return outer
+
+
+def run_coroutine_threadsafe(coro, loop):
+    """Submit a coroutine object to a given event loop.
+
+    Return a concurrent.futures.Future to access the result.
+    """
+    if not coroutines.iscoroutine(coro):
+        raise TypeError('A coroutine object is required')
+    future = concurrent.futures.Future()
+
+    def callback():
+        try:
+            futures._chain_future(ensure_future(coro, loop=loop), future)
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except BaseException as exc:
+            if future.set_running_or_notify_cancel():
+                future.set_exception(exc)
+            raise
+
+    loop.call_soon_threadsafe(callback)
+    return future
+
+
+# WeakSet containing all alive tasks.
+_all_tasks = weakref.WeakSet()
+
+# Dictionary containing tasks that are currently active in
+# all running event loops. {EventLoop: Task}
+_current_tasks = {}
+
+
+def _register_task(task):
+    """Register a new task in asyncio as executed by loop."""
+    _all_tasks.add(task)
+
+
+def _enter_task(loop, task):
+    current_task = _current_tasks.get(loop)
+    if current_task is not None:
+        raise RuntimeError(f"Cannot enter into task {task!r} while another "
+                           f"task {current_task!r} is being executed.")
+    _current_tasks[loop] = task
+
+
+def _leave_task(loop, task):
+    current_task = _current_tasks.get(loop)
+    if current_task is not task:
+        raise RuntimeError(f"Leaving task {task!r} does not match "
+                           f"the current task {current_task!r}.")
+    del _current_tasks[loop]
+
+
+def _unregister_task(task):
+    """Unregister a task."""
+    _all_tasks.discard(task)
+
+
+_py_register_task = _register_task
+_py_unregister_task = _unregister_task
+_py_enter_task = _enter_task
+_py_leave_task = _leave_task
+
+
+try:
+    from _asyncio import (_register_task, _unregister_task,
+                          _enter_task, _leave_task,
+                          _all_tasks, _current_tasks)
+except ImportError:
+    pass
+else:
+    _c_register_task = _register_task
+    _c_unregister_task = _unregister_task
+    _c_enter_task = _enter_task
+    _c_leave_task = _leave_task
diff --git a/parrot/lib/python3.10/asyncio/threads.py b/parrot/lib/python3.10/asyncio/threads.py
new file mode 100644
index 0000000000000000000000000000000000000000..db048a8231de166e2af4f2e06092d98def9b1703
--- /dev/null
+++ b/parrot/lib/python3.10/asyncio/threads.py
@@ -0,0 +1,25 @@
+"""High-level support for working with threads in asyncio"""
+
+import functools
+import contextvars
+
+from . import events
+
+
+__all__ = "to_thread",
+
+
+async def to_thread(func, /, *args, **kwargs):
+    """Asynchronously run function *func* in a separate thread.
+
+    Any *args and **kwargs supplied for this function are directly passed
+    to *func*. Also, the current :class:`contextvars.Context` is propagated,
+    allowing context variables from the main thread to be accessed in the
+    separate thread.
+
+    Return a coroutine that can be awaited to get the eventual result of *func*.
+    """
+    loop = events.get_running_loop()
+    ctx = contextvars.copy_context()
+    func_call = functools.partial(ctx.run, func, *args, **kwargs)
+    return await loop.run_in_executor(None, func_call)
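+
+# Illustrative sketch ('read_big_file' is a hypothetical blocking callable):
+# the call runs in the default ThreadPoolExecutor while the event loop
+# stays free to service other tasks.
+#
+#     async def main():
+#         data = await to_thread(read_big_file, '/tmp/data.bin')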
diff --git a/parrot/lib/python3.10/asyncio/windows_utils.py b/parrot/lib/python3.10/asyncio/windows_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef277fac3e291c307d5e26ada8ef5e28c47434f1
--- /dev/null
+++ b/parrot/lib/python3.10/asyncio/windows_utils.py
@@ -0,0 +1,173 @@
+"""Various Windows specific bits and pieces."""
+
+import sys
+
+if sys.platform != 'win32':  # pragma: no cover
+    raise ImportError('win32 only')
+
+import _winapi
+import itertools
+import msvcrt
+import os
+import subprocess
+import tempfile
+import warnings
+
+
+__all__ = 'pipe', 'Popen', 'PIPE', 'PipeHandle'
+
+
+# Constants/globals
+
+
+BUFSIZE = 8192
+PIPE = subprocess.PIPE
+STDOUT = subprocess.STDOUT
+_mmap_counter = itertools.count()
+
+
+# Replacement for os.pipe() using handles instead of fds
+
+
+def pipe(*, duplex=False, overlapped=(True, True), bufsize=BUFSIZE):
+    """Like os.pipe() but with overlapped support and using handles not fds."""
+    address = tempfile.mktemp(
+        prefix=r'\\.\pipe\python-pipe-{:d}-{:d}-'.format(
+            os.getpid(), next(_mmap_counter)))
+
+    if duplex:
+        openmode = _winapi.PIPE_ACCESS_DUPLEX
+        access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE
+        obsize, ibsize = bufsize, bufsize
+    else:
+        openmode = _winapi.PIPE_ACCESS_INBOUND
+        access = _winapi.GENERIC_WRITE
+        obsize, ibsize = 0, bufsize
+
+    openmode |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
+
+    if overlapped[0]:
+        openmode |= _winapi.FILE_FLAG_OVERLAPPED
+
+    if overlapped[1]:
+        flags_and_attribs = _winapi.FILE_FLAG_OVERLAPPED
+    else:
+        flags_and_attribs = 0
+
+    h1 = h2 = None
+    try:
+        h1 = _winapi.CreateNamedPipe(
+            address, openmode, _winapi.PIPE_WAIT,
+            1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL)
+
+        h2 = _winapi.CreateFile(
+            address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING,
+            flags_and_attribs, _winapi.NULL)
+
+        ov = _winapi.ConnectNamedPipe(h1, overlapped=True)
+        ov.GetOverlappedResult(True)
+        return h1, h2
+    except:
+        if h1 is not None:
+            _winapi.CloseHandle(h1)
+        if h2 is not None:
+            _winapi.CloseHandle(h2)
+        raise
+
+
+# Wrapper for a pipe handle
+
+
+class PipeHandle:
+    """Wrapper for an overlapped pipe handle which is vaguely file-object like.
+
+    The IOCP event loop can use these instead of socket objects.
+ """ + def __init__(self, handle): + self._handle = handle + + def __repr__(self): + if self._handle is not None: + handle = f'handle={self._handle!r}' + else: + handle = 'closed' + return f'<{self.__class__.__name__} {handle}>' + + @property + def handle(self): + return self._handle + + def fileno(self): + if self._handle is None: + raise ValueError("I/O operation on closed pipe") + return self._handle + + def close(self, *, CloseHandle=_winapi.CloseHandle): + if self._handle is not None: + CloseHandle(self._handle) + self._handle = None + + def __del__(self, _warn=warnings.warn): + if self._handle is not None: + _warn(f"unclosed {self!r}", ResourceWarning, source=self) + self.close() + + def __enter__(self): + return self + + def __exit__(self, t, v, tb): + self.close() + + +# Replacement for subprocess.Popen using overlapped pipe handles + + +class Popen(subprocess.Popen): + """Replacement for subprocess.Popen using overlapped pipe handles. + + The stdin, stdout, stderr are None or instances of PipeHandle. + """ + def __init__(self, args, stdin=None, stdout=None, stderr=None, **kwds): + assert not kwds.get('universal_newlines') + assert kwds.get('bufsize', 0) == 0 + stdin_rfd = stdout_wfd = stderr_wfd = None + stdin_wh = stdout_rh = stderr_rh = None + if stdin == PIPE: + stdin_rh, stdin_wh = pipe(overlapped=(False, True), duplex=True) + stdin_rfd = msvcrt.open_osfhandle(stdin_rh, os.O_RDONLY) + else: + stdin_rfd = stdin + if stdout == PIPE: + stdout_rh, stdout_wh = pipe(overlapped=(True, False)) + stdout_wfd = msvcrt.open_osfhandle(stdout_wh, 0) + else: + stdout_wfd = stdout + if stderr == PIPE: + stderr_rh, stderr_wh = pipe(overlapped=(True, False)) + stderr_wfd = msvcrt.open_osfhandle(stderr_wh, 0) + elif stderr == STDOUT: + stderr_wfd = stdout_wfd + else: + stderr_wfd = stderr + try: + super().__init__(args, stdin=stdin_rfd, stdout=stdout_wfd, + stderr=stderr_wfd, **kwds) + except: + for h in (stdin_wh, stdout_rh, stderr_rh): + if h is not None: + _winapi.CloseHandle(h) + raise + else: + if stdin_wh is not None: + self.stdin = PipeHandle(stdin_wh) + if stdout_rh is not None: + self.stdout = PipeHandle(stdout_rh) + if stderr_rh is not None: + self.stderr = PipeHandle(stderr_rh) + finally: + if stdin == PIPE: + os.close(stdin_rfd) + if stdout == PIPE: + os.close(stdout_wfd) + if stderr == PIPE: + os.close(stderr_wfd) diff --git a/parrot/lib/python3.10/site-packages/fire-0.6.0.dist-info/INSTALLER b/parrot/lib/python3.10/site-packages/fire-0.6.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/fire-0.6.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/parrot/lib/python3.10/site-packages/fire-0.6.0.dist-info/LICENSE b/parrot/lib/python3.10/site-packages/fire-0.6.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..035adf953c2632a4fa9429f0afd4df1c80788b19 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/fire-0.6.0.dist-info/LICENSE @@ -0,0 +1,13 @@ +Copyright 2017 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/parrot/lib/python3.10/site-packages/fire-0.6.0.dist-info/RECORD b/parrot/lib/python3.10/site-packages/fire-0.6.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..ad3afb0dc202671d4e974f2549cebd5f9b7f320f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/fire-0.6.0.dist-info/RECORD @@ -0,0 +1,99 @@ +fire-0.6.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +fire-0.6.0.dist-info/LICENSE,sha256=pd53tiJmvKC7l78FiZLwsPMIqDqMpV7hD79r2O2PftA,573 +fire-0.6.0.dist-info/METADATA,sha256=qD8PSB7hZN-8d361I8FkymUFY5IDjvt-LLEqNEbEC40,1885 +fire-0.6.0.dist-info/RECORD,, +fire-0.6.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +fire-0.6.0.dist-info/WHEEL,sha256=9Hm2OB-j1QcCUq9Jguht7ayGIIZBRTdOXD1qg9cCgPM,109 +fire-0.6.0.dist-info/top_level.txt,sha256=y463xaVSW7PORTz4SuTbtFVLuW7MAT2-3cbSk1E8wR8,5 +fire/__init__.py,sha256=wr1oug-Y_X9v5pqcJWlyuT4p5op33Bw2Mtg5RU2vFtM,786 +fire/__main__.py,sha256=CCzw1dZ8CwD6zEurTzql4gRcJv5hzfFBnARintWna6I,4491 +fire/__pycache__/__init__.cpython-310.pyc,, +fire/__pycache__/__main__.cpython-310.pyc,, +fire/__pycache__/completion.cpython-310.pyc,, +fire/__pycache__/completion_test.cpython-310.pyc,, +fire/__pycache__/core.cpython-310.pyc,, +fire/__pycache__/core_test.cpython-310.pyc,, +fire/__pycache__/custom_descriptions.cpython-310.pyc,, +fire/__pycache__/custom_descriptions_test.cpython-310.pyc,, +fire/__pycache__/decorators.cpython-310.pyc,, +fire/__pycache__/decorators_test.cpython-310.pyc,, +fire/__pycache__/docstrings.cpython-310.pyc,, +fire/__pycache__/docstrings_fuzz_test.cpython-310.pyc,, +fire/__pycache__/docstrings_test.cpython-310.pyc,, +fire/__pycache__/fire_import_test.cpython-310.pyc,, +fire/__pycache__/fire_test.cpython-310.pyc,, +fire/__pycache__/formatting.cpython-310.pyc,, +fire/__pycache__/formatting_test.cpython-310.pyc,, +fire/__pycache__/formatting_windows.cpython-310.pyc,, +fire/__pycache__/helptext.cpython-310.pyc,, +fire/__pycache__/helptext_test.cpython-310.pyc,, +fire/__pycache__/inspectutils.cpython-310.pyc,, +fire/__pycache__/inspectutils_test.cpython-310.pyc,, +fire/__pycache__/interact.cpython-310.pyc,, +fire/__pycache__/interact_test.cpython-310.pyc,, +fire/__pycache__/main_test.cpython-310.pyc,, +fire/__pycache__/parser.cpython-310.pyc,, +fire/__pycache__/parser_fuzz_test.cpython-310.pyc,, +fire/__pycache__/parser_test.cpython-310.pyc,, +fire/__pycache__/test_components.cpython-310.pyc,, +fire/__pycache__/test_components_bin.cpython-310.pyc,, +fire/__pycache__/test_components_py3.cpython-310.pyc,, +fire/__pycache__/test_components_test.cpython-310.pyc,, +fire/__pycache__/testutils.cpython-310.pyc,, +fire/__pycache__/testutils_test.cpython-310.pyc,, +fire/__pycache__/trace.cpython-310.pyc,, +fire/__pycache__/trace_test.cpython-310.pyc,, +fire/__pycache__/value_types.cpython-310.pyc,, +fire/completion.py,sha256=i6PXwLwhyli9J2OFK0E-RS2IPSIvEN6gPAPDtztQbcQ,16411 +fire/completion_test.py,sha256=_wj8dJSxBFcYhOZJafgCeaDJJFPWhw3VeFYBSnWGJbI,6437 +fire/console/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+fire/console/__pycache__/__init__.cpython-310.pyc,, +fire/console/__pycache__/console_attr.cpython-310.pyc,, +fire/console/__pycache__/console_attr_os.cpython-310.pyc,, +fire/console/__pycache__/console_io.cpython-310.pyc,, +fire/console/__pycache__/console_pager.cpython-310.pyc,, +fire/console/__pycache__/encoding.cpython-310.pyc,, +fire/console/__pycache__/files.cpython-310.pyc,, +fire/console/__pycache__/platforms.cpython-310.pyc,, +fire/console/__pycache__/text.cpython-310.pyc,, +fire/console/console_attr.py,sha256=ElNEyM5AImJyUTzsasXwvn6Xvo7pRSWnQQH6rMus3Gg,23668 +fire/console/console_attr_os.py,sha256=r4BC7-8ifbu26RS_VkT0gAT3yQQnfxetaSSReKl9kEg,7565 +fire/console/console_io.py,sha256=Jsd78YWVxGfjjRosscYj7NK4rhw3aCcyO_AWpUvdKiY,4259 +fire/console/console_pager.py,sha256=G-rD2lgOB2_ZvxDCxFES5-dhRTkfjVCFy9ytqG1PVMc,9563 +fire/console/encoding.py,sha256=TWQUFdvmXUdox3lOml14nG5jK5nUv0qJ2jLHzpULgJU,7008 +fire/console/files.py,sha256=csLhCj6lf5WbpT7gk1FRH6LbgGI5ALZJX9uY1DDy62U,4081 +fire/console/platforms.py,sha256=DcWAZicS3XGnUp0SIOQcDPWgcHi8rWhNcymg8d3WMMs,16420 +fire/console/text.py,sha256=DxqVltezneaw_Qnsg2CSzd5AUSjoltCaNo4k3gNwuPs,2776 +fire/core.py,sha256=81aRyUwfBn9L3_gqOmW-N92RRthifQRXaB55lZmNOnI,37051 +fire/core_test.py,sha256=iK9zldIaHVnQiApIkNqeAHeDDEnc-EVcFJ3wNAlUGwQ,9902 +fire/custom_descriptions.py,sha256=dljGNnDgdwh8xqfJDImwo0LJcul9oRLXMIeXu1VTyqs,5481 +fire/custom_descriptions_test.py,sha256=w326Msp44tzGcsy_Dj8VQDPp2t6XriYqwan1K6xi8lk,2764 +fire/decorators.py,sha256=MTtOCixcOGpRnaLKJFXK-VVxyhUyWWvNavRhoDKswEE,3689 +fire/decorators_test.py,sha256=ySslVNlkcKvUq8L_4eBxjEDFjxW1zS0nQdKrtgtdWL4,5740 +fire/docstrings.py,sha256=hUte73TIFh0GOMObY7OIlt_gJRrs2kKKSj7fK9t5PI4,25168 +fire/docstrings_fuzz_test.py,sha256=vj0uA4IIwxI2CZcNDhISJ5JFpONVgyG9oCkrjp-eXvk,1210 +fire/docstrings_test.py,sha256=hFtiLsfD0kpA-uxj4n2hGftYRh9CfRc3jsa7xPUhLJA,12394 +fire/fire_import_test.py,sha256=LiRF4SrUl8_6Vzab5RpNsreWXQGOVdQl44g8h5GetEg,1093 +fire/fire_test.py,sha256=Uw0UmKKARoX_X1amAzvC3dRCwpnUqXbhkNWlc9Xiwpo,29595 +fire/formatting.py,sha256=qzSoLeFQI_LtCsPoWYxMSnh_RG2TF6aT9fmxNbf-Hgw,2826 +fire/formatting_test.py,sha256=fpL8nUzhxEkRQJIMF6Yi5fc3Fbk926niqZyitDwVGow,2757 +fire/formatting_windows.py,sha256=0nLNgkc2snyBTmmudyjkkgFRxHJn2E4iGqcL-HKnzV0,2189 +fire/helptext.py,sha256=R4nEbQbaLReTRzqlh4gChjZu4Ipe3qDTYs1ixAJgoDA,28076 +fire/helptext_test.py,sha256=hCbeVa9Q8wX_bT0NchOvkNH-qAqeWkbWEFSBFShiwa4,23858 +fire/inspectutils.py,sha256=WTMMxCECIKJ_ciUAEuAsyJ6_doEos7_o-5xaKw1VE6U,12584 +fire/inspectutils_test.py,sha256=Pc22wzv1f6P6Hf72hG9zMjbbAMYnm0hbfr6MzSrfPh0,5285 +fire/interact.py,sha256=AQh2X42gLUe1Ga5H3PMtvX_ijcXwMo4CPDytSPRS48g,3157 +fire/interact_test.py,sha256=sOgM_7X54U7cQyeMeY_mMOl7xjKD1EB3E98SBnWxqIo,1532 +fire/main_test.py,sha256=65FWWxljKVe8H9kT4XFI0NQFJDQFI2aPIhyE0nqZSVA,3377 +fire/parser.py,sha256=FsgS-cN2EIvQztG59p8JfTVmMp1nehtiLdNKKH1fyF4,4654 +fire/parser_fuzz_test.py,sha256=94qG2l3XSIvY-qeDzaJ5Tph1DJRIcIhut0CHxfjKesI,3196 +fire/parser_test.py,sha256=D81TbWgqNRQH5osrpbMdVrpGYiUQhYNyn5Quns0Tndo,6489 +fire/test_components.py,sha256=o5aaOmZtJIjZ3QlIY8JAZW71kD8J5Sc9ygO6FEmbWjc,12123 +fire/test_components_bin.py,sha256=QV1utpGK7bXjjTLavqXM6qW8iaz_EwlMuu6vSAgwsuE,917 +fire/test_components_py3.py,sha256=KTAzulmX4h8YEkY1Mzjy4tY54RtKtX6eYttVIgr6EFc,2434 +fire/test_components_test.py,sha256=8DUlUmfOUl6u1Yv_G5YNzABRyX-oheE_0v_Hfz5hF40,1340 +fire/testutils.py,sha256=gcWpXVbnesdgtc9vPFnNtbMHels3EmhmaEt7zX4yr6E,3906 +fire/testutils_test.py,sha256=Jt4NiIg-MQuO1iFcUHT52HD0cD-ZAc4EwcG7A9p84Fo,1890 
+fire/trace.py,sha256=Wb3gn1OyWthu-V7voblKlWRaf6dzQNmonrXlHODPgd4,10593
+fire/trace_test.py,sha256=su0iGoB_IRUCsqTXWEqnWAuonzS_ah8p0ze3A-yq9Oc,5222
+fire/value_types.py,sha256=-c9rG7uoBTCYSwYLyQ0cXDIDNjVt0FtKkNKHlMXnlec,2756
diff --git a/parrot/lib/python3.10/site-packages/fire-0.6.0.dist-info/REQUESTED b/parrot/lib/python3.10/site-packages/fire-0.6.0.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/parrot/lib/python3.10/site-packages/fire-0.6.0.dist-info/WHEEL b/parrot/lib/python3.10/site-packages/fire-0.6.0.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..eaea6f3b57e8165ab7d2673900a9939348ee5e42
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/fire-0.6.0.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: setuptools (75.8.0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/parrot/lib/python3.10/site-packages/fire-0.6.0.dist-info/top_level.txt b/parrot/lib/python3.10/site-packages/fire-0.6.0.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f8e1e746d827f862e6af5d9709bd79afa2790294
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/fire-0.6.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+fire
diff --git a/parrot/lib/python3.10/site-packages/jsonschema_specifications-2023.12.1.dist-info/INSTALLER b/parrot/lib/python3.10/site-packages/jsonschema_specifications-2023.12.1.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/jsonschema_specifications-2023.12.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/parrot/lib/python3.10/site-packages/jsonschema_specifications-2023.12.1.dist-info/METADATA b/parrot/lib/python3.10/site-packages/jsonschema_specifications-2023.12.1.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..f1797d2936cf2617628cef303b44e544e7a05377
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/jsonschema_specifications-2023.12.1.dist-info/METADATA
@@ -0,0 +1,56 @@
+Metadata-Version: 2.1
+Name: jsonschema-specifications
+Version: 2023.12.1
+Summary: The JSON Schema meta-schemas and vocabularies, exposed as a Registry
+Project-URL: Documentation, https://jsonschema-specifications.readthedocs.io/
+Project-URL: Homepage, https://github.com/python-jsonschema/jsonschema-specifications
+Project-URL: Issues, https://github.com/python-jsonschema/jsonschema-specifications/issues/
+Project-URL: Funding, https://github.com/sponsors/Julian
+Project-URL: Tidelift, https://tidelift.com/subscription/pkg/pypi-jsonschema-specifications?utm_source=pypi-jsonschema-specifications&utm_medium=referral&utm_campaign=pypi-link
+Project-URL: Source, https://github.com/python-jsonschema/jsonschema-specifications
+Author: Julian Berman
+Author-email: Julian+jsonschema-specifications@GrayVines.com
+License: MIT
+License-File: COPYING
+Keywords: data validation,json,json schema,jsonschema,validation
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: File Formats :: JSON :: JSON Schema
+Requires-Python: >=3.8
+Requires-Dist: importlib-resources>=1.4.0; python_version < '3.9'
+Requires-Dist: referencing>=0.31.0
+Description-Content-Type: text/x-rst
+
+=============================
+``jsonschema-specifications``
+=============================
+
+|PyPI| |Pythons| |CI| |ReadTheDocs|
+
+JSON support files from the `JSON Schema Specifications <https://json-schema.org/specification>`_ (metaschemas, vocabularies, etc.), packaged for runtime access from Python as a `referencing-based Schema Registry <https://referencing.readthedocs.io/>`_.
+
+.. |PyPI| image:: https://img.shields.io/pypi/v/jsonschema-specifications.svg
+   :alt: PyPI version
+   :target: https://pypi.org/project/jsonschema-specifications/
+
+.. |Pythons| image:: https://img.shields.io/pypi/pyversions/jsonschema-specifications.svg
+   :alt: Supported Python versions
+   :target: https://pypi.org/project/jsonschema-specifications/
+
+.. |CI| image:: https://github.com/python-jsonschema/jsonschema-specifications/workflows/CI/badge.svg
+   :alt: Build status
+   :target: https://github.com/python-jsonschema/jsonschema-specifications/actions?query=workflow%3ACI
+
+.. |ReadTheDocs| image:: https://readthedocs.org/projects/jsonschema-specifications/badge/?version=stable&style=flat
+   :alt: ReadTheDocs status
+   :target: https://jsonschema-specifications.readthedocs.io/en/stable/
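+
+As a quick sketch of typical use (illustrative; see the documentation for
+details), the bundled schemas are exposed through a single ``referencing``
+registry::
+
+    from jsonschema_specifications import REGISTRY
+
+    # Look up the Draft 2020-12 metaschema by its canonical URI.
+    metaschema = REGISTRY.contents("https://json-schema.org/draft/2020-12/schema")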
diff --git a/parrot/lib/python3.10/site-packages/jsonschema_specifications-2023.12.1.dist-info/RECORD b/parrot/lib/python3.10/site-packages/jsonschema_specifications-2023.12.1.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..6e2609d9fad7a4c264d2534401077c58abcce835
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/jsonschema_specifications-2023.12.1.dist-info/RECORD
@@ -0,0 +1,34 @@
+jsonschema_specifications-2023.12.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+jsonschema_specifications-2023.12.1.dist-info/METADATA,sha256=fnTxmkgaHmLZ2WGJ1erA8FSWtQmx1wB3tpnalxBQ2C0,2977
+jsonschema_specifications-2023.12.1.dist-info/RECORD,,
+jsonschema_specifications-2023.12.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+jsonschema_specifications-2023.12.1.dist-info/WHEEL,sha256=mRYSEL3Ih6g5a_CVMIcwiF__0Ae4_gLYh01YFNwiq1k,87
+jsonschema_specifications-2023.12.1.dist-info/licenses/COPYING,sha256=QtzWNJX4e063x3V6-jebtVpT-Ur9el9lfZrfVyNuUVw,1057
+jsonschema_specifications/__init__.py,sha256=XmYAMRnPbRv_oidAvNZyoOttwYAvBdqr77ouDD32ZXE,385
+jsonschema_specifications/__pycache__/__init__.cpython-310.pyc,,
+jsonschema_specifications/__pycache__/_core.cpython-310.pyc,,
+jsonschema_specifications/_core.py,sha256=tFhc1CMleJ3AJOK_bjxOpFQTdrsUClFGfFxPBU_CebM,1140
+jsonschema_specifications/schemas/draft201909/metaschema.json,sha256=e3YbPhIfCgyh6ioLjizIVrz4AWBLgmjXG6yqICvAwTs,1785
+jsonschema_specifications/schemas/draft201909/vocabularies/applicator,sha256=aJUQDplyb7sQcFhRK77D7P1LJOj9L6zuPlBe5ysNTDE,1860
+jsonschema_specifications/schemas/draft201909/vocabularies/content,sha256=m31PVaTi_bAsQwBo_f-rxzKt3OI42j8d8mkCScM1MnQ,517
+jsonschema_specifications/schemas/draft201909/vocabularies/core,sha256=taLElX9kldClCB8ECevooU5BOayyA_x0hHH47eKvWyw,1531
+jsonschema_specifications/schemas/draft201909/vocabularies/meta-data,sha256=1H4kRd1qgicaKY2DzGxsuNSuHhXg3Fa-zTehY-zwEoY,892
+jsonschema_specifications/schemas/draft201909/vocabularies/validation,sha256=HlJsHTNac0gF_ILPV5jBK5YK19olF8Zs2lobCTWcPBw,2834
+jsonschema_specifications/schemas/draft202012/metaschema.json,sha256=Qdp29a-3zgYtJI92JGOpL3ykfk4PkFsiS6av7vkd7Q8,2452
+jsonschema_specifications/schemas/draft202012/vocabularies/applicator,sha256=xKbkFHuR_vf-ptwFjLG_k0AvdBS3ZXiosWqvHa1qrO8,1659
+jsonschema_specifications/schemas/draft202012/vocabularies/content,sha256=CDQ3R3ZOSlgUJieTz01lIFenkThjxZUNQyl-jh_axbY,519
+jsonschema_specifications/schemas/draft202012/vocabularies/core,sha256=wtEqjk3RHTNt_IOj9mOqTGnwtJs76wlP_rJbUxb0gD0,1564
+jsonschema_specifications/schemas/draft202012/vocabularies/format,sha256=UOu_55BhGoSbjMQAoJwdDg-2q1wNQ6DyIgH9NiUFa_Q,403
+jsonschema_specifications/schemas/draft202012/vocabularies/format-annotation,sha256=q8d1rf79idIjWBcNm_k_Tr0jSVY7u-3WDwK-98gSvMA,448
+jsonschema_specifications/schemas/draft202012/vocabularies/format-assertion,sha256=xSJCuaG7eGsmw-gset1CjDH5yW5XXc6Z5W6l_qptogw,445
+jsonschema_specifications/schemas/draft202012/vocabularies/meta-data,sha256=j3bW4U9Bubku-TO3CM3FFEyLUmhlGtEZGEhfsXVPHHY,892
+jsonschema_specifications/schemas/draft202012/vocabularies/unevaluated,sha256=Lb-8tzmUtnCwl2SSre4f_7RsIWgnhNL1pMpWH54tDLQ,506
+jsonschema_specifications/schemas/draft202012/vocabularies/validation,sha256=cBCjHlQfMtK-ch4t40jfdcmzaHaj7TBId_wKvaHTelg,2834
+jsonschema_specifications/schemas/draft3/metaschema.json,sha256=LPdfZENvtb43Si6qJ6uLfh_WUcm0ba6mxnsC_WTiRYs,2600
+jsonschema_specifications/schemas/draft4/metaschema.json,sha256=4UidC0dV8CeTMCWR0_y48Htok6gqlPJIlfjk7fEbguI,4357
+jsonschema_specifications/schemas/draft6/metaschema.json,sha256=wp386fVINcOgbAOzxdXsDtp3cGVo-cTffPvHVmpRAG0,4437
+jsonschema_specifications/schemas/draft7/metaschema.json,sha256=PVOSCIJhYGxVm2A_OFMpyfGrRbXWZ-uZBodFOwVdQF4,4819
+jsonschema_specifications/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+jsonschema_specifications/tests/__pycache__/__init__.cpython-310.pyc,,
+jsonschema_specifications/tests/__pycache__/test_jsonschema_specifications.cpython-310.pyc,,
+jsonschema_specifications/tests/test_jsonschema_specifications.py,sha256=WkbYRW6A6FoZ0rivShfqVLSCsAiHJ2x8TxqECJTXPTY,1106
diff --git a/parrot/lib/python3.10/site-packages/jsonschema_specifications-2023.12.1.dist-info/REQUESTED b/parrot/lib/python3.10/site-packages/jsonschema_specifications-2023.12.1.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/parrot/lib/python3.10/site-packages/jsonschema_specifications-2023.12.1.dist-info/WHEEL b/parrot/lib/python3.10/site-packages/jsonschema_specifications-2023.12.1.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..2860816abecb3c5ad7da6d018a4334b87e6b6cfc
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/jsonschema_specifications-2023.12.1.dist-info/WHEEL
@@ -0,0 +1,4 @@
+Wheel-Version: 1.0
+Generator: hatchling 1.21.0
+Root-Is-Purelib: true
+Tag: py3-none-any
diff --git a/parrot/lib/python3.10/site-packages/jsonschema_specifications-2023.12.1.dist-info/licenses/COPYING b/parrot/lib/python3.10/site-packages/jsonschema_specifications-2023.12.1.dist-info/licenses/COPYING
new file mode 100644
index 0000000000000000000000000000000000000000..a9f853e43069b8e3f8a156a4af2b1198a004230d
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/jsonschema_specifications-2023.12.1.dist-info/licenses/COPYING
@@ -0,0 +1,19 @@
+Copyright (c) 2022 Julian Berman
+
+Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/parrot/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/INSTALLER b/parrot/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/parrot/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/LICENSE b/parrot/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..0f46bc0edd00d1950d98fec0f78e4366691391d5 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/LICENSE @@ -0,0 +1,38 @@ +Copyright (c) 2008-2016 California Institute of Technology. +Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. +All rights reserved. + +This software forks the python package "multiprocessing". Licence and +copyright information for multiprocessing can be found in "COPYING". + +This software is available subject to the conditions and terms laid +out below. By downloading and using this software you are agreeing +to the following conditions. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + - Neither the names of the copyright holders nor the names of any of + the contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/parrot/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/METADATA b/parrot/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..1e8d30dd497e6ac867b1f7f11f79ac2704525be0 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/METADATA @@ -0,0 +1,203 @@ +Metadata-Version: 2.1 +Name: multiprocess +Version: 0.70.16 +Summary: better multiprocessing and multithreading in Python +Home-page: https://github.com/uqfoundation/multiprocess +Download-URL: https://pypi.org/project/multiprocess/#files +Author: Mike McKerns +Author-email: mmckerns@uqfoundation.org +Maintainer: Mike McKerns +Maintainer-email: mmckerns@uqfoundation.org +License: BSD-3-Clause +Project-URL: Documentation, http://multiprocess.rtfd.io +Project-URL: Source Code, https://github.com/uqfoundation/multiprocess +Project-URL: Bug Tracker, https://github.com/uqfoundation/multiprocess/issues +Platform: Linux +Platform: Windows +Platform: Mac +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: BSD License +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Software Development +Requires-Python: >=3.8 +License-File: LICENSE +License-File: COPYING +Requires-Dist: dill (>=0.3.8) + +----------------------------------------------------------------- +multiprocess: better multiprocessing and multithreading in Python +----------------------------------------------------------------- + +About Multiprocess +================== + +``multiprocess`` is a fork of ``multiprocessing``. ``multiprocess`` extends ``multiprocessing`` to provide enhanced serialization, using `dill`. ``multiprocess`` leverages ``multiprocessing`` to support the spawning of processes using the API of the Python standard library's ``threading`` module. ``multiprocessing`` has been distributed as part of the standard library since Python 2.6. + +``multiprocess`` is part of ``pathos``, a Python framework for heterogeneous computing. +``multiprocess`` is in active development, so any user feedback, bug reports, comments, +or suggestions are highly appreciated. A list of issues is located at https://github.com/uqfoundation/multiprocess/issues, with a legacy list maintained at https://uqfoundation.github.io/project/pathos/query. 
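+
+For example, because serialization is handled by ``dill``, objects that the
+stdlib pickler rejects (such as interactively defined functions and lambdas)
+can be shipped to worker processes (illustrative sketch)::
+
+    >>> from multiprocess import Pool
+    >>> with Pool(2) as p:
+    ...     p.map(lambda x: x*x, range(5))
+    [0, 1, 4, 9, 16]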
+
+
+Major Features
+==============
+
+``multiprocess`` enables:
+
+    - objects to be transferred between processes using pipes or multi-producer/multi-consumer queues
+    - objects to be shared between processes using a server process or (for simple data) shared memory
+
+``multiprocess`` provides:
+
+    - equivalents of all the synchronization primitives in ``threading``
+    - a ``Pool`` class to facilitate submitting tasks to worker processes
+    - enhanced serialization, using ``dill``
+
+
+Current Release
+===============
+
+The latest released version of ``multiprocess`` is available from:
+
+    https://pypi.org/project/multiprocess
+
+``multiprocess`` is distributed under a 3-clause BSD license, and is a fork of ``multiprocessing``.
+
+
+Development Version
+===================
+
+You can get the latest development version with all the shiny new features at:
+
+    https://github.com/uqfoundation
+
+If you have a new contribution, please submit a pull request.
+
+
+Installation
+============
+
+``multiprocess`` can be installed with ``pip``::
+
+    $ pip install multiprocess
+
+For Python 2, a C compiler is required to build the included extension module from source. Python 3 and binary installs do not require a C compiler.
+
+
+Requirements
+============
+
+``multiprocess`` requires:
+
+    - ``python`` (or ``pypy``), **>=3.8**
+    - ``setuptools``, **>=42**
+    - ``dill``, **>=0.3.8**
+
+
+Basic Usage
+===========
+
+The ``multiprocess.Process`` class follows the API of ``threading.Thread``.
+For example ::
+
+    from multiprocess import Process, Queue
+
+    def f(q):
+        q.put('hello world')
+
+    if __name__ == '__main__':
+        q = Queue()
+        p = Process(target=f, args=[q])
+        p.start()
+        print (q.get())
+        p.join()
+
+Synchronization primitives like locks, semaphores and conditions are
+available, for example ::
+
+    >>> from multiprocess import Condition
+    >>> c = Condition()
+    >>> print (c)
+    <Condition(<RLock(None, 0)>), 0>
+    >>> c.acquire()
+    True
+    >>> print (c)
+    <Condition(<RLock(MainProcess, 1)>), 0>
+
+One can also use a manager to create shared objects either in shared
+memory or in a server process, for example ::
+
+    >>> from multiprocess import Manager
+    >>> manager = Manager()
+    >>> l = manager.list(range(10))
+    >>> l.reverse()
+    >>> print (l)
+    [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
+    >>> print (repr(l))
+    <ListProxy object, typeid 'list' at 0x...>
+
+Tasks can be offloaded to a pool of worker processes in various ways,
+for example ::
+
+    >>> from multiprocess import Pool
+    >>> def f(x): return x*x
+    ...
+    >>> p = Pool(4)
+    >>> result = p.map_async(f, range(10))
+    >>> print (result.get(timeout=1))
+    [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
+
+When ``dill`` is installed, serialization is extended to most objects,
+for example ::
+
+    >>> from multiprocess import Pool
+    >>> p = Pool(4)
+    >>> print (p.map(lambda x: (lambda y:y**2)(x) + x, range(10)))
+    [0, 2, 6, 12, 20, 30, 42, 56, 72, 90]
+
+
+More Information
+================
+
+Probably the best way to get started is to look at the documentation at
+http://multiprocess.rtfd.io. Also see ``multiprocess.tests`` for scripts that
+demonstrate how ``multiprocess`` can be used to leverage multiple processes
+to execute Python in parallel. You can run the test suite with
+``python -m multiprocess.tests``. As ``multiprocess`` conforms to the
+``multiprocessing`` interface, the examples and documentation found at
+http://docs.python.org/library/multiprocessing.html also apply to
+``multiprocess`` if one will ``import multiprocessing as multiprocess``.
+See https://github.com/uqfoundation/multiprocess/tree/master/py3.12/examples +for a set of examples that demonstrate some basic use cases and benchmarking +for running Python code in parallel. Please feel free to submit a ticket on +github, or ask a question on stackoverflow (**@Mike McKerns**). If you would +like to share how you use ``multiprocess`` in your work, please send an email +(to **mmckerns at uqfoundation dot org**). + + +Citation +======== + +If you use ``multiprocess`` to do research that leads to publication, we ask that you +acknowledge use of ``multiprocess`` by citing the following in your publication:: + + M.M. McKerns, L. Strand, T. Sullivan, A. Fang, M.A.G. Aivazis, + "Building a framework for predictive science", Proceedings of + the 10th Python in Science Conference, 2011; + http://arxiv.org/pdf/1202.1056 + + Michael McKerns and Michael Aivazis, + "pathos: a framework for heterogeneous computing", 2010- ; + https://uqfoundation.github.io/project/pathos + +Please see https://uqfoundation.github.io/project/pathos or +http://arxiv.org/pdf/1202.1056 for further information. diff --git a/parrot/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/RECORD b/parrot/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..1813bd9b3282c7440d0a8510c7b408a91edab1a5 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/RECORD @@ -0,0 +1,74 @@ +_multiprocess/__init__.py,sha256=zX5_h36TGSL0brHRtBvCL5E59ccW7yjL79i-Y399ODM,321 +_multiprocess/__pycache__/__init__.cpython-310.pyc,, +multiprocess-0.70.16.dist-info/COPYING,sha256=n3_yfLkw0sMgLuB-PS1hRvTeZ20GmjPaMWbJjNuoOpU,1493 +multiprocess-0.70.16.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +multiprocess-0.70.16.dist-info/LICENSE,sha256=6XUJedJKg2dhI98BD3PMtVtZvRFT-oGczkOr5B4tEEA,1930 +multiprocess-0.70.16.dist-info/METADATA,sha256=Sv2eH2CjjyjVYaryTKqHkbJTlxlVA-SbmziCgkBJeQ0,7151 +multiprocess-0.70.16.dist-info/RECORD,, +multiprocess-0.70.16.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +multiprocess-0.70.16.dist-info/WHEEL,sha256=KxatxaZA14OswIJTdImHhiM2tdZgU-xLZEzs-sYveVc,94 +multiprocess-0.70.16.dist-info/top_level.txt,sha256=qtJc8GNdvi6suNpISX0Myln9AXJBYrNuas1MCqRPPqg,27 +multiprocess/__info__.py,sha256=84TUBn1oJMNpbVvXKs0lKyfLYaZvRr-ZVh1zHM9VeCY,7997 +multiprocess/__init__.py,sha256=XWUBDGorUkDW04h64xe51pUV9N5gzvSDj3tNT2ekifw,1856 +multiprocess/__pycache__/__info__.cpython-310.pyc,, +multiprocess/__pycache__/__init__.cpython-310.pyc,, +multiprocess/__pycache__/connection.cpython-310.pyc,, +multiprocess/__pycache__/context.cpython-310.pyc,, +multiprocess/__pycache__/forkserver.cpython-310.pyc,, +multiprocess/__pycache__/heap.cpython-310.pyc,, +multiprocess/__pycache__/managers.cpython-310.pyc,, +multiprocess/__pycache__/pool.cpython-310.pyc,, +multiprocess/__pycache__/popen_fork.cpython-310.pyc,, +multiprocess/__pycache__/popen_forkserver.cpython-310.pyc,, +multiprocess/__pycache__/popen_spawn_posix.cpython-310.pyc,, +multiprocess/__pycache__/popen_spawn_win32.cpython-310.pyc,, +multiprocess/__pycache__/process.cpython-310.pyc,, +multiprocess/__pycache__/queues.cpython-310.pyc,, +multiprocess/__pycache__/reduction.cpython-310.pyc,, +multiprocess/__pycache__/resource_sharer.cpython-310.pyc,, +multiprocess/__pycache__/resource_tracker.cpython-310.pyc,, +multiprocess/__pycache__/shared_memory.cpython-310.pyc,, 
+multiprocess/__pycache__/sharedctypes.cpython-310.pyc,, +multiprocess/__pycache__/spawn.cpython-310.pyc,, +multiprocess/__pycache__/synchronize.cpython-310.pyc,, +multiprocess/__pycache__/util.cpython-310.pyc,, +multiprocess/connection.py,sha256=TO9BbLVlLVjTjr0fP7lIumBgiLwaFVnpqMBgFG6iL9s,31843 +multiprocess/context.py,sha256=2fYvgfnu3B8wj8UyNndHUHgeuVDoVxlkFFKryycstaU,11610 +multiprocess/dummy/__init__.py,sha256=kSekDqD_NCy0FDg7XnxZSgW-Ldg1_iRr07sNwDajKpA,3061 +multiprocess/dummy/__pycache__/__init__.cpython-310.pyc,, +multiprocess/dummy/__pycache__/connection.cpython-310.pyc,, +multiprocess/dummy/connection.py,sha256=1j3Rl5_enBM-_kMO6HDmum3kPAoFE4Zs485HV5H-V6s,1598 +multiprocess/forkserver.py,sha256=hiltKfLImDYJyAcezNAgMDaQznB2LtYWgwre0QroLRg,12138 +multiprocess/heap.py,sha256=9rt5u5m5rkhJNfDWiCLpYDoWIt0LbElmx52yMqk7phQ,11626 +multiprocess/managers.py,sha256=Y5m_aCdLE4mSCuyVrwMWg5Nh9f4OdSHDlSajyOgyGao,47562 +multiprocess/pool.py,sha256=FTmtfoqkuN8Dd48f5TgdkokoxYN75xcnR78Hw-bLSng,32759 +multiprocess/popen_fork.py,sha256=Nvq5vVId24UfkOQxXhxZbcXuo8d6YMc409yRXAamTd0,2374 +multiprocess/popen_forkserver.py,sha256=SrEbV8Wv0Uu_UegkaW-cayXRdjTGXr560Yyy90pj-yE,2227 +multiprocess/popen_spawn_posix.py,sha256=l7XSWqR5UWiUSJh35qeSElLuNfUeEYwvH5HzKRnnyqg,2029 +multiprocess/popen_spawn_win32.py,sha256=A9uvlPmhO8JBzNcEU_Gmix2Q_qYJW1NXZgXPwtN5Ao0,4011 +multiprocess/process.py,sha256=GIIo2NiBsX1r_m0J1TcnbdeSulGLWHElRCuYRkkdgQ4,12083 +multiprocess/queues.py,sha256=sgXCXnIOVrPScqI3lwRD9t3IshqIBMEksLtouPH9Nzc,12139 +multiprocess/reduction.py,sha256=NQQ6KbDhmuAyaDeWaIarTZQokGPhcFda1poNnPm5uNc,9637 +multiprocess/resource_sharer.py,sha256=nEApLhMQqd8KunfaNKl3n8vdeiCGPxKrSL1Ja0nNAEk,5132 +multiprocess/resource_tracker.py,sha256=_D2iX4IWRe3dOwLoLjfCnXNbDAM4pRzA8qEMTcRfutw,9056 +multiprocess/shared_memory.py,sha256=UTAecHECIOHElP9Tg6yURCo4pKZiLy65TkASjEXeGus,18458 +multiprocess/sharedctypes.py,sha256=d-9SKRJHRlJJC331IxEoWOUXIeY9zxCbhWejXOmzGw0,6306 +multiprocess/spawn.py,sha256=cgtV66HhV_yIVzvdblc8bVdSpem16Ks0BOFu_bV5PDQ,9293 +multiprocess/synchronize.py,sha256=6q1ijwWyWLWLO8uUtaYT9MKepAYKfdzWPSEZGyJFP4s,11829 +multiprocess/tests/__init__.py,sha256=k00IjwhAUV_O1bp81895vN1gLnFzBM3iM-QTn5VrQnU,199087 +multiprocess/tests/__main__.py,sha256=RauIRQrO0HwRq_clLqbBk4gwo5Xw3-ASLuC029XaHeA,912 +multiprocess/tests/__pycache__/__init__.cpython-310.pyc,, +multiprocess/tests/__pycache__/__main__.cpython-310.pyc,, +multiprocess/tests/__pycache__/mp_fork_bomb.cpython-310.pyc,, +multiprocess/tests/__pycache__/mp_preload.cpython-310.pyc,, +multiprocess/tests/__pycache__/test_multiprocessing_fork.cpython-310.pyc,, +multiprocess/tests/__pycache__/test_multiprocessing_forkserver.cpython-310.pyc,, +multiprocess/tests/__pycache__/test_multiprocessing_main_handling.cpython-310.pyc,, +multiprocess/tests/__pycache__/test_multiprocessing_spawn.cpython-310.pyc,, +multiprocess/tests/mp_fork_bomb.py,sha256=6ADOEzh1aXHZ21aOGoBPhKcgB5sj15G9tQVgSc6GrlY,448 +multiprocess/tests/mp_preload.py,sha256=1-WvLFMaPoH-vZbpUaJvvZHFxTpA9tgmct2vblQy99M,365 +multiprocess/tests/test_multiprocessing_fork.py,sha256=ue1SQLJFxm1oc_3F2gR_WRtt39jhaj0l_Ht6Y1MBmFo,476 +multiprocess/tests/test_multiprocessing_forkserver.py,sha256=VFlUuZI60gyRbNxfHWDlgmy3zm-dPTldLWuKQZ8KObs,391 +multiprocess/tests/test_multiprocessing_main_handling.py,sha256=mtmN0K-spqZCcZVNLf_HrhP186-knpY6eaoFonL1U4U,12018 +multiprocess/tests/test_multiprocessing_spawn.py,sha256=2UAisJX58GZCbYuDFay_x97R9akhzzjIA4VuUUzITOY,276 +multiprocess/util.py,sha256=OPI3CZ34BNwwwa7AqW-eGhnuSUsu-ozy2NRU8BYKuwg,14012 
diff --git a/parrot/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/REQUESTED b/parrot/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/parrot/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/WHEEL b/parrot/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..6d6b3dada5abf9570af11d88980a1299e01af0ad
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.37.1)
+Root-Is-Purelib: true
+Tag: py310-none-any
+
diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/BatchLinearAlgebra.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/BatchLinearAlgebra.h
new file mode 100644
index 0000000000000000000000000000000000000000..58d46aacd473147ec42ab03d90ddec77d3e7245d
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/BatchLinearAlgebra.h
@@ -0,0 +1,321 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+
+// Forward declare TI
+namespace at {
+class Tensor;
+struct TensorIterator;
+
+namespace native {
+enum class TransposeType;
+}
+
+}
+
+namespace at::native {
+
+enum class LapackLstsqDriverType : int64_t { Gels, Gelsd, Gelsy, Gelss};
+
+#if AT_BUILD_WITH_LAPACK()
+// Define per-batch functions to be used in the implementation of batched
+// linear algebra operations
+
+template <class scalar_t>
+void lapackCholesky(char uplo, int n, scalar_t *a, int lda, int *info);
+
+template <class scalar_t>
+void lapackCholeskyInverse(char uplo, int n, scalar_t *a, int lda, int *info);
+
+template <class scalar_t, class value_t = scalar_t>
+void lapackEig(char jobvl, char jobvr, int n, scalar_t *a, int lda, scalar_t *w, scalar_t* vl, int ldvl, scalar_t *vr, int ldvr, scalar_t *work, int lwork, value_t *rwork, int *info);
+
+template <class scalar_t>
+void lapackGeqrf(int m, int n, scalar_t *a, int lda, scalar_t *tau, scalar_t *work, int lwork, int *info);
+
+template <class scalar_t>
+void lapackOrgqr(int m, int n, int k, scalar_t *a, int lda, scalar_t *tau, scalar_t *work, int lwork, int *info);
+
+template <class scalar_t>
+void lapackOrmqr(char side, char trans, int m, int n, int k, scalar_t *a, int lda, scalar_t *tau, scalar_t *c, int ldc, scalar_t *work, int lwork, int *info);
+
+template <class scalar_t, class value_t = scalar_t>
+void lapackSyevd(char jobz, char uplo, int n, scalar_t* a, int lda, value_t* w, scalar_t* work, int lwork, value_t* rwork, int lrwork, int* iwork, int liwork, int* info);
+
+template <class scalar_t>
+void lapackGels(char trans, int m, int n, int nrhs,
+    scalar_t *a, int lda, scalar_t *b, int ldb,
+    scalar_t *work, int lwork, int *info);
+
+template <class scalar_t, class value_t = scalar_t>
+void lapackGelsd(int m, int n, int nrhs,
+    scalar_t *a, int lda, scalar_t *b, int ldb,
+    value_t *s, value_t rcond, int *rank,
+    scalar_t* work, int lwork,
+    value_t *rwork, int* iwork, int *info);
+
+template <class scalar_t, class value_t = scalar_t>
+void lapackGelsy(int m, int n, int nrhs,
+    scalar_t *a, int lda, scalar_t *b, int ldb,
+    int *jpvt, value_t rcond, int *rank,
+    scalar_t *work, int lwork, value_t* rwork, int *info);
+
+template <class scalar_t, class value_t = scalar_t>
+void lapackGelss(int m, int n, int nrhs,
+    scalar_t *a, int lda, scalar_t *b, int ldb,
+    value_t *s, value_t rcond, int *rank,
+    scalar_t *work, int lwork,
+    value_t *rwork, int *info);
+
+template <LapackLstsqDriverType, class scalar_t, class value_t>
+struct lapackLstsq_impl;
+
+template <class scalar_t, class value_t>
+struct lapackLstsq_impl<LapackLstsqDriverType::Gels, scalar_t, value_t> {
+  static void call(
+      char trans, int m, int n, int nrhs,
+      scalar_t *a, int lda, scalar_t *b, int ldb,
+      scalar_t *work, int lwork, int *info, // Gels flavor
+      int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor
+      value_t *s, // Gelss flavor
+      int *iwork // Gelsd flavor
+      ) {
+    lapackGels<scalar_t>(
+        trans, m, n, nrhs,
+        a, lda, b, ldb,
+        work, lwork, info);
+  }
+};
+
+template <class scalar_t, class value_t>
+struct lapackLstsq_impl<LapackLstsqDriverType::Gelsy, scalar_t, value_t> {
+  static void call(
+      char trans, int m, int n, int nrhs,
+      scalar_t *a, int lda, scalar_t *b, int ldb,
+      scalar_t *work, int lwork, int *info, // Gels flavor
+      int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor
+      value_t *s, // Gelss flavor
+      int *iwork // Gelsd flavor
+      ) {
+    lapackGelsy<scalar_t, value_t>(
+        m, n, nrhs,
+        a, lda, b, ldb,
+        jpvt, rcond, rank,
+        work, lwork, rwork, info);
+  }
+};
+
+template <class scalar_t, class value_t>
+struct lapackLstsq_impl<LapackLstsqDriverType::Gelsd, scalar_t, value_t> {
+  static void call(
+      char trans, int m, int n, int nrhs,
+      scalar_t *a, int lda, scalar_t *b, int ldb,
+      scalar_t *work, int lwork, int *info, // Gels flavor
+      int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor
+      value_t *s, // Gelss flavor
+      int *iwork // Gelsd flavor
+      ) {
+    lapackGelsd<scalar_t, value_t>(
+        m, n, nrhs,
+        a, lda, b, ldb,
+        s, rcond, rank,
+        work, lwork,
+        rwork, iwork, info);
+  }
+};
+
+template <class scalar_t, class value_t>
+struct lapackLstsq_impl<LapackLstsqDriverType::Gelss, scalar_t, value_t> {
+  static void call(
+      char trans, int m, int n, int nrhs,
+      scalar_t *a, int lda, scalar_t *b, int ldb,
+      scalar_t *work, int lwork, int *info, // Gels flavor
+      int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor
+      value_t *s, // Gelss flavor
+      int *iwork // Gelsd flavor
+      ) {
+    lapackGelss<scalar_t, value_t>(
+        m, n, nrhs,
+        a, lda, b, ldb,
+        s, rcond, rank,
+        work, lwork,
+        rwork, info);
+  }
+};
+
+template <LapackLstsqDriverType driver_type, class scalar_t, class value_t = scalar_t>
+void lapackLstsq(
+    char trans, int m, int n, int nrhs,
+    scalar_t *a, int lda, scalar_t *b, int ldb,
+    scalar_t *work, int lwork, int *info, // Gels flavor
+    int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor
+    value_t *s, // Gelss flavor
+    int *iwork // Gelsd flavor
+    ) {
+  lapackLstsq_impl<driver_type, scalar_t, value_t>::call(
+      trans, m, n, nrhs,
+      a, lda, b, ldb,
+      work, lwork, info,
+      jpvt, rcond, rank, rwork,
+      s,
+      iwork);
+}
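+
+// Illustrative call sketch: the first template argument selects the
+// LAPACK driver at compile time, e.g. the SVD-based ?gelsd least-squares
+// driver for doubles (arguments here are placeholders):
+//
+//   lapackLstsq<LapackLstsqDriverType::Gelsd, double>(
+//       'N', m, n, nrhs, a, lda, b, ldb,
+//       work, lwork, &info,
+//       nullptr, rcond, &rank, rwork,  // jpvt is only used by gelsy
+//       s, iwork);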
+
+template <class scalar_t>
+void lapackLuSolve(char trans, int n, int nrhs, scalar_t *a, int lda, int *ipiv, scalar_t *b, int ldb, int *info);
+
+template <class scalar_t>
+void lapackLu(int m, int n, scalar_t *a, int lda, int *ipiv, int *info);
+
+template <class scalar_t>
+void lapackLdlHermitian(
+    char uplo,
+    int n,
+    scalar_t* a,
+    int lda,
+    int* ipiv,
+    scalar_t* work,
+    int lwork,
+    int* info);
+
+template <class scalar_t>
+void lapackLdlSymmetric(
+    char uplo,
+    int n,
+    scalar_t* a,
+    int lda,
+    int* ipiv,
+    scalar_t* work,
+    int lwork,
+    int* info);
+
+template <class scalar_t>
+void lapackLdlSolveHermitian(
+    char uplo,
+    int n,
+    int nrhs,
+    scalar_t* a,
+    int lda,
+    int* ipiv,
+    scalar_t* b,
+    int ldb,
+    int* info);
+
+template <class scalar_t>
+void lapackLdlSolveSymmetric(
+    char uplo,
+    int n,
+    int nrhs,
+    scalar_t* a,
+    int lda,
+    int* ipiv,
+    scalar_t* b,
+    int ldb,
+    int* info);
+
+template <class scalar_t, class value_t = scalar_t>
+void lapackSvd(char jobz, int m, int n, scalar_t *a, int lda, value_t *s, scalar_t *u, int ldu, scalar_t *vt, int ldvt, scalar_t *work, int lwork, value_t *rwork, int *iwork, int *info);
+#endif
+
+#if AT_BUILD_WITH_BLAS()
+template <class scalar_t>
+void blasTriangularSolve(char side, char uplo, char trans, char diag, int n, int nrhs, scalar_t* a, int lda, scalar_t* b, int ldb);
+#endif
+
+using cholesky_fn = void (*)(const Tensor& /*input*/, const Tensor& /*info*/, bool /*upper*/);
+DECLARE_DISPATCH(cholesky_fn, cholesky_stub);
+
+using cholesky_inverse_fn = Tensor& (*)(Tensor& /*result*/, Tensor& /*infos*/, bool /*upper*/);
+
+DECLARE_DISPATCH(cholesky_inverse_fn, cholesky_inverse_stub);
+
+using linalg_eig_fn = void (*)(Tensor& /*eigenvalues*/, Tensor& /*eigenvectors*/, Tensor& /*infos*/, const Tensor& /*input*/, bool /*compute_eigenvectors*/);
+
+DECLARE_DISPATCH(linalg_eig_fn, linalg_eig_stub);
+
+using geqrf_fn = void (*)(const Tensor& /*input*/, const Tensor& /*tau*/);
+DECLARE_DISPATCH(geqrf_fn, geqrf_stub);
+
+using orgqr_fn = Tensor& (*)(Tensor& /*result*/, const Tensor& /*tau*/);
+DECLARE_DISPATCH(orgqr_fn, orgqr_stub);
+
+using ormqr_fn = void (*)(const Tensor& /*input*/, const Tensor& /*tau*/, const Tensor& /*other*/, bool /*left*/, bool /*transpose*/);
+DECLARE_DISPATCH(ormqr_fn, ormqr_stub);
+
+using linalg_eigh_fn = void (*)(
+    const Tensor& /*eigenvalues*/,
+    const Tensor& /*eigenvectors*/,
+    const Tensor& /*infos*/,
+    bool /*upper*/,
+    bool /*compute_eigenvectors*/);
+DECLARE_DISPATCH(linalg_eigh_fn, linalg_eigh_stub);
+
+using lstsq_fn = void (*)(
+    const Tensor& /*a*/,
+    Tensor& /*b*/,
+    Tensor& /*rank*/,
+    Tensor& /*singular_values*/,
+    Tensor& /*infos*/,
+    double /*rcond*/,
+    std::string /*driver_name*/);
+DECLARE_DISPATCH(lstsq_fn, lstsq_stub);
+
+using triangular_solve_fn = void (*)(
+    const Tensor& /*A*/,
+    const Tensor& /*B*/,
+    bool /*left*/,
+    bool /*upper*/,
+    TransposeType /*transpose*/,
+    bool /*unitriangular*/);
+DECLARE_DISPATCH(triangular_solve_fn, triangular_solve_stub);
+
+using lu_factor_fn = void (*)(
+    const Tensor& /*input*/,
+    const Tensor& /*pivots*/,
+    const Tensor& /*infos*/,
+    bool /*compute_pivots*/);
+DECLARE_DISPATCH(lu_factor_fn, lu_factor_stub);
+
+using unpack_pivots_fn = void(*)(
+    TensorIterator& iter,
+    const int64_t dim_size,
+    const int64_t max_pivot);
+DECLARE_DISPATCH(unpack_pivots_fn, unpack_pivots_stub);
+
+using lu_solve_fn = void (*)(
+    const Tensor& /*LU*/,
+    const Tensor& /*pivots*/,
+    const Tensor& /*B*/,
+    TransposeType /*trans*/);
+DECLARE_DISPATCH(lu_solve_fn, lu_solve_stub);
+
+using ldl_factor_fn = void (*)(
+    const Tensor& /*LD*/,
+    const Tensor& /*pivots*/,
+    const Tensor& /*info*/,
+    bool /*upper*/,
+    bool /*hermitian*/);
+DECLARE_DISPATCH(ldl_factor_fn, ldl_factor_stub);
+
+using svd_fn = void (*)(
+    const Tensor& /*A*/,
+    const bool /*full_matrices*/,
+    const bool /*compute_uv*/,
+    const std::optional<c10::string_view>& /*driver*/,
+    const Tensor& /*U*/,
+    const Tensor& /*S*/,
+    const Tensor& /*Vh*/,
+    const Tensor& /*info*/);
+DECLARE_DISPATCH(svd_fn, svd_stub);
+
+using ldl_solve_fn = void (*)(
+    const Tensor& /*LD*/,
+    const Tensor& /*pivots*/,
+    const Tensor& /*result*/,
+    bool /*upper*/,
+    bool /*hermitian*/);
+DECLARE_DISPATCH(ldl_solve_fn, ldl_solve_stub);
+} // namespace at::native
diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/BinaryOps.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/BinaryOps.h
new file mode 100644
index 0000000000000000000000000000000000000000..8f3f8bcb7e68fb5f8cb77ffd003accd83801f0a8
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/BinaryOps.h
@@ -0,0 +1,119 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+
+
+namespace at {
+struct TensorIterator;
+struct TensorIteratorBase;
+}
+
+namespace at::native {
+
+inline void alpha_check(const ScalarType dtype, const Scalar& alpha) {
alpha.isBoolean() || dtype == ScalarType::Bool, + "Boolean alpha only supported for Boolean results."); + TORCH_CHECK(isFloatingType(dtype) || isComplexType(dtype) + || alpha.isIntegral(true), + "For integral input tensors, argument alpha must not be a floating point number."); + TORCH_CHECK(isComplexType(dtype) || !alpha.isComplex(), + "For non-complex input tensors, argument alpha must not be a complex number.") +} + +// Basic checking for all sub functions. +inline void sub_check(const TensorBase& self, const TensorBase& other) { + TORCH_CHECK(self.scalar_type() != kBool || other.scalar_type() != kBool, + "Subtraction, the `-` operator, with two bool tensors is not supported. " + "Use the `^` or `logical_xor()` operator instead.") + TORCH_CHECK(self.scalar_type() != kBool && other.scalar_type() != kBool, + "Subtraction, the `-` operator, with a bool tensor is not supported. " + "If you are trying to invert a mask, use the `~` or `logical_not()` operator instead."); +} + +inline void sub_check(const TensorBase& self, const Scalar& scalar) { + TORCH_CHECK(self.scalar_type() != kBool || !scalar.isBoolean(), + "Subtraction, the `-` operator, with two bool tensors is not supported. " + "Use the `^` or `logical_xor()` operator instead.") + TORCH_CHECK(self.scalar_type() != kBool && !scalar.isBoolean(), + "Subtraction, the `-` operator, with a bool tensor is not supported. " + "If you are trying to invert a mask, use the `~` or `logical_not()` operator instead."); +} + +using structured_binary_fn_alpha = void(*)(TensorIteratorBase&, const Scalar& alpha); +using structured_binary_fn_double = void(*)(TensorIteratorBase&, double); +using structured_binary_fn = void(*)(TensorIteratorBase&); + +using binary_fn_alpha = void(*)(TensorIteratorBase&, const Scalar& alpha); +using binary_fn_double = void(*)(TensorIterator&, double); +using binary_fn = void(*)(TensorIterator&); +using binary_clamp_fn_alpha = + void(*)(TensorIterator&, const Scalar& alpha, const Scalar& min_val, const Scalar& max_val); + +// NB: codegenned +DECLARE_DISPATCH(structured_binary_fn_alpha, add_stub); + +DECLARE_DISPATCH(binary_clamp_fn_alpha, add_clamp_stub); +DECLARE_DISPATCH(structured_binary_fn_alpha, sub_stub); +DECLARE_DISPATCH(structured_binary_fn, mul_stub); +DECLARE_DISPATCH(structured_binary_fn, div_true_stub); +DECLARE_DISPATCH(structured_binary_fn, div_floor_stub); +DECLARE_DISPATCH(structured_binary_fn, div_trunc_stub); +DECLARE_DISPATCH(structured_binary_fn, atan2_stub); +DECLARE_DISPATCH(structured_binary_fn, remainder_stub); +DECLARE_DISPATCH(structured_binary_fn, bitwise_and_stub); +DECLARE_DISPATCH(structured_binary_fn, bitwise_or_stub); +DECLARE_DISPATCH(structured_binary_fn, bitwise_xor_stub); +DECLARE_DISPATCH(structured_binary_fn, lshift_stub); +DECLARE_DISPATCH(structured_binary_fn, rshift_stub); +DECLARE_DISPATCH(binary_fn, logical_xor_stub); +DECLARE_DISPATCH(binary_fn, logical_and_stub); +DECLARE_DISPATCH(binary_fn, logical_or_stub); +DECLARE_DISPATCH(structured_binary_fn, lt_stub); +DECLARE_DISPATCH(structured_binary_fn, le_stub); +DECLARE_DISPATCH(structured_binary_fn, gt_stub); +DECLARE_DISPATCH(structured_binary_fn, ge_stub); +DECLARE_DISPATCH(structured_binary_fn, eq_stub); +DECLARE_DISPATCH(structured_binary_fn, ne_stub); +DECLARE_DISPATCH(binary_fn, max_elementwise_stub); +DECLARE_DISPATCH(binary_fn, min_elementwise_stub); +DECLARE_DISPATCH(structured_binary_fn, maximum_stub); +DECLARE_DISPATCH(structured_binary_fn, minimum_stub); +DECLARE_DISPATCH(structured_binary_fn, fmax_stub); 
+DECLARE_DISPATCH(structured_binary_fn, fmin_stub); +DECLARE_DISPATCH(structured_binary_fn_double, smooth_l1_stub); +DECLARE_DISPATCH(binary_fn_double, huber_stub); +DECLARE_DISPATCH(structured_binary_fn, sigmoid_backward_stub); +DECLARE_DISPATCH(binary_fn_alpha, logit_backward_stub); +DECLARE_DISPATCH(structured_binary_fn, tanh_backward_stub); +DECLARE_DISPATCH(structured_binary_fn, mse_stub); +DECLARE_DISPATCH(structured_binary_fn, fmod_stub); +DECLARE_DISPATCH(structured_binary_fn, logaddexp_stub); +DECLARE_DISPATCH(structured_binary_fn, logaddexp2_stub); +DECLARE_DISPATCH(structured_binary_fn, gcd_stub); +DECLARE_DISPATCH(structured_binary_fn, lcm_stub); +DECLARE_DISPATCH(structured_binary_fn, hypot_stub); +DECLARE_DISPATCH(structured_binary_fn, igamma_stub); +DECLARE_DISPATCH(structured_binary_fn, igammac_stub); +DECLARE_DISPATCH(structured_binary_fn, nextafter_stub); +DECLARE_DISPATCH(structured_binary_fn, heaviside_stub); +DECLARE_DISPATCH(structured_binary_fn, copysign_stub); +DECLARE_DISPATCH(structured_binary_fn, xlogy_stub); +DECLARE_DISPATCH(structured_binary_fn, xlog1py_stub); +DECLARE_DISPATCH(structured_binary_fn, zeta_stub); +DECLARE_DISPATCH(structured_binary_fn, chebyshev_polynomial_t_stub); +DECLARE_DISPATCH(structured_binary_fn, chebyshev_polynomial_u_stub); +DECLARE_DISPATCH(structured_binary_fn, chebyshev_polynomial_v_stub); +DECLARE_DISPATCH(structured_binary_fn, chebyshev_polynomial_w_stub); +DECLARE_DISPATCH(structured_binary_fn, hermite_polynomial_h_stub); +DECLARE_DISPATCH(structured_binary_fn, hermite_polynomial_he_stub); +DECLARE_DISPATCH(structured_binary_fn, laguerre_polynomial_l_stub); +DECLARE_DISPATCH(structured_binary_fn, legendre_polynomial_p_stub); +DECLARE_DISPATCH(structured_binary_fn, shifted_chebyshev_polynomial_t_stub); +DECLARE_DISPATCH(structured_binary_fn, shifted_chebyshev_polynomial_u_stub); +DECLARE_DISPATCH(structured_binary_fn, shifted_chebyshev_polynomial_v_stub); +DECLARE_DISPATCH(structured_binary_fn, shifted_chebyshev_polynomial_w_stub); + +} // namespace at::native diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/ComplexHelper.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/ComplexHelper.h new file mode 100644 index 0000000000000000000000000000000000000000..1b09350aef6ebbef4bd339cdf911b2a68991a17c --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/ComplexHelper.h @@ -0,0 +1,97 @@ +#pragma once + +#include +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include +#include + +#include +#endif + +// WARNING: this header contains non-inline functions and should be only +// included from ONE cpp file + +namespace at::native { + +// View tensor with new dtype, storage offset, sizes and strides +inline Tensor view_tensor( + const Tensor &tensor, ScalarType dtype, + c10::SymInt offset, SymIntArrayRef sizes, SymIntArrayRef strides) { + Storage storage = tensor.storage(); + auto key_set = tensor.key_set().remove(DispatchKey::Conjugate); + auto new_tensor = detail::make_tensor( + c10::TensorImpl::VIEW, std::move(storage), key_set, scalarTypeToTypeMeta(dtype)); + auto * impl = new_tensor.unsafeGetTensorImpl(); + impl->set_sizes_and_strides(sizes, strides, offset); + return new_tensor; +} + +inline SymDimVector computeStrideForViewAsReal(SymIntArrayRef oldstride) { + SymDimVector res(oldstride.size() + 1); + for (const auto i : c10::irange(oldstride.size())) { + res[i] = oldstride[i] * 2; + } + res.back() = 1; + return res; +} + +inline Tensor 
_view_as_real_physical(const Tensor& self) { + TORCH_CHECK(self.is_complex(), "view_as_real is only supported for complex tensors"); + auto old_sizes = self.sym_sizes(); + SymDimVector new_sizes(old_sizes.size() + 1); + std::copy(old_sizes.begin(), old_sizes.end(), new_sizes.begin()); + // last dimension will always have two elements containing the real and imag vals + new_sizes.back() = 2; + auto new_strides = computeStrideForViewAsReal(self.sym_strides()); + auto new_storage_offset = self.sym_storage_offset() * 2; + const auto float_type = c10::toRealValueType(self.scalar_type()); + auto real_tensor = view_tensor(self, float_type, std::move(new_storage_offset), new_sizes, new_strides); + return real_tensor; +} + +// expects as input a complex tensor and returns back a tensor +// with corresponding real dtype containing the complex values +// in the last two dimensions +Tensor view_as_real(const Tensor& self) { + TORCH_CHECK(!self.is_conj(), "view_as_real doesn't work on unresolved conjugated tensors. To resolve the conjugate tensor so you can view it as real, use self.resolve_conj(); however, be warned that the resulting tensor will NOT alias the original."); + return _view_as_real_physical(self); +} + +inline SymDimVector computeStrideForViewAsComplex(SymIntArrayRef oldstride) { + const auto dim = oldstride.size(); + TORCH_CHECK(dim > 0 && oldstride[dim - 1] == 1, "Tensor must have a last dimension with stride 1"); + + SymDimVector res(dim - 1); + for (const auto i : c10::irange(res.size())) { + TORCH_CHECK(oldstride[i] % 2 == 0, "Tensor must have a stride divisible by 2 for all but last dimension"); + res[i] = oldstride[i] / 2; + } + return res; +} + +// expects as input a float or double tensor with last dimension of size 2 +// and returns back a tensor with corresponding complex dtype +Tensor view_as_complex(const Tensor& self) { + TORCH_CHECK( + self.scalar_type() == kFloat || self.scalar_type() == kDouble || self.scalar_type() == kHalf, + "view_as_complex is only supported for half, float and double tensors, but got a tensor of scalar type: ", self.scalar_type()); + + auto old_sizes = self.sym_sizes(); + TORCH_CHECK(!old_sizes.empty(), "Input tensor must have one or more dimensions"); + TORCH_CHECK(old_sizes[old_sizes.size()-1] == 2, "Tensor must have a last dimension of size 2"); + SymDimVector new_sizes(old_sizes.begin(), old_sizes.end() - 1); + + const auto new_strides = computeStrideForViewAsComplex(self.sym_strides()); + const auto complex_type = c10::toComplexType(self.scalar_type()); + + TORCH_CHECK(self.sym_storage_offset() % 2 == 0, "Tensor must have a storage_offset divisible by 2"); + const auto new_storage_offset = self.sym_storage_offset() / 2; + + return view_tensor(self, complex_type, new_storage_offset, new_sizes, new_strides); +} + +} // namespace at::native diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/CompositeRandomAccessorCommon.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/CompositeRandomAccessorCommon.h new file mode 100644 index 0000000000000000000000000000000000000000..9111c3515afcefec2d81a261737ec28bcae00cdc --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/CompositeRandomAccessorCommon.h @@ -0,0 +1,263 @@ +#include + +#pragma once + +namespace at::native { + +namespace { + +// operator_brackets_proxy is used in +// CompositeRandomAccessor in place of operator[]. 
+// For some iterators, references returned by operator[] +// could become invalid, operator_brackets_proxy tries to +// resolve that by making accessor[n] to be equivalent to +// *(accessor + n). +template +class operator_brackets_proxy { + using reference = typename std::iterator_traits::reference; + using value_type = typename std::iterator_traits::value_type; + +public: + C10_HOST_DEVICE + operator_brackets_proxy(Accessor const& accessor) + : accessor(accessor) + {} + + C10_HOST_DEVICE + operator reference() { + return *accessor; + } + + C10_HOST_DEVICE + reference operator*() { + return *accessor; + } + + C10_HOST_DEVICE + operator_brackets_proxy& operator=(value_type const& val) { + *accessor = val; + return *this; + } + +private: + Accessor accessor; +}; + +} + +// references_holder is used as a surrogate for the +// references type from std::iterator_traits in CompositeRandomAccessor. +// It is assumed in CompositeRandomAccessor that +// References = tuple, +// Values = tuple by default, +// but they could be anything as long as References could be +// cast to Values. +// If you plan to use it with STL, for example, you will need to +// define 'swap` and `get`(aka std::get) methods. +template +class references_holder { +public: + using values = Values; + using references = References; + + C10_HOST_DEVICE + references_holder(references refs) + : refs{std::move(refs)} + {} + + C10_HOST_DEVICE + operator references() { + return refs; + } + + C10_HOST_DEVICE + operator values() { + return refs; + } + + C10_HOST_DEVICE + references_holder& operator=(values vals) { + refs = vals; + return *this; + } + + C10_HOST_DEVICE + references& data() { + return refs; + } + +protected: + references refs; +}; + +// CompositeRandomAccessor is essentially a simplified version of +// a random access iterator over two random access iterators. +// TupleInfo should contain a variadic type `tuple`, and a method `tie`, +// which constructs a tuple of references from a variadic list of arguments. +template +class CompositeRandomAccessor { + using self_type = CompositeRandomAccessor; + + using key_accessor_value_type = + typename std::iterator_traits::value_type; + using value_accessor_value_type = + typename std::iterator_traits::value_type; + using key_accessor_reference_type = + typename std::iterator_traits::reference; + using value_accessor_reference_type = + typename std::iterator_traits::reference; + + using composite_value_type = typename TupleInfo::template tuple< + key_accessor_value_type, + value_accessor_value_type>; + using composite_reference = typename TupleInfo::template tuple< + key_accessor_reference_type, + value_accessor_reference_type>; + +public: + using value_type = composite_value_type; + using reference = references_holder; + // Note that CompositeRandomAccessor does not hold key and values + // in a specific datastructure, which means that a pointer to a (key, value) + // is not defined. Hence we just use a pointer type of the KeyAccessor. 
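+  //
+  // As an illustrative sketch only (StdTupleInfo below is a hypothetical
+  // TupleInfo whose `tuple` is std::tuple and whose `tie` forwards to
+  // std::tie, and is not part of this header): two parallel arrays can be
+  // sorted together by key, provided `swap`/`get` are defined for
+  // references_holder as described above:
+  //
+  //   int keys[] = {3, 1, 2};
+  //   double vals[] = {30.0, 10.0, 20.0};
+  //   CompositeRandomAccessor<int*, double*, StdTupleInfo> first(keys, vals);
+  //   std::sort(first, first + 3);  // keys -> {1, 2, 3}, vals -> {10, 20, 30}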
+ using pointer = typename std::iterator_traits::pointer; + using difference_type = typename std::iterator_traits::difference_type; + using iterator_category = std::random_access_iterator_tag; + + C10_HOST_DEVICE + CompositeRandomAccessor() = default; + + C10_HOST_DEVICE + CompositeRandomAccessor(KeyAccessor keys, ValueAccessor values) + : keys(keys), values(values) + {} + + // Pointer-like operations { + C10_HOST_DEVICE + reference operator*() const { + return TupleInfo::tie(*keys, *values); + } + + // operator->() is supposed to return a pointer type. + // Since CompositeRandomAccessor does not hold pointers to pairs, + // we just return a pointer to a key. + C10_HOST_DEVICE + auto* operator->() const { + return keys.operator->(); + } + + C10_HOST_DEVICE + reference operator[](difference_type idx) { + return operator_brackets_proxy( + CompositeRandomAccessor(keys + idx, values + idx) + ); + } + // } + + // Prefix/postfix increment/decrement { + C10_HOST_DEVICE + CompositeRandomAccessor& operator++() { + ++keys; + ++values; + return *this; + } + + C10_HOST_DEVICE + CompositeRandomAccessor operator++(int) { + CompositeRandomAccessor copy(*this); + ++*this; + return copy; + } + + C10_HOST_DEVICE + CompositeRandomAccessor& operator--() { + --keys; + --values; + return *this; + } + + C10_HOST_DEVICE + CompositeRandomAccessor operator--(int) { + CompositeRandomAccessor copy(*this); + --*this; + return copy; + } + // } + + // Arithmetic operations { + C10_HOST_DEVICE + CompositeRandomAccessor& operator+=(difference_type offset) { + keys += offset; + values += offset; + return *this; + } + + C10_HOST_DEVICE + CompositeRandomAccessor operator+(difference_type offset) const { + return CompositeRandomAccessor(keys + offset, values + offset); + } + + C10_HOST_DEVICE + friend CompositeRandomAccessor operator+( + difference_type offset, + const CompositeRandomAccessor& accessor + ) { + return accessor + offset; + } + + C10_HOST_DEVICE + CompositeRandomAccessor& operator-=(difference_type offset) { + keys -= offset; + values -= offset; + return *this; + } + + C10_HOST_DEVICE + CompositeRandomAccessor operator-(difference_type offset) const { + return CompositeRandomAccessor(keys - offset, values - offset); + } + + C10_HOST_DEVICE + difference_type operator-(const CompositeRandomAccessor& other) const { + return keys - other.keys; + } + // } + + // Comparison operators { + C10_HOST_DEVICE + bool operator==(const CompositeRandomAccessor& other) const { + return keys == other.keys; + } + + C10_HOST_DEVICE + bool operator!=(const CompositeRandomAccessor& other) const { + return keys != other.keys; + } + + C10_HOST_DEVICE + bool operator<(const CompositeRandomAccessor& other) const { + return keys < other.keys; + } + + C10_HOST_DEVICE + bool operator<=(const CompositeRandomAccessor& other) const { + return keys <= other.keys; + } + + C10_HOST_DEVICE + bool operator>(const CompositeRandomAccessor& other) const { + return keys > other.keys; + } + + C10_HOST_DEVICE + bool operator>=(const CompositeRandomAccessor& other) const { + return keys >= other.keys; + } + // } + +protected: + KeyAccessor keys; + ValueAccessor values; +}; + +} // namespace at::native diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/Cross.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/Cross.h new file mode 100644 index 0000000000000000000000000000000000000000..9daee7f2d6c43586630ad38ac731b22ce2416ff3 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/Cross.h @@ -0,0 
+1,14 @@ +#pragma once + +#include + +namespace at { +class Tensor; + +namespace native { + +using cross_fn = void(*)(const Tensor&, const Tensor&, const Tensor&, const int64_t d); + +DECLARE_DISPATCH(cross_fn, cross_stub); + +}} // namespace at::native diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/DilatedConvolutionUtils.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/DilatedConvolutionUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..cd580020374a66aa058938e1186fbfd577a76980 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/DilatedConvolutionUtils.h @@ -0,0 +1,229 @@ +#pragma once + +#include +#include + +#include +#include +#include + +#define TORCH_CHECK_DIM_SIZE(T, DIM, DIM_SIZE, SIZE) \ + TORCH_CHECK( \ + T.dim() == DIM && T.size(DIM_SIZE) == SIZE, \ + "Need " #T " of dimension ", \ + DIM, \ + " and " #T ".size[", \ + DIM_SIZE, \ + "] == ", \ + SIZE, \ + " but got input to be of shape ", \ + T.sizes()) + +namespace at::native::internal { +namespace { +inline bool all_positive(IntArrayRef& arr) { + return std::all_of( + arr.begin(), arr.end(), [](int64_t item) { return item > 0; }); +} + +inline bool all_nonnegative(std::vector& arr) { + return std::all_of( + arr.begin(), arr.end(), [](int64_t item) { return item >= 0; }); +} + +} // namespace + +// calculate the rear part of output tensor sizes +template +std::vector get_output_size( + const Tensor& input, + IntArrayRef kernel_size, + IntArrayRef stride_size, + IntArrayRef pad_size, + IntArrayRef dilation_size) { + std::vector sizes; + for (const auto index : c10::irange(dim)) { + sizes.push_back( + div_rtn( + input.size(index + input.dim() - dim) + 2 * pad_size[index] - + (dilation_size[index] * (kernel_size[index] - 1) + 1), + stride_size[index]) + + 1); + } + return sizes; +} + +// calculate the sizes of output tensor +template +std::vector get_output_size( + const Tensor& input, + const Tensor& weight, + IntArrayRef kernel_size, + IntArrayRef stride_size, + IntArrayRef pad_size, + IntArrayRef dilation_size) { + auto output_size = get_output_size( + input, kernel_size, stride_size, pad_size, dilation_size); + output_size.insert(output_size.begin(), weight.size(0)); + if (input.dim() == dim + 2) { + output_size.insert(output_size.begin(), input.size(0)); + } + return output_size; +} +/* + slow_conv_dilated_shape_check - check user-input to dilated convolution + forward and backward functions. +*/ +template +void slow_conv_dilated_shape_check( + const Tensor& input, + const Tensor& weight, + const Tensor& bias, + const Tensor& grad_output, + IntArrayRef kernel_size, + IntArrayRef stride_size, + IntArrayRef pad_size, + IntArrayRef dilation_size) { + /* + When the following tensors are defined: + + bias, grad_weight, grad_output + + then these are assumed to be contiguous without checking + because of these tensors are made contiguous by calling + .contiguous() method or by resizing of zero-sized tensors in + forward/backward functions. + + When grad_weight is defined then it is assumed without + checking to have the same shape as weight, see backward + functions. 
+ */ + // Check size arguments + TORCH_CHECK( + kernel_size.size() == dim, + "kernel sizes length should be ", + dim, + ", but got ", + kernel_size.size()); + TORCH_CHECK( + stride_size.size() == dim, + "strides length should be ", + dim, + ", but got ", + stride_size.size()); + TORCH_CHECK( + dilation_size.size() == dim, + "dilations length should be ", + dim, + ", but got ", + dilation_size.size()); + TORCH_CHECK( + pad_size.size() == dim, + "pads length should be ", + dim, + ", but got ", + pad_size.size()); + + TORCH_CHECK( + all_positive(kernel_size), + "kernel size should be greater than zero, but got ", + kernel_size); + TORCH_CHECK( + all_positive(stride_size), + "stride should be greater than zero, but got ", + stride_size); + TORCH_CHECK( + all_positive(dilation_size), + "dilation should be greater than zero, but got ", + dilation_size); + + // check input + TORCH_CHECK(input.defined(), "input must be defined"); + bool is_batch = input.dim() == dim + 2; + int64_t n = (is_batch ? 2 : 1); + int64_t ndim = n + dim; + if (!is_batch) { + // input dim has to be dim + 1 if not batched + TORCH_CHECK( + input.dim() == dim + 1, + "input must be 4D or 5D tensor but got ", + input.dim(), + "D tensor"); + } + + // check output sizes + auto output_size = get_output_size( + input, kernel_size, stride_size, pad_size, dilation_size); + + TORCH_CHECK( + all_nonnegative(output_size), + "calculated output size ", + output_size, + " is too small (all sizes must be non-negative)"); + + // check weight + TORCH_CHECK(weight.defined(), "weight must be defined"); + TORCH_CHECK( + weight.dim() == dim + 2, + "weight must be ", + dim + 2, + "D tensor but got ", + weight.dim(), + "D tensor dim=", + dim); + TORCH_CHECK( + weight.sizes().slice(2) == kernel_size, + "weight[2:] shape ", + weight.sizes().slice(2), + " must be equal to kernel_size ", + kernel_size); + + TORCH_CHECK_DIM_SIZE(input, input.dim(), (is_batch ? 
1 : 0), weight.size(1)); + + // check bias when present + if (bias.defined()) { + TORCH_CHECK( + bias.dim() == 1, + "bias must be 1D tensor but got ", + bias.dim(), + "D tensor"); + TORCH_CHECK_DIM_SIZE(bias, 1, 0, weight.size(0)); + } + + // check grad_output when present + if (grad_output.defined()) { + TORCH_CHECK( + grad_output.dim() == ndim, + "grad_output must be ", + ndim, + "D tensor but got ", + grad_output.dim(), + "D tensor"); + if (is_batch) { + TORCH_CHECK( + grad_output.size(0) == input.size(0), + "grad_output.size(0)=", + grad_output.size(0), + " must be input.size(0)=", + input.size(0)); + } + TORCH_CHECK( + grad_output.size(n - 1) == weight.size(0), + "grad_output.size(", + n - 1, + ")=", + grad_output.size(n - 1), + " must be weight.size(0)=", + weight.size(0)); + TORCH_CHECK( + grad_output.sizes().slice(n) == output_size, + "grad_output[", + n, + ":] shape", + grad_output.sizes().slice(n), + " must be equal to output size ", + output_size); + } +} + +} // namespace at::native::internal diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/DistributionTemplates.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/DistributionTemplates.h new file mode 100644 index 0000000000000000000000000000000000000000..38c171e56dfae97f5162a5b70e6ceedd0a91db7d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/DistributionTemplates.h @@ -0,0 +1,394 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include +#include +#include +#include +#endif + +namespace at::native::templates { + +// ==================================================== Random ======================================================== + +// The purpose of `update_from` and `update_to` is to find the closest valid int64_t number that can be used as actual `from`. +// The current implementation of `random_` uses uint64_t arithmetics and casts the result to the target dtype(scalar_t). +// This casting can result in generating numbers that happen to be greater or equal to `to` value. For instance: +// +// auto actual = torch::empty({3, 3}, torch::half); +// actual.random_(0, 65504); +// +// If random's uint64_t arithmetics produces 65503 as a random value after casting to torch::half it becomes 65504 +// and violates the requirement that random value must be less than `to`. To resolve this issue `update_from` and `update_to` +// moves `from` to the right and `to` to the left to the next closest value that won't go outside [from, to) after casting to +// the target dtype. For `to` = 65504 it moves left for (1 << (log2(to) - 11 + 1)) = 32 and becomes 65472, which is previous +// available number for torch::half dtype. 
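+//
+// A concrete illustration of the clamping described above (these values
+// follow directly from the arithmetic in this comment):
+//
+//   update_to<at::Half>(65504);  // -> 65472, the largest half-representable
+//                                //    value strictly below 65504
+//   update_from<float>(100);     // -> unchanged: 101 survives the float
+//                                //    round-trip, so no adjustment is needed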
+template <typename scalar_t>
+int64_t update_from(int64_t from) {
+  static_assert(
+    std::is_floating_point<scalar_t>::value ||
+    std::is_same<scalar_t, at::Half>::value ||
+    std::is_same<scalar_t, at::BFloat16>::value, "scalar_t must be floating-point type");
+  const auto from_plus_1 = static_cast<int64_t>(static_cast<scalar_t>(from + 1));
+  if (from_plus_1 < from) {
+    int64_t from_ = std::abs(from + 1);
+    int n = 0;
+    while (from_ >>= 1) ++n;
+    // NOLINTNEXTLINE(clang-analyzer-core.UndefinedBinaryOperatorResult)
+    from = from_plus_1 + (1LL << (n - std::numeric_limits<scalar_t>::digits + 1));
+  }
+  return from;
+}
+
+template <typename scalar_t>
+int64_t update_to(int64_t to) {
+  static_assert(
+    std::is_floating_point<scalar_t>::value ||
+    std::is_same<scalar_t, at::Half>::value ||
+    std::is_same<scalar_t, at::BFloat16>::value, "scalar_t must be floating-point type");
+  const auto to_minus_1 = static_cast<int64_t>(static_cast<scalar_t>(to - 1));
+  if (to_minus_1 >= to) {
+    int64_t to_ = std::abs(to - 1);
+    int n = 0;
+    while (to_ >>= 1) ++n;
+    // NOLINTNEXTLINE(clang-analyzer-core.UndefinedBinaryOperatorResult)
+    to = to_minus_1 - (1LL << (n - std::numeric_limits<scalar_t>::digits + 1));
+  }
+  return to;
+}
+
+// Return early to avoid invoking the kernel on empty tensors.
+// See https://github.com/pytorch/pytorch/issues/103418 for more details
+#define CHECK_EMPTY_AND_RETURN(tensor) \
+  if (tensor.numel() == 0) { \
+    return tensor; \
+  }
+
+template<template<typename> class random_kernel, typename RNG>
+at::Tensor& random_impl(at::Tensor& self, std::optional<Generator> generator) {
+  CHECK_EMPTY_AND_RETURN(self);
+  auto iter = at::TensorIterator::borrowing_nullary_op(self);
+  random_kernel<RNG>()(iter, generator);
+  return self;
+}
+
+#define CHECK_OUT_OF_BOUNDS(var, name, min, max, dtype) \
+  TORCH_CHECK(var >= min && var <= max, name, " is out of bounds for ", dtype); \
+
+#define WARN_OUT_OF_BOUNDS(var, name, digits, dtype) \
+  if (var < -(1LL << digits) || var > (1LL << digits)) { \
+    TORCH_WARN(name, " is out of bounds [-(2^", digits, "), 2^", digits, "]. ", \
+      "Due to precision limitations ", dtype, " can support discrete uniform distribution only within this range. ", \
+      "This warning will become an error in version 1.7 release, please fix the code in advance"); \
+  }
+
+inline void check_from_to_in_range(int64_t from, int64_t to_inc, caffe2::TypeMeta dtype) {
+  const auto scalar_type = typeMetaToScalarType(dtype);
+  if (isFloatingType(scalar_type)) {
+    AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "check_random_fp_bounds", [&] {
+      const auto min = static_cast<double>(std::numeric_limits<scalar_t>::lowest());
+      const auto max = static_cast<double>(std::numeric_limits<scalar_t>::max());
+      CHECK_OUT_OF_BOUNDS(from, "from", min, max, dtype);
+      CHECK_OUT_OF_BOUNDS(to_inc, "to - 1", min, max, dtype);
+
+      constexpr auto digits = std::numeric_limits<scalar_t>::digits;
+      WARN_OUT_OF_BOUNDS(from, "from", digits, dtype);
+      WARN_OUT_OF_BOUNDS(to_inc, "to - 1", digits, dtype);
+    });
+  } else if (scalar_type == kUInt64) {
+    // When you do a comparison between int64_t and uint64_t, the usual
+    // arithmetic conversions say that the int64_t value is promoted to
+    // unsigned. But this conversion wraps around: if I had -1 as my int64_t,
+    // then it will promote to 0xFFFFFFFFFFFFFFFF in uint64_t. This is never
+    // the right thing to do.
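+    // For example:
+    //   int64_t  s = -1;
+    //   uint64_t u = 0;
+    //   s < u;  // false: s is first converted to 0xFFFFFFFFFFFFFFFF
+    // The checks below therefore pin the accepted range to [0, INT64_MAX],
+    // the range on which int64_t and uint64_t agree.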
+ CHECK_OUT_OF_BOUNDS(from, "from", 0, INT64_MAX, dtype); + CHECK_OUT_OF_BOUNDS(to_inc, "to - 1", 0, INT64_MAX, dtype); + } else if (isIntegralType(scalar_type, /*includeBool=*/true)) { + AT_DISPATCH_V2(scalar_type, "check_random_integral_bounds", AT_WRAP([&]() { + const auto min = static_cast(std::numeric_limits::lowest()); + const auto max = static_cast(std::numeric_limits::max()); + CHECK_OUT_OF_BOUNDS(from, "from", min, max, dtype); + CHECK_OUT_OF_BOUNDS(to_inc, "to - 1", min, max, dtype); + }), AT_EXPAND(AT_INTEGRAL_TYPES), kUInt16, kUInt32, kBool); + } else { + TORCH_CHECK(false, "check_random_bounds handles only integral, floating-point and boolean types"); + } +} + +template class random_from_to_kernel, typename RNG> +at::Tensor& random_from_to_impl(at::Tensor& self, int64_t from, std::optional to_opt, std::optional generator) { + uint64_t range = 0; + auto iter = at::TensorIterator::borrowing_nullary_op(self); + if (to_opt.has_value()) { + // [from, to) + int64_t to = *to_opt; + TORCH_CHECK(from < to, "random_ expects 'from' to be less than 'to', but got from=", from, " >= to=", to); + if (isFloatingType(iter.dtype())) { + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "random_update_from_to", [&] { + from = update_from(from); + to = update_to(to); + TORCH_CHECK(from < to, "random_ expects 'from' casted to dtype to be less than 'to' casted to dtype, but got from=", from, " >= to=", to); + }); + } + check_from_to_in_range(from, to - 1, self.dtype()); + CHECK_EMPTY_AND_RETURN(self); + range = static_cast(to) - static_cast(from); + random_from_to_kernel()(iter, range, from, generator); + } else if (from != std::numeric_limits::lowest()) { + // [from, std::numeric_limits::max()] + int64_t to_inc = 0; + if (isFloatingType(iter.dtype())) { + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "random_from_to_range_calc", [&] { + constexpr int64_t scalar_t_max = static_cast(1) << std::numeric_limits::digits; + to_inc = scalar_t_max > std::numeric_limits::max() ? 
std::numeric_limits::max() : static_cast(scalar_t_max); + from = update_from(from); + TORCH_CHECK(from < to_inc, "random_ expects 'from' casted to dtype to be less than or equal to 'to_inc' casted to dtype, but got from=", from, " > to_inc=", to_inc); + }); + } else if (isIntegralType(iter.dtype(), /*includeBool=*/true)) { + AT_DISPATCH_V2(self.scalar_type(), "random_from_to_range_calc", AT_WRAP([&] { + if constexpr (std::is_same_v) { + to_inc = static_cast(true); + } else { + to_inc = static_cast(std::numeric_limits::max()); + } + }), AT_EXPAND(AT_INTEGRAL_TYPES_V2), kBool); + } else { + TORCH_CHECK(false, "random_from_to_impl handles only integral, floating-point and boolean types"); + } + check_from_to_in_range(from, to_inc, self.dtype()); + CHECK_EMPTY_AND_RETURN(self); + range = static_cast(to_inc) - static_cast(from) + 1; + random_from_to_kernel()(iter, range, from, generator); + } else { + // [std::numeric_limits::lowest(), std::numeric_limits::max()] + // range = 2^64 + CHECK_EMPTY_AND_RETURN(self); + random_from_to_kernel()(iter, generator); + } + return self; +} + +// ==================================================== Normal ======================================================== + +#define CHECK_NORMAL_TENSOR_STD(std) \ + do { \ + TORCH_CHECK( \ + !std.is_complex(), \ + "normal expects standard deviation to be non-complex"); \ + TORCH_CHECK( \ + std.numel() == 0 || std.is_meta() || std.min().ge(0).item(), \ + "normal expects all elements of std >= 0.0"); \ + } while (0) + +#define CHECK_NORMAL_STD(std) \ + TORCH_CHECK(std >= 0.0, "normal expects std >= 0.0, but found std ", std); + +template class normal_kernel, typename RNG> +Tensor& normal_impl_(Tensor& self, double mean, double std, std::optional gen) { + CHECK_NORMAL_STD(std); + CHECK_EMPTY_AND_RETURN(self); + + if (self.is_complex()) { + auto float_tensor = at::view_as_real(self); + // variance for normal distribution of the real and imaginary values + // is half of the input variance + normal_kernel()(float_tensor, mean, std/(std::sqrt(2)), gen); + } else { + normal_kernel()(self, mean, std, gen); + } + return self; +} + +template class normal_kernel, typename RNG> +Tensor& normal_out_impl(Tensor& output, const Tensor& mean, double std, std::optional gen) { + CHECK_NORMAL_STD(std); + auto std_tensor = at::empty_like(output, MemoryFormat::Contiguous); + auto shape = at::infer_size(mean.sizes(), std_tensor.sizes()); + at::native::resize_output(output, shape); + normal_impl_(output, 0, std, gen); + output.add_(mean); + return output; +} + +template class normal_kernel, typename RNG> +Tensor& normal_out_impl(Tensor& output, double mean, const Tensor& std, std::optional gen) { + CHECK_NORMAL_TENSOR_STD(std); + auto mean_tensor = at::full({}, mean, output.options()); + auto shape = at::infer_size(mean_tensor.sizes(), std.sizes()); + at::native::resize_output(output, shape); + normal_impl_(output, 0, 1, gen); + // CUDA NB: addcmul_out copies the tensor to be added into the output. + // The previous function here was addcmul_out(output, mean_tensor, output, std, 1); + // The third argument is not a constant reference and hence the samples in output are overwritten. 
+ // Consequently, the computation performed is mean_tensor + mean_tensor * std instead of mean_tensor + output * std + output.mul_(std).add_(mean_tensor); + return output; +} + +template class normal_kernel, typename RNG> +Tensor& normal_out_impl(Tensor& output, const Tensor& mean, const Tensor& std, std::optional gen) { + CHECK_NORMAL_TENSOR_STD(std); + auto shape = at::infer_size(mean.sizes(), std.sizes()); + at::native::resize_output(output, shape); + normal_impl_(output, 0, 1, gen); + // CUDA NB: addcmul_out copies the tensor to be added into the output. + // The previous function here was addcmul_out(output, mean, output, std, 1); + // The third argument is not a constant reference and hence the samples in output are overwritten. + // Consequently, the computation performed is mean + mean * std instead of mean + output * std + output.mul_(std).add_(mean); + return output; +} + +template class normal_kernel, typename RNG> +Tensor normal_impl(const Tensor& mean, double std, std::optional gen) { + CHECK_NORMAL_STD(std); + Tensor ret = at::empty_like(mean, MemoryFormat::Contiguous); + normal_out_impl(ret, mean, std, gen); + return ret; +} + +template class normal_kernel, typename RNG> +Tensor normal_impl(double mean, const Tensor& std, std::optional gen) { + CHECK_NORMAL_TENSOR_STD(std); + Tensor ret = at::empty_like(std, MemoryFormat::Contiguous); + normal_out_impl(ret, mean, std, gen); + return ret; +} + +template class normal_kernel, typename RNG> +Tensor normal_impl(const Tensor& mean, const Tensor& std, std::optional gen) { + CHECK_NORMAL_TENSOR_STD(std); + auto shape = at::infer_size(mean.sizes(), std.sizes()); + Tensor ret = at::empty(shape, mean.options(), MemoryFormat::Contiguous); + normal_out_impl(ret, mean, std, gen); + return ret; +} + +// ==================================================== Uniform ======================================================= + +template class uniform_kernel, typename RNG> +at::Tensor& uniform_impl_(at::Tensor& self, double from, double to, std::optional generator) { + if (self.is_complex()) { + CHECK_EMPTY_AND_RETURN(self); + auto float_tensor = at::view_as_real(self); + uniform_impl_(float_tensor, from, to, generator); + } else { + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "check_uniform_bounds", [&] { + [[maybe_unused]] const auto dtype = self.dtype(); + const auto min = static_cast(std::numeric_limits::lowest()); + const auto max = static_cast(std::numeric_limits::max()); + CHECK_OUT_OF_BOUNDS(from, "from", min, max, dtype); + CHECK_OUT_OF_BOUNDS(to, "to", min, max, dtype); + TORCH_CHECK(from <= to, "uniform_ expects to return a [from, to) range, but found from=", from, " > to=", to); + TORCH_CHECK((to - from) <= std::numeric_limits::max(), + "uniform_ expects to-from <= std::numeric_limits<", toString(self.scalar_type()), + ">::max(), but found to=", to, " and from=", from, + " which result in to-from to exceed the limit"); + from = std::min(std::max(from, min), max); + to = std::max(std::min(to, max), min); + }); + CHECK_EMPTY_AND_RETURN(self); + auto iter = at::TensorIterator::borrowing_nullary_op(self); + uniform_kernel()(iter, from, to, generator); + } + return self; +} + +// ================================================== LogNormal ======================================================= + +template class log_normal_kernel, typename RNG> +at::Tensor& log_normal_impl_(at::Tensor& self, double mean, double std, std::optional gen) { + TORCH_CHECK(std > 0.0, "log_normal_ expects 
std > 0.0, but found std=", std); + CHECK_EMPTY_AND_RETURN(self); + auto iter = TensorIterator::borrowing_nullary_op(self); + log_normal_kernel()(iter, mean, std, gen); + return self; +} + +// =================================================== Geometric ====================================================== + +template class geometric_kernel, typename RNG> +Tensor& geometric_impl_(Tensor& self, double p, std::optional gen) { + TORCH_CHECK(0 < p && p < 1, "geometric_ expects p to be in (0, 1), but got p=", p); + CHECK_EMPTY_AND_RETURN(self); + auto iter = TensorIterator::borrowing_nullary_op(self); + geometric_kernel()(iter, p, gen); + return self; +} + +// ================================================== Exponential ===================================================== + +template class exponential_kernel, typename RNG> +Tensor& exponential_impl_(Tensor& self, double lambda, std::optional gen) { + TORCH_CHECK(lambda > 0.0, "exponential_ expects lambda > 0.0, but found lambda=", lambda); + CHECK_EMPTY_AND_RETURN(self); + auto iter = TensorIterator::borrowing_nullary_op(self); + exponential_kernel()(iter, lambda, gen); + return self; +} + +// ==================================================== Cauchy ======================================================== + +template class cauchy_kernel, typename RNG> +Tensor& cauchy_impl_(Tensor& self, double median, double sigma, std::optional gen) { + // TODO: instead of variable name 'sigma', use 'gamma' or 'scale' + // the variance, squared sigma, is undefined for cauchy distribution + TORCH_CHECK(sigma > 0.0, "cauchy_ expects sigma > 0.0, but found sigma=", sigma); + TORCH_CHECK(at::isFloatingType(self.scalar_type()), "Cauchy distribution is a continuous probability distribution. dtype must be a floating point but you specified ", self.dtype()); + CHECK_EMPTY_AND_RETURN(self); + auto iter = TensorIterator::borrowing_nullary_op(self); + cauchy_kernel()(iter, median, sigma, gen); + return self; +} + +// ==================================================== Bernoulli ===================================================== + +template class bernoulli_tensor_kernel, typename RNG> +Tensor& bernoulli_impl_(Tensor& self, const Tensor& p_, std::optional gen) { + CHECK_EMPTY_AND_RETURN(self); + NoNamesGuard guard; + at::assert_no_internal_overlap(self); + bernoulli_tensor_kernel()(self, p_, gen); + return self; +} + +template class bernoulli_scalar_kernel, typename RNG> +Tensor& bernoulli_impl_(Tensor& self, double p, std::optional gen) { + TORCH_CHECK(0 <= p && p <= 1, "bernoulli_ expects p to be in [0, 1], but got p=", p); + CHECK_EMPTY_AND_RETURN(self); + at::assert_no_internal_overlap(self); + bernoulli_scalar_kernel()(self, p, gen); + return self; +} + +template class bernoulli_tensor_kernel, typename RNG> +Tensor& bernoulli_out_impl(Tensor& result, const Tensor& self, std::optional gen) { + // result.resize_as_(self) requires self to have same dtype as result, so we + // use resize_ instead. + // TODO: Fix resize_as_. See pytorch/pytorch#11665. 
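+  //
+  // An illustrative call (the kernel name my_bernoulli_kernel is
+  // hypothetical): the output dtype may legitimately differ from the
+  // probability dtype, which is why plain resize_ is used below:
+  //
+  //   at::Tensor probs  = at::rand({4});             // float probabilities
+  //   at::Tensor result = at::empty({0}, at::kLong); // integral output
+  //   bernoulli_out_impl<my_bernoulli_kernel, at::CPUGeneratorImpl>(
+  //       result, probs, std::nullopt);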
+ result.resize_(self.sizes()); + bernoulli_impl_(result, self, gen); + namedinference::propagate_names(result, self); + return result; +} + +#undef CHECK_OUT_OF_BOUNDS +#undef WARN_OUT_OF_BOUNDS + +} // namespace at::native::templates diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/EmbeddingBag.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/EmbeddingBag.h new file mode 100644 index 0000000000000000000000000000000000000000..eb29e1171dcd60a5d9fbc5cb35b488603c1b0886 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/EmbeddingBag.h @@ -0,0 +1,153 @@ +#include +#include +#include + +#ifdef USE_FBGEMM +#include +#endif + +namespace at::native { + +enum class EmbeddingBagMode { + SUM = 0, + MEAN = 1, + MAX = 2, +}; + +[[maybe_unused]] static bool operator==(int64_t op1, EmbeddingBagMode op2) { + return op1 == static_cast(op2); +} + +[[maybe_unused]] static bool operator!=(int64_t op1, EmbeddingBagMode op2) { + return !(op1 == op2); +} + +void check_arguments( + const Tensor& weight, + const Tensor& indices, + const Tensor& offsets, + const int64_t mode, + const std::optional& per_sample_weights, + bool include_last_offset); + +void make_bag_size_out( + Tensor& bag_size_out, + const Tensor& offsets, + const Tensor& indices, + const int64_t mode, + const bool include_last_offset, + const bool requires_grad); + +void make_max_indices_out( + Tensor& max_indices_out, + const Tensor& weight, + const Tensor& indices, + const Tensor& offsets, + const Tensor& bag_size, + const int64_t mode, + bool include_last_offset); + +void make_offset2bag_out( + Tensor& offset2bag, + Tensor& output, + const Tensor& weight, + const Tensor& indices, + const Tensor& offsets, + const int64_t mode, + const std::optional& per_sample_weights, + const int64_t padding_idx = -1); + +#ifdef USE_FBGEMM + +template +struct _CallbackAndBlockSize { + using TCallback = typename fbgemm::EmbeddingSpMDMKernelSignature::Type; + + int64_t blockSize = -1; + TCallback callback = nullptr; + + static TCallback generateCallback(int64_t block_size) { + return fbgemm::GenerateEmbeddingSpMDM( + block_size, + has_weight, + /* normalize_by_lengths */false, + /* prefetch */16, + /* is_weight_positional */false, + /* use_offsets */true); + } + + _CallbackAndBlockSize() = default; + + explicit _CallbackAndBlockSize(std::optional maybe_block_size) + : blockSize(maybe_block_size.value_or(-1)) + , callback(maybe_block_size.has_value() ? generateCallback(maybe_block_size.value()) : nullptr) + {} +}; + +template +struct _EmbeddingBagKernelCacheImpl : private StorageMixins... { + + _EmbeddingBagKernelCacheImpl() = default; + // use each of the mixins to store corresponding kernel and block size + explicit _EmbeddingBagKernelCacheImpl(std::optional maybe_block_size) + : StorageMixins(maybe_block_size)... 
+ {} + + // this method is thread safe (call sites may call from different threads) + template + typename _CallbackAndBlockSize::TCallback + getCallback(int64_t block_size) const { + // if the cache doesn't store the kernel for the incoming block size + // (so it is different from the one stored in corresponding mixin) + // regenerate the kernel (not writing it into the cache so we avoid locks) + if (block_size != _CallbackAndBlockSize::blockSize) { + return _CallbackAndBlockSize::generateCallback(block_size); + } + // else retrieve the cached kernel from the corresponding mixin + return _CallbackAndBlockSize::callback; + } +}; + +// instantiate the cache with the list of storage mixins +// for each of the 8 _EmbeddingBagKernelCache* usages in the EmbeddingBag.cpp impl file +using _EmbeddingBagKernelCache = _EmbeddingBagKernelCacheImpl< + _CallbackAndBlockSize, + _CallbackAndBlockSize, + _CallbackAndBlockSize, + _CallbackAndBlockSize, + _CallbackAndBlockSize, + _CallbackAndBlockSize, + _CallbackAndBlockSize, + _CallbackAndBlockSize>; +#else +struct _EmbeddingBagKernelCache { + explicit _EmbeddingBagKernelCache(std::optional /* maybe_block_size */) {} +}; +#endif + +void _embedding_bag_cpu_impl_out(Tensor& output, Tensor& offset2bag, + Tensor& bag_size, Tensor* max_indices, + const Tensor &weight, const Tensor &indices, + const Tensor &offsets, const int64_t mode = 0, + const std::optional& per_sample_weights = std::nullopt, + bool include_last_offset = false, + int64_t padding_idx = -1, + _EmbeddingBagKernelCache* fbgemm_kernel_cache = nullptr); + +void _embedding_bag_cpu_out( + at::Tensor& output, + at::Tensor& offset2bag, + at::Tensor& bag_size, + at::Tensor* p_max_indices, + const at::Tensor& weight, + const at::Tensor& indices, + const at::Tensor& offsets, + const bool scale_grad_by_freq, + const int64_t mode, + const bool sparse, + const std::optional& per_sample_weights, + const bool include_last_offset, + const std::optional& padding_idx, + _EmbeddingBagKernelCache* fbgemm_kernel_cache = nullptr); + +} // namespace at::native diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/FusedSGD.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/FusedSGD.h new file mode 100644 index 0000000000000000000000000000000000000000..c58d10b6a4a4e585cb6196f553c73aa667d5150a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/FusedSGD.h @@ -0,0 +1,21 @@ +#include +#include + +namespace at::native { + +using fused_sgd_fn = void (*)( + const at::Tensor& param, + const at::Tensor& grad, + const at::Tensor& momentum_buffer, + const double weight_decay, + const double momentum, + const double lr, + const double dampening, + const bool nesterov, + const bool maximize, + const bool is_first_step, + const float* grad_scale_ptr); + +DECLARE_DISPATCH(fused_sgd_fn, fused_sgd_stub); + +} // namespace at::native diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/GridSampler.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/GridSampler.h new file mode 100644 index 0000000000000000000000000000000000000000..509a305fe4b5ed33c128b06fec8473816eeca46a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/GridSampler.h @@ -0,0 +1,298 @@ +#pragma once + +#include +#include +#include +#include + +#include + +namespace at::native { + +using detail::GridSamplerInterpolation; +using detail::GridSamplerPadding; + +// Unnormalizes a coordinate from the -1 to +1 scale to its pixel index value, +// where we 
view each pixel as an area between (idx - 0.5) and (idx + 0.5).
+// if align_corners: -1 and +1 get sent to the centers of the corner pixels
+//     -1 --> 0
+//     +1 --> (size - 1)
+//     scale_factor = (size - 1) / 2
+// if not align_corners: -1 and +1 get sent to the image edges
+//     -1 --> -0.5
+//     +1 --> (size - 1) + 0.5 == size - 0.5
+//     scale_factor = size / 2
+template <typename scalar_t>
+static inline scalar_t grid_sampler_unnormalize(scalar_t coord, int64_t size,
+                                                bool align_corners) {
+  if (align_corners) {
+    // unnormalize coord from [-1, 1] to [0, size - 1]
+    return ((coord + 1) / 2) * (size - 1);
+  } else {
+    // unnormalize coord from [-1, 1] to [-0.5, size - 0.5]
+    return ((coord + 1) * size - 1) / 2;
+  }
+}
+
+// grid_sampler_unnormalize_set_grad works the same as grid_sampler_unnormalize
+// except that it also returns the `d output / d input` via pointer argument
+// `grad_in`.
+// This is useful in the backward pass of grid_sampler.
+template <typename scalar_t>
+static inline scalar_t grid_sampler_unnormalize_set_grad(scalar_t coord, int64_t size,
+                                                         bool align_corners, scalar_t *grad_in) {
+  if (align_corners) {
+    // unnormalize coord from [-1, 1] to [0, size - 1]
+    *grad_in = static_cast<scalar_t>(size - 1) / 2;
+    return ((coord + 1) / 2) * (size - 1);
+  } else {
+    // unnormalize coord from [-1, 1] to [-0.5, size - 0.5]
+    *grad_in = static_cast<scalar_t>(size) / 2;
+    return ((coord + 1) * size - 1) / 2;
+  }
+}
+
+// Clips coordinates to between 0 and clip_limit - 1
+template <typename scalar_t>
+static inline scalar_t clip_coordinates(scalar_t in, int64_t clip_limit) {
+  return std::min(static_cast<scalar_t>(clip_limit - 1), std::max(in, static_cast<scalar_t>(0)));
+}
+
+// clip_coordinates_set_grad works similarly to clip_coordinates except that
+// it also returns the `d output / d input` via pointer argument `grad_in`.
+// This is useful in the backward pass of grid_sampler.
+template <typename scalar_t>
+static inline scalar_t clip_coordinates_set_grad(scalar_t in, int64_t clip_limit,
+                                                 scalar_t *grad_in) {
+  // Note that it is important for the gradient calculation that borders
+  // are considered out of bounds.
+  if (in <= static_cast<scalar_t>(0)) {
+    *grad_in = static_cast<scalar_t>(0);
+    return static_cast<scalar_t>(0);
+  } else {
+    scalar_t max = static_cast<scalar_t>(clip_limit - 1);
+    if (in >= max) {
+      *grad_in = static_cast<scalar_t>(0);
+      return max;
+    } else {
+      *grad_in = static_cast<scalar_t>(1);
+      return in;
+    }
+  }
+}
+
+// Reflects coordinates until they fall between low and high (inclusive).
+// The bounds are passed as twice their value so that half-integer values
+// can be represented as ints.
+template <typename scalar_t>
+static inline scalar_t reflect_coordinates(scalar_t in, int64_t twice_low,
+                                           int64_t twice_high) {
+  if (twice_low == twice_high) {
+    return static_cast<scalar_t>(0);
+  }
+  scalar_t min = static_cast<scalar_t>(twice_low) / 2;
+  scalar_t span = static_cast<scalar_t>(twice_high - twice_low) / 2;
+  in = std::fabs(in - min);
+  // `fmod` returns same sign as `in`, which is positive after the `fabs` above.
+  scalar_t extra = std::fmod(in, span);
+  int flips = static_cast<int>(std::floor(in / span));
+  if (flips % 2 == 0) {
+    return extra + min;
+  } else {
+    return span - extra + min;
+  }
+}
+
+// reflect_coordinates_set_grad works similarly to reflect_coordinates except
+// that it also returns the `d output / d input` via pointer argument
+// `grad_in`.
+// This is useful in the backward pass of grid_sampler.
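+//
+// For intuition, reflect_coordinates above behaves as follows on a size-5
+// axis with align_corners=true (twice_low = 0, twice_high = 8, so the valid
+// range is [0, 4]):
+//
+//   reflect_coordinates(5.0, 0, 8) == 3.0   // one bounce off the high edge
+//   reflect_coordinates(-1.0, 0, 8) == 1.0  // one bounce off the low edge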
+template +static inline scalar_t reflect_coordinates_set_grad(scalar_t in, int64_t twice_low, + int64_t twice_high, scalar_t *grad_in) { + if (twice_low == twice_high) { + *grad_in = static_cast(0); + return static_cast(0); + } + int grad_in_mult_; + scalar_t min = static_cast(twice_low) / 2; + scalar_t span = static_cast(twice_high - twice_low) / 2; + in = in - min; + if (in < static_cast(0)) { + grad_in_mult_ = -1; + in = -in; + } else { + grad_in_mult_ = 1; + } + // `fmod` returns same sign as `in`, which is positive after the `if` above. + scalar_t extra = std::fmod(in, span); + int flips = static_cast(std::floor(in / span)); + if (flips % 2 == 0) { + *grad_in = static_cast(grad_in_mult_); + return extra + min; + } else { + *grad_in = static_cast(-grad_in_mult_); + return span - extra + min; + } +} + +// Mapping the out-of-boundary points back into boundary +// This would only affect padding_mode=border or reflection +template +static inline scalar_t compute_coordinates(scalar_t coord, int64_t size, + GridSamplerPadding padding_mode, + bool align_corners) { + if (padding_mode == GridSamplerPadding::Border) { + // clip coordinates to image borders + coord = clip_coordinates(coord, size); + } else if (padding_mode == GridSamplerPadding::Reflection) { + // reflect coordinates by image borders + if (align_corners) { + coord = reflect_coordinates(coord, 0, 2*(size - 1)); + } else { + coord = reflect_coordinates(coord, -1, 2*size - 1); + } + // clip coordinates to image borders + coord = clip_coordinates(coord, size); + } + return coord; +} + +// Computes the pixel source index value for a grid coordinate +template +static inline scalar_t grid_sampler_compute_source_index( + scalar_t coord, + int64_t size, + GridSamplerPadding padding_mode, + bool align_corners) { + coord = grid_sampler_unnormalize(coord, size, align_corners); + coord = compute_coordinates(coord, size, padding_mode, align_corners); + return coord; +} + +// grid_sampler_compute_source_index_set_grad works similarly to +// grid_sampler_compute_source_index except that it also returns the +// `d output / d input` via pointer argument `grad_in`. +// This is useful in the backward pass of grid_sampler. 
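+//
+// Worked example of the forward mapping above, for width size = 4 and
+// padding_mode = Zeros (which leaves the unnormalized coordinate unchanged):
+//   align_corners = true:  -1 -> 0.0 and +1 -> 3.0  (centers of edge pixels)
+//   align_corners = false: -1 -> -0.5 and +1 -> 3.5 (edges of the image)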
+template +static inline scalar_t grid_sampler_compute_source_index_set_grad( + scalar_t coord, + int64_t size, + GridSamplerPadding padding_mode, + bool align_corners, + scalar_t *grad_in) { + scalar_t grad_clip, grad_refl; + coord = grid_sampler_unnormalize_set_grad(coord, size, align_corners, grad_in); + if (padding_mode == GridSamplerPadding::Border) { + // clip coordinates to image borders + coord = clip_coordinates_set_grad(coord, size, &grad_clip); + *grad_in = (*grad_in) * grad_clip; + } else if (padding_mode == GridSamplerPadding::Reflection) { + // reflect coordinates by image borders + if (align_corners) { + coord = reflect_coordinates_set_grad(coord, 0, 2*(size - 1), &grad_refl); + } else { + coord = reflect_coordinates_set_grad(coord, -1, 2*size - 1, &grad_refl); + } + // clip coordinates to image borders + coord = clip_coordinates_set_grad(coord, size, &grad_clip); + *grad_in = (*grad_in) * grad_refl * grad_clip; + } + return coord; +} + +static inline bool within_bounds_2d(int64_t h, int64_t w, int64_t H, int64_t W) { + return h >= 0 && h < H && w >= 0 && w < W; +} + +static inline bool within_bounds_3d(int64_t d, int64_t h, int64_t w, int64_t D, int64_t H, int64_t W) { + return d >= 0 && d < D && h >= 0 && h < H && w >= 0 && w < W; +} + +template +static inline scalar_t get_value_bounded( + const scalar_t* data, + scalar_t x, + scalar_t y, + int64_t W, + int64_t H, + int64_t sW, + int64_t sH, + GridSamplerPadding padding_mode, + bool align_corners) { + + x = compute_coordinates(x, W, padding_mode, align_corners); + y = compute_coordinates(y, H, padding_mode, align_corners); + + int64_t ix = static_cast(x); + int64_t iy = static_cast(y); + + if (within_bounds_2d(iy, ix, H, W)) { + return data[iy * sH + ix * sW]; + } + return static_cast(0); +} + +template +static inline void safe_add_2d(scalar_t *data, int64_t h, int64_t w, + int64_t sH, int64_t sW, int64_t H, int64_t W, + scalar_t delta) { + if (within_bounds_2d(h, w, H, W)) { + data[h * sH + w * sW] += delta; + } +} + +template +static inline void safe_add_3d(scalar_t *data, int64_t d, int64_t h, int64_t w, + int64_t sD, int64_t sH, int64_t sW, + int64_t D, int64_t H, int64_t W, + scalar_t delta) { + if (within_bounds_3d(d, h, w, D, H, W)) { + data[d * sD + h * sH + w * sW] += delta; + } +} + +template +static inline void add_value_bounded( + scalar_t* data, + scalar_t x, + scalar_t y, + int64_t W, + int64_t H, + int64_t sW, + int64_t sH, + scalar_t delta, + GridSamplerPadding padding_mode, + bool align_corners) { + + x = compute_coordinates(x, W, padding_mode, align_corners); + y = compute_coordinates(y, H, padding_mode, align_corners); + + int64_t ix = static_cast(x); + int64_t iy = static_cast(y); + + safe_add_2d(data, iy, ix, sH, sW, H, W, delta); +} + +// Calculate the differential of the cubic convolution, i.e. 
`d coeff / d x` +template +static inline void get_cubic_coefficients_grad( + scalar_t coeffs[4], + scalar_t t) { + + // Must be the same as forward calculation in + // aten/src/ATen/native/UpSample.h:get_cubic_upsample_coefficients + scalar_t A = -0.75; + + scalar_t x; + x = -1 - t; // 1 < x = |-1 - tx| < 2 + coeffs[0] = (-3 * A * x - 10 * A ) * x - 8 * A; + x = -t; // x = |0 - tx| <= 1 + coeffs[1] = (-3 * (A + 2) * x - 2 * (A + 3)) * x; + x = 1 - t; // x = |1 - tx| <= 1 + coeffs[2] = (3 * (A + 2) * x - 2 * (A + 3)) * x; + x = 2 - t; // 1 < x = |2 - tx| < 2 + coeffs[3] = (3 * A * x - 10 * A) * x + 8 * A; +} + +} // namespace at::native diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/GridSamplerUtils.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/GridSamplerUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..f783043c7961222302ed0ff2514ffbf138cafac2 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/GridSamplerUtils.h @@ -0,0 +1,105 @@ +#pragma once + +// See NOTE: [Tensor vs. TensorBase] +// https://github.com/pytorch/pytorch/pull/66979 +#include +#include +#include + +namespace at::native { + +namespace detail { + +enum class GridSamplerInterpolation {Bilinear, Nearest, Bicubic}; +enum class GridSamplerPadding {Zeros, Border, Reflection}; + +} // namespace detail + +using detail::GridSamplerInterpolation; +using detail::GridSamplerPadding; + +// See NOTE [ grid_sampler Native Functions ]. +inline void check_grid_sampler_common( + const TensorBase& input, + const TensorBase& grid +) { + auto input_opt = input.options(); + auto grid_opt = grid.options(); + + TORCH_CHECK( + input.defined(), + "grid_sampler(): expected input to not be undefined"); + TORCH_CHECK( + grid.defined(), + "grid_sampler(): expected grid to not be undefined"); + TORCH_CHECK( + input_opt.device() == grid_opt.device(), + "grid_sampler(): expected input and grid to be on same device, but input " + "is on ", input_opt.device(), " and grid is on ", grid_opt.device()); + TORCH_CHECK( + input_opt.layout() == kStrided && grid_opt.layout() == kStrided, + "grid_sampler(): expected input and grid to have torch.strided layout, but " + "input has ", input_opt.layout(), " and grid has ", grid_opt.layout()); + TORCH_CHECK( + input.size(0) == grid.size(0), + "grid_sampler(): expected grid and input to have same batch size, but got " + "input with sizes ", input.sizes(), " and grid with sizes ", grid.sizes()); + TORCH_CHECK( + grid.size(-1) == input.dim() - 2, + "grid_sampler(): expected grid to have size ", input.dim() - 2, " in last " + "dimension, but got grid with sizes ", grid.sizes()); + + for (const auto i : c10::irange(2, input.dim())) { + TORCH_CHECK(input.size(i) > 0, + "grid_sampler(): expected input to have non-empty spatial dimensions, " + "but input has sizes ", input.sizes(), " with dimension ", i, " being " + "empty"); + } +} + +// See NOTE [ grid_sampler Native Functions ]. +inline void check_grid_sampler_2d( + const TensorBase& input, + const TensorBase& grid +) { + TORCH_CHECK( + input.dim() == 4 && input.dim() == grid.dim(), + "grid_sampler(): expected 4D input and grid with same number of " + "dimensions, but got input with sizes ", input.sizes(), + " and grid with sizes ", grid.sizes()); +} + +// See NOTE [ grid_sampler Native Functions ]. 
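+// For reference, the shapes these checks accept in the 2D case look like:
+//
+//   at::Tensor input = at::rand({2, 3, 8, 8});          // (N, C, H_in, W_in)
+//   at::Tensor grid  = at::rand({2, 4, 4, 2}) * 2 - 1;  // (N, H_out, W_out, 2)
+//   check_grid_sampler_common(input, grid);  // ok: batch sizes match and
+//                                            //     grid.size(-1) == input.dim() - 2
+//   check_grid_sampler_2d(input, grid);      // ok: both tensors are 4-D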
+// See NOTE [ grid_sampler Native Functions ].
+inline void check_grid_sampler_3d(
+  const TensorBase& input,
+  const TensorBase& grid,
+  int64_t interpolation_mode
+) {
+  TORCH_CHECK(
+    input.dim() == 5 && input.dim() == grid.dim(),
+    "grid_sampler(): expected 5D input and grid with same number of "
+    "dimensions, but got input with sizes ", input.sizes(),
+    " and grid with sizes ", grid.sizes());
+  TORCH_CHECK(
+    !(input.dim() == 5 &&
+      static_cast<GridSamplerInterpolation>(interpolation_mode) ==
+        GridSamplerInterpolation::Bicubic),
+    "grid_sampler(): bicubic interpolation only supports 4D input");
+}
+
+// See NOTE [ grid_sampler Native Functions ].
+// cudnn does not support inputs larger than 1024.
+inline bool cond_cudnn_grid_sampler(
+  const TensorBase& input,
+  const TensorBase& grid
+) {
+  return (
+    at::native::cudnn_is_acceptable(input) &&
+    at::native::cudnn_is_acceptable(grid) &&
+    at::native::canUse32BitIndexMath(input) &&
+    at::native::canUse32BitIndexMath(grid) &&
+    input.dim() == 4 &&
+    input.sym_size(1) <= 1024);
+}
+
+} // namespace at::native
diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/IndexKernel.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/IndexKernel.h
new file mode 100644
index 0000000000000000000000000000000000000000..8b0a787f0e87a64bced49d9c920ebd66262bd26a
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/IndexKernel.h
@@ -0,0 +1,41 @@
+#pragma once
+#include <ATen/native/DispatchStub.h>
+#include <c10/util/ArrayRef.h>
+
+namespace at {
+class Tensor;
+class TensorBase;
+struct TensorIterator;
+struct TensorIteratorBase;
+}
+
+namespace c10 {
+class Scalar;
+}
+
+namespace at::native {
+
+using index_fn = void(*)(TensorIteratorBase &, IntArrayRef indexed_sizes, IntArrayRef indexed_strides);
+using index_fill_fn = void(*)(TensorIterator & iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride, const Scalar& source);
+using index_copy_fn = void(*)(TensorIterator & iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride);
+using index_put_fn = void(*)(TensorIterator &, IntArrayRef indexed_sizes, IntArrayRef indexed_strides, bool accumulate);
+using put_fn = void(*)(TensorIterator & iter, const TensorBase& self, const bool accumulate);
+using take_fn = void(*)(TensorIterator & iter, const TensorBase& input);
+using flip_fn = void(*)(TensorIterator &, const bool);
+using masked_fill_fn = void(*)(TensorIterator &, const Scalar& scalar);
+using masked_select_fn = void(*)(TensorIterator &, int64_t orig_stride);
+using masked_scatter_fn = void(*)(TensorIterator &, const TensorBase &);
+
+DECLARE_DISPATCH(index_fn, index_stub);
+DECLARE_DISPATCH(index_fill_fn, index_fill_stub);
+DECLARE_DISPATCH(index_copy_fn, index_copy_stub);
+DECLARE_DISPATCH(index_put_fn, index_put_stub);
+DECLARE_DISPATCH(put_fn, put_stub);
+DECLARE_DISPATCH(take_fn, take_stub);
+DECLARE_DISPATCH(flip_fn, flip_stub);
+DECLARE_DISPATCH(masked_fill_fn, masked_fill_stub);
+DECLARE_DISPATCH(masked_select_fn, masked_select_serial_stub);
+DECLARE_DISPATCH(masked_select_fn, masked_select_stub);
+DECLARE_DISPATCH(masked_scatter_fn, masked_scatter_stub);
+
+} // namespace at::native
diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/Lerp.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/Lerp.h
new file mode 100644
index 0000000000000000000000000000000000000000..6db4f60b88ea1e19a1cc744f9f8c1e7b31c66a82
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/Lerp.h
@@ -0,0 +1,46 @@
+#pragma once
+
+#include <ATen/OpMathType.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/native/DispatchStub.h>
+#include <c10/core/Scalar.h>
+
+namespace at::native {
+
+template <typename scalar_t>
+C10_HOST_DEVICE C10_ALWAYS_INLINE bool is_lerp_weight_small(scalar_t weight) {
+  return std::abs(weight) < scalar_t(0.5);
+}
+template <typename scalar_t>
+C10_HOST_DEVICE C10_ALWAYS_INLINE bool is_lerp_weight_small(c10::complex<scalar_t> weight) {
+  // Avoid the sqrt in abs(weight)
+  return (weight.real() * weight.real() + weight.imag() * weight.imag()) < scalar_t(0.25);
+}
+
+template <typename scalar_t, typename weight_t>
+C10_HOST_DEVICE C10_ALWAYS_INLINE scalar_t lerp(scalar_t self_, scalar_t end_, weight_t weight_) {
+  using opmath_t = at::opmath_type<scalar_t>;
+  using opmath_weight_t = at::opmath_type<weight_t>;
+
+  opmath_t self = self_;
+  opmath_t end = end_;
+  opmath_weight_t weight = weight_;
+
+  // Conditional for better numeric. This has been discussed in
+  // https://github.com/pytorch/pytorch/pull/18871
+  return is_lerp_weight_small(weight)
+      ? self + weight * (end - self)
+      : end - (end - self) * (opmath_t(1) - weight);
+}
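+
+// [Illustrative note, not part of the upstream header] Both branches above are
+// algebraically equal; the split only controls rounding error. With
+// hypothetical values self = 1.0f, end = 3.0f:
+//
+//   weight = 0.25f -> small path:  1.0f + 0.25f * (3.0f - 1.0f)          == 1.5f
+//   weight = 0.75f -> large path:  3.0f - (3.0f - 1.0f) * (1.0f - 0.75f) == 2.5f
+//
+// Each formulation anchors at the endpoint nearer to the result, keeping the
+// correction term small; e.g. lerp(x, x, w) returns exactly x on either path.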
+
+using lerp_fn_scalar = void (*)(
+    at::TensorIteratorBase& iter,
+    const Scalar& weight);
+
+using lerp_fn_tensor = void (*)(
+    at::TensorIteratorBase& iter);
+
+DECLARE_DISPATCH(lerp_fn_scalar, lerp_kernel_scalar_weight);
+DECLARE_DISPATCH(lerp_fn_tensor, lerp_kernel_tensor_weight);
+
+} // namespace at::native
diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/LinearAlgebraUtils.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/LinearAlgebraUtils.h
new file mode 100644
index 0000000000000000000000000000000000000000..48d9c31129654815c0542f63bf9d1d9cb9ab1dd3
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/LinearAlgebraUtils.h
@@ -0,0 +1,623 @@
+#pragma once
+
+#include <c10/core/ScalarType.h>
+#include <c10/util/irange.h>
+#include <c10/util/Exception.h>
+#include <c10/util/strides.h>
+#include <ATen/core/Tensor.h>
+#include <ATen/ExpandUtils.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/native/TensorIterator.h>
+#include <ATen/native/TransposeType.h>
+#include <limits>
+#include <type_traits>
+#include <sstream>
+#include <cstring>
+#include <cctype>
+
+#ifndef AT_PER_OPERATOR_HEADERS
+#include <ATen/Functions.h>
+#else
+#include <ATen/ops/arange.h>
+#include <ATen/ops/empty.h>
+#include <ATen/ops/empty_like.h>
+#include <ATen/ops/empty_strided.h>
+#include <ATen/ops/zeros.h>
+#endif
+
+namespace at::native {
+
+inline c10::MaybeOwned<Tensor> expect_resolved_conj(const Tensor& tensor) {
+  if (tensor.is_conj()) {
+    return c10::MaybeOwned<Tensor>::owned(tensor.resolve_conj());
+  } else {
+    return c10::MaybeOwned<Tensor>::borrowed(tensor);
+  }
+}
+
+inline DimVector batched_matrix_contiguous_strides(
+    const IntArrayRef sizes,
+    const bool f_contig = false) {
+  // f_contig chooses between the strides of a batch of Fortran (F-contiguous)
+  // and C-contiguous matrices
+  auto strides = c10::contiguous_strides(sizes);
+  auto dim = strides.size();
+
+  if (f_contig && dim >= 2) {
+    // Fix the strides of the last two dimensions, so that we return
+    // C-contiguous batches of F-contiguous matrices.
+    strides[dim - 1] = std::max(sizes[dim - 2], static_cast<int64_t>(1));
+    strides[dim - 2] = 1;
+  }
+  return strides;
+}
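+
+// [Illustrative note, not part of the upstream header] With hypothetical
+// sizes {B, M, N} = {2, 3, 4}:
+//
+//   f_contig == false  ->  strides {12, 4, 1}   (C-contiguous matrices)
+//   f_contig == true   ->  strides {12, 1, 3}   (column-major matrices:
+//                          stepping one column skips M == 3 elements)
+//
+// i.e. the batch stride stays M * N; only the intra-matrix layout flips.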
+/*
+ * Clones a Tensor so that the following conditions hold:
+ * If we think of a Tensor of having size (B, M, N), where B is any number
+ * of batch dimensions, then:
+ * - Each (M, N) matrix is in column major form
+ * - Let Tensor P have size (B, M, N) and Q have size (B, M', N').
+ *   Then when laid out in memory, the M by N matrix starting at
+ *   P.data_ptr()[B * M * N] is of the same corresponding batch as the M' by N'
+ *   matrix starting at Q.data_ptr()[B * M' * N'].
+ */
+inline Tensor cloneBatchedColumnMajor(const Tensor& src) {
+  // If src is already in batched column major format, then
+  // this will be efficient (no reordering of the data will occur)
+  // because the first transpose will make the tensor contiguous,
+  // and cloning a contiguous tensor is fast.
+  auto result = src.mT().clone(at::MemoryFormat::Contiguous);
+  result.transpose_(-2, -1);
+  return result;
+}
+
+/*
+ * contig chooses between C-contig (true) and F-contig (false)
+ */
+inline c10::MaybeOwned<Tensor> borrow_else_clone(const bool cond, const Tensor& borrow, const Tensor& clone, const bool contig) {
+  return cond ? c10::MaybeOwned<Tensor>::borrowed(borrow)
+              : c10::MaybeOwned<Tensor>::owned(contig ? clone.clone(MemoryFormat::Contiguous)
+                                                      : cloneBatchedColumnMajor(clone));
+}
+
+/*
+ * This method is designed to be a faster alternative to
+ * `cloneBatchedColumnMajor` with some additional features,
+ * namely:
+ * 1. It uses `copy` instead of `clone` which could be much faster.
+ * 2. The `nrows` parameter is used to create inputs with the number of rows larger
+ *    than the original input, which is required for some LAPACK/MAGMA methods.
+ * 3. `desired_batch_size` is used to create copies with the batch size
+ *    which is either the original batch size of the input, or its larger
+ *    broadcasted shape.
+ */
+inline Tensor copyBatchedColumnMajor(const Tensor& src, int64_t nrows = -1,
+    at::OptionalIntArrayRef desired_batch_sizes = std::nullopt) {
+  nrows = (nrows == -1) ? src.size(-2) : nrows;
+  auto copy_sizes = desired_batch_sizes.has_value()
+    ? desired_batch_sizes.value().vec()
+    : IntArrayRef(src.sizes().data(), src.dim() - 2).vec();
+  copy_sizes.insert(copy_sizes.end(), {nrows, src.size(-1)});
+  const auto copy_strides = batched_matrix_contiguous_strides(copy_sizes, /*f-contig*/true);
+  auto copy = at::empty_strided(copy_sizes, copy_strides, src.options());
+  copy.narrow(-2, 0, src.size(-2)).copy_(src);
+  return copy;
+}
+
+/*
+ * Given batches of matrices with arbitrary batch dim,
+ * computes the number of batches.
+ */
+inline int64_t batchCount(const Tensor& batched_matrices) {
+  int64_t result = 1;
+  for (int64_t i = 0; i < batched_matrices.ndimension() - 2; i++) {
+    result *= batched_matrices.size(i);
+  }
+  return result;
+}
+
+// Computes the number of elements of a matrix in a batched matrix tensor
+inline int64_t matrixStride(const Tensor& batched_matrices) {
+  return batched_matrices.size(-1) * batched_matrices.size(-2);
+}
+
+// Validates input shapes for operations on batches of square matrices (inverse, cholesky, symeig, eig)
+inline void checkIsMatrix(const Tensor& A, const char* const f_name, const char* const arg_name = "A") {
+  TORCH_CHECK(A.dim() >= 2, f_name, ": The input tensor ", arg_name, " must have at least 2 dimensions.");
+}
+inline void squareCheckInputs(const Tensor& self, const char* const f_name, const char* const arg_name = "A") {
+  checkIsMatrix(self, f_name, arg_name);
+  TORCH_CHECK(self.sym_size(-1) == self.sym_size(-2),
+              f_name,
+              ": ", arg_name, " must be batches of square matrices, "
+              "but they are ", self.sym_size(-2), " by ", self.sym_size(-1), " matrices");
+}
+
+inline void checkInputsSolver(const Tensor& A,
+                              const Tensor& B,
+                              const bool left,
+                              const char* const f_name) {
+  squareCheckInputs(A, f_name, "A");
+  checkIsMatrix(B, f_name, "B");
+  TORCH_CHECK(left ? A.size(-2) == B.size(-2) : A.size(-1) == B.size(-1),
+              f_name, ": Incompatible shapes of A and B for the equation ",
+              left ? "AX = B" : "XA = B",
+              " (", A.size(-2), "x", A.size(-1), " and ", B.size(-2), "x", B.size(-1), ")");
+}
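+
+// [Illustrative note, not part of the upstream header] Shape contract checked
+// above, with hypothetical batched operands:
+//
+//   left == true  (AX = B):  A (..., n, n),  B (..., n, k)  ->  A.size(-2) == B.size(-2)
+//   left == false (XA = B):  A (..., n, n),  B (..., k, n)  ->  A.size(-1) == B.size(-1)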
+inline bool is_row_or_column_contiguous(const Tensor& t) {
+  // This could be made more general, similar to how it's checked in matmul, which would allow us
+  // to elide the copy with strides such as (6, 12, 1, 3) or (3, 1, 9), but this is quite tricky.
+  // We choose to be conservative for simplicity
+  return t.is_contiguous() || t.transpose(-2, -1).is_contiguous();
+}
+
+inline TransposeType to_transpose_type(const bool contig, const bool conj) {
+  if (conj) {
+    if (contig) { TORCH_INTERNAL_ASSERT(false, "Invalid transpose type"); }
+    else {        return TransposeType::ConjTranspose; }
+  } else {
+    if (contig) { return TransposeType::NoTranspose; }
+    else {        return TransposeType::Transpose; }
+  }
+}
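+
+// [Illustrative note, not part of the upstream header] The mapping above as a
+// truth table; (contig, conj) == (true, true) asserts, presumably because
+// callers resolve conjugation on contiguous inputs (expect_resolved_conj)
+// before reaching a BLAS/LAPACK call:
+//
+//   contig=true,  conj=false  ->  NoTranspose
+//   contig=false, conj=false  ->  Transpose
+//   contig=false, conj=true   ->  ConjTranspose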
+
+// This function is designed to be used with linear algebra methods that minimize
+// L(ax - b) = 0, where L is generally the identity map (`solve`, for example)
+// or the L2 norm (`lstsq`).
+// It is expected that `a` and `b` are contiguous tensors of column-major matrices
+// (so that a.view({-1, a.size(-2), a.size(-1)}) succeeds, same for `b`),
+// with the following additional properties:
+//
+// 1. a.dim() == b.dim()
+// 2. a.shape[:-2] broadcasts over b.shape[:-2]
+// 3. a.size(i) <= b.size(i) for i=0,..., a.dim() - 3 (only for batch dimensions)
+//
+// MAGMA/LAPACK modify tensor `a` in-place, and the main goal of this method
+// is to be memory efficient, which means that if there exists an index i such that
+// a.shape[i] < b.shape[i], 0 <= i <= a.dim() - 3,
+// then instead of materializing copies of `a` in the broadcasted shape, we keep
+// a buffer copy of `a` along with flags that check whether specific batch dimension
+// indices for `a` were already accessed. If they were, we copy the data from the buffer
+// into `a`. The number of copies does not exceed
+// prod(max(a.shape[:-2], b.shape[:-2]) - a.shape[:-2] + 1)
+// and this value is attained by tensors with non-empty batch dimensions.
+//
+// func_t `f` is a callable that is being supplied with
+// scalar_t* a_working_ptr, scalar_t* b_working_ptr, int64_t a_linear_batch_idx.
+// a_working_ptr and b_working_ptr can directly be passed to LAPACK/MAGMA routines,
+// and a_linear_batch_idx is an index in the 3d representation which corresponds to
+// the memory a_working_ptr points to, in other words:
+// a_working_ptr == a.view({-1, a.size(-2), a.size(-1)}).select(0, a_linear_batch_idx).data_ptr<scalar_t>();
+// a_linear_batch_idx is useful to store metadata related to `a`, such as, for example,
+// its rank or singular values (see linalg_lstsq).
+template <typename scalar_t, typename func_t>
+void batch_iterator_with_broadcasting(const Tensor& a, const Tensor& b, const func_t& f) {
+  IntArrayRef a_batch_sizes(a.sizes().data(), a.dim() - 2);
+  IntArrayRef b_batch_sizes(b.sizes().data(), b.dim() - 2);
+
+  auto a_linear_batch_idx = at::arange(batchCount(a)).view(a_batch_sizes);
+  auto b_linear_batch_idx = at::arange(batchCount(b)).view(b_batch_sizes);
+
+  TensorIterator iter = TensorIteratorConfig()
+    .set_check_mem_overlap(false)
+    .check_all_same_dtype(false)
+    .resize_outputs(false)
+    .add_output(b_linear_batch_idx)
+    .add_input(a_linear_batch_idx)
+    .build();
+
+  auto m = a.size(-2);
+  auto n = a.size(-1);
+  auto a_3d = a.view({batchCount(a), m, n});
+  auto b_3d = b.view({batchCount(b), b.size(-2), b.size(-1)});
+
+  auto a_broadcasts_over_b = (a_batch_sizes != b_batch_sizes);
+  Tensor a_buffer, a_was_accessed, a_buffer_3d;
+  std::function<void(int64_t)> check_if_copy_needed_for_a
+    = [](int64_t /*a_curr_linear_batch_idx*/){};
+  if (a_broadcasts_over_b) {
+    a_buffer = at::empty_strided(a.sizes(), a.strides(), a.options())
+      .copy_(a);
+    a_was_accessed = at::zeros(batchCount(a), at::kBool);
+    a_buffer_3d = a_buffer.view({batchCount(a), m, n});
+    check_if_copy_needed_for_a = [&](int64_t a_curr_linear_batch_idx) {
+      auto* a_was_accessed_flag = a_was_accessed
+        .select(0, a_curr_linear_batch_idx)
+        .data_ptr<bool>();
+      if (!(*a_was_accessed_flag)) {
+        *a_was_accessed_flag = true;
+      }
+      else {
+        a_3d.select(0, a_curr_linear_batch_idx)
+          .copy_(a_buffer_3d.select(0, a_curr_linear_batch_idx));
+      }
+    };
+  }
+
+  auto loop = [&](char** data, const int64_t* strides, int64_t nelems) {
+    auto* b_batch_idx_ptr = data[0];
+    auto* a_batch_idx_ptr = data[1];
+
+    for (const auto elem C10_UNUSED : c10::irange(nelems)) {
+      auto b_curr_linear_batch_idx = *reinterpret_cast<int64_t*>(b_batch_idx_ptr);
+      auto a_curr_linear_batch_idx = *reinterpret_cast<int64_t*>(a_batch_idx_ptr);
+
+      check_if_copy_needed_for_a(a_curr_linear_batch_idx);
+
+      auto* a_working_ptr = a_3d.select(0, a_curr_linear_batch_idx)
+        .data_ptr<scalar_t>();
+      auto* b_working_ptr = b_3d.select(0, b_curr_linear_batch_idx)
+        .data_ptr<scalar_t>();
+      f(a_working_ptr, b_working_ptr, a_curr_linear_batch_idx);
+
+      b_batch_idx_ptr += strides[0];
+      a_batch_idx_ptr += strides[1];
+    }
+  };
+  iter.serial_for_each(loop, {0, batchCount(b)});
+}
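+
+// [Illustrative sketch, not part of the upstream header] A hypothetical caller;
+// `lapack_solve_inplace` stands in for any LAPACK-style routine:
+//
+//   // a: (1, 3, n, n) F-contiguous, b: (2, 3, n, 1) F-contiguous;
+//   // a's batch dims broadcast over b's.
+//   batch_iterator_with_broadcasting<double>(a, b,
+//     [&](double* a_working_ptr, double* b_working_ptr, int64_t a_linear_batch_idx) {
+//       lapack_solve_inplace(a_working_ptr, b_working_ptr);  // may clobber a_working_ptr
+//     });
+//
+// Because LAPACK may overwrite `a`, the helper restores a clobbered batch from
+// its pristine buffer the second time the same a_linear_batch_idx comes around.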
+
+// Returns the epsilon value for floating types except half
+inline double _get_epsilon(const ScalarType& sc_type) {
+  switch (sc_type) {
+    case at::ScalarType::Float:
+      return static_cast<double>(std::numeric_limits<float>::epsilon());
+    case at::ScalarType::Double:
+      return std::numeric_limits<double>::epsilon();
+    default:
+      AT_ERROR("This function doesn't handle types other than float and double");
+  }
+}
+
+// Validates input shapes and devices
+// for linear solve methods (solve, cholesky_solve, lu_solve, triangular_solve)
+inline void linearSolveCheckInputs(const Tensor& self, const Tensor& A, const char* name) {
+  TORCH_CHECK(self.device() == A.device(),
+              "Expected b and A to be on the same device, but found b on ",
+              self.device(), " and A on ", A.device(), " instead.");
+
+  TORCH_CHECK(self.scalar_type() == A.scalar_type(),
+              "Expected b and A to have the same dtype, but found b of type ",
+              self.scalar_type(), " and A of type ", A.scalar_type(), " instead.");
+
+  TORCH_CHECK(A.size(-1) == A.size(-2),
+              "A must be batches of square matrices, "
+              "but they are ", A.size(-2), " by ", A.size(-1), " matrices");
+
+  TORCH_CHECK(A.size(-1) == self.size(-2),
+              "Incompatible matrix sizes for ", name, ": each A "
+              "matrix is ", A.size(-1), " by ", A.size(-1),
+              " but each b matrix is ", self.size(-2), " by ", self.size(-1));
+}
+
+inline void checkFloatingOrComplex(const Tensor& t, const char* const f_name, const bool allow_low_precision_dtypes=true) {
+  auto dtype = t.scalar_type();
+  TORCH_CHECK((at::isFloatingType(dtype) || at::isComplexType(dtype)),
+              f_name, ": Expected a floating point or complex tensor as input. Got ", dtype);
+  if (!allow_low_precision_dtypes) {
+    TORCH_CHECK(dtype == kFloat || dtype == kDouble || dtype == kComplexFloat || dtype == kComplexDouble,
+                f_name, ": Low precision dtypes not supported. Got ", dtype);
+  }
+}
+
+
+// Checks that all the Tensors in a TensorList have the same number of dimensions
+inline void checkAllSameDim(TensorList tensors, int64_t dim) {
+  for (auto &t : tensors) {
+    TORCH_CHECK(t.dim() == dim, "Tensor dimension is ", t.dim(), ", expected ", dim, " instead.");
+  }
+}
+
+inline std::tuple<std::vector<int64_t>, std::vector<int64_t>> _linalg_broadcast_batch_dims(const Tensor& arg1, const Tensor& arg2) {
+  // broadcast the batch dimensions of arg1 and arg2.
+  IntArrayRef arg1_batch_sizes(arg1.sizes().data(), arg1.ndimension() - 2);
+  IntArrayRef arg2_batch_sizes(arg2.sizes().data(), arg2.ndimension() - 2);
+  std::vector<int64_t> expand_batch_portion = infer_size(arg1_batch_sizes, arg2_batch_sizes);
+
+  std::vector<int64_t> arg1_expand_size({expand_batch_portion});
+  arg1_expand_size.insert(arg1_expand_size.end(), { arg1.size(-2), arg1.size(-1) });
+
+  std::vector<int64_t> arg2_expand_size({expand_batch_portion});
+  arg2_expand_size.insert(arg2_expand_size.end(), { arg2.size(-2), arg2.size(-1) });
+  return std::make_tuple(std::move(arg1_expand_size), std::move(arg2_expand_size));
+}
+
+inline std::tuple<Tensor, Tensor> _linalg_broadcast_batch_dims(const Tensor& arg1, const Tensor& arg2, const char* name) {
+  // If there's no name we assume we don't want to check the errors
+  if (name != nullptr) {
+    linearSolveCheckInputs(arg1, arg2, name);
+  }
+
+  auto [arg1_expand_size, arg2_expand_size] = at::native::_linalg_broadcast_batch_dims(arg1, arg2);
+
+  auto arg1_broadcasted = arg1_expand_size == arg1.sizes() ? arg1 : arg1.expand(arg1_expand_size);
+  auto arg2_broadcasted = arg2_expand_size == arg2.sizes() ? arg2 : arg2.expand(arg2_expand_size);
+  return std::make_tuple(arg1_broadcasted, arg2_broadcasted);
+}
+
+inline std::vector<int64_t> broadcast_batch_size(const Tensor& t1, const Tensor& t2, int64_t n_batch_dims) {
+  IntArrayRef t1_batch_sizes(t1.sizes().data(), n_batch_dims);
+  IntArrayRef t2_batch_sizes(t2.sizes().data(), n_batch_dims);
+  auto broadcasted_batch_sizes = infer_size(t1_batch_sizes, t2_batch_sizes);
+  return broadcasted_batch_sizes;
+}
+
+// Return a permutation with the given axes moved to the end.
+inline Tensor _move_to_end(const Tensor& self, IntArrayRef axes) {
+  const std::vector<int64_t> a = axes.vec();
+  const int64_t ndim = self.ndimension();
+  std::vector<int64_t> perm;
+
+  for (const auto i : c10::irange(ndim)) {
+    auto it = std::find(a.begin(), a.end(), i);
+    if (it == a.end()) {
+      perm.push_back(i);
+    }
+  }
+  for (auto i : a) {
+    perm.push_back(i);
+  }
+
+  TORCH_CHECK((int64_t)perm.size() == ndim,
+    "duplicate or invalid axis in 'dim' argument for tensor with ndim==", ndim);
+
+  return self.permute(perm);
+}
+
+// parse the "mode" param in linalg_qr: return a tuple of bools (compute_q, reduced)
+inline std::tuple<bool, bool> _parse_qr_mode(c10::string_view mode) {
+  bool compute_q;
+  bool reduced;
+  if (mode == "reduced") {
+    compute_q = true;
+    reduced = true;
+  } else if (mode == "complete") {
+    compute_q = true;
+    reduced = false;
+  } else if (mode == "r") {
+    compute_q = false;
+    reduced = true; // this is actually irrelevant in this mode
+  } else {
+    TORCH_CHECK(false, "qr received unrecognized mode '", mode,
+                "' but expected one of 'reduced' (default), 'r', or 'complete'");
+  }
+  return std::make_tuple(compute_q, reduced);
+}
+
+// Function to compute sizes, strides and the extra columns for the Q matrix in the QR Decomposition
+inline std::tuple<DimVector, DimVector, int64_t> _compute_geometry_for_Q(
+    const Tensor& input,
+    bool reduced) {
+  int64_t m = input.size(-2), n = input.size(-1);
+  int64_t n_columns_q;
+
+  // We need to compute the required size of Q based on the `reduced` option
+  DimVector q_sizes(input.sizes());
+  if (!reduced && m > n) {
+    q_sizes[input.dim() - 1] = m;
+    n_columns_q = m;
+  } else {
+    q_sizes[input.dim() - 1] = n;
+    n_columns_q = std::min(m, n);
+  }
+  auto q_strides = batched_matrix_contiguous_strides(q_sizes, /*f-contig*/true);
+  return std::make_tuple(q_sizes, q_strides, n_columns_q);
+}
+
+inline bool svd_uses_cusolver(const Tensor& A) {
+  // if cusolver is available, it is used unconditionally
+  return A.is_cuda()
+    && at::globalContext().hasCuSOLVER()
+    && at::globalContext().linalgPreferredBackend() != at::LinalgBackend::Magma;
+}
+
+
+// Function used instead of .to so that the original strides are retained
+// .to doesn't retain strides and makes the output tensor contiguous
+inline Tensor same_stride_to(const Tensor& original_tensor, const at::TensorOptions& options) {
+  auto strided_to = at::empty_strided(original_tensor.sizes(),
+                                      original_tensor.strides(),
+                                      options);
+  strided_to.copy_(original_tensor);
+  return strided_to;
+}
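+
+// [Illustrative note, not part of the upstream header] _compute_geometry_for_Q
+// above, for a hypothetical tall input with (m, n) = (5, 3):
+//
+//   mode == "reduced"   ->  Q sizes (..., 5, 3), n_columns_q = 3
+//   mode == "complete"  ->  Q sizes (..., 5, 5), n_columns_q = 5
+//
+// For wide inputs (m <= n) both modes keep the last dimension at n and use
+// min(m, n) columns of Q.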
+
+// Creates a dimension permutation array that can be given to `at::permute()`, which will shift
+// the two specified dimensions to the end of a tensor, without changing the order of
+// the other dimensions. `dim1` will be placed at the very end, and `dim0` will be
+// placed just to the left of it.
+//
+// For instance, given a 4-D tensor, dimensions 1 and 3 can be shifted to the end by
+// calling `create_dim_backshift_permutation(1, 3, 4)`. The resulting vector will
+// be `vec(0, 2, 1, 3)`.
+inline std::vector<int64_t> create_dim_backshift_permutation(int64_t dim0, int64_t dim1, int64_t ndim) {
+  TORCH_CHECK(
+    (dim0 != dim1) && (dim0 < ndim) && (dim0 >= 0) && (dim1 < ndim) && (dim1 >= 0),
+    "duplicate or invalid dimensions");
+  std::vector<int64_t> permutation(ndim);
+  int64_t cur_permuted_dim = 0;
+  for (const auto dim_ind : c10::irange(ndim)) {
+    if ((dim_ind != dim0) && (dim_ind != dim1)) {
+      permutation[cur_permuted_dim++] = dim_ind;
+    }
+  }
+  permutation[cur_permuted_dim++] = dim0;
+  permutation[cur_permuted_dim] = dim1;
+  return permutation;
+}
+
+// Creates a dimension permutation array that can be given to `at::permute()`, which
+// will reverse a given permutation.
+// The reverse permutation array is created by swapping the indices and their
+// associated values from the given permutation array.
+inline std::vector<int64_t> create_reverse_permutation(std::vector<int64_t> permutation) {
+  int64_t ndim = permutation.size();
+  std::vector<int64_t> reverse_permutation(ndim);
+  for (const auto dim_ind : c10::irange(ndim)) {
+    reverse_permutation[permutation[dim_ind]] = dim_ind;
+  }
+  return reverse_permutation;
+}
+
+// Compute R-work array size for MAGMA/LAPACK cgesdd/zgesdd
+// See https://github.com/Reference-LAPACK/lapack/blob/122506cd8b6ce050a200920c3d4c0b153b150fd8/SRC/cgesdd.f#L186
+inline int64_t computeLRWorkDim(const char jobz, int64_t m, int64_t n) {
+  auto mn = std::min(m, n);
+  auto mx = std::max(m, n);
+  if (jobz == 'N') {
+#ifdef __APPLE__
+    // According to `vecLib.framework/Headers/clapack.h` Accelerate.framework is based on LAPACK 3.2.1
+    return 7 * mn;
+#else
+    // This setting is valid for LAPACK 3.6+
+    return 5 * mn;
+#endif
+  }
+  if (mx > 10 * mn) {
+    return 5 * mn * mn + 5 * mn;
+  }
+  return std::max(5 * mn * mn + 5 * mn, 2 * mx * mn + 2 * mn * mn + mn);
+}
+
+// This function checks whether the uplo argument input is valid
+// Allowed strings are "u", "U", "l", "L"
+inline void checkUplo(const c10::string_view uplo) {
+  // To use std::toupper safely with plain chars (or signed chars), the argument should first be converted to unsigned char
+  char uplo_uppercase = static_cast<char>(std::toupper(static_cast<unsigned char>(uplo[0])));
+  TORCH_CHECK(uplo.size() == 1 && (uplo_uppercase == 'U' || uplo_uppercase == 'L'),
+    "Expected UPLO argument to be 'L' or 'U', but got ", uplo);
+}
+
+inline void checkSameDevice(const std::string& fn_name, Tensor result, Tensor input, const std::string& result_name = "result") {
+  TORCH_CHECK(
+      result.device() == input.device(),
+      fn_name,
+      ": Expected ", result_name, " and input tensors to be on the same device, but got ",
+      result_name, " on ", result.device(), " and input on ", input.device());
+}
+
+// Check the dtype of result and input tensors (for _out variants).
+// Most linear algebra functions have the same dtype for input and output
+// (either floating or complex type input), so we can check whether input's dtype can be casted to result's dtype.
+// According to https://github.com/pytorch/pytorch/wiki/Developer-FAQ#how-does-out-work-in-pytorch
+// c10::canCast is used for checking the "safe copy" dtype requirements.
+inline void checkLinalgCompatibleDtype(const std::string& fn_name, Tensor result, Tensor input, const std::string& result_name = "result") { + bool can_cast = c10::canCast(input.scalar_type(), result.scalar_type()); + TORCH_CHECK( + can_cast, + fn_name, + ": Expected ", result_name, " to be safely castable from ", input.scalar_type(), " dtype, but got ", + result_name, " with dtype ", result.scalar_type()); +} + +// Alternatively, we can check whether the specific expected output type (result_type) can be safely casted to out tensor dtype (out_type) +inline void checkLinalgCompatibleDtype(const std::string& fn_name, ScalarType out_type, ScalarType result_type, const std::string& out_name = "result") { + bool can_cast = c10::canCast(result_type, out_type); + TORCH_CHECK( + can_cast, + fn_name, + ": Expected ", out_name, " to be safely castable from ", result_type, " dtype, but got ", + out_name, " with dtype ", out_type); +} + +inline void checkNotComplexTolerance(const Tensor& tol, const c10::string_view f_name, const c10::string_view tol_name) { + TORCH_CHECK(!at::isComplexType(tol.scalar_type()), + f_name, ": ", tol_name, " tensor of complex type is not supported. Got ", tol.scalar_type()); +} + +/* + Two types of 'other' tensors are supported when solving + a system of linear equations matmul(input, x) = other: + * 1-dimensional (1D) tensor or batch of 1D tensors (vector case) + * 2-dimensional (2D) tensor or batch of 2D tensors (matrix case). + The original torch.solve supported only the matrix case, while NumPy works for both cases. + For the batched input we need to be able to distinguish them. + Let input.shape = (batch_dimensions, m, n), then 'other' is of vector type if other.shape == (batch_dimensions, m). + This rule is compatible with NumPy, see https://github.com/numpy/numpy/blob/v1.20.0/numpy/linalg/linalg.py#L384-L389 +*/ +inline bool linalg_solve_is_vector_rhs(const Tensor& input, const Tensor& other) { + auto expected_batched_rhs_shape = SymIntArrayRef(input.sym_sizes().data(), input.dim() - 1); // input.shape[:-1] + bool vector_case = other.dim() == 1 || (input.dim() - 1 == other.dim() && other.sym_sizes().equals(expected_batched_rhs_shape)); + return vector_case; +} + +/* + Computes linear indices for a tensor with original_shape to access its elements like it was a materialized broadcast tensor. +*/ +inline Tensor get_linear_indices(int64_t numel, IntArrayRef original_shape, IntArrayRef broadcast_shape) { + TensorOptions options = at::TensorOptions().dtype(at::kLong).device(at::kCPU); + return at::arange(numel, options).view(original_shape).broadcast_to(broadcast_shape).contiguous(); +} + +class BroadcastLinearIndices { + private: + Tensor linear_indices_; + bool is_broadcasting_; + + public: + BroadcastLinearIndices( + int64_t numel, + IntArrayRef original_shape, + IntArrayRef broadcast_shape) : is_broadcasting_(!original_shape.equals(broadcast_shape)) { + // The assumption is that the broadcast_shape is a materialized broadcast + // shape of the original_shape. We need to compute the linear indices + // compatible with the original_shape to access the elements in the original + // tensor corresponding to the broadcast tensor. + if (is_broadcasting_) { + linear_indices_ = + get_linear_indices(numel, original_shape, broadcast_shape); + } + } + int64_t operator()(int64_t broadcast_linear_index) { + return is_broadcasting_ + ? 
linear_indices_.data_ptr()[broadcast_linear_index] + : broadcast_linear_index; + } +}; + +inline bool is_blas_compatible_column_major_order(const Tensor& input) { + IntArrayRef input_strides = input.strides(); + IntArrayRef input_sizes = input.sizes(); + auto ndim = input.dim(); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(ndim >= 2); + if (ndim > 3) { + return input.transpose(-2, -1).is_contiguous(); + } + auto leading_dimension = input_strides[ndim - 1]; + auto rows = input_sizes[ndim - 2]; + bool batch_stride_compatible = true; + if (ndim == 3) { + auto cols = input_sizes[ndim - 1]; + batch_stride_compatible = + input_strides[ndim - 3] >= leading_dimension * cols; + } + return (input_strides[ndim - 2] == 1) && + (leading_dimension >= std::max(1, rows)) && + batch_stride_compatible; +} + +inline bool is_blas_compatible_row_major_order(const Tensor& input) { + IntArrayRef input_strides = input.strides(); + IntArrayRef input_sizes = input.sizes(); + auto ndim = input.dim(); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(ndim >= 2); + if (ndim > 3) { + return input.is_contiguous(); + } + auto leading_dimension = input_strides[ndim - 2]; + auto cols = input_sizes[ndim - 1]; + bool batch_stride_compatible = true; + if (ndim == 3) { + auto rows = input_sizes[ndim - 2]; + batch_stride_compatible = + input_strides[ndim - 3] >= leading_dimension * rows; + } + return (input_strides[ndim - 1] == 1) && + (leading_dimension >= std::max(1, cols)) && + batch_stride_compatible; +} + +} // namespace at::native diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/Math.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/Math.h new file mode 100644 index 0000000000000000000000000000000000000000..e86a9aea411af523af3c94e8a3ec03f61da47d6e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/Math.h @@ -0,0 +1,3901 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +C10_CLANG_DIAGNOSTIC_PUSH() +#if C10_CLANG_HAS_WARNING("-Wimplicit-float-conversion") +C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-float-conversion") +#endif + +/* The next function is taken from https://github.com/antelopeusersgroup/antelope_contrib/blob/master/lib/location/libgenloc/erfinv.c. +Below is the copyright. +Output was modified to be inf or -inf when input is 1 or -1. */ + + +/* + Copyright (c) 2014 Indiana University + All rights reserved. + + Written by Prof. Gary L. Pavlis, Dept. of Geol. Sci., + Indiana University, Bloomington, IN + + This software is licensed under the New BSD license: + + Redistribution and use in source and binary forms, + with or without modification, are permitted provided + that the following conditions are met: + + Redistributions of source code must retain the above + copyright notice, this list of conditions and the + following disclaimer. + + Redistributions in binary form must reproduce the + above copyright notice, this list of conditions and + the following disclaimer in the documentation and/or + other materials provided with the distribution. + + Neither the name of Indiana University nor + the names of its contributors may be used to endorse + or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND + CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED + WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL + THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. +*/ + +namespace { +/* + * This function is derived from the implementation of the i0e function in the + * Cephes Math Library. See note [3-Clause BSD License for the Cephes Math + * Library]. + * + * Computes an approximation of the exponentially scaled zeroth order modified + * Bessel function of the first kind. The approximation is actually two + * (sub)approximations, both using a Chebyshev polynomial expansion. One + * approximates the function over [0, 8], and the other over (8, infinity). This + * function takes the absolute value of all inputs to convert them into the + * domain of the approximation. + */ +jiterator_also_stringify_as(jiterator_code( + template + JITERATOR_HOST_DEVICE T chbevl(T x, const T array[], const int len) { + T b0, b1, b2; + + b0 = array[0]; + b1 = 0; + + for (int i = 1; i < len; ++i) { + b2 = b1; + b1 = b0; + b0 = x * b1 - b2 + array[i]; + } + + return T{0.5} * (b0 - b2); + } + + template + JITERATOR_HOST_DEVICE T calc_i0e(T _x) { + T x = std::fabs(_x); + + if (x <= T{8.0}) { + static const T coefficients[] = { + -4.41534164647933937950E-18, 3.33079451882223809783E-17, + -2.43127984654795469359E-16, 1.71539128555513303061E-15, + -1.16853328779934516808E-14, 7.67618549860493561688E-14, + -4.85644678311192946090E-13, 2.95505266312963983461E-12, + -1.72682629144155570723E-11, 9.67580903537323691224E-11, + -5.18979560163526290666E-10, 2.65982372468238665035E-9, + -1.30002500998624804212E-8, 6.04699502254191894932E-8, + -2.67079385394061173391E-7, 1.11738753912010371815E-6, + -4.41673835845875056359E-6, 1.64484480707288970893E-5, + -5.75419501008210370398E-5, 1.88502885095841655729E-4, + -5.76375574538582365885E-4, 1.63947561694133579842E-3, + -4.32430999505057594430E-3, 1.05464603945949983183E-2, + -2.37374148058994688156E-2, 4.93052842396707084878E-2, + -9.49010970480476444210E-2, 1.71620901522208775349E-1, + -3.04682672343198398683E-1, 6.76795274409476084995E-1}; + + T y = (x / T{2.0}) - T{2.0}; + return chbevl(y, coefficients, int{30}); + } + + // x > 8 + static const T coefficients[] = { + -7.23318048787475395456E-18, -4.83050448594418207126E-18, + 4.46562142029675999901E-17, 3.46122286769746109310E-17, + -2.82762398051658348494E-16, -3.42548561967721913462E-16, + 1.77256013305652638360E-15, 3.81168066935262242075E-15, + -9.55484669882830764870E-15, -4.15056934728722208663E-14, + 1.54008621752140982691E-14, 3.85277838274214270114E-13, + 7.18012445138366623367E-13, -1.79417853150680611778E-12, + -1.32158118404477131188E-11, -3.14991652796324136454E-11, + 1.18891471078464383424E-11, 4.94060238822496958910E-10, + 3.39623202570838634515E-9, 2.26666899049817806459E-8, + 2.04891858946906374183E-7, 2.89137052083475648297E-6, + 6.88975834691682398426E-5, 3.36911647825569408990E-3, + 8.04490411014108831608E-1}; + + return chbevl(T{32.0} / x - T{2.0}, coefficients, int{25}) / std::sqrt(x); + }), + i0e_string); // i0e_string +} + +#define CENTRAL_RANGE 0.7 + +template +inline typename std::enable_if::value, T>::type +calc_erfinv(T y) { +/* 
Function to calculate inverse error function.  Rational approximation
+is used to generate an initial approximation, which is then improved to
+full accuracy by two steps of Newton's method.  Code is a direct
+translation of the erfinv m file in matlab version 2.0.
+Author:  Gary L. Pavlis, Indiana University
+Date: February 1996
+*/
+  T x, z, num, dem; /* working variables */
+  /* coefficients in rational expansion */
+  T a[4] = { T(0.886226899), T(-1.645349621), T(0.914624893), T(-0.140543331) };
+  T b[4] = { T(-2.118377725), T(1.442710462), T(-0.329097515), T(0.012229801) };
+  T c[4] = { T(-1.970840454), T(-1.624906493), T(3.429567803), T(1.641345311) };
+  T d[2] = { T(3.543889200), T(1.637067800) };
+  T y_abs = std::abs(y);
+  if (y_abs > 1.0) return std::numeric_limits<T>::quiet_NaN();
+#ifdef _WIN32
+  // error C2039: '_copysign': is not a member of 'std'
+  if (y_abs == 1.0) return copysign(std::numeric_limits<T>::infinity(), y);
+#else
+  if (y_abs == 1.0) return std::copysign(std::numeric_limits<T>::infinity(), y);
+#endif
+  if (y_abs <= static_cast<T>(CENTRAL_RANGE)) {
+    z = y * y;
+    num = (((a[3]*z + a[2])*z + a[1])*z + a[0]);
+    dem = ((((b[3]*z + b[2])*z + b[1])*z + b[0]) * z + static_cast<T>(1.0));
+    x = y * num / dem;
+  }
+  else {
+    z = std::sqrt(-std::log((static_cast<T>(1.0) - y_abs)/static_cast<T>(2.0)));
+    num = ((c[3]*z + c[2])*z + c[1]) * z + c[0];
+    dem = (d[1]*z + d[0])*z + static_cast<T>(1.0);
+#ifdef _WIN32
+    // error C2039: '_copysign': is not a member of 'std'
+    x = copysign(num, y) / dem;
+#else
+    x = std::copysign(num, y) / dem;
+#endif
+  }
+  /* Two steps of Newton-Raphson correction */
+  x = x - (std::erf(x) - y) / ((static_cast<T>(2.0)/static_cast<T>(std::sqrt(c10::pi<double>)))*std::exp(-x*x));
+  x = x - (std::erf(x) - y) / ((static_cast<T>(2.0)/static_cast<T>(std::sqrt(c10::pi<double>)))*std::exp(-x*x));
+
+  return(x);
+}
+
+#undef CENTRAL_RANGE
+
+/*
+ * Note [3-Clause BSD License for the Cephes Math Library]
+ * Code derived from implementations in the Cephes Math Library should mention its derivation and reference
+ * this note (ex. 'This function is derived from the implementation of X in the Cephes Math Library. See note
+ * [3-Clause BSD License for the Cephes Math Library]. The license is:
+ * Copyright (c) 2018, Steven Moshier
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * * Neither the name of the <organization> nor the
+ *   names of its contributors may be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED.
IN NO EVENT SHALL Steven Moshier BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * This function is derived from the implementation of the zeta function in the Cephes Math Library. + * See note [3-Clause BSD License for the Cephes Math Library]. + */ +template +C10_HOST_DEVICE inline scalar_t zeta(scalar_t x, scalar_t q) __ubsan_ignore_float_divide_by_zero__ { + using acc_t = at::acc_type; + const acc_t MACHEP = acc_t{1.11022302462515654042E-16}; + constexpr acc_t zero = acc_t{0.0}; + constexpr acc_t half = acc_t{0.5}; + constexpr acc_t one = acc_t{1.0}; + static const acc_t A[] = { + 12.0, + -720.0, + 30240.0, + -1209600.0, + 47900160.0, + -1.8924375803183791606e9, /*1.307674368e12/691*/ + 7.47242496e10, + -2.950130727918164224e12, /*1.067062284288e16/3617*/ + 1.1646782814350067249e14, /*5.109094217170944e18/43867*/ + -4.5979787224074726105e15, /*8.028576626982912e20/174611*/ + 1.8152105401943546773e17, /*1.5511210043330985984e23/854513*/ + -7.1661652561756670113e18 /*1.6938241367317436694528e27/236364091*/ + }; + + int i = 0; + acc_t a, b, k, s, t, w; + if (x == one) { + return std::numeric_limits::infinity(); + } + + if (x < one) { + return std::numeric_limits::quiet_NaN(); + } + + if (q <= zero) { + if (q == std::floor(q)) { + return std::numeric_limits::infinity(); + } + if (x != std::floor(x)) { + return std::numeric_limits::quiet_NaN(); + } + } + + s = std::pow(q, -x); + a = q; + i = 0; + b = zero; + while ((i < 9) || (a <= acc_t{9.0})) { + i += 1; + a += one; + b = ::pow(a, -x); + s += b; + if ((-MACHEP * s < b) && (b < MACHEP * s)) { + return static_cast(s); + } + }; + + w = a; + s += b * w / (x - one); + s -= half * b; + a = one; + k = zero; + for (int i = 0; i < 12; i++) { + a *= x + k; + b /= w; + t = a * b / A[i]; + s = s + t; + t = ::fabs(t / s); + if (t < MACHEP) { + return static_cast(s); + } + k += one; + a *= x + k; + b /= w; + k += one; + } + return static_cast(s); +} + +/* + * This function is derived from the implementation of the digamma function in the Cephes Math Library. + * See note [3-Clause BSD License for the Cephes Math Library]. + * + * Evaluates polynomial of degree N: + * + * 2 N + * y = C + C x + C x +...+ C x + * 0 1 2 N + * + * Coefficients are stored in reverse order: + * + * coef[0] = C , ..., coef[N] = C . 
+ * N 0 + */ +template +C10_HOST_DEVICE inline T polevl(const T x, const T A[], size_t len) { + T result = 0; + for (size_t i = 0; i <= len; i++) { + result = result * x + A[i]; + } + return result; +} + +inline double trigamma(double x) __ubsan_ignore_float_divide_by_zero__ { + double sign = +1; + double result = 0; + if (x < 0.5) { + sign = -1; + const double sin_pi_x = sin(c10::pi * x); + result -= (c10::pi * c10::pi) / (sin_pi_x * sin_pi_x); + x = 1 - x; + } + for (int i = 0; i < 6; ++i) { + result += 1 / (x * x); + x += 1; + } + const double ixx = 1 / (x*x); + result += (1 + 1 / (2*x) + ixx * (1./6 - ixx * (1./30 - ixx * (1./42)))) / x; + return sign * result; +} + +inline float trigamma(float x) __ubsan_ignore_float_divide_by_zero__ { + float sign = +1; + float result = 0; + if (x < 0.5f) { + sign = -1; + const float sin_pi_x = sinf(c10::pi * x); + result -= (c10::pi * c10::pi) / (sin_pi_x * sin_pi_x); + x = 1 - x; + } + for (int i = 0; i < 6; ++i) { + result += 1 / (x * x); + x += 1; + } + const float ixx = 1 / (x*x); + result += (1 + 1 / (2*x) + ixx * (1.f/6 - ixx * (1.f/30 - ixx * (1.f/42)))) / x; + return sign * result; +} + +/* + * This function is derived from the implementation of the digamma function in the Cephes Math Library. + * See note [3-Clause BSD License for the Cephes Math Library]. + */ +inline double calc_digamma(double x) { + // [C++ Standard Reference: Gamma Function] https://en.cppreference.com/w/cpp/numeric/math/tgamma + static double PSI_10 = 2.25175258906672110764; + if (x == 0) { + // As per C++ standard for gamma related functions and SciPy, + // If the argument is ±0, ±∞ is returned + return std::copysign(INFINITY, -x); + } + + bool x_is_integer = x == trunc(x); + if (x < 0) { + if (x_is_integer) { + // As per C++ standard for gamma related functions and SciPy, + // If the argument is a negative integer, NaN is returned + return std::numeric_limits::quiet_NaN(); + } + // Extracts the fractional part of x as r, since tan(pi * r) is more numerically + // accurate than tan(pi * x). While these operations are mathematically equivalent + // since both x and r are in radians and tan() has a periodicity of pi, in practice + // the computation of pi * x is a source of error (when |x| > 1). + double q, r; + r = std::modf(x, &q); + return calc_digamma(1 - x) - c10::pi / tan(c10::pi * r); + } + + // Push x to be >= 10 + double result = 0; + while (x < 10) { + result -= 1 / x; + x += 1; + } + if (x == 10) { + return result + PSI_10; + } + + // Compute asymptotic digamma + static const double A[] = { + 8.33333333333333333333E-2, + -2.10927960927960927961E-2, + 7.57575757575757575758E-3, + -4.16666666666666666667E-3, + 3.96825396825396825397E-3, + -8.33333333333333333333E-3, + 8.33333333333333333333E-2, + }; + + double y = 0; + if (x < 1.0e17) { + double z = 1.0 / (x * x); + y = z * polevl(z, A, 6); + } + return result + log(x) - (0.5 / x) - y; +} + +/* + * This function is derived from the implementation of the digamma function in the Cephes Math Library. + * See note [3-Clause BSD License for the Cephes Math Library]. 
+ */ +inline float calc_digamma(float x) { + // See [C++ Standard Reference: Gamma Function] + static float PSI_10 = 2.25175258906672110764f; + if (x == 0) { + // As per C++ standard for gamma related functions and SciPy, + // If the argument is ±0, ±∞ is returned + return std::copysign(INFINITY, -x); + } + + bool x_is_integer = x == truncf(x); + if (x < 0) { + if (x_is_integer) { + // As per C++ standard for gamma related functions and SciPy, + // If the argument is a negative integer, NaN is returned + return std::numeric_limits::quiet_NaN(); + } + // Extracts the fractional part of x as r, since tan(pi * r) is more numerically + // accurate than tan(pi * x). While these operations are mathematically equivalent + // since both x and r are in radians and tan() has a periodicity of pi, in practice + // the computation of pi * x is a source of error (when |x| > 1). + double q, r; + r = std::modf(x, &q); + float pi_over_tan_pi_x = (float)(c10::pi / tan(c10::pi * r)); + return calc_digamma(1 - x) - pi_over_tan_pi_x; + } + + // Push x to be >= 10 + float result = 0; + while (x < 10) { + result -= 1 / x; + x += 1; + } + if (x == 10) { + return result + PSI_10; + } + + // Compute asymptotic digamma + static const float A[] = { + 8.33333333333333333333E-2f, + -2.10927960927960927961E-2f, + 7.57575757575757575758E-3f, + -4.16666666666666666667E-3f, + 3.96825396825396825397E-3f, + -8.33333333333333333333E-3f, + 8.33333333333333333333E-2f, + }; + + float y = 0; + if (x < 1.0e17f) { + float z = 1 / (x * x); + y = z * polevl(z, A, 6); + } + return result + logf(x) - (0.5f / x) - y; +} + +inline c10::BFloat16 calc_digamma(c10::BFloat16 a) { + return calc_digamma(static_cast(a)); +} + +inline c10::Half calc_digamma(c10::Half a) { + return calc_digamma(static_cast(a)); +} + +template +inline C10_HOST_DEVICE scalar_t calc_polygamma(scalar_t x, int n) { + // already blocked if n <= 1 + const auto one = scalar_t{1}; + return ((n % 2) ? one : -one) * + std::exp(std::lgamma(static_cast(n) + one)) * + zeta(static_cast(n + 1), x); +} + +// regularized lower incomplete gamma +// the regularized lower, upper incomplete gamma, as well as their +// helper functions follow SciPy's implementation + +/* References + * [igam1] "The Digital Library of Mathematical Functions", dlmf.nist.gov + * [igam2] Maddock et al., "Incomplete Gamma Functions", + * https://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html + */ + +/* + * This implementation of the regularized incomplete gamma functions and + * their helper functions are derived from the implementation of SciPy's + * gammainc, Cephes's igam and igamc, and Boost's Lanczos approximations. + * See NOTICE for the licenses. + */ +template +scalar_t ratevl(scalar_t x, const scalar_t num[], int64_t M, + const scalar_t denom[], int64_t N) { + // evaluating rational function, i.e., the ratio of two polynomials + // the coefficients for numerator are given by `num` while coeffs for + // denumerator are given by `denom` + + int64_t i, dir; + scalar_t y, num_ans, denom_ans; + scalar_t absx = std::fabs(x); + const scalar_t *p; + + if (absx > 1) { + /* Evaluate as a polynomial in 1/x. 
*/ + dir = -1; + p = num + M; + y = 1 / x; + } + else { + dir = 1; + p = num; + y = x; + } + + /* Evaluate the numerator */ + num_ans = *p; + p += dir; + for (i = 1; i <= M; i++) { + num_ans = num_ans * y + *p; + p += dir; + } + /* Evaluate the denominator */ + if (absx > 1) { + p = denom + N; + } + else { + p = denom; + } + + denom_ans = *p; + p += dir; + for (i = 1; i <= N; i++) { + denom_ans = denom_ans * y + *p; + p += dir; + } + if (absx > 1) { + i = N - M; + return std::pow(x, i) * num_ans / denom_ans; + } + else { + return num_ans / denom_ans; + } +} + +// SciPy's lanczos implementation is taken from Boost +/* (C) Copyright John Maddock 2006. + * Use, modification and distribution are subject to the + * Boost Software License, Version 1.0. See + * https://www.boost.org/LICENSE_1_0.txt or see NOTICE. + */ +template +static scalar_t lanczos_sum_expg_scaled(scalar_t x) { + // lanczos approximation + static const scalar_t lanczos_sum_expg_scaled_num[13] = { + 0.006061842346248906525783753964555936883222, + 0.5098416655656676188125178644804694509993, + 19.51992788247617482847860966235652136208, + 449.9445569063168119446858607650988409623, + 6955.999602515376140356310115515198987526, + 75999.29304014542649875303443598909137092, + 601859.6171681098786670226533699352302507, + 3481712.15498064590882071018964774556468, + 14605578.08768506808414169982791359218571, + 43338889.32467613834773723740590533316085, + 86363131.28813859145546927288977868422342, + 103794043.1163445451906271053616070238554, + 56906521.91347156388090791033559122686859 + }; + static const scalar_t lanczos_sum_expg_scaled_denom[13] = { + 1., + 66., + 1925., + 32670., + 357423., + 2637558., + 13339535., + 45995730., + 105258076., + 150917976., + 120543840., + 39916800., + 0. + }; + return ratevl(x, lanczos_sum_expg_scaled_num, + sizeof(lanczos_sum_expg_scaled_num) / sizeof(lanczos_sum_expg_scaled_num[0]) - 1, + lanczos_sum_expg_scaled_denom, + sizeof(lanczos_sum_expg_scaled_denom) / sizeof(lanczos_sum_expg_scaled_denom[0]) - 1); +} + +template +static scalar_t _igam_helper_fac(scalar_t a, scalar_t x) { + // compute x^a * exp(-a) / gamma(a) + // corrected from (15) and (16) in [igam2] by replacing exp(x - a) with + // exp(a - x). + + scalar_t ax, fac, res, num, numfac; + static scalar_t MAXLOG = std::is_same::value ? + 7.09782712893383996843E2 : 88.72283905206835; + static scalar_t EXP1 = 2.718281828459045; + static scalar_t lanczos_g = 6.024680040776729583740234375; + + if (std::fabs(a - x) > 0.4 * std::fabs(a)) { + ax = a * std::log(x) - x - std::lgamma(a); + if (ax < -MAXLOG) { + return 0.0; + } + return std::exp(ax); + } + + fac = a + lanczos_g - 0.5; + res = std::sqrt(fac / EXP1) / lanczos_sum_expg_scaled(a); + + if ((a < 200) && (x < 200)) { + res *= std::exp(a - x) * std::pow(x / fac, a); + } + else { + num = x - a - lanczos_g + 0.5; + numfac = num / fac; + res *= std::exp(a * (std::log1p(numfac) - numfac) + x * (0.5 - lanczos_g) / fac); + } + return res; +} + +template +static scalar_t _igam_helper_series(scalar_t a, scalar_t x) { + // Compute igam using DLMF 8.11.4. [igam1] + static scalar_t MACHEP = std::is_same::value ? 
+ 1.11022302462515654042E-16 : 5.9604644775390625E-8; + static int MAXITER = 2000; + + int i; + scalar_t ans, ax, c, r; + + ax = _igam_helper_fac(a, x); + if (ax == 0.0) { + return 0.0; + } + + /* power series */ + r = a; + c = 1.0; + ans = 1.0; + + for (i = 0; i < MAXITER; i++) { + r += 1.0; + c *= x / r; + ans += c; + if (c <= MACHEP * ans) { + break; + } + } + return (ans * ax / a); +} + +template +static scalar_t _igamc_helper_series(scalar_t a, scalar_t x) { + // Compute igamc using DLMF 8.7.3 [igam1]. This is related to the series in + // _igam_helper_series but extra care is taken to avoid cancellation. + + int n; + scalar_t fac = 1; + scalar_t sum = 0; + scalar_t term, logx; + static scalar_t MAXITER = 2000; + static scalar_t MACHEP = std::is_same::value ? + 1.11022302462515654042E-16 : 5.9604644775390625E-8; + + for (n = 1; n < MAXITER; n++) { + fac *= -x / n; + term = fac / (a + n); + sum += term; + if (std::fabs(term) <= MACHEP * std::fabs(sum)) { + break; + } + } + + logx = std::log(x); + term = -std::expm1(a * logx - std::lgamma(1+a)); + return term - std::exp(a * logx - std::lgamma(a)) * sum; +} + +template +static scalar_t _igam_helper_asymptotic_series(scalar_t a, scalar_t x, bool igam) { + // Compute igam/igamc using DLMF 8.12.3/8.12.4 [igam1] + static const scalar_t d[25][25] = + {{-3.3333333333333333e-1, 8.3333333333333333e-2, -1.4814814814814815e-2, + 1.1574074074074074e-3, 3.527336860670194e-4, -1.7875514403292181e-4, + 3.9192631785224378e-5, -2.1854485106799922e-6, -1.85406221071516e-6, + 8.296711340953086e-7, -1.7665952736826079e-7, 6.7078535434014986e-9, + 1.0261809784240308e-8, -4.3820360184533532e-9, 9.1476995822367902e-10, + -2.551419399494625e-11, -5.8307721325504251e-11, 2.4361948020667416e-11, + -5.0276692801141756e-12, 1.1004392031956135e-13, 3.3717632624009854e-13, + -1.3923887224181621e-13, 2.8534893807047443e-14, -5.1391118342425726e-16, + -1.9752288294349443e-15}, + {-1.8518518518518519e-3, -3.4722222222222222e-3, 2.6455026455026455e-3, + -9.9022633744855967e-4, 2.0576131687242798e-4, -4.0187757201646091e-7, + -1.8098550334489978e-5, 7.6491609160811101e-6, -1.6120900894563446e-6, + 4.6471278028074343e-9, 1.378633446915721e-7, -5.752545603517705e-8, + 1.1951628599778147e-8, -1.7543241719747648e-11, -1.0091543710600413e-9, + 4.1627929918425826e-10, -8.5639070264929806e-11, 6.0672151016047586e-14, + 7.1624989648114854e-12, -2.9331866437714371e-12, 5.9966963656836887e-13, + -2.1671786527323314e-16, -4.9783399723692616e-14, 2.0291628823713425e-14, + -4.13125571381061e-15}, + {4.1335978835978836e-3, -2.6813271604938272e-3, 7.7160493827160494e-4, + 2.0093878600823045e-6, -1.0736653226365161e-4, 5.2923448829120125e-5, + -1.2760635188618728e-5, 3.4235787340961381e-8, 1.3721957309062933e-6, + -6.298992138380055e-7, 1.4280614206064242e-7, -2.0477098421990866e-10, + -1.4092529910867521e-8, 6.228974084922022e-9, -1.3670488396617113e-9, + 9.4283561590146782e-13, 1.2872252400089318e-10, -5.5645956134363321e-11, + 1.1975935546366981e-11, -4.1689782251838635e-15, -1.0940640427884594e-12, + 4.6622399463901357e-13, -9.905105763906906e-14, 1.8931876768373515e-17, + 8.8592218725911273e-15}, + {6.4943415637860082e-4, 2.2947209362139918e-4, -4.6918949439525571e-4, + 2.6772063206283885e-4, -7.5618016718839764e-5, -2.3965051138672967e-7, + 1.1082654115347302e-5, -5.6749528269915966e-6, 1.4230900732435884e-6, + -2.7861080291528142e-11, -1.6958404091930277e-7, 8.0994649053880824e-8, + -1.9111168485973654e-8, 2.3928620439808118e-12, 2.0620131815488798e-9, + -9.4604966618551322e-10, 
2.1541049775774908e-10, -1.388823336813903e-14, + -2.1894761681963939e-11, 9.7909989511716851e-12, -2.1782191880180962e-12, + 6.2088195734079014e-17, 2.126978363279737e-13, -9.3446887915174333e-14, + 2.0453671226782849e-14}, + {-8.618882909167117e-4, 7.8403922172006663e-4, -2.9907248030319018e-4, + -1.4638452578843418e-6, 6.6414982154651222e-5, -3.9683650471794347e-5, + 1.1375726970678419e-5, 2.5074972262375328e-10, -1.6954149536558306e-6, + 8.9075075322053097e-7, -2.2929348340008049e-7, 2.956794137544049e-11, + 2.8865829742708784e-8, -1.4189739437803219e-8, 3.4463580499464897e-9, + -2.3024517174528067e-13, -3.9409233028046405e-10, 1.8602338968504502e-10, + -4.356323005056618e-11, 1.2786001016296231e-15, 4.6792750266579195e-12, + -2.1492464706134829e-12, 4.9088156148096522e-13, -6.3385914848915603e-18, + -5.0453320690800944e-14}, + {-3.3679855336635815e-4, -6.9728137583658578e-5, 2.7727532449593921e-4, + -1.9932570516188848e-4, 6.7977804779372078e-5, 1.419062920643967e-7, + -1.3594048189768693e-5, 8.0184702563342015e-6, -2.2914811765080952e-6, + -3.252473551298454e-10, 3.4652846491085265e-7, -1.8447187191171343e-7, + 4.8240967037894181e-8, -1.7989466721743515e-14, -6.3061945000135234e-9, + 3.1624176287745679e-9, -7.8409242536974293e-10, 5.1926791652540407e-15, + 9.3589442423067836e-11, -4.5134262161632782e-11, 1.0799129993116827e-11, + -3.661886712685252e-17, -1.210902069055155e-12, 5.6807435849905643e-13, + -1.3249659916340829e-13}, + {5.3130793646399222e-4, -5.9216643735369388e-4, 2.7087820967180448e-4, + 7.9023532326603279e-7, -8.1539693675619688e-5, 5.6116827531062497e-5, + -1.8329116582843376e-5, -3.0796134506033048e-9, 3.4651553688036091e-6, + -2.0291327396058604e-6, 5.7887928631490037e-7, 2.338630673826657e-13, + -8.8286007463304835e-8, 4.7435958880408128e-8, -1.2545415020710382e-8, + 8.6496488580102925e-14, 1.6846058979264063e-9, -8.5754928235775947e-10, + 2.1598224929232125e-10, -7.6132305204761539e-16, -2.6639822008536144e-11, + 1.3065700536611057e-11, -3.1799163902367977e-12, 4.7109761213674315e-18, + 3.6902800842763467e-13}, + {3.4436760689237767e-4, 5.1717909082605922e-5, -3.3493161081142236e-4, + 2.812695154763237e-4, -1.0976582244684731e-4, -1.2741009095484485e-7, + 2.7744451511563644e-5, -1.8263488805711333e-5, 5.7876949497350524e-6, + 4.9387589339362704e-10, -1.0595367014026043e-6, 6.1667143761104075e-7, + -1.7562973359060462e-7, -1.2974473287015439e-12, 2.695423606288966e-8, + -1.4578352908731271e-8, 3.887645959386175e-9, -3.8810022510194121e-17, + -5.3279941738772867e-10, 2.7437977643314845e-10, -6.9957960920705679e-11, + 2.5899863874868481e-17, 8.8566890996696381e-12, -4.403168815871311e-12, + 1.0865561947091654e-12}, + {-6.5262391859530942e-4, 8.3949872067208728e-4, -4.3829709854172101e-4, + -6.969091458420552e-7, 1.6644846642067548e-4, -1.2783517679769219e-4, + 4.6299532636913043e-5, 4.5579098679227077e-9, -1.0595271125805195e-5, + 6.7833429048651666e-6, -2.1075476666258804e-6, -1.7213731432817145e-11, + 3.7735877416110979e-7, -2.1867506700122867e-7, 6.2202288040189269e-8, + 6.5977038267330006e-16, -9.5903864974256858e-9, 5.2132144922808078e-9, + -1.3991589583935709e-9, 5.382058999060575e-16, 1.9484714275467745e-10, + -1.0127287556389682e-10, 2.6077347197254926e-11, -5.0904186999932993e-18, + -3.3721464474854592e-12}, + {-5.9676129019274625e-4, -7.2048954160200106e-5, 6.7823088376673284e-4, + -6.4014752602627585e-4, 2.7750107634328704e-4, 1.8197008380465151e-7, + -8.4795071170685032e-5, 6.105192082501531e-5, -2.1073920183404862e-5, + -8.8585890141255994e-10, 
4.5284535953805377e-6, -2.8427815022504408e-6, + 8.7082341778646412e-7, 3.6886101871706965e-12, -1.5344695190702061e-7, + 8.862466778790695e-8, -2.5184812301826817e-8, -1.0225912098215092e-14, + 3.8969470758154777e-9, -2.1267304792235635e-9, 5.7370135528051385e-10, + -1.887749850169741e-19, -8.0931538694657866e-11, 4.2382723283449199e-11, + -1.1002224534207726e-11}, + {1.3324454494800656e-3, -1.9144384985654775e-3, 1.1089369134596637e-3, + 9.932404122642299e-7, -5.0874501293093199e-4, 4.2735056665392884e-4, + -1.6858853767910799e-4, -8.1301893922784998e-9, 4.5284402370562147e-5, + -3.127053674781734e-5, 1.044986828530338e-5, 4.8435226265680926e-11, + -2.1482565873456258e-6, 1.329369701097492e-6, -4.0295693092101029e-7, + -1.7567877666323291e-13, 7.0145043163668257e-8, -4.040787734999483e-8, + 1.1474026743371963e-8, 3.9642746853563325e-18, -1.7804938269892714e-9, + 9.7480262548731646e-10, -2.6405338676507616e-10, 5.794875163403742e-18, + 3.7647749553543836e-11}, + {1.579727660730835e-3, 1.6251626278391582e-4, -2.0633421035543276e-3, + 2.1389686185689098e-3, -1.0108559391263003e-3, -3.9912705529919201e-7, + 3.6235025084764691e-4, -2.8143901463712154e-4, 1.0449513336495887e-4, + 2.1211418491830297e-9, -2.5779417251947842e-5, 1.7281818956040463e-5, + -5.6413773872904282e-6, -1.1024320105776174e-11, 1.1223224418895175e-6, + -6.8693396379526735e-7, 2.0653236975414887e-7, 4.6714772409838506e-14, + -3.5609886164949055e-8, 2.0470855345905963e-8, -5.8091738633283358e-9, + -1.332821287582869e-16, 9.0354604391335133e-10, -4.9598782517330834e-10, + 1.3481607129399749e-10}, + {-4.0725121195140166e-3, 6.4033628338080698e-3, -4.0410161081676618e-3, + -2.183732802866233e-6, 2.1740441801254639e-3, -1.9700440518418892e-3, + 8.3595469747962458e-4, 1.9445447567109655e-8, -2.5779387120421696e-4, + 1.9009987368139304e-4, -6.7696499937438965e-5, -1.4440629666426572e-10, + 1.5712512518742269e-5, -1.0304008744776893e-5, 3.304517767401387e-6, + 7.9829760242325709e-13, -6.4097794149313004e-7, 3.8894624761300056e-7, + -1.1618347644948869e-7, -2.816808630596451e-15, 1.9878012911297093e-8, + -1.1407719956357511e-8, 3.2355857064185555e-9, 4.1759468293455945e-20, + -5.0423112718105824e-10}, + {-5.9475779383993003e-3, -5.4016476789260452e-4, 8.7910413550767898e-3, + -9.8576315587856125e-3, 5.0134695031021538e-3, 1.2807521786221875e-6, + -2.0626019342754683e-3, 1.7109128573523058e-3, -6.7695312714133799e-4, + -6.9011545676562133e-9, 1.8855128143995902e-4, -1.3395215663491969e-4, + 4.6263183033528039e-5, 4.0034230613321351e-11, -1.0255652921494033e-5, + 6.612086372797651e-6, -2.0913022027253008e-6, -2.0951775649603837e-13, + 3.9756029041993247e-7, -2.3956211978815887e-7, 7.1182883382145864e-8, + 8.925574873053455e-16, -1.2101547235064676e-8, 6.9350618248334386e-9, + -1.9661464453856102e-9}, + {1.7402027787522711e-2, -2.9527880945699121e-2, 2.0045875571402799e-2, + 7.0289515966903407e-6, -1.2375421071343148e-2, 1.1976293444235254e-2, + -5.4156038466518525e-3, -6.3290893396418616e-8, 1.8855118129005065e-3, + -1.473473274825001e-3, 5.5515810097708387e-4, 5.2406834412550662e-10, + -1.4357913535784836e-4, 9.9181293224943297e-5, -3.3460834749478311e-5, + -3.5755837291098993e-12, 7.1560851960630076e-6, -4.5516802628155526e-6, + 1.4236576649271475e-6, 1.8803149082089664e-14, -2.6623403898929211e-7, + 1.5950642189595716e-7, -4.7187514673841102e-8, -6.5107872958755177e-17, + 7.9795091026746235e-9}, + {3.0249124160905891e-2, 2.4817436002649977e-3, -4.9939134373457022e-2, + 5.9915643009307869e-2, -3.2483207601623391e-2, 
-5.7212968652103441e-6, + 1.5085251778569354e-2, -1.3261324005088445e-2, 5.5515262632426148e-3, + 3.0263182257030016e-8, -1.7229548406756723e-3, 1.2893570099929637e-3, + -4.6845138348319876e-4, -1.830259937893045e-10, 1.1449739014822654e-4, + -7.7378565221244477e-5, 2.5625836246985201e-5, 1.0766165333192814e-12, + -5.3246809282422621e-6, 3.349634863064464e-6, -1.0381253128684018e-6, + -5.608909920621128e-15, 1.9150821930676591e-7, -1.1418365800203486e-7, + 3.3654425209171788e-8}, + {-9.9051020880159045e-2, 1.7954011706123486e-1, -1.2989606383463778e-1, + -3.1478872752284357e-5, 9.0510635276848131e-2, -9.2828824411184397e-2, + 4.4412112839877808e-2, 2.7779236316835888e-7, -1.7229543805449697e-2, + 1.4182925050891573e-2, -5.6214161633747336e-3, -2.39598509186381e-9, + 1.6029634366079908e-3, -1.1606784674435773e-3, 4.1001337768153873e-4, + 1.8365800754090661e-11, -9.5844256563655903e-5, 6.3643062337764708e-5, + -2.076250624489065e-5, -1.1806020912804483e-13, 4.2131808239120649e-6, + -2.6262241337012467e-6, 8.0770620494930662e-7, 6.0125912123632725e-16, + -1.4729737374018841e-7}, + {-1.9994542198219728e-1, -1.5056113040026424e-2, 3.6470239469348489e-1, + -4.6435192311733545e-1, 2.6640934719197893e-1, 3.4038266027147191e-5, + -1.3784338709329624e-1, 1.276467178337056e-1, -5.6213828755200985e-2, + -1.753150885483011e-7, 1.9235592956768113e-2, -1.5088821281095315e-2, + 5.7401854451350123e-3, 1.0622382710310225e-9, -1.5335082692563998e-3, + 1.0819320643228214e-3, -3.7372510193945659e-4, -6.6170909729031985e-12, + 8.4263617380909628e-5, -5.5150706827483479e-5, 1.7769536448348069e-5, + 3.8827923210205533e-14, -3.53513697488768e-6, 2.1865832130045269e-6, + -6.6812849447625594e-7}, + {7.2438608504029431e-1, -1.3918010932653375, 1.0654143352413968, + 1.876173868950258e-4, -8.2705501176152696e-1, 8.9352433347828414e-1, + -4.4971003995291339e-1, -1.6107401567546652e-6, 1.9235590165271091e-1, + -1.6597702160042609e-1, 6.8882222681814333e-2, 1.3910091724608687e-8, + -2.146911561508663e-2, 1.6228980898865892e-2, -5.9796016172584256e-3, + -1.1287469112826745e-10, 1.5167451119784857e-3, -1.0478634293553899e-3, + 3.5539072889126421e-4, 8.1704322111801517e-13, -7.7773013442452395e-5, + 5.0291413897007722e-5, -1.6035083867000518e-5, 1.2469354315487605e-14, + 3.1369106244517615e-6}, + {1.6668949727276811, 1.165462765994632e-1, -3.3288393225018906, + 4.4692325482864037, -2.6977693045875807, -2.600667859891061e-4, + 1.5389017615694539, -1.4937962361134612, 6.8881964633233148e-1, + 1.3077482004552385e-6, -2.5762963325596288e-1, 2.1097676102125449e-1, + -8.3714408359219882e-2, -7.7920428881354753e-9, 2.4267923064833599e-2, + -1.7813678334552311e-2, 6.3970330388900056e-3, 4.9430807090480523e-11, + -1.5554602758465635e-3, 1.0561196919903214e-3, -3.5277184460472902e-4, + 9.3002334645022459e-14, 7.5285855026557172e-5, -4.8186515569156351e-5, + 1.5227271505597605e-5}, + {-6.6188298861372935, 1.3397985455142589e+1, -1.0789350606845146e+1, + -1.4352254537875018e-3, 9.2333694596189809, -1.0456552819547769e+1, + 5.5105526029033471, 1.2024439690716742e-5, -2.5762961164755816, + 2.3207442745387179, -1.0045728797216284, -1.0207833290021914e-7, + 3.3975092171169466e-1, -2.6720517450757468e-1, 1.0235252851562706e-1, + 8.4329730484871625e-10, -2.7998284958442595e-2, 2.0066274144976813e-2, + -7.0554368915086242e-3, 1.9402238183698188e-12, 1.6562888105449611e-3, + -1.1082898580743683e-3, 3.654545161310169e-4, -5.1290032026971794e-11, + -7.6340103696869031e-5}, + {-1.7112706061976095e+1, -1.1208044642899116, 3.7131966511885444e+1, + 
-5.2298271025348962e+1, 3.3058589696624618e+1, 2.4791298976200222e-3,
+      -2.061089403411526e+1, 2.088672775145582e+1, -1.0045703956517752e+1,
+      -1.2238783449063012e-5, 4.0770134274221141, -3.473667358470195,
+      1.4329352617312006, 7.1359914411879712e-8, -4.4797257159115612e-1,
+      3.4112666080644461e-1, -1.2699786326594923e-1, -2.8953677269081528e-10,
+      3.3125776278259863e-2, -2.3274087021036101e-2, 8.0399993503648882e-3,
+      -1.177805216235265e-9, -1.8321624891071668e-3, 1.2108282933588665e-3,
+      -3.9479941246822517e-4},
+     {7.389033153567425e+1, -1.5680141270402273e+2, 1.322177542759164e+2,
+      1.3692876877324546e-2, -1.2366496885920151e+2, 1.4620689391062729e+2,
+      -8.0365587724865346e+1, -1.1259851148881298e-4, 4.0770132196179938e+1,
+      -3.8210340013273034e+1, 1.719522294277362e+1, 9.3519707955168356e-7,
+      -6.2716159907747034, 5.1168999071852637, -2.0319658112299095,
+      -4.9507215582761543e-9, 5.9626397294332597e-1, -4.4220765337238094e-1,
+      1.6079998700166273e-1, -2.4733786203223402e-8, -4.0307574759979762e-2,
+      2.7849050747097869e-2, -9.4751858992054221e-3, 6.419922235909132e-6,
+      2.1250180774699461e-3},
+     {2.1216837098382522e+2, 1.3107863022633868e+1, -4.9698285932871748e+2,
+      7.3121595266969204e+2, -4.8213821720890847e+2, -2.8817248692894889e-2,
+      3.2616720302947102e+2, -3.4389340280087117e+2, 1.7195193870816232e+2,
+      1.4038077378096158e-4, -7.52594195897599e+1, 6.651969984520934e+1,
+      -2.8447519748152462e+1, -7.613702615875391e-7, 9.5402237105304373,
+      -7.5175301113311376, 2.8943997568871961, -4.6612194999538201e-7,
+      -8.0615149598794088e-1, 5.8483006570631029e-1, -2.0845408972964956e-1,
+      1.4765818959305817e-4, 5.1000433863753019e-2, -3.3066252141883665e-2,
+      1.5109265210467774e-2},
+     {-9.8959643098322368e+2, 2.1925555360905233e+3, -1.9283586782723356e+3,
+      -1.5925738122215253e-1, 1.9569985945919857e+3, -2.4072514765081556e+3,
+      1.3756149959336496e+3, 1.2920735237496668e-3, -7.525941715948055e+2,
+      7.3171668742208716e+2, -3.4137023466220065e+2, -9.9857390260608043e-6,
+      1.3356313181291573e+2, -1.1276295161252794e+2, 4.6310396098204458e+1,
+      -7.9237387133614756e-6, -1.4510726927018646e+1, 1.1111771248100563e+1,
+      -4.1690817945270892, 3.1008219800117808e-3, 1.1220095449981468,
+      -7.6052379926149916e-1, 3.6262236505085254e-1, 2.216867741940747e-1,
+      4.8683443692930507e-1}};
+
+  int k, n, sgn;
+  int maxpow = 0;
+  static scalar_t MACHEP = std::is_same<scalar_t, double>::value ?
+      1.11022302462515654042E-16 : 5.9604644775390625E-8;
+  scalar_t lambda = x / a;
+  scalar_t sigma = (x - a) / a;
+  scalar_t eta, res, ck, ckterm, term, absterm;
+  scalar_t absoldterm = INFINITY;
+  scalar_t etapow[25] = {1};
+  scalar_t sum = 0;
+  scalar_t afac = 1;
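+
+  // Sketch of the expansion behind the loop below, in the notation of
+  // DLMF 8.12.3 / 8.12.4 [igam1]: with lambda = x / a and sigma = lambda - 1,
+  // eta is defined by (1/2) * eta^2 = sigma - log1p(sigma), carrying the sign
+  // of sigma, and
+  //   igam(a, x)  ~ (1/2) * erfc(-eta * sqrt(a / 2)) - S(a, eta)
+  //   igamc(a, x) ~ (1/2) * erfc(+eta * sqrt(a / 2)) + S(a, eta)
+  // where S(a, eta) = exp(-(1/2) * a * eta^2) / sqrt(2 * pi * a)
+  //                   * sum_k c_k(eta) / a^k,
+  // and c_k(eta) = sum_n d[k][n] * eta^n uses the table above. Both sums are
+  // truncated once their terms fall below machine epsilon (or, for the outer
+  // sum, once the terms stop decreasing).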
+  if (igam) {
+    sgn = -1;
+  }
+  else {
+    sgn = 1;
+  }
+
+  if (lambda > 1) {
+    eta = std::sqrt(-2 * (std::log1p(sigma) - sigma));
+  }
+  else if (lambda < 1) {
+    eta = -std::sqrt(-2 * (std::log1p(sigma) - sigma));
+  }
+  else {
+    eta = 0;
+  }
+  res = 0.5 * std::erfc(sgn * eta * std::sqrt(a / 2));
+
+  for (k = 0; k < 25; k++) {
+    ck = d[k][0];
+    for (n = 1; n < 25; n++) {
+      if (n > maxpow) {
+        etapow[n] = eta * etapow[n-1];
+        maxpow += 1;
+      }
+      ckterm = d[k][n]*etapow[n];
+      ck += ckterm;
+      if (std::fabs(ckterm) < MACHEP * std::fabs(ck)) {
+        break;
+      }
+    }
+    term = ck * afac;
+    absterm = std::fabs(term);
+    if (absterm > absoldterm) {
+      break;
+    }
+    sum += term;
+    if (absterm < MACHEP * std::fabs(sum)) {
+      break;
+    }
+    absoldterm = absterm;
+    afac /= a;
+  }
+  res += sgn * std::exp(-0.5 * a * eta * eta) * sum / std::sqrt(2 * c10::pi<scalar_t> * a);
+
+  return res;
+}
+
+template <typename scalar_t>
+static scalar_t _igamc_helper_continued_fraction(scalar_t a, scalar_t x) {
+  // Compute igamc using DLMF 8.9.2. [igam1]
+  int i;
+  scalar_t ans, ax, c, yc, r, t, y, z;
+  scalar_t pk, pkm1, pkm2, qk, qkm1, qkm2;
+  int MAXITER = 2000;
+  static scalar_t MACHEP = std::is_same<scalar_t, double>::value ?
+      1.11022302462515654042E-16 : 5.9604644775390625E-8;
+  static scalar_t BIG = std::is_same<scalar_t, double>::value ?
+      4.503599627370496e15 : 16777216.;
+  static scalar_t BIGINV = std::is_same<scalar_t, double>::value ?
+      2.22044604925031308085e-16 : 5.9604644775390625E-8;
+
+  ax = _igam_helper_fac(a, x);
+  if (ax == 0.0) {
+    return 0.0;
+  }
+
+  /* continued fraction */
+  y = 1.0 - a;
+  z = x + y + 1.0;
+  c = 0.0;
+  pkm2 = 1.0;
+  qkm2 = x;
+  pkm1 = x + 1.0;
+  qkm1 = z * x;
+  ans = pkm1 / qkm1;
+
+  for (i = 0; i < MAXITER; i++) {
+    c += 1.0;
+    y += 1.0;
+    z += 2.0;
+    yc = y * c;
+    pk = pkm1 * z - pkm2 * yc;
+    qk = qkm1 * z - qkm2 * yc;
+    if (qk != 0) {
+      r = pk / qk;
+      t = std::fabs((ans - r) / r);
+      ans = r;
+    }
+    else {
+      t = 1.0;
+    }
+    pkm2 = pkm1;
+    pkm1 = pk;
+    qkm2 = qkm1;
+    qkm1 = qk;
+    if (std::fabs(pk) > BIG) {
+      pkm2 *= BIGINV;
+      pkm1 *= BIGINV;
+      qkm2 *= BIGINV;
+      qkm1 *= BIGINV;
+    }
+    if (t <= MACHEP) {
+      break;
+    }
+  }
+  return ans * ax;
+}
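+
+/* For reference, the quantity evaluated above is the DLMF 8.9.2 continued
+ * fraction [igam1], roughly
+ *
+ *   igamc(a, x) ~ ax / (x+1-a - 1*(1-a)/(x+3-a - 2*(2-a)/(x+5-a - ...)))
+ *
+ * with ax = x^a * e^-x / Gamma(a) from _igam_helper_fac. pk/qk hold the
+ * numerator and denominator of successive convergents; both are rescaled by
+ * BIGINV whenever |pk| exceeds BIG so their ratio stays representable, and
+ * iteration stops once consecutive convergents agree to within MACHEP.
+ */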
+template <typename scalar_t>
+inline scalar_t calc_igammac(scalar_t a, scalar_t x) {
+  /* the calculation of the regularized upper incomplete gamma function
+   * is done differently based on the values of a and x:
+   * - if x and/or a is at the boundary of defined region, then assign the
+   *   result at the boundary
+   * - if a is large and a ~ x, then using Uniform Asymptotic Expansions for
+   *   Large Parameter (see DLMF 8.12.4 [igam1])
+   * - if x > 1.1 and x < a, using the subtraction from the regularized lower
+   *   incomplete gamma
+   * - otherwise, calculate the series from [igam2] eq (5)
+   */
+  scalar_t absxma_a;
+
+  static scalar_t SMALL = 20.0;
+  static scalar_t LARGE = 200.0;
+  static scalar_t SMALLRATIO = 0.3;
+  static scalar_t LARGERATIO = 4.5;
+
+  // note that in SciPy, a and x are non-negative, with exclusive 0s (i.e.,
+  // at most 1 of them can be 0), where igammac(0, x) = 0.0 iff x > 0.
+  if ((x < 0) || (a < 0)) {
+    // out of defined-region of the function
+    return std::numeric_limits<scalar_t>::quiet_NaN();
+  }
+  else if (a == 0) {
+    if (x > 0) {
+      return 0.0;
+    }
+    else {
+      return std::numeric_limits<scalar_t>::quiet_NaN();
+    }
+  }
+  else if (x == 0) {
+    return 1.0;
+  }
+  else if (std::isinf(a)) {
+    if (std::isinf(x)) {
+      return std::numeric_limits<scalar_t>::quiet_NaN();
+    }
+    return 1.0;
+  }
+  else if (std::isinf(x)) {
+    return 0.0;
+  }
+
+  absxma_a = std::fabs(x - a) / a;
+  if ((a > SMALL) && (a < LARGE) && (absxma_a < SMALLRATIO)) {
+    return _igam_helper_asymptotic_series(a, x, 0);
+  }
+  else if ((a > LARGE) && (absxma_a < LARGERATIO / std::sqrt(a))) {
+    return _igam_helper_asymptotic_series(a, x, 0);
+  }
+
+  if (x > 1.1) {
+    if (x < a) {
+      return 1.0 - _igam_helper_series(a, x);
+    }
+    else {
+      return _igamc_helper_continued_fraction(a, x);
+    }
+  }
+  else if (x <= 0.5) {
+    if (-0.4 / std::log(x) < a) {
+      return 1.0 - _igam_helper_series(a, x);
+    }
+    else {
+      return _igamc_helper_series(a, x);
+    }
+  }
+  else {
+    if (x * 1.1 < a) {
+      return 1.0 - _igam_helper_series(a, x);
+    }
+    else {
+      return _igamc_helper_series(a, x);
+    }
+  }
+}
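+
+// The lower-incomplete counterpart follows. Away from the boundary cases the
+// two entry points are complementary: calc_igamma(a, x) + calc_igammac(a, x)
+// equals 1 up to rounding. For a = 1, for example, the closed forms are
+// igamma(1, x) = 1 - exp(-x) and igammac(1, x) = exp(-x).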
+template <typename scalar_t>
+scalar_t calc_igamma(scalar_t a, scalar_t x) {
+  /* the calculation of the regularized lower incomplete gamma function
+   * is done differently based on the values of a and x:
+   * - if x and/or a is at the boundary of defined region, then assign the
+   *   result at the boundary
+   * - if a is large and a ~ x, then using Uniform Asymptotic Expansions for
+   *   Large Parameter (see DLMF 8.12.3 [igam1])
+   * - if x > 1 and x > a, using the subtraction from the regularized upper
+   *   incomplete gamma
+   * - otherwise, calculate the series from [igam2] eq (4)
+   */
+  scalar_t absxma_a;
+  static scalar_t SMALL = 20.0;
+  static scalar_t LARGE = 200.0;
+  static scalar_t SMALLRATIO = 0.3;
+  static scalar_t LARGERATIO = 4.5;
+
+  // boundary values following SciPy
+  // note that in SciPy, a and x are non-negative, with exclusive 0s (i.e.,
+  // at most 1 of them can be 0), where igamma(0, x) = 1.0 iff x > 0.
+  if ((x < 0) || (a < 0)) {
+    // out of defined-region of the function
+    return std::numeric_limits<scalar_t>::quiet_NaN();
+  }
+  else if (a == 0) {
+    if (x > 0) {
+      return 1.0;
+    }
+    else {
+      return std::numeric_limits<scalar_t>::quiet_NaN();
+    }
+  }
+  else if (x == 0) {
+    return 0.0; // zero integration limit
+  }
+  else if (std::isinf(a)) {
+    if (std::isinf(x)) {
+      return std::numeric_limits<scalar_t>::quiet_NaN();
+    }
+    return 0.0;
+  }
+  else if (std::isinf(x)) {
+    return 1.0;
+  }
+
+  /* Asymptotic regime where a ~ x. See [igam2] */
+  absxma_a = std::fabs(x - a) / a;
+  if ((a > SMALL) && (a < LARGE) && (absxma_a < SMALLRATIO)) {
+    return _igam_helper_asymptotic_series(a, x, 1);
+  }
+  else if ((a > LARGE) && (absxma_a < LARGERATIO / std::sqrt(a))) {
+    return _igam_helper_asymptotic_series(a, x, 1);
+  }
+
+  if ((x > 1.0) && (x > a)) {
+    return 1.0 - calc_igammac(a, x);
+  }
+
+  return _igam_helper_series(a, x);
+}
+
+template <>
+C10_UNUSED inline c10::BFloat16 calc_igamma<c10::BFloat16>(c10::BFloat16 a, c10::BFloat16 x) {
+  return calc_igamma<float>(float(a), float(x));
+}
+
+template <>
+C10_UNUSED inline c10::Half calc_igamma<c10::Half>(c10::Half a, c10::Half x) {
+  return calc_igamma<float>(float(a), float(x));
+}
+
+template <>
+C10_UNUSED inline c10::BFloat16 calc_igammac<c10::BFloat16>(c10::BFloat16 a, c10::BFloat16 x) {
+  return calc_igammac<float>(float(a), float(x));
+}
+
+template <>
+C10_UNUSED inline c10::Half calc_igammac<c10::Half>(c10::Half a, c10::Half x) {
+  return calc_igammac<float>(float(a), float(x));
+}
+
+inline c10::BFloat16 calc_erfinv(c10::BFloat16 a) { return calc_erfinv(float(a)); }
+
+template <typename T>
+inline T abs_impl(T v) {
+  return std::abs(v);
+}
+
+template <>
+C10_UNUSED inline uint8_t abs_impl(uint8_t v) {
+  return v;
+}
+
+template <typename T>
+inline typename std::enable_if<std::is_integral<T>::value, T>::type
+calc_gcd(T a, T b) {
+  a = abs_impl(a);
+  b = abs_impl(b);
+  while (a != 0) {
+    T c = a;
+    a = b % a;
+    b = c;
+  }
+  return b;
+}
+
+template <typename T>
+C10_HOST_DEVICE T exp2_impl(T x) {
+  return std::exp2(x);
+}
+
+template <typename T>
+C10_HOST_DEVICE c10::complex<T> exp2_impl(c10::complex<T> x) {
+  // There is no std::exp2 overload for complex, so instead
+  // use the identity 2^x = e^(ln(2) * x)
+  constexpr auto ln2 = c10::ln_2<T>;
+  return std::exp(ln2 * x);
+}
+
+/*
+ * This function is derived from the implementation of the chbevl function in the Cephes Math Library.
+ * See note [3-Clause BSD License for the Cephes Math Library].
+ *
+ * Evaluates the series
+ *
+ *       len-1
+ *        - '
+ *  y  =  >   array[i] T (x/2)
+ *        -             i
+ *       i=0
+ *
+ * of Chebyshev polynomials Ti at argument x/2.
+ *
+ * Coefficients are stored in reverse order, i.e. the zero order term is last in the array. Note len is the number of
+ * coefficients, not the order.
+ *
+ * If coefficients are for the interval a to b, x must have been transformed to x -> 2(2x - b - a)/(b-a) before
+ * entering the routine. This maps x from (a, b) to (-1, 1), over which the Chebyshev polynomials are defined.
+ *
+ * If the coefficients are for the inverted interval, in which (a, b) is mapped to (1/b, 1/a), the transformation
+ * required is x -> 2(2ab/x - b - a)/(b-a). If b is infinity, this becomes x -> 4a/x - 1.
+ */
+template <typename T>
+inline typename std::enable_if<std::is_floating_point<T>::value, T>::type
+chbevl(const T x, const T array[], size_t len) {
+  T b0, b1, b2;
+
+  b0 = array[0];
+  b1 = static_cast<T>(0.0);
+
+  for (size_t i = 1; i < len; ++i) {
+    b2 = b1;
+    b1 = b0;
+    b0 = x * b1 - b2 + array[i];
+  }
+
+  return (static_cast<T>(0.5) * (b0 - b2));
+}
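+
+/* Illustration (not used by the library): for array = {2.0, 4.0} and len = 2,
+ * the primed sum is 2*T1(x/2) + (4/2)*T0(x/2). Tracing the recurrence,
+ * chbevl(1.0, array, 2) returns 0.5 * ((1.0*2.0 + 4.0) - 0.0) = 3.0, which
+ * matches 2*(1.0/2) + 2 directly.
+ */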
+/*
+ * This function is derived from the implementation of the i0 function in the Cephes Math Library.
+ * See note [3-Clause BSD License for the Cephes Math Library].
+ *
+ * Computes an approximation of the zeroth order modified Bessel function of the first kind.
+ * The approximation is actually two (sub)approximations, both using a Chebyshev polynomial expansion.
+ * One approximates the function over [0, 8], and the other over (8, infinity). This function takes the absolute value
+ * of all inputs to convert them into the domain of the approximation.
+ */
+template <typename T>
+inline std::tuple<const T*, size_t> chebyshev_coefficients_i0e_A() {
+  /* Chebyshev coefficients for exp(-x) I0(x)
+   * in the interval [0,8].
+   *
+   * lim(x->0){ exp(-x) I0(x) } = 1.
+   */
+  static const T coeff[] = {
+      -4.41534164647933937950E-18, 3.33079451882223809783E-17,
+      -2.43127984654795469359E-16, 1.71539128555513303061E-15,
+      -1.16853328779934516808E-14, 7.67618549860493561688E-14,
+      -4.85644678311192946090E-13, 2.95505266312963983461E-12,
+      -1.72682629144155570723E-11, 9.67580903537323691224E-11,
+      -5.18979560163526290666E-10, 2.65982372468238665035E-9,
+      -1.30002500998624804212E-8,  6.04699502254191894932E-8,
+      -2.67079385394061173391E-7,  1.11738753912010371815E-6,
+      -4.41673835845875056359E-6,  1.64484480707288970893E-5,
+      -5.75419501008210370398E-5,  1.88502885095841655729E-4,
+      -5.76375574538582365885E-4,  1.63947561694133579842E-3,
+      -4.32430999505057594430E-3,  1.05464603945949983183E-2,
+      -2.37374148058994688156E-2,  4.93052842396707084878E-2,
+      -9.49010970480476444210E-2,  1.71620901522208775349E-1,
+      -3.04682672343198398683E-1,  6.76795274409476084995E-1};
+  return std::make_tuple(coeff, 30);
+};
+
+template <typename T>
+inline std::tuple<const T*, size_t> chebyshev_coefficients_i0e_B() {
+  /* Chebyshev coefficients for exp(-x) sqrt(x) I0(x)
+   * in the inverted interval [8,infinity].
+   *
+   * lim(x->inf){ exp(-x) sqrt(x) I0(x) } = 1/sqrt(2pi).
+   */
+  static const T coeff[] = {
+      -7.23318048787475395456E-18, -4.83050448594418207126E-18,
+      4.46562142029675999901E-17,  3.46122286769746109310E-17,
+      -2.82762398051658348494E-16, -3.42548561967721913462E-16,
+      1.77256013305652638360E-15,  3.81168066935262242075E-15,
+      -9.55484669882830764870E-15, -4.15056934728722208663E-14,
+      1.54008621752140982691E-14,  3.85277838274214270114E-13,
+      7.18012445138366623367E-13,  -1.79417853150680611778E-12,
+      -1.32158118404477131188E-11, -3.14991652796324136454E-11,
+      1.18891471078464383424E-11,  4.94060238822496958910E-10,
+      3.39623202570838634515E-9,   2.26666899049817806459E-8,
+      2.04891858946906374183E-7,   2.89137052083475648297E-6,
+      6.88975834691682398426E-5,   3.36911647825569408990E-3,
+      8.04490411014108831608E-1};
+
+  return std::make_tuple(coeff, 25);
+};
+
+template <typename T>
+inline typename std::enable_if<std::is_same<double, T>::value, std::tuple<const T*, size_t>>::type
+chebyshev_coefficients_i1e_A() {
+  /* Chebyshev coefficients for exp(-x) I1(x)
+   * in the interval [0,8].
+   *
+   * lim(x->0){ exp(-x) I1(x) / x } = 1/2.
+   */
+  static const T coeff[] = {
+      2.77791411276104639959E-18, -2.11142121435816608115E-17,
+      1.55363195773620046921E-16, -1.10559694773538630805E-15,
+      7.60068429473540693410E-15, -5.04218550472791168711E-14,
+      3.22379336594557470981E-13, -1.98397439776494371520E-12,
+      1.17361862988909016308E-11, -6.66348972350202774223E-11,
+      3.62559028155211703701E-10, -1.88724975172282928790E-9,
+      9.38153738649577178388E-9,  -4.44505912879632808065E-8,
+      2.00329475355213526229E-7,  -8.56872026469545474066E-7,
+      3.47025130813767847674E-6,  -1.32731636560394358279E-5,
+      4.78156510755005422638E-5,  -1.61760815825896745588E-4,
+      5.12285956168575772895E-4,  -1.51357245063125314899E-3,
+      4.15642294431288815669E-3,  -1.05640848946261981558E-2,
+      2.47264490306265168283E-2,  -5.29459812080949914269E-2,
+      1.02643658689847095384E-1,  -1.76416518357834055153E-1,
+      2.52587186443633654823E-1};
+  return std::make_tuple(coeff, 29);
+};
+template <typename T>
+inline typename std::enable_if<std::is_same<float, T>::value, std::tuple<const T*, size_t>>::type
+chebyshev_coefficients_i1e_A() {
+  /* Chebyshev coefficients for exp(-x) I1(x)
+   * in the interval [0,8].
+   *
+   * lim(x->0){ exp(-x) I1(x) / x } = 1/2.
+   */
+  static const T coeff[] = {
+      9.38153738649577178388E-9f,
+      -4.44505912879632808065E-8f,
+      2.00329475355213526229E-7f,
+      -8.56872026469545474066E-7f,
+      3.47025130813767847674E-6f,
+      -1.32731636560394358279E-5f,
+      4.78156510755005422638E-5f,
+      -1.61760815825896745588E-4f,
+      5.12285956168575772895E-4f,
+      -1.51357245063125314899E-3f,
+      4.15642294431288815669E-3f,
+      -1.05640848946261981558E-2f,
+      2.47264490306265168283E-2f,
+      -5.29459812080949914269E-2f,
+      1.02643658689847095384E-1f,
+      -1.76416518357834055153E-1f,
+      2.52587186443633654823E-1f};
+  return std::make_tuple(coeff, 17);
+};
+
+template <typename T>
+inline typename std::enable_if<std::is_same<double, T>::value, std::tuple<const T*, size_t>>::type
+chebyshev_coefficients_i1e_B() {
+  /* Chebyshev coefficients for exp(-x) sqrt(x) I1(x)
+   * in the inverted interval [8,infinity].
+   *
+   * lim(x->inf){ exp(-x) sqrt(x) I1(x) } = 1/sqrt(2pi).
+   */
+  static const T coeff[] = {
+      7.51729631084210481353E-18,  4.41434832307170791151E-18,
+      -4.65030536848935832153E-17, -3.20952592199342395980E-17,
+      2.96262899764595013876E-16,  3.30820231092092828324E-16,
+      -1.88035477551078244854E-15, -3.81440307243700780478E-15,
+      1.04202769841288027642E-14,  4.27244001671195135429E-14,
+      -2.10154184277266431302E-14, -4.08355111109219731823E-13,
+      -7.19855177624590851209E-13, 2.03562854414708950722E-12,
+      1.41258074366137813316E-11,  3.25260358301548823856E-11,
+      -1.89749581235054123450E-11, -5.58974346219658380687E-10,
+      -3.83538038596423702205E-9,  -2.63146884688951950684E-8,
+      -2.51223623787020892529E-7,  -3.88256480887769039346E-6,
+      -1.10588938762623716291E-4,  -9.76109749136146840777E-3,
+      7.78576235018280120474E-1};
+
+  return std::make_tuple(coeff, 25);
+};
+
+template <typename T>
+inline typename std::enable_if<std::is_same<float, T>::value, std::tuple<const T*, size_t>>::type
+chebyshev_coefficients_i1e_B() {
+  /* Chebyshev coefficients for exp(-x) sqrt(x) I1(x)
+   * in the inverted interval [8,infinity].
+   *
+   * lim(x->inf){ exp(-x) sqrt(x) I1(x) } = 1/sqrt(2pi).
+   */
+  static const T coeff[] = {
+      -3.83538038596423702205E-9f,
+      -2.63146884688951950684E-8f,
+      -2.51223623787020892529E-7f,
+      -3.88256480887769039346E-6f,
+      -1.10588938762623716291E-4f,
+      -9.76109749136146840777E-3f,
+      7.78576235018280120474E-1f};
+
+  return std::make_tuple(coeff, 7);
+};
+
+template <typename T>
+inline typename std::enable_if<std::is_floating_point<T>::value, T>::type
+calc_i0(T _x) {
+  T x = std::abs(_x);
+
+  if (x <= T{8.0}) {
+    auto coeff_pair = chebyshev_coefficients_i0e_A<T>();
+    auto A = std::get<0>(coeff_pair);
+    auto len = std::get<1>(coeff_pair);
+    T y = (x / T{2.0}) - T{2.0};
+    return static_cast<T>(std::exp(x) * chbevl(y, A, len));
+  }
+  auto coeff_pair = chebyshev_coefficients_i0e_B<T>();
+  auto B = std::get<0>(coeff_pair);
+  auto len = std::get<1>(coeff_pair);
+  return std::exp(x) * chbevl(T{32.0} / x - T{2.0}, B, len) / std::sqrt(x);
+}
+
+// Upcast bfloat16 input to float for numerical accuracy purposes
+inline c10::BFloat16 calc_i0(c10::BFloat16 a) { return calc_i0(static_cast<float>(a)); }
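+
+// Two sanity anchors for calc_i0 that follow from the definition of I0:
+// calc_i0(0.0) == 1.0 since I0(0) = 1, and calc_i0 is even in its argument
+// because the input is folded through std::abs before the Chebyshev evaluation.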
+/*
+ * This function is derived from the implementation of the i1 function in the Cephes Math Library.
+ * See note [3-Clause BSD License for the Cephes Math Library].
+ *
+ * Computes an approximation of the first order modified Bessel function of the first kind.
+ * The approximation is actually two (sub)approximations, both using a Chebyshev polynomial expansion.
+ * One approximates the function over [0, 8], and the other over (8, infinity). This function takes the absolute value
+ * of all inputs to convert them into the domain of the approximation.
+ */
+template <typename T>
+inline typename std::enable_if<std::is_floating_point<T>::value, T>::type
+calc_i1(T _x) {
+  T x = std::abs(_x);
+
+  if (x <= T{8.0}) {
+    auto coeff_pair = chebyshev_coefficients_i1e_A<T>();
+    auto A = std::get<0>(coeff_pair);
+    auto len = std::get<1>(coeff_pair);
+    T y = (x / T{2.0}) - T{2.0};
+    const T out = std::exp(x) * x * chbevl(y, A, len);
+    return (_x < T{0.0}) ? -out : out;
+  }
+  auto coeff_pair = chebyshev_coefficients_i1e_B<T>();
+  auto B = std::get<0>(coeff_pair);
+  auto len = std::get<1>(coeff_pair);
+  const T out = (std::exp(x) * chbevl(T{32.0} / x - T{2.0}, B, len)) / std::sqrt(x);
+  return (_x < T{0.0}) ? -out : out;
+}
+
+/*
+ * This function is derived from the implementation of the i1e function in the Cephes Math Library.
+ * See note [3-Clause BSD License for the Cephes Math Library].
+ *
+ * Computes an approximation of the exponentially scaled first order modified Bessel function of the first kind.
+ * The approximation is actually two (sub)approximations, both using a Chebyshev polynomial expansion.
+ * One approximates the function over [0, 8], and the other over (8, infinity). This function takes the absolute value
+ * of all inputs to convert them into the domain of the approximation.
+ */
+template <typename T>
+inline typename std::enable_if<std::is_floating_point<T>::value, T>::type
+calc_i1e(T _x) {
+  T x = std::abs(_x);
+
+  if (x <= T{8.0}) {
+    auto coeff_pair = chebyshev_coefficients_i1e_A<T>();
+    auto A = std::get<0>(coeff_pair);
+    auto len = std::get<1>(coeff_pair);
+    T y = (x / T{2.0}) - T{2.0};
+    const T out = chbevl(y, A, len) * x;
+    return (_x < T{0.0}) ? -out : out;
+  }
+  auto coeff_pair = chebyshev_coefficients_i1e_B<T>();
+  auto B = std::get<0>(coeff_pair);
+  auto len = std::get<1>(coeff_pair);
+  const auto out = chbevl(T{32.0} / x - T{2.0}, B, len) / std::sqrt(x);
+  return (_x < T{0.0}) ? -out : out;
+}
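+
+// Note the relation between the two kernels above: calc_i1e(x) equals
+// exp(-|x|) * calc_i1(x). The scaled form stays representable for large |x|,
+// where the exp(x) factor inside calc_i1 would overflow.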
+/*
+ * This function is derived from the implementation of the ndtri function in the Cephes Math Library.
+ * See note [3-Clause BSD License for the Cephes Math Library].
+ *
+ * Computes the argument, x, for which the area under the Gaussian probability density function
+ * (integrated from minus infinity to x) is equal to y.
+ */
+template <typename T>
+inline C10_HOST_DEVICE T calc_ndtri(T y0) {
+
+  /* sqrt(2pi) */
+  constexpr T s2pi = 2.50662827463100050242E0;
+  constexpr T one = 1;
+  constexpr T zero = 0;
+
+  /* approximation for 0 <= |y - 0.5| <= 3/8 */
+  static const T P0[5] = {
+      -5.99633501014107895267E1,
+      9.80010754185999661536E1,
+      -5.66762857469070293439E1,
+      1.39312609387279679503E1,
+      -1.23916583867381258016E0,
+  };
+
+  static const T Q0[9] = {
+      1.00000000000000000000E0,
+      1.95448858338141759834E0,
+      4.67627912898881538453E0,
+      8.63602421390890590575E1,
+      -2.25462687854119370527E2,
+      2.00260212380060660359E2,
+      -8.20372256168333339912E1,
+      1.59056225126211695515E1,
+      -1.18331621121330003142E0,
+  };
+
+  /* Approximation for interval z = sqrt(-2 log y ) between 2 and 8
+   * i.e., y between exp(-2) = .135 and exp(-32) = 1.27e-14.
+   */
+  static const T P1[9] = {
+      4.05544892305962419923E0,
+      3.15251094599893866154E1,
+      5.71628192246421288162E1,
+      4.40805073893200834700E1,
+      1.46849561928858024014E1,
+      2.18663306850790267539E0,
+      -1.40256079171354495875E-1,
+      -3.50424626827848203418E-2,
+      -8.57456785154685413611E-4,
+  };
+
+  static const T Q1[9] = {
+      1.00000000000000000000E0,
+      1.57799883256466749731E1,
+      4.53907635128879210584E1,
+      4.13172038254672030440E1,
+      1.50425385692907503408E1,
+      2.50464946208309415979E0,
+      -1.42182922854787788574E-1,
+      -3.80806407691578277194E-2,
+      -9.33259480895457427372E-4,
+  };
+
+  /* Approximation for interval z = sqrt(-2 log y ) between 8 and 64
+   * i.e., y between exp(-32) = 1.27e-14 and exp(-2048) = 3.67e-890.
+   */
+
+  static const T P2[9] = {
+      3.23774891776946035970E0,
+      6.91522889068984211695E0,
+      3.93881025292474443415E0,
+      1.33303460815807542389E0,
+      2.01485389549179081538E-1,
+      1.23716634817820021358E-2,
+      3.01581553508235416007E-4,
+      2.65806974686737550832E-6,
+      6.23974539184983293730E-9,
+  };
+
+  static const T Q2[9] = {
+      1.00000000000000000000E0,
+      6.02427039364742014255E0,
+      3.67983563856160859403E0,
+      1.37702099489081330271E0,
+      2.16236993594496635890E-1,
+      1.34204006088543189037E-2,
+      3.28014464682127739104E-4,
+      2.89247864745380683936E-6,
+      6.79019408009981274425E-9,
+  };
+
+  if (y0 == zero) {
+    return -std::numeric_limits<T>::infinity();
+  }
+  if (y0 == one) {
+    return std::numeric_limits<T>::infinity();
+  }
+  if (y0 < zero || y0 > one) {
+    return std::numeric_limits<T>::quiet_NaN();
+  }
+  bool code = true;
+  T y = y0;
+  if (y > one - T{0.13533528323661269189}) { /* 0.135... = exp(-2) */
+    y = one - y;
+    code = false;
+  }
+
+  if (y > T{0.13533528323661269189}) {
+    y = y - T{0.5};
+    const T y2 = y * y;
+    T x = y + y * (y2 * polevl(y2, P0, 4) / polevl(y2, Q0, 8));
+    return (x * s2pi);
+  }
+
+  T x = ::sqrt(T{-2.0} * ::log(y));
+  const T x0 = x - ::log(x) / x;
+
+  const T z = one / x;
+  T x1;
+  if (x < T{8.0}) /* y > exp(-32) = 1.2664165549e-14 */
+  {
+    x1 = z * polevl(z, P1, 8) / polevl(z, Q1, 8);
+  } else {
+    x1 = z * polevl(z, P2, 8) / polevl(z, Q2, 8);
+  }
+  x = x0 - x1;
+  if (code) {
+    x = -x;
+  }
+  return x;
+}
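+
+// calc_ndtri inverts the standard normal CDF: calc_ndtri(0.5) == 0, and in
+// terms of the inverse error function, ndtri(p) = sqrt(2) * erfinv(2*p - 1).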
+
+/* The next function is taken from http://ab-initio.mit.edu/Faddeev */
+
+/* Copyright (c) 2012 Massachusetts Institute of Technology
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* erfcx(x) = exp(x^2) erfc(x) function, for real x, written by
+   Steven G. Johnson, October 2012.
+
+   This function combines a few different ideas.
+
+   First, for x > 50, it uses a continued-fraction expansion (same as
+   for the Faddeeva function, but with algebraic simplifications for z=i*x).
+
+   Second, for 0 <= x <= 50, it uses Chebyshev polynomial approximations,
+   but with two twists:
+
+      a) It maps x to y = 4 / (4+x) in [0,1]. This simple transformation,
+         inspired by a similar transformation in the octave-forge/specfun
+         erfcx by Soren Hauberg, results in much faster Chebyshev convergence
+         than other simple transformations I have examined.
+
+      b) Instead of using a single Chebyshev polynomial for the entire
+         [0,1] y interval, we break the interval up into 100 equal
+         subintervals, with a switch/lookup table, and use much lower
+         degree Chebyshev polynomials in each subinterval. This greatly
+         improves performance in my tests.
+
+   For x < 0, we use the relationship erfcx(-x) = 2 exp(x^2) - erfcx(x),
+   with the usual checks for overflow etcetera.
+
+   Performance-wise, it seems to be substantially faster than either
+   the SLATEC DERFC function [or an erfcx function derived therefrom]
+   or Cody's CALERF function (from netlib.org/specfun), while
+   retaining near machine precision in accuracy. */
+
+/* Given y100=100*y, where y = 4/(4+x) for x >= 0, compute erfcx(x).
+
+   Uses a look-up table of 100 different Chebyshev polynomials
+   for y intervals [0,0.01], [0.01,0.02], ...., [0.99,1], generated
+   with the help of Maple and a little shell script. This allows
+   the Chebyshev polynomials to be of significantly lower degree (about 1/4)
+   compared to fitting the whole [0,1] interval with a single polynomial. */
+
+template <typename T>
+C10_HOST_DEVICE inline typename std::enable_if<std::is_floating_point<T>::value, T>::type
+erfcx_y100(T y100)
+{
+  switch (static_cast<int>(y100)) {
+case 0: {
+T t = 2*y100 - 1;
+return 0.70878032454106438663e-3 + (0.71234091047026302958e-3 + (0.35779077297597742384e-5 + (0.17403143962587937815e-7 + (0.81710660047307788845e-10 + (0.36885022360434957634e-12 + 0.15917038551111111111e-14 * t) * t) * t) * t) * t) * t;
+}
+case 1: {
+T t = 2*y100 - 3;
+return 0.21479143208285144230e-2 + (0.72686402367379996033e-3 + (0.36843175430938995552e-5 + (0.18071841272149201685e-7 + (0.85496449296040325555e-10 + (0.38852037518534291510e-12 + 0.16868473576888888889e-14 * t) * t) * t) * t) * t) * t;
+}
+case 2: {
+T t = 2*y100 - 5;
+return 0.36165255935630175090e-2 + (0.74182092323555510862e-3 + (0.37948319957528242260e-5 + (0.18771627021793087350e-7 + (0.89484715122415089123e-10 + (0.40935858517772440862e-12 + 0.17872061464888888889e-14 * t) * t) * t) * t) * t) * t;
+}
+case 3: {
+T t = 2*y100 - 7;
+return 0.51154983860031979264e-2 + (0.75722840734791660540e-3 + (0.39096425726735703941e-5 + (0.19504168704300468210e-7 + (0.93687503063178993915e-10 + (0.43143925959079664747e-12 + 0.18939926435555555556e-14 * t) * t) * t) * t) * t) * t;
+}
+case 4: {
+T t = 2*y100 - 9;
+return 0.66457513172673049824e-2 + (0.77310406054447454920e-3 + (0.40289510589399439385e-5 + (0.20271233238288381092e-7 + (0.98117631321709100264e-10 + (0.45484207406017752971e-12 + 0.20076352213333333333e-14 * t) * t) * t) * t) * t) * t;
+}
+case 5: {
+T t = 2*y100 - 11;
+return 0.82082389970241207883e-2 + (0.78946629611881710721e-3 + (0.41529701552622656574e-5 + (0.21074693344544655714e-7 + (0.10278874108587317989e-9 + (0.47965201390613339638e-12 + 0.21285907413333333333e-14 * t) * t) * t) * t) * t) * t;
+}
+case 6: {
+T t = 2*y100 - 13;
+return 0.98039537275352193165e-2 + (0.80633440108342840956e-3 + (0.42819241329736982942e-5 + (0.21916534346907168612e-7 + (0.10771535136565470914e-9 +
(0.50595972623692822410e-12 + 0.22573462684444444444e-14 * t) * t) * t) * t) * t) * t; +} +case 7: { +T t = 2*y100 - 15; +return 0.11433927298290302370e-1 + (0.82372858383196561209e-3 + (0.44160495311765438816e-5 + (0.22798861426211986056e-7 + (0.11291291745879239736e-9 + (0.53386189365816880454e-12 + 0.23944209546666666667e-14 * t) * t) * t) * t) * t) * t; +} +case 8: { +T t = 2*y100 - 17; +return 0.13099232878814653979e-1 + (0.84167002467906968214e-3 + (0.45555958988457506002e-5 + (0.23723907357214175198e-7 + (0.11839789326602695603e-9 + (0.56346163067550237877e-12 + 0.25403679644444444444e-14 * t) * t) * t) * t) * t) * t; +} +case 9: { +T t = 2*y100 - 19; +return 0.14800987015587535621e-1 + (0.86018092946345943214e-3 + (0.47008265848816866105e-5 + (0.24694040760197315333e-7 + (0.12418779768752299093e-9 + (0.59486890370320261949e-12 + 0.26957764568888888889e-14 * t) * t) * t) * t) * t) * t; +} +case 10: { +T t = 2*y100 - 21; +return 0.16540351739394069380e-1 + (0.87928458641241463952e-3 + (0.48520195793001753903e-5 + (0.25711774900881709176e-7 + (0.13030128534230822419e-9 + (0.62820097586874779402e-12 + 0.28612737351111111111e-14 * t) * t) * t) * t) * t) * t; +} +case 11: { +T t = 2*y100 - 23; +return 0.18318536789842392647e-1 + (0.89900542647891721692e-3 + (0.50094684089553365810e-5 + (0.26779777074218070482e-7 + (0.13675822186304615566e-9 + (0.66358287745352705725e-12 + 0.30375273884444444444e-14 * t) * t) * t) * t) * t) * t; +} +case 12: { +T t = 2*y100 - 25; +return 0.20136801964214276775e-1 + (0.91936908737673676012e-3 + (0.51734830914104276820e-5 + (0.27900878609710432673e-7 + (0.14357976402809042257e-9 + (0.70114790311043728387e-12 + 0.32252476000000000000e-14 * t) * t) * t) * t) * t) * t; +} +case 13: { +T t = 2*y100 - 27; +return 0.21996459598282740954e-1 + (0.94040248155366777784e-3 + (0.53443911508041164739e-5 + (0.29078085538049374673e-7 + (0.15078844500329731137e-9 + (0.74103813647499204269e-12 + 0.34251892320000000000e-14 * t) * t) * t) * t) * t) * t; +} +case 14: { +T t = 2*y100 - 29; +return 0.23898877187226319502e-1 + (0.96213386835900177540e-3 + (0.55225386998049012752e-5 + (0.30314589961047687059e-7 + (0.15840826497296335264e-9 + (0.78340500472414454395e-12 + 0.36381553564444444445e-14 * t) * t) * t) * t) * t) * t; +} +case 15: { +T t = 2*y100 - 31; +return 0.25845480155298518485e-1 + (0.98459293067820123389e-3 + (0.57082915920051843672e-5 + (0.31613782169164830118e-7 + (0.16646478745529630813e-9 + (0.82840985928785407942e-12 + 0.38649975768888888890e-14 * t) * t) * t) * t) * t) * t; +} +case 16: { +T t = 2*y100 - 33; +return 0.27837754783474696598e-1 + (0.10078108563256892757e-2 + (0.59020366493792212221e-5 + (0.32979263553246520417e-7 + (0.17498524159268458073e-9 + (0.87622459124842525110e-12 + 0.41066206488888888890e-14 * t) * t) * t) * t) * t) * t; +} +case 17: { +T t = 2*y100 - 35; +return 0.29877251304899307550e-1 + (0.10318204245057349310e-2 + (0.61041829697162055093e-5 + (0.34414860359542720579e-7 + (0.18399863072934089607e-9 + (0.92703227366365046533e-12 + 0.43639844053333333334e-14 * t) * t) * t) * t) * t) * t; +} +case 18: { +T t = 2*y100 - 37; +return 0.31965587178596443475e-1 + (0.10566560976716574401e-2 + (0.63151633192414586770e-5 + (0.35924638339521924242e-7 + (0.19353584758781174038e-9 + (0.98102783859889264382e-12 + 0.46381060817777777779e-14 * t) * t) * t) * t) * t) * t; +} +case 19: { +T t = 2*y100 - 39; +return 0.34104450552588334840e-1 + (0.10823541191350532574e-2 + (0.65354356159553934436e-5 + (0.37512918348533521149e-7 + 
(0.20362979635817883229e-9 + (0.10384187833037282363e-11 + 0.49300625262222222221e-14 * t) * t) * t) * t) * t) * t; +} +case 20: { +T t = 2*y100 - 41; +return 0.36295603928292425716e-1 + (0.11089526167995268200e-2 + (0.67654845095518363577e-5 + (0.39184292949913591646e-7 + (0.21431552202133775150e-9 + (0.10994259106646731797e-11 + 0.52409949102222222221e-14 * t) * t) * t) * t) * t) * t; +} +case 21: { +T t = 2*y100 - 43; +return 0.38540888038840509795e-1 + (0.11364917134175420009e-2 + (0.70058230641246312003e-5 + (0.40943644083718586939e-7 + (0.22563034723692881631e-9 + (0.11642841011361992885e-11 + 0.55721092871111111110e-14 * t) * t) * t) * t) * t) * t; +} +case 22: { +T t = 2*y100 - 45; +return 0.40842225954785960651e-1 + (0.11650136437945673891e-2 + (0.72569945502343006619e-5 + (0.42796161861855042273e-7 + (0.23761401711005024162e-9 + (0.12332431172381557035e-11 + 0.59246802364444444445e-14 * t) * t) * t) * t) * t) * t; +} +case 23: { +T t = 2*y100 - 47; +return 0.43201627431540222422e-1 + (0.11945628793917272199e-2 + (0.75195743532849206263e-5 + (0.44747364553960993492e-7 + (0.25030885216472953674e-9 + (0.13065684400300476484e-11 + 0.63000532853333333334e-14 * t) * t) * t) * t) * t) * t; +} +case 24: { +T t = 2*y100 - 49; +return 0.45621193513810471438e-1 + (0.12251862608067529503e-2 + (0.77941720055551920319e-5 + (0.46803119830954460212e-7 + (0.26375990983978426273e-9 + (0.13845421370977119765e-11 + 0.66996477404444444445e-14 * t) * t) * t) * t) * t) * t; +} +case 25: { +T t = 2*y100 - 51; +return 0.48103121413299865517e-1 + (0.12569331386432195113e-2 + (0.80814333496367673980e-5 + (0.48969667335682018324e-7 + (0.27801515481905748484e-9 + (0.14674637611609884208e-11 + 0.71249589351111111110e-14 * t) * t) * t) * t) * t) * t; +} +case 26: { +T t = 2*y100 - 53; +return 0.50649709676983338501e-1 + (0.12898555233099055810e-2 + (0.83820428414568799654e-5 + (0.51253642652551838659e-7 + (0.29312563849675507232e-9 + (0.15556512782814827846e-11 + 0.75775607822222222221e-14 * t) * t) * t) * t) * t) * t; +} +case 27: { +T t = 2*y100 - 55; +return 0.53263363664388864181e-1 + (0.13240082443256975769e-2 + (0.86967260015007658418e-5 + (0.53662102750396795566e-7 + (0.30914568786634796807e-9 + (0.16494420240828493176e-11 + 0.80591079644444444445e-14 * t) * t) * t) * t) * t) * t; +} +case 28: { +T t = 2*y100 - 57; +return 0.55946601353500013794e-1 + (0.13594491197408190706e-2 + (0.90262520233016380987e-5 + (0.56202552975056695376e-7 + (0.32613310410503135996e-9 + (0.17491936862246367398e-11 + 0.85713381688888888890e-14 * t) * t) * t) * t) * t) * t; +} +case 29: { +T t = 2*y100 - 59; +return 0.58702059496154081813e-1 + (0.13962391363223647892e-2 + (0.93714365487312784270e-5 + (0.58882975670265286526e-7 + (0.34414937110591753387e-9 + (0.18552853109751857859e-11 + 0.91160736711111111110e-14 * t) * t) * t) * t) * t) * t; +} +case 30: { +T t = 2*y100 - 61; +return 0.61532500145144778048e-1 + (0.14344426411912015247e-2 + (0.97331446201016809696e-5 + (0.61711860507347175097e-7 + (0.36325987418295300221e-9 + (0.19681183310134518232e-11 + 0.96952238400000000000e-14 * t) * t) * t) * t) * t) * t; +} +case 31: { +T t = 2*y100 - 63; +return 0.64440817576653297993e-1 + (0.14741275456383131151e-2 + (0.10112293819576437838e-4 + (0.64698236605933246196e-7 + (0.38353412915303665586e-9 + (0.20881176114385120186e-11 + 0.10310784480000000000e-13 * t) * t) * t) * t) * t) * t; +} +case 32: { +T t = 2*y100 - 65; +return 0.67430045633130393282e-1 + (0.15153655418916540370e-2 + (0.10509857606888328667e-4 + 
(0.67851706529363332855e-7 + (0.40504602194811140006e-9 + (0.22157325110542534469e-11 + 0.10964842115555555556e-13 * t) * t) * t) * t) * t) * t; +} +case 33: { +T t = 2*y100 - 67; +return 0.70503365513338850709e-1 + (0.15582323336495709827e-2 + (0.10926868866865231089e-4 + (0.71182482239613507542e-7 + (0.42787405890153386710e-9 + (0.23514379522274416437e-11 + 0.11659571751111111111e-13 * t) * t) * t) * t) * t) * t; +} +case 34: { +T t = 2*y100 - 69; +return 0.73664114037944596353e-1 + (0.16028078812438820413e-2 + (0.11364423678778207991e-4 + (0.74701423097423182009e-7 + (0.45210162777476488324e-9 + (0.24957355004088569134e-11 + 0.12397238257777777778e-13 * t) * t) * t) * t) * t) * t; +} +case 35: { +T t = 2*y100 - 71; +return 0.76915792420819562379e-1 + (0.16491766623447889354e-2 + (0.11823685320041302169e-4 + (0.78420075993781544386e-7 + (0.47781726956916478925e-9 + (0.26491544403815724749e-11 + 0.13180196462222222222e-13 * t) * t) * t) * t) * t) * t; +} +case 36: { +T t = 2*y100 - 73; +return 0.80262075578094612819e-1 + (0.16974279491709504117e-2 + (0.12305888517309891674e-4 + (0.82350717698979042290e-7 + (0.50511496109857113929e-9 + (0.28122528497626897696e-11 + 0.14010889635555555556e-13 * t) * t) * t) * t) * t) * t; +} +case 37: { +T t = 2*y100 - 75; +return 0.83706822008980357446e-1 + (0.17476561032212656962e-2 + (0.12812343958540763368e-4 + (0.86506399515036435592e-7 + (0.53409440823869467453e-9 + (0.29856186620887555043e-11 + 0.14891851591111111111e-13 * t) * t) * t) * t) * t) * t; +} +case 38: { +T t = 2*y100 - 77; +return 0.87254084284461718231e-1 + (0.17999608886001962327e-2 + (0.13344443080089492218e-4 + (0.90900994316429008631e-7 + (0.56486134972616465316e-9 + (0.31698707080033956934e-11 + 0.15825697795555555556e-13 * t) * t) * t) * t) * t) * t; +} +case 39: { +T t = 2*y100 - 79; +return 0.90908120182172748487e-1 + (0.18544478050657699758e-2 + (0.13903663143426120077e-4 + (0.95549246062549906177e-7 + (0.59752787125242054315e-9 + (0.33656597366099099413e-11 + 0.16815130613333333333e-13 * t) * t) * t) * t) * t) * t; +} +case 40: { +T t = 2*y100 - 81; +return 0.94673404508075481121e-1 + (0.19112284419887303347e-2 + (0.14491572616545004930e-4 + (0.10046682186333613697e-6 + (0.63221272959791000515e-9 + (0.35736693975589130818e-11 + 0.17862931591111111111e-13 * t) * t) * t) * t) * t) * t; +} +case 41: { +T t = 2*y100 - 83; +return 0.98554641648004456555e-1 + (0.19704208544725622126e-2 + (0.15109836875625443935e-4 + (0.10567036667675984067e-6 + (0.66904168640019354565e-9 + (0.37946171850824333014e-11 + 0.18971959040000000000e-13 * t) * t) * t) * t) * t) * t; +} +case 42: { +T t = 2*y100 - 85; +return 0.10255677889470089531e0 + (0.20321499629472857418e-2 + (0.15760224242962179564e-4 + (0.11117756071353507391e-6 + (0.70814785110097658502e-9 + (0.40292553276632563925e-11 + 0.20145143075555555556e-13 * t) * t) * t) * t) * t) * t; +} +case 43: { +T t = 2*y100 - 87; +return 0.10668502059865093318e0 + (0.20965479776148731610e-2 + (0.16444612377624983565e-4 + (0.11700717962026152749e-6 + (0.74967203250938418991e-9 + (0.42783716186085922176e-11 + 0.21385479360000000000e-13 * t) * t) * t) * t) * t) * t; +} +case 44: { +T t = 2*y100 - 89; +return 0.11094484319386444474e0 + (0.21637548491908170841e-2 + (0.17164995035719657111e-4 + (0.12317915750735938089e-6 + (0.79376309831499633734e-9 + (0.45427901763106353914e-11 + 0.22696025653333333333e-13 * t) * t) * t) * t) * t) * t; +} +case 45: { +T t = 2*y100 - 91; +return 0.11534201115268804714e0 + (0.22339187474546420375e-2 + 
(0.17923489217504226813e-4 + (0.12971465288245997681e-6 + (0.84057834180389073587e-9 + (0.48233721206418027227e-11 + 0.24079890062222222222e-13 * t) * t) * t) * t) * t) * t; +} +case 46: { +T t = 2*y100 - 93; +return 0.11988259392684094740e0 + (0.23071965691918689601e-2 + (0.18722342718958935446e-4 + (0.13663611754337957520e-6 + (0.89028385488493287005e-9 + (0.51210161569225846701e-11 + 0.25540227111111111111e-13 * t) * t) * t) * t) * t) * t; +} +case 47: { +T t = 2*y100 - 95; +return 0.12457298393509812907e0 + (0.23837544771809575380e-2 + (0.19563942105711612475e-4 + (0.14396736847739470782e-6 + (0.94305490646459247016e-9 + (0.54366590583134218096e-11 + 0.27080225920000000000e-13 * t) * t) * t) * t) * t) * t; +} +case 48: { +T t = 2*y100 - 97; +return 0.12941991566142438816e0 + (0.24637684719508859484e-2 + (0.20450821127475879816e-4 + (0.15173366280523906622e-6 + (0.99907632506389027739e-9 + (0.57712760311351625221e-11 + 0.28703099555555555556e-13 * t) * t) * t) * t) * t) * t; +} +case 49: { +T t = 2*y100 - 99; +return 0.13443048593088696613e0 + (0.25474249981080823877e-2 + (0.21385669591362915223e-4 + (0.15996177579900443030e-6 + (0.10585428844575134013e-8 + (0.61258809536787882989e-11 + 0.30412080142222222222e-13 * t) * t) * t) * t) * t) * t; +} +case 50: { +T t = 2*y100 - 101; +return 0.13961217543434561353e0 + (0.26349215871051761416e-2 + (0.22371342712572567744e-4 + (0.16868008199296822247e-6 + (0.11216596910444996246e-8 + (0.65015264753090890662e-11 + 0.32210394506666666666e-13 * t) * t) * t) * t) * t) * t; +} +case 51: { +T t = 2*y100 - 103; +return 0.14497287157673800690e0 + (0.27264675383982439814e-2 + (0.23410870961050950197e-4 + (0.17791863939526376477e-6 + (0.11886425714330958106e-8 + (0.68993039665054288034e-11 + 0.34101266222222222221e-13 * t) * t) * t) * t) * t) * t; +} +case 52: { +T t = 2*y100 - 105; +return 0.15052089272774618151e0 + (0.28222846410136238008e-2 + (0.24507470422713397006e-4 + (0.18770927679626136909e-6 + (0.12597184587583370712e-8 + (0.73203433049229821618e-11 + 0.36087889048888888890e-13 * t) * t) * t) * t) * t) * t; +} +case 53: { +T t = 2*y100 - 107; +return 0.15626501395774612325e0 + (0.29226079376196624949e-2 + (0.25664553693768450545e-4 + (0.19808568415654461964e-6 + (0.13351257759815557897e-8 + (0.77658124891046760667e-11 + 0.38173420035555555555e-13 * t) * t) * t) * t) * t) * t; +} +case 54: { +T t = 2*y100 - 109; +return 0.16221449434620737567e0 + (0.30276865332726475672e-2 + (0.26885741326534564336e-4 + (0.20908350604346384143e-6 + (0.14151148144240728728e-8 + (0.82369170665974313027e-11 + 0.40360957457777777779e-13 * t) * t) * t) * t) * t) * t; +} +case 55: { +T t = 2*y100 - 111; +return 0.16837910595412130659e0 + (0.31377844510793082301e-2 + (0.28174873844911175026e-4 + (0.22074043807045782387e-6 + (0.14999481055996090039e-8 + (0.87348993661930809254e-11 + 0.42653528977777777779e-13 * t) * t) * t) * t) * t) * t; +} +case 56: { +T t = 2*y100 - 113; +return 0.17476916455659369953e0 + (0.32531815370903068316e-2 + (0.29536024347344364074e-4 + (0.23309632627767074202e-6 + (0.15899007843582444846e-8 + (0.92610375235427359475e-11 + 0.45054073102222222221e-13 * t) * t) * t) * t) * t) * t; +} +case 57: { +T t = 2*y100 - 115; +return 0.18139556223643701364e0 + (0.33741744168096996041e-2 + (0.30973511714709500836e-4 + (0.24619326937592290996e-6 + (0.16852609412267750744e-8 + (0.98166442942854895573e-11 + 0.47565418097777777779e-13 * t) * t) * t) * t) * t) * t; +} +case 58: { +T t = 2*y100 - 117; +return 0.18826980194443664549e0 + 
(0.35010775057740317997e-2 + (0.32491914440014267480e-4 + (0.26007572375886319028e-6 + (0.17863299617388376116e-8 + (0.10403065638343878679e-10 + 0.50190265831111111110e-13 * t) * t) * t) * t) * t) * t; +} +case 59: { +T t = 2*y100 - 119; +return 0.19540403413693967350e0 + (0.36342240767211326315e-2 + (0.34096085096200907289e-4 + (0.27479061117017637474e-6 + (0.18934228504790032826e-8 + (0.11021679075323598664e-10 + 0.52931171733333333334e-13 * t) * t) * t) * t) * t) * t; +} +case 60: { +T t = 2*y100 - 121; +return 0.20281109560651886959e0 + (0.37739673859323597060e-2 + (0.35791165457592409054e-4 + (0.29038742889416172404e-6 + (0.20068685374849001770e-8 + (0.11673891799578381999e-10 + 0.55790523093333333334e-13 * t) * t) * t) * t) * t) * t; +} +case 61: { +T t = 2*y100 - 123; +return 0.21050455062669334978e0 + (0.39206818613925652425e-2 + (0.37582602289680101704e-4 + (0.30691836231886877385e-6 + (0.21270101645763677824e-8 + (0.12361138551062899455e-10 + 0.58770520160000000000e-13 * t) * t) * t) * t) * t) * t; +} +case 62: { +T t = 2*y100 - 125; +return 0.21849873453703332479e0 + (0.40747643554689586041e-2 + (0.39476163820986711501e-4 + (0.32443839970139918836e-6 + (0.22542053491518680200e-8 + (0.13084879235290858490e-10 + 0.61873153262222222221e-13 * t) * t) * t) * t) * t) * t; +} +case 63: { +T t = 2*y100 - 127; +return 0.22680879990043229327e0 + (0.42366354648628516935e-2 + (0.41477956909656896779e-4 + (0.34300544894502810002e-6 + (0.23888264229264067658e-8 + (0.13846596292818514601e-10 + 0.65100183751111111110e-13 * t) * t) * t) * t) * t) * t; +} +case 64: { +T t = 2*y100 - 129; +return 0.23545076536988703937e0 + (0.44067409206365170888e-2 + (0.43594444916224700881e-4 + (0.36268045617760415178e-6 + (0.25312606430853202748e-8 + (0.14647791812837903061e-10 + 0.68453122631111111110e-13 * t) * t) * t) * t) * t) * t; +} +case 65: { +T t = 2*y100 - 131; +return 0.24444156740777432838e0 + (0.45855530511605787178e-2 + (0.45832466292683085475e-4 + (0.38352752590033030472e-6 + (0.26819103733055603460e-8 + (0.15489984390884756993e-10 + 0.71933206364444444445e-13 * t) * t) * t) * t) * t) * t; +} +case 66: { +T t = 2*y100 - 133; +return 0.25379911500634264643e0 + (0.47735723208650032167e-2 + (0.48199253896534185372e-4 + (0.40561404245564732314e-6 + (0.28411932320871165585e-8 + (0.16374705736458320149e-10 + 0.75541379822222222221e-13 * t) * t) * t) * t) * t) * t; +} +case 67: { +T t = 2*y100 - 135; +return 0.26354234756393613032e0 + (0.49713289477083781266e-2 + (0.50702455036930367504e-4 + (0.42901079254268185722e-6 + (0.30095422058900481753e-8 + (0.17303497025347342498e-10 + 0.79278273368888888890e-13 * t) * t) * t) * t) * t) * t; +} +case 68: { +T t = 2*y100 - 137; +return 0.27369129607732343398e0 + (0.51793846023052643767e-2 + (0.53350152258326602629e-4 + (0.45379208848865015485e-6 + (0.31874057245814381257e-8 + (0.18277905010245111046e-10 + 0.83144182364444444445e-13 * t) * t) * t) * t) * t) * t; +} +case 69: { +T t = 2*y100 - 139; +return 0.28426714781640316172e0 + (0.53983341916695141966e-2 + (0.56150884865255810638e-4 + (0.48003589196494734238e-6 + (0.33752476967570796349e-8 + (0.19299477888083469086e-10 + 0.87139049137777777779e-13 * t) * t) * t) * t) * t) * t; +} +case 70: { +T t = 2*y100 - 141; +return 0.29529231465348519920e0 + (0.56288077305420795663e-2 + (0.59113671189913307427e-4 + (0.50782393781744840482e-6 + (0.35735475025851713168e-8 + (0.20369760937017070382e-10 + 0.91262442613333333334e-13 * t) * t) * t) * t) * t) * t; +} +case 71: { +T t = 2*y100 - 143; +return 
0.30679050522528838613e0 + (0.58714723032745403331e-2 + (0.62248031602197686791e-4 + (0.53724185766200945789e-6 + (0.37827999418960232678e-8 + (0.21490291930444538307e-10 + 0.95513539182222222221e-13 * t) * t) * t) * t) * t) * t; +} +case 72: { +T t = 2*y100 - 145; +return 0.31878680111173319425e0 + (0.61270341192339103514e-2 + (0.65564012259707640976e-4 + (0.56837930287837738996e-6 + (0.40035151353392378882e-8 + (0.22662596341239294792e-10 + 0.99891109760000000000e-13 * t) * t) * t) * t) * t) * t; +} +case 73: { +T t = 2*y100 - 147; +return 0.33130773722152622027e0 + (0.63962406646798080903e-2 + (0.69072209592942396666e-4 + (0.60133006661885941812e-6 + (0.42362183765883466691e-8 + (0.23888182347073698382e-10 + 0.10439349811555555556e-12 * t) * t) * t) * t) * t) * t; +} +case 74: { +T t = 2*y100 - 149; +return 0.34438138658041336523e0 + (0.66798829540414007258e-2 + (0.72783795518603561144e-4 + (0.63619220443228800680e-6 + (0.44814499336514453364e-8 + (0.25168535651285475274e-10 + 0.10901861383111111111e-12 * t) * t) * t) * t) * t) * t; +} +case 75: { +T t = 2*y100 - 151; +return 0.35803744972380175583e0 + (0.69787978834882685031e-2 + (0.76710543371454822497e-4 + (0.67306815308917386747e-6 + (0.47397647975845228205e-8 + (0.26505114141143050509e-10 + 0.11376390933333333333e-12 * t) * t) * t) * t) * t) * t; +} +case 76: { +T t = 2*y100 - 153; +return 0.37230734890119724188e0 + (0.72938706896461381003e-2 + (0.80864854542670714092e-4 + (0.71206484718062688779e-6 + (0.50117323769745883805e-8 + (0.27899342394100074165e-10 + 0.11862637614222222222e-12 * t) * t) * t) * t) * t) * t; +} +case 77: { +T t = 2*y100 - 155; +return 0.38722432730555448223e0 + (0.76260375162549802745e-2 + (0.85259785810004603848e-4 + (0.75329383305171327677e-6 + (0.52979361368388119355e-8 + (0.29352606054164086709e-10 + 0.12360253370666666667e-12 * t) * t) * t) * t) * t) * t; +} +case 78: { +T t = 2*y100 - 157; +return 0.40282355354616940667e0 + (0.79762880915029728079e-2 + (0.89909077342438246452e-4 + (0.79687137961956194579e-6 + (0.55989731807360403195e-8 + (0.30866246101464869050e-10 + 0.12868841946666666667e-12 * t) * t) * t) * t) * t) * t; +} +case 79: { +T t = 2*y100 - 159; +return 0.41914223158913787649e0 + (0.83456685186950463538e-2 + (0.94827181359250161335e-4 + (0.84291858561783141014e-6 + (0.59154537751083485684e-8 + (0.32441553034347469291e-10 + 0.13387957943111111111e-12 * t) * t) * t) * t) * t) * t; +} +case 80: { +T t = 2*y100 - 161; +return 0.43621971639463786896e0 + (0.87352841828289495773e-2 + (0.10002929142066799966e-3 + (0.89156148280219880024e-6 + (0.62480008150788597147e-8 + (0.34079760983458878910e-10 + 0.13917107176888888889e-12 * t) * t) * t) * t) * t) * t; +} +case 81: { +T t = 2*y100 - 163; +return 0.45409763548534330981e0 + (0.91463027755548240654e-2 + (0.10553137232446167258e-3 + (0.94293113464638623798e-6 + (0.65972492312219959885e-8 + (0.35782041795476563662e-10 + 0.14455745872000000000e-12 * t) * t) * t) * t) * t) * t; +} +case 82: { +T t = 2*y100 - 165; +return 0.47282001668512331468e0 + (0.95799574408860463394e-2 + (0.11135019058000067469e-3 + (0.99716373005509038080e-6 + (0.69638453369956970347e-8 + (0.37549499088161345850e-10 + 0.15003280712888888889e-12 * t) * t) * t) * t) * t) * t; +} +case 83: { +T t = 2*y100 - 167; +return 0.49243342227179841649e0 + (0.10037550043909497071e-1 + (0.11750334542845234952e-3 + (0.10544006716188967172e-5 + (0.73484461168242224872e-8 + (0.39383162326435752965e-10 + 0.15559069118222222222e-12 * t) * t) * t) * t) * t) * t; +} +case 84: { +T t = 2*y100 - 169; 
+return 0.51298708979209258326e0 + (0.10520454564612427224e-1 + (0.12400930037494996655e-3 + (0.11147886579371265246e-5 + (0.77517184550568711454e-8 + (0.41283980931872622611e-10 + 0.16122419680000000000e-12 * t) * t) * t) * t) * t) * t; +} +case 85: { +T t = 2*y100 - 171; +return 0.53453307979101369843e0 + (0.11030120618800726938e-1 + (0.13088741519572269581e-3 + (0.11784797595374515432e-5 + (0.81743383063044825400e-8 + (0.43252818449517081051e-10 + 0.16692592640000000000e-12 * t) * t) * t) * t) * t) * t; +} +case 86: { +T t = 2*y100 - 173; +return 0.55712643071169299478e0 + (0.11568077107929735233e-1 + (0.13815797838036651289e-3 + (0.12456314879260904558e-5 + (0.86169898078969313597e-8 + (0.45290446811539652525e-10 + 0.17268801084444444444e-12 * t) * t) * t) * t) * t) * t; +} +case 87: { +T t = 2*y100 - 175; +return 0.58082532122519320968e0 + (0.12135935999503877077e-1 + (0.14584223996665838559e-3 + (0.13164068573095710742e-5 + (0.90803643355106020163e-8 + (0.47397540713124619155e-10 + 0.17850211608888888889e-12 * t) * t) * t) * t) * t) * t; +} +case 88: { +T t = 2*y100 - 177; +return 0.60569124025293375554e0 + (0.12735396239525550361e-1 + (0.15396244472258863344e-3 + (0.13909744385382818253e-5 + (0.95651595032306228245e-8 + (0.49574672127669041550e-10 + 0.18435945564444444444e-12 * t) * t) * t) * t) * t) * t; +} +case 89: { +T t = 2*y100 - 179; +return 0.63178916494715716894e0 + (0.13368247798287030927e-1 + (0.16254186562762076141e-3 + (0.14695084048334056083e-5 + (0.10072078109604152350e-7 + (0.51822304995680707483e-10 + 0.19025081422222222222e-12 * t) * t) * t) * t) * t) * t; +} +case 90: { +T t = 2*y100 - 181; +return 0.65918774689725319200e0 + (0.14036375850601992063e-1 + (0.17160483760259706354e-3 + (0.15521885688723188371e-5 + (0.10601827031535280590e-7 + (0.54140790105837520499e-10 + 0.19616655146666666667e-12 * t) * t) * t) * t) * t) * t; +} +case 91: { +T t = 2*y100 - 183; +return 0.68795950683174433822e0 + (0.14741765091365869084e-1 + (0.18117679143520433835e-3 + (0.16392004108230585213e-5 + (0.11155116068018043001e-7 + (0.56530360194925690374e-10 + 0.20209663662222222222e-12 * t) * t) * t) * t) * t) * t; +} +case 92: { +T t = 2*y100 - 185; +return 0.71818103808729967036e0 + (0.15486504187117112279e-1 + (0.19128428784550923217e-3 + (0.17307350969359975848e-5 + (0.11732656736113607751e-7 + (0.58991125287563833603e-10 + 0.20803065333333333333e-12 * t) * t) * t) * t) * t) * t; +} +case 93: { +T t = 2*y100 - 187; +return 0.74993321911726254661e0 + (0.16272790364044783382e-1 + (0.20195505163377912645e-3 + (0.18269894883203346953e-5 + (0.12335161021630225535e-7 + (0.61523068312169087227e-10 + 0.21395783431111111111e-12 * t) * t) * t) * t) * t) * t; +} +case 94: { +T t = 2*y100 - 189; +return 0.78330143531283492729e0 + (0.17102934132652429240e-1 + (0.21321800585063327041e-3 + (0.19281661395543913713e-5 + (0.12963340087354341574e-7 + (0.64126040998066348872e-10 + 0.21986708942222222222e-12 * t) * t) * t) * t) * t) * t; +} +case 95: { +T t = 2*y100 - 191; +return 0.81837581041023811832e0 + (0.17979364149044223802e-1 + (0.22510330592753129006e-3 + (0.20344732868018175389e-5 + (0.13617902941839949718e-7 + (0.66799760083972474642e-10 + 0.22574701262222222222e-12 * t) * t) * t) * t) * t) * t; +} +case 96: { +T t = 2*y100 - 193; +return 0.85525144775685126237e0 + (0.18904632212547561026e-1 + (0.23764237370371255638e-3 + (0.21461248251306387979e-5 + (0.14299555071870523786e-7 + (0.69543803864694171934e-10 + 0.23158593688888888889e-12 * t) * t) * t) * t) * t) * t; +} +case 97: { +T t = 2*y100 
- 195; +return 0.89402868170849933734e0 + (0.19881418399127202569e-1 + (0.25086793128395995798e-3 + (0.22633402747585233180e-5 + (0.15008997042116532283e-7 + (0.72357609075043941261e-10 + 0.23737194737777777778e-12 * t) * t) * t) * t) * t) * t; +} +case 98: { +T t = 2*y100 - 197; +return 0.93481333942870796363e0 + (0.20912536329780368893e-1 + (0.26481403465998477969e-3 + (0.23863447359754921676e-5 + (0.15746923065472184451e-7 + (0.75240468141720143653e-10 + 0.24309291271111111111e-12 * t) * t) * t) * t) * t) * t; +} +case 99: { +T t = 2*y100 - 199; +return 0.97771701335885035464e0 + (0.22000938572830479551e-1 + (0.27951610702682383001e-3 + (0.25153688325245314530e-5 + (0.16514019547822821453e-7 + (0.78191526829368231251e-10 + 0.24873652355555555556e-12 * t) * t) * t) * t) * t) * t; +} + } + // we only get here if y = 1, i.e. |x| < 4*eps, in which case + // erfcx is within 1e-15 of 1. + return 1.0; +} + +template <typename T> +C10_HOST_DEVICE inline typename std::enable_if<std::is_floating_point<T>::value, T>::type +calc_erfcx(T x) +{ + if (at::_isnan(x)) { + return x; + } + + if (x >= 0) { + if (x > 50) { // continued-fraction expansion is faster + const T ispi = 0.56418958354775628694807945156; // 1 / sqrt(pi) + if (x > 5e7) { // 1-term expansion, important to avoid overflow + return ispi / x; + } + /* 5-term expansion (rely on compiler for CSE), simplified from: + ispi / (x+0.5/(x+1/(x+1.5/(x+2/x)))) */ + return ispi*((x*x) * (x*x+4.5) + 2) / (x * ((x*x) * (x*x+5) + 3.75)); + } + return erfcx_y100(400/(4+x)); + } + else { + if (x < -26.7) { + return std::numeric_limits<T>::infinity(); + } + else if (x < -6.1) { + return 2*exp(x*x); + } + else { + return 2*exp(x*x) - erfcx_y100(400/(4-x)); + } + } +} + +/* + * Logarithm of Gaussian cumulative distribution function. + + * This implementation of log_ndtr and its helper functions + * follows SciPy's implementation. + * See NOTICE for the licenses.
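+ * + * For x < -1 this evaluates log(Phi(x)) as log(erfcx(-x/sqrt(2))/2) - x*x/2 + * (using erfc(z) = exp(-z*z) * erfcx(z)); erfcx remains representable in the + * far left tail where erfc itself would underflow.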
+ */ +template <typename T> +inline C10_HOST_DEVICE T calc_log_ndtr(T x) { + T t = x * c10::frac_sqrt_2<T>; + if (x < T{-1.0}) { + return std::log(calc_erfcx(-t) / 2) - t * t; + } else { + return std::log1p(-std::erfc(t) / 2); + } +} + +template <typename T> +inline C10_HOST_DEVICE T airy_ai_forward(T x) { + static const T AN[] = { + +3.46538101525629032477e-01, + +1.20075952739645805542e+01, + +7.62796053615234516538e+01, + +1.68089224934630576269e+02, + +1.59756391350164413639e+02, + +7.05360906840444183113e+01, + +1.40264691163389668864e+01, + +9.99999999999999995305e-01, + }; + + static const T AD[] = { + +5.67594532638770212846e-01, + +1.47562562584847203173e+01, + +8.45138970141474626562e+01, + +1.77318088145400459522e+02, + +1.64234692871529701831e+02, + +7.14778400825575695274e+01, + +1.40959135607834029598e+01, + +1.00000000000000000470e+00, + }; + + static const T AFN[] = { + -1.31696323418331795333e-01, + -6.26456544431912369773e-01, + -6.93158036036933542233e-01, + -2.79779981545119124951e-01, + -4.91900132609500318020e-02, + -4.06265923594885404393e-03, + -1.59276496239262096340e-04, + -2.77649108155232920844e-06, + -1.67787698489114633780e-08, + }; + + static const T AFD[] = { + +1.33560420706553243746e+01, + +3.26825032795224613948e+01, + +2.67367040941499554804e+01, + +9.18707402907259625840e+00, + +1.47529146771666414581e+00, + +1.15687173795188044134e-01, + +4.40291641615211203805e-03, + +7.54720348287414296618e-05, + +4.51850092970580378464e-07, + }; + + static const T AGN[] = { + +1.97339932091685679179e-02, + +3.91103029615688277255e-01, + +1.06579897599595591108e+00, + +9.39169229816650230044e-01, + +3.51465656105547619242e-01, + +6.33888919628925490927e-02, + +5.85804113048388458567e-03, + +2.82851600836737019778e-04, + +6.98793669997260967291e-06, + +8.11789239554389293311e-08, + +3.41551784765923618484e-10, + }; + + static const T AGD[] = { + +9.30892908077441974853e+00, + +1.98352928718312140417e+01, + +1.55646628932864612953e+01, + +5.47686069422975497931e+00, + +9.54293611618961883998e-01, + +8.64580826352392193095e-02, + +4.12656523824222607191e-03, + +1.01259085116509135510e-04, + +1.17166733214413521882e-06, + +4.91834570062930015649e-09, + }; + + int domain_flag = 0; + + T ai; + + if (std::isinf(x)) { + return std::numeric_limits<T>::quiet_NaN(); + } + + if (x > T(103.892)) { + return T(0.0); + } + + T f; + T g; + T k; + + if (x < T(-2.09)) { + T z = T(1.0) / (T(-2.0) * x * std::sqrt(-x) / T(3.0)); + + T afn = 0.0; + + for (uint8_t index = 0; index <= 8; index++) { + afn = afn * (z * z) + AFN[index]; + } + + T afd = 0.0; + + for (uint8_t index = 0; index <= 8; index++) { + afd = afd * (z * z) + AFD[index]; + } + + T agn = 0.0; + + for (uint8_t index = 0; index <= 10 + 0; index++) { + agn = agn * (z * z) + AGN[index]; + } + + T agd = 0.0; + + for (uint8_t index = 0; index <= 10 - 1; index++) { + agd = agd * (z * z) + AGD[index]; + } + + T t = T(-2.0) * x * std::sqrt(-x) / T(3.0) + T(0.25) * c10::pi<T>; + + return T(5.64189583547756286948e-01) / std::sqrt(std::sqrt(-x)) * (std::sin(t) * (T(1.0) + z * z * afn / afd) - std::cos(t) * (z * agn / agd)); + } + + if (x >= T(2.09)) { + domain_flag = 5; + + T zeta = T(2.0) * x * std::sqrt(x) / T(3.0); + + T an = 0.0; + + for (uint8_t index = 0; index <= 7; index++) { + an = an * (T(1.0) / zeta) + AN[index]; + } + + T ad = 0.0; + + for (uint8_t index = 0; index <= 7; index++) { + ad = ad * (T(1.0) / zeta) + AD[index]; + } + + ai = T(5.64189583547756286948e-01) * (an / ad) / (T(2.0) * std::sqrt(std::sqrt(x)) * std::exp(zeta)); + + if (x > T(8.3203353)) {
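+ // past this cutoff the asymptotic expansion alone suffices, so the + // power-series refinement below is skipped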
+ return ai; + } + } + + f = 1.0; + g = x; + k = 1.0; + + T m = 1.0; + T n = x; + T t = 1.0; + T z = x * x * x; + + while (t > std::numeric_limits<T>::epsilon()) { + m *= z; + k += T(1.0); + m /= k; + n *= z; + k += T(1.0); + n /= k; + m /= k; + f += m; + k += T(1.0); + n /= k; + g += n; + + t = std::abs(m / f); + } + + if ((domain_flag & 1) == 0) { + return T(0.355028053887817239260) * f - T(0.258819403792806798405) * g; + } + + return ai; +} // T airy_ai(T x) + +template <typename T> +inline C10_HOST_DEVICE T bessel_j0_forward(T x) { + static const T PP[] = { + +7.96936729297347051624e-04, + +8.28352392107440799803e-02, + +1.23953371646414299388e+00, + +5.44725003058768775090e+00, + +8.74716500199817011941e+00, + +5.30324038235394892183e+00, + +9.99999999999999997821e-01, + }; + + static const T PQ[] = { + +9.24408810558863637013e-04, + +8.56288474354474431428e-02, + +1.25352743901058953537e+00, + +5.47097740330417105182e+00, + +8.76190883237069594232e+00, + +5.30605288235394617618e+00, + +1.00000000000000000218e+00, + }; + + static const T QP[] = { + -1.13663838898469149931e-02, + -1.28252718670509318512e+00, + -1.95539544257735972385e+01, + -9.32060152123768231369e+01, + -1.77681167980488050595e+02, + -1.47077505154951170175e+02, + -5.14105326766599330220e+01, + -6.05014350600728481186e+00, + }; + + static const T QQ[] = { + +6.43178256118178023184e+01, + +8.56430025976980587198e+02, + +3.88240183605401609683e+03, + +7.24046774195652478189e+03, + +5.93072701187316984827e+03, + +2.06209331660327847417e+03, + +2.42005740240291393179e+02, + }; + + static const T RP[] = { + -4.79443220978201773821e+09, + +1.95617491946556577543e+12, + -2.49248344360967716204e+14, + +9.70862251047306323952e+15, + }; + + static const T RQ[] = { + +4.99563147152651017219e+02, + +1.73785401676374683123e+05, + +4.84409658339962045305e+07, + +1.11855537045356834862e+10, + +2.11277520115489217587e+12, + +3.10518229857422583814e+14, + +3.18121955943204943306e+16, + +1.71086294081043136091e+18, + }; + + if (x < T(0)) { + x = -x; + } + + if (x <= T(5.0)) { + if (x < T(0.00001)) { + return T(1.0) - x * x / T(4.0); + } + + T rp = 0.0; + + for (uint8_t index = 0; index <= 3; index++) { + rp = rp * (x * x) + RP[index]; + } + + T rq = 0.0; + + for (uint8_t index = 0; index <= 7; index++) { + rq = rq * (x * x) + RQ[index]; + } + + return (x * x - T(5.78318596294678452118e+00)) * (x * x - T(3.04712623436620863991e+01)) * rp / rq; + } + + T pp = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + pp = pp * (T(25.0) / (x * x)) + PP[index]; + } + + T pq = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + pq = pq * (T(25.0) / (x * x)) + PQ[index]; + } + + T qp = 0.0; + + for (uint8_t index = 0; index <= 7; index++) { + qp = qp * (T(25.0) / (x * x)) + QP[index]; + } + + T qq = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + qq = qq * (T(25.0) / (x * x)) + QQ[index]; + } + + return (pp / pq * std::cos(x - T(0.785398163397448309615660845819875721)) - T(5.0) / x * (qp / qq) * std::sin(x - T(0.785398163397448309615660845819875721))) * T(0.797884560802865355879892119868763737) / std::sqrt(x); +} // bessel_j0_forward(T x) + +template <typename T> +inline C10_HOST_DEVICE T bessel_j1_forward(T x) { + static const T PP[] = { + +7.62125616208173112003e-04, + +7.31397056940917570436e-02, + +1.12719608129684925192e+00, + +5.11207951146807644818e+00, + +8.42404590141772420927e+00, + +5.21451598682361504063e+00, + +1.00000000000000000254e+00, + }; + + static const T PQ[] = { + +5.71323128072548699714e-04, + +6.88455908754495404082e-02, +
+1.10514232634061696926e+00, + +5.07386386128601488557e+00, + +8.39985554327604159757e+00, + +5.20982848682361821619e+00, + +9.99999999999999997461e-01, + }; + + static const T QP[] = { + +5.10862594750176621635e-02, + +4.98213872951233449420e+00, + +7.58238284132545283818e+01, + +3.66779609360150777800e+02, + +7.10856304998926107277e+02, + +5.97489612400613639965e+02, + +2.11688757100572135698e+02, + +2.52070205858023719784e+01, + }; + + static const T QQ[] = { + +7.42373277035675149943e+01, + +1.05644886038262816351e+03, + +4.98641058337653607651e+03, + +9.56231892404756170795e+03, + +7.99704160447350683650e+03, + +2.82619278517639096600e+03, + +3.36093607810698293419e+02, + }; + + static const T RP[] = { + -8.99971225705559398224e+08, + +4.52228297998194034323e+11, + -7.27494245221818276015e+13, + +3.68295732863852883286e+15, + }; + + static const T RQ[] = { + +6.20836478118054335476e+02, + +2.56987256757748830383e+05, + +8.35146791431949253037e+07, + +2.21511595479792499675e+10, + +4.74914122079991414898e+12, + +7.84369607876235854894e+14, + +8.95222336184627338078e+16, + +5.32278620332680085395e+18, + }; + + if (x < T(0.0)) { + return -bessel_j1_forward(-x); + } + + if (x <= T(5.0)) { + T rp = 0.0; + + for (uint8_t index = 0; index <= 3; index++) { + rp = rp * (x * x) + RP[index]; + } + + T rq = 0.0; + + for (uint8_t index = 0; index <= 7; index++) { + rq = rq * (x * x) + RQ[index]; + } + + return rp / rq * x * (x * x - T(1.46819706421238932572e+01)) * (x * x - T(4.92184563216946036703e+01)); + } + + T pp = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + pp = pp * (T(5.0) / x * (T(5.0) / x)) + PP[index]; + } + + T pq = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + pq = pq * (T(5.0) / x * (T(5.0) / x)) + PQ[index]; + } + + T qp = 0.0; + + for (uint8_t index = 0; index <= 7; index++) { + qp = qp * (T(5.0) / x * (T(5.0) / x)) + QP[index]; + } + + T qq = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + qq = qq * (T(5.0) / x * (T(5.0) / x)) + QQ[index]; + } + + return (pp / pq * std::cos(x - T(2.356194490192344928846982537459627163)) - T(5.0) / x * (qp / qq) * std::sin(x - T(2.356194490192344928846982537459627163))) * T(0.797884560802865355879892119868763737) / std::sqrt(x); +} // bessel_j1_forward(T x) + +template <typename T> +inline C10_HOST_DEVICE T bessel_y0_forward(T x) { + static const T PP[] = { + +7.96936729297347051624e-04, + +8.28352392107440799803e-02, + +1.23953371646414299388e+00, + +5.44725003058768775090e+00, + +8.74716500199817011941e+00, + +5.30324038235394892183e+00, + +9.99999999999999997821e-01, + }; + + static const T PQ[] = { + +9.24408810558863637013e-04, + +8.56288474354474431428e-02, + +1.25352743901058953537e+00, + +5.47097740330417105182e+00, + +8.76190883237069594232e+00, + +5.30605288235394617618e+00, + +1.00000000000000000218e+00, + }; + + static const T QP[] = { + -1.13663838898469149931e-02, + -1.28252718670509318512e+00, + -1.95539544257735972385e+01, + -9.32060152123768231369e+01, + -1.77681167980488050595e+02, + -1.47077505154951170175e+02, + -5.14105326766599330220e+01, + -6.05014350600728481186e+00, + }; + + static const T QQ[] = { + +6.43178256118178023184e+01, + +8.56430025976980587198e+02, + +3.88240183605401609683e+03, + +7.24046774195652478189e+03, + +5.93072701187316984827e+03, + +2.06209331660327847417e+03, + +2.42005740240291393179e+02, + }; + + static const T YP[] = { + +1.55924367855235737965e+04, + -1.46639295903971606143e+07, + +5.43526477051876500413e+09, + -9.82136065717911466409e+11, + +8.75906394395366999549e+13, +
-3.46628303384729719441e+15, + +4.42733268572569800351e+16, + -1.84950800436986690637e+16, + }; + + static const T YQ[] = { + +1.04128353664259848412e+03, + +6.26107330137134956842e+05, + +2.68919633393814121987e+08, + +8.64002487103935000337e+10, + +2.02979612750105546709e+13, + +3.17157752842975028269e+15, + +2.50596256172653059228e+17, + }; + + if (x <= T(5.0)) { + if (x == T(0.0)) { + return -std::numeric_limits<T>::infinity(); + } + + if (x < T(0.0)) { + return std::numeric_limits<T>::quiet_NaN(); + } + + T yp = 0.0; + + for (uint8_t index = 0; index <= 7; index++) { + yp = yp * (x * x) + YP[index]; + } + + T yq = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + yq = yq * (x * x) + YQ[index]; + } + + return yp / yq + (T(0.636619772367581343075535053490057448) * std::log(x) * bessel_j0_forward(x)); + } + + T pp = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + pp = pp * (T(25.0) / (x * x)) + PP[index]; + } + + T pq = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + pq = pq * (T(25.0) / (x * x)) + PQ[index]; + } + + T qp = 0.0; + + for (uint8_t index = 0; index <= 7; index++) { + qp = qp * (T(25.0) / (x * x)) + QP[index]; + } + + T qq = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + qq = qq * (T(25.0) / (x * x)) + QQ[index]; + } + + return (pp / pq * std::sin(x - T(0.785398163397448309615660845819875721)) + T(5.0) / x * (qp / qq) * std::cos(x - T(0.785398163397448309615660845819875721))) * T(0.797884560802865355879892119868763737) / std::sqrt(x); +} // bessel_y0_forward(T x) + +template <typename T> +inline C10_HOST_DEVICE T bessel_y1_forward(T x) { + static const T PP[] = { + +7.62125616208173112003e-04, + +7.31397056940917570436e-02, + +1.12719608129684925192e+00, + +5.11207951146807644818e+00, + +8.42404590141772420927e+00, + +5.21451598682361504063e+00, + +1.00000000000000000254e+00, + }; + + static const T PQ[] = { + +5.71323128072548699714e-04, + +6.88455908754495404082e-02, + +1.10514232634061696926e+00, + +5.07386386128601488557e+00, + +8.39985554327604159757e+00, + +5.20982848682361821619e+00, + +9.99999999999999997461e-01, + }; + + static const T QP[] = { + +5.10862594750176621635e-02, + +4.98213872951233449420e+00, + +7.58238284132545283818e+01, + +3.66779609360150777800e+02, + +7.10856304998926107277e+02, + +5.97489612400613639965e+02, + +2.11688757100572135698e+02, + +2.52070205858023719784e+01, + }; + + static const T QQ[] = { + +7.42373277035675149943e+01, + +1.05644886038262816351e+03, + +4.98641058337653607651e+03, + +9.56231892404756170795e+03, + +7.99704160447350683650e+03, + +2.82619278517639096600e+03, + +3.36093607810698293419e+02, + }; + + static const T YP[] = { + +1.26320474790178026440e+09, + -6.47355876379160291031e+11, + +1.14509511541823727583e+14, + -8.12770255501325109621e+15, + +2.02439475713594898196e+17, + -7.78877196265950026825e+17, + }; + + static const T YQ[] = { + +5.94301592346128195359e+02, + +2.35564092943068577943e+05, + +7.34811944459721705660e+07, + +1.87601316108706159478e+10, + +3.88231277496238566008e+12, + +6.20557727146953693363e+14, + +6.87141087355300489866e+16, + +3.97270608116560655612e+18, + }; + + if (x <= T(5.0)) { + if (x == T(0.0)) { + return -std::numeric_limits<T>::infinity(); + } + + if (x <= T(0.0)) { + return std::numeric_limits<T>::quiet_NaN(); + } + + T yp = 0.0; + + for (uint8_t index = 0; index <= 5; index++) { + yp = yp * (x * x) + YP[index]; + } + + T yq = 0.0; + + for (uint8_t index = 0; index <= 7; index++) { + yq = yq * (x * x) + YQ[index]; + } + + return x * (yp / yq) +
(T(0.636619772367581343075535053490057448) * (bessel_j1_forward(x) * std::log(x) - T(1.0) / x)); + } + + T pp = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + pp = pp * (T(5.0) / x * (T(5.0) / x)) + PP[index]; + } + + T pq = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + pq = pq * (T(5.0) / x * (T(5.0) / x)) + PQ[index]; + } + + T qp = 0.0; + + for (uint8_t index = 0; index <= 7; index++) { + qp = qp * (T(5.0) / x * (T(5.0) / x)) + QP[index]; + } + + T qq = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + qq = qq * (T(5.0) / x * (T(5.0) / x)) + QQ[index]; + } + + return (pp / pq * std::sin(x - T(2.356194490192344928846982537459627163)) + T(5.0) / x * (qp / qq) * std::cos(x - T(2.356194490192344928846982537459627163))) * T(0.797884560802865355879892119868763737) / std::sqrt(x); +} // bessel_y1_forward(T x) + +template <typename T> +inline C10_HOST_DEVICE T chebyshev_polynomial_t_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (std::abs(x) == T(1.0)) { + if (x > T(0.0) || n % 2 == 0) { + return T(1.0); + } + + return T(-1.0); + } + + if ((n > 6) && (std::abs(x) < T(1.0))) { + return std::cos(n * std::acos(x)); + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return x; + } + + T p = T(1.0); + T q = x; + T r; + + for (int64_t k = 2; k <= n; k++) { + r = (x + x) * q - p; + p = q; + q = r; + } + + return r; +} // chebyshev_polynomial_t_forward(T x, int64_t n) + +template <typename T> +inline C10_HOST_DEVICE T chebyshev_polynomial_t_forward(T x, T n) { + return chebyshev_polynomial_t_forward(x, static_cast<int64_t>(n)); +} // chebyshev_polynomial_t_forward(T x, T n) + +template <typename T> +inline C10_HOST_DEVICE T chebyshev_polynomial_u_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (std::abs(x) == T(1.0)) { + if (x > T(0.0) || n % 2 == 0) { + return n + 1; + } + + return -(n + 1); + } + + if ((n > 8) && (std::abs(x) < T(1.0))) { + if (std::sin(std::acos(x)) != T(0.0)) { + return std::sin((n + 1) * std::acos(x)) / std::sin(std::acos(x)); + } + + return (n + 1) * std::cos((n + 1) * std::acos(x)) / x; + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return x + x; + } + + T p = T(1.0); + T q = x + x; + T r; + + for (int64_t k = 2; k <= n; k++) { + r = (x + x) * q - p; + p = q; + q = r; + } + + return r; +} // chebyshev_polynomial_u_forward(T x, int64_t n) + +template <typename T> +inline C10_HOST_DEVICE T chebyshev_polynomial_u_forward(T x, T n) { + return chebyshev_polynomial_u_forward(x, static_cast<int64_t>(n)); +} // chebyshev_polynomial_u_forward(T x, T n) + +template <typename T> +inline C10_HOST_DEVICE T chebyshev_polynomial_v_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (std::abs(x) == T(1.0)) { + if (x > T(0.0)) { + return T(1.0); + } + + if (n % 2 == 0) { + return n + n + 1; + } + + return -(n + n + 1); + } + + if ((n > 8) && (std::abs(x) < T(1.0))) { + if (std::sin(std::acos(x) / T(2.0)) != T(1.0)) { + return std::cos((n + T(0.5)) * std::acos(x)) / std::cos(std::acos(x) / T(2.0)); + } + + if (n % 2 == 0) { + return n + n + 1; + } + + return -(n + n + 1); + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return x + x - T(1.0); + } + + T p = T(1.0); + T q = x + x - T(1.0); + T r; + + for (int64_t k = 2; k <= n; k++) { + r = (x + x) * q - p; + p = q; + q = r; + } + + return r; +} // chebyshev_polynomial_v_forward(T x, int64_t n) + +template <typename T> +inline C10_HOST_DEVICE T chebyshev_polynomial_v_forward(T x, T n) { + return chebyshev_polynomial_v_forward(x, static_cast<int64_t>(n)); +} // chebyshev_polynomial_v_forward(T x, T n)
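+ +// All four Chebyshev kinds are evaluated with the same three-term +// recurrence r = 2*x*q - p; only the n = 0 and n = 1 seed values differ +// between the T, U, V and W variants.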
+template <typename T> +inline C10_HOST_DEVICE T chebyshev_polynomial_w_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (std::abs(x) == T(1.0)) { + if (x > T(0.0)) { + return n + n + 1; + } + + if (n % 2 == 0) { + return T(1.0); + } + + return T(-1.0); + } + + if ((n > 8) && (std::abs(x) < T(1.0))) { + if (std::cos(std::acos(x) / T(2.0)) != T(1.0)) { + return std::sin((n + T(0.5)) * std::acos(x)) / std::sin(std::acos(x) / T(2.0)); + } + + if (x > T(0.0)) { + return n + n + 1; + } + + if (n % 2 == 0) { + return T(1.0); + } + + return T(-1.0); + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return x + x + T(1.0); + } + + T p = T(1.0); + T q = x + x + T(1.0); + T r; + + for (int64_t k = 2; k <= n; k++) { + r = (x + x) * q - p; + p = q; + q = r; + } + + return r; +} // chebyshev_polynomial_w_forward(T x, int64_t n) + +template <typename T> +inline C10_HOST_DEVICE T chebyshev_polynomial_w_forward(T x, T n) { + return chebyshev_polynomial_w_forward(x, static_cast<int64_t>(n)); +} // chebyshev_polynomial_w_forward(T x, T n) + +template <typename T> +inline C10_HOST_DEVICE T hermite_polynomial_h_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return x + x; + } + + T p = T(1.0); + T q = x + x; + T r = T(0.0); + + for (int64_t k = 2; k < n + n; k += 2) { + r = (x + x) * q - k * p; + p = q; + q = r; + } + + return r; +} // hermite_polynomial_h_forward(T x, int64_t n) + +template <typename T, std::enable_if_t<!std::is_floating_point<T>::value, int> = 0> +inline C10_HOST_DEVICE T hermite_polynomial_h_forward(T x, T n) { + return hermite_polynomial_h_forward(x, static_cast<int64_t>(n)); +} // hermite_polynomial_h_forward(T x, T n) + +template <typename T, std::enable_if_t<std::is_floating_point<T>::value, int> = 0> +inline C10_HOST_DEVICE T hermite_polynomial_h_forward(T x, T n) { + return hermite_polynomial_h_forward(x, ((!std::isinf(n)) && (!std::isnan(n))) ?
static_cast<int64_t>(n) : static_cast<int64_t>(-1)); +} // hermite_polynomial_h_forward(T x, T n) + +template <typename T> +inline C10_HOST_DEVICE T hermite_polynomial_he_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return x; + } + + T p = T(1.0); + T q = x; + T r; + + for (int64_t k = 1; k < n; k++) { + r = x * q - k * p; + p = q; + q = r; + } + + return r; +} // hermite_polynomial_he_forward(T x, int64_t n) + +template <typename T> +inline C10_HOST_DEVICE T hermite_polynomial_he_forward(T x, T n) { + return hermite_polynomial_he_forward(x, static_cast<int64_t>(n)); +} // hermite_polynomial_he_forward(T x, T n) + +template <typename T> +inline C10_HOST_DEVICE T laguerre_polynomial_l_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (std::abs(x) == T(0.0)) { + return T(1.0); + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return T(1.0) - x; + } + + T p = T(1.0); + T q = T(1.0) - x; + T r; + + for (int64_t k = 1; k < n; k++) { + r = (((k + k) + (T(1.0) - x)) * q - k * p) / (k + 1); + p = q; + q = r; + } + + return r; +} // laguerre_polynomial_l_forward(T x, int64_t n) + +template <typename T> +inline C10_HOST_DEVICE T laguerre_polynomial_l_forward(T x, T n) { + return laguerre_polynomial_l_forward(x, static_cast<int64_t>(n)); +} // laguerre_polynomial_l_forward(T x, T n) + +template <typename T> +inline C10_HOST_DEVICE T legendre_polynomial_p_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (std::abs(x) == T(1.0)) { + if (x > T(0.0) || n % 2 == 0) { + return T(1.0); + } + + return T(-1.0); + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return x; + } + + T p = T(1.0); + T q = x; + T r; + + for (int64_t k = 1; k < n; k++) { + r = ((k + k + 1) * x * q - k * p) / (k + 1); + p = q; + q = r; + } + + return r; +} // legendre_polynomial_p_forward(T x, int64_t n) + +template <typename T> +inline C10_HOST_DEVICE T legendre_polynomial_p_forward(T x, T n) { + return legendre_polynomial_p_forward(x, static_cast<int64_t>(n)); +} // legendre_polynomial_p_forward(T x, T n) + +template <typename T> +inline C10_HOST_DEVICE T modified_bessel_i0_forward(T x) { + static const T A[] = { + -4.41534164647933937950e-18, + +3.33079451882223809783e-17, + -2.43127984654795469359e-16, + +1.71539128555513303061e-15, + -1.16853328779934516808e-14, + +7.67618549860493561688e-14, + -4.85644678311192946090e-13, + +2.95505266312963983461e-12, + -1.72682629144155570723e-11, + +9.67580903537323691224e-11, + -5.18979560163526290666e-10, + +2.65982372468238665035e-09, + -1.30002500998624804212e-08, + +6.04699502254191894932e-08, + -2.67079385394061173391e-07, + +1.11738753912010371815e-06, + -4.41673835845875056359e-06, + +1.64484480707288970893e-05, + -5.75419501008210370398e-05, + +1.88502885095841655729e-04, + -5.76375574538582365885e-04, + +1.63947561694133579842e-03, + -4.32430999505057594430e-03, + +1.05464603945949983183e-02, + -2.37374148058994688156e-02, + +4.93052842396707084878e-02, + -9.49010970480476444210e-02, + +1.71620901522208775349e-01, + -3.04682672343198398683e-01, + +6.76795274409476084995e-01, + }; + + static const T B[] = { + -7.23318048787475395456e-18, + -4.83050448594418207126e-18, + +4.46562142029675999901e-17, + +3.46122286769746109310e-17, + -2.82762398051658348494e-16, + -3.42548561967721913462e-16, + +1.77256013305652638360e-15, + +3.81168066935262242075e-15, + -9.55484669882830764870e-15, + -4.15056934728722208663e-14, + +1.54008621752140982691e-14, + +3.85277838274214270114e-13, + +7.18012445138366623367e-13, + -1.79417853150680611778e-12, + -1.32158118404477131188e-11, +
-3.14991652796324136454e-11, + +1.18891471078464383424e-11, + +4.94060238822496958910e-10, + +3.39623202570838634515e-09, + +2.26666899049817806459e-08, + +2.04891858946906374183e-07, + +2.89137052083475648297e-06, + +6.88975834691682398426e-05, + +3.36911647825569408990e-03, + +8.04490411014108831608e-01, + }; + + T p; + T q = 0.0; + + if (std::abs(x) <= T(8.0)) { + T a = A[0]; + + for (uint8_t index = 1; index < 30; index++) { + p = q; + q = a; + a = ((std::abs(x) / T(2.0)) - T(2.0)) * q - p + A[index]; + } + + return std::exp(std::abs(x)) * (T(0.5) * (a - p)); + } + + T b = B[0]; + + for (uint8_t index = 1; index < 25; index++) { + p = q; + q = b; + b = (T(32.0) / std::abs(x) - T(2.0)) * q - p + B[index]; + } + + return std::exp(std::abs(x)) * (T(0.5) * (b - p)) / std::sqrt(std::abs(x)); +} // modified_bessel_i0_forward(T x) + +template <typename T> +inline C10_HOST_DEVICE T modified_bessel_i1_forward(T x) { + static const T A[] = { + +2.77791411276104639959e-18, + -2.11142121435816608115e-17, + +1.55363195773620046921e-16, + -1.10559694773538630805e-15, + +7.60068429473540693410e-15, + -5.04218550472791168711e-14, + +3.22379336594557470981e-13, + -1.98397439776494371520e-12, + +1.17361862988909016308e-11, + -6.66348972350202774223e-11, + +3.62559028155211703701e-10, + -1.88724975172282928790e-09, + +9.38153738649577178388e-09, + -4.44505912879632808065e-08, + +2.00329475355213526229e-07, + -8.56872026469545474066e-07, + +3.47025130813767847674e-06, + -1.32731636560394358279e-05, + +4.78156510755005422638e-05, + -1.61760815825896745588e-04, + +5.12285956168575772895e-04, + -1.51357245063125314899e-03, + +4.15642294431288815669e-03, + -1.05640848946261981558e-02, + +2.47264490306265168283e-02, + -5.29459812080949914269e-02, + +1.02643658689847095384e-01, + -1.76416518357834055153e-01, + +2.52587186443633654823e-01, + }; + + static const T B[] = { + +7.51729631084210481353e-18, + +4.41434832307170791151e-18, + -4.65030536848935832153e-17, + -3.20952592199342395980e-17, + +2.96262899764595013876e-16, + +3.30820231092092828324e-16, + -1.88035477551078244854e-15, + -3.81440307243700780478e-15, + +1.04202769841288027642e-14, + +4.27244001671195135429e-14, + -2.10154184277266431302e-14, + -4.08355111109219731823e-13, + -7.19855177624590851209e-13, + +2.03562854414708950722e-12, + +1.41258074366137813316e-11, + +3.25260358301548823856e-11, + -1.89749581235054123450e-11, + -5.58974346219658380687e-10, + -3.83538038596423702205e-09, + -2.63146884688951950684e-08, + -2.51223623787020892529e-07, + -3.88256480887769039346e-06, + -1.10588938762623716291e-04, + -9.76109749136146840777e-03, + +7.78576235018280120474e-01, + }; + + T p; + T q = 0.0; + + if (std::abs(x) <= T(8.0)) { + T a = A[0]; + + for (uint8_t index = 1; index < 29; index++) { + p = q; + q = a; + a = ((std::abs(x) / T(2.0)) - T(2.0)) * q - p + A[index]; + } + + if (x < T(0.0)) { + return -(T(0.5) * (a - p) * std::abs(x) * std::exp(std::abs(x))); + } + + return T(0.5) * (a - p) * std::abs(x) * std::exp(std::abs(x)); + } + + T b = B[0]; + + for (uint8_t index = 1; index < 25; index++) { + p = q; + q = b; + b = (T(32.0) / std::abs(x) - T(2.0)) * q - p + B[index]; + } + + if (x < T(0.0)) { + return -(std::exp(std::abs(x)) * (T(0.5) * (b - p)) / std::sqrt(std::abs(x))); + } + + return std::exp(std::abs(x)) * (T(0.5) * (b - p)) / std::sqrt(std::abs(x)); +} // modified_bessel_i1_forward(T x) + +template <typename T> +inline C10_HOST_DEVICE T modified_bessel_k0_forward(T x) { + static const T A[] = { + +1.37446543561352307156e-16, + +4.25981614279661018399e-14, +
+1.03496952576338420167e-11, + +1.90451637722020886025e-09, + +2.53479107902614945675e-07, + +2.28621210311945178607e-05, + +1.26461541144692592338e-03, + +3.59799365153615016266e-02, + +3.44289899924628486886e-01, + -5.35327393233902768720e-01, + }; + + static const T B[] = { + +5.30043377268626276149e-18, + -1.64758043015242134646e-17, + +5.21039150503902756861e-17, + -1.67823109680541210385e-16, + +5.51205597852431940784e-16, + -1.84859337734377901440e-15, + +6.34007647740507060557e-15, + -2.22751332699166985548e-14, + +8.03289077536357521100e-14, + -2.98009692317273043925e-13, + +1.14034058820847496303e-12, + -4.51459788337394416547e-12, + +1.85594911495471785253e-11, + -7.95748924447710747776e-11, + +3.57739728140030116597e-10, + -1.69753450938905987466e-09, + +8.57403401741422608519e-09, + -4.66048989768794782956e-08, + +2.76681363944501510342e-07, + -1.83175552271911948767e-06, + +1.39498137188764993662e-05, + -1.28495495816278026384e-04, + +1.56988388573005337491e-03, + -3.14481013119645005427e-02, + +2.44030308206595545468e+00, + }; + + if (x == T(0.0)) { + return std::numeric_limits<T>::infinity(); + } + + if (x < T(0.0)) { + return std::numeric_limits<T>::quiet_NaN(); + } + + T p; + T q = 0.0; + + if (x <= T(2.0)) { + T a = A[0]; + + for (uint8_t index = 1; index < 10; index++) { + p = q; + q = a; + a = (x * x - T(2.0)) * q - p + A[index]; + } + + return T(0.5) * (a - p) - std::log(0.5 * x) * modified_bessel_i0_forward(x); + } + + T b = B[0]; + + for (uint8_t index = 1; index < 25; index++) { + p = q; + q = b; + b = (T(8.0) / x - T(2.0)) * q - p + B[index]; + } + + return std::exp(-x) * (T(0.5) * (b - p)) / std::sqrt(x); +} // modified_bessel_k0_forward(T x) + +template <typename T> +inline C10_HOST_DEVICE T modified_bessel_k1_forward(T x) { + static const T A[] = { + -7.02386347938628759343e-18, + -2.42744985051936593393e-15, + -6.66690169419932900609e-13, + -1.41148839263352776110e-10, + -2.21338763073472585583e-08, + -2.43340614156596823496e-06, + -1.73028895751305206302e-04, + -6.97572385963986435018e-03, + -1.22611180822657148235e-01, + -3.53155960776544875667e-01, + +1.52530022733894777053e+00, + }; + + static const T B[] = { + -5.75674448366501715755e-18, + +1.79405087314755922667e-17, + -5.68946255844285935196e-17, + +1.83809354436663880070e-16, + -6.05704724837331885336e-16, + +2.03870316562433424052e-15, + -7.01983709041831346144e-15, + +2.47715442448130437068e-14, + -8.97670518232499435011e-14, + +3.34841966607842919884e-13, + -1.28917396095102890680e-12, + +5.13963967348173025100e-12, + -2.12996783842756842877e-11, + +9.21831518760500529508e-11, + -4.19035475934189648750e-10, + +2.01504975519703286596e-09, + -1.03457624656780970260e-08, + +5.74108412545004946722e-08, + -3.50196060308781257119e-07, + +2.40648494783721712015e-06, + -1.93619797416608296024e-05, + +1.95215518471351631108e-04, + -2.85781685962277938680e-03, + +1.03923736576817238437e-01, + +2.72062619048444266945e+00, + }; + + if (x == T(0.0)) { + return std::numeric_limits<T>::infinity(); + } + + if (x < T(0.0)) { + return std::numeric_limits<T>::quiet_NaN(); + } + + T p; + T q = 0.0; + + if (x <= T(2.0)) { + T a = A[0]; + + for (uint8_t index = 1; index < 11; index++) { + p = q; + q = a; + a = (x * x - T(2.0)) * q - p + A[index]; + } + + return std::log(T(0.5) * x) * modified_bessel_i1_forward(x) + T(0.5) * (a - p) / x; + } + + T b = B[0]; + + for (uint8_t index = 1; index < 25; index++) { + p = q; + q = b; + b = (T(8.0) / x - T(2.0)) * q - p + B[index]; + } + + return std::exp(-x) * (T(0.5) * (b - p)) / std::sqrt(x); +} // modified_bessel_k1_forward(T x)
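+ +// The scaled variants below return exp(x) * K0(x) and exp(x) * K1(x): +// the small-argument branch multiplies the series result by std::exp(x), +// and the large-argument branch simply omits the exp(-x) factor.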
+ +template <typename T> +inline C10_HOST_DEVICE T scaled_modified_bessel_k0_forward(T x) { + static const T A[] = { + +1.37446543561352307156e-16, + +4.25981614279661018399e-14, + +1.03496952576338420167e-11, + +1.90451637722020886025e-09, + +2.53479107902614945675e-07, + +2.28621210311945178607e-05, + +1.26461541144692592338e-03, + +3.59799365153615016266e-02, + +3.44289899924628486886e-01, + -5.35327393233902768720e-01, + }; + + static const T B[] = { + +5.30043377268626276149e-18, + -1.64758043015242134646e-17, + +5.21039150503902756861e-17, + -1.67823109680541210385e-16, + +5.51205597852431940784e-16, + -1.84859337734377901440e-15, + +6.34007647740507060557e-15, + -2.22751332699166985548e-14, + +8.03289077536357521100e-14, + -2.98009692317273043925e-13, + +1.14034058820847496303e-12, + -4.51459788337394416547e-12, + +1.85594911495471785253e-11, + -7.95748924447710747776e-11, + +3.57739728140030116597e-10, + -1.69753450938905987466e-09, + +8.57403401741422608519e-09, + -4.66048989768794782956e-08, + +2.76681363944501510342e-07, + -1.83175552271911948767e-06, + +1.39498137188764993662e-05, + -1.28495495816278026384e-04, + +1.56988388573005337491e-03, + -3.14481013119645005427e-02, + +2.44030308206595545468e+00, + }; + + if (x == T(0.0)) { + return std::numeric_limits<T>::infinity(); + } + + if (x < T(0.0)) { + return std::numeric_limits<T>::quiet_NaN(); + } + + T p; + T q = 0.0; + + if (x <= T(2.0)) { + T a = A[0]; + + for (uint64_t index = 1; index < 10; index++) { + p = q; + q = a; + a = (x * x - T(2.0)) * q - p + A[index]; + } + + return (T(0.5) * (a - p) - std::log(T(0.5) * x) * modified_bessel_i0_forward(x)) * std::exp(x); + } + + T b = B[0]; + + for (uint64_t index = 1; index < 25; index++) { + p = q; + q = b; + b = (T(8.0) / x - T(2.0)) * q - p + B[index]; + } + + return T(0.5) * (b - p) / std::sqrt(x); +} // T scaled_modified_bessel_k0_forward(T x) + +template <typename T> +inline C10_HOST_DEVICE T scaled_modified_bessel_k1_forward(T x) { + static const T A[] = { + -7.02386347938628759343e-18, + -2.42744985051936593393e-15, + -6.66690169419932900609e-13, + -1.41148839263352776110e-10, + -2.21338763073472585583e-08, + -2.43340614156596823496e-06, + -1.73028895751305206302e-04, + -6.97572385963986435018e-03, + -1.22611180822657148235e-01, + -3.53155960776544875667e-01, + +1.52530022733894777053e+00, + }; + + static const T B[] = { + -5.75674448366501715755e-18, + +1.79405087314755922667e-17, + -5.68946255844285935196e-17, + +1.83809354436663880070e-16, + -6.05704724837331885336e-16, + +2.03870316562433424052e-15, + -7.01983709041831346144e-15, + +2.47715442448130437068e-14, + -8.97670518232499435011e-14, + +3.34841966607842919884e-13, + -1.28917396095102890680e-12, + +5.13963967348173025100e-12, + -2.12996783842756842877e-11, + +9.21831518760500529508e-11, + -4.19035475934189648750e-10, + +2.01504975519703286596e-09, + -1.03457624656780970260e-08, + +5.74108412545004946722e-08, + -3.50196060308781257119e-07, + +2.40648494783721712015e-06, + -1.93619797416608296024e-05, + +1.95215518471351631108e-04, + -2.85781685962277938680e-03, + +1.03923736576817238437e-01, + +2.72062619048444266945e+00, + }; + + if (x == T(0.0)) { + return std::numeric_limits<T>::infinity(); + } + + if (x < T(0.0)) { + return std::numeric_limits<T>::quiet_NaN(); + } + + T p; + T q = 0.0; + + if (x <= T(2.0)) { + T a = A[0]; + + for (uint64_t index = 1; index < 11; index++) { + p = q; + q = a; + a = (x * x - T(2.0)) * q - p + A[index]; + } + + return (std::log(T(0.5) * x) * modified_bessel_i1_forward(x) + T(0.5) * (a
- p) / x) * std::exp(x); + } + + T b = B[0]; + + for (uint64_t index = 1; index < 25; index++) { + p = q; + q = b; + b = (T(8.0) / x - T(2.0)) * q - p + B[index]; + } + + return (T(0.5) * (b - p) / std::sqrt(x)); +} // T scaled_modified_bessel_k1_forward(T x) + +template <typename T> +inline C10_HOST_DEVICE T shifted_chebyshev_polynomial_t_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (x == T(1.0)) { + return T(1.0); + } + + if (x == T(0.0)) { + if (n % 2 == 0) { + return T(1.0); + } + + return T(-1.0); + } + + if ((n > 6) && (std::abs(x + x - T(1.0)) < T(1.0))) { + return std::cos(n * std::acos(x + x - T(1.0))); + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return x + x - T(1.0); + } + + T p = T(1.0); + T q = x + x - T(1.0); + T r; + + for (int64_t k = 2; k <= n; k++) { + r = (x + x - T(1.0) + (x + x - T(1.0))) * q - p; + p = q; + q = r; + } + + return r; +} // shifted_chebyshev_polynomial_t_forward(T x, int64_t n) + +template <typename T> +inline C10_HOST_DEVICE T shifted_chebyshev_polynomial_t_forward(T x, T n) { + return shifted_chebyshev_polynomial_t_forward(x, static_cast<int64_t>(n)); +} // shifted_chebyshev_polynomial_t_forward(T x, T n) + +template <typename T> +inline C10_HOST_DEVICE T shifted_chebyshev_polynomial_u_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (x == T(1.0)) { + return n + 1; + } + + if (x == T(0.0)) { + if (n % 2 == 0) { + return n + 1; + } + + return -(n + 1); + } + + if ((n > 6) && (std::abs(x + x - T(1.0)) < T(1.0))) { + if (std::sin(std::acos(x + x - T(1.0))) != T(0.0)) { + return std::sin((n + 1) * std::acos(x + x - T(1.0))) / std::sin(std::acos(x + x - T(1.0))); + } + + return (n + 1) * std::cos((n + 1) * std::acos(x + x - T(1.0))) / (x + x - T(1.0)); + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return x + x - T(1.0) + (x + x - T(1.0)); + } + + T p = T(1.0); + T q = x + x - T(1.0) + (x + x - T(1.0)); + T r; + + for (int64_t k = 2; k <= n; k++) { + r = (x + x - T(1.0) + (x + x - T(1.0))) * q - p; + p = q; + q = r; + } + + return r; +} // shifted_chebyshev_polynomial_u_forward(T x, int64_t n) + +template <typename T> +inline C10_HOST_DEVICE T shifted_chebyshev_polynomial_u_forward(T x, T n) { + return shifted_chebyshev_polynomial_u_forward(x, static_cast<int64_t>(n)); +} // shifted_chebyshev_polynomial_u_forward(T x, T n) + +template <typename T> +inline C10_HOST_DEVICE T shifted_chebyshev_polynomial_v_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (x == T(1.0)) { + return T(1.0); + } + + if (x == T(0.0)) { + if (n % 2 == 0) { + return (n + n + 1); + } + + return -(n + n + 1); + } + + if ((n > 6) && (std::abs(x + x - T(1.0)) < T(1.0))) { + if (std::sin(std::acos(x + x - T(1.0)) / T(2.0)) != T(1.0)) { + return std::cos(((n) + T(0.5)) * std::acos(x + x - T(1.0))) / std::cos(std::acos(x + x - T(1.0)) / T(2.0)); + } + + if (n % 2 == 0) { + return n + n + 1; + } + + return -(n + n + 1); + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return x + x - T(1.0) + (x + x - T(1.0)) - T(1.0); + } + + T p = T(1.0); + T q = x + x - T(1.0) + (x + x - T(1.0)) - T(1.0); + T r; + + for (int64_t k = 2; k <= n; k++) { + r = (x + x - T(1.0) + (x + x - T(1.0))) * q - p; + p = q; + q = r; + } + + return r; +} // shifted_chebyshev_polynomial_v_forward(T x, int64_t n) + +template <typename T> +inline C10_HOST_DEVICE T shifted_chebyshev_polynomial_v_forward(T x, T n) { + return shifted_chebyshev_polynomial_v_forward(x, static_cast<int64_t>(n)); +} // shifted_chebyshev_polynomial_v_forward(T x, T n) + +template <typename T> +inline C10_HOST_DEVICE T
shifted_chebyshev_polynomial_w_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (x == T(1.0)) { + return n + n + 1; + } + + if (x == T(0.0)) { + if (n % 2 == 0) { + return T(1.0); + } + + return T(-1.0); + } + + if ((n > 4) && (std::abs(x + x - T(1.0)) < T(1.0))) { + if (std::cos(std::acos(x + x - T(1.0)) / T(2.0)) != T(1.0)) { + return std::sin((n + T(0.5)) * std::acos(x + x - T(1.0))) / std::sin(std::acos(x + x - T(1.0)) / T(2.0)); + } + + if (n % 2 == 0) { + return T(1.0); + } + + return T(-1.0); + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return x + x - T(1.0) + (x + x - T(1.0)) + T(1.0); + } + + T p = T(1.0); + T q = x + x - T(1.0) + (x + x - T(1.0)) + T(1.0); + T r; + + for (int64_t k = 2; k <= n; k++) { + r = (x + x - T(1.0) + (x + x - T(1.0))) * q - p; + p = q; + q = r; + } + + return r; +} // shifted_chebyshev_polynomial_w_forward(T x, int64_t n) + +template <typename T> +inline C10_HOST_DEVICE T shifted_chebyshev_polynomial_w_forward(T x, T n) { + return shifted_chebyshev_polynomial_w_forward(x, static_cast<int64_t>(n)); +} // shifted_chebyshev_polynomial_w_forward(T x, T n) + +template <typename T> +inline C10_HOST_DEVICE T spherical_bessel_j0_forward(T x) { + if (std::isinf(x)) { + return T(0.0); + } + + if (std::abs(x) < T(0.5)) { + return T(1.0) + x * x * (T(-1.0) / T(6.0) + x * x * (T(1.0) / T(120.0) + x * x * (T(-1.0) / T(5040.0) + x * x * (T(1.0) / T(362880.0) + x * x * (T(-1.0) / T(39916800.0) + x * x * (T(1.0) / T(6227020800.0))))))); + } + + return std::sin(x) / x; +} // T spherical_bessel_j0_forward(T x) + +C10_CLANG_DIAGNOSTIC_POP() diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/MathBitFallThroughLists.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/MathBitFallThroughLists.h new file mode 100644 index 0000000000000000000000000000000000000000..97b0854d82d0a2fec6bb708db767d81273ec7bcc --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/MathBitFallThroughLists.h @@ -0,0 +1,71 @@ +#pragma once + +namespace at { +// views and their in-place version ops +#define TORCH_VIEW_FNS(m) \ + m.impl("as_strided_", torch::CppFunction::makeFallthrough()); \ + m.impl("detach", torch::CppFunction::makeFallthrough()); \ + m.impl("detach_", torch::CppFunction::makeFallthrough()); \ + m.impl("diagonal", torch::CppFunction::makeFallthrough()); \ + m.impl("expand", torch::CppFunction::makeFallthrough()); \ + m.impl("expand_as", torch::CppFunction::makeFallthrough()); \ + m.impl("movedim.int", torch::CppFunction::makeFallthrough()); \ + m.impl("movedim.intlist", torch::CppFunction::makeFallthrough()); \ + m.impl("narrow", torch::CppFunction::makeFallthrough()); \ + m.impl("permute", torch::CppFunction::makeFallthrough()); \ + m.impl("select.Dimname", torch::CppFunction::makeFallthrough()); \ + m.impl("select.int", torch::CppFunction::makeFallthrough()); \ + m.impl("squeeze", torch::CppFunction::makeFallthrough()); \ + m.impl("squeeze_", torch::CppFunction::makeFallthrough()); \ + m.impl("transpose.int", torch::CppFunction::makeFallthrough()); \ + m.impl("transpose.Dimname", torch::CppFunction::makeFallthrough()); \ + m.impl("transpose_", torch::CppFunction::makeFallthrough()); \ + m.impl("t", torch::CppFunction::makeFallthrough()); \ + m.impl("t_", torch::CppFunction::makeFallthrough()); \ + m.impl("real", torch::CppFunction::makeFallthrough()); \ + m.impl("imag", torch::CppFunction::makeFallthrough()); \ + m.impl("view_as_real", torch::CppFunction::makeFallthrough()); \ + m.impl("unflatten.int",
torch::CppFunction::makeFallthrough()); \ + m.impl("unflatten.Dimname", torch::CppFunction::makeFallthrough()); \ + m.impl("unfold", torch::CppFunction::makeFallthrough()); \ + m.impl("unsqueeze", torch::CppFunction::makeFallthrough()); \ + m.impl("unsqueeze_", torch::CppFunction::makeFallthrough()); \ + m.impl("view_as", torch::CppFunction::makeFallthrough()); \ + m.impl("unbind.int", torch::CppFunction::makeFallthrough()); \ + m.impl("unbind.Dimname", torch::CppFunction::makeFallthrough()); \ + m.impl("split.Tensor", torch::CppFunction::makeFallthrough()); \ + m.impl("split_with_sizes", torch::CppFunction::makeFallthrough()); \ + m.impl("swapaxes", torch::CppFunction::makeFallthrough()); \ + m.impl("swapdims", torch::CppFunction::makeFallthrough()); \ + m.impl("chunk", torch::CppFunction::makeFallthrough()); \ + m.impl("reshape", torch::CppFunction::makeFallthrough()); \ + m.impl("alias", torch::CppFunction::makeFallthrough()); \ + m.impl("hsplit.int", torch::CppFunction::makeFallthrough()); \ + m.impl("hsplit.array", torch::CppFunction::makeFallthrough()); \ + m.impl("dsplit.int", torch::CppFunction::makeFallthrough()); \ + m.impl("dsplit.array", torch::CppFunction::makeFallthrough()); \ + m.impl("vsplit.int", torch::CppFunction::makeFallthrough()); \ + m.impl("vsplit.array", torch::CppFunction::makeFallthrough()); \ + m.impl("conj", torch::CppFunction::makeFallthrough()); \ + m.impl("_conj", torch::CppFunction::makeFallthrough()); \ + m.impl("_unsafe_view", torch::CppFunction::makeFallthrough()); \ + m.impl("resize_", torch::CppFunction::makeFallthrough()); + +#define TENSOR_UTILITIES_AND_CONSTRUCTORS(m) \ + m.impl("empty_like", torch::CppFunction::makeFallthrough()); \ + m.impl("empty.memory_format", torch::CppFunction::makeFallthrough()); \ + m.impl("empty.out", torch::CppFunction::makeFallthrough()); \ + m.impl("empty_strided", torch::CppFunction::makeFallthrough()); \ + m.impl("full_like", torch::CppFunction::makeFallthrough()); \ + m.impl("stride.int", torch::CppFunction::makeFallthrough()); \ + m.impl("stride.Dimname", torch::CppFunction::makeFallthrough()); \ + m.impl("size.int", torch::CppFunction::makeFallthrough()); \ + m.impl("size.Dimname", torch::CppFunction::makeFallthrough()); \ + m.impl("is_complex", torch::CppFunction::makeFallthrough()); \ + m.impl("is_floating_point", torch::CppFunction::makeFallthrough()); \ + m.impl("requires_grad_", torch::CppFunction::makeFallthrough()); +} + +#define TORCH_VIEW_FNS_NATIVE_FN_REGISTRATION(m) \ + m.impl("as_strided", torch::CppFunction::makeFallthrough()); \ + m.impl("view", torch::CppFunction::makeFallthrough()); diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/NonEmptyUtils.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/NonEmptyUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..3d18bc5e1525bacaf27d97a86024540236ce6220 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/NonEmptyUtils.h @@ -0,0 +1,27 @@ +#include <ATen/core/TensorBase.h> +#include <algorithm> +#include <vector> + +namespace at::native { + +inline int64_t ensure_nonempty_dim(int64_t dim) { + return std::max<int64_t>(dim, 1); +} + +inline int64_t ensure_nonempty_size(const TensorBase &t, int64_t dim) { + return t.dim() == 0 ? 1 : t.size(dim); +} + +inline int64_t ensure_nonempty_stride(const TensorBase &t, int64_t dim) { + return t.dim() == 0 ?
1 : t.stride(dim); +} + +using IdxVec = std::vector<int64_t>; +inline IdxVec ensure_nonempty_vec(IdxVec vec) { + if (vec.empty()) { + vec.push_back(1); + } + return vec; +} + +} // namespace at::native diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/NonSymbolicBC.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/NonSymbolicBC.h new file mode 100644 index 0000000000000000000000000000000000000000..fbcfd3a13f9c3c71a5291fdad2e6b95b186cf681 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/NonSymbolicBC.h @@ -0,0 +1,26 @@ +#pragma once +#include <ATen/core/Tensor.h> +#include <c10/util/irange.h> +#include <ATen/core/IListRef.h> + +namespace at::native { +// This file contains non-symbolic signatures for ops that we have sym-intified the signature of. +// However, in certain cases (such as static runtime), we call the native versions of the ops directly. +// In those cases, we will duplicate the signature here with non-symbolic ints, and also duplicate the C++ implementation. +TORCH_API at::Tensor reshape(const at::Tensor& self, at::IntArrayRef proposed_shape); +TORCH_API at::Tensor narrow(const at::Tensor& self, int64_t dim, int64_t start, int64_t length); +TORCH_API at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, std::optional<at::ScalarType> dtype=std::nullopt, std::optional<at::Layout> layout=std::nullopt, std::optional<at::Device> device=std::nullopt, std::optional<bool> pin_memory=std::nullopt, std::optional<bool> is_coalesced=std::nullopt); +TORCH_API at::Tensor nll_loss(const at::Tensor & self, const at::Tensor & target, const std::optional<at::Tensor>& weight_opt, int64_t reduction, int64_t ignore_index); +TORCH_API at::Tensor nll_loss2d(const at::Tensor & self, const at::Tensor & target, const std::optional<at::Tensor>& weight_opt, int64_t reduction, int64_t ignore_index); +// The below ops don't get a duplicated C++ implementation. +// They are backward ops, which makes them very unlikely to be called directly +// by external code (at::native::trace_backward). +// They get their own declaration for BC purposes however.
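+// For example (hypothetical call site, for illustration only), a static +// runtime style caller could invoke the non-symbolic overloads directly, +// bypassing the dispatcher: +// at::Tensor flat = at::native::reshape(self, {self.size(0), -1}); +// at::Tensor row = at::native::select(self, /*dim=*/0, /*index=*/0);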
+TORCH_API at::Tensor _embedding_bag_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const std::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1); +TORCH_API at::Tensor _embedding_bag_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const std::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1); +TORCH_API at::Tensor value_selecting_reduction_backward(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, at::IntArrayRef sizes, bool keepdim); +TORCH_API at::Tensor trace_backward(const at::Tensor & grad, at::IntArrayRef sizes); +TORCH_API at::Tensor index_select_backward(const at::Tensor & grad, at::IntArrayRef self_sizes, int64_t dim, const at::Tensor & index); +TORCH_API at::Tensor select(const at::Tensor& self, int64_t dim, int64_t index); +TORCH_API std::vector<at::Tensor> tensor_split(const Tensor& self, IntArrayRef indices, int64_t dim); +} // namespace at::native diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/Padding.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/Padding.h new file mode 100644 index 0000000000000000000000000000000000000000..53a054027f33d9de4ceb285bb3f95bd16027c402 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/Padding.h @@ -0,0 +1,62 @@ +#pragma once + +#include <ATen/core/Tensor.h> +#include <ATen/native/DispatchStub.h> + +namespace at::native { + +using padding_fn = void (*)(const Tensor&, const Tensor&, IntArrayRef); + +// reflection padding +DECLARE_DISPATCH(padding_fn, reflection_pad1d_kernel); +DECLARE_DISPATCH(padding_fn, reflection_pad1d_backward_kernel); +DECLARE_DISPATCH(padding_fn, reflection_pad2d_kernel); +DECLARE_DISPATCH(padding_fn, reflection_pad2d_backward_kernel); +DECLARE_DISPATCH(padding_fn, reflection_pad3d_kernel); +DECLARE_DISPATCH(padding_fn, reflection_pad3d_backward_kernel); + +// replication padding +DECLARE_DISPATCH(padding_fn, replication_pad1d_kernel); +DECLARE_DISPATCH(padding_fn, replication_pad1d_backward_kernel); +DECLARE_DISPATCH(padding_fn, replication_pad2d_kernel); +DECLARE_DISPATCH(padding_fn, replication_pad2d_backward_kernel); +DECLARE_DISPATCH(padding_fn, replication_pad3d_kernel); +DECLARE_DISPATCH(padding_fn, replication_pad3d_backward_kernel); + +namespace padding { + +template <int dim> +inline void check_valid_input(const Tensor& input, IntArrayRef padding) { + + TORCH_CHECK(padding.size() == 2 * dim, + "padding size is expected to be ", 2 * dim, + ", but got: ", padding.size()); + + int input_dim = input.dim(); + + bool is_batch_mode = input_dim == (dim + 2); + + bool valid_batch_mode = is_batch_mode; + bool valid_non_batch_mode = !is_batch_mode; + + if (is_batch_mode) { + // allow batch size of 0-dim. + for (const auto d : c10::irange(1, input_dim)) { + valid_batch_mode = valid_batch_mode && input.size(d) != 0; + } + } else { + for (const auto d : c10::irange(0, input_dim)) { + valid_non_batch_mode = valid_non_batch_mode && input.size(d) != 0; + } + } + + // allow empty batch size but not other dimensions.
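+ // e.g. for dim == 2 (pad2d): a 4-D (N, C, H, W) input may have N == 0, + // but C, H and W must be non-zero; a 3-D input must have no zero sizes.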
+ TORCH_CHECK(valid_batch_mode || valid_non_batch_mode, + "Expected ", dim + 1, "D or ", dim + 2, + "D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ", + input.sizes()); +} + +} // namespace padding + +} // at::native diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/PixelShuffle.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/PixelShuffle.h new file mode 100644 index 0000000000000000000000000000000000000000..a9a66a3dbb9d1dfbe8b8a7b926d70bee0a645258 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/PixelShuffle.h @@ -0,0 +1,47 @@ +#include <ATen/core/Tensor.h> +#include <c10/util/Exception.h> + +namespace at { +namespace native { + +inline void check_pixel_shuffle_shapes(const Tensor& self, int64_t upscale_factor) { + TORCH_CHECK(self.dim() >= 3, + "pixel_shuffle expects input to have at least 3 dimensions, but got input with ", + self.dim(), " dimension(s)"); + TORCH_CHECK(upscale_factor > 0, + "pixel_shuffle expects a positive upscale_factor, but got ", + upscale_factor); + int64_t c = self.size(-3); + int64_t upscale_factor_squared = upscale_factor * upscale_factor; + TORCH_CHECK(c % upscale_factor_squared == 0, + "pixel_shuffle expects its input's 'channel' dimension to be divisible by the square of " + "upscale_factor, but input.size(-3)=", c, " is not divisible by ", upscale_factor_squared); +} + +inline void check_pixel_unshuffle_shapes(const Tensor& self, int64_t downscale_factor) { + TORCH_CHECK( + self.dim() >= 3, + "pixel_unshuffle expects input to have at least 3 dimensions, but got input with ", + self.dim(), + " dimension(s)"); + TORCH_CHECK( + downscale_factor > 0, + "pixel_unshuffle expects a positive downscale_factor, but got ", + downscale_factor); + int64_t h = self.size(-2); + int64_t w = self.size(-1); + TORCH_CHECK( + h % downscale_factor == 0, + "pixel_unshuffle expects height to be divisible by downscale_factor, but input.size(-2)=", + h, + " is not divisible by ", + downscale_factor); + TORCH_CHECK( + w % downscale_factor == 0, + "pixel_unshuffle expects width to be divisible by downscale_factor, but input.size(-1)=", + w, + " is not divisible by ", + downscale_factor); +} + +}} // namespace at::native diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/Pool.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/Pool.h new file mode 100644 index 0000000000000000000000000000000000000000..896570e3a18f28030c7285551dc9a01b6879c806 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/Pool.h @@ -0,0 +1,355 @@ +#include <ATen/core/Tensor.h> +#include <ATen/div_rtn.h> +#include <ATen/TensorUtils.h> +#include <ATen/native/DispatchStub.h> +#include <c10/util/irange.h> + +#include <utility> + +#pragma once + +namespace at::native { + +using max_pool2d_fn = void(*)(const Tensor& output, const Tensor& indices, const Tensor& input, + int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH); +using max_pool2d_backward_fn = void(*)(const Tensor& grad_input, const Tensor& grad_output, const Tensor& indices); + +DECLARE_DISPATCH(max_pool2d_fn, max_pool2d_kernel); +DECLARE_DISPATCH(max_pool2d_backward_fn, max_pool2d_backward_kernel); + +// average pooling has same signature for forward and backward +using avg_pool2d_fn = void(*)(const Tensor& output, const Tensor& input, int64_t kW, int64_t kH, + int64_t dW, int64_t dH, int64_t padW, int64_t padH, bool count_include_pad, std::optional<int64_t> divisor_override); +using avg_pool2d_backward_fn = void(*)(const Tensor& output, const Tensor& input, int kW, int kH, + int dW, int dH, int padW, int padH, bool
count_include_pad, std::optional divisor_override); + +DECLARE_DISPATCH(avg_pool2d_fn, avg_pool2d_kernel); +DECLARE_DISPATCH(avg_pool2d_backward_fn, avg_pool2d_backward_kernel); + +// averge pooling has same signature for forward and backward +using avg_pool3d_fn = void(*)(const Tensor& output, const Tensor& input, + int64_t kW, int64_t kH, int64_t kD, int64_t dW, int64_t dH, int64_t dD, + int64_t padW, int64_t padH, int64_t padD, bool count_include_pad, + std::optional divisor_override); +using avg_pool3d_backward_fn = void(*)(const Tensor& output, const Tensor& input, + int kW, int kH, int kD, int dW, int dH, int dD, + int padW, int padH, int padD, bool count_include_pad, + std::optional divisor_override); + +DECLARE_DISPATCH(avg_pool3d_fn, avg_pool3d_kernel); +DECLARE_DISPATCH(avg_pool3d_backward_fn, avg_pool3d_backward_kernel); + +using max_pool3d_fn = void(*)(Tensor& output, Tensor& indices, const Tensor& input, + int kW, int kH, int kD, int dW, int dH, int dD, int pW, int pH, int pD, int dilationW, int dilationH, int dilationD); +using max_pool3d_backward_fn = void(*)(Tensor& grad_input, const Tensor& grad_output, const Tensor& indices); + +DECLARE_DISPATCH(max_pool3d_fn, max_pool3d_kernel); +DECLARE_DISPATCH(max_pool3d_backward_fn, max_pool3d_backward_kernel); +namespace { + +template +inline dest_t +safe_downcast(src_t v) +{ + TORCH_CHECK(std::numeric_limits::min() <= v && v <= std::numeric_limits::max(), + "integer out of range"); + + return static_cast(v); +} + +template +inline T pooling_output_shape_pad_lr( + T inputSize, T kernelSize, T pad_l, T pad_r, T stride, T dilation, + bool ceil_mode) { + T outputSize = div_rtn( + inputSize + pad_l + pad_r - dilation * (kernelSize - 1) - 1 + + (ceil_mode ? stride - 1 : 0), stride) + 1; + if (ceil_mode) { + // ensure that the last pooling starts inside the image + // needed to avoid problems in ceil mode + if ((outputSize - 1) * stride >= inputSize + pad_l) { + --outputSize; + } + } + return outputSize; +} + +template +inline T pooling_output_shape( + T inputSize, T kernelSize, T pad, T stride, T dilation, bool ceil_mode) { + TORCH_CHECK(stride != 0, "stride should not be zero"); + TORCH_CHECK(pad >= 0, + "pad must be non-negative, but got pad: ", pad); + TORCH_CHECK(pad <= ((kernelSize - 1) * dilation + 1) / 2, + "pad should be at most half of effective kernel size, but got pad=", + pad, ", kernel_size=", kernelSize, " and dilation=", dilation) + return pooling_output_shape_pad_lr( + inputSize, kernelSize, pad, pad, stride, dilation, ceil_mode); +} + +template +std::pair _pooling_same_mode_padding_lr( + T inputSize, T kernelSize, T stride, T dilation) { + // NOTE: with strides, the output shape is ceil(inputSize/stride) + auto total_padding = T(dilation) * (kernelSize - 1); + + // Prefer symmetric padding if possible + if (stride > 2 && (total_padding % 2 == 1)) { + // The floor in the output size calculation gives us a little wiggle room + auto wiggle_room = inputSize % stride - 1; + if (wiggle_room > 0) { + total_padding = total_padding - 1; + } + } + + auto left = total_padding / 2; + return {left, total_padding - left}; +} + +inline std::pair pooling_same_mode_padding_lr( + int64_t inputSize, int64_t kernelSize, int64_t stride, int64_t dilation) { + return _pooling_same_mode_padding_lr(inputSize, kernelSize, stride, dilation); +} + +inline std::pair pooling_same_mode_padding_lr( + c10::SymInt inputSize, c10::SymInt kernelSize, c10::SymInt stride, c10::SymInt dilation) { + return _pooling_same_mode_padding_lr(std::move(inputSize), 
std::move(kernelSize), std::move(stride), std::move(dilation)); +} + +// AveragePool2d/DilatedMaxPool2d (forward) +inline void +pool2d_shape_check( + const Tensor& input, + int64_t kH, int64_t kW, int64_t dH, int64_t dW, int64_t padH, int64_t padW, int64_t dilationH, int64_t dilationW, + int64_t nInputPlane, + int64_t inputHeight, int64_t inputWidth, + int64_t outputHeight, int64_t outputWidth, MemoryFormat memory_format) +{ + const int64_t ndim = input.ndimension(); +#ifndef STRIP_ERROR_MESSAGES + const int64_t nOutputPlane = nInputPlane; +#endif + + TORCH_CHECK(kW > 0 && kH > 0, + "kernel size should be greater than zero, but got ", + "kH: ", kH, " kW: ", kW); + TORCH_CHECK(dW > 0 && dH > 0, + "stride should be greater than zero, but got " + "dH: ", dH, " dW: ", dW); + TORCH_CHECK(dilationH > 0 && dilationW > 0, + "dilation should be greater than zero, but got ", + "dilationH: ", dilationH, " dilationW: ", dilationW); + + bool valid_dims = input.size(1) != 0 && input.size(2) != 0; + if (memory_format == at::MemoryFormat::ChannelsLast){ + // Expect tensor in NHWC format and allow 0-dim only for N. + TORCH_CHECK((ndim == 4 && valid_dims && input.size(3) != 0), + "Expected 4D (batch mode) tensor expected for input with channels_last layout" + " with optional 0 dim batch size for input, but got: ", input.sizes()); + } else { + TORCH_CHECK((ndim == 3 && input.size(0) != 0 && valid_dims) || + (ndim == 4 && valid_dims && input.size(3) != 0), + "Expected 3D or 4D (batch mode) tensor with optional 0 dim batch size for input, but got:", + input.sizes()); + } + + TORCH_CHECK(kW/2 >= padW && kH/2 >= padH, + "pad should be smaller than or equal to half of kernel size, but got ", + "padW = ", padW, ", padH = ", padH, ", kW = ", kW, ", kH = ", kH); + + TORCH_CHECK(outputWidth >= 1 && outputHeight >= 1, + "Given input size: (", + nInputPlane, "x", inputHeight, "x", inputWidth, "). ", + "Calculated output size: (", + nOutputPlane, "x", outputHeight, "x", outputWidth, "). 
", + "Output size is too small"); +} + +// DilatedMaxPool2d (backward) +inline void +max_pool2d_backward_shape_check( + const Tensor& input, + const Tensor& gradOutput, + const Tensor& indices, + int kH, int kW, int dH, int dW, int padH, int padW, int dilationH, int dilationW, + int64_t nInputPlane, + int64_t inputHeight, int64_t inputWidth, + int64_t outputHeight, int64_t outputWidth, MemoryFormat memory_format) +{ + pool2d_shape_check( + input, + kH, kW, dH, dW, padH, padW, dilationH, dilationW, + nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, memory_format); + + const int64_t ndim = input.ndimension(); + const int64_t nOutputPlane = nInputPlane; + + check_dim_size(gradOutput, ndim, ndim-3, nOutputPlane); + check_dim_size(gradOutput, ndim, ndim-2, outputHeight); + check_dim_size(gradOutput, ndim, ndim-1, outputWidth); + + check_dim_size(indices, ndim, ndim-3, nOutputPlane); + check_dim_size(indices, ndim, ndim-2, outputHeight); + check_dim_size(indices, ndim, ndim-1, outputWidth); +} + +// AveragePool2d (backward) +inline void +avg_pool2d_backward_shape_check( + const Tensor& input, + const Tensor& gradOutput, + int64_t /*nbatch*/, + int kH, int kW, int dH, int dW, int padH, int padW, + int64_t nInputPlane, + int64_t inputHeight, int64_t inputWidth, + int64_t outputHeight, int64_t outputWidth, + MemoryFormat memory_format) +{ + pool2d_shape_check( + input, + kH, kW, dH, dW, padH, padW, 1, 1, + nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, + memory_format); + + const int64_t ndim = input.ndimension(); + const int64_t nOutputPlane = nInputPlane; + + check_dim_size(gradOutput, ndim, ndim-3, nOutputPlane); + check_dim_size(gradOutput, ndim, ndim-2, outputHeight); + check_dim_size(gradOutput, ndim, ndim-1, outputWidth); +} + +// AveragePool3d/DilatedMaxPool3d (forward) +inline void +pool3d_shape_check( + const Tensor& input, + int64_t nslices, + int kT, int kH, int kW, + int dT, int dH, int dW, + int pT, int pH, int pW, + int dilationT, int dilationH, int dilationW, + int64_t itime, int64_t iheight, int64_t iwidth, + int64_t otime, int64_t oheight, int64_t owidth, + const char *fn_name, + bool check_input_size=false) +{ + const int64_t ndim = input.ndimension(); + + TORCH_CHECK(kT > 0 && kW > 0 && kH > 0, + "kernel size should be greater than zero, but got ", + "kT: ", kT, " kH: ", kH, " kW: ", kW); + TORCH_CHECK(dT > 0 && dW > 0 && dH > 0, + "stride should be greater than zero, but got ", + "dT: ", dT, " dH: ", dH, " dW: ", dW); + TORCH_CHECK(dilationT > 0 && dilationW > 0 && dilationH > 0, + "dilation should be greater than zero, but got ", + "dilationT: ", dilationT, " dilationH: ", dilationH, " dilationW: ", dilationW); + + TORCH_CHECK(ndim == 4 || ndim == 5, + fn_name, ": Expected 4D or 5D tensor for input, but got: ", input.sizes()); + + for (const auto i : c10::irange(ndim)) { + if (ndim == 5 && i == 0) { + // size of batch-dim can be 0. 
+ continue; + } + TORCH_CHECK( + input.size(i) > 0, + fn_name, + ": Expected input's non-batch dimensions to have positive length," + " but input has a shape of ", + input.sizes(), + " and non-batch dimension ", + input.size(i), + " has length zero!") + } + + if (check_input_size) { // AveragePool3d + TORCH_CHECK(itime >= kT && iheight >= kH && iwidth >= kW, + "input image ", "(T: ", itime, " H: ", iheight, " W: ", iwidth, ") smaller than ", + "kernel size ", "(kT: ", kT, " kH: ", kH, " kW: ", kW, ")"); + } + + TORCH_CHECK(kT/2 >= pT && kW/2 >= pW && kH/2 >= pH, + "pad should be smaller than or equal to half of kernel size, but got " + "kT: ", kT, " kW: ", kW, " kH: ", kH, " padT: ", pT, " padW: ", pW, " padH: ", pH); + + TORCH_CHECK(otime >= 1 && owidth >= 1 && oheight >= 1, + "Given input size: (", + nslices,"x", itime, "x", iheight, "x", iwidth, "). ", + "Calculated output size: (", + nslices, "x", otime, "x", oheight, "x", owidth, "). ", + "Output size is too small"); +} + +inline void +max_pool3d_backward_shape_check( + const Tensor& input, + const Tensor& gradOutput, + const Tensor& indices, + int64_t nslices, + int kT, int kH, int kW, + int dT, int dH, int dW, + int pT, int pH, int pW, + int dilationT, int dilationH, int dilationW, + int64_t itime, int64_t iheight, int64_t iwidth, + int64_t otime, int64_t oheight, int64_t owidth, + const char* fn_name) +{ + const int64_t ndim = input.ndimension(); + + pool3d_shape_check( + input, + nslices, + kT, kH, kW, + dT, dH, dW, + pT, pH, pW, + dilationT, dilationH, dilationW, + itime, iheight, iwidth, + otime, oheight, owidth, fn_name); + + check_dim_size(gradOutput, ndim, ndim-4, nslices); + check_dim_size(gradOutput, ndim, ndim-3, otime); + check_dim_size(gradOutput, ndim, ndim-2, oheight); + check_dim_size(gradOutput, ndim, ndim-1, owidth); + + check_dim_size(indices, ndim, ndim-4, nslices); + check_dim_size(indices, ndim, ndim-3, otime); + check_dim_size(indices, ndim, ndim-2, oheight); + check_dim_size(indices, ndim, ndim-1, owidth); +} + +inline void +avg_pool3d_backward_shape_check( + const Tensor& input, + const Tensor& gradOutput, + int64_t nslices, + int kT, int kH, int kW, + int dT, int dH, int dW, + int pT, int pH, int pW, + int64_t itime, int64_t iheight, int64_t iwidth, + int64_t otime, int64_t oheight, int64_t owidth, + const char *fn_name) +{ + const int64_t ndim = input.ndimension(); + + pool3d_shape_check( + input, + nslices, + kT, kH, kW, + dT, dH, dW, + pT, pH, pW, + 1, 1, 1, + itime, iheight, iwidth, + otime, oheight, owidth, + fn_name, true); + + check_dim_size(gradOutput, ndim, ndim-4, nslices); + check_dim_size(gradOutput, ndim, ndim-3, otime); + check_dim_size(gradOutput, ndim, ndim-2, oheight); + check_dim_size(gradOutput, ndim, ndim-1, owidth); +} + +} // anonymous namespace + +} // namespace at::native diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/Pow.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/Pow.h new file mode 100644 index 0000000000000000000000000000000000000000..76ddda846a59a6796ac42a5c480fbddc08e65d4f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/Pow.h @@ -0,0 +1,69 @@ +#pragma once + +#include + +namespace c10 { +class Scalar; +} + +namespace at { + +struct TensorIterator; +struct TensorIteratorBase; + +namespace native { + +#if defined(__CUDACC__) || defined(__HIPCC__) +#define HOST_DEVICE __host__ __device__ +#else +#define HOST_DEVICE +#endif + +// integral power in pytorch allows for negative exponents, giving truncated 
integral results.
+// e.g. since 2**-1==0.5, the truncated integral result is zero. 1**negative_exponent is the
+// only non-zero result.
+template <class T,
+  typename std::enable_if<std::is_integral<T>::value, T>::type* = nullptr>
+inline HOST_DEVICE __ubsan_ignore_signed_int_overflow__ T powi_impl(T a, T b) {
+  T result = 1;
+  while (b) {
+    if (b & 1) {
+       result *= a;
+    }
+    b /= 2;
+    a *= a;
+  }
+  return result;
+}
+
+template <class T,
+  typename std::enable_if<std::is_integral<T>::value && !std::is_signed<T>::value, T>::type* = nullptr>
+inline HOST_DEVICE T powi(T a, T b) {
+  return powi_impl(a, b);
+}
+
+template <class T,
+  typename std::enable_if<std::is_integral<T>::value && std::is_signed<T>::value, T>::type* = nullptr>
+inline HOST_DEVICE T powi(T a, T b) {
+  if ( b < 0 ) {
+    if ( a == 1 ) {
+      return 1;
+    } else if ( a == -1 ) {
+      auto negative = (-b) % static_cast<T>(2);
+      return negative ? -1 : 1;
+    } else {
+      return 0;
+    }
+  }
+  return powi_impl(a, b);
+}
+
+using pow_tensor_tensor_fn = void (*)(TensorIteratorBase&);
+using pow_tensor_scalar_fn = void (*)(TensorIteratorBase&, const c10::Scalar&);
+
+DECLARE_DISPATCH(pow_tensor_tensor_fn, pow_tensor_tensor_stub);
+DECLARE_DISPATCH(pow_tensor_scalar_fn, pow_tensor_scalar_stub);
+
+} // namespace native
+
+} // namespace at
diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/ReduceOpsUtils.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/ReduceOpsUtils.h
new file mode 100644
index 0000000000000000000000000000000000000000..928853ed44ca54aeed79b991d5feadee590d9cad
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/ReduceOpsUtils.h
@@ -0,0 +1,455 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifndef AT_PER_OPERATOR_HEADERS
+#include <ATen/Functions.h>
+#else
+#include <ATen/ops/empty.h>
+#include <ATen/ops/scalar_tensor.h>
+#endif
+
+namespace at::native {
+
+// Maximum and minimum possible scalar values, including infinities
+template <typename scalar_t>
+constexpr scalar_t upper_bound() {
+  using lim = std::numeric_limits<scalar_t>;
+  return lim::has_infinity ? lim::infinity() : lim::max();
+}
+
+template <typename scalar_t>
+constexpr scalar_t lower_bound() {
+  using lim = std::numeric_limits<scalar_t>;
+  return lim::has_infinity ?
-lim::infinity() : lim::lowest(); +} + +inline Tensor restride_dim( + const Tensor& src, int64_t dim, + IntArrayRef replacement_shape +) { + auto strides = ensure_nonempty_vec(src.strides().vec()); + strides[dim] = 0; + return src.as_strided(replacement_shape, strides); +} + +inline void _dimreduce_setup(const Tensor &result, const Tensor &self, + int64_t dim) { + IntArrayRef self_sizes = self.sizes(); + std::vector result_sizes; + result_sizes.insert(result_sizes.end(), self_sizes.begin(), self_sizes.end()); + result_sizes[dim] = 1; + result.resize_(result_sizes); +} + +inline bool _dimreduce_return_trivial(const Tensor &result, const Tensor &self, + const Scalar& ident, int64_t dim, bool keepdim) { + if (self.numel() == 1 && self.ndimension() == 0) { + result.resize_({}); + result.fill_(self); + return true; + } + // Return identity + if (self.numel() == 0) { + _dimreduce_setup(result, self, dim); + result.fill_(ident); + if (!keepdim) result.squeeze_(dim); + return true; + } + return false; +} + +inline bool _dimreduce_return_trivial_no_ident(Tensor &result, const Tensor &self, + int64_t /*dim*/, bool /*keepdim*/, const char* /*fn_name*/) { + if (self.numel() == 1 && self.ndimension() == 0) { + result.resize_({}); + result.fill_(self); + return true; + } + + return false; +} + +inline std::optional _allreduce_return_trivial( + const Tensor& self, + const Scalar& ident) { + // Return identity + if (self.numel() == 0) { + return at::scalar_tensor(ident, self.options()); + } + return std::nullopt; +} + +#define OPTION_TYPE_EQUALITY_CHECK(option, out, self) \ +{ \ + TORCH_CHECK(\ + out.option() == self.option(),\ + "expected ", #option, " ",\ + self.option(),\ + " but found ", out.option())\ +} + +inline void check_scalar_type_device_layout_equal(const Tensor& out, const Tensor& self) { + OPTION_TYPE_EQUALITY_CHECK(scalar_type, out, self); + OPTION_TYPE_EQUALITY_CHECK(device, out.options(), self.options()); + OPTION_TYPE_EQUALITY_CHECK(layout, out.options(), self.options()); +} + +inline Tensor integer_upcast(const Tensor& self, std::optional dtype) { + ScalarType scalarType = self.scalar_type(); + TORCH_CHECK(!isBarebonesUnsignedType(scalarType), "integer upcasting for uint16, uint32 and uint64 is not currently implemented"); + ScalarType upcast_scalarType = dtype.value_or(at::isIntegralType(scalarType, /*includeBool=*/true) ? 
ScalarType::Long : scalarType); + return self.toType(upcast_scalarType); +} + +using DimMask = TensorIterator::DimMask; + +inline DimVector make_dim_vector(OptionalIntArrayRef opt_dims, int64_t ndim) { + if (opt_dims.has_value()) { + return DimVector(opt_dims.value()); + } else { + std::vector all_dims(ndim); + std::iota(all_dims.begin(), all_dims.end(), 0); + return DimVector(all_dims); + } +} + +inline DimMask make_dim_mask(OptionalIntArrayRef opt_dims, int64_t ndim, bool allow_empty_dims=false) { + DimMask mask; + if (opt_dims.has_value()) { + auto dims = opt_dims.value(); + if (dims.empty() && !allow_empty_dims) { + mask = DimMask().flip(); + } else { + mask = at::dim_list_to_bitset(dims, ndim); + } + } else { + mask = DimMask().flip(); + } + return mask; +} + +inline DimVector shape_from_dim_mask(const Tensor& self, DimMask mask, bool keepdim) { + auto shape = DimVector(self.sizes()); + for (int dim = shape.size() - 1; dim >= 0; dim--) { + if (mask[dim]) { + if (keepdim) { + shape[dim] = 1; + } else { + shape.erase(shape.begin() + dim); + } + } + } + return shape; +} + +inline void resize_reduction_result( + Tensor& result, const Tensor& self, DimMask mask, bool keepdim, + ScalarType /*dtype*/) +{ + auto shape = shape_from_dim_mask(self, mask, keepdim); + TORCH_CHECK(result.defined(), "Cannot create a new tensor inside a reduction op. You likely tried to call an operator with an out argument but the out argument was an undefined tensor."); + at::native::resize_output(result, shape); +} + +inline Tensor create_reduction_result( + const Tensor& self, at::OptionalIntArrayRef dim, bool keepdim, ScalarType dtype +) { + DimMask mask = make_dim_mask(dim, self.dim()); + auto shape = shape_from_dim_mask(self, mask, keepdim); + return at::empty(shape, self.options().dtype(dtype)); +} + +inline Tensor review_reduce_result(const Tensor& result, int ndim, DimMask mask, bool keepdim) { + if (keepdim) { + return result; + } + auto shape = DimVector(result.sizes()); + auto stride = DimVector(result.strides()); + for (const auto dim : c10::irange(ndim)) { + if (mask[dim]) { + shape.insert(shape.begin() + dim, 1); + stride.insert(stride.begin() + dim, 0); + } + } + return result.as_strided(shape, stride); +} + +inline TensorIterator make_reduction( + const char* name, Tensor& result, const Tensor& self, + at::OptionalIntArrayRef dim_opt, + bool keepdim, ScalarType in_dtype, ScalarType out_dtype) { + // check that result type and dtype match if provided + TORCH_CHECK( + !result.defined() || result.scalar_type() == out_dtype, + name, ": provided dtype must match dtype of result. Got ", + toString(result.scalar_type()), + " and ", + toString(out_dtype), + "."); + // dim={} performs an all-reduce, same as dim=None + IntArrayRef dim = dim_opt.value_or(IntArrayRef{}); + int64_t ndim = self.dim(); + auto mask = make_dim_mask(dim, ndim); + resize_reduction_result(result, self, mask, keepdim, out_dtype); + auto viewed_result = review_reduce_result(result, ndim, mask, keepdim); + namedinference::propagate_names_for_reduction(result, self, dim, keepdim); + if (self.scalar_type() == in_dtype) { + return TensorIterator::reduce_op(viewed_result, self); + } + return TensorIterator::reduce_op(viewed_result, self.to(in_dtype)); +} + +inline C10_UNUSED TensorIterator make_reduction( + const char* name, Tensor& result, const Tensor& self, + at::OptionalIntArrayRef dim, bool keepdim, ScalarType out_dtype) { + // special case for type promotion in mixed precision, improves computational + // efficiency. 
+ // not generalize this to common mismatched input/output types to avoid cross + // product of templated kernel launches. + const bool gpu_lowp_to_f32 = ( + self.is_cuda() && (self.scalar_type() == kHalf || self.scalar_type() == kBFloat16) && out_dtype == kFloat); + auto in_dtype = gpu_lowp_to_f32 ? self.scalar_type() + : self.is_complex() ? c10::toComplexType(out_dtype) + : out_dtype; + return make_reduction(name, result, self, dim, keepdim, in_dtype, out_dtype); +} + +inline TensorIterator make_reduction( + const char* name, Tensor& result1, Tensor& result2, const Tensor& self, + at::OptionalIntArrayRef dim_opt, bool keepdim, ScalarType dtype1, + ScalarType dtype2) { + // check that result type and dtype match if provided + TORCH_CHECK( + (!result1.defined() || result1.scalar_type() == dtype1) && (!result2.defined() || result2.scalar_type() == dtype2), + name, ": provided dtype must match dtype of result. Got ", + toString(result1.scalar_type()), toString(result2.scalar_type()), + " and ", + toString(dtype1), toString(dtype2), + "."); + + // dim={} performs an all-reduce, same as dim=None + auto dim = dim_opt.value_or(IntArrayRef{}); + int64_t ndim = self.dim(); + DimMask mask = make_dim_mask(dim, ndim); + resize_reduction_result(result1, self, mask, keepdim, dtype1); + auto viewed_result1 = review_reduce_result(result1, ndim, mask, keepdim); + + resize_reduction_result(result2, self, mask, keepdim, dtype2); + auto viewed_result2 = review_reduce_result(result2, ndim, mask, keepdim); + + namedinference::propagate_names_for_reduction(result1, self, dim, keepdim); + namedinference::propagate_names_for_reduction(result2, self, dim, keepdim); + + // special case for type promotion in mixed precision, improves computational + // efficiency. + // We don't generalize this to common mismatched input/output types to avoid cross + // product of templated kernel launches. + if (self.scalar_type() == dtype1 || + (self.is_cuda() && self.scalar_type() == kHalf && dtype1 == kFloat)) { + return TensorIterator::reduce_op(viewed_result1, viewed_result2, self); + } + return TensorIterator::reduce_op(viewed_result1, viewed_result2, self.to(dtype1)); +} + +inline C10_UNUSED TensorIterator make_reduction( + const char* name, Tensor& result1, Tensor& result2, const Tensor& self, + at::OptionalIntArrayRef dim, bool keepdim, ScalarType dtype) { + return make_reduction(name, result1, result2, self, dim, keepdim, dtype, dtype); +} + +inline void zero_numel_check_dims(const Tensor& self, const int64_t dim, const char *fn_name) { + if (self.ndimension() == 0) { + TORCH_CHECK_INDEX(dim == 0 || dim == -1, fn_name, + ": Expected reduction dim -1 or 0 for scalar but got ", dim); + } + else { + TORCH_CHECK_INDEX(self.size(dim) != 0, fn_name, + ": Expected reduction dim ", dim, " to have non-zero size."); + } +} + +inline void zero_numel_check_dims(const Tensor& self, const IntArrayRef dim, const char *fn_name) { + TORCH_CHECK( + !dim.empty(), + fn_name, ": Expected reduction dim to be specified for input.numel() == 0. 
", + "Specify the reduction dim with the 'dim' argument."); + for (const int64_t d : dim) { + zero_numel_check_dims(self, d, fn_name); + } +} + +inline std::vector get_zero_numel_tensor_size( + const Tensor& self, + const int64_t dim, + const bool keepdim, + const char* fn_name) { + TORCH_INTERNAL_ASSERT(self.numel() == 0, fn_name, ": Expected self.numel() == 0."); + zero_numel_check_dims(self, dim, fn_name); + std::vector sizes; + if (keepdim) { + sizes = self.sizes().vec(); + sizes[dim] = 1; + } + else { + for (const auto d : c10::irange(self.dim())) { + if (d != dim) { + sizes.push_back(self.sizes()[d]); + } + } + } + return sizes; +} + +// Resize the result tensor and indices when result.numel() == 0 depending on values of +// dim and keepdim for returning tensors containing reduction results. +// This function should be called when you are reducing a zero-numel tensor and want to +// resize the output and return it. This function exists for resizing zero-numel +// tensors when the size of the reduction dimension is non-zero. +inline C10_UNUSED void zero_numel_tensor_resize(Tensor& result, Tensor& result_indices, + const Tensor& self, const int64_t dim, + const bool keepdim, const char *fn_name) { + auto sizes = get_zero_numel_tensor_size(self, dim, keepdim, fn_name); + at::native::resize_output(result, sizes); + at::native::resize_output(result_indices, sizes); +} + +inline ScalarType get_dtype_from_self( + const Tensor& self, + const std::optional& dtype, + bool promote_integers) { + if (dtype.has_value()) { + return dtype.value(); + } + ScalarType src_type = self.scalar_type(); + if (promote_integers && at::isIntegralType(src_type, /*includeBool=*/true)) { + return kLong; + } + return src_type; +} + +inline ScalarType get_dtype_from_result(Tensor& result, std::optional dtype) { + TORCH_CHECK(result.defined(), "Cannot create a new tensor inside a reduction op. 
You likely tried to call an operator with an out argument but the out argument was an undefined tensor."); + if (dtype.has_value()) { + return dtype.value(); + } else { + return result.scalar_type(); + } +} + + +} // namespace at::native + +namespace at::meta { + +inline C10_UNUSED DimVector get_reduction_shape( + const Tensor& self, + IntArrayRef dims, + bool keepdim, + bool allow_empty_dims=false) { + auto mask = native::make_dim_mask(dims, self.dim(), allow_empty_dims); + return native::shape_from_dim_mask(self, mask, keepdim); +} + +inline void resize_reduction( + impl::MetaBase& meta, + const Tensor& self, + OptionalIntArrayRef opt_dims, + bool keepdim, + ScalarType out_dtype, + bool allow_empty_dims=false) { + DimVector dims_ = at::native::make_dim_vector(opt_dims, self.dim()); + maybe_wrap_dims(dims_, self.dim()); + auto shape = get_reduction_shape(self, dims_, keepdim, allow_empty_dims); + if (self.layout() == kStrided) { + meta.set_output_raw_strided(0, shape, {}, self.options().dtype(out_dtype)); + } else if (shape.empty()) { + meta.set_output_raw_strided(0, shape, {}, self.options().dtype(out_dtype).layout(kStrided)); + } else { + TORCH_CHECK(false, "resize_reduction: support for output with ", self.layout(), " layout is not implemented yet"); + } + namedinference::propagate_names_for_reduction( + meta.maybe_get_output(), self, dims_, keepdim); +} + +inline void resize_reduction_with_indices( + impl::MetaBase& meta, + const Tensor& self, + IntArrayRef dims, + bool keepdim, + ScalarType out_dtype) { + DimVector dims_(dims); + maybe_wrap_dims(dims_, self.dim()); + auto shape = get_reduction_shape(self, dims_, keepdim); + meta.set_output_raw_strided(0, shape, {}, self.options().dtype(out_dtype)); + meta.set_output_raw_strided(1, shape, {}, self.options().dtype(kLong)); + namedinference::propagate_names_for_reduction( + meta.maybe_get_output(0), self, dims_, keepdim); + namedinference::propagate_names_for_reduction( + meta.maybe_get_output(1), self, dims_, keepdim); +} + +inline TensorIterator make_reduction( + const Tensor& self, + const Tensor& result, + OptionalIntArrayRef opt_dims, + bool keepdim, + ScalarType in_dtype) { + int64_t ndim = self.dim(); + auto mask = at::native::make_dim_mask(opt_dims, ndim); + auto viewed_result = + at::native::review_reduce_result(result, ndim, mask, keepdim); + if (self.scalar_type() == in_dtype) { + return TensorIterator::reduce_op(viewed_result, self); + } + return TensorIterator::reduce_op(viewed_result, self.to(in_dtype)); +} + +inline TensorIterator make_reduction( + const Tensor& self, + const Tensor& result1, + const Tensor& result2, + IntArrayRef dims, + bool keepdim, + ScalarType dtype1, + ScalarType /*dtype2*/) { + int64_t ndim = self.dim(); + auto mask = at::native::make_dim_mask(dims, ndim); + auto viewed_result1 = at::native::review_reduce_result(result1, ndim, mask, keepdim); + auto viewed_result2 = at::native::review_reduce_result(result2, ndim, mask, keepdim); + // special case for type promotion in mixed precision, improves computational efficiency. + // We don't generalize this to common mismatched input/output types to avoid cross product + // of templated kernel launches. 
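+  // Concretely: reducing a CUDA float16 tensor into a float32 result dtype
+  // takes the first branch below, so the kernel reads half inputs and writes
+  // float outputs directly instead of first materializing self.to(kFloat).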
+ if (self.scalar_type() == dtype1 || + (self.is_cuda() && self.scalar_type() == kHalf && dtype1 == kFloat)) { + return TensorIterator::reduce_op(viewed_result1, viewed_result2, self); + } + return TensorIterator::reduce_op(viewed_result1, viewed_result2, self.to(dtype1)); +} + +inline C10_UNUSED TensorIterator make_reduction_from_out_ty( + const Tensor& self, + const Tensor& result, + OptionalIntArrayRef opt_dims, + bool keepdim, + ScalarType out_dtype) { + // special case for type promotion in mixed precision, improves computational + // efficiency. + // not generalize this to common mismatched input/output types to avoid cross + // product of templated kernel launches. + const bool gpu_lowp_to_f32 = + (self.is_cuda() && + (self.scalar_type() == kHalf || self.scalar_type() == kBFloat16) && + out_dtype == kFloat); + auto in_dtype = gpu_lowp_to_f32 ? self.scalar_type() : out_dtype; + return make_reduction(self, result, opt_dims, keepdim, in_dtype); +} + +} // namespace at::meta diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/Repeat.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/Repeat.h new file mode 100644 index 0000000000000000000000000000000000000000..b8d6f92553a4c9bde2f90574a23108fd9667a26c --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/Repeat.h @@ -0,0 +1,48 @@ +#pragma once + +#include +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include +#include +#endif + +namespace at::native { + +template < + typename index_t, + void compute(const index_t*, const int64_t*, index_t*, int64_t, int64_t)> +static inline Tensor repeat_interleave_common( + const Tensor& repeats, + std::optional output_size) { + TORCH_CHECK( + repeats.dim() == 1, "repeat_interleave only accept 1D vector as repeat"); + TORCH_CHECK( + repeats.scalar_type() == at::kLong || repeats.scalar_type() == at::kInt, + "repeats has to be Long or Int tensor"); + if (repeats.size(0) == 0) { + return at::empty_like(repeats, LEGACY_CONTIGUOUS_MEMORY_FORMAT); + } + Tensor repeats_ = repeats.contiguous(); + Tensor cumsum = repeats.cumsum(0); + int64_t total = 0; + if (output_size.has_value()) { + total = output_size.value(); + } else { + total = cumsum[-1].item(); + TORCH_CHECK( + (repeats >= 0).all().item(), "repeats can not be negative"); + } + + Tensor result = at::empty({total}, repeats.options()); + const index_t* repeat_ptr = repeats_.const_data_ptr(); + const int64_t* cumsum_ptr = cumsum.const_data_ptr(); + index_t* result_ptr = result.data_ptr(); + compute(repeat_ptr, cumsum_ptr, result_ptr, repeats.size(0), total); + return result; +} + +} // namespace at::native diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/Resize.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/Resize.h new file mode 100644 index 0000000000000000000000000000000000000000..951a08e84c7793d33b76203d0dfb738fa91056e0 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/Resize.h @@ -0,0 +1,173 @@ +#pragma once + +#include +#include +#include +#include + +#include + +#include + + +namespace at::native { + +// TODO: make all operations that resize given outputs use this function +// for consistency and maintainability. +// Some operations like `cat` might not be able to make the use of +// resize_output directly. 
For more details to understand how it works in `cat`,
+// see https://github.com/pytorch/pytorch/pull/62560#discussion_r687363362
+// Resizes outputs
+// Functions accepting output tensors, like with the "out" kwarg, should
+//   call this function to handle resizing their output tensor.
+// Issues a warning if the output tensor has one or more elements and
+//   needs resizing
+// NOTE: In the future the warning will become an error
+// Returns a bool saying whether or not the resize actually happened
+TORCH_API bool resize_output(const Tensor& output, IntArrayRef shape);
+// WARNING: Do NOT call this directly. If you are resizing an output and want
+// to support dynamic shapes call at::resize__symint and resize_output_check_symint.
+// For more details, see: https://github.com/pytorch/pytorch/pull/111530/files#r1365845272
+TORCH_API bool resize_output_symint(const Tensor& output, SymIntArrayRef shape);
+
+// Utility for resize_output
+// Returns a bool saying resize should happen or not and
+// raises a warning if resizing for one or more elements
+TORCH_API bool resize_output_check(const Tensor& output, IntArrayRef shape);
+TORCH_API bool resize_output_check_symint(const Tensor& output, SymIntArrayRef shape);
+
+TORCH_API void resize_bytes_cpu(StorageImpl* storage, size_t size_bytes);
+TORCH_API void resize_bytes_meta(StorageImpl* storage, c10::SymInt size_bytes);
+TORCH_API void resize_bytes_nocuda(const Storage& storage, const c10::SymInt& size_bytes);
+
+inline void maybe_resize_storage_cpu(TensorImpl* self, size_t new_size_bytes) {
+  // It does not make sense to try to resize a storage
+  // to hold 0 elements, and this can break
+  // if storage_offset is positive but
+  // new_size is 0, so just bail in that case
+  // (same comment is in cuda/Resize.h)
+  if (self->numel() == 0) {
+    return;
+  }
+
+  const Storage& storage = self->unsafe_storage();
+  if (!storage) {
+    auto new_storage = c10::make_intrusive<StorageImpl>(
+        StorageImpl::use_byte_size_t(),
+        new_size_bytes,
+        c10::GetCPUAllocator(),
+        true);
+    self->set_storage_keep_dtype(std::move(new_storage));
+  } else if (new_size_bytes > storage.nbytes()) {
+    resize_bytes_cpu(storage.unsafeGetStorageImpl(), new_size_bytes);
+  }
+}
+
+TORCH_API TensorImpl* resize_impl_cpu_(
+    TensorImpl* self,
+    IntArrayRef size,
+    at::OptionalIntArrayRef stride,
+    bool resize_storage = true);
+
+template <typename T>
+T maybe_convert_symint(c10::SymInt) = delete;
+
+template <>
+inline c10::SymInt maybe_convert_symint(c10::SymInt x) { return x; }
+
+template <>
+inline int64_t maybe_convert_symint(c10::SymInt x) { return x.guard_int(__FILE__, __LINE__); }
+
+template <typename T>
+inline void checkInBoundsForStorage(
+    ArrayRef<T> size,
+    ArrayRef<T> stride,
+    T storage_offset,
+    const caffe2::TypeMeta& data_type,
+    const Storage& new_storage) {
+  T storage_size_bytes =
+      at::detail::computeStorageNbytes(size, stride, data_type.itemsize());
+  T storage_offset_bytes = storage_offset * data_type.itemsize();
+  if (storage_size_bytes == 0) {
+    // NB: (a tensor with arbitrary 0 dims)'s storage can have any numel.
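+    // e.g. an empty (0, 5) tensor occupies zero storage bytes whatever its
+    // strides are, so any storage (even a zero-byte one) is in bounds and the
+    // bounds check below can be skipped.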
+ return; + } + T new_storage_size_bytes = maybe_convert_symint(new_storage.sym_nbytes()); + TORCH_CHECK( + storage_size_bytes + storage_offset_bytes <= new_storage_size_bytes, + "setStorage: sizes ", + size, + ", strides ", + stride, + "," + " storage offset ", + storage_offset, + ", and itemsize ", + data_type.itemsize(), + " requiring a storage size of ", + storage_size_bytes + storage_offset_bytes, + " are out of bounds for storage of size ", + new_storage_size_bytes); +} + +template +inline void checkSetStorage(Tensor& result, Storage storage, T storage_offset, + ArrayRef size, ArrayRef stride) { + // FIXME: stride should be optional + if (stride.data()) { + TORCH_CHECK(size.size() == stride.size(), "unequal size length (", size.size(), + ") and stride length (", stride.size(), ")"); + } + +#ifdef DEBUG + TORCH_CHECK(size.size() <= INT_MAX, "size length (", size.size(), ") greater than INT_MAX"); +#endif + + // storage: note this can't be replaced with result.set_(storage) as the semantics of that + // function is to set the tensor size to be equal to the size of the storage. + if (!result.storage().is_alias_of(storage)) { + // Caffe2 might have tensors whose storages are null, but we + // don't allow it in PyTorch. + TORCH_INTERNAL_ASSERT(storage); + TORCH_INTERNAL_ASSERT(result.storage()); + + // We used to allow this, but this breaks device caching. + // Let's put an actual error message for this one. + TORCH_CHECK(result.storage().device() == storage.device(), + "Attempted to set the storage of a tensor on device \"", result.storage().device(), + "\" to a storage on different device \"", storage.device(), + "\". This is no longer allowed; the devices must match."); + result.unsafeGetTensorImpl()->set_storage_keep_dtype(std::move(storage)); + } + + // storageOffset + TORCH_CHECK(storage_offset >= 0, "Tensor: invalid storage offset ", storage_offset); +} + +/** + * Set self's sizes, strides, and storage_offset. + * (size, stride, storage_offset) must be in bounds for self's storage. 
+ */ +template +inline void setStrided( + const Tensor& self, + ArrayRef size, + ArrayRef stride, + T storage_offset) { + TORCH_CHECK(size.size() == stride.size(), "mismatch in length of strides and shape"); + for (const auto& val : stride) { + TORCH_CHECK(val >= 0, + "as_strided: Negative strides are not supported at the moment, " + "got strides: ", stride); + } + + auto* self_ = self.unsafeGetTensorImpl(); + checkInBoundsForStorage( + size, stride, storage_offset, self_->dtype(), self_->storage()); + + /* storage offset */ + TORCH_CHECK(storage_offset >= 0, "Tensor: invalid storage offset ", storage_offset); + self_->set_sizes_and_strides(size, stride, std::make_optional(storage_offset)); +} + +} // namespace at::native diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/ResizeCommon.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/ResizeCommon.h new file mode 100644 index 0000000000000000000000000000000000000000..cea2612a22127d00532da761cf8e773164db5ca4 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/ResizeCommon.h @@ -0,0 +1,75 @@ +#pragma once + +#include +#include +#include +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include +#endif + +namespace at::native { + +template +inline T storage_size_for(ArrayRef size, ArrayRef stride) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(size.size() == stride.size(), + "storage_size_for(size, stride) requires that size and stride ", + "have the same size as a precondition."); + T storage_size = 1; + for (const auto dim : c10::irange(size.size())) { + if (size[dim] == 0) { + storage_size = 0; + break; + } + storage_size += (size[dim] - 1) * stride[dim]; + } + return storage_size; +} + +inline const Tensor& resize_named_tensor_( + const Tensor& self, + IntArrayRef size, + std::optional optional_memory_format) { + TORCH_INTERNAL_ASSERT(self.has_names()); + TORCH_CHECK( + self.sizes() == size, + "Cannot resize named tensor with resize_ or resize_as_ (tried to resize " + "Tensor", + self.names(), + " with size ", + self.sizes(), + " to ", + size, + "). This may be caused by passing a named tensor ", + "as an `out=` argument; please ensure that the sizes are the same. "); + TORCH_CHECK( + !optional_memory_format.has_value(), + "Unsupported memory format for named tensor resize ", + optional_memory_format.value()); + return self; +} + +// For deterministic output, fill new elements that were added after a storage +// resize with NaN or MAX_INT. `old_storage_nbytes` is the size of the storage +// before the resize happened. 
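+// For example, growing a float32 storage from 4 to 8 elements leaves elements
+// 4..7 uninitialized; the function views exactly that tail of the storage and
+// fills it (NaN for floating-point dtypes, MAX_INT for integral ones) so that
+// the newly added bytes are deterministic.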
+inline const Tensor& fill_resize_deterministic_(const Tensor& tensor, int64_t old_storage_nbytes) { + const at::Storage& storage = tensor.unsafeGetTensorImpl()->unsafe_storage(); + int64_t new_storage_nbytes = storage.nbytes(); + int64_t old_storage_numel = old_storage_nbytes / tensor.itemsize(); + int64_t new_storage_numel = new_storage_nbytes / tensor.itemsize(); + if (new_storage_numel > old_storage_numel) { + at::Tensor tensor_view = at::empty({}, at::TensorOptions().dtype(tensor.scalar_type()).device(tensor.device())); + tensor_view.set_( + storage, + /*storage_offset=*/old_storage_numel, + /*size=*/{new_storage_numel - old_storage_numel}, + /*stride=*/{1}); + at::native::fill_empty_deterministic_(tensor_view); + } + return tensor; +} + +} // namespace at::native diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/ScatterGatherChecks.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/ScatterGatherChecks.h new file mode 100644 index 0000000000000000000000000000000000000000..94816f5cfb6c7368fc616fe23868740e8bc242f6 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/ScatterGatherChecks.h @@ -0,0 +1,128 @@ +#pragma once + +#include +#include +#include +#include + +namespace at::native { + +namespace { + +// checks whether index.dtype == int64 +// and self.dtype == src.dtype if src is a Tensor +inline void scatter_gather_dtype_check( + const std::string& method_name, + const Tensor& self, + const Tensor& index, + const std::optional& src_opt = std::nullopt +) { + if (index.numel() != 0) { + TORCH_CHECK( + index.scalar_type() == at::ScalarType::Long, + method_name, "(): Expected dtype int64 for index" + ); + } + + if (src_opt.has_value()) { + const auto& src = src_opt.value(); + TORCH_CHECK( + self.scalar_type() == src.scalar_type(), + method_name, "(): Expected self.dtype to be equal to src.dtype" + ); + } +} + +// Used for `gather`-like methods +// Note: self means the input tensor here +// Test: +// 1. index.size(d) <= self.size(d) for all d != dim +// 2. index.dim() == self.dim() +inline void gather_shape_check(const Tensor& self, int64_t dim, + const Tensor& index +) { + auto self_dims = ensure_nonempty_dim(self.dim()); + TORCH_CHECK(self_dims == ensure_nonempty_dim(index.dim()), + "Index tensor must have the same number of dimensions as input tensor" + ); + + for (const auto i : c10::irange(self_dims)) { + if (i != dim) { + TORCH_CHECK( + ensure_nonempty_size(index, i) <= ensure_nonempty_size(self, i), + "Size does not match at dimension ", i, + " expected index ", index.sizes(), + " to be smaller than self ", self.sizes(), + " apart from dimension ", dim + ); + } + } +} + +// Used for `scatter` and `scatter_add` +// Tests: +// 1. index.size(d) <= self.size(d) for all d != dim +// 2. index.size(d) <= src.size(d) for all d if src is a Tensor +// 3. 
index.dim() == self.dim() == src.dim() +inline void scatter_shape_check( + const Tensor& self, int64_t dim, const Tensor& index, + const std::optional& src_opt = std::nullopt +) { + if (index.numel() == 0) return; + TORCH_CHECK( + ensure_nonempty_dim(self.dim()) == ensure_nonempty_dim(index.dim()), + "Index tensor must have the same number of dimensions as self tensor" + ); + + bool is_wrong_shape = false; + int64_t self_dims = ensure_nonempty_dim(self.dim()); + + // Check: index.size(d) <= self.size(d) for all d != dim + for (const auto d : c10::irange(self_dims)) { + int64_t index_d_size = ensure_nonempty_size(index, d); + if (d == dim) continue; + if (index_d_size > ensure_nonempty_size(self, d)) { + is_wrong_shape = true; + break; + } + } + + // Check: index.size(d) <= src.size(d) for all d if src is Tensor + if (!is_wrong_shape && src_opt.has_value()) { + const auto& src = src_opt.value(); + for (const auto d : c10::irange(self_dims)) { + int64_t index_d_size = ensure_nonempty_size(index, d); + if (index_d_size > ensure_nonempty_size(src, d)) { + is_wrong_shape = true; + break; + } + } + } + + if (src_opt.has_value()) { + const auto& src = src_opt.value(); + + TORCH_CHECK( + ensure_nonempty_dim(src.dim()) == ensure_nonempty_dim(index.dim()), + "Index tensor must have the same number of dimensions as src tensor" + ); + + TORCH_CHECK(!is_wrong_shape, + "Expected index ", index.sizes(), + " to be smaller than self ", self.sizes(), + " apart from dimension ", dim, + " and to be smaller size than src ", src.sizes() + ); + } + else { + TORCH_CHECK(!is_wrong_shape, + "Expected index ", index.sizes(), + " to be smaller than self ", self.sizes(), + " apart from dimension ", dim + ); + } +} + +} // anonymous namespace + +} // namespace at::native diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/SegmentReduce.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/SegmentReduce.h new file mode 100644 index 0000000000000000000000000000000000000000..25cd469ff5270a891e460f55239801a3fcf44c81 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/SegmentReduce.h @@ -0,0 +1,50 @@ +#pragma once + +#include +#include +#include +#include + +namespace at { +class Tensor; + +namespace native { + +using segment_reduce_lengths_fn = Tensor (*)( + ReductionType, + const Tensor&, + const Tensor&, + int64_t, + const std::optional&); +DECLARE_DISPATCH(segment_reduce_lengths_fn, _segment_reduce_lengths_stub); + +using segment_reduce_offsets_fn = Tensor (*)( + ReductionType, + const Tensor&, + const Tensor&, + int64_t, + const std::optional&); +DECLARE_DISPATCH(segment_reduce_offsets_fn, _segment_reduce_offsets_stub); + +using segment_reduce_lengths_backward_fn = Tensor (*)( + const Tensor&, + const Tensor&, + const Tensor&, + ReductionType, + const Tensor&, + int64_t, + const std::optional&); +DECLARE_DISPATCH(segment_reduce_lengths_backward_fn, _segment_reduce_lengths_backward_stub); + +using segment_reduce_offsets_backward_fn = Tensor (*)( + const Tensor&, + const Tensor&, + const Tensor&, + ReductionType, + const Tensor&, + int64_t, + const std::optional&); +DECLARE_DISPATCH(segment_reduce_offsets_backward_fn, _segment_reduce_offsets_backward_stub); + +} // namespace native +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/SortingUtils.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/SortingUtils.h new file mode 100644 index 
0000000000000000000000000000000000000000..a0f9cfa8bf0ce554d198b7e55079ed13e1798175 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/SortingUtils.h @@ -0,0 +1,88 @@ +#pragma once + +#include +#include +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include +#endif + +namespace at::native { + +// ensure we get good values and indices for kthvalue, mode +// this will always be with the reducing dim as 1-d +inline void _reduction_with_indices_allocate_or_resize_output( + Tensor& values, + Tensor& indices, + const Tensor& self, + int64_t dim_, + bool keepdim) { + int64_t dim = maybe_wrap_dim(dim_, self.dim(), /*wrap_scalar=*/true); + auto result_sizes = self.sizes().vec(); + if (!result_sizes.empty()) { + result_sizes[dim] = 1; + } + if (values.defined()) { + TORCH_CHECK( + self.options().type_equal(values.options()), + "output values must be of same type as input"); + if (!keepdim && values.dim() == self.dim() - 1) { + // unsqueeze to preserve passed in noncontiguous tensor in resize + values.unsqueeze_(dim); + } + resize_output(values, result_sizes); + } else { + values = at::empty(result_sizes, self.options()); + } + if (indices.defined()) { + TORCH_CHECK( + indices.dtype() == kLong, "output indices must be of scalar type Long"); + TORCH_CHECK( + indices.device() == self.device(), + "output indices must be on same device as input"); + if (!keepdim && indices.dim() == self.dim() - 1) { + // unsqueeze to preserve passed in noncontiguous tensor in resize + indices.unsqueeze_(dim); + } + resize_output(indices, result_sizes); + } else { + indices = at::empty(result_sizes, self.options().dtype(kLong)); + } +} + +// ensure we get good values and indices for topk +inline void _allocate_or_resize_output_with_indices( + Tensor& values, + Tensor& indices, + const Tensor& self, + int64_t dim_, + int64_t k) { + int64_t dim = maybe_wrap_dim(dim_, self.dim(), /*wrap_scalar=*/true); + auto result_sizes = self.sizes().vec(); + if (!result_sizes.empty()) { + result_sizes[dim] = k; + } + if (values.defined()) { + TORCH_CHECK( + self.options().type_equal(values.options()), + "output values must be of same type as input"); + values.resize_(result_sizes); + } else { + values = at::empty(result_sizes, self.options()); + } + if (indices.defined()) { + TORCH_CHECK( + indices.dtype() == kLong, "output indices must be of scalar type Long"); + TORCH_CHECK( + indices.device() == self.device(), + "output indices must be on same device as input"); + indices.resize_(result_sizes); + } else { + indices = at::empty(result_sizes, self.options().dtype(kLong)); + } +} + +} // namespace at::native diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/SparseTensorUtils.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/SparseTensorUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..3953ca0d17b57f2547bafd2d08bec577b83b713e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/SparseTensorUtils.h @@ -0,0 +1,190 @@ +#pragma once + +#include +#include +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include +#include +#endif + +namespace at::sparse { + +// Just for documentary purposes +using SparseTensor = Tensor; +using SparseType = Type; + +// This is an internal utility function for getting at the SparseTensorImpl, +// so that we can write sparse tensor specific accessors for special fields +// in SparseTensor. 
You should only use this for writing low level +// setters/getters for SparseTensorImpl fields; otherwise, you should use +// the low level setters/getters that were implemented using this. +// +// This may be called repeatedly, so make sure it's pretty cheap. +inline SparseTensorImpl* get_sparse_impl(const SparseTensor& self) { + TORCH_INTERNAL_ASSERT( + self.is_sparse(), "_internal_get_SparseTensorImpl: not a sparse tensor"); + return static_cast(self.unsafeGetTensorImpl()); +} + +// Takes indices and values and directly puts them into the sparse tensor, no +// copy. This used to be called THSTensor_(_move) +inline void alias_into_sparse( + const SparseTensor& self, + const Tensor& indices, + const Tensor& values) { + get_sparse_impl(self)->set_indices_and_values_unsafe(indices, values); +} + +// Take indices and values and makes a (data) copy of them to put into the +// sparse indices/values. This used to be called THSTensor_(_set) +inline void copy_into_sparse( + const SparseTensor& self, + const Tensor& indices, + const Tensor& values, + bool non_blocking) { + alias_into_sparse( + self, + indices.to(self._indices().options(), non_blocking, /*copy=*/true), + values.to(self._values().options(), non_blocking, /*copy=*/true)); +} + +// TODO: put this into the public API +inline bool is_same_tensor(const Tensor& lhs, const Tensor& rhs) { + return lhs.unsafeGetTensorImpl() == rhs.unsafeGetTensorImpl(); +} + +inline bool is_same_density(const SparseTensor& self, const SparseTensor& src) { + return self.sparse_dim() == src.sparse_dim() && + self.dense_dim() == src.dense_dim(); +} + +// Give us a new values tensor, with the same dimensionality +// as 'values' but with a new number of non-zero elements. +// TODO: Expose this for real in ATen, some day? +// NB: Doesn't preserve data. +inline Tensor new_values_with_size_of(const Tensor& values, int64_t nnz) { + std::vector size = values.sizes().vec(); + size[0] = nnz; + return at::empty(size, values.options()); +} + +// NOTE [ Flatten Sparse Indices ] +// This helper function flattens a sparse indices tensor (a Tensor) into a 1D +// indices tensor. E.g., +// input = [[2, 4, 0], +// [3, 1, 10]] +// full_size = [2, 12] +// output = [ 2 * 12 + 3, 4 * 12 + 1, 0 * 12 + 10 ] = [27, 49, 10] +// +// In other words, assuming that each `indices[i, :]` is a valid index to a +// tensor `t` of shape `full_size`. This returns the corresponding indices to +// the flattened tensor `t.reshape( prod(full_size[:indices.size(0)]), -1 )`. +// if forceClone is true, the result will forced to be a clone of self. +// if force_clone is true, the result will forced to be a clone of self. +TORCH_API Tensor flatten_indices( + const Tensor& indices, + IntArrayRef full_size, + bool force_clone = false); + +// Flatten sparse tensor's indices from nD to 1D, similar to NOTE [ Flatten +// Sparse Indices ], except this one allows partial flatten: only flatten on +// specified dims. Note that the flatten indices might be uncoalesced if +// dims_to_flatten.size() < sparse_dim. Also if input indices is already +// coalesced, the flattened indices will also be sorted. 
+// +// args: +// indices: sparse tensor indices +// sizes: sparse tensor sizes +// dims_to_flatten: a list of dim index to flatten +// +// Ex1: +// indices = [[2, 4, 0], +// [3, 1, 3]] +// sizes = [2, 12] +// dims_to_flatten = [0, 1] +// new_indices = [ 2 * 12 + 3, 4 * 12 + 1, 0 * 12 + 3 ] = [27, 49, 3] +// +// Ex2: +// dims_to_flatten = [1] +// new_indices = [ 3, 1, 3 ] # uncoalesced +TORCH_API Tensor flatten_indices_by_dims( + const Tensor& indices, + const IntArrayRef& sizes, + const IntArrayRef& dims_to_flatten); + +// Find the CSR representation for a row `indices` from the COO format +TORCH_API Tensor coo_to_csr(const int64_t* indices, int64_t dim, int64_t nnz); + +TORCH_API Tensor zeros_like_with_indices(const Tensor& t); + +template +class TensorGeometryHolder { + using geometry_holder_t = std::array; + + public: + explicit TensorGeometryHolder( + IntArrayRef sizes, + IntArrayRef strides, + TensorOptions options = {}) { + std::copy(sizes.begin(), sizes.end(), t_sizes.begin()); + std::copy(strides.begin(), strides.end(), t_strides.begin()); + } + + explicit TensorGeometryHolder(const Tensor& t) + : TensorGeometryHolder(t.sizes(), t.strides()) {} + + auto operator*() const { + return std::make_tuple(t_sizes, t_strides); + } + + private: + geometry_holder_t t_sizes; + geometry_holder_t t_strides; +}; + +template <> +class TensorGeometryHolder<0> { + using geometry_holder_t = Tensor; + + public: + explicit TensorGeometryHolder( + IntArrayRef sizes, + IntArrayRef strides, + TensorOptions options) { + const int64_t t_ndims = sizes.size(); + const auto cpu_options = TensorOptions(options).dtype(kLong).device(kCPU); + Tensor t_sizes_and_strides_cpu = at::empty({2, t_ndims}, cpu_options); + t_sizes_and_strides_cpu.select(0, 0).copy_(at::tensor(sizes, cpu_options)); + t_sizes_and_strides_cpu.select(0, 1).copy_( + at::tensor(strides, cpu_options)); + const Tensor t_sizes_and_strides = + t_sizes_and_strides_cpu.to(options.device()); + t_sizes = t_sizes_and_strides.select(0, 0); + t_strides = t_sizes_and_strides.select(0, 1); + } + + explicit TensorGeometryHolder(const Tensor& t) + : TensorGeometryHolder(t.sizes(), t.strides(), t.options()) {} + + auto operator*() const { + return std::make_tuple( + t_sizes.template data_ptr(), + t_strides.template data_ptr()); + } + + private: + geometry_holder_t t_sizes; + geometry_holder_t t_strides; +}; + +// Return all indices of a tensor with the given shape. +// +// full_coo_indices(shape) is equivalent to +// torch.ones(shape).nonzero().transpose(-2, -1) but much faster. 
+TORCH_API Tensor full_coo_indices(IntArrayRef sizes, TensorOptions options); + +} // namespace at::sparse diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/TensorAdvancedIndexingUtils.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/TensorAdvancedIndexingUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..f9b616013ddb26e20bd26d33651e76863886e129 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/TensorAdvancedIndexingUtils.h @@ -0,0 +1,94 @@ +#pragma once +#include +#include +#include + +namespace at::native { +namespace { +#ifndef STRIP_ERROR_MESSAGES +inline std::string shapes_as_str(TensorList tensors) { + std::ostringstream os; + bool first = true; + for (auto& tensor : tensors) { + if (tensor.defined()) { + if (!first) { + os << ", "; + } + os << tensor.sizes(); + first = false; + } + } + return os.str(); +} +#endif +} // anonymous namespace + +inline std::tuple canDispatchToMaskedFill(const Tensor& self, const torch::List>& indices, +const Tensor& value){ + if (!(value.numel() ==1 && value.device().is_cpu())){ + return std::make_tuple(false,Tensor()); + } + int64_t num_ind = 0; + Tensor mask; + auto self_device = self.device(); + for (const std::optional& i: indices) { + if (!i.has_value() || !(*i).defined()){ + num_ind++; + } else { + const Tensor &index = *i; + if ((index.scalar_type() != kByte && index.scalar_type() != kBool) || + index.device() != self_device || mask.defined()){ + return std::make_tuple(false, Tensor()); + } else { + mask = index; + for (const auto j : c10::irange(index.dim())) { + int64_t srcIdx = num_ind + j; + TORCH_CHECK_INDEX(index.size(j) == self.size(srcIdx), "The shape of the mask ", index.sizes(), " at index ", j, + " does not match the shape of the indexed tensor ", self.sizes(), " at index ", srcIdx); + } + num_ind += mask.ndimension(); + } + } + } + for (C10_UNUSED const auto i : c10::irange(num_ind, self.ndimension())) { + mask = mask.unsqueeze(-1); + } + return std::make_tuple(true, mask); +} + +inline AdvancedIndex make_info(Tensor self, IOptTensorListRef orig) { + checkIndexTensorTypes(orig, /*allow_int*/ true); + // first expand BoolTensor (masks) or ByteTensor (masks) into 1 or more LongTensors + auto indices = expandTensors(self, orig); + // next broadcast all index tensors together + try { + indices = expand_outplace(indices); + } catch (std::exception& e) { + TORCH_CHECK_INDEX(false, "shape mismatch: indexing tensors could not be broadcast together" + " with shapes ", shapes_as_str(indices)); + } + // add missing null Tensors so that it matches self.dim() + while (indices.size() < (size_t)self.dim()) { + indices.emplace_back(); + } + // if the non-null indices are not all adjacent, transpose self and indices + // together so that they're adjacent at the front + if (!hasContiguousSubspace(indices)) { + std::tie(self, indices) = transposeToFront(self, indices); + } + // Ensure indices are on the same device as self + for (auto & indice : indices) { + if (indice.defined() && indice.device() != self.device()) { + indice = indice.to(self.device()); + } + } + for (auto & indice : indices) { + if (indice.defined() && indice.dtype() == at::kInt) { + indice = indice.to(at::kLong); + } + } + + return AdvancedIndex(self, indices); +} + +} // namespace at::native diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/TensorConversions.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/TensorConversions.h new file mode 100644 index 
diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/TensorConversions.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/TensorConversions.h
new file mode 100644
index 0000000000000000000000000000000000000000..8a3853230b15ca77fb7e148e2a24a87adf629856
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/TensorConversions.h
@@ -0,0 +1,26 @@
+#pragma once
+
+#include <ATen/core/ITensorListRef.h>
+#include <c10/core/Device.h>
+#include <c10/core/Layout.h>
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/ScalarType.h>
+
+namespace at {
+class Tensor;
+namespace native {
+bool to_will_alias(
+    const Tensor& self,
+    std::optional<ScalarType> dtype,
+    std::optional<Layout> layout,
+    std::optional<Device> device,
+    bool copy,
+    std::optional<c10::MemoryFormat> optional_memory_format);
+
+Tensor to_meta(const Tensor& tensor);
+std::optional<Tensor> to_meta(const std::optional<Tensor>& tensor);
+std::vector<Tensor> to_meta(at::ITensorListRef t_list);
+Tensor dense_to_sparse_with_mask(const Tensor& self, const Tensor& mask, std::optional<Layout> layout, OptionalIntArrayRef blocksize, std::optional<int64_t> dense_dim_opt);
+
+} // namespace native
+} // namespace at
diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/TensorDimApply.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/TensorDimApply.h
new file mode 100644
index 0000000000000000000000000000000000000000..4d52446446316af9546bf3a1f5c0eca58a6e608f
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/TensorDimApply.h
@@ -0,0 +1,55 @@
+#pragma once
+#include <ATen/core/Tensor.h>
+#include <c10/util/irange.h>
+
+namespace at::native {
+// input tensors are non-zero dim and non-empty
+template <typename T1, typename T2, typename Function>
+void tensor_dim_apply3(const Tensor& self, Tensor& values, Tensor& indices, int64_t dim, Function func) {
+  int ndims = self.dim();
+  int tensor_dim_apply_has_finished = 0;
+  std::vector<int64_t> counter(ndims, 0);
+  const T1* self_data = self.const_data_ptr<T1>();
+  T1* values_data = values.data_ptr<T1>();
+  T2* indices_data = indices.data_ptr<T2>();
+  int64_t self_stride = self.stride(dim);
+  int64_t values_stride = values.stride(dim);
+  int64_t indices_stride = indices.stride(dim);
+  int self_dim_size = self.size(dim);
+
+  while (!tensor_dim_apply_has_finished) {
+    func(self_data, values_data, indices_data, self_dim_size, self_stride, values_stride, indices_stride);
+    if (ndims == 1) {
+      break;
+    }
+    for (const auto dim_i : c10::irange(ndims)) {
+      if (dim_i == dim) {
+        if (dim_i == (ndims - 1)) {
+          tensor_dim_apply_has_finished = 1;
+          break;
+        }
+        continue;
+      }
+      counter[dim_i]++;
+      self_data += self.stride(dim_i);
+      values_data += values.stride(dim_i);
+      indices_data += indices.stride(dim_i);
+
+      if (counter[dim_i] == self.size(dim_i)) {
+        if (dim_i == ndims - 1) {
+          tensor_dim_apply_has_finished = 1;
+          break;
+        } else {
+          self_data -= counter[dim_i] * self.stride(dim_i);
+          values_data -= counter[dim_i] * values.stride(dim_i);
+          indices_data -= counter[dim_i] * indices.stride(dim_i);
+          counter[dim_i] = 0;
+        }
+      } else {
+        break;
+      }
+    }
+  }
+}
+} // namespace at::native
diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/TensorIterator.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/TensorIterator.h
new file mode 100644
index 0000000000000000000000000000000000000000..e55d2a58d709926a24467a0056323096e0890fa9
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/TensorIterator.h
@@ -0,0 +1,2 @@
+#pragma once
+#include <ATen/TensorIterator.h>
diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/Unfold3d.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/Unfold3d.h
new file mode 100644
index 0000000000000000000000000000000000000000..90ead9d1f7ad48187b8c0a28af2fd59915887d17
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/Unfold3d.h
@@ -0,0 +1,49 @@
+#pragma once
+
+#include <c10/core/ScalarType.h>
+
+namespace at::native {
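+
+// (Editor's note, not part of the original header.) As suggested by the
+// Copy/Acc naming, Unfold3dCopyCPU writes each kernel_d x kernel_h x kernel_w
+// patch of the C x X_D x X_H x X_W input into an unfolded buffer
+// (im2col-style), while Unfold3dAccCPU performs the reverse, accumulating
+// unfolded values back into overlapping input positions (col2im-style).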
+
+void Unfold3dCopyCPU(
+    ScalarType dtype,
+    const void *src,
+    int64_t C,
+    int64_t X_D,
+    int64_t X_H,
+    int64_t X_W,
+    int64_t Y_D,
+    int64_t Y_H,
+    int64_t Y_W,
+    int64_t kernel_d,
+    int64_t kernel_h,
+    int64_t kernel_w,
+    int64_t stride_d,
+    int64_t stride_h,
+    int64_t stride_w,
+    int64_t pad_d,
+    int64_t pad_h,
+    int64_t pad_w,
+    void* dst);
+
+void Unfold3dAccCPU(
+    ScalarType dtype,
+    const void *src,
+    int64_t C,
+    int64_t X_D,
+    int64_t X_H,
+    int64_t X_W,
+    int64_t Y_D,
+    int64_t Y_H,
+    int64_t Y_W,
+    int64_t kernel_d,
+    int64_t kernel_h,
+    int64_t kernel_w,
+    int64_t stride_d,
+    int64_t stride_h,
+    int64_t stride_w,
+    int64_t pad_d,
+    int64_t pad_h,
+    int64_t pad_w,
+    void *dst);
+
+} // namespace at::native
diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/UnfoldBackward.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/UnfoldBackward.h
new file mode 100644
index 0000000000000000000000000000000000000000..44e05c125913ec234247938a25b47e5feeb9f183
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/UnfoldBackward.h
@@ -0,0 +1,112 @@
+#pragma once
+
+#include <ATen/core/Tensor.h>
+#include <ATen/native/DispatchStub.h>
+#include <ATen/native/NonEmptyUtils.h>
+#include <ATen/TensorIterator.h>
+
+#ifndef AT_PER_OPERATOR_HEADERS
+#include <ATen/Functions.h>
+#else
+#include <ATen/ops/arange.h>
+#endif
+
+namespace at::native {
+
+using unfold_backward_fn = void (*)(
+  Tensor& grad_in,
+  const Tensor& grad,
+  int64_t dim,
+  int64_t size,
+  int64_t step
+);
+
+DECLARE_DISPATCH(unfold_backward_fn, unfold_backward_stub);
+
+namespace {
+
+// Note on naming: it is unconventional.
+// grad_in does not mean that it is a gradient w.r.t. the input;
+// grad_in/grad_out is just an input/output of the unfold_backward kernel.
+
+static C10_UNUSED TensorIterator _make_unfold_backward_iter_over_grad_out(
+    Tensor& grad_out,
+    const Tensor& grad_in,
+    int64_t dim,
+    int64_t size,
+    int64_t step
+) {
+  dim = maybe_wrap_dim(dim, grad_out.dim());
+  // last dim stores the folds
+
+  auto grad_out_dim_size = ensure_nonempty_size(grad_out, dim);
+  auto grad_in_dim_size = ensure_nonempty_size(grad_in, dim);
+  // dictates the number of elements to iterate over
+  // in dimension `dim`
+  auto iter_dim_size = std::min(
+      grad_out_dim_size,
+      (grad_in_dim_size - 1) * step + size
+  );
+
+  /* prepare grad_out for TensorIterator { */
+  auto grad_out_strides = ensure_nonempty_vec(grad_out.strides().vec());
+  auto grad_out_sizes = ensure_nonempty_vec(grad_out.sizes().vec());
+  grad_out_sizes[dim] = iter_dim_size;
+  auto grad_out_restrided = grad_out.as_strided(
+      grad_out_sizes, grad_out_strides
+  );
+  /* } */
+
+  /* prepare grad_in for TensorIterator { */
+  auto grad_in_strides = ensure_nonempty_vec(grad_in.strides().vec());
+  auto grad_in_sizes = ensure_nonempty_vec(grad_in.sizes().vec());
+
+  // set strides for dim to 0
+  // and size to 1 because
+  // this dimension is indexed inside the kernel
+  grad_in_strides[dim] = 0;
+  grad_in_sizes[dim] = 1;
+
+  grad_in_strides.pop_back();
+  grad_in_sizes.pop_back();
+
+  auto grad_in_restrided = grad_in.squeeze(-1).as_strided(
+      grad_in_sizes, grad_in_strides
+  );
+  /* } */
+
+  // During the TensorIterator iteration we have to know
+  // i_dim in grad_out[i_1,...,i_dim,...i_n],
+  // idx_dim stores this information
+  /* prepare idx_dim for TensorIterator { */
+  auto idx_dim = at::arange(
+      0, iter_dim_size, grad_in.options().dtype(at::kLong)
+  );
+
+  auto grad_out_dim = ensure_nonempty_dim(grad_out.dim());
+
+  auto idx_dim_strides = std::vector<int64_t>(grad_out_dim, 0);
+  auto idx_dim_sizes = std::vector<int64_t>(grad_out_dim, 1);
+
+  idx_dim_strides[dim] = 1;
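+  // (Editor's note, not part of the original header.) Together with the
+  // stride-1 entry set just above, giving idx_dim size iter_dim_size along
+  // `dim` and stride 0 / size 1 everywhere else lets it broadcast so that
+  // each grad_out element observes its own position along `dim` in the kernel.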
+  idx_dim_sizes[dim] = iter_dim_size;
+
+  // idx_dim's size will be broadcast as determined by grad_out's sizes in the TensorIterator
+  auto idx_dim_restrided = idx_dim.as_strided(idx_dim_sizes, idx_dim_strides);
+  /* } */
+
+  auto iter = TensorIteratorConfig()
+    .set_check_mem_overlap(false)
+    .check_all_same_dtype(false)
+    .resize_outputs(false)
+    .add_owned_output(grad_out_restrided)
+    .add_owned_const_input(grad_in_restrided)
+    .add_owned_const_input(idx_dim_restrided)
+    .build();
+
+  return iter;
+}
+
+} // anonymous namespace
+
+} // namespace at::native
diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/group_norm.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/group_norm.h
new file mode 100644
index 0000000000000000000000000000000000000000..1673df9253eec782328db0f673e6526b698641d0
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/group_norm.h
@@ -0,0 +1,42 @@
+#pragma once
+
+#include <ATen/native/DispatchStub.h>
+#include <c10/util/ArrayRef.h>
+
+namespace at {
+class Tensor;
+
+namespace native {
+
+using forward_fn = void (*)(
+    const Tensor& /* X */,
+    const Tensor& /* gamma */,
+    const Tensor& /* beta */,
+    int64_t /* N */,
+    int64_t /* C */,
+    int64_t /* HxW */,
+    int64_t /* group */,
+    double /* eps */,
+    Tensor& /* Y */,
+    Tensor& /* mean */,
+    Tensor& /* rstd */);
+
+using backward_fn = void (*)(
+    const Tensor& /* dY */,
+    const Tensor& /* X */,
+    const Tensor& /* mean */,
+    const Tensor& /* rstd */,
+    const Tensor& /* gamma */,
+    int64_t /* N */,
+    int64_t /* C */,
+    int64_t /* HxW */,
+    int64_t /* group */,
+    Tensor& /* dX */,
+    Tensor& /* dgamma */,
+    Tensor& /* dbeta */);
+
+DECLARE_DISPATCH(forward_fn, GroupNormKernel);
+DECLARE_DISPATCH(backward_fn, GroupNormBackwardKernel);
+
+} // namespace native
+} // namespace at
diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/im2col.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/im2col.h
new file mode 100644
index 0000000000000000000000000000000000000000..df94723ab2a216dcc2f98a6a373b46cb239a42b3
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/im2col.h
@@ -0,0 +1,149 @@
+#pragma once
+
+#include <ATen/core/Tensor.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/Utils.h>
+#include <ATen/Parallel.h>
+#include <ATen/native/cpu/utils.h>
+#include <c10/util/irange.h>
+
+#include <algorithm>
+
+namespace at::native {
+
+template <typename T>
+static void im2col(
+    const T* data_im,
+    const int64_t channels,
+    const int64_t height,
+    const int64_t width,
+    const int64_t output_height,
+    const int64_t output_width,
+    const int64_t kernel_h,
+    const int64_t kernel_w,
+    const int64_t pad_h,
+    const int64_t pad_w,
+    const int64_t stride_h,
+    const int64_t stride_w,
+    const int64_t dilation_h,
+    const int64_t dilation_w,
+    T* data_col,
+    bool is_channels_last = false) {
+  const int64_t height_col = output_height;
+  const int64_t width_col = output_width;
+  const int64_t channels_col = channels * kernel_h * kernel_w;
+
+  if (is_channels_last) {
+    at::parallel_for(0, height_col * width_col, 0, [&](int64_t begin, int64_t end) {
+      int64_t h_col{0}, w_col{0};
+      data_index_init(begin, h_col, height_col, w_col, width_col);
+
+      for (const auto i_col : c10::irange(begin, end)) {
+        for (const auto h_offset : c10::irange(kernel_h)) {
+          int64_t h_im = h_col * stride_h - pad_h + h_offset * dilation_h;
+          for (const auto w_offset : c10::irange(kernel_w)) {
+            int64_t w_im = w_col * stride_w - pad_w + w_offset * dilation_w;
+
+            const T* slice_im = data_im + (h_im * width + w_im) * channels;
+            T* slice_col = data_col + (i_col * kernel_h * kernel_w + h_offset * kernel_w + w_offset) * channels;
+
+            if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
+              std::copy_n(slice_im, channels, slice_col);
+            } else {
+              std::fill_n(slice_col, channels, T(0));
+            }
+          }
+        }
+
+        // move to the next index
+        data_index_step(h_col, height_col, w_col, width_col);
+      }
+    });
+  } else {
+    at::parallel_for(0, channels_col, 0, [&](int64_t begin, int64_t end) {
+      int64_t c_im{0}, h_offset{0}, w_offset{0};
+      data_index_init(begin, c_im, channels, h_offset, kernel_h, w_offset, kernel_w);
+
+      for (const auto c_col : c10::irange(begin, end)) {
+        for (const auto h_col : c10::irange(height_col)) {
+          int64_t h_im = h_col * stride_h - pad_h + h_offset * dilation_h;
+          for (const auto w_col : c10::irange(width_col)) {
+            int64_t w_im = w_col * stride_w - pad_w + w_offset * dilation_w;
+            data_col[(c_col * height_col + h_col) * width_col + w_col] =
+                (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width)
+                ? data_im[(c_im * height + h_im) * width + w_im]
+                : static_cast<T>(0);
+          }
+        }
+
+        // move to the next index
+        data_index_step(c_im, channels, h_offset, kernel_h, w_offset, kernel_w);
+      }
+    });
+  }
+}
+
+template <typename T>
+static void col2im(
+    const T* data_col,
+    const int64_t channels,
+    const int64_t height,
+    const int64_t width,
+    const int64_t output_height,
+    const int64_t output_width,
+    const int64_t kernel_h,
+    const int64_t kernel_w,
+    const int64_t pad_h,
+    const int64_t pad_w,
+    const int64_t stride_h,
+    const int64_t stride_w,
+    const int64_t dilation_h,
+    const int64_t dilation_w,
+    T* data_im,
+    bool is_channels_last = false) {
+  std::fill_n(data_im, height * width * channels, T(0));
+
+  const int64_t height_col = output_height;
+  const int64_t width_col = output_width;
+  const int64_t channels_col = channels * kernel_h * kernel_w;
+
+  if (is_channels_last) {
+    for (const auto h_col : c10::irange(height_col)) {
+      for (const auto w_col : c10::irange(width_col)) {
+        for (const auto h_offset : c10::irange(kernel_h)) {
+          int64_t h_im = h_col * stride_h - pad_h + h_offset * dilation_h;
+          for (const auto w_offset : c10::irange(kernel_w)) {
+            int64_t w_im = w_col * stride_w - pad_w + w_offset * dilation_w;
+
+            T* slice_im = data_im + (h_im * width + w_im) * channels;
+            const T* slice_col = data_col + ((h_col * width_col + w_col) * kernel_h * kernel_w +
+                h_offset * kernel_w + w_offset) * channels;
+
+            if (h_im >= 0 && h_im < height && w_im >= 0 && w_im < width) {
+              std::transform(slice_col, slice_col + channels, slice_im, slice_im, std::plus<T>());
+            }
+          }
+        }
+      }
+    }
+  } else {
+    for (const auto c_col : c10::irange(channels_col)) {
+      int64_t w_offset = c_col % kernel_w;
+      int64_t h_offset = (c_col / kernel_w) % kernel_h;
+      int64_t c_im = c_col / kernel_h / kernel_w;
+
+      for (const auto h_col : c10::irange(height_col)) {
+        int64_t h_im = h_col * stride_h - pad_h + h_offset * dilation_h;
+        for (const auto w_col : c10::irange(width_col)) {
+          int64_t w_im = w_col * stride_w - pad_w + w_offset * dilation_w;
+
+          if (h_im >= 0 && h_im < height && w_im >= 0 && w_im < width)
+            data_im[(c_im * height + h_im) * width + w_im] +=
+                data_col[(c_col * height_col + h_col) * width_col + w_col];
+        }
+      }
+    }
+  }
+}
+
+} // namespace at::native
diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/im2col_shape_check.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/im2col_shape_check.h
new file mode 100644
index 0000000000000000000000000000000000000000..8a6fa47ba10f108d4437ff46bc9f4610e9f37dc4
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/im2col_shape_check.h
@@ -0,0 +1,232 @@
+#pragma once
+#include <ATen/core/Tensor.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/div_rtn.h>
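+
+// (Editor's note, not part of the original header.) Worked example of the
+// sliding-block arithmetic checked below, assuming input_height = 5,
+// kernel_height = 3, pad_height = 0, stride_height = 1, dilation_height = 1:
+//   output_height = div_rtn<int64_t>(5 + 2*0 - (1*(3-1) + 1), 1) + 1
+//                 = div_rtn<int64_t>(2, 1) + 1 = 3
+// i.e. a 3-tap kernel slides over 3 positions of a length-5 axis.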
+
+namespace at::native {
+
+inline void col2im_shape_check(
+    const Tensor& input,
+    const Tensor& grad_output,
+    int64_t output_height,
+    int64_t output_width,
+    int64_t kernel_height,
+    int64_t kernel_width,
+    int64_t dilation_height,
+    int64_t dilation_width,
+    int64_t pad_height,
+    int64_t pad_width,
+    int64_t stride_height,
+    int64_t stride_width) {
+  TORCH_CHECK(
+      kernel_width > 0 && kernel_height > 0,
+      "kernel size should be greater than zero, but got kernel_height: ",
+      kernel_height,
+      " kernel_width: ",
+      kernel_width);
+  TORCH_CHECK(
+      stride_width > 0 && stride_height > 0,
+      "stride should be greater than zero, but got stride_height: ",
+      stride_height,
+      " stride_width: ",
+      stride_width);
+  TORCH_CHECK(
+      dilation_width > 0 && dilation_height > 0,
+      "dilation should be greater than zero, but got dilation_height: ",
+      dilation_height,
+      " dilation_width: ",
+      dilation_width);
+  TORCH_CHECK(
+      pad_width >= 0 && pad_height >= 0,
+      "padding should be non-negative, but got pad_height: ",
+      pad_height,
+      " pad_width: ",
+      pad_width);
+
+  int64_t ndim = input.ndimension();
+  // allow dim=0 only for the batch dimension.
+  TORCH_CHECK(
+      (ndim == 2 && input.size(0) != 0 && input.size(1) != 0) ||
+          (ndim == 3 && input.size(1) != 0 && input.size(2) != 0),
+      "Expected 2D or 3D (batch mode) tensor for input with possibly 0 batch size and non-zero dimensions for input, but got: ",
+      input.sizes());
+
+  int64_t batch_dim = (ndim == 3) ? 0 : -1;
+  int64_t n_input_plane = input.size(batch_dim + 1);
+
+  if (n_input_plane % (kernel_width * kernel_height) != 0) {
+    AT_ERROR(
+        "Expected size of input's dimension 1 to be divisible by the "
+        "product of kernel_size, but got input.size(1)=",
+        n_input_plane,
+        " and kernel_size=(",
+        kernel_height,
+        ", ",
+        kernel_width,
+        ").");
+  }
+
+  int64_t input_length = input.size(batch_dim + 2);
+  int64_t n_blocks_height =
+      div_rtn<int64_t>(
+          output_height + 2 * pad_height -
+              dilation_height * (kernel_height - 1) - 1,
+          stride_height) +
+      1;
+  int64_t n_blocks_width =
+      div_rtn<int64_t>(
+          output_width + 2 * pad_width -
+              dilation_width * (kernel_width - 1) - 1,
+          stride_width) +
+      1;
+
+  if (input_length != (n_blocks_height * n_blocks_width)) {
+    AT_ERROR(
+        "Given output_size=(",
+        output_height,
+        ", ",
+        output_width,
+        "), kernel_size=(",
+        kernel_height,
+        ", ",
+        kernel_width,
+        "), dilation=(",
+        dilation_height,
+        ", ",
+        dilation_width,
+        "), padding=(",
+        pad_height,
+        ", ",
+        pad_width,
+        "), stride=(",
+        stride_height,
+        ", ",
+        stride_width,
+        "), expected size of input's dimension 2 to match the calculated number of ",
+        "sliding blocks ",
+        n_blocks_height,
+        " * ",
+        n_blocks_width,
+        " = ",
+        (n_blocks_height * n_blocks_width),
+        ", but got input.size(2)=",
+        input_length,
+        ".");
+  }
+
+  TORCH_CHECK(
+      n_blocks_height >= 1 && n_blocks_width >= 1,
+      "Given output_size=(", output_height, ", ", output_width, "), ",
+      "kernel_size=(", kernel_height, ", ", kernel_width, "), ",
+      "dilation=(", dilation_height, ", ", dilation_width, "), ",
+      "padding=(", pad_height, ", ", pad_width, "), ",
+      "stride=(", stride_height, ", ", stride_width, "), ",
+      "calculated shape of the array of sliding blocks as ",
+      "(", n_blocks_height, ", ", n_blocks_width, "), ",
+      "which is too small (non-positive)");
+
+  if (output_width < 1 || output_height < 1) {
+    AT_ERROR(
+        "Expected output spatial size to be positive, but got: output_size=(",
+        output_height,
+        ", ",
+        output_width,
+        ").");
+  }
+}
+
+inline void im2col_shape_check(
+    const Tensor& input,
+    const Tensor& grad_output,
+    int64_t kernel_height,
+    int64_t kernel_width,
+    int64_t dilation_height,
+    int64_t dilation_width,
+    int64_t pad_height,
+    int64_t pad_width,
+    int64_t stride_height,
+    int64_t stride_width) {
+  TORCH_CHECK(
+      kernel_width > 0 && kernel_height > 0,
+      "kernel size should be greater than zero, but got kernel_height: ",
+      kernel_height,
+      " kernel_width: ",
+      kernel_width);
+
+  TORCH_CHECK(
+      dilation_width > 0 && dilation_height > 0,
+      "dilation should be greater than zero, but got dilation_height: ",
+      dilation_height,
+      " dilation_width: ",
+      dilation_width);
+
+  TORCH_CHECK(
+      pad_width >= 0 && pad_height >= 0,
+      "padding should be non-negative, but got pad_height: ",
+      pad_height,
+      " pad_width: ",
+      pad_width);
+
+  TORCH_CHECK(
+      stride_width > 0 && stride_height > 0,
+      "stride should be greater than zero, but got stride_height: ",
+      stride_height,
+      " stride_width: ",
+      stride_width);
+
+  int64_t ndim = input.ndimension();
+
+  // allow dim=0 only for the batch dimension.
+  bool valid_dims = input.size(1) != 0 && input.size(2) != 0;
+  TORCH_CHECK(
+      (ndim == 3 && input.size(0) && valid_dims) ||
+          (ndim == 4 && valid_dims && input.size(3) != 0),
+      "Expected 3D or 4D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ",
+      input.sizes());
+
+  int64_t dim_batch = 0;
+
+  if (ndim == 3) {
+    dim_batch = -1;
+  }
+
+  int64_t input_height = input.size(dim_batch + 2);
+  int64_t input_width = input.size(dim_batch + 3);
+  int64_t output_height = div_rtn<int64_t>(
+                              input_height + 2 * pad_height -
+                                  (dilation_height * (kernel_height - 1) + 1),
+                              stride_height) +
+      1;
+  int64_t output_width = div_rtn<int64_t>(
+                             input_width + 2 * pad_width -
+                                 (dilation_width * (kernel_width - 1) + 1),
+                             stride_width) +
+      1;
+
+  if (output_height < 1 || output_width < 1) {
+    AT_ERROR(
+        "Given input with spatial size (",
+        input_height,
+        ", ",
+        input_width,
+        "), kernel_size=(",
+        kernel_height,
+        ", ",
+        kernel_width,
+        "), dilation=(",
+        dilation_height,
+        ", ",
+        dilation_width,
+        "), padding=(",
+        pad_height,
+        ", ",
+        pad_width,
+        "), calculated shape of the array of sliding blocks as (",
+        output_height,
+        ", ",
+        output_width,
+        "), but its components must be at least one.");
+  }
+}
+
+} // namespace at::native
diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/native/verbose_wrapper.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/verbose_wrapper.h
new file mode 100644
index 0000000000000000000000000000000000000000..59d9682e345b4440e103a1f95c6da42208764aba
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/native/verbose_wrapper.h
@@ -0,0 +1,8 @@
+#pragma once
+
+#include <c10/macros/Export.h>
+
+namespace torch::verbose {
+TORCH_API int _mkl_set_verbose(int enable);
+TORCH_API int _mkldnn_set_verbose(int level);
+} // namespace torch::verbose
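// (Editor's note, not part of the diff.) A minimal usage sketch for the
// verbose wrappers declared above, assuming the TORCH_API symbols are
// exported by the linked libtorch build; levels follow MKL_VERBOSE /
// ONEDNN_VERBOSE semantics:
//
//   #include <ATen/native/verbose_wrapper.h>
//
//   torch::verbose::_mkl_set_verbose(1);     // enable MKL verbose logging
//   torch::verbose::_mkldnn_set_verbose(2);  // oneDNN verbose level 2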