diff --git a/.gitattributes b/.gitattributes index 74928c7019b81ce7852f2df679a9a6602b5ec3a1..2c1c62df4906d55a4542f401f49d875238a5ea18 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1425,3 +1425,4 @@ vllm/lib/python3.10/site-packages/numpy.libs/libgfortran-040039e1.so.5.0.0 filte vllm/lib/python3.10/site-packages/numpy.libs/libquadmath-96973f99.so.0.0.0 filter=lfs diff=lfs merge=lfs -text vllm/lib/python3.10/site-packages/numpy.libs/libopenblas64_p-r0-0cf96a72.3.23.dev.so filter=lfs diff=lfs merge=lfs -text vllm/lib/python3.10/site-packages/wandb/bin/gpu_stats filter=lfs diff=lfs merge=lfs -text +parrot/lib/python3.10/site-packages/pyparsing/__pycache__/core.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text diff --git a/parrot/lib/python3.10/site-packages/numpy/compat/__init__.py b/parrot/lib/python3.10/site-packages/numpy/compat/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..729265aa9c27736861dc16d803ae7c186f2958c4 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/numpy/compat/__init__.py @@ -0,0 +1,29 @@ +""" +Compatibility module. + +This module contains duplicated code from Python itself or 3rd party +extensions, which may be included for the following reasons: + + * compatibility + * we may only need a small subset of the copied library/module + +This module is deprecated since 1.26.0 and will be removed in future versions. + +""" + +import warnings + +from .._utils import _inspect +from .._utils._inspect import getargspec, formatargspec +from . import py3k +from .py3k import * + +warnings.warn( + "`np.compat`, which was used during the Python 2 to 3 transition," + " is deprecated since 1.26.0, and will be removed", + DeprecationWarning, stacklevel=2 +) + +__all__ = [] +__all__.extend(_inspect.__all__) +__all__.extend(py3k.__all__) diff --git a/parrot/lib/python3.10/site-packages/numpy/compat/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/numpy/compat/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..930cae4d00ac02d83212c0e14d77043d1fc3b82f Binary files /dev/null and b/parrot/lib/python3.10/site-packages/numpy/compat/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/numpy/compat/__pycache__/py3k.cpython-310.pyc b/parrot/lib/python3.10/site-packages/numpy/compat/__pycache__/py3k.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2beb73eb3f7ce0620f5c648c7530a678ddd3fa79 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/numpy/compat/__pycache__/py3k.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/numpy/compat/py3k.py b/parrot/lib/python3.10/site-packages/numpy/compat/py3k.py new file mode 100644 index 0000000000000000000000000000000000000000..d02c9f8fe341859202319f9b7ed65818f139e269 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/numpy/compat/py3k.py @@ -0,0 +1,145 @@ +""" +Python 3.X compatibility tools. + +While this file was originally intended for Python 2 -> 3 transition, +it is now used to create a compatibility layer between different +minor versions of Python 3. + +While the active version of numpy may not support a given version of python, we +allow downstream libraries to continue to use these shims for forward +compatibility with numpy while they transition their code to newer versions of +Python. 
+""" +__all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar', + 'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested', + 'asstr', 'open_latin1', 'long', 'basestring', 'sixu', + 'integer_types', 'is_pathlib_path', 'npy_load_module', 'Path', + 'pickle', 'contextlib_nullcontext', 'os_fspath', 'os_PathLike'] + +import sys +import os +from pathlib import Path +import io +try: + import pickle5 as pickle +except ImportError: + import pickle + +long = int +integer_types = (int,) +basestring = str +unicode = str +bytes = bytes + +def asunicode(s): + if isinstance(s, bytes): + return s.decode('latin1') + return str(s) + +def asbytes(s): + if isinstance(s, bytes): + return s + return str(s).encode('latin1') + +def asstr(s): + if isinstance(s, bytes): + return s.decode('latin1') + return str(s) + +def isfileobj(f): + if not isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter)): + return False + try: + # BufferedReader/Writer may raise OSError when + # fetching `fileno()` (e.g. when wrapping BytesIO). + f.fileno() + return True + except OSError: + return False + +def open_latin1(filename, mode='r'): + return open(filename, mode=mode, encoding='iso-8859-1') + +def sixu(s): + return s + +strchar = 'U' + +def getexception(): + return sys.exc_info()[1] + +def asbytes_nested(x): + if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)): + return [asbytes_nested(y) for y in x] + else: + return asbytes(x) + +def asunicode_nested(x): + if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)): + return [asunicode_nested(y) for y in x] + else: + return asunicode(x) + +def is_pathlib_path(obj): + """ + Check whether obj is a `pathlib.Path` object. + + Prefer using ``isinstance(obj, os.PathLike)`` instead of this function. + """ + return isinstance(obj, Path) + +# from Python 3.7 +class contextlib_nullcontext: + """Context manager that does no additional processing. + + Used as a stand-in for a normal context manager, when a particular + block of code is only sometimes used with a normal context manager: + + cm = optional_cm if condition else nullcontext() + with cm: + # Perform operation, using optional_cm if condition is True + + .. note:: + Prefer using `contextlib.nullcontext` instead of this context manager. + """ + + def __init__(self, enter_result=None): + self.enter_result = enter_result + + def __enter__(self): + return self.enter_result + + def __exit__(self, *excinfo): + pass + + +def npy_load_module(name, fn, info=None): + """ + Load a module. Uses ``load_module`` which will be deprecated in python + 3.12. An alternative that uses ``exec_module`` is in + numpy.distutils.misc_util.exec_mod_from_location + + .. versionadded:: 1.11.2 + + Parameters + ---------- + name : str + Full module name. + fn : str + Path to module file. + info : tuple, optional + Only here for backward compatibility with Python 2.*. 
+ + Returns + ------- + mod : module + + """ + # Explicitly lazy import this to avoid paying the cost + # of importing importlib at startup + from importlib.machinery import SourceFileLoader + return SourceFileLoader(name, fn).load_module() + + +os_fspath = os.fspath +os_PathLike = os.PathLike diff --git a/parrot/lib/python3.10/site-packages/numpy/compat/tests/__init__.py b/parrot/lib/python3.10/site-packages/numpy/compat/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/parrot/lib/python3.10/site-packages/numpy/compat/tests/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/numpy/compat/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..35d1eb0545651f330ca43744386e19282a94e7f2 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/numpy/compat/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/numpy/doc/__pycache__/ufuncs.cpython-310.pyc b/parrot/lib/python3.10/site-packages/numpy/doc/__pycache__/ufuncs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aafc4f20b055af00b03bcc59f52fcfa2ec6049c7 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/numpy/doc/__pycache__/ufuncs.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/numpy/doc/ufuncs.py b/parrot/lib/python3.10/site-packages/numpy/doc/ufuncs.py new file mode 100644 index 0000000000000000000000000000000000000000..7324168e1dc80c3452b170fec2060cddb040d54c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/numpy/doc/ufuncs.py @@ -0,0 +1,138 @@ +""" +=================== +Universal Functions +=================== + +Ufuncs are, generally speaking, mathematical functions or operations that are +applied element-by-element to the contents of an array. That is, the result +in each output array element only depends on the value in the corresponding +input array (or arrays) and on no other array elements. NumPy comes with a +large suite of ufuncs, and scipy extends that suite substantially. The simplest +example is the addition operator: :: + + >>> np.array([0,2,3,4]) + np.array([1,1,-1,2]) + array([1, 3, 2, 6]) + +The ufunc module lists all the available ufuncs in numpy. Documentation on +the specific ufuncs may be found in those modules. This documentation is +intended to address the more general aspects of ufuncs common to most of +them. All of the ufuncs that make use of Python operators (e.g., +, -, etc.) +have equivalent functions defined (e.g. add() for +) + +Type coercion +============= + +What happens when a binary operator (e.g., +,-,\\*,/, etc) deals with arrays of +two different types? What is the type of the result? Typically, the result is +the higher of the two types. For example: :: + + float32 + float64 -> float64 + int8 + int32 -> int32 + int16 + float32 -> float32 + float32 + complex64 -> complex64 + +There are some less obvious cases generally involving mixes of types +(e.g. uints, ints and floats) where equal bit sizes for each are not +capable of saving all the information in a different type of equivalent +bit size. Some examples are int32 vs float32 or uint32 vs int32. +Generally, the result is the higher type of larger size than both +(if available). 
So: ::

+    int32 + float32 -> float64
+    uint32 + int32  -> int64
+
+Finally, the type coercion behavior when expressions involve Python
+scalars is different than that seen for arrays. Since Python has a
+limited number of types, combining a Python int with a dtype=np.int8
+array does not coerce to the higher type but instead, the type of the
+array prevails. So the rule for Python scalars combined with arrays is
+that the result will be that of the array equivalent of the Python scalar
+if the Python scalar is of a higher 'kind' than the array (e.g., float
+vs. int); otherwise, the resultant type will be that of the array.
+For example: ::
+
+  Python int + int8        -> int8
+  Python float + int8      -> float64
+
+ufunc methods
+=============
+
+Binary ufuncs support 4 methods.
+
+**.reduce(arr)** applies the binary operator to elements of the array in
+  sequence. For example: ::
+
+ >>> np.add.reduce(np.arange(10))  # adds all elements of array
+ 45
+
+For multidimensional arrays, the first dimension is reduced by default: ::
+
+ >>> np.add.reduce(np.arange(10).reshape(2,5))
+ array([ 5,  7,  9, 11, 13])
+
+The axis keyword can be used to specify different axes to reduce: ::
+
+ >>> np.add.reduce(np.arange(10).reshape(2,5), axis=1)
+ array([10, 35])
+
+**.accumulate(arr)** applies the binary operator and generates an
+equivalently shaped array that includes the accumulated amount for each
+element of the array. A couple of examples: ::
+
+ >>> np.add.accumulate(np.arange(10))
+ array([ 0,  1,  3,  6, 10, 15, 21, 28, 36, 45])
+ >>> np.multiply.accumulate(np.arange(1,9))
+ array([    1,     2,     6,    24,   120,   720,  5040, 40320])
+
+The behavior for multidimensional arrays is the same as for .reduce(),
+as is the use of the axis keyword.
+
+**.reduceat(arr,indices)** allows one to apply reduce to selected parts
+  of an array. It is a difficult method to understand; see the
+  `np.ufunc.reduceat` documentation for the details.
+
+**.outer(arr1,arr2)** generates an outer operation on the two arrays arr1 and
+  arr2. It will work on multidimensional arrays (the shape of the result is
+  the concatenation of the two input shapes): ::
+
+ >>> np.multiply.outer(np.arange(3),np.arange(4))
+ array([[0, 0, 0, 0],
+        [0, 1, 2, 3],
+        [0, 2, 4, 6]])
+
+Output arguments
+================
+
+All ufuncs accept an optional output array. The array must be of the expected
+output shape. Beware that if the type of the output array is of a different
+(and lower) type than the output result, the results may be silently truncated
+or otherwise corrupted in the downcast to the lower type. This usage is useful
+when one wants to avoid creating large temporary arrays and instead allows one
+to reuse the same array memory repeatedly (at the expense of not being able to
+use more convenient operator notation in expressions). Note that when the
+output argument is used, the ufunc still returns a reference to the result: ::
+
+ >>> x = np.arange(2)
+ >>> np.add(np.arange(2, dtype=float), np.arange(2, dtype=float), x,
+ ...        casting='unsafe')
+ array([0, 2])
+ >>> x
+ array([0, 2])
+
+and & or as ufuncs
+==================
+
+Invariably people try to use the python 'and' and 'or' as logical operators
+(and quite understandably). But these operators do not behave as normal
+operators since Python treats these quite differently. They cannot be
+overloaded with array equivalents. Thus using 'and' or 'or' with an array
+results in an error. There are two alternatives:
+
+ 1) use the ufunc functions logical_and() and logical_or().
+ 2) use the bitwise operators & and \\|.
The drawback of these is that if + the arguments to these operators are not boolean arrays, the result is + likely incorrect. On the other hand, most usages of logical_and and + logical_or are with boolean arrays. As long as one is careful, this is + a convenient way to apply these operators. + +""" diff --git a/parrot/lib/python3.10/site-packages/numpy/polynomial/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/numpy/polynomial/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a5dff02d772067f69a3cbbcf79c5274b26301a0f Binary files /dev/null and b/parrot/lib/python3.10/site-packages/numpy/polynomial/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/numpy/polynomial/__pycache__/chebyshev.cpython-310.pyc b/parrot/lib/python3.10/site-packages/numpy/polynomial/__pycache__/chebyshev.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a9c33718a1a2be6907edf4d4a167009f051e51e Binary files /dev/null and b/parrot/lib/python3.10/site-packages/numpy/polynomial/__pycache__/chebyshev.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/numpy/polynomial/__pycache__/hermite.cpython-310.pyc b/parrot/lib/python3.10/site-packages/numpy/polynomial/__pycache__/hermite.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30eb8eb810dadba9ade314a5cdad1ffb857e35c5 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/numpy/polynomial/__pycache__/hermite.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/numpy/polynomial/__pycache__/hermite_e.cpython-310.pyc b/parrot/lib/python3.10/site-packages/numpy/polynomial/__pycache__/hermite_e.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c119170978395cd0668bd3533dbc1786cfea67c1 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/numpy/polynomial/__pycache__/hermite_e.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/numpy/polynomial/__pycache__/laguerre.cpython-310.pyc b/parrot/lib/python3.10/site-packages/numpy/polynomial/__pycache__/laguerre.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de0e1cd53a84f76dff50ebb38a92c10679d3b227 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/numpy/polynomial/__pycache__/laguerre.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/numpy/polynomial/__pycache__/legendre.cpython-310.pyc b/parrot/lib/python3.10/site-packages/numpy/polynomial/__pycache__/legendre.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a9a2d81c6944241fd11c312315bb24e86e22ed47 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/numpy/polynomial/__pycache__/legendre.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/numpy/polynomial/__pycache__/polynomial.cpython-310.pyc b/parrot/lib/python3.10/site-packages/numpy/polynomial/__pycache__/polynomial.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d8b128f05f58cfcdd07ee9973ee8cee595f80577 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/numpy/polynomial/__pycache__/polynomial.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/numpy/polynomial/tests/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/numpy/polynomial/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 
index 0000000000000000000000000000000000000000..31a7a4f88dc41c0af3f99cd6472423b920f29c15
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/numpy/polynomial/tests/__pycache__/__init__.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/numpy/polynomial/tests/__pycache__/test_symbol.cpython-310.pyc b/parrot/lib/python3.10/site-packages/numpy/polynomial/tests/__pycache__/test_symbol.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..00653b6c072301e4b1cc78b611e2112993fa4265
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/numpy/polynomial/tests/__pycache__/test_symbol.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/numpy/random/LICENSE.md b/parrot/lib/python3.10/site-packages/numpy/random/LICENSE.md
new file mode 100644
index 0000000000000000000000000000000000000000..a6cf1b17e99725556ac56ce3661498df1ee2276a
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/numpy/random/LICENSE.md
@@ -0,0 +1,71 @@
+**This software is dual-licensed under the University of Illinois/NCSA
+Open Source License (NCSA) and The 3-Clause BSD License**
+
+# NCSA Open Source License
+**Copyright (c) 2019 Kevin Sheppard. All rights reserved.**
+
+Developed by: Kevin Sheppard (<kevin.sheppard@economics.ox.ac.uk>,
+<kevin.k.sheppard@gmail.com>)
+[http://www.kevinsheppard.com](http://www.kevinsheppard.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal with
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimers.
+
+Redistributions in binary form must reproduce the above copyright notice, this
+list of conditions and the following disclaimers in the documentation and/or
+other materials provided with the distribution.
+
+Neither the names of Kevin Sheppard, nor the names of any contributors may be
+used to endorse or promote products derived from this Software without specific
+prior written permission.
+
+**THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH
+THE SOFTWARE.**
+
+
+# 3-Clause BSD License
+**Copyright (c) 2019 Kevin Sheppard. All rights reserved.**
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+   this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+3.
Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +**THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGE.** + +# Components + +Many parts of this module have been derived from original sources, +often the algorithm's designer. Component licenses are located with +the component code. diff --git a/parrot/lib/python3.10/site-packages/numpy/random/__init__.py b/parrot/lib/python3.10/site-packages/numpy/random/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2e8f99fe3045b9c2b691a8ece67d0f06d9d73b08 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/numpy/random/__init__.py @@ -0,0 +1,215 @@ +""" +======================== +Random Number Generation +======================== + +Use ``default_rng()`` to create a `Generator` and call its methods. + +=============== ========================================================= +Generator +--------------- --------------------------------------------------------- +Generator Class implementing all of the random number distributions +default_rng Default constructor for ``Generator`` +=============== ========================================================= + +============================================= === +BitGenerator Streams that work with Generator +--------------------------------------------- --- +MT19937 +PCG64 +PCG64DXSM +Philox +SFC64 +============================================= === + +============================================= === +Getting entropy to initialize a BitGenerator +--------------------------------------------- --- +SeedSequence +============================================= === + + +Legacy +------ + +For backwards compatibility with previous versions of numpy before 1.17, the +various aliases to the global `RandomState` methods are left alone and do not +use the new `Generator` API. + +==================== ========================================================= +Utility functions +-------------------- --------------------------------------------------------- +random Uniformly distributed floats over ``[0, 1)`` +bytes Uniformly distributed random bytes. +permutation Randomly permute a sequence / generate a random sequence. +shuffle Randomly permute a sequence in place. +choice Random sample from 1-D array. +==================== ========================================================= + +==================== ========================================================= +Compatibility +functions - removed +in the new API +-------------------- --------------------------------------------------------- +rand Uniformly distributed values. +randn Normally distributed values. +ranf Uniformly distributed floating point numbers. 
+random_integers      Uniformly distributed integers in a given range.
+                     (deprecated, use ``integers(..., closed=True)`` instead)
+random_sample        Alias for `random`.
+randint              Uniformly distributed integers in a given range.
+seed                 Seed the legacy random number generator.
+==================== =========================================================
+
+==================== =========================================================
+Univariate
+distributions
+-------------------- ---------------------------------------------------------
+beta                 Beta distribution over ``[0, 1]``.
+binomial             Binomial distribution.
+chisquare            :math:`\\chi^2` distribution.
+exponential          Exponential distribution.
+f                    F (Fisher-Snedecor) distribution.
+gamma                Gamma distribution.
+geometric            Geometric distribution.
+gumbel               Gumbel distribution.
+hypergeometric       Hypergeometric distribution.
+laplace              Laplace distribution.
+logistic             Logistic distribution.
+lognormal            Log-normal distribution.
+logseries            Logarithmic series distribution.
+negative_binomial    Negative binomial distribution.
+noncentral_chisquare Non-central chi-square distribution.
+noncentral_f         Non-central F distribution.
+normal               Normal / Gaussian distribution.
+pareto               Pareto distribution.
+poisson              Poisson distribution.
+power                Power distribution.
+rayleigh             Rayleigh distribution.
+triangular           Triangular distribution.
+uniform              Uniform distribution.
+vonmises             Von Mises circular distribution.
+wald                 Wald (inverse Gaussian) distribution.
+weibull              Weibull distribution.
+zipf                 Zipf's distribution over ranked data.
+==================== =========================================================
+
+==================== ==========================================================
+Multivariate
+distributions
+-------------------- ----------------------------------------------------------
+dirichlet            Multivariate generalization of Beta distribution.
+multinomial          Multivariate generalization of the binomial distribution.
+multivariate_normal  Multivariate generalization of the normal distribution.
+==================== ==========================================================
+
+==================== =========================================================
+Standard
+distributions
+-------------------- ---------------------------------------------------------
+standard_cauchy      Standard Cauchy-Lorentz distribution.
+standard_exponential Standard exponential distribution.
+standard_gamma       Standard Gamma distribution.
+standard_normal      Standard normal distribution.
+standard_t           Standard Student's t-distribution.
+==================== =========================================================
+
+==================== =========================================================
+Internal functions
+-------------------- ---------------------------------------------------------
+get_state            Get tuple representing internal state of generator.
+set_state            Set state of generator.
+==================== ========================================================= + + +""" +__all__ = [ + 'beta', + 'binomial', + 'bytes', + 'chisquare', + 'choice', + 'dirichlet', + 'exponential', + 'f', + 'gamma', + 'geometric', + 'get_state', + 'gumbel', + 'hypergeometric', + 'laplace', + 'logistic', + 'lognormal', + 'logseries', + 'multinomial', + 'multivariate_normal', + 'negative_binomial', + 'noncentral_chisquare', + 'noncentral_f', + 'normal', + 'pareto', + 'permutation', + 'poisson', + 'power', + 'rand', + 'randint', + 'randn', + 'random', + 'random_integers', + 'random_sample', + 'ranf', + 'rayleigh', + 'sample', + 'seed', + 'set_state', + 'shuffle', + 'standard_cauchy', + 'standard_exponential', + 'standard_gamma', + 'standard_normal', + 'standard_t', + 'triangular', + 'uniform', + 'vonmises', + 'wald', + 'weibull', + 'zipf', +] + +# add these for module-freeze analysis (like PyInstaller) +from . import _pickle +from . import _common +from . import _bounded_integers + +from ._generator import Generator, default_rng +from .bit_generator import SeedSequence, BitGenerator +from ._mt19937 import MT19937 +from ._pcg64 import PCG64, PCG64DXSM +from ._philox import Philox +from ._sfc64 import SFC64 +from .mtrand import * + +__all__ += ['Generator', 'RandomState', 'SeedSequence', 'MT19937', + 'Philox', 'PCG64', 'PCG64DXSM', 'SFC64', 'default_rng', + 'BitGenerator'] + + +def __RandomState_ctor(): + """Return a RandomState instance. + + This function exists solely to assist (un)pickling. + + Note that the state of the RandomState returned here is irrelevant, as this + function's entire purpose is to return a newly allocated RandomState whose + state pickle can set. Consequently the RandomState returned by this function + is a freshly allocated copy with a seed=0. 
+ + See https://github.com/numpy/numpy/issues/4763 for a detailed discussion + + """ + return RandomState(seed=0) + + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/parrot/lib/python3.10/site-packages/numpy/random/_common.pxd b/parrot/lib/python3.10/site-packages/numpy/random/_common.pxd new file mode 100644 index 0000000000000000000000000000000000000000..0de4456d778f409f63d237d53eb083bf2c9949ae --- /dev/null +++ b/parrot/lib/python3.10/site-packages/numpy/random/_common.pxd @@ -0,0 +1,107 @@ +#cython: language_level=3 + +from libc.stdint cimport uint32_t, uint64_t, int32_t, int64_t + +import numpy as np +cimport numpy as np + +from numpy.random cimport bitgen_t + +cdef double POISSON_LAM_MAX +cdef double LEGACY_POISSON_LAM_MAX +cdef uint64_t MAXSIZE + +cdef enum ConstraintType: + CONS_NONE + CONS_NON_NEGATIVE + CONS_POSITIVE + CONS_POSITIVE_NOT_NAN + CONS_BOUNDED_0_1 + CONS_BOUNDED_GT_0_1 + CONS_BOUNDED_LT_0_1 + CONS_GT_1 + CONS_GTE_1 + CONS_POISSON + LEGACY_CONS_POISSON + LEGACY_CONS_NON_NEGATIVE_INBOUNDS_LONG + +ctypedef ConstraintType constraint_type + +cdef object benchmark(bitgen_t *bitgen, object lock, Py_ssize_t cnt, object method) +cdef object random_raw(bitgen_t *bitgen, object lock, object size, object output) +cdef object prepare_cffi(bitgen_t *bitgen) +cdef object prepare_ctypes(bitgen_t *bitgen) +cdef int check_constraint(double val, object name, constraint_type cons) except -1 +cdef int check_array_constraint(np.ndarray val, object name, constraint_type cons) except -1 + +cdef extern from "include/aligned_malloc.h": + cdef void *PyArray_realloc_aligned(void *p, size_t n) + cdef void *PyArray_malloc_aligned(size_t n) + cdef void *PyArray_calloc_aligned(size_t n, size_t s) + cdef void PyArray_free_aligned(void *p) + +ctypedef void (*random_double_fill)(bitgen_t *state, np.npy_intp count, double* out) noexcept nogil +ctypedef double (*random_double_0)(void *state) noexcept nogil +ctypedef double (*random_double_1)(void *state, double a) noexcept nogil +ctypedef double (*random_double_2)(void *state, double a, double b) noexcept nogil +ctypedef double (*random_double_3)(void *state, double a, double b, double c) noexcept nogil + +ctypedef void (*random_float_fill)(bitgen_t *state, np.npy_intp count, float* out) noexcept nogil +ctypedef float (*random_float_0)(bitgen_t *state) noexcept nogil +ctypedef float (*random_float_1)(bitgen_t *state, float a) noexcept nogil + +ctypedef int64_t (*random_uint_0)(void *state) noexcept nogil +ctypedef int64_t (*random_uint_d)(void *state, double a) noexcept nogil +ctypedef int64_t (*random_uint_dd)(void *state, double a, double b) noexcept nogil +ctypedef int64_t (*random_uint_di)(void *state, double a, uint64_t b) noexcept nogil +ctypedef int64_t (*random_uint_i)(void *state, int64_t a) noexcept nogil +ctypedef int64_t (*random_uint_iii)(void *state, int64_t a, int64_t b, int64_t c) noexcept nogil + +ctypedef uint32_t (*random_uint_0_32)(bitgen_t *state) noexcept nogil +ctypedef uint32_t (*random_uint_1_i_32)(bitgen_t *state, uint32_t a) noexcept nogil + +ctypedef int32_t (*random_int_2_i_32)(bitgen_t *state, int32_t a, int32_t b) noexcept nogil +ctypedef int64_t (*random_int_2_i)(bitgen_t *state, int64_t a, int64_t b) noexcept nogil + +cdef double kahan_sum(double *darr, np.npy_intp n) noexcept + +cdef inline double uint64_to_double(uint64_t rnd) noexcept nogil: + return (rnd >> 11) * (1.0 / 9007199254740992.0) + +cdef object double_fill(void *func, bitgen_t *state, object size, 
object lock, object out) + +cdef object float_fill(void *func, bitgen_t *state, object size, object lock, object out) + +cdef object float_fill_from_double(void *func, bitgen_t *state, object size, object lock, object out) + +cdef object wrap_int(object val, object bits) + +cdef np.ndarray int_to_array(object value, object name, object bits, object uint_size) + +cdef validate_output_shape(iter_shape, np.ndarray output) + +cdef object cont(void *func, void *state, object size, object lock, int narg, + object a, object a_name, constraint_type a_constraint, + object b, object b_name, constraint_type b_constraint, + object c, object c_name, constraint_type c_constraint, + object out) + +cdef object disc(void *func, void *state, object size, object lock, + int narg_double, int narg_int64, + object a, object a_name, constraint_type a_constraint, + object b, object b_name, constraint_type b_constraint, + object c, object c_name, constraint_type c_constraint) + +cdef object cont_f(void *func, bitgen_t *state, object size, object lock, + object a, object a_name, constraint_type a_constraint, + object out) + +cdef object cont_broadcast_3(void *func, void *state, object size, object lock, + np.ndarray a_arr, object a_name, constraint_type a_constraint, + np.ndarray b_arr, object b_name, constraint_type b_constraint, + np.ndarray c_arr, object c_name, constraint_type c_constraint) + +cdef object discrete_broadcast_iii(void *func, void *state, object size, object lock, + np.ndarray a_arr, object a_name, constraint_type a_constraint, + np.ndarray b_arr, object b_name, constraint_type b_constraint, + np.ndarray c_arr, object c_name, constraint_type c_constraint) diff --git a/parrot/lib/python3.10/site-packages/numpy/random/_mt19937.pyi b/parrot/lib/python3.10/site-packages/numpy/random/_mt19937.pyi new file mode 100644 index 0000000000000000000000000000000000000000..600411d5f6412dd6c661a5f4107bc118c734284b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/numpy/random/_mt19937.pyi @@ -0,0 +1,23 @@ +from typing import TypedDict + +from numpy import uint32 +from numpy.typing import NDArray +from numpy.random.bit_generator import BitGenerator, SeedSequence +from numpy._typing import _ArrayLikeInt_co + +class _MT19937Internal(TypedDict): + key: NDArray[uint32] + pos: int + +class _MT19937State(TypedDict): + bit_generator: str + state: _MT19937Internal + +class MT19937(BitGenerator): + def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ... + def _legacy_seeding(self, seed: _ArrayLikeInt_co) -> None: ... + def jumped(self, jumps: int = ...) -> MT19937: ... + @property + def state(self) -> _MT19937State: ... + @state.setter + def state(self, value: _MT19937State) -> None: ... diff --git a/parrot/lib/python3.10/site-packages/numpy/random/_pcg64.pyi b/parrot/lib/python3.10/site-packages/numpy/random/_pcg64.pyi new file mode 100644 index 0000000000000000000000000000000000000000..470aee867493b48817670f7c4ff7b24d8be31f26 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/numpy/random/_pcg64.pyi @@ -0,0 +1,42 @@ +from typing import TypedDict + +from numpy.random.bit_generator import BitGenerator, SeedSequence +from numpy._typing import _ArrayLikeInt_co + +class _PCG64Internal(TypedDict): + state: int + inc: int + +class _PCG64State(TypedDict): + bit_generator: str + state: _PCG64Internal + has_uint32: int + uinteger: int + +class PCG64(BitGenerator): + def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ... + def jumped(self, jumps: int = ...) 
-> PCG64: ... + @property + def state( + self, + ) -> _PCG64State: ... + @state.setter + def state( + self, + value: _PCG64State, + ) -> None: ... + def advance(self, delta: int) -> PCG64: ... + +class PCG64DXSM(BitGenerator): + def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ... + def jumped(self, jumps: int = ...) -> PCG64DXSM: ... + @property + def state( + self, + ) -> _PCG64State: ... + @state.setter + def state( + self, + value: _PCG64State, + ) -> None: ... + def advance(self, delta: int) -> PCG64DXSM: ... diff --git a/parrot/lib/python3.10/site-packages/numpy/random/_philox.pyi b/parrot/lib/python3.10/site-packages/numpy/random/_philox.pyi new file mode 100644 index 0000000000000000000000000000000000000000..485f3bc82decec41852f677446e65866c347a2eb --- /dev/null +++ b/parrot/lib/python3.10/site-packages/numpy/random/_philox.pyi @@ -0,0 +1,37 @@ +from typing import TypedDict + +from numpy import uint64 +from numpy.typing import NDArray +from numpy.random.bit_generator import BitGenerator, SeedSequence +from numpy._typing import _ArrayLikeInt_co + +class _PhiloxInternal(TypedDict): + counter: NDArray[uint64] + key: NDArray[uint64] + +class _PhiloxState(TypedDict): + bit_generator: str + state: _PhiloxInternal + buffer: NDArray[uint64] + buffer_pos: int + has_uint32: int + uinteger: int + +class Philox(BitGenerator): + def __init__( + self, + seed: None | _ArrayLikeInt_co | SeedSequence = ..., + counter: None | _ArrayLikeInt_co = ..., + key: None | _ArrayLikeInt_co = ..., + ) -> None: ... + @property + def state( + self, + ) -> _PhiloxState: ... + @state.setter + def state( + self, + value: _PhiloxState, + ) -> None: ... + def jumped(self, jumps: int = ...) -> Philox: ... + def advance(self, delta: int) -> Philox: ... diff --git a/parrot/lib/python3.10/site-packages/numpy/random/_sfc64.pyi b/parrot/lib/python3.10/site-packages/numpy/random/_sfc64.pyi new file mode 100644 index 0000000000000000000000000000000000000000..09ea4113978941794fde435c32154d48cbddb8db --- /dev/null +++ b/parrot/lib/python3.10/site-packages/numpy/random/_sfc64.pyi @@ -0,0 +1,26 @@ +from typing import TypedDict + +from numpy import uint64 +from numpy.random.bit_generator import BitGenerator, SeedSequence +from numpy._typing import NDArray, _ArrayLikeInt_co + +class _SFC64Internal(TypedDict): + state: NDArray[uint64] + +class _SFC64State(TypedDict): + bit_generator: str + state: _SFC64Internal + has_uint32: int + uinteger: int + +class SFC64(BitGenerator): + def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ... + @property + def state( + self, + ) -> _SFC64State: ... + @state.setter + def state( + self, + value: _SFC64State, + ) -> None: ... 
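The stub files above pin down the `state` TypedDicts and the `jumped`/`advance` methods each bit generator exposes. As a quick orientation for that API (a usage sketch against the public `numpy.random` interface, not part of the vendored patch):

```python
from numpy.random import Generator, PCG64, SeedSequence

# Seed a bit generator reproducibly and wrap it in a Generator.
bit_gen = PCG64(SeedSequence(1234))
rng = Generator(bit_gen)

# `state` round-trips as the _PCG64State TypedDict described in _pcg64.pyi:
# {'bit_generator': 'PCG64', 'state': {'state': ..., 'inc': ...},
#  'has_uint32': 0, 'uinteger': 0}
snapshot = bit_gen.state
first = rng.standard_normal(3)

bit_gen.state = snapshot          # rewind the stream
assert (rng.standard_normal(3) == first).all()

# `jumped` returns a new bit generator far ahead in the same stream,
# the intended way to build independent generators for parallel work.
rng2 = Generator(bit_gen.jumped(1))
```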
diff --git a/parrot/lib/python3.10/site-packages/pyparsing/__pycache__/core.cpython-310.pyc b/parrot/lib/python3.10/site-packages/pyparsing/__pycache__/core.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1093fcb56f18bb98b42a4258a4c6b21f6a0a02d1
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/pyparsing/__pycache__/core.cpython-310.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:87cfa16b900ac61aa41874cdae23d6be9d811d04e73c7669ffa9427a66a60e76
+size 187940
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_physical_compositeexplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_physical_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a9493e246b43e591bcb67d6e08f70f8b65c3b1d4
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_physical_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor _conj_physical(const at::Tensor & self);
+TORCH_API at::Tensor & _conj_physical_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & _conj_physical_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_ctc_loss_backward_cpu_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_ctc_loss_backward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..13794a49fb9aeec975f9ff6a3a07d9f38355dc88
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_ctc_loss_backward_cpu_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _ctc_loss_backward(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity=false);
+TORCH_API at::Tensor _ctc_loss_backward(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity=false);
+
+} // namespace cpu
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_max_size_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_max_size_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..5fa5d2c4522e7bc1557e9502771609baacabcb53
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_max_size_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API int64_t _cufft_get_plan_cache_max_size(at::DeviceIndex device_index);
+} // namespace native
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_adam_compositeexplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_adam_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..88f92f38741daebaaf8c8ab9c829fc9cca41ffd0
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_adam_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adam(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={});
+TORCH_API void _fused_adam_out(at::TensorList out, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={});
+TORCH_API void _fused_adam_outf(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out);
+TORCH_API ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adam(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={});
+TORCH_API void _fused_adam_out(at::TensorList out, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={});
+TORCH_API void _fused_adam_outf(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_make_per_channel_quantized_tensor_compositeexplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_make_per_channel_quantized_tensor_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8779ef3497b24aa118f5c4e1928b339d26798917
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_make_per_channel_quantized_tensor_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _make_per_channel_quantized_tensor_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis);
+TORCH_API at::Tensor & _make_per_channel_quantized_tensor_outf(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_test_functorch_fallback.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_test_functorch_fallback.h
new file mode 100644
index 0000000000000000000000000000000000000000..118bc7e28e97694462701702225713f24b5ab9ab
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_test_functorch_fallback.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_test_functorch_fallback_ops.h>
+
+namespace at {
+
+
+// aten::_test_functorch_fallback(Tensor self, Tensor other) -> Tensor
+inline at::Tensor _test_functorch_fallback(const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::_test_functorch_fallback::call(self, other);
+}
+
+// aten::_test_functorch_fallback.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _test_functorch_fallback_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::_test_functorch_fallback_out::call(self, other, out);
+}
+// aten::_test_functorch_fallback.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _test_functorch_fallback_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::_test_functorch_fallback_out::call(self, other, out);
+}
+
+}
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_unsafe_index_put_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_unsafe_index_put_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..d56f1724089761b0f6199365ac874e19d727353b
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_unsafe_index_put_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _unsafe_index_put {
+  using schema = at::Tensor (const at::Tensor &, const c10::List<c10::optional<at::Tensor>> &, const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_unsafe_index_put")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_unsafe_index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate);
+};
+
+}} // namespace at::_ops
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_and_meta.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_and_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..84fdb2199c47493eef23b0885fbfec73347b5d95
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_and_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_bitwise_and_Tensor : public TensorIteratorBase {
+
+
+    void meta(const at::Tensor & self, const at::Tensor & other);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cauchy_meta_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cauchy_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..784c847e22ec8b0206cc06767bf927ddfd0b14cf
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cauchy_meta_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor & cauchy_(at::Tensor & self, double median=0, double sigma=1, c10::optional<at::Generator> generator=c10::nullopt);
+
+} // namespace meta
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/constant_pad_nd_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/constant_pad_nd_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..705b2331d94b773e564c9161515abd5ba4afd915
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/constant_pad_nd_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor constant_pad_nd(const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value=0);
+TORCH_API at::Tensor & constant_pad_nd_out_symint(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/conv_tbc.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/conv_tbc.h
new file mode 100644
index 0000000000000000000000000000000000000000..fe78cc24db92352a6b0ef4c8e5b6fc3606fce970
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/conv_tbc.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/conv_tbc_ops.h>
+
+namespace at {
+
+
+// aten::conv_tbc(Tensor self, Tensor weight, Tensor bias, int pad=0) -> Tensor
+inline at::Tensor conv_tbc(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad=0) {
+    return at::_ops::conv_tbc::call(self, weight, bias, pad);
+}
+
+// aten::conv_tbc.out(Tensor self, Tensor weight, Tensor bias, int pad=0, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & conv_tbc_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad=0) {
+    return at::_ops::conv_tbc_out::call(self, weight, bias, pad, out);
+}
+// aten::conv_tbc.out(Tensor self, Tensor weight, Tensor bias, int pad=0, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & conv_tbc_outf(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad, at::Tensor & out) {
+    return at::_ops::conv_tbc_out::call(self, weight, bias, pad, out);
+}
+
+}
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_convolution_relu_cuda_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_convolution_relu_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..56f5abc309f11f4aeecbf8736e72ac424d926325
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_convolution_relu_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor cudnn_convolution_relu(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups);
+TORCH_API at::Tensor cudnn_convolution_relu_symint(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups);
+
+} // namespace cuda
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_hfftn.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_hfftn.h
new file mode 100644
index 0000000000000000000000000000000000000000..252d08594b292076c31efc6e86e90637f47b8132
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_hfftn.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/fft_hfftn_ops.h>
+
+namespace at {
+
+
+// aten::fft_hfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
+inline at::Tensor fft_hfftn(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_hfftn::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor fft_hfftn(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_hfftn::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm);
+  }
+}
+
+// aten::fft_hfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
+inline at::Tensor fft_hfftn_symint(const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_hfftn::call(self, s, dim, norm);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor fft_hfftn(const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_hfftn::call(self, s, dim, norm);
+  }
+}
+
+// aten::fft_hfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+inline const at::Tensor & fft_hfftn_out(const at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_hfftn_out::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  const at::Tensor & fft_hfftn_out(const at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_hfftn_out::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out);
+  }
+}
+
+// aten::fft_hfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+inline const at::Tensor & fft_hfftn_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
+    return at::_ops::fft_hfftn_out::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  const at::Tensor & fft_hfftn_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
+    return at::_ops::fft_hfftn_out::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out);
+  }
+}
+
+// aten::fft_hfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+inline const at::Tensor & fft_hfftn_symint_out(const at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_hfftn_out::call(self, s, dim, norm, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  const at::Tensor & fft_hfftn_out(const at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_hfftn_out::call(self, s, dim, norm, out);
+  }
+}
+
+// aten::fft_hfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+inline const at::Tensor & fft_hfftn_symint_outf(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
+    return at::_ops::fft_hfftn_out::call(self, s, dim, norm, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  const at::Tensor & fft_hfftn_outf(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
+    return at::_ops::fft_hfftn_out::call(self, s, dim, norm, out);
+  }
+}
+
+}
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ifft.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ifft.h
new file mode 100644
index 0000000000000000000000000000000000000000..02e9a00d7e15fbc3046fb0f9ff1957fbcbe9f785
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ifft.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/fft_ifft_ops.h>
+
+namespace at {
+
+
+// aten::fft_ifft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
+inline at::Tensor fft_ifft(const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_ifft::call(self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor fft_ifft(const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_ifft::call(self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm);
+  }
+}
+
+// aten::fft_ifft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
+inline at::Tensor fft_ifft_symint(const at::Tensor & self, c10::optional<c10::SymInt> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_ifft::call(self, n, dim, norm);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor fft_ifft(const at::Tensor & self, c10::optional<c10::SymInt> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_ifft::call(self, n, dim, norm);
+  }
+}
+
+// aten::fft_ifft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fft_ifft_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_ifft_out::call(self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & fft_ifft_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_ifft_out::call(self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out);
+  }
+}
+
+// aten::fft_ifft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fft_ifft_outf(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
+    return at::_ops::fft_ifft_out::call(self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & fft_ifft_outf(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
+    return at::_ops::fft_ifft_out::call(self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out);
+  }
+}
+
+// aten::fft_ifft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fft_ifft_symint_out(at::Tensor & out, const at::Tensor & self, c10::optional<c10::SymInt> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_ifft_out::call(self, n, dim, norm, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & fft_ifft_out(at::Tensor & out, const at::Tensor & self, c10::optional<c10::SymInt> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_ifft_out::call(self, n, dim, norm, out);
+  }
+}
+
+// aten::fft_ifft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fft_ifft_symint_outf(const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_ifft_out::call(self, n, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_ifft_outf(const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_ifft_out::call(self, n, dim, norm, out); + } +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..f5467d39cbc3b655e1a8398078b0e8331f49e6d3 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_fractional_max_pool3d_out_cpu : public at::meta::structured_fractional_max_pool3d { +void impl(const at::Tensor & self, int64_t poolSizeT, int64_t poolSizeH, int64_t poolSizeW, int64_t outputT, int64_t outputH, int64_t outputW, const at::Tensor & random_samples, int64_t numBatch, int64_t numPlanes, int64_t inputT, int64_t inputH, int64_t inputW, const at::Tensor & output, const at::Tensor & indices); +}; +struct TORCH_API structured_fractional_max_pool3d_out_cuda : public at::meta::structured_fractional_max_pool3d { +void impl(const at::Tensor & self, int64_t poolSizeT, int64_t poolSizeH, int64_t poolSizeW, int64_t outputT, int64_t outputH, int64_t outputW, const at::Tensor & random_samples, int64_t numBatch, int64_t numPlanes, int64_t inputT, int64_t inputH, int64_t inputW, const at::Tensor & output, const at::Tensor & indices); +}; +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/gather_compositeimplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/gather_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..54e866da05dfb7c5afa762c43ba89342cd12913a --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/gather_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
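+// Hedged usage sketch for the Dimname overloads declared below ("scores" and
+// "index" are illustrative tensors, not part of the generated file): a named
+// dimension is resolved to a positional dim before gathering, e.g.
+//   at::Tensor picked = at::gather(scores, at::Dimname::fromSymbol(c10::Symbol::dimname("seq")), index);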
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor gather(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad=false);
+TORCH_API at::Tensor & gather_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad=false);
+TORCH_API at::Tensor & gather_outf(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/is_set_to_cpu_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/is_set_to_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e06a867a3f36a53486cbb1c50d2a9a72ab62924d
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/is_set_to_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API bool is_set_to(const at::Tensor & self, const at::Tensor & tensor);
+
+} // namespace cpu
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/less_equal_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/less_equal_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..74e1a74747a23599d524de477ea2d2b45e7d5b86
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/less_equal_ops.h
@@ -0,0 +1,83 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API less_equal_Scalar_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::less_equal")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+};
+
+struct TORCH_API less_equal_Scalar {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::less_equal")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "less_equal.Scalar(Tensor self, Scalar other) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Scalar & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other);
+};
+
+struct TORCH_API less_equal_Tensor_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::less_equal")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+};
+
+struct TORCH_API less_equal_Tensor {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::less_equal")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "less_equal.Tensor(Tensor self, Tensor other) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API less_equal__Scalar {
+  using schema = at::Tensor & (at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::less_equal_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "less_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Scalar & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other);
+};
+
+struct TORCH_API less_equal__Tensor {
+  using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::less_equal_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "less_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Tensor & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other);
+};
+
+}} // namespace at::_ops
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..2912880a9d3e0cc7432101940632e1326bb65185
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/linalg_cross_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_linalg_cross_out : public at::meta::structured_linalg_cross {
+void impl(const at::Tensor & self, const at::Tensor & other, int64_t dim, const at::Tensor & out);
+};
+TORCH_API at::Tensor linalg_cross_zerotensor(const at::Tensor & self, const at::Tensor & other, int64_t dim=-1);
+} // namespace native
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_H.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_H.h
new file mode 100644
index 0000000000000000000000000000000000000000..6dc0725ce8ba645007fa4aa9f18fd1510e048960
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_H.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/matrix_H_ops.h>
+
+namespace at {
+
+
+
+}
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d.h
new file mode 100644
index 0000000000000000000000000000000000000000..c2749f33289af24060ff34e8f178f885e3b2ee2f
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/max_pool3d_ops.h>
+
+namespace at {
+
+
+// aten::max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
+inline at::Tensor max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
+    return at::_ops::max_pool3d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
+}
+
+}
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/native_batch_norm_backward_cpu_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/native_batch_norm_backward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..295692e5314e088bbf2ef8160ccdadce1a7700a1
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/native_batch_norm_backward_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_batch_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask);
+
+} // namespace cpu
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/norm_meta_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/norm_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1855d87c2d525d910ac31ce4e4e90ce4a7ee651b
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/norm_meta_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype);
+TORCH_API at::Tensor & norm_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype);
+TORCH_API at::Tensor & norm_outf(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype, at::Tensor & out);
+TORCH_API at::Tensor norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim=false);
+TORCH_API at::Tensor & norm_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim=false);
+TORCH_API at::Tensor & norm_outf(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/pad_sequence_compositeimplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/pad_sequence_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9906a613c8f40c0b52e882b6967263c39b4415f5
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/pad_sequence_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor pad_sequence(at::TensorList sequences, bool batch_first=false, double padding_value=0.0);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/poisson_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/poisson_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..a50988f59dd2ad87153f3fc790b75643bbceb80d
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/poisson_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & poisson_out(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out);
+TORCH_API at::Tensor _s_poisson_cpu(const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor _s_poisson_cuda(const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt);
+} // namespace native
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/polar_cuda_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/polar_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..91c660afadeaf1ee14cdc9089276ea35223c6e0d
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/polar_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
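+// Hedged usage sketch for the CUDA entries below ("out", "abs" and "angle"
+// are illustrative float tensors on the same CUDA device, not part of the
+// generated file): at::cuda::polar_out(out, abs, angle) writes the complex
+// values abs * exp(i * angle) into `out`.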
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor & polar_out(at::Tensor & out, const at::Tensor & abs, const at::Tensor & angle);
+TORCH_API at::Tensor & polar_outf(const at::Tensor & abs, const at::Tensor & angle, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/positive_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/positive_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..165a50b2a7f2319cb81538c172d6540aaec77bdf
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/positive_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor positive(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad3d_backward_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad3d_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..5d24a8093e936026b59bf58ba2979a680f95baaa
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad3d_backward_native.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/reflection_pad3d_backward_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_reflection_pad3d_backward_out_cpu : public at::meta::structured_reflection_pad3d_backward {
+void impl(const at::Tensor & grad_output, const at::Tensor & self, at::ArrayRef<int64_t> padding, const at::Tensor & grad_input);
+};
+struct TORCH_API structured_reflection_pad3d_backward_out_cuda : public at::meta::structured_reflection_pad3d_backward {
+void impl(const at::Tensor & grad_output, const at::Tensor & self, at::ArrayRef<int64_t> padding, const at::Tensor & grad_input);
+};
+} // namespace native
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_v_compositeexplicitautogradnonfunctional_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_v_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f9c31f1e3624b67ddced1e52cef0b38d25951da1
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_v_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor special_shifted_chebyshev_polynomial_v(const at::Tensor & x, const at::Tensor & n);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/thnn_conv2d_compositeimplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/thnn_conv2d_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..4690e7cf42eb5e54c14423364b08e04725ee9b5c
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/thnn_conv2d_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor thnn_conv2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0);
+TORCH_API at::Tensor thnn_conv2d_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0));
+TORCH_API at::Tensor & thnn_conv2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0);
+TORCH_API at::Tensor & thnn_conv2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out);
+TORCH_API at::Tensor & thnn_conv2d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0));
+TORCH_API at::Tensor & thnn_conv2d_symint_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/uniform_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/uniform_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..e7ef6fa9c860a8fc886482a56242d5e12d811a8a
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/uniform_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor uniform(const at::Tensor & self, double from=0, double to=1, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor & uniform_out(const at::Tensor & self, double from, double to, c10::optional<at::Generator> generator, at::Tensor & out);
+TORCH_API at::Tensor & uniform_(at::Tensor & self, double from=0, double to=1, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor & uniform_meta_(at::Tensor & self, double from=0, double to=1, c10::optional<at::Generator> generator=c10::nullopt);
+} // namespace native
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d_backward_cpu_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d_backward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..dfb7dc53d3103c132dd1e5710359ed2572de3df8
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d_backward_cpu_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor upsample_linear1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor upsample_linear1d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor & upsample_linear1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor & upsample_linear1d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales, at::Tensor & grad_input);
+TORCH_API at::Tensor & upsample_linear1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor & upsample_linear1d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales, at::Tensor & grad_input);
+
+} // namespace cpu
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest1d_cuda_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest1d_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a2b60affb84f8526d5ec1ac56a4f6c63c857c76c
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest1d_cuda_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor upsample_nearest1d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor upsample_nearest1d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor & upsample_nearest1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor & upsample_nearest1d_outf(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales, at::Tensor & out);
+TORCH_API at::Tensor & upsample_nearest1d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor & upsample_nearest1d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest2d_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest2d_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..b8fed149f2d8c95b8e0137a77e88f5fa4dbd008d
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest2d_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API upsample_nearest2d_vec {
+  using schema = at::Tensor (const at::Tensor &, at::OptionalSymIntArrayRef, c10::optional<at::ArrayRef<double>>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_nearest2d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "vec")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor")
+  static at::Tensor call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors);
+};
+
+struct TORCH_API upsample_nearest2d_out {
+  using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, c10::optional<double>, c10::optional<double>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_nearest2d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
+};
+
+struct TORCH_API upsample_nearest2d {
+  using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, c10::optional<double>, c10::optional<double>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_nearest2d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w);
+};
+
+}} // namespace at::_ops
diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/extension/uuid.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/extension/uuid.h
new file mode 100644
index 0000000000000000000000000000000000000000..42bb21cf0b2ed0846f774039f3ef58cc32649d4b
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/extension/uuid.h
@@ -0,0 +1,61 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include "arrow/extension_type.h"
+
+namespace arrow::extension {
+
+/// \brief UuidArray stores array of UUIDs. Underlying storage type is
+/// FixedSizeBinary(16).
+class ARROW_EXPORT UuidArray : public ExtensionArray {
+ public:
+  using ExtensionArray::ExtensionArray;
+};
+
+/// \brief UuidType is a canonical arrow extension type for UUIDs.
+/// UUIDs are stored as FixedSizeBinary(16) with big-endian notation and this
+/// does not interpret the bytes in any way. Specific UUID version is not
+/// required or guaranteed.
+class ARROW_EXPORT UuidType : public ExtensionType {
+ public:
+  /// \brief Construct a UuidType.
+  UuidType() : ExtensionType(fixed_size_binary(16)) {}
+
+  std::string extension_name() const override { return "arrow.uuid"; }
+  std::string ToString(bool show_metadata = false) const override;
+
+  bool ExtensionEquals(const ExtensionType& other) const override;
+
+  /// Create a UuidArray from ArrayData
+  std::shared_ptr<Array> MakeArray(std::shared_ptr<ArrayData> data) const override;
+
+  Result<std::shared_ptr<DataType>> Deserialize(
+      std::shared_ptr<DataType> storage_type,
+      const std::string& serialized) const override;
+
+  std::string Serialize() const override { return ""; }
+
+  /// \brief Create a UuidType instance
+  static Result<std::shared_ptr<DataType>> Make() { return std::make_shared<UuidType>(); }
+};
+
+/// \brief Return a UuidType instance.
+ARROW_EXPORT std::shared_ptr<DataType> uuid();
+
+}  // namespace arrow::extension
diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/io/concurrency.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/io/concurrency.h
new file mode 100644
index 0000000000000000000000000000000000000000..43ceb8debcecb24e0f859b8636057cacfc090bac
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/io/concurrency.h
@@ -0,0 +1,263 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <memory>
+
+#include "arrow/io/interfaces.h"
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/util/checked_cast.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace io {
+namespace internal {
+
+template <typename LockType>
+class SharedLockGuard {
+ public:
+  explicit SharedLockGuard(LockType* lock) : lock_(lock) { lock_->LockShared(); }
+
+  ~SharedLockGuard() { lock_->UnlockShared(); }
+
+ protected:
+  LockType* lock_;
+};
+
+template <typename LockType>
+class ExclusiveLockGuard {
+ public:
+  explicit ExclusiveLockGuard(LockType* lock) : lock_(lock) { lock_->LockExclusive(); }
+
+  ~ExclusiveLockGuard() { lock_->UnlockExclusive(); }
+
+ protected:
+  LockType* lock_;
+};
+
+// Debug concurrency checker that marks "shared" and "exclusive" code sections,
+// aborting if the concurrency rules get violated. Does nothing in release mode.
+// Note that we intentionally use the same class declaration in debug and
+// release builds in order to avoid runtime failures when e.g. loading a
+// release-built DLL with a debug-built application, or the reverse.
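+//
+// A minimal usage sketch (illustrative only):
+//   SharedExclusiveChecker checker;
+//   { auto guard = checker.shared_guard(); /* concurrent readers allowed */ }
+//   { auto guard = checker.exclusive_guard(); /* at most one writer */ }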
+
+class ARROW_EXPORT SharedExclusiveChecker {
+ public:
+  SharedExclusiveChecker();
+  void LockShared();
+  void UnlockShared();
+  void LockExclusive();
+  void UnlockExclusive();
+
+  SharedLockGuard<SharedExclusiveChecker> shared_guard() {
+    return SharedLockGuard<SharedExclusiveChecker>(this);
+  }
+
+  ExclusiveLockGuard<SharedExclusiveChecker> exclusive_guard() {
+    return ExclusiveLockGuard<SharedExclusiveChecker>(this);
+  }
+
+ protected:
+  struct Impl;
+  std::shared_ptr<Impl> impl_;
+};
+
+// Concurrency wrappers for IO classes that check the correctness of
+// concurrent calls to various methods. It is not necessary to wrap all
+// IO classes with these, only a few core classes that get used in tests.
+//
+// We're not using virtual inheritance here as virtual bases have poorly
+// understood semantic overhead which we'd be passing on to implementers
+// and users of these interfaces. Instead, we just duplicate the method
+// wrappers between those two classes.
+
+template <class Derived>
+class ARROW_EXPORT InputStreamConcurrencyWrapper : public InputStream {
+ public:
+  Status Close() final {
+    auto guard = lock_.exclusive_guard();
+    return derived()->DoClose();
+  }
+
+  Status Abort() final {
+    auto guard = lock_.exclusive_guard();
+    return derived()->DoAbort();
+  }
+
+  Result<int64_t> Tell() const final {
+    auto guard = lock_.exclusive_guard();
+    return derived()->DoTell();
+  }
+
+  Result<int64_t> Read(int64_t nbytes, void* out) final {
+    auto guard = lock_.exclusive_guard();
+    return derived()->DoRead(nbytes, out);
+  }
+
+  Result<std::shared_ptr<Buffer>> Read(int64_t nbytes) final {
+    auto guard = lock_.exclusive_guard();
+    return derived()->DoRead(nbytes);
+  }
+
+  Result<std::string_view> Peek(int64_t nbytes) final {
+    auto guard = lock_.exclusive_guard();
+    return derived()->DoPeek(nbytes);
+  }
+
+  /*
+  Methods to implement in derived class:
+
+  Status DoClose();
+  Result<int64_t> DoTell() const;
+  Result<int64_t> DoRead(int64_t nbytes, void* out);
+  Result<std::shared_ptr<Buffer>> DoRead(int64_t nbytes);
+
+  And optionally:
+
+  Status DoAbort() override;
+  Result<std::string_view> DoPeek(int64_t nbytes) override;
+
+  These methods should be protected in the derived class and
+  InputStreamConcurrencyWrapper declared as a friend with
+
+  friend InputStreamConcurrencyWrapper<Derived>;
+  */
+
+ protected:
+  // Default implementations. They are virtual because the derived class may
+  // have derived classes itself.
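+  // For example, a derived stream with no cheap abort path can rely on the
+  // fallback below, which forwards Abort() to the derived DoClose().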
+  virtual Status DoAbort() { return derived()->DoClose(); }
+
+  virtual Result<std::string_view> DoPeek(int64_t ARROW_ARG_UNUSED(nbytes)) {
+    return Status::NotImplemented("Peek not implemented");
+  }
+
+  Derived* derived() { return ::arrow::internal::checked_cast<Derived*>(this); }
+
+  const Derived* derived() const {
+    return ::arrow::internal::checked_cast<const Derived*>(this);
+  }
+
+  mutable SharedExclusiveChecker lock_;
+};
+
+template <class Derived>
+class ARROW_EXPORT RandomAccessFileConcurrencyWrapper : public RandomAccessFile {
+ public:
+  Status Close() final {
+    auto guard = lock_.exclusive_guard();
+    return derived()->DoClose();
+  }
+
+  Status Abort() final {
+    auto guard = lock_.exclusive_guard();
+    return derived()->DoAbort();
+  }
+
+  Result<int64_t> Tell() const final {
+    auto guard = lock_.exclusive_guard();
+    return derived()->DoTell();
+  }
+
+  Result<int64_t> Read(int64_t nbytes, void* out) final {
+    auto guard = lock_.exclusive_guard();
+    return derived()->DoRead(nbytes, out);
+  }
+
+  Result<std::shared_ptr<Buffer>> Read(int64_t nbytes) final {
+    auto guard = lock_.exclusive_guard();
+    return derived()->DoRead(nbytes);
+  }
+
+  Result<std::string_view> Peek(int64_t nbytes) final {
+    auto guard = lock_.exclusive_guard();
+    return derived()->DoPeek(nbytes);
+  }
+
+  Status Seek(int64_t position) final {
+    auto guard = lock_.exclusive_guard();
+    return derived()->DoSeek(position);
+  }
+
+  Result<int64_t> GetSize() final {
+    auto guard = lock_.shared_guard();
+    return derived()->DoGetSize();
+  }
+
+  // NOTE: ReadAt doesn't use stream pointer, but it is allowed to update it
+  // (it's the case on Windows when using ReadFileEx).
+  // So any method that relies on the current position (even if it doesn't
+  // update it, such as Peek) cannot run in parallel with ReadAt and has
+  // to use the exclusive_guard.
+
+  Result<int64_t> ReadAt(int64_t position, int64_t nbytes, void* out) final {
+    auto guard = lock_.shared_guard();
+    return derived()->DoReadAt(position, nbytes, out);
+  }
+
+  Result<std::shared_ptr<Buffer>> ReadAt(int64_t position, int64_t nbytes) final {
+    auto guard = lock_.shared_guard();
+    return derived()->DoReadAt(position, nbytes);
+  }
+
+  /*
+  Methods to implement in derived class:
+
+  Status DoClose();
+  Result<int64_t> DoTell() const;
+  Result<int64_t> DoRead(int64_t nbytes, void* out);
+  Result<std::shared_ptr<Buffer>> DoRead(int64_t nbytes);
+  Status DoSeek(int64_t position);
+  Result<int64_t> DoGetSize()
+  Result<int64_t> DoReadAt(int64_t position, int64_t nbytes, void* out);
+  Result<std::shared_ptr<Buffer>> DoReadAt(int64_t position, int64_t nbytes);
+
+  And optionally:
+
+  Status DoAbort() override;
+  Result<std::string_view> DoPeek(int64_t nbytes) override;
+
+  These methods should be protected in the derived class and
+  RandomAccessFileConcurrencyWrapper declared as a friend with
+
+  friend RandomAccessFileConcurrencyWrapper<Derived>;
+  */
+
+ protected:
+  // Default implementations. They are virtual because the derived class may
+  // have derived classes itself.
+  virtual Status DoAbort() { return derived()->DoClose(); }
+
+  virtual Result<std::string_view> DoPeek(int64_t ARROW_ARG_UNUSED(nbytes)) {
+    return Status::NotImplemented("Peek not implemented");
+  }
+
+  Derived* derived() { return ::arrow::internal::checked_cast<Derived*>(this); }
+
+  const Derived* derived() const {
+    return ::arrow::internal::checked_cast<const Derived*>(this);
+  }
+
+  mutable SharedExclusiveChecker lock_;
+};
+
+}  // namespace internal
+}  // namespace io
+}  // namespace arrow
diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/io/file.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/io/file.h
new file mode 100644
index 0000000000000000000000000000000000000000..50d4f2c4dfc90f8ffb8061f68125b24ae82bb7ed
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/io/file.h
@@ -0,0 +1,221 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// IO interface implementations for OS files
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "arrow/io/concurrency.h"
+#include "arrow/io/interfaces.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+class Buffer;
+class MemoryPool;
+class Status;
+
+namespace io {
+
+/// \brief An operating system file open in write-only mode.
+class ARROW_EXPORT FileOutputStream : public OutputStream {
+ public:
+  ~FileOutputStream() override;
+
+  /// \brief Open a local file for writing, truncating any existing file
+  /// \param[in] path with UTF8 encoding
+  /// \param[in] append append to existing file, otherwise truncate to 0 bytes
+  /// \return an open FileOutputStream
+  ///
+  /// When opening a new file, any existing file with the indicated path is
+  /// truncated to 0 bytes, deleting any existing data
+  static Result<std::shared_ptr<FileOutputStream>> Open(const std::string& path,
+                                                        bool append = false);
+
+  /// \brief Open a file descriptor for writing. The underlying file isn't
+  /// truncated.
+  /// \param[in] fd file descriptor
+  /// \return an open FileOutputStream
+  ///
+  /// The file descriptor becomes owned by the OutputStream, and will be closed
+  /// on Close() or destruction.
+  static Result<std::shared_ptr<FileOutputStream>> Open(int fd);
+
+  // OutputStream interface
+  Status Close() override;
+  bool closed() const override;
+  Result<int64_t> Tell() const override;
+
+  // Write bytes to the stream. Thread-safe
+  Status Write(const void* data, int64_t nbytes) override;
+  /// \cond FALSE
+  using Writable::Write;
+  /// \endcond
+
+  int file_descriptor() const;
+
+ private:
+  FileOutputStream();
+
+  class ARROW_NO_EXPORT FileOutputStreamImpl;
+  std::unique_ptr<FileOutputStreamImpl> impl_;
+};
+
+/// \brief An operating system file open in read-only mode.
+///
+/// Reads through this implementation are unbuffered. If many small reads
+/// need to be issued, it is recommended to use a buffering layer for good
+/// performance.
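+///
+/// A hedged usage sketch (error handling elided; "data.bin" is illustrative):
+///
+///   ARROW_ASSIGN_OR_RAISE(auto file, ReadableFile::Open("data.bin"));
+///   ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> prefix, file->Read(4096));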
+class ARROW_EXPORT ReadableFile
+    : public internal::RandomAccessFileConcurrencyWrapper<ReadableFile> {
+ public:
+  ~ReadableFile() override;
+
+  /// \brief Open a local file for reading
+  /// \param[in] path with UTF8 encoding
+  /// \param[in] pool a MemoryPool for memory allocations
+  /// \return ReadableFile instance
+  static Result<std::shared_ptr<ReadableFile>> Open(
+      const std::string& path, MemoryPool* pool = default_memory_pool());
+
+  /// \brief Open a local file for reading
+  /// \param[in] fd file descriptor
+  /// \param[in] pool a MemoryPool for memory allocations
+  /// \return ReadableFile instance
+  ///
+  /// The file descriptor becomes owned by the ReadableFile, and will be closed
+  /// on Close() or destruction.
+  static Result<std::shared_ptr<ReadableFile>> Open(
+      int fd, MemoryPool* pool = default_memory_pool());
+
+  bool closed() const override;
+
+  int file_descriptor() const;
+
+  Status WillNeed(const std::vector<ReadRange>& ranges) override;
+
+ private:
+  friend RandomAccessFileConcurrencyWrapper<ReadableFile>;
+
+  explicit ReadableFile(MemoryPool* pool);
+
+  Status DoClose();
+  Result<int64_t> DoTell() const;
+  Result<int64_t> DoRead(int64_t nbytes, void* buffer);
+  Result<std::shared_ptr<Buffer>> DoRead(int64_t nbytes);
+
+  /// \brief Thread-safe implementation of ReadAt
+  Result<int64_t> DoReadAt(int64_t position, int64_t nbytes, void* out);
+
+  /// \brief Thread-safe implementation of ReadAt
+  Result<std::shared_ptr<Buffer>> DoReadAt(int64_t position, int64_t nbytes);
+
+  Result<int64_t> DoGetSize();
+  Status DoSeek(int64_t position);
+
+  class ARROW_NO_EXPORT ReadableFileImpl;
+  std::unique_ptr<ReadableFileImpl> impl_;
+};
+
+/// \brief A file interface that uses memory-mapped files for memory interactions
+///
+/// This implementation supports zero-copy reads. The same class is used
+/// for both reading and writing.
+///
+/// If opening a file in a writable mode, it is not truncated first as with
+/// FileOutputStream.
+class ARROW_EXPORT MemoryMappedFile : public ReadWriteFileInterface {
+ public:
+  ~MemoryMappedFile() override;
+
+  /// Create new file with indicated size, return in read/write mode
+  static Result<std::shared_ptr<MemoryMappedFile>> Create(const std::string& path,
+                                                          int64_t size);
+
+  // mmap() with whole file
+  static Result<std::shared_ptr<MemoryMappedFile>> Open(const std::string& path,
+                                                        FileMode::type mode);
+
+  // mmap() with a region of file, the offset must be a multiple of the page size
+  static Result<std::shared_ptr<MemoryMappedFile>> Open(const std::string& path,
+                                                        FileMode::type mode,
+                                                        const int64_t offset,
+                                                        const int64_t length);
+
+  Status Close() override;
+
+  bool closed() const override;
+
+  Result<int64_t> Tell() const override;
+
+  Status Seek(int64_t position) override;
+
+  // Required by RandomAccessFile, copies memory into out. Not thread-safe
+  Result<int64_t> Read(int64_t nbytes, void* out) override;
+
+  // Zero copy read, moves position pointer. Not thread-safe
+  Result<std::shared_ptr<Buffer>> Read(int64_t nbytes) override;
+
+  // Zero-copy read, leaves position unchanged. Acquires a reader lock
+  // for the duration of slice creation (typically very short). Is thread-safe.
+  Result<std::shared_ptr<Buffer>> ReadAt(int64_t position, int64_t nbytes) override;
+
+  // Raw copy of the memory at specified position. Thread-safe, but
+  // locks out other readers for the duration of memcpy. Prefer the
+  // zero copy method
+  Result<int64_t> ReadAt(int64_t position, int64_t nbytes, void* out) override;
+
+  // Synchronous ReadAsync override
+  Future<std::shared_ptr<Buffer>> ReadAsync(const IOContext&, int64_t position,
+                                            int64_t nbytes) override;
+
+  Status WillNeed(const std::vector<ReadRange>& ranges) override;
+
+  bool supports_zero_copy() const override;
+
+  /// Write data at the current position in the file. Thread-safe
+  Status Write(const void* data, int64_t nbytes) override;
+  /// \cond FALSE
+  using Writable::Write;
+  /// \endcond
+
+  /// Set the size of the map to new_size.
+  Status Resize(int64_t new_size);
+
+  /// Write data at a particular position in the file. Thread-safe
+  Status WriteAt(int64_t position, const void* data, int64_t nbytes) override;
+
+  Result<int64_t> GetSize() override;
+
+  int file_descriptor() const;
+
+ private:
+  MemoryMappedFile();
+
+  Status WriteInternal(const void* data, int64_t nbytes);
+
+  class ARROW_NO_EXPORT MemoryMap;
+  std::shared_ptr<MemoryMap> memory_map_;
+};
+
+}  // namespace io
+}  // namespace arrow
diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/io/interfaces.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/io/interfaces.h
new file mode 100644
index 0000000000000000000000000000000000000000..b36c38c6d48688a793c2588477f97648a8b550c6
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/io/interfaces.h
@@ -0,0 +1,362 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <string_view>
+#include <vector>
+
+#include "arrow/io/type_fwd.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/cancel.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/type_fwd.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace io {
+
+struct ReadRange {
+  int64_t offset;
+  int64_t length;
+
+  friend bool operator==(const ReadRange& left, const ReadRange& right) {
+    return (left.offset == right.offset && left.length == right.length);
+  }
+  friend bool operator!=(const ReadRange& left, const ReadRange& right) {
+    return !(left == right);
+  }
+
+  bool Contains(const ReadRange& other) const {
+    return (offset <= other.offset && offset + length >= other.offset + other.length);
+  }
+};
+
+/// EXPERIMENTAL: options provider for IO tasks
+///
+/// Includes an Executor (which will be used to execute asynchronous reads),
+/// a MemoryPool (which will be used to allocate buffers when zero copy reads
+/// are not possible), and an external id (in case the executor receives tasks from
+/// multiple sources and must distinguish tasks associated with this IOContext).
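+///
+/// Hedged example: a cancellable context reusing the default pool can be
+/// built as IOContext(default_memory_pool(), stop_source.token()), where
+/// stop_source is an ::arrow::StopSource owned by the caller.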
+struct ARROW_EXPORT IOContext {
+  // No specified executor: will use a global IO thread pool
+  IOContext() : IOContext(default_memory_pool(), StopToken::Unstoppable()) {}
+
+  explicit IOContext(StopToken stop_token)
+      : IOContext(default_memory_pool(), std::move(stop_token)) {}
+
+  explicit IOContext(MemoryPool* pool, StopToken stop_token = StopToken::Unstoppable());
+
+  explicit IOContext(MemoryPool* pool, ::arrow::internal::Executor* executor,
+                     StopToken stop_token = StopToken::Unstoppable(),
+                     int64_t external_id = -1)
+      : pool_(pool),
+        executor_(executor),
+        external_id_(external_id),
+        stop_token_(std::move(stop_token)) {}
+
+  explicit IOContext(::arrow::internal::Executor* executor,
+                     StopToken stop_token = StopToken::Unstoppable(),
+                     int64_t external_id = -1)
+      : pool_(default_memory_pool()),
+        executor_(executor),
+        external_id_(external_id),
+        stop_token_(std::move(stop_token)) {}
+
+  MemoryPool* pool() const { return pool_; }
+
+  ::arrow::internal::Executor* executor() const { return executor_; }
+
+  // An application-specific ID, forwarded to executor task submissions
+  int64_t external_id() const { return external_id_; }
+
+  StopToken stop_token() const { return stop_token_; }
+
+ private:
+  MemoryPool* pool_;
+  ::arrow::internal::Executor* executor_;
+  int64_t external_id_;
+  StopToken stop_token_;
+};
+
+class ARROW_EXPORT FileInterface : public std::enable_shared_from_this<FileInterface> {
+ public:
+  virtual ~FileInterface() = 0;
+
+  /// \brief Close the stream cleanly
+  ///
+  /// For writable streams, this will attempt to flush any pending data
+  /// before releasing the underlying resource.
+  ///
+  /// After Close() is called, closed() returns true and the stream is not
+  /// available for further operations.
+  virtual Status Close() = 0;
+
+  /// \brief Close the stream asynchronously
+  ///
+  /// By default, this will just submit the synchronous Close() to the
+  /// default I/O thread pool. Subclasses may implement this in a more
+  /// efficient manner.
+  virtual Future<> CloseAsync();
+
+  /// \brief Close the stream abruptly
+  ///
+  /// This method does not guarantee that any pending data is flushed.
+  /// It merely releases any underlying resource used by the stream for
+  /// its operation.
+  ///
+  /// After Abort() is called, closed() returns true and the stream is not
+  /// available for further operations.
+  virtual Status Abort();
+
+  /// \brief Return the position in this stream
+  virtual Result<int64_t> Tell() const = 0;
+
+  /// \brief Return whether the stream is closed
+  virtual bool closed() const = 0;
+
+  FileMode::type mode() const { return mode_; }
+
+ protected:
+  FileInterface() : mode_(FileMode::READ) {}
+  FileMode::type mode_;
+  void set_mode(FileMode::type mode) { mode_ = mode; }
+
+ private:
+  ARROW_DISALLOW_COPY_AND_ASSIGN(FileInterface);
+};
+
+class ARROW_EXPORT Seekable {
+ public:
+  virtual ~Seekable() = default;
+  virtual Status Seek(int64_t position) = 0;
+};
+
+class ARROW_EXPORT Writable {
+ public:
+  virtual ~Writable() = default;
+
+  /// \brief Write the given data to the stream
+  ///
+  /// This method always processes the bytes in full. Depending on the
+  /// semantics of the stream, the data may be written out immediately,
+  /// held in a buffer, or written asynchronously. In the case where
+  /// the stream buffers the data, it will be copied. To avoid potentially
+  /// large copies, use the Write variant that takes an owned Buffer.
+  virtual Status Write(const void* data, int64_t nbytes) = 0;
+
+  /// \brief Write the given data to the stream
+  ///
+  /// Since the Buffer owns its memory, this method can avoid a copy if
+  /// buffering is required. See Write(const void*, int64_t) for details.
+  virtual Status Write(const std::shared_ptr<Buffer>& data);
+
+  /// \brief Flush buffered bytes, if any
+  virtual Status Flush();
+
+  Status Write(std::string_view data);
+};
+
+class ARROW_EXPORT Readable {
+ public:
+  virtual ~Readable() = default;
+
+  /// \brief Read data from current file position.
+  ///
+  /// Read at most `nbytes` from the current file position into `out`.
+  /// The number of bytes read is returned.
+  virtual Result<int64_t> Read(int64_t nbytes, void* out) = 0;
+
+  /// \brief Read data from current file position.
+  ///
+  /// Read at most `nbytes` from the current file position. Fewer bytes may
+  /// be read if EOF is reached. This method updates the current file position.
+  ///
+  /// In some cases (e.g. a memory-mapped file), this method may avoid a
+  /// memory copy.
+  virtual Result<std::shared_ptr<Buffer>> Read(int64_t nbytes) = 0;
+
+  /// EXPERIMENTAL: The IOContext associated with this file.
+  ///
+  /// By default, this is the same as default_io_context(), but it may be
+  /// overridden by subclasses.
+  virtual const IOContext& io_context() const;
+};
+
+class ARROW_EXPORT OutputStream : virtual public FileInterface, public Writable {
+ protected:
+  OutputStream() = default;
+};
+
+class ARROW_EXPORT InputStream : virtual public FileInterface, virtual public Readable {
+ public:
+  /// \brief Advance or skip stream indicated number of bytes
+  /// \param[in] nbytes the number to move forward
+  /// \return Status
+  Status Advance(int64_t nbytes);
+
+  /// \brief Return zero-copy string_view to upcoming bytes.
+  ///
+  /// Do not modify the stream position. The view becomes invalid after
+  /// any operation on the stream. May trigger buffering if the requested
+  /// size is larger than the number of buffered bytes.
+  ///
+  /// May return NotImplemented on streams that don't support it.
+  ///
+  /// \param[in] nbytes the maximum number of bytes to see
+  virtual Result<std::string_view> Peek(int64_t nbytes);
+
+  /// \brief Return true if InputStream is capable of zero copy Buffer reads
+  ///
+  /// Zero copy reads imply the use of Buffer-returning Read() overloads.
+  virtual bool supports_zero_copy() const;
+
+  /// \brief Read and return stream metadata
+  ///
+  /// If the stream implementation doesn't support metadata, empty metadata
+  /// is returned. Note that it is allowed to return a null pointer rather
+  /// than an allocated empty metadata.
+  virtual Result<std::shared_ptr<const KeyValueMetadata>> ReadMetadata();
+
+  /// \brief Read stream metadata asynchronously
+  virtual Future<std::shared_ptr<const KeyValueMetadata>> ReadMetadataAsync(
+      const IOContext& io_context);
+  Future<std::shared_ptr<const KeyValueMetadata>> ReadMetadataAsync();
+
+ protected:
+  InputStream() = default;
+};
+
+class ARROW_EXPORT RandomAccessFile : public InputStream, public Seekable {
+ public:
+  /// Necessary because we hold a std::unique_ptr
+  ~RandomAccessFile() override;
+
+  /// \brief Create an isolated InputStream that reads a segment of a
+  /// RandomAccessFile. Multiple such streams can be created and used
+  /// independently without interference
+  /// \param[in] file a file instance
+  /// \param[in] file_offset the starting position in the file
+  /// \param[in] nbytes the extent of bytes to read. The file should have
+  /// sufficient bytes available
+  static Result<std::shared_ptr<InputStream>> GetStream(
+      std::shared_ptr<RandomAccessFile> file, int64_t file_offset, int64_t nbytes);
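+
+  // Illustrative sketch (not upstream code): reading a 1 KiB segment through
+  // an isolated stream; `file` is assumed to be an open RandomAccessFile.
+  //
+  //   ARROW_ASSIGN_OR_RAISE(
+  //       auto segment, RandomAccessFile::GetStream(file, /*file_offset=*/128,
+  //                                                 /*nbytes=*/1024));
+  //   ARROW_ASSIGN_OR_RAISE(auto data, segment->Read(1024));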
+
+  /// \brief Return the total file size in bytes.
+  ///
+  /// This method does not read or move the current file position, so is safe
+  /// to call concurrently with e.g. ReadAt().
+  virtual Result<int64_t> GetSize() = 0;
+
+  /// \brief Read data from given file position.
+  ///
+  /// At most `nbytes` bytes are read. The number of bytes read is returned
+  /// (it can be less than `nbytes` if EOF is reached).
+  ///
+  /// This method can be safely called from multiple threads concurrently.
+  /// It is unspecified whether this method updates the file position or not.
+  ///
+  /// The default RandomAccessFile-provided implementation uses Seek() and Read(),
+  /// but subclasses may override it with a more efficient implementation
+  /// that doesn't depend on implicit file positioning.
+  ///
+  /// \param[in] position Where to read bytes from
+  /// \param[in] nbytes The number of bytes to read
+  /// \param[out] out The buffer to read bytes into
+  /// \return The number of bytes read, or an error
+  virtual Result<int64_t> ReadAt(int64_t position, int64_t nbytes, void* out);
+
+  /// \brief Read data from given file position.
+  ///
+  /// At most `nbytes` bytes are read, but it can be less if EOF is reached.
+  ///
+  /// \param[in] position Where to read bytes from
+  /// \param[in] nbytes The number of bytes to read
+  /// \return A buffer containing the bytes read, or an error
+  virtual Result<std::shared_ptr<Buffer>> ReadAt(int64_t position, int64_t nbytes);
+
+  /// EXPERIMENTAL: Read data asynchronously.
+  virtual Future<std::shared_ptr<Buffer>> ReadAsync(const IOContext&, int64_t position,
+                                                    int64_t nbytes);
+
+  /// EXPERIMENTAL: Read data asynchronously, using the file's IOContext.
+  Future<std::shared_ptr<Buffer>> ReadAsync(int64_t position, int64_t nbytes);
+
+  /// EXPERIMENTAL: Explicit multi-read.
+  /// \brief Request multiple reads at once
+  ///
+  /// The underlying filesystem may optimize these reads by coalescing small reads into
+  /// large reads or by breaking up large reads into multiple parallel smaller reads. The
+  /// reads should be issued in parallel if it makes sense for the filesystem.
+  ///
+  /// One future will be returned for each input read range. Multiple returned futures
+  /// may correspond to a single read. Or, a single returned future may be a combined
+  /// result of several individual reads.
+  ///
+  /// \param[in] ranges The ranges to read
+  /// \return One future per input range, each completing when the data from
+  /// that range is available
+  virtual std::vector<Future<std::shared_ptr<Buffer>>> ReadManyAsync(
+      const IOContext&, const std::vector<ReadRange>& ranges);
+
+  /// EXPERIMENTAL: Explicit multi-read, using the file's IOContext.
+  std::vector<Future<std::shared_ptr<Buffer>>> ReadManyAsync(
+      const std::vector<ReadRange>& ranges);
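+
+  // Illustrative sketch (not upstream code): issuing an asynchronous
+  // positional read and blocking on its result; `file` is assumed open.
+  //
+  //   Future<std::shared_ptr<Buffer>> fut = file->ReadAsync(/*position=*/0,
+  //                                                         /*nbytes=*/4096);
+  //   ARROW_ASSIGN_OR_RAISE(auto buffer, fut.result());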
+
+  /// EXPERIMENTAL: Inform that the given ranges may be read soon.
+  ///
+  /// Some implementations might arrange to prefetch some of the data.
+  /// However, no guarantee is made and the default implementation does nothing.
+  /// For robust prefetching, use ReadAt() or ReadAsync().
+  virtual Status WillNeed(const std::vector<ReadRange>& ranges);
+
+ protected:
+  RandomAccessFile();
+
+ private:
+  struct ARROW_NO_EXPORT Impl;
+  std::unique_ptr<Impl> interface_impl_;
+};
+
+class ARROW_EXPORT WritableFile : public OutputStream, public Seekable {
+ public:
+  virtual Status WriteAt(int64_t position, const void* data, int64_t nbytes) = 0;
+
+ protected:
+  WritableFile() = default;
+};
+
+class ARROW_EXPORT ReadWriteFileInterface : public RandomAccessFile, public WritableFile {
+ protected:
+  ReadWriteFileInterface() { RandomAccessFile::set_mode(FileMode::READWRITE); }
+};
+
+/// \brief Return an iterator on an input stream
+///
+/// The iterator yields a fixed-size block on each Next() call, except the
+/// last block in the stream which may be smaller.
+/// Once the end of stream is reached, Next() returns nullptr
+/// (unlike InputStream::Read() which returns an empty buffer).
+ARROW_EXPORT
+Result<Iterator<std::shared_ptr<Buffer>>> MakeInputStreamIterator(
+    std::shared_ptr<InputStream> stream, int64_t block_size);
+
+} // namespace io
+} // namespace arrow
diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/io/memory.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/io/memory.h
new file mode 100644
index 0000000000000000000000000000000000000000..5b760a2b5a9cfe1feca6066edb9a594467bc06fb
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/io/memory.h
@@ -0,0 +1,213 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Public API for different memory sharing / IO mechanisms
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <string_view>
+
+#include "arrow/io/concurrency.h"
+#include "arrow/io/interfaces.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+class Status;
+
+namespace io {
+
+/// \brief An output stream that writes to a resizable buffer
+class ARROW_EXPORT BufferOutputStream : public OutputStream {
+ public:
+  explicit BufferOutputStream(const std::shared_ptr<ResizableBuffer>& buffer);
+
+  /// \brief Create in-memory output stream with indicated capacity using a
+  /// memory pool
+  /// \param[in] initial_capacity the initial allocated internal capacity of
+  /// the OutputStream
+  /// \param[in,out] pool a MemoryPool to use for allocations
+  /// \return the created stream
+  static Result<std::shared_ptr<BufferOutputStream>> Create(
+      int64_t initial_capacity = 4096, MemoryPool* pool = default_memory_pool());
+
+  ~BufferOutputStream() override;
+
+  // Implement the OutputStream interface
+
+  /// Close the stream, preserving the buffer (retrieve it with Finish()).
+  Status Close() override;
+  bool closed() const override;
+  Result<int64_t> Tell() const override;
+  Status Write(const void* data, int64_t nbytes) override;
+
+  /// \cond FALSE
+  using OutputStream::Write;
+  /// \endcond
+
+  /// Close the stream and return the buffer
+  Result<std::shared_ptr<Buffer>> Finish();
+
+  /// \brief Initialize state of OutputStream with newly allocated memory and
+  /// set position to 0
+  /// \param[in] initial_capacity the starting allocated capacity
+  /// \param[in,out] pool the memory pool to use for allocations
+  /// \return Status
+  Status Reset(int64_t initial_capacity = 1024, MemoryPool* pool = default_memory_pool());
+
+  int64_t capacity() const { return capacity_; }
+
+ private:
+  BufferOutputStream();
+
+  // Ensures there is sufficient space available to write nbytes
+  Status Reserve(int64_t nbytes);
+
+  std::shared_ptr<ResizableBuffer> buffer_;
+  bool is_open_;
+  int64_t capacity_;
+  int64_t position_;
+  uint8_t* mutable_data_;
+};
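+
+// Illustrative sketch (not upstream code): building an in-memory payload by
+// writing through the OutputStream API and retrieving the final buffer.
+//
+//   ARROW_ASSIGN_OR_RAISE(auto sink, BufferOutputStream::Create(/*initial_capacity=*/64));
+//   ARROW_RETURN_NOT_OK(sink->Write("hello", 5));
+//   ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> payload, sink->Finish());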
+
+/// \brief A helper class to track the size of allocations
+///
+/// Writes to this stream do not copy or retain any data, they just bump
+/// a size counter that can be later used to know exactly which data size
+/// needs to be allocated for actual writing.
+class ARROW_EXPORT MockOutputStream : public OutputStream {
+ public:
+  MockOutputStream() : extent_bytes_written_(0), is_open_(true) {}
+
+  // Implement the OutputStream interface
+  Status Close() override;
+  bool closed() const override;
+  Result<int64_t> Tell() const override;
+  Status Write(const void* data, int64_t nbytes) override;
+  /// \cond FALSE
+  using Writable::Write;
+  /// \endcond
+
+  int64_t GetExtentBytesWritten() const { return extent_bytes_written_; }
+
+ private:
+  int64_t extent_bytes_written_;
+  bool is_open_;
+};
+
+/// \brief An output stream that writes into a fixed-size mutable buffer
+class ARROW_EXPORT FixedSizeBufferWriter : public WritableFile {
+ public:
+  /// Input buffer must be mutable, will abort if not
+  explicit FixedSizeBufferWriter(const std::shared_ptr<Buffer>& buffer);
+  ~FixedSizeBufferWriter() override;
+
+  Status Close() override;
+  bool closed() const override;
+  Status Seek(int64_t position) override;
+  Result<int64_t> Tell() const override;
+  Status Write(const void* data, int64_t nbytes) override;
+  /// \cond FALSE
+  using Writable::Write;
+  /// \endcond
+
+  Status WriteAt(int64_t position, const void* data, int64_t nbytes) override;
+
+  void set_memcopy_threads(int num_threads);
+  void set_memcopy_blocksize(int64_t blocksize);
+  void set_memcopy_threshold(int64_t threshold);
+
+ protected:
+  class FixedSizeBufferWriterImpl;
+  std::unique_ptr<FixedSizeBufferWriterImpl> impl_;
+};
+
+/// \class BufferReader
+/// \brief Random access zero-copy reads on an arrow::Buffer
+class ARROW_EXPORT BufferReader
+    : public internal::RandomAccessFileConcurrencyWrapper<BufferReader> {
+ public:
+  /// \brief Instantiate from std::shared_ptr<Buffer>.
+  ///
+  /// This is a zero-copy constructor.
+  explicit BufferReader(std::shared_ptr<Buffer> buffer);
+  ARROW_DEPRECATED(
+      "Deprecated in 14.0.0. Use FromString or BufferReader(std::shared_ptr<Buffer> "
+      "buffer) instead.")
+  explicit BufferReader(const Buffer& buffer);
+  ARROW_DEPRECATED(
+      "Deprecated in 14.0.0. Use FromString or BufferReader(std::shared_ptr<Buffer> "
+      "buffer) instead.")
+  BufferReader(const uint8_t* data, int64_t size);
+
+  /// \brief Instantiate from std::string_view. Does not own data
+  /// \deprecated Deprecated in 14.0.0. Use FromString or
+  /// BufferReader(std::shared_ptr<Buffer> buffer) instead.
+  ARROW_DEPRECATED(
+      "Deprecated in 14.0.0. Use FromString or BufferReader(std::shared_ptr<Buffer> "
+      "buffer) instead.")
+  explicit BufferReader(std::string_view data);
+
+  /// \brief Instantiate from std::string. Owns data.
+  static std::unique_ptr<BufferReader> FromString(std::string data);
+
+  bool closed() const override;
+
+  bool supports_zero_copy() const override;
+
+  std::shared_ptr<Buffer> buffer() const { return buffer_; }
+
+  // Synchronous ReadAsync override
+  Future<std::shared_ptr<Buffer>> ReadAsync(const IOContext&, int64_t position,
+                                            int64_t nbytes) override;
+  Status WillNeed(const std::vector<ReadRange>& ranges) override;
+
+ protected:
+  friend RandomAccessFileConcurrencyWrapper<BufferReader>;
+
+  Status DoClose();
+
+  Result<int64_t> DoRead(int64_t nbytes, void* buffer);
+  Result<std::shared_ptr<Buffer>> DoRead(int64_t nbytes);
+  Result<int64_t> DoReadAt(int64_t position, int64_t nbytes, void* out);
+  Result<std::shared_ptr<Buffer>> DoReadAt(int64_t position, int64_t nbytes);
+  Result<std::string_view> DoPeek(int64_t nbytes) override;
+
+  Result<int64_t> DoTell() const;
+  Status DoSeek(int64_t position);
+  Result<int64_t> DoGetSize();
+
+  Status CheckClosed() const {
+    if (!is_open_) {
+      return Status::Invalid("Operation forbidden on closed BufferReader");
+    }
+    return Status::OK();
+  }
+
+  std::shared_ptr<Buffer> buffer_;
+  const uint8_t* data_;
+  int64_t size_;
+  int64_t position_;
+  bool is_open_;
+};
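+
+// Illustrative sketch (not upstream code): zero-copy random access over an
+// in-memory string.
+//
+//   std::unique_ptr<BufferReader> reader = BufferReader::FromString("abcdef");
+//   ARROW_ASSIGN_OR_RAISE(auto slice, reader->ReadAt(/*position=*/2, /*nbytes=*/3));
+//   // `slice` now views "cde" without copying the underlying string.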
+
+} // namespace io
+} // namespace arrow
diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/io/slow.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/io/slow.h
new file mode 100644
index 0000000000000000000000000000000000000000..fdcc56dfa6af622fcfd9fd10984c1d0a87414149
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/io/slow.h
@@ -0,0 +1,118 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Slow stream implementations, mainly for testing and benchmarking
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <string_view>
+
+#include "arrow/io/interfaces.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+class Buffer;
+class Status;
+
+namespace io {
+
+class ARROW_EXPORT LatencyGenerator {
+ public:
+  virtual ~LatencyGenerator();
+
+  void Sleep();
+
+  virtual double NextLatency() = 0;
+
+  static std::shared_ptr<LatencyGenerator> Make(double average_latency);
+  static std::shared_ptr<LatencyGenerator> Make(double average_latency, int32_t seed);
+};
+
+// XXX use ConcurrencyWrapper? It could increase chances of finding a race.
+
+template <class StreamType>
+class SlowInputStreamBase : public StreamType {
+ public:
+  SlowInputStreamBase(std::shared_ptr<StreamType> stream,
+                      std::shared_ptr<LatencyGenerator> latencies)
+      : stream_(std::move(stream)), latencies_(std::move(latencies)) {}
+
+  SlowInputStreamBase(std::shared_ptr<StreamType> stream, double average_latency)
+      : stream_(std::move(stream)), latencies_(LatencyGenerator::Make(average_latency)) {}
+
+  SlowInputStreamBase(std::shared_ptr<StreamType> stream, double average_latency,
+                      int32_t seed)
+      : stream_(std::move(stream)),
+        latencies_(LatencyGenerator::Make(average_latency, seed)) {}
+
+ protected:
+  std::shared_ptr<StreamType> stream_;
+  std::shared_ptr<LatencyGenerator> latencies_;
+};
+
+/// \brief An InputStream wrapper that makes reads slower.
+///
+/// Read() calls are made slower by an average latency (in seconds).
+/// Actual latencies form a normal distribution closely centered
+/// on the average latency.
+/// Other calls are forwarded directly.
+class ARROW_EXPORT SlowInputStream : public SlowInputStreamBase<InputStream> {
+ public:
+  ~SlowInputStream() override;
+
+  using SlowInputStreamBase<InputStream>::SlowInputStreamBase;
+
+  Status Close() override;
+  Status Abort() override;
+  bool closed() const override;
+
+  Result<int64_t> Read(int64_t nbytes, void* out) override;
+  Result<std::shared_ptr<Buffer>> Read(int64_t nbytes) override;
+  Result<std::string_view> Peek(int64_t nbytes) override;
+
+  Result<int64_t> Tell() const override;
+};
+
+/// \brief A RandomAccessFile wrapper that makes reads slower.
+///
+/// Similar to SlowInputStream, but allows random access and seeking.
+class ARROW_EXPORT SlowRandomAccessFile : public SlowInputStreamBase<RandomAccessFile> {
+ public:
+  ~SlowRandomAccessFile() override;
+
+  using SlowInputStreamBase<RandomAccessFile>::SlowInputStreamBase;
+
+  Status Close() override;
+  Status Abort() override;
+  bool closed() const override;
+
+  Result<int64_t> Read(int64_t nbytes, void* out) override;
+  Result<std::shared_ptr<Buffer>> Read(int64_t nbytes) override;
+  Result<int64_t> ReadAt(int64_t position, int64_t nbytes, void* out) override;
+  Result<std::shared_ptr<Buffer>> ReadAt(int64_t position, int64_t nbytes) override;
+  Result<std::string_view> Peek(int64_t nbytes) override;
+
+  Result<int64_t> GetSize() override;
+  Status Seek(int64_t position) override;
+  Result<int64_t> Tell() const override;
+};
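+
+// Illustrative sketch (not upstream code): wrapping an existing stream so each
+// read pays roughly 10 ms of simulated latency, e.g. to exercise timeout
+// handling in tests; `base` is assumed to be an open InputStream.
+//
+//   auto slow = std::make_shared<SlowInputStream>(base, /*average_latency=*/0.01);
+//   ARROW_ASSIGN_OR_RAISE(auto buf, slow->Read(4096));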
+
+} // namespace io
+} // namespace arrow
diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/io/stdio.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/io/stdio.h
new file mode 100644
index 0000000000000000000000000000000000000000..9484ac7712427733862ecbc7d9ee932c5dfc0907
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/io/stdio.h
@@ -0,0 +1,82 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <memory>
+
+#include "arrow/io/interfaces.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace io {
+
+// Output stream that just writes to stdout.
+class ARROW_EXPORT StdoutStream : public OutputStream {
+ public:
+  StdoutStream();
+  ~StdoutStream() override {}
+
+  Status Close() override;
+  bool closed() const override;
+
+  Result<int64_t> Tell() const override;
+
+  Status Write(const void* data, int64_t nbytes) override;
+
+ private:
+  int64_t pos_;
+};
+
+// Output stream that just writes to stderr.
+class ARROW_EXPORT StderrStream : public OutputStream {
+ public:
+  StderrStream();
+  ~StderrStream() override {}
+
+  Status Close() override;
+  bool closed() const override;
+
+  Result<int64_t> Tell() const override;
+
+  Status Write(const void* data, int64_t nbytes) override;
+
+ private:
+  int64_t pos_;
+};
+
+// Input stream that just reads from stdin.
+class ARROW_EXPORT StdinStream : public InputStream {
+ public:
+  StdinStream();
+  ~StdinStream() override {}
+
+  Status Close() override;
+  bool closed() const override;
+
+  Result<int64_t> Tell() const override;
+
+  Result<int64_t> Read(int64_t nbytes, void* out) override;
+
+  Result<std::shared_ptr<Buffer>> Read(int64_t nbytes) override;
+
+ private:
+  int64_t pos_;
+};
+
+} // namespace io
+} // namespace arrow
diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/io/test_common.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/io/test_common.h
new file mode 100644
index 0000000000000000000000000000000000000000..9abaef1a665366b841d78788f7736257716dfe31
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/io/test_common.h
@@ -0,0 +1,67 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "arrow/io/interfaces.h"
+#include "arrow/testing/visibility.h"
+#include "arrow/type_fwd.h"
+
+namespace arrow {
+namespace io {
+
+class MemoryMappedFile;
+
+ARROW_TESTING_EXPORT
+void AssertFileContents(const std::string& path, const std::string& contents);
+
+ARROW_TESTING_EXPORT bool FileExists(const std::string& path);
+
+ARROW_TESTING_EXPORT Status PurgeLocalFileFromOsCache(const std::string& path);
+
+ARROW_TESTING_EXPORT
+Status ZeroMemoryMap(MemoryMappedFile* file);
+
+class ARROW_TESTING_EXPORT MemoryMapFixture {
+ public:
+  void TearDown();
+
+  void CreateFile(const std::string& path, int64_t size);
+
+  Result<std::shared_ptr<MemoryMappedFile>> InitMemoryMap(int64_t size,
+                                                          const std::string& path);
+
+  void AppendFile(const std::string& path);
+
+ private:
+  std::vector<std::string> tmp_files_;
+};
+
+class ARROW_TESTING_EXPORT TrackedRandomAccessFile : public io::RandomAccessFile {
+ public:
+  virtual int64_t num_reads() const = 0;
+  virtual int64_t bytes_read() const = 0;
+  virtual const std::vector<ReadRange>& get_read_ranges() const = 0;
+  static std::unique_ptr<TrackedRandomAccessFile> Make(io::RandomAccessFile* target);
+};
+
+} // namespace io
+} // namespace arrow
diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/io/type_fwd.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/io/type_fwd.h
new file mode 100644
index 0000000000000000000000000000000000000000..a1b9e626bba289a030d87d0a14bfa2f1fb2dc29d
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/io/type_fwd.h
@@ -0,0 +1,77 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include "arrow/type_fwd.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace io {
+
+struct FileMode {
+  enum type { READ, WRITE, READWRITE };
+};
+
+struct IOContext;
+struct CacheOptions;
+
+/// EXPERIMENTAL: convenience global singleton for default IOContext settings
+ARROW_EXPORT
+const IOContext& default_io_context();
+
+/// \brief Get the capacity of the global I/O thread pool
+///
+/// Return the number of worker threads in the thread pool to which
+/// Arrow dispatches various I/O-bound tasks. This is an ideal number,
+/// not necessarily the exact number of threads at a given point in time.
+///
+/// You can change this number using SetIOThreadPoolCapacity().
+ARROW_EXPORT int GetIOThreadPoolCapacity();
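+
+// Illustrative sketch (not upstream code): growing the global I/O pool before
+// starting I/O-heavy work. The chosen capacity of 16 is arbitrary.
+//
+//   if (GetIOThreadPoolCapacity() < 16) {
+//     ARROW_RETURN_NOT_OK(SetIOThreadPoolCapacity(16));
+//   }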
+
+/// \brief Set the capacity of the global I/O thread pool
+///
+/// Set the number of worker threads in the thread pool to which
+/// Arrow dispatches various I/O-bound tasks.
+///
+/// The current number is returned by GetIOThreadPoolCapacity().
+ARROW_EXPORT Status SetIOThreadPoolCapacity(int threads);
+
+class FileInterface;
+class Seekable;
+class Writable;
+class Readable;
+class OutputStream;
+class FileOutputStream;
+class InputStream;
+class ReadableFile;
+class RandomAccessFile;
+class MemoryMappedFile;
+class WritableFile;
+class ReadWriteFileInterface;
+
+class LatencyGenerator;
+
+class BufferOutputStream;
+class BufferReader;
+class CompressedInputStream;
+class CompressedOutputStream;
+class BufferedInputStream;
+class BufferedOutputStream;
+
+} // namespace io
+} // namespace arrow
diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/decimal.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/decimal.h
new file mode 100644
index 0000000000000000000000000000000000000000..1187037aed29e2cc5910e156c260fc9d9d81bff5
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/python/decimal.h
@@ -0,0 +1,128 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <string>
+
+#include "arrow/python/visibility.h"
+#include "arrow/type.h"
+
+namespace arrow {
+
+class Decimal128;
+class Decimal256;
+
+namespace py {
+
+class OwnedRef;
+
+//
+// Python Decimal support
+//
+
+namespace internal {
+
+// \brief Import the Python Decimal type
+ARROW_PYTHON_EXPORT
+Status ImportDecimalType(OwnedRef* decimal_type);
+
+// \brief Convert a Python Decimal object to a C++ string
+// \param[in] python_decimal A Python decimal.Decimal instance
+// \param[out] The string representation of the Python Decimal instance
+// \return The status of the operation
+ARROW_PYTHON_EXPORT
+Status PythonDecimalToString(PyObject* python_decimal, std::string* out);
+
+// \brief Convert a C++ std::string to a Python Decimal instance
+// \param[in] decimal_constructor The decimal type object
+// \param[in] decimal_string A decimal string
+// \return An instance of decimal.Decimal
+ARROW_PYTHON_EXPORT
+PyObject* DecimalFromString(PyObject* decimal_constructor,
+                            const std::string& decimal_string);
+
+// \brief Convert a Python decimal to an Arrow Decimal128 object
+// \param[in] python_decimal A Python decimal.Decimal instance
+// \param[in] arrow_type An instance of arrow::DecimalType
+// \param[out] out A pointer to a Decimal128
+// \return The status of the operation
+ARROW_PYTHON_EXPORT
+Status DecimalFromPythonDecimal(PyObject* python_decimal, const DecimalType& arrow_type,
+                                Decimal128* out);
+
+// \brief Convert a Python object to an Arrow Decimal128 object
+// \param[in] python_decimal A Python int or decimal.Decimal instance
+// \param[in] arrow_type An instance of arrow::DecimalType
+// \param[out] out A pointer to a Decimal128
+// \return The status of the operation
+ARROW_PYTHON_EXPORT
+Status DecimalFromPyObject(PyObject* obj, const DecimalType& arrow_type,
+                           Decimal128* out);
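+
+// Illustrative sketch (not upstream code): converting a Python decimal.Decimal
+// into a Decimal128; `py_obj` (a PyObject*) and `decimal_type` (a DecimalType
+// reference describing, say, decimal128(38, 9)) are assumed to exist.
+//
+//   Decimal128 converted;
+//   ARROW_RETURN_NOT_OK(
+//       internal::DecimalFromPythonDecimal(py_obj, decimal_type, &converted));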
+
+// \brief Convert a Python decimal to an Arrow Decimal256 object
+// \param[in] python_decimal A Python decimal.Decimal instance
+// \param[in] arrow_type An instance of arrow::DecimalType
+// \param[out] out A pointer to a Decimal256
+// \return The status of the operation
+ARROW_PYTHON_EXPORT
+Status DecimalFromPythonDecimal(PyObject* python_decimal, const DecimalType& arrow_type,
+                                Decimal256* out);
+
+// \brief Convert a Python object to an Arrow Decimal256 object
+// \param[in] python_decimal A Python int or decimal.Decimal instance
+// \param[in] arrow_type An instance of arrow::DecimalType
+// \param[out] out A pointer to a Decimal256
+// \return The status of the operation
+ARROW_PYTHON_EXPORT
+Status DecimalFromPyObject(PyObject* obj, const DecimalType& arrow_type, Decimal256* out);
+
+// \brief Check whether obj is an instance of Decimal
+ARROW_PYTHON_EXPORT
+bool PyDecimal_Check(PyObject* obj);
+
+// \brief Check whether obj is nan. This function will abort the program if the argument
+// is not a Decimal instance
+ARROW_PYTHON_EXPORT
+bool PyDecimal_ISNAN(PyObject* obj);
+
+// \brief Helper class to track and update the precision and scale of a decimal
+class ARROW_PYTHON_EXPORT DecimalMetadata {
+ public:
+  DecimalMetadata();
+  DecimalMetadata(int32_t precision, int32_t scale);
+
+  // \brief Adjust the precision and scale of a decimal type given a new precision and a
+  // new scale \param[in] suggested_precision A candidate precision \param[in]
+  // suggested_scale A candidate scale \return The status of the operation
+  Status Update(int32_t suggested_precision, int32_t suggested_scale);
+
+  // \brief A convenient interface for updating the precision and scale based on a Python
+  // Decimal object \param object A Python Decimal object \return The status of the
+  // operation
+  Status Update(PyObject* object);
+
+  int32_t precision() const { return precision_; }
+  int32_t scale() const { return scale_; }
+
+ private:
+  int32_t precision_;
+  int32_t scale_;
+};
+
+} // namespace internal
+} // namespace py
+} // namespace arrow
diff --git a/vllm/lib/python3.10/site-packages/wandb/vendor/watchdog_0_9_0/wandb_watchdog/observers/__pycache__/kqueue.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wandb/vendor/watchdog_0_9_0/wandb_watchdog/observers/__pycache__/kqueue.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..00b84a976a5030210061047302621761da9daaf4
Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wandb/vendor/watchdog_0_9_0/wandb_watchdog/observers/__pycache__/kqueue.cpython-310.pyc differ
diff --git a/vllm/lib/python3.10/site-packages/wandb/vendor/watchdog_0_9_0/wandb_watchdog/observers/__pycache__/read_directory_changes.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wandb/vendor/watchdog_0_9_0/wandb_watchdog/observers/__pycache__/read_directory_changes.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9c903bec601770a32929ddc678926b89a2675a58
Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wandb/vendor/watchdog_0_9_0/wandb_watchdog/observers/__pycache__/read_directory_changes.cpython-310.pyc differ
diff --git a/vllm/lib/python3.10/site-packages/wandb/vendor/watchdog_0_9_0/wandb_watchdog/utils/__init__.py b/vllm/lib/python3.10/site-packages/wandb/vendor/watchdog_0_9_0/wandb_watchdog/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e1b25803132190a2338f1f95ae6910295d2e473
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/wandb/vendor/watchdog_0_9_0/wandb_watchdog/utils/__init__.py @@ -0,0 +1,151 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011 Yesudeep Mangalapilly +# Copyright 2012 Google, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +""" +:module: watchdog.utils +:synopsis: Utility classes and functions. +:author: yesudeep@google.com (Yesudeep Mangalapilly) + +Classes +------- +.. autoclass:: BaseThread + :members: + :show-inheritance: + :inherited-members: + +""" +import os +import sys +import threading +from wandb_watchdog.utils import platform +from wandb_watchdog.utils.compat import Event + + +if sys.version_info[0] == 2 and platform.is_windows(): + # st_ino is not implemented in os.stat on this platform + import win32stat + stat = win32stat.stat +else: + stat = os.stat + + +def has_attribute(ob, attribute): + """ + :func:`hasattr` swallows exceptions. :func:`has_attribute` tests a Python object for the + presence of an attribute. + + :param ob: + object to inspect + :param attribute: + ``str`` for the name of the attribute. + """ + return getattr(ob, attribute, None) is not None + + +class UnsupportedLibc(Exception): + pass + + +class BaseThread(threading.Thread): + """ Convenience class for creating stoppable threads. """ + + def __init__(self): + threading.Thread.__init__(self) + self.daemon = True + self._stopped_event = Event() + + @property + def stopped_event(self): + return self._stopped_event + + def should_keep_running(self): + """Determines whether the thread should continue running.""" + return not self._stopped_event.is_set() + + def on_thread_stop(self): + """Override this method instead of :meth:`stop()`. + :meth:`stop()` calls this method. + + This method is called immediately after the thread is signaled to stop. + """ + pass + + def stop(self): + """Signals the thread to stop.""" + self._stopped_event.set() + self.on_thread_stop() + + def on_thread_start(self): + """Override this method instead of :meth:`start()`. :meth:`start()` + calls this method. + + This method is called right before this thread is started and this + object’s run() method is invoked. + """ + pass + + def start(self): + self.on_thread_start() + threading.Thread.start(self) + + +def load_module(module_name): + """Imports a module given its name and returns a handle to it.""" + try: + __import__(module_name) + except ImportError: + raise ImportError('No module named %s' % module_name) + return sys.modules[module_name] + + +def load_class(dotted_path): + """Loads and returns a class definition provided a dotted path + specification the last part of the dotted path is the class name + and there is at least one module name preceding the class name. + + Notes: + You will need to ensure that the module you are trying to load + exists in the Python path. + + Examples: + - module.name.ClassName # Provided module.name is in the Python path. + - module.ClassName # Provided module is in the Python path. 
+ + What won't work: + - ClassName + - modle.name.ClassName # Typo in module name. + - module.name.ClasNam # Typo in classname. + """ + dotted_path_split = dotted_path.split('.') + if len(dotted_path_split) > 1: + klass_name = dotted_path_split[-1] + module_name = '.'.join(dotted_path_split[:-1]) + + module = load_module(module_name) + if has_attribute(module, klass_name): + klass = getattr(module, klass_name) + return klass + # Finally create and return an instance of the class + # return klass(*args, **kwargs) + else: + raise AttributeError('Module %s does not have class attribute %s' % ( + module_name, klass_name)) + else: + raise ValueError( + 'Dotted module path %s must contain a module name and a classname' % dotted_path) diff --git a/vllm/lib/python3.10/site-packages/wandb/vendor/watchdog_0_9_0/wandb_watchdog/utils/compat.py b/vllm/lib/python3.10/site-packages/wandb/vendor/watchdog_0_9_0/wandb_watchdog/utils/compat.py new file mode 100644 index 0000000000000000000000000000000000000000..0f6e7947b924a0bd07a99b7e6523c8a161beac42 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wandb/vendor/watchdog_0_9_0/wandb_watchdog/utils/compat.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2014 Thomas Amland +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import sys + +__all__ = ['queue', 'Event'] + +try: + import queue +except ImportError: + import Queue as queue + + +if sys.version_info < (2, 7): + from watchdog.utils.event_backport import Event +else: + from threading import Event \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/wandb/vendor/watchdog_0_9_0/wandb_watchdog/utils/decorators.py b/vllm/lib/python3.10/site-packages/wandb/vendor/watchdog_0_9_0/wandb_watchdog/utils/decorators.py new file mode 100644 index 0000000000000000000000000000000000000000..abb325c1c1028746cf2a9a1a99bf81432bbae657 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wandb/vendor/watchdog_0_9_0/wandb_watchdog/utils/decorators.py @@ -0,0 +1,198 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Most of this code was obtained from the Python documentation online. + +"""Decorator utility functions. + +decorators: +- synchronized +- propertyx +- accepts +- returns +- singleton +- attrs +- deprecated +""" + +import functools +import warnings +import threading +import sys + + +def synchronized(lock=None): + """Decorator that synchronizes a method or a function with a mutex lock. + + Example usage: + + @synchronized() + def operation(self, a, b): + ... + """ + if lock is None: + lock = threading.Lock() + + def wrapper(function): + def new_function(*args, **kwargs): + lock.acquire() + try: + return function(*args, **kwargs) + finally: + lock.release() + + return new_function + + return wrapper + + +def propertyx(function): + """Decorator to easily create properties in classes. 
+ + Example: + + class Angle(object): + def __init__(self, rad): + self._rad = rad + + @property + def rad(): + def fget(self): + return self._rad + def fset(self, angle): + if isinstance(angle, Angle): + angle = angle.rad + self._rad = float(angle) + + Arguments: + - `function`: The function to be decorated. + """ + keys = ('fget', 'fset', 'fdel') + func_locals = {'doc': function.__doc__} + + def probe_func(frame, event, arg): + if event == 'return': + locals = frame.f_locals + func_locals.update(dict((k, locals.get(k)) for k in keys)) + sys.settrace(None) + return probe_func + + sys.settrace(probe_func) + function() + return property(**func_locals) + + +def accepts(*types): + """Decorator to ensure that the decorated function accepts the given types as arguments. + + Example: + @accepts(int, (int,float)) + @returns((int,float)) + def func(arg1, arg2): + return arg1 * arg2 + """ + + def check_accepts(f): + assert len(types) == f.__code__.co_argcount + + def new_f(*args, **kwds): + for (a, t) in zip(args, types): + assert isinstance(a, t),\ + "arg %r does not match %s" % (a, t) + return f(*args, **kwds) + + new_f.__name__ = f.__name__ + return new_f + + return check_accepts + + +def returns(rtype): + """Decorator to ensure that the decorated function returns the given + type as argument. + + Example: + @accepts(int, (int,float)) + @returns((int,float)) + def func(arg1, arg2): + return arg1 * arg2 + """ + + def check_returns(f): + def new_f(*args, **kwds): + result = f(*args, **kwds) + assert isinstance(result, rtype),\ + "return value %r does not match %s" % (result, rtype) + return result + + new_f.__name__ = f.__name__ + return new_f + + return check_returns + + +def singleton(cls): + """Decorator to ensures a class follows the singleton pattern. + + Example: + @singleton + class MyClass: + ... + """ + instances = {} + + def getinstance(): + if cls not in instances: + instances[cls] = cls() + return instances[cls] + + return getinstance + + +def attrs(**kwds): + """Decorator to add attributes to a function. + + Example: + + @attrs(versionadded="2.2", + author="Guido van Rossum") + def mymethod(f): + ... + """ + + def decorate(f): + for k in kwds: + setattr(f, k, kwds[k]) + return f + + return decorate + + +def deprecated(func): + """This is a decorator which can be used to mark functions + as deprecated. It will result in a warning being emitted + when the function is used. + + ## Usage examples ## + @deprecated + def my_func(): + pass + + @other_decorators_must_be_upper + @deprecated + def my_func(): + pass + """ + + @functools.wraps(func) + def new_func(*args, **kwargs): + warnings.warn_explicit( + "Call to deprecated function %(funcname)s." % { + 'funcname': func.__name__, + }, + category=DeprecationWarning, + filename=func.__code__.co_filename, + lineno=func.__code__.co_firstlineno + 1 + ) + return func(*args, **kwargs) + + return new_func diff --git a/vllm/lib/python3.10/site-packages/wandb/vendor/watchdog_0_9_0/wandb_watchdog/utils/dirsnapshot.py b/vllm/lib/python3.10/site-packages/wandb/vendor/watchdog_0_9_0/wandb_watchdog/utils/dirsnapshot.py new file mode 100644 index 0000000000000000000000000000000000000000..c321d0ffe45c3b943e4649c1dbeec9931bf0db5d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wandb/vendor/watchdog_0_9_0/wandb_watchdog/utils/dirsnapshot.py @@ -0,0 +1,293 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011 Yesudeep Mangalapilly +# Copyright 2012 Google, Inc. 
+# Copyright 2014 Thomas Amland +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +:module: watchdog.utils.dirsnapshot +:synopsis: Directory snapshots and comparison. +:author: yesudeep@google.com (Yesudeep Mangalapilly) + +.. ADMONITION:: Where are the moved events? They "disappeared" + + This implementation does not take partition boundaries + into consideration. It will only work when the directory + tree is entirely on the same file system. More specifically, + any part of the code that depends on inode numbers can + break if partition boundaries are crossed. In these cases, + the snapshot diff will represent file/directory movement as + created and deleted events. + +Classes +------- +.. autoclass:: DirectorySnapshot + :members: + :show-inheritance: + +.. autoclass:: DirectorySnapshotDiff + :members: + :show-inheritance: + +""" + +import errno +import os +from stat import S_ISDIR +from wandb_watchdog.utils import stat as default_stat + + +class DirectorySnapshotDiff(object): + """ + Compares two directory snapshots and creates an object that represents + the difference between the two snapshots. + + :param ref: + The reference directory snapshot. + :type ref: + :class:`DirectorySnapshot` + :param snapshot: + The directory snapshot which will be compared + with the reference snapshot. 
+ :type snapshot: + :class:`DirectorySnapshot` + """ + + def __init__(self, ref, snapshot): + created = snapshot.paths - ref.paths + deleted = ref.paths - snapshot.paths + + # check that all unchanged paths have the same inode + for path in ref.paths & snapshot.paths: + if ref.inode(path) != snapshot.inode(path): + created.add(path) + deleted.add(path) + + # find moved paths + moved = set() + for path in set(deleted): + inode = ref.inode(path) + new_path = snapshot.path(inode) + if new_path: + # file is not deleted but moved + deleted.remove(path) + moved.add((path, new_path)) + + for path in set(created): + inode = snapshot.inode(path) + old_path = ref.path(inode) + if old_path: + created.remove(path) + moved.add((old_path, path)) + + # find modified paths + # first check paths that have not moved + modified = set() + for path in ref.paths & snapshot.paths: + if ref.inode(path) == snapshot.inode(path): + if ref.mtime(path) != snapshot.mtime(path): + modified.add(path) + + for (old_path, new_path) in moved: + if ref.mtime(old_path) != snapshot.mtime(new_path): + modified.add(old_path) + + self._dirs_created = [path for path in created if snapshot.isdir(path)] + self._dirs_deleted = [path for path in deleted if ref.isdir(path)] + self._dirs_modified = [path for path in modified if ref.isdir(path)] + self._dirs_moved = [(frm, to) for (frm, to) in moved if ref.isdir(frm)] + + self._files_created = list(created - set(self._dirs_created)) + self._files_deleted = list(deleted - set(self._dirs_deleted)) + self._files_modified = list(modified - set(self._dirs_modified)) + self._files_moved = list(moved - set(self._dirs_moved)) + + @property + def files_created(self): + """List of files that were created.""" + return self._files_created + + @property + def files_deleted(self): + """List of files that were deleted.""" + return self._files_deleted + + @property + def files_modified(self): + """List of files that were modified.""" + return self._files_modified + + @property + def files_moved(self): + """ + List of files that were moved. + + Each event is a two-tuple the first item of which is the path + that has been renamed to the second item in the tuple. + """ + return self._files_moved + + @property + def dirs_modified(self): + """ + List of directories that were modified. + """ + return self._dirs_modified + + @property + def dirs_moved(self): + """ + List of directories that were moved. + + Each event is a two-tuple the first item of which is the path + that has been renamed to the second item in the tuple. + """ + return self._dirs_moved + + @property + def dirs_deleted(self): + """ + List of directories that were deleted. + """ + return self._dirs_deleted + + @property + def dirs_created(self): + """ + List of directories that were created. + """ + return self._dirs_created + +class DirectorySnapshot(object): + """ + A snapshot of stat information of files in a directory. + + :param path: + The directory path for which a snapshot should be taken. + :type path: + ``str`` + :param recursive: + ``True`` if the entire directory tree should be included in the + snapshot; ``False`` otherwise. + :type recursive: + ``bool`` + :param walker_callback: + .. deprecated:: 0.7.2 + :param stat: + Use custom stat function that returns a stat structure for path. + Currently only st_dev, st_ino, st_mode and st_mtime are needed. + + A function with the signature ``walker_callback(path, stat_info)`` + which will be called for every entry in the directory tree. + :param listdir: + Use custom listdir function. 
See ``os.listdir`` for details. + """ + + def __init__(self, path, recursive=True, + walker_callback=(lambda p, s: None), + stat=default_stat, + listdir=os.listdir): + self._stat_info = {} + self._inode_to_path = {} + + st = stat(path) + self._stat_info[path] = st + self._inode_to_path[(st.st_ino, st.st_dev)] = path + + def walk(root): + try: + paths = [os.path.join(root, name) for name in listdir(root)] + except OSError as e: + # Directory may have been deleted between finding it in the directory + # list of its parent and trying to delete its contents. If this + # happens we treat it as empty. + if e.errno == errno.ENOENT: + return + else: + raise + entries = [] + for p in paths: + try: + entries.append((p, stat(p))) + except OSError: + continue + for _ in entries: + yield _ + if recursive: + for path, st in entries: + if S_ISDIR(st.st_mode): + for _ in walk(path): + yield _ + + for p, st in walk(path): + i = (st.st_ino, st.st_dev) + self._inode_to_path[i] = p + self._stat_info[p] = st + walker_callback(p, st) + + @property + def paths(self): + """ + Set of file/directory paths in the snapshot. + """ + return set(self._stat_info.keys()) + + def path(self, id): + """ + Returns path for id. None if id is unknown to this snapshot. + """ + return self._inode_to_path.get(id) + + def inode(self, path): + """ Returns an id for path. """ + st = self._stat_info[path] + return (st.st_ino, st.st_dev) + + def isdir(self, path): + return S_ISDIR(self._stat_info[path].st_mode) + + def mtime(self, path): + return self._stat_info[path].st_mtime + + def stat_info(self, path): + """ + Returns a stat information object for the specified path from + the snapshot. + + Attached information is subject to change. Do not use unless + you specify `stat` in constructor. Use :func:`inode`, :func:`mtime`, + :func:`isdir` instead. + + :param path: + The path for which stat information should be obtained + from a snapshot. + """ + return self._stat_info[path] + + def __sub__(self, previous_dirsnap): + """Allow subtracting a DirectorySnapshot object instance from + another. + + :returns: + A :class:`DirectorySnapshotDiff` object. + """ + return DirectorySnapshotDiff(previous_dirsnap, self) + + def __str__(self): + return self.__repr__() + + def __repr__(self): + return str(self._stat_info) diff --git a/vllm/lib/python3.10/site-packages/wandb/vendor/watchdog_0_9_0/wandb_watchdog/utils/event_backport.py b/vllm/lib/python3.10/site-packages/wandb/vendor/watchdog_0_9_0/wandb_watchdog/utils/event_backport.py new file mode 100644 index 0000000000000000000000000000000000000000..5c136e46d54839347c36e7f3ff81ff64f51b0d5b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wandb/vendor/watchdog_0_9_0/wandb_watchdog/utils/event_backport.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Backport of Event from py2.7 (method wait in py2.6 returns None) + +from threading import Condition, Lock + + +class Event(object): + + def __init__(self,): + self.__cond = Condition(Lock()) + self.__flag = False + + def isSet(self): + return self.__flag + + is_set = isSet + + def set(self): + self.__cond.acquire() + try: + self.__flag = True + self.__cond.notify_all() + finally: + self.__cond.release() + + def clear(self): + self.__cond.acquire() + try: + self.__flag = False + finally: + self.__cond.release() + + def wait(self, timeout=None): + self.__cond.acquire() + try: + if not self.__flag: + self.__cond.wait(timeout) + return self.__flag + finally: + self.__cond.release()
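+
+# Illustrative sketch (not part of the vendored sources): comparing two
+# DirectorySnapshot instances from wandb_watchdog.utils.dirsnapshot above;
+# the watched path is hypothetical.
+#
+#   from wandb_watchdog.utils.dirsnapshot import DirectorySnapshot
+#
+#   before = DirectorySnapshot('/tmp/watched')
+#   # ... files are created, moved, or deleted on disk ...
+#   after = DirectorySnapshot('/tmp/watched')
+#   diff = after - before  # __sub__ builds DirectorySnapshotDiff(before, after)
+#   print(diff.files_created, diff.files_moved, diff.dirs_deleted)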