diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/_bunch.py b/parrot/lib/python3.10/site-packages/scipy/_lib/_bunch.py new file mode 100644 index 0000000000000000000000000000000000000000..bb562e4348f46dc1137afe3d3ce50f1149c85376 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/_lib/_bunch.py @@ -0,0 +1,225 @@ +import sys as _sys +from keyword import iskeyword as _iskeyword + + +def _validate_names(typename, field_names, extra_field_names): + """ + Ensure that all the given names are valid Python identifiers that + do not start with '_'. Also check that there are no duplicates + among field_names + extra_field_names. + """ + for name in [typename] + field_names + extra_field_names: + if not isinstance(name, str): + raise TypeError('typename and all field names must be strings') + if not name.isidentifier(): + raise ValueError('typename and all field names must be valid ' + f'identifiers: {name!r}') + if _iskeyword(name): + raise ValueError('typename and all field names cannot be a ' + f'keyword: {name!r}') + + seen = set() + for name in field_names + extra_field_names: + if name.startswith('_'): + raise ValueError('Field names cannot start with an underscore: ' + f'{name!r}') + if name in seen: + raise ValueError(f'Duplicate field name: {name!r}') + seen.add(name) + + +# Note: This code is adapted from CPython:Lib/collections/__init__.py +def _make_tuple_bunch(typename, field_names, extra_field_names=None, + module=None): + """ + Create a namedtuple-like class with additional attributes. + + This function creates a subclass of tuple that acts like a namedtuple + and that has additional attributes. + + The additional attributes are listed in `extra_field_names`. The + values assigned to these attributes are not part of the tuple. + + The reason this function exists is to allow functions in SciPy + that currently return a tuple or a namedtuple to returned objects + that have additional attributes, while maintaining backwards + compatibility. + + This should only be used to enhance *existing* functions in SciPy. + New functions are free to create objects as return values without + having to maintain backwards compatibility with an old tuple or + namedtuple return value. + + Parameters + ---------- + typename : str + The name of the type. + field_names : list of str + List of names of the values to be stored in the tuple. These names + will also be attributes of instances, so the values in the tuple + can be accessed by indexing or as attributes. At least one name + is required. See the Notes for additional restrictions. + extra_field_names : list of str, optional + List of names of values that will be stored as attributes of the + object. See the notes for additional restrictions. + + Returns + ------- + cls : type + The new class. + + Notes + ----- + There are restrictions on the names that may be used in `field_names` + and `extra_field_names`: + + * The names must be unique--no duplicates allowed. + * The names must be valid Python identifiers, and must not begin with + an underscore. + * The names must not be Python keywords (e.g. 'def', 'and', etc., are + not allowed). + + Examples + -------- + >>> from scipy._lib._bunch import _make_tuple_bunch + + Create a class that acts like a namedtuple with length 2 (with field + names `x` and `y`) that will also have the attributes `w` and `beta`: + + >>> Result = _make_tuple_bunch('Result', ['x', 'y'], ['w', 'beta']) + + `Result` is the new class. 
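The class itself
    records which names are tuple fields and which are extra attributes
    (a quick sketch of introspecting it; both attributes are set up by
    this module):

    >>> Result._fields
    ('x', 'y')
    >>> Result._extra_fields
    ('w', 'beta')

+    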
We call it with keyword arguments to create + a new instance with given values. + + >>> result1 = Result(x=1, y=2, w=99, beta=0.5) + >>> result1 + Result(x=1, y=2, w=99, beta=0.5) + + `result1` acts like a tuple of length 2: + + >>> len(result1) + 2 + >>> result1[:] + (1, 2) + + The values assigned when the instance was created are available as + attributes: + + >>> result1.y + 2 + >>> result1.beta + 0.5 + """ + if len(field_names) == 0: + raise ValueError('field_names must contain at least one name') + + if extra_field_names is None: + extra_field_names = [] + _validate_names(typename, field_names, extra_field_names) + + typename = _sys.intern(str(typename)) + field_names = tuple(map(_sys.intern, field_names)) + extra_field_names = tuple(map(_sys.intern, extra_field_names)) + + all_names = field_names + extra_field_names + arg_list = ', '.join(field_names) + full_list = ', '.join(all_names) + repr_fmt = ''.join(('(', + ', '.join(f'{name}=%({name})r' for name in all_names), + ')')) + tuple_new = tuple.__new__ + _dict, _tuple, _zip = dict, tuple, zip + + # Create all the named tuple methods to be added to the class namespace + + s = f"""\ +def __new__(_cls, {arg_list}, **extra_fields): + return _tuple_new(_cls, ({arg_list},)) + +def __init__(self, {arg_list}, **extra_fields): + for key in self._extra_fields: + if key not in extra_fields: + raise TypeError("missing keyword argument '%s'" % (key,)) + for key, val in extra_fields.items(): + if key not in self._extra_fields: + raise TypeError("unexpected keyword argument '%s'" % (key,)) + self.__dict__[key] = val + +def __setattr__(self, key, val): + if key in {repr(field_names)}: + raise AttributeError("can't set attribute %r of class %r" + % (key, self.__class__.__name__)) + else: + self.__dict__[key] = val +""" + del arg_list + namespace = {'_tuple_new': tuple_new, + '__builtins__': dict(TypeError=TypeError, + AttributeError=AttributeError), + '__name__': f'namedtuple_{typename}'} + exec(s, namespace) + __new__ = namespace['__new__'] + __new__.__doc__ = f'Create new instance of {typename}({full_list})' + __init__ = namespace['__init__'] + __init__.__doc__ = f'Instantiate instance of {typename}({full_list})' + __setattr__ = namespace['__setattr__'] + + def __repr__(self): + 'Return a nicely formatted representation string' + return self.__class__.__name__ + repr_fmt % self._asdict() + + def _asdict(self): + 'Return a new dict which maps field names to their values.' + out = _dict(_zip(self._fields, self)) + out.update(self.__dict__) + return out + + def __getnewargs_ex__(self): + 'Return self as a plain tuple. Used by copy and pickle.' 
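+        # The tuple part restores the positional fields; __dict__ carries
+        # the extra fields, so both survive a pickling round-trip.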
+ return _tuple(self), self.__dict__ + + # Modify function metadata to help with introspection and debugging + for method in (__new__, __repr__, _asdict, __getnewargs_ex__): + method.__qualname__ = f'{typename}.{method.__name__}' + + # Build-up the class namespace dictionary + # and use type() to build the result class + class_namespace = { + '__doc__': f'{typename}({full_list})', + '_fields': field_names, + '__new__': __new__, + '__init__': __init__, + '__repr__': __repr__, + '__setattr__': __setattr__, + '_asdict': _asdict, + '_extra_fields': extra_field_names, + '__getnewargs_ex__': __getnewargs_ex__, + } + for index, name in enumerate(field_names): + + def _get(self, index=index): + return self[index] + class_namespace[name] = property(_get) + for name in extra_field_names: + + def _get(self, name=name): + return self.__dict__[name] + class_namespace[name] = property(_get) + + result = type(typename, (tuple,), class_namespace) + + # For pickling to work, the __module__ variable needs to be set to the + # frame where the named tuple is created. Bypass this step in environments + # where sys._getframe is not defined (Jython for example) or sys._getframe + # is not defined for arguments greater than 0 (IronPython), or where the + # user has specified a particular module. + if module is None: + try: + module = _sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + pass + if module is not None: + result.__module__ = module + __new__.__module__ = module + + return result diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/_ccallback.py b/parrot/lib/python3.10/site-packages/scipy/_lib/_ccallback.py new file mode 100644 index 0000000000000000000000000000000000000000..1980d06f5489e6633fb611c35bfb56903bd63e7f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/_lib/_ccallback.py @@ -0,0 +1,251 @@ +from . import _ccallback_c + +import ctypes + +PyCFuncPtr = ctypes.CFUNCTYPE(ctypes.c_void_p).__bases__[0] + +ffi = None + +class CData: + pass + +def _import_cffi(): + global ffi, CData + + if ffi is not None: + return + + try: + import cffi + ffi = cffi.FFI() + CData = ffi.CData + except ImportError: + ffi = False + + +class LowLevelCallable(tuple): + """ + Low-level callback function. + + Some functions in SciPy take as arguments callback functions, which + can either be python callables or low-level compiled functions. Using + compiled callback functions can improve performance somewhat by + avoiding wrapping data in Python objects. + + Such low-level functions in SciPy are wrapped in `LowLevelCallable` + objects, which can be constructed from function pointers obtained from + ctypes, cffi, Cython, or contained in Python `PyCapsule` objects. + + .. seealso:: + + Functions accepting low-level callables: + + `scipy.integrate.quad`, `scipy.ndimage.generic_filter`, + `scipy.ndimage.generic_filter1d`, `scipy.ndimage.geometric_transform` + + Usage examples: + + :ref:`ndimage-ccallbacks`, :ref:`quad-callbacks` + + Parameters + ---------- + function : {PyCapsule, ctypes function pointer, cffi function pointer} + Low-level callback function. + user_data : {PyCapsule, ctypes void pointer, cffi void pointer} + User data to pass on to the callback function. + signature : str, optional + Signature of the function. If omitted, determined from *function*, + if possible. + + Attributes + ---------- + function + Callback function given. + user_data + User data given. + signature + Signature of the function. 
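+
+    As a minimal sketch of construction from ctypes (the shared library
+    ``mylib.so`` and its routine ``f`` are hypothetical; any C function
+    whose signature matches the consuming routine works the same way)::
+
+        import ctypes
+        from scipy import LowLevelCallable
+        from scipy.integrate import quad
+
+        lib = ctypes.CDLL('./mylib.so')   # hypothetical shared library
+        # C side: double f(int n, double *x, void *user_data)
+        lib.f.restype = ctypes.c_double
+        lib.f.argtypes = (ctypes.c_int, ctypes.POINTER(ctypes.c_double),
+                          ctypes.c_void_p)
+
+        func = LowLevelCallable(lib.f)    # signature inferred from ctypes
+        result, abserr = quad(func, 0.0, 1.0)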
+ + Methods + ------- + from_cython + Class method for constructing callables from Cython C-exported + functions. + + Notes + ----- + The argument ``function`` can be one of: + + - PyCapsule, whose name contains the C function signature + - ctypes function pointer + - cffi function pointer + + The signature of the low-level callback must match one of those expected + by the routine it is passed to. + + If constructing low-level functions from a PyCapsule, the name of the + capsule must be the corresponding signature, in the format:: + + return_type (arg1_type, arg2_type, ...) + + For example:: + + "void (double)" + "double (double, int *, void *)" + + The context of a PyCapsule passed in as ``function`` is used as ``user_data``, + if an explicit value for ``user_data`` was not given. + + """ + + # Make the class immutable + __slots__ = () + + def __new__(cls, function, user_data=None, signature=None): + # We need to hold a reference to the function & user data, + # to prevent them going out of scope + item = cls._parse_callback(function, user_data, signature) + return tuple.__new__(cls, (item, function, user_data)) + + def __repr__(self): + return f"LowLevelCallable({self.function!r}, {self.user_data!r})" + + @property + def function(self): + return tuple.__getitem__(self, 1) + + @property + def user_data(self): + return tuple.__getitem__(self, 2) + + @property + def signature(self): + return _ccallback_c.get_capsule_signature(tuple.__getitem__(self, 0)) + + def __getitem__(self, idx): + raise ValueError() + + @classmethod + def from_cython(cls, module, name, user_data=None, signature=None): + """ + Create a low-level callback function from an exported Cython function. + + Parameters + ---------- + module : module + Cython module where the exported function resides + name : str + Name of the exported function + user_data : {PyCapsule, ctypes void pointer, cffi void pointer}, optional + User data to pass on to the callback function. + signature : str, optional + Signature of the function. If omitted, determined from *function*. 
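+
+        As a sketch (``my_cython_module`` is hypothetical: an extension
+        module whose functions were declared ``cdef api``, so it exposes
+        the ``__pyx_capi__`` dictionary this method reads)::
+
+            import my_cython_module  # hypothetical Cython extension
+            from scipy import LowLevelCallable
+
+            callback = LowLevelCallable.from_cython(my_cython_module,
+                                                    'my_func')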
+ + """ + try: + function = module.__pyx_capi__[name] + except AttributeError as e: + message = "Given module is not a Cython module with __pyx_capi__ attribute" + raise ValueError(message) from e + except KeyError as e: + message = f"No function {name!r} found in __pyx_capi__ of the module" + raise ValueError(message) from e + return cls(function, user_data, signature) + + @classmethod + def _parse_callback(cls, obj, user_data=None, signature=None): + _import_cffi() + + if isinstance(obj, LowLevelCallable): + func = tuple.__getitem__(obj, 0) + elif isinstance(obj, PyCFuncPtr): + func, signature = _get_ctypes_func(obj, signature) + elif isinstance(obj, CData): + func, signature = _get_cffi_func(obj, signature) + elif _ccallback_c.check_capsule(obj): + func = obj + else: + raise ValueError("Given input is not a callable or a " + "low-level callable (pycapsule/ctypes/cffi)") + + if isinstance(user_data, ctypes.c_void_p): + context = _get_ctypes_data(user_data) + elif isinstance(user_data, CData): + context = _get_cffi_data(user_data) + elif user_data is None: + context = 0 + elif _ccallback_c.check_capsule(user_data): + context = user_data + else: + raise ValueError("Given user data is not a valid " + "low-level void* pointer (pycapsule/ctypes/cffi)") + + return _ccallback_c.get_raw_capsule(func, signature, context) + + +# +# ctypes helpers +# + +def _get_ctypes_func(func, signature=None): + # Get function pointer + func_ptr = ctypes.cast(func, ctypes.c_void_p).value + + # Construct function signature + if signature is None: + signature = _typename_from_ctypes(func.restype) + " (" + for j, arg in enumerate(func.argtypes): + if j == 0: + signature += _typename_from_ctypes(arg) + else: + signature += ", " + _typename_from_ctypes(arg) + signature += ")" + + return func_ptr, signature + + +def _typename_from_ctypes(item): + if item is None: + return "void" + elif item is ctypes.c_void_p: + return "void *" + + name = item.__name__ + + pointer_level = 0 + while name.startswith("LP_"): + pointer_level += 1 + name = name[3:] + + if name.startswith('c_'): + name = name[2:] + + if pointer_level > 0: + name += " " + "*"*pointer_level + + return name + + +def _get_ctypes_data(data): + # Get voidp pointer + return ctypes.cast(data, ctypes.c_void_p).value + + +# +# CFFI helpers +# + +def _get_cffi_func(func, signature=None): + # Get function pointer + func_ptr = ffi.cast('uintptr_t', func) + + # Get signature + if signature is None: + signature = ffi.getctype(ffi.typeof(func)).replace('(*)', ' ') + + return func_ptr, signature + + +def _get_cffi_data(data): + # Get pointer + return ffi.cast('uintptr_t', data) diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/_gcutils.py b/parrot/lib/python3.10/site-packages/scipy/_lib/_gcutils.py new file mode 100644 index 0000000000000000000000000000000000000000..854ae36228614f3eb8849e9f95abf0dd387b5d35 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/_lib/_gcutils.py @@ -0,0 +1,105 @@ +""" +Module for testing automatic garbage collection of objects + +.. 
autosummary:: + :toctree: generated/ + + set_gc_state - enable or disable garbage collection + gc_state - context manager for given state of garbage collector + assert_deallocated - context manager to check for circular references on object + +""" +import weakref +import gc + +from contextlib import contextmanager +from platform import python_implementation + +__all__ = ['set_gc_state', 'gc_state', 'assert_deallocated'] + + +IS_PYPY = python_implementation() == 'PyPy' + + +class ReferenceError(AssertionError): + pass + + +def set_gc_state(state): + """ Set status of garbage collector """ + if gc.isenabled() == state: + return + if state: + gc.enable() + else: + gc.disable() + + +@contextmanager +def gc_state(state): + """ Context manager to set state of garbage collector to `state` + + Parameters + ---------- + state : bool + True for gc enabled, False for disabled + + Examples + -------- + >>> with gc_state(False): + ... assert not gc.isenabled() + >>> with gc_state(True): + ... assert gc.isenabled() + """ + orig_state = gc.isenabled() + set_gc_state(state) + yield + set_gc_state(orig_state) + + +@contextmanager +def assert_deallocated(func, *args, **kwargs): + """Context manager to check that object is deallocated + + This is useful for checking that an object can be freed directly by + reference counting, without requiring gc to break reference cycles. + GC is disabled inside the context manager. + + This check is not available on PyPy. + + Parameters + ---------- + func : callable + Callable to create object to check + \\*args : sequence + positional arguments to `func` in order to create object to check + \\*\\*kwargs : dict + keyword arguments to `func` in order to create object to check + + Examples + -------- + >>> class C: pass + >>> with assert_deallocated(C) as c: + ... # do something + ... del c + + >>> class C: + ... def __init__(self): + ... self._circular = self # Make circular reference + >>> with assert_deallocated(C) as c: #doctest: +IGNORE_EXCEPTION_DETAIL + ... # do something + ... del c + Traceback (most recent call last): + ... 
+ ReferenceError: Remaining reference(s) to object + """ + if IS_PYPY: + raise RuntimeError("assert_deallocated is unavailable on PyPy") + + with gc_state(False): + obj = func(*args, **kwargs) + ref = weakref.ref(obj) + yield obj + del obj + if ref() is not None: + raise ReferenceError("Remaining reference(s) to object") diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/_test_ccallback.cpython-310-x86_64-linux-gnu.so b/parrot/lib/python3.10/site-packages/scipy/_lib/_test_ccallback.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..bfb217d3ba8618170d57b5451f369660eb4ede64 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/_lib/_test_ccallback.cpython-310-x86_64-linux-gnu.so differ diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/_test_deprecation_call.cpython-310-x86_64-linux-gnu.so b/parrot/lib/python3.10/site-packages/scipy/_lib/_test_deprecation_call.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..18e11f349a5c869c5e27e41dafa35145f6a5fae8 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/_lib/_test_deprecation_call.cpython-310-x86_64-linux-gnu.so differ diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/_testutils.py b/parrot/lib/python3.10/site-packages/scipy/_lib/_testutils.py new file mode 100644 index 0000000000000000000000000000000000000000..4a830edcdf0cb19d68eb914c98c3b1dcfafab823 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/_lib/_testutils.py @@ -0,0 +1,337 @@ +""" +Generic test utilities. + +""" + +import inspect +import os +import re +import shutil +import subprocess +import sys +import sysconfig +from importlib.util import module_from_spec, spec_from_file_location + +import numpy as np +import scipy + +try: + # Need type: ignore[import-untyped] for mypy >= 1.6 + import cython # type: ignore[import-untyped] + from Cython.Compiler.Version import ( # type: ignore[import-untyped] + version as cython_version, + ) +except ImportError: + cython = None +else: + from scipy._lib import _pep440 + required_version = '3.0.8' + if _pep440.parse(cython_version) < _pep440.Version(required_version): + # too old or wrong cython, skip Cython API tests + cython = None + + +__all__ = ['PytestTester', 'check_free_memory', '_TestPythranFunc', 'IS_MUSL'] + + +IS_MUSL = False +# alternate way is +# from packaging.tags import sys_tags +# _tags = list(sys_tags()) +# if 'musllinux' in _tags[0].platform: +_v = sysconfig.get_config_var('HOST_GNU_TYPE') or '' +if 'musl' in _v: + IS_MUSL = True + + +IS_EDITABLE = 'editable' in scipy.__path__[0] + + +class FPUModeChangeWarning(RuntimeWarning): + """Warning about FPU mode change""" + pass + + +class PytestTester: + """ + Run tests for this namespace + + ``scipy.test()`` runs tests for all of SciPy, with the default settings. + When used from a submodule (e.g., ``scipy.cluster.test()``, only the tests + for that namespace are run. + + Parameters + ---------- + label : {'fast', 'full'}, optional + Whether to run only the fast tests, or also those marked as slow. + Default is 'fast'. + verbose : int, optional + Test output verbosity. Default is 1. + extra_argv : list, optional + Arguments to pass through to Pytest. + doctests : bool, optional + Whether to run doctests or not. Default is False. + coverage : bool, optional + Whether to run tests with code coverage measurements enabled. + Default is False. + tests : list of str, optional + List of module names to run tests for. 
By default, uses the module + from which the ``test`` function is called. + parallel : int, optional + Run tests in parallel with pytest-xdist, if number given is larger than + 1. Default is 1. + + """ + def __init__(self, module_name): + self.module_name = module_name + + def __call__(self, label="fast", verbose=1, extra_argv=None, doctests=False, + coverage=False, tests=None, parallel=None): + import pytest + + module = sys.modules[self.module_name] + module_path = os.path.abspath(module.__path__[0]) + + pytest_args = ['--showlocals', '--tb=short'] + + if doctests: + pytest_args += [ + "--doctest-modules", + "--ignore=scipy/interpolate/_interpnd_info.py", + "--ignore=scipy/_lib/array_api_compat", + "--ignore=scipy/_lib/highs", + "--ignore=scipy/_lib/unuran", + "--ignore=scipy/_lib/_gcutils.py", + "--ignore=scipy/_lib/doccer.py", + "--ignore=scipy/_lib/_uarray", + ] + + if extra_argv: + pytest_args += list(extra_argv) + + if verbose and int(verbose) > 1: + pytest_args += ["-" + "v"*(int(verbose)-1)] + + if coverage: + pytest_args += ["--cov=" + module_path] + + if label == "fast": + pytest_args += ["-m", "not slow"] + elif label != "full": + pytest_args += ["-m", label] + + if tests is None: + tests = [self.module_name] + + if parallel is not None and parallel > 1: + if _pytest_has_xdist(): + pytest_args += ['-n', str(parallel)] + else: + import warnings + warnings.warn('Could not run tests in parallel because ' + 'pytest-xdist plugin is not available.', + stacklevel=2) + + pytest_args += ['--pyargs'] + list(tests) + + try: + code = pytest.main(pytest_args) + except SystemExit as exc: + code = exc.code + + return (code == 0) + + +class _TestPythranFunc: + ''' + These are situations that can be tested in our pythran tests: + - A function with multiple array arguments and then + other positional and keyword arguments. + - A function with array-like keywords (e.g. `def somefunc(x0, x1=None)`. + Note: list/tuple input is not yet tested! 
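+
+    A sketch of a concrete subclass wiring these pieces together
+    (``somefunc`` is a hypothetical pythran-compiled function; the
+    attributes set here are described just below)::
+
+        class TestSomeFunc(_TestPythranFunc):
+            def setup_method(self):
+                super().setup_method()
+                self.arguments = {0: (np.arange(10.0), self.ALL_FLOAT)}
+                self.pythranfunc = somefunc  # hypothetical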
+ + `self.arguments`: A dictionary which key is the index of the argument, + value is tuple(array value, all supported dtypes) + `self.partialfunc`: A function used to freeze some non-array argument + that of no interests in the original function + ''' + ALL_INTEGER = [np.int8, np.int16, np.int32, np.int64, np.intc, np.intp] + ALL_FLOAT = [np.float32, np.float64] + ALL_COMPLEX = [np.complex64, np.complex128] + + def setup_method(self): + self.arguments = {} + self.partialfunc = None + self.expected = None + + def get_optional_args(self, func): + # get optional arguments with its default value, + # used for testing keywords + signature = inspect.signature(func) + optional_args = {} + for k, v in signature.parameters.items(): + if v.default is not inspect.Parameter.empty: + optional_args[k] = v.default + return optional_args + + def get_max_dtype_list_length(self): + # get the max supported dtypes list length in all arguments + max_len = 0 + for arg_idx in self.arguments: + cur_len = len(self.arguments[arg_idx][1]) + if cur_len > max_len: + max_len = cur_len + return max_len + + def get_dtype(self, dtype_list, dtype_idx): + # get the dtype from dtype_list via index + # if the index is out of range, then return the last dtype + if dtype_idx > len(dtype_list)-1: + return dtype_list[-1] + else: + return dtype_list[dtype_idx] + + def test_all_dtypes(self): + for type_idx in range(self.get_max_dtype_list_length()): + args_array = [] + for arg_idx in self.arguments: + new_dtype = self.get_dtype(self.arguments[arg_idx][1], + type_idx) + args_array.append(self.arguments[arg_idx][0].astype(new_dtype)) + self.pythranfunc(*args_array) + + def test_views(self): + args_array = [] + for arg_idx in self.arguments: + args_array.append(self.arguments[arg_idx][0][::-1][::-1]) + self.pythranfunc(*args_array) + + def test_strided(self): + args_array = [] + for arg_idx in self.arguments: + args_array.append(np.repeat(self.arguments[arg_idx][0], + 2, axis=0)[::2]) + self.pythranfunc(*args_array) + + +def _pytest_has_xdist(): + """ + Check if the pytest-xdist plugin is installed, providing parallel tests + """ + # Check xdist exists without importing, otherwise pytests emits warnings + from importlib.util import find_spec + return find_spec('xdist') is not None + + +def check_free_memory(free_mb): + """ + Check *free_mb* of memory is available, otherwise do pytest.skip + """ + import pytest + + try: + mem_free = _parse_size(os.environ['SCIPY_AVAILABLE_MEM']) + msg = '{} MB memory required, but environment SCIPY_AVAILABLE_MEM={}'.format( + free_mb, os.environ['SCIPY_AVAILABLE_MEM']) + except KeyError: + mem_free = _get_mem_available() + if mem_free is None: + pytest.skip("Could not determine available memory; set SCIPY_AVAILABLE_MEM " + "variable to free memory in MB to run the test.") + msg = f'{free_mb} MB memory required, but {mem_free/1e6} MB available' + + if mem_free < free_mb * 1e6: + pytest.skip(msg) + + +def _parse_size(size_str): + suffixes = {'': 1e6, + 'b': 1.0, + 'k': 1e3, 'M': 1e6, 'G': 1e9, 'T': 1e12, + 'kb': 1e3, 'Mb': 1e6, 'Gb': 1e9, 'Tb': 1e12, + 'kib': 1024.0, 'Mib': 1024.0**2, 'Gib': 1024.0**3, 'Tib': 1024.0**4} + m = re.match(r'^\s*(\d+)\s*({})\s*$'.format('|'.join(suffixes.keys())), + size_str, + re.I) + if not m or m.group(2) not in suffixes: + raise ValueError("Invalid size string") + + return float(m.group(1)) * suffixes[m.group(2)] + + +def _get_mem_available(): + """ + Get information about memory available, not counting swap. 
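+
+    The companion ``_parse_size`` above handles the unit suffixes accepted
+    by the ``SCIPY_AVAILABLE_MEM`` override; a short sketch of its behaviour:
+
+    >>> _parse_size('3 Gb')    # decimal units
+    3000000000.0
+    >>> _parse_size('512Mib')  # binary units
+    536870912.0
+    >>> _parse_size('2048')    # bare numbers are megabytes
+    2048000000.0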
+ """ + try: + import psutil + return psutil.virtual_memory().available + except (ImportError, AttributeError): + pass + + if sys.platform.startswith('linux'): + info = {} + with open('/proc/meminfo') as f: + for line in f: + p = line.split() + info[p[0].strip(':').lower()] = float(p[1]) * 1e3 + + if 'memavailable' in info: + # Linux >= 3.14 + return info['memavailable'] + else: + return info['memfree'] + info['cached'] + + return None + +def _test_cython_extension(tmp_path, srcdir): + """ + Helper function to test building and importing Cython modules that + make use of the Cython APIs for BLAS, LAPACK, optimize, and special. + """ + import pytest + try: + subprocess.check_call(["meson", "--version"]) + except FileNotFoundError: + pytest.skip("No usable 'meson' found") + + # build the examples in a temporary directory + mod_name = os.path.split(srcdir)[1] + shutil.copytree(srcdir, tmp_path / mod_name) + build_dir = tmp_path / mod_name / 'tests' / '_cython_examples' + target_dir = build_dir / 'build' + os.makedirs(target_dir, exist_ok=True) + + # Ensure we use the correct Python interpreter even when `meson` is + # installed in a different Python environment (see numpy#24956) + native_file = str(build_dir / 'interpreter-native-file.ini') + with open(native_file, 'w') as f: + f.write("[binaries]\n") + f.write(f"python = '{sys.executable}'") + + if sys.platform == "win32": + subprocess.check_call(["meson", "setup", + "--buildtype=release", + "--native-file", native_file, + "--vsenv", str(build_dir)], + cwd=target_dir, + ) + else: + subprocess.check_call(["meson", "setup", + "--native-file", native_file, str(build_dir)], + cwd=target_dir + ) + subprocess.check_call(["meson", "compile", "-vv"], cwd=target_dir) + + # import without adding the directory to sys.path + suffix = sysconfig.get_config_var('EXT_SUFFIX') + + def load(modname): + so = (target_dir / modname).with_suffix(suffix) + spec = spec_from_file_location(modname, so) + mod = module_from_spec(spec) + spec.loader.exec_module(mod) + return mod + + # test that the module can be imported + return load("extending"), load("extending_cpp") diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__init__.py b/parrot/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..79712ae1bdb76eb6155e0823ec1992dd28bd0282 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__init__.py @@ -0,0 +1,22 @@ +""" +NumPy Array API compatibility library + +This is a small wrapper around NumPy and CuPy that is compatible with the +Array API standard https://data-apis.org/array-api/latest/. See also NEP 47 +https://numpy.org/neps/nep-0047-array-api-standard.html. + +Unlike array_api_strict, this is not a strict minimal implementation of the +Array API, but rather just an extension of the main NumPy namespace with +changes needed to be compliant with the Array API. See +https://numpy.org/doc/stable/reference/array_api.html for a full list of +changes. In particular, unlike array_api_strict, this package does not use a +separate Array object, but rather just uses numpy.ndarray directly. + +Library authors using the Array API may wish to test against array_api_strict +to ensure they are not using functionality outside of the standard, but prefer +this implementation for the default when working with NumPy arrays. 
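+
+A sketch of typical use (``array_namespace`` is assumed here to be part of
+the public API re-exported from ``.common``):
+
+    import numpy as np
+    from scipy._lib.array_api_compat import array_namespace
+
+    x = np.asarray([1.0, 2.0, 3.0])
+    xp = array_namespace(x)  # compatibility namespace wrapping NumPy
+    y = xp.sum(x)            # standard Array API call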
+ +""" +__version__ = '1.5.1' + +from .common import * # noqa: F401, F403 diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/decorator.py b/parrot/lib/python3.10/site-packages/scipy/_lib/decorator.py new file mode 100644 index 0000000000000000000000000000000000000000..02121774d3c2a9407a73366bb3e5915387a571d0 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/_lib/decorator.py @@ -0,0 +1,399 @@ +# ######################### LICENSE ############################ # + +# Copyright (c) 2005-2015, Michele Simionato +# All rights reserved. + +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: + +# Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# Redistributions in bytecode form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +# DAMAGE. + +""" +Decorator module, see https://pypi.python.org/pypi/decorator +for the documentation. +""" +import re +import sys +import inspect +import operator +import itertools +import collections + +from inspect import getfullargspec + +__version__ = '4.0.5' + + +def get_init(cls): + return cls.__init__ + + +# getargspec has been deprecated in Python 3.5 +ArgSpec = collections.namedtuple( + 'ArgSpec', 'args varargs varkw defaults') + + +def getargspec(f): + """A replacement for inspect.getargspec""" + spec = getfullargspec(f) + return ArgSpec(spec.args, spec.varargs, spec.varkw, spec.defaults) + + +DEF = re.compile(r'\s*def\s*([_\w][_\w\d]*)\s*\(') + + +# basic functionality +class FunctionMaker: + """ + An object with the ability to create functions with a given signature. + It has attributes name, doc, module, signature, defaults, dict, and + methods update and make. 
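+
+    A minimal sketch of direct use (normally one goes through the
+    ``decorator``/``decorate`` helpers below; the names ``fm_func`` and
+    ``double`` are illustrative):
+
+    >>> fm_func = FunctionMaker.create(
+    ...     'double(x)', 'return 2 * x', {}, doc='Double x.')
+    >>> fm_func(21)
+    42
+    >>> fm_func.__name__
+    'double'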
+    """
+
+    # Atomic get-and-increment provided by the GIL
+    _compile_count = itertools.count()
+
+    def __init__(self, func=None, name=None, signature=None,
+                 defaults=None, doc=None, module=None, funcdict=None):
+        self.shortsignature = signature
+        if func:
+            # func can be a class or a callable, but not an instance method
+            self.name = func.__name__
+            if self.name == '<lambda>':  # small hack for lambda functions
+                self.name = '_lambda_'
+            self.doc = func.__doc__
+            self.module = func.__module__
+            if inspect.isfunction(func):
+                argspec = getfullargspec(func)
+                self.annotations = getattr(func, '__annotations__', {})
+                for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
+                          'kwonlydefaults'):
+                    setattr(self, a, getattr(argspec, a))
+                for i, arg in enumerate(self.args):
+                    setattr(self, 'arg%d' % i, arg)
+                allargs = list(self.args)
+                allshortargs = list(self.args)
+                if self.varargs:
+                    allargs.append('*' + self.varargs)
+                    allshortargs.append('*' + self.varargs)
+                elif self.kwonlyargs:
+                    allargs.append('*')  # single star syntax
+                for a in self.kwonlyargs:
+                    allargs.append('%s=None' % a)
+                    allshortargs.append(f'{a}={a}')
+                if self.varkw:
+                    allargs.append('**' + self.varkw)
+                    allshortargs.append('**' + self.varkw)
+                self.signature = ', '.join(allargs)
+                self.shortsignature = ', '.join(allshortargs)
+                self.dict = func.__dict__.copy()
+        # func=None happens when decorating a caller
+        if name:
+            self.name = name
+        if signature is not None:
+            self.signature = signature
+        if defaults:
+            self.defaults = defaults
+        if doc:
+            self.doc = doc
+        if module:
+            self.module = module
+        if funcdict:
+            self.dict = funcdict
+        # check existence required attributes
+        assert hasattr(self, 'name')
+        if not hasattr(self, 'signature'):
+            raise TypeError('You are decorating a non-function: %s' % func)
+
+    def update(self, func, **kw):
+        "Update the signature of func with the data in self"
+        func.__name__ = self.name
+        func.__doc__ = getattr(self, 'doc', None)
+        func.__dict__ = getattr(self, 'dict', {})
+        func.__defaults__ = getattr(self, 'defaults', ())
+        func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None)
+        func.__annotations__ = getattr(self, 'annotations', None)
+        try:
+            frame = sys._getframe(3)
+        except AttributeError:  # for IronPython and similar implementations
+            callermodule = '?'
+        else:
+            callermodule = frame.f_globals.get('__name__', '?')
+        func.__module__ = getattr(self, 'module', callermodule)
+        func.__dict__.update(kw)
+
+    def make(self, src_templ, evaldict=None, addsource=False, **attrs):
+        "Make a new function from a given template and update the signature"
+        src = src_templ % vars(self)  # expand name and signature
+        evaldict = evaldict or {}
+        mo = DEF.match(src)
+        if mo is None:
+            raise SyntaxError('not a valid function template\n%s' % src)
+        name = mo.group(1)  # extract the function name
+        names = set([name] + [arg.strip(' *') for arg in
+                              self.shortsignature.split(',')])
+        for n in names:
+            if n in ('_func_', '_call_'):
+                raise NameError(f'{n} is overridden in\n{src}')
+        if not src.endswith('\n'):  # add a newline just for safety
+            src += '\n'  # this is needed in old versions of Python
+
+        # Ensure each generated function has a unique filename for profilers
+        # (such as cProfile) that depend on the tuple of (<filename>,
+        # <line number>, <function name>) being unique.
+        filename = '<decorator-gen-%d>' % (next(self._compile_count),)
+        try:
+            code = compile(src, filename, 'single')
+            exec(code, evaldict)
+        except:  # noqa: E722
+            print('Error in generated code:', file=sys.stderr)
+            print(src, file=sys.stderr)
+            raise
+        func = evaldict[name]
+        if addsource:
+            attrs['__source__'] = src
+        self.update(func, **attrs)
+        return func
+
+    @classmethod
+    def create(cls, obj, body, evaldict, defaults=None,
+               doc=None, module=None, addsource=True, **attrs):
+        """
+        Create a function from the strings name, signature, and body.
+        evaldict is the evaluation dictionary. If addsource is true, an
+        attribute __source__ is added to the result. The attributes attrs
+        are added, if any.
+        """
+        if isinstance(obj, str):  # "name(signature)"
+            name, rest = obj.strip().split('(', 1)
+            signature = rest[:-1]  # strip a right parens
+            func = None
+        else:  # a function
+            name = None
+            signature = None
+            func = obj
+        self = cls(func, name, signature, defaults, doc, module)
+        ibody = '\n'.join('    ' + line for line in body.splitlines())
+        return self.make('def %(name)s(%(signature)s):\n' + ibody,
+                         evaldict, addsource, **attrs)
+
+
+def decorate(func, caller):
+    """
+    decorate(func, caller) decorates a function using a caller.
+    """
+    evaldict = func.__globals__.copy()
+    evaldict['_call_'] = caller
+    evaldict['_func_'] = func
+    fun = FunctionMaker.create(
+        func, "return _call_(_func_, %(shortsignature)s)",
+        evaldict, __wrapped__=func)
+    if hasattr(func, '__qualname__'):
+        fun.__qualname__ = func.__qualname__
+    return fun
+
+
+def decorator(caller, _func=None):
+    """decorator(caller) converts a caller function into a decorator"""
+    if _func is not None:  # return a decorated function
+        # this is obsolete behavior; you should use decorate instead
+        return decorate(_func, caller)
+    # else return a decorator function
+    if inspect.isclass(caller):
+        name = caller.__name__.lower()
+        callerfunc = get_init(caller)
+        doc = (f'decorator({caller.__name__}) converts functions/generators into '
+               f'factories of {caller.__name__} objects')
+    elif inspect.isfunction(caller):
+        if caller.__name__ == '<lambda>':
+            name = '_lambda_'
+        else:
+            name = caller.__name__
+        callerfunc = caller
+        doc = caller.__doc__
+    else:  # assume caller is an object with a __call__ method
+        name = caller.__class__.__name__.lower()
+        callerfunc = caller.__call__.__func__
+        doc = caller.__call__.__doc__
+    evaldict = callerfunc.__globals__.copy()
+    evaldict['_call_'] = caller
+    evaldict['_decorate_'] = decorate
+    return FunctionMaker.create(
+        '%s(func)' % name, 'return _decorate_(func, _call_)',
+        evaldict, doc=doc, module=caller.__module__,
+        __wrapped__=caller)
+
+
+# ####################### contextmanager ####################### #
+
+try:  # Python >= 3.2
+    from contextlib import _GeneratorContextManager
+except ImportError:  # Python >= 2.5
+    from contextlib import GeneratorContextManager as _GeneratorContextManager
+
+
+class ContextManager(_GeneratorContextManager):
+    def __call__(self, func):
+        """Context manager decorator"""
+        return FunctionMaker.create(
+            func, "with _self_: return _func_(%(shortsignature)s)",
+            dict(_self_=self, _func_=func), __wrapped__=func)
+
+
+init = getfullargspec(_GeneratorContextManager.__init__)
+n_args = len(init.args)
+if n_args == 2 and not init.varargs:  # (self, genobj) Python 2.7
+    def __init__(self, g, *a, **k):
+        return _GeneratorContextManager.__init__(self, g(*a, **k))
+    ContextManager.__init__ = __init__
+elif n_args == 2 and init.varargs:  # (self, gen, *a, **k) Python 3.4
+    pass
+elif n_args == 4:  # (self,
gen, args, kwds) Python 3.5 + def __init__(self, g, *a, **k): + return _GeneratorContextManager.__init__(self, g, a, k) + ContextManager.__init__ = __init__ + +contextmanager = decorator(ContextManager) + + +# ############################ dispatch_on ############################ # + +def append(a, vancestors): + """ + Append ``a`` to the list of the virtual ancestors, unless it is already + included. + """ + add = True + for j, va in enumerate(vancestors): + if issubclass(va, a): + add = False + break + if issubclass(a, va): + vancestors[j] = a + add = False + if add: + vancestors.append(a) + + +# inspired from simplegeneric by P.J. Eby and functools.singledispatch +def dispatch_on(*dispatch_args): + """ + Factory of decorators turning a function into a generic function + dispatching on the given arguments. + """ + assert dispatch_args, 'No dispatch args passed' + dispatch_str = '(%s,)' % ', '.join(dispatch_args) + + def check(arguments, wrong=operator.ne, msg=''): + """Make sure one passes the expected number of arguments""" + if wrong(len(arguments), len(dispatch_args)): + raise TypeError('Expected %d arguments, got %d%s' % + (len(dispatch_args), len(arguments), msg)) + + def gen_func_dec(func): + """Decorator turning a function into a generic function""" + + # first check the dispatch arguments + argset = set(getfullargspec(func).args) + if not set(dispatch_args) <= argset: + raise NameError('Unknown dispatch arguments %s' % dispatch_str) + + typemap = {} + + def vancestors(*types): + """ + Get a list of sets of virtual ancestors for the given types + """ + check(types) + ras = [[] for _ in range(len(dispatch_args))] + for types_ in typemap: + for t, type_, ra in zip(types, types_, ras): + if issubclass(t, type_) and type_ not in t.__mro__: + append(type_, ra) + return [set(ra) for ra in ras] + + def ancestors(*types): + """ + Get a list of virtual MROs, one for each type + """ + check(types) + lists = [] + for t, vas in zip(types, vancestors(*types)): + n_vas = len(vas) + if n_vas > 1: + raise RuntimeError( + f'Ambiguous dispatch for {t}: {vas}') + elif n_vas == 1: + va, = vas + mro = type('t', (t, va), {}).__mro__[1:] + else: + mro = t.__mro__ + lists.append(mro[:-1]) # discard t and object + return lists + + def register(*types): + """ + Decorator to register an implementation for the given types + """ + check(types) + + def dec(f): + check(getfullargspec(f).args, operator.lt, ' in ' + f.__name__) + typemap[types] = f + return f + return dec + + def dispatch_info(*types): + """ + An utility to introspect the dispatch algorithm + """ + check(types) + lst = [tuple(a.__name__ for a in anc) + for anc in itertools.product(*ancestors(*types))] + return lst + + def _dispatch(dispatch_args, *args, **kw): + types = tuple(type(arg) for arg in dispatch_args) + try: # fast path + f = typemap[types] + except KeyError: + pass + else: + return f(*args, **kw) + combinations = itertools.product(*ancestors(*types)) + next(combinations) # the first one has been already tried + for types_ in combinations: + f = typemap.get(types_) + if f is not None: + return f(*args, **kw) + + # else call the default implementation + return func(*args, **kw) + + return FunctionMaker.create( + func, 'return _f_(%s, %%(shortsignature)s)' % dispatch_str, + dict(_f_=_dispatch), register=register, default=func, + typemap=typemap, vancestors=vancestors, ancestors=ancestors, + dispatch_info=dispatch_info, __wrapped__=func) + + gen_func_dec.__name__ = 'dispatch_on' + dispatch_str + return gen_func_dec diff --git 
a/parrot/lib/python3.10/site-packages/scipy/_lib/doccer.py b/parrot/lib/python3.10/site-packages/scipy/_lib/doccer.py new file mode 100644 index 0000000000000000000000000000000000000000..707f97017b81871e3c495a39e47587cf1f17175c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/_lib/doccer.py @@ -0,0 +1,275 @@ +''' Utilities to allow inserting docstring fragments for common +parameters into function and method docstrings''' + +import sys + +__all__ = [ + 'docformat', 'inherit_docstring_from', 'indentcount_lines', + 'filldoc', 'unindent_dict', 'unindent_string', 'extend_notes_in_docstring', + 'replace_notes_in_docstring', 'doc_replace' +] + + +def docformat(docstring, docdict=None): + ''' Fill a function docstring from variables in dictionary + + Adapt the indent of the inserted docs + + Parameters + ---------- + docstring : string + docstring from function, possibly with dict formatting strings + docdict : dict, optional + dictionary with keys that match the dict formatting strings + and values that are docstring fragments to be inserted. The + indentation of the inserted docstrings is set to match the + minimum indentation of the ``docstring`` by adding this + indentation to all lines of the inserted string, except the + first. + + Returns + ------- + outstring : string + string with requested ``docdict`` strings inserted + + Examples + -------- + >>> docformat(' Test string with %(value)s', {'value':'inserted value'}) + ' Test string with inserted value' + >>> docstring = 'First line\\n Second line\\n %(value)s' + >>> inserted_string = "indented\\nstring" + >>> docdict = {'value': inserted_string} + >>> docformat(docstring, docdict) + 'First line\\n Second line\\n indented\\n string' + ''' + if not docstring: + return docstring + if docdict is None: + docdict = {} + if not docdict: + return docstring + lines = docstring.expandtabs().splitlines() + # Find the minimum indent of the main docstring, after first line + if len(lines) < 2: + icount = 0 + else: + icount = indentcount_lines(lines[1:]) + indent = ' ' * icount + # Insert this indent to dictionary docstrings + indented = {} + for name, dstr in docdict.items(): + lines = dstr.expandtabs().splitlines() + try: + newlines = [lines[0]] + for line in lines[1:]: + newlines.append(indent+line) + indented[name] = '\n'.join(newlines) + except IndexError: + indented[name] = dstr + return docstring % indented + + +def inherit_docstring_from(cls): + """ + This decorator modifies the decorated function's docstring by + replacing occurrences of '%(super)s' with the docstring of the + method of the same name from the class `cls`. + + If the decorated method has no docstring, it is simply given the + docstring of `cls`s method. + + Parameters + ---------- + cls : Python class or instance + A class with a method with the same name as the decorated method. + The docstring of the method in this class replaces '%(super)s' in the + docstring of the decorated method. + + Returns + ------- + f : function + The decorator function that modifies the __doc__ attribute + of its argument. + + Examples + -------- + In the following, the docstring for Bar.func created using the + docstring of `Foo.func`. + + >>> class Foo: + ... def func(self): + ... '''Do something useful.''' + ... return + ... + >>> class Bar(Foo): + ... @inherit_docstring_from(Foo) + ... def func(self): + ... '''%(super)s + ... Do it fast. + ... ''' + ... return + ... 
+ >>> b = Bar() + >>> b.func.__doc__ + 'Do something useful.\n Do it fast.\n ' + + """ + def _doc(func): + cls_docstring = getattr(cls, func.__name__).__doc__ + func_docstring = func.__doc__ + if func_docstring is None: + func.__doc__ = cls_docstring + else: + new_docstring = func_docstring % dict(super=cls_docstring) + func.__doc__ = new_docstring + return func + return _doc + + +def extend_notes_in_docstring(cls, notes): + """ + This decorator replaces the decorated function's docstring + with the docstring from corresponding method in `cls`. + It extends the 'Notes' section of that docstring to include + the given `notes`. + """ + def _doc(func): + cls_docstring = getattr(cls, func.__name__).__doc__ + # If python is called with -OO option, + # there is no docstring + if cls_docstring is None: + return func + end_of_notes = cls_docstring.find(' References\n') + if end_of_notes == -1: + end_of_notes = cls_docstring.find(' Examples\n') + if end_of_notes == -1: + end_of_notes = len(cls_docstring) + func.__doc__ = (cls_docstring[:end_of_notes] + notes + + cls_docstring[end_of_notes:]) + return func + return _doc + + +def replace_notes_in_docstring(cls, notes): + """ + This decorator replaces the decorated function's docstring + with the docstring from corresponding method in `cls`. + It replaces the 'Notes' section of that docstring with + the given `notes`. + """ + def _doc(func): + cls_docstring = getattr(cls, func.__name__).__doc__ + notes_header = ' Notes\n -----\n' + # If python is called with -OO option, + # there is no docstring + if cls_docstring is None: + return func + start_of_notes = cls_docstring.find(notes_header) + end_of_notes = cls_docstring.find(' References\n') + if end_of_notes == -1: + end_of_notes = cls_docstring.find(' Examples\n') + if end_of_notes == -1: + end_of_notes = len(cls_docstring) + func.__doc__ = (cls_docstring[:start_of_notes + len(notes_header)] + + notes + + cls_docstring[end_of_notes:]) + return func + return _doc + + +def indentcount_lines(lines): + ''' Minimum indent for all lines in line list + + >>> lines = [' one', ' two', ' three'] + >>> indentcount_lines(lines) + 1 + >>> lines = [] + >>> indentcount_lines(lines) + 0 + >>> lines = [' one'] + >>> indentcount_lines(lines) + 1 + >>> indentcount_lines([' ']) + 0 + ''' + indentno = sys.maxsize + for line in lines: + stripped = line.lstrip() + if stripped: + indentno = min(indentno, len(line) - len(stripped)) + if indentno == sys.maxsize: + return 0 + return indentno + + +def filldoc(docdict, unindent_params=True): + ''' Return docstring decorator using docdict variable dictionary + + Parameters + ---------- + docdict : dictionary + dictionary containing name, docstring fragment pairs + unindent_params : {False, True}, boolean, optional + If True, strip common indentation from all parameters in + docdict + + Returns + ------- + decfunc : function + decorator that applies dictionary to input function docstring + + ''' + if unindent_params: + docdict = unindent_dict(docdict) + + def decorate(f): + f.__doc__ = docformat(f.__doc__, docdict) + return f + return decorate + + +def unindent_dict(docdict): + ''' Unindent all strings in a docdict ''' + can_dict = {} + for name, dstr in docdict.items(): + can_dict[name] = unindent_string(dstr) + return can_dict + + +def unindent_string(docstring): + ''' Set docstring to minimum indent for all lines, including first + + >>> unindent_string(' two') + 'two' + >>> unindent_string(' two\\n three') + 'two\\n three' + ''' + lines = docstring.expandtabs().splitlines() + 
icount = indentcount_lines(lines) + if icount == 0: + return docstring + return '\n'.join([line[icount:] for line in lines]) + + +def doc_replace(obj, oldval, newval): + """Decorator to take the docstring from obj, with oldval replaced by newval + + Equivalent to ``func.__doc__ = obj.__doc__.replace(oldval, newval)`` + + Parameters + ---------- + obj : object + The object to take the docstring from. + oldval : string + The string to replace from the original docstring. + newval : string + The string to replace ``oldval`` with. + """ + # __doc__ may be None for optimized Python (-OO) + doc = (obj.__doc__ or '').replace(oldval, newval) + + def inner(func): + func.__doc__ = doc + return func + + return inner diff --git a/parrot/lib/python3.10/site-packages/scipy/_lib/uarray.py b/parrot/lib/python3.10/site-packages/scipy/_lib/uarray.py new file mode 100644 index 0000000000000000000000000000000000000000..b29fc713efb3e836cc179ac87ce41f87b51870ef --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/_lib/uarray.py @@ -0,0 +1,31 @@ +"""`uarray` provides functions for generating multimethods that dispatch to +multiple different backends + +This should be imported, rather than `_uarray` so that an installed version could +be used instead, if available. This means that users can call +`uarray.set_backend` directly instead of going through SciPy. + +""" + + +# Prefer an installed version of uarray, if available +try: + import uarray as _uarray +except ImportError: + _has_uarray = False +else: + from scipy._lib._pep440 import Version as _Version + + _has_uarray = _Version(_uarray.__version__) >= _Version("0.8") + del _uarray + del _Version + + +if _has_uarray: + from uarray import * # noqa: F403 + from uarray import _Function +else: + from ._uarray import * # noqa: F403 + from ._uarray import _Function # noqa: F401 + +del _has_uarray diff --git a/parrot/lib/python3.10/site-packages/scipy/constants/__init__.py b/parrot/lib/python3.10/site-packages/scipy/constants/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ce2805070eef1d77567ecf094aa08049d0b0a797 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/constants/__init__.py @@ -0,0 +1,347 @@ +r""" +================================== +Constants (:mod:`scipy.constants`) +================================== + +.. currentmodule:: scipy.constants + +Physical and mathematical constants and units. 
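+
+A quick sketch of typical use::
+
+    from scipy import constants
+
+    constants.c                       # speed of light in vacuum, m/s
+    constants.value('electron mass')  # CODATA database lookup
+    constants.find('boltzmann')       # search the database keys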
+ + +Mathematical constants +====================== + +================ ================================================================= +``pi`` Pi +``golden`` Golden ratio +``golden_ratio`` Golden ratio +================ ================================================================= + + +Physical constants +================== + +=========================== ================================================================= +``c`` speed of light in vacuum +``speed_of_light`` speed of light in vacuum +``mu_0`` the magnetic constant :math:`\mu_0` +``epsilon_0`` the electric constant (vacuum permittivity), :math:`\epsilon_0` +``h`` the Planck constant :math:`h` +``Planck`` the Planck constant :math:`h` +``hbar`` :math:`\hbar = h/(2\pi)` +``G`` Newtonian constant of gravitation +``gravitational_constant`` Newtonian constant of gravitation +``g`` standard acceleration of gravity +``e`` elementary charge +``elementary_charge`` elementary charge +``R`` molar gas constant +``gas_constant`` molar gas constant +``alpha`` fine-structure constant +``fine_structure`` fine-structure constant +``N_A`` Avogadro constant +``Avogadro`` Avogadro constant +``k`` Boltzmann constant +``Boltzmann`` Boltzmann constant +``sigma`` Stefan-Boltzmann constant :math:`\sigma` +``Stefan_Boltzmann`` Stefan-Boltzmann constant :math:`\sigma` +``Wien`` Wien displacement law constant +``Rydberg`` Rydberg constant +``m_e`` electron mass +``electron_mass`` electron mass +``m_p`` proton mass +``proton_mass`` proton mass +``m_n`` neutron mass +``neutron_mass`` neutron mass +=========================== ================================================================= + + +Constants database +------------------ + +In addition to the above variables, :mod:`scipy.constants` also contains the +2018 CODATA recommended values [CODATA2018]_ database containing more physical +constants. + +.. autosummary:: + :toctree: generated/ + + value -- Value in physical_constants indexed by key + unit -- Unit in physical_constants indexed by key + precision -- Relative precision in physical_constants indexed by key + find -- Return list of physical_constant keys with a given string + ConstantWarning -- Constant sought not in newest CODATA data set + +.. data:: physical_constants + + Dictionary of physical constants, of the format + ``physical_constants[name] = (value, unit, uncertainty)``. 
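+
+   For example (the numbers are the CODATA 2018 values, shown here
+   rounded)::
+
+       from scipy.constants import physical_constants
+
+       val, unit_str, uncert = physical_constants['electron mass']
+       # val ~ 9.109e-31, unit_str == 'kg', uncert ~ 2.8e-40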
+ +Available constants: + +====================================================================== ==== +%(constant_names)s +====================================================================== ==== + + +Units +===== + +SI prefixes +----------- + +============ ================================================================= +``quetta`` :math:`10^{30}` +``ronna`` :math:`10^{27}` +``yotta`` :math:`10^{24}` +``zetta`` :math:`10^{21}` +``exa`` :math:`10^{18}` +``peta`` :math:`10^{15}` +``tera`` :math:`10^{12}` +``giga`` :math:`10^{9}` +``mega`` :math:`10^{6}` +``kilo`` :math:`10^{3}` +``hecto`` :math:`10^{2}` +``deka`` :math:`10^{1}` +``deci`` :math:`10^{-1}` +``centi`` :math:`10^{-2}` +``milli`` :math:`10^{-3}` +``micro`` :math:`10^{-6}` +``nano`` :math:`10^{-9}` +``pico`` :math:`10^{-12}` +``femto`` :math:`10^{-15}` +``atto`` :math:`10^{-18}` +``zepto`` :math:`10^{-21}` +``yocto`` :math:`10^{-24}` +``ronto`` :math:`10^{-27}` +``quecto`` :math:`10^{-30}` +============ ================================================================= + +Binary prefixes +--------------- + +============ ================================================================= +``kibi`` :math:`2^{10}` +``mebi`` :math:`2^{20}` +``gibi`` :math:`2^{30}` +``tebi`` :math:`2^{40}` +``pebi`` :math:`2^{50}` +``exbi`` :math:`2^{60}` +``zebi`` :math:`2^{70}` +``yobi`` :math:`2^{80}` +============ ================================================================= + +Mass +---- + +================= ============================================================ +``gram`` :math:`10^{-3}` kg +``metric_ton`` :math:`10^{3}` kg +``grain`` one grain in kg +``lb`` one pound (avoirdupous) in kg +``pound`` one pound (avoirdupous) in kg +``blob`` one inch version of a slug in kg (added in 1.0.0) +``slinch`` one inch version of a slug in kg (added in 1.0.0) +``slug`` one slug in kg (added in 1.0.0) +``oz`` one ounce in kg +``ounce`` one ounce in kg +``stone`` one stone in kg +``grain`` one grain in kg +``long_ton`` one long ton in kg +``short_ton`` one short ton in kg +``troy_ounce`` one Troy ounce in kg +``troy_pound`` one Troy pound in kg +``carat`` one carat in kg +``m_u`` atomic mass constant (in kg) +``u`` atomic mass constant (in kg) +``atomic_mass`` atomic mass constant (in kg) +================= ============================================================ + +Angle +----- + +================= ============================================================ +``degree`` degree in radians +``arcmin`` arc minute in radians +``arcminute`` arc minute in radians +``arcsec`` arc second in radians +``arcsecond`` arc second in radians +================= ============================================================ + + +Time +---- + +================= ============================================================ +``minute`` one minute in seconds +``hour`` one hour in seconds +``day`` one day in seconds +``week`` one week in seconds +``year`` one year (365 days) in seconds +``Julian_year`` one Julian year (365.25 days) in seconds +================= ============================================================ + + +Length +------ + +===================== ============================================================ +``inch`` one inch in meters +``foot`` one foot in meters +``yard`` one yard in meters +``mile`` one mile in meters +``mil`` one mil in meters +``pt`` one point in meters +``point`` one point in meters +``survey_foot`` one survey foot in meters +``survey_mile`` one survey mile in meters +``nautical_mile`` one nautical mile in meters +``fermi`` one 
Fermi in meters +``angstrom`` one Angstrom in meters +``micron`` one micron in meters +``au`` one astronomical unit in meters +``astronomical_unit`` one astronomical unit in meters +``light_year`` one light year in meters +``parsec`` one parsec in meters +===================== ============================================================ + +Pressure +-------- + +================= ============================================================ +``atm`` standard atmosphere in pascals +``atmosphere`` standard atmosphere in pascals +``bar`` one bar in pascals +``torr`` one torr (mmHg) in pascals +``mmHg`` one torr (mmHg) in pascals +``psi`` one psi in pascals +================= ============================================================ + +Area +---- + +================= ============================================================ +``hectare`` one hectare in square meters +``acre`` one acre in square meters +================= ============================================================ + + +Volume +------ + +=================== ======================================================== +``liter`` one liter in cubic meters +``litre`` one liter in cubic meters +``gallon`` one gallon (US) in cubic meters +``gallon_US`` one gallon (US) in cubic meters +``gallon_imp`` one gallon (UK) in cubic meters +``fluid_ounce`` one fluid ounce (US) in cubic meters +``fluid_ounce_US`` one fluid ounce (US) in cubic meters +``fluid_ounce_imp`` one fluid ounce (UK) in cubic meters +``bbl`` one barrel in cubic meters +``barrel`` one barrel in cubic meters +=================== ======================================================== + +Speed +----- + +================== ========================================================== +``kmh`` kilometers per hour in meters per second +``mph`` miles per hour in meters per second +``mach`` one Mach (approx., at 15 C, 1 atm) in meters per second +``speed_of_sound`` one Mach (approx., at 15 C, 1 atm) in meters per second +``knot`` one knot in meters per second +================== ========================================================== + + +Temperature +----------- + +===================== ======================================================= +``zero_Celsius`` zero of Celsius scale in Kelvin +``degree_Fahrenheit`` one Fahrenheit (only differences) in Kelvins +===================== ======================================================= + +.. 
autosummary:: + :toctree: generated/ + + convert_temperature + +Energy +------ + +==================== ======================================================= +``eV`` one electron volt in Joules +``electron_volt`` one electron volt in Joules +``calorie`` one calorie (thermochemical) in Joules +``calorie_th`` one calorie (thermochemical) in Joules +``calorie_IT`` one calorie (International Steam Table calorie, 1956) in Joules +``erg`` one erg in Joules +``Btu`` one British thermal unit (International Steam Table) in Joules +``Btu_IT`` one British thermal unit (International Steam Table) in Joules +``Btu_th`` one British thermal unit (thermochemical) in Joules +``ton_TNT`` one ton of TNT in Joules +==================== ======================================================= + +Power +----- + +==================== ======================================================= +``hp`` one horsepower in watts +``horsepower`` one horsepower in watts +==================== ======================================================= + +Force +----- + +==================== ======================================================= +``dyn`` one dyne in newtons +``dyne`` one dyne in newtons +``lbf`` one pound force in newtons +``pound_force`` one pound force in newtons +``kgf`` one kilogram force in newtons +``kilogram_force`` one kilogram force in newtons +==================== ======================================================= + +Optics +------ + +.. autosummary:: + :toctree: generated/ + + lambda2nu + nu2lambda + +References +========== + +.. [CODATA2018] CODATA Recommended Values of the Fundamental + Physical Constants 2018. + + https://physics.nist.gov/cuu/Constants/ + +""" # noqa: E501 +# Modules contributed by BasSw (wegwerp@gmail.com) +from ._codata import * +from ._constants import * +from ._codata import _obsolete_constants, physical_constants + +# Deprecated namespaces, to be removed in v2.0.0 +from . 
import codata, constants + +_constant_names_list = [(_k.lower(), _k, _v) + for _k, _v in physical_constants.items() + if _k not in _obsolete_constants] +_constant_names = "\n".join(["``{}``{} {} {}".format(_x[1], " "*(66-len(_x[1])), + _x[2][0], _x[2][1]) + for _x in sorted(_constant_names_list)]) +if __doc__: + __doc__ = __doc__ % dict(constant_names=_constant_names) + +del _constant_names +del _constant_names_list + +__all__ = [s for s in dir() if not s.startswith('_')] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/parrot/lib/python3.10/site-packages/scipy/constants/__pycache__/_constants.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/constants/__pycache__/_constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..55bf1879ffad4aff70fa8a011a4b51415aa9961c Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/constants/__pycache__/_constants.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/constants/__pycache__/codata.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/constants/__pycache__/codata.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e83aab98af7ebb8be0f6f734a5582e3b6fd01bb9 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/constants/__pycache__/codata.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/constants/__pycache__/constants.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/constants/__pycache__/constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30b29aba4aaa742be270cbfb911971d9ae685b4b Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/constants/__pycache__/constants.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/constants/tests/__pycache__/test_constants.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/constants/tests/__pycache__/test_constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7568fc8259d247730c6c0b28afd5ac7e42f3fad9 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/constants/tests/__pycache__/test_constants.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_import.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_import.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e1f02d8f49224942730bf2df8325ba903ec685a Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_import.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_pseudo_diffs.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_pseudo_diffs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e6939911b6632e0c066707836b1ebb165715b3f Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_pseudo_diffs.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/fftpack/tests/test_helper.py b/parrot/lib/python3.10/site-packages/scipy/fftpack/tests/test_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..5e7be04f3c0291502b50b101db82d299aadc7772 --- /dev/null +++ 
b/parrot/lib/python3.10/site-packages/scipy/fftpack/tests/test_helper.py @@ -0,0 +1,54 @@ +# Created by Pearu Peterson, September 2002 + +__usage__ = """ +Build fftpack: + python setup_fftpack.py build +Run tests if scipy is installed: + python -c 'import scipy;scipy.fftpack.test()' +Run tests if fftpack is not installed: + python tests/test_helper.py [] +""" + +from numpy.testing import assert_array_almost_equal +from scipy.fftpack import fftshift, ifftshift, fftfreq, rfftfreq + +from numpy import pi, random + +class TestFFTShift: + + def test_definition(self): + x = [0,1,2,3,4,-4,-3,-2,-1] + y = [-4,-3,-2,-1,0,1,2,3,4] + assert_array_almost_equal(fftshift(x),y) + assert_array_almost_equal(ifftshift(y),x) + x = [0,1,2,3,4,-5,-4,-3,-2,-1] + y = [-5,-4,-3,-2,-1,0,1,2,3,4] + assert_array_almost_equal(fftshift(x),y) + assert_array_almost_equal(ifftshift(y),x) + + def test_inverse(self): + for n in [1,4,9,100,211]: + x = random.random((n,)) + assert_array_almost_equal(ifftshift(fftshift(x)),x) + + +class TestFFTFreq: + + def test_definition(self): + x = [0,1,2,3,4,-4,-3,-2,-1] + assert_array_almost_equal(9*fftfreq(9),x) + assert_array_almost_equal(9*pi*fftfreq(9,pi),x) + x = [0,1,2,3,4,-5,-4,-3,-2,-1] + assert_array_almost_equal(10*fftfreq(10),x) + assert_array_almost_equal(10*pi*fftfreq(10,pi),x) + + +class TestRFFTFreq: + + def test_definition(self): + x = [0,1,1,2,2,3,3,4,4] + assert_array_almost_equal(9*rfftfreq(9),x) + assert_array_almost_equal(9*pi*rfftfreq(9,pi),x) + x = [0,1,1,2,2,3,3,4,4,5] + assert_array_almost_equal(10*rfftfreq(10),x) + assert_array_almost_equal(10*pi*rfftfreq(10,pi),x) diff --git a/parrot/lib/python3.10/site-packages/scipy/fftpack/tests/test_real_transforms.py b/parrot/lib/python3.10/site-packages/scipy/fftpack/tests/test_real_transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..6108d460c7864bdc5dd9425bddf93576fac5b39d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/fftpack/tests/test_real_transforms.py @@ -0,0 +1,815 @@ +from os.path import join, dirname + +import numpy as np +from numpy.testing import assert_array_almost_equal, assert_equal +import pytest +from pytest import raises as assert_raises + +from scipy.fftpack._realtransforms import ( + dct, idct, dst, idst, dctn, idctn, dstn, idstn) + +# Matlab reference data +MDATA = np.load(join(dirname(__file__), 'test.npz')) +X = [MDATA['x%d' % i] for i in range(8)] +Y = [MDATA['y%d' % i] for i in range(8)] + +# FFTW reference data: the data are organized as follows: +# * SIZES is an array containing all available sizes +# * for every type (1, 2, 3, 4) and every size, the array dct_type_size +# contains the output of the DCT applied to the input np.linspace(0, size-1, +# size) +FFTWDATA_DOUBLE = np.load(join(dirname(__file__), 'fftw_double_ref.npz')) +FFTWDATA_SINGLE = np.load(join(dirname(__file__), 'fftw_single_ref.npz')) +FFTWDATA_SIZES = FFTWDATA_DOUBLE['sizes'] + + +def fftw_dct_ref(type, size, dt): + x = np.linspace(0, size-1, size).astype(dt) + dt = np.result_type(np.float32, dt) + if dt == np.float64: + data = FFTWDATA_DOUBLE + elif dt == np.float32: + data = FFTWDATA_SINGLE + else: + raise ValueError() + y = (data['dct_%d_%d' % (type, size)]).astype(dt) + return x, y, dt + + +def fftw_dst_ref(type, size, dt): + x = np.linspace(0, size-1, size).astype(dt) + dt = np.result_type(np.float32, dt) + if dt == np.float64: + data = FFTWDATA_DOUBLE + elif dt == np.float32: + data = FFTWDATA_SINGLE + else: + raise ValueError() + y = (data['dst_%d_%d' % (type, 
size)]).astype(dt) + return x, y, dt + + +def dct_2d_ref(x, **kwargs): + """Calculate reference values for testing dct2.""" + x = np.array(x, copy=True) + for row in range(x.shape[0]): + x[row, :] = dct(x[row, :], **kwargs) + for col in range(x.shape[1]): + x[:, col] = dct(x[:, col], **kwargs) + return x + + +def idct_2d_ref(x, **kwargs): + """Calculate reference values for testing idct2.""" + x = np.array(x, copy=True) + for row in range(x.shape[0]): + x[row, :] = idct(x[row, :], **kwargs) + for col in range(x.shape[1]): + x[:, col] = idct(x[:, col], **kwargs) + return x + + +def dst_2d_ref(x, **kwargs): + """Calculate reference values for testing dst2.""" + x = np.array(x, copy=True) + for row in range(x.shape[0]): + x[row, :] = dst(x[row, :], **kwargs) + for col in range(x.shape[1]): + x[:, col] = dst(x[:, col], **kwargs) + return x + + +def idst_2d_ref(x, **kwargs): + """Calculate reference values for testing idst2.""" + x = np.array(x, copy=True) + for row in range(x.shape[0]): + x[row, :] = idst(x[row, :], **kwargs) + for col in range(x.shape[1]): + x[:, col] = idst(x[:, col], **kwargs) + return x + + +def naive_dct1(x, norm=None): + """Calculate textbook definition version of DCT-I.""" + x = np.array(x, copy=True) + N = len(x) + M = N-1 + y = np.zeros(N) + m0, m = 1, 2 + if norm == 'ortho': + m0 = np.sqrt(1.0/M) + m = np.sqrt(2.0/M) + for k in range(N): + for n in range(1, N-1): + y[k] += m*x[n]*np.cos(np.pi*n*k/M) + y[k] += m0 * x[0] + y[k] += m0 * x[N-1] * (1 if k % 2 == 0 else -1) + if norm == 'ortho': + y[0] *= 1/np.sqrt(2) + y[N-1] *= 1/np.sqrt(2) + return y + + +def naive_dst1(x, norm=None): + """Calculate textbook definition version of DST-I.""" + x = np.array(x, copy=True) + N = len(x) + M = N+1 + y = np.zeros(N) + for k in range(N): + for n in range(N): + y[k] += 2*x[n]*np.sin(np.pi*(n+1.0)*(k+1.0)/M) + if norm == 'ortho': + y *= np.sqrt(0.5/M) + return y + + +def naive_dct4(x, norm=None): + """Calculate textbook definition version of DCT-IV.""" + x = np.array(x, copy=True) + N = len(x) + y = np.zeros(N) + for k in range(N): + for n in range(N): + y[k] += x[n]*np.cos(np.pi*(n+0.5)*(k+0.5)/(N)) + if norm == 'ortho': + y *= np.sqrt(2.0/N) + else: + y *= 2 + return y + + +def naive_dst4(x, norm=None): + """Calculate textbook definition version of DST-IV.""" + x = np.array(x, copy=True) + N = len(x) + y = np.zeros(N) + for k in range(N): + for n in range(N): + y[k] += x[n]*np.sin(np.pi*(n+0.5)*(k+0.5)/(N)) + if norm == 'ortho': + y *= np.sqrt(2.0/N) + else: + y *= 2 + return y + + +class TestComplex: + def test_dct_complex64(self): + y = dct(1j*np.arange(5, dtype=np.complex64)) + x = 1j*dct(np.arange(5)) + assert_array_almost_equal(x, y) + + def test_dct_complex(self): + y = dct(np.arange(5)*1j) + x = 1j*dct(np.arange(5)) + assert_array_almost_equal(x, y) + + def test_idct_complex(self): + y = idct(np.arange(5)*1j) + x = 1j*idct(np.arange(5)) + assert_array_almost_equal(x, y) + + def test_dst_complex64(self): + y = dst(np.arange(5, dtype=np.complex64)*1j) + x = 1j*dst(np.arange(5)) + assert_array_almost_equal(x, y) + + def test_dst_complex(self): + y = dst(np.arange(5)*1j) + x = 1j*dst(np.arange(5)) + assert_array_almost_equal(x, y) + + def test_idst_complex(self): + y = idst(np.arange(5)*1j) + x = 1j*idst(np.arange(5)) + assert_array_almost_equal(x, y) + + +class _TestDCTBase: + def setup_method(self): + self.rdt = None + self.dec = 14 + self.type = None + + def test_definition(self): + for i in FFTWDATA_SIZES: + x, yr, dt = fftw_dct_ref(self.type, i, self.rdt) + y = dct(x, 
type=self.type)
+            assert_equal(y.dtype, dt)
+            # XXX: we divide by np.max(y) because the tests fail otherwise. We
+            # should really use something like assert_array_approx_equal. The
+            # difference is due to fftw using a better algorithm w.r.t error
+            # propagation compared to the ones from fftpack.
+            assert_array_almost_equal(y / np.max(y), yr / np.max(y), decimal=self.dec,
+                                      err_msg="Size %d failed" % i)
+
+    def test_axis(self):
+        nt = 2
+        for i in [7, 8, 9, 16, 32, 64]:
+            x = np.random.randn(nt, i)
+            y = dct(x, type=self.type)
+            for j in range(nt):
+                assert_array_almost_equal(y[j], dct(x[j], type=self.type),
+                                          decimal=self.dec)
+
+            x = x.T
+            y = dct(x, axis=0, type=self.type)
+            for j in range(nt):
+                assert_array_almost_equal(y[:,j], dct(x[:,j], type=self.type),
+                                          decimal=self.dec)
+
+
+class _TestDCTIBase(_TestDCTBase):
+    def test_definition_ortho(self):
+        # Test orthonormal mode.
+        dt = np.result_type(np.float32, self.rdt)
+        for xr in X:
+            x = np.array(xr, dtype=self.rdt)
+            y = dct(x, norm='ortho', type=1)
+            y2 = naive_dct1(x, norm='ortho')
+            assert_equal(y.dtype, dt)
+            assert_array_almost_equal(y / np.max(y), y2 / np.max(y), decimal=self.dec)
+
+class _TestDCTIIBase(_TestDCTBase):
+    def test_definition_matlab(self):
+        # Test correspondence with MATLAB (orthonormal mode).
+        dt = np.result_type(np.float32, self.rdt)
+        for xr, yr in zip(X, Y):
+            x = np.array(xr, dtype=dt)
+            y = dct(x, norm="ortho", type=2)
+            assert_equal(y.dtype, dt)
+            assert_array_almost_equal(y, yr, decimal=self.dec)
+
+
+class _TestDCTIIIBase(_TestDCTBase):
+    def test_definition_ortho(self):
+        # Test orthonormal mode.
+        dt = np.result_type(np.float32, self.rdt)
+        for xr in X:
+            x = np.array(xr, dtype=self.rdt)
+            y = dct(x, norm='ortho', type=2)
+            xi = dct(y, norm="ortho", type=3)
+            assert_equal(xi.dtype, dt)
+            assert_array_almost_equal(xi, x, decimal=self.dec)
+
+class _TestDCTIVBase(_TestDCTBase):
+    def test_definition_ortho(self):
+        # Test orthonormal mode.
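+        # naive_dct4 above implements the orthonormal DCT-IV,
+        #     y[k] = sqrt(2/N) * sum_{n=0}^{N-1} x[n]*cos(pi*(n+0.5)*(k+0.5)/N),
+        # whose transform matrix is symmetric and orthogonal, i.e. self-inverse.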
+        dt = np.result_type(np.float32, self.rdt)
+        for xr in X:
+            x = np.array(xr, dtype=self.rdt)
+            y = dct(x, norm='ortho', type=4)
+            y2 = naive_dct4(x, norm='ortho')
+            assert_equal(y.dtype, dt)
+            assert_array_almost_equal(y / np.max(y), y2 / np.max(y), decimal=self.dec)
+
+
+class TestDCTIDouble(_TestDCTIBase):
+    def setup_method(self):
+        self.rdt = np.float64
+        self.dec = 10
+        self.type = 1
+
+
+class TestDCTIFloat(_TestDCTIBase):
+    def setup_method(self):
+        self.rdt = np.float32
+        self.dec = 4
+        self.type = 1
+
+
+class TestDCTIInt(_TestDCTIBase):
+    def setup_method(self):
+        self.rdt = int
+        self.dec = 5
+        self.type = 1
+
+
+class TestDCTIIDouble(_TestDCTIIBase):
+    def setup_method(self):
+        self.rdt = np.float64
+        self.dec = 10
+        self.type = 2
+
+
+class TestDCTIIFloat(_TestDCTIIBase):
+    def setup_method(self):
+        self.rdt = np.float32
+        self.dec = 5
+        self.type = 2
+
+
+class TestDCTIIInt(_TestDCTIIBase):
+    def setup_method(self):
+        self.rdt = int
+        self.dec = 5
+        self.type = 2
+
+
+class TestDCTIIIDouble(_TestDCTIIIBase):
+    def setup_method(self):
+        self.rdt = np.float64
+        self.dec = 14
+        self.type = 3
+
+
+class TestDCTIIIFloat(_TestDCTIIIBase):
+    def setup_method(self):
+        self.rdt = np.float32
+        self.dec = 5
+        self.type = 3
+
+
+class TestDCTIIIInt(_TestDCTIIIBase):
+    def setup_method(self):
+        self.rdt = int
+        self.dec = 5
+        self.type = 3
+
+
+class TestDCTIVDouble(_TestDCTIVBase):
+    def setup_method(self):
+        self.rdt = np.float64
+        self.dec = 12
+        self.type = 4
+
+
+class TestDCTIVFloat(_TestDCTIVBase):
+    def setup_method(self):
+        self.rdt = np.float32
+        self.dec = 5
+        self.type = 4
+
+
+class TestDCTIVInt(_TestDCTIVBase):
+    def setup_method(self):
+        self.rdt = int
+        self.dec = 5
+        self.type = 4
+
+
+class _TestIDCTBase:
+    def setup_method(self):
+        self.rdt = None
+        self.dec = 14
+        self.type = None
+
+    def test_definition(self):
+        for i in FFTWDATA_SIZES:
+            xr, yr, dt = fftw_dct_ref(self.type, i, self.rdt)
+            x = idct(yr, type=self.type)
+            if self.type == 1:
+                x /= 2 * (i-1)
+            else:
+                x /= 2 * i
+            assert_equal(x.dtype, dt)
+            # XXX: we divide by np.max(x) because the tests fail otherwise. We
+            # should really use something like assert_array_approx_equal. The
+            # difference is due to fftw using a better algorithm w.r.t error
+            # propagation compared to the ones from fftpack.
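+            # The scaling above undoes the unnormalized round trip:
+            # idct(dct(x)) == 2*N*x for types 2-4 and 2*(N-1)*x for type 1,
+            # so dividing by 2*(i-1) or 2*i recovers the reference input xr.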
+            assert_array_almost_equal(x / np.max(x), xr / np.max(x), decimal=self.dec,
+                                      err_msg="Size %d failed" % i)
+
+
+class TestIDCTIDouble(_TestIDCTBase):
+    def setup_method(self):
+        self.rdt = np.float64
+        self.dec = 10
+        self.type = 1
+
+
+class TestIDCTIFloat(_TestIDCTBase):
+    def setup_method(self):
+        self.rdt = np.float32
+        self.dec = 4
+        self.type = 1
+
+
+class TestIDCTIInt(_TestIDCTBase):
+    def setup_method(self):
+        self.rdt = int
+        self.dec = 4
+        self.type = 1
+
+
+class TestIDCTIIDouble(_TestIDCTBase):
+    def setup_method(self):
+        self.rdt = np.float64
+        self.dec = 10
+        self.type = 2
+
+
+class TestIDCTIIFloat(_TestIDCTBase):
+    def setup_method(self):
+        self.rdt = np.float32
+        self.dec = 5
+        self.type = 2
+
+
+class TestIDCTIIInt(_TestIDCTBase):
+    def setup_method(self):
+        self.rdt = int
+        self.dec = 5
+        self.type = 2
+
+
+class TestIDCTIIIDouble(_TestIDCTBase):
+    def setup_method(self):
+        self.rdt = np.float64
+        self.dec = 14
+        self.type = 3
+
+
+class TestIDCTIIIFloat(_TestIDCTBase):
+    def setup_method(self):
+        self.rdt = np.float32
+        self.dec = 5
+        self.type = 3
+
+
+class TestIDCTIIIInt(_TestIDCTBase):
+    def setup_method(self):
+        self.rdt = int
+        self.dec = 5
+        self.type = 3
+
+class TestIDCTIVDouble(_TestIDCTBase):
+    def setup_method(self):
+        self.rdt = np.float64
+        self.dec = 12
+        self.type = 4
+
+
+class TestIDCTIVFloat(_TestIDCTBase):
+    def setup_method(self):
+        self.rdt = np.float32
+        self.dec = 5
+        self.type = 4
+
+
+class TestIDCTIVInt(_TestIDCTBase):
+    def setup_method(self):
+        self.rdt = int
+        self.dec = 5
+        self.type = 4
+
+class _TestDSTBase:
+    def setup_method(self):
+        self.rdt = None  # dtype
+        self.dec = None  # number of decimals to match
+        self.type = None  # dst type
+
+    def test_definition(self):
+        for i in FFTWDATA_SIZES:
+            xr, yr, dt = fftw_dst_ref(self.type, i, self.rdt)
+            y = dst(xr, type=self.type)
+            assert_equal(y.dtype, dt)
+            # XXX: we divide by np.max(y) because the tests fail otherwise. We
+            # should really use something like assert_array_approx_equal. The
+            # difference is due to fftw using a better algorithm w.r.t error
+            # propagation compared to the ones from fftpack.
+            assert_array_almost_equal(y / np.max(y), yr / np.max(y), decimal=self.dec,
+                                      err_msg="Size %d failed" % i)
+
+
+class _TestDSTIBase(_TestDSTBase):
+    def test_definition_ortho(self):
+        # Test orthonormal mode.
+        dt = np.result_type(np.float32, self.rdt)
+        for xr in X:
+            x = np.array(xr, dtype=self.rdt)
+            y = dst(x, norm='ortho', type=1)
+            y2 = naive_dst1(x, norm='ortho')
+            assert_equal(y.dtype, dt)
+            assert_array_almost_equal(y / np.max(y), y2 / np.max(y), decimal=self.dec)
+
+class _TestDSTIVBase(_TestDSTBase):
+    def test_definition_ortho(self):
+        # Test orthonormal mode.
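+        # naive_dst4 above is the sine analogue of naive_dct4: the orthonormal
+        # DST-IV matrix sqrt(2/N)*sin(pi*(n+0.5)*(k+0.5)/N) is again symmetric
+        # and orthogonal, hence self-inverse.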
+ dt = np.result_type(np.float32, self.rdt) + for xr in X: + x = np.array(xr, dtype=self.rdt) + y = dst(x, norm='ortho', type=4) + y2 = naive_dst4(x, norm='ortho') + assert_equal(y.dtype, dt) + assert_array_almost_equal(y, y2, decimal=self.dec) + +class TestDSTIDouble(_TestDSTIBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 12 + self.type = 1 + + +class TestDSTIFloat(_TestDSTIBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 4 + self.type = 1 + + +class TestDSTIInt(_TestDSTIBase): + def setup_method(self): + self.rdt = int + self.dec = 5 + self.type = 1 + + +class TestDSTIIDouble(_TestDSTBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 14 + self.type = 2 + + +class TestDSTIIFloat(_TestDSTBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 6 + self.type = 2 + + +class TestDSTIIInt(_TestDSTBase): + def setup_method(self): + self.rdt = int + self.dec = 6 + self.type = 2 + + +class TestDSTIIIDouble(_TestDSTBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 14 + self.type = 3 + + +class TestDSTIIIFloat(_TestDSTBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 7 + self.type = 3 + + +class TestDSTIIIInt(_TestDSTBase): + def setup_method(self): + self.rdt = int + self.dec = 7 + self.type = 3 + + +class TestDSTIVDouble(_TestDSTIVBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 12 + self.type = 4 + + +class TestDSTIVFloat(_TestDSTIVBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 4 + self.type = 4 + + +class TestDSTIVInt(_TestDSTIVBase): + def setup_method(self): + self.rdt = int + self.dec = 5 + self.type = 4 + + +class _TestIDSTBase: + def setup_method(self): + self.rdt = None + self.dec = None + self.type = None + + def test_definition(self): + for i in FFTWDATA_SIZES: + xr, yr, dt = fftw_dst_ref(self.type, i, self.rdt) + x = idst(yr, type=self.type) + if self.type == 1: + x /= 2 * (i+1) + else: + x /= 2 * i + assert_equal(x.dtype, dt) + # XXX: we divide by np.max(x) because the tests fail otherwise. We + # should really use something like assert_array_approx_equal. The + # difference is due to fftw using a better algorithm w.r.t error + # propagation compared to the ones from fftpack. 
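+            # Mirror of the IDCT case: the unnormalized round trip scales by
+            # 2*N for types 2-4 and by 2*(N+1) for the DST-I, which matches
+            # the division by 2*(i+1) or 2*i above.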
+            assert_array_almost_equal(x / np.max(x), xr / np.max(x), decimal=self.dec,
+                                      err_msg="Size %d failed" % i)
+
+
+class TestIDSTIDouble(_TestIDSTBase):
+    def setup_method(self):
+        self.rdt = np.float64
+        self.dec = 12
+        self.type = 1
+
+
+class TestIDSTIFloat(_TestIDSTBase):
+    def setup_method(self):
+        self.rdt = np.float32
+        self.dec = 4
+        self.type = 1
+
+
+class TestIDSTIInt(_TestIDSTBase):
+    def setup_method(self):
+        self.rdt = int
+        self.dec = 4
+        self.type = 1
+
+
+class TestIDSTIIDouble(_TestIDSTBase):
+    def setup_method(self):
+        self.rdt = np.float64
+        self.dec = 14
+        self.type = 2
+
+
+class TestIDSTIIFloat(_TestIDSTBase):
+    def setup_method(self):
+        self.rdt = np.float32
+        self.dec = 6
+        self.type = 2
+
+
+class TestIDSTIIInt(_TestIDSTBase):
+    def setup_method(self):
+        self.rdt = int
+        self.dec = 6
+        self.type = 2
+
+
+class TestIDSTIIIDouble(_TestIDSTBase):
+    def setup_method(self):
+        self.rdt = np.float64
+        self.dec = 14
+        self.type = 3
+
+
+class TestIDSTIIIFloat(_TestIDSTBase):
+    def setup_method(self):
+        self.rdt = np.float32
+        self.dec = 6
+        self.type = 3
+
+
+class TestIDSTIIIInt(_TestIDSTBase):
+    def setup_method(self):
+        self.rdt = int
+        self.dec = 6
+        self.type = 3
+
+
+class TestIDSTIVDouble(_TestIDSTBase):
+    def setup_method(self):
+        self.rdt = np.float64
+        self.dec = 12
+        self.type = 4
+
+
+class TestIDSTIVFloat(_TestIDSTBase):
+    def setup_method(self):
+        self.rdt = np.float32
+        self.dec = 6
+        self.type = 4
+
+
+class TestIDSTIVInt(_TestIDSTBase):
+    def setup_method(self):
+        self.rdt = int
+        self.dec = 6
+        self.type = 4
+
+
+class TestOverwrite:
+    """Check input overwrite behavior."""
+
+    real_dtypes = [np.float32, np.float64]
+
+    def _check(self, x, routine, type, fftsize, axis, norm, overwrite_x, **kw):
+        x2 = x.copy()
+        routine(x2, type, fftsize, axis, norm, overwrite_x=overwrite_x)
+
+        sig = "{}({}{!r}, {!r}, axis={!r}, overwrite_x={!r})".format(
+            routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x)
+        if not overwrite_x:
+            assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig)
+
+    def _check_1d(self, routine, dtype, shape, axis):
+        np.random.seed(1234)
+        if np.issubdtype(dtype, np.complexfloating):
+            data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
+        else:
+            data = np.random.randn(*shape)
+        data = data.astype(dtype)
+
+        for type in [1, 2, 3, 4]:
+            for overwrite_x in [True, False]:
+                for norm in [None, 'ortho']:
+                    self._check(data, routine, type, None, axis, norm,
+                                overwrite_x)
+
+    def test_dct(self):
+        for dtype in self.real_dtypes:
+            self._check_1d(dct, dtype, (16,), -1)
+            self._check_1d(dct, dtype, (16, 2), 0)
+            self._check_1d(dct, dtype, (2, 16), 1)
+
+    def test_idct(self):
+        for dtype in self.real_dtypes:
+            self._check_1d(idct, dtype, (16,), -1)
+            self._check_1d(idct, dtype, (16, 2), 0)
+            self._check_1d(idct, dtype, (2, 16), 1)
+
+    def test_dst(self):
+        for dtype in self.real_dtypes:
+            self._check_1d(dst, dtype, (16,), -1)
+            self._check_1d(dst, dtype, (16, 2), 0)
+            self._check_1d(dst, dtype, (2, 16), 1)
+
+    def test_idst(self):
+        for dtype in self.real_dtypes:
+            self._check_1d(idst, dtype, (16,), -1)
+            self._check_1d(idst, dtype, (16, 2), 0)
+            self._check_1d(idst, dtype, (2, 16), 1)
+
+
+class Test_DCTN_IDCTN:
+    dec = 14
+    dct_type = [1, 2, 3, 4]
+    norms = [None, 'ortho']
+    rstate = np.random.RandomState(1234)
+    shape = (32, 16)
+    data = rstate.randn(*shape)
+
+    @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
+                                                   (dstn, idstn)])
+    @pytest.mark.parametrize('axes', [None,
+                                      1, (1,), [1],
+                                      0, (0,), [0],
+                                      (0, 1), [0, 
1], + (-2, -1), [-2, -1]]) + @pytest.mark.parametrize('dct_type', dct_type) + @pytest.mark.parametrize('norm', ['ortho']) + def test_axes_round_trip(self, fforward, finverse, axes, dct_type, norm): + tmp = fforward(self.data, type=dct_type, axes=axes, norm=norm) + tmp = finverse(tmp, type=dct_type, axes=axes, norm=norm) + assert_array_almost_equal(self.data, tmp, decimal=12) + + @pytest.mark.parametrize('fforward,fforward_ref', [(dctn, dct_2d_ref), + (dstn, dst_2d_ref)]) + @pytest.mark.parametrize('dct_type', dct_type) + @pytest.mark.parametrize('norm', norms) + def test_dctn_vs_2d_reference(self, fforward, fforward_ref, + dct_type, norm): + y1 = fforward(self.data, type=dct_type, axes=None, norm=norm) + y2 = fforward_ref(self.data, type=dct_type, norm=norm) + assert_array_almost_equal(y1, y2, decimal=11) + + @pytest.mark.parametrize('finverse,finverse_ref', [(idctn, idct_2d_ref), + (idstn, idst_2d_ref)]) + @pytest.mark.parametrize('dct_type', dct_type) + @pytest.mark.parametrize('norm', [None, 'ortho']) + def test_idctn_vs_2d_reference(self, finverse, finverse_ref, + dct_type, norm): + fdata = dctn(self.data, type=dct_type, norm=norm) + y1 = finverse(fdata, type=dct_type, norm=norm) + y2 = finverse_ref(fdata, type=dct_type, norm=norm) + assert_array_almost_equal(y1, y2, decimal=11) + + @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn), + (dstn, idstn)]) + def test_axes_and_shape(self, fforward, finverse): + with assert_raises(ValueError, + match="when given, axes and shape arguments" + " have to be of the same length"): + fforward(self.data, shape=self.data.shape[0], axes=(0, 1)) + + with assert_raises(ValueError, + match="when given, axes and shape arguments" + " have to be of the same length"): + fforward(self.data, shape=self.data.shape[0], axes=None) + + with assert_raises(ValueError, + match="when given, axes and shape arguments" + " have to be of the same length"): + fforward(self.data, shape=self.data.shape, axes=0) + + @pytest.mark.parametrize('fforward', [dctn, dstn]) + def test_shape(self, fforward): + tmp = fforward(self.data, shape=(128, 128), axes=None) + assert_equal(tmp.shape, (128, 128)) + + @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn), + (dstn, idstn)]) + @pytest.mark.parametrize('axes', [1, (1,), [1], + 0, (0,), [0]]) + def test_shape_is_none_with_axes(self, fforward, finverse, axes): + tmp = fforward(self.data, shape=None, axes=axes, norm='ortho') + tmp = finverse(tmp, shape=None, axes=axes, norm='ortho') + assert_array_almost_equal(self.data, tmp, decimal=self.dec) diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool2d_backward_cpu_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool2d_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d8ec93edd045a2e69319ea5586a1fb2eb0666710 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool2d_backward_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _adaptive_avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self);
+
+} // namespace cpu
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_batch_norm_impl_index_backward.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_batch_norm_impl_index_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..5e2829eaa3dfe255d555694ab36f29dbc5243296
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_batch_norm_impl_index_backward.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::_batch_norm_impl_index_backward(int impl_index, Tensor input, Tensor grad_output, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var_transform, bool train, float eps, bool[3] output_mask, Tensor reservedSpace) -> (Tensor, Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _batch_norm_impl_index_backward(int64_t impl_index, const at::Tensor & input, const at::Tensor & grad_output, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var_transform, bool train, double eps, ::std::array<bool,3> output_mask, const at::Tensor & reservedSpace) {
+    return at::_ops::_batch_norm_impl_index_backward::call(impl_index, input, grad_output, weight, running_mean, running_var, save_mean, save_var_transform, train, eps, output_mask, reservedSpace);
+}
+
+}
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_choose_qparams_per_tensor_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_choose_qparams_per_tensor_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..504d4b01b81dfdcd7e948009f0b4ee6a21b7b224
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_choose_qparams_per_tensor_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class. 
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _choose_qparams_per_tensor {
+  using schema = ::std::tuple<double,int64_t> (const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_choose_qparams_per_tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_choose_qparams_per_tensor(Tensor self, bool reduce_range=False) -> (float, int)")
+  static ::std::tuple<double,int64_t> call(const at::Tensor & self, bool reduce_range);
+  static ::std::tuple<double,int64_t> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool reduce_range);
+};
+
+}} // namespace at::_ops
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_clear_plan_cache_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_clear_plan_cache_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..3b70d35e04c7a8b594b561159628b7784df7fc43
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_clear_plan_cache_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _cufft_clear_plan_cache {
+  using schema = void (at::DeviceIndex);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_cufft_clear_plan_cache")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_cufft_clear_plan_cache(DeviceIndex device_index) -> ()")
+  static void call(at::DeviceIndex device_index);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::DeviceIndex device_index);
+};
+
+}} // namespace at::_ops
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_dimI_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_dimI_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..23627d845f499a60505dfdaeede66b00ac8604fc
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_dimI_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class. 
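+// Each generated operator struct in these headers follows the same pattern:
+// call() performs a full dispatch of the op from the top of the dispatcher,
+// while redispatch() takes an explicit c10::DispatchKeySet so that callers
+// already inside the dispatcher can continue past keys they have handled.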
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _dimI {
+  using schema = int64_t (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_dimI")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_dimI(Tensor self) -> int")
+  static int64_t call(const at::Tensor & self);
+  static int64_t redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_backward.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..a05795e149af4b001cfd6c01d802e412706b3e2d
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_backward.h
@@ -0,0 +1,47 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::_embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
+inline at::Tensor _embedding_bag_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) {
+    return at::_ops::_embedding_bag_backward::call(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor _embedding_bag_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) {
+    return at::_ops::_embedding_bag_backward::call(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx);
+  }
+}
+
+// aten::_embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? 
per_sample_weights, int padding_idx=-1) -> Tensor
+inline at::Tensor _embedding_bag_backward_symint(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) {
+    return at::_ops::_embedding_bag_backward::call(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor _embedding_bag_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) {
+    return at::_ops::_embedding_bag_backward::call(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx);
+  }
+}
+
+}
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_cos_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_cos_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..6c59e54e6fe05a15f4f7ad63c8b243bc66e0ac83
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_cos_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class. 
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _foreach_cos {
+  using schema = ::std::vector<at::Tensor> (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_cos")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_cos(Tensor[] self) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_cos_ {
+  using schema = void (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_cos_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_cos_(Tensor(a!)[] self) -> ()")
+  static void call(at::TensorList self);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_cos_out {
+  using schema = void (at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_cos")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_cos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
+};
+
+}} // namespace at::_ops
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_pack_padded_sequence_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_pack_padded_sequence_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..b75f2e9d15c1d4fa5424170f9fb7a4105e032e9e
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_pack_padded_sequence_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class. 
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _pack_padded_sequence {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_pack_padded_sequence")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & input, const at::Tensor & lengths, bool batch_first);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & lengths, bool batch_first);
+};
+
+struct TORCH_API _pack_padded_sequence_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, bool, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_pack_padded_sequence")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_pack_padded_sequence.out(Tensor input, Tensor lengths, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")
+  static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & input, const at::Tensor & lengths, bool batch_first, at::Tensor & out0, at::Tensor & out1);
+  static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & lengths, bool batch_first, at::Tensor & out0, at::Tensor & out1);
+};
+
+}} // namespace at::_ops
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_remove_batch_dim.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_remove_batch_dim.h
new file mode 100644
index 0000000000000000000000000000000000000000..a864fb1672799a233cd12cde95297f01ba7e3cc8
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_remove_batch_dim.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::_remove_batch_dim(Tensor self, int level, int batch_size, int out_dim) -> Tensor
+inline at::Tensor _remove_batch_dim(const at::Tensor & self, int64_t level, int64_t batch_size, int64_t out_dim) {
+    return at::_ops::_remove_batch_dim::call(self, level, batch_size, out_dim);
+}
+
+}
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_from_tensor_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_from_tensor_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..05ebe69adbdd502fe8bb6d7ef644317bd7e9d4b0
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_from_tensor_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class. 
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _reshape_from_tensor {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_reshape_from_tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_reshape_from_tensor(Tensor self, Tensor shape) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & shape);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & shape);
+};
+
+}} // namespace at::_ops
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_attention_math_compositeimplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_attention_math_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..bea0f24a7146a528700a2a784ceda80a0ac4f70b
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_attention_math_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_attention_math(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask={}, double dropout_p=0.0, bool is_causal=false, const c10::optional<at::Tensor> & dropout_mask={}, c10::optional<double> scale=c10::nullopt);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_addmm.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_addmm.h
new file mode 100644
index 0000000000000000000000000000000000000000..1cf6d860a3ae1a558bd0916e609636dd2b14a7a7
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_addmm.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::_sparse_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
+inline at::Tensor _sparse_addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
+    return at::_ops::_sparse_addmm::call(self, mat1, mat2, beta, alpha);
+}
+
+// aten::_sparse_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) 
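+// Both wrappers below lower to the same at::_ops entry: the _out form takes
+// the output tensor first so the trailing arguments can keep their defaults,
+// while the _outf form mirrors the schema order, with out last and every
+// argument explicit.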
+inline at::Tensor & _sparse_addmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
+    return at::_ops::_sparse_addmm_out::call(self, mat1, mat2, beta, alpha, out);
+}
+// aten::_sparse_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _sparse_addmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
+    return at::_ops::_sparse_addmm_out::call(self, mat1, mat2, beta, alpha, out);
+}
+
+}
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_compressed_tensor_unsafe_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_compressed_tensor_unsafe_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..5a3b672c7c4df7c51f9386201182ba8cd0dbe9ee
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_compressed_tensor_unsafe_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={});
+} // namespace native
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma.h
new file mode 100644
index 0000000000000000000000000000000000000000..01ced1d19f99d8e761938c9cd17f9b3a1e399ebb
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::_standard_gamma(Tensor self, Generator? generator=None) -> Tensor
+inline at::Tensor _standard_gamma(const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt) {
+    return at::_ops::_standard_gamma::call(self, generator);
+}
+
+// aten::_standard_gamma.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _standard_gamma_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt) {
+    return at::_ops::_standard_gamma_out::call(self, generator, out);
+}
+// aten::_standard_gamma.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & _standard_gamma_outf(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) {
+    return at::_ops::_standard_gamma_out::call(self, generator, out);
+}
+
+}
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..66b85fa5ffb5c9ef5c890e0d6e2d0c8fa1bfd439
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _standard_gamma_out(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out);
+TORCH_API at::Tensor _s_gamma_cpu(const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor _s_gamma_cuda(const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt);
+} // namespace native
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_to_copy.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_to_copy.h
new file mode 100644
index 0000000000000000000000000000000000000000..6ecaaf983f434ead4a26fe5d3c4c39b317030176
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_to_copy.h
@@ -0,0 +1,43 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor
+inline at::Tensor _to_copy(const at::Tensor & self, at::TensorOptions options={}, bool non_blocking=false, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+    return at::_ops::_to_copy::call(self, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), non_blocking, c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+}
+// aten::_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor
+inline at::Tensor _to_copy(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, c10::optional<at::MemoryFormat> memory_format) {
+    return at::_ops::_to_copy::call(self, dtype, layout, device, pin_memory, non_blocking, memory_format);
+}
+
+// aten::_to_copy.out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _to_copy_out(at::Tensor & out, const at::Tensor & self, bool non_blocking=false, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+    return at::_ops::_to_copy_out::call(self, non_blocking, memory_format, out);
+}
+// aten::_to_copy.out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) 
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_compositeexplicitautogradnonfunctional_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c938c24c6076493b391af4028cf5ed7f6ddce976
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor _upsample_nearest_exact2d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor _upsample_nearest_exact2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/all_compositeimplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/all_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d1e71888d22493972849fedf700d9fbdae737cd3
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/all_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor all(const at::Tensor & self, at::Dimname dim, bool keepdim=false);
+TORCH_API at::Tensor & all_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, bool keepdim=false);
+TORCH_API at::Tensor & all_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_with_logits_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_with_logits_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..b435cbfd512be92deabaa6e7c714a1dca95f7810
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_with_logits_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API binary_cross_entropy_with_logits {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::binary_cross_entropy_with_logits")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & pos_weight, int64_t reduction);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & pos_weight, int64_t reduction);
+};
+
+struct TORCH_API binary_cross_entropy_with_logits_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, int64_t, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::binary_cross_entropy_with_logits")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "binary_cross_entropy_with_logits.out(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)")
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, const c10::optional & pos_weight, int64_t reduction, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, const c10::optional & pos_weight, int64_t reduction, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/conv3d_compositeimplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/conv3d_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9ba705902773db5c52c07fced97750c5afc8277b --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/conv3d_compositeimplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor conv3d(const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, int64_t groups=1); +TORCH_API at::Tensor conv3d_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1); +TORCH_API at::Tensor conv3d(const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation=1, int64_t groups=1); +TORCH_API at::Tensor conv3d_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/crow_indices_copy.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/crow_indices_copy.h new file mode 100644 index 0000000000000000000000000000000000000000..e6ad76ecb1bb264b0501b533921867b25e6402e1 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/crow_indices_copy.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::crow_indices_copy(Tensor self) -> Tensor +inline at::Tensor crow_indices_copy(const at::Tensor & self) { + return at::_ops::crow_indices_copy::call(self); +} + +// aten::crow_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_affine_grid_generator_backward_compositeexplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_affine_grid_generator_backward_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..06300d430b9ede5eadcc5129fcbe09d192865e8d
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_affine_grid_generator_backward_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & cudnn_affine_grid_generator_backward_out(at::Tensor & out, const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W);
+TORCH_API at::Tensor & cudnn_affine_grid_generator_backward_outf(const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_grid_sampler_compositeexplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_grid_sampler_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8423d3881dfbebb212c422f4e66484bd64d7e3e2
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_grid_sampler_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & cudnn_grid_sampler_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & grid);
+TORCH_API at::Tensor & cudnn_grid_sampler_outf(const at::Tensor & self, const at::Tensor & grid, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/dropout_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/dropout_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..fdc59661fafaa00a81f1ac6b692e56f5c89e0823
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/dropout_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor dropout(const at::Tensor & input, double p, bool train);
+TORCH_API at::Tensor & dropout_(at::Tensor & self, double p, bool train);
+} // namespace native
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/erfc_meta_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/erfc_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6e0b4dec8808e4052cc11f4d927cbffdccaf25be
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/erfc_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor erfc(const at::Tensor & self);
+TORCH_API at::Tensor & erfc_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & erfc_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & erfc_(at::Tensor & self);
+
+} // namespace meta
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..0dc1bed7b745622fc776e90da241989ef42286dc
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API fft_rfft {
+  using schema = at::Tensor (const at::Tensor &, c10::optional<c10::SymInt>, int64_t, c10::optional<c10::string_view>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fft_rfft")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fft_rfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, c10::optional<c10::SymInt> n, int64_t dim, c10::optional<c10::string_view> norm);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<c10::SymInt> n, int64_t dim, c10::optional<c10::string_view> norm);
+};
+
+struct TORCH_API fft_rfft_out {
+  using schema = at::Tensor & (const at::Tensor &, c10::optional<c10::SymInt>, int64_t, c10::optional<c10::string_view>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fft_rfft")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fft_rfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, c10::optional<c10::SymInt> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<c10::SymInt> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_meta.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..1c9fc3c9067ac995e6eafe0a9077bf871d331c6a
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_linalg_lu_solve : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_backward_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..ddbba218b0c8d5e636c57fed91cc53c1a8f183a5
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API log_sigmoid_backward_grad_input {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::log_sigmoid_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input);
+};
+
+struct TORCH_API log_sigmoid_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::log_sigmoid_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer);
+};
+
+}} // namespace at::_ops
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/rnn_tanh_cell_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/rnn_tanh_cell_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..99e254426f259cbedd7b6428ac46e800e3097041
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/rnn_tanh_cell_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API rnn_tanh_cell {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rnn_tanh_cell")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor")
b_hh=None) -> Tensor") + static at::Tensor call(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional & b_ih, const c10::optional & b_hh); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional & b_ih, const c10::optional & b_hh); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/sinc_compositeexplicitautogradnonfunctional_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/sinc_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6c9cf8aa1a0584700975487adf59ac16d5e0e610 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/sinc_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor sinc(const at::Tensor & self); +TORCH_API at::Tensor & sinc_(at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/softplus_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/softplus_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..7b1e76e8f93f39d31b1e94d46f2f70902b278e33 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/softplus_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API softplus_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::softplus") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & out); +}; + +struct TORCH_API softplus { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::softplus") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_legendre_polynomial_p_cpu_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_legendre_polynomial_p_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6cd978f1f3cb2d3f9bc5b85cbe32d2c8edda7f54 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_legendre_polynomial_p_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_sinc_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_sinc_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..0edd1b6e812d5e9455fadb7b106ef36922f0e5b0
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_sinc_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor special_sinc(const at::Tensor & self);
+TORCH_API at::Tensor & special_sinc_out(const at::Tensor & self, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_xlog1py_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_xlog1py_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..72d499b31f20da5f6041e01423801b680a8bd6b7
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_xlog1py_native.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/special_xlog1py_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_special_xlog1py_out : public at::meta::structured_special_xlog1py {
+void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out);
+};
+TORCH_API at::Tensor special_xlog1py(const at::Scalar & self, const at::Tensor & other);
+TORCH_API at::Tensor & special_xlog1py_out(const at::Scalar & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor special_xlog1py(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & special_xlog1py_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_xlog1py_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_xlog1py_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..33bb94f26294727d5c2b66e655dc36fb45a266a4
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_xlog1py_ops.h
@@ -0,0 +1,83 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API special_xlog1py {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_xlog1py")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_xlog1py(Tensor self, Tensor other) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API special_xlog1py_self_scalar {
+  using schema = at::Tensor (const at::Scalar &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_xlog1py")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "self_scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_xlog1py.self_scalar(Scalar self, Tensor other) -> Tensor")
+  static at::Tensor call(const at::Scalar & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other);
+};
+
+struct TORCH_API special_xlog1py_other_scalar {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_xlog1py")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "other_scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_xlog1py.other_scalar(Tensor self, Scalar other) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Scalar & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other);
+};
+
+struct TORCH_API special_xlog1py_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_xlog1py")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_xlog1py.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+};
+
+struct TORCH_API special_xlog1py_self_scalar_out {
+  using schema = at::Tensor & (const at::Scalar &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_xlog1py")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "self_scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_xlog1py.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
out) -> Tensor(a!)") + static at::Tensor & call(const at::Scalar & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API special_xlog1py_other_scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_xlog1py") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "other_scalar_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_xlog1py.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/split_copy_compositeexplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/split_copy_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..117be69cc3651cd6eb47ad13e020be29a8535655 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/split_copy_compositeexplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API void split_copy_out(at::TensorList out, const at::Tensor & self, int64_t split_size, int64_t dim=0); +TORCH_API void split_copy_outf(const at::Tensor & self, int64_t split_size, int64_t dim, at::TensorList out); +TORCH_API void split_copy_symint_out(at::TensorList out, const at::Tensor & self, c10::SymInt split_size, int64_t dim=0); +TORCH_API void split_copy_symint_outf(const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/sum_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/sum_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..8b1219cd4193152bad339fd3ae1067a8e9a261dc --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/sum_ops.h @@ -0,0 +1,83 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API sum {
+  using schema = at::Tensor (const at::Tensor &, c10::optional<at::ScalarType>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sum")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sum(Tensor self, *, ScalarType? dtype=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, c10::optional<at::ScalarType> dtype);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype);
+};
+
+struct TORCH_API sum_dim_IntList {
+  using schema = at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, c10::optional<at::ScalarType>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sum")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dim_IntList")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype);
+};
+
+struct TORCH_API sum_dim_DimnameList {
+  using schema = at::Tensor (const at::Tensor &, at::DimnameList, bool, c10::optional<at::ScalarType>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sum")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dim_DimnameList")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype);
+};
+
+struct TORCH_API sum_IntList_out {
+  using schema = at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, bool, c10::optional<at::ScalarType>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sum")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "IntList_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)")
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional dtype, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional dtype, at::Tensor & out); +}; + +struct TORCH_API sum_DimnameList_out { + using schema = at::Tensor & (const at::Tensor &, at::DimnameList, bool, c10::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sum") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "DimnameList_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional dtype, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional dtype, at::Tensor & out); +}; + +struct TORCH_API sum_out { + using schema = at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sum") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sum.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, c10::optional dtype, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional dtype, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/swapdims_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/swapdims_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..31869c9f20dd321e3fcf6e7cfa40b2c7ead9a8b5 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/swapdims_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API swapdims {
+  using schema = at::Tensor (const at::Tensor &, int64_t, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::swapdims")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "swapdims(Tensor(a) self, int dim0, int dim1) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self, int64_t dim0, int64_t dim1);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1);
+};
+
+struct TORCH_API swapdims_ {
+  using schema = at::Tensor & (at::Tensor &, int64_t, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::swapdims_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "swapdims_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, int64_t dim0, int64_t dim1);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim0, int64_t dim1);
+};
+
+}} // namespace at::_ops
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/to_mkldnn_cpu_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/to_mkldnn_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8eeeba6363231cfe8c50ef7f718e3c6a509bf623
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/to_mkldnn_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor to_mkldnn(const at::Tensor & self, c10::optional<at::ScalarType> dtype=c10::nullopt);
+
+} // namespace cpu
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/triangular_solve_meta.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/triangular_solve_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..70760f9cba5efd983f6fcdf61bd43007106d842a
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/triangular_solve_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_triangular_solve : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/triu_indices_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/triu_indices_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..6f9a263daa3363b6a100537d6c88ac6e5abf7c66
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/triu_indices_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API triu_indices {
+  using schema = at::Tensor (int64_t, int64_t, int64_t, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::triu_indices")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
+  static at::Tensor call(int64_t row, int64_t col, int64_t offset, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t row, int64_t col, int64_t offset, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+};
+
+struct TORCH_API triu_indices_out {
+  using schema = at::Tensor & (int64_t, int64_t, int64_t, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::triu_indices")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "triu_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)")
out) -> Tensor(a!)") + static at::Tensor & call(int64_t row, int64_t col, int64_t offset, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t row, int64_t col, int64_t offset, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/unfold_meta_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/unfold_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..568495b7acbb85a6c03bdacc70da5259f14368ef --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/unfold_meta_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor unfold(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step); + +} // namespace meta +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bicubic2d_meta.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bicubic2d_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..48c7575bb9b8010ceb77d7334a9233ff38b20d29 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bicubic2d_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_upsample_bicubic2d : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, at::ArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w); +}; + +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest1d_backward_meta_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest1d_backward_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6260b44f2a7fe9405761aecdd0a2505c724b6770 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest1d_backward_meta_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest3d_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest3d_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..277cefbb493de29a1efbd0df258b26625a81814a
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest3d_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API upsample_nearest3d_vec {
+  using schema = at::Tensor (const at::Tensor &, at::OptionalSymIntArrayRef, c10::optional<at::ArrayRef<double>>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_nearest3d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "vec")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor")
+  static at::Tensor call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors);
+};
+
+struct TORCH_API upsample_nearest3d_out {
+  using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, c10::optional<double>, c10::optional<double>, c10::optional<double>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_nearest3d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)")
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w, at::Tensor & out); +}; + +struct TORCH_API upsample_nearest3d { + using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, c10::optional, c10::optional, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_nearest3d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w); +}; + +}} // namespace at::_ops diff --git a/vllm/lib/python3.10/site-packages/wandb/proto/v3/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wandb/proto/v3/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..589d5ba0e5fc918669e2f26ee73d912632447470 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wandb/proto/v3/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/wandb/proto/v3/__pycache__/wandb_telemetry_pb2.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wandb/proto/v3/__pycache__/wandb_telemetry_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a47fcfd15938e5d3452191a5ba36cd887f2fbade Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wandb/proto/v3/__pycache__/wandb_telemetry_pb2.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/wandb/proto/v4/__pycache__/wandb_telemetry_pb2.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wandb/proto/v4/__pycache__/wandb_telemetry_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ad0758e1943517e5ae89346107d5a3ce641fc0f Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wandb/proto/v4/__pycache__/wandb_telemetry_pb2.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/wandb/proto/v5/__pycache__/wandb_server_pb2.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wandb/proto/v5/__pycache__/wandb_server_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47075d18432e35726138347e0c6f634ab5ccd86e Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wandb/proto/v5/__pycache__/wandb_server_pb2.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/wandb/proto/v5/__pycache__/wandb_settings_pb2.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wandb/proto/v5/__pycache__/wandb_settings_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f5359ecae617a982e998414e58f803e9227107d Binary files /dev/null and 
diff --git a/vllm/lib/python3.10/site-packages/wandb/proto/v5/wandb_base_pb2.py b/vllm/lib/python3.10/site-packages/wandb/proto/v5/wandb_base_pb2.py
new file mode 100644
index 0000000000000000000000000000000000000000..d02477cff9f03584418a3a316e8cae7db7048f91
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/wandb/proto/v5/wandb_base_pb2.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: wandb/proto/wandb_base.proto
+# Protobuf Python Version: 5.26.1
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1cwandb/proto/wandb_base.proto\x12\x0ewandb_internal\"6\n\x0b_RecordInfo\x12\x11\n\tstream_id\x18\x01 \x01(\t\x12\x14\n\x0c_tracelog_id\x18\x64 \x01(\t\"!\n\x0c_RequestInfo\x12\x11\n\tstream_id\x18\x01 \x01(\t\"#\n\x0b_ResultInfo\x12\x14\n\x0c_tracelog_id\x18\x64 \x01(\tB\x1bZ\x19\x63ore/pkg/service_go_protob\x06proto3')
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'wandb.proto.wandb_base_pb2', _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+  _globals['DESCRIPTOR']._loaded_options = None
+  _globals['DESCRIPTOR']._serialized_options = b'Z\031core/pkg/service_go_proto'
+  _globals['__RECORDINFO']._serialized_start=48
+  _globals['__RECORDINFO']._serialized_end=102
+  _globals['__REQUESTINFO']._serialized_start=104
+  _globals['__REQUESTINFO']._serialized_end=137
+  _globals['__RESULTINFO']._serialized_start=139
+  _globals['__RESULTINFO']._serialized_end=174
+# @@protoc_insertion_point(module_scope)
diff --git a/vllm/lib/python3.10/site-packages/wandb/proto/v5/wandb_internal_pb2.py b/vllm/lib/python3.10/site-packages/wandb/proto/v5/wandb_internal_pb2.py
new file mode 100644
index 0000000000000000000000000000000000000000..0d0f8182cc3983a67b4fe6d0634517300fa1c199
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/wandb/proto/v5/wandb_internal_pb2.py
@@ -0,0 +1,379 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: wandb/proto/wandb_internal.proto +# Protobuf Python Version: 5.26.1 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 +from wandb.proto import wandb_base_pb2 as wandb_dot_proto_dot_wandb__base__pb2 +from wandb.proto import wandb_telemetry_pb2 as wandb_dot_proto_dot_wandb__telemetry__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n wandb/proto/wandb_internal.proto\x12\x0ewandb_internal\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cwandb/proto/wandb_base.proto\x1a!wandb/proto/wandb_telemetry.proto\"\x95\t\n\x06Record\x12\x0b\n\x03num\x18\x01 \x01(\x03\x12\x30\n\x07history\x18\x02 \x01(\x0b\x32\x1d.wandb_internal.HistoryRecordH\x00\x12\x30\n\x07summary\x18\x03 \x01(\x0b\x32\x1d.wandb_internal.SummaryRecordH\x00\x12.\n\x06output\x18\x04 \x01(\x0b\x32\x1c.wandb_internal.OutputRecordH\x00\x12.\n\x06\x63onfig\x18\x05 \x01(\x0b\x32\x1c.wandb_internal.ConfigRecordH\x00\x12,\n\x05\x66iles\x18\x06 \x01(\x0b\x32\x1b.wandb_internal.FilesRecordH\x00\x12,\n\x05stats\x18\x07 \x01(\x0b\x32\x1b.wandb_internal.StatsRecordH\x00\x12\x32\n\x08\x61rtifact\x18\x08 \x01(\x0b\x32\x1e.wandb_internal.ArtifactRecordH\x00\x12,\n\x08tbrecord\x18\t \x01(\x0b\x32\x18.wandb_internal.TBRecordH\x00\x12,\n\x05\x61lert\x18\n \x01(\x0b\x32\x1b.wandb_internal.AlertRecordH\x00\x12\x34\n\ttelemetry\x18\x0b \x01(\x0b\x32\x1f.wandb_internal.TelemetryRecordH\x00\x12.\n\x06metric\x18\x0c \x01(\x0b\x32\x1c.wandb_internal.MetricRecordH\x00\x12\x35\n\noutput_raw\x18\r \x01(\x0b\x32\x1f.wandb_internal.OutputRawRecordH\x00\x12(\n\x03run\x18\x11 \x01(\x0b\x32\x19.wandb_internal.RunRecordH\x00\x12-\n\x04\x65xit\x18\x12 \x01(\x0b\x32\x1d.wandb_internal.RunExitRecordH\x00\x12,\n\x05\x66inal\x18\x14 \x01(\x0b\x32\x1b.wandb_internal.FinalRecordH\x00\x12.\n\x06header\x18\x15 \x01(\x0b\x32\x1c.wandb_internal.HeaderRecordH\x00\x12.\n\x06\x66ooter\x18\x16 \x01(\x0b\x32\x1c.wandb_internal.FooterRecordH\x00\x12\x39\n\npreempting\x18\x17 \x01(\x0b\x32#.wandb_internal.RunPreemptingRecordH\x00\x12\x34\n\x12noop_link_artifact\x18\x18 \x01(\x0b\x32\x16.google.protobuf.EmptyH\x00\x12\x39\n\x0cuse_artifact\x18\x19 \x01(\x0b\x32!.wandb_internal.UseArtifactRecordH\x00\x12*\n\x07request\x18\x64 \x01(\x0b\x32\x17.wandb_internal.RequestH\x00\x12(\n\x07\x63ontrol\x18\x10 \x01(\x0b\x32\x17.wandb_internal.Control\x12\x0c\n\x04uuid\x18\x13 \x01(\t\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfoB\r\n\x0brecord_type\"\xa8\x01\n\x07\x43ontrol\x12\x10\n\x08req_resp\x18\x01 \x01(\x08\x12\r\n\x05local\x18\x02 \x01(\x08\x12\x10\n\x08relay_id\x18\x03 \x01(\t\x12\x14\n\x0cmailbox_slot\x18\x04 \x01(\t\x12\x13\n\x0b\x61lways_send\x18\x05 \x01(\x08\x12\x14\n\x0c\x66low_control\x18\x06 \x01(\x08\x12\x12\n\nend_offset\x18\x07 \x01(\x03\x12\x15\n\rconnection_id\x18\x08 \x01(\t\"\xf3\x03\n\x06Result\x12\x35\n\nrun_result\x18\x11 \x01(\x0b\x32\x1f.wandb_internal.RunUpdateResultH\x00\x12\x34\n\x0b\x65xit_result\x18\x12 \x01(\x0b\x32\x1d.wandb_internal.RunExitResultH\x00\x12\x33\n\nlog_result\x18\x14 
\x01(\x0b\x32\x1d.wandb_internal.HistoryResultH\x00\x12\x37\n\x0esummary_result\x18\x15 \x01(\x0b\x32\x1d.wandb_internal.SummaryResultH\x00\x12\x35\n\routput_result\x18\x16 \x01(\x0b\x32\x1c.wandb_internal.OutputResultH\x00\x12\x35\n\rconfig_result\x18\x17 \x01(\x0b\x32\x1c.wandb_internal.ConfigResultH\x00\x12,\n\x08response\x18\x64 \x01(\x0b\x32\x18.wandb_internal.ResponseH\x00\x12(\n\x07\x63ontrol\x18\x10 \x01(\x0b\x32\x17.wandb_internal.Control\x12\x0c\n\x04uuid\x18\x18 \x01(\t\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._ResultInfoB\r\n\x0bresult_type\":\n\x0b\x46inalRecord\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"b\n\x0bVersionInfo\x12\x10\n\x08producer\x18\x01 \x01(\t\x12\x14\n\x0cmin_consumer\x18\x02 \x01(\t\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"n\n\x0cHeaderRecord\x12\x31\n\x0cversion_info\x18\x01 \x01(\x0b\x32\x1b.wandb_internal.VersionInfo\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\";\n\x0c\x46ooterRecord\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"\xde\x04\n\tRunRecord\x12\x0e\n\x06run_id\x18\x01 \x01(\t\x12\x0e\n\x06\x65ntity\x18\x02 \x01(\t\x12\x0f\n\x07project\x18\x03 \x01(\t\x12,\n\x06\x63onfig\x18\x04 \x01(\x0b\x32\x1c.wandb_internal.ConfigRecord\x12.\n\x07summary\x18\x05 \x01(\x0b\x32\x1d.wandb_internal.SummaryRecord\x12\x11\n\trun_group\x18\x06 \x01(\t\x12\x10\n\x08job_type\x18\x07 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x08 \x01(\t\x12\r\n\x05notes\x18\t \x01(\t\x12\x0c\n\x04tags\x18\n \x03(\t\x12\x30\n\x08settings\x18\x0b \x01(\x0b\x32\x1e.wandb_internal.SettingsRecord\x12\x10\n\x08sweep_id\x18\x0c \x01(\t\x12\x0c\n\x04host\x18\r \x01(\t\x12\x15\n\rstarting_step\x18\x0e \x01(\x03\x12\x12\n\nstorage_id\x18\x10 \x01(\t\x12.\n\nstart_time\x18\x11 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07resumed\x18\x12 \x01(\x08\x12\x32\n\ttelemetry\x18\x13 \x01(\x0b\x32\x1f.wandb_internal.TelemetryRecord\x12\x0f\n\x07runtime\x18\x14 \x01(\x05\x12*\n\x03git\x18\x15 \x01(\x0b\x32\x1d.wandb_internal.GitRepoRecord\x12\x0e\n\x06\x66orked\x18\x16 \x01(\x08\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\";\n\rGitRepoRecord\x12\x1a\n\nremote_url\x18\x01 \x01(\tR\x06remote\x12\x0e\n\x06\x63ommit\x18\x02 \x01(\t\"c\n\x0fRunUpdateResult\x12&\n\x03run\x18\x01 \x01(\x0b\x32\x19.wandb_internal.RunRecord\x12(\n\x05\x65rror\x18\x02 \x01(\x0b\x32\x19.wandb_internal.ErrorInfo\"\xac\x01\n\tErrorInfo\x12\x0f\n\x07message\x18\x01 \x01(\t\x12\x31\n\x04\x63ode\x18\x02 \x01(\x0e\x32#.wandb_internal.ErrorInfo.ErrorCode\"[\n\tErrorCode\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x11\n\rCOMMUNICATION\x10\x01\x12\x12\n\x0e\x41UTHENTICATION\x10\x02\x12\t\n\x05USAGE\x10\x03\x12\x0f\n\x0bUNSUPPORTED\x10\x04\"`\n\rRunExitRecord\x12\x11\n\texit_code\x18\x01 \x01(\x05\x12\x0f\n\x07runtime\x18\x02 \x01(\x05\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"\x0f\n\rRunExitResult\"B\n\x13RunPreemptingRecord\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"\x15\n\x13RunPreemptingResult\"i\n\x0eSettingsRecord\x12*\n\x04item\x18\x01 \x03(\x0b\x32\x1c.wandb_internal.SettingsItem\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"/\n\x0cSettingsItem\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x12\n\nvalue_json\x18\x10 \x01(\t\"\x1a\n\x0bHistoryStep\x12\x0b\n\x03num\x18\x01 \x01(\x03\"\x92\x01\n\rHistoryRecord\x12)\n\x04item\x18\x01 
\x03(\x0b\x32\x1b.wandb_internal.HistoryItem\x12)\n\x04step\x18\x02 \x01(\x0b\x32\x1b.wandb_internal.HistoryStep\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"B\n\x0bHistoryItem\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x12\n\nnested_key\x18\x02 \x03(\t\x12\x12\n\nvalue_json\x18\x10 \x01(\t\"\x0f\n\rHistoryResult\"\xdc\x01\n\x0cOutputRecord\x12<\n\x0boutput_type\x18\x01 \x01(\x0e\x32\'.wandb_internal.OutputRecord.OutputType\x12-\n\ttimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0c\n\x04line\x18\x03 \x01(\t\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"$\n\nOutputType\x12\n\n\x06STDERR\x10\x00\x12\n\n\x06STDOUT\x10\x01\"\x0e\n\x0cOutputResult\"\xe2\x01\n\x0fOutputRawRecord\x12?\n\x0boutput_type\x18\x01 \x01(\x0e\x32*.wandb_internal.OutputRawRecord.OutputType\x12-\n\ttimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0c\n\x04line\x18\x03 \x01(\t\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"$\n\nOutputType\x12\n\n\x06STDERR\x10\x00\x12\n\n\x06STDOUT\x10\x01\"\x11\n\x0fOutputRawResult\"\x98\x03\n\x0cMetricRecord\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x11\n\tglob_name\x18\x02 \x01(\t\x12\x13\n\x0bstep_metric\x18\x04 \x01(\t\x12\x19\n\x11step_metric_index\x18\x05 \x01(\x05\x12.\n\x07options\x18\x06 \x01(\x0b\x32\x1d.wandb_internal.MetricOptions\x12.\n\x07summary\x18\x07 \x01(\x0b\x32\x1d.wandb_internal.MetricSummary\x12\x35\n\x04goal\x18\x08 \x01(\x0e\x32\'.wandb_internal.MetricRecord.MetricGoal\x12/\n\x08_control\x18\t \x01(\x0b\x32\x1d.wandb_internal.MetricControl\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"B\n\nMetricGoal\x12\x0e\n\nGOAL_UNSET\x10\x00\x12\x11\n\rGOAL_MINIMIZE\x10\x01\x12\x11\n\rGOAL_MAXIMIZE\x10\x02\"\x0e\n\x0cMetricResult\"C\n\rMetricOptions\x12\x11\n\tstep_sync\x18\x01 \x01(\x08\x12\x0e\n\x06hidden\x18\x02 \x01(\x08\x12\x0f\n\x07\x64\x65\x66ined\x18\x03 \x01(\x08\"\"\n\rMetricControl\x12\x11\n\toverwrite\x18\x01 \x01(\x08\"o\n\rMetricSummary\x12\x0b\n\x03min\x18\x01 \x01(\x08\x12\x0b\n\x03max\x18\x02 \x01(\x08\x12\x0c\n\x04mean\x18\x03 \x01(\x08\x12\x0c\n\x04\x62\x65st\x18\x04 \x01(\x08\x12\x0c\n\x04last\x18\x05 \x01(\x08\x12\x0c\n\x04none\x18\x06 \x01(\x08\x12\x0c\n\x04\x63opy\x18\x07 \x01(\x08\"\x93\x01\n\x0c\x43onfigRecord\x12*\n\x06update\x18\x01 \x03(\x0b\x32\x1a.wandb_internal.ConfigItem\x12*\n\x06remove\x18\x02 \x03(\x0b\x32\x1a.wandb_internal.ConfigItem\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"A\n\nConfigItem\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x12\n\nnested_key\x18\x02 \x03(\t\x12\x12\n\nvalue_json\x18\x10 \x01(\t\"\x0e\n\x0c\x43onfigResult\"\x96\x01\n\rSummaryRecord\x12+\n\x06update\x18\x01 \x03(\x0b\x32\x1b.wandb_internal.SummaryItem\x12+\n\x06remove\x18\x02 \x03(\x0b\x32\x1b.wandb_internal.SummaryItem\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"B\n\x0bSummaryItem\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x12\n\nnested_key\x18\x02 \x03(\t\x12\x12\n\nvalue_json\x18\x10 \x01(\t\"\x0f\n\rSummaryResult\"d\n\x0b\x46ilesRecord\x12(\n\x05\x66iles\x18\x01 \x03(\x0b\x32\x19.wandb_internal.FilesItem\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"\xec\x01\n\tFilesItem\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x34\n\x06policy\x18\x02 \x01(\x0e\x32$.wandb_internal.FilesItem.PolicyType\x12\x30\n\x04type\x18\x03 
\x01(\x0e\x32\".wandb_internal.FilesItem.FileType\"(\n\nPolicyType\x12\x07\n\x03NOW\x10\x00\x12\x07\n\x03\x45ND\x10\x01\x12\x08\n\x04LIVE\x10\x02\"9\n\x08\x46ileType\x12\t\n\x05OTHER\x10\x00\x12\t\n\x05WANDB\x10\x01\x12\t\n\x05MEDIA\x10\x02\x12\x0c\n\x08\x41RTIFACT\x10\x03J\x04\x08\x10\x10\x11\"\r\n\x0b\x46ilesResult\"\xe6\x01\n\x0bStatsRecord\x12\x39\n\nstats_type\x18\x01 \x01(\x0e\x32%.wandb_internal.StatsRecord.StatsType\x12-\n\ttimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\'\n\x04item\x18\x03 \x03(\x0b\x32\x19.wandb_internal.StatsItem\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"\x17\n\tStatsType\x12\n\n\x06SYSTEM\x10\x00\",\n\tStatsItem\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x12\n\nvalue_json\x18\x10 \x01(\t\"\xe7\x03\n\x0e\x41rtifactRecord\x12\x0e\n\x06run_id\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x0e\n\x06\x65ntity\x18\x03 \x01(\t\x12\x0c\n\x04type\x18\x04 \x01(\t\x12\x0c\n\x04name\x18\x05 \x01(\t\x12\x0e\n\x06\x64igest\x18\x06 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x07 \x01(\t\x12\x10\n\x08metadata\x18\x08 \x01(\t\x12\x14\n\x0cuser_created\x18\t \x01(\x08\x12\x18\n\x10use_after_commit\x18\n \x01(\x08\x12\x0f\n\x07\x61liases\x18\x0b \x03(\t\x12\x32\n\x08manifest\x18\x0c \x01(\x0b\x32 .wandb_internal.ArtifactManifest\x12\x16\n\x0e\x64istributed_id\x18\r \x01(\t\x12\x10\n\x08\x66inalize\x18\x0e \x01(\x08\x12\x11\n\tclient_id\x18\x0f \x01(\t\x12\x1a\n\x12sequence_client_id\x18\x10 \x01(\t\x12\x0f\n\x07\x62\x61se_id\x18\x11 \x01(\t\x12\x1c\n\x14ttl_duration_seconds\x18\x12 \x01(\x03\x12\x0c\n\x04tags\x18\x13 \x03(\t\x12\x19\n\x11incremental_beta1\x18\x64 \x01(\x08\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"\xd8\x01\n\x10\x41rtifactManifest\x12\x0f\n\x07version\x18\x01 \x01(\x05\x12\x16\n\x0estorage_policy\x18\x02 \x01(\t\x12\x46\n\x15storage_policy_config\x18\x03 \x03(\x0b\x32\'.wandb_internal.StoragePolicyConfigItem\x12\x37\n\x08\x63ontents\x18\x04 \x03(\x0b\x32%.wandb_internal.ArtifactManifestEntry\x12\x1a\n\x12manifest_file_path\x18\x05 \x01(\t\"\xcf\x01\n\x15\x41rtifactManifestEntry\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x0e\n\x06\x64igest\x18\x02 \x01(\t\x12\x0b\n\x03ref\x18\x03 \x01(\t\x12\x0c\n\x04size\x18\x04 \x01(\x03\x12\x10\n\x08mimetype\x18\x05 \x01(\t\x12\x12\n\nlocal_path\x18\x06 \x01(\t\x12\x19\n\x11\x62irth_artifact_id\x18\x07 \x01(\t\x12\x12\n\nskip_cache\x18\x08 \x01(\x08\x12(\n\x05\x65xtra\x18\x10 \x03(\x0b\x32\x19.wandb_internal.ExtraItem\",\n\tExtraItem\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x12\n\nvalue_json\x18\x02 \x01(\t\":\n\x17StoragePolicyConfigItem\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x12\n\nvalue_json\x18\x02 \x01(\t\"\x10\n\x0e\x41rtifactResult\"\x14\n\x12LinkArtifactResult\"\xf0\x01\n\x13LinkArtifactRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x11\n\tserver_id\x18\x02 \x01(\t\x12\x16\n\x0eportfolio_name\x18\x03 \x01(\t\x12\x18\n\x10portfolio_entity\x18\x04 \x01(\t\x12\x19\n\x11portfolio_project\x18\x05 \x01(\t\x12\x19\n\x11portfolio_aliases\x18\x06 \x03(\t\x12\x1e\n\x16portfolio_organization\x18\x07 \x01(\t\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"-\n\x14LinkArtifactResponse\x12\x15\n\rerror_message\x18\x01 \x01(\t\"h\n\x08TBRecord\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\x12\x0f\n\x07log_dir\x18\x01 \x01(\t\x12\x10\n\x08root_dir\x18\x03 \x01(\t\x12\x0c\n\x04save\x18\x02 \x01(\x08\"\n\n\x08TBResult\"}\n\x0b\x41lertRecord\x12\r\n\x05title\x18\x01 \x01(\t\x12\x0c\n\x04text\x18\x02 
\x01(\t\x12\r\n\x05level\x18\x03 \x01(\t\x12\x15\n\rwait_duration\x18\x04 \x01(\x03\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"\r\n\x0b\x41lertResult\"\xe8\x11\n\x07Request\x12\x38\n\x0bstop_status\x18\x01 \x01(\x0b\x32!.wandb_internal.StopStatusRequestH\x00\x12>\n\x0enetwork_status\x18\x02 \x01(\x0b\x32$.wandb_internal.NetworkStatusRequestH\x00\x12-\n\x05\x64\x65\x66\x65r\x18\x03 \x01(\x0b\x32\x1c.wandb_internal.DeferRequestH\x00\x12\x38\n\x0bget_summary\x18\x04 \x01(\x0b\x32!.wandb_internal.GetSummaryRequestH\x00\x12-\n\x05login\x18\x05 \x01(\x0b\x32\x1c.wandb_internal.LoginRequestH\x00\x12-\n\x05pause\x18\x06 \x01(\x0b\x32\x1c.wandb_internal.PauseRequestH\x00\x12/\n\x06resume\x18\x07 \x01(\x0b\x32\x1d.wandb_internal.ResumeRequestH\x00\x12\x34\n\tpoll_exit\x18\x08 \x01(\x0b\x32\x1f.wandb_internal.PollExitRequestH\x00\x12@\n\x0fsampled_history\x18\t \x01(\x0b\x32%.wandb_internal.SampledHistoryRequestH\x00\x12@\n\x0fpartial_history\x18\n \x01(\x0b\x32%.wandb_internal.PartialHistoryRequestH\x00\x12\x34\n\trun_start\x18\x0b \x01(\x0b\x32\x1f.wandb_internal.RunStartRequestH\x00\x12<\n\rcheck_version\x18\x0c \x01(\x0b\x32#.wandb_internal.CheckVersionRequestH\x00\x12:\n\x0clog_artifact\x18\r \x01(\x0b\x32\".wandb_internal.LogArtifactRequestH\x00\x12\x44\n\x11\x64ownload_artifact\x18\x0e \x01(\x0b\x32\'.wandb_internal.DownloadArtifactRequestH\x00\x12\x35\n\tkeepalive\x18\x11 \x01(\x0b\x32 .wandb_internal.KeepaliveRequestH\x00\x12>\n\x0eserver_feature\x18\x12 \x01(\x0b\x32$.wandb_internal.ServerFeatureRequestH\x00\x12\x36\n\nrun_status\x18\x14 \x01(\x0b\x32 .wandb_internal.RunStatusRequestH\x00\x12/\n\x06\x63\x61ncel\x18\x15 \x01(\x0b\x32\x1d.wandb_internal.CancelRequestH\x00\x12\x33\n\x08metadata\x18\x16 \x01(\x0b\x32\x1f.wandb_internal.MetadataRequestH\x00\x12\x44\n\x11internal_messages\x18\x17 \x01(\x0b\x32\'.wandb_internal.InternalMessagesRequestH\x00\x12@\n\x0fpython_packages\x18\x18 \x01(\x0b\x32%.wandb_internal.PythonPackagesRequestH\x00\x12\x33\n\x08shutdown\x18@ \x01(\x0b\x32\x1f.wandb_internal.ShutdownRequestH\x00\x12/\n\x06\x61ttach\x18\x41 \x01(\x0b\x32\x1d.wandb_internal.AttachRequestH\x00\x12/\n\x06status\x18\x42 \x01(\x0b\x32\x1d.wandb_internal.StatusRequestH\x00\x12\x38\n\x0bserver_info\x18\x43 \x01(\x0b\x32!.wandb_internal.ServerInfoRequestH\x00\x12\x38\n\x0bsender_mark\x18\x44 \x01(\x0b\x32!.wandb_internal.SenderMarkRequestH\x00\x12\x38\n\x0bsender_read\x18\x45 \x01(\x0b\x32!.wandb_internal.SenderReadRequestH\x00\x12<\n\rstatus_report\x18\x46 \x01(\x0b\x32#.wandb_internal.StatusReportRequestH\x00\x12>\n\x0esummary_record\x18G \x01(\x0b\x32$.wandb_internal.SummaryRecordRequestH\x00\x12\x42\n\x10telemetry_record\x18H \x01(\x0b\x32&.wandb_internal.TelemetryRecordRequestH\x00\x12\x32\n\x08job_info\x18I \x01(\x0b\x32\x1e.wandb_internal.JobInfoRequestH\x00\x12\x45\n\x12get_system_metrics\x18J \x01(\x0b\x32\'.wandb_internal.GetSystemMetricsRequestH\x00\x12\x34\n\tjob_input\x18M \x01(\x0b\x32\x1f.wandb_internal.JobInputRequestH\x00\x12<\n\rlink_artifact\x18N \x01(\x0b\x32#.wandb_internal.LinkArtifactRequestH\x00\x12N\n\x17run_finish_without_exit\x18O \x01(\x0b\x32+.wandb_internal.RunFinishWithoutExitRequestH\x00\x12G\n\x13get_system_metadata\x18P \x01(\x0b\x32(.wandb_internal.GetSystemMetadataRequestH\x00\x12\x38\n\x0bsync_finish\x18Q \x01(\x0b\x32!.wandb_internal.SyncFinishRequestH\x00\x12\x39\n\x0btest_inject\x18\xe8\x07 
\x01(\x0b\x32!.wandb_internal.TestInjectRequestH\x00\x42\x0e\n\x0crequest_typeJ\x04\x08K\x10LJ\x04\x08L\x10M\"\xa1\x0e\n\x08Response\x12?\n\x12keepalive_response\x18\x12 \x01(\x0b\x32!.wandb_internal.KeepaliveResponseH\x00\x12\x42\n\x14stop_status_response\x18\x13 \x01(\x0b\x32\".wandb_internal.StopStatusResponseH\x00\x12H\n\x17network_status_response\x18\x14 \x01(\x0b\x32%.wandb_internal.NetworkStatusResponseH\x00\x12\x37\n\x0elogin_response\x18\x18 \x01(\x0b\x32\x1d.wandb_internal.LoginResponseH\x00\x12\x42\n\x14get_summary_response\x18\x19 \x01(\x0b\x32\".wandb_internal.GetSummaryResponseH\x00\x12>\n\x12poll_exit_response\x18\x1a \x01(\x0b\x32 .wandb_internal.PollExitResponseH\x00\x12J\n\x18sampled_history_response\x18\x1b \x01(\x0b\x32&.wandb_internal.SampledHistoryResponseH\x00\x12>\n\x12run_start_response\x18\x1c \x01(\x0b\x32 .wandb_internal.RunStartResponseH\x00\x12\x46\n\x16\x63heck_version_response\x18\x1d \x01(\x0b\x32$.wandb_internal.CheckVersionResponseH\x00\x12\x44\n\x15log_artifact_response\x18\x1e \x01(\x0b\x32#.wandb_internal.LogArtifactResponseH\x00\x12N\n\x1a\x64ownload_artifact_response\x18\x1f \x01(\x0b\x32(.wandb_internal.DownloadArtifactResponseH\x00\x12H\n\x17server_feature_response\x18 \x01(\x0b\x32%.wandb_internal.ServerFeatureResponseH\x00\x12@\n\x13run_status_response\x18# \x01(\x0b\x32!.wandb_internal.RunStatusResponseH\x00\x12\x39\n\x0f\x63\x61ncel_response\x18$ \x01(\x0b\x32\x1e.wandb_internal.CancelResponseH\x00\x12N\n\x1ainternal_messages_response\x18% \x01(\x0b\x32(.wandb_internal.InternalMessagesResponseH\x00\x12=\n\x11shutdown_response\x18@ \x01(\x0b\x32 .wandb_internal.ShutdownResponseH\x00\x12\x39\n\x0f\x61ttach_response\x18\x41 \x01(\x0b\x32\x1e.wandb_internal.AttachResponseH\x00\x12\x39\n\x0fstatus_response\x18\x42 \x01(\x0b\x32\x1e.wandb_internal.StatusResponseH\x00\x12\x42\n\x14server_info_response\x18\x43 \x01(\x0b\x32\".wandb_internal.ServerInfoResponseH\x00\x12<\n\x11job_info_response\x18\x44 \x01(\x0b\x32\x1f.wandb_internal.JobInfoResponseH\x00\x12O\n\x1bget_system_metrics_response\x18\x45 \x01(\x0b\x32(.wandb_internal.GetSystemMetricsResponseH\x00\x12\x46\n\x16link_artifact_response\x18G \x01(\x0b\x32$.wandb_internal.LinkArtifactResponseH\x00\x12\x35\n\rsync_response\x18\x46 \x01(\x0b\x32\x1c.wandb_internal.SyncResponseH\x00\x12X\n run_finish_without_exit_response\x18H \x01(\x0b\x32,.wandb_internal.RunFinishWithoutExitResponseH\x00\x12Q\n\x1cget_system_metadata_response\x18I \x01(\x0b\x32).wandb_internal.GetSystemMetadataResponseH\x00\x12\x43\n\x14test_inject_response\x18\xe8\x07 \x01(\x0b\x32\".wandb_internal.TestInjectResponseH\x00\x42\x0f\n\rresponse_type\"\xc0\x02\n\x0c\x44\x65\x66\x65rRequest\x12\x36\n\x05state\x18\x01 \x01(\x0e\x32\'.wandb_internal.DeferRequest.DeferState\"\xf7\x01\n\nDeferState\x12\t\n\x05\x42\x45GIN\x10\x00\x12\r\n\tFLUSH_RUN\x10\x01\x12\x0f\n\x0b\x46LUSH_STATS\x10\x02\x12\x19\n\x15\x46LUSH_PARTIAL_HISTORY\x10\x03\x12\x0c\n\x08\x46LUSH_TB\x10\x04\x12\r\n\tFLUSH_SUM\x10\x05\x12\x13\n\x0f\x46LUSH_DEBOUNCER\x10\x06\x12\x10\n\x0c\x46LUSH_OUTPUT\x10\x07\x12\r\n\tFLUSH_JOB\x10\x08\x12\r\n\tFLUSH_DIR\x10\t\x12\x0c\n\x08\x46LUSH_FP\x10\n\x12\x0b\n\x07JOIN_FP\x10\x0b\x12\x0c\n\x08\x46LUSH_FS\x10\x0c\x12\x0f\n\x0b\x46LUSH_FINAL\x10\r\x12\x07\n\x03\x45ND\x10\x0e\"<\n\x0cPauseRequest\x12,\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1c.wandb_internal._RequestInfo\"\x0f\n\rPauseResponse\"=\n\rResumeRequest\x12,\n\x05_info\x18\xc8\x01 
\x01(\x0b\x32\x1c.wandb_internal._RequestInfo\"\x10\n\x0eResumeResponse\"M\n\x0cLoginRequest\x12\x0f\n\x07\x61pi_key\x18\x01 \x01(\t\x12,\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1c.wandb_internal._RequestInfo\"&\n\rLoginResponse\x12\x15\n\ractive_entity\x18\x01 \x01(\t\"A\n\x11GetSummaryRequest\x12,\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1c.wandb_internal._RequestInfo\"?\n\x12GetSummaryResponse\x12)\n\x04item\x18\x01 \x03(\x0b\x32\x1b.wandb_internal.SummaryItem\"G\n\x17GetSystemMetricsRequest\x12,\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1c.wandb_internal._RequestInfo\"R\n\x12SystemMetricSample\x12-\n\ttimestamp\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\r\n\x05value\x18\x02 \x01(\x02\"I\n\x13SystemMetricsBuffer\x12\x32\n\x06record\x18\x01 \x03(\x0b\x32\".wandb_internal.SystemMetricSample\"\xca\x01\n\x18GetSystemMetricsResponse\x12S\n\x0esystem_metrics\x18\x01 \x03(\x0b\x32;.wandb_internal.GetSystemMetricsResponse.SystemMetricsEntry\x1aY\n\x12SystemMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x32\n\x05value\x18\x02 \x01(\x0b\x32#.wandb_internal.SystemMetricsBuffer:\x02\x38\x01\"H\n\x18GetSystemMetadataRequest\x12,\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1c.wandb_internal._RequestInfo\"N\n\x19GetSystemMetadataResponse\x12\x31\n\x08metadata\x18\x01 \x01(\x0b\x32\x1f.wandb_internal.MetadataRequest\"=\n\rStatusRequest\x12,\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1c.wandb_internal._RequestInfo\")\n\x0eStatusResponse\x12\x17\n\x0frun_should_stop\x18\x01 \x01(\x08\"A\n\x11StopStatusRequest\x12,\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1c.wandb_internal._RequestInfo\"-\n\x12StopStatusResponse\x12\x17\n\x0frun_should_stop\x18\x01 \x01(\x08\"D\n\x14NetworkStatusRequest\x12,\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1c.wandb_internal._RequestInfo\"P\n\x15NetworkStatusResponse\x12\x37\n\x11network_responses\x18\x01 \x03(\x0b\x32\x1c.wandb_internal.HttpResponse\"D\n\x0cHttpResponse\x12\x18\n\x10http_status_code\x18\x01 \x01(\x05\x12\x1a\n\x12http_response_text\x18\x02 \x01(\t\"G\n\x17InternalMessagesRequest\x12,\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1c.wandb_internal._RequestInfo\"N\n\x18InternalMessagesResponse\x12\x32\n\x08messages\x18\x01 \x01(\x0b\x32 .wandb_internal.InternalMessages\"#\n\x10InternalMessages\x12\x0f\n\x07warning\x18\x01 \x03(\t\"?\n\x0fPollExitRequest\x12,\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1c.wandb_internal._RequestInfo\"\xf5\x01\n\x10PollExitResponse\x12\x0c\n\x04\x64one\x18\x01 \x01(\x08\x12\x32\n\x0b\x65xit_result\x18\x02 \x01(\x0b\x32\x1d.wandb_internal.RunExitResult\x12\x35\n\x0cpusher_stats\x18\x03 \x01(\x0b\x32\x1f.wandb_internal.FilePusherStats\x12/\n\x0b\x66ile_counts\x18\x04 \x01(\x0b\x32\x1a.wandb_internal.FileCounts\x12\x37\n\x0foperation_stats\x18\x05 \x01(\x0b\x32\x1e.wandb_internal.OperationStats\"Y\n\x0eOperationStats\x12-\n\noperations\x18\x01 \x03(\x0b\x32\x19.wandb_internal.Operation\x12\x18\n\x10total_operations\x18\x02 \x01(\x03\"\x87\x01\n\tOperation\x12\x0c\n\x04\x64\x65sc\x18\x01 \x01(\t\x12\x17\n\x0fruntime_seconds\x18\x02 \x01(\x01\x12\x10\n\x08progress\x18\x03 \x01(\t\x12\x14\n\x0c\x65rror_status\x18\x04 \x01(\t\x12+\n\x08subtasks\x18\x05 \x03(\x0b\x32\x19.wandb_internal.Operation\"\x13\n\x11SenderMarkRequest\"\x13\n\x11SyncFinishRequest\"E\n\x0cSyncResponse\x12\x0b\n\x03url\x18\x01 \x01(\t\x12(\n\x05\x65rror\x18\x02 \x01(\x0b\x32\x19.wandb_internal.ErrorInfo\"?\n\x11SenderReadRequest\x12\x14\n\x0cstart_offset\x18\x01 \x01(\x03\x12\x14\n\x0c\x66inal_offset\x18\x02 \x01(\x03\"m\n\x13StatusReportRequest\x12\x12\n\nrecord_num\x18\x01 
\x01(\x03\x12\x13\n\x0bsent_offset\x18\x02 \x01(\x03\x12-\n\tsync_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"F\n\x14SummaryRecordRequest\x12.\n\x07summary\x18\x01 \x01(\x0b\x32\x1d.wandb_internal.SummaryRecord\"L\n\x16TelemetryRecordRequest\x12\x32\n\ttelemetry\x18\x01 \x01(\x0b\x32\x1f.wandb_internal.TelemetryRecord\"A\n\x11ServerInfoRequest\x12,\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1c.wandb_internal._RequestInfo\"|\n\x12ServerInfoResponse\x12-\n\nlocal_info\x18\x01 \x01(\x0b\x32\x19.wandb_internal.LocalInfo\x12\x37\n\x0fserver_messages\x18\x02 \x01(\x0b\x32\x1e.wandb_internal.ServerMessages\"=\n\x0eServerMessages\x12+\n\x04item\x18\x01 \x03(\x0b\x32\x1d.wandb_internal.ServerMessage\"e\n\rServerMessage\x12\x12\n\nplain_text\x18\x01 \x01(\t\x12\x10\n\x08utf_text\x18\x02 \x01(\t\x12\x11\n\thtml_text\x18\x03 \x01(\t\x12\x0c\n\x04type\x18\x04 \x01(\t\x12\r\n\x05level\x18\x05 \x01(\x05\"c\n\nFileCounts\x12\x13\n\x0bwandb_count\x18\x01 \x01(\x05\x12\x13\n\x0bmedia_count\x18\x02 \x01(\x05\x12\x16\n\x0e\x61rtifact_count\x18\x03 \x01(\x05\x12\x13\n\x0bother_count\x18\x04 \x01(\x05\"U\n\x0f\x46ilePusherStats\x12\x16\n\x0euploaded_bytes\x18\x01 \x01(\x03\x12\x13\n\x0btotal_bytes\x18\x02 \x01(\x03\x12\x15\n\rdeduped_bytes\x18\x03 \x01(\x03\"\x1e\n\rFilesUploaded\x12\r\n\x05\x66iles\x18\x01 \x03(\t\"\xf4\x01\n\x17\x46ileTransferInfoRequest\x12\x42\n\x04type\x18\x01 \x01(\x0e\x32\x34.wandb_internal.FileTransferInfoRequest.TransferType\x12\x0c\n\x04path\x18\x02 \x01(\t\x12\x0b\n\x03url\x18\x03 \x01(\t\x12\x0c\n\x04size\x18\x04 \x01(\x03\x12\x11\n\tprocessed\x18\x05 \x01(\x03\x12/\n\x0b\x66ile_counts\x18\x06 \x01(\x0b\x32\x1a.wandb_internal.FileCounts\"(\n\x0cTransferType\x12\n\n\x06Upload\x10\x00\x12\x0c\n\x08\x44ownload\x10\x01\"1\n\tLocalInfo\x12\x0f\n\x07version\x18\x01 \x01(\t\x12\x13\n\x0bout_of_date\x18\x02 \x01(\x08\"?\n\x0fShutdownRequest\x12,\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1c.wandb_internal._RequestInfo\"\x12\n\x10ShutdownResponse\"P\n\rAttachRequest\x12\x11\n\tattach_id\x18\x14 \x01(\t\x12,\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1c.wandb_internal._RequestInfo\"b\n\x0e\x41ttachResponse\x12&\n\x03run\x18\x01 \x01(\x0b\x32\x19.wandb_internal.RunRecord\x12(\n\x05\x65rror\x18\x02 \x01(\x0b\x32\x19.wandb_internal.ErrorInfo\"\xd5\x02\n\x11TestInjectRequest\x12\x13\n\x0bhandler_exc\x18\x01 \x01(\x08\x12\x14\n\x0chandler_exit\x18\x02 \x01(\x08\x12\x15\n\rhandler_abort\x18\x03 \x01(\x08\x12\x12\n\nsender_exc\x18\x04 \x01(\x08\x12\x13\n\x0bsender_exit\x18\x05 \x01(\x08\x12\x14\n\x0csender_abort\x18\x06 \x01(\x08\x12\x0f\n\x07req_exc\x18\x07 \x01(\x08\x12\x10\n\x08req_exit\x18\x08 \x01(\x08\x12\x11\n\treq_abort\x18\t \x01(\x08\x12\x10\n\x08resp_exc\x18\n \x01(\x08\x12\x11\n\tresp_exit\x18\x0b \x01(\x08\x12\x12\n\nresp_abort\x18\x0c \x01(\x08\x12\x10\n\x08msg_drop\x18\r \x01(\x08\x12\x10\n\x08msg_hang\x18\x0e \x01(\x08\x12,\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1c.wandb_internal._RequestInfo\"\x14\n\x12TestInjectResponse\"\x1e\n\rHistoryAction\x12\r\n\x05\x66lush\x18\x01 \x01(\x08\"\xca\x01\n\x15PartialHistoryRequest\x12)\n\x04item\x18\x01 \x03(\x0b\x32\x1b.wandb_internal.HistoryItem\x12)\n\x04step\x18\x02 \x01(\x0b\x32\x1b.wandb_internal.HistoryStep\x12-\n\x06\x61\x63tion\x18\x03 \x01(\x0b\x32\x1d.wandb_internal.HistoryAction\x12,\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1c.wandb_internal._RequestInfo\"\x18\n\x16PartialHistoryResponse\"E\n\x15SampledHistoryRequest\x12,\n\x05_info\x18\xc8\x01 
\x01(\x0b\x32\x1c.wandb_internal._RequestInfo\"_\n\x12SampledHistoryItem\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x12\n\nnested_key\x18\x02 \x03(\t\x12\x14\n\x0cvalues_float\x18\x03 \x03(\x02\x12\x12\n\nvalues_int\x18\x04 \x03(\x03\"J\n\x16SampledHistoryResponse\x12\x30\n\x04item\x18\x01 \x03(\x0b\x32\".wandb_internal.SampledHistoryItem\"@\n\x10RunStatusRequest\x12,\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1c.wandb_internal._RequestInfo\"x\n\x11RunStatusResponse\x12\x18\n\x10sync_items_total\x18\x01 \x01(\x03\x12\x1a\n\x12sync_items_pending\x18\x02 \x01(\x03\x12-\n\tsync_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"g\n\x0fRunStartRequest\x12&\n\x03run\x18\x01 \x01(\x0b\x32\x19.wandb_internal.RunRecord\x12,\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1c.wandb_internal._RequestInfo\"\x12\n\x10RunStartResponse\"K\n\x1bRunFinishWithoutExitRequest\x12,\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1c.wandb_internal._RequestInfo\"\x1e\n\x1cRunFinishWithoutExitResponse\"\\\n\x13\x43heckVersionRequest\x12\x17\n\x0f\x63urrent_version\x18\x01 \x01(\t\x12,\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1c.wandb_internal._RequestInfo\"]\n\x14\x43heckVersionResponse\x12\x17\n\x0fupgrade_message\x18\x01 \x01(\t\x12\x14\n\x0cyank_message\x18\x02 \x01(\t\x12\x16\n\x0e\x64\x65lete_message\x18\x03 \x01(\t\">\n\x0eJobInfoRequest\x12,\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1c.wandb_internal._RequestInfo\"6\n\x0fJobInfoResponse\x12\x12\n\nsequenceId\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\t\"\x9f\x01\n\x12LogArtifactRequest\x12\x30\n\x08\x61rtifact\x18\x01 \x01(\x0b\x32\x1e.wandb_internal.ArtifactRecord\x12\x14\n\x0chistory_step\x18\x02 \x01(\x03\x12\x13\n\x0bstaging_dir\x18\x03 \x01(\t\x12,\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1c.wandb_internal._RequestInfo\"A\n\x13LogArtifactResponse\x12\x13\n\x0b\x61rtifact_id\x18\x01 \x01(\t\x12\x15\n\rerror_message\x18\x02 \x01(\t\"\xbe\x01\n\x17\x44ownloadArtifactRequest\x12\x13\n\x0b\x61rtifact_id\x18\x01 \x01(\t\x12\x15\n\rdownload_root\x18\x02 \x01(\t\x12 \n\x18\x61llow_missing_references\x18\x04 \x01(\x08\x12\x12\n\nskip_cache\x18\x05 \x01(\x08\x12\x13\n\x0bpath_prefix\x18\x06 \x01(\t\x12,\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1c.wandb_internal._RequestInfo\"1\n\x18\x44ownloadArtifactResponse\x12\x15\n\rerror_message\x18\x01 \x01(\t\"@\n\x10KeepaliveRequest\x12,\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1c.wandb_internal._RequestInfo\"\x13\n\x11KeepaliveResponse\"q\n\x0c\x41rtifactInfo\x12\x10\n\x08\x61rtifact\x18\x01 \x01(\t\x12\x12\n\nentrypoint\x18\x02 \x03(\t\x12\x10\n\x08notebook\x18\x03 \x01(\x08\x12\x15\n\rbuild_context\x18\x04 \x01(\t\x12\x12\n\ndockerfile\x18\x05 \x01(\t\")\n\x07GitInfo\x12\x0e\n\x06remote\x18\x01 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x02 \x01(\t\"\x87\x01\n\tGitSource\x12)\n\x08git_info\x18\x01 \x01(\x0b\x32\x17.wandb_internal.GitInfo\x12\x12\n\nentrypoint\x18\x02 \x03(\t\x12\x10\n\x08notebook\x18\x03 \x01(\x08\x12\x15\n\rbuild_context\x18\x04 \x01(\t\x12\x12\n\ndockerfile\x18\x05 \x01(\t\"\x1c\n\x0bImageSource\x12\r\n\x05image\x18\x01 \x01(\t\"\x8c\x01\n\x06Source\x12&\n\x03git\x18\x01 \x01(\x0b\x32\x19.wandb_internal.GitSource\x12.\n\x08\x61rtifact\x18\x02 \x01(\x0b\x32\x1c.wandb_internal.ArtifactInfo\x12*\n\x05image\x18\x03 \x01(\x0b\x32\x1b.wandb_internal.ImageSource\"k\n\tJobSource\x12\x10\n\x08_version\x18\x01 \x01(\t\x12\x13\n\x0bsource_type\x18\x02 \x01(\t\x12&\n\x06source\x18\x03 \x01(\x0b\x32\x16.wandb_internal.Source\x12\x0f\n\x07runtime\x18\x04 \x01(\t\"V\n\x12PartialJobArtifact\x12\x10\n\x08job_name\x18\x01 
\x01(\t\x12.\n\x0bsource_info\x18\x02 \x01(\x0b\x32\x19.wandb_internal.JobSource\"\x9d\x01\n\x11UseArtifactRecord\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\x33\n\x07partial\x18\x04 \x01(\x0b\x32\".wandb_internal.PartialJobArtifact\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"\x13\n\x11UseArtifactResult\"R\n\rCancelRequest\x12\x13\n\x0b\x63\x61ncel_slot\x18\x01 \x01(\t\x12,\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1c.wandb_internal._RequestInfo\"\x10\n\x0e\x43\x61ncelResponse\"\'\n\x08\x44iskInfo\x12\r\n\x05total\x18\x01 \x01(\x04\x12\x0c\n\x04used\x18\x02 \x01(\x04\"\x1b\n\nMemoryInfo\x12\r\n\x05total\x18\x01 \x01(\x04\"/\n\x07\x43puInfo\x12\r\n\x05\x63ount\x18\x01 \x01(\r\x12\x15\n\rcount_logical\x18\x02 \x01(\r\"\x9a\x01\n\tAppleInfo\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\necpu_cores\x18\x02 \x01(\r\x12\x12\n\npcpu_cores\x18\x03 \x01(\r\x12\x11\n\tgpu_cores\x18\x04 \x01(\r\x12\x11\n\tmemory_gb\x18\x05 \x01(\r\x12\x18\n\x10swap_total_bytes\x18\x06 \x01(\x04\x12\x17\n\x0fram_total_bytes\x18\x07 \x01(\x04\"]\n\rGpuNvidiaInfo\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0cmemory_total\x18\x02 \x01(\x04\x12\x12\n\ncuda_cores\x18\x03 \x01(\r\x12\x14\n\x0c\x61rchitecture\x18\x04 \x01(\t\"\x89\x02\n\nGpuAmdInfo\x12\n\n\x02id\x18\x01 \x01(\t\x12\x11\n\tunique_id\x18\x02 \x01(\t\x12\x15\n\rvbios_version\x18\x03 \x01(\t\x12\x19\n\x11performance_level\x18\x04 \x01(\t\x12\x15\n\rgpu_overdrive\x18\x05 \x01(\t\x12\x1c\n\x14gpu_memory_overdrive\x18\x06 \x01(\t\x12\x11\n\tmax_power\x18\x07 \x01(\t\x12\x0e\n\x06series\x18\x08 \x01(\t\x12\r\n\x05model\x18\t \x01(\t\x12\x0e\n\x06vendor\x18\n \x01(\t\x12\x0b\n\x03sku\x18\x0b \x01(\t\x12\x12\n\nsclk_range\x18\x0c \x01(\t\x12\x12\n\nmclk_range\x18\r \x01(\t\"n\n\x0cTrainiumInfo\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06vendor\x18\x02 \x01(\t\x12\x1b\n\x13neuron_device_count\x18\x03 \x01(\r\x12#\n\x1bneuroncore_per_device_count\x18\x04 \x01(\r\"Q\n\x07TPUInfo\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07hbm_gib\x18\x02 \x01(\r\x12\x18\n\x10\x64\x65vices_per_chip\x18\x03 \x01(\r\x12\r\n\x05\x63ount\x18\x04 \x01(\r\"\xcc\t\n\x0fMetadataRequest\x12\n\n\x02os\x18\x01 \x01(\t\x12\x0e\n\x06python\x18\x02 \x01(\t\x12=\n\x0cheartbeat_at\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\x0bheartbeatAt\x12\x39\n\nstarted_at\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\tstartedAt\x12\x0e\n\x06\x64ocker\x18\x05 \x01(\t\x12\x0c\n\x04\x63uda\x18\x06 \x01(\t\x12\x0c\n\x04\x61rgs\x18\x07 \x03(\t\x12\r\n\x05state\x18\x08 \x01(\t\x12\x0f\n\x07program\x18\t \x01(\t\x12\x1b\n\tcode_path\x18\n \x01(\tR\x08\x63odePath\x12*\n\x03git\x18\x0b \x01(\x0b\x32\x1d.wandb_internal.GitRepoRecord\x12\r\n\x05\x65mail\x18\x0c \x01(\t\x12\x0c\n\x04root\x18\r \x01(\t\x12\x0c\n\x04host\x18\x0e \x01(\t\x12\x10\n\x08username\x18\x0f \x01(\t\x12\x12\n\nexecutable\x18\x10 \x01(\t\x12&\n\x0f\x63ode_path_local\x18\x11 \x01(\tR\rcodePathLocal\x12\r\n\x05\x63olab\x18\x12 \x01(\t\x12\x1c\n\tcpu_count\x18\x13 \x01(\rR\tcpu_count\x12,\n\x11\x63pu_count_logical\x18\x14 \x01(\rR\x11\x63pu_count_logical\x12\x15\n\x08gpu_type\x18\x15 \x01(\tR\x03gpu\x12\x1c\n\tgpu_count\x18\x16 \x01(\rR\tgpu_count\x12\x37\n\x04\x64isk\x18\x17 \x03(\x0b\x32).wandb_internal.MetadataRequest.DiskEntry\x12*\n\x06memory\x18\x18 \x01(\x0b\x32\x1a.wandb_internal.MemoryInfo\x12$\n\x03\x63pu\x18\x19 \x01(\x0b\x32\x17.wandb_internal.CpuInfo\x12(\n\x05\x61pple\x18\x1a 
\x01(\x0b\x32\x19.wandb_internal.AppleInfo\x12=\n\ngpu_nvidia\x18\x1b \x03(\x0b\x32\x1d.wandb_internal.GpuNvidiaInfoR\ngpu_nvidia\x12\x34\n\x07gpu_amd\x18\x1c \x03(\x0b\x32\x1a.wandb_internal.GpuAmdInfoR\x07gpu_amd\x12\x39\n\x05slurm\x18\x1d \x03(\x0b\x32*.wandb_internal.MetadataRequest.SlurmEntry\x12\x14\n\x0c\x63uda_version\x18\x1e \x01(\t\x12.\n\x08trainium\x18\x1f \x01(\x0b\x32\x1c.wandb_internal.TrainiumInfo\x12$\n\x03tpu\x18 \x01(\x0b\x32\x17.wandb_internal.TPUInfo\x12,\n\x0e_user_modified\x18\xc8\x01 \x01(\x08H\x00R\x0e_user_modified\x88\x01\x01\x1a\x45\n\tDiskEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\'\n\x05value\x18\x02 \x01(\x0b\x32\x18.wandb_internal.DiskInfo:\x02\x38\x01\x1a,\n\nSlurmEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x11\n\x0fX_user_modified\"\x8d\x01\n\x15PythonPackagesRequest\x12\x44\n\x07package\x18\x01 \x03(\x0b\x32\x33.wandb_internal.PythonPackagesRequest.PythonPackage\x1a.\n\rPythonPackage\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\t\"\x1c\n\x0cJobInputPath\x12\x0c\n\x04path\x18\x01 \x03(\t\"\xd6\x01\n\x0eJobInputSource\x12\x44\n\nrun_config\x18\x01 \x01(\x0b\x32..wandb_internal.JobInputSource.RunConfigSourceH\x00\x12?\n\x04\x66ile\x18\x02 \x01(\x0b\x32/.wandb_internal.JobInputSource.ConfigFileSourceH\x00\x1a\x11\n\x0fRunConfigSource\x1a \n\x10\x43onfigFileSource\x12\x0c\n\x04path\x18\x01 \x01(\tB\x08\n\x06source\"\xc7\x01\n\x0fJobInputRequest\x12\x34\n\x0cinput_source\x18\x01 \x01(\x0b\x32\x1e.wandb_internal.JobInputSource\x12\x33\n\rinclude_paths\x18\x02 \x03(\x0b\x32\x1c.wandb_internal.JobInputPath\x12\x33\n\rexclude_paths\x18\x03 \x03(\x0b\x32\x1c.wandb_internal.JobInputPath\x12\x14\n\x0cinput_schema\x18\x04 \x01(\t\"t\n\x14ServerFeatureRequest\x12.\n\x07\x66\x65\x61ture\x18\x01 \x01(\x0e\x32\x1d.wandb_internal.ServerFeature\x12,\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1c.wandb_internal._RequestInfo\"K\n\x15ServerFeatureResponse\x12\x32\n\x07\x66\x65\x61ture\x18\x01 \x01(\x0b\x32!.wandb_internal.ServerFeatureItem\"2\n\x11ServerFeatureItem\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07\x65nabled\x18\x02 \x01(\x08*G\n\rServerFeature\x12\x13\n\x0fLARGE_FILENAMES\x10\x00\x12\x11\n\rARTIFACT_TAGS\x10\x01\x12\x0e\n\nCLIENT_IDS\x10\x02\x42\x1bZ\x19\x63ore/pkg/service_go_protob\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'wandb.proto.wandb_internal_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'Z\031core/pkg/service_go_proto' + _globals['_GETSYSTEMMETRICSRESPONSE_SYSTEMMETRICSENTRY']._loaded_options = None + _globals['_GETSYSTEMMETRICSRESPONSE_SYSTEMMETRICSENTRY']._serialized_options = b'8\001' + _globals['_METADATAREQUEST_DISKENTRY']._loaded_options = None + _globals['_METADATAREQUEST_DISKENTRY']._serialized_options = b'8\001' + _globals['_METADATAREQUEST_SLURMENTRY']._loaded_options = None + _globals['_METADATAREQUEST_SLURMENTRY']._serialized_options = b'8\001' + _globals['_SERVERFEATURE']._serialized_start=21995 + _globals['_SERVERFEATURE']._serialized_end=22066 + _globals['_RECORD']._serialized_start=180 + _globals['_RECORD']._serialized_end=1353 + _globals['_CONTROL']._serialized_start=1356 + _globals['_CONTROL']._serialized_end=1524 + _globals['_RESULT']._serialized_start=1527 + _globals['_RESULT']._serialized_end=2026 + _globals['_FINALRECORD']._serialized_start=2028 + 
_globals['_FINALRECORD']._serialized_end=2086 + _globals['_VERSIONINFO']._serialized_start=2088 + _globals['_VERSIONINFO']._serialized_end=2186 + _globals['_HEADERRECORD']._serialized_start=2188 + _globals['_HEADERRECORD']._serialized_end=2298 + _globals['_FOOTERRECORD']._serialized_start=2300 + _globals['_FOOTERRECORD']._serialized_end=2359 + _globals['_RUNRECORD']._serialized_start=2362 + _globals['_RUNRECORD']._serialized_end=2968 + _globals['_GITREPORECORD']._serialized_start=2970 + _globals['_GITREPORECORD']._serialized_end=3029 + _globals['_RUNUPDATERESULT']._serialized_start=3031 + _globals['_RUNUPDATERESULT']._serialized_end=3130 + _globals['_ERRORINFO']._serialized_start=3133 + _globals['_ERRORINFO']._serialized_end=3305 + _globals['_ERRORINFO_ERRORCODE']._serialized_start=3214 + _globals['_ERRORINFO_ERRORCODE']._serialized_end=3305 + _globals['_RUNEXITRECORD']._serialized_start=3307 + _globals['_RUNEXITRECORD']._serialized_end=3403 + _globals['_RUNEXITRESULT']._serialized_start=3405 + _globals['_RUNEXITRESULT']._serialized_end=3420 + _globals['_RUNPREEMPTINGRECORD']._serialized_start=3422 + _globals['_RUNPREEMPTINGRECORD']._serialized_end=3488 + _globals['_RUNPREEMPTINGRESULT']._serialized_start=3490 + _globals['_RUNPREEMPTINGRESULT']._serialized_end=3511 + _globals['_SETTINGSRECORD']._serialized_start=3513 + _globals['_SETTINGSRECORD']._serialized_end=3618 + _globals['_SETTINGSITEM']._serialized_start=3620 + _globals['_SETTINGSITEM']._serialized_end=3667 + _globals['_HISTORYSTEP']._serialized_start=3669 + _globals['_HISTORYSTEP']._serialized_end=3695 + _globals['_HISTORYRECORD']._serialized_start=3698 + _globals['_HISTORYRECORD']._serialized_end=3844 + _globals['_HISTORYITEM']._serialized_start=3846 + _globals['_HISTORYITEM']._serialized_end=3912 + _globals['_HISTORYRESULT']._serialized_start=3914 + _globals['_HISTORYRESULT']._serialized_end=3929 + _globals['_OUTPUTRECORD']._serialized_start=3932 + _globals['_OUTPUTRECORD']._serialized_end=4152 + _globals['_OUTPUTRECORD_OUTPUTTYPE']._serialized_start=4116 + _globals['_OUTPUTRECORD_OUTPUTTYPE']._serialized_end=4152 + _globals['_OUTPUTRESULT']._serialized_start=4154 + _globals['_OUTPUTRESULT']._serialized_end=4168 + _globals['_OUTPUTRAWRECORD']._serialized_start=4171 + _globals['_OUTPUTRAWRECORD']._serialized_end=4397 + _globals['_OUTPUTRAWRECORD_OUTPUTTYPE']._serialized_start=4116 + _globals['_OUTPUTRAWRECORD_OUTPUTTYPE']._serialized_end=4152 + _globals['_OUTPUTRAWRESULT']._serialized_start=4399 + _globals['_OUTPUTRAWRESULT']._serialized_end=4416 + _globals['_METRICRECORD']._serialized_start=4419 + _globals['_METRICRECORD']._serialized_end=4827 + _globals['_METRICRECORD_METRICGOAL']._serialized_start=4761 + _globals['_METRICRECORD_METRICGOAL']._serialized_end=4827 + _globals['_METRICRESULT']._serialized_start=4829 + _globals['_METRICRESULT']._serialized_end=4843 + _globals['_METRICOPTIONS']._serialized_start=4845 + _globals['_METRICOPTIONS']._serialized_end=4912 + _globals['_METRICCONTROL']._serialized_start=4914 + _globals['_METRICCONTROL']._serialized_end=4948 + _globals['_METRICSUMMARY']._serialized_start=4950 + _globals['_METRICSUMMARY']._serialized_end=5061 + _globals['_CONFIGRECORD']._serialized_start=5064 + _globals['_CONFIGRECORD']._serialized_end=5211 + _globals['_CONFIGITEM']._serialized_start=5213 + _globals['_CONFIGITEM']._serialized_end=5278 + _globals['_CONFIGRESULT']._serialized_start=5280 + _globals['_CONFIGRESULT']._serialized_end=5294 + _globals['_SUMMARYRECORD']._serialized_start=5297 + 
_globals['_SUMMARYRECORD']._serialized_end=5447 + _globals['_SUMMARYITEM']._serialized_start=5449 + _globals['_SUMMARYITEM']._serialized_end=5515 + _globals['_SUMMARYRESULT']._serialized_start=5517 + _globals['_SUMMARYRESULT']._serialized_end=5532 + _globals['_FILESRECORD']._serialized_start=5534 + _globals['_FILESRECORD']._serialized_end=5634 + _globals['_FILESITEM']._serialized_start=5637 + _globals['_FILESITEM']._serialized_end=5873 + _globals['_FILESITEM_POLICYTYPE']._serialized_start=5768 + _globals['_FILESITEM_POLICYTYPE']._serialized_end=5808 + _globals['_FILESITEM_FILETYPE']._serialized_start=5810 + _globals['_FILESITEM_FILETYPE']._serialized_end=5867 + _globals['_FILESRESULT']._serialized_start=5875 + _globals['_FILESRESULT']._serialized_end=5888 + _globals['_STATSRECORD']._serialized_start=5891 + _globals['_STATSRECORD']._serialized_end=6121 + _globals['_STATSRECORD_STATSTYPE']._serialized_start=6098 + _globals['_STATSRECORD_STATSTYPE']._serialized_end=6121 + _globals['_STATSITEM']._serialized_start=6123 + _globals['_STATSITEM']._serialized_end=6167 + _globals['_ARTIFACTRECORD']._serialized_start=6170 + _globals['_ARTIFACTRECORD']._serialized_end=6657 + _globals['_ARTIFACTMANIFEST']._serialized_start=6660 + _globals['_ARTIFACTMANIFEST']._serialized_end=6876 + _globals['_ARTIFACTMANIFESTENTRY']._serialized_start=6879 + _globals['_ARTIFACTMANIFESTENTRY']._serialized_end=7086 + _globals['_EXTRAITEM']._serialized_start=7088 + _globals['_EXTRAITEM']._serialized_end=7132 + _globals['_STORAGEPOLICYCONFIGITEM']._serialized_start=7134 + _globals['_STORAGEPOLICYCONFIGITEM']._serialized_end=7192 + _globals['_ARTIFACTRESULT']._serialized_start=7194 + _globals['_ARTIFACTRESULT']._serialized_end=7210 + _globals['_LINKARTIFACTRESULT']._serialized_start=7212 + _globals['_LINKARTIFACTRESULT']._serialized_end=7232 + _globals['_LINKARTIFACTREQUEST']._serialized_start=7235 + _globals['_LINKARTIFACTREQUEST']._serialized_end=7475 + _globals['_LINKARTIFACTRESPONSE']._serialized_start=7477 + _globals['_LINKARTIFACTRESPONSE']._serialized_end=7522 + _globals['_TBRECORD']._serialized_start=7524 + _globals['_TBRECORD']._serialized_end=7628 + _globals['_TBRESULT']._serialized_start=7630 + _globals['_TBRESULT']._serialized_end=7640 + _globals['_ALERTRECORD']._serialized_start=7642 + _globals['_ALERTRECORD']._serialized_end=7767 + _globals['_ALERTRESULT']._serialized_start=7769 + _globals['_ALERTRESULT']._serialized_end=7782 + _globals['_REQUEST']._serialized_start=7785 + _globals['_REQUEST']._serialized_end=10065 + _globals['_RESPONSE']._serialized_start=10068 + _globals['_RESPONSE']._serialized_end=11893 + _globals['_DEFERREQUEST']._serialized_start=11896 + _globals['_DEFERREQUEST']._serialized_end=12216 + _globals['_DEFERREQUEST_DEFERSTATE']._serialized_start=11969 + _globals['_DEFERREQUEST_DEFERSTATE']._serialized_end=12216 + _globals['_PAUSEREQUEST']._serialized_start=12218 + _globals['_PAUSEREQUEST']._serialized_end=12278 + _globals['_PAUSERESPONSE']._serialized_start=12280 + _globals['_PAUSERESPONSE']._serialized_end=12295 + _globals['_RESUMEREQUEST']._serialized_start=12297 + _globals['_RESUMEREQUEST']._serialized_end=12358 + _globals['_RESUMERESPONSE']._serialized_start=12360 + _globals['_RESUMERESPONSE']._serialized_end=12376 + _globals['_LOGINREQUEST']._serialized_start=12378 + _globals['_LOGINREQUEST']._serialized_end=12455 + _globals['_LOGINRESPONSE']._serialized_start=12457 + _globals['_LOGINRESPONSE']._serialized_end=12495 + _globals['_GETSUMMARYREQUEST']._serialized_start=12497 + 
_globals['_GETSUMMARYREQUEST']._serialized_end=12562 + _globals['_GETSUMMARYRESPONSE']._serialized_start=12564 + _globals['_GETSUMMARYRESPONSE']._serialized_end=12627 + _globals['_GETSYSTEMMETRICSREQUEST']._serialized_start=12629 + _globals['_GETSYSTEMMETRICSREQUEST']._serialized_end=12700 + _globals['_SYSTEMMETRICSAMPLE']._serialized_start=12702 + _globals['_SYSTEMMETRICSAMPLE']._serialized_end=12784 + _globals['_SYSTEMMETRICSBUFFER']._serialized_start=12786 + _globals['_SYSTEMMETRICSBUFFER']._serialized_end=12859 + _globals['_GETSYSTEMMETRICSRESPONSE']._serialized_start=12862 + _globals['_GETSYSTEMMETRICSRESPONSE']._serialized_end=13064 + _globals['_GETSYSTEMMETRICSRESPONSE_SYSTEMMETRICSENTRY']._serialized_start=12975 + _globals['_GETSYSTEMMETRICSRESPONSE_SYSTEMMETRICSENTRY']._serialized_end=13064 + _globals['_GETSYSTEMMETADATAREQUEST']._serialized_start=13066 + _globals['_GETSYSTEMMETADATAREQUEST']._serialized_end=13138 + _globals['_GETSYSTEMMETADATARESPONSE']._serialized_start=13140 + _globals['_GETSYSTEMMETADATARESPONSE']._serialized_end=13218 + _globals['_STATUSREQUEST']._serialized_start=13220 + _globals['_STATUSREQUEST']._serialized_end=13281 + _globals['_STATUSRESPONSE']._serialized_start=13283 + _globals['_STATUSRESPONSE']._serialized_end=13324 + _globals['_STOPSTATUSREQUEST']._serialized_start=13326 + _globals['_STOPSTATUSREQUEST']._serialized_end=13391 + _globals['_STOPSTATUSRESPONSE']._serialized_start=13393 + _globals['_STOPSTATUSRESPONSE']._serialized_end=13438 + _globals['_NETWORKSTATUSREQUEST']._serialized_start=13440 + _globals['_NETWORKSTATUSREQUEST']._serialized_end=13508 + _globals['_NETWORKSTATUSRESPONSE']._serialized_start=13510 + _globals['_NETWORKSTATUSRESPONSE']._serialized_end=13590 + _globals['_HTTPRESPONSE']._serialized_start=13592 + _globals['_HTTPRESPONSE']._serialized_end=13660 + _globals['_INTERNALMESSAGESREQUEST']._serialized_start=13662 + _globals['_INTERNALMESSAGESREQUEST']._serialized_end=13733 + _globals['_INTERNALMESSAGESRESPONSE']._serialized_start=13735 + _globals['_INTERNALMESSAGESRESPONSE']._serialized_end=13813 + _globals['_INTERNALMESSAGES']._serialized_start=13815 + _globals['_INTERNALMESSAGES']._serialized_end=13850 + _globals['_POLLEXITREQUEST']._serialized_start=13852 + _globals['_POLLEXITREQUEST']._serialized_end=13915 + _globals['_POLLEXITRESPONSE']._serialized_start=13918 + _globals['_POLLEXITRESPONSE']._serialized_end=14163 + _globals['_OPERATIONSTATS']._serialized_start=14165 + _globals['_OPERATIONSTATS']._serialized_end=14254 + _globals['_OPERATION']._serialized_start=14257 + _globals['_OPERATION']._serialized_end=14392 + _globals['_SENDERMARKREQUEST']._serialized_start=14394 + _globals['_SENDERMARKREQUEST']._serialized_end=14413 + _globals['_SYNCFINISHREQUEST']._serialized_start=14415 + _globals['_SYNCFINISHREQUEST']._serialized_end=14434 + _globals['_SYNCRESPONSE']._serialized_start=14436 + _globals['_SYNCRESPONSE']._serialized_end=14505 + _globals['_SENDERREADREQUEST']._serialized_start=14507 + _globals['_SENDERREADREQUEST']._serialized_end=14570 + _globals['_STATUSREPORTREQUEST']._serialized_start=14572 + _globals['_STATUSREPORTREQUEST']._serialized_end=14681 + _globals['_SUMMARYRECORDREQUEST']._serialized_start=14683 + _globals['_SUMMARYRECORDREQUEST']._serialized_end=14753 + _globals['_TELEMETRYRECORDREQUEST']._serialized_start=14755 + _globals['_TELEMETRYRECORDREQUEST']._serialized_end=14831 + _globals['_SERVERINFOREQUEST']._serialized_start=14833 + _globals['_SERVERINFOREQUEST']._serialized_end=14898 + 
_globals['_SERVERINFORESPONSE']._serialized_start=14900 + _globals['_SERVERINFORESPONSE']._serialized_end=15024 + _globals['_SERVERMESSAGES']._serialized_start=15026 + _globals['_SERVERMESSAGES']._serialized_end=15087 + _globals['_SERVERMESSAGE']._serialized_start=15089 + _globals['_SERVERMESSAGE']._serialized_end=15190 + _globals['_FILECOUNTS']._serialized_start=15192 + _globals['_FILECOUNTS']._serialized_end=15291 + _globals['_FILEPUSHERSTATS']._serialized_start=15293 + _globals['_FILEPUSHERSTATS']._serialized_end=15378 + _globals['_FILESUPLOADED']._serialized_start=15380 + _globals['_FILESUPLOADED']._serialized_end=15410 + _globals['_FILETRANSFERINFOREQUEST']._serialized_start=15413 + _globals['_FILETRANSFERINFOREQUEST']._serialized_end=15657 + _globals['_FILETRANSFERINFOREQUEST_TRANSFERTYPE']._serialized_start=15617 + _globals['_FILETRANSFERINFOREQUEST_TRANSFERTYPE']._serialized_end=15657 + _globals['_LOCALINFO']._serialized_start=15659 + _globals['_LOCALINFO']._serialized_end=15708 + _globals['_SHUTDOWNREQUEST']._serialized_start=15710 + _globals['_SHUTDOWNREQUEST']._serialized_end=15773 + _globals['_SHUTDOWNRESPONSE']._serialized_start=15775 + _globals['_SHUTDOWNRESPONSE']._serialized_end=15793 + _globals['_ATTACHREQUEST']._serialized_start=15795 + _globals['_ATTACHREQUEST']._serialized_end=15875 + _globals['_ATTACHRESPONSE']._serialized_start=15877 + _globals['_ATTACHRESPONSE']._serialized_end=15975 + _globals['_TESTINJECTREQUEST']._serialized_start=15978 + _globals['_TESTINJECTREQUEST']._serialized_end=16319 + _globals['_TESTINJECTRESPONSE']._serialized_start=16321 + _globals['_TESTINJECTRESPONSE']._serialized_end=16341 + _globals['_HISTORYACTION']._serialized_start=16343 + _globals['_HISTORYACTION']._serialized_end=16373 + _globals['_PARTIALHISTORYREQUEST']._serialized_start=16376 + _globals['_PARTIALHISTORYREQUEST']._serialized_end=16578 + _globals['_PARTIALHISTORYRESPONSE']._serialized_start=16580 + _globals['_PARTIALHISTORYRESPONSE']._serialized_end=16604 + _globals['_SAMPLEDHISTORYREQUEST']._serialized_start=16606 + _globals['_SAMPLEDHISTORYREQUEST']._serialized_end=16675 + _globals['_SAMPLEDHISTORYITEM']._serialized_start=16677 + _globals['_SAMPLEDHISTORYITEM']._serialized_end=16772 + _globals['_SAMPLEDHISTORYRESPONSE']._serialized_start=16774 + _globals['_SAMPLEDHISTORYRESPONSE']._serialized_end=16848 + _globals['_RUNSTATUSREQUEST']._serialized_start=16850 + _globals['_RUNSTATUSREQUEST']._serialized_end=16914 + _globals['_RUNSTATUSRESPONSE']._serialized_start=16916 + _globals['_RUNSTATUSRESPONSE']._serialized_end=17036 + _globals['_RUNSTARTREQUEST']._serialized_start=17038 + _globals['_RUNSTARTREQUEST']._serialized_end=17141 + _globals['_RUNSTARTRESPONSE']._serialized_start=17143 + _globals['_RUNSTARTRESPONSE']._serialized_end=17161 + _globals['_RUNFINISHWITHOUTEXITREQUEST']._serialized_start=17163 + _globals['_RUNFINISHWITHOUTEXITREQUEST']._serialized_end=17238 + _globals['_RUNFINISHWITHOUTEXITRESPONSE']._serialized_start=17240 + _globals['_RUNFINISHWITHOUTEXITRESPONSE']._serialized_end=17270 + _globals['_CHECKVERSIONREQUEST']._serialized_start=17272 + _globals['_CHECKVERSIONREQUEST']._serialized_end=17364 + _globals['_CHECKVERSIONRESPONSE']._serialized_start=17366 + _globals['_CHECKVERSIONRESPONSE']._serialized_end=17459 + _globals['_JOBINFOREQUEST']._serialized_start=17461 + _globals['_JOBINFOREQUEST']._serialized_end=17523 + _globals['_JOBINFORESPONSE']._serialized_start=17525 + _globals['_JOBINFORESPONSE']._serialized_end=17579 + 
_globals['_LOGARTIFACTREQUEST']._serialized_start=17582 + _globals['_LOGARTIFACTREQUEST']._serialized_end=17741 + _globals['_LOGARTIFACTRESPONSE']._serialized_start=17743 + _globals['_LOGARTIFACTRESPONSE']._serialized_end=17808 + _globals['_DOWNLOADARTIFACTREQUEST']._serialized_start=17811 + _globals['_DOWNLOADARTIFACTREQUEST']._serialized_end=18001 + _globals['_DOWNLOADARTIFACTRESPONSE']._serialized_start=18003 + _globals['_DOWNLOADARTIFACTRESPONSE']._serialized_end=18052 + _globals['_KEEPALIVEREQUEST']._serialized_start=18054 + _globals['_KEEPALIVEREQUEST']._serialized_end=18118 + _globals['_KEEPALIVERESPONSE']._serialized_start=18120 + _globals['_KEEPALIVERESPONSE']._serialized_end=18139 + _globals['_ARTIFACTINFO']._serialized_start=18141 + _globals['_ARTIFACTINFO']._serialized_end=18254 + _globals['_GITINFO']._serialized_start=18256 + _globals['_GITINFO']._serialized_end=18297 + _globals['_GITSOURCE']._serialized_start=18300 + _globals['_GITSOURCE']._serialized_end=18435 + _globals['_IMAGESOURCE']._serialized_start=18437 + _globals['_IMAGESOURCE']._serialized_end=18465 + _globals['_SOURCE']._serialized_start=18468 + _globals['_SOURCE']._serialized_end=18608 + _globals['_JOBSOURCE']._serialized_start=18610 + _globals['_JOBSOURCE']._serialized_end=18717 + _globals['_PARTIALJOBARTIFACT']._serialized_start=18719 + _globals['_PARTIALJOBARTIFACT']._serialized_end=18805 + _globals['_USEARTIFACTRECORD']._serialized_start=18808 + _globals['_USEARTIFACTRECORD']._serialized_end=18965 + _globals['_USEARTIFACTRESULT']._serialized_start=18967 + _globals['_USEARTIFACTRESULT']._serialized_end=18986 + _globals['_CANCELREQUEST']._serialized_start=18988 + _globals['_CANCELREQUEST']._serialized_end=19070 + _globals['_CANCELRESPONSE']._serialized_start=19072 + _globals['_CANCELRESPONSE']._serialized_end=19088 + _globals['_DISKINFO']._serialized_start=19090 + _globals['_DISKINFO']._serialized_end=19129 + _globals['_MEMORYINFO']._serialized_start=19131 + _globals['_MEMORYINFO']._serialized_end=19158 + _globals['_CPUINFO']._serialized_start=19160 + _globals['_CPUINFO']._serialized_end=19207 + _globals['_APPLEINFO']._serialized_start=19210 + _globals['_APPLEINFO']._serialized_end=19364 + _globals['_GPUNVIDIAINFO']._serialized_start=19366 + _globals['_GPUNVIDIAINFO']._serialized_end=19459 + _globals['_GPUAMDINFO']._serialized_start=19462 + _globals['_GPUAMDINFO']._serialized_end=19727 + _globals['_TRAINIUMINFO']._serialized_start=19729 + _globals['_TRAINIUMINFO']._serialized_end=19839 + _globals['_TPUINFO']._serialized_start=19841 + _globals['_TPUINFO']._serialized_end=19922 + _globals['_METADATAREQUEST']._serialized_start=19925 + _globals['_METADATAREQUEST']._serialized_end=21153 + _globals['_METADATAREQUEST_DISKENTRY']._serialized_start=21019 + _globals['_METADATAREQUEST_DISKENTRY']._serialized_end=21088 + _globals['_METADATAREQUEST_SLURMENTRY']._serialized_start=21090 + _globals['_METADATAREQUEST_SLURMENTRY']._serialized_end=21134 + _globals['_PYTHONPACKAGESREQUEST']._serialized_start=21156 + _globals['_PYTHONPACKAGESREQUEST']._serialized_end=21297 + _globals['_PYTHONPACKAGESREQUEST_PYTHONPACKAGE']._serialized_start=21251 + _globals['_PYTHONPACKAGESREQUEST_PYTHONPACKAGE']._serialized_end=21297 + _globals['_JOBINPUTPATH']._serialized_start=21299 + _globals['_JOBINPUTPATH']._serialized_end=21327 + _globals['_JOBINPUTSOURCE']._serialized_start=21330 + _globals['_JOBINPUTSOURCE']._serialized_end=21544 + _globals['_JOBINPUTSOURCE_RUNCONFIGSOURCE']._serialized_start=21483 + 
+  _globals['_JOBINPUTSOURCE_RUNCONFIGSOURCE']._serialized_end=21500
+  _globals['_JOBINPUTSOURCE_CONFIGFILESOURCE']._serialized_start=21502
+  _globals['_JOBINPUTSOURCE_CONFIGFILESOURCE']._serialized_end=21534
+  _globals['_JOBINPUTREQUEST']._serialized_start=21547
+  _globals['_JOBINPUTREQUEST']._serialized_end=21746
+  _globals['_SERVERFEATUREREQUEST']._serialized_start=21748
+  _globals['_SERVERFEATUREREQUEST']._serialized_end=21864
+  _globals['_SERVERFEATURERESPONSE']._serialized_start=21866
+  _globals['_SERVERFEATURERESPONSE']._serialized_end=21941
+  _globals['_SERVERFEATUREITEM']._serialized_start=21943
+  _globals['_SERVERFEATUREITEM']._serialized_end=21993
+# @@protoc_insertion_point(module_scope)
diff --git a/vllm/lib/python3.10/site-packages/wandb/proto/v5/wandb_server_pb2.py b/vllm/lib/python3.10/site-packages/wandb/proto/v5/wandb_server_pb2.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a75bd945776b79fcde2524240114c92000e8917
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/wandb/proto/v5/wandb_server_pb2.py
@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: wandb/proto/wandb_server.proto
+# Protobuf Python Version: 5.26.1
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from wandb.proto import wandb_base_pb2 as wandb_dot_proto_dot_wandb__base__pb2
+from wandb.proto import wandb_internal_pb2 as wandb_dot_proto_dot_wandb__internal__pb2
+from wandb.proto import wandb_settings_pb2 as wandb_dot_proto_dot_wandb__settings__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1ewandb/proto/wandb_server.proto\x12\x0ewandb_internal\x1a\x1cwandb/proto/wandb_base.proto\x1a wandb/proto/wandb_internal.proto\x1a wandb/proto/wandb_settings.proto\"k\n\x19ServerAuthenticateRequest\x12\x0f\n\x07\x61pi_key\x18\x01 \x01(\t\x12\x10\n\x08\x62\x61se_url\x18\x02 \x01(\t\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"w\n\x1aServerAuthenticateResponse\x12\x16\n\x0e\x64\x65\x66\x61ult_entity\x18\x01 \x01(\t\x12\x14\n\x0c\x65rror_status\x18\x02 \x01(\t\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"D\n\x15ServerShutdownRequest\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"\x18\n\x16ServerShutdownResponse\"B\n\x13ServerStatusRequest\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"\x16\n\x14ServerStatusResponse\"r\n\x17ServerInformInitRequest\x12*\n\x08settings\x18\x01 \x01(\x0b\x32\x18.wandb_internal.Settings\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"\x1a\n\x18ServerInformInitResponse\"s\n\x18ServerInformStartRequest\x12*\n\x08settings\x18\x01 \x01(\x0b\x32\x18.wandb_internal.Settings\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"\x1b\n\x19ServerInformStartResponse\"H\n\x19ServerInformFinishRequest\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"\x1c\n\x1aServerInformFinishResponse\"H\n\x19ServerInformAttachRequest\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"u\n\x1aServerInformAttachResponse\x12*\n\x08settings\x18\x01 
\x01(\x0b\x32\x18.wandb_internal.Settings\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"H\n\x19ServerInformDetachRequest\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"\x1c\n\x1aServerInformDetachResponse\"]\n\x1bServerInformTeardownRequest\x12\x11\n\texit_code\x18\x01 \x01(\x05\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"\x1e\n\x1cServerInformTeardownResponse\"\xe7\x04\n\rServerRequest\x12\x30\n\x0erecord_publish\x18\x01 \x01(\x0b\x32\x16.wandb_internal.RecordH\x00\x12\x34\n\x12record_communicate\x18\x02 \x01(\x0b\x32\x16.wandb_internal.RecordH\x00\x12>\n\x0binform_init\x18\x03 \x01(\x0b\x32\'.wandb_internal.ServerInformInitRequestH\x00\x12\x42\n\rinform_finish\x18\x04 \x01(\x0b\x32).wandb_internal.ServerInformFinishRequestH\x00\x12\x42\n\rinform_attach\x18\x05 \x01(\x0b\x32).wandb_internal.ServerInformAttachRequestH\x00\x12\x42\n\rinform_detach\x18\x06 \x01(\x0b\x32).wandb_internal.ServerInformDetachRequestH\x00\x12\x46\n\x0finform_teardown\x18\x07 \x01(\x0b\x32+.wandb_internal.ServerInformTeardownRequestH\x00\x12@\n\x0cinform_start\x18\x08 \x01(\x0b\x32(.wandb_internal.ServerInformStartRequestH\x00\x12\x41\n\x0c\x61uthenticate\x18\t \x01(\x0b\x32).wandb_internal.ServerAuthenticateRequestH\x00\x42\x15\n\x13server_request_type\"\xfd\x04\n\x0eServerResponse\x12\x34\n\x12result_communicate\x18\x02 \x01(\x0b\x32\x16.wandb_internal.ResultH\x00\x12H\n\x14inform_init_response\x18\x03 \x01(\x0b\x32(.wandb_internal.ServerInformInitResponseH\x00\x12L\n\x16inform_finish_response\x18\x04 \x01(\x0b\x32*.wandb_internal.ServerInformFinishResponseH\x00\x12L\n\x16inform_attach_response\x18\x05 \x01(\x0b\x32*.wandb_internal.ServerInformAttachResponseH\x00\x12L\n\x16inform_detach_response\x18\x06 \x01(\x0b\x32*.wandb_internal.ServerInformDetachResponseH\x00\x12P\n\x18inform_teardown_response\x18\x07 \x01(\x0b\x32,.wandb_internal.ServerInformTeardownResponseH\x00\x12J\n\x15inform_start_response\x18\x08 \x01(\x0b\x32).wandb_internal.ServerInformStartResponseH\x00\x12K\n\x15\x61uthenticate_response\x18\t \x01(\x0b\x32*.wandb_internal.ServerAuthenticateResponseH\x00\x42\x16\n\x14server_response_typeB\x1bZ\x19\x63ore/pkg/service_go_protob\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'wandb.proto.wandb_server_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'Z\031core/pkg/service_go_proto' + _globals['_SERVERAUTHENTICATEREQUEST']._serialized_start=148 + _globals['_SERVERAUTHENTICATEREQUEST']._serialized_end=255 + _globals['_SERVERAUTHENTICATERESPONSE']._serialized_start=257 + _globals['_SERVERAUTHENTICATERESPONSE']._serialized_end=376 + _globals['_SERVERSHUTDOWNREQUEST']._serialized_start=378 + _globals['_SERVERSHUTDOWNREQUEST']._serialized_end=446 + _globals['_SERVERSHUTDOWNRESPONSE']._serialized_start=448 + _globals['_SERVERSHUTDOWNRESPONSE']._serialized_end=472 + _globals['_SERVERSTATUSREQUEST']._serialized_start=474 + _globals['_SERVERSTATUSREQUEST']._serialized_end=540 + _globals['_SERVERSTATUSRESPONSE']._serialized_start=542 + _globals['_SERVERSTATUSRESPONSE']._serialized_end=564 + _globals['_SERVERINFORMINITREQUEST']._serialized_start=566 + _globals['_SERVERINFORMINITREQUEST']._serialized_end=680 + _globals['_SERVERINFORMINITRESPONSE']._serialized_start=682 + _globals['_SERVERINFORMINITRESPONSE']._serialized_end=708 
+  _globals['_SERVERINFORMSTARTREQUEST']._serialized_start=710
+  _globals['_SERVERINFORMSTARTREQUEST']._serialized_end=825
+  _globals['_SERVERINFORMSTARTRESPONSE']._serialized_start=827
+  _globals['_SERVERINFORMSTARTRESPONSE']._serialized_end=854
+  _globals['_SERVERINFORMFINISHREQUEST']._serialized_start=856
+  _globals['_SERVERINFORMFINISHREQUEST']._serialized_end=928
+  _globals['_SERVERINFORMFINISHRESPONSE']._serialized_start=930
+  _globals['_SERVERINFORMFINISHRESPONSE']._serialized_end=958
+  _globals['_SERVERINFORMATTACHREQUEST']._serialized_start=960
+  _globals['_SERVERINFORMATTACHREQUEST']._serialized_end=1032
+  _globals['_SERVERINFORMATTACHRESPONSE']._serialized_start=1034
+  _globals['_SERVERINFORMATTACHRESPONSE']._serialized_end=1151
+  _globals['_SERVERINFORMDETACHREQUEST']._serialized_start=1153
+  _globals['_SERVERINFORMDETACHREQUEST']._serialized_end=1225
+  _globals['_SERVERINFORMDETACHRESPONSE']._serialized_start=1227
+  _globals['_SERVERINFORMDETACHRESPONSE']._serialized_end=1255
+  _globals['_SERVERINFORMTEARDOWNREQUEST']._serialized_start=1257
+  _globals['_SERVERINFORMTEARDOWNREQUEST']._serialized_end=1350
+  _globals['_SERVERINFORMTEARDOWNRESPONSE']._serialized_start=1352
+  _globals['_SERVERINFORMTEARDOWNRESPONSE']._serialized_end=1382
+  _globals['_SERVERREQUEST']._serialized_start=1385
+  _globals['_SERVERREQUEST']._serialized_end=2000
+  _globals['_SERVERRESPONSE']._serialized_start=2003
+  _globals['_SERVERRESPONSE']._serialized_end=2640
+# @@protoc_insertion_point(module_scope)
diff --git a/vllm/lib/python3.10/site-packages/wandb/proto/v5/wandb_settings_pb2.py b/vllm/lib/python3.10/site-packages/wandb/proto/v5/wandb_settings_pb2.py
new file mode 100644
index 0000000000000000000000000000000000000000..6247e18b0af0cdba46172b97d1d72f695eb6baef
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/wandb/proto/v5/wandb_settings_pb2.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: wandb/proto/wandb_settings.proto +# Protobuf Python Version: 5.26.1 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n wandb/proto/wandb_settings.proto\x12\x0ewandb_internal\x1a\x1egoogle/protobuf/wrappers.proto\" \n\x0fListStringValue\x12\r\n\x05value\x18\x01 \x03(\t\"\x1d\n\x0cListIntValue\x12\r\n\x05value\x18\x01 \x03(\x05\"\x8a\x01\n\x17MapStringKeyStringValue\x12\x41\n\x05value\x18\x01 \x03(\x0b\x32\x32.wandb_internal.MapStringKeyStringValue.ValueEntry\x1a,\n\nValueEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xcb\x01\n#MapStringKeyMapStringKeyStringValue\x12M\n\x05value\x18\x01 \x03(\x0b\x32>.wandb_internal.MapStringKeyMapStringKeyStringValue.ValueEntry\x1aU\n\nValueEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x36\n\x05value\x18\x02 \x01(\x0b\x32\'.wandb_internal.MapStringKeyStringValue:\x02\x38\x01\"\x9a\x01\n\x12OpenMetricsFilters\x12\x33\n\x08sequence\x18\x01 \x01(\x0b\x32\x1f.wandb_internal.ListStringValueH\x00\x12\x46\n\x07mapping\x18\x02 \x01(\x0b\x32\x33.wandb_internal.MapStringKeyMapStringKeyStringValueH\x00\x42\x07\n\x05value\"7\n\tRunMoment\x12\x0b\n\x03run\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x01\x12\x0e\n\x06metric\x18\x03 \x01(\t\"\x96J\n\x08Settings\x12-\n\x07\x61pi_key\x18\x37 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12:\n\x13identity_token_file\x18\xaa\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x37\n\x10\x63redentials_file\x18\xab\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x39\n\x14insecure_disable_ssl\x18\xb9\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12,\n\x08_offline\x18\x1e \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12*\n\x06x_sync\x18\x1f \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x30\n\tsync_file\x18\x86\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x07_shared\x18\xa2\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12,\n\x06run_id\x18k \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07run_url\x18q \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07project\x18\x61 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x06\x65ntity\x18\x45 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0cx_start_time\x18) \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12.\n\x08root_dir\x18i \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07log_dir\x18U \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0clog_internal\x18V \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\tfiles_dir\x18\x46 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0bx_files_dir\x18\xb4\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0cignore_globs\x18N \x01(\x0b\x32\x1f.wandb_internal.ListStringValue\x12.\n\x08\x62\x61se_url\x18\x39 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12=\n\x17x_file_stream_max_bytes\x18\xac\x01 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x46\n\x1fx_file_stream_transmit_interval\x18\xaf\x01 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x45\n\x14x_extra_http_headers\x18\x0e 
\x01(\x0b\x32\'.wandb_internal.MapStringKeyStringValue\x12=\n\x17x_file_stream_retry_max\x18\x93\x01 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12K\n$x_file_stream_retry_wait_min_seconds\x18\x94\x01 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12K\n$x_file_stream_retry_wait_max_seconds\x18\x95\x01 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x43\n\x1dx_file_stream_timeout_seconds\x18\x0f \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x42\n\x1cx_file_stream_max_line_bytes\x18\xb2\x01 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12?\n\x19x_file_transfer_retry_max\x18\x96\x01 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12M\n&x_file_transfer_retry_wait_min_seconds\x18\x97\x01 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12M\n&x_file_transfer_retry_wait_max_seconds\x18\x98\x01 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x46\n\x1fx_file_transfer_timeout_seconds\x18\x99\x01 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x39\n\x13x_graphql_retry_max\x18\x9a\x01 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12G\n x_graphql_retry_wait_min_seconds\x18\x9b\x01 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12G\n x_graphql_retry_wait_max_seconds\x18\x9c\x01 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12@\n\x19x_graphql_timeout_seconds\x18\x9d\x01 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x31\n\nhttp_proxy\x18\xa8\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0bhttps_proxy\x18\xa9\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12;\n\tx_proxies\x18\xc8\x01 \x01(\x0b\x32\'.wandb_internal.MapStringKeyStringValue\x12-\n\x07program\x18_ \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0fprogram_relpath\x18` \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x37\n\x10_code_path_local\x18\xa3\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x0fprogram_abspath\x18\x9f\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x05_args\x18\x01 \x01(\x0b\x32\x1f.wandb_internal.ListStringValue\x12)\n\x03_os\x18 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x06\x64ocker\x18\x43 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0cx_executable\x18\r \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07_python\x18\" \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\tcolab_url\x18\xa0\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12*\n\x04host\x18M \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\x08username\x18\x8d\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05\x65mail\x18\x44 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x06resume\x18\x66 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\x0bresume_from\x18\xa7\x01 \x01(\x0b\x32\x19.wandb_internal.RunMoment\x12-\n\tfork_from\x18\xa4\x01 \x01(\x0b\x32\x19.wandb_internal.RunMoment\x12\x38\n\x14\x64isable_job_creation\x18\x41 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x30\n\tsweep_url\x18\x83\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12;\n\x16x_disable_update_check\x18\xa5\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x32\n\x0ex_disable_meta\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12-\n\tsave_code\x18s \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12/\n\x0b\x64isable_git\x18? 
\x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12;\n\x16x_disable_machine_info\x18\x9e\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x33\n\x0fx_disable_stats\x18\n \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x39\n\x13x_stats_buffer_size\x18\xa1\x01 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12@\n\x19x_stats_sampling_interval\x18\xae\x01 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x30\n\x0bx_stats_pid\x18* \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12<\n\x12x_stats_disk_paths\x18\x92\x01 \x01(\x0b\x32\x1f.wandb_internal.ListStringValue\x12H\n\"x_stats_neuron_monitor_config_path\x18. \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12O\n\x1ex_stats_open_metrics_endpoints\x18/ \x01(\x0b\x32\'.wandb_internal.MapStringKeyStringValue\x12H\n\x1cx_stats_open_metrics_filters\x18\x30 \x01(\x0b\x32\".wandb_internal.OpenMetricsFilters\x12S\n!x_stats_open_metrics_http_headers\x18\xb8\x01 \x01(\x0b\x32\'.wandb_internal.MapStringKeyStringValue\x12=\n\x16x_stats_gpu_device_ids\x18\xba\x01 \x01(\x0b\x32\x1c.wandb_internal.ListIntValue\x12.\n\x07x_label\x18\xb5\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12=\n\x18x_require_legacy_service\x18\xad\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12;\n\x16x_show_operation_stats\x18\xb0\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x33\n\x0ex_primary_node\x18\xb6\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12:\n\x15x_update_finish_state\x18\xb7\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12<\n\x17\x61llow_offline_artifacts\x18\xb1\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12-\n\x07\x63onsole\x18< \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x11\x63onsole_multipart\x18\xa6\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x35\n\x10sync_tensorboard\x18\xb3\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12/\n\x0b_aws_lambda\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x33\n\x0fx_cli_only_mode\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12*\n\x06_colab\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x35\n\x11x_disable_service\x18\x08 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12:\n\x16x_disable_setproctitle\x18\t \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x34\n\x10x_disable_viewer\x18\x0b \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x39\n\x15x_flow_control_custom\x18\x10 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12;\n\x17x_flow_control_disabled\x18\x11 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12>\n\x18x_internal_check_process\x18\x12 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12,\n\x08_ipython\x18\x14 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12,\n\x08_jupyter\x18\x15 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x34\n\x0ex_jupyter_root\x18\x16 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x07_kaggle\x18\x17 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12=\n\x18x_live_policy_rate_limit\x18\x18 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12<\n\x17x_live_policy_wait_time\x18\x19 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x30\n\x0bx_log_level\x18\x1a \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x35\n\x10x_network_buffer\x18\x1b \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12)\n\x05_noop\x18\x1c \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12-\n\t_notebook\x18\x1d \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12/\n\t_platform\x18! 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x38\n\x12x_runqueue_item_id\x18# \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x37\n\x13x_save_requirements\x18% \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x39\n\x13x_service_transport\x18& \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x34\n\x0ex_service_wait\x18\' \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x35\n\x0f_start_datetime\x18( \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x33\n\r_tmp_code_dir\x18\x31 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x08_windows\x18\x34 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x34\n\x10\x61llow_val_change\x18\x35 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12/\n\tanonymous\x18\x36 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12P\n\x1f\x61zure_account_url_to_access_key\x18\x38 \x01(\x0b\x32\'.wandb_internal.MapStringKeyStringValue\x12.\n\x08\x63ode_dir\x18: \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0c\x63onfig_paths\x18; \x01(\x0b\x32\x1f.wandb_internal.ListStringValue\x12\x30\n\ndeployment\x18= \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\x0c\x64isable_code\x18> \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x31\n\rdisable_hints\x18@ \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12,\n\x08\x64isabled\x18\x42 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12)\n\x05\x66orce\x18G \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x30\n\ngit_commit\x18H \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\ngit_remote\x18I \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x34\n\x0egit_remote_url\x18J \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x08git_root\x18K \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x11heartbeat_seconds\x18L \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x32\n\x0cinit_timeout\x18O \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12,\n\x08is_local\x18P \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x30\n\njob_source\x18Q \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rlabel_disable\x18R \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12*\n\x06launch\x18S \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x38\n\x12launch_config_path\x18T \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12:\n\x14log_symlink_internal\x18W \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10log_symlink_user\x18X \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x08log_user\x18Y \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x33\n\rlogin_timeout\x18Z \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12*\n\x04mode\x18\\ \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x33\n\rnotebook_name\x18] \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x0bproject_url\x18\x62 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x05quiet\x18\x63 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12*\n\x06reinit\x18\x64 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12+\n\x07relogin\x18\x65 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x32\n\x0cresume_fname\x18g \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x07resumed\x18h \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12/\n\trun_group\x18j \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0crun_job_type\x18l \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x08run_mode\x18m \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x08run_name\x18n \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\trun_notes\x18o \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x08run_tags\x18p 
\x01(\x0b\x32\x1f.wandb_internal.ListStringValue\x12\x35\n\x11sagemaker_disable\x18r \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x35\n\x0fsettings_system\x18t \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x38\n\x12settings_workspace\x18u \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\x0bshow_colors\x18v \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12.\n\nshow_emoji\x18w \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12/\n\x0bshow_errors\x18x \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12-\n\tshow_info\x18y \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x31\n\rshow_warnings\x18z \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12*\n\x06silent\x18{ \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x32\n\x0cstart_method\x18| \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12*\n\x06strict\x18} \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x33\n\x0esummary_errors\x18~ \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x34\n\x0fsummary_timeout\x18\x7f \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x36\n\x10summary_warnings\x18\x80\x01 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12/\n\x08sweep_id\x18\x81\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x37\n\x10sweep_param_path\x18\x82\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x07symlink\x18\x84\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12/\n\x08sync_dir\x18\x85\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12:\n\x13sync_symlink_latest\x18\x87\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12J\n%table_raise_on_max_row_limit_exceeded\x18\x8a\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12/\n\x08timespec\x18\x8b\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x07tmp_dir\x18\x8c\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\twandb_dir\x18\x8e\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0ex_jupyter_name\x18\x8f\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0ex_jupyter_path\x18\x90\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\x08job_name\x18\x91\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValueJ\x04\x08\x03\x10\x04J\x04\x08\x06\x10\x07J\x04\x08\x0c\x10\rJ\x04\x08\x13\x10\x14J\x04\x08$\x10%J\x04\x08+\x10,J\x04\x08,\x10-J\x04\x08-\x10.J\x04\x08\x32\x10\x33J\x04\x08\x33\x10\x34J\x04\x08[\x10\\J\x04\x08^\x10_J\x06\x08\x88\x01\x10\x89\x01J\x06\x08\x89\x01\x10\x8a\x01\x42\x1bZ\x19\x63ore/pkg/service_go_protob\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'wandb.proto.wandb_settings_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'Z\031core/pkg/service_go_proto' + _globals['_MAPSTRINGKEYSTRINGVALUE_VALUEENTRY']._loaded_options = None + _globals['_MAPSTRINGKEYSTRINGVALUE_VALUEENTRY']._serialized_options = b'8\001' + _globals['_MAPSTRINGKEYMAPSTRINGKEYSTRINGVALUE_VALUEENTRY']._loaded_options = None + _globals['_MAPSTRINGKEYMAPSTRINGKEYSTRINGVALUE_VALUEENTRY']._serialized_options = b'8\001' + _globals['_LISTSTRINGVALUE']._serialized_start=84 + _globals['_LISTSTRINGVALUE']._serialized_end=116 + _globals['_LISTINTVALUE']._serialized_start=118 + _globals['_LISTINTVALUE']._serialized_end=147 + _globals['_MAPSTRINGKEYSTRINGVALUE']._serialized_start=150 + _globals['_MAPSTRINGKEYSTRINGVALUE']._serialized_end=288 + _globals['_MAPSTRINGKEYSTRINGVALUE_VALUEENTRY']._serialized_start=244 + 
_globals['_MAPSTRINGKEYSTRINGVALUE_VALUEENTRY']._serialized_end=288
+  _globals['_MAPSTRINGKEYMAPSTRINGKEYSTRINGVALUE']._serialized_start=291
+  _globals['_MAPSTRINGKEYMAPSTRINGKEYSTRINGVALUE']._serialized_end=494
+  _globals['_MAPSTRINGKEYMAPSTRINGKEYSTRINGVALUE_VALUEENTRY']._serialized_start=409
+  _globals['_MAPSTRINGKEYMAPSTRINGKEYSTRINGVALUE_VALUEENTRY']._serialized_end=494
+  _globals['_OPENMETRICSFILTERS']._serialized_start=497
+  _globals['_OPENMETRICSFILTERS']._serialized_end=651
+  _globals['_RUNMOMENT']._serialized_start=653
+  _globals['_RUNMOMENT']._serialized_end=708
+  _globals['_SETTINGS']._serialized_start=711
+  _globals['_SETTINGS']._serialized_end=10205
+# @@protoc_insertion_point(module_scope)
diff --git a/vllm/lib/python3.10/site-packages/wandb/proto/v5/wandb_telemetry_pb2.py b/vllm/lib/python3.10/site-packages/wandb/proto/v5/wandb_telemetry_pb2.py
new file mode 100644
index 0000000000000000000000000000000000000000..e03573a2291154a3606885ccfdd07a92eb0c6b57
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/wandb/proto/v5/wandb_telemetry_pb2.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: wandb/proto/wandb_telemetry.proto
+# Protobuf Python Version: 5.26.1
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from wandb.proto import wandb_base_pb2 as wandb_dot_proto_dot_wandb__base__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n!wandb/proto/wandb_telemetry.proto\x12\x0ewandb_internal\x1a\x1cwandb/proto/wandb_base.proto\"\xdb\x03\n\x0fTelemetryRecord\x12-\n\x0cimports_init\x18\x01 \x01(\x0b\x32\x17.wandb_internal.Imports\x12/\n\x0eimports_finish\x18\x02 \x01(\x0b\x32\x17.wandb_internal.Imports\x12(\n\x07\x66\x65\x61ture\x18\x03 \x01(\x0b\x32\x17.wandb_internal.Feature\x12\x16\n\x0epython_version\x18\x04 \x01(\t\x12\x13\n\x0b\x63li_version\x18\x05 \x01(\t\x12\x1b\n\x13huggingface_version\x18\x06 \x01(\t\x12 \n\x03\x65nv\x18\x08 \x01(\x0b\x32\x13.wandb_internal.Env\x12%\n\x05label\x18\t \x01(\x0b\x32\x16.wandb_internal.Labels\x12.\n\ndeprecated\x18\n \x01(\x0b\x32\x1a.wandb_internal.Deprecated\x12&\n\x06issues\x18\x0b \x01(\x0b\x32\x16.wandb_internal.Issues\x12\x14\n\x0c\x63ore_version\x18\x0c \x01(\t\x12\x10\n\x08platform\x18\r \x01(\t\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"\x11\n\x0fTelemetryResult\"\xf5\r\n\x07Imports\x12\r\n\x05torch\x18\x01 \x01(\x08\x12\r\n\x05keras\x18\x02 \x01(\x08\x12\x12\n\ntensorflow\x18\x03 \x01(\x08\x12\x0e\n\x06\x66\x61stai\x18\x04 \x01(\x08\x12\x0f\n\x07sklearn\x18\x05 \x01(\x08\x12\x0f\n\x07xgboost\x18\x06 \x01(\x08\x12\x10\n\x08\x63\x61tboost\x18\x07 \x01(\x08\x12\x10\n\x08lightgbm\x18\x08 \x01(\x08\x12\x19\n\x11pytorch_lightning\x18\t \x01(\x08\x12\x0e\n\x06ignite\x18\n \x01(\x08\x12\x14\n\x0ctransformers\x18\x0b \x01(\x08\x12\x0b\n\x03jax\x18\x0c \x01(\x08\x12\x10\n\x08metaflow\x18\r \x01(\x08\x12\x10\n\x08\x61llennlp\x18\x0e \x01(\x08\x12\x11\n\tautogluon\x18\x0f \x01(\x08\x12\x11\n\tautokeras\x18\x10 \x01(\x08\x12\x10\n\x08\x63\x61talyst\x18\x12 \x01(\x08\x12\x10\n\x08\x64\x65\x65pchem\x18\x15 \x01(\x08\x12\x0f\n\x07\x64\x65\x65pctr\x18\x16 \x01(\x08\x12\x0f\n\x07pycaret\x18\x1c 
\x01(\x08\x12\x14\n\x0cpytorchvideo\x18\x1d \x01(\x08\x12\x0b\n\x03ray\x18\x1e \x01(\x08\x12\x1a\n\x12simpletransformers\x18\x1f \x01(\x08\x12\x0e\n\x06skorch\x18 \x01(\x08\x12\r\n\x05spacy\x18! \x01(\x08\x12\r\n\x05\x66lash\x18\" \x01(\x08\x12\x0e\n\x06optuna\x18# \x01(\x08\x12\x0f\n\x07recbole\x18$ \x01(\x08\x12\x0c\n\x04mmcv\x18% \x01(\x08\x12\r\n\x05mmdet\x18& \x01(\x08\x12\x11\n\ttorchdrug\x18\' \x01(\x08\x12\x11\n\ttorchtext\x18( \x01(\x08\x12\x13\n\x0btorchvision\x18) \x01(\x08\x12\r\n\x05\x65legy\x18* \x01(\x08\x12\x12\n\ndetectron2\x18+ \x01(\x08\x12\r\n\x05\x66lair\x18, \x01(\x08\x12\x0c\n\x04\x66lax\x18- \x01(\x08\x12\x0c\n\x04syft\x18. \x01(\x08\x12\x0b\n\x03TTS\x18/ \x01(\x08\x12\r\n\x05monai\x18\x30 \x01(\x08\x12\x17\n\x0fhuggingface_hub\x18\x31 \x01(\x08\x12\r\n\x05hydra\x18\x32 \x01(\x08\x12\x10\n\x08\x64\x61tasets\x18\x33 \x01(\x08\x12\x0e\n\x06sacred\x18\x34 \x01(\x08\x12\x0e\n\x06joblib\x18\x35 \x01(\x08\x12\x0c\n\x04\x64\x61sk\x18\x36 \x01(\x08\x12\x0f\n\x07\x61syncio\x18\x37 \x01(\x08\x12\x11\n\tpaddleocr\x18\x38 \x01(\x08\x12\r\n\x05ppdet\x18\x39 \x01(\x08\x12\x11\n\tpaddleseg\x18: \x01(\x08\x12\x11\n\tpaddlenlp\x18; \x01(\x08\x12\r\n\x05mmseg\x18< \x01(\x08\x12\r\n\x05mmocr\x18= \x01(\x08\x12\r\n\x05mmcls\x18> \x01(\x08\x12\x0c\n\x04timm\x18? \x01(\x08\x12\x0f\n\x07\x66\x61irseq\x18@ \x01(\x08\x12\x12\n\ndeepchecks\x18\x41 \x01(\x08\x12\x10\n\x08\x63omposer\x18\x42 \x01(\x08\x12\x10\n\x08sparseml\x18\x43 \x01(\x08\x12\x10\n\x08\x61nomalib\x18\x44 \x01(\x08\x12\r\n\x05zenml\x18\x45 \x01(\x08\x12\x12\n\ncolossalai\x18\x46 \x01(\x08\x12\x12\n\naccelerate\x18G \x01(\x08\x12\x0e\n\x06merlin\x18H \x01(\x08\x12\x0f\n\x07nanodet\x18I \x01(\x08\x12#\n\x1bsegmentation_models_pytorch\x18J \x01(\x08\x12\x1d\n\x15sentence_transformers\x18K \x01(\x08\x12\x0b\n\x03\x64gl\x18L \x01(\x08\x12\x17\n\x0ftorch_geometric\x18M \x01(\x08\x12\x0c\n\x04jina\x18N \x01(\x08\x12\x0e\n\x06kornia\x18O \x01(\x08\x12\x16\n\x0e\x61lbumentations\x18P \x01(\x08\x12\x10\n\x08keras_cv\x18Q \x01(\x08\x12\x10\n\x08mmengine\x18R \x01(\x08\x12\x11\n\tdiffusers\x18S \x01(\x08\x12\x0b\n\x03trl\x18T \x01(\x08\x12\x0c\n\x04trlx\x18U \x01(\x08\x12\x11\n\tlangchain\x18V \x01(\x08\x12\x13\n\x0bllama_index\x18W \x01(\x08\x12\x15\n\rstability_sdk\x18X \x01(\x08\x12\x0f\n\x07prefect\x18Y \x01(\x08\x12\x13\n\x0bprefect_ray\x18Z \x01(\x08\x12\x10\n\x08pinecone\x18[ \x01(\x08\x12\x10\n\x08\x63hromadb\x18\\ \x01(\x08\x12\x10\n\x08weaviate\x18] \x01(\x08\x12\x13\n\x0bpromptlayer\x18^ \x01(\x08\x12\x0e\n\x06openai\x18_ \x01(\x08\x12\x0e\n\x06\x63ohere\x18` \x01(\x08\x12\x11\n\tanthropic\x18\x61 \x01(\x08\x12\x0c\n\x04peft\x18\x62 \x01(\x08\x12\x0f\n\x07optimum\x18\x63 \x01(\x08\x12\x10\n\x08\x65valuate\x18\x64 \x01(\x08\x12\x10\n\x08langflow\x18\x65 \x01(\x08\x12\x12\n\nkeras_core\x18\x66 \x01(\x08\x12\x18\n\x10lightning_fabric\x18g \x01(\x08\x12\x1c\n\x14\x63urated_transformers\x18h \x01(\x08\x12\x0e\n\x06orjson\x18i \x01(\x08\x12\x11\n\tlightning\x18j \x01(\x08\"\xb2\x0c\n\x07\x46\x65\x61ture\x12\r\n\x05watch\x18\x01 \x01(\x08\x12\x0e\n\x06\x66inish\x18\x02 \x01(\x08\x12\x0c\n\x04save\x18\x03 \x01(\x08\x12\x0f\n\x07offline\x18\x04 \x01(\x08\x12\x0f\n\x07resumed\x18\x05 \x01(\x08\x12\x0c\n\x04grpc\x18\x06 \x01(\x08\x12\x0e\n\x06metric\x18\x07 \x01(\x08\x12\r\n\x05keras\x18\x08 \x01(\x08\x12\x11\n\tsagemaker\x18\t \x01(\x08\x12\x1c\n\x14\x61rtifact_incremental\x18\n \x01(\x08\x12\x10\n\x08metaflow\x18\x0b \x01(\x08\x12\x0f\n\x07prodigy\x18\x0c \x01(\x08\x12\x15\n\rset_init_name\x18\r 
\x01(\x08\x12\x13\n\x0bset_init_id\x18\x0e \x01(\x08\x12\x15\n\rset_init_tags\x18\x0f \x01(\x08\x12\x17\n\x0fset_init_config\x18\x10 \x01(\x08\x12\x14\n\x0cset_run_name\x18\x11 \x01(\x08\x12\x14\n\x0cset_run_tags\x18\x12 \x01(\x08\x12\x17\n\x0fset_config_item\x18\x13 \x01(\x08\x12\x0e\n\x06launch\x18\x14 \x01(\x08\x12\x1c\n\x14torch_profiler_trace\x18\x15 \x01(\x08\x12\x0b\n\x03sb3\x18\x16 \x01(\x08\x12\x0f\n\x07service\x18\x17 \x01(\x08\x12\x17\n\x0finit_return_run\x18\x18 \x01(\x08\x12\x1f\n\x17lightgbm_wandb_callback\x18\x19 \x01(\x08\x12\x1c\n\x14lightgbm_log_summary\x18\x1a \x01(\x08\x12\x1f\n\x17\x63\x61tboost_wandb_callback\x18\x1b \x01(\x08\x12\x1c\n\x14\x63\x61tboost_log_summary\x18\x1c \x01(\x08\x12\x17\n\x0ftensorboard_log\x18\x1d \x01(\x08\x12\x16\n\x0e\x65stimator_hook\x18\x1e \x01(\x08\x12\x1e\n\x16xgboost_wandb_callback\x18\x1f \x01(\x08\x12\"\n\x1axgboost_old_wandb_callback\x18 \x01(\x08\x12\x0e\n\x06\x61ttach\x18! \x01(\x08\x12\x19\n\x11tensorboard_patch\x18\" \x01(\x08\x12\x18\n\x10tensorboard_sync\x18# \x01(\x08\x12\x15\n\rkfp_wandb_log\x18$ \x01(\x08\x12\x1b\n\x13maybe_run_overwrite\x18% \x01(\x08\x12\x1c\n\x14keras_metrics_logger\x18& \x01(\x08\x12\x1e\n\x16keras_model_checkpoint\x18\' \x01(\x08\x12!\n\x19keras_wandb_eval_callback\x18( \x01(\x08\x12\x1d\n\x15\x66low_control_overflow\x18) \x01(\x08\x12\x0c\n\x04sync\x18* \x01(\x08\x12\x1d\n\x15\x66low_control_disabled\x18+ \x01(\x08\x12\x1b\n\x13\x66low_control_custom\x18, \x01(\x08\x12\x18\n\x10service_disabled\x18- \x01(\x08\x12\x14\n\x0copen_metrics\x18. \x01(\x08\x12\x1a\n\x12ultralytics_yolov8\x18/ \x01(\x08\x12\x17\n\x0fimporter_mlflow\x18\x30 \x01(\x08\x12\x15\n\rsync_tfevents\x18\x31 \x01(\x08\x12\x15\n\rasync_uploads\x18\x32 \x01(\x08\x12\x16\n\x0eopenai_autolog\x18\x33 \x01(\x08\x12\x18\n\x10langchain_tracer\x18\x34 \x01(\x08\x12\x16\n\x0e\x63ohere_autolog\x18\x35 \x01(\x08\x12\x1b\n\x13hf_pipeline_autolog\x18\x36 \x01(\x08\x12\x0c\n\x04\x63ore\x18\x37 \x01(\x08\x12\r\n\x05lib_c\x18\x38 \x01(\x08\x12\x0f\n\x07lib_cpp\x18\x39 \x01(\x08\x12\x19\n\x11openai_finetuning\x18: \x01(\x08\x12\x19\n\x11\x64iffusers_autolog\x18; \x01(\x08\x12\x1f\n\x17lightning_fabric_logger\x18< \x01(\x08\x12\x14\n\x0cset_step_log\x18= \x01(\x08\x12\x13\n\x0bset_summary\x18> \x01(\x08\x12\x16\n\x0emetric_summary\x18? 
\x01(\x08\x12\x13\n\x0bmetric_goal\x18@ \x01(\x08\x12\x15\n\rmetric_hidden\x18\x41 \x01(\x08\x12\x18\n\x10metric_step_sync\x18\x42 \x01(\x08\x12\x13\n\x0bshared_mode\x18\x43 \x01(\x08\"\x96\x02\n\x03\x45nv\x12\x0f\n\x07jupyter\x18\x01 \x01(\x08\x12\x0e\n\x06kaggle\x18\x02 \x01(\x08\x12\x0f\n\x07windows\x18\x03 \x01(\x08\x12\x0e\n\x06m1_gpu\x18\x04 \x01(\x08\x12\x13\n\x0bstart_spawn\x18\x05 \x01(\x08\x12\x12\n\nstart_fork\x18\x06 \x01(\x08\x12\x18\n\x10start_forkserver\x18\x07 \x01(\x08\x12\x14\n\x0cstart_thread\x18\x08 \x01(\x08\x12\x10\n\x08maybe_mp\x18\t \x01(\x08\x12\x10\n\x08trainium\x18\n \x01(\x08\x12\x0b\n\x03pex\x18\x0b \x01(\x08\x12\r\n\x05\x63olab\x18\x0c \x01(\x08\x12\x0f\n\x07ipython\x18\r \x01(\x08\x12\x12\n\naws_lambda\x18\x0e \x01(\x08\x12\x0f\n\x07\x61md_gpu\x18\x0f \x01(\x08\"H\n\x06Labels\x12\x13\n\x0b\x63ode_string\x18\x01 \x01(\t\x12\x13\n\x0brepo_string\x18\x02 \x01(\t\x12\x14\n\x0c\x63ode_version\x18\x03 \x01(\t\"\xb5\x04\n\nDeprecated\x12!\n\x19keras_callback__data_type\x18\x01 \x01(\x08\x12\x11\n\trun__mode\x18\x02 \x01(\x08\x12\x19\n\x11run__save_no_args\x18\x03 \x01(\x08\x12\x11\n\trun__join\x18\x04 \x01(\x08\x12\r\n\x05plots\x18\x05 \x01(\x08\x12\x15\n\rrun__log_sync\x18\x06 \x01(\x08\x12!\n\x19init__config_include_keys\x18\x07 \x01(\x08\x12!\n\x19init__config_exclude_keys\x18\x08 \x01(\x08\x12\"\n\x1akeras_callback__save_model\x18\t \x01(\x08\x12\x18\n\x10langchain_tracer\x18\n \x01(\x08\x12\x1a\n\x12\x61rtifact__get_path\x18\x0b \x01(\x08\x12#\n\x1b\x61rtifactmanifestentry__name\x18\x0c \x01(\x08\x12\x1e\n\x16\x61pi__artifact_versions\x18\r \x01(\x08\x12(\n artifact_collection__change_type\x18\x0e \x01(\x08\x12\x1f\n\x17run__define_metric_copy\x18\x0f \x01(\x08\x12\x14\n\x0crun_disabled\x18\x10 \x01(\x08\x12\x16\n\x0ekeras_callback\x18\x11 \x01(\x08\x12$\n\x1crun__define_metric_best_goal\x18\x12 \x01(\x08\x12\x19\n\x11run__finish_quiet\x18\x13 \x01(\x08\"|\n\x06Issues\x12%\n\x1dsettings__validation_warnings\x18\x01 \x01(\x08\x12!\n\x19settings__unexpected_args\x18\x02 \x01(\x08\x12(\n settings__preprocessing_warnings\x18\x03 \x01(\x08\x42\x1bZ\x19\x63ore/pkg/service_go_protob\x06proto3')
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'wandb.proto.wandb_telemetry_pb2', _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+  _globals['DESCRIPTOR']._loaded_options = None
+  _globals['DESCRIPTOR']._serialized_options = b'Z\031core/pkg/service_go_proto'
+  _globals['_TELEMETRYRECORD']._serialized_start=84
+  _globals['_TELEMETRYRECORD']._serialized_end=559
+  _globals['_TELEMETRYRESULT']._serialized_start=561
+  _globals['_TELEMETRYRESULT']._serialized_end=578
+  _globals['_IMPORTS']._serialized_start=581
+  _globals['_IMPORTS']._serialized_end=2362
+  _globals['_FEATURE']._serialized_start=2365
+  _globals['_FEATURE']._serialized_end=3951
+  _globals['_ENV']._serialized_start=3954
+  _globals['_ENV']._serialized_end=4232
+  _globals['_LABELS']._serialized_start=4234
+  _globals['_LABELS']._serialized_end=4306
+  _globals['_DEPRECATED']._serialized_start=4309
+  _globals['_DEPRECATED']._serialized_end=4874
+  _globals['_ISSUES']._serialized_start=4876
+  _globals['_ISSUES']._serialized_end=5000
+# @@protoc_insertion_point(module_scope)
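
Note on the files above: these `*_pb2.py` modules are emitted by `protoc` and are not meant to be edited by hand. Each module registers its schema with the descriptor pool via `AddSerializedFile(...)`, and the `_serialized_start`/`_serialized_end` values are byte offsets of each message descriptor within that blob; they are only consulted when the pure-Python descriptor implementation is active (`not _descriptor._USE_C_DESCRIPTORS`). As a minimal sketch of how the generated bindings are typically consumed, and not part of the diff itself: the import path below follows the `wandb.proto` layout shown above, the example values are made up, and `base_url`/`run_id` are `google.protobuf.StringValue` wrapper fields per the `Settings` descriptor.

# Minimal sketch, assuming the wandb.proto package layout added in this diff.
from wandb.proto import wandb_settings_pb2

settings = wandb_settings_pb2.Settings()
settings.base_url.value = "https://api.wandb.ai"  # StringValue wrapper: assign .value
settings.run_id.value = "abc123"                  # hypothetical run id, illustrative only

payload = settings.SerializeToString()            # wire-format bytes
restored = wandb_settings_pb2.Settings.FromString(payload)
assert restored.base_url.value == settings.base_url.value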