diff --git a/.gitattributes b/.gitattributes
index 80e541c4acece66a442ee7d0a38f79648e69728c..00cb0759e0db75ec265673f1a64fb5dfaff8e092 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -518,3 +518,4 @@ mantis_evalkit/lib/python3.10/site-packages/scipy.libs/libquadmath-96973f99-934c
 mantis_evalkit/lib/python3.10/site-packages/scipy.libs/libgfortran-040039e1-0352e75f.so.5.0.0 filter=lfs diff=lfs merge=lfs -text
 mantis_evalkit/lib/python3.10/site-packages/scipy.libs/libquadmath-96973f99.so.0.0.0 filter=lfs diff=lfs merge=lfs -text
 mantis_evalkit/lib/python3.10/site-packages/kiwisolver/_cext.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+moondream/lib/python3.10/site-packages/torch/__pycache__/_meta_registrations.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pip/_internal/exceptions.py b/mantis_evalkit/lib/python3.10/site-packages/pip/_internal/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..45a876a850dfec2295ea982e1eadd5ab26364cd4
--- /dev/null
+++ b/mantis_evalkit/lib/python3.10/site-packages/pip/_internal/exceptions.py
@@ -0,0 +1,809 @@
+"""Exceptions used throughout the package.
+
+This module MUST NOT try to import from anything within `pip._internal` to
+operate. It is expected to be importable from any/all files within the
+subpackage and, thus, should not depend on them.
+"""
+
+import configparser
+import contextlib
+import locale
+import logging
+import pathlib
+import re
+import sys
+from itertools import chain, groupby, repeat
+from typing import TYPE_CHECKING, Dict, Iterator, List, Literal, Optional, Union
+
+from pip._vendor.packaging.requirements import InvalidRequirement
+from pip._vendor.packaging.version import InvalidVersion
+from pip._vendor.rich.console import Console, ConsoleOptions, RenderResult
+from pip._vendor.rich.markup import escape
+from pip._vendor.rich.text import Text
+
+if TYPE_CHECKING:
+    from hashlib import _Hash
+
+    from pip._vendor.requests.models import Request, Response
+
+    from pip._internal.metadata import BaseDistribution
+    from pip._internal.req.req_install import InstallRequirement
+
+logger = logging.getLogger(__name__)
+
+
+#
+# Scaffolding
+#
+def _is_kebab_case(s: str) -> bool:
+    return re.match(r"^[a-z]+(-[a-z]+)*$", s) is not None
+
+
+def _prefix_with_indent(
+    s: Union[Text, str],
+    console: Console,
+    *,
+    prefix: str,
+    indent: str,
+) -> Text:
+    if isinstance(s, Text):
+        text = s
+    else:
+        text = console.render_str(s)
+
+    return console.render_str(prefix, overflow="ignore") + console.render_str(
+        f"\n{indent}", overflow="ignore"
+    ).join(text.split(allow_blank=True))
+
+
+class PipError(Exception):
+    """The base pip error."""
+
+
+class DiagnosticPipError(PipError):
+    """An error that presents diagnostic information to the user.
+
+    This contains a bunch of logic to enable pretty presentation of our error
+    messages. Each error gets a unique reference. Each error can also include
+    additional context, a hint and/or a note -- which are presented with the
+    main error message in a consistent style.
+
+    This is adapted from the error output styling in `sphinx-theme-builder`.
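+
+    Example (an illustrative sketch -- ``ExampleError`` is a hypothetical
+    subclass for demonstration, not part of pip's API)::
+
+        class ExampleError(DiagnosticPipError):
+            reference = "example-error"  # must be kebab-case
+
+        raise ExampleError(
+            message="Something went wrong",
+            context="Additional detail about the failure.",
+            hint_stmt="A suggestion for how to recover.",
+        )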
+    """
+
+    reference: str
+
+    def __init__(
+        self,
+        *,
+        kind: 'Literal["error", "warning"]' = "error",
+        reference: Optional[str] = None,
+        message: Union[str, Text],
+        context: Optional[Union[str, Text]],
+        hint_stmt: Optional[Union[str, Text]],
+        note_stmt: Optional[Union[str, Text]] = None,
+        link: Optional[str] = None,
+    ) -> None:
+        # Ensure a proper reference is provided.
+        if reference is None:
+            assert hasattr(self, "reference"), "error reference not provided!"
+            reference = self.reference
+        assert _is_kebab_case(reference), "error reference must be kebab-case!"
+
+        self.kind = kind
+        self.reference = reference
+
+        self.message = message
+        self.context = context
+
+        self.note_stmt = note_stmt
+        self.hint_stmt = hint_stmt
+
+        self.link = link
+
+        super().__init__(f"<{self.__class__.__name__}: {self.reference}>")
+
+    def __repr__(self) -> str:
+        return (
+            f"<{self.__class__.__name__}("
+            f"reference={self.reference!r}, "
+            f"message={self.message!r}, "
+            f"context={self.context!r}, "
+            f"note_stmt={self.note_stmt!r}, "
+            f"hint_stmt={self.hint_stmt!r}"
+            ")>"
+        )
+
+    def __rich_console__(
+        self,
+        console: Console,
+        options: ConsoleOptions,
+    ) -> RenderResult:
+        colour = "red" if self.kind == "error" else "yellow"
+
+        yield f"[{colour} bold]{self.kind}[/]: [bold]{self.reference}[/]"
+        yield ""
+
+        if not options.ascii_only:
+            # Present the main message, with relevant context indented.
+            if self.context is not None:
+                yield _prefix_with_indent(
+                    self.message,
+                    console,
+                    prefix=f"[{colour}]×[/] ",
+                    indent=f"[{colour}]│[/] ",
+                )
+                yield _prefix_with_indent(
+                    self.context,
+                    console,
+                    prefix=f"[{colour}]╰─>[/] ",
+                    indent=f"[{colour}]   [/] ",
+                )
+            else:
+                yield _prefix_with_indent(
+                    self.message,
+                    console,
+                    prefix="[red]×[/] ",
+                    indent="  ",
+                )
+        else:
+            yield self.message
+            if self.context is not None:
+                yield ""
+                yield self.context
+
+        if self.note_stmt is not None or self.hint_stmt is not None:
+            yield ""
+
+        if self.note_stmt is not None:
+            yield _prefix_with_indent(
+                self.note_stmt,
+                console,
+                prefix="[magenta bold]note[/]: ",
+                indent="      ",
+            )
+        if self.hint_stmt is not None:
+            yield _prefix_with_indent(
+                self.hint_stmt,
+                console,
+                prefix="[cyan bold]hint[/]: ",
+                indent="      ",
+            )
+
+        if self.link is not None:
+            yield ""
+            yield f"Link: {self.link}"
+
+
+#
+# Actual Errors
+#
+class ConfigurationError(PipError):
+    """General exception in configuration"""
+
+
+class InstallationError(PipError):
+    """General exception during installation"""
+
+
+class MissingPyProjectBuildRequires(DiagnosticPipError):
+    """Raised when pyproject.toml has `build-system`, but no `build-system.requires`."""
+
+    reference = "missing-pyproject-build-system-requires"
+
+    def __init__(self, *, package: str) -> None:
+        super().__init__(
+            message=f"Can not process {escape(package)}",
+            context=Text(
+                "This package has an invalid pyproject.toml file.\n"
+                "The [build-system] table is missing the mandatory `requires` key."
+            ),
+            note_stmt="This is an issue with the package mentioned above, not pip.",
+            hint_stmt=Text("See PEP 518 for the detailed specification."),
+        )
+
+
+class InvalidPyProjectBuildRequires(DiagnosticPipError):
+    """Raised when pyproject.toml has an invalid `build-system.requires`."""
+
+    reference = "invalid-pyproject-build-system-requires"
+
+    def __init__(self, *, package: str, reason: str) -> None:
+        super().__init__(
+            message=f"Can not process {escape(package)}",
+            context=Text(
+                "This package has an invalid `build-system.requires` key in "
+                f"pyproject.toml.\n{reason}"
+            ),
+            note_stmt="This is an issue with the package mentioned above, not pip.",
+            hint_stmt=Text("See PEP 518 for the detailed specification."),
+        )
+
+
+class NoneMetadataError(PipError):
+    """Raised when accessing a Distribution's "METADATA" or "PKG-INFO".
+
+    This signifies an inconsistency, when the Distribution claims to have
+    the metadata file (if not, raise ``FileNotFoundError`` instead), but is
+    not actually able to produce its content. This may be due to permission
+    errors.
+    """
+
+    def __init__(
+        self,
+        dist: "BaseDistribution",
+        metadata_name: str,
+    ) -> None:
+        """
+        :param dist: A Distribution object.
+        :param metadata_name: The name of the metadata being accessed
+            (can be "METADATA" or "PKG-INFO").
+        """
+        self.dist = dist
+        self.metadata_name = metadata_name
+
+    def __str__(self) -> str:
+        # Use `dist` in the error message because its stringification
+        # includes more information, like the version and location.
+        return f"None {self.metadata_name} metadata found for distribution: {self.dist}"
+
+
+class UserInstallationInvalid(InstallationError):
+    """A --user install is requested on an environment without user site."""
+
+    def __str__(self) -> str:
+        return "User base directory is not specified"
+
+
+class InvalidSchemeCombination(InstallationError):
+    def __str__(self) -> str:
+        before = ", ".join(str(a) for a in self.args[:-1])
+        return f"Cannot set {before} and {self.args[-1]} together"
+
+
+class DistributionNotFound(InstallationError):
+    """Raised when a distribution cannot be found to satisfy a requirement"""
+
+
+class RequirementsFileParseError(InstallationError):
+    """Raised when a general error occurs parsing a requirements file line."""
+
+
+class BestVersionAlreadyInstalled(PipError):
+    """Raised when the most up-to-date version of a package is already
+    installed."""
+
+
+class BadCommand(PipError):
+    """Raised when virtualenv or a command is not found"""
+
+
+class CommandError(PipError):
+    """Raised when there is an error in command-line arguments"""
+
+
+class PreviousBuildDirError(PipError):
+    """Raised when there's a previous conflicting build directory"""
+
+
+class NetworkConnectionError(PipError):
+    """HTTP connection error"""
+
+    def __init__(
+        self,
+        error_msg: str,
+        response: Optional["Response"] = None,
+        request: Optional["Request"] = None,
+    ) -> None:
+        """
+        Initialize NetworkConnectionError with `request` and `response`
+        objects.
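+
+        Example (illustrative sketch; ``resp`` stands for a
+        ``requests.Response`` obtained elsewhere)::
+
+            raise NetworkConnectionError("Connection timed out", response=resp)
+
+        If no ``request`` argument is given but the response carries a
+        ``request`` attribute, the request is filled in from the response.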
+        """
+        self.response = response
+        self.request = request
+        self.error_msg = error_msg
+        if (
+            self.response is not None
+            and not self.request
+            and hasattr(response, "request")
+        ):
+            self.request = self.response.request
+        super().__init__(error_msg, response, request)
+
+    def __str__(self) -> str:
+        return str(self.error_msg)
+
+
+class InvalidWheelFilename(InstallationError):
+    """Invalid wheel filename."""
+
+
+class UnsupportedWheel(InstallationError):
+    """Unsupported wheel."""
+
+
+class InvalidWheel(InstallationError):
+    """Invalid (e.g. corrupt) wheel."""
+
+    def __init__(self, location: str, name: str):
+        self.location = location
+        self.name = name
+
+    def __str__(self) -> str:
+        return f"Wheel '{self.name}' located at {self.location} is invalid."
+
+
+class MetadataInconsistent(InstallationError):
+    """Built metadata contains inconsistent information.
+
+    This is raised when the metadata contains values (e.g. name and version)
+    that do not match the information previously obtained from sdist filename,
+    user-supplied ``#egg=`` value, or an install requirement name.
+    """
+
+    def __init__(
+        self, ireq: "InstallRequirement", field: str, f_val: str, m_val: str
+    ) -> None:
+        self.ireq = ireq
+        self.field = field
+        self.f_val = f_val
+        self.m_val = m_val
+
+    def __str__(self) -> str:
+        return (
+            f"Requested {self.ireq} has inconsistent {self.field}: "
+            f"expected {self.f_val!r}, but metadata has {self.m_val!r}"
+        )
+
+
+class MetadataInvalid(InstallationError):
+    """Metadata is invalid."""
+
+    def __init__(self, ireq: "InstallRequirement", error: str) -> None:
+        self.ireq = ireq
+        self.error = error
+
+    def __str__(self) -> str:
+        return f"Requested {self.ireq} has invalid metadata: {self.error}"
+
+
+class InstallationSubprocessError(DiagnosticPipError, InstallationError):
+    """A subprocess call failed."""
+
+    reference = "subprocess-exited-with-error"
+
+    def __init__(
+        self,
+        *,
+        command_description: str,
+        exit_code: int,
+        output_lines: Optional[List[str]],
+    ) -> None:
+        if output_lines is None:
+            output_prompt = Text("See above for output.")
+        else:
+            output_prompt = (
+                Text.from_markup(f"[red][{len(output_lines)} lines of output][/]\n")
+                + Text("".join(output_lines))
+                + Text.from_markup(R"[red]\[end of output][/]")
+            )
+
+        super().__init__(
+            message=(
+                f"[green]{escape(command_description)}[/] did not run successfully.\n"
+                f"exit code: {exit_code}"
+            ),
+            context=output_prompt,
+            hint_stmt=None,
+            note_stmt=(
+                "This error originates from a subprocess, and is likely not a "
+                "problem with pip."
+            ),
+        )
+
+        self.command_description = command_description
+        self.exit_code = exit_code
+
+    def __str__(self) -> str:
+        return f"{self.command_description} exited with {self.exit_code}"
+
+
+class MetadataGenerationFailed(InstallationSubprocessError, InstallationError):
+    reference = "metadata-generation-failed"
+
+    def __init__(
+        self,
+        *,
+        package_details: str,
+    ) -> None:
+        super(InstallationSubprocessError, self).__init__(
+            message="Encountered error while generating package metadata.",
+            context=escape(package_details),
+            hint_stmt="See above for details.",
+            note_stmt="This is an issue with the package mentioned above, not pip.",
+        )
+
+    def __str__(self) -> str:
+        return "metadata generation failed"
+
+
+class HashErrors(InstallationError):
+    """Multiple HashError instances rolled into one for reporting"""
+
+    def __init__(self) -> None:
+        self.errors: List[HashError] = []
+
+    def append(self, error: "HashError") -> None:
+        self.errors.append(error)
+
+    def __str__(self) -> str:
+        lines = []
+        self.errors.sort(key=lambda e: e.order)
+        for cls, errors_of_cls in groupby(self.errors, lambda e: e.__class__):
+            lines.append(cls.head)
+            lines.extend(e.body() for e in errors_of_cls)
+        if lines:
+            return "\n".join(lines)
+        return ""
+
+    def __bool__(self) -> bool:
+        return bool(self.errors)
+
+
+class HashError(InstallationError):
+    """
+    A failure to verify a package against known-good hashes
+
+    :cvar order: An int sorting hash exception classes by difficulty of
+        recovery (lower being harder), so the user doesn't bother fretting
+        about unpinned packages when he has deeper issues, like VCS
+        dependencies, to deal with. Also keeps error reports in a
+        deterministic order.
+    :cvar head: A section heading for display above potentially many
+        exceptions of this kind
+    :ivar req: The InstallRequirement that triggered this error. This is
+        pasted on after the exception is instantiated, because it's not
+        typically available earlier.
+
+    """
+
+    req: Optional["InstallRequirement"] = None
+    head = ""
+    order: int = -1
+
+    def body(self) -> str:
+        """Return a summary of me for display under the heading.
+
+        This default implementation simply prints a description of the
+        triggering requirement.
+
+        :param req: The InstallRequirement that provoked this error, with
+            its link already populated by the resolver's _populate_link().
+
+        """
+        return f"    {self._requirement_name()}"
+
+    def __str__(self) -> str:
+        return f"{self.head}\n{self.body()}"
+
+    def _requirement_name(self) -> str:
+        """Return a description of the requirement that triggered me.
+
+        This default implementation returns the long description of the req,
+        with line numbers
+
+        """
+        return str(self.req) if self.req else "unknown package"
+
+
+class VcsHashUnsupported(HashError):
+    """A hash was provided for a version-control-system-based requirement, but
+    we don't have a method for hashing those."""
+
+    order = 0
+    head = (
+        "Can't verify hashes for these requirements because we don't "
+        "have a way to hash version control repositories:"
+    )
+
+
+class DirectoryUrlHashUnsupported(HashError):
+    """A hash was provided for a ``file://`` requirement that points to a
+    directory, but we don't have a method for hashing directories."""
+
+    order = 1
+    head = (
+        "Can't verify hashes for these file:// requirements because they "
+        "point to directories:"
+    )
+
+
+class HashMissing(HashError):
+    """A hash was needed for a requirement but is absent."""
+
+    order = 2
+    head = (
+        "Hashes are required in --require-hashes mode, but they are "
+        "missing from some requirements. Here is a list of those "
+        "requirements along with the hashes their downloaded archives "
+        "actually had. Add lines like these to your requirements files to "
+        "prevent tampering. (If you did not enable --require-hashes "
+        "manually, note that it turns on automatically when any package "
+        "has a hash.)"
+    )
+
+    def __init__(self, gotten_hash: str) -> None:
+        """
+        :param gotten_hash: The hash of the (possibly malicious) archive we
+            just downloaded
+        """
+        self.gotten_hash = gotten_hash
+
+    def body(self) -> str:
+        # Dodge circular import.
+        from pip._internal.utils.hashes import FAVORITE_HASH
+
+        package = None
+        if self.req:
+            # In the case of URL-based requirements, display the original URL
+            # seen in the requirements file rather than the package name,
+            # so the output can be directly copied into the requirements file.
+            package = (
+                self.req.original_link
+                if self.req.is_direct
+                # In case someone feeds something downright stupid
+                # to InstallRequirement's constructor.
+                else getattr(self.req, "req", None)
+            )
+        return "    {} --hash={}:{}".format(
+            package or "unknown package", FAVORITE_HASH, self.gotten_hash
+        )
+
+
+class HashUnpinned(HashError):
+    """A requirement had a hash specified but was not pinned to a specific
+    version."""
+
+    order = 3
+    head = (
+        "In --require-hashes mode, all requirements must have their "
+        "versions pinned with ==. These do not:"
+    )
+
+
+class HashMismatch(HashError):
+    """
+    Distribution file hash values don't match.
+
+    :ivar package_name: The name of the package that triggered the hash
+        mismatch. Feel free to write to this after the exception is raised to
+        improve its error message.
+
+    """
+
+    order = 4
+    head = (
+        "THESE PACKAGES DO NOT MATCH THE HASHES FROM THE REQUIREMENTS "
+        "FILE. If you have updated the package versions, please update "
+        "the hashes. Otherwise, examine the package contents carefully; "
+        "someone may have tampered with them."
+    )
+
+    def __init__(self, allowed: Dict[str, List[str]], gots: Dict[str, "_Hash"]) -> None:
+        """
+        :param allowed: A dict of algorithm names pointing to lists of allowed
+            hex digests
+        :param gots: A dict of algorithm names pointing to hashes we
+            actually got from the files under suspicion
+        """
+        self.allowed = allowed
+        self.gots = gots
+
+    def body(self) -> str:
+        return f"    {self._requirement_name()}:\n{self._hash_comparison()}"
+
+    def _hash_comparison(self) -> str:
+        """
+        Return a comparison of actual and expected hash values.
+
+        Example::
+
+               Expected sha256 abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcde
+                            or 123451234512345123451234512345123451234512345
+                    Got        bcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdef
+
+        """
+
+        def hash_then_or(hash_name: str) -> "chain[str]":
+            # For now, all the decent hashes have 6-char names, so we can get
+            # away with hard-coding space literals.
+            return chain([hash_name], repeat("    or"))
+
+        lines: List[str] = []
+        for hash_name, expecteds in self.allowed.items():
+            prefix = hash_then_or(hash_name)
+            lines.extend((f"        Expected {next(prefix)} {e}") for e in expecteds)
+            lines.append(
+                f"             Got        {self.gots[hash_name].hexdigest()}\n"
+            )
+        return "\n".join(lines)
+
+
+class UnsupportedPythonVersion(InstallationError):
+    """Unsupported python version according to Requires-Python package
+    metadata."""
+
+
+class ConfigurationFileCouldNotBeLoaded(ConfigurationError):
+    """When there are errors while loading a configuration file"""
+
+    def __init__(
+        self,
+        reason: str = "could not be loaded",
+        fname: Optional[str] = None,
+        error: Optional[configparser.Error] = None,
+    ) -> None:
+        super().__init__(error)
+        self.reason = reason
+        self.fname = fname
+        self.error = error
+
+    def __str__(self) -> str:
+        if self.fname is not None:
+            message_part = f" in {self.fname}."
+        else:
+            assert self.error is not None
+            message_part = f".\n{self.error}\n"
+        return f"Configuration file {self.reason}{message_part}"
+
+
+_DEFAULT_EXTERNALLY_MANAGED_ERROR = f"""\
+The Python environment under {sys.prefix} is managed externally, and may not be
+manipulated by the user. Please use specific tooling from the distributor of
+the Python installation to interact with this environment instead.
+"""
+
+
+class ExternallyManagedEnvironment(DiagnosticPipError):
+    """The current environment is externally managed.
+
+    This is raised when the current environment is externally managed, as
+    defined by `PEP 668`_. The ``EXTERNALLY-MANAGED`` configuration is checked
+    and displayed when the error is bubbled up to the user.
+
+    :param error: The error message read from ``EXTERNALLY-MANAGED``.
+    """
+
+    reference = "externally-managed-environment"
+
+    def __init__(self, error: Optional[str]) -> None:
+        if error is None:
+            context = Text(_DEFAULT_EXTERNALLY_MANAGED_ERROR)
+        else:
+            context = Text(error)
+        super().__init__(
+            message="This environment is externally managed",
+            context=context,
+            note_stmt=(
+                "If you believe this is a mistake, please contact your "
+                "Python installation or OS distribution provider. "
+                "You can override this, at the risk of breaking your Python "
+                "installation or OS, by passing --break-system-packages."
+            ),
+            hint_stmt=Text("See PEP 668 for the detailed specification."),
+        )
+
+    @staticmethod
+    def _iter_externally_managed_error_keys() -> Iterator[str]:
+        # LC_MESSAGES is in POSIX, but not the C standard. The most common
+        # platform that does not implement this category is Windows, where
+        # using other categories for console message localization is equally
+        # unreliable, so we fall back to the locale-less vendor message. This
+        # can always be re-evaluated when a vendor proposes a new alternative.
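+        # Illustrative sketch (assuming the process locale is "de_DE"): the
+        # code below would yield the keys "Error-de_DE", then "Error-de", and
+        # finally the locale-less "Error", in that order.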
+        try:
+            category = locale.LC_MESSAGES
+        except AttributeError:
+            lang: Optional[str] = None
+        else:
+            lang, _ = locale.getlocale(category)
+        if lang is not None:
+            yield f"Error-{lang}"
+            for sep in ("-", "_"):
+                before, found, _ = lang.partition(sep)
+                if not found:
+                    continue
+                yield f"Error-{before}"
+        yield "Error"
+
+    @classmethod
+    def from_config(
+        cls,
+        config: Union[pathlib.Path, str],
+    ) -> "ExternallyManagedEnvironment":
+        parser = configparser.ConfigParser(interpolation=None)
+        try:
+            parser.read(config, encoding="utf-8")
+            section = parser["externally-managed"]
+            for key in cls._iter_externally_managed_error_keys():
+                with contextlib.suppress(KeyError):
+                    return cls(section[key])
+        except KeyError:
+            pass
+        except (OSError, UnicodeDecodeError, configparser.ParsingError):
+            from pip._internal.utils._log import VERBOSE
+
+            exc_info = logger.isEnabledFor(VERBOSE)
+            logger.warning("Failed to read %s", config, exc_info=exc_info)
+        return cls(None)
+
+
+class UninstallMissingRecord(DiagnosticPipError):
+    reference = "uninstall-no-record-file"
+
+    def __init__(self, *, distribution: "BaseDistribution") -> None:
+        installer = distribution.installer
+        if not installer or installer == "pip":
+            dep = f"{distribution.raw_name}=={distribution.version}"
+            hint = Text.assemble(
+                "You might be able to recover from this via: ",
+                (f"pip install --force-reinstall --no-deps {dep}", "green"),
+            )
+        else:
+            hint = Text(
+                f"The package was installed by {installer}. "
+                "You should check if it can uninstall the package."
+            )
+
+        super().__init__(
+            message=Text(f"Cannot uninstall {distribution}"),
+            context=(
+                "The package's contents are unknown: "
+                f"no RECORD file was found for {distribution.raw_name}."
+            ),
+            hint_stmt=hint,
+        )
+
+
+class LegacyDistutilsInstall(DiagnosticPipError):
+    reference = "uninstall-distutils-installed-package"
+
+    def __init__(self, *, distribution: "BaseDistribution") -> None:
+        super().__init__(
+            message=Text(f"Cannot uninstall {distribution}"),
+            context=(
+                "It is a distutils installed project and thus we cannot accurately "
+                "determine which files belong to it which would lead to only a partial "
+                "uninstall."
+            ),
+            hint_stmt=None,
+        )
+
+
+class InvalidInstalledPackage(DiagnosticPipError):
+    reference = "invalid-installed-package"
+
+    def __init__(
+        self,
+        *,
+        dist: "BaseDistribution",
+        invalid_exc: Union[InvalidRequirement, InvalidVersion],
+    ) -> None:
+        installed_location = dist.installed_location
+
+        if isinstance(invalid_exc, InvalidRequirement):
+            invalid_type = "requirement"
+        else:
+            invalid_type = "version"
+
+        super().__init__(
+            message=Text(
+                f"Cannot process installed package {dist} "
+                + (f"in {installed_location!r} " if installed_location else "")
+                + f"because it has an invalid {invalid_type}:\n{invalid_exc.args[0]}"
+            ),
+            context=(
+                "Starting with pip 24.1, packages with invalid "
+                f"{invalid_type}s can not be processed."
+ ), + hint_stmt="To proceed this package must be uninstalled.", + ) diff --git a/moondream/lib/python3.10/site-packages/torch/__pycache__/_meta_registrations.cpython-310.pyc b/moondream/lib/python3.10/site-packages/torch/__pycache__/_meta_registrations.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b4a42d9e0b028ab4231593b41097023bbb9550b8 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/__pycache__/_meta_registrations.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:628d7b3c3dc8df2b6184f400d285bbe9476cf07f5fe2890c7d5da0f2f9822ecb +size 165904 diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/_efficient_attention_forward_cuda_dispatch.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/_efficient_attention_forward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0b20095db35be1159c4f550e2686229d1412a0f7 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/_efficient_attention_forward_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple _efficient_attention_forward(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional & bias, const c10::optional & cu_seqlens_q, const c10::optional & cu_seqlens_k, c10::optional max_seqlen_q, c10::optional max_seqlen_k, double dropout_p, int64_t custom_mask_type, bool compute_log_sumexp=false, c10::optional scale=c10::nullopt, const c10::optional & causal_diagonal={}, const c10::optional & seqlen_k={}); + +} // namespace cuda +} // namespace at diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_backward_compositeimplicitautograd_dispatch.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_backward_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..10a946cc407750504c06018e6f3399f66f320936 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_backward_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor _embedding_bag_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, int64_t padding_idx=-1); +TORCH_API at::Tensor _embedding_bag_backward_symint(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, int64_t padding_idx=-1); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log1p_native.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log1p_native.h new file mode 100644 index 0000000000000000000000000000000000000000..0d060589c1280d6da947d1b936f5e26fc25048f1 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log1p_native.h @@ -0,0 +1,25 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API void _foreach_log1p_out(at::TensorList self, at::TensorList out); +TORCH_API ::std::vector foreach_tensor_log1p_slow(at::TensorList self); +TORCH_API void foreach_tensor_log1p_slow_(at::TensorList self); +TORCH_API ::std::vector foreach_tensor_log1p_cuda(at::TensorList self); +TORCH_API void foreach_tensor_log1p_cuda_(at::TensorList self); +} // namespace native +} // namespace at diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log_ops.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..de31a883889cfc20c99365a52d60e2850c2ef73c --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API _foreach_log { + using schema = ::std::vector (at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_log") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_log(Tensor[] self) -> Tensor[]") + static ::std::vector call(at::TensorList self); + static ::std::vector redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self); +}; + +struct TORCH_API _foreach_log_ { + using schema = void (at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_log_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_log_(Tensor(a!)[] self) -> ()") + static void call(at::TensorList self); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self); +}; + +struct TORCH_API _foreach_log_out { + using schema = void (at::TensorList, at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_log") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_log.out(Tensor[] self, *, Tensor(a!)[] out) -> ()") + static void call(at::TensorList self, at::TensorList out); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out); +}; + +}} // namespace at::_ops diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_round_native.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_round_native.h new file mode 100644 index 0000000000000000000000000000000000000000..d859eb393547257af445ee56cc433260033534c9 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_round_native.h @@ -0,0 +1,25 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API void _foreach_round_out(at::TensorList self, at::TensorList out); +TORCH_API ::std::vector foreach_tensor_round_slow(at::TensorList self); +TORCH_API void foreach_tensor_round_slow_(at::TensorList self); +TORCH_API ::std::vector foreach_tensor_round_cuda(at::TensorList self); +TORCH_API void foreach_tensor_round_cuda_(at::TensorList self); +} // namespace native +} // namespace at diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_adamw_compositeexplicitautograd_dispatch.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_adamw_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c5ad41b10890d56b9c0a0b642d675c5ade57a761 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_adamw_compositeexplicitautograd_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include 
+ +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API ::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> _fused_adamw(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale={}, const c10::optional & found_inf={}); +TORCH_API void _fused_adamw_out(at::TensorList out, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale={}, const c10::optional & found_inf={}); +TORCH_API void _fused_adamw_outf(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf, at::TensorList out); +TORCH_API ::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> _fused_adamw(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale={}, const c10::optional & found_inf={}); +TORCH_API void _fused_adamw_out(at::TensorList out, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale={}, const c10::optional & found_inf={}); +TORCH_API void _fused_adamw_outf(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf, at::TensorList out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_buffer_copy_native.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_buffer_copy_native.h new file mode 100644 index 0000000000000000000000000000000000000000..fe78e3a05dce7a97b4ece22777afc5c1212bda50 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_buffer_copy_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & 
_nested_view_from_buffer_copy_out(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets, at::Tensor & out); +TORCH_API at::Tensor _nested_view_from_buffer_copy(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets); +} // namespace native +} // namespace at diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/_shape_as_tensor_native.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/_shape_as_tensor_native.h new file mode 100644 index 0000000000000000000000000000000000000000..98054c765d663575c88a9efd5efb4cb70e06218c --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/_shape_as_tensor_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor _shape_as_tensor(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_broadcast_to_native.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_broadcast_to_native.h new file mode 100644 index 0000000000000000000000000000000000000000..eca29878a4b404364f6833556fef22f63e143bca --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_broadcast_to_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor sparse_broadcast_to(const at::Tensor & self, at::IntArrayRef size); +} // namespace native +} // namespace at diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_csc_native.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_csc_native.h new file mode 100644 index 0000000000000000000000000000000000000000..c1d705d46b37c09eaf649306f371ffd46b2a9458 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_csc_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & _to_sparse_csc_out(const at::Tensor & self, c10::optional dense_dim, at::Tensor & out); +TORCH_API at::Tensor dense_to_sparse_csc(const at::Tensor & self, c10::optional dense_dim=c10::nullopt); +TORCH_API at::Tensor coo_to_sparse_csc(const at::Tensor & self, c10::optional dense_dim=c10::nullopt); +TORCH_API at::Tensor sparse_compressed_to_sparse_csc(const at::Tensor & self, c10::optional dense_dim=c10::nullopt); +} // namespace native +} // namespace at diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool2d_ops.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool2d_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..eac5ea58c518f4dbf8ef07e41129edc396cc4953 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool2d_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// 
Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API adaptive_avg_pool2d_out { + using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_avg_pool2d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out); +}; + +struct TORCH_API adaptive_avg_pool2d { + using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_avg_pool2d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor") + static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef output_size); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size); +}; + +}} // namespace at::_ops diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_native.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..86e691af45088836e54babde6d08e2215cf8c789 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_adaptive_max_pool2d_out_cpu : public at::meta::structured_adaptive_max_pool2d { +void impl(const at::Tensor & self, at::IntArrayRef output_size, const at::Tensor & out, const at::Tensor & indices); +}; +struct TORCH_API structured_adaptive_max_pool2d_out_cuda : public at::meta::structured_adaptive_max_pool2d { +void impl(const at::Tensor & self, at::IntArrayRef output_size, const at::Tensor & out, const at::Tensor & indices); +}; +} // namespace native +} // namespace at diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/angle_cuda_dispatch.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/angle_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..675ed3503cb8b85d87939a4cee7e448f766ab08e --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/angle_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that 
have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor angle(const at::Tensor & self); +TORCH_API at::Tensor & angle_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & angle_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/argsort_native.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/argsort_native.h new file mode 100644 index 0000000000000000000000000000000000000000..aa7f917da7380ef160e65eb62589c254260dc7b1 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/argsort_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor argsort(const at::Tensor & self, int64_t dim=-1, bool descending=false); +TORCH_API at::Tensor & argsort_stable_out(const at::Tensor & self, bool stable, int64_t dim, bool descending, at::Tensor & out); +TORCH_API at::Tensor argsort_stable(const at::Tensor & self, bool stable, int64_t dim=-1, bool descending=false); +TORCH_API at::Tensor argsort(const at::Tensor & self, at::Dimname dim, bool descending=false); +} // namespace native +} // namespace at diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/cdist_native.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/cdist_native.h new file mode 100644 index 0000000000000000000000000000000000000000..81d03bd67f6b8af15ad56bf5b71b91690dacf703 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/cdist_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor cdist(const at::Tensor & x1, const at::Tensor & x2, double p=2, c10::optional compute_mode=c10::nullopt); +} // namespace native +} // namespace at diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/col2im_ops.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/col2im_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..45be9d7513b5bb8956986f05c2fb9fa94f25b563 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/col2im_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API col2im_out { + using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::col2im") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out); +}; + +struct TORCH_API col2im { + using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::col2im") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor") + static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride); +}; + +}} // namespace at::_ops diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/copy_native.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/copy_native.h new file mode 100644 index 0000000000000000000000000000000000000000..7cc0f58ad766b4ff5321a64c6e1b7105a3d80d1d --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/copy_native.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & copy_out(const at::Tensor & self, const at::Tensor & src, bool non_blocking, at::Tensor & out); +TORCH_API at::Tensor & copy_(at::Tensor & self, const at::Tensor & src, bool non_blocking=false); +TORCH_API at::Tensor & copy_nested_(at::Tensor & self, const at::Tensor & src, bool non_blocking=false); +TORCH_API at::Tensor & copy_sparse_wrapper_(at::Tensor & self, const at::Tensor & src, bool non_blocking=false); +TORCH_API at::Tensor & copy_sparse_compressed_(at::Tensor & self, const at::Tensor & src, bool non_blocking=false); +TORCH_API at::Tensor & copy_mkldnn_(at::Tensor & self, const at::Tensor & src, bool non_blocking=false); +TORCH_API at::Tensor copy(const at::Tensor & self, const at::Tensor & src, bool non_blocking=false); +} // namespace native +} // namespace 
at diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_grid_sampler_cuda_dispatch.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_grid_sampler_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..946edb16883ffccb46a7ecf3811f29ffa77664de --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_grid_sampler_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor cudnn_grid_sampler(const at::Tensor & self, const at::Tensor & grid); + +} // namespace cuda +} // namespace at diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/digamma_native.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/digamma_native.h new file mode 100644 index 0000000000000000000000000000000000000000..b541ef800e29740a8c277a5fb527fb64480cbf54 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/digamma_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_digamma_out : public at::meta::structured_digamma { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/dstack_ops.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/dstack_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..9c2c56de3d9ff3dda532f449bf2e6c7d1a9953f1 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/dstack_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API dstack { + using schema = at::Tensor (at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::dstack") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "dstack(Tensor[] tensors) -> Tensor") + static at::Tensor call(at::TensorList tensors); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors); +}; + +struct TORCH_API dstack_out { + using schema = at::Tensor & (at::TensorList, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::dstack") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(at::TensorList tensors, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_backward_ops.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..d9ee3c8e9d1783821ed3466f324743527bc2e0ee --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_backward_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API embedding_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::SymInt, c10::SymInt, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::embedding_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "embedding_backward(Tensor grad, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor") + static at::Tensor call(const at::Tensor & grad, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse); +}; + +}} // namespace at::_ops diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/exp_compositeexplicitautogradnonfunctional_dispatch.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/exp_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..caba613590a82bfe94f46e78313151495f4310b1 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/exp_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor exp(const at::Tensor & self); +TORCH_API at::Tensor & exp_(at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/feature_alpha_dropout_compositeimplicitautograd_dispatch.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/feature_alpha_dropout_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..29cdd6392e00467bc08f0fac60061e91d980664b --- /dev/null +++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/feature_alpha_dropout_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/feature_alpha_dropout_compositeimplicitautograd_dispatch.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/feature_alpha_dropout_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..29cdd6392e00467bc08f0fac60061e91d980664b
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/feature_alpha_dropout_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor feature_alpha_dropout(const at::Tensor & input, double p, bool train);
+TORCH_API at::Tensor & feature_alpha_dropout_(at::Tensor & self, double p, bool train);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/flatten_dense_tensors.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/flatten_dense_tensors.h
new file mode 100644
index 0000000000000000000000000000000000000000..bc6c945c0854928cfd497af8a67b8477111aa9c8
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/flatten_dense_tensors.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/flatten_dense_tensors_ops.h>
+
+namespace at {
+
+
+// aten::flatten_dense_tensors(Tensor[] tensors) -> Tensor
+inline at::Tensor flatten_dense_tensors(at::TensorList tensors) {
+    return at::_ops::flatten_dense_tensors::call(tensors);
+}
+
+}
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/flip_cpu_dispatch.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/flip_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f6b4164348e2f9dbbe646eea0abb0d17724e657a
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/flip_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor flip(const at::Tensor & self, at::IntArrayRef dims);
+
+} // namespace cpu
+} // namespace at
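// ---------------------------------------------------------------------------
// Editorial aside, not part of the diff: the per-backend namespaces generated
// by these *_dispatch.h headers (at::cpu, at::cuda, at::meta, at::composite*)
// expose the same signatures as the public API but jump straight to one
// backend's kernel, skipping the dispatcher. A sketch, assuming a CPU build:
#include <ATen/ATen.h>
#include <ATen/ops/flip_cpu_dispatch.h>

int main() {
  at::Tensor t = at::arange(6).reshape({2, 3});
  at::Tensor a = at::flip(t, {0});       // normal dispatched call
  at::Tensor b = at::cpu::flip(t, {0});  // direct CPU kernel, no dispatch
  TORCH_CHECK(at::equal(a, b));
  return 0;
}
// ---------------------------------------------------------------------------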
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/glu_ops.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/glu_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..170f77aba58c7bb1618754c36dc0ebfd3861dd97
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/glu_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API glu_out {
+  using schema = at::Tensor & (const at::Tensor &, int64_t, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::glu")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, int64_t dim, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & out);
+};
+
+struct TORCH_API glu {
+  using schema = at::Tensor (const at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::glu")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "glu(Tensor self, int dim=-1) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, int64_t dim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim);
+};
+
+}} // namespace at::_ops
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_2d_compositeexplicitautograd_dispatch.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_2d_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1f87bc0f0a5b07c21e4a312fc00f1880f6b344e6
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_2d_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & grid_sampler_2d_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
+TORCH_API at::Tensor & grid_sampler_2d_outf(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_cuda_dispatch.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0f29b0293645726c227b2264e4eaf866a1e179a4
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor hardswish(const at::Tensor & self);
+TORCH_API at::Tensor & hardswish_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & hardswish_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & hardswish_(at::Tensor & self);
+
+} // namespace cuda
+} // namespace at
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_backward_native.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..7df1d154e1ec3b7475058ddf6e3f0ebaffcff6c6
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_backward_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor hardtanh_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val);
+TORCH_API at::Tensor & hardtanh_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & grad_input);
+} // namespace native
+} // namespace at
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/histogramdd_compositeimplicitautograd_dispatch.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/histogramdd_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a608c04a32c1332d2699020d43d10d6c1b702aee
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/histogramdd_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false);
+TORCH_API ::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd(const at::Tensor & self, int64_t bins, c10::optional<at::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false);
+TORCH_API ::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd(const at::Tensor & self, at::TensorList bins, c10::optional<at::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_cuda_dispatch.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e20abaad71c819df823ed82aa19b0d55bf11487e
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor huber_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double delta=1.0);
+TORCH_API at::Tensor & huber_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double delta=1.0);
+TORCH_API at::Tensor & huber_loss_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
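// ---------------------------------------------------------------------------
// Editorial aside, not part of the diff: the three histogramdd overloads above
// differ only in how bins are given (counts per dimension, one count, or
// explicit edge tensors). A sketch of the single-count form:
#include <ATen/ATen.h>

int main() {
  at::Tensor pts = at::rand({100, 2});           // 100 points in 2-D
  auto [hist, edges] = at::histogramdd(pts, 4);  // 4 bins per dimension
  TORCH_CHECK(hist.sizes() == at::IntArrayRef({4, 4}));
  TORCH_CHECK(edges.size() == 2);                // one edge tensor per dim
  return 0;
}
// ---------------------------------------------------------------------------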
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_meta_dispatch.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a10c3d3aead9f28e4d8b15b299c58b9ff91c4118
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor igammac(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & igammac_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & igammac_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & igammac_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace meta
+} // namespace at
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/instance_norm_native.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/instance_norm_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..c7a8c38227e92fecf7e5334feb04228974bcfc8d
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/instance_norm_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor instance_norm(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled);
+} // namespace native
+} // namespace at
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/int_repr_native.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/int_repr_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..2835cad8658c03a36792f905646a876c5eedc0ba
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/int_repr_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & int_repr_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor int_repr_quantized_cpu(const at::Tensor & self);
+TORCH_API at::Tensor int_repr_quantized_cuda(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/is_distributed_ops.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/is_distributed_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..f77c55550e1b5645a5540c3d6e077725ed92578c
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/is_distributed_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API is_distributed {
+  using schema = bool (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::is_distributed")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "is_distributed(Tensor self) -> bool")
+  static bool call(const at::Tensor & self);
+  static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/isnan_cpu_dispatch.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/isnan_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a99e27ace3e434034132c76bb96b962f81749a6f
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/isnan_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor isnan(const at::Tensor & self);
+
+} // namespace cpu
+} // namespace at
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_native.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..223aad6f7b37d034add411d904a49ebca57b8e43
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_native.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/isneginf_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_isneginf_out : public at::meta::structured_isneginf {
+void impl(const at::Tensor & self, const at::Tensor & out);
+};
+TORCH_API at::Tensor isneginf_sparse(const at::Tensor & self);
+TORCH_API at::Tensor & isneginf_sparse_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor isneginf_sparse_csr(const at::Tensor & self);
+TORCH_API at::Tensor & isneginf_sparse_csr_out(const at::Tensor & self, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_meta_dispatch.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b40d121d4e8baff929717269d4d145fab1f748cf
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor isposinf(const at::Tensor & self);
+TORCH_API at::Tensor & isposinf_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & isposinf_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/le_compositeexplicitautogradnonfunctional_dispatch.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/le_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..99b0f6716cedbd169834f261cafd4382d460d38b
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/le_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor le(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & le_(at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor le(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & le_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_native.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..d483fbf5b6b84db85f32883a2bb899c32915d63d
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/linalg_cholesky_ex_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_linalg_cholesky_ex_out : public at::meta::structured_linalg_cholesky_ex {
+void impl(const at::Tensor & self, bool upper, bool check_errors, const at::Tensor & L, const at::Tensor & info);
+};
+} // namespace native
+} // namespace at
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eig_cuda_dispatch.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eig_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f5b1d64d56ef10a989a2b7ebfb99dfedb104ceb3
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eig_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_eig(const at::Tensor & self);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_eig_out(at::Tensor & eigenvalues, at::Tensor & eigenvectors, const at::Tensor & self);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_eig_outf(const at::Tensor & self, at::Tensor & eigenvalues, at::Tensor & eigenvectors);
+
+} // namespace cuda
+} // namespace at
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigh_native.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigh_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..e6f4f3423d58435bef913eb243e5618540dab8de
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigh_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_eigh(const at::Tensor & self, c10::string_view UPLO="L");
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_eigh_out(const at::Tensor & self, c10::string_view UPLO, at::Tensor & eigvals, at::Tensor & eigvecs);
+} // namespace native
+} // namespace at
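// ---------------------------------------------------------------------------
// Editorial aside, not part of the diff: a sketch of the eig/eigh pair
// declared above. linalg_eig handles general square matrices and returns
// complex tensors; linalg_eigh assumes a Hermitian input and returns real
// eigenvalues.
#include <ATen/ATen.h>

int main() {
  at::Tensor m = at::rand({3, 3});
  auto [vals, vecs] = at::linalg_eig(m);             // complex-valued pair
  TORCH_CHECK(vals.is_complex() && vecs.is_complex());
  at::Tensor sym = m.matmul(m.t());                  // symmetric by construction
  auto [w, v] = at::linalg_eigh(sym, /*UPLO=*/"L");  // w is real
  TORCH_CHECK(!w.is_complex());
  return 0;
}
// ---------------------------------------------------------------------------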
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq.h
new file mode 100644
index 0000000000000000000000000000000000000000..075ac5dcbdf891c582ab88ad4536d9ed181c4d18
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/linalg_lstsq_ops.h>
+
+namespace at {
+
+
+// aten::linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> linalg_lstsq(const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond=c10::nullopt, c10::optional<c10::string_view> driver=c10::nullopt) {
+    return at::_ops::linalg_lstsq::call(self, b, rcond, driver);
+}
+
+// aten::linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> linalg_lstsq_out(at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values, const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond=c10::nullopt, c10::optional<c10::string_view> driver=c10::nullopt) {
+    return at::_ops::linalg_lstsq_out::call(self, b, rcond, driver, solution, residuals, rank, singular_values);
+}
+// aten::linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> linalg_lstsq_outf(const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond, c10::optional<c10::string_view> driver, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values) {
+    return at::_ops::linalg_lstsq_out::call(self, b, rcond, driver, solution, residuals, rank, singular_values);
+}
+
+}
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_native.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..641306ff680c0e24511b642a01ef3aaf9cadb6aa
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/logaddexp_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_logaddexp_out : public at::meta::structured_logaddexp {
+void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/logspace_native.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/logspace_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..bc0eb6bcaa7c042344e812ccae9ee120001ec548
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/logspace_native.h
@@ -0,0 +1,29 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor logspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base=10.0, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={});
+TORCH_API at::Tensor & logspace_out(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out);
+TORCH_API at::Tensor & logspace_cuda_out(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out);
+TORCH_API at::Tensor logspace(const at::Tensor & start, const at::Tensor & end, int64_t steps, double base=10.0, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={});
+TORCH_API at::Tensor & logspace_out(const at::Tensor & start, const at::Tensor & end, int64_t steps, double base, at::Tensor & out);
+TORCH_API at::Tensor logspace(const at::Tensor & start, const at::Scalar & end, int64_t steps, double base=10.0, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={});
+TORCH_API at::Tensor & logspace_out(const at::Tensor & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out);
+TORCH_API at::Tensor logspace(const at::Scalar & start, const at::Tensor & end, int64_t steps, double base=10.0, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={});
+TORCH_API at::Tensor & logspace_out(const at::Scalar & start, const at::Tensor & end, int64_t steps, double base, at::Tensor & out);
+} // namespace native
+} // namespace at
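// ---------------------------------------------------------------------------
// Editorial aside, not part of the diff: the four-output least-squares
// wrapper from linalg_lstsq.h above, sketched with the "gelsd" driver so
// that rank and singular values are actually populated.
#include <ATen/ATen.h>

int main() {
  at::Tensor A = at::rand({5, 3});
  at::Tensor rhs = at::rand({5, 1});
  auto [solution, residuals, rank, singular_values] =
      at::linalg_lstsq(A, rhs, /*rcond=*/c10::nullopt, /*driver=*/"gelsd");
  TORCH_CHECK(solution.sizes() == at::IntArrayRef({3, 1}));
  return 0;
}
// ---------------------------------------------------------------------------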
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/margin_ranking_loss_ops.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/margin_ranking_loss_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..1fa15f464916f595e51672dae099617e8501b511
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/margin_ranking_loss_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API margin_ranking_loss {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, double, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::margin_ranking_loss")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor")
+  static at::Tensor call(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction);
+};
+
+}} // namespace at::_ops
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/mode_ops.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/mode_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..81465a5a243e67536a7e6006df0072a63c5b520e
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/mode_ops.h
@@ -0,0 +1,61 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API mode {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, int64_t, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mode")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, int64_t dim, bool keepdim);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim);
+};
+
+struct TORCH_API mode_values {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, int64_t, bool, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mode")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "values")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)")
+  static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices);
+  static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices);
+};
+
+struct TORCH_API mode_dimname {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, at::Dimname, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mode")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dimname")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, at::Dimname dim, bool keepdim);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim);
+};
+
+struct TORCH_API mode_dimname_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, at::Dimname, bool, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mode")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dimname_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)")
+  static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices);
+  static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices);
+};
+
+}} // namespace at::_ops
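// ---------------------------------------------------------------------------
// Editorial aside, not part of the diff: mode has functional, out, dimname,
// and dimname_out overloads, all registered under "aten::mode" with distinct
// overload_name strings. A sketch of the functional form:
#include <ATen/ATen.h>
#include <vector>

int main() {
  std::vector<int64_t> data = {1, 2, 2, 3, 2, 1};
  at::Tensor t = at::tensor(at::ArrayRef<int64_t>(data));
  auto [values, indices] = at::mode(t, /*dim=*/-1, /*keepdim=*/false);
  TORCH_CHECK(values.item<int64_t>() == 2);  // 2 is the most frequent value
  return 0;
}
// ---------------------------------------------------------------------------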
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/nan_to_num_compositeexplicitautograd_dispatch.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/nan_to_num_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f9d12033fc87e73a6f6417df56bdf446093fe812
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/nan_to_num_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor nan_to_num(const at::Tensor & self, c10::optional<double> nan=c10::nullopt, c10::optional<double> posinf=c10::nullopt, c10::optional<double> neginf=c10::nullopt);
+TORCH_API at::Tensor & nan_to_num_(at::Tensor & self, c10::optional<double> nan=c10::nullopt, c10::optional<double> posinf=c10::nullopt, c10::optional<double> neginf=c10::nullopt);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/nanquantile.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/nanquantile.h
new file mode 100644
index 0000000000000000000000000000000000000000..8c8e3f818db432fcb8747b88645cab2f79658835
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/nanquantile.h
@@ -0,0 +1,53 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/nanquantile_ops.h>
+
+namespace at {
+
+
+// aten::nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
+inline at::Tensor nanquantile(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") {
+    return at::_ops::nanquantile::call(self, q, dim, keepdim, interpolation);
+}
+
+// aten::nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & nanquantile_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") {
+    return at::_ops::nanquantile_out::call(self, q, dim, keepdim, interpolation, out);
+}
+// aten::nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & nanquantile_outf(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
+    return at::_ops::nanquantile_out::call(self, q, dim, keepdim, interpolation, out);
+}
+
+// aten::nanquantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
+inline at::Tensor nanquantile(const at::Tensor & self, double q, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") {
+    return at::_ops::nanquantile_scalar::call(self, q, dim, keepdim, interpolation);
+}
+
+// aten::nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & nanquantile_out(at::Tensor & out, const at::Tensor & self, double q, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") {
+    return at::_ops::nanquantile_scalar_out::call(self, q, dim, keepdim, interpolation, out);
+}
+// aten::nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & nanquantile_outf(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
+    return at::_ops::nanquantile_scalar_out::call(self, q, dim, keepdim, interpolation, out);
+}
+
+}
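// ---------------------------------------------------------------------------
// Editorial aside, not part of the diff: nanquantile behaves like quantile
// but ignores NaNs; the Tensor-q and double-q overloads above forward to the
// same underlying op. A sketch:
#include <ATen/ATen.h>
#include <cmath>
#include <vector>

int main() {
  std::vector<double> data = {1.0, 2.0, std::nan(""), 4.0};
  at::Tensor x = at::tensor(at::ArrayRef<double>(data));
  at::Tensor med = at::nanquantile(x, /*q=*/0.5);  // median of {1, 2, 4}
  TORCH_CHECK(med.item<double>() == 2.0);
  return 0;
}
// ---------------------------------------------------------------------------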
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/new_empty_strided_ops.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/new_empty_strided_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..27087b6af4671a3c4082ddb2cd7d8675c0ab27c1
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/new_empty_strided_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API new_empty_strided {
+  using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::new_empty_strided")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+};
+
+struct TORCH_API new_empty_strided_out {
+  using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::new_empty_strided")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_backward_ops.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..aa950ae077158805edd176a52976b6f311364c9b
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API nll_loss_backward_grad_input {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, int64_t, c10::SymInt, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::nll_loss_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input);
+};
+
+struct TORCH_API nll_loss_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, int64_t, c10::SymInt, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::nll_loss_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight);
+};
+
+}} // namespace at::_ops
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_forward_cuda_dispatch.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_forward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0c8722a8f4acab65399e3b8c56441858ab707f01
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_forward_cuda_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> nll_loss_forward(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> nll_loss_forward_symint(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & output, at::Tensor & total_weight);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_symint_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_symint_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & output, at::Tensor & total_weight);
+
+} // namespace cuda
+} // namespace at
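// ---------------------------------------------------------------------------
// Editorial aside, not part of the diff: nll_loss_forward returns both the
// loss and the total_weight tensor that nll_loss_backward (declared above)
// consumes. A sketch with the dispatched public wrapper:
#include <ATen/ATen.h>
#include <vector>

int main() {
  at::Tensor logp = at::log_softmax(at::rand({4, 3}), /*dim=*/1);
  std::vector<int64_t> t = {0, 2, 1, 0};
  at::Tensor target = at::tensor(at::ArrayRef<int64_t>(t));
  auto [output, total_weight] = at::nll_loss_forward(
      logp, target, /*weight=*/{}, at::Reduction::Mean, /*ignore_index=*/-100);
  TORCH_CHECK(output.dim() == 0);  // scalar loss under Mean reduction
  return 0;
}
// ---------------------------------------------------------------------------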
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/pad_native.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/pad_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..1aeb86b0f356bf6cd6c3e6ad61d9e30d05846406
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/pad_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor pad_symint(const at::Tensor & self, c10::SymIntArrayRef pad, c10::string_view mode="constant", c10::optional<double> value=c10::nullopt);
+} // namespace native
+} // namespace at
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/pixel_unshuffle_native.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/pixel_unshuffle_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..c34055815665dc2590dbcacb5b196fb1f6e9295b
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/pixel_unshuffle_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & pixel_unshuffle_out(const at::Tensor & self, int64_t downscale_factor, at::Tensor & out);
+TORCH_API at::Tensor pixel_unshuffle_cpu(const at::Tensor & self, int64_t downscale_factor);
+TORCH_API at::Tensor math_pixel_unshuffle(const at::Tensor & self, int64_t downscale_factor);
+} // namespace native
+} // namespace at
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/q_per_channel_axis_native.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/q_per_channel_axis_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..8b053651d163059d12c1a1fd2ca8c8ef0c4b1fd3
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/q_per_channel_axis_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API int64_t q_per_channel_axis(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad2d_native.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad2d_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..b835ffabf6b50062b766847ec3363cb6b834063b
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad2d_native.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/replication_pad2d_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_replication_pad2d_out_cpu : public at::meta::structured_replication_pad2d {
+void impl(const at::Tensor & self, at::ArrayRef<int64_t> padding, const at::Tensor & out);
+};
+struct TORCH_API structured_replication_pad2d_out_cuda : public at::meta::structured_replication_pad2d {
+void impl(const at::Tensor & self, at::ArrayRef<int64_t> padding, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/rnn_relu_cell_ops.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/rnn_relu_cell_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..0bf099eb614d36598e6cb568a6a6440c5dcba927
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/rnn_relu_cell_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API rnn_relu_cell {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rnn_relu_cell")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh);
+};
+
+}} // namespace at::_ops
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/rrelu_with_noise_native.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/rrelu_with_noise_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..013e9cbc07e3dcfa0c2ca6acd8f6b3cbf34b6150
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/rrelu_with_noise_native.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor rrelu_with_noise_cpu(const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor & rrelu_with_noise_out_cpu(const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator, at::Tensor & out);
+TORCH_API at::Tensor & rrelu_with_noise_cpu_(at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor rrelu_with_noise_cuda(const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor & rrelu_with_noise_out_cuda(const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator, at::Tensor & out);
+TORCH_API at::Tensor & rrelu_with_noise_cuda_(at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, c10::optional<at::Generator> generator=c10::nullopt);
+} // namespace native
+} // namespace at
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/scalar_tensor_compositeexplicitautograd_dispatch.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/scalar_tensor_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7e34f4b2acfaff2944c5b8a6e25e77cb5f2eaefb
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/scalar_tensor_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor scalar_tensor(const at::Scalar & s, at::TensorOptions options={});
+TORCH_API at::Tensor scalar_tensor(const at::Scalar & s, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+TORCH_API at::Tensor & scalar_tensor_out(at::Tensor & out, const at::Scalar & s);
+TORCH_API at::Tensor & scalar_tensor_outf(const at::Scalar & s, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/searchsorted.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/searchsorted.h
new file mode 100644
index 0000000000000000000000000000000000000000..e76c6a393968c2c194c1a7380b5050d2b500cac5
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/searchsorted.h
@@ -0,0 +1,53 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/searchsorted_ops.h>
+
+namespace at {
+
+
+// aten::searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor
+inline at::Tensor searchsorted(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32=false, bool right=false, c10::optional<c10::string_view> side=c10::nullopt, const c10::optional<at::Tensor> & sorter={}) {
+    return at::_ops::searchsorted_Tensor::call(sorted_sequence, self, out_int32, right, side, sorter);
+}
+
+// aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & searchsorted_out(at::Tensor & out, const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32=false, bool right=false, c10::optional<c10::string_view> side=c10::nullopt, const c10::optional<at::Tensor> & sorter={}) {
+    return at::_ops::searchsorted_Tensor_out::call(sorted_sequence, self, out_int32, right, side, sorter, out);
+}
+// aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & searchsorted_outf(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter, at::Tensor & out) {
+    return at::_ops::searchsorted_Tensor_out::call(sorted_sequence, self, out_int32, right, side, sorter, out);
+}
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/searchsorted.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/searchsorted.h
new file mode 100644
index 0000000000000000000000000000000000000000..e76c6a393968c2c194c1a7380b5050d2b500cac5
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/searchsorted.h
@@ -0,0 +1,53 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/searchsorted_ops.h>
+
+namespace at {
+
+
+// aten::searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor
+inline at::Tensor searchsorted(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32=false, bool right=false, c10::optional<c10::string_view> side=c10::nullopt, const c10::optional<at::Tensor> & sorter={}) {
+    return at::_ops::searchsorted_Tensor::call(sorted_sequence, self, out_int32, right, side, sorter);
+}
+
+// aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & searchsorted_out(at::Tensor & out, const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32=false, bool right=false, c10::optional<c10::string_view> side=c10::nullopt, const c10::optional<at::Tensor> & sorter={}) {
+    return at::_ops::searchsorted_Tensor_out::call(sorted_sequence, self, out_int32, right, side, sorter, out);
+}
+// aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & searchsorted_outf(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter, at::Tensor & out) {
+    return at::_ops::searchsorted_Tensor_out::call(sorted_sequence, self, out_int32, right, side, sorter, out);
+}
+
+// aten::searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor
+inline at::Tensor searchsorted(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32=false, bool right=false, c10::optional<c10::string_view> side=c10::nullopt, const c10::optional<at::Tensor> & sorter={}) {
+    return at::_ops::searchsorted_Scalar::call(sorted_sequence, self, out_int32, right, side, sorter);
+}
+
+// aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & searchsorted_out(at::Tensor & out, const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32=false, bool right=false, c10::optional<c10::string_view> side=c10::nullopt, const c10::optional<at::Tensor> & sorter={}) {
+    return at::_ops::searchsorted_Scalar_out::call(sorted_sequence, self, out_int32, right, side, sorter, out);
+}
+// aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & searchsorted_outf(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter, at::Tensor & out) {
+    return at::_ops::searchsorted_Scalar_out::call(sorted_sequence, self, out_int32, right, side, sorter, out);
+}
+
+}
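The schema comments above document the user-facing API: `searchsorted` returns, for each query value, the index at which it could be inserted into `sorted_sequence` without breaking the ordering. A short usage sketch (expected outputs noted in comments, assuming a standard libtorch build):

    #include <torch/torch.h>
    #include <iostream>

    int main() {
      auto boundaries = torch::tensor({1, 3, 5, 7, 9});
      auto values     = torch::tensor({3, 6, 9});

      // Default (right=false): leftmost valid insertion points -> [1, 3, 4]
      auto idx_left  = torch::searchsorted(boundaries, values);
      // right=true: insert after any equal entries -> [2, 3, 5]
      auto idx_right = torch::searchsorted(boundaries, values,
                                           /*out_int32=*/false, /*right=*/true);

      std::cout << idx_left << "\n" << idx_right << "\n";
    }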
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/select_scatter_compositeexplicitautogradnonfunctional_dispatch.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/select_scatter_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f428a1d5c816f147904bef6bd61780ad852bce10
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/select_scatter_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor select_scatter(const at::Tensor & self, const at::Tensor & src, int64_t dim, int64_t index);
+TORCH_API at::Tensor select_scatter_symint(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/sin_compositeexplicitautogradnonfunctional_dispatch.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/sin_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..996e28f921ece8d52101b984686e1b6745694e3b
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/sin_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor sin(const at::Tensor & self);
+TORCH_API at::Tensor & sin_(at::Tensor & self);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
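These per-dispatch-key headers expose direct entry points for a single key; everyday code calls the public functions and lets the dispatcher pick the kernel. Of the two ops above, `select_scatter` is the less obvious one: it is the functional counterpart of `select`, returning a copy of the input with a slice replaced. A brief sketch via the public API:

    #include <torch/torch.h>
    #include <iostream>

    int main() {
      auto base = torch::zeros({3, 4});
      auto row  = torch::ones({4});

      // Returns a copy of `base` with `row` written where
      // base.select(/*dim=*/0, /*index=*/1) would read; `base` is untouched.
      auto result = torch::select_scatter(base, row, /*dim=*/0, /*index=*/1);
      std::cout << result << "\n";  // middle row is all ones
    }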
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_dilated2d_compositeexplicitautograd_dispatch.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_dilated2d_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..794f27dccaf5d7d2e32d0b50d36db00fedbdaf3d
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_dilated2d_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & slow_conv_dilated2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1);
+TORCH_API at::Tensor & slow_conv_dilated2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out);
+TORCH_API at::Tensor & slow_conv_dilated2d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1));
+TORCH_API at::Tensor & slow_conv_dilated2d_symint_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/soft_margin_loss_backward_compositeexplicitautograd_dispatch.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/soft_margin_loss_backward_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d77cc0b1cf4783b5ff658b0c96f383f0c6e7f9d8
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/soft_margin_loss_backward_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor soft_margin_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction);
+TORCH_API at::Tensor & soft_margin_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction);
+TORCH_API at::Tensor & soft_margin_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1_meta_dispatch.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8cb59a021a937603b3f311fef6060131decd0afe
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor special_i1(const at::Tensor & self);
+TORCH_API at::Tensor & special_i1_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & special_i1_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
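The `at::meta::` kernels above compute only output metadata (shape, dtype, strides) without touching data, which is what tensors on the "meta" device dispatch to. A hedged sketch of shape inference through that path, assuming a recent libtorch where `special_i1` and the meta device are both available:

    #include <torch/torch.h>
    #include <iostream>

    int main() {
      // A meta tensor has shape and dtype but no storage, so this
      // "computes" through at::meta::special_i1 without doing any math.
      auto probe = torch::empty({1024, 1024}, torch::kMeta);
      auto out   = torch::special_i1(probe);

      std::cout << out.sizes() << " on " << out.device() << "\n";  // [1024, 1024] on meta
    }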
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/special_logit_ops.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/special_logit_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..4659804730f7954942d888e09f73a2d19c605a3e
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/special_logit_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API special_logit {
+  using schema = at::Tensor (const at::Tensor &, c10::optional<double>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_logit")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_logit(Tensor self, float? eps=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, c10::optional<double> eps);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<double> eps);
+};
+
+struct TORCH_API special_logit_out {
+  using schema = at::Tensor & (const at::Tensor &, c10::optional<double>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_logit")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, c10::optional<double> eps, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<double> eps, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k1_cuda_dispatch.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k1_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e40ed94c353a457eedcfcc5cc2b77ecc87cfbf19
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k1_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor special_scaled_modified_bessel_k1(const at::Tensor & x);
+TORCH_API at::Tensor & special_scaled_modified_bessel_k1_out(at::Tensor & out, const at::Tensor & x);
+TORCH_API at::Tensor & special_scaled_modified_bessel_k1_outf(const at::Tensor & x, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/sub_native.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/sub_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..d7b8ebad92e1d9b43c3902295e07ec903cf5f0c5
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/sub_native.h
@@ -0,0 +1,31 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/sub_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_sub_out : public at::meta::structured_sub_Tensor {
+void impl(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, const at::Tensor & out);
+};
+TORCH_API at::Tensor NestedTensor_sub_Tensor(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor sub_sparse(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & sub_out_sparse(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out);
+TORCH_API at::Tensor & sub_sparse_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor sub_zerotensor(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor sub(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & sub_Scalar_out(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out);
+TORCH_API at::Tensor & sub_(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1);
+} // namespace native
+} // namespace at
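`sub` is a structured op: the shared meta function (inherited via `at::meta::structured_sub_Tensor`) validates shapes and sets up the output, and a backend `impl` like `structured_sub_out` above only fills it in. The `alpha` argument scales the subtrahend, which is easy to miss; a short sketch via the public API:

    #include <torch/torch.h>
    #include <iostream>

    int main() {
      auto a = torch::tensor({10.0, 20.0, 30.0});
      auto b = torch::tensor({ 1.0,  2.0,  3.0});

      // Computes a - alpha * b elementwise.
      auto c = torch::sub(a, b, /*alpha=*/2);
      std::cout << c << "\n";  // 8, 16, 24
    }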
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/transpose_compositeexplicitautograd_dispatch.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/transpose_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..cc30daf79f5429d8e8f7bcaa1bb562b08d7f484d
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/transpose_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor transpose(const at::Tensor & self, int64_t dim0, int64_t dim1);
+TORCH_API at::Tensor & transpose_(at::Tensor & self, int64_t dim0, int64_t dim1);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/unsafe_split_with_sizes_compositeexplicitautograd_dispatch.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/unsafe_split_with_sizes_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6f7c9db51ef65c0c349c512fa6c77e2c74d68ed1
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/unsafe_split_with_sizes_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::vector<at::Tensor> unsafe_split_with_sizes(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0);
+TORCH_API ::std::vector<at::Tensor> unsafe_split_with_sizes_symint(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim=0);
+TORCH_API void unsafe_split_with_sizes_out(at::TensorList out, const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0);
+TORCH_API void unsafe_split_with_sizes_outf(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim, at::TensorList out);
+TORCH_API void unsafe_split_with_sizes_symint_out(at::TensorList out, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim=0);
+TORCH_API void unsafe_split_with_sizes_symint_outf(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
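The `unsafe_` prefix marks internal variants with weaker aliasing guarantees; application code normally uses the public `split_with_sizes`, which shares the same contract that the requested sizes must sum to the length of the split dimension. A quick sketch:

    #include <torch/torch.h>
    #include <iostream>

    int main() {
      auto t = torch::arange(10);

      // 2 + 3 + 5 == 10, so the split is valid; one tensor per chunk.
      std::vector<torch::Tensor> parts =
          torch::split_with_sizes(t, {2, 3, 5}, /*dim=*/0);
      for (const auto& p : parts) std::cout << p.sizes() << "\n";  // [2], [3], [5]
    }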
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d_backward_native.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..0a632a6a45aa97db338f13afb38cd7a121f894b8
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d_backward_native.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/upsample_linear1d_backward_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_upsample_linear1d_backward_out_cpu : public at::meta::structured_upsample_linear1d_backward {
+void impl(const at::Tensor & grad_output, at::ArrayRef<c10::SymInt> output_size, at::ArrayRef<c10::SymInt> input_size, bool align_corners, c10::optional<double> scales, const at::Tensor & grad_input);
+};
+struct TORCH_API structured_upsample_linear1d_backward_out_cuda : public at::meta::structured_upsample_linear1d_backward {
+void impl(const at::Tensor & grad_output, at::ArrayRef<c10::SymInt> output_size, at::ArrayRef<c10::SymInt> input_size, bool align_corners, c10::optional<double> scales, const at::Tensor & grad_input);
+};
+} // namespace native
+} // namespace at
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d_cpu_dispatch.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..5f19040e26759ba35cea821c43b6755ae597717f
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d_cpu_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor upsample_trilinear3d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor upsample_trilinear3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & upsample_trilinear3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & upsample_trilinear3d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
+TORCH_API at::Tensor & upsample_trilinear3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & upsample_trilinear3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
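As with several headers above, every size-taking overload is generated twice: once over `at::IntArrayRef` for concrete shapes and once over `c10::SymIntArrayRef` (`_symint`) so tracing and compilation can carry symbolic sizes through the same op. Eager calls with literal sizes take the concrete path, as in this sketch:

    #include <torch/torch.h>
    #include <iostream>

    int main() {
      // NCDHW input for trilinear 3-D upsampling.
      auto vol = torch::randn({1, 1, 4, 4, 4});

      // Literal sizes select the IntArrayRef overload; under torch.compile-style
      // tracing the _symint twin would receive symbolic sizes instead.
      auto up = torch::upsample_trilinear3d(vol, /*output_size=*/{8, 8, 8},
                                            /*align_corners=*/false);
      std::cout << up.sizes() << "\n";  // [1, 1, 8, 8, 8]
    }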
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/view_meta_dispatch.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/view_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a03452aa9fd2a975250c202e431fe0f21012f286
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/view_meta_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor view(const at::Tensor & self, at::IntArrayRef size);
+TORCH_API at::Tensor view_symint(const at::Tensor & self, c10::SymIntArrayRef size);
+
+} // namespace meta
+} // namespace at
diff --git a/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/xor.h b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/xor.h
new file mode 100644
index 0000000000000000000000000000000000000000..8777dca7c7102f2af61610bd90eace68443e8c87
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/torch/include/ATen/ops/xor.h
@@ -0,0 +1,35 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/xor_ops.h>
+
+namespace at {
+
+
+// aten::__xor__.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor __xor__(const at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::__xor___Scalar::call(self, other);
+}
+
+// aten::__xor__.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor __xor__(const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::__xor___Tensor::call(self, other);
+}
+
+}
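The `__xor__` overloads above are what `Tensor::operator^` lowers to: the `Tensor` overload handles tensor-tensor xor and the `Scalar` overload handles tensor-integer xor. A brief sketch on integral tensors, where bitwise xor is defined:

    #include <torch/torch.h>
    #include <iostream>

    int main() {
      auto a = torch::tensor({0b1010, 0b1100});
      auto b = torch::tensor({0b0110, 0b1010});

      // Tensor ^ Tensor -> aten::__xor__.Tensor; elementwise bitwise xor.
      auto c = a ^ b;       // {0b1100, 0b0110}
      // Tensor ^ int -> aten::__xor__.Scalar.
      auto d = a ^ 0b1111;
      std::cout << c << "\n" << d << "\n";
    }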